camp_zipnerf/internal/utils.py

# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent.futures
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize a NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
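# Illustrative usage sketch (not part of the original module): `dummy_rays`
# returns a Rays batch of 100 rays, so every populated array field shares the
# leading dimension of 100, as the Rays docstring requires.
#
#   rays = dummy_rays()
#   rays.origins.shape     # (100, 3)
#   rays.radii.shape       # (100, 1)
#   rays.cam_idx.shape     # (100, 1)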
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
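# Illustrative round-trip sketch (assumes 2 local devices, which is not
# guaranteed on every host): `unshard` undoes `shard`, and `padding` strips
# rows that were appended to make the batch divisible by the device count.
#
#   x = np.arange(12 * 3).reshape(12, 3)     # 10 real rows + 2 padding rows
#   sharded = shard(x)                       # shape (2, 6, 3) with 2 devices
#   restored = unshard(sharded, padding=2)   # shape (10, 3)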
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
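# Illustrative shape check (not part of the original module): a step function
# with K intervals has K + 1 edges and K values, while a linear spline pairs
# each knot with one value.
#
#   t_step = jnp.array([0.0, 1.0, 2.0, 3.0])   # 4 edges
#   y_step = jnp.array([5.0, 6.0, 7.0])        # 3 values -> assert_valid_stepfun passes
#   t_lin = jnp.array([0.0, 1.0, 2.0])
#   y_lin = jnp.array([5.0, 6.0, 7.0])         # matching lengths -> assert_valid_linspline passes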
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
      # Mark populating_data as a variable defined outside of thread_fn.
      # Otherwise, the assignment in the `finally` block below would create a
      # local variable instead of updating the shared flag.
nonlocal populating_data
      try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
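# Illustrative usage sketch (the `load_batches`, `expensive_load`, and
# `train_step` names below are hypothetical): the decorated generator runs in
# a worker thread and prefetches up to `queue_size` items ahead of the
# consumer, and any exception from the worker is re-raised once iteration
# finishes.
#
#   @iterate_in_separate_thread(queue_size=3)
#   def load_batches():
#     for i in range(10):
#       yield expensive_load(i)
#
#   for batch in load_batches():
#     train_step(batch)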
<fim_middle> | null | TRY | complete_current_header_empty_completion |
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
t<fim_suffix>ry:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle> | null | TRY | complete_current_header_empty_completion |
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try<fim_suffix>:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle> | null | TRY | complete_current_header_empty_completion |
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
t<fim_suffix>ry:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle> | null | TRY | complete_current_header_empty_completion |
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
t<fim_suffix>ry:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle> | null | TRY | complete_current_header_empty_completion |
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
t<fim_suffix>ry:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle> | null | TRY | complete_current_header_empty_completion |
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
whi<fim_suffix>le True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle> | null | WHILE | complete_current_header_empty_completion |
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
        # Mark populating_data as a variable that's outside of thread_fn.
        # Otherwise, `populating_data = False` below would create a local variable.
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while Tru<fim_suffix>e:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle> | null | WHILE | complete_current_header_empty_completion |
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
        # Mark populating_data as a variable that's outside of thread_fn.
        # Otherwise, `populating_data = False` below would create a local variable.
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while Tru<fim_suffix>e:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle> | null | WHILE | complete_current_header_empty_completion |
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
        # Mark populating_data as a variable that's outside of thread_fn.
        # Otherwise, `populating_data = False` below would create a local variable.
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except qu<fim_suffix>eue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
        # Mark populating_data as a variable that's outside of thread_fn.
        # Otherwise, `populating_data = False` below would create a local variable.
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty<fim_suffix>:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
        # Mark populating_data as a variable that's outside of thread_fn.
        # Otherwise, `populating_data = False` below would create a local variable.
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except q<fim_suffix>ueue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
<filename>camp_zipnerf/internal/math.py<fim_prefix># coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mathy utility functions."""
import functools
import jax
import jax.numpy as jnp
import numpy as np
tiny_val = np.float32(np.finfo(np.float32).tiny)
min_val = np.float32(np.finfo(np.float32).min)
max_val = np.float32(np.finfo(np.float32).max)
def laplace_cdf(x, beta):
alpha = 1 / beta
return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1))
def scaled_softplus(x, scale=100.0):
return (1.0 / scale) * jax.nn.softplus(scale * x)
def matmul(a, b):
"""jnp.matmul defaults to bfloat16, but this helper function doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
def unstack(x, axis=0):
return tuple(
jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis)
)
@jax.custom_jvp
def plus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf)
)
@jax.custom_jvp
def minus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf)
)
@plus_eps.defjvp
def plus_eps_jvp(primals, tangents):
"""Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return plus_eps(*primals), tangents[0]
@minus_eps.defjvp
def minus_eps_jvp(primals, tangents):
"""Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return minus_eps(*primals), tangents[0]
@jax.custom_jvp
def expm1(x):
"""jnp.expm1() has inaccurate gradients when x << 0, this doesn't."""
return jnp.expm1(x)
@expm1.defjvp
def expm1_jvp(primals, tangents):
return expm1(*primals), tangents[0] * jnp.exp(primals[0])
def safe_trig_helper(x, fn, t=100 * jnp.pi):
"""Helper function used by safe_cos/safe_sin: mods x before sin()/cos()."""
return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t)))
def safe_cos(x):
"""jnp.cos() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.cos)
def safe_sin(x):
"""jnp.sin() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.sin)
@jax.custom_vjp
def safe_arctan2(x1, x2):
return safe_arctan2_fwd(x1, x2)[0]
def safe_arctan2_fwd(x1, x2):
return jnp.arctan2(x1, x2), (x1, x2)
def safe_arctan2_bwd(res, g):
x1, x2 = res
denom = remove_zero(x1**2 + x2**2)
d1 = g * (x2 / denom)
d2 = g * (-x1 / denom)
return d1, d2
safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd)
def generate_clip_nograd_fn(a_min, a_max):
"""Generates a function that clips to [a_min, a_max] with no grad effects."""
@jax.custom_jvp
def clip_nograd(a):
"""Clamps `a` from above and below."""
return jnp.clip(a, a_min, a_max)
@clip_nograd.defjvp
def clip_nograd_jvp(primals, tangents):
"""Override clips()'s gradient to be a no-op."""
return clip_nograd(primals[0]), tangents[0]
return clip_nograd
clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val)
clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val)
def clip_pos(x):
"""Clamps `x` from below to be positive."""
return jnp.maximum(tiny_val, x)
def safe_sign(x):
"""jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0."""
return jnp.where(x < 0, -1, +1)
def remove_zero(x):
"""Shifts `x` away from 0."""
return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x)
def clip_finite(x):
return jnp.clip(x, min_val, max_val)
@jax.custom_vjp
def safe_div(n, d):
"""Divide `n` by `d` but the value and gradient never nan out."""
return safe_div_fwd(n, d)[0]
def safe_div_fwd(n, d):
r = jnp.clip(n / remove_zero(d), min_val, max_val)
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r)
def safe_div_bwd(res, g):
d, r = res
dn = jnp.clip(g / remove_zero(d), min_val, max_val)
dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val)
return dn, dd
safe_div.defvjp(safe_div_fwd, safe_div_bwd)
def generate_safe_fn(fn, grad_fn, x_range):
"""Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes."""
@jax.custom_<fim_suffix>jvp
def safe_fn(x):
"""fn() with clipped inputs."""
return fn(jnp.clip(x, *x_range))
@safe_fn.defjvp
def safe_fn_jvp(primals, tangents):
"""Backpropagate using the gradient and clipped inputs."""
(x,) = primals
(x_dot,) = tangents
y = safe_fn(x)
y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot)
return y, y_dot
return safe_fn
# These safe_* functions need to be wrapped in no-op function definitions for
# gin to recognize them, otherwise they could just be calls to generate_safe_fn.
def safe_log(x):
return generate_safe_fn(
jnp.log,
lambda x, _, x_dot: x_dot / x,
(tiny_val, max_val),
)(x)
def safe_exp(x):
return generate_safe_fn(
jnp.exp,
lambda _, y, x_dot: y * x_dot,
(min_val, np.nextafter(np.log(max_val), np.float32(0))),
)(x)
def safe_sqrt(x):
return generate_safe_fn(
jnp.sqrt,
lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)),
(0, max_val),
)(x)
def safe_log1p(x):
return generate_safe_fn(
jnp.log1p,
lambda x, _, x_dot: x_dot / (1 + x),
(np.nextafter(np.float32(-1), np.float32(0)), max_val),
)(x)
def safe_expm1(x):
return generate_safe_fn(
expm1, # Note that we wrap around our more accurate expm1.
lambda x, _, x_dot: jnp.exp(x) * x_dot,
(min_val, np.nextafter(np.log1p(max_val), np.float32(0))),
)(x)
def safe_arccos(x):
"""jnp.arccos(x) where x is clipped to [-1, 1]."""
y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1)))
return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y))
def apply_fn_to_grad(grad_fn):
"""Applies a scalar `grad_fn` function to the gradient of the input."""
@jax.custom_vjp
def fn_out(x):
return x
fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),))
return fn_out
def select(cond_pairs, default):
"""A helpful wrapper around jnp.select() that is easier to read."""
return jnp.select(*zip(*cond_pairs), default)
def power_ladder_max_output(p):
"""The limit of power_ladder(x, p) as x goes to infinity."""
return select(
[
(p == -jnp.inf, 1),
(p >= 0, jnp.inf),
],
safe_div(p - 1, p),
)
def power_ladder(x, p, premult=None, postmult=None):
"""Tukey's power ladder, with a +1 on x, some scaling, and special cases."""
# Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1)
if premult is not None:
x = x * premult
xp = jnp.abs(x)
xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1))
p_safe = clip_finite_nograd(remove_zero(p))
y = safe_sign(x) * select(
[
(p == 1, xp),
(p == 0, safe_log1p(xp)),
(p == -jnp.inf, -safe_expm1(-xp)),
(p == jnp.inf, safe_expm1(xp)),
],
clip_finite_nograd(
jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1)
),
)
if postmult is not None:
y = y * postmult
return y
def inv_power_ladder(y, p, premult=None, postmult=None):
"""The inverse of `power_ladder()`."""
if postmult is not None:
y /= postmult
yp = jnp.abs(y)
p_safe = clip_finite_nograd(remove_zero(p))
y_max = minus_eps(power_ladder_max_output(p))
yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad.
x = safe_sign(y) * select(
[
(p == 1, yp),
(p == 0, safe_expm1(yp)),
(p == -jnp.inf, -safe_log1p(-yp)),
(p == jnp.inf, safe_log1p(yp)),
],
jnp.abs(p_safe - 1)
* (
((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1
),
)
if premult is not None:
x /= premult
return x
def log_lerp(t, v0, v1):
"""Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
if v0 <= 0 or v1 <= 0:
raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
lv0 = jnp.log(v0)
lv1 = jnp.log(v1)
return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)
def approx_erf(x):
"""An approximation of erf() that is accurate to within 0.007."""
return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2))
def create_learning_rate_decay(**kwargs):
"""A partial evaluation of learning rate decay that can be used with gin."""
return functools.partial(learning_rate_decay, **kwargs)
def learning_rate_decay(
step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1
):
"""Continuous learning rate decay function.
The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
is log-linearly interpolated elsewhere (equivalent to exponential decay).
If lr_delay_steps>0 then the learning rate will be scaled by some smooth
function of lr_delay_mult, such that the initial learning rate is
lr_init*lr_delay_mult at the beginning of optimization but will be eased back
to the normal learning rate when steps>lr_delay_steps.
Args:
step: int, the current optimization step.
lr_init: float, the initial learning rate.
lr_final: float, the final learning rate.
max_steps: int, the number of steps during optimization.
lr_delay_steps: int, the number of steps to delay the full learning rate.
lr_delay_mult: float, the multiplier on the rate when delaying it.
Returns:
    lr: the learning rate for the current step 'step'.
"""
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1)
)
else:
delay_rate = 1.0
return delay_rate * log_lerp(step / max_steps, lr_init, lr_final)
def sorted_lookup(x, xp, fps, device_is_tpu):
"""Lookup `x` into locations `xp` , return indices and each `[fp]` value."""
if not isinstance(fps, tuple):
raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.')
if device_is_tpu:
# Identify the location in `xp` that corresponds to each `x`.
# The final `True` index in `mask` is the start of the matching interval.
mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None]
def find_interval(x):
# Grab the value where `mask` switches from True to False, and vice versa.
# This approach takes advantage of the fact that `x` is sorted.
x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
return x0, x1
idx0, idx1 = find_interval(jnp.arange(xp.shape[-1]))
vals = [find_interval(fp) for fp in fps]
else:
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
lambda a, v: jnp.searchsorted(a, v, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
idx0 = jnp.maximum(idx - 1, 0)
vals = []
for fp in fps:
fp0 = jnp.take_along_axis(fp, idx0, axis=-1)
fp1 = jnp.take_along_axis(fp, idx1, axis=-1)
vals.append((fp0, fp1))
return (idx0, idx1), vals
def sorted_interp(
x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2
):
"""A version of interp() where xp and fp must be sorted."""
(xp0, xp1), (fp0, fp1) = sorted_lookup(
x, xp, (xp, fp), device_is_tpu=device_is_tpu
)[1]
offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1)
ret = fp0 + offset * (fp1 - fp0)
return ret
def searchsorted(a, v, device_is_tpu):
"""Behaves like jnp.searchsorted, excluding boundary conditions."""
return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0]
def override_gradient(fval, bval):
"""Use `fval` in the forward pass but `bval` in the backward pass."""
# Note that the parentheses are needed to avoid catastrophic cancellation.
return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval))
def average_across_multisamples(x):
"""Function that averages grid query results across the multisample dimension."""
return jnp.mean(x, axis=-2)
def noop(x):
return x
@jax.custom_jvp
def fake_clip(a, a_min, a_max):
"""jnp.clip() but the gradient doesn't get clipped on the backward pass."""
return jnp.clip(a, a_min, a_max)
@fake_clip.defjvp
def fake_clip_jvp(primals, tangents):
"""Override fake_clip()'s gradient so that it's a no-op."""
return jnp.clip(*primals), tangents[0]
@jax.jit
def general_lossfun(x, alpha, scale):
r"""This implements the rho(x, \alpha, c) function described in "A General and
Adaptive Robust Loss Function", Jonathan T. Barron,
https://arxiv.org/abs/1701.03077.
Args:
x: The residual for which the loss is being computed. x can have any shape,
and alpha and scale will be broadcasted to match x's shape if necessary.
alpha: The shape parameter of the loss (\alpha in the paper), where more
negative values produce a loss with more robust behavior (outliers "cost"
less), and more positive values produce a loss with less robust behavior
(outliers are penalized more heavily). Alpha can be any value in
[-infinity, infinity], but the gradient of the loss with respect to alpha
is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
interpolation between several discrete robust losses:
alpha=-Infinity: Welsch/Leclerc Loss.
alpha=-2: Geman-McClure loss.
        alpha=0: Cauchy/Lorentzian loss.
alpha=1: Charbonnier/pseudo-Huber loss.
alpha=2: L2 loss.
scale: The scale parameter of the loss. When |x| < scale, the loss is an
L2-like quadratic bowl, and when |x| > scale the loss function takes on a
different shape according to alpha.
Returns:
The losses for each element of x, in the same shape as x.
"""
eps = jnp.finfo(jnp.float32).eps
maxval = 1e15
# A "safe" versions of expm1 that will not NaN-out on large inputs.
expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43))
# `scale` must be > 0.
scale = jnp.maximum(eps, scale)
# Large values of |x| can cause non-finite gradients.
x = fake_clip(x, -maxval, maxval)
# The loss when alpha == 2. This will get reused repeatedly.
loss_two = 0.5 * (x / scale)**2
# Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
a = jnp.where(alpha >= 0, jnp.ones_like(alpha),
-jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))
# Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
b = jnp.maximum(eps, jnp.abs(a - 2))
  # The loss when not in one of the special cases.
loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1)
# Select which of the cases of the loss to return as a function of alpha.
return jnp.where(
alpha == -jnp.inf, -expm1_safe(-loss_two),
jnp.where(
alpha == 0, jnp.log1p(loss_two),
jnp.where(alpha == 2, loss_two,
jnp.where(alpha == jnp.inf, expm1_safe(loss_two),
loss_ow))))
<fim_middle> | null | ANNOTATION | complete_current_header_empty_completion |
<filename>camp_zipnerf/internal/math.py<fim_prefix># coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mathy utility functions."""
import functools
import jax
import jax.numpy as jnp
import numpy as np
tiny_val = np.float32(np.finfo(np.float32).tiny)
min_val = np.float32(np.finfo(np.float32).min)
max_val = np.float32(np.finfo(np.float32).max)
def laplace_cdf(x, beta):
alpha = 1 / beta
return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1))
def scaled_softplus(x, scale=100.0):
return (1.0 / scale) * jax.nn.softplus(scale * x)
def matmul(a, b):
"""jnp.matmul defaults to bfloat16, but this helper function doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
def unstack(x, axis=0):
return tuple(
jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis)
)
@jax.custom_jvp
def plus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf)
)
@jax.custom_jvp
def minus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf)
)
@plus_eps.defjvp
def plus_eps_jvp(primals, tangents):
"""Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return plus_eps(*primals), tangents[0]
@minus_eps.defjvp
def minus_eps_jvp(primals, tangents):
"""Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return minus_eps(*primals), tangents[0]
@jax.custom_jvp
def expm1(x):
"""jnp.expm1() has inaccurate gradients when x << 0, this doesn't."""
return jnp.expm1(x)
@expm1.defjvp
def expm1_jvp(primals, tangents):
return expm1(*primals), tangents[0] * jnp.exp(primals[0])
def safe_trig_helper(x, fn, t=100 * jnp.pi):
"""Helper function used by safe_cos/safe_sin: mods x before sin()/cos()."""
return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t)))
def safe_cos(x):
"""jnp.cos() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.cos)
def safe_sin(x):
"""jnp.sin() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.sin)
@jax.custom_vjp
def safe_arctan2(x1, x2):
return safe_arctan2_fwd(x1, x2)[0]
def safe_arctan2_fwd(x1, x2):
return jnp.arctan2(x1, x2), (x1, x2)
def safe_arctan2_bwd(res, g):
x1, x2 = res
denom = remove_zero(x1**2 + x2**2)
d1 = g * (x2 / denom)
d2 = g * (-x1 / denom)
return d1, d2
safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd)
def generate_clip_nograd_fn(a_min, a_max):
"""Generates a function that clips to [a_min, a_max] with no grad effects."""
@jax.custom_jvp
def clip_nograd(a):
"""Clamps `a` from above and below."""
return jnp.clip(a, a_min, a_max)
@clip_nograd.defjvp
def clip_nograd_jvp(primals, tangents):
"""Override clips()'s gradient to be a no-op."""
return clip_nograd(primals[0]), tangents[0]
return clip_nograd
clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val)
clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val)
def clip_pos(x):
"""Clamps `x` from below to be positive."""
return jnp.maximum(tiny_val, x)
def safe_sign(x):
"""jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0."""
return jnp.where(x < 0, -1, +1)
def remove_zero(x):
"""Shifts `x` away from 0."""
return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x)
def clip_finite(x):
return jnp.clip(x, min_val, max_val)
@jax.custom_vjp
def safe_div(n, d):
"""Divide `n` by `d` but the value and gradient never nan out."""
return safe_div_fwd(n, d)[0]
def safe_div_fwd(n, d):
r = jnp.clip(n / remove_zero(d), min_val, max_val)
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r)
def safe_div_bwd(res, g):
d, r = res
dn = jnp.clip(g / remove_zero(d), min_val, max_val)
dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val)
return dn, dd
safe_div.defvjp(safe_div_fwd, safe_div_bwd)
def generate_safe_fn(fn, grad_fn, x_range):
"""Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes."""
@jax.custom_jvp
def safe_fn(x):
"""fn() with clipped inputs."""
return fn(jnp.clip(x, *x_range))
@safe<fim_suffix>_fn.defjvp
def safe_fn_jvp(primals, tangents):
"""Backpropagate using the gradient and clipped inputs."""
(x,) = primals
(x_dot,) = tangents
y = safe_fn(x)
y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot)
return y, y_dot
return safe_fn
# These safe_* functions need to be wrapped in no-op function definitions for
# gin to recognize them, otherwise they could just be calls to generate_safe_fn.
def safe_log(x):
return generate_safe_fn(
jnp.log,
lambda x, _, x_dot: x_dot / x,
(tiny_val, max_val),
)(x)
def safe_exp(x):
return generate_safe_fn(
jnp.exp,
lambda _, y, x_dot: y * x_dot,
(min_val, np.nextafter(np.log(max_val), np.float32(0))),
)(x)
def safe_sqrt(x):
return generate_safe_fn(
jnp.sqrt,
lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)),
(0, max_val),
)(x)
def safe_log1p(x):
return generate_safe_fn(
jnp.log1p,
lambda x, _, x_dot: x_dot / (1 + x),
(np.nextafter(np.float32(-1), np.float32(0)), max_val),
)(x)
def safe_expm1(x):
return generate_safe_fn(
expm1, # Note that we wrap around our more accurate expm1.
lambda x, _, x_dot: jnp.exp(x) * x_dot,
(min_val, np.nextafter(np.log1p(max_val), np.float32(0))),
)(x)
def safe_arccos(x):
"""jnp.arccos(x) where x is clipped to [-1, 1]."""
y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1)))
return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y))
def apply_fn_to_grad(grad_fn):
"""Applies a scalar `grad_fn` function to the gradient of the input."""
@jax.custom_vjp
def fn_out(x):
return x
fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),))
return fn_out
def select(cond_pairs, default):
"""A helpful wrapper around jnp.select() that is easier to read."""
return jnp.select(*zip(*cond_pairs), default)
def power_ladder_max_output(p):
"""The limit of power_ladder(x, p) as x goes to infinity."""
return select(
[
(p == -jnp.inf, 1),
(p >= 0, jnp.inf),
],
safe_div(p - 1, p),
)
def power_ladder(x, p, premult=None, postmult=None):
"""Tukey's power ladder, with a +1 on x, some scaling, and special cases."""
# Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1)
if premult is not None:
x = x * premult
xp = jnp.abs(x)
xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1))
p_safe = clip_finite_nograd(remove_zero(p))
y = safe_sign(x) * select(
[
(p == 1, xp),
(p == 0, safe_log1p(xp)),
(p == -jnp.inf, -safe_expm1(-xp)),
(p == jnp.inf, safe_expm1(xp)),
],
clip_finite_nograd(
jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1)
),
)
if postmult is not None:
y = y * postmult
return y
def inv_power_ladder(y, p, premult=None, postmult=None):
"""The inverse of `power_ladder()`."""
if postmult is not None:
y /= postmult
yp = jnp.abs(y)
p_safe = clip_finite_nograd(remove_zero(p))
y_max = minus_eps(power_ladder_max_output(p))
yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad.
x = safe_sign(y) * select(
[
(p == 1, yp),
(p == 0, safe_expm1(yp)),
(p == -jnp.inf, -safe_log1p(-yp)),
(p == jnp.inf, safe_log1p(yp)),
],
jnp.abs(p_safe - 1)
* (
((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1
),
)
if premult is not None:
x /= premult
return x
def log_lerp(t, v0, v1):
"""Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
if v0 <= 0 or v1 <= 0:
raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
lv0 = jnp.log(v0)
lv1 = jnp.log(v1)
return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)
def approx_erf(x):
"""An approximation of erf() that is accurate to within 0.007."""
return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2))
def create_learning_rate_decay(**kwargs):
"""A partial evaluation of learning rate decay that can be used with gin."""
return functools.partial(learning_rate_decay, **kwargs)
def learning_rate_decay(
step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1
):
"""Continuous learning rate decay function.
The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
is log-linearly interpolated elsewhere (equivalent to exponential decay).
If lr_delay_steps>0 then the learning rate will be scaled by some smooth
function of lr_delay_mult, such that the initial learning rate is
lr_init*lr_delay_mult at the beginning of optimization but will be eased back
to the normal learning rate when steps>lr_delay_steps.
Args:
step: int, the current optimization step.
lr_init: float, the initial learning rate.
lr_final: float, the final learning rate.
max_steps: int, the number of steps during optimization.
lr_delay_steps: int, the number of steps to delay the full learning rate.
lr_delay_mult: float, the multiplier on the rate when delaying it.
Returns:
    lr: the learning rate for the current step 'step'.
"""
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1)
)
else:
delay_rate = 1.0
return delay_rate * log_lerp(step / max_steps, lr_init, lr_final)
def sorted_lookup(x, xp, fps, device_is_tpu):
"""Lookup `x` into locations `xp` , return indices and each `[fp]` value."""
if not isinstance(fps, tuple):
raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.')
if device_is_tpu:
# Identify the location in `xp` that corresponds to each `x`.
# The final `True` index in `mask` is the start of the matching interval.
mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None]
def find_interval(x):
# Grab the value where `mask` switches from True to False, and vice versa.
# This approach takes advantage of the fact that `x` is sorted.
x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
return x0, x1
idx0, idx1 = find_interval(jnp.arange(xp.shape[-1]))
vals = [find_interval(fp) for fp in fps]
else:
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
lambda a, v: jnp.searchsorted(a, v, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
idx0 = jnp.maximum(idx - 1, 0)
vals = []
for fp in fps:
fp0 = jnp.take_along_axis(fp, idx0, axis=-1)
fp1 = jnp.take_along_axis(fp, idx1, axis=-1)
vals.append((fp0, fp1))
return (idx0, idx1), vals
def sorted_interp(
x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2
):
"""A version of interp() where xp and fp must be sorted."""
(xp0, xp1), (fp0, fp1) = sorted_lookup(
x, xp, (xp, fp), device_is_tpu=device_is_tpu
)[1]
offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1)
ret = fp0 + offset * (fp1 - fp0)
return ret
def searchsorted(a, v, device_is_tpu):
"""Behaves like jnp.searchsorted, excluding boundary conditions."""
return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0]
def override_gradient(fval, bval):
"""Use `fval` in the forward pass but `bval` in the backward pass."""
# Note that the parentheses are needed to avoid catastrophic cancellation.
return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval))
def average_across_multisamples(x):
"""Function that averages grid query results across the multisample dimension."""
return jnp.mean(x, axis=-2)
def noop(x):
return x
@jax.custom_jvp
def fake_clip(a, a_min, a_max):
"""jnp.clip() but the gradient doesn't get clipped on the backward pass."""
return jnp.clip(a, a_min, a_max)
@fake_clip.defjvp
def fake_clip_jvp(primals, tangents):
"""Override fake_clip()'s gradient so that it's a no-op."""
return jnp.clip(*primals), tangents[0]
@jax.jit
def general_lossfun(x, alpha, scale):
r"""This implements the rho(x, \alpha, c) function described in "A General and
Adaptive Robust Loss Function", Jonathan T. Barron,
https://arxiv.org/abs/1701.03077.
Args:
x: The residual for which the loss is being computed. x can have any shape,
and alpha and scale will be broadcasted to match x's shape if necessary.
alpha: The shape parameter of the loss (\alpha in the paper), where more
negative values produce a loss with more robust behavior (outliers "cost"
less), and more positive values produce a loss with less robust behavior
(outliers are penalized more heavily). Alpha can be any value in
[-infinity, infinity], but the gradient of the loss with respect to alpha
is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
interpolation between several discrete robust losses:
alpha=-Infinity: Welsch/Leclerc Loss.
alpha=-2: Geman-McClure loss.
      alpha=0: Cauchy/Lorentzian loss.
alpha=1: Charbonnier/pseudo-Huber loss.
alpha=2: L2 loss.
scale: The scale parameter of the loss. When |x| < scale, the loss is an
L2-like quadratic bowl, and when |x| > scale the loss function takes on a
different shape according to alpha.
Returns:
The losses for each element of x, in the same shape as x.
"""
eps = jnp.finfo(jnp.float32).eps
maxval = 1e15
  # A "safe" version of expm1 that will not NaN-out on large inputs.
expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43))
# `scale` must be > 0.
scale = jnp.maximum(eps, scale)
# Large values of |x| can cause non-finite gradients.
x = fake_clip(x, -maxval, maxval)
# The loss when alpha == 2. This will get reused repeatedly.
loss_two = 0.5 * (x / scale)**2
# Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
a = jnp.where(alpha >= 0, jnp.ones_like(alpha),
-jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))
# Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
b = jnp.maximum(eps, jnp.abs(a - 2))
  # The loss when not in one of the special cases.
loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1)
# Select which of the cases of the loss to return as a function of alpha.
return jnp.where(
alpha == -jnp.inf, -expm1_safe(-loss_two),
jnp.where(
alpha == 0, jnp.log1p(loss_two),
jnp.where(alpha == 2, loss_two,
jnp.where(alpha == jnp.inf, expm1_safe(loss_two),
loss_ow))))
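# Illustrative sanity check (values assumed, not part of the original file):
#   x, scale = jnp.array([0.5]), 1.0
#   general_lossfun(x, jnp.array(2.0), scale)  # ~0.125, the L2 case 0.5*(x/scale)**2
#   general_lossfun(x, jnp.array(0.0), scale)  # ~0.118, the Cauchy case log1p(0.125)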
<fim_middle> | null | ANNOTATION | complete_current_header_empty_completion |
<filename>microagents/agents/agent_persistence_manager.py<fim_prefix>from agents.agent_serializer import AgentSerializer
from integrations.memoize import memoize_to_sqlite
from integrations.sqlite_agent_persistence import SQLiteAgentPersistence
class AgentPersistenceManager:
def __init__(self, db_filename="agents.db"):
self.persistence = SQLiteAgentPersistence(db_filename)
def remove_agent(self, agent):
"""
Remove an agent from the database.
"""
self.persistence.remove_agent(agent.id)
def save_agent(self, agent):
"""
Serialize and save the agent state if it is a working agent and not a prime agent.
"""
if agent.is_working_agent() and not agent.is_prime_agent():
serialized_agent = AgentSerializer.serialize(agent)
self.persistence.save_agent(serialized_agent)
def load_agent(self, purpose, agent_lifecycle, openai_wrapper):
"""
Load an agent with the given purpose from the database.
"""
serialized_agent = self.persistence.fetch_agent(purpose)
if serialized_agent:
return AgentSerializer.from_dict(serialized_agent, agent_lifecycle, openai_wrapper)
return None
def load_all_agents(self, agent_lifecycle, openai_wrapper):
"<fim_suffix>""
Load all agents from the database.
"""
purposes = self.persistence.load_all_purposes()
agents = []
for purpose in purposes:
agent = self.load_agent(purpose, agent_lifecycle, openai_wrapper)
if agent:
agents.append(agent)
return agents
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
<filename>microagents/integrations/sqlite_agent_persistence.py<fim_prefix>import sqlite3
import json
from integrations.agent_persistence import AbstractAgentPersistence
class SQLiteAgentPersistence(AbstractAgentPersistence):
def __init__(self, filename="agents.db"):
self.filename = filename
self._initialize_database()
def _initialize_database(self):
"""
Initialize the SQLite database with the required schema.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("""
CREATE TABLE IF NOT EXISTS agents (
id TEXT PRIMARY KEY,
purpose TEXT,
data TEXT
)
""")
def remove_agent(self, purpose):
"""
Remove an agent from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("DELETE FROM agents WHERE id = ?", (purpose,))
def save_agent(self, agent_dict):
"<fim_suffix>""
Save the serialized agent to an SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute(
# add id field
"REPLACE INTO agents (id, purpose, data) VALUES (?, ?, ?)",
(agent_dict['id'], agent_dict['purpose'], json.dumps(agent_dict))
)
def fetch_agent(self, purpose):
"""
Fetch a serialized agent based on its purpose from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT data FROM agents WHERE purpose = ?", (purpose,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def load_all_purposes(self):
"""
Load all agent purposes from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT purpose FROM agents")
return [row[0] for row in cursor.fetchall()]<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
<filename>microagents/agents/agent_lifecycle.py<fim_prefix>import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
)
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 2000
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.agent_persistence = agent_persistence_manager
self.max_agents = max_agents
def stop_all_agents(self) -> None:
"""Stops all agents."""
for agent in self.agents:
agent.stop()
def reset_all_agents(self) -> None:
"""Resets all agents."""
for agent in self.agents:
agent.reset()
def cleanup_agents(self):
"""Remove all agents with s<fim_suffix>tatus stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
def create_prime_agent(self) -> None:
"""Creates the prime agent and adds it to the agent list."""
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def add_agent(self, agent: MicroAgent) -> None:
"""Adds an agent to the list of agents."""
self.agents.append(agent)
def get_available_agents_for_agent(self, agent) -> List[MicroAgent]:
"""Returns the list of available agents for the given purpose."""
agent_id = agent.id
available_agents = [agent for agent in self.agents if agent.purpose != "Bootstrap Agent" and agent.working_agent]
        for agent in available_agents[:]:  # iterate over a copy so removal is safe
if agent.parent_id != agent_id:
available_agents.remove(agent)
return available_agents
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False, parent_agent=None) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
Optionally creates a new agent regardless of similarity if force_new is True.
"""
if not force_new:
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
return self._create_and_add_agent(purpose, depth, sample_input, parent_agent=parent_agent)
def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> MicroAgent:
"""Helper method to create and add a new agent."""
if len(self.agents) >= self.max_agents:
self._remove_least_used_agent()
new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper, parent=parent_agent)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def _remove_least_used_agent(self):
"""Removes the least used agent."""
least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
self.agents.remove(least_used_agent)
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
            logger.exception(f"Error in removing agent: {e}")
raise
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
<filename>microagents/agents/agent_lifecycle.py<fim_prefix>import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
)
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 2000
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.agent_persistence = agent_persistence_manager
self.max_agents = max_agents
def stop_all_agents(self) -> None:
"""Stops all agents."""
for agent in self.agents:
agent.stop()
def reset_all_agents(self) -> None:
"""Resets all agents."""
for agent in self.agents:
agent.reset()
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
def create_prime_agent(self) -> None:
"""Creates the prime agent and adds it to the agent list."""
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def add_agent(self, agent: MicroAgent) -> None:
"""Adds an agent to the list of agents."""
self.agents.append(agent)
def get_available_agents_for_agent(self, agent) -> List[MicroAgent]:
"""Returns the list of available agents for the given purpose."""
agent_id = agent.id
available_agents = [agent for agent in self.agents if agent.purpose != "Bootstrap Agent" and agent.working_agent]
        for agent in available_agents[:]:  # iterate over a copy so removal is safe
if agent.parent_id != agent_id:
available_agents.remove(agent)
return available_agents
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False, parent_agent=None) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
Optionally creates a new agent regardless of similarity if force_new is True.
"""
if not force_new:
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
return self._create_and_add_agent(purpose, depth, sample_input, parent_agent=parent_agent)
def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> MicroAgent:
"""Helper method to create and add a new agent."""
if len(self.agents) >= self.max_agents:
self._remove_least_used_agent()
new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper, parent=parent_agent)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def _remove_least_used_agent(self):
"""Removes the least used agent."""
least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
self.agents.remove(least_used_agent)
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
            logger.exception(f"Error in removing agent: {e}")
raise
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
""<fim_suffix>"
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
<filename>microagents/integrations/memoize.py<fim_prefix>import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
""<fim_suffix>"
Memoization decorator that caches the output of a method in a SQLite
database.
"""
def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs)
return wrapped
return decorator
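# Hypothetical usage sketch (the function name and cache file are illustrative):
#   @memoize_to_sqlite(func_name="get_embedding", filename="cache.db")
#   def get_embedding(text):
#       ...  # expensive call whose JSON-serializable result is cached by argument hash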
class SQLiteMemoization:
def __init__(self, filename):
self.filename = filename
self.connection = None
def __enter__(self):
self.connection = sqlite3.connect(self.filename)
self._initialize_database()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
self.connection = None
def _initialize_database(self):
self.connection.execute(
"CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
)
self.connection.execute(
"CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
)
def fetch_or_compute(self, func, func_name, *args, **kwargs):
arg_hash = self._compute_hash(func_name, *args, **kwargs)
result = self._fetch_from_cache(arg_hash)
if result is not None:
return result
return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)
def _compute_hash(self, func_name, *args, **kwargs):
data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
return hashlib.sha256(data).hexdigest()
def _fetch_from_cache(self, arg_hash):
cursor = self.connection.cursor()
cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
result = func(*args, **kwargs)
self._cache_result(arg_hash, result)
return result
def _cache_result(self, arg_hash, result):
cursor = self.connection.cursor()
cursor.execute(
"INSERT INTO cache (hash, result) VALUES (?, ?)",
(arg_hash, json.dumps(result))
)
self.connection.commit()<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
<filename>microagents/agents/agent_lifecycle.py<fim_prefix>import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
)
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 2000
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.agent_persistence = agent_persistence_manager
self.max_agents = max_agents
def stop_all_agents(self) -> None:
"""Stops all agents."""
for agent in self.agents:
agent.stop()
def reset_all_agents(self) -> None:
"""Resets all agents."""
for agent in self.agents:
agent.reset()
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
def create_prime_agent(self) -> None:
"""Creates the prime agent and adds it<fim_suffix> to the agent list."""
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def add_agent(self, agent: MicroAgent) -> None:
"""Adds an agent to the list of agents."""
self.agents.append(agent)
def get_available_agents_for_agent(self, agent) -> List[MicroAgent]:
"""Returns the list of available agents for the given purpose."""
agent_id = agent.id
available_agents = [agent for agent in self.agents if agent.purpose != "Bootstrap Agent" and agent.working_agent]
        for agent in available_agents[:]:  # iterate over a copy so removal is safe
if agent.parent_id != agent_id:
available_agents.remove(agent)
return available_agents
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False, parent_agent=None) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
Optionally creates a new agent regardless of similarity if force_new is True.
"""
if not force_new:
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
return self._create_and_add_agent(purpose, depth, sample_input, parent_agent=parent_agent)
def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> MicroAgent:
"""Helper method to create and add a new agent."""
if len(self.agents) >= self.max_agents:
self._remove_least_used_agent()
new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper, parent=parent_agent)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def _remove_least_used_agent(self):
"""Removes the least used agent."""
least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
self.agents.remove(least_used_agent)
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
            logger.exception(f"Error in removing agent: {e}")
raise
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
<filename>microagents/agents/microagent_manager.py<fim_prefix>import logging
from typing import List, Optional, Any
from agents.agent_lifecycle import AgentLifecycle
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from integrations.openaiwrapper import OpenAIAPIWrapper
logger= logging.getLogger()
class MicroAgentManager:
"""
Manages the creation and retrieval of micro agents.
"""
def __init__(self, openai_wrapper: OpenAIAPIWrapper, max_agents: int = 20, db_filename : str = "agents.db"):
self.max_agents = max_agents
self.openai_wrapper = openai_wrapper
self.agent_persistence = AgentPersistenceManager(db_filename)
self.agent_lifecycle = AgentLifecycle(self.openai_wrapper, self.agent_persistence, max_agents)
self.load_agents()
def stop_all_agents(self) -> None:
"""Stops all agents."""
self.agent_lifecycle.stop_all_agents()
def cleanup_agents(self):
"""Remove all agents with status stopped = True"""
self.agent_lifecycle.cleanup_agents()
def load_agents(self):
"""Loads agents from the database."""
loaded_agents = self.agent_persistence.load_all_agents(self.agent_lifecycle, self.openai_wrapper)
self.agent_lifecycle.agents.extend(loaded_agents)
logger.info(f"Loaded {len(loaded_agents)} agents from the database.")
def get_agents(self) -> List[Any]:
"""Returns the list<fim_suffix> of agents."""
self.cleanup_agents()
return self.agent_lifecycle.agents
def create_agents(self) -> None:
"""Creates prime agents and logs the process."""
logger.info("Creating agents...")
try:
self.agent_lifecycle.create_prime_agent()
logger.info("Agents created successfully.")
except Exception as e:
logger.exception(f"Error in creating agents: {e}")
raise
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> Any:
"""
Retrieves an existing agent or creates a new one based on the given purpose.
"""
logger.info(f"Getting or creating agent for purpose: {purpose}")
try:
agent = self.agent_lifecycle.get_or_create_agent(purpose, depth, sample_input, parent_agent=parent_agent)
logger.info(f"Agent for purpose '{purpose}' retrieved or created.")
return agent
except Exception as e:
logging.exception(f"Error in getting or creating agent: {e}")
raise
def display_agent_status(self):
"""Displays the current status of all agents."""
for agent in self.get_agents():
logger.info(f"Agent {agent.purpose}: Status = {agent.current_status}, Evolve Count = {agent.evolve_count}")
def display_active_agent_tree(self):
"""Displays a tree view of active agent relationships."""
for agent in self.get_agents():
if agent.active_agents:
logger.info(f"Agent {agent.purpose} is calling: {agent.active_agents}")
else:
logger.info(f"Agent {agent.purpose} is currently idle.")<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
<filename>microagents/agents/agent_similarity.py<fim_prefix>import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
""<fim_suffix>"
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
<filename>microagents/agents/microagent_manager.py<fim_prefix>import logging
from typing import List, Optional, Any
from agents.agent_lifecycle import AgentLifecycle
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from integrations.openaiwrapper import OpenAIAPIWrapper
logger= logging.getLogger()
class MicroAgentManager:
"""
Manages the creation and retrieval of micro agents.
"""
def __init__(self, openai_wrapper: OpenAIAPIWrapper, max_agents: int = 20, db_filename : str = "agents.db"):
self.max_agents = max_agents
self.openai_wrapper = openai_wrapper
self.agent_persistence = AgentPersistenceManager(db_filename)
self.agent_lifecycle = AgentLifecycle(self.openai_wrapper, self.agent_persistence, max_agents)
self.load_agents()
def stop_all_agents(self) -> None:
"""Stops all agents."""
self.agent_lifecycle.stop_all_agents()
def cleanup_agents(self):
"""Remove all agent<fim_suffix>s with status stopped = True"""
self.agent_lifecycle.cleanup_agents()
def load_agents(self):
"""Loads agents from the database."""
loaded_agents = self.agent_persistence.load_all_agents(self.agent_lifecycle, self.openai_wrapper)
self.agent_lifecycle.agents.extend(loaded_agents)
logger.info(f"Loaded {len(loaded_agents)} agents from the database.")
def get_agents(self) -> List[Any]:
"""Returns the list of agents."""
self.cleanup_agents()
return self.agent_lifecycle.agents
def create_agents(self) -> None:
"""Creates prime agents and logs the process."""
logger.info("Creating agents...")
try:
self.agent_lifecycle.create_prime_agent()
logger.info("Agents created successfully.")
except Exception as e:
logger.exception(f"Error in creating agents: {e}")
raise
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> Any:
"""
Retrieves an existing agent or creates a new one based on the given purpose.
"""
logger.info(f"Getting or creating agent for purpose: {purpose}")
try:
agent = self.agent_lifecycle.get_or_create_agent(purpose, depth, sample_input, parent_agent=parent_agent)
logger.info(f"Agent for purpose '{purpose}' retrieved or created.")
return agent
except Exception as e:
logging.exception(f"Error in getting or creating agent: {e}")
raise
def display_agent_status(self):
"""Displays the current status of all agents."""
for agent in self.get_agents():
logger.info(f"Agent {agent.purpose}: Status = {agent.current_status}, Evolve Count = {agent.evolve_count}")
def display_active_agent_tree(self):
"""Displays a tree view of active agent relationships."""
for agent in self.get_agents():
if agent.active_agents:
logger.info(f"Agent {agent.purpose} is calling: {agent.active_agents}")
else:
logger.info(f"Agent {agent.purpose} is currently idle.")<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
<filename>microagents/agents/agent_similarity.py<fim_prefix>import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
""<fim_suffix>"
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
<filename>microagents/agents/microagent.py<fim_prefix>import logging
import uuid
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_evaluation import AgentEvaluator
from agents.agent_response import AgentResponse
from agents.agent_similarity import AgentSimilarity
from agents.response_extraction import ResponseExtraction
from agents.agent_stopped_exception import AgentStoppedException
from agents.response_handler import ResponseHandler
from runtime.code_execution import CodeExecution
from prompt_management.prompt_evolution import PromptEvolution
from utils.utility import get_env_variable, time_function, log_exception
logger = logging.getLogger()
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None, parent=None, parent_id=None, id=None) :
self.dynamic_prompt = initial_prompt
self.purpose = purpose
self.purpose_embedding = purpose_embedding
self.depth = depth
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_lifecycle = agent_lifecycle
self.openai_wrapper = openai_wrapper
self.evolve_count = 0
self.number_of_code_executions = 0
self.current_status = None
self.active_agents = {}
self.last_input = ""
self.last_output = ""
self.last_conversation = ""
self.stopped = False
self.is_prime = is_prime
self.stop_execution = False
if parent:
self.parent_id = parent.id if parent else None
else:
self.parent_id = None
if parent_id:
self.p<fim_suffix>arent_id = parent_id
if is_prime:
self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b"
else:
if id:
self.id = id
else:
self.id = str(uuid.uuid4())
# Initialize components used by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
self.response_handler = ResponseHandler(self)
def update_status(self, status):
"""Update the agent's current status."""
self.check_for_stopped()
self.current_status = status
logger.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logger.info(f"Active agents updated: {self.active_agents}")
def set_agent_as_working(self):
"""Set the agent as a working agent."""
self.working_agent = True
self.agent_lifecycle.save_agent(self)
logger.info(f"Agent {self.purpose} set as working agent.")
def get_children(self):
"""Get the children of the agent."""
return [agent for agent in self.agent_lifecycle.agents if agent.parent_id == self.id]
def is_working_agent(self):
return self.working_agent
def set_agent_deleted(self):
"""Set the agent as deleted."""
self.working_agent = False
self.current_status = "❌ Deleted"
self.stopped = True
self.stop_execution = True
self.agent_lifecycle.remove_agent(self)
logger.info(f"Agent {self.purpose} set as deleted.")
def check_for_stopped(self):
"""Check if the agent has been stopped."""
if self.stop_execution:
self.current_status = "❌ Stopped"
if self.is_prime:
self.agent_lifecycle.reset_all_agents()
raise AgentStoppedException("Agent stopped.")
def respond(self, input_text, evolve_count=0):
"""
Generate a response to the given input text.
"""
return self.response_handler.respond(input_text, evolve_count)
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
def reset(self):
"""Reset the agent's stopped status."""
self.current_status = ""
self.stop_execution = False
def __eq__(self, other):
if not isinstance(other, MicroAgent):
return NotImplemented
return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose)
def __hash__(self):
return hash((self.dynamic_prompt, self.purpose))
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>microagents/agents/microagent.py<fim_prefix>import logging
import uuid
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_evaluation import AgentEvaluator
from agents.agent_response import AgentResponse
from agents.agent_similarity import AgentSimilarity
from agents.response_extraction import ResponseExtraction
from agents.agent_stopped_exception import AgentStoppedException
from agents.response_handler import ResponseHandler
from runtime.code_execution import CodeExecution
from prompt_management.prompt_evolution import PromptEvolution
from utils.utility import get_env_variable, time_function, log_exception
logger = logging.getLogger()
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None, parent=None, parent_id=None, id=None) :
self.dynamic_prompt = initial_prompt
self.purpose = purpose
self.purpose_embedding = purpose_embedding
self.depth = depth
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_lifecycle = agent_lifecycle
self.openai_wrapper = openai_wrapper
self.evolve_count = 0
self.number_of_code_executions = 0
self.current_status = None
self.active_agents = {}
self.last_input = ""
self.last_output = ""
self.last_conversation = ""
self.stopped = False
self.is_prime = is_prime
self.stop_execution = False
if parent:
self.parent_id = parent.id if parent else None
else:
self.parent_id = None
if parent_id:
self.parent_id = parent_id
if is_prime:
self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b"
else:
if id:
self.id = id
else:
se<fim_suffix>lf.id = str(uuid.uuid4())
# Initialize components used by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
self.response_handler = ResponseHandler(self)
def update_status(self, status):
"""Update the agent's current status."""
self.check_for_stopped()
self.current_status = status
logger.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logger.info(f"Active agents updated: {self.active_agents}")
def set_agent_as_working(self):
"""Set the agent as a working agent."""
self.working_agent = True
self.agent_lifecycle.save_agent(self)
logger.info(f"Agent {self.purpose} set as working agent.")
def get_children(self):
"""Get the children of the agent."""
return [agent for agent in self.agent_lifecycle.agents if agent.parent_id == self.id]
def is_working_agent(self):
return self.working_agent
def set_agent_deleted(self):
"""Set the agent as deleted."""
self.working_agent = False
self.current_status = "❌ Deleted"
self.stopped = True
self.stop_execution = True
self.agent_lifecycle.remove_agent(self)
logger.info(f"Agent {self.purpose} set as deleted.")
def check_for_stopped(self):
"""Check if the agent has been stopped."""
if self.stop_execution:
self.current_status = "❌ Stopped"
if self.is_prime:
self.agent_lifecycle.reset_all_agents()
raise AgentStoppedException("Agent stopped.")
def respond(self, input_text, evolve_count=0):
"""
Generate a response to the given input text.
"""
return self.response_handler.respond(input_text, evolve_count)
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
def reset(self):
"""Reset the agent's stopped status."""
self.current_status = ""
self.stop_execution = False
def __eq__(self, other):
if not isinstance(other, MicroAgent):
return NotImplemented
return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose)
def __hash__(self):
return hash((self.dynamic_prompt, self.purpose))
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>microagents/integrations/memoize.py<fim_prefix>import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
"""
Memoization decorator that caches the output of a method in a SQLite
database.
"""
def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs)
return wrapped
return decorator
class SQLiteMemoization:
def __init__(self, filename):
self.filename = filename
self.connection = None
def __enter__(self):
self.connection = sqlite3.connect(self.filename)
self._initialize_database()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
self.connection = None
def _initialize_database(self):
self.connection.execute(
"CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
)
self.connection.execute(
"CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
)
def fetch_or_compute(self, func, func_name, *args, **kwargs):
arg_hash = self._compute_hash(func_name, *args, **kwargs)
result =<fim_suffix> self._fetch_from_cache(arg_hash)
if result is not None:
return result
return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)
def _compute_hash(self, func_name, *args, **kwargs):
data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
return hashlib.sha256(data).hexdigest()
def _fetch_from_cache(self, arg_hash):
cursor = self.connection.cursor()
cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
result = func(*args, **kwargs)
self._cache_result(arg_hash, result)
return result
def _cache_result(self, arg_hash, result):
cursor = self.connection.cursor()
cursor.execute(
"INSERT INTO cache (hash, result) VALUES (?, ?)",
(arg_hash, json.dumps(result))
)
self.connection.commit()<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>microagents/agents/microagent.py<fim_prefix>import logging
import uuid
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_evaluation import AgentEvaluator
from agents.agent_response import AgentResponse
from agents.agent_similarity import AgentSimilarity
from agents.response_extraction import ResponseExtraction
from agents.agent_stopped_exception import AgentStoppedException
from agents.response_handler import ResponseHandler
from runtime.code_execution import CodeExecution
from prompt_management.prompt_evolution import PromptEvolution
from utils.utility import get_env_variable, time_function, log_exception
logger = logging.getLogger()
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None, parent=None, parent_id=None, id=None) :
self.dynamic_promp<fim_suffix>t = initial_prompt
self.purpose = purpose
self.purpose_embedding = purpose_embedding
self.depth = depth
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_lifecycle = agent_lifecycle
self.openai_wrapper = openai_wrapper
self.evolve_count = 0
self.number_of_code_executions = 0
self.current_status = None
self.active_agents = {}
self.last_input = ""
self.last_output = ""
self.last_conversation = ""
self.stopped = False
self.is_prime = is_prime
self.stop_execution = False
if parent:
self.parent_id = parent.id if parent else None
else:
self.parent_id = None
if parent_id:
self.parent_id = parent_id
if is_prime:
self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b"
else:
if id:
self.id = id
else:
self.id = str(uuid.uuid4())
# Initialize components used by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
self.response_handler = ResponseHandler(self)
def update_status(self, status):
"""Update the agent's current status."""
self.check_for_stopped()
self.current_status = status
logger.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logger.info(f"Active agents updated: {self.active_agents}")
def set_agent_as_working(self):
"""Set the agent as a working agent."""
self.working_agent = True
self.agent_lifecycle.save_agent(self)
logger.info(f"Agent {self.purpose} set as working agent.")
def get_children(self):
"""Get the children of the agent."""
return [agent for agent in self.agent_lifecycle.agents if agent.parent_id == self.id]
def is_working_agent(self):
return self.working_agent
def set_agent_deleted(self):
"""Set the agent as deleted."""
self.working_agent = False
self.current_status = "❌ Deleted"
self.stopped = True
self.stop_execution = True
self.agent_lifecycle.remove_agent(self)
logger.info(f"Agent {self.purpose} set as deleted.")
def check_for_stopped(self):
"""Check if the agent has been stopped."""
if self.stop_execution:
self.current_status = "❌ Stopped"
if self.is_prime:
self.agent_lifecycle.reset_all_agents()
raise AgentStoppedException("Agent stopped.")
def respond(self, input_text, evolve_count=0):
"""
Generate a response to the given input text.
"""
return self.response_handler.respond(input_text, evolve_count)
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
def reset(self):
"""Reset the agent's stopped status."""
self.current_status = ""
self.stop_execution = False
def __eq__(self, other):
if not isinstance(other, MicroAgent):
return NotImplemented
return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose)
def __hash__(self):
return hash((self.dynamic_prompt, self.purpose))
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>microagents/integrations/sqlite_agent_persistence.py<fim_prefix>import sqlite3
import json
from integrations.agent_persistence import AbstractAgentPersistence
class SQLiteAgentPersistence(AbstractAgentPersistence):
def __init__(self, filename="agents.db"):
self.filename = filename
self._initialize_database()
def _initialize_database(self):
"""
Initialize the SQLite database with the required schema.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("""
CREATE TABLE IF NOT EXISTS agents (
id TEXT PRIMARY KEY,
purpose TEXT,
data TEXT
)
""")
def remove_agent(self, purpose):
"""
Remove an agent from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("DELETE FROM agents WHERE id = ?", (purpose,))
def save_agent(self, agent_dict):
"""
Save the serialized agent to an SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute(
# add id field
"REPLACE INTO agents (id, purpose, data) VALUES (?, ?, ?)",
(agent_dict['id'], agent_dict['purpose'], json.dumps(agent_dict))
)
def fetch_agent(self, purpose):
"""
Fetch a serialized agent based on its purpose from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT data FROM agents WHERE purpose = ?", (purpose,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def load_all_purposes(self):
"""
Load all agent purposes from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.curso<fim_suffix>r()
cursor.execute("SELECT purpose FROM agents")
return [row[0] for row in cursor.fetchall()]<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>microagents/agents/agent_similarity.py<fim_prefix>import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_em<fim_suffix>bedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>microagents/integrations/memoize.py<fim_prefix>import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
"""
Memoization decorator that caches the output of a method in a SQLite
database.
"""
def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs)
return wrapped
return decorator
class SQLiteMemoization:
def __init__(self, filename):
self.filename = filename
self.connection = None
def __enter__(self):
self.connection = sqlite3.connect(self.filename)
self._initialize_database()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
self.connection = None
def _initialize_database(self):
self.connection.execute(
"CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
)
self.connection.execute(
"CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
)
def fetch_or_compute(self, func, func_name, *args, **kwargs):
arg_hash = self._compute_hash(func_name, *args, **kwargs)
result = self._fetch_from_cache(arg_hash)
if result is not None:
return result
return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)
def _compute_hash(self, func_name, *args, **kwargs):
data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
return hashlib.sha256(data).hexdigest()
def _fetch_from_cache(self, arg_hash):
cursor = self.connection.cursor()
cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
result = func(*args, **kwargs)
self._cache_result(arg_hash, result)
return result
def _cache_result(self, arg_hash, result):
cursor = self.con<fim_suffix>nection.cursor()
cursor.execute(
"INSERT INTO cache (hash, result) VALUES (?, ?)",
(arg_hash, json.dumps(result))
)
self.connection.commit()<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>microagents/agents/microagent.py<fim_prefix>import logging
import uuid
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_evaluation import AgentEvaluator
from agents.agent_response import AgentResponse
from agents.agent_similarity import AgentSimilarity
from agents.response_extraction import ResponseExtraction
from agents.agent_stopped_exception import AgentStoppedException
from agents.response_handler import ResponseHandler
from runtime.code_execution import CodeExecution
from prompt_management.prompt_evolution import PromptEvolution
from utils.utility import get_env_variable, time_function, log_exception
logger = logging.getLogger()
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None, parent=None, parent_id=None, id=None) :
self.dynamic_prompt = initial_prompt
self.purpose = purpose
self.purpose_embedding = purpose_embedding
self.depth = depth
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_lifecycle = agent_lifecycle
self.openai_wrapper = openai_wrapper
self.evolve_count = 0
self.number_of_code_executions = 0
self.current_status = None
self.active_agents = {}
self.last_input = ""
self.last_output = ""
self.last_conversation = ""
self.stopped = False
self.is_prime = is_prime
self.stop_execution = False
if parent:
self.parent_id = parent.id if parent else None
else:
self.p<fim_suffix>arent_id = None
if parent_id:
self.parent_id = parent_id
if is_prime:
self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b"
else:
if id:
self.id = id
else:
self.id = str(uuid.uuid4())
# Initialize components used by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
self.response_handler = ResponseHandler(self)
def update_status(self, status):
"""Update the agent's current status."""
self.check_for_stopped()
self.current_status = status
logger.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logger.info(f"Active agents updated: {self.active_agents}")
def set_agent_as_working(self):
"""Set the agent as a working agent."""
self.working_agent = True
self.agent_lifecycle.save_agent(self)
logger.info(f"Agent {self.purpose} set as working agent.")
def get_children(self):
"""Get the children of the agent."""
return [agent for agent in self.agent_lifecycle.agents if agent.parent_id == self.id]
def is_working_agent(self):
return self.working_agent
def set_agent_deleted(self):
"""Set the agent as deleted."""
self.working_agent = False
self.current_status = "❌ Deleted"
self.stopped = True
self.stop_execution = True
self.agent_lifecycle.remove_agent(self)
logger.info(f"Agent {self.purpose} set as deleted.")
def check_for_stopped(self):
"""Check if the agent has been stopped."""
if self.stop_execution:
self.current_status = "❌ Stopped"
if self.is_prime:
self.agent_lifecycle.reset_all_agents()
raise AgentStoppedException("Agent stopped.")
def respond(self, input_text, evolve_count=0):
"""
Generate a response to the given input text.
"""
return self.response_handler.respond(input_text, evolve_count)
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
def reset(self):
"""Reset the agent's stopped status."""
self.current_status = ""
self.stop_execution = False
def __eq__(self, other):
if not isinstance(other, MicroAgent):
return NotImplemented
return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose)
def __hash__(self):
return hash((self.dynamic_prompt, self.purpose))
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
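The __eq__ and __hash__ methods above key a MicroAgent on its (dynamic_prompt, purpose) pair, so agents with the same prompt and purpose collapse to a single entry in sets and dicts. Constructing a real MicroAgent requires an OpenAI wrapper and an agent lifecycle, so the sketch below uses a simplified stand-in class purely to illustrate that contract; the class name and sample strings are assumptions, not part of the original source.

# Simplified stand-in mirroring MicroAgent's (dynamic_prompt, purpose) equality contract.
class AgentKeySketch:
    def __init__(self, dynamic_prompt: str, purpose: str):
        self.dynamic_prompt = dynamic_prompt
        self.purpose = purpose

    def __eq__(self, other):
        if not isinstance(other, AgentKeySketch):
            return NotImplemented
        return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose)

    def __hash__(self):
        return hash((self.dynamic_prompt, self.purpose))

a = AgentKeySketch("You are a summarizer.", "Summarize text")
b = AgentKeySketch("You are a summarizer.", "Summarize text")
assert a == b and len({a, b}) == 1  # same prompt and purpose -> one set entry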
<filename>microagents/integrations/memoize.py<fim_prefix>import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
"""
Memoization decorator that caches the output of a method in a SQLite
database.
"""
def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs)
return wrapped
return decorator
class SQLiteMemoization:
def __init__(self, filename):
self.filename = filename
self.connection = None
def __enter__(self):
self.connection = sqlite3.connect(<fim_suffix>self.filename)
self._initialize_database()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
self.connection = None
def _initialize_database(self):
self.connection.execute(
"CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
)
self.connection.execute(
"CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
)
def fetch_or_compute(self, func, func_name, *args, **kwargs):
arg_hash = self._compute_hash(func_name, *args, **kwargs)
result = self._fetch_from_cache(arg_hash)
if result is not None:
return result
return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)
def _compute_hash(self, func_name, *args, **kwargs):
data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
return hashlib.sha256(data).hexdigest()
def _fetch_from_cache(self, arg_hash):
cursor = self.connection.cursor()
cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
result = func(*args, **kwargs)
self._cache_result(arg_hash, result)
return result
def _cache_result(self, arg_hash, result):
cursor = self.connection.cursor()
cursor.execute(
"INSERT INTO cache (hash, result) VALUES (?, ?)",
(arg_hash, json.dumps(result))
)
self.connection.commit()<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
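A minimal usage sketch of the memoize_to_sqlite decorator defined above. The decorated function, its arguments, and the cache filename are illustrative assumptions; the one real constraint is that the cached result must be JSON-serializable, because it is stored with json.dumps and read back with json.loads.

# Hypothetical usage of memoize_to_sqlite; the function name and cache file are made up for illustration.
from integrations.memoize import memoize_to_sqlite

@memoize_to_sqlite(func_name="slow_square", filename="example_cache.db")
def slow_square(n: int) -> int:
    # Stand-in for an expensive computation or API call.
    return n * n

first = slow_square(12)   # computed, then written to the SQLite cache
second = slow_square(12)  # identical arguments hash to the same key, so this is a cache hit
assert first == second == 144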
<filename>microagents/agents/microagent.py<fim_prefix>import logging
import uuid
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_evaluation import AgentEvaluator
from agents.agent_response import AgentResponse
from agents.agent_similarity import AgentSimilarity
from agents.response_extraction import ResponseExtraction
from agents.agent_stopped_exception import AgentStoppedException
from agents.response_handler import ResponseHandler
from runtime.code_execution import CodeExecution
from prompt_management.prompt_evolution import PromptEvolution
from utils.utility import get_env_variable, time_function, log_exception
logger = logging.getLogger()
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None, parent=None, parent_id=None, id=None) :
self.dynamic_prompt = initial_prompt
self.purpose = purpose
self.purpose_embedding = purpose_embedding
self.depth = dept<fim_suffix>h
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_lifecycle = agent_lifecycle
self.openai_wrapper = openai_wrapper
self.evolve_count = 0
self.number_of_code_executions = 0
self.current_status = None
self.active_agents = {}
self.last_input = ""
self.last_output = ""
self.last_conversation = ""
self.stopped = False
self.is_prime = is_prime
self.stop_execution = False
if parent:
self.parent_id = parent.id if parent else None
else:
self.parent_id = None
if parent_id:
self.parent_id = parent_id
if is_prime:
self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b"
else:
if id:
self.id = id
else:
self.id = str(uuid.uuid4())
# Initialize components used by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
self.response_handler = ResponseHandler(self)
def update_status(self, status):
"""Update the agent's current status."""
self.check_for_stopped()
self.current_status = status
logger.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logger.info(f"Active agents updated: {self.active_agents}")
def set_agent_as_working(self):
"""Set the agent as a working agent."""
self.working_agent = True
self.agent_lifecycle.save_agent(self)
logger.info(f"Agent {self.purpose} set as working agent.")
def get_children(self):
"""Get the children of the agent."""
return [agent for agent in self.agent_lifecycle.agents if agent.parent_id == self.id]
def is_working_agent(self):
return self.working_agent
def set_agent_deleted(self):
"""Set the agent as deleted."""
self.working_agent = False
self.current_status = "❌ Deleted"
self.stopped = True
self.stop_execution = True
self.agent_lifecycle.remove_agent(self)
logger.info(f"Agent {self.purpose} set as deleted.")
def check_for_stopped(self):
"""Check if the agent has been stopped."""
if self.stop_execution:
self.current_status = "❌ Stopped"
if self.is_prime:
self.agent_lifecycle.reset_all_agents()
raise AgentStoppedException("Agent stopped.")
def respond(self, input_text, evolve_count=0):
"""
Generate a response to the given input text.
"""
return self.response_handler.respond(input_text, evolve_count)
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
def reset(self):
"""Reset the agent's stopped status."""
self.current_status = ""
self.stop_execution = False
def __eq__(self, other):
if not isinstance(other, MicroAgent):
return NotImplemented
return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose)
def __hash__(self):
return hash((self.dynamic_prompt, self.purpose))
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>microagents/agents/agent_lifecycle.py<fim_prefix>import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
)
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 2000
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.agent_persistence = agent_persistence_manager
self.max_agents = max_agents
def stop_all_agents(self) -> None:
"""Stops all agents."""
for agent in self.agents:
agent.stop()
def reset_all_agents(self) -> None:
"""Resets all agents."""
for agent in self.agents:
agent.reset()
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
def create_prime_agent(self) -> None:
"""Creates the prime agent and adds it to the agent list."""
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def add_agent(self, agent: MicroAgent) -> None:
"""Adds an agent to the list of agents."""
self.agents.append(agent)
    def get_available_agents_for_agent(self, agent) -> List[MicroAgent]:
        """Returns the available working agents that are children of the given agent."""
        agent_id = agent.id
        available_agents = [a for a in self.agents if a.purpose != "Bootstrap Agent" and a.working_agent]
        # Keep only agents whose parent is the given agent.
        return [a for a in available_agents if a.parent_id == agent_id]
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False, parent_agent=None) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
Optionally creates a new agent regardless of similarity if force_new is True.
"""
if not force_new:
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
return self._create_and_add_agent(purpose, depth, sample_input, parent_agent=parent_agent)
def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> MicroAgent:
"""Helper method to create and add a new agent."""
if len(self.agents) >= self.max_agents:
self._remove_least_used_agent()
new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper, parent=parent_agent)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def _remove_least_used_agent(self):
"""Removes the least used agent."""
least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
self.agents.remove(least_used_agent)
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
            logger.exception(f"Error in removing agent: {e}")
raise
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try<fim_suffix>:
return self.openai_wrapper.chat_completion(messages=messages)
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
<fim_middle> | null | TRY | complete_current_header_empty_completion |
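When the agent list is at capacity, _create_and_add_agent above first evicts the least-used agent via min(self.agents, key=lambda agent: agent.usage_count). Below is a standalone sketch of that eviction policy; SimpleAgent and the capacity of 3 are stand-ins for illustration, not part of the original source.

# Stand-in objects: only usage_count matters for the eviction decision.
class SimpleAgent:
    def __init__(self, purpose: str, usage_count: int):
        self.purpose = purpose
        self.usage_count = usage_count

MAX_AGENTS = 3
agents = [SimpleAgent("summarize", 5), SimpleAgent("translate", 1), SimpleAgent("classify", 7)]

def add_with_eviction(agents, new_agent, max_agents=MAX_AGENTS):
    if len(agents) >= max_agents:
        # Same selection rule as _remove_least_used_agent above.
        least_used = min(agents, key=lambda agent: agent.usage_count)
        agents.remove(least_used)
    agents.append(new_agent)

add_with_eviction(agents, SimpleAgent("route", 1))
assert [a.purpose for a in agents] == ["summarize", "classify", "route"]  # "translate" was evicted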
<filename>microagents/agents/agent_similarity.py<fim_prefix>import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
t<fim_suffix>ry:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle> | null | TRY | complete_current_header_empty_completion |
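A small sketch of how find_closest_agent behaves once purpose embeddings are populated. The toy two-dimensional vectors and the None passed in place of the OpenAI wrapper are assumptions for illustration only; because every Agent here already carries a purpose_embedding, get_embedding is never called and no API access is needed.

import numpy as np
from agents.agent_similarity import Agent, AgentSimilarity

# Toy purpose embeddings; in real use these come from the embedding API.
weather = Agent("Report the weather")
weather.purpose_embedding = np.array([1.0, 0.0])
math_helper = Agent("Solve math problems")
math_helper.purpose_embedding = np.array([0.0, 1.0])

# The wrapper argument is unused in this sketch because no embedding has to be fetched.
similarity = AgentSimilarity(None, [weather, math_helper])
query_embedding = np.array([0.9, 0.1])  # points mostly in the "weather" direction
closest, score = similarity.find_closest_agent(query_embedding)
assert closest is weather and score > 0.9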
<filename>microagents/agents/agent_similarity.py<fim_prefix>import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
t<fim_suffix>ry:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle> | null | TRY | complete_current_header_empty_completion |
<filename>microagents/agents/agent_lifecycle.py<fim_prefix>import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
)
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 2000
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.agent_persistence = agent_persistence_manager
self.max_agents = max_agents
def stop_all_agents(self) -> None:
"""Stops all agents."""
for agent in self.agents:
agent.stop()
def reset_all_agents(self) -> None:
"""Resets all agents."""
for agent in self.agents:
agent.reset()
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
def create_prime_agent(self) -> None:
"""Creates the prime agent and adds it to the agent list."""
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def add_agent(self, agent: MicroAgent) -> None:
"""Adds an agent to the list of agents."""
self.agents.append(agent)
    def get_available_agents_for_agent(self, agent) -> List[MicroAgent]:
        """Returns the available working agents that are children of the given agent."""
        agent_id = agent.id
        available_agents = [a for a in self.agents if a.purpose != "Bootstrap Agent" and a.working_agent]
        # Keep only agents whose parent is the given agent.
        return [a for a in available_agents if a.parent_id == agent_id]
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False, parent_agent=None) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
Optionally creates a new agent regardless of similarity if force_new is True.
"""
if not force_new:
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
return self._create_and_add_agent(purpose, depth, sample_input, parent_agent=parent_agent)
def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> MicroAgent:
"""Helper method to create and add a new agent."""
if len(self.agents) >= self.max_agents:
self._remove_least_used_agent()
new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper, parent=parent_agent)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def _remove_least_used_agent(self):
"""Removes the least used agent."""
least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
self.agents.remove(least_used_agent)
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
tr<fim_suffix>y:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
            logger.exception(f"Error in removing agent: {e}")
raise
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
<fim_middle> | null | TRY | complete_current_header_empty_completion |
<filename>microagents/agents/agent_similarity.py<fim_prefix>import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Excep<fim_suffix>tion as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
<filename>microagents/agents/agent_lifecycle.py<fim_prefix>import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
)
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 2000
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.agent_persistence = agent_persistence_manager
self.max_agents = max_agents
def stop_all_agents(self) -> None:
"""Stops all agents."""
for agent in self.agents:
agent.stop()
def reset_all_agents(self) -> None:
"""Resets all agents."""
for agent in self.agents:
agent.reset()
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
def create_prime_agent(self) -> None:
"""Creates the prime agent and adds it to the agent list."""
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def add_agent(self, agent: MicroAgent) -> None:
"""Adds an agent to the list of agents."""
self.agents.append(agent)
    def get_available_agents_for_agent(self, agent) -> List[MicroAgent]:
        """Returns the available working agents that are children of the given agent."""
        agent_id = agent.id
        available_agents = [a for a in self.agents if a.purpose != "Bootstrap Agent" and a.working_agent]
        # Keep only agents whose parent is the given agent.
        return [a for a in available_agents if a.parent_id == agent_id]
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False, parent_agent=None) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
Optionally creates a new agent regardless of similarity if force_new is True.
"""
if not force_new:
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
return self._create_and_add_agent(purpose, depth, sample_input, parent_agent=parent_agent)
def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> MicroAgent:
"""Helper method to create and add a new agent."""
if len(self.agents) >= self.max_agents:
self._remove_least_used_agent()
new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper, parent=parent_agent)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def _remove_least_used_agent(self):
"""Removes the least used agent."""
least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
self.agents.remove(least_used_agent)
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
            logger.exception(f"Error in removing agent: {e}")
raise
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
except Excepti<fim_suffix>on as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
<filename>microagents/agents/agent_lifecycle.py<fim_prefix>import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
)
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 2000
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.agent_persistence = agent_persistence_manager
self.max_agents = max_agents
def stop_all_agents(self) -> None:
"""Stops all agents."""
for agent in self.agents:
agent.stop()
def reset_all_agents(self) -> None:
"""Resets all agents."""
for agent in self.agents:
agent.reset()
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
def create_prime_agent(self) -> None:
"""Creates the prime agent and adds it to the agent list."""
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def add_agent(self, agent: MicroAgent) -> None:
"""Adds an agent to the list of agents."""
self.agents.append(agent)
    def get_available_agents_for_agent(self, agent) -> List[MicroAgent]:
        """Returns the available working agents that are children of the given agent."""
        agent_id = agent.id
        available_agents = [a for a in self.agents if a.purpose != "Bootstrap Agent" and a.working_agent]
        # Keep only agents whose parent is the given agent.
        return [a for a in available_agents if a.parent_id == agent_id]
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False, parent_agent=None) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
Optionally creates a new agent regardless of similarity if force_new is True.
"""
if not force_new:
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
return self._create_and_add_agent(purpose, depth, sample_input, parent_agent=parent_agent)
def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> MicroAgent:
"""Helper method to create and add a new agent."""
if len(self.agents) >= self.max_agents:
self._remove_least_used_agent()
new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper, parent=parent_agent)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def _remove_least_used_agent(self):
"""Removes the least used agent."""
least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
self.agents.remove(least_used_agent)
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Excep<fim_suffix>tion as e:
logger.exception(f"Error in saving agent: {e}")
raise
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
            logger.exception(f"Error in removing agent: {e}")
raise
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
<filename>microagents/agents/agent_similarity.py<fim_prefix>import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception<fim_suffix> as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
<filename>microagents/agents/agent_persistence_manager.py<fim_prefix>from agents.agent_serializer import AgentSerializer
from integrations.memoize import memoize_to_sqlite
from integrations.sqlite_agent_persistence import SQLiteAgentPersistence
class AgentPersistenceManager:
def __init__(self, db_filename="agents.db"):
self.persistence = SQLiteAgentPersistence(db_filename)
def remove_agent(self, agent):
"""
Remove an agent from the database.
"""
self.persistence.remove_agent(agent.id)
def save_agent(self, agent):
"""
Serialize and save the agent state if it is a working agent and not a prime agent.
"""
if agent.is_working_agent() and not agent.is_prime_agent():
serialized_agent = AgentSerializer.serialize(agent)
self.persistence.save_agent(serialized_agent)
def load_agent(self, purpose, agent_lifecycle, openai_wrapper):
"""
Load an agent with the given purpose from the database.
"""
serialized_agent = self.persistence.fetch_agent(purpose)
if serialized_agent:
return AgentSerializer.from_dict(serialized_agent, agent_lifecycle, openai_wrapper)
return None
def load_all_agents(self, agent_lifecycle, openai_wrapper):
"""
Load all agents from the database.
"""
purposes = self.persistence.load_all_purposes()
agents = []
for<fim_suffix> purpose in purposes:
agent = self.load_agent(purpose, agent_lifecycle, openai_wrapper)
if agent:
agents.append(agent)
return agents
<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>microagents/agents/agent_similarity.py<fim_prefix>import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.a<fim_suffix>gents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>microagents/agents/microagent.py<fim_prefix>import logging
import uuid
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_evaluation import AgentEvaluator
from agents.agent_response import AgentResponse
from agents.agent_similarity import AgentSimilarity
from agents.response_extraction import ResponseExtraction
from agents.agent_stopped_exception import AgentStoppedException
from agents.response_handler import ResponseHandler
from runtime.code_execution import CodeExecution
from prompt_management.prompt_evolution import PromptEvolution
from utils.utility import get_env_variable, time_function, log_exception
logger = logging.getLogger()
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None, parent=None, parent_id=None, id=None) :
self.dynamic_prompt = initial_prompt
self.purpose = purpose
self.purpose_embedding = purpose_embedding
self.depth = depth
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_lifecycle = agent_lifecycle
self.openai_wrapper = openai_wrapper
self.evolve_count = 0
self.number_of_code_executions = 0
self.current_status = None
self.active_agents = {}
self.last_input = ""
self.last_output = ""
self.last_conversation = ""
self.stopped = False
self.is_prime = is_prime
self.stop_execution = False
if parent:
self.parent_id = parent.id if parent else None
else:
self.parent_id = None
if parent_id:
self.parent_id = parent_id
if is_p<fim_suffix>rime:
self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b"
else:
if id:
self.id = id
else:
self.id = str(uuid.uuid4())
# Initialize components used by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
self.response_handler = ResponseHandler(self)
def update_status(self, status):
"""Update the agent's current status."""
self.check_for_stopped()
self.current_status = status
logger.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logger.info(f"Active agents updated: {self.active_agents}")
def set_agent_as_working(self):
"""Set the agent as a working agent."""
self.working_agent = True
self.agent_lifecycle.save_agent(self)
logger.info(f"Agent {self.purpose} set as working agent.")
def get_children(self):
"""Get the children of the agent."""
return [agent for agent in self.agent_lifecycle.agents if agent.parent_id == self.id]
def is_working_agent(self):
return self.working_agent
def set_agent_deleted(self):
"""Set the agent as deleted."""
self.working_agent = False
self.current_status = "❌ Deleted"
self.stopped = True
self.stop_execution = True
self.agent_lifecycle.remove_agent(self)
logger.info(f"Agent {self.purpose} set as deleted.")
def check_for_stopped(self):
"""Check if the agent has been stopped."""
if self.stop_execution:
self.current_status = "❌ Stopped"
if self.is_prime:
self.agent_lifecycle.reset_all_agents()
raise AgentStoppedException("Agent stopped.")
def respond(self, input_text, evolve_count=0):
"""
Generate a response to the given input text.
"""
return self.response_handler.respond(input_text, evolve_count)
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
def reset(self):
"""Reset the agent's stopped status."""
self.current_status = ""
self.stop_execution = False
def __eq__(self, other):
if not isinstance(other, MicroAgent):
return NotImplemented
return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose)
def __hash__(self):
return hash((self.dynamic_prompt, self.purpose))
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>microagents/agents/microagent.py<fim_prefix>import logging
import uuid
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_evaluation import AgentEvaluator
from agents.agent_response import AgentResponse
from agents.agent_similarity import AgentSimilarity
from agents.response_extraction import ResponseExtraction
from agents.agent_stopped_exception import AgentStoppedException
from agents.response_handler import ResponseHandler
from runtime.code_execution import CodeExecution
from prompt_management.prompt_evolution import PromptEvolution
from utils.utility import get_env_variable, time_function, log_exception
logger = logging.getLogger()
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None, parent=None, parent_id=None, id=None) :
self.dynamic_prompt = initial_prompt
self.purpose = purpose
self.purpose_embedding = purpose_embedding
self.depth = depth
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_lifecycle = agent_lifecycle
self.openai_wrapper = openai_wrapper
self.evolve_count = 0
self.number_of_code_executions = 0
self.current_status = None
self.active_agents = {}
self.last_input = ""
self.last_output = ""
self.last_conversation = ""
self.stopped = False
self.is_prime = is_prime
self.stop_execution = False
if parent:
self.parent_id = parent.id if parent else None
else:
self.parent_id = None
if parent_id:
self.parent_id = parent_id
if is_prime:
self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b"
else:
i<fim_suffix>f id:
self.id = id
else:
self.id = str(uuid.uuid4())
# Initialize components used by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
self.response_handler = ResponseHandler(self)
def update_status(self, status):
"""Update the agent's current status."""
self.check_for_stopped()
self.current_status = status
logger.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logger.info(f"Active agents updated: {self.active_agents}")
def set_agent_as_working(self):
"""Set the agent as a working agent."""
self.working_agent = True
self.agent_lifecycle.save_agent(self)
logger.info(f"Agent {self.purpose} set as working agent.")
def get_children(self):
"""Get the children of the agent."""
return [agent for agent in self.agent_lifecycle.agents if agent.parent_id == self.id]
def is_working_agent(self):
return self.working_agent
def set_agent_deleted(self):
"""Set the agent as deleted."""
self.working_agent = False
self.current_status = "❌ Deleted"
self.stopped = True
self.stop_execution = True
self.agent_lifecycle.remove_agent(self)
logger.info(f"Agent {self.purpose} set as deleted.")
def check_for_stopped(self):
"""Check if the agent has been stopped."""
if self.stop_execution:
self.current_status = "❌ Stopped"
if self.is_prime:
self.agent_lifecycle.reset_all_agents()
raise AgentStoppedException("Agent stopped.")
def respond(self, input_text, evolve_count=0):
"""
Generate a response to the given input text.
"""
return self.response_handler.respond(input_text, evolve_count)
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
def reset(self):
"""Reset the agent's stopped status."""
self.current_status = ""
self.stop_execution = False
def __eq__(self, other):
if not isinstance(other, MicroAgent):
return NotImplemented
return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose)
def __hash__(self):
return hash((self.dynamic_prompt, self.purpose))
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>microagents/agents/agent_similarity.py<fim_prefix>import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_simi<fim_suffix>larity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>microagents/agents/agent_similarity.py<fim_prefix>import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
if agent.purpose_emb<fim_suffix>edding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>microagents/agents/agent_similarity.py<fim_prefix>import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response an<fim_suffix>d len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle> | null | IF | complete_current_header_empty_completion |
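A minimal, self-contained sketch of the nearest-agent lookup that find_closest_agent performs in the rows above, assuming toy purpose embeddings in place of OpenAIAPIWrapper.get_embedding output (the purposes and vectors below are invented for illustration):

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

# Hypothetical purpose -> embedding table standing in for Agent.purpose_embedding values.
agent_embeddings = {
    "summarize text": np.array([0.9, 0.1, 0.0]),
    "write sql": np.array([0.1, 0.8, 0.1]),
}
query_embedding = np.array([0.85, 0.2, 0.05])

closest_purpose, highest_similarity = None, -np.inf
for purpose, embedding in agent_embeddings.items():
    # same call shape as in the rows above: 2-D inputs, scalar at [0][0]
    similarity = cosine_similarity([embedding], [query_embedding])[0][0]
    if similarity > highest_similarity:
        closest_purpose, highest_similarity = purpose, similarity

print(closest_purpose, round(float(highest_similarity), 3))  # -> summarize text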
<filename>microagents/integrations/memoize.py<fim_prefix>import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
"""
Memoization decorator that caches the output of a method in a SQLite
database.
"""
def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs)
return wrapped
return decorator
class SQLiteMemoization:
def __init__(self, filename):
self.filename = filename
self.connection = None
def __enter__(self):
self.connection = sqlite3.connect(self.filename)
self._initialize_database()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
self.connection = None
def _initialize_database(self):
self.connection.execute(
"CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
)
self.connection.execute(
"CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
)
def fetch_or_compute(self, func, func_name, *args, **kwargs):
arg_hash = self._compute_hash(func_name, *args, **kwargs)
result = self._fetch_from_cache(arg_hash)
if result is<fim_suffix> not None:
return result
return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)
def _compute_hash(self, func_name, *args, **kwargs):
data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
return hashlib.sha256(data).hexdigest()
def _fetch_from_cache(self, arg_hash):
cursor = self.connection.cursor()
cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
result = func(*args, **kwargs)
self._cache_result(arg_hash, result)
return result
def _cache_result(self, arg_hash, result):
cursor = self.connection.cursor()
cursor.execute(
"INSERT INTO cache (hash, result) VALUES (?, ?)",
(arg_hash, json.dumps(result))
)
self.connection.commit()<fim_middle> | null | IF | complete_current_header_empty_completion |
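A minimal usage sketch of the memoize_to_sqlite decorator defined above; the decorated function is hypothetical, and the practical constraints are that its arguments have stable repr() values and its return value is JSON-serializable, because results are written with json.dumps and read back with json.loads:

from integrations.memoize import memoize_to_sqlite

@memoize_to_sqlite(func_name="slow_square", filename="cache.db")
def slow_square(x: int) -> int:
    # stand-in for an expensive computation or API call
    return x * x

print(slow_square(12))  # first call computes the value and stores it in cache.db
print(slow_square(12))  # second call is answered from the SQLite cache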
<filename>microagents/agents/agent_persistence_manager.py<fim_prefix>from agents.agent_serializer import AgentSerializer
from integrations.memoize import memoize_to_sqlite
from integrations.sqlite_agent_persistence import SQLiteAgentPersistence
class AgentPersistenceManager:
def __init__(self, db_filename="agents.db"):
self.persistence = SQLiteAgentPersistence(db_filename)
def remove_agent(self, agent):
"""
Remove an agent from the database.
"""
self.persistence.remove_agent(agent.id)
def save_agent(self, agent):
"""
Serialize and save the agent state if it is a working agent and not a prime agent.
"""
if agent.is_working_agent() and not agent.is_prime_agent():
serialized_agent = AgentSerializer.serialize(agent)
self.persistence.save_agent(serialized_agent)
def load_agent(self, purpose, agent_lifecycle, openai_wrapper):
"""
Load an agent with the given purpose from the database.
"""
serialized_agent = self.persistence.fetch_agent(purpose)
if serialized_agent:
return AgentSerializer.from_dict(serialized_agent, agent_lifecycle, openai_wrapper)
return None
def load_all_agents(self, agent_lifecycle, openai_wrapper):
"""
Load all agents from the database.
"""
purposes = self.persistence.load_all_purposes()
agents = []
for purpose in purposes:
agent = self.load_agent(purpose, agent_lifecycle, openai_wrapper)
if <fim_suffix>agent:
agents.append(agent)
return agents
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>microagents/agents/microagent.py<fim_prefix>import logging
import uuid
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_evaluation import AgentEvaluator
from agents.agent_response import AgentResponse
from agents.agent_similarity import AgentSimilarity
from agents.response_extraction import ResponseExtraction
from agents.agent_stopped_exception import AgentStoppedException
from agents.response_handler import ResponseHandler
from runtime.code_execution import CodeExecution
from prompt_management.prompt_evolution import PromptEvolution
from utils.utility import get_env_variable, time_function, log_exception
logger = logging.getLogger()
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None, parent=None, parent_id=None, id=None) :
self.dynamic_prompt = initial_prompt
self.purpose = purpose
self.purpose_embedding = purpose_embedding
self.depth = depth
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_lifecycle = agent_lifecycle
self.openai_wrapper = openai_wrapper
self.evolve_count = 0
self.number_of_code_executions = 0
self.current_status = None
self.active_agents = {}
self.last_input = ""
self.last_output = ""
self.last_conversation = ""
self.stopped = False
self.is_prime = is_prime
self.stop_execution = False
if paren<fim_suffix>t:
self.parent_id = parent.id if parent else None
else:
self.parent_id = None
if parent_id:
self.parent_id = parent_id
if is_prime:
self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b"
else:
if id:
self.id = id
else:
self.id = str(uuid.uuid4())
# Initialize components used by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
self.response_handler = ResponseHandler(self)
def update_status(self, status):
"""Update the agent's current status."""
self.check_for_stopped()
self.current_status = status
logger.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logger.info(f"Active agents updated: {self.active_agents}")
def set_agent_as_working(self):
"""Set the agent as a working agent."""
self.working_agent = True
self.agent_lifecycle.save_agent(self)
logger.info(f"Agent {self.purpose} set as working agent.")
def get_children(self):
"""Get the children of the agent."""
return [agent for agent in self.agent_lifecycle.agents if agent.parent_id == self.id]
def is_working_agent(self):
return self.working_agent
def set_agent_deleted(self):
"""Set the agent as deleted."""
self.working_agent = False
self.current_status = "❌ Deleted"
self.stopped = True
self.stop_execution = True
self.agent_lifecycle.remove_agent(self)
logger.info(f"Agent {self.purpose} set as deleted.")
def check_for_stopped(self):
"""Check if the agent has been stopped."""
if self.stop_execution:
self.current_status = "❌ Stopped"
if self.is_prime:
self.agent_lifecycle.reset_all_agents()
raise AgentStoppedException("Agent stopped.")
def respond(self, input_text, evolve_count=0):
"""
Generate a response to the given input text.
"""
return self.response_handler.respond(input_text, evolve_count)
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
def reset(self):
"""Reset the agent's stopped status."""
self.current_status = ""
self.stop_execution = False
def __eq__(self, other):
if not isinstance(other, MicroAgent):
return NotImplemented
return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose)
def __hash__(self):
return hash((self.dynamic_prompt, self.purpose))
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>microagents/integrations/memoize.py<fim_prefix>import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
"""
Memoization decorator that caches the output of a method in a SQLite
database.
"""
def deco<fim_suffix>rator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs)
return wrapped
return decorator
class SQLiteMemoization:
def __init__(self, filename):
self.filename = filename
self.connection = None
def __enter__(self):
self.connection = sqlite3.connect(self.filename)
self._initialize_database()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
self.connection = None
def _initialize_database(self):
self.connection.execute(
"CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
)
self.connection.execute(
"CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
)
def fetch_or_compute(self, func, func_name, *args, **kwargs):
arg_hash = self._compute_hash(func_name, *args, **kwargs)
result = self._fetch_from_cache(arg_hash)
if result is not None:
return result
return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)
def _compute_hash(self, func_name, *args, **kwargs):
data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
return hashlib.sha256(data).hexdigest()
def _fetch_from_cache(self, arg_hash):
cursor = self.connection.cursor()
cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
result = func(*args, **kwargs)
self._cache_result(arg_hash, result)
return result
def _cache_result(self, arg_hash, result):
cursor = self.connection.cursor()
cursor.execute(
"INSERT INTO cache (hash, result) VALUES (?, ?)",
(arg_hash, json.dumps(result))
)
self.connection.commit()<fim_middle> | null | METHOD | complete_current_header_empty_completion |
<filename>microagents/integrations/memoize.py<fim_prefix>import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
"""
Memoization decorator that caches the output of a method in a SQLite
database.
"""
def decorator(func):
@functools.wraps(func)
def w<fim_suffix>rapped(*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs)
return wrapped
return decorator
class SQLiteMemoization:
def __init__(self, filename):
self.filename = filename
self.connection = None
def __enter__(self):
self.connection = sqlite3.connect(self.filename)
self._initialize_database()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
self.connection = None
def _initialize_database(self):
self.connection.execute(
"CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
)
self.connection.execute(
"CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
)
def fetch_or_compute(self, func, func_name, *args, **kwargs):
arg_hash = self._compute_hash(func_name, *args, **kwargs)
result = self._fetch_from_cache(arg_hash)
if result is not None:
return result
return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)
def _compute_hash(self, func_name, *args, **kwargs):
data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
return hashlib.sha256(data).hexdigest()
def _fetch_from_cache(self, arg_hash):
cursor = self.connection.cursor()
cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
result = func(*args, **kwargs)
self._cache_result(arg_hash, result)
return result
def _cache_result(self, arg_hash, result):
cursor = self.connection.cursor()
cursor.execute(
"INSERT INTO cache (hash, result) VALUES (?, ?)",
(arg_hash, json.dumps(result))
)
self.connection.commit()<fim_middle> | null | METHOD | complete_current_header_empty_completion |
<filename>microagents/integrations/memoize.py<fim_prefix>import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
"""
Memoization decorator that caches the output of a method in a SQLite
database.
"""
def decorator(func):
@functools.wraps(func)
def wrapped(<fim_suffix>*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs)
return wrapped
return decorator
class SQLiteMemoization:
def __init__(self, filename):
self.filename = filename
self.connection = None
def __enter__(self):
self.connection = sqlite3.connect(self.filename)
self._initialize_database()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
self.connection = None
def _initialize_database(self):
self.connection.execute(
"CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
)
self.connection.execute(
"CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
)
def fetch_or_compute(self, func, func_name, *args, **kwargs):
arg_hash = self._compute_hash(func_name, *args, **kwargs)
result = self._fetch_from_cache(arg_hash)
if result is not None:
return result
return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)
def _compute_hash(self, func_name, *args, **kwargs):
data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
return hashlib.sha256(data).hexdigest()
def _fetch_from_cache(self, arg_hash):
cursor = self.connection.cursor()
cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
result = func(*args, **kwargs)
self._cache_result(arg_hash, result)
return result
def _cache_result(self, arg_hash, result):
cursor = self.connection.cursor()
cursor.execute(
"INSERT INTO cache (hash, result) VALUES (?, ?)",
(arg_hash, json.dumps(result))
)
self.connection.commit()<fim_middle> | null | METHOD | complete_current_header_empty_completion |
<filename>microagents/integrations/sqlite_agent_persistence.py<fim_prefix>import sqlite3
import json
from integrations.agent_persistence import AbstractAgentPersistence
class SQLiteAgentPersistence(AbstractAgentPersistence):
def __init__(self, filename="agents.db"):
self.filename = filename
self._initialize_database()
def _initialize_database(self):
"""
Initialize the SQLite database with the required schema.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("""
CREATE TABLE IF NOT EXISTS agents (
id TEXT PRIMARY KEY,
purpose TEXT,
data TEXT
)
""")
def remove_agent(self, purpose):
"""
Remove an agent from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("DELETE FROM agents WHERE id = ?", (purpose,))
def save_agent(self, agent_dict):
"""
Save the serialized agent to an SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute(
#<fim_suffix> add id field
"REPLACE INTO agents (id, purpose, data) VALUES (?, ?, ?)",
(agent_dict['id'], agent_dict['purpose'], json.dumps(agent_dict))
)
def fetch_agent(self, purpose):
"""
Fetch a serialized agent based on its purpose from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT data FROM agents WHERE purpose = ?", (purpose,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def load_all_purposes(self):
"""
Load all agent purposes from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT purpose FROM agents")
return [row[0] for row in cursor.fetchall()]<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
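A small round-trip sketch for the SQLiteAgentPersistence class above; the agent dictionary is hypothetical but carries the 'id' and 'purpose' keys that save_agent reads:

from integrations.sqlite_agent_persistence import SQLiteAgentPersistence

store = SQLiteAgentPersistence(filename="agents.db")
store.save_agent({
    "id": "agent-123",                 # used as the primary key
    "purpose": "summarize text",       # used for lookups by fetch_agent
    "dynamic_prompt": "You summarize user input.",
})
print(store.fetch_agent("summarize text"))  # -> the saved dictionary
print(store.load_all_purposes())            # -> ['summarize text']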
<filename>microagents/agents/microagent.py<fim_prefix>import logging
import uuid
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_evaluation import AgentEvaluator
from agents.agent_response import AgentResponse
from agents.agent_similarity import AgentSimilarity
from agents.response_extraction import ResponseExtraction
from agents.agent_stopped_exception import AgentStoppedException
from agents.response_handler import ResponseHandler
from runtime.code_execution import CodeExecution
from prompt_management.prompt_evolution import PromptEvolution
from utils.utility import get_env_variable, time_function, log_exception
logger = logging.getLogger()
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None, parent=None, parent_id=None, id=None) :
self.dynamic_prompt = initial_prompt
self.purpose = purpose
self.purpose_embedding = purpose_embedding
self.depth = depth
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_lifecycle = agent_lifecycle
self.openai_wrapper = openai_wrapper
self.evolve_count = 0
self.number_of_code_executions = 0
self.current_status = None
self.active_agents = {}
self.last_input = ""
self.last_output = ""
self.last_conversation = ""
self.stopped = False
self.is_prime = is_prime
self.stop_execution = False
if parent:
self.parent_id = parent.id if parent else None
else:
self.parent_id = None
if parent_id:
self.parent_id = parent_id
if is_prime:
self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b"
else:
if id:
self.id = id
else:
self.id = str(uuid.uuid4())
# Initialize components u<fim_suffix>sed by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
self.response_handler = ResponseHandler(self)
def update_status(self, status):
"""Update the agent's current status."""
self.check_for_stopped()
self.current_status = status
logger.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logger.info(f"Active agents updated: {self.active_agents}")
def set_agent_as_working(self):
"""Set the agent as a working agent."""
self.working_agent = True
self.agent_lifecycle.save_agent(self)
logger.info(f"Agent {self.purpose} set as working agent.")
def get_children(self):
"""Get the children of the agent."""
return [agent for agent in self.agent_lifecycle.agents if agent.parent_id == self.id]
def is_working_agent(self):
return self.working_agent
def set_agent_deleted(self):
"""Set the agent as deleted."""
self.working_agent = False
self.current_status = "❌ Deleted"
self.stopped = True
self.stop_execution = True
self.agent_lifecycle.remove_agent(self)
logger.info(f"Agent {self.purpose} set as deleted.")
def check_for_stopped(self):
"""Check if the agent has been stopped."""
if self.stop_execution:
self.current_status = "❌ Stopped"
if self.is_prime:
self.agent_lifecycle.reset_all_agents()
raise AgentStoppedException("Agent stopped.")
def respond(self, input_text, evolve_count=0):
"""
Generate a response to the given input text.
"""
return self.response_handler.respond(input_text, evolve_count)
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
def reset(self):
"""Reset the agent's stopped status."""
self.current_status = ""
self.stop_execution = False
def __eq__(self, other):
if not isinstance(other, MicroAgent):
return NotImplemented
return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose)
def __hash__(self):
return hash((self.dynamic_prompt, self.purpose))
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
<filename>microsearch/src/microsearch/engine.py<fim_prefix>from collections import defaultdict
from math import log
import string
def update_url_scores(old: dict[str, float], new: dict[str, float]):
for url, score in new.items():
if url in old:
old[url] += score
else:
old[url] = score
return old
def normalize_string(input_string: str) -> str:
translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation))
string_without_punc = input_string.translate(translation_table)
string_without_double_spaces = " ".join(string_without_punc.split())
return string_without_double_spaces.lower()
class SearchEngine:
def __init__(self, k1: float = 1.5, b: float = 0.75):
self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
self._documents: dict[str, str] = {}
self.k1 = k1
self.b = b
@property
def posts(self) -> list[str]:
return list(self._documents.keys())
@property
def number_of_documents(self) -> int:
return len(self._documents)
@property
def avdl(self) -> float:
if not hasattr(self, "_avdl"):
self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents)
return self._avdl
def idf(self, kw: str) -> float:
N = self.number_of_documents
n_kw = len(self.get_urls(kw))
return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)
def bm25(self, kw: str) -> dict[str, float]:
result = {}
idf_score = self.idf(kw)
avdl = self.avdl
for url, freq in self.get_urls(kw).items():
numerator = freq * (self.k1 + 1)
denominator = freq + self.k1 * (
1 - self.b + self.b * len(self._documents[url]) / avdl
)
result[url] = idf_score * numerator / de<fim_suffix>nominator
return result
def search(self, query: str) -> dict[str, float]:
keywords = normalize_string(query).split(" ")
url_scores: dict[str, float] = {}
for kw in keywords:
kw_urls_score = self.bm25(kw)
url_scores = update_url_scores(url_scores, kw_urls_score)
return url_scores
def index(self, url: str, content: str) -> None:
self._documents[url] = content
words = normalize_string(content).split(" ")
for word in words:
self._index[word][url] += 1
if hasattr(self, "_avdl"):
del self._avdl
def bulk_index(self, documents: list[tuple[str, str]]):
for url, content in documents:
self.index(url, content)
def get_urls(self, keyword: str) -> dict[str, int]:
keyword = normalize_string(keyword)
return self._index[keyword]
engine = SearchEngine()
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
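A short usage sketch for the SearchEngine shown in the rows above: index a couple of documents, then rank them for a query with the BM25 scoring implemented in bm25() and search(). The import path assumes the src-layout package name from the filename (microsearch/src/microsearch/engine.py), and the URLs and texts are toy values:

from microsearch.engine import SearchEngine

engine = SearchEngine(k1=1.5, b=0.75)
engine.bulk_index([
    ("https://example.com/python", "python is a programming language"),
    ("https://example.com/cooking", "a recipe for tomato soup"),
])
scores = engine.search("python language")
for url, score in sorted(scores.items(), key=lambda kv: kv[1], reverse=True):
    print(url, round(score, 3))  # highest-scoring URL first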
<filename>microsearch/src/microsearch/engine.py<fim_prefix>from collections import defaultdict
from math import log
import string
def update_url_scores(old: dict[str, float], new: dict[str, float]):
for url, score in new.items():
if url in old:
old[url] <fim_suffix>+= score
else:
old[url] = score
return old
def normalize_string(input_string: str) -> str:
translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation))
string_without_punc = input_string.translate(translation_table)
string_without_double_spaces = " ".join(string_without_punc.split())
return string_without_double_spaces.lower()
class SearchEngine:
def __init__(self, k1: float = 1.5, b: float = 0.75):
self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
self._documents: dict[str, str] = {}
self.k1 = k1
self.b = b
@property
def posts(self) -> list[str]:
return list(self._documents.keys())
@property
def number_of_documents(self) -> int:
return len(self._documents)
@property
def avdl(self) -> float:
if not hasattr(self, "_avdl"):
self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents)
return self._avdl
def idf(self, kw: str) -> float:
N = self.number_of_documents
n_kw = len(self.get_urls(kw))
return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)
def bm25(self, kw: str) -> dict[str, float]:
result = {}
idf_score = self.idf(kw)
avdl = self.avdl
for url, freq in self.get_urls(kw).items():
numerator = freq * (self.k1 + 1)
denominator = freq + self.k1 * (
1 - self.b + self.b * len(self._documents[url]) / avdl
)
result[url] = idf_score * numerator / denominator
return result
def search(self, query: str) -> dict[str, float]:
keywords = normalize_string(query).split(" ")
url_scores: dict[str, float] = {}
for kw in keywords:
kw_urls_score = self.bm25(kw)
url_scores = update_url_scores(url_scores, kw_urls_score)
return url_scores
def index(self, url: str, content: str) -> None:
self._documents[url] = content
words = normalize_string(content).split(" ")
for word in words:
self._index[word][url] += 1
if hasattr(self, "_avdl"):
del self._avdl
def bulk_index(self, documents: list[tuple[str, str]]):
for url, content in documents:
self.index(url, content)
def get_urls(self, keyword: str) -> dict[str, int]:
keyword = normalize_string(keyword)
return self._index[keyword]
engine = SearchEngine()
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>microsearch/src/microsearch/engine.py<fim_prefix>from collections import defaultdict
from math import log
import string
def update_url_scores(old: dict[str, float], new: dict[str, float]):
for url, score in new.items():
if url in old:
old[url] += score
else:
old[url] = score
return old
def normalize_string(input_string: str) -> str:
translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation))
string_without_punc = input_string.translate(translation_table)
string_without_double_spaces = " ".join(string_without_punc.split())
return string_without_double_spaces.lower()
class SearchEngine:
def __init__(self, k1: float = 1.5, b: float = 0.75):
self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
self._documents: dict[str, str] = {}
self.k1 = k1
self.b = b
@property
def posts(self) -> list[str]:
return list(self._documents.keys())
@property
def number_of_documents(self) -> int:
return len(self._documents)
@property
def avdl(self) -> float:
if not hasattr(self, "_avdl"):
self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents)
return self._avdl
def idf(self, kw: str) -> float:
N = self.number_of_documents
n_kw = len(self.get_urls(kw))
return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)
def bm25(self, kw: str) -> dict[str, float]:
result = {}
idf_s<fim_suffix>core = self.idf(kw)
avdl = self.avdl
for url, freq in self.get_urls(kw).items():
numerator = freq * (self.k1 + 1)
denominator = freq + self.k1 * (
1 - self.b + self.b * len(self._documents[url]) / avdl
)
result[url] = idf_score * numerator / denominator
return result
def search(self, query: str) -> dict[str, float]:
keywords = normalize_string(query).split(" ")
url_scores: dict[str, float] = {}
for kw in keywords:
kw_urls_score = self.bm25(kw)
url_scores = update_url_scores(url_scores, kw_urls_score)
return url_scores
def index(self, url: str, content: str) -> None:
self._documents[url] = content
words = normalize_string(content).split(" ")
for word in words:
self._index[word][url] += 1
if hasattr(self, "_avdl"):
del self._avdl
def bulk_index(self, documents: list[tuple[str, str]]):
for url, content in documents:
self.index(url, content)
def get_urls(self, keyword: str) -> dict[str, int]:
keyword = normalize_string(keyword)
return self._index[keyword]
engine = SearchEngine()
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>microsearch/src/microsearch/engine.py<fim_prefix>from collections import defaultdict
from math import log
import string
def update_url_scores(old: dict[str, float], new: dict[str, float]):
for url, score in new.items():
if url in old:
old[url] += score
else:
old[url] = score
return old
def normalize_string(input_string: str) -> str:
translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation))
string_without_punc = input_string.translate(translation_table)
string_without_double_spaces = " ".join(string_without_punc.split())
return string_without_double_spaces.lower()
class SearchEngine:
def __init__(self, k1: float = 1.5, b: float = 0.75):
self._index: dict[str, dict[<fim_suffix>str, int]] = defaultdict(lambda: defaultdict(int))
self._documents: dict[str, str] = {}
self.k1 = k1
self.b = b
@property
def posts(self) -> list[str]:
return list(self._documents.keys())
@property
def number_of_documents(self) -> int:
return len(self._documents)
@property
def avdl(self) -> float:
if not hasattr(self, "_avdl"):
self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents)
return self._avdl
def idf(self, kw: str) -> float:
N = self.number_of_documents
n_kw = len(self.get_urls(kw))
return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)
def bm25(self, kw: str) -> dict[str, float]:
result = {}
idf_score = self.idf(kw)
avdl = self.avdl
for url, freq in self.get_urls(kw).items():
numerator = freq * (self.k1 + 1)
denominator = freq + self.k1 * (
1 - self.b + self.b * len(self._documents[url]) / avdl
)
result[url] = idf_score * numerator / denominator
return result
def search(self, query: str) -> dict[str, float]:
keywords = normalize_string(query).split(" ")
url_scores: dict[str, float] = {}
for kw in keywords:
kw_urls_score = self.bm25(kw)
url_scores = update_url_scores(url_scores, kw_urls_score)
return url_scores
def index(self, url: str, content: str) -> None:
self._documents[url] = content
words = normalize_string(content).split(" ")
for word in words:
self._index[word][url] += 1
if hasattr(self, "_avdl"):
del self._avdl
def bulk_index(self, documents: list[tuple[str, str]]):
for url, content in documents:
self.index(url, content)
def get_urls(self, keyword: str) -> dict[str, int]:
keyword = normalize_string(keyword)
return self._index[keyword]
engine = SearchEngine()
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>microsearch/src/microsearch/engine.py<fim_prefix>from collections import defaultdict
from math import log
import string
def update_url_scores(old: dict[str, float], new: dict[str, float]):
for url, score in new.items():
if url in old:
old[url] += score
else:
old[url] = score
return old
def normalize_string(input_string: str) -> str:
translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation))
string_without_punc = input_string.translate(translation_table)
string_without_double_spaces = " ".join(string_without_punc.split())
return string_without_double_spaces.lower()
class SearchEngine:
def __init__(self, k1: float = 1.5, b: float = 0.75):
self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
self._documents: dict[str, str] = {}
self.k1 = k1
self.b = b
@property
def posts(self) -> list[str]:
return list(self._documents.keys())
@property
def number_of_documents(self) -> int:
return len(self._documents)
@property
def avdl(self) -> float:
if not hasattr(self, "_avdl"):
self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents)
return self._avdl
def idf(self, kw: str) -> float:
N = self.number_of_documents
n_kw = len(self.get_urls(kw))
return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)
def bm25(self, kw: str) -> dict[str, float]:
result = {}
idf_score = self.idf(kw)
avdl = self.avdl
for url, freq in self.get_urls(kw).items():
numerator = freq * (self.k1 + 1)
denominator = freq + self.k1 * (
1 - self.b + self.b * len(self._documents[url]) / avdl
)
result[url] = idf_score * numerator / denominator
return result
def search(self, query: str) -> dict[str, float]:
keywords = normalize_string(query).split(" ")
url_scores: dict[str, float] = {}
for kw in keywords:
kw_urls_score = self.bm25(kw)
url_scores = update_url_scores(url_scores, kw_urls_score)
return url_scores
def index(self, url: str, content: str) -> None:
self._documents[u<fim_suffix>rl] = content
words = normalize_string(content).split(" ")
for word in words:
self._index[word][url] += 1
if hasattr(self, "_avdl"):
del self._avdl
def bulk_index(self, documents: list[tuple[str, str]]):
for url, content in documents:
self.index(url, content)
def get_urls(self, keyword: str) -> dict[str, int]:
keyword = normalize_string(keyword)
return self._index[keyword]
engine = SearchEngine()
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>microsearch/src/microsearch/engine.py<fim_prefix>from collections import defaultdict
from math import log
import string
def update_url_scores(old: dict[str, float], new: dict[str, float]):
for url, score in new.items():
if url in old:
old[url] += score
else:
old[url] = score
return old
def normalize_string(input_string: str) -> str:
translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation))
string_without_punc = input_string.translate(translation_table)
string_without_double_spaces = " ".join(string_without_punc.split())
return string_without_double_spaces.lower()
class SearchEngine:
def __init__(self, k1: float = 1.5, b: float = 0.75):
self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
self._documents: dict[str, str] = {}
self.k1 = k1
self.b = b
@property
def posts(self) -> list[str]:
return list(self._documents.keys())
@property
def number_of_documents(self) -> int:
return len(self._documents)
@property
def avdl(self) -> float:
if not hasattr(self, "_avdl"):
self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents)
return self._avdl
def idf(self, kw: str) -> float:
N = self.number_of_documents
n_kw = len(self.get_urls(kw))
return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)
def bm25(self, kw: str) -> dict[str, float]:
result = {}
idf_score = self.idf(kw)
avdl = self.avdl
for url, freq in self.get_urls(kw).items():
numerator = freq * (self.k1 + 1)
denominator =<fim_suffix> freq + self.k1 * (
1 - self.b + self.b * len(self._documents[url]) / avdl
)
result[url] = idf_score * numerator / denominator
return result
def search(self, query: str) -> dict[str, float]:
keywords = normalize_string(query).split(" ")
url_scores: dict[str, float] = {}
for kw in keywords:
kw_urls_score = self.bm25(kw)
url_scores = update_url_scores(url_scores, kw_urls_score)
return url_scores
def index(self, url: str, content: str) -> None:
self._documents[url] = content
words = normalize_string(content).split(" ")
for word in words:
self._index[word][url] += 1
if hasattr(self, "_avdl"):
del self._avdl
def bulk_index(self, documents: list[tuple[str, str]]):
for url, content in documents:
self.index(url, content)
def get_urls(self, keyword: str) -> dict[str, int]:
keyword = normalize_string(keyword)
return self._index[keyword]
engine = SearchEngine()
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>microsearch/src/microsearch/engine.py<fim_prefix>from collections import defaultdict
from math import log
import string
def update_url_scores(old: dict[str, float], new: dict[str, float]):
for url, score in new.items():
if url in old:
old[url] += score
else:
old[url] = score
return old
def normalize_string(input_string: str) -> str:
translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation))
string_without_punc = input_string.translate(translation_table)
string_without_double_spaces = " ".join(string_without_punc.split())
return string_without_double_spaces.lower()
class SearchEngine:
def __init__(self, k1: float = 1.5, b: float = 0.75):
self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
self._documents: dict[str, str] = {}
self.k1 = k1
self.b = b
@property
def posts(self) -> list[str]:
return list(self._documents.keys())
@property
def number_of_documents(self) -> int:
return len(self._documents)
@property
def avdl(self) -> float:
if not hasattr(self, "_avdl"):
self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents)
return self._avdl
def idf(self, kw: str) -> float:
N = self.number_of_documents
n_kw = len(self.get_urls(kw))
return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)
def bm25(self, kw: str) -> dict[str, float]:
r<fim_suffix>esult = {}
idf_score = self.idf(kw)
avdl = self.avdl
for url, freq in self.get_urls(kw).items():
numerator = freq * (self.k1 + 1)
denominator = freq + self.k1 * (
1 - self.b + self.b * len(self._documents[url]) / avdl
)
result[url] = idf_score * numerator / denominator
return result
def search(self, query: str) -> dict[str, float]:
keywords = normalize_string(query).split(" ")
url_scores: dict[str, float] = {}
for kw in keywords:
kw_urls_score = self.bm25(kw)
url_scores = update_url_scores(url_scores, kw_urls_score)
return url_scores
def index(self, url: str, content: str) -> None:
self._documents[url] = content
words = normalize_string(content).split(" ")
for word in words:
self._index[word][url] += 1
if hasattr(self, "_avdl"):
del self._avdl
def bulk_index(self, documents: list[tuple[str, str]]):
for url, content in documents:
self.index(url, content)
def get_urls(self, keyword: str) -> dict[str, int]:
keyword = normalize_string(keyword)
return self._index[keyword]
engine = SearchEngine()
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>microsearch/src/microsearch/engine.py<fim_prefix>from collections import defaultdict
from math import log
import string
def update_url_scores(old: dict[str, float], new: dict[str, float]):
for url, score in new.items():
if url in old:
old[url] += score
else:
old[url] = score
return old
def normalize_string(input_string: str) -> str:
translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation))
string_without_punc = input_string.translate(translation_table)
string_without<fim_suffix>_double_spaces = " ".join(string_without_punc.split())
return string_without_double_spaces.lower()
class SearchEngine:
def __init__(self, k1: float = 1.5, b: float = 0.75):
self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
self._documents: dict[str, str] = {}
self.k1 = k1
self.b = b
@property
def posts(self) -> list[str]:
return list(self._documents.keys())
@property
def number_of_documents(self) -> int:
return len(self._documents)
@property
def avdl(self) -> float:
if not hasattr(self, "_avdl"):
self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents)
return self._avdl
def idf(self, kw: str) -> float:
N = self.number_of_documents
n_kw = len(self.get_urls(kw))
return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)
def bm25(self, kw: str) -> dict[str, float]:
result = {}
idf_score = self.idf(kw)
avdl = self.avdl
for url, freq in self.get_urls(kw).items():
numerator = freq * (self.k1 + 1)
denominator = freq + self.k1 * (
1 - self.b + self.b * len(self._documents[url]) / avdl
)
result[url] = idf_score * numerator / denominator
return result
def search(self, query: str) -> dict[str, float]:
keywords = normalize_string(query).split(" ")
url_scores: dict[str, float] = {}
for kw in keywords:
kw_urls_score = self.bm25(kw)
url_scores = update_url_scores(url_scores, kw_urls_score)
return url_scores
def index(self, url: str, content: str) -> None:
self._documents[url] = content
words = normalize_string(content).split(" ")
for word in words:
self._index[word][url] += 1
if hasattr(self, "_avdl"):
del self._avdl
def bulk_index(self, documents: list[tuple[str, str]]):
for url, content in documents:
self.index(url, content)
def get_urls(self, keyword: str) -> dict[str, int]:
keyword = normalize_string(keyword)
return self._index[keyword]
engine = SearchEngine()
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>microsearch/src/microsearch/engine.py<fim_prefix>from collections import defaultdict
from math import log
import string
def update_url_scores(old: dict[str, float], new: dict[str, float]):
for url, score in new.items():
if url in old:
old[url] += score
else:
old[url] = score
return old
def normalize_string(input_string: str) -> str:
translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation))
string_without_punc = input_string.translate(translation_table)
string_without_double_spaces = " ".join(string_without_punc.split())
return string_without_double_spaces.lower()
class SearchEngine:
def __init__(self, k1: float = 1.5, b: float = 0.75):
self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
self._documents: dict[str, str] = {}
self.k1 = k1
self.b = b
@property
def posts(self) -> list[str]:
return list(self._documents.keys())
@property
def number_of_documents(self) -> int:
return len(self._documents)
@property
def avdl(self) -> float:
if not hasattr(self, "_avdl"):
self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents)
return self._avdl
def idf(self, kw: str) -> float:
N = self.number_of_documents
n_kw = len(self.get_urls(kw))
return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)
def bm25(self, kw: str) -> dict[str, float]:
result = {}
idf_score = self.idf(kw)
avdl = self.avdl
for url, freq in self.get_urls(kw).items():
numerator = freq * (self.k1 + 1)
denominator = freq + self.k1 * (
1 - self.b + self.b * len(self._documents[url]) / avdl
)
result[url] = idf_score * numerator / denominator
return result
def search(self, query: str) -> dict[str, float]:
keywords = normalize_string(query).split(" ")
url_scores: dict[str, float] = {}
for kw in keywords:
kw_urls_score = self.bm25(kw)
url_scores = update_url_scores(url_scores, kw_urls_score)
return url<fim_suffix>_scores
def index(self, url: str, content: str) -> None:
self._documents[url] = content
words = normalize_string(content).split(" ")
for word in words:
self._index[word][url] += 1
if hasattr(self, "_avdl"):
del self._avdl
def bulk_index(self, documents: list[tuple[str, str]]):
for url, content in documents:
self.index(url, content)
def get_urls(self, keyword: str) -> dict[str, int]:
keyword = normalize_string(keyword)
return self._index[keyword]
engine = SearchEngine()
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>microsearch/src/microsearch/engine.py<fim_prefix>from collections import defaultdict
from math import log
import string
def update_url_scores(old: dict[str, float], new: dict[str, float]):
for url, score in new.items():
if url in old:
old[url] += score
else:
old[url] = score
return old
def normalize_string(input_string: str) -> str:
translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation))
string_without_punc = input_string.translate(translation_table)
string_without_double_spaces = " ".join(string_without_punc.split())
return string_without_double_spaces.lower()
class SearchEngine:
def __init__(self, k1: float = 1.5, b: float = 0.75):
self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
self._documents: dict[str, str] = {}
self.k1 = k1
self.b = b
@property
def posts(self) -> list[str]:
return list(self._documents.keys())
@property
def number_of_documents(self) -> int:
return len(self._documents)
@property
def avdl(self) -> float:
if not hasattr(self, "_avdl"):
self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents)
return self._avdl
def idf(self, kw: str) -> float:
N = self.number_of_documents
n<fim_suffix>_kw = len(self.get_urls(kw))
return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)
def bm25(self, kw: str) -> dict[str, float]:
result = {}
idf_score = self.idf(kw)
avdl = self.avdl
for url, freq in self.get_urls(kw).items():
numerator = freq * (self.k1 + 1)
denominator = freq + self.k1 * (
1 - self.b + self.b * len(self._documents[url]) / avdl
)
result[url] = idf_score * numerator / denominator
return result
def search(self, query: str) -> dict[str, float]:
keywords = normalize_string(query).split(" ")
url_scores: dict[str, float] = {}
for kw in keywords:
kw_urls_score = self.bm25(kw)
url_scores = update_url_scores(url_scores, kw_urls_score)
return url_scores
def index(self, url: str, content: str) -> None:
self._documents[url] = content
words = normalize_string(content).split(" ")
for word in words:
self._index[word][url] += 1
if hasattr(self, "_avdl"):
del self._avdl
def bulk_index(self, documents: list[tuple[str, str]]):
for url, content in documents:
self.index(url, content)
def get_urls(self, keyword: str) -> dict[str, int]:
keyword = normalize_string(keyword)
return self._index[keyword]
engine = SearchEngine()
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>microsearch/src/microsearch/engine.py<fim_prefix>from collections import defaultdict
from math import log
import string
def update_url_scores(old: dict[str, float], new: dict[str, float]):
for url, score in new.items():
if url in old:
old[url] += score
else:
old[url] = score
return old
def normalize_string(input_string: str) -> str:
translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation))
string_without_punc = input_string.translate(translation_table)
string_without_double_spaces = " ".join(string_without_punc.split())
return string_without_double_spaces.lower()
class SearchEngine:
def __init__(self, k1: float = 1.5, b: float = 0.75):
self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
self._documents: dict[str, str] = {}
self.k1 = k1
self.b = b
@property
def posts(self) -> list[str]:
return list(self._documents.keys())
@property
def number_of_documents(self) -> int:
return len(self._documents)
@property
def avdl(self) -> float:
if not hasattr(self, "_avdl"):
self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents)
return self._avdl
def idf(self, kw: str) -> float:
N = self.number_of_documents
n_kw = len(self.get_urls(kw))
return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)
def bm25(self, kw: str) -> dict[str, float]:
result = {}
idf_score = self.idf(kw)
avdl = self.avdl
for url, freq in self.get_urls(kw).items():
numerator = freq * (self.k1 + 1)
denominator = freq + self.k1 * (
1 - self.b + self.b * len(self._documents[url]) / avdl
)
result[url] = idf_score * numerator / denominator
return result
def search(self, query: str) -> dict[str, float]:
keywords = normalize_string(query).split(" ")
url_scores: dict[str, float] = {}
for kw in keywords:
kw_urls_score = self.bm25(kw)
url_scores = update_url_scores(url_scores, kw_urls_score)
return url_scores
def index(self, url: str, content: str) -> None:
self._documents[url] = content
words = normalize_string(content).split(" ")
for word <fim_suffix>in words:
self._index[word][url] += 1
if hasattr(self, "_avdl"):
del self._avdl
def bulk_index(self, documents: list[tuple[str, str]]):
for url, content in documents:
self.index(url, content)
def get_urls(self, keyword: str) -> dict[str, int]:
keyword = normalize_string(keyword)
return self._index[keyword]
engine = SearchEngine()
<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>microsearch/src/microsearch/engine.py<fim_prefix>from collections import defaultdict
from math import log
import string
def update_url_scores(old: dict[str, float], new: dict[str, float]):
for url, score in new.<fim_suffix>items():
if url in old:
old[url] += score
else:
old[url] = score
return old
def normalize_string(input_string: str) -> str:
translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation))
string_without_punc = input_string.translate(translation_table)
string_without_double_spaces = " ".join(string_without_punc.split())
return string_without_double_spaces.lower()
class SearchEngine:
def __init__(self, k1: float = 1.5, b: float = 0.75):
self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
self._documents: dict[str, str] = {}
self.k1 = k1
self.b = b
@property
def posts(self) -> list[str]:
return list(self._documents.keys())
@property
def number_of_documents(self) -> int:
return len(self._documents)
@property
def avdl(self) -> float:
if not hasattr(self, "_avdl"):
self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents)
return self._avdl
def idf(self, kw: str) -> float:
N = self.number_of_documents
n_kw = len(self.get_urls(kw))
return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)
def bm25(self, kw: str) -> dict[str, float]:
result = {}
idf_score = self.idf(kw)
avdl = self.avdl
for url, freq in self.get_urls(kw).items():
numerator = freq * (self.k1 + 1)
denominator = freq + self.k1 * (
1 - self.b + self.b * len(self._documents[url]) / avdl
)
result[url] = idf_score * numerator / denominator
return result
def search(self, query: str) -> dict[str, float]:
keywords = normalize_string(query).split(" ")
url_scores: dict[str, float] = {}
for kw in keywords:
kw_urls_score = self.bm25(kw)
url_scores = update_url_scores(url_scores, kw_urls_score)
return url_scores
def index(self, url: str, content: str) -> None:
self._documents[url] = content
words = normalize_string(content).split(" ")
for word in words:
self._index[word][url] += 1
if hasattr(self, "_avdl"):
del self._avdl
def bulk_index(self, documents: list[tuple[str, str]]):
for url, content in documents:
self.index(url, content)
def get_urls(self, keyword: str) -> dict[str, int]:
keyword = normalize_string(keyword)
return self._index[keyword]
engine = SearchEngine()
<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>microsearch/src/microsearch/engine.py<fim_prefix>from collections import defaultdict
from math import log
import string
def update_url_scores(old: dict[str, float], new: dict[str, float]):
for url, score in new.items():
if url in old:
old[url] += score
else:
old[url] = score
return old
def normalize_string(input_string: str) -> str:
translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation))
string_without_punc = input_string.translate(translation_table)
string_without_double_spaces = " ".join(string_without_punc.split())
return string_without_double_spaces.lower()
class SearchEngine:
def __init__(self, k1: float = 1.5, b: float = 0.75):
self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
self._documents: dict[str, str] = {}
self.k1 = k1
self.b = b
@property
def posts(self) -> list[str]:
return list(self._documents.keys())
@property
def number_of_documents(self) -> int:
return len(self._documents)
@property
def avdl(self) -> float:
if not hasattr(self, "_avdl"):
self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents)
return self._avdl
def idf(self, kw: str) -> float:
N = self.number_of_documents
n_kw = len(self.get_urls(kw))
return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)
def bm25(self, kw: str) -> dict[str, float]:
result = {}
idf_score = self.idf(kw)
avdl = self.avdl
for url, freq in self.get_urls(kw).items():
numerator = freq * (self.k1 + 1)
denominator = freq + self.k1 * (
1 - self.b + self.b * len(self._documents[url]) / avdl
)
result[url] = idf_score * numerator / denominator
return result
def search(self, query: str) -> dict[str, float]:
keywords = normalize_string(query).split(" ")
url_scores: dict[str, float] = {}
for k<fim_suffix>w in keywords:
kw_urls_score = self.bm25(kw)
url_scores = update_url_scores(url_scores, kw_urls_score)
return url_scores
def index(self, url: str, content: str) -> None:
self._documents[url] = content
words = normalize_string(content).split(" ")
for word in words:
self._index[word][url] += 1
if hasattr(self, "_avdl"):
del self._avdl
def bulk_index(self, documents: list[tuple[str, str]]):
for url, content in documents:
self.index(url, content)
def get_urls(self, keyword: str) -> dict[str, int]:
keyword = normalize_string(keyword)
return self._index[keyword]
engine = SearchEngine()
<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>microsearch/src/microsearch/engine.py<fim_prefix>from collections import defaultdict
from math import log
import string
def update_url_scores(old: dict[str, float], new: dict[str, float]):
for url, score in new.items():
if url in old:
old[url] += score
else:
old[url] = score
return old
def normalize_string(input_string: str) -> str:
translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation))
string_without_punc = input_string.translate(translation_table)
string_without_double_spaces = " ".join(string_without_punc.split())
return string_without_double_spaces.lower()
class SearchEngine:
def __init__(self, k1: float = 1.5, b: float = 0.75):
self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
self._documents: dict[str, str] = {}
self.k1 = k1
self.b = b
@property
def posts(self) -> list[str]:
return list(self._documents.keys())
@property
def number_of_documents(self) -> int:
return len(self._documents)
@property
def avdl(self) -> float:
if not hasattr(self, "_avdl"):
self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents)
return self._avdl
def idf(self, kw: str) -> float:
N = self.number_of_documents
n_kw = len(self.get_urls(kw))
return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)
def bm25(self, kw: str) -> dict[str, float]:
result = {}
idf_score = self.idf(kw)
avdl = self.avdl
for url, freq in <fim_suffix>self.get_urls(kw).items():
numerator = freq * (self.k1 + 1)
denominator = freq + self.k1 * (
1 - self.b + self.b * len(self._documents[url]) / avdl
)
result[url] = idf_score * numerator / denominator
return result
def search(self, query: str) -> dict[str, float]:
keywords = normalize_string(query).split(" ")
url_scores: dict[str, float] = {}
for kw in keywords:
kw_urls_score = self.bm25(kw)
url_scores = update_url_scores(url_scores, kw_urls_score)
return url_scores
def index(self, url: str, content: str) -> None:
self._documents[url] = content
words = normalize_string(content).split(" ")
for word in words:
self._index[word][url] += 1
if hasattr(self, "_avdl"):
del self._avdl
def bulk_index(self, documents: list[tuple[str, str]]):
for url, content in documents:
self.index(url, content)
def get_urls(self, keyword: str) -> dict[str, int]:
keyword = normalize_string(keyword)
return self._index[keyword]
engine = SearchEngine()
<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>microsearch/src/microsearch/engine.py<fim_prefix>from collections import defaultdict
from math import log
import string
def update_url_scores(old: dict[str, float], new: dict[str, float]):
for url, score in new.items():
if url in old:
old[url] += score
else:
old[url] = score
return old
def normalize_string(input_string: str) -> str:
translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation))
string_without_punc = input_string.translate(translation_table)
string_without_double_spaces = " ".join(string_without_punc.split())
return string_without_double_spaces.lower()
class SearchEngine:
def __init__(self, k1: float = 1.5, b: float = 0.75):
self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
self._documents: dict[str, str] = {}
self.k1 = k1
self.b = b
@property
def posts(self) -> list[str]:
return list(self._documents.keys())
@property
def number_of_documents(self) -> int:
return len(self._documents)
@property
def avdl(self) -> float:
if not hasattr(self, "_avdl"):
self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents)
return self._avdl
def idf(self, kw: str) -> float:
N = self.number_of_documents
n_kw = len(self.get_urls(kw))
return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)
def bm25(self, kw: str) -> dict[str, float]:
result = {}
idf_score = self.idf(kw)
avdl = self.avdl
for url, freq in self.get_urls(kw).items():
numerator = freq * (self.k1 + 1)
denominator = freq + self.k1 * (
1 - self.b + self.b * len(self._documents[url]) / avdl
)
result[url] = idf_score * numerator / denominator
return result
def search(self, query: str) -> dict[str, float]:
keywords = normalize_string(query).split(" ")
url_scores: dict[str, float] = {}
for kw in keywords:
kw_urls_score = self.bm25(kw)
url_scores = update_url_scores(url_scores, kw_urls_score)
return url_scores
def index(self, url: str, content: str) -> None:
self._documents[url] = content
words = normalize_string(content).split(" ")
for word in words:
self._index[word][url] += 1
if hasattr(self, "_avdl"):
del self._avdl
def bulk_index(self, documents: list[tuple[str, str]]):
for<fim_suffix> url, content in documents:
self.index(url, content)
def get_urls(self, keyword: str) -> dict[str, int]:
keyword = normalize_string(keyword)
return self._index[keyword]
engine = SearchEngine()
<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>microsearch/src/microsearch/engine.py<fim_prefix>from collections import defaultdict
from math import log
import string
def update_url_scores(old: dict[str, float], new: dict[str, float]):
for url, score in new.items():
if url in ol<fim_suffix>d:
old[url] += score
else:
old[url] = score
return old
def normalize_string(input_string: str) -> str:
translation_table = str.maketrans(string.punctuation, " " * len(string.punctuation))
string_without_punc = input_string.translate(translation_table)
string_without_double_spaces = " ".join(string_without_punc.split())
return string_without_double_spaces.lower()
class SearchEngine:
def __init__(self, k1: float = 1.5, b: float = 0.75):
self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
self._documents: dict[str, str] = {}
self.k1 = k1
self.b = b
@property
def posts(self) -> list[str]:
return list(self._documents.keys())
@property
def number_of_documents(self) -> int:
return len(self._documents)
@property
def avdl(self) -> float:
if not hasattr(self, "_avdl"):
self._avdl = sum(len(d) for d in self._documents.values()) / len(self._documents)
return self._avdl
def idf(self, kw: str) -> float:
N = self.number_of_documents
n_kw = len(self.get_urls(kw))
return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)
def bm25(self, kw: str) -> dict[str, float]:
result = {}
idf_score = self.idf(kw)
avdl = self.avdl
for url, freq in self.get_urls(kw).items():
numerator = freq * (self.k1 + 1)
denominator = freq + self.k1 * (
1 - self.b + self.b * len(self._documents[url]) / avdl
)
result[url] = idf_score * numerator / denominator
return result
def search(self, query: str) -> dict[str, float]:
keywords = normalize_string(query).split(" ")
url_scores: dict[str, float] = {}
for kw in keywords:
kw_urls_score = self.bm25(kw)
url_scores = update_url_scores(url_scores, kw_urls_score)
return url_scores
def index(self, url: str, content: str) -> None:
self._documents[url] = content
words = normalize_string(content).split(" ")
for word in words:
self._index[word][url] += 1
if hasattr(self, "_avdl"):
del self._avdl
def bulk_index(self, documents: list[tuple[str, str]]):
for url, content in documents:
self.index(url, content)
def get_urls(self, keyword: str) -> dict[str, int]:
keyword = normalize_string(keyword)
return self._index[keyword]
engine = SearchEngine()
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>import datetime
import logging
import math
import re
import string
from nltk.corpus import stopwords
from .patterns import abbreviations
from .patterns import states
from .patterns import states_abbreviations
from .styling_utils import mode_of_list
try:
stop_words = set(stopwords.words("english"))
except Exception as e:
logging.error(e)
import nltk
stopwords = nltk.download("stopwords")
stop_words = set(stopwords.words("english"))
stop_words.add("per")
continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~"
list_chars = [
"•",
"➢",
"*",
"ƒ",
"",
"",
"",
"",
"»",
"☐",
"·",
"�",
"▪",
"▪",
"○",
"",
"–",
]
list_types = {
"•": "circle",
"➢": "wide_symbol_arrow",
"*": "star",
"ƒ": "f",
"": "clock",
"": "small_square",
"": "narrow_symbol_arrow",
"": "large_square",
"»": "double_arrow",
"☐": "hollow_square",
"·": "circle",
"�": "special_char",
"▪": "very_small_square",
"▪": "very_small_square",
"○": "hollow_circle",
"": "hollow_squere",
"–": "dash",
"‒": "another-dash",
"̶": "underscore",
}
unicode_list_types = {
"\\uf0b7": "•",
"\\uf0fc": "",
}
footnote_types = {
"©"
}
ambiguous_list_chars = ["+", "-"]
units = ["acres", "miles", "-"] # - could represent a null value in a row
punctuations = string.punctuation + "“"
start_quotations = ["'", '"', "“"]
end_quotations = ["'", '"', "”"]
"""
Quote Pattern details:
\\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly.
["“\'] ==> Quote patterns
(?!\\D\\s) ==> Negative Lookahead for single character following the quote.
Helps in removing words like Macy's, don't ...
(?!\\d+) ==> Negative Lookahead for one or more digits following the pattern.
Helps in removing words like '19, '2019
(.*?)[,;.]?[”"\'] ==> Match all other data.
"""
# Add / Modify Quotation pattern in ingestor_utils/utils.py also.
quote_pattern = re.compile(
r'(?:(?<=\W)|(?<=^))["“‘’\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[”"‘’\']+',
) # (r'["“\'](.*?)[,;.]?[”"\']')
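# Illustrative usage (added for clarity, not part of the original module):
#   quote_pattern.findall('He said "hello world" to O\'Reilly.')  ->  ['hello world']
# The lookbehind requires a non-word character (or start of string) before the
# opening quote, so the apostrophe inside O'Reilly is not treated as a quotation mark.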
single_char_pattern = re.compile(r'[a-zA-Z]')
multi_char_pattern = re.compile(r'[a-zA-Z]+')
roman_number_pattern = re.compile(r'[ixvIXV]+$')
ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"“‘’”\'\s]*$")
conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"]
class Word:
def __init__(self, token):
self.text = token
self.is_percent = False
self.is_number = False
self.is_year = False # year does not count as a number
self.is_dollar = False
self.is_million = False
self.is_billion = False
self.is_thousand = False
self.is_date_entry = False
self.is_negative = False
self.length = len(self.text)
self.is_stop_word = self.text.lower() in stop_words
self.is_number_range = False
self.parts = []
text_without_punct = self.text
while (
len(text_without_punct) > 1 and
(text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations)
):
text_without_punct = text_without_punct[0:-1]
# remove leading unbalanced punctuation
while (
len(text_without_punct) > 1 and
(text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations)
):
text_without_punct = text_without_punct[1:]
self.text_without_punct = text_without_punct
self.is_noun = self.text_without_punct[0].isupper()
n = self.check_numeric()
self.check_date()
try:
if n:
n = round(float(n))
if n > 0:
digits = int(math.log10(n)) + 1
elif n == 0:
digits = 1
else:
digits = int(math.log10(-n)) + 2
self.num_digits = digits
if digits == 4 and self.text.replace(",", "") == self.text:
self.is_year = True
self.is_number = False
else:
self.num_digits = 0
except Exception as e:
logging.error(e)
self.num_digits = 0
def check_date(self):
if "/" in self.text or "-" in self.text:
text = self.text.replace("/", "-")
date_patterns = [
"%b-%d",
"%B-%d",
"%B-%d-%y",
"%B-%d-%Y",
"%b-%d-%Y",
"%b-%d-%y",
"%m-%d",
"%m-%d-%y",
"%m-%d-%Y",
]
for pat in date_patterns:
try:
datetime.datetime.strptime(text, pat)
self.is_date_entry = True
return
except ValueError:
pass
else:
self.is_date_entry = False
def check_numeric(self):
word = self.text.lower()
if not word.isalpha():
if word.isprintable():
if not word.isnumeric():
if word.startswith("(") and word.endswith(")"):
word = word[1:-1]
if word.startswith("-"):
self.is_negative = True
word = word[1:]
if word.startswith("$"):
self.is_dollar = True
word = word[1:]
elif word.endswith("$"):
self.is_dollar = True
word = word[0:-1]
elif word.endswith("%"):
self.is_percent = True
word = word[0:-1]
elif word.endswith("m"):
self.is_million = True
elif word.endswith("bn"):
self.is_billion = True
if word.startswith("(") and word.endswith(")"):
word = word[1:-1]
word = word.replace(",", "")
if word.isnumeric() or word.replace(".", "", 1).isnumeric():
self.is_number = True
parts = word.split("-")
if (
len(parts) == 2
and parts[0].isnumeric()
and parts[1].isnumeric()
):
self.is_number_range = True
self.parts = parts
else:
self.is_number = True
if self.is_number:
numeric_part = word
return numeric_part
class Line:
def __init__(
self,
line_str,
text_list=[],
style_dict={},
page_details={},
noun_chunk_ending_tokens=[],
):
self.text = line_str.strip()
self.visual_line = VisualLine(text_list, style_dict, page_details)
self.words = []
self.is_independent = False
self.is_header = False
self.is_header_without_comma = False
self.noun_chunks = []
self.quoted_words = quote_pattern.findall(self.text)
self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens}
self.parse_line()
def check_header(self):
# Section X, Article Y, Note 1 etc.
first_word_header = self.first_word.lower() in ["section", "article", "note"]
# If a certain percentage of the words are title words (first letter capitalized)
title_ratio = (
self.title_word_count / self.eff_word_count
if self.eff_word_count > 0
else 1.0
)
# print(self.title_word_count, self.eff_word_count, title_ratio)
# Section 1 is a header but Section 1: Hello 3 is not
has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10
has_header_structure = (
(first_word_header or has_enough_titles) and self.number_count == 1
) or self.numbered_line or self.text.isupper()
# has_header_structure = has_header_structure and self.eff_word_count <
last_word_number = (
self.last_word.lower() in units
or self.last_word_number
and not has_header_structure
)
last_word_date = self.last_word_date and not has_header_structure
# Find lines ending with sentence delimiter. But exclude text like "L.P."
ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None
sentence_structure = self.ends_with_period and not (
has_header_structure and title_ratio > 0.9
) and ends_with_delim
last_letter_is_punctuation = (
self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and
ends_with_delim
)
self.is_header_without_comma = (
not sentence_structure
and not self.has_list_char
and not self.first_char in footnote_types
and has_enough_titles
and not last_word_number
and (
self.number_count == 0
or (has_header_structure and self.number_count <= 1)
)
and not self.has_continuing_chars
and not last_word_date
and self.first_word_title
and not self.last_word_is_stop_word
and not self.is_zipcode_or_po
and not last_letter_is_punctuation
and not "://" in self.text # url pattern
)
self.is_header = self.is_header_without_comma and \
((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True)
def check_ends_with_period(self):
# punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.']
last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."]
self.ends_with_period = self.last_char in ["."] and not last_word_is_title
def check_table_row(self):
if not self.is_header:
value_count = (
self.number_count
+ self.dollar_count
+ self.pct_count
+ self.text.count(" - ")
)
word_symbols = self.word_count - self.dollar_sign_count
if word_symbols == 0:
word_symbols = 1
word_ratio = (
value_count + self.title_word_count + self.date_entry_count
) / word_symbols
self.is_table_row = (
(
(value_count > 0 or self.date_entry_count > 0)
and word_ratio > 0.7
and not self.ends_with_period
and not self.is_zipcode_or_po
)
and not self.last_word_is_stop_word
or ("...." in self.text)
)
else:
self.is_table_row = False
def check_list_item(self):
text = self.text.strip()
self.has_list_char = text[0] in list_types.keys()
# if not self.has_list_char and text[0] in ambiguous_list_chars:
# self.has_list_char = text[1:].strip()[0].isalpha()
self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$"
if self.is_list_item:
self.list_type = list_t<fim_suffix>ypes[text[0]]
# matches 1.1 1.2.1 1 etc.
def check_numbered_line(self, word):
trunc_word = word
ends_with_parens = word.endswith(")")
number_end_char = word.endswith(".") or ends_with_parens
number_start_char = word.startswith("(")
if number_start_char and not ends_with_parens:
return False
if word[-1] in ["%", "$", ","]:
return False
if number_end_char:
trunc_word = word[:-1]
if number_start_char:
trunc_word = trunc_word[1:]
# To handle scenarios like (ii)(A)
if ")(" in trunc_word:
trunc_word = trunc_word.split(")(")[0]
parts = trunc_word.split(".")
self.integer_numbered_line = False
self.roman_numbered_line = False
self.letter_numbered_line = False
self.dot_numbered_line = False
mixed_list_items = False
max_digits = 2
max_roman = 6
for idx, part in enumerate(parts):
# print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0)
if len(part) <= max_digits:
# (1), (2), (3)
self.integer_numbered_line = part.isdigit() and (
len(parts) > 1 or word.endswith(")")
)
# 1. 2. 3.
self.dot_numbered_line = part.isdigit() and (
len(parts) > 1 or word.endswith(".")
)
# a. b. c. or a) b) c)
# idx > 0 for patterns like 10.a
# a1 b1 c1 etc.
self.letter_numbered_line = (
True
if single_char_pattern.match(part)
and (
(number_end_char and len(part) == 1 and len(parts) == 1)
or multi_char_pattern.sub("", part).isdigit()
or idx > 0
)
else False
)
if len(part) <= max_roman:
# xi, i, iv
self.roman_numbered_line = (
True if roman_number_pattern.match(part) and idx == 0 else False
)
if part.endswith(")") and part[0].isalnum() and "(" in part:
mixed_list_items = True
# else:
# self.integer_numbered_line = False
# A-1
# self.letter_numbered_line = (
# True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False
# )
self.numbered_line = (
self.integer_numbered_line
or self.roman_numbered_line
or self.letter_numbered_line
or self.dot_numbered_line
) and not mixed_list_items
if not self.numbered_line:
break
if self.numbered_line:
self.start_number = trunc_word
self.line_without_number = self.text[len(word) + 1 :]
self.full_number = self.text[:len(word)]
# check if line is part of address
def check_zipcode_or_pobox(self):
# check if line matches format P.O. box xxxxx
pobox = (
self.word_count == 3
and self.last_word_number
and self.first_word.lower() in ["po", "p.o", "p.o."]
)
# check if line is last part of address, matching format "city, state zipcode"
zipcode = (
self.word_count
< 7 # ensure line is standalone address, not part of larger sentence
and (
self.contains_state # line contains comma followed by state name or abbreviation
# line ends in zipcode, with format xxxxx or xxxxx-xxxx
and (
(self.last_word_number or self.last_word[-4:].isdigit())
and (
(len(self.last_word) == 10 and self.last_word[-5] == "-")
or len(self.last_word) == 5
)
)
and not self.ends_with_period
)
)
self.is_zipcode_or_po = pobox or zipcode
def set_line_type(self):
line_type = "para"
if self.is_table_row:
line_type = "table_row"
elif self.is_header:
line_type = "header"
elif self.is_list_item or self.numbered_line:
line_type = "list_item"
else:
line_type = "para"
self.line_type = line_type
def parse_line(self):
self.words = []
self.title_word_count = 0
self.alpha_count = 0
self.list_type = ""
self.integer_numbered_line = False
self.roman_numbered_line = False
self.dot_numbered_line = False
self.numbered_line = False
self.stop_word_count = 0
self.dollar_count = 0
self.pct_count = 0
self.number_count = 0
self.last_word_number = False
self.first_word_title = False
self.letter_numbered_line = False
self.ends_with_hyphen = False
self.last_word_date = False
self.is_reference_author_name = False
self.date_entry_count = 0
self.last_word_is_stop_word = False # self.last_word in self.stopwords
self.hit_colon = False
self.is_zipcode_or_po = False
self.contains_state = False
self.addresses = []
# todo - this is a stopgap solution, need to make it more efficient
tokens = self.text.split()
self.length = len(self.text)
self.word_count = len(tokens)
self.dollar_sign_count = tokens.count("$")
last_idx = self.word_count - 1
first_alpha_found = False
prev_token_comma = False
self.eff_length = 0
single_letter_word_count = 0
noun_chunk_buf = []
if self.length == 0:
return
for idx, token in enumerate(tokens):
if token in unicode_list_types.keys():
token = unicode_list_types[token]
if token.__contains__(":"):
self.hit_colon = True
# remove punctuation unless (word) or unless it is the first token or if it has colon
last_char = token[-1]
# remove punctuation unless (word) or unless it is the first token
if (
(token[-1] in string.punctuation or token[-1] in end_quotations)
and not (token[0] in string.punctuation or token[0] in start_quotations)
and (not idx == 0 or token[-1] == ":")
):
token = token[0:-1]
if len(token) == 0:
continue
# if prev token contained comma, check if current token is state name
if prev_token_comma and (
token.lower() in states or token.lower() in states_abbreviations
):
self.contains_state = True
prev_token_comma = False
if prev_token_comma:
prev_token_comma = False
if last_char == ",":
prev_token_comma = True
if idx == 0 and not token.lower() == "i" and not token.lower() == "a":
self.check_numbered_line(token)
if token.istitle() or token.isupper(): # and not self.hit_colon:
self.title_word_count = self.title_word_count + 1
if token.isalpha():
# if not self.hit_colon:
self.alpha_count = self.alpha_count + 1
if not first_alpha_found:
first_alpha_found = True
if idx == 0:
self.first_word_title = token[0].isupper()
word = Word(token)
if word.is_number:
self.number_count = self.number_count + 1
if idx == last_idx:
self.last_word_number = True
if word.is_date_entry:
self.date_entry_count += 1
if idx == last_idx:
self.last_word_date = True
if word.is_dollar:
self.dollar_count = self.dollar_count + 1
if idx == last_idx:
self.last_word_number = True
if word.is_percent:
self.pct_count = self.pct_count + 1
if idx == last_idx:
self.last_word_number = True
self.eff_length += word.length
if word.length == 1:
single_letter_word_count += 1
if word.is_stop_word:
if not self.hit_colon:
self.stop_word_count = self.stop_word_count + 1
if idx == last_idx and len(token) != 1 and not token.isupper():
self.last_word_is_stop_word = True
if word.is_noun or word.text == "&":
noun = word.text_without_punct
prev_word = self.words[-1] if len(self.words) > 0 else None
if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf:
noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway
if noun.endswith("'s"):
noun = noun[0:-2]
noun_chunk_buf.append(noun)
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
elif (
"".join([x.lower() for x in noun if x not in {".", ","}])
in self.noun_chunk_ending_tokens
):
noun_chunk_buf.append(noun)
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
else:
noun_chunk_buf.append(noun)
elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]:
noun_chunk_buf.append(word.text_without_punct)
elif len(noun_chunk_buf):
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
self.words.append(word)
if len(noun_chunk_buf) > 0:
self.noun_chunks.append(" ".join(noun_chunk_buf))
self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks))))
self.first_word = tokens[0]
self.last_word = tokens[-1]
self.last_char = self.text[-1]
self.ends_with_period = self.last_char == "."
self.ends_with_comma = self.last_char == ","
self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "."
self.eff_word_count = self.alpha_count - self.stop_word_count
self.check_ends_with_period()
self.first_char = self.text[0]
self.has_continuing_chars = not self.numbered_line and (
self.first_char.islower() or self.first_char in continuing_chars
)
self.last_continuing_char = self.last_char in continuing_chars
self.check_zipcode_or_pobox()
self.check_list_item()
self.check_header()
self.check_table_row()
self.separate_line = (
self.is_header
or self.is_table_row
or self.is_list_item
or self.is_zipcode_or_po
)
self.is_list_or_row = self.is_table_row or self.is_list_item
self.is_header_or_row = (
self.is_header or self.is_table_row or self.is_zipcode_or_po
)
self.ends_with_abbreviation = self.ends_with_period and (
(self.last_word.find(".") != len(self.last_word) - 1)
or self.last_word.lower() in abbreviations
or len(self.last_word) <= 3
)
self.incomplete_line = not self.is_header_or_row and (
not self.ends_with_period
or self.ends_with_abbreviation
or self.end_with_period_single_char
)
self.continuing_line = self.has_continuing_chars and not self.separate_line
self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8
self.set_line_type()
if self.is_header or self.is_header_without_comma:
if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2:
self.is_reference_author_name = True
self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list
# print(self.separate_line)
# self.continuing_line = not self.separate_line and
def to_json(self):
json_lp = dict(self.__dict__)
del json_lp["visual_line"]
words = []
for word in self.words:
words.append(word.__dict__)
json_lp["words"] = words
return json_lp
class VisualLine:
def __init__(self, text_list=[], style_dict={}, page_stats={}):
self.text_list = text_list
self.start_x = None
self.start_y = None
self.end_x = None
self.end_y = None
self.fs = None
self.fw = None
self.start_fs = None
self.end_fs = None
self.diff_prev_y = None
self.diff_next_y = None
self.is_comparably_sized = False
self.is_comparably_bolded = False
self.is_prev_space_smallest = False
self.is_next_space_smallest = False
self.wrapped_page = False
self.text = " ".join(self.text_list)
if style_dict:
self.start_x = style_dict["start_x"][0]
self.start_y = style_dict["start_y"][0]
self.end_x = style_dict["end_x"][-1]
self.end_y = style_dict["end_y"][-1]
self.fs = style_dict["line_fs"][0]
self.fw = style_dict["line_fw"][0]
self.diff_prev_y = style_dict["diff_prev_y"][0]
self.diff_next_y = style_dict["diff_next_y"][0]
self.font_family = (
style_dict["font_family"][0] if len(style_dict["font_family"]) else None
)
self.font_style = (
style_dict["font_style"][0] if len(style_dict["font_style"]) else None
)
self.min_x = (
self.start_x
) # these variables are adjustable during line joins for line width
self.max_x = self.end_x
self.start_x_list = style_dict["start_x"] # joined ents
self.end_x_list = style_dict["end_x"] # joined ents
self.start_x_list_single_ent = style_dict["start_x_list"][0]
self.end_x_list_single_ent = style_dict["end_x_list"][0]
self.mode_fs = mode_of_list(style_dict["line_fs"])
self.tab_count = 0
# calculates tabs for when tika misses word split
if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent):
self.start_end_list = list(
zip(self.start_x_list_single_ent, self.end_x_list_single_ent),
)
for word_x, next_word_x in zip(
self.start_end_list[:-1],
self.start_end_list[1:],
):
word_start_x, word_end_x = word_x
next_word_start_x, next_word_end_x = next_word_x
word_distance = next_word_start_x - word_end_x
if word_distance > 20:
self.tab_count += 1
else:
self.start_end_list = []
self.tab_count_join = 0 # tab count after join in ptolines
# calculates tabs for when tika misses word split
if len(self.start_x_list) == len(self.end_x_list):
self.start_end_list_join = list(
zip(self.start_x_list, self.end_x_list),
)
for word_x, next_word_x in zip(
self.start_end_list_join[:-1],
self.start_end_list_join[1:],
):
word_start_x, word_end_x = word_x
next_word_start_x, next_word_end_x = next_word_x
word_distance = next_word_start_x - word_end_x
if word_distance > 20:
self.tab_count_join += 1
else:
self.start_end_list_join = []
if len(self.text.split()) == 2 and self.tab_count == 1:
self.text_list = self.text.split()
# Count tabs in text list, Eventually make it a function of font size
self.start_fs = round(style_dict["start_fs"][0], 1)
self.end_fs = round(style_dict["end_fs"][-1], 1)
self.compute_visual_features(page_stats)
def compute_visual_features(self, page_stats):
# compute font size relative to most common font
font_sizes_mode = page_stats["mode_fs"]
if self.fs > (4 / 3) * font_sizes_mode:
self.is_comparably_sized = True
else:
self.is_comparably_sized = False
# compute font weight relative to 600.0 which has generally
# been observed to correspond to bolding of some sort
font_weights_mode = page_stats["mode_fw"]
if font_weights_mode >= 600.0:
self.is_comparably_bolded = False
elif self.fw > 600.0:
self.is_comparably_bolded = True
# compare line height for similar type (same font) lines
if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2:
for k, v in page_stats["fs_and_diff_prev_y"].items():
if k == self.fs and 0 <= v < self.diff_prev_y:
break
else:
self.is_prev_space_smallest = True
if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2:
for k, v in page_stats["fs_and_diff_next_y"].items():
if k == self.fs and 0 <= v < self.diff_next_y:
break
else:
self.is_next_space_smallest = True
def should_join_table(self, next_line):
"""
Check whether the next line should be joined as a table row (tr). Makes no assumption about whether the current line is a table row.
"""
# check list of spaced words
curr_line_ents = len(self.text_list)
next_line_ents = len(next_line.text_list)
ent_match = (
curr_line_ents == next_line_ents and curr_line_ents >= 2
) # tr should have at least two elements
# compare alignment of elements in both lists
if ent_match:
return
return False
def should_join_para(self):
return False
def should_join_header(self):
return False
def __str__(self):
output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest},"
output_str += f"\nfont_style = {self.font_style}"
return output_str
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor/processors.py<fim_prefix>import logging
import re
from collections import Counter
from collections import defaultdict
from . import formatter
from . import line_parser
from . import patterns
from nlm_ingestor.ingestor_utils import spell_utils
from nlm_ingestor.ingestor_utils.utils import sent_tokenize
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
su = spell_utils.SpellUtil()
def stem(line):
line = line.replace("'s", "")
line = line.replace("’s", "")
return line
def check_parentheses(text):
count = 0
for i in text:
if i == "(":
count += 1
elif i == ")":
count -= 1
return count == 0
def nlm_tokenize(line):
# print(line)
tokens = []
if not line:
line = ""
line = line.lower()
trans_table = line.maketrans("-/", " ")
line = line.translate(trans_table)
line = line.translate(str.maketrans("", "", "�\\(*,.?•\\➢ƒ–\\)'\"—"))
# line = patterns.num_unit.sub(r"100 \1", line)
line = patterns.num_unit.sub(r"", line)
line = stem(line)
words = line.split()
for word in words:
if (
not word.isdigit()
and not word.endswith("%")
and not word.startswith("$")
and not word.endswith("$")
):
tokens.append(word)
if len(tokens) == 0:
tokens.append("unknown")
return tokens
# returns True when no word in the line is longer than two characters (i.e., the line consists only of floating characters)
def find_floating_chars(line):
words = line.split(" ")
for word in words:
if len(word) > 2:
return False
return True
def is_table_row(line):
line = line_parser.Line(line)
return line.is_table_row
def should_skip(line, xml=False):
return len(line) <= 2 if not xml else len(line) == 0
def clean_lines(lines, xml=False):
result = []
running_line = ""
line_buffer = []
line_type = "para"
header_block_idx = -1
block_idx = 0
line_set = set()
for line_str in lines:
# print(line_str)
line_str = clean_line(line_str)
if should_skip(line_str, xml=xml):
continue
line_without_numbers = re.sub(r"\d+", "", line_str)
if line_without_numbers in line_set:
continue
else:
li<fim_suffix>ne_set.add(line_without_numbers)
curr_line = line_parser.Line(line_str)
# this converts strings like 'e x e c u t i v e summary' to 'executive summary'
if not xml and curr_line.has_spaced_characters:
line_str = fix_spaced_characters(line_str)
curr_line = line_parser.Line(line_str)
if len(line_buffer) > 0:
# find out if previous line was a discontinuous line
prev_line = line_buffer[-1]
logger.debug("========")
logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n")
logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n")
# keep connecting lines as long as they seem incomplete
is_incomplete = prev_line.incomplete_line or (
len(line_buffer) > 1 and not prev_line.ends_with_period
)
logger.debug(
f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}",
)
if (
is_incomplete
and not (curr_line.is_list_or_row or curr_line.line_type == "list_item")
) or curr_line.continuing_line:
logger.debug("connecting..")
running_line = formatter.connect(running_line, curr_line.text)
line_buffer.append(curr_line)
# if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers
if not line_type == "list_item":
line_type = "para"
else: # commit the line and start a new line
# remove different types of bulleted list markers (for better formatting) but do not touch numbered lines
logger.debug("starting new line..")
# if line_type == "list_item":
# running_line = running_line[1:].lstrip()
if line_type == "header":
header_block_idx = block_idx
block = {
"block_idx": block_idx,
"block_text": running_line,
"block_type": line_type,
"text_group_start_idx": -1,
"block_list": [],
"header_block_idx": header_block_idx,
"level": 0,
}
result.append(block)
block_idx = block_idx + 1
running_line = curr_line.text
line_buffer = [curr_line]
line_type = curr_line.line_type
logger.debug("========")
else:
running_line = curr_line.text
line_type = curr_line.line_type
line_buffer = [curr_line]
if line_type == "list_item" and running_line[0] in "�\\*,.?•\\➢ƒ–\\'\"—":
running_line = running_line[1:].lstrip()
block = {
"block_idx": block_idx,
"block_text": running_line,
"block_type": line_type,
"text_group_start_idx": -1,
"block_list": [],
"header_block_idx": header_block_idx,
"level": 0,
}
result.append(block)
return result
def line_list_check(prev_line, curr_line, list_char):
# if prev_line is list_item and list_char matches curr_line
if list_char == curr_line.text[0] and list_char not in ["”", "'", '"', "("]:
return True
# same char is alpha
if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha():
if len(prev_line.text) >= 2 and prev_line.text[1].isupper():
# spell check first word
first_word = prev_line.text.split(" ")[0]
first_word = first_word.replace("'", "")
correct_word = su.segment(first_word)
if first_word[1:] == correct_word:
return True
# same char is not alpha but not digit
if prev_line.text[0] == curr_line.text[0] and not (
prev_line.text[0].isalpha()
or prev_line.text[0].isdigit()
or list_char not in ["”", "'", '"', "("]
):
return True
return False
def should_join_table(prev_line, curr_line, ents_aligned):
"""
Check whether the next line should be joined as a table row (tr). Makes no assumption about whether the current line is a table row.
"""
# print()
# print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list)
# check list of spaced words
curr_line_ents = len(prev_line.visual_line.text_list)
next_line_ents = len(curr_line.visual_line.text_list)
ent_match = (
curr_line_ents == next_line_ents and curr_line_ents >= 2
) # tr should have at least two elements
# print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count)
tab_match = (
prev_line.visual_line.tab_count == curr_line.visual_line.tab_count
and curr_line.visual_line.tab_count > 0
)
# casing should also be the same
same_case = (
prev_line.text[0].islower() == curr_line.text[0].islower()
or prev_line.text[0].isupper() == curr_line.text[0].isupper()
)
colon_check = (
prev_line.hit_colon
and curr_line.hit_colon
and prev_line
and same_case
and not prev_line.incomplete_line
)
# if prev_line.hit_colon and curr_line.hit_colon:
# print()
# print("colon check")
# print(prev_line.visual_line.text_list)
# print(curr_line.visual_line.text_list)
# col_check
# print(tab_match, ent_match, colon_check)
tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count
return (
(tab_match and ent_match)
or colon_check
or (ents_aligned and ent_match and tab_check)
)
def check_page_spacing(prev_line, curr_line, spacing_dict):
# print("^"*50)
# print("checking page stats")
# print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text)
# print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text)
# print()
diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y)
# find best fs reference
prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs}
curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs}
same_fs = prev_line_fs.intersection(curr_line_fs)
fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs
min_check = (
spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None
)
max_check = (
spacing_dict[(fs, diff_top + 1)] if (fs, diff_top + 1) in spacing_dict else None
)
normal_check = (fs, diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3
if min_check or normal_check or max_check:
# get all fs in spacing dict
# see if the diff top is a min
# print("checking space dict")
distance_list = []
for val in spacing_dict:
if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2:
distance_list.append((val, val[1]))
# print(distance_list)
val = min(distance_list) if len(distance_list) else []
if len(val):
join_fs, join_top = val[0]
if len(val):
join_fs, join_top = val[0]
if val[0] == (fs, diff_top): # or close
# print("SHOULDJOIN")
return True
elif (
join_fs == fs
and ((diff_top - 1) == join_top)
or ((diff_top + 1) == join_top)
):
return True
return False
def compute_overlap(
start_x0: float,
end_x0: float,
start_x1: float,
end_x1: float,
divide_by_min=True,
) -> float:
"""
Computes the % of intersection (overlap) of two lines w.r.t. the shortest line
"""
width_x0 = abs(end_x0 - start_x0)
width_x1 = abs(end_x1 - start_x1)
if start_x0 <= start_x1 <= end_x0:
intersect = min(abs(end_x0 - start_x1), width_x1)
elif start_x0 <= end_x1 <= end_x0:
intersect = min(abs(end_x1 - start_x0), width_x1)
elif start_x1 <= start_x0 <= end_x0 <= end_x1:
intersect = abs(end_x0 - start_x0)
else:
intersect = 0.0
if divide_by_min:
intersect /= min(width_x0, width_x1) + 1e-5
else:
intersect /= max(width_x0, width_x1) + 1e-5
return intersect
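# Worked example (added for clarity, not part of the original module), with divide_by_min=True:
#   spans [0, 10] and [5, 20] share 5 units; the shorter span is 10 wide, so
#   compute_overlap(0, 10, 5, 20) ~= 5 / 10 = 0.5 (up to the 1e-5 smoothing term).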
def compute_overlap_top_bottom(
start_x0: float,
end_x0: float,
start_x1: float,
end_x1: float,
) -> float:
"""
This is different from the above function.
Finds percentage overlap of top to bottom.
A score of 100% is possible; it does not reference the shortest line.
"""
width_x1 = abs(end_x1 - start_x1)
if width_x1 == 0:
return 0.0
if start_x0 <= start_x1:
# measure from left to right
if end_x1 <= end_x0:
# if start and end both less, full in subset
return 1.0
return (end_x1 - start_x0) / width_x1
else:
# measure from bottom start
if end_x1 <= start_x0:
return 0.0
return (end_x1 - start_x0) / width_x1
def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1):
"""
This is different from the above function.
Finds percentage overlap of top to bottom.
A score of 100% is possible; it does not reference the shortest line.
"""
# print(start_x0, end_x0)
# print(start_x1, end_x1)
if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line
# print()
# print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0))
return (end_x1 - start_x1) / (end_x0 - start_x0)
# other conditions
# elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line
# return
# else: #to the right of bottom line
return 1.0
# header check for lines with similar font
# header check for lines with similar font
def visual_header_check(prev_line, curr_line, same_font):
# check top overlap (small) if the font size is bigger
# print()
# print("visual_header check:")
# print("prev", prev_line.text)
# print("checking", curr_line.text)
# top also has to be higher
# print("prev_line.visual_line.start_y, prev_line.visual_line.end_y")
# print(prev_line.visual_line.start_y, prev_line.visual_line.end_y)
# print(prev_line.visual_line.start_y, curr_line.visual_line.start_y)
if prev_line.visual_line.wrapped_page:
return False
if prev_line.visual_line.start_y < curr_line.visual_line.start_y:
prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x
curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x
# print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x")
# print(prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x)
# print("curr_line.visual_line.min_x, curr_line.visual_line.max_x")
# print(curr_line.visual_line.min_x, curr_line.visual_line.max_x)
# print("prev_line_width / curr_line_width")
# print(prev_line_width / curr_line_width)
# print("prev_line_width, curr_line_width")
# print(prev_line_width, curr_line_width)
if curr_line_width == 0:
return False
# print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x))
if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x):
if round(prev_line_width) == round(curr_line_width):
# print()
# print("NOT A HEADER1")
return False
offset = 0
# print(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
# print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x)
if prev_line.visual_line.min_x <= curr_line.visual_line.min_x:
offset = curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset
# print("(prev_line_width - offset) / curr_line_width")
# print((prev_line_width - offset) / curr_line_width)
overlap_percentage = (prev_line_width - offset) / curr_line_width
different_font_style = (
prev_line.visual_line.fw != curr_line.visual_line.fw
or prev_line.visual_line[1] != curr_line.visual_line[1]
or prev_line.visual_line.fs > curr_line.visual_line.fs
)
if (
overlap_percentage < 0.3
or (different_font_style and overlap_percentage < 0.6)
or (prev_line.line_type == "header" and different_font_style)
# or (prev_line.is_header and different_font_style)
):
# print("HEADER INDENT", prev_line.is_header)
# print("overlap rule::", (prev_line_width - offset) / curr_line_width)
# print(True)
return True
# print(False)
# print()
# print("NOT A HEADER")
return False
def visual_header_from_stats(prev_line, curr_line, page_stats):
prev_fs = prev_line.visual_line.fs
curr_fs = curr_line.visual_line.fs
median_val = round(page_stats["median_fs"])
max_val = round(max(page_stats["fs_list"]))
max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if max_val != 0 else True
prev_fs_diff = round(prev_fs - median_val)
curr_fs_diff = (
round(curr_fs - median_val) if round(curr_fs - median_val) else 0.8
) # curr_fs is the median
varied_set = len(set(page_stats["fs_list"])) >= 4
rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]])
unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"])
prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff
# print("prev_fs, curr_fs", prev_fs, curr_fs)
# print("unique text")
# print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) )
# print("visual_header check", len(set(page_stats["fs_list"])))
# print("varied_set", varied_set, "unique_text", unique_text)
# print(rounded_fs_count)
# print()
# close from max or far enough from median
bigger_text = max_val_diff or (
prev_curr_ratio_from_median > 2
) # TODO text must also be relatively uncommon
if varied_set and (unique_text <= 0.08):
if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3:
# print(max_val_diff)
# print(prev_fs, prev_line.text)
# print(curr_fs, curr_line.text)
# print()
return True
# header join
if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1):
# print(max_val_diff)
# print(prev_fs, prev_line.text)
# print(curr_fs, curr_line.text)
# print()
return True
return False
# def visual_clean_lines(lines, page_stats={}, page_info_dict={}):
# def visual_clean_lines(lines, page_stats={}, page_info_dict={}):
# def visual_clean_lines(lines, page_stats={}, page_info_dict={}):
def check_tr_alignment(prev_line, curr_line):
# print("-=" * 50)
# print("check_tr_alignment!")
# print(prev_line.text)
# print(curr_line.text)
# print()
prev_ents = len(prev_line.visual_line.text_list)
curr_ents = len(curr_line.visual_line.text_list)
prev_positions = prev_line.visual_line.start_x_list
curr_positions = curr_line.visual_line.start_x_list
prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent
curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent
# print(prev_line_start_ents)
# print(curr_line_start_ents)
same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1
if len(prev_line_start_ents) == len(curr_line_start_ents):
prev_positions = prev_line_start_ents
curr_positions = curr_line_start_ents
if len(prev_line_start_ents) == len(curr_positions) and len(
prev_line_start_ents,
) != len(
prev_positions,
): # joined p_tags
prev_positions = prev_line_start_ents
if not same_ents:
# print("check_tr_alignment False1")
# print(prev_ents, curr_ents)
return False
# print("CHECKING POSITIONS")
# print(prev_positions)
# print(curr_positions)
for p_x, c_x in zip(prev_positions, curr_positions):
p_x = round(p_x)
c_x = round(c_x)
if abs(p_x - c_x) > 100:
# print("False")
# print("check_tr_alignment False3")
return False
# print("check_tr_alignment True")
return True
def check_layout(prev_line, curr_line, prev_above_curr):
prev_line_width = range(
int(prev_line.visual_line.min_x),
int(prev_line.visual_line.max_x),
)
# weird edge case
if not prev_line_width:
prev_line_width = range(
int(prev_line.visual_line.max_x),
int(prev_line.visual_line.min_x),
)
curr_line_width = range(
int(curr_line.visual_line.min_x),
int(curr_line.visual_line.max_x),
)
prev_line_width = set(prev_line_width)
prev_curr_overlap = prev_line_width.intersection(curr_line_width)
if prev_curr_overlap and not prev_above_curr:
# print(prev_line.text)
# print(curr_line.text)
# print("misplaced text group")
# print()
return True
return False
def order_blocks(blocks):
block_group_dict = defaultdict(list)
for idx, block in enumerate(blocks):
# print(idx, "block-group", block["group_id"], block["block_type"], block['block_text'])
group_id = block["group_id"]
block_group_dict[group_id].append(block)
block_group_list = [] # list that holds tuples (group_id, y_pos)
for block_group_id in block_group_dict:
block_group_list.append(
(block_group_id, block_group_dict[block_group_id][0]["y"]),
) # append starting y position of group
block_group_list = sorted(
block_group_list,
key=lambda x: x[1],
) # sort block groups by y position
# get list of ordered block group keys
ordered_blocks = []
for block_group_id, y in block_group_list:
ordered_blocks += block_group_dict[block_group_id]
# for b in original_blocks:
# re-index blocks and headers based off of new ordering
header_idx = 0
for idx, block in enumerate(ordered_blocks):
block["block_idx"] = idx
if block["block_type"] == "header":
header_idx = idx
ordered_blocks[idx]["header_block_idx"] = header_idx
return ordered_blocks
def visual_clean_lines(
lines,
page_stats={},
page_info_dict={},
page_idx=0,
line_set={},
):
page_blocks = []
header_block_idx = -1
block_idx = 0
# block_idx = page_idx
style_dict = {}
join_font_spacing = False
prev_line = None
text_list = []
prev_ents = 0
curr_ents = 0
is_incomplete = False
colon_rule = False
text_group_start = True
text_group_start_idx = 0
prev_line = None
next_line = None
# for idx, line in enumerate(lines[12:14]):
sentence_visual_end = False
group_id = 0
for idx, line in enumerate(lines):
# print(idx)
line_str, style_dict, text_list = (
line["text"],
line["style"],
line["text_list"],
)
line_str = " ".join(line_str.split())
if should_skip(line_str):
continue
if line_str in line_set:
continue
if len(line_str.split()) > 8:
line_set.add(line_str)
curr_line = line_parser.Line(
line_str=line_str,
style_dict=style_dict,
text_list=text_list,
page_details=page_stats,
)
if prev_line is None:
# initialize memory of previous line.
# this will update with join decisions
list_char = ""
if curr_line.line_type == "list_item":
list_char = curr_line.text[0]
curr_line.text = curr_line.text[1:].lstrip()
if curr_line.line_type == "header":
header_block_idx = block_idx
block = {
"block_idx": block_idx,
"block_text": curr_line.text,
"block_type": curr_line.line_type,
"header_block_idx": header_block_idx,
"block_group": [curr_line.visual_line.text_list],
"list_char": list_char,
"fs": curr_line.visual_line.start_fs,
"text_group_start_idx": text_group_start_idx,
"block_list": curr_line.visual_line.text_list,
"line": curr_line,
"y": curr_line.visual_line.start_y,
"group_id": group_id,
}
prev_line = curr_line
block_idx += 1
# if (idx <= 3) or (idx >= len(lines) - 3):
# line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip()
# if line_without_numbers:
# # track block_idx for de-duplication
# line_set[line_without_numbers].append((page_idx, block_idx))
page_blocks.append(block)
continue
# print("--" * 50)
# print(prev_line.line_type, "\n", prev_line.text)
# print(prev_ents)
# print(prev_line.visual_line.fw_list)
# print(prev_line.visual_line.font_family)
# print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text)
# print(prev_line.visual_line.mode_fs)
# print(curr_line.line_type, "\n", curr_line.text)
# print(curr_ents)
# print()
# print(curr_line.visual_line.font_family)
# print(curr_line.visual_line.mode_fs)
# print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text)
if (
len(prev_line.text) > 1
and len(curr_line.text) > 1
and prev_line.text[:2] == curr_line.text[:2]
and prev_line.text[1] == " "
and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit())
and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha())
):
curr_line.line_type = "list_item"
curr_line.is_list_item = True
curr_line.is_list_or_row = True
if page_blocks[-1]["block_type"] != "list_item":
page_blocks[-1]["block_type"] = "list_item"
page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0]
page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][
1:
].lstrip()
same_start_fs = (
abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5
)
same_end_fs = (
abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5
)
same_end_start_fs = (
abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5
)
prev_above_curr = (
True
if prev_line.visual_line.end_y < curr_line.visual_line.start_y
else False
)
y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y
top_overlap = compute_overlap_top_bottom(
start_x0=prev_line.visual_line.start_x,
end_x0=prev_line.visual_line.end_x,
start_x1=curr_line.visual_line.start_x,
end_x1=curr_line.visual_line.end_x,
)
bottom_overlap = compute_bottom_top_overlap(
start_x0=prev_line.visual_line.start_x,
end_x0=prev_line.visual_line.end_x,
start_x1=curr_line.visual_line.start_x,
end_x1=curr_line.visual_line.end_x,
)
prev_overlap_curr = True if bottom_overlap or top_overlap else False
use_visual_join = True if prev_above_curr and prev_overlap_curr else False
if not use_visual_join and prev_line.incomplete_line:
join_font_spacing = True
if not (prev_line.is_table_row or curr_line.is_table_row):
if page_stats["n_lines"] <= 3:
join_font_spacing = True
else:
join_font_spacing = check_page_spacing(
prev_line,
curr_line,
page_stats["fs_and_diff_next_y"],
)
# if the font is different and font-family is different
different_font_family = (
curr_line.visual_line.font_family != prev_line.visual_line.font_family
)
different_common_fs = (
prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs
and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs
)
different_font = (
different_font_family and different_common_fs and not join_font_spacing
)
# start and end characters are same font or the mode of fonts of both lines is the same
same_font = (
(prev_line.visual_line.fs == curr_line.visual_line.fs)
or (same_start_fs and same_end_fs)
or same_end_start_fs
or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs
) and not different_font
prev_ents = (
len(prev_line.visual_line.text_list)
if not prev_line.line_type == "list_item"
else 0
)
curr_ents = (
len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0
)
ents_aligned = check_tr_alignment(prev_line, curr_line)
is_incomplete_sent = (
prev_line.incomplete_line
and not prev_line.ends_with_period
or prev_line.ends_with_comma
)
# logic using line after curr
if idx + 1 < len(lines):
# this is inefficient as line_parser is called twice,
# once for next_line and once for curr_line.
next_line = lines[idx + 1]
# print("NEXT LINE\n", next_line['text'])
next_line_str, next_style_dict, next_text_list = (
next_line["text"],
next_line["style"],
next_line["text_list"],
)
next_line = line_parser.Line(
line_str=next_line_str,
style_dict=next_style_dict,
text_list=next_text_list,
page_details=page_stats,
)
# if the last line was not a table, check if the next line is a table to avoid single tr
if prev_line.line_type != "table_row" and not ents_aligned:
# check if the next line is a table and matches curr_line
next_line_tr = next_line.line_type == "table_row" or should_join_table(
curr_line,
next_line,
False,
)
if not next_line_tr and curr_line.line_type == "table_row":
curr_line.line_type = "para"
# if the next line is joinable by visual stats but prev and curr are not
# don't join the line (only true by x-span check and y is below for prev cur)
# if this is not true ignore the rule
prev_not_above_next = (
next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y
)
next_line_join = False
if next_line and check_layout(prev_line, next_line, prev_not_above_next):
next_line_join = check_page_spacing(
curr_line,
next_line,
page_stats["fs_and_diff_next_y"],
)
# if the prev line is not visually joinable and the curr_next is
# make sure the prev_line doesn't join the curr_line
curr_next_visual_join = not join_font_spacing and next_line_join
# print()
# print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line")
# print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line)
# print("join_font_spacing:,", join_font_spacing)
is_incomplete = (
is_incomplete_sent
or (join_font_spacing and not sentence_visual_end)
or curr_line.continuing_line
)
# print("is_incomplete", is_incomplete)
has_overlap_with_min = (
compute_overlap(
curr_line.visual_line.start_x,
curr_line.visual_line.end_x,
prev_line.visual_line.start_x,
prev_line.visual_line.end_x,
divide_by_min=True,
)
> 0.7
)
is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0
is_visually_apart = (has_overlap_with_min and not is_below) or (
not has_overlap_with_min and is_below
)
above_bold_below_not = (
prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0
)
has_overlap_with_max = (
compute_overlap(
curr_line.visual_line.start_x,
curr_line.visual_line.end_x,
prev_line.visual_line.start_x,
prev_line.visual_line.end_x,
divide_by_min=False,
)
> 0.3
)
is_not_header_over_para = True
if (
above_bold_below_not
and not has_overlap_with_max
and prev_line.line_type == "header"
and not prev_line.incomplete_line
):
is_not_header_over_para = False
# print("header over para check")
# print("""above_bold_below_not
# and not has_overlap_with_max
# and prev_line.line_type == "header"
# """)
# print(above_bold_below_not)
# print(has_overlap_with_max, j)
# print(prev_line.line_type == "header")
# print()
# print(is_not_header_over_para)
###########
# List item
if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]):
prev_line.line_type = "list_item"
curr_line.line_type = "list_item"
curr_line.is_list_item = True
# change prev_line to list item
if page_blocks[-1]["block_type"] != "list_item":
page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0]
page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][
1:
].lstrip()
page_blocks[-1]["block_type"] = "list_item"
close_text_y = (
curr_line.visual_line.start_y
- curr_line.visual_line.mode_fs
- prev_line.visual_line.start_y
- prev_line.visual_line.mode_fs
) <= 0
aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x
title_text = False
if len(lines) < 10:
title_text = top_overlap == 1.0 and close_text_y and aligned_text
visual_header = visual_header_check(prev_line, curr_line, same_font)
list_item_rule = curr_line.has_list_char or (
curr_line.numbered_line
and not (
(prev_line.incomplete_line and curr_line.continuing_line)
or join_font_spacing
)
)
last_2_block_tr = False
if len(page_blocks) >= 2:
last_block_tr = (
page_blocks[-1]["block_type"] == "table_row"
and page_blocks[-2]["block_type"] == "table_row"
)
if not last_block_tr and curr_line.line_type == "para":
# check to join
if prev_line.incomplete_line and curr_line.continuing_line:
last_2_block_tr = True
no_space_join = prev_line.ends_with_period and curr_line.text[0] != " "
visual_header_by_stats = visual_header_from_stats(
prev_line,
curr_line,
page_stats,
)
header_join = False
common_list = curr_line.has_list_char or prev_line.has_list_char
if (
visual_header_by_stats
and curr_line.incomplete_line
and same_font
and not (prev_line.is_table_row or curr_line.is_table_row or common_list)
):
header_join = True
# print("LINEJOIN CHECK")
# print("positive\n", "*" * 10)
# print(f"\nsame_font:{same_font}",
# f"\nis_incomplete:{is_incomplete}",
# f"\nis_not_header_over_para:{is_not_header_over_para}")
# print("join_font_spacing", join_font_spacing)
# print("header join", header_join)
# print()
# print("negative\n", "*" * 10)
# print(f"\nis_visually_apart:{is_visually_apart}",
# f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}",
# f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}",
# f"\ncurr_line table {curr_line.line_type == 'table_row'}",
# f"\ncurr_line list {curr_line.is_list_item}",
# f"\nvisual_header {visual_header}",
# f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}')
if (
same_font
and not should_join_table(prev_line, curr_line, ents_aligned)
and not (curr_line.line_type == "table_row" or list_item_rule)
and not (prev_line.line_type == "table_row" and not last_2_block_tr)
and is_incomplete
and not curr_next_visual_join # is_visually_apart
and not visual_header
or not check_parentheses(prev_line.text)
and is_not_header_over_para
and not no_space_join
or title_text
or header_join
):
# print("JOIN")
if not is_visually_apart and bottom_overlap < 0.5:
# this would signify end of paragraph
sentence_visual_end = True
else:
sentence_visual_end = False
if page_stats["n_lines"] <= 3:
page_blocks[-1]["block_type"] = "header"
elif (
not prev_line.line_type == "list_item"
): # and not curr_line.visual_line.is_header:
page_blocks[-1]["block_type"] = "para"
new_text = formatter.connect(
prev_line.text.rstrip(),
curr_line.text.lstrip(),
)
new_text_list = (
prev_line.visual_line.text_list + curr_line.visual_line.text_list
)
# print("Max ex min ex assignment")
            max_x = max(prev_line.visual_line.max_x, curr_line.visual_line.max_x)
min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
prev_line_type = prev_line.line_type
page_blocks[-1]["block_text"] = new_text
prev_start_y = prev_line.visual_line.start_y
curr_start_y = curr_line.visual_line.start_y
prev_end_y = prev_line.visual_line.end_y
wrapped_page = prev_line.visual_line.wrapped_page
# pass the line parser attributes
prev_line = curr_line
# add appended text and text_list, preserve the line type
prev_line.text = new_text
prev_line.visual_line.start_y = prev_start_y
prev_line.visual_line.text_list = new_text_list
prev_line.line_type = prev_line_type
prev_line.visual_line.min_x = min_x
prev_line.visual_line.max_x = max_x
prev_line.visual_line.wrapped_page = wrapped_page
if curr_start_y < prev_end_y:
prev_line.visual_line.wrapped_page = True
# print(prev_start_y)
# print("Join")
# print()
# print("-" * 50)
# print()
# new block
else:
# print("NEW block")
# print("*" * 50)
if not is_visually_apart and bottom_overlap < 0.5:
# this would signify end of paragraph
sentence_visual_end = True
else:
sentence_visual_end = False
# print("-"*50)
colon_rule = (
prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents
)
# normal case
tab_check_join = {
prev_line.visual_line.tab_count_join,
prev_line.visual_line.tab_count,
} & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count}
tab_check = sum(tab_check_join) > 0
# print("-+" * 50)
# print("TAB POSITIONS")
# print(prev_line.text)
# print(prev_line.visual_line.start_x_list)
# print(prev_line.visual_line.start_x_list_single_ent)
# print(prev_line.visual_line.tab_count)
# print(prev_line.visual_line.tab_count_join)
#
# print(curr_line.text)
# print(curr_line.visual_line.start_x_list)
# print(curr_line.visual_line.start_x_list_single_ent)
# print(curr_line.visual_line.tab_count)
# print(curr_line.visual_line.tab_count_join)
# print("tabcheck", tab_check)
# print("ents_aligned", ents_aligned)
# print(prev_ents, curr_ents)
# print(curr_line.visual_line.text_list)
# print("-+" * 50)
if visual_header_by_stats and prev_line.line_type != "table_row":
page_blocks[-1]["block_type"] = "header"
elif (
colon_rule
and prev_ents == 1
and prev_line.line_type != "list_item"
and not (prev_line.incomplete_line and curr_line.continuing_line)
):
# print("Table Conversion")
# print()
# print("colon check")
# print(prev_line.text.split(":"))
# print(curr_line.text.split(":"))
# print("TR1")
new_text_list = prev_line.text.split(":")
new_text_list = [new_text_list[0] + ":", new_text_list[1:]]
page_blocks[-1]["block_type"] = "table_row"
page_blocks[-1]["block_list"]: new_text_list
if text_group_start:
text_group_start = False
text_group_start_idx = page_blocks[-1]["block_idx"]
page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
curr_line.line_type = "table_row"
curr_line.is_list_or_row = True
# print("Table Conversion!")
# print(prev_ents, curr_ents)
# print(page_blocks[-1]["block_text"])
# print("TR3")
elif (
tab_check and ents_aligned and prev_line.line_type != "list_item"
) or (colon_rule and not prev_line.incomplete_line):
# print("Table Conversion")
# print(prev_ents, curr_ents)
# print(page_blocks[-1]["block_text"])
# print("TR2")
page_blocks[-1]["block_type"] = "table_row"
if text_group_start:
text_group_start = False
text_group_start_idx = page_blocks[-1]["block_idx"]
page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
curr_line.line_type = "table_row"
else:
text_group_start = True
text_group_start_idx = -1
list_char = ""
if curr_line.line_type == "list_item":
list_char = curr_line.text[0]
curr_line.text = curr_line.text[1:].lstrip()
if curr_line.line_type == "header":
header_block_idx = block_idx
if (visual_header or visual_header_by_stats) and not (
prev_line.line_type == "list_item"
or prev_line.line_type == "numbered_list_item"
):
page_blocks[-1]["block_type"] = "header"
# print()
# print("*" * 40)
# print("NEW BLOCK")
# print()
# print("*" * 40)
# print(curr_line.line_type, curr_line.text)
# group attribute
if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0:
group_id += 1
block = {
"block_idx": block_idx,
"block_text": curr_line.text,
"block_type": curr_line.line_type,
"header_block_idx": header_block_idx,
"block_group": [curr_line.visual_line.text_list],
"text_group_start_idx": text_group_start_idx,
"list_char": list_char,
"group_id": group_id,
"fs": curr_line.visual_line.start_fs,
"x": curr_line.visual_line.start_x,
"y": curr_line.visual_line.start_y,
"line": curr_line,
"block_list": curr_line.visual_line.text_list,
}
        # This is to account for headers that are false positives. TODO: improve the header code
prev_text = page_blocks[-1]["block_text"]
if page_blocks[-1]["block_type"] == "header" and (
len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16
):
page_blocks[-1]["block_type"] = "para"
prev_line = curr_line
block_idx += 1
page_blocks.append(block)
    # if there are only a few blocks, title text may have been missed
if len(page_blocks) <= 2:
for idx, block in enumerate(page_blocks):
if "." not in block["block_text"] and len(block["block_text"].split()) < 10:
page_blocks[idx]["block_type"] = "header"
page_blocks = order_blocks(page_blocks)
return page_blocks, line_set
def clean_line(line):
line = line.replace("\n", " ")
line = line.replace("\t", " ")
line = line.strip()
return line
def fix_spaced_characters(line_text):
line_text = re.sub(r"\s+", "", line_text)
return su.segment(line_text)
def connect(prev, curr):
has_space = prev.endswith(" ")
result = prev + ("" if has_space else " ") + curr
return result
def get_numbers(line):
# test = re.compile(r"[0-9]+\.?[0-9]?")
regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$")
return regex.search(line)
def check_block_join(prev_block, block):
prev_text = prev_block["block_text"]
curr_text = block["block_text"]
blocks_are_paras = (
prev_block["block_type"] == "para" and block["block_type"] == "para"
)
if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras:
prev_line = line_parser.Line(prev_block["block_text"])
curr_line = line_parser.Line(block["block_text"])
if prev_line.incomplete_line or curr_line.continuing_line:
return True
return False
def join_blocks(page_blocks, blocks):
prev_last_block = page_blocks[-1][-1]
# update page blocks and blocks
# prev_blocks = page_blocks[-1]
# last_prev_block = prev_blocks[-1]
# check to join last_prev_block with first blocks[0]
# if it's a join, pop the block and join, subtract block indexes
prev_last_block["block_text"] = (
prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip()
)
prev_last_block["block_list"].append(blocks[0]["block_list"])
# print(prev_block)
page_blocks[-1][-1] = prev_last_block
for block in blocks[1:]:
block["block_idx"] -= 1
return page_blocks, blocks[1:]
<filename>nlm-ingestor/nlm_ingestor/ingestor/processors.py<fim_prefix>import logging
import re
from collections import Counter
from collections import defaultdict
from . import formatter
from . import line_parser
from . import patterns
from nlm_ingestor.ingestor_utils import spell_utils
from nlm_ingestor.ingestor_utils.utils import sent_tokenize
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
su = spell_utils.SpellUtil()
def stem(line):
line = line.replace("'s", "")
line = line.replace("’s", "")
return line
def check_parentheses(text):
count = 0
for i in text:
if i == "(":
count += 1
elif i == ")":
count -= 1
return count == 0
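    # For example, check_parentheses("see note (a) above") is True, while
    # check_parentheses("as defined in (b") is False because of the unmatched "(".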
def nlm_tokenize(line):
# print(line)
tokens = []
if not line:
line = ""
line = line.lower()
    trans_table = line.maketrans("-/", "  ")
line = line.translate(trans_table)
line = line.translate(str.maketrans("", "", "�\\(*,.?•\\➢ƒ–\\)'\"—"))
# line = patterns.num_unit.sub(r"100 \1", line)
line = patterns.num_unit.sub(r"", line)
line = stem(line)
words = line.split()
for word in words:
if (
not word.isdigit()
and not word.endswith("%")
and not word.startswith("$")
and not word.endswith("$")
):
tokens.append(word)
if len(tokens) == 0:
tokens.append("unknown")
return tokens
# a line has only "floating" characters when no word in it is longer than two characters
def find_floating_chars(line):
words = line.split(" ")
for word in words:
if len(word) > 2:
return False
return True
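    # For example, find_floating_chars("e x e c u t i v e") is True (every "word" is a
    # single character), while find_floating_chars("executive summary") is False.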
def is_table_row(line):
line = line_parser.Line(line)
return line.is_table_row
def should_skip(line, xml=False):
return len(line) <= 2 if not xml else len(line) == 0
def clean_lines(lines, xml=False):
result = []
running_line = ""
line_buffer = []
line_type = "para"
header_block_idx = -1
block_idx = 0
line_set = set()
for line_str in lines:
# print(line_str)
line_str = clean_line(line_str)
if should_skip(line_str, xml=xml):
continue
line_without_numbers = re.sub(r"\d+", "", line_str)
if line_without_numbers in line_set:
continue
else:
line_set.add(line_without_numbers)
curr_line = line_parser.Line(line_str)
        # this converts strings like 'e x e c u t i v e summary' to 'executive summary'
if not xml and curr_line.has_spaced_characters:
line_str = fix_spaced_characters(line_str)
curr_line = line_parser.Line(line_str)
if len(line_buffer) > 0:
            # find out if the previous line was a discontinuous line
prev_line = line_buffer[-1]
logger.debug("========")
logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n")
logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n")
# keep connecting lines as long as they seem incomplete
is_incomplete = prev_line.incomplete_line or (
len(line_buffer) > 1 and not prev_line.ends_with_period
)
logger.debug(
f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}",
)
if (
is_incomplete
and not (curr_line.is_list_or_row or curr_line.line_type == "list_item")
) or curr_line.continuing_line:
logger.debug("connecting..")
running_line = formatter.connect(running_line, curr_line.text)
line_buffer.append(curr_line)
# if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers
if not line_type == "list_item":
line_type = "para"
else: # commit the line and start a new line
                # remove different types of bulleted lists (for better formatting) but do not touch numbered lines
logger.debug("starting new line..")
# if line_type == "list_item":
# running_line = running_line[1:].lstrip()
if line_type == "header":
header_block_idx = block_idx
                block = {
"block_idx": block_idx,
"block_text": running_line,
"block_type": line_type,
"text_group_start_idx": -1,
"block_list": [],
"header_block_idx": header_block_idx,
"level": 0,
}
result.append(block)
block_idx = block_idx + 1
running_line = curr_line.text
line_buffer = [curr_line]
line_type = curr_line.line_type
logger.debug("========")
else:
running_line = curr_line.text
line_type = curr_line.line_type
line_buffer = [curr_line]
if line_type == "list_item" and running_line[0] in "�\\*,.?•\\➢ƒ–\\'\"—":
running_line = running_line[1:].lstrip()
block = {
"block_idx": block_idx,
"block_text": running_line,
"block_type": line_type,
"text_group_start_idx": -1,
"block_list": [],
"header_block_idx": header_block_idx,
"level": 0,
}
result.append(block)
return result
def line_list_check(prev_line, curr_line, list_char):
# if prev_line is list_item and list_char matches curr_line
if list_char == curr_line.text[0] and list_char not in ["”", "'", '"', "("]:
return True
# same char is alpha
if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha():
if len(prev_line.text) >= 2 and prev_line.text[1].isupper():
# spell check first word
first_word = prev_line.text.split(" ")[0]
first_word = first_word.replace("'", "")
correct_word = su.segment(first_word)
if first_word[1:] == correct_word:
return True
# same char is not alpha but not digit
if prev_line.text[0] == curr_line.text[0] and not (
prev_line.text[0].isalpha()
or prev_line.text[0].isdigit()
or list_char not in ["”", "'", '"', "("]
):
return True
return False
def should_join_table(prev_line, curr_line, ents_aligned):
"""
Check if next line should be joined as a tr. This makes no assumption if the current line is a table
"""
# print()
# print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list)
# check list of spaced words
curr_line_ents = len(prev_line.visual_line.text_list)
next_line_ents = len(curr_line.visual_line.text_list)
ent_match = (
curr_line_ents == next_line_ents and curr_line_ents >= 2
) # tr should have at least two elements
# print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count)
tab_match = (
prev_line.visual_line.tab_count == curr_line.visual_line.tab_count
and curr_line.visual_line.tab_count > 0
)
# casing should also be the same
same_case = (
prev_line.text[0].islower() == curr_line.text[0].islower()
or prev_line.text[0].isupper() == curr_line.text[0].isupper()
)
colon_check = (
prev_line.hit_colon
and curr_line.hit_colon
and prev_line
and same_case
and not prev_line.incomplete_line
)
# if prev_line.hit_colon and curr_line.hit_colon:
# print()
# print("colon check")
# print(prev_line.visual_line.text_list)
# print(curr_line.visual_line.text_list)
# col_check
# print(tab_match, ent_match, colon_check)
tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count
return (
(tab_match and ent_match)
or colon_check
or (ents_aligned and ent_match and tab_check)
)
def check_page_spacing(prev_line, curr_line, spacing_dict):
# print("^"*50)
# print("checking page stats")
# print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text)
# print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text)
# print()
diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y)
# find best fs reference
prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs}
curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs}
same_fs = prev_line_fs.intersection(curr_line_fs)
fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs
min_check = (
spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None
)
max_check = (
spacing_dict[(fs, diff_top + 1)] if (fs, diff_top + 1) in spacing_dict else None
)
normal_check = (fs, diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3
if min_check or normal_check or max_check:
# get all fs in spacing dict
# see if the diff top is a min
# print("checking space dict")
distance_list = []
for val in spacing_dict:
if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2:
distance_list.append((val, val[1]))
# print(distance_list)
        val = min(distance_list) if len(distance_list) else []
        if len(val):
            join_fs, join_top = val[0]
if val[0] == (fs, diff_top): # or close
# print("SHOULDJOIN")
return True
            elif join_fs == fs and (
                (diff_top - 1) == join_top or (diff_top + 1) == join_top
            ):
return True
return False
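    # Note: spacing_dict (page_stats["fs_and_diff_next_y"]) is used here as a mapping of
    # (font_size, vertical_gap_to_next_line) -> occurrence count for the page, so a
    # (fs, diff_top) key with a high count indicates a gap that matches the page's usual
    # line spacing and therefore a likely join.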
def compute_overlap(
start_x0: float,
end_x0: float,
start_x1: float,
end_x1: float,
divide_by_min=True,
) -> float:
"""
Computes the % of intersection (overlap) of two lines w.r.t. the shortest line
"""
width_x0 = abs(end_x0 - start_x0)
width_x1 = abs(end_x1 - start_x1)
if start_x0 <= start_x1 <= end_x0:
intersect = min(abs(end_x0 - start_x1), width_x1)
elif start_x0 <= end_x1 <= end_x0:
intersect = min(abs(end_x1 - start_x0), width_x1)
elif start_x1 <= start_x0 <= end_x0 <= end_x1:
intersect = abs(end_x0 - start_x0)
else:
intersect = 0.0
if divide_by_min:
intersect /= min(width_x0, width_x1) + 1e-5
else:
intersect /= max(width_x0, width_x1) + 1e-5
return intersect
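    # Example: compute_overlap(0, 10, 5, 20) -> the shared span is [5, 10], i.e. width 5;
    # divided by the shorter width (10) this gives ~0.5, and with divide_by_min=False the
    # same span divided by the longer width (15) gives ~0.33.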
def compute_overlap_top_bottom(
start_x0: float,
end_x0: float,
start_x1: float,
end_x1: float,
) -> float:
"""
This is different from the above function.
Finds percentage overlap of top to bottom.
Score of 100% is possible doesn't reference the shortest line
"""
width_x1 = abs(end_x1 - start_x1)
if width_x1 == 0:
return 0.0
if start_x0 <= start_x1:
# measure from left to right
if end_x1 <= end_x0:
# if start and end both less, full in subset
return 1.0
return (end_x1 - start_x0) / width_x1
else:
# measure from bottom start
if end_x1 <= start_x0:
return 0.0
return (end_x1 - start_x0) / width_x1
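    # Example: compute_overlap_top_bottom(5, 20, 0, 10) -> the two spans share [5, 10],
    # which is 0.5 of the second line's width (10).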
def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1):
"""
This is different from the above function.
Finds percentage overlap of top to bottom.
Score of 100% is possible doesn't reference the shortest line
"""
# print(start_x0, end_x0)
# print(start_x1, end_x1)
if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line
# print()
# print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0))
return (end_x1 - start_x1) / (end_x0 - start_x0)
# other conditions
# elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line
# return
# else: #to the right of bottom line
return 1.0
# header check for lines with similar font
def visual_header_check(prev_line, curr_line, same_font):
# check top overlap (small) if the font size is bigger
# print()
# print("visual_header check:")
# print("prev", prev_line.text)
# print("checking", curr_line.text)
# top also has to be higher
# print("prev_line.visual_line.start_y, prev_line.visual_line.end_y")
# print(prev_line.visual_line.start_y, prev_line.visual_line.end_y)
# print(prev_line.visual_line.start_y, curr_line.visual_line.start_y)
if prev_line.visual_line.wrapped_page:
return False
if prev_line.visual_line.start_y < curr_line.visual_line.start_y:
prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x
curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x
# print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x")
# print(prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x)
# print("curr_line.visual_line.min_x, curr_line.visual_line.max_x")
# print(curr_line.visual_line.min_x, curr_line.visual_line.max_x)
# print("prev_line_width / curr_line_width")
# print(prev_line_width / curr_line_width)
# print("prev_line_width, curr_line_width")
# print(prev_line_width, curr_line_width)
if curr_line_width == 0:
return False
# print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x))
if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x):
if round(prev_line_width) == round(curr_line_width):
# print()
# print("NOT A HEADER1")
return False
offset = 0
# print(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
# print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x)
if prev_line.visual_line.min_x <= curr_line.visual_line.min_x:
offset = curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset
# print("(prev_line_width - offset) / curr_line_width")
# print((prev_line_width - offset) / curr_line_width)
overlap_percentage = (prev_line_width - offset) / curr_line_width
different_font_style = (
prev_line.visual_line.fw != curr_line.visual_line.fw
or prev_line.visual_line[1] != curr_line.visual_line[1]
or prev_line.visual_line.fs > curr_line.visual_line.fs
)
if (
overlap_percentage < 0.3
or (different_font_style and overlap_percentage < 0.6)
or (prev_line.line_type == "header" and different_font_style)
# or (prev_line.is_header and different_font_style)
):
# print("HEADER INDENT", prev_line.is_header)
# print("overlap rule::", (prev_line_width - offset) / curr_line_width)
# print(True)
return True
# print(False)
# print()
# print("NOT A HEADER")
return False
def visual_header_from_stats(prev_line, curr_line, page_stats):
prev_fs = prev_line.visual_line.fs
curr_fs = curr_line.visual_line.fs
median_val = round(page_stats["median_fs"])
max_val = round(max(page_stats["fs_list"]))
max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if max_val != 0 else True
prev_fs_diff = round(prev_fs - median_val)
curr_fs_diff = (
round(curr_fs - median_val) if round(curr_fs - median_val) else 0.8
) # curr_fs is the median
varied_set = len(set(page_stats["fs_list"])) >= 4
rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]])
unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"])
prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff
# print("prev_fs, curr_fs", prev_fs, curr_fs)
# print("unique text")
# print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) )
# print("visual_header check", len(set(page_stats["fs_list"])))
# print("varied_set", varied_set, "unique_text", unique_text)
# print(rounded_fs_count)
# print()
# close from max or far enough from median
bigger_text = max_val_diff or (
prev_curr_ratio_from_median > 2
) # TODO text must also be relatively uncommon
if varied_set and (unique_text <= 0.08):
if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3:
# print(max_val_diff)
# print(prev_fs, prev_line.text)
# print(curr_fs, curr_line.text)
# print()
return True
# header join
if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1):
# print(max_val_diff)
# print(prev_fs, prev_line.text)
# print(curr_fs, curr_line.text)
# print()
return True
return False
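    # In short: a line is promoted to a visual header when its font size is rare on the
    # page (roughly 8% of lines or fewer) and is either close to the page maximum or
    # clearly larger than the median font size of the surrounding text.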
# def visual_clean_lines(lines, page_stats={}, page_info_dict={}):
def check_tr_alignment(prev_line, curr_line):
# print("-=" * 50)
# print("check_tr_alignment!")
# print(prev_line.text)
# print(curr_line.text)
# print()
prev_ents = len(prev_line.visual_line.text_list)
curr_ents = len(curr_line.visual_line.text_list)
prev_positions = prev_line.visual_line.start_x_list
curr_positions = curr_line.visual_line.start_x_list
prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent
curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent
# print(prev_line_start_ents)
# print(curr_line_start_ents)
same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1
if len(prev_line_start_ents) == len(curr_line_start_ents):
prev_positions = prev_line_start_ents
curr_positions = curr_line_start_ents
if len(prev_line_start_ents) == len(curr_positions) and len(
prev_line_start_ents,
) != len(
prev_positions,
): # joined p_tags
prev_positions = prev_line_start_ents
if not same_ents:
# print("check_tr_alignment False1")
# print(prev_ents, curr_ents)
return False
# print("CHECKING POSITIONS")
# print(prev_positions)
# print(curr_positions)
for p_x, c_x in zip(prev_positions, curr_positions):
p_x = round(p_x)
c_x = round(c_x)
if abs(p_x - c_x) > 100:
# print("False")
# print("check_tr_alignment False3")
return False
# print("check_tr_alignment True")
return True
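    # Two lines are treated as aligned table rows when they have a similar number of
    # entries (within one of each other) and each entry's rounded start-x position is
    # within 100 units of its counterpart on the other line.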
def check_layout(prev_line, curr_line, prev_above_curr):
prev_line_width = range(
int(prev_line.visual_line.min_x),
int(prev_line.visual_line.max_x),
)
# weird edge case
if not prev_line_width:
prev_line_width = range(
int(prev_line.visual_line.max_x),
int(prev_line.visual_line.min_x),
)
curr_line_width = range(
int(curr_line.visual_line.min_x),
int(curr_line.visual_line.max_x),
)
prev_line_width = set(prev_line_width)
prev_curr_overlap = prev_line_width.intersection(curr_line_width)
if prev_curr_overlap and not prev_above_curr:
# print(prev_line.text)
# print(curr_line.text)
# print("misplaced text group")
# print()
return True
return False
def order_blocks(blocks):
block_group_dict = defaultdict(list)
for idx, block in enumerate(blocks):
# print(idx, "block-group", block["group_id"], block["block_type"], block['block_text'])
group_id = block["group_id"]
block_group_dict[group_id].append(block)
block_group_list = [] # list that holds tuples (group_id, y_pos)
for block_group_id in block_group_dict:
block_group_list.append(
(block_group_id, block_group_dict[block_group_id][0]["y"]),
) # append starting y position of group
block_group_list = sorted(
block_group_list,
key=lambda x: x[1],
) # sort block groups by y position
# get list of ordered block group keys
ordered_blocks = []
for block_group_id, y in block_group_list:
ordered_blocks += block_group_dict[block_group_id]
# for b in original_blocks:
# re-index blocks and headers based off of new ordering
header_idx = 0
for idx, block in enumerate(ordered_blocks):
block["block_idx"] = idx
if block["block_type"] == "header":
header_idx = idx
ordered_blocks[idx]["header_block_idx"] = header_idx
return ordered_blocks
def visual_clean_lines(
    lines,
    page_stats={},
    page_info_dict={},
    page_idx=0,
    line_set=None,
):
    # use a fresh set per call; a shared mutable default would leak seen lines across pages
    line_set = set() if line_set is None else line_set
    page_blocks = []
header_block_idx = -1
block_idx = 0
# block_idx = page_idx
style_dict = {}
join_font_spacing = False
prev_line = None
text_list = []
prev_ents = 0
curr_ents = 0
is_incomplete = False
colon_rule = False
text_group_start = True
text_group_start_idx = 0
prev_line = None
next_line = None
# for idx, line in enumerate(lines[12:14]):
sentence_visual_end = False
group_id = 0
for idx, line in enumerate(lines):
# print(idx)
line_str, style_dict, text_list = (
line["text"],
line["style"],
line["text_list"],
)
line_str = " ".join(line_str.split())
if should_skip(line_str):
continue
if line_str in line_set:
continue
if len(line_str.split()) > 8:
line_set.add(line_str)
curr_line = line_parser.Line(
line_str=line_str,
style_dict=style_dict,
text_list=text_list,
page_details=page_stats,
)
if prev_line is None:
# initialize memory of previous line.
# this will update with join decisions
list_char = ""
if curr_line.line_type == "list_item":
list_char = curr_line.text[0]
curr_line.text = curr_line.text[1:].lstrip()
if curr_line.line_type == "header":
header_block_idx = block_idx
block = {
"block_idx": block_idx,
"block_text": curr_line.text,
"block_type": curr_line.line_type,
"header_block_idx": header_block_idx,
"block_group": [curr_line.visual_line.text_list],
"list_char": list_char,
"fs": curr_line.visual_line.start_fs,
"text_group_start_idx": text_group_start_idx,
"block_list": curr_line.visual_line.text_list,
"line": curr_line,
"y": curr_line.visual_line.start_y,
"group_id": group_id,
}
prev_line = curr_line
block_idx += 1
# if (idx <= 3) or (idx >= len(lines) - 3):
# line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip()
# if line_without_numbers:
# # track block_idx for de-duplication
# line_set[line_without_numbers].append((page_idx, block_idx))
page_blocks.append(block)
continue
# print("--" * 50)
# print(prev_line.line_type, "\n", prev_line.text)
# print(prev_ents)
# print(prev_line.visual_line.fw_list)
# print(prev_line.visual_line.font_family)
# print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text)
# print(prev_line.visual_line.mode_fs)
# print(curr_line.line_type, "\n", curr_line.text)
# print(curr_ents)
# print()
# print(curr_line.visual_line.font_family)
# print(curr_line.visual_line.mode_fs)
# print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text)
if (
len(prev_line.text) > 1
and len(curr_line.text) > 1
and prev_line.text[:2] == curr_line.text[:2]
and prev_line.text[1] == " "
and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit())
and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha())
):
curr_line.line_type = "list_item"
curr_line.is_list_item = True
curr_line.is_list_or_row = True
if page_blocks[-1]["block_type"] != "list_item":
page_blocks[-1]["block_type"] = "list_item"
page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0]
page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][
1:
].lstrip()
same_start_fs = (
abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5
)
same_end_fs = (
abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5
)
same_end_start_fs = (
abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5
)
prev_above_curr = (
True
if prev_line.visual_line.end_y < curr_line.visual_line.start_y
else False
)
y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y
top_overlap = compute_overlap_top_bottom(
start_x0=prev_line.visual_line.start_x,
end_x0=prev_line.visual_line.end_x,
start_x1=curr_line.visual_line.start_x,
end_x1=curr_line.visual_line.end_x,
)
bottom_overlap = compute_bottom_top_overlap(
start_x0=prev_line.visual_line.start_x,
end_x0=prev_line.visual_line.end_x,
start_x1=curr_line.visual_line.start_x,
end_x1=curr_line.visual_line.end_x,
)
prev_overlap_curr = True if bottom_overlap or top_overlap else False
use_visual_join = True if prev_above_curr and prev_overlap_curr else False
if not use_visual_join and prev_line.incomplete_line:
join_font_spacing = True
if not (prev_line.is_table_row or curr_line.is_table_row):
if page_stats["n_lines"] <= 3:
join_font_spacing = True
else:
join_font_spacing = check_page_spacing(
prev_line,
curr_line,
page_stats["fs_and_diff_next_y"],
)
# if the font is different and font-family is different
different_font_family = (
curr_line.visual_line.font_family != prev_line.visual_line.font_family
)
different_common_fs = (
prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs
and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs
)
different_font = (
different_font_family and different_common_fs and not join_font_spacing
)
# start and end characters are same font or the mode of fonts of both lines is the same
same_font = (
(prev_line.visual_line.fs == curr_line.visual_line.fs)
or (same_start_fs and same_end_fs)
or same_end_start_fs
or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs
) and not different_font
prev_ents = (
len(prev_line.visual_line.text_list)
if not prev_line.line_type == "list_item"
else 0
)
curr_ents = (
len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0
)
ents_aligned = check_tr_alignment(prev_line, curr_line)
is_incomplete_sent = (
prev_line.incomplete_line
and not prev_line.ends_with_period
or prev_line.ends_with_comma
)
# logic using line after curr
if idx + 1 < len(lines):
            # this is inefficient as line_parser is called twice,
            # once for next_line and once for curr_line.
next_line = lines[idx + 1]
# print("NEXT LINE\n", next_line['text'])
next_line_str, next_style_dict, next_text_list = (
next_line["text"],
next_line["style"],
next_line["text_list"],
)
next_line = line_parser.Line(
line_str=next_line_str,
style_dict=next_style_dict,
text_list=next_text_list,
page_details=page_stats,
)
# if the last line was not a table, check if the next line is a table to avoid single tr
if prev_line.line_type != "table_row" and not ents_aligned:
# check if the next line is a table and matches curr_line
next_line_tr = next_line.line_type == "table_row" or should_join_table(
curr_line,
next_line,
False,
)
if not next_line_tr and curr_line.line_type == "table_row":
curr_line.line_type = "para"
# if the next line is joinable by visual stats but prev and curr are not
# don't join the line (only true by x-span check and y is below for prev cur)
# if this is not true ignore the rule
prev_not_above_next = (
next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y
)
next_line_join = False
if next_line and check_layout(prev_line, next_line, prev_not_above_next):
next_line_join = check_page_spacing(
curr_line,
next_line,
page_stats["fs_and_diff_next_y"],
)
# if the prev line is not visually joinable and the curr_next is
# make sure the prev_line doesn't join the curr_line
curr_next_visual_join = not join_font_spacing and next_line_join
# print()
# print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line")
# print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line)
# print("join_font_spacing:,", join_font_spacing)
is_incomplete = (
is_incomplete_sent
or (join_font_spacing and not sentence_visual_end)
or curr_line.continuing_line
)
# print("is_incomplete", is_incomplete)
has_overlap_with_min = (
compute_overlap(
curr_line.visual_line.start_x,
curr_line.visual_line.end_x,
prev_line.visual_line.start_x,
prev_line.visual_line.end_x,
divide_by_min=True,
)
> 0.7
)
is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0
is_visually_apart = (has_overlap_with_min and not is_below) or (
not has_overlap_with_min and is_below
)
above_bold_below_not = (
prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0
)
has_overlap_with_max = (
compute_overlap(
curr_line.visual_line.start_x,
curr_line.visual_line.end_x,
prev_line.visual_line.start_x,
prev_line.visual_line.end_x,
divide_by_min=False,
)
> 0.3
)
is_not_header_over_para = True
if (
above_bold_below_not
and not has_overlap_with_max
and prev_line.line_type == "header"
and not prev_line.incomplete_line
):
is_not_header_over_para = False
# print("header over para check")
# print("""above_bold_below_not
# and not has_overlap_with_max
# and prev_line.line_type == "header"
# """)
# print(above_bold_below_not)
# print(has_overlap_with_max, j)
# print(prev_line.line_type == "header")
# print()
# print(is_not_header_over_para)
###########
# List item
if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]):
prev_line.line_type = "list_item"
curr_line.line_type = "list_item"
curr_line.is_list_item = True
# change prev_line to list item
if page_blocks[-1]["block_type"] != "list_item":
page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0]
page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][
1:
].lstrip()
page_blocks[-1]["block_type"] = "list_item"
close_text_y = (
curr_line.visual_line.start_y
- curr_line.visual_line.mode_fs
- prev_line.visual_line.start_y
- prev_line.visual_line.mode_fs
) <= 0
aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x
title_text = False
if len(lines) < 10:
title_text = top_overlap == 1.0 and close_text_y and aligned_text
visual_header = visual_header_check(prev_line, curr_line, same_font)
list_item_rule = curr_line.has_list_char or (
curr_line.numbered_line
and not (
(prev_line.incomplete_line and curr_line.continuing_line)
or join_font_spacing
)
)
last_2_block_tr = False
if len(page_blocks) >= 2:
last_block_tr = (
page_blocks[-1]["block_type"] == "table_row"
and page_blocks[-2]["block_type"] == "table_row"
)
if not last_block_tr and curr_line.line_type == "para":
# check to join
if prev_line.incomplete_line and curr_line.continuing_line:
last_2_block_tr = True
no_space_join = prev_line.ends_with_period and curr_line.text[0] != " "
visual_header_by_stats = visual_header_from_stats(
prev_line,
curr_line,
page_stats,
)
header_join = False
common_list = curr_line.has_list_char or prev_line.has_list_char
if (
visual_header_by_stats
and curr_line.incomplete_line
and same_font
and not (prev_line.is_table_row or curr_line.is_table_row or common_list)
):
header_join = True
# print("LINEJOIN CHECK")
# print("positive\n", "*" * 10)
# print(f"\nsame_font:{same_font}",
# f"\nis_incomplete:{is_incomplete}",
# f"\nis_not_header_over_para:{is_not_header_over_para}")
# print("join_font_spacing", join_font_spacing)
# print("header join", header_join)
# print()
# print("negative\n", "*" * 10)
# print(f"\nis_visually_apart:{is_visually_apart}",
# f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}",
# f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}",
# f"\ncurr_line table {curr_line.line_type == 'table_row'}",
# f"\ncurr_line list {curr_line.is_list_item}",
# f"\nvisual_header {visual_header}",
# f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}')
if (
same_font
and not should_join_table(prev_line, curr_line, ents_aligned)
and not (curr_line.line_type == "table_row" or list_item_rule)
and not (prev_line.line_type == "table_row" and not last_2_block_tr)
and is_incomplete
and not curr_next_visual_join # is_visually_apart
and not visual_header
or not check_parentheses(prev_line.text)
and is_not_header_over_para
and not no_space_join
or title_text
or header_join
):
# print("JOIN")
if not is_visually_apart and bottom_overlap < 0.5:
# this would signify end of paragraph
sentence_visual_end = True
else:
sentence_visual_end = False
if page_stats["n_lines"] <= 3:
page_blocks[-1]["block_type"] = "header"
elif (
not prev_line.line_type == "list_item"
): # and not curr_line.visual_line.is_header:
page_blocks[-1]["block_type"] = "para"
new_text = formatter.connect(
prev_line.text.rstrip(),
curr_line.text.lstrip(),
)
new_text_list = (
prev_line.visual_line.text_list + curr_line.visual_line.text_list
)
# print("Max ex min ex assignment")
            max_x = max(prev_line.visual_line.max_x, curr_line.visual_line.max_x)
min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
prev_line_type = prev_line.line_type
page_blocks[-1]["block_text"] = new_text
prev_start_y = prev_line.visual_line.start_y
curr_start_y = curr_line.visual_line.start_y
prev_end_y = prev_line.visual_line.end_y
wrapped_page = prev_line.visual_line.wrapped_page
# pass the line parser attributes
prev_line = curr_line
# add appended text and text_list, preserve the line type
prev_line.text = new_text
prev_line.visual_line.start_y = prev_start_y
prev_line.visual_line.text_list = new_text_list
prev_line.line_type = prev_line_type
prev_line.visual_line.min_x = min_x
prev_line.visual_line.max_x = max_x
prev_line.visual_line.wrapped_page = wrapped_page
if curr_start_y < prev_end_y:
prev_line.visual_line.wrapped_page = True
# print(prev_start_y)
# print("Join")
# print()
# print("-" * 50)
# print()
# new block
else:
# print("NEW block")
# print("*" * 50)
if not is_visually_apart and bottom_overlap < 0.5:
# this would signify end of paragraph
sentence_visual_end = True
else:
sentence_visual_end = False
# print("-"*50)
colon_rule = (
prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents
)
# normal case
tab_check_join = {
prev_line.visual_line.tab_count_join,
prev_line.visual_line.tab_count,
} & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count}
tab_check = sum(tab_check_join) > 0
# print("-+" * 50)
# print("TAB POSITIONS")
# print(prev_line.text)
# print(prev_line.visual_line.start_x_list)
# print(prev_line.visual_line.start_x_list_single_ent)
# print(prev_line.visual_line.tab_count)
# print(prev_line.visual_line.tab_count_join)
#
# print(curr_line.text)
# print(curr_line.visual_line.start_x_list)
# print(curr_line.visual_line.start_x_list_single_ent)
# print(curr_line.visual_line.tab_count)
# print(curr_line.visual_line.tab_count_join)
# print("tabcheck", tab_check)
# print("ents_aligned", ents_aligned)
# print(prev_ents, curr_ents)
# print(curr_line.visual_line.text_list)
# print("-+" * 50)
if visual_header_by_stats and prev_line.line_type != "table_row":
page_blocks[-1]["block_type"] = "header"
elif (
colon_rule
and prev_ents == 1
and prev_line.line_type != "list_item"
and not (prev_line.incomplete_line and curr_line.continuing_line)
):
# print("Table Conversion")
# print()
# print("colon check")
# print(prev_line.text.split(":"))
# print(curr_line.text.split(":"))
# print("TR1")
new_text_list = prev_line.text.split(":")
new_text_list = [new_text_list[0] + ":", new_text_list[1:]]
page_blocks[-1]["block_type"] = "table_row"
page_blocks[-1]["block_list"]: new_text_list
if text_group_start:
text_group_start = False
text_group_start_idx = page_blocks[-1]["block_idx"]
page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
curr_line.line_type = "table_row"
curr_line.is_list_or_row = True
# print("Table Conversion!")
# print(prev_ents, curr_ents)
# print(page_blocks[-1]["block_text"])
# print("TR3")
elif (
tab_check and ents_aligned and prev_line.line_type != "list_item"
) or (colon_rule and not prev_line.incomplete_line):
# print("Table Conversion")
# print(prev_ents, curr_ents)
# print(page_blocks[-1]["block_text"])
# print("TR2")
page_blocks[-1]["block_type"] = "table_row"
if text_group_start:
text_group_start = False
text_group_start_idx = page_blocks[-1]["block_idx"]
page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
curr_line.line_type = "table_row"
else:
text_group_start = True
text_group_start_idx = -1
list_char = ""
if curr_line.line_type == "list_item":
list_char = curr_line.text[0]
curr_line.text = curr_line.text[1:].lstrip()
if curr_line.line_type == "header":
header_block_idx = block_idx
if (visual_header or visual_header_by_stats) and not (
prev_line.line_type == "list_item"
or prev_line.line_type == "numbered_list_item"
):
page_blocks[-1]["block_type"] = "header"
# print()
# print("*" * 40)
# print("NEW BLOCK")
# print()
# print("*" * 40)
# print(curr_line.line_type, curr_line.text)
# group attribute
if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0:
group_id += 1
block = {
"block_idx": block_idx,
"block_text": curr_line.text,
"block_type": curr_line.line_type,
"header_block_idx": header_block_idx,
"block_group": [curr_line.visual_line.text_list],
"text_group_start_idx": text_group_start_idx,
"list_char": list_char,
"group_id": group_id,
"fs": curr_line.visual_line.start_fs,
"x": curr_line.visual_line.start_x,
"y": curr_line.visual_line.start_y,
"line": curr_line,
"block_list": curr_line.visual_line.text_list,
}
        # This is to account for headers that are false positives. TODO: improve the header code
prev_text = page_blocks[-1]["block_text"]
if page_blocks[-1]["block_type"] == "header" and (
len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16
):
page_blocks[-1]["block_type"] = "para"
prev_line = curr_line
block_idx += 1
page_blocks.append(block)
    # if there are only a few blocks, title text may have been missed
if len(page_blocks) <= 2:
for idx, block in enumerate(page_blocks):
if "." not in block["block_text"] and len(block["block_text"].split()) < 10:
page_blocks[idx]["block_type"] = "header"
page_blocks = order_blocks(page_blocks)
return page_blocks, line_set
def clean_line(line):
line = line.replace("\n", " ")
line = line.replace("\t", " ")
line = line.strip()
return line
def fix_spaced_characters(line_text):
line_text = re.sub(r"\s+", "", line_text)
return su.segment(line_text)
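    # Used for lines whose characters were extracted with spaces between them, e.g.
    # "e x e c u t i v e": the whitespace is stripped and the spell utility re-segments
    # the result into "executive".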
def connect(prev, curr):
has_space = prev.endswith(" ")
result = prev + ("" if has_space else " ") + curr
return result
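    # Example: connect("ending in a", "continuation") -> "ending in a continuation";
    # if the first part already ends with a space, no extra space is inserted.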
def get_numbers(line):
# test = re.compile(r"[0-9]+\.?[0-9]?")
regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$")
return regex.search(line)
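    # Example: get_numbers("subtotal 42") matches "42" and get_numbers("about $3.50")
    # matches "$3.50", while get_numbers("no trailing digits here") returns None; the
    # pattern only looks at a number at the very end of the line.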
def check_block_join(prev_block, block):
prev_text = prev_block["block_text"]
curr_text = block["block_text"]
blocks_are_paras = (
prev_block["block_type"] == "para" and block["block_type"] == "para"
)
if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras:
prev_line = line_parser.Line(prev_block["block_text"])
curr_line = line_parser.Line(block["block_text"])
if prev_line.incomplete_line or curr_line.continuing_line:
return True
return False
def join_blocks(page_blocks, blocks):
prev_last_block = page_blocks[-1][-1]
# update page blocks and blocks
# prev_blocks = page_blocks[-1]
# last_prev_block = prev_blocks[-1]
# check to join last_prev_block with first blocks[0]
# if it's a join, pop the block and join, subtract block indexes
prev_last_block["block_text"] = (
prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip()
)
prev_last_block["block_list"].append(blocks[0]["block_list"])
# print(prev_block)
page_blocks[-1][-1] = prev_last_block
for block in blocks[1:]:
block["block_idx"] -= 1
return page_blocks, blocks[1:]
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>import datetime
import logging
import math
import re
import string
from nltk.corpus import stopwords
from .patterns import abbreviations
from .patterns import states
from .patterns import states_abbreviations
from .styling_utils import mode_of_list
try:
stop_words = set(stopwords.words("english"))
except Exception as e:
logging.error(e)
import nltk
    # nltk.download() returns a bool, so download the corpus and then re-import it
    nltk.download("stopwords")
    from nltk.corpus import stopwords
    stop_words = set(stopwords.words("english"))
stop_words.add("per")
continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~"
list_chars = [
"•",
"➢",
"*",
"ƒ",
"",
"",
"",
"",
"»",
"☐",
"·",
"�",
"▪",
"▪",
"○",
"",
"–",
]
list_types = {
"•": "circle",
"➢": "wide_symbol_arrow",
"*": "star",
"ƒ": "f",
"": "clock",
"": "small_square",
"": "narrow_symbol_arrow",
"": "large_square",
"»": "double_arrow",
"☐": "hollow_square",
"·": "circle",
"�": "special_char",
"▪": "very_small_square",
"▪": "very_small_square",
"○": "hollow_circle",
"": "hollow_squere",
"–": "dash",
"‒": "another-dash",
"̶": "underscore",
}
unicode_list_types = {
"\\uf0b7": "•",
"\\uf0fc": "",
}
footnote_types = {
"©"
}
ambiguous_list_chars = ["+", "-"]
units = ["acres", "miles", "-"] # - could represent a null value in a row
punctuations = string.punctuation + "“"
start_quotations = ["'", '"', "“"]
end_quotations = ["'", '"', "”"]
"""
Quote Pattern details:
\\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly.
["“\'] ==> Quote patterns
(?!\\D\\s) ==> Negative Lookahead for single character following the quote.
Helps in removing words like Macy's, don't ...
(?!\\d+) ==> Negative Lookahead for one or more digits following the pattern.
Helps in removing words like '19, '2019
(.*?)[,;.]?[”"\'] ==> Match all other data.
"""
# Add / Modify Quotation pattern in ingestor_utils/utils.py also.
quote_pattern = re.compile(
r'(?:(?<=\W)|(?<=^))["“‘’\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[”"‘’\']+',
) # (r'["“\'](.*?)[,;.]?[”"\']')
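# Example: quote_pattern.findall('the term "net revenue" is defined below') returns
# ['net revenue'], while possessives such as "O'Reilly's" are left alone because the
# opening quote must follow a non-word character or the start of the line.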
single_char_pattern = re.compile(r'[a-zA-Z]')
multi_char_pattern = re.compile(r'[a-zA-Z]+')
roman_number_pattern = re.compile(r'[ixvIXV]+$')
ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"“‘’”\'\s]*$")
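# Example: this matches "The lease term is five years." but not "Acme Inc." or
# "L.P.", since the lookbehinds exclude "Inc"-style abbreviations and delimiters
# that are themselves preceded by another delimiter plus a letter.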
conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"]
class Word:
def __init__(self, token):
self.text = token
self.is_percent = False
self.is_number = False
self.is_year = False # year does not count as a number
self.is_dollar = False
self.is_million = False
self.is_billion = False
self.is_thousand = False
self.is_date_entry = False
self.is_negative = False
        self.length = len(self.text)
self.is_stop_word = self.text.lower() in stop_words
self.is_number_range = False
self.parts = []
text_without_punct = self.text
while (
len(text_without_punct) > 1 and
(text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations)
):
text_without_punct = text_without_punct[0:-1]
        # remove leading unbalanced punctuation
while (
len(text_without_punct) > 1 and
(text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations)
):
text_without_punct = text_without_punct[1:]
self.text_without_punct = text_without_punct
self.is_noun = self.text_without_punct[0].isupper()
n = self.check_numeric()
self.check_date()
try:
if n:
n = round(float(n))
if n > 0:
digits = int(math.log10(n)) + 1
elif n == 0:
digits = 1
else:
digits = int(math.log10(-n)) + 2
self.num_digits = digits
if digits == 4 and self.text.replace(",", "") == self.text:
self.is_year = True
self.is_number = False
else:
self.num_digits = 0
except Exception as e:
logging.error(e)
self.num_digits = 0
def check_date(self):
if "/" in self.text or "-" in self.text:
text = self.text.replace("/", "-")
date_patterns = [
"%b-%d",
"%B-%d",
"%B-%d-%y",
"%B-%d-%Y",
"%b-%d-%Y",
"%b-%d-%y",
"%m-%d",
"%m-%d-%y",
"%m-%d-%Y",
]
for pat in date_patterns:
try:
datetime.datetime.strptime(text, pat)
self.is_date_entry = True
return
except ValueError:
pass
else:
self.is_date_entry = False
def check_numeric(self):
word = self.text.lower()
if not word.isalpha():
if word.isprintable():
if not word.isnumeric():
if word.startswith("(") and word.endswith(")"):
word = word[1:-1]
if word.startswith("-"):
self.is_negative = True
word = word[1:]
if word.startswith("$"):
self.is_dollar = True
word = word[1:]
elif word.endswith("$"):
self.is_dollar = True
word = word[0:-1]
elif word.endswith("%"):
self.is_percent = True
word = word[0:-1]
elif word.endswith("m"):
self.is_million = True
elif word.endswith("bn"):
self.is_billion = True
if word.startswith("(") and word.endswith(")"):
word = word[1:-1]
word = word.replace(",", "")
if word.isnumeric() or word.replace(".", "", 1).isnumeric():
self.is_number = True
parts = word.split("-")
if (
len(parts) == 2
and parts[0].isnumeric()
and parts[1].isnumeric()
):
self.is_number_range = True
self.parts = parts
else:
self.is_number = True
if self.is_number:
numeric_part = word
return numeric_part
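# Example: Word("$1,234") is parsed with is_dollar=True, is_number=True and
# num_digits=4, while Word("2019") is re-flagged as a year (is_year=True,
# is_number=False) because it is a four-digit number written without a comma.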
class Line:
def __init__(
self,
line_str,
text_list=[],
style_dict={},
page_details={},
noun_chunk_ending_tokens=[],
):
self.text = line_str.strip()
self.visual_line = VisualLine(text_list, style_dict, page_details)
self.words = []
self.is_independent = False
self.is_header = False
self.is_header_without_comma = False
self.noun_chunks = []
self.quoted_words = quote_pattern.findall(self.text)
self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens}
self.parse_line()
def check_header(self):
# Section X, Article Y, Note 1 etc.
first_word_header = self.first_word.lower() in ["section", "article", "note"]
# If there are a certain percentage of title words (first letter capitalize)
title_ratio = (
self.title_word_count / self.eff_word_count
if self.eff_word_count > 0
else 1.0
)
# print(self.title_word_count, self.eff_word_count, title_ratio)
# Section 1 is a header but Section 1: Hello 3 is not
has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10
has_header_structure = (
(first_word_header or has_enough_titles) and self.number_count == 1
) or self.numbered_line or self.text.isupper()
# has_header_structure = has_header_structure and self.eff_word_count <
last_word_number = (
self.last_word.lower() in units
or self.last_word_number
and not has_header_structure
)
last_word_date = self.last_word_date and not has_header_structure
# Find lines ending with sentence delimiter. But exclude text like "L.P."
ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None
sentence_structure = self.ends_with_period and not (
has_header_structure and title_ratio > 0.9
) and ends_with_delim
last_letter_is_punctuation = (
self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and
ends_with_delim
)
self.is_header_without_comma = (
not sentence_structure
and not self.has_list_char
and not self.first_char in footnote_types
and has_enough_titles
and not last_word_number
and (
self.number_count == 0
or (has_header_structure and self.number_count <= 1)
)
and not self.has_continuing_chars
and not last_word_date
and self.first_word_title
and not self.last_word_is_stop_word
and not self.is_zipcode_or_po
and not last_letter_is_punctuation
and not "://" in self.text # url pattern
)
self.is_header = self.is_header_without_comma and \
((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True)
def check_ends_with_period(self):
# punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.']
last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."]
self.ends_with_period = self.last_char in ["."] and not last_word_is_title
def check_table_row(self):
if not self.is_header:
value_count = (
self.number_count
+ self.dollar_count
+ self.pct_count
+ self.text.count(" - ")
)
word_symbols = self.word_count - self.dollar_sign_count
if word_symbols == 0:
word_symbols = 1
word_ratio = (
value_count + self.title_word_count + self.date_entry_count
) / word_symbols
self.is_table_row = (
(
(value_count > 0 or self.date_entry_count > 0)
and word_ratio > 0.7
and not self.ends_with_period
and not self.is_zipcode_or_po
)
and not self.last_word_is_stop_word
or ("...." in self.text)
)
else:
self.is_table_row = False
def check_list_item(self):
text = self.text.strip()
self.has_list_char = text[0] in list_types.keys()
# if not self.has_list_char and text[0] in ambiguous_list_chars:
# self.has_list_char = text[1:].strip()[0].isalpha()
self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$"
if self.is_list_item:
self.list_type = list_types[text[0]]
# matches 1.1 1.2.1 1 etc.
def check_numbered_line(self, word):
trunc_word = word
ends_with_parens = word.endswith(")")
number_end_char = word.endswith(".") or ends_with_parens
number_start_char = word.startswith("(")
if number_start_char and not ends_with_parens:
return False
if word[-1] in ["%", "$", ","]:
return False
if number_end_char:
trunc_word = word[:-1]
if number_start_char:
trunc_word = trunc_word[1:]
# To handle scenarios like (ii)(A)
if ")(" in trunc_word:
trunc_word = trunc_word.split(")(")[0]
parts = trunc_word.split(".")
self.integer_numbered_line = False
self.roman_numbered_line = False
self.letter_numbered_line = False
self.dot_numbered_line = False
mixed_list_items = False
max_digits = 2
max_roman = 6
for idx, part in enumerate(parts):
# print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0)
if len(part) <= max_digits:
# (1), (2), (3)
self.integer_numbered_line = part.isdigit() and (
len(parts) > 1 or word.endswith(")")
)
# 1. 2. 3.
self.dot_numbered_line = part.isdigit() and (
len(parts) > 1 or word.endswith(".")
)
# a. b. c. or a) b) c)
# idx > 0 for patterns like 10.a
# a1 b1 c1 etc.
self.letter_numbered_line = (
True
if single_char_pattern.match(part)
and (
(number_end_char and len(part) == 1 and len(parts) == 1)
or multi_char_pattern.sub("", part).isdigit()
or idx > 0
)
else False
)
if len(part) <= max_roman:
# xi, i, iv
self.roman_numbered_line = (
True if roman_number_pattern.match(part) and idx == 0 else False
)
if part.endswith(")") and part[0].isalnum() and "(" in part:
mixed_list_items = True
# else:
# self.integer_numbered_line = False
# A-1
# self.letter_numbered_line = (
# True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False
# )
self.numbered_line = (
self.integer_numbered_line
or self.roman_numbered_line
or self.letter_numbered_line
or self.dot_numbered_line
) and not mixed_list_items
if not self.numbered_line:
break
if self.numbered_line:
self.start_number = trunc_word
self.line_without_number = self.text[len(word) + 1 :]
self.full_number = self.text[:len(word)]
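        # Examples: "(2)" is treated as integer numbering, "3." as dot numbering, "iv."
        # as roman numbering and "a)" as letter numbering, while "10%" is rejected
        # because of its trailing symbol.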
# check if line is part of address
def check_zipcode_or_pobox(self):
# check if line matches format P.O. box xxxxx
pobox = (
self.word_count == 3
and self.last_word_number
and self.first_word.lower() in ["po", "p.o", "p.o."]
)
# check if line is last part of address, matching format "city, state zipcode"
zipcode = (
self.word_count
< 7 # ensure line is standalone address, not part of larger sentence
and (
self.contains_state # line contains comma followed by state name or abbreviation
# line ends in zipcode, with format xxxxx or xxxxx-xxxx
and (
(self.last_word_number or self.last_word[-4:].isdigit())
and (
(len(self.last_word) == 10 and self.last_word[-5] == "-")
or len(self.last_word) == 5
)
)
and not self.ends_with_period
)
)
self.is_zipcode_or_po = pobox or zipcode
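        # Example: "P.O. Box 12345" satisfies the pobox check, and a closing address
        # line such as "Portland, Oregon 97204" satisfies the zipcode check (a comma
        # followed by a state name from patterns.states plus a 5-digit or ZIP+4 ending).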
def set_line_type(self):
line_type = "para"
if self.is_table_row:
line_type = "table_row"
elif self.is_header:
line_type = "header"
elif self.is_list_item or self.numbered_line:
line_type = "list_item"
else:
line_type = "para"
self.line_type = line_type
def parse_line(self):
self.words = []
self.title_word_count = 0
self.alpha_count = 0
self.list_type = ""
self.integer_numbered_line = False
self.roman_numbered_line = False
self.dot_numbered_line = False
self.numbered_line = False
self.stop_word_count = 0
self.dollar_count = 0
self.pct_count = 0
self.number_count = 0
self.last_word_number = False
self.first_word_title = False
self.letter_numbered_line = False
self.ends_with_hyphen = False
self.last_word_date = False
self.is_reference_author_name = False
self.date_entry_count = 0
self.last_word_is_stop_word = False # self.last_word in self.stopwords
self.hit_colon = False
self.is_zipcode_or_po = False
self.contains_state = False
self.addresses = []
# todo - this is a stopgap solution, need to make it more efficient
tokens = self.text.split()
self.length = len(self.text)
self.word_count = len(tokens)
self.dollar_sign_count = tokens.count("$")
last_idx = self.word_count - 1
first_alpha_found = False
prev_token_comma = False
self.eff_length = 0
single_letter_word_count = 0
noun_chunk_buf = []
if self.length == 0:
return
for idx, token in enumerate(tokens):
if token in unicode_list_types.keys():
token = unicode_list_types[token]
if token.__contains__(":"):
self.hit_colon = True
# remove punctuation unless (word) or unless it is the first token or if it has colon
last_char = token[-1]
# remove punctuation unless (word) or unless it is the first token
if (
(token[-1] in string.punctuation or token[-1] in end_quotations)
and not (token[0] in string.punctuation or token[0] in start_quotations)
and (not idx == 0 or token[-1] == ":")
):
token = token[0:-1]
if len(token) == 0:
continue
# if prev token contained comma, check if current token is state name
if prev_token_comma and (
token.lower() in states or token.lower() in states_abbreviations
):
self.contains_state = True
prev_token_comma = False
if prev_token_comma:
prev_token_comma = False
if last_char == ",":
prev_token_comma = True
if idx == 0 and not token.lower() == "i" and not token.lower() == "a":
self.check_numbered_line(token)
if token.istitle() or token.isupper(): # and not self.hit_colon:
self.title_word_count = self.title_word_count + 1
if token.isalpha():
# if not self.hit_colon:
self.alpha_count = self.alpha_count + 1
if not first_alpha_found:
first_alpha_found = True
if idx == 0:
self.first_word_title = token[0].isupper()
word = Word(token)
if word.is_number:
self.number_count = self.number_count + 1
if idx == last_idx:
self.last_word_number = True
if word.is_date_entry:
self.date_entry_count += 1
if idx == last_idx:
self.last_word_date = True
if word.is_dollar:
self.dollar_count = self.dollar_count + 1
if idx == last_idx:
self.last_word_number = True
if word.is_percent:
self.pct_count = self.pct_count + 1
if idx == last_idx:
self.last_word_number = True
self.eff_length += word.length
if word.length == 1:
single_letter_word_count += 1
if word.is_stop_word:
if not self.hit_colon:
self.stop_word_count = self.stop_word_count + 1
if idx == last_idx and len(token) != 1 and not token.isupper():
self.last_word_is_stop_word = True
if word.is_noun or word.text == "&":
noun = word.text_without_punct
prev_word = self.words[-1] if len(self.words) > 0 else None
if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf:
noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway
if noun.endswith("'s"):
noun = noun[0:-2]
noun_chunk_buf.append(noun)
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
elif (
"".join([x.lower() for x in noun if x not in {".", ","}])
in self.noun_chunk_ending_tokens
):
noun_chunk_buf.append(noun)
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
else:
noun_chunk_buf.append(noun)
elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]:
noun_chunk_buf.append(word.text_without_punct)
elif len(noun_chunk_buf):
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
self.words.append(word)
if len(noun_chunk_buf) > 0:
self.noun_chunks.append(" ".join(noun_chunk_buf))
self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks))))
self.first_word = tokens[0]
self.last_word = tokens[-1]
self.last_char = self.text[-1]
self.ends_with_period = self.last_char == "."
self.ends_with_comma = self.last_char == ","
self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "."
self.eff_word_count = self.alpha_count - self.stop_word_count
self.check_ends_with_period()
self.first_char = self.text[0]
self.has_continuing_chars = not self.numbered_line and (
self.first_char.islower() or self.first_char in continuing_chars
)
self.last_continuing_char = self.last_char in continuing_chars
self.check_zipcode_or_pobox()
self.check_list_item()
self.check_header()
self.check_table_row()
self.separate_line = (
self.is_header
or self.is_table_row
or self.is_list_item
or self.is_zipcode_or_po
)
self.is_list_or_row = self.is_table_row or self.is_list_item
self.is_header_or_row = (
self.is_header or self.is_table_row or self.is_zipcode_or_po
)
self.ends_with_abbreviation = self.ends_with_period and (
(self.last_word.find(".") != len(self.last_word) - 1)
or self.last_word.lower() in abbreviations
or len(self.last_word) <= 3
)
self.incomplete_line = not self.is_header_or_row and (
not self.ends_with_period
or self.ends_with_abbreviation
or self.end_with_period_single_char
)
self.continuing_line = self.has_continuing_chars and not self.separate_line
self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8
self.set_line_type()
if self.is_header or self.is_header_without_comma:
if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2:
self.is_reference_author_name = True
self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list
# print(self.separate_line)
# self.continuing_line = not self.separate_line and
def to_json(self):
json_lp = dict(self.__dict__)
del json_lp["visual_line"]
words = []
for word in self.words:
words.append(word.__dict__)
json_lp["words"] = words
return json_lp
class VisualLine:
def __init__(self, text_list=[], style_dict={}, page_stats={}):
self.text_list = text_list
self.start_x = None
self.start_y = None
self.end_x = None
self.end_y = None
self.fs = None
self.fw = None
self.start_fs = None
self.end_fs = None
self.diff_prev_y = None
self.diff_next_y = None
self.is_comparably_sized = False
self.is_comparably_bolded = False
self.is_prev_space_smallest = False
self.is_next_space_smallest = False
self.wrapped_page = False
self.text = " ".join(self.text_list)
if style_dict:
self.start_x = style_dict["start_x"][0]
self.start_y = style_dict["start_y"][0]
self.end_x = style_dict["end_x"][-1]
self.end_y = style_dict["end_y"][-1]
self.fs = style_dict["line_fs"][0]
self.fw = style_dict["line_fw"][0]
self.diff_prev_y = style_dict["diff_prev_y"][0]
self.diff_next_y = style_dict["diff_next_y"][0]
self.font_family = (
style_dict["font_family"][0] if len(style_dict["font_family"]) else None
)
self.font_style = (
style_dict["font_style"][0] if len(style_dict["font_style"]) else None
)
self.min_x = (
self.start_x
) # these variables are adjustable during line joins for line width
self.max_x = self.end_x
self.start_x_list = style_dict["start_x"] # joined ents
self.end_x_list = style_dict["end_x"] # joined ents
self.start_x_list_single_ent = style_dict["start_x_list"][0]
self.end_x_list_single_ent = style_dict["end_x_list"][0]
self.mode_fs = mode_of_list(style_dict["line_fs"])
self.tab_count = 0
# calculates tabs for when tika misses word split
if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent):
self.start_end_list = list(
zip(self.start_x_list_single_ent, self.end_x_list_single_ent),
)
for word_x, next_word_x in zip(
self.start_end_list[:-1],
self.start_end_list[1:],
):
word_start_x, word_end_x = word_x
next_word_start_x, next_word_end_x = next_word_x
word_distance = next_word_start_x - word_end_x
if word_distance > 20:
self.tab_count += 1
else:
self.start_end_list = []
self.tab_count_join = 0 # tab count after join in ptolines
# calculates tabs for when tika misses word split
if len(self.start_x_list) == len(self.end_x_list):
self.start_end_list_join = list(
zip(self.start_x_list, self.end_x_list),
)
for word_x, next_word_x in zip(
self.start_end_list_join[:-1],
self.start_end_list_join[1:],
):
word_start_x, word_end_x = word_x
next_word_start_x, next_word_end_x = next_word_x
word_distance = next_word_start_x - word_end_x
if word_distance > 20:
self.tab_count_join += 1
else:
self.start_end_list_join = []
if len(self.text.split()) == 2 and self.tab_count == 1:
self.text_list = self.text.split()
# Count tabs in text list, Eventually make it a function of font size
self.start_fs = round(style_dict["start_fs"][0], 1)
self.end_fs = round(style_dict["end_fs"][-1], 1)
self.compute_visual_features(page_stats)
def compute_visual_features(self, page_stats):
# compute font size relative to most common font
font_sizes_mode = page_stats["mode_fs"]
if self.fs > (4 / 3) * font_sizes_mode:
self.is_comparably_sized = True
else:
self.is_comparably_sized = False
# compute font weight relative to 600.0 which has generally
# been observed to correspond to bolding of some sort
font_weights_mode = page_stats["mode_fw"]
if font_weights_mode >= 600.0:
self.is_comparably_bolded = False
elif self.fw > 600.0:
self.is_comparably_bolded = True
# compare line height for similar type (same font) lines
if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2:
for k, v in page_stats["fs_and_diff_prev_y"].items():
if k == self.fs and 0 <= v < self.diff_prev_y:
break
else:
self.is_prev_space_smallest = True
if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2:
for k, v in page_stats["fs_and_diff_next_y"].items():
if k == self.fs and 0 <= v < self.diff_next_y:
break
else:
self.is_next_space_smallest = True
def should_join_table(self, next_line):
"""
        Check if the next line should be joined to this one as a table row (tr).
        Makes no assumption about whether the current line is itself a table row.
"""
# check list of spaced words
curr_line_ents = len(self.text_list)
next_line_ents = len(next_line.text_list)
ent_match = (
curr_line_ents == next_line_ents and curr_line_ents >= 2
) # tr should have at least two elements
# compare alignment of elements in both lists
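        # note: the alignment comparison is not implemented yet; the bare "return" below
        # yields None, so this method currently never returns a truthy value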
if ent_match:
return
return False
def should_join_para(self):
return False
def should_join_header(self):
return False
def __str__(self):
output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest},"
output_str += f"\nfont_style = {self.font_style}"
return output_str
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>import datetime
import logging
import math
import re
import string
from nltk.corpus import stopwords
from .patterns import abbreviations
from .patterns import states
from .patterns import states_abbreviations
from .styling_utils import mode_of_list
try:
stop_words = set(stopwords.words("english"))
except Exception as e:
logging.error(e)
import nltk
    nltk.download("stopwords")  # download the corpus without rebinding the stopwords module
stop_words = set(stopwords.words("english"))
stop_words.add("per")
continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~"
list_chars = [
"•",
"➢",
"*",
"ƒ",
"",
"",
"",
"",
"»",
"☐",
"·",
"�",
"▪",
"▪",
"○",
"",
"–",
]
list_types = {
"•": "circle",
"➢": "wide_symbol_arrow",
"*": "star",
"ƒ": "f",
"": "clock",
"": "small_square",
"": "narrow_symbol_arrow",
"": "large_square",
"»": "double_arrow",
"☐": "hollow_square",
"·": "circle",
"�": "special_char",
"▪": "very_small_square",
"▪": "very_small_square",
"○": "hollow_circle",
"": "hollow_squere",
"–": "dash",
"‒": "another-dash",
"̶": "underscore",
}
unicode_list_types = {
"\\uf0b7": "•",
"\\uf0fc": "",
}
footnote_types = {
"©"
}
ambiguous_list_chars = ["+", "-"]
units = ["acres", "miles", "-"] # - could represent a null value in a row
punctuations = string.punctuation + "“"
start_quotations = ["'", '"', "“"]
end_quotations = ["'", '"', "”"]
"""
Quote Pattern details:
\\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly.
["“\'] ==> Quote patterns
(?!\\D\\s) ==> Negative Lookahead for single character following the quote.
Helps in removing words like Macy's, don't ...
(?!\\d+) ==> Negative Lookahead for one or more digits following the pattern.
Helps in removing words like '19, '2019
(.*?)[,;.]?[”"\'] ==> Match all other data.
"""
# Add / Modify Quotation pattern in ingestor_utils/utils.py also.
quote_pattern = re.compile(
r'(?:(?<=\W)|(?<=^))["“‘’\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[”"‘’\']+',
) # (r'["“\'](.*?)[,;.]?[”"\']')
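# Illustrative behaviour of quote_pattern (assumed, not exhaustive):
#   quote_pattern.findall('He said "hello there." and left')  -> ['hello there']
#   quote_pattern.findall("Macy's results improved")          -> []   # apostrophe inside a word is skipped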
single_char_pattern = re.compile(r'[a-zA-Z]')
multi_char_pattern = re.compile(r'[a-zA-Z]+')
roman_number_pattern = re.compile(r'[ixvIXV]+$')
ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"“‘’”\'\s]*$")
conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"]
class Word:
def __init__(self, token):
self.text = token
self.is_percent = False
self.is_number = False
self.is_year = False # year does not count as a number
self.is_dollar = False
self.is_million = False
self.is_billion = False
self.is_thousand = False
self.is_date_entry = False
self.is_negative = False
self.length = len(self.text)
self.is_stop_word = self.text.lower() in stop_words
self.is_number_range = False
self.parts = []
text_without_punct = self.text
while (
len(text_without_punct) > 1 and
(text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations)
):
text_without_punct = text_without_punct[0:-1]
        # remove leading unbalanced punctuation
while (
len(text_without_punct) > 1 and
(text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations)
):
text_without_punct = text_without_punct[1:]
self.text_without_punct = text_without_punct
self.is_noun = self.text_without_punct[0].isupper()
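        # heuristic: a capitalized first letter marks the token as a noun-chunk candidate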
n = sel<fim_suffix>f.check_numeric()
self.check_date()
try:
if n:
n = round(float(n))
if n > 0:
digits = int(math.log10(n)) + 1
elif n == 0:
digits = 1
else:
digits = int(math.log10(-n)) + 2
self.num_digits = digits
if digits == 4 and self.text.replace(",", "") == self.text:
self.is_year = True
self.is_number = False
else:
self.num_digits = 0
except Exception as e:
logging.error(e)
self.num_digits = 0
def check_date(self):
if "/" in self.text or "-" in self.text:
text = self.text.replace("/", "-")
date_patterns = [
"%b-%d",
"%B-%d",
"%B-%d-%y",
"%B-%d-%Y",
"%b-%d-%Y",
"%b-%d-%y",
"%m-%d",
"%m-%d-%y",
"%m-%d-%Y",
]
for pat in date_patterns:
try:
datetime.datetime.strptime(text, pat)
self.is_date_entry = True
return
except ValueError:
pass
else:
self.is_date_entry = False
def check_numeric(self):
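        # Strip currency/percent/parenthesis markers and flag the token as a number,
        # dollar amount, percentage, million/billion figure or numeric range.
        # Returns the bare numeric string when a number is found, otherwise None.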
word = self.text.lower()
if not word.isalpha():
if word.isprintable():
if not word.isnumeric():
if word.startswith("(") and word.endswith(")"):
word = word[1:-1]
if word.startswith("-"):
self.is_negative = True
word = word[1:]
if word.startswith("$"):
self.is_dollar = True
word = word[1:]
elif word.endswith("$"):
self.is_dollar = True
word = word[0:-1]
elif word.endswith("%"):
self.is_percent = True
word = word[0:-1]
elif word.endswith("m"):
self.is_million = True
elif word.endswith("bn"):
self.is_billion = True
if word.startswith("(") and word.endswith(")"):
word = word[1:-1]
word = word.replace(",", "")
if word.isnumeric() or word.replace(".", "", 1).isnumeric():
self.is_number = True
parts = word.split("-")
if (
len(parts) == 2
and parts[0].isnumeric()
and parts[1].isnumeric()
):
self.is_number_range = True
self.parts = parts
else:
self.is_number = True
if self.is_number:
numeric_part = word
return numeric_part
class Line:
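    # A single text line together with the lexical and visual features used to
    # classify it as a header, list item, table row, address or paragraph.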
def __init__(
self,
line_str,
text_list=[],
style_dict={},
page_details={},
noun_chunk_ending_tokens=[],
):
self.text = line_str.strip()
self.visual_line = VisualLine(text_list, style_dict, page_details)
self.words = []
self.is_independent = False
self.is_header = False
self.is_header_without_comma = False
self.noun_chunks = []
self.quoted_words = quote_pattern.findall(self.text)
self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens}
self.parse_line()
def check_header(self):
# Section X, Article Y, Note 1 etc.
first_word_header = self.first_word.lower() in ["section", "article", "note"]
# If there are a certain percentage of title words (first letter capitalize)
title_ratio = (
self.title_word_count / self.eff_word_count
if self.eff_word_count > 0
else 1.0
)
# print(self.title_word_count, self.eff_word_count, title_ratio)
# Section 1 is a header but Section 1: Hello 3 is not
has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10
has_header_structure = (
(first_word_header or has_enough_titles) and self.number_count == 1
) or self.numbered_line or self.text.isupper()
# has_header_structure = has_header_structure and self.eff_word_count <
last_word_number = (
self.last_word.lower() in units
or self.last_word_number
and not has_header_structure
)
last_word_date = self.last_word_date and not has_header_structure
# Find lines ending with sentence delimiter. But exclude text like "L.P."
ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None
sentence_structure = self.ends_with_period and not (
has_header_structure and title_ratio > 0.9
) and ends_with_delim
last_letter_is_punctuation = (
self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and
ends_with_delim
)
self.is_header_without_comma = (
not sentence_structure
and not self.has_list_char
and not self.first_char in footnote_types
and has_enough_titles
and not last_word_number
and (
self.number_count == 0
or (has_header_structure and self.number_count <= 1)
)
and not self.has_continuing_chars
and not last_word_date
and self.first_word_title
and not self.last_word_is_stop_word
and not self.is_zipcode_or_po
and not last_letter_is_punctuation
and not "://" in self.text # url pattern
)
self.is_header = self.is_header_without_comma and \
((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True)
def check_ends_with_period(self):
# punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.']
last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."]
self.ends_with_period = self.last_char in ["."] and not last_word_is_title
def check_table_row(self):
if not self.is_header:
value_count = (
self.number_count
+ self.dollar_count
+ self.pct_count
+ self.text.count(" - ")
)
word_symbols = self.word_count - self.dollar_sign_count
if word_symbols == 0:
word_symbols = 1
word_ratio = (
value_count + self.title_word_count + self.date_entry_count
) / word_symbols
self.is_table_row = (
(
(value_count > 0 or self.date_entry_count > 0)
and word_ratio > 0.7
and not self.ends_with_period
and not self.is_zipcode_or_po
)
and not self.last_word_is_stop_word
or ("...." in self.text)
)
else:
self.is_table_row = False
def check_list_item(self):
text = self.text.strip()
self.has_list_char = text[0] in list_types.keys()
# if not self.has_list_char and text[0] in ambiguous_list_chars:
# self.has_list_char = text[1:].strip()[0].isalpha()
self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$"
if self.is_list_item:
self.list_type = list_types[text[0]]
# matches 1.1 1.2.1 1 etc.
def check_numbered_line(self, word):
trunc_word = word
ends_with_parens = word.endswith(")")
number_end_char = word.endswith(".") or ends_with_parens
number_start_char = word.startswith("(")
if number_start_char and not ends_with_parens:
return False
if word[-1] in ["%", "$", ","]:
return False
if number_end_char:
trunc_word = word[:-1]
if number_start_char:
trunc_word = trunc_word[1:]
# To handle scenarios like (ii)(A)
if ")(" in trunc_word:
trunc_word = trunc_word.split(")(")[0]
parts = trunc_word.split(".")
self.integer_numbered_line = False
self.roman_numbered_line = False
self.letter_numbered_line = False
self.dot_numbered_line = False
mixed_list_items = False
max_digits = 2
max_roman = 6
for idx, part in enumerate(parts):
# print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0)
if len(part) <= max_digits:
# (1), (2), (3)
self.integer_numbered_line = part.isdigit() and (
len(parts) > 1 or word.endswith(")")
)
# 1. 2. 3.
self.dot_numbered_line = part.isdigit() and (
len(parts) > 1 or word.endswith(".")
)
# a. b. c. or a) b) c)
# idx > 0 for patterns like 10.a
# a1 b1 c1 etc.
self.letter_numbered_line = (
True
if single_char_pattern.match(part)
and (
(number_end_char and len(part) == 1 and len(parts) == 1)
or multi_char_pattern.sub("", part).isdigit()
or idx > 0
)
else False
)
if len(part) <= max_roman:
# xi, i, iv
self.roman_numbered_line = (
True if roman_number_pattern.match(part) and idx == 0 else False
)
if part.endswith(")") and part[0].isalnum() and "(" in part:
mixed_list_items = True
# else:
# self.integer_numbered_line = False
# A-1
# self.letter_numbered_line = (
# True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False
# )
self.numbered_line = (
self.integer_numbered_line
or self.roman_numbered_line
or self.letter_numbered_line
or self.dot_numbered_line
) and not mixed_list_items
if not self.numbered_line:
break
if self.numbered_line:
self.start_number = trunc_word
self.line_without_number = self.text[len(word) + 1 :]
self.full_number = self.text[:len(word)]
# check if line is part of address
def check_zipcode_or_pobox(self):
# check if line matches format P.O. box xxxxx
pobox = (
self.word_count == 3
and self.last_word_number
and self.first_word.lower() in ["po", "p.o", "p.o."]
)
# check if line is last part of address, matching format "city, state zipcode"
zipcode = (
self.word_count
< 7 # ensure line is standalone address, not part of larger sentence
and (
self.contains_state # line contains comma followed by state name or abbreviation
# line ends in zipcode, with format xxxxx or xxxxx-xxxx
and (
(self.last_word_number or self.last_word[-4:].isdigit())
and (
(len(self.last_word) == 10 and self.last_word[-5] == "-")
or len(self.last_word) == 5
)
)
and not self.ends_with_period
)
)
self.is_zipcode_or_po = pobox or zipcode
def set_line_type(self):
line_type = "para"
if self.is_table_row:
line_type = "table_row"
elif self.is_header:
line_type = "header"
elif self.is_list_item or self.numbered_line:
line_type = "list_item"
else:
line_type = "para"
self.line_type = line_type
def parse_line(self):
self.words = []
self.title_word_count = 0
self.alpha_count = 0
self.list_type = ""
self.integer_numbered_line = False
self.roman_numbered_line = False
self.dot_numbered_line = False
self.numbered_line = False
self.stop_word_count = 0
self.dollar_count = 0
self.pct_count = 0
self.number_count = 0
self.last_word_number = False
self.first_word_title = False
self.letter_numbered_line = False
self.ends_with_hyphen = False
self.last_word_date = False
self.is_reference_author_name = False
self.date_entry_count = 0
self.last_word_is_stop_word = False # self.last_word in self.stopwords
self.hit_colon = False
self.is_zipcode_or_po = False
self.contains_state = False
self.addresses = []
# todo - this is a stopgap solution, need to make it more efficient
tokens = self.text.split()
self.length = len(self.text)
self.word_count = len(tokens)
self.dollar_sign_count = tokens.count("$")
last_idx = self.word_count - 1
first_alpha_found = False
prev_token_comma = False
self.eff_length = 0
single_letter_word_count = 0
noun_chunk_buf = []
if self.length == 0:
return
for idx, token in enumerate(tokens):
if token in unicode_list_types.keys():
token = unicode_list_types[token]
if token.__contains__(":"):
self.hit_colon = True
# remove punctuation unless (word) or unless it is the first token or if it has colon
last_char = token[-1]
# remove punctuation unless (word) or unless it is the first token
if (
(token[-1] in string.punctuation or token[-1] in end_quotations)
and not (token[0] in string.punctuation or token[0] in start_quotations)
and (not idx == 0 or token[-1] == ":")
):
token = token[0:-1]
if len(token) == 0:
continue
# if prev token contained comma, check if current token is state name
if prev_token_comma and (
token.lower() in states or token.lower() in states_abbreviations
):
self.contains_state = True
prev_token_comma = False
if prev_token_comma:
prev_token_comma = False
if last_char == ",":
prev_token_comma = True
if idx == 0 and not token.lower() == "i" and not token.lower() == "a":
self.check_numbered_line(token)
if token.istitle() or token.isupper(): # and not self.hit_colon:
self.title_word_count = self.title_word_count + 1
if token.isalpha():
# if not self.hit_colon:
self.alpha_count = self.alpha_count + 1
if not first_alpha_found:
first_alpha_found = True
if idx == 0:
self.first_word_title = token[0].isupper()
word = Word(token)
if word.is_number:
self.number_count = self.number_count + 1
if idx == last_idx:
self.last_word_number = True
if word.is_date_entry:
self.date_entry_count += 1
if idx == last_idx:
self.last_word_date = True
if word.is_dollar:
self.dollar_count = self.dollar_count + 1
if idx == last_idx:
self.last_word_number = True
if word.is_percent:
self.pct_count = self.pct_count + 1
if idx == last_idx:
self.last_word_number = True
self.eff_length += word.length
if word.length == 1:
single_letter_word_count += 1
if word.is_stop_word:
if not self.hit_colon:
self.stop_word_count = self.stop_word_count + 1
if idx == last_idx and len(token) != 1 and not token.isupper():
self.last_word_is_stop_word = True
if word.is_noun or word.text == "&":
noun = word.text_without_punct
prev_word = self.words[-1] if len(self.words) > 0 else None
if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf:
noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway
if noun.endswith("'s"):
noun = noun[0:-2]
noun_chunk_buf.append(noun)
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
elif (
"".join([x.lower() for x in noun if x not in {".", ","}])
in self.noun_chunk_ending_tokens
):
noun_chunk_buf.append(noun)
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
else:
noun_chunk_buf.append(noun)
elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]:
noun_chunk_buf.append(word.text_without_punct)
elif len(noun_chunk_buf):
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
self.words.append(word)
if len(noun_chunk_buf) > 0:
self.noun_chunks.append(" ".join(noun_chunk_buf))
self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks))))
self.first_word = tokens[0]
self.last_word = tokens[-1]
self.last_char = self.text[-1]
self.ends_with_period = self.last_char == "."
self.ends_with_comma = self.last_char == ","
self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "."
self.eff_word_count = self.alpha_count - self.stop_word_count
self.check_ends_with_period()
self.first_char = self.text[0]
self.has_continuing_chars = not self.numbered_line and (
self.first_char.islower() or self.first_char in continuing_chars
)
self.last_continuing_char = self.last_char in continuing_chars
self.check_zipcode_or_pobox()
self.check_list_item()
self.check_header()
self.check_table_row()
self.separate_line = (
self.is_header
or self.is_table_row
or self.is_list_item
or self.is_zipcode_or_po
)
self.is_list_or_row = self.is_table_row or self.is_list_item
self.is_header_or_row = (
self.is_header or self.is_table_row or self.is_zipcode_or_po
)
self.ends_with_abbreviation = self.ends_with_period and (
(self.last_word.find(".") != len(self.last_word) - 1)
or self.last_word.lower() in abbreviations
or len(self.last_word) <= 3
)
self.incomplete_line = not self.is_header_or_row and (
not self.ends_with_period
or self.ends_with_abbreviation
or self.end_with_period_single_char
)
self.continuing_line = self.has_continuing_chars and not self.separate_line
self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8
self.set_line_type()
if self.is_header or self.is_header_without_comma:
if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2:
self.is_reference_author_name = True
self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list
# print(self.separate_line)
# self.continuing_line = not self.separate_line and
def to_json(self):
json_lp = dict(self.__dict__)
del json_lp["visual_line"]
words = []
for word in self.words:
words.append(word.__dict__)
json_lp["words"] = words
return json_lp
class VisualLine:
def __init__(self, text_list=[], style_dict={}, page_stats={}):
self.text_list = text_list
self.start_x = None
self.start_y = None
self.end_x = None
self.end_y = None
self.fs = None
self.fw = None
self.start_fs = None
self.end_fs = None
self.diff_prev_y = None
self.diff_next_y = None
self.is_comparably_sized = False
self.is_comparably_bolded = False
self.is_prev_space_smallest = False
self.is_next_space_smallest = False
self.wrapped_page = False
self.text = " ".join(self.text_list)
if style_dict:
self.start_x = style_dict["start_x"][0]
self.start_y = style_dict["start_y"][0]
self.end_x = style_dict["end_x"][-1]
self.end_y = style_dict["end_y"][-1]
self.fs = style_dict["line_fs"][0]
self.fw = style_dict["line_fw"][0]
self.diff_prev_y = style_dict["diff_prev_y"][0]
self.diff_next_y = style_dict["diff_next_y"][0]
self.font_family = (
style_dict["font_family"][0] if len(style_dict["font_family"]) else None
)
self.font_style = (
style_dict["font_style"][0] if len(style_dict["font_style"]) else None
)
self.min_x = (
self.start_x
) # these variables are adjustable during line joins for line width
self.max_x = self.end_x
self.start_x_list = style_dict["start_x"] # joined ents
self.end_x_list = style_dict["end_x"] # joined ents
self.start_x_list_single_ent = style_dict["start_x_list"][0]
self.end_x_list_single_ent = style_dict["end_x_list"][0]
self.mode_fs = mode_of_list(style_dict["line_fs"])
self.tab_count = 0
# calculates tabs for when tika misses word split
if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent):
self.start_end_list = list(
zip(self.start_x_list_single_ent, self.end_x_list_single_ent),
)
for word_x, next_word_x in zip(
self.start_end_list[:-1],
self.start_end_list[1:],
):
word_start_x, word_end_x = word_x
next_word_start_x, next_word_end_x = next_word_x
word_distance = next_word_start_x - word_end_x
if word_distance > 20:
self.tab_count += 1
else:
self.start_end_list = []
self.tab_count_join = 0 # tab count after join in ptolines
# calculates tabs for when tika misses word split
if len(self.start_x_list) == len(self.end_x_list):
self.start_end_list_join = list(
zip(self.start_x_list, self.end_x_list),
)
for word_x, next_word_x in zip(
self.start_end_list_join[:-1],
self.start_end_list_join[1:],
):
word_start_x, word_end_x = word_x
next_word_start_x, next_word_end_x = next_word_x
word_distance = next_word_start_x - word_end_x
if word_distance > 20:
self.tab_count_join += 1
else:
self.start_end_list_join = []
if len(self.text.split()) == 2 and self.tab_count == 1:
self.text_list = self.text.split()
# Count tabs in text list, Eventually make it a function of font size
self.start_fs = round(style_dict["start_fs"][0], 1)
self.end_fs = round(style_dict["end_fs"][-1], 1)
self.compute_visual_features(page_stats)
def compute_visual_features(self, page_stats):
# compute font size relative to most common font
font_sizes_mode = page_stats["mode_fs"]
if self.fs > (4 / 3) * font_sizes_mode:
self.is_comparably_sized = True
else:
self.is_comparably_sized = False
# compute font weight relative to 600.0 which has generally
# been observed to correspond to bolding of some sort
font_weights_mode = page_stats["mode_fw"]
if font_weights_mode >= 600.0:
self.is_comparably_bolded = False
elif self.fw > 600.0:
self.is_comparably_bolded = True
# compare line height for similar type (same font) lines
if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2:
for k, v in page_stats["fs_and_diff_prev_y"].items():
if k == self.fs and 0 <= v < self.diff_prev_y:
break
else:
self.is_prev_space_smallest = True
if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2:
for k, v in page_stats["fs_and_diff_next_y"].items():
if k == self.fs and 0 <= v < self.diff_next_y:
break
else:
self.is_next_space_smallest = True
def should_join_table(self, next_line):
"""
        Check if the next line should be joined to this one as a table row (tr).
        Makes no assumption about whether the current line is itself a table row.
"""
# check list of spaced words
curr_line_ents = len(self.text_list)
next_line_ents = len(next_line.text_list)
ent_match = (
curr_line_ents == next_line_ents and curr_line_ents >= 2
) # tr should have at least two elements
# compare alignment of elements in both lists
if ent_match:
return
return False
def should_join_para(self):
return False
def should_join_header(self):
return False
def __str__(self):
output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest},"
output_str += f"\nfont_style = {self.font_style}"
return output_str
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>import datetime
import logging
import math
import re
import string
from nltk.corpus import stopwords
from .patterns import abbreviations
from .patterns import states
from .patterns import states_abbreviations
from .styling_utils import mode_of_list
try:
stop_words = set(stopwords.words("english"))
except Exception as e:
logging.error(e)
import nltk
    nltk.download("stopwords")  # download the corpus without rebinding the stopwords module
stop_words = set(stopwords.words("english"))
stop_words.add("per")
continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~"
list_chars = [
"•",
"➢",
"*",
"ƒ",
"",
"",
"",
"",
"»",
"☐",
"·",
"�",
"▪",
"▪",
"○",
"",
"–",
]
list_types = {
"•": "circle",
"➢": "wide_symbol_arrow",
"*": "star",
"ƒ": "f",
"": "clock",
"": "small_square",
"": "narrow_symbol_arrow",
"": "large_square",
"»": "double_arrow",
"☐": "hollow_square",
"·": "circle",
"�": "special_char",
"▪": "very_small_square",
"▪": "very_small_square",
"○": "hollow_circle",
"": "hollow_squere",
"–": "dash",
"‒": "another-dash",
"̶": "underscore",
}
unicode_list_types = {
"\\uf0b7": "•",
"\\uf0fc": "",
}
footnote_types = {
"©"
}
ambiguous_list_chars = ["+", "-"]
units = ["acres", "miles", "-"] # - could represent a null value in a row
punctuations = string.punctuation + "“"
start_quotations = ["'", '"', "“"]
end_quotations = ["'", '"', "”"]
"""
Quote Pattern details:
\\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly.
["“\'] ==> Quote patterns
(?!\\D\\s) ==> Negative Lookahead for single character following the quote.
Helps in removing words like Macy's, don't ...
(?!\\d+) ==> Negative Lookahead for one or more digits following the pattern.
Helps in removing words like '19, '2019
(.*?)[,;.]?[”"\'] ==> Match all other data.
"""
# Add / Modify Quotation pattern in ingestor_utils/utils.py also.
quote_pattern = re.compile(
r'(?:(?<=\W)|(?<=^))["“‘’\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[”"‘’\']+',
) # (r'["“\'](.*?)[,;.]?[”"\']')
single_char_pattern = re.compile(r'[a-zA-Z]')
multi_char_pattern = re.compile(r'[a-zA-Z]+')
roman_number_pattern = re.compile(r'[ixvIXV]+$')
ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"“‘’”\'\s]*$")
conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"]
class Word:
def __init__(self, token):
self.text = to<fim_suffix>ken
self.is_percent = False
self.is_number = False
self.is_year = False # year does not count as a number
self.is_dollar = False
self.is_million = False
self.is_billion = False
self.is_thousand = False
self.is_date_entry = False
self.is_negative = False
self.length = len(self.text)
self.is_stop_word = self.text.lower() in stop_words
self.is_number_range = False
self.parts = []
text_without_punct = self.text
while (
len(text_without_punct) > 1 and
(text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations)
):
text_without_punct = text_without_punct[0:-1]
        # remove leading unbalanced punctuation
while (
len(text_without_punct) > 1 and
(text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations)
):
text_without_punct = text_without_punct[1:]
self.text_without_punct = text_without_punct
self.is_noun = self.text_without_punct[0].isupper()
n = self.check_numeric()
self.check_date()
try:
if n:
n = round(float(n))
if n > 0:
digits = int(math.log10(n)) + 1
elif n == 0:
digits = 1
else:
digits = int(math.log10(-n)) + 2
self.num_digits = digits
if digits == 4 and self.text.replace(",", "") == self.text:
self.is_year = True
self.is_number = False
else:
self.num_digits = 0
except Exception as e:
logging.error(e)
self.num_digits = 0
def check_date(self):
if "/" in self.text or "-" in self.text:
text = self.text.replace("/", "-")
date_patterns = [
"%b-%d",
"%B-%d",
"%B-%d-%y",
"%B-%d-%Y",
"%b-%d-%Y",
"%b-%d-%y",
"%m-%d",
"%m-%d-%y",
"%m-%d-%Y",
]
for pat in date_patterns:
try:
datetime.datetime.strptime(text, pat)
self.is_date_entry = True
return
except ValueError:
pass
else:
self.is_date_entry = False
def check_numeric(self):
word = self.text.lower()
if not word.isalpha():
if word.isprintable():
if not word.isnumeric():
if word.startswith("(") and word.endswith(")"):
word = word[1:-1]
if word.startswith("-"):
self.is_negative = True
word = word[1:]
if word.startswith("$"):
self.is_dollar = True
word = word[1:]
elif word.endswith("$"):
self.is_dollar = True
word = word[0:-1]
elif word.endswith("%"):
self.is_percent = True
word = word[0:-1]
elif word.endswith("m"):
self.is_million = True
elif word.endswith("bn"):
self.is_billion = True
if word.startswith("(") and word.endswith(")"):
word = word[1:-1]
word = word.replace(",", "")
if word.isnumeric() or word.replace(".", "", 1).isnumeric():
self.is_number = True
parts = word.split("-")
if (
len(parts) == 2
and parts[0].isnumeric()
and parts[1].isnumeric()
):
self.is_number_range = True
self.parts = parts
else:
self.is_number = True
if self.is_number:
numeric_part = word
return numeric_part
class Line:
def __init__(
self,
line_str,
text_list=[],
style_dict={},
page_details={},
noun_chunk_ending_tokens=[],
):
self.text = line_str.strip()
self.visual_line = VisualLine(text_list, style_dict, page_details)
self.words = []
self.is_independent = False
self.is_header = False
self.is_header_without_comma = False
self.noun_chunks = []
self.quoted_words = quote_pattern.findall(self.text)
self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens}
self.parse_line()
def check_header(self):
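        # A line is flagged as a header when it is short, mostly title-cased and does not
        # read like a sentence, list item, table row, address or URL.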
# Section X, Article Y, Note 1 etc.
first_word_header = self.first_word.lower() in ["section", "article", "note"]
# If there are a certain percentage of title words (first letter capitalize)
title_ratio = (
self.title_word_count / self.eff_word_count
if self.eff_word_count > 0
else 1.0
)
# print(self.title_word_count, self.eff_word_count, title_ratio)
# Section 1 is a header but Section 1: Hello 3 is not
has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10
has_header_structure = (
(first_word_header or has_enough_titles) and self.number_count == 1
) or self.numbered_line or self.text.isupper()
# has_header_structure = has_header_structure and self.eff_word_count <
last_word_number = (
self.last_word.lower() in units
or self.last_word_number
and not has_header_structure
)
last_word_date = self.last_word_date and not has_header_structure
# Find lines ending with sentence delimiter. But exclude text like "L.P."
ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None
sentence_structure = self.ends_with_period and not (
has_header_structure and title_ratio > 0.9
) and ends_with_delim
last_letter_is_punctuation = (
self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and
ends_with_delim
)
self.is_header_without_comma = (
not sentence_structure
and not self.has_list_char
and not self.first_char in footnote_types
and has_enough_titles
and not last_word_number
and (
self.number_count == 0
or (has_header_structure and self.number_count <= 1)
)
and not self.has_continuing_chars
and not last_word_date
and self.first_word_title
and not self.last_word_is_stop_word
and not self.is_zipcode_or_po
and not last_letter_is_punctuation
and not "://" in self.text # url pattern
)
self.is_header = self.is_header_without_comma and \
((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True)
def check_ends_with_period(self):
# punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.']
last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."]
self.ends_with_period = self.last_char in ["."] and not last_word_is_title
def check_table_row(self):
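        # The ratio of numeric, dollar, percent and date tokens to words decides
        # whether a non-header line is treated as a table row.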
if not self.is_header:
value_count = (
self.number_count
+ self.dollar_count
+ self.pct_count
+ self.text.count(" - ")
)
word_symbols = self.word_count - self.dollar_sign_count
if word_symbols == 0:
word_symbols = 1
word_ratio = (
value_count + self.title_word_count + self.date_entry_count
) / word_symbols
self.is_table_row = (
(
(value_count > 0 or self.date_entry_count > 0)
and word_ratio > 0.7
and not self.ends_with_period
and not self.is_zipcode_or_po
)
and not self.last_word_is_stop_word
or ("...." in self.text)
)
else:
self.is_table_row = False
def check_list_item(self):
text = self.text.strip()
self.has_list_char = text[0] in list_types.keys()
# if not self.has_list_char and text[0] in ambiguous_list_chars:
# self.has_list_char = text[1:].strip()[0].isalpha()
self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$"
if self.is_list_item:
self.list_type = list_types[text[0]]
# matches 1.1 1.2.1 1 etc.
def check_numbered_line(self, word):
trunc_word = word
ends_with_parens = word.endswith(")")
number_end_char = word.endswith(".") or ends_with_parens
number_start_char = word.startswith("(")
if number_start_char and not ends_with_parens:
return False
if word[-1] in ["%", "$", ","]:
return False
if number_end_char:
trunc_word = word[:-1]
if number_start_char:
trunc_word = trunc_word[1:]
# To handle scenarios like (ii)(A)
if ")(" in trunc_word:
trunc_word = trunc_word.split(")(")[0]
parts = trunc_word.split(".")
self.integer_numbered_line = False
self.roman_numbered_line = False
self.letter_numbered_line = False
self.dot_numbered_line = False
mixed_list_items = False
max_digits = 2
max_roman = 6
for idx, part in enumerate(parts):
# print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0)
if len(part) <= max_digits:
# (1), (2), (3)
self.integer_numbered_line = part.isdigit() and (
len(parts) > 1 or word.endswith(")")
)
# 1. 2. 3.
self.dot_numbered_line = part.isdigit() and (
len(parts) > 1 or word.endswith(".")
)
# a. b. c. or a) b) c)
# idx > 0 for patterns like 10.a
# a1 b1 c1 etc.
self.letter_numbered_line = (
True
if single_char_pattern.match(part)
and (
(number_end_char and len(part) == 1 and len(parts) == 1)
or multi_char_pattern.sub("", part).isdigit()
or idx > 0
)
else False
)
if len(part) <= max_roman:
# xi, i, iv
self.roman_numbered_line = (
True if roman_number_pattern.match(part) and idx == 0 else False
)
if part.endswith(")") and part[0].isalnum() and "(" in part:
mixed_list_items = True
# else:
# self.integer_numbered_line = False
# A-1
# self.letter_numbered_line = (
# True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False
# )
self.numbered_line = (
self.integer_numbered_line
or self.roman_numbered_line
or self.letter_numbered_line
or self.dot_numbered_line
) and not mixed_list_items
if not self.numbered_line:
break
if self.numbered_line:
self.start_number = trunc_word
self.line_without_number = self.text[len(word) + 1 :]
self.full_number = self.text[:len(word)]
# check if line is part of address
def check_zipcode_or_pobox(self):
# check if line matches format P.O. box xxxxx
pobox = (
self.word_count == 3
and self.last_word_number
and self.first_word.lower() in ["po", "p.o", "p.o."]
)
# check if line is last part of address, matching format "city, state zipcode"
zipcode = (
self.word_count
< 7 # ensure line is standalone address, not part of larger sentence
and (
self.contains_state # line contains comma followed by state name or abbreviation
# line ends in zipcode, with format xxxxx or xxxxx-xxxx
and (
(self.last_word_number or self.last_word[-4:].isdigit())
and (
(len(self.last_word) == 10 and self.last_word[-5] == "-")
or len(self.last_word) == 5
)
)
and not self.ends_with_period
)
)
self.is_zipcode_or_po = pobox or zipcode
def set_line_type(self):
line_type = "para"
if self.is_table_row:
line_type = "table_row"
elif self.is_header:
line_type = "header"
elif self.is_list_item or self.numbered_line:
line_type = "list_item"
else:
line_type = "para"
self.line_type = line_type
def parse_line(self):
self.words = []
self.title_word_count = 0
self.alpha_count = 0
self.list_type = ""
self.integer_numbered_line = False
self.roman_numbered_line = False
self.dot_numbered_line = False
self.numbered_line = False
self.stop_word_count = 0
self.dollar_count = 0
self.pct_count = 0
self.number_count = 0
self.last_word_number = False
self.first_word_title = False
self.letter_numbered_line = False
self.ends_with_hyphen = False
self.last_word_date = False
self.is_reference_author_name = False
self.date_entry_count = 0
self.last_word_is_stop_word = False # self.last_word in self.stopwords
self.hit_colon = False
self.is_zipcode_or_po = False
self.contains_state = False
self.addresses = []
# todo - this is a stopgap solution, need to make it more efficient
tokens = self.text.split()
self.length = len(self.text)
self.word_count = len(tokens)
self.dollar_sign_count = tokens.count("$")
last_idx = self.word_count - 1
first_alpha_found = False
prev_token_comma = False
self.eff_length = 0
single_letter_word_count = 0
noun_chunk_buf = []
if self.length == 0:
return
for idx, token in enumerate(tokens):
if token in unicode_list_types.keys():
token = unicode_list_types[token]
if token.__contains__(":"):
self.hit_colon = True
# remove punctuation unless (word) or unless it is the first token or if it has colon
last_char = token[-1]
# remove punctuation unless (word) or unless it is the first token
if (
(token[-1] in string.punctuation or token[-1] in end_quotations)
and not (token[0] in string.punctuation or token[0] in start_quotations)
and (not idx == 0 or token[-1] == ":")
):
token = token[0:-1]
if len(token) == 0:
continue
# if prev token contained comma, check if current token is state name
if prev_token_comma and (
token.lower() in states or token.lower() in states_abbreviations
):
self.contains_state = True
prev_token_comma = False
if prev_token_comma:
prev_token_comma = False
if last_char == ",":
prev_token_comma = True
if idx == 0 and not token.lower() == "i" and not token.lower() == "a":
self.check_numbered_line(token)
if token.istitle() or token.isupper(): # and not self.hit_colon:
self.title_word_count = self.title_word_count + 1
if token.isalpha():
# if not self.hit_colon:
self.alpha_count = self.alpha_count + 1
if not first_alpha_found:
first_alpha_found = True
if idx == 0:
self.first_word_title = token[0].isupper()
word = Word(token)
if word.is_number:
self.number_count = self.number_count + 1
if idx == last_idx:
self.last_word_number = True
if word.is_date_entry:
self.date_entry_count += 1
if idx == last_idx:
self.last_word_date = True
if word.is_dollar:
self.dollar_count = self.dollar_count + 1
if idx == last_idx:
self.last_word_number = True
if word.is_percent:
self.pct_count = self.pct_count + 1
if idx == last_idx:
self.last_word_number = True
self.eff_length += word.length
if word.length == 1:
single_letter_word_count += 1
if word.is_stop_word:
if not self.hit_colon:
self.stop_word_count = self.stop_word_count + 1
if idx == last_idx and len(token) != 1 and not token.isupper():
self.last_word_is_stop_word = True
if word.is_noun or word.text == "&":
noun = word.text_without_punct
prev_word = self.words[-1] if len(self.words) > 0 else None
if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf:
noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway
if noun.endswith("'s"):
noun = noun[0:-2]
noun_chunk_buf.append(noun)
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
elif (
"".join([x.lower() for x in noun if x not in {".", ","}])
in self.noun_chunk_ending_tokens
):
noun_chunk_buf.append(noun)
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
else:
noun_chunk_buf.append(noun)
elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]:
noun_chunk_buf.append(word.text_without_punct)
elif len(noun_chunk_buf):
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
self.words.append(word)
if len(noun_chunk_buf) > 0:
self.noun_chunks.append(" ".join(noun_chunk_buf))
self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks))))
self.first_word = tokens[0]
self.last_word = tokens[-1]
self.last_char = self.text[-1]
self.ends_with_period = self.last_char == "."
self.ends_with_comma = self.last_char == ","
self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "."
self.eff_word_count = self.alpha_count - self.stop_word_count
self.check_ends_with_period()
self.first_char = self.text[0]
self.has_continuing_chars = not self.numbered_line and (
self.first_char.islower() or self.first_char in continuing_chars
)
self.last_continuing_char = self.last_char in continuing_chars
self.check_zipcode_or_pobox()
self.check_list_item()
self.check_header()
self.check_table_row()
self.separate_line = (
self.is_header
or self.is_table_row
or self.is_list_item
or self.is_zipcode_or_po
)
self.is_list_or_row = self.is_table_row or self.is_list_item
self.is_header_or_row = (
self.is_header or self.is_table_row or self.is_zipcode_or_po
)
self.ends_with_abbreviation = self.ends_with_period and (
(self.last_word.find(".") != len(self.last_word) - 1)
or self.last_word.lower() in abbreviations
or len(self.last_word) <= 3
)
self.incomplete_line = not self.is_header_or_row and (
not self.ends_with_period
or self.ends_with_abbreviation
or self.end_with_period_single_char
)
self.continuing_line = self.has_continuing_chars and not self.separate_line
self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8
self.set_line_type()
if self.is_header or self.is_header_without_comma:
if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2:
self.is_reference_author_name = True
self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list
# print(self.separate_line)
# self.continuing_line = not self.separate_line and
def to_json(self):
json_lp = dict(self.__dict__)
del json_lp["visual_line"]
words = []
for word in self.words:
words.append(word.__dict__)
json_lp["words"] = words
return json_lp
class VisualLine:
def __init__(self, text_list=[], style_dict={}, page_stats={}):
self.text_list = text_list
self.start_x = None
self.start_y = None
self.end_x = None
self.end_y = None
self.fs = None
self.fw = None
self.start_fs = None
self.end_fs = None
self.diff_prev_y = None
self.diff_next_y = None
self.is_comparably_sized = False
self.is_comparably_bolded = False
self.is_prev_space_smallest = False
self.is_next_space_smallest = False
self.wrapped_page = False
self.text = " ".join(self.text_list)
if style_dict:
self.start_x = style_dict["start_x"][0]
self.start_y = style_dict["start_y"][0]
self.end_x = style_dict["end_x"][-1]
self.end_y = style_dict["end_y"][-1]
self.fs = style_dict["line_fs"][0]
self.fw = style_dict["line_fw"][0]
self.diff_prev_y = style_dict["diff_prev_y"][0]
self.diff_next_y = style_dict["diff_next_y"][0]
self.font_family = (
style_dict["font_family"][0] if len(style_dict["font_family"]) else None
)
self.font_style = (
style_dict["font_style"][0] if len(style_dict["font_style"]) else None
)
self.min_x = (
self.start_x
) # these variables are adjustable during line joins for line width
self.max_x = self.end_x
self.start_x_list = style_dict["start_x"] # joined ents
self.end_x_list = style_dict["end_x"] # joined ents
self.start_x_list_single_ent = style_dict["start_x_list"][0]
self.end_x_list_single_ent = style_dict["end_x_list"][0]
self.mode_fs = mode_of_list(style_dict["line_fs"])
self.tab_count = 0
# calculates tabs for when tika misses word split
if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent):
self.start_end_list = list(
zip(self.start_x_list_single_ent, self.end_x_list_single_ent),
)
for word_x, next_word_x in zip(
self.start_end_list[:-1],
self.start_end_list[1:],
):
word_start_x, word_end_x = word_x
next_word_start_x, next_word_end_x = next_word_x
word_distance = next_word_start_x - word_end_x
if word_distance > 20:
self.tab_count += 1
else:
self.start_end_list = []
self.tab_count_join = 0 # tab count after join in ptolines
# calculates tabs for when tika misses word split
if len(self.start_x_list) == len(self.end_x_list):
self.start_end_list_join = list(
zip(self.start_x_list, self.end_x_list),
)
for word_x, next_word_x in zip(
self.start_end_list_join[:-1],
self.start_end_list_join[1:],
):
word_start_x, word_end_x = word_x
next_word_start_x, next_word_end_x = next_word_x
word_distance = next_word_start_x - word_end_x
if word_distance > 20:
self.tab_count_join += 1
else:
self.start_end_list_join = []
if len(self.text.split()) == 2 and self.tab_count == 1:
self.text_list = self.text.split()
# Count tabs in text list, Eventually make it a function of font size
self.start_fs = round(style_dict["start_fs"][0], 1)
self.end_fs = round(style_dict["end_fs"][-1], 1)
self.compute_visual_features(page_stats)
def compute_visual_features(self, page_stats):
# compute font size relative to most common font
font_sizes_mode = page_stats["mode_fs"]
if self.fs > (4 / 3) * font_sizes_mode:
self.is_comparably_sized = True
else:
self.is_comparably_sized = False
# compute font weight relative to 600.0 which has generally
# been observed to correspond to bolding of some sort
font_weights_mode = page_stats["mode_fw"]
if font_weights_mode >= 600.0:
self.is_comparably_bolded = False
elif self.fw > 600.0:
self.is_comparably_bolded = True
# compare line height for similar type (same font) lines
if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2:
for k, v in page_stats["fs_and_diff_prev_y"].items():
if k == self.fs and 0 <= v < self.diff_prev_y:
break
else:
self.is_prev_space_smallest = True
if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2:
for k, v in page_stats["fs_and_diff_next_y"].items():
if k == self.fs and 0 <= v < self.diff_next_y:
break
else:
self.is_next_space_smallest = True
def should_join_table(self, next_line):
"""
        Check if the next line should be joined to this one as a table row (tr).
        Makes no assumption about whether the current line is itself a table row.
"""
# check list of spaced words
curr_line_ents = len(self.text_list)
next_line_ents = len(next_line.text_list)
ent_match = (
curr_line_ents == next_line_ents and curr_line_ents >= 2
) # tr should have at least two elements
# compare alignment of elements in both lists
if ent_match:
return
return False
def should_join_para(self):
return False
def should_join_header(self):
return False
def __str__(self):
output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest},"
output_str += f"\nfont_style = {self.font_style}"
return output_str
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
# File: nlm-ingestor/nlm_ingestor/ingestor_utils/utils.py
import json
import re
import numpy as np
from nltk import load
from nltk import PunktSentenceTokenizer
nltk_abbs = load("tokenizers/punkt/{}.pickle".format("english"))._params.abbrev_types
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
return super(NpEncoder, self).default(obj)
nlm_abbs = {
"u.s",
"u.s.a",
"n.w",
"p.o",
"po",
"st",
"ave",
"blvd",
"ctr",
"cir",
"ct",
"dr",
"mtn",
"apt",
"hwy",
"esq",
"fig",
"no",
"sec",
"n.a",
"s.a.b",
"non-u.s",
"cap",
'u.s.c',
"ste",
}
nlm_special_abbs = {
"inc",
}
abbs = nltk_abbs | nlm_abbs
nltk_tokenzier = PunktSentenceTokenizer()
rules = []
for abb in abbs:
# match start of the sentence
pattern = fr"^{abb}.\s"
replaced = f"{abb}_ "
# case insensitive replacement for synonyms
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
# match token in sentence
pattern = fr"\s{abb}.\s"
replaced = f" {abb}_ "
# case insensitive replacement for synonyms
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
for abb in nlm_special_abbs:
pattern = fr"{abb}\."
replaced = f"{abb}_"
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
# match content inside brackets
# (?<=\() ==> starts with "("
# ([^)]+) ==> repeat not ")"
# (?=\))") ==> ends with ")"
bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))")
space_rule = re.compile(r"\s([.'](?:\s|$|\D))", re.IGNORECASE) # Remove any space between punctuations (.')
quotation_pattern = re.compile(r'[”“"‘’\']')
def sent_tokenize(org_texts):
if not org_texts:
return org_texts
sents = []
# in case org_texts has \n, break it into multiple paragraph
# edge case for html and markdown
for org_text in org_texts.split("\n"):
org_text = space_rule.sub(r'\1', org_text)
modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text) # To handle bug https://github.com/nltk/nltk/issues/2925
orig_offset = abs(len(org_text) - len(modified_text))
# do not break bracket
for span_group in bracket_rule.finditer(modified_text):
start_byte, end_byte = span_group.span()
span = modified_text[start_byte:end_byte]
# skip this logic when span is too big? disabled for now
# if len(span.split()) >= 10:
# continue
modified_text = modified_text.replace(
f"({span})", f"_{span.replace('.','_')}_",
)
for rule, replaced in rules:
modified_text = rule.sub(replaced, modified_text)
# Normalize all the quotation.
modified_text = quotation_pattern.sub("\"", modified_text)
        modified_sents = nltk_tokenzier.tokenize(modified_text)
offset = orig_offset
sent_idx = 0
while offset < len(modified_text) and sent_idx < len(modified_sents):
if modified_text[offset] == " ":
offset += 1
continue
# cut org_text based on lengths of modified_sent
modified_sent = modified_sents[sent_idx]
sents.append(org_text[offset: offset + len(modified_sent)])
offset += len(modified_sent)
sent_idx += 1
if len(sents) >= 2 and re.match(r"^.\.$", sents[0]):
sents[1] = sents[0] + " " + sents[1]
sents = sents[1:]
return sents
def divide_list_into_chunks(lst, n):
# looping till length l
for i in range(0, len(lst), n):
yield lst[i : i + n]
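# Illustrative sketch (invented sample, not from the original source):
# list(divide_list_into_chunks([1, 2, 3, 4, 5], 2)) should yield [[1, 2], [3, 4], [5]];
# the final chunk simply carries the remainder.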
def normalize(X):
norms = np.einsum("ij,ij->i", X, X)
np.sqrt(norms, norms)
X /= norms[:, np.newaxis]
return X
def detect_block_center_aligned(block, page_width):
center_location = block["box_style"][1] + block["box_style"][3] / 2
center_aligned = abs(center_location - page_width / 2) < page_width * 0.01
width_check = block["box_style"][3] * 2 < page_width
return center_aligned and width_check
def detect_block_center_of_page(block, page_height):
bottom = block["box_style"][0] + block["box_style"][4]
center_of_page = (page_height / 3) <= bottom <= ((2 * page_height) / 3)
return center_of_page
def check_char_is_word_boundary(c):
if c.isalnum():
return False
if c in ['-', '_']:
return False
return True
def blocks_to_sents(blocks, flatten_merged_table=False, debug=False):
block_texts = []
block_info = []
header_block_idx = -1
header_match_idx = -1
header_match_idx_offset = -1
header_block_text = ""
is_rendering_table = False
is_rendering_merged_cells = False
table_idx = 0
levels = []
prev_header = None
block_idx = 0
for block_idx, block in enumerate(blocks):
block_type = block["block_type"]
if block_type == "header":
if debug:
print("---", block["level"], block["block_text"])
header_block_text = block["block_text"]
header_block_idx = block["block_idx"]
header_match_idx = header_match_idx_offset + 1
if prev_header and block["level"] <= prev_header['level'] and len(levels) > 0:
while len(levels) > 0 and levels[-1]["level"] >= block["level"]:
if debug:
print("<<", levels[-1]["level"], levels[-1]["block_text"])
levels.pop(-1)
if debug:
print(">>", block["block_text"])
levels.append(block)
prev_header = block
if debug:
print("-", [str(level['level']) + "-" + level['block_text'] for level in levels])
block["header_text"] = header_block_text
block["header_block_idx"] = header_block_idx
block["header_match_idx"] = header_match_idx
block["block_idx"] = block_idx
level_chain = []
for level in levels:
level_chain.append({"block_idx": level["block_idx"], "block_text": level["block_text"]})
# remove a level for header
if block_type == "header":
level_chain = level_chain[:-1]
level_chain.reverse()
block["level_chain"] = level_chain
# if block_type == "header" or block_type == "table_row":
if (
block_type == "header"
and not is_rendering_table and 'is_table_start' not in block
):
block_texts.append(block["block_text"])
# append text from next block to header block
# TODO: something happened here, it messed up the match_text
# if block_type == "header" and block_idx + 1 < len(blocks):
# block[
# "block_text"
# ] += blocks[block_idx+1]['block_text']
block_info.append(block)
header_match_idx_offset += 1
elif (
block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item"
) and not is_rendering_table:
block_sents = block["block_sents"]
header_match_idx_offset += len(block_sents)
for sent in block_sents:
block_texts.append(sent)
block_info.append(block)
elif 'is_table_start' in block:
is_rendering_table = True
if 'has_merged_cells' in block:
is_rendering_merged_cells = True
elif 'is_table_start' not in block and not is_rendering_table and block_type == "table_row":
block_info.append(block)
block_texts.append(block["block_text"])
header_match_idx_offset += 1
if is_rendering_table:
if is_rendering_merged_cells and "effective_para" in block and flatten_merged_table:
eff_header_block = block["effective_header"]
eff_para_block = block["effective_para"]
eff_header_block["header_text"] = block["header_text"]
eff_header_block["header_block_idx"] = block["block_idx"]
eff_header_block["header_match_idx"] = header_match_idx_offset + 1
eff_header_block["level"] = block["level"] + 1
eff_header_block["level_chain"] = block["level_chain"]
eff_para_block["header_block_idx"] = block["block_idx"]
eff_para_block["header_match_idx"] = header_match_idx_offset + 1
eff_para_block["level"] = block["level"] + 2
eff_para_block["level_chain"] = [
{
"block_idx": eff_header_block["block_idx"],
"block_text": eff_header_block["block_text"],
},
] + eff_header_block["level_chain"]
header_match_idx_offset += 1
block_info.append(block["effective_header"])
block_texts.append(block["effective_header"]["block_text"])
for sent in block["effective_para"]["block_sents"]:
block_texts.append(sent)
block_info.append(block["effective_para"])
header_match_idx_offset += len(block["effective_para"]["block_sents"])
else:
block["table_idx"] = table_idx
block_info.append(block)
block_texts.append(block["block_text"])
header_match_idx_offset += 1
if 'is_table_end' in block:
is_rendering_table = False
table_idx += 1
return block_texts, block_info
def get_block_texts(blocks):
block_texts = []
block_info = []
for block in blocks:
block_type = block["block_type"]
if (
block_type == "list_item"
or block_type == "para"
or block_type == "numbered_list_item"
or block_type == "header"
):
block_texts.append(block["block_text"])
block_info.append(block)
    return block_texts, block_info
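# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# The sample string is invented. The point is that abbreviations such as "Dr." and "Inc."
# are temporarily rewritten so the Punkt tokenizer does not split sentences on them, and
# the original spelling is restored in the returned sentences.
if __name__ == "__main__":
    sample_text = "Dr. Smith joined Acme Inc. in 2019. He works on 150 Broadway."
    for sentence in sent_tokenize(sample_text):
        print(sentence)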
# File: nlm-ingestor/nlm_ingestor/ingestor/line_parser.py
import datetime
import logging
import math
import re
import string
from nltk.corpus import stopwords
from .patterns import abbreviations
from .patterns import states
from .patterns import states_abbreviations
from .styling_utils import mode_of_list
try:
stop_words = set(stopwords.words("english"))
except Exception as e:
logging.error(e)
    import nltk
    nltk.download("stopwords")
    stop_words = set(stopwords.words("english"))
stop_words.add("per")
continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~"
list_chars = [
"•",
"➢",
"*",
"ƒ",
"",
"",
"",
"",
"»",
"☐",
"·",
"�",
"▪",
"▪",
"○",
"",
"–",
]
list_types = {
"•": "circle",
"➢": "wide_symbol_arrow",
"*": "star",
"ƒ": "f",
"": "clock",
"": "small_square",
"": "narrow_symbol_arrow",
"": "large_square",
"»": "double_arrow",
"☐": "hollow_square",
"·": "circle",
"�": "special_char",
"▪": "very_small_square",
"▪": "very_small_square",
"○": "hollow_circle",
"": "hollow_squere",
"–": "dash",
"‒": "another-dash",
"̶": "underscore",
}
unicode_list_types = {
"\\uf0b7": "•",
"\\uf0fc": "",
}
footnote_types = {
"©"
}
ambiguous_list_chars = ["+", "-"]
units = ["acres", "miles", "-"] # - could represent a null value in a row
punctuations = string.punctuation + "“"
start_quotations = ["'", '"', "“"]
end_quotations = ["'", '"', "”"]
"""
Quote Pattern details:
\\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly.
["“\'] ==> Quote patterns
(?!\\D\\s) ==> Negative Lookahead for single character following the quote.
Helps in removing words like Macy's, don't ...
(?!\\d+) ==> Negative Lookahead for one or more digits following the pattern.
Helps in removing words like '19, '2019
(.*?)[,;.]?[”"\'] ==> Match all other data.
"""
# Add / Modify Quotation pattern in ingestor_utils/utils.py also.
quote_pattern = re.compile(
r'(?:(?<=\W)|(?<=^))["“‘’\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[”"‘’\']+',
) # (r'["“\'](.*?)[,;.]?[”"\']')
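# Illustrative sketch (invented sample, not from the original source): applied to
# 'He said "hello there." and left', quote_pattern.findall should yield ['hello there'],
# i.e. the quoted span with the surrounding quotes and trailing punctuation stripped.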
single_char_pattern = re.compile(r'[a-zA-Z]')
multi_char_pattern = re.compile(r'[a-zA-Z]+')
roman_number_pattern = re.compile(r'[ixvIXV]+$')
ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"“‘’”\'\s]*$")
conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"]
class Word:
def __init__(self, token):
self.text = token
self.is_percent = False
self.is_number = False
self.is_year = False # year does not count as a number
self.is_dollar = False
self.is_million = False
self.is_billion = False
self.is_thousand = False
self.is_date_entry = False
self.is_negative = False
self.length = len(self.text)
self.is_stop_word = self.text.lower() in stop_words
self.is_number_range = False
self.parts = []
text_without_punct = self.text
while (
len(text_without_punct) > 1 and
(text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations)
):
text_without_punct = text_without_punct[0:-1]
        # remove leading unbalanced punctuation
while (
len(text_without_punct) > 1 and
(text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations)
):
text_without_punct = text_without_punct[1:]
self.text_without_punct = text_without_punct
self.is_noun = self.text_without_punct[0].isupper()
n = self.check_numeric()
self.check_date()
try:
if n:
n = round(float(n))
if n > 0:
digits = int(math.log10(n)) + 1
elif n == 0:
digits = 1
else:
digits = int(math.log10(-n)) + 2
self.num_digits = digits
if digits == 4 and self.text.replace(",", "") == self.text:
self.is_year = True
self.is_number = False
else:
self.num_digits = 0
except Exception as e:
logging.error(e)
self.num_digits = 0
def check_date(self):
if "/" in self.text or "-" in self.text:
text = self.text.replace("/", "-")
date_patterns = [
"%b-%d",
"%B-%d",
"%B-%d-%y",
"%B-%d-%Y",
"%b-%d-%Y",
"%b-%d-%y",
"%m-%d",
"%m-%d-%y",
"%m-%d-%Y",
]
for pat in date_patterns:
try:
datetime.datetime.strptime(text, pat)
self.is_date_entry = True
return
except ValueError:
pass
else:
self.is_date_entry = False
def check_numeric(self):
word = self.text.lower()
if not word.isalpha():
if word.isprintable():
if not word.isnumeric():
if word.startswith("(") and word.endswith(")"):
word = word[1:-1]
if word.startswith("-"):
self.is_negative = True
word = word[1:]
if word.startswith("$"):
self.is_dollar = True
word = word[1:]
elif word.endswith("$"):
self.is_dollar = True
word = word[0:-1]
elif word.endswith("%"):
self.is_percent = True
word = word[0:-1]
elif word.endswith("m"):
self.is_million = True
elif word.endswith("bn"):
self.is_billion = True
if word.startswith("(") and word.endswith(")"):
word = word[1:-1]
word = word.replace(",", "")
if word.isnumeric() or word.replace(".", "", 1).isnumeric():
self.is_number = True
parts = word.split("-")
if (
len(parts) == 2
and parts[0].isnumeric()
and parts[1].isnumeric()
):
self.is_number_range = True
self.parts = parts
else:
self.is_number = True
if self.is_number:
numeric_part = word
return numeric_part
class Line:
def __init__(
self,
line_str,
text_list=[],
style_dict={},
page_details={},
noun_chunk_ending_tokens=[],
):
self.text = line_str.strip()
self.visual_line = VisualLine(text_list, style_dict, page_details)
self.words = []
self.is_independent = False
self.is_header = False
self.is_header_without_comma = False
self.noun_chunks = []
self.quoted_words = quote_pattern.findall(self.text)
self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens}
self.parse_line()
def check_header(self):
# Section X, Article Y, Note 1 etc.
first_word_header = self.first_word.lower() in ["section", "article", "note"]
# If there are a certain percentage of title words (first letter capitalize)
title_ratio = (
self.title_word_count / self.eff_word_count
if self.eff_word_count > 0
else 1.0
)
# print(self.title_word_count, self.eff_word_count, title_ratio)
# Section 1 is a header but Section 1: Hello 3 is not
has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10
has_header_structure = (
(first_word_header or has_enough_titles) and self.number_count == 1
) or self.numbered_line or self.text.isupper()
# has_header_structure = has_header_structure and self.eff_word_count <
last_word_number = (
self.last_word.lower() in units
or self.last_word_number
and not has_header_structure
)
last_word_date = self.last_word_date and not has_header_structure
# Find lines ending with sentence delimiter. But exclude text like "L.P."
ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None
sentence_structure = self.ends_with_period and not (
has_header_structure and title_ratio > 0.9
) and ends_with_delim
last_letter_is_punctuation = (
self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and
ends_with_delim
)
self.is_header_without_comma = (
not sentence_structure
and not self.has_list_char
and not self.first_char in footnote_types
and has_enough_titles
and not last_word_number
and (
self.number_count == 0
or (has_header_structure and self.number_count <= 1)
)
and not self.has_continuing_chars
and not last_word_date
and self.first_word_title
and not self.last_word_is_stop_word
and not self.is_zipcode_or_po
and not last_letter_is_punctuation
and not "://" in self.text # url pattern
)
self.is_header = self.is_header_without_comma and \
((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True)
def check_ends_with_period(self):
# punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.']
last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."]
self.ends_with_period = self.last_char in ["."] and not last_word_is_title
def check_table_row(self):
if not self.is_header:
value_count = (
self.number_count
+ self.dollar_count
+ self.pct_count
+ self.text.count(" - ")
)
word_symbols = self.word_count - self.dollar_sign_count
if word_symbols == 0:
word_symbols = 1
word_ratio = (
value_count + self.title_word_count + self.date_entry_count
) / word_symbols
self.is_table_row = (
(
(value_count > 0 or self.date_entry_count > 0)
and word_ratio > 0.7
and not self.ends_with_period
and not self.is_zipcode_or_po
)
and not self.last_word_is_stop_word
or ("...." in self.text)
)
else:
            self.is_table_row = False
def check_list_item(self):
text = self.text.strip()
self.has_list_char = text[0] in list_types.keys()
# if not self.has_list_char and text[0] in ambiguous_list_chars:
# self.has_list_char = text[1:].strip()[0].isalpha()
self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$"
if self.is_list_item:
self.list_type = list_types[text[0]]
# matches 1.1 1.2.1 1 etc.
def check_numbered_line(self, word):
trunc_word = word
ends_with_parens = word.endswith(")")
number_end_char = word.endswith(".") or ends_with_parens
number_start_char = word.startswith("(")
if number_start_char and not ends_with_parens:
return False
if word[-1] in ["%", "$", ","]:
return False
if number_end_char:
trunc_word = word[:-1]
if number_start_char:
trunc_word = trunc_word[1:]
# To handle scenarios like (ii)(A)
if ")(" in trunc_word:
trunc_word = trunc_word.split(")(")[0]
parts = trunc_word.split(".")
self.integer_numbered_line = False
self.roman_numbered_line = False
self.letter_numbered_line = False
self.dot_numbered_line = False
mixed_list_items = False
max_digits = 2
max_roman = 6
for idx, part in enumerate(parts):
# print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0)
if len(part) <= max_digits:
# (1), (2), (3)
self.integer_numbered_line = part.isdigit() and (
len(parts) > 1 or word.endswith(")")
)
# 1. 2. 3.
self.dot_numbered_line = part.isdigit() and (
len(parts) > 1 or word.endswith(".")
)
# a. b. c. or a) b) c)
# idx > 0 for patterns like 10.a
# a1 b1 c1 etc.
self.letter_numbered_line = (
True
if single_char_pattern.match(part)
and (
(number_end_char and len(part) == 1 and len(parts) == 1)
or multi_char_pattern.sub("", part).isdigit()
or idx > 0
)
else False
)
if len(part) <= max_roman:
# xi, i, iv
self.roman_numbered_line = (
True if roman_number_pattern.match(part) and idx == 0 else False
)
if part.endswith(")") and part[0].isalnum() and "(" in part:
mixed_list_items = True
# else:
# self.integer_numbered_line = False
# A-1
# self.letter_numbered_line = (
# True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False
# )
self.numbered_line = (
self.integer_numbered_line
or self.roman_numbered_line
or self.letter_numbered_line
or self.dot_numbered_line
) and not mixed_list_items
if not self.numbered_line:
break
if self.numbered_line:
self.start_number = trunc_word
self.line_without_number = self.text[len(word) + 1 :]
self.full_number = self.text[:len(word)]
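    # Illustrative sketch (invented samples, not from the original source): as first tokens,
    # "(1)" sets integer_numbered_line, "3." sets dot_numbered_line, "a)" sets
    # letter_numbered_line, and "iv." sets roman_numbered_line.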
# check if line is part of address
def check_zipcode_or_pobox(self):
# check if line matches format P.O. box xxxxx
pobox = (
self.word_count == 3
and self.last_word_number
and self.first_word.lower() in ["po", "p.o", "p.o."]
)
# check if line is last part of address, matching format "city, state zipcode"
zipcode = (
self.word_count
< 7 # ensure line is standalone address, not part of larger sentence
and (
self.contains_state # line contains comma followed by state name or abbreviation
# line ends in zipcode, with format xxxxx or xxxxx-xxxx
and (
(self.last_word_number or self.last_word[-4:].isdigit())
and (
(len(self.last_word) == 10 and self.last_word[-5] == "-")
or len(self.last_word) == 5
)
)
and not self.ends_with_period
)
)
self.is_zipcode_or_po = pobox or zipcode
def set_line_type(self):
line_type = "para"
if self.is_table_row:
line_type = "table_row"
elif self.is_header:
line_type = "header"
elif self.is_list_item or self.numbered_line:
line_type = "list_item"
else:
line_type = "para"
self.line_type = line_type
def parse_line(self):
self.words = []
self.title_word_count = 0
self.alpha_count = 0
self.list_type = ""
self.integer_numbered_line = False
self.roman_numbered_line = False
self.dot_numbered_line = False
self.numbered_line = False
self.stop_word_count = 0
self.dollar_count = 0
self.pct_count = 0
self.number_count = 0
self.last_word_number = False
self.first_word_title = False
self.letter_numbered_line = False
self.ends_with_hyphen = False
self.last_word_date = False
self.is_reference_author_name = False
self.date_entry_count = 0
self.last_word_is_stop_word = False # self.last_word in self.stopwords
self.hit_colon = False
self.is_zipcode_or_po = False
self.contains_state = False
self.addresses = []
# todo - this is a stopgap solution, need to make it more efficient
tokens = self.text.split()
self.length = len(self.text)
self.word_count = len(tokens)
self.dollar_sign_count = tokens.count("$")
last_idx = self.word_count - 1
first_alpha_found = False
prev_token_comma = False
self.eff_length = 0
single_letter_word_count = 0
noun_chunk_buf = []
if self.length == 0:
return
for idx, token in enumerate(tokens):
if token in unicode_list_types.keys():
token = unicode_list_types[token]
if token.__contains__(":"):
self.hit_colon = True
# remove punctuation unless (word) or unless it is the first token or if it has colon
last_char = token[-1]
# remove punctuation unless (word) or unless it is the first token
if (
(token[-1] in string.punctuation or token[-1] in end_quotations)
and not (token[0] in string.punctuation or token[0] in start_quotations)
and (not idx == 0 or token[-1] == ":")
):
token = token[0:-1]
if len(token) == 0:
continue
# if prev token contained comma, check if current token is state name
if prev_token_comma and (
token.lower() in states or token.lower() in states_abbreviations
):
self.contains_state = True
prev_token_comma = False
if prev_token_comma:
prev_token_comma = False
if last_char == ",":
prev_token_comma = True
if idx == 0 and not token.lower() == "i" and not token.lower() == "a":
self.check_numbered_line(token)
if token.istitle() or token.isupper(): # and not self.hit_colon:
self.title_word_count = self.title_word_count + 1
if token.isalpha():
# if not self.hit_colon:
self.alpha_count = self.alpha_count + 1
if not first_alpha_found:
first_alpha_found = True
if idx == 0:
self.first_word_title = token[0].isupper()
word = Word(token)
if word.is_number:
self.number_count = self.number_count + 1
if idx == last_idx:
self.last_word_number = True
if word.is_date_entry:
self.date_entry_count += 1
if idx == last_idx:
self.last_word_date = True
if word.is_dollar:
self.dollar_count = self.dollar_count + 1
if idx == last_idx:
self.last_word_number = True
if word.is_percent:
self.pct_count = self.pct_count + 1
if idx == last_idx:
self.last_word_number = True
self.eff_length += word.length
if word.length == 1:
single_letter_word_count += 1
if word.is_stop_word:
if not self.hit_colon:
self.stop_word_count = self.stop_word_count + 1
if idx == last_idx and len(token) != 1 and not token.isupper():
self.last_word_is_stop_word = True
if word.is_noun or word.text == "&":
noun = word.text_without_punct
prev_word = self.words[-1] if len(self.words) > 0 else None
if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf:
noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway
if noun.endswith("'s"):
noun = noun[0:-2]
noun_chunk_buf.append(noun)
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
elif (
"".join([x.lower() for x in noun if x not in {".", ","}])
in self.noun_chunk_ending_tokens
):
noun_chunk_buf.append(noun)
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
else:
noun_chunk_buf.append(noun)
elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]:
noun_chunk_buf.append(word.text_without_punct)
elif len(noun_chunk_buf):
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
self.words.append(word)
if len(noun_chunk_buf) > 0:
self.noun_chunks.append(" ".join(noun_chunk_buf))
self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks))))
self.first_word = tokens[0]
self.last_word = tokens[-1]
self.last_char = self.text[-1]
self.ends_with_period = self.last_char == "."
self.ends_with_comma = self.last_char == ","
self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "."
self.eff_word_count = self.alpha_count - self.stop_word_count
self.check_ends_with_period()
self.first_char = self.text[0]
self.has_continuing_chars = not self.numbered_line and (
self.first_char.islower() or self.first_char in continuing_chars
)
self.last_continuing_char = self.last_char in continuing_chars
self.check_zipcode_or_pobox()
self.check_list_item()
self.check_header()
self.check_table_row()
self.separate_line = (
self.is_header
or self.is_table_row
or self.is_list_item
or self.is_zipcode_or_po
)
self.is_list_or_row = self.is_table_row or self.is_list_item
self.is_header_or_row = (
self.is_header or self.is_table_row or self.is_zipcode_or_po
)
self.ends_with_abbreviation = self.ends_with_period and (
(self.last_word.find(".") != len(self.last_word) - 1)
or self.last_word.lower() in abbreviations
or len(self.last_word) <= 3
)
self.incomplete_line = not self.is_header_or_row and (
not self.ends_with_period
or self.ends_with_abbreviation
or self.end_with_period_single_char
)
self.continuing_line = self.has_continuing_chars and not self.separate_line
self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8
self.set_line_type()
if self.is_header or self.is_header_without_comma:
if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2:
self.is_reference_author_name = True
self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list
# print(self.separate_line)
# self.continuing_line = not self.separate_line and
def to_json(self):
json_lp = dict(self.__dict__)
del json_lp["visual_line"]
words = []
for word in self.words:
words.append(word.__dict__)
json_lp["words"] = words
return json_lp
class VisualLine:
def __init__(self, text_list=[], style_dict={}, page_stats={}):
self.text_list = text_list
self.start_x = None
self.start_y = None
self.end_x = None
self.end_y = None
self.fs = None
self.fw = None
self.start_fs = None
self.end_fs = None
self.diff_prev_y = None
self.diff_next_y = None
self.is_comparably_sized = False
self.is_comparably_bolded = False
self.is_prev_space_smallest = False
self.is_next_space_smallest = False
self.wrapped_page = False
self.text = " ".join(self.text_list)
if style_dict:
self.start_x = style_dict["start_x"][0]
self.start_y = style_dict["start_y"][0]
self.end_x = style_dict["end_x"][-1]
self.end_y = style_dict["end_y"][-1]
self.fs = style_dict["line_fs"][0]
self.fw = style_dict["line_fw"][0]
self.diff_prev_y = style_dict["diff_prev_y"][0]
self.diff_next_y = style_dict["diff_next_y"][0]
self.font_family = (
style_dict["font_family"][0] if len(style_dict["font_family"]) else None
)
self.font_style = (
style_dict["font_style"][0] if len(style_dict["font_style"]) else None
)
self.min_x = (
self.start_x
) # these variables are adjustable during line joins for line width
self.max_x = self.end_x
self.start_x_list = style_dict["start_x"] # joined ents
self.end_x_list = style_dict["end_x"] # joined ents
self.start_x_list_single_ent = style_dict["start_x_list"][0]
self.end_x_list_single_ent = style_dict["end_x_list"][0]
self.mode_fs = mode_of_list(style_dict["line_fs"])
self.tab_count = 0
# calculates tabs for when tika misses word split
if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent):
self.start_end_list = list(
zip(self.start_x_list_single_ent, self.end_x_list_single_ent),
)
for word_x, next_word_x in zip(
self.start_end_list[:-1],
self.start_end_list[1:],
):
word_start_x, word_end_x = word_x
next_word_start_x, next_word_end_x = next_word_x
word_distance = next_word_start_x - word_end_x
if word_distance > 20:
self.tab_count += 1
else:
self.start_end_list = []
self.tab_count_join = 0 # tab count after join in ptolines
# calculates tabs for when tika misses word split
if len(self.start_x_list) == len(self.end_x_list):
self.start_end_list_join = list(
zip(self.start_x_list, self.end_x_list),
)
for word_x, next_word_x in zip(
self.start_end_list_join[:-1],
self.start_end_list_join[1:],
):
word_start_x, word_end_x = word_x
next_word_start_x, next_word_end_x = next_word_x
word_distance = next_word_start_x - word_end_x
if word_distance > 20:
self.tab_count_join += 1
else:
self.start_end_list_join = []
if len(self.text.split()) == 2 and self.tab_count == 1:
self.text_list = self.text.split()
# Count tabs in text list, Eventually make it a function of font size
self.start_fs = round(style_dict["start_fs"][0], 1)
self.end_fs = round(style_dict["end_fs"][-1], 1)
self.compute_visual_features(page_stats)
def compute_visual_features(self, page_stats):
# compute font size relative to most common font
font_sizes_mode = page_stats["mode_fs"]
if self.fs > (4 / 3) * font_sizes_mode:
self.is_comparably_sized = True
else:
self.is_comparably_sized = False
# compute font weight relative to 600.0 which has generally
# been observed to correspond to bolding of some sort
font_weights_mode = page_stats["mode_fw"]
if font_weights_mode >= 600.0:
self.is_comparably_bolded = False
elif self.fw > 600.0:
self.is_comparably_bolded = True
# compare line height for similar type (same font) lines
if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2:
for k, v in page_stats["fs_and_diff_prev_y"].items():
if k == self.fs and 0 <= v < self.diff_prev_y:
break
else:
self.is_prev_space_smallest = True
if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2:
for k, v in page_stats["fs_and_diff_next_y"].items():
if k == self.fs and 0 <= v < self.diff_next_y:
break
else:
self.is_next_space_smallest = True
def should_join_table(self, next_line):
"""
        Check if the next line should be joined as a tr. This makes no assumption about whether the current line is a table.
"""
# check list of spaced words
curr_line_ents = len(self.text_list)
next_line_ents = len(next_line.text_list)
ent_match = (
curr_line_ents == next_line_ents and curr_line_ents >= 2
) # tr should have at least two elements
# compare alignment of elements in both lists
if ent_match:
return
return False
def should_join_para(self):
return False
def should_join_header(self):
return False
def __str__(self):
output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest},"
output_str += f"\nfont_style = {self.font_style}"
return output_str
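# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# The sample strings are invented. With no style information supplied, VisualLine falls back
# to its defaults, so classification relies purely on the textual features computed above.
if __name__ == "__main__":
    header_line = Line("Section 3 Governing Law")
    row_line = Line("Total revenue $1,200 $1,350")
    print(header_line.line_type)  # expected to be classified as a header
    print(row_line.line_type)     # expected to be classified as a table row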
# File: nlm-ingestor/nlm_ingestor/ingestor_utils/spell_utils.py
import logging
import os
import string
from symspellpy.symspellpy import SymSpell
from symspellpy.symspellpy import Verbosity
import nlm_ingestor.ingestor as ingestor
from nlm_ingestor.ingestor import patterns
logger = logging.getLogger(__name__)
class SpellUtil:
def __init__(self):
self.sym_spell = SymSpell(2, 7)
dictionary_path = os.path.join(
os.path.dirname(os.path.abspath(ingestor.__file__)),
"../ingestor_models/symspell/frequency_dictionary_en_82_765.txt",
)
bigram_path = os.path.join(
os.path.dirname(os.path.abspath(ingestor.__file__)),
"../ingestor_models/symspell/frequency_dictionary_en_82_765.txt",
)
if not self.sym_spell.load_dictionary(
dictionary_path, term_index=0, count_index=1,
):
logging.error(f"Dictionary file not found: {dictionary_path}")
return
if not self.sym_spell.load_bigram_dictionary(
bigram_path, term_index=0, count_index=2,
):
logger.error(f"Bigram dictionary file not found: {bigram_path}")
return
def lookup_word(self, input_term):
max_edit_distance_lookup = 2
suggestion_verbosity = Verbosity.CLOSEST
# ignore_token = None
ignore_token = "|".join(patterns.spell_check)
suggestions = self.sym_spell.lookup(
input_term,
suggestion_verbosity,
max_edit_distance_lookup,
transfer_casing=False,
ignore_token=ignore_token,
)
# print(suggestions)
# for suggestion in suggestions:
# print("{}, {}, {}".format(suggestion.term, suggestion.distance,
# suggestion.count))
if len(suggestions) > 0:
return suggestions[0].term
else:
return input_term
# def lookup_sentence(self, input_term):
def lookup_compound(self, input_term):
max_edit_distance_lookup = 2
suggestions = self.sym_spell.lookup_compound(
input_term,
max_edit_distance_lookup,
transfer_casing=True,
ignore_non_words=True,
)
# for suggestion in suggestions:
# print("{}, {}, {}".format(suggestion.term, suggestion.distance,
# suggestion.count))
if len(suggestions) > 0:
return suggestions[0].term
else:
return input_term
def segment(self, input_term):
is_mixed_case_term = not input_term.islower()
if is_mixed_case_term:
input_term = input_term.lower()
        suggestion = self.sym_spell.word_segmentation(input_term)
corrected_string = suggestion.corrected_string
if is_mixed_case_term:
corrected_string = string.capwords(corrected_string)
return corrected_string
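# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# It assumes the bundled SymSpell frequency dictionary ships with the package, as the
# constructor above expects; the sample inputs are invented.
if __name__ == "__main__":
    su = SpellUtil()
    print(su.lookup_word("memebers"))   # expected to suggest a close dictionary term such as "members"
    print(su.segment("thequickbrown"))  # expected to re-insert word boundaries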
# File: nlm-ingestor/nlm_ingestor/ingestor/processors.py
import logging
import re
from collections import Counter
from collections import defaultdict
from . import formatter
from . import line_parser
from . import patterns
from nlm_ingestor.ingestor_utils import spell_utils
from nlm_ingestor.ingestor_utils.utils import sent_tokenize
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
su = spell_utils.SpellUtil()
def stem(line):
line = line.replace("'s", "")
line = line.replace("’s", "")
return line
def check_parentheses(text):
count = 0
for i in text:
if i == "(":
count += 1
elif i == ")":
count -= 1
return count == 0
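# Illustrative sketch (invented samples): check_parentheses("a (b) c") returns True and
# check_parentheses("a (b c") returns False. Note that only the counts are compared, so a
# string such as ")(" also returns True.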
def nlm_tokenize(line):
# print(line)
tokens = []
if not line:
line = ""
line = line.lower()
    trans_table = line.maketrans("-/", "  ")
line = line.translate(trans_table)
line = line.translate(str.maketrans("", "", "�\\(*,.?•\\➢ƒ–\\)'\"—"))
# line = patterns.num_unit.sub(r"100 \1", line)
line = patterns.num_unit.sub(r"", line)
line = stem(line)
words = line.split()
for word in words:
if (
not word.isdigit()
and not word.endswith("%")
and not word.startswith("$")
and not word.endswith("$")
):
tokens.append(word)
if len(tokens) == 0:
tokens.append("unknown")
return tokens
# make sure that there is at least one word which is greater than two characters
def find_floating_chars(line):
words = line.split(" ")
for word in words:
if len(word) > 2:
return False
return True
def is_table_row(line):
line = line_parser.Line(line)
return line.is_table_row
def should_skip(line, xml=False):
return len(line) <= 2 if not xml else len(line) == 0
def clean_lines(lines, xml=False):
result = []
running_line = ""
line_buffer = []
line_type = "para"
header_block_idx = -1
block_idx = 0
line_set = set()
for line_str in lines:
# print(line_str)
line_str = clean_line(line_str)
if should_skip(line_str, xml=xml):
continue
line_without_numbers = re.sub(r"\d+", "", line_str)
if line_without_numbers in line_set:
continue
else:
line_set.add(line_without_numbers)
curr_line = line_parser.Line(line_str)
        # this converts strings like 'e x e c u t i v e summary' to 'executive summary'
if not xml and curr_line.has_spaced_characters:
line_str = fix_spaced_characters(line_str)
curr_line = line_parser.Line(line_str)
        if len(line_buffer) > 0:
# find out if previous line was a discontinous line
prev_line = line_buffer[-1]
logger.debug("========")
logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n")
logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n")
# keep connecting lines as long as they seem incomplete
is_incomplete = prev_line.incomplete_line or (
len(line_buffer) > 1 and not prev_line.ends_with_period
)
logger.debug(
f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}",
)
if (
is_incomplete
and not (curr_line.is_list_or_row or curr_line.line_type == "list_item")
) or curr_line.continuing_line:
logger.debug("connecting..")
running_line = formatter.connect(running_line, curr_line.text)
line_buffer.append(curr_line)
# if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers
if not line_type == "list_item":
line_type = "para"
else: # commit the line and start a new line
                # remove different types of bulleted list (for better formatting) but do not touch numbered line
logger.debug("starting new line..")
# if line_type == "list_item":
# running_line = running_line[1:].lstrip()
if line_type == "header":
header_block_idx = block_idx
block = {
"block_idx": block_idx,
"block_text": running_line,
"block_type": line_type,
"text_group_start_idx": -1,
"block_list": [],
"header_block_idx": header_block_idx,
"level": 0,
}
result.append(block)
block_idx = block_idx + 1
running_line = curr_line.text
line_buffer = [curr_line]
line_type = curr_line.line_type
logger.debug("========")
else:
running_line = curr_line.text
line_type = curr_line.line_type
line_buffer = [curr_line]
if line_type == "list_item" and running_line[0] in "�\\*,.?•\\➢ƒ–\\'\"—":
running_line = running_line[1:].lstrip()
block = {
"block_idx": block_idx,
"block_text": running_line,
"block_type": line_type,
"text_group_start_idx": -1,
"block_list": [],
"header_block_idx": header_block_idx,
"level": 0,
}
result.append(block)
return result
def line_list_check(prev_line, curr_line, list_char):
# if prev_line is list_item and list_char matches curr_line
if list_char == curr_line.text[0] and list_char not in ["”", "'", '"', "("]:
return True
# same char is alpha
if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha():
if len(prev_line.text) >= 2 and prev_line.text[1].isupper():
# spell check first word
first_word = prev_line.text.split(" ")[0]
first_word = first_word.replace("'", "")
correct_word = su.segment(first_word)
if first_word[1:] == correct_word:
return True
# same char is not alpha but not digit
if prev_line.text[0] == curr_line.text[0] and not (
prev_line.text[0].isalpha()
or prev_line.text[0].isdigit()
or list_char not in ["”", "'", '"', "("]
):
return True
return False
def should_join_table(prev_line, curr_line, ents_aligned):
"""
    Check if the next line should be joined as a tr. This makes no assumption about whether the current line is a table.
"""
# print()
# print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list)
# check list of spaced words
curr_line_ents = len(prev_line.visual_line.text_list)
next_line_ents = len(curr_line.visual_line.text_list)
ent_match = (
curr_line_ents == next_line_ents and curr_line_ents >= 2
) # tr should have at least two elements
# print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count)
tab_match = (
prev_line.visual_line.tab_count == curr_line.visual_line.tab_count
and curr_line.visual_line.tab_count > 0
)
# casing should also be the same
same_case = (
prev_line.text[0].islower() == curr_line.text[0].islower()
or prev_line.text[0].isupper() == curr_line.text[0].isupper()
)
colon_check = (
prev_line.hit_colon
and curr_line.hit_colon
and prev_line
and same_case
and not prev_line.incomplete_line
)
# if prev_line.hit_colon and curr_line.hit_colon:
# print()
# print("colon check")
# print(prev_line.visual_line.text_list)
# print(curr_line.visual_line.text_list)
# col_check
# print(tab_match, ent_match, colon_check)
tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count
return (
(tab_match and ent_match)
or colon_check
or (ents_aligned and ent_match and tab_check)
)
def check_page_spacing(prev_line, curr_line, spacing_dict):
# print("^"*50)
# print("checking page stats")
# print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text)
# print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text)
# print()
diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y)
# find best fs reference
prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs}
curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs}
same_fs = prev_line_fs.intersection(curr_line_fs)
fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs
min_check = (
spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None
)
max_check = (
spacing_dict[(fs, diff_top + 1)] if (fs, diff_top + 1) in spacing_dict else None
)
normal_check = (fs, diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3
if min_check or normal_check or max_check:
# get all fs in spacing dict
# see if the diff top is a min
# print("checking space dict")
distance_list = []
for val in spacing_dict:
if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2:
distance_list.append((val, val[1]))
# print(distance_list)
val = min(distance_list) if len(distance_list) else []
if len(val):
join_fs, join_top = val[0]
if len(val):
join_fs, join_top = val[0]
if val[0] == (fs, diff_top): # or close
# print("SHOULDJOIN")
return True
elif (
join_fs == fs
and ((diff_top - 1) == join_top)
or ((diff_top + 1) == join_top)
):
return True
return False
def compute_overlap(
start_x0: float,
end_x0: float,
start_x1: float,
end_x1: float,
divide_by_min=True,
) -> float:
"""
Computes the % of intersection (overlap) of two lines w.r.t. the shortest line
"""
width_x0 = abs(end_x0 - start_x0)
width_x1 = abs(end_x1 - start_x1)
if start_x0 <= start_x1 <= end_x0:
intersect = min(abs(end_x0 - start_x1), width_x1)
elif start_x0 <= end_x1 <= end_x0:
intersect = min(abs(end_x1 - start_x0), width_x1)
elif start_x1 <= start_x0 <= end_x0 <= end_x1:
intersect = abs(end_x0 - start_x0)
else:
intersect = 0.0
if divide_by_min:
intersect /= min(width_x0, width_x1) + 1e-5
else:
intersect /= max(width_x0, width_x1) + 1e-5
return intersect
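# Worked example (invented values): compute_overlap(0, 10, 5, 20) intersects on [5, 10],
# i.e. 5 units; divided by the shorter width (10) this returns roughly 0.5.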
def compute_overlap_top_bottom(
start_x0: float,
end_x0: float,
start_x1: float,
end_x1: float,
) -> float:
"""
This is different from the above function.
Finds percentage overlap of top to bottom.
    A score of 100% is possible because it does not reference the shortest line.
"""
width_x1 = abs(end_x1 - start_x1)
if width_x1 == 0:
return 0.0
if start_x0 <= start_x1:
# measure from left to right
if end_x1 <= end_x0:
# if start and end both less, full in subset
return 1.0
return (end_x1 - start_x0) / width_x1
else:
# measure from bottom start
if end_x1 <= start_x0:
return 0.0
return (end_x1 - start_x0) / width_x1
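# Worked example (invented values): compute_overlap_top_bottom(0, 20, 5, 15) returns 1.0,
# since the second span [5, 15] lies entirely inside the first span [0, 20].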
def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1):
"""
This is different from the above function.
Finds percentage overlap of top to bottom.
    A score of 100% is possible because it does not reference the shortest line.
"""
# print(start_x0, end_x0)
# print(start_x1, end_x1)
if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line
# print()
# print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0))
return (end_x1 - start_x1) / (end_x0 - start_x0)
# other conditions
# elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line
# return
# else: #to the right of bottom line
return 1.0
# header check for lines with similar font
def visual_header_check(prev_line, curr_line, same_font):
# check top overlap (small) if the font size is bigger
# print()
# print("visual_header check:")
# print("prev", prev_line.text)
# print("checking", curr_line.text)
# top also has to be higher
# print("prev_line.visual_line.start_y, prev_line.visual_line.end_y")
# print(prev_line.visual_line.start_y, prev_line.visual_line.end_y)
# print(prev_line.visual_line.start_y, curr_line.visual_line.start_y)
if prev_line.visual_line.wrapped_page:
return False
if prev_line.visual_line.start_y < curr_line.visual_line.start_y:
prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x
curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x
# print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x")
# print(prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x)
# print("curr_line.visual_line.min_x, curr_line.visual_line.max_x")
# print(curr_line.visual_line.min_x, curr_line.visual_line.max_x)
# print("prev_line_width / curr_line_width")
# print(prev_line_width / curr_line_width)
# print("prev_line_width, curr_line_width")
# print(prev_line_width, curr_line_width)
if curr_line_width == 0:
return False
# print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x))
if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x):
if round(prev_line_width) == round(curr_line_width):
# print()
# print("NOT A HEADER1")
return False
offset = 0
# print(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
# print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x)
if prev_line.visual_line.min_x <= curr_line.visual_line.min_x:
offset = curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset
# print("(prev_line_width - offset) / curr_line_width")
# print((prev_line_width - offset) / curr_line_width)
overlap_percentage = (prev_line_width - offset) / curr_line_width
different_font_style = (
prev_line.visual_line.fw != curr_line.visual_line.fw
or prev_line.visual_line[1] != curr_line.visual_line[1]
or prev_line.visual_line.fs > curr_line.visual_line.fs
)
if (
overlap_percentage < 0.3
or (different_font_style and overlap_percentage < 0.6)
or (prev_line.line_type == "header" and different_font_style)
# or (prev_line.is_header and different_font_style)
):
# print("HEADER INDENT", prev_line.is_header)
# print("overlap rule::", (prev_line_width - offset) / curr_line_width)
# print(True)
return True
# print(False)
# print()
# print("NOT A HEADER")
return False
def visual_header_from_stats(prev_line, curr_line, page_stats):
prev_fs = prev_line.visual_line.fs
curr_fs = curr_line.visual_line.fs
median_val = round(page_stats["median_fs"])
max_val = round(max(page_stats["fs_list"]))
max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if max_val != 0 else True
prev_fs_diff = round(prev_fs - median_val)
curr_fs_diff = (
round(curr_fs - median_val) if round(curr_fs - median_val) else 0.8
) # curr_fs is the median
varied_set = len(set(page_stats["fs_list"])) >= 4
rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]])
unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"])
prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff
# print("prev_fs, curr_fs", prev_fs, curr_fs)
# print("unique text")
# print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) )
# print("visual_header check", len(set(page_stats["fs_list"])))
# print("varied_set", varied_set, "unique_text", unique_text)
# print(rounded_fs_count)
# print()
# close from max or far enough from median
bigger_text = max_val_diff or (
prev_curr_ratio_from_median > 2
) # TODO text must also be relatively uncommon
if varied_set and (unique_text <= 0.08):
if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3:
# print(max_val_diff)
# print(prev_fs, prev_line.text)
# print(curr_fs, curr_line.text)
# print()
return True
# header join
if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1):
# print(max_val_diff)
# print(prev_fs, prev_line.text)
# print(curr_fs, curr_line.text)
# print()
return True
return False
# def visual_clean_lines(lines, page_stats={}, page_info_dict={}):
def check_tr_alignment(prev_line, curr_line):
# print("-=" * 50)
# print("check_tr_alignment!")
# print(prev_line.text)
# print(curr_line.text)
# print()
prev_ents = len(prev_line.visual_line.text_list)
curr_ents = len(curr_line.visual_line.text_list)
prev_positions = prev_line.visual_line.start_x_list
curr_positions = curr_line.visual_line.start_x_list
prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent
curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent
# print(prev_line_start_ents)
# print(curr_line_start_ents)
same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1
if len(prev_line_start_ents) == len(curr_line_start_ents):
prev_positions = prev_line_start_ents
curr_positions = curr_line_start_ents
if len(prev_line_start_ents) == len(curr_positions) and len(
prev_line_start_ents,
) != len(
prev_positions,
): # joined p_tags
prev_positions = prev_line_start_ents
if not same_ents:
# print("check_tr_alignment False1")
# print(prev_ents, curr_ents)
return False
# print("CHECKING POSITIONS")
# print(prev_positions)
# print(curr_positions)
for p_x, c_x in zip(prev_positions, curr_positions):
p_x = round(p_x)
c_x = round(c_x)
if abs(p_x - c_x) > 100:
# print("False")
# print("check_tr_alignment False3")
return False
# print("check_tr_alignment True")
return True
def check_layout(prev_line, curr_line, prev_above_curr):
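    # Flags a misplaced text group: returns True when the two lines overlap horizontally
    # even though prev_line is not above curr_line.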
prev_line_width = range(
int(prev_line.visual_line.min_x),
int(prev_line.visual_line.max_x),
)
# weird edge case
if not prev_line_width:
prev_line_width = range(
int(prev_line.visual_line.max_x),
int(prev_line.visual_line.min_x),
)
curr_line_width = range(
int(curr_line.visual_line.min_x),
int(curr_line.visual_line.max_x),
)
prev_line_width = set(prev_line_width)
prev_curr_overlap = prev_line_width.intersection(curr_line_width)
if prev_curr_overlap and not prev_above_curr:
# print(prev_line.text)
# print(curr_line.text)
# print("misplaced text group")
# print()
return True
return False
def order_blocks(blocks):
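    # Groups blocks by group_id, orders the groups by their starting y position, then
    # re-indexes block_idx and header_block_idx to match the new order.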
block_group_dict = defaultdict(list)
for idx, block in enumerate(blocks):
# print(idx, "block-group", block["group_id"], block["block_type"], block['block_text'])
group_id = block["group_id"]
block_group_dict[group_id].append(block)
block_group_list = [] # list that holds tuples (group_id, y_pos)
for block_group_id in block_group_dict:
block_group_list.append(
(block_group_id, block_group_dict[block_group_id][0]["y"]),
) # append starting y position of group
block_group_list = sorted(
block_group_list,
key=lambda x: x[1],
) # sort block groups by y position
# get list of ordered block group keys
ordered_blocks = []
for block_group_id, y in block_group_list:
ordered_blocks += block_group_dict[block_group_id]
# for b in original_blocks:
# re-index blocks and headers based off of new ordering
header_idx = 0
for idx, block in enumerate(ordered_blocks):
block["block_idx"] = idx
if block["block_type"] == "header":
header_idx = idx
ordered_blocks[idx]["header_block_idx"] = header_idx
return ordered_blocks
def visual_clean_lines(
lines,
page_stats={},
page_info_dict={},
page_idx=0,
line_set={},
):
page_blocks = []
header_block_idx = -1
block_idx = 0
# block_idx = page_idx
style_dict = {}
join_font_spacing = False
prev_line = None
text_list = []
prev_ents = 0
curr_ents = 0
is_incomplete = False
colon_rule = False
text_group_start = True
text_group_start_idx = 0
prev_line = None
next_line = None
# for idx, line in enumerate(lines[12:14]):
sentence_visual_end = False
group_id = 0
for idx, line in enumerate(lines):
# print(idx)
line_str, style_dict, text_list = (
line["text"],
line["style"],
line["text_list"],
)
line_str = " ".join(line_str.split())
if should_skip(line_str):
continue
if line_str in line_set:
continue
if len(line_str.split()) > 8:
line_set.add(line_str)
curr_line = line_parser.Line(
line_str=line_str,
style_dict=style_dict,
text_list=text_list,
page_details=page_stats,
)
if prev_line is None:
# initialize memory of previous line.
# this will update with join decisions
list_char = ""
if curr_line.line_type == "list_item":
list_char = curr_line.text[0]
curr_line.text = curr_line.text[1:].lstrip()
if curr_line.line_type == "header":
header_block_idx = block_idx
block = {
"block_idx": block_idx,
"block_text": curr_line.text,
"block_type": curr_line.line_type,
"header_block_idx": header_block_idx,
"block_group": [curr_line.visual_line.text_list],
"list_char": list_char,
"fs": curr_line.visual_line.start_fs,
"text_group_start_idx": text_group_start_idx,
"block_list": curr_line.visual_line.text_list,
"line": curr_line,
"y": curr_line.visual_line.start_y,
"group_id": group_id,
}
prev_line = curr_line
block_idx += 1
# if (idx <= 3) or (idx >= len(lines) - 3):
# line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip()
# if line_without_numbers:
# # track block_idx for de-duplication
# line_set[line_without_numbers].append((page_idx, block_idx))
page_blocks.append(block)
continue
# print("--" * 50)
# print(prev_line.line_type, "\n", prev_line.text)
# print(prev_ents)
# print(prev_line.visual_line.fw_list)
# print(prev_line.visual_line.font_family)
# print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text)
# print(prev_line.visual_line.mode_fs)
# print(curr_line.line_type, "\n", curr_line.text)
# print(curr_ents)
# print()
# print(curr_line.visual_line.font_family)
# print(curr_line.visual_line.mode_fs)
# print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text)
if (
len(prev_line.text) > 1
and len(curr_line.text) > 1
and prev_line.text[:2] == curr_line.text[:2]
and prev_line.text[1] == " "
and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit())
and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha())
):
curr_line.line_type = "list_item"
curr_line.is_list_item = True
curr_line.is_list_or_row = True
if page_blocks[-1]["block_type"] != "list_item":
page_blocks[-1]["block_type"] = "list_item"
page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0]
page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][
1:
].lstrip()
same_start_fs = (
abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5
)
same_end_fs = (
abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5
)
same_end_start_fs = (
abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5
)
prev_above_curr = (
True
if prev_line.visual_line.end_y < curr_line.visual_line.start_y
else False
)
y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y
top_overlap = compute_overlap_top_bottom(
start_x0=prev_line.visual_line.start_x,
end_x0=prev_line.visual_line.end_x,
start_x1=curr_line.visual_line.start_x,
end_x1=curr_line.visual_line.end_x,
)
bottom_overlap = compute_bottom_top_overlap(
start_x0=prev_line.visual_line.start_x,
end_x0=prev_line.visual_line.end_x,
start_x1=curr_line.visual_line.start_x,
end_x1=curr_line.visual_line.end_x,
)
prev_overlap_curr = True if bottom_overlap or top_overlap else False
use_visual_join = True if prev_above_curr and prev_overlap_curr else False
if not use_visual_join and prev_line.incomplete_line:
join_font_spacing = True
if not (prev_line.is_table_row or curr_line.is_table_row):
if page_stats["n_lines"] <= 3:
join_font_spacing = True
else:
join_font_spacing = check_page_spacing(
prev_line,
curr_line,
page_stats["fs_and_diff_next_y"],
)
# if the font is different and font-family is different
different_font_family = (
curr_line.visual_line.font_family != prev_line.visual_line.font_family
)
different_common_fs = (
prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs
and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs
)
different_font = (
different_font_family and different_common_fs and not join_font_spacing
)
# start and end characters are same font or the mode of fonts of both lines is the same
same_font = (
(prev_line.visual_line.fs == curr_line.visual_line.fs)
or (same_start_fs and same_end_fs)
or same_end_start_fs
or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs
) and not different_font
prev_ents = (
len(prev_line.visual_line.text_list)
if not prev_line.line_type == "list_item"
else 0
)
curr_ents = (
len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0
)
ents_aligned = check_tr_alignment(prev_line, curr_line)
is_incomplete_sent = (
prev_line.incomplete_line
and not prev_line.ends_with_period
or prev_line.ends_with_comma
)
# logic using line after curr
if idx + 1 < len(lines):
            # this is inefficient as line_parser is called twice,
# once for next_line and once for curr_line.
next_line = lines[idx + 1]
# print("NEXT LINE\n", next_line['text'])
next_line_str, next_style_dict, next_text_list = (
next_line["text"],
next_line["style"],
next_line["text_list"],
)
next_line = line_parser.Line(
line_str=next_line_str,
style_dict=next_style_dict,
text_list=next_text_list,
page_details=page_stats,
)
# if the last line was not a table, check if the next line is a table to avoid single tr
if prev_line.line_type != "table_row" and not ents_aligned:
# check if the next line is a table and matches curr_line
next_line_tr = next_line.line_type == "table_row" or should_join_table(
curr_line,
next_line,
False,
)
if not next_line_tr and curr_line.line_type == "table_row":
curr_line.line_type = "para"
# if the next line is joinable by visual stats but prev and curr are not
# don't join the line (only true by x-span check and y is below for prev cur)
# if this is not true ignore the rule
prev_not_above_next = (
next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y
)
next_line_join = False
if next_line and check_layout(prev_line, next_line, prev_not_above_next):
next_line_join = check_page_spacing(
curr_line,
next_line,
page_stats["fs_and_diff_next_y"],
)
# if the prev line is not visually joinable and the curr_next is
# make sure the prev_line doesn't join the curr_line
curr_next_visual_join = not join_font_spacing and next_line_join
# print()
# print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line")
# print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line)
# print("join_font_spacing:,", join_font_spacing)
is_incomplete = (
is_incomplete_sent
or (join_font_spacing and not sentence_visual_end)
or curr_line.continuing_line
)
# print("is_incomplete", is_incomplete)
has_overlap_with_min = (
compute_overlap(
curr_line.visual_line.start_x,
curr_line.visual_line.end_x,
prev_line.visual_line.start_x,
prev_line.visual_line.end_x,
divide_by_min=True,
)
> 0.7
)
is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0
is_visually_apart = (has_overlap_with_min and not is_below) or (
not has_overlap_with_min and is_below
)
above_bold_below_not = (
prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0
)
has_overlap_with_max = (
compute_overlap(
curr_line.visual_line.start_x,
curr_line.visual_line.end_x,
prev_line.visual_line.start_x,
prev_line.visual_line.end_x,
divide_by_min=False,
)
> 0.3
)
is_not_header_over_para = True
if (
above_bold_below_not
and not has_overlap_with_max
and prev_line.line_type == "header"
and not prev_line.incomplete_line
):
is_not_header_over_para = False
# print("header over para check")
# print("""above_bold_below_not
# and not has_overlap_with_max
# and prev_line.line_type == "header"
# """)
# print(above_bold_below_not)
# print(has_overlap_with_max, j)
# print(prev_line.line_type == "header")
# print()
# print(is_not_header_over_para)
###########
# List item
if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]):
prev_line.line_type = "list_item"
curr_line.line_type = "list_item"
curr_line.is_list_item = True
# change prev_line to list item
if page_blocks[-1]["block_type"] != "list_item":
page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0]
page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][
1:
].lstrip()
page_blocks[-1]["block_type"] = "list_item"
close_text_y = (
curr_line.visual_line.start_y
- curr_line.visual_line.mode_fs
- prev_line.visual_line.start_y
- prev_line.visual_line.mode_fs
) <= 0
aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x
title_text = False
if len(lines) < 10:
title_text = top_overlap == 1.0 and close_text_y and aligned_text
visual_header = visual_header_check(prev_line, curr_line, same_font)
list_item_rule = curr_line.has_list_char or (
curr_line.numbered_line
and not (
(prev_line.incomplete_line and curr_line.continuing_line)
or join_font_spacing
)
)
last_2_block_tr = False
if len(page_blocks) >= 2:
last_block_tr = (
page_blocks[-1]["block_type"] == "table_row"
and page_blocks[-2]["block_type"] == "table_row"
)
if not last_block_tr and curr_line.line_type == "para":
# check to join
if prev_line.incomplete_line and curr_line.continuing_line:
last_2_block_tr = True
no_space_join = prev_line.ends_with_period and curr_line.text[0] != " "
visual_header_by_stats = visual_header_from_stats(
prev_line,
curr_line,
page_stats,
)
header_join = False
common_list = curr_line.has_list_char or prev_line.has_list_char
if (
visual_header_by_stats
and curr_line.incomplete_line
and same_font
and not (prev_line.is_table_row or curr_line.is_table_row or common_list)
):
header_join = True
# print("LINEJOIN CHECK")
# print("positive\n", "*" * 10)
# print(f"\nsame_font:{same_font}",
# f"\nis_incomplete:{is_incomplete}",
# f"\nis_not_header_over_para:{is_not_header_over_para}")
# print("join_font_spacing", join_font_spacing)
# print("header join", header_join)
# print()
# print("negative\n", "*" * 10)
# print(f"\nis_visually_apart:{is_visually_apart}",
# f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}",
# f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}",
# f"\ncurr_line table {curr_line.line_type == 'table_row'}",
# f"\ncurr_line list {curr_line.is_list_item}",
# f"\nvisual_header {visual_header}",
# f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}')
if (
same_font
and not should_join_table(prev_line, curr_line, ents_aligned)
and not (curr_line.line_type == "table_row" or list_item_rule)
and not (prev_line.line_type == "table_row" and not last_2_block_tr)
and is_incomplete
and not curr_next_visual_join # is_visually_apart
and not visual_header
or not check_parentheses(prev_line.text)
and is_not_header_over_para
and not no_space_join
or title_text
or header_join
):
# print("JOIN")
if not is_visually_apart and bottom_overlap < 0.5:
# this would signify end of paragraph
sentence_visual_end = True
else:
sentence_visual_end = False
if page_stats["n_lines"] <= 3:
page_blocks[-1]["block_type"] = "header"
elif (
not prev_line.line_type == "list_item"
): # and not curr_line.visual_line.is_header:
page_blocks[-1]["block_type"] = "para"
new_text = formatter.connect(
prev_line.text.rstrip(),
curr_line.text.lstrip(),
)
new_text_list = (
prev_line.visual_line.text_list + curr_line.visual_line.text_list
)
# print("Max ex min ex assignment")
            max_x = max(prev_line.visual_line.max_x, curr_line.visual_line.max_x)
min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
prev_line_type = prev_line.line_type
page_blocks[-1]["block_text"] = new_text
prev_start_y = prev_line.visual_line.start_y
curr_start_y = curr_line.visual_line.start_y
prev_end_y = prev_line.visual_line.end_y
wrapped_page = prev_line.visual_line.wrapped_page
# pass the line parser attributes
prev_line = curr_line
# add appended text and text_list, preserve the line type
prev_line.text = new_text
prev_line.visual_line.start_y = prev_start_y
prev_line.visual_line.text_list = new_text_list
prev_line.line_type = prev_line_type
prev_line.visual_line.min_x = min_x
prev_line.visual_line.max_x = max_x
prev_line.visual_line.wrapped_page = wrapped_page
if curr_start_y < prev_end_y:
prev_line.visual_line.wrapped_page = True
# print(prev_start_y)
# print("Join")
# print()
# print("-" * 50)
# print()
# new block
else:
# print("NEW block")
# print("*" * 50)
if not is_visually_apart and bottom_overlap < 0.5:
# this would signify end of paragraph
sentence_visual_end = True
else:
sentence_visual_end = False
# print("-"*50)
colon_rule = (
prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents
)
# normal case
tab_check_join = {
prev_line.visual_line.tab_count_join,
prev_line.visual_line.tab_count,
} & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count}
tab_check = sum(tab_check_join) > 0
# print("-+" * 50)
# print("TAB POSITIONS")
# print(prev_line.text)
# print(prev_line.visual_line.start_x_list)
# print(prev_line.visual_line.start_x_list_single_ent)
# print(prev_line.visual_line.tab_count)
# print(prev_line.visual_line.tab_count_join)
#
# print(curr_line.text)
# print(curr_line.visual_line.start_x_list)
# print(curr_line.visual_line.start_x_list_single_ent)
# print(curr_line.visual_line.tab_count)
# print(curr_line.visual_line.tab_count_join)
# print("tabcheck", tab_check)
# print("ents_aligned", ents_aligned)
# print(prev_ents, curr_ents)
# print(curr_line.visual_line.text_list)
# print("-+" * 50)
if visual_header_by_stats and prev_line.line_type != "table_row":
page_blocks[-1]["block_type"] = "header"
elif (
colon_rule
and prev_ents == 1
and prev_line.line_type != "list_item"
and not (prev_line.incomplete_line and curr_line.continuing_line)
):
# print("Table Conversion")
# print()
# print("colon check")
# print(prev_line.text.split(":"))
# print(curr_line.text.split(":"))
# print("TR1")
new_text_list = prev_line.text.split(":")
new_text_list = [new_text_list[0] + ":", new_text_list[1:]]
page_blocks[-1]["block_type"] = "table_row"
                page_blocks[-1]["block_list"] = new_text_list
if text_group_start:
text_group_start = False
text_group_start_idx = page_blocks[-1]["block_idx"]
page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
curr_line.line_type = "table_row"
curr_line.is_list_or_row = True
# print("Table Conversion!")
# print(prev_ents, curr_ents)
# print(page_blocks[-1]["block_text"])
# print("TR3")
elif (
tab_check and ents_aligned and prev_line.line_type != "list_item"
) or (colon_rule and not prev_line.incomplete_line):
# print("Table Conversion")
# print(prev_ents, curr_ents)
# print(page_blocks[-1]["block_text"])
# print("TR2")
page_blocks[-1]["block_type"] = "table_row"
if text_group_start:
text_group_start = False
text_group_start_idx = page_blocks[-1]["block_idx"]
page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
curr_line.line_type = "table_row"
else:
text_group_start = True
text_group_start_idx = -1
list_char = ""
if curr_line.line_type == "list_item":
list_char = curr_line.text[0]
curr_line.text = curr_line.text[1:].lstrip()
if curr_line.line_type == "header":
header_block_idx = block_idx
if (visual_header or visual_header_by_stats) and not (
prev_line.line_type == "list_item"
or prev_line.line_type == "numbered_list_item"
):
page_blocks[-1]["block_type"] = "header"
# print()
# print("*" * 40)
# print("NEW BLOCK")
# print()
# print("*" * 40)
# print(curr_line.line_type, curr_line.text)
# group attribute
if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0:
group_id += 1
block = {
"block_idx": block_idx,
"block_text": curr_line.text,
"block_type": curr_line.line_type,
"header_block_idx": header_block_idx,
"block_group": [curr_line.visual_line.text_list],
"text_group_start_idx": text_group_start_idx,
"list_char": list_char,
"group_id": group_id,
"fs": curr_line.visual_line.start_fs,
"x": curr_line.visual_line.start_x,
"y": curr_line.visual_line.start_y,
"line": curr_line,
"block_list": curr_line.visual_line.text_list,
}
        # This is to account for cases where the headers are false positives  # TODO: improve header code
prev_text = page_blocks[-1]["block_text"]
if page_blocks[-1]["block_type"] == "header" and (
len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16
):
page_blocks[-1]["block_type"] = "para"
prev_line = curr_line
block_idx += 1
page_blocks.append(block)
    # if there are not too many blocks, there may be title text that was missed
if len(page_blocks) <= 2:
for idx, block in enumerate(page_blocks):
if "." not in block["block_text"] and len(block["block_text"].split()) < 10:
page_blocks[idx]["block_type"] = "header"
page_blocks = order_blocks(page_blocks)
return page_blocks, line_set
def clean_line(line):
line = line.replace("\n", " ")
line = line.replace("\t", " ")
line = line.strip()
return line
def fix_spaced_characters(line_text):
line_text = re.sub(r"\s+", "", line_text)
return su.segment(line_text)
def connect(prev, curr):
has_space = prev.endswith(" ")
result = prev + ("" if has_space else " ") + curr
return result
def get_numbers(line):
# test = re.compile(r"[0-9]+\.?[0-9]?")
regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$")
return regex.search(line)
def check_block_join(prev_block, block):
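    # Two consecutive "para" blocks are joined when the first parses as an incomplete
    # line or the second parses as a continuing line.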
prev_text = prev_block["block_text"]
curr_text = block["block_text"]
blocks_are_paras = (
prev_block["block_type"] == "para" and block["block_type"] == "para"
)
if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras:
prev_line = line_parser.Line(prev_block["block_text"])
curr_line = line_parser.Line(block["block_text"])
if prev_line.incomplete_line or curr_line.continuing_line:
return True
return False
def join_blocks(page_blocks, blocks):
prev_last_block = page_blocks[-1][-1]
# update page blocks and blocks
# prev_blocks = page_blocks[-1]
# last_prev_block = prev_blocks[-1]
# check to join last_prev_block with first blocks[0]
# if it's a join, pop the block and join, subtract block indexes
prev_last_block["block_text"] = (
prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip()
)
prev_last_block["block_list"].append(blocks[0]["block_list"])
# print(prev_block)
page_blocks[-1][-1] = prev_last_block
for block in blocks[1:]:
block["block_idx"] -= 1
return page_blocks, blocks[1:]
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor/processors.py<fim_prefix>import logging
import re
from collections import Counter
from collections import defaultdict
from . import formatter
from . import line_parser
from . import patterns
from nlm_ingestor.ingestor_utils import spell_utils
from nlm_ingestor.ingestor_utils.utils import sent_tokenize
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
su = spell_utils.SpellUtil()
def stem(line):
line = line.replace("'s", "")
line = line.replace("’s", "")
return line
def check_parentheses(text):
count = 0
for i in text:
if i == "(":
count += 1
elif i == ")":
count -= 1
return count == 0
def nlm_tokenize(line):
# print(line)
tokens = []
if not line:
line = ""
line = line.lower()
trans_table = line.maketrans("-/", " ")
line = line.translate(trans_table)
line = line.translate(str.maketrans("", "", "�\\(*,.?•\\➢ƒ–\\)'\"—"))
# line = patterns.num_unit.sub(r"100 \1", line)
line = patterns.num_unit.sub(r"", line)
line = stem(line)
words = line.split()
for word in words:
if (
not word.isdigit()
and not word.endswith("%")
and not word.startswith("$")
and not word.endswith("$")
):
tokens.append(word)
if len(tokens) == 0:
tokens.append("unknown")
return tokens
# make sure that there is at least one word which is greater than two characters
def find_floating_chars(line):
words = line.split(" ")
for word in words:
if len(word) > 2:
return False
return True
def is_table_row(line):
line = line_parser.Line(line)
return line.is_table_row
def should_skip(line, xml=False):
return len(line) <= 2 if not xml else len(line) == 0
def clean_lines(lines, xml=False):
result = []
running_line = ""
line_buffer = []
line_type = "para"
header_block_idx = -1
block_idx = 0
line_set = set()
for line_str in lines:
# print(line_str)
line_str = clean_line(line_str)
if should_skip(line_str, xml=xml):
continue
line_without_numbers = re.sub(r"\d+", "", line_str)
if line_without_numbers in line_set:
continue
else:
line_set.add(line_without_numbers)
curr_line = line_parser.Line(line_str)
        # this converts strings like 'e x e c u t i v e summary' to 'executive summary'
if not xml and curr_line.has_spaced_characters:
line_str = fix_spaced_characters(line_str)
curr_line = line_parser.Line(line_str)
if len(line_buffer) > 0:
            # find out if the previous line was a discontinuous line
prev_line = line_buffer[-1]
logger.debug("========")
logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n")
logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n")
# keep connecting lines as long as they seem incomplete
is_incomplete = prev_line.incomplete_line or (
len(line_buffer) > 1 and not prev_line.ends_with_period
)
logger.debug(
f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}",
)
if (
is_incomplete
and not (curr_line.is_list_or_row or curr_line.line_type == "list_item")
) or curr_line.continuing_line:
logger.debug("connecting..")
running_line = formatter.connect(running_line, curr_line.text)
line_buffer.append(curr_line)
# if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers
if not line_type =<fim_suffix>= "list_item":
line_type = "para"
else: # commit the line and start a new line
                # remove different types of bulleted lists (for better formatting) but do not touch numbered lines
logger.debug("starting new line..")
# if line_type == "list_item":
# running_line = running_line[1:].lstrip()
if line_type == "header":
header_block_idx = block_idx
block = {
"block_idx": block_idx,
"block_text": running_line,
"block_type": line_type,
"text_group_start_idx": -1,
"block_list": [],
"header_block_idx": header_block_idx,
"level": 0,
}
result.append(block)
block_idx = block_idx + 1
running_line = curr_line.text
line_buffer = [curr_line]
line_type = curr_line.line_type
logger.debug("========")
else:
running_line = curr_line.text
line_type = curr_line.line_type
line_buffer = [curr_line]
if line_type == "list_item" and running_line[0] in "�\\*,.?•\\➢ƒ–\\'\"—":
running_line = running_line[1:].lstrip()
block = {
"block_idx": block_idx,
"block_text": running_line,
"block_type": line_type,
"text_group_start_idx": -1,
"block_list": [],
"header_block_idx": header_block_idx,
"level": 0,
}
result.append(block)
return result
def line_list_check(prev_line, curr_line, list_char):
# if prev_line is list_item and list_char matches curr_line
if list_char == curr_line.text[0] and list_char not in ["”", "'", '"', "("]:
return True
# same char is alpha
if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha():
if len(prev_line.text) >= 2 and prev_line.text[1].isupper():
# spell check first word
first_word = prev_line.text.split(" ")[0]
first_word = first_word.replace("'", "")
correct_word = su.segment(first_word)
if first_word[1:] == correct_word:
return True
# same char is not alpha but not digit
if prev_line.text[0] == curr_line.text[0] and not (
prev_line.text[0].isalpha()
or prev_line.text[0].isdigit()
or list_char not in ["”", "'", '"', "("]
):
return True
return False
def should_join_table(prev_line, curr_line, ents_aligned):
"""
    Check if the next line should be joined as a table row (tr). This makes no assumption about whether the current line is a table.
"""
# print()
# print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list)
# check list of spaced words
curr_line_ents = len(prev_line.visual_line.text_list)
next_line_ents = len(curr_line.visual_line.text_list)
ent_match = (
curr_line_ents == next_line_ents and curr_line_ents >= 2
) # tr should have at least two elements
# print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count)
tab_match = (
prev_line.visual_line.tab_count == curr_line.visual_line.tab_count
and curr_line.visual_line.tab_count > 0
)
# casing should also be the same
same_case = (
prev_line.text[0].islower() == curr_line.text[0].islower()
or prev_line.text[0].isupper() == curr_line.text[0].isupper()
)
colon_check = (
prev_line.hit_colon
and curr_line.hit_colon
and prev_line
and same_case
and not prev_line.incomplete_line
)
# if prev_line.hit_colon and curr_line.hit_colon:
# print()
# print("colon check")
# print(prev_line.visual_line.text_list)
# print(curr_line.visual_line.text_list)
# col_check
# print(tab_match, ent_match, colon_check)
tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count
return (
(tab_match and ent_match)
or colon_check
or (ents_aligned and ent_match and tab_check)
)
def check_page_spacing(prev_line, curr_line, spacing_dict):
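    # Heuristic: joins two lines when their (font size, vertical gap) pair is one of the
    # common/minimal spacings recorded for the page in spacing_dict.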
# print("^"*50)
# print("checking page stats")
# print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text)
# print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text)
# print()
diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y)
# find best fs reference
prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs}
curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs}
same_fs = prev_line_fs.intersection(curr_line_fs)
fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs
min_check = (
spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None
)
max_check = (
spacing_dict[(fs, diff_top + 1)] if (fs, diff_top + 1) in spacing_dict else None
)
normal_check = (fs, diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3
if min_check or normal_check or max_check:
# get all fs in spacing dict
# see if the diff top is a min
# print("checking space dict")
distance_list = []
for val in spacing_dict:
if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2:
distance_list.append((val, val[1]))
# print(distance_list)
val = min(distance_list) if len(distance_list) else []
if len(val):
join_fs, join_top = val[0]
if val[0] == (fs, diff_top): # or close
# print("SHOULDJOIN")
return True
elif (
join_fs == fs
and ((diff_top - 1) == join_top)
or ((diff_top + 1) == join_top)
):
return True
return False
def compute_overlap(
start_x0: float,
end_x0: float,
start_x1: float,
end_x1: float,
divide_by_min=True,
) -> float:
"""
Computes the % of intersection (overlap) of two lines w.r.t. the shortest line
"""
width_x0 = abs(end_x0 - start_x0)
width_x1 = abs(end_x1 - start_x1)
if start_x0 <= start_x1 <= end_x0:
intersect = min(abs(end_x0 - start_x1), width_x1)
elif start_x0 <= end_x1 <= end_x0:
intersect = min(abs(end_x1 - start_x0), width_x1)
elif start_x1 <= start_x0 <= end_x0 <= end_x1:
intersect = abs(end_x0 - start_x0)
else:
intersect = 0.0
if divide_by_min:
intersect /= min(width_x0, width_x1) + 1e-5
else:
intersect /= max(width_x0, width_x1) + 1e-5
return intersect
def compute_overlap_top_bottom(
start_x0: float,
end_x0: float,
start_x1: float,
end_x1: float,
) -> float:
"""
This is different from the above function.
Finds percentage overlap of top to bottom.
    Score of 100% is possible since it does not reference the shortest line.
"""
width_x1 = abs(end_x1 - start_x1)
if width_x1 == 0:
return 0.0
if start_x0 <= start_x1:
# measure from left to right
if end_x1 <= end_x0:
# if start and end both less, full in subset
return 1.0
return (end_x1 - start_x0) / width_x1
else:
# measure from bottom start
if end_x1 <= start_x0:
return 0.0
return (end_x1 - start_x0) / width_x1
def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1):
"""
This is different from the above function.
Finds percentage overlap of top to bottom.
    Score of 100% is possible since it does not reference the shortest line.
"""
# print(start_x0, end_x0)
# print(start_x1, end_x1)
if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line
# print()
# print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0))
return (end_x1 - start_x1) / (end_x0 - start_x0)
# other conditions
# elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line
# return
# else: #to the right of bottom line
return 1.0
# header check for lines with similar font
def visual_header_check(prev_line, curr_line, same_font):
# check top overlap (small) if the font size is bigger
# print()
# print("visual_header check:")
# print("prev", prev_line.text)
# print("checking", curr_line.text)
# top also has to be higher
# print("prev_line.visual_line.start_y, prev_line.visual_line.end_y")
# print(prev_line.visual_line.start_y, prev_line.visual_line.end_y)
# print(prev_line.visual_line.start_y, curr_line.visual_line.start_y)
if prev_line.visual_line.wrapped_page:
return False
if prev_line.visual_line.start_y < curr_line.visual_line.start_y:
prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x
curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x
# print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x")
# print(prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x)
# print("curr_line.visual_line.min_x, curr_line.visual_line.max_x")
# print(curr_line.visual_line.min_x, curr_line.visual_line.max_x)
# print("prev_line_width / curr_line_width")
# print(prev_line_width / curr_line_width)
# print("prev_line_width, curr_line_width")
# print(prev_line_width, curr_line_width)
if curr_line_width == 0:
return False
# print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x))
if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x):
if round(prev_line_width) == round(curr_line_width):
# print()
# print("NOT A HEADER1")
return False
offset = 0
# print(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
# print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x)
if prev_line.visual_line.min_x <= curr_line.visual_line.min_x:
offset = curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset
# print("(prev_line_width - offset) / curr_line_width")
# print((prev_line_width - offset) / curr_line_width)
overlap_percentage = (prev_line_width - offset) / curr_line_width
different_font_style = (
prev_line.visual_line.fw != curr_line.visual_line.fw
or prev_line.visual_line[1] != curr_line.visual_line[1]
or prev_line.visual_line.fs > curr_line.visual_line.fs
)
if (
overlap_percentage < 0.3
or (different_font_style and overlap_percentage < 0.6)
or (prev_line.line_type == "header" and different_font_style)
# or (prev_line.is_header and different_font_style)
):
# print("HEADER INDENT", prev_line.is_header)
# print("overlap rule::", (prev_line_width - offset) / curr_line_width)
# print(True)
return True
# print(False)
# print()
# print("NOT A HEADER")
return False
def visual_header_from_stats(prev_line, curr_line, page_stats):
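    # Heuristic: flags prev_line as a header when its font size is rare on the page
    # (<= 8% of lines) and clearly larger than the page median (or close to the page
    # maximum font size).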
prev_fs = prev_line.visual_line.fs
curr_fs = curr_line.visual_line.fs
median_val = round(page_stats["median_fs"])
max_val = round(max(page_stats["fs_list"]))
max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if max_val != 0 else True
prev_fs_diff = round(prev_fs - median_val)
curr_fs_diff = (
round(curr_fs - median_val) if round(curr_fs - median_val) else 0.8
) # curr_fs is the median
varied_set = len(set(page_stats["fs_list"])) >= 4
rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]])
unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"])
prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff
# print("prev_fs, curr_fs", prev_fs, curr_fs)
# print("unique text")
# print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) )
# print("visual_header check", len(set(page_stats["fs_list"])))
# print("varied_set", varied_set, "unique_text", unique_text)
# print(rounded_fs_count)
# print()
# close from max or far enough from median
bigger_text = max_val_diff or (
prev_curr_ratio_from_median > 2
) # TODO text must also be relatively uncommon
if varied_set and (unique_text <= 0.08):
if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3:
# print(max_val_diff)
# print(prev_fs, prev_line.text)
# print(curr_fs, curr_line.text)
# print()
return True
# header join
if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1):
# print(max_val_diff)
# print(prev_fs, prev_line.text)
# print(curr_fs, curr_line.text)
# print()
return True
return False
# def visual_clean_lines(lines, page_stats={}, page_info_dict={}):
def check_tr_alignment(prev_line, curr_line):
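    # Heuristic: two lines are treated as aligned table rows when they have a similar
    # number of text entities and their entity start x-positions match within ~100 units.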
# print("-=" * 50)
# print("check_tr_alignment!")
# print(prev_line.text)
# print(curr_line.text)
# print()
prev_ents = len(prev_line.visual_line.text_list)
curr_ents = len(curr_line.visual_line.text_list)
prev_positions = prev_line.visual_line.start_x_list
curr_positions = curr_line.visual_line.start_x_list
prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent
curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent
# print(prev_line_start_ents)
# print(curr_line_start_ents)
same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1
if len(prev_line_start_ents) == len(curr_line_start_ents):
prev_positions = prev_line_start_ents
curr_positions = curr_line_start_ents
if len(prev_line_start_ents) == len(curr_positions) and len(
prev_line_start_ents,
) != len(
prev_positions,
): # joined p_tags
prev_positions = prev_line_start_ents
if not same_ents:
# print("check_tr_alignment False1")
# print(prev_ents, curr_ents)
return False
# print("CHECKING POSITIONS")
# print(prev_positions)
# print(curr_positions)
for p_x, c_x in zip(prev_positions, curr_positions):
p_x = round(p_x)
c_x = round(c_x)
if abs(p_x - c_x) > 100:
# print("False")
# print("check_tr_alignment False3")
return False
# print("check_tr_alignment True")
return True
def check_layout(prev_line, curr_line, prev_above_curr):
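    # Flags a misplaced text group: returns True when the two lines overlap horizontally
    # even though prev_line is not above curr_line.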
prev_line_width = range(
int(prev_line.visual_line.min_x),
int(prev_line.visual_line.max_x),
)
# weird edge case
if not prev_line_width:
prev_line_width = range(
int(prev_line.visual_line.max_x),
int(prev_line.visual_line.min_x),
)
curr_line_width = range(
int(curr_line.visual_line.min_x),
int(curr_line.visual_line.max_x),
)
prev_line_width = set(prev_line_width)
prev_curr_overlap = prev_line_width.intersection(curr_line_width)
if prev_curr_overlap and not prev_above_curr:
# print(prev_line.text)
# print(curr_line.text)
# print("misplaced text group")
# print()
return True
return False
def order_blocks(blocks):
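    # Groups blocks by group_id, orders the groups by their starting y position, then
    # re-indexes block_idx and header_block_idx to match the new order.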
block_group_dict = defaultdict(list)
for idx, block in enumerate(blocks):
# print(idx, "block-group", block["group_id"], block["block_type"], block['block_text'])
group_id = block["group_id"]
block_group_dict[group_id].append(block)
block_group_list = [] # list that holds tuples (group_id, y_pos)
for block_group_id in block_group_dict:
block_group_list.append(
(block_group_id, block_group_dict[block_group_id][0]["y"]),
) # append starting y position of group
block_group_list = sorted(
block_group_list,
key=lambda x: x[1],
) # sort block groups by y position
# get list of ordered block group keys
ordered_blocks = []
for block_group_id, y in block_group_list:
ordered_blocks += block_group_dict[block_group_id]
# for b in original_blocks:
# re-index blocks and headers based off of new ordering
header_idx = 0
for idx, block in enumerate(ordered_blocks):
block["block_idx"] = idx
if block["block_type"] == "header":
header_idx = idx
ordered_blocks[idx]["header_block_idx"] = header_idx
return ordered_blocks
def visual_clean_lines(
lines,
page_stats={},
page_info_dict={},
page_idx=0,
line_set={},
):
page_blocks = []
header_block_idx = -1
block_idx = 0
# block_idx = page_idx
style_dict = {}
join_font_spacing = False
prev_line = None
text_list = []
prev_ents = 0
curr_ents = 0
is_incomplete = False
colon_rule = False
text_group_start = True
text_group_start_idx = 0
prev_line = None
next_line = None
# for idx, line in enumerate(lines[12:14]):
sentence_visual_end = False
group_id = 0
for idx, line in enumerate(lines):
# print(idx)
line_str, style_dict, text_list = (
line["text"],
line["style"],
line["text_list"],
)
line_str = " ".join(line_str.split())
if should_skip(line_str):
continue
if line_str in line_set:
continue
if len(line_str.split()) > 8:
line_set.add(line_str)
curr_line = line_parser.Line(
line_str=line_str,
style_dict=style_dict,
text_list=text_list,
page_details=page_stats,
)
if prev_line is None:
# initialize memory of previous line.
# this will update with join decisions
list_char = ""
if curr_line.line_type == "list_item":
list_char = curr_line.text[0]
curr_line.text = curr_line.text[1:].lstrip()
if curr_line.line_type == "header":
header_block_idx = block_idx
block = {
"block_idx": block_idx,
"block_text": curr_line.text,
"block_type": curr_line.line_type,
"header_block_idx": header_block_idx,
"block_group": [curr_line.visual_line.text_list],
"list_char": list_char,
"fs": curr_line.visual_line.start_fs,
"text_group_start_idx": text_group_start_idx,
"block_list": curr_line.visual_line.text_list,
"line": curr_line,
"y": curr_line.visual_line.start_y,
"group_id": group_id,
}
prev_line = curr_line
block_idx += 1
# if (idx <= 3) or (idx >= len(lines) - 3):
# line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip()
# if line_without_numbers:
# # track block_idx for de-duplication
# line_set[line_without_numbers].append((page_idx, block_idx))
page_blocks.append(block)
continue
# print("--" * 50)
# print(prev_line.line_type, "\n", prev_line.text)
# print(prev_ents)
# print(prev_line.visual_line.fw_list)
# print(prev_line.visual_line.font_family)
# print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text)
# print(prev_line.visual_line.mode_fs)
# print(curr_line.line_type, "\n", curr_line.text)
# print(curr_ents)
# print()
# print(curr_line.visual_line.font_family)
# print(curr_line.visual_line.mode_fs)
# print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text)
if (
len(prev_line.text) > 1
and len(curr_line.text) > 1
and prev_line.text[:2] == curr_line.text[:2]
and prev_line.text[1] == " "
and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit())
and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha())
):
curr_line.line_type = "list_item"
curr_line.is_list_item = True
curr_line.is_list_or_row = True
if page_blocks[-1]["block_type"] != "list_item":
page_blocks[-1]["block_type"] = "list_item"
page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0]
page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][
1:
].lstrip()
same_start_fs = (
abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5
)
same_end_fs = (
abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5
)
same_end_start_fs = (
abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5
)
prev_above_curr = (
True
if prev_line.visual_line.end_y < curr_line.visual_line.start_y
else False
)
y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y
top_overlap = compute_overlap_top_bottom(
start_x0=prev_line.visual_line.start_x,
end_x0=prev_line.visual_line.end_x,
start_x1=curr_line.visual_line.start_x,
end_x1=curr_line.visual_line.end_x,
)
bottom_overlap = compute_bottom_top_overlap(
start_x0=prev_line.visual_line.start_x,
end_x0=prev_line.visual_line.end_x,
start_x1=curr_line.visual_line.start_x,
end_x1=curr_line.visual_line.end_x,
)
prev_overlap_curr = True if bottom_overlap or top_overlap else False
use_visual_join = True if prev_above_curr and prev_overlap_curr else False
if not use_visual_join and prev_line.incomplete_line:
join_font_spacing = True
if not (prev_line.is_table_row or curr_line.is_table_row):
if page_stats["n_lines"] <= 3:
join_font_spacing = True
else:
join_font_spacing = check_page_spacing(
prev_line,
curr_line,
page_stats["fs_and_diff_next_y"],
)
# if the font is different and font-family is different
different_font_family = (
curr_line.visual_line.font_family != prev_line.visual_line.font_family
)
different_common_fs = (
prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs
and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs
)
different_font = (
different_font_family and different_common_fs and not join_font_spacing
)
# start and end characters are same font or the mode of fonts of both lines is the same
same_font = (
(prev_line.visual_line.fs == curr_line.visual_line.fs)
or (same_start_fs and same_end_fs)
or same_end_start_fs
or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs
) and not different_font
prev_ents = (
len(prev_line.visual_line.text_list)
if not prev_line.line_type == "list_item"
else 0
)
curr_ents = (
len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0
)
ents_aligned = check_tr_alignment(prev_line, curr_line)
is_incomplete_sent = (
prev_line.incomplete_line
and not prev_line.ends_with_period
or prev_line.ends_with_comma
)
# logic using line after curr
if idx + 1 < len(lines):
            # this is inefficient as line_parser is called twice,
# once for next_line and once for curr_line.
next_line = lines[idx + 1]
# print("NEXT LINE\n", next_line['text'])
next_line_str, next_style_dict, next_text_list = (
next_line["text"],
next_line["style"],
next_line["text_list"],
)
next_line = line_parser.Line(
line_str=next_line_str,
style_dict=next_style_dict,
text_list=next_text_list,
page_details=page_stats,
)
# if the last line was not a table, check if the next line is a table to avoid single tr
if prev_line.line_type != "table_row" and not ents_aligned:
# check if the next line is a table and matches curr_line
next_line_tr = next_line.line_type == "table_row" or should_join_table(
curr_line,
next_line,
False,
)
if not next_line_tr and curr_line.line_type == "table_row":
curr_line.line_type = "para"
# if the next line is joinable by visual stats but prev and curr are not
# don't join the line (only true by x-span check and y is below for prev cur)
# if this is not true ignore the rule
prev_not_above_next = (
next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y
)
next_line_join = False
if next_line and check_layout(prev_line, next_line, prev_not_above_next):
next_line_join = check_page_spacing(
curr_line,
next_line,
page_stats["fs_and_diff_next_y"],
)
# if the prev line is not visually joinable and the curr_next is
# make sure the prev_line doesn't join the curr_line
curr_next_visual_join = not join_font_spacing and next_line_join
# print()
# print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line")
# print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line)
# print("join_font_spacing:,", join_font_spacing)
is_incomplete = (
is_incomplete_sent
or (join_font_spacing and not sentence_visual_end)
or curr_line.continuing_line
)
# print("is_incomplete", is_incomplete)
has_overlap_with_min = (
compute_overlap(
curr_line.visual_line.start_x,
curr_line.visual_line.end_x,
prev_line.visual_line.start_x,
prev_line.visual_line.end_x,
divide_by_min=True,
)
> 0.7
)
is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0
is_visually_apart = (has_overlap_with_min and not is_below) or (
not has_overlap_with_min and is_below
)
above_bold_below_not = (
prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0
)
has_overlap_with_max = (
compute_overlap(
curr_line.visual_line.start_x,
curr_line.visual_line.end_x,
prev_line.visual_line.start_x,
prev_line.visual_line.end_x,
divide_by_min=False,
)
> 0.3
)
is_not_header_over_para = True
if (
above_bold_below_not
and not has_overlap_with_max
and prev_line.line_type == "header"
and not prev_line.incomplete_line
):
is_not_header_over_para = False
# print("header over para check")
# print("""above_bold_below_not
# and not has_overlap_with_max
# and prev_line.line_type == "header"
# """)
# print(above_bold_below_not)
# print(has_overlap_with_max, j)
# print(prev_line.line_type == "header")
# print()
# print(is_not_header_over_para)
###########
# List item
if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]):
prev_line.line_type = "list_item"
curr_line.line_type = "list_item"
curr_line.is_list_item = True
# change prev_line to list item
if page_blocks[-1]["block_type"] != "list_item":
page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0]
page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][
1:
].lstrip()
page_blocks[-1]["block_type"] = "list_item"
close_text_y = (
curr_line.visual_line.start_y
- curr_line.visual_line.mode_fs
- prev_line.visual_line.start_y
- prev_line.visual_line.mode_fs
) <= 0
aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x
title_text = False
if len(lines) < 10:
title_text = top_overlap == 1.0 and close_text_y and aligned_text
visual_header = visual_header_check(prev_line, curr_line, same_font)
list_item_rule = curr_line.has_list_char or (
curr_line.numbered_line
and not (
(prev_line.incomplete_line and curr_line.continuing_line)
or join_font_spacing
)
)
last_2_block_tr = False
if len(page_blocks) >= 2:
last_block_tr = (
page_blocks[-1]["block_type"] == "table_row"
and page_blocks[-2]["block_type"] == "table_row"
)
if not last_block_tr and curr_line.line_type == "para":
# check to join
if prev_line.incomplete_line and curr_line.continuing_line:
last_2_block_tr = True
no_space_join = prev_line.ends_with_period and curr_line.text[0] != " "
visual_header_by_stats = visual_header_from_stats(
prev_line,
curr_line,
page_stats,
)
header_join = False
common_list = curr_line.has_list_char or prev_line.has_list_char
if (
visual_header_by_stats
and curr_line.incomplete_line
and same_font
and not (prev_line.is_table_row or curr_line.is_table_row or common_list)
):
header_join = True
# print("LINEJOIN CHECK")
# print("positive\n", "*" * 10)
# print(f"\nsame_font:{same_font}",
# f"\nis_incomplete:{is_incomplete}",
# f"\nis_not_header_over_para:{is_not_header_over_para}")
# print("join_font_spacing", join_font_spacing)
# print("header join", header_join)
# print()
# print("negative\n", "*" * 10)
# print(f"\nis_visually_apart:{is_visually_apart}",
# f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}",
# f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}",
# f"\ncurr_line table {curr_line.line_type == 'table_row'}",
# f"\ncurr_line list {curr_line.is_list_item}",
# f"\nvisual_header {visual_header}",
# f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}')
if (
same_font
and not should_join_table(prev_line, curr_line, ents_aligned)
and not (curr_line.line_type == "table_row" or list_item_rule)
and not (prev_line.line_type == "table_row" and not last_2_block_tr)
and is_incomplete
and not curr_next_visual_join # is_visually_apart
and not visual_header
or not check_parentheses(prev_line.text)
and is_not_header_over_para
and not no_space_join
or title_text
or header_join
):
# print("JOIN")
if not is_visually_apart and bottom_overlap < 0.5:
# this would signify end of paragraph
sentence_visual_end = True
else:
sentence_visual_end = False
if page_stats["n_lines"] <= 3:
page_blocks[-1]["block_type"] = "header"
elif (
not prev_line.line_type == "list_item"
): # and not curr_line.visual_line.is_header:
page_blocks[-1]["block_type"] = "para"
new_text = formatter.connect(
prev_line.text.rstrip(),
curr_line.text.lstrip(),
)
new_text_list = (
prev_line.visual_line.text_list + curr_line.visual_line.text_list
)
# print("Max ex min ex assignment")
            max_x = max(prev_line.visual_line.max_x, curr_line.visual_line.max_x)
min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
prev_line_type = prev_line.line_type
page_blocks[-1]["block_text"] = new_text
prev_start_y = prev_line.visual_line.start_y
curr_start_y = curr_line.visual_line.start_y
prev_end_y = prev_line.visual_line.end_y
wrapped_page = prev_line.visual_line.wrapped_page
# pass the line parser attributes
prev_line = curr_line
# add appended text and text_list, preserve the line type
prev_line.text = new_text
prev_line.visual_line.start_y = prev_start_y
prev_line.visual_line.text_list = new_text_list
prev_line.line_type = prev_line_type
prev_line.visual_line.min_x = min_x
prev_line.visual_line.max_x = max_x
prev_line.visual_line.wrapped_page = wrapped_page
if curr_start_y < prev_end_y:
prev_line.visual_line.wrapped_page = True
# print(prev_start_y)
# print("Join")
# print()
# print("-" * 50)
# print()
# new block
else:
# print("NEW block")
# print("*" * 50)
if not is_visually_apart and bottom_overlap < 0.5:
# this would signify end of paragraph
sentence_visual_end = True
else:
sentence_visual_end = False
# print("-"*50)
colon_rule = (
prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents
)
# normal case
tab_check_join = {
prev_line.visual_line.tab_count_join,
prev_line.visual_line.tab_count,
} & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count}
tab_check = sum(tab_check_join) > 0
# print("-+" * 50)
# print("TAB POSITIONS")
# print(prev_line.text)
# print(prev_line.visual_line.start_x_list)
# print(prev_line.visual_line.start_x_list_single_ent)
# print(prev_line.visual_line.tab_count)
# print(prev_line.visual_line.tab_count_join)
#
# print(curr_line.text)
# print(curr_line.visual_line.start_x_list)
# print(curr_line.visual_line.start_x_list_single_ent)
# print(curr_line.visual_line.tab_count)
# print(curr_line.visual_line.tab_count_join)
# print("tabcheck", tab_check)
# print("ents_aligned", ents_aligned)
# print(prev_ents, curr_ents)
# print(curr_line.visual_line.text_list)
# print("-+" * 50)
if visual_header_by_stats and prev_line.line_type != "table_row":
page_blocks[-1]["block_type"] = "header"
elif (
colon_rule
and prev_ents == 1
and prev_line.line_type != "list_item"
and not (prev_line.incomplete_line and curr_line.continuing_line)
):
# print("Table Conversion")
# print()
# print("colon check")
# print(prev_line.text.split(":"))
# print(curr_line.text.split(":"))
# print("TR1")
new_text_list = prev_line.text.split(":")
new_text_list = [new_text_list[0] + ":", new_text_list[1:]]
page_blocks[-1]["block_type"] = "table_row"
                page_blocks[-1]["block_list"] = new_text_list
if text_group_start:
text_group_start = False
text_group_start_idx = page_blocks[-1]["block_idx"]
page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
curr_line.line_type = "table_row"
curr_line.is_list_or_row = True
# print("Table Conversion!")
# print(prev_ents, curr_ents)
# print(page_blocks[-1]["block_text"])
# print("TR3")
elif (
tab_check and ents_aligned and prev_line.line_type != "list_item"
) or (colon_rule and not prev_line.incomplete_line):
# print("Table Conversion")
# print(prev_ents, curr_ents)
# print(page_blocks[-1]["block_text"])
# print("TR2")
page_blocks[-1]["block_type"] = "table_row"
if text_group_start:
text_group_start = False
text_group_start_idx = page_blocks[-1]["block_idx"]
page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
curr_line.line_type = "table_row"
else:
text_group_start = True
text_group_start_idx = -1
list_char = ""
if curr_line.line_type == "list_item":
list_char = curr_line.text[0]
curr_line.text = curr_line.text[1:].lstrip()
if curr_line.line_type == "header":
header_block_idx = block_idx
if (visual_header or visual_header_by_stats) and not (
prev_line.line_type == "list_item"
or prev_line.line_type == "numbered_list_item"
):
page_blocks[-1]["block_type"] = "header"
# print()
# print("*" * 40)
# print("NEW BLOCK")
# print()
# print("*" * 40)
# print(curr_line.line_type, curr_line.text)
# group attribute
if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0:
group_id += 1
block = {
"block_idx": block_idx,
"block_text": curr_line.text,
"block_type": curr_line.line_type,
"header_block_idx": header_block_idx,
"block_group": [curr_line.visual_line.text_list],
"text_group_start_idx": text_group_start_idx,
"list_char": list_char,
"group_id": group_id,
"fs": curr_line.visual_line.start_fs,
"x": curr_line.visual_line.start_x,
"y": curr_line.visual_line.start_y,
"line": curr_line,
"block_list": curr_line.visual_line.text_list,
}
        # This is to account for cases where the headers are false positives  # TODO: improve header code
prev_text = page_blocks[-1]["block_text"]
if page_blocks[-1]["block_type"] == "header" and (
len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16
):
page_blocks[-1]["block_type"] = "para"
prev_line = curr_line
block_idx += 1
page_blocks.append(block)
    # if there are not too many blocks, there may be title text that was missed
if len(page_blocks) <= 2:
for idx, block in enumerate(page_blocks):
if "." not in block["block_text"] and len(block["block_text"].split()) < 10:
page_blocks[idx]["block_type"] = "header"
page_blocks = order_blocks(page_blocks)
return page_blocks, line_set
def clean_line(line):
line = line.replace("\n", " ")
line = line.replace("\t", " ")
line = line.strip()
return line
def fix_spaced_characters(line_text):
line_text = re.sub(r"\s+", "", line_text)
return su.segment(line_text)
def connect(prev, curr):
has_space = prev.endswith(" ")
result = prev + ("" if has_space else " ") + curr
return result
def get_numbers(line):
# test = re.compile(r"[0-9]+\.?[0-9]?")
regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$")
return regex.search(line)
def check_block_join(prev_block, block):
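    # Two consecutive "para" blocks are joined when the first parses as an incomplete
    # line or the second parses as a continuing line.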
prev_text = prev_block["block_text"]
curr_text = block["block_text"]
blocks_are_paras = (
prev_block["block_type"] == "para" and block["block_type"] == "para"
)
if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras:
prev_line = line_parser.Line(prev_block["block_text"])
curr_line = line_parser.Line(block["block_text"])
if prev_line.incomplete_line or curr_line.continuing_line:
return True
return False
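# Illustrative usage sketch (hypothetical blocks): two adjacent "para" blocks are joined when the
# first one looks unfinished (no sentence-ending punctuation), which is the page-break case handled
# by join_blocks below.
def _example_check_block_join():
    prev_block = {"block_type": "para", "block_text": "The Company operates in three"}
    curr_block = {"block_type": "para", "block_text": "reportable segments."}
    # expected to be True here because the first block does not end with a period
    return check_block_join(prev_block, curr_block)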
def join_blocks(page_blocks, blocks):
prev_last_block = page_blocks[-1][-1]
# update page blocks and blocks
# prev_blocks = page_blocks[-1]
# last_prev_block = prev_blocks[-1]
# check to join last_prev_block with first blocks[0]
# if it's a join, pop the block and join, subtract block indexes
prev_last_block["block_text"] = (
prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip()
)
prev_last_block["block_list"].append(blocks[0]["block_list"])
# print(prev_block)
page_blocks[-1][-1] = prev_last_block
for block in blocks[1:]:
block["block_idx"] -= 1
return page_blocks, blocks[1:]
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/spell_utils.py<fim_prefix>import logging
import os
import string
from symspellpy.symspellpy import SymSpell
from symspellpy.symspellpy import Verbosity
import nlm_ingestor.ingestor as ingestor
from nlm_ingestor.ingestor import patterns
logger = logging.getLogger(__name__)
class SpellUtil:
def __init__(self):
self.sym_spell = SymSpell(2, 7)
dictionary_path = os.path.join(
os.path.dirname(os.path.abspath(ingestor.__file__)),
"../ingestor_models/symspell/frequency_dictionary_en_82_765.txt",
)
bigram_path = os.path.join(
os.path.dirname(os.path.abspath(ingestor.__file__)),
"../ingestor_models/symspell/frequency_dictionary_en_82_765.txt",
)
if not self.sym_spell.load_dictionary(
dictionary_path, term_index=0, count_index=1,
):
logging.error(f"Dictionary file not found: {dictionary_path}")
return
if not self.sym_spell.load_bigram_dictionary(
bigram_path, term_index=0, count_index=2,
):
logger.error(f"Bigram dictionary file not found: {bigram_path}")
return
def lookup_word(self, input_term):
max_edit_distance_lookup = 2
suggestion_verbosity = Verbosity.CLOSEST
# ignore_token = None
ignore_token = "|".join(patterns.spell_check)
suggestions = self.sym_spell.lookup(
input_term,
suggestion_verbosity,
max_edit_distance_lookup,
transfer_casing=False,
ignore_token=ignore_token,
)
# print(suggestions)
# for suggestion in suggestions:
# print("{}, {}, {}".format(suggestion.term, suggestion.distance,
# suggestion.count))
if len(suggestions) > 0:
return suggestions[0].term
else:
return input_term
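    # Illustrative usage sketch (hypothetical term): lookup_word returns the closest dictionary
    # suggestion within an edit distance of 2, or the input itself when nothing is found; for
    # example, SpellUtil().lookup_word("memebers") is expected to come back as "members" with the
    # bundled English frequency dictionary.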
# def lookup_sentence(self, input_term):
def lookup_compound(self, input_term):
max_edit_distance_lookup = 2
suggestions = self.sym_spell.lookup_compound(
input_term,
max_edit_distance_lookup,
transfer_casing=True,
ignore_non_words=True,
)
# for suggestion in suggestions:
# print("{}, {}, {}".format(suggestion.term, suggestion.distance,
# suggestion.count))
if len(suggestions) > 0:
return suggestions[0].term
else:
return input_term
def segment(self, input_term):
is_mixed_case_term = not input_term.islower()
if is_mixed_case_term:
input_term = input_term.lower()
suggestion = self.sym_spell.word_segmentation(input_term)
corrected_string = suggestion.corrected_string
if is_mixed_case_<fim_suffix>term:
corrected_string = string.capwords(corrected_string)
return corrected_string
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>import datetime
import logging
import math
import re
import string
from nltk.corpus import stopwords
from .patterns import abbreviations
from .patterns import states
from .patterns import states_abbreviations
from .styling_utils import mode_of_list
try:
stop_words = set(stopwords.words("english"))
except Exception as e:
logging.error(e)
import nltk
    nltk.download("stopwords")
stop_words = set(stopwords.words("english"))
stop_words.add("per")
continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~"
list_chars = [
"•",
"➢",
"*",
"ƒ",
"",
"",
"",
"",
"»",
"☐",
"·",
"�",
"▪",
"▪",
"○",
"",
"–",
]
list_types = {
"•": "circle",
"➢": "wide_symbol_arrow",
"*": "star",
"ƒ": "f",
"": "clock",
"": "small_square",
"": "narrow_symbol_arrow",
"": "large_square",
"»": "double_arrow",
"☐": "hollow_square",
"·": "circle",
"�": "special_char",
"▪": "very_small_square",
"▪": "very_small_square",
"○": "hollow_circle",
"": "hollow_squere",
"–": "dash",
"‒": "another-dash",
"̶": "underscore",
}
unicode_list_types = {
"\\uf0b7": "•",
"\\uf0fc": "",
}
footnote_types = {
"©"
}
ambiguous_list_chars = ["+", "-"]
units = ["acres", "miles", "-"] # - could represent a null value in a row
punctuations = string.punctuation + "“"
start_quotations = ["'", '"', "“"]
end_quotations = ["'", '"', "”"]
"""
Quote Pattern details:
\\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly.
["“\'] ==> Quote patterns
(?!\\D\\s) ==> Negative Lookahead for single character following the quote.
Helps in removing words like Macy's, don't ...
(?!\\d+) ==> Negative Lookahead for one or more digits following the pattern.
Helps in removing words like '19, '2019
(.*?)[,;.]?[”"\'] ==> Match all other data.
"""
# Add / Modify Quotation pattern in ingestor_utils/utils.py also.
quote_pattern = re.compile(
r'(?:(?<=\W)|(?<=^))["“‘’\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[”"‘’\']+',
) # (r'["“\'](.*?)[,;.]?[”"\']')
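# Illustrative usage sketch (hypothetical sentence): the pattern above extracts the text between
# balanced quotes, while the look-arounds skip possessives such as O'Reilly and abbreviations
# such as '19.
def _example_quote_pattern():
    # expected to return ['hello there']
    return quote_pattern.findall('He said "hello there" and left')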
single_char_pattern = re.compile(r'[a-zA-Z]')
multi_char_pattern = re.compile(r'[a-zA-Z]+')
roman_number_pattern = re.compile(r'[ixvIXV]+$')
ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"“‘’”\'\s]*$")
conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"]
class Word:
def __init__(self, token):
self.text = token
self.is_percent = False
self.is_number = False
self.is_year = False # year does not count as a number
self.is_dollar = False
self.is_million = False
self.is_billion = False
self.is_thousand = False
self.is_date_entry = False
self.is_negative = False
self.length = len(self.text)
self.is_stop_word = self.text.lower() in stop_words
self.is_number_range = False
self.parts = []
text_without_punct = self.text
while (
len(text_without_punct) > 1 and
(text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations)
):
text_without_punct = text_without_punct[0:-1]
        # remove leading unbalanced punctuation
while (
len(text_without_punct) > 1 and
(text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations)
):
text_without_punct = text_without_punct[1:]
self.text_without_punct = text_without_punct
self.is_noun = self.text_without_punct[0].isupper()
n = self.check_numeric()
self.check_date()
try:
if <fim_suffix>n:
n = round(float(n))
if n > 0:
digits = int(math.log10(n)) + 1
elif n == 0:
digits = 1
else:
digits = int(math.log10(-n)) + 2
self.num_digits = digits
if digits == 4 and self.text.replace(",", "") == self.text:
self.is_year = True
self.is_number = False
else:
self.num_digits = 0
except Exception as e:
logging.error(e)
self.num_digits = 0
def check_date(self):
if "/" in self.text or "-" in self.text:
text = self.text.replace("/", "-")
date_patterns = [
"%b-%d",
"%B-%d",
"%B-%d-%y",
"%B-%d-%Y",
"%b-%d-%Y",
"%b-%d-%y",
"%m-%d",
"%m-%d-%y",
"%m-%d-%Y",
]
for pat in date_patterns:
try:
datetime.datetime.strptime(text, pat)
self.is_date_entry = True
return
except ValueError:
pass
else:
self.is_date_entry = False
def check_numeric(self):
word = self.text.lower()
if not word.isalpha():
if word.isprintable():
if not word.isnumeric():
if word.startswith("(") and word.endswith(")"):
word = word[1:-1]
if word.startswith("-"):
self.is_negative = True
word = word[1:]
if word.startswith("$"):
self.is_dollar = True
word = word[1:]
elif word.endswith("$"):
self.is_dollar = True
word = word[0:-1]
elif word.endswith("%"):
self.is_percent = True
word = word[0:-1]
elif word.endswith("m"):
self.is_million = True
elif word.endswith("bn"):
self.is_billion = True
if word.startswith("(") and word.endswith(")"):
word = word[1:-1]
word = word.replace(",", "")
if word.isnumeric() or word.replace(".", "", 1).isnumeric():
self.is_number = True
parts = word.split("-")
if (
len(parts) == 2
and parts[0].isnumeric()
and parts[1].isnumeric()
):
self.is_number_range = True
self.parts = parts
else:
self.is_number = True
if self.is_number:
numeric_part = word
return numeric_part
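# Illustrative usage sketch (hypothetical tokens, not from the original source): Word tags each
# token with the numeric, currency and year flags that the Line heuristics below rely on.
def _example_word_flags():
    dollar = Word("$1,250")  # dollar.is_dollar and dollar.is_number are expected to be True
    year = Word("2019")  # four plain digits are flagged as a year rather than a number
    return dollar.is_dollar, dollar.is_number, year.is_year, year.is_number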
class Line:
def __init__(
self,
line_str,
text_list=[],
style_dict={},
page_details={},
noun_chunk_ending_tokens=[],
):
self.text = line_str.strip()
self.visual_line = VisualLine(text_list, style_dict, page_details)
self.words = []
self.is_independent = False
self.is_header = False
self.is_header_without_comma = False
self.noun_chunks = []
self.quoted_words = quote_pattern.findall(self.text)
self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens}
self.parse_line()
def check_header(self):
# Section X, Article Y, Note 1 etc.
first_word_header = self.first_word.lower() in ["section", "article", "note"]
        # If a certain percentage of words are title words (first letter capitalized)
title_ratio = (
self.title_word_count / self.eff_word_count
if self.eff_word_count > 0
else 1.0
)
# print(self.title_word_count, self.eff_word_count, title_ratio)
# Section 1 is a header but Section 1: Hello 3 is not
has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10
has_header_structure = (
(first_word_header or has_enough_titles) and self.number_count == 1
) or self.numbered_line or self.text.isupper()
# has_header_structure = has_header_structure and self.eff_word_count <
last_word_number = (
self.last_word.lower() in units
or self.last_word_number
and not has_header_structure
)
last_word_date = self.last_word_date and not has_header_structure
# Find lines ending with sentence delimiter. But exclude text like "L.P."
ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None
sentence_structure = self.ends_with_period and not (
has_header_structure and title_ratio > 0.9
) and ends_with_delim
last_letter_is_punctuation = (
self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and
ends_with_delim
)
self.is_header_without_comma = (
not sentence_structure
and not self.has_list_char
and not self.first_char in footnote_types
and has_enough_titles
and not last_word_number
and (
self.number_count == 0
or (has_header_structure and self.number_count <= 1)
)
and not self.has_continuing_chars
and not last_word_date
and self.first_word_title
and not self.last_word_is_stop_word
and not self.is_zipcode_or_po
and not last_letter_is_punctuation
and not "://" in self.text # url pattern
)
self.is_header = self.is_header_without_comma and \
((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True)
def check_ends_with_period(self):
# punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.']
last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."]
self.ends_with_period = self.last_char in ["."] and not last_word_is_title
def check_table_row(self):
if not self.is_header:
value_count = (
self.number_count
+ self.dollar_count
+ self.pct_count
+ self.text.count(" - ")
)
word_symbols = self.word_count - self.dollar_sign_count
if word_symbols == 0:
word_symbols = 1
word_ratio = (
value_count + self.title_word_count + self.date_entry_count
) / word_symbols
self.is_table_row = (
(
(value_count > 0 or self.date_entry_count > 0)
and word_ratio > 0.7
and not self.ends_with_period
and not self.is_zipcode_or_po
)
and not self.last_word_is_stop_word
or ("...." in self.text)
)
else:
self.is_table_row = False
def check_list_item(self):
text = self.text.strip()
self.has_list_char = text[0] in list_types.keys()
# if not self.has_list_char and text[0] in ambiguous_list_chars:
# self.has_list_char = text[1:].strip()[0].isalpha()
self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$"
if self.is_list_item:
self.list_type = list_types[text[0]]
# matches 1.1 1.2.1 1 etc.
def check_numbered_line(self, word):
trunc_word = word
ends_with_parens = word.endswith(")")
number_end_char = word.endswith(".") or ends_with_parens
number_start_char = word.startswith("(")
if number_start_char and not ends_with_parens:
return False
if word[-1] in ["%", "$", ","]:
return False
if number_end_char:
trunc_word = word[:-1]
if number_start_char:
trunc_word = trunc_word[1:]
# To handle scenarios like (ii)(A)
if ")(" in trunc_word:
trunc_word = trunc_word.split(")(")[0]
parts = trunc_word.split(".")
self.integer_numbered_line = False
self.roman_numbered_line = False
self.letter_numbered_line = False
self.dot_numbered_line = False
mixed_list_items = False
max_digits = 2
max_roman = 6
for idx, part in enumerate(parts):
# print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0)
if len(part) <= max_digits:
# (1), (2), (3)
self.integer_numbered_line = part.isdigit() and (
len(parts) > 1 or word.endswith(")")
)
# 1. 2. 3.
self.dot_numbered_line = part.isdigit() and (
len(parts) > 1 or word.endswith(".")
)
# a. b. c. or a) b) c)
# idx > 0 for patterns like 10.a
# a1 b1 c1 etc.
self.letter_numbered_line = (
True
if single_char_pattern.match(part)
and (
(number_end_char and len(part) == 1 and len(parts) == 1)
or multi_char_pattern.sub("", part).isdigit()
or idx > 0
)
else False
)
if len(part) <= max_roman:
# xi, i, iv
self.roman_numbered_line = (
True if roman_number_pattern.match(part) and idx == 0 else False
)
if part.endswith(")") and part[0].isalnum() and "(" in part:
mixed_list_items = True
# else:
# self.integer_numbered_line = False
# A-1
# self.letter_numbered_line = (
# True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False
# )
self.numbered_line = (
self.integer_numbered_line
or self.roman_numbered_line
or self.letter_numbered_line
or self.dot_numbered_line
) and not mixed_list_items
if not self.numbered_line:
break
if self.numbered_line:
self.start_number = trunc_word
self.line_without_number = self.text[len(word) + 1 :]
self.full_number = self.text[:len(word)]
# check if line is part of address
def check_zipcode_or_pobox(self):
# check if line matches format P.O. box xxxxx
pobox = (
self.word_count == 3
and self.last_word_number
and self.first_word.lower() in ["po", "p.o", "p.o."]
)
# check if line is last part of address, matching format "city, state zipcode"
zipcode = (
self.word_count
< 7 # ensure line is standalone address, not part of larger sentence
and (
self.contains_state # line contains comma followed by state name or abbreviation
# line ends in zipcode, with format xxxxx or xxxxx-xxxx
and (
(self.last_word_number or self.last_word[-4:].isdigit())
and (
(len(self.last_word) == 10 and self.last_word[-5] == "-")
or len(self.last_word) == 5
)
)
and not self.ends_with_period
)
)
self.is_zipcode_or_po = pobox or zipcode
def set_line_type(self):
line_type = "para"
if self.is_table_row:
line_type = "table_row"
elif self.is_header:
line_type = "header"
elif self.is_list_item or self.numbered_line:
line_type = "list_item"
else:
line_type = "para"
self.line_type = line_type
def parse_line(self):
self.words = []
self.title_word_count = 0
self.alpha_count = 0
self.list_type = ""
self.integer_numbered_line = False
self.roman_numbered_line = False
self.dot_numbered_line = False
self.numbered_line = False
self.stop_word_count = 0
self.dollar_count = 0
self.pct_count = 0
self.number_count = 0
self.last_word_number = False
self.first_word_title = False
self.letter_numbered_line = False
self.ends_with_hyphen = False
self.last_word_date = False
self.is_reference_author_name = False
self.date_entry_count = 0
self.last_word_is_stop_word = False # self.last_word in self.stopwords
self.hit_colon = False
self.is_zipcode_or_po = False
self.contains_state = False
self.addresses = []
# todo - this is a stopgap solution, need to make it more efficient
tokens = self.text.split()
self.length = len(self.text)
self.word_count = len(tokens)
self.dollar_sign_count = tokens.count("$")
last_idx = self.word_count - 1
first_alpha_found = False
prev_token_comma = False
self.eff_length = 0
single_letter_word_count = 0
noun_chunk_buf = []
if self.length == 0:
return
for idx, token in enumerate(tokens):
if token in unicode_list_types.keys():
token = unicode_list_types[token]
if token.__contains__(":"):
self.hit_colon = True
# remove punctuation unless (word) or unless it is the first token or if it has colon
last_char = token[-1]
# remove punctuation unless (word) or unless it is the first token
if (
(token[-1] in string.punctuation or token[-1] in end_quotations)
and not (token[0] in string.punctuation or token[0] in start_quotations)
and (not idx == 0 or token[-1] == ":")
):
token = token[0:-1]
if len(token) == 0:
continue
# if prev token contained comma, check if current token is state name
if prev_token_comma and (
token.lower() in states or token.lower() in states_abbreviations
):
self.contains_state = True
prev_token_comma = False
if prev_token_comma:
prev_token_comma = False
if last_char == ",":
prev_token_comma = True
if idx == 0 and not token.lower() == "i" and not token.lower() == "a":
self.check_numbered_line(token)
if token.istitle() or token.isupper(): # and not self.hit_colon:
self.title_word_count = self.title_word_count + 1
if token.isalpha():
# if not self.hit_colon:
self.alpha_count = self.alpha_count + 1
if not first_alpha_found:
first_alpha_found = True
if idx == 0:
self.first_word_title = token[0].isupper()
word = Word(token)
if word.is_number:
self.number_count = self.number_count + 1
if idx == last_idx:
self.last_word_number = True
if word.is_date_entry:
self.date_entry_count += 1
if idx == last_idx:
self.last_word_date = True
if word.is_dollar:
self.dollar_count = self.dollar_count + 1
if idx == last_idx:
self.last_word_number = True
if word.is_percent:
self.pct_count = self.pct_count + 1
if idx == last_idx:
self.last_word_number = True
self.eff_length += word.length
if word.length == 1:
single_letter_word_count += 1
if word.is_stop_word:
if not self.hit_colon:
self.stop_word_count = self.stop_word_count + 1
if idx == last_idx and len(token) != 1 and not token.isupper():
self.last_word_is_stop_word = True
if word.is_noun or word.text == "&":
noun = word.text_without_punct
prev_word = self.words[-1] if len(self.words) > 0 else None
if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf:
noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway
if noun.endswith("'s"):
noun = noun[0:-2]
noun_chunk_buf.append(noun)
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
elif (
"".join([x.lower() for x in noun if x not in {".", ","}])
in self.noun_chunk_ending_tokens
):
noun_chunk_buf.append(noun)
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
else:
noun_chunk_buf.append(noun)
elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]:
noun_chunk_buf.append(word.text_without_punct)
elif len(noun_chunk_buf):
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
self.words.append(word)
if len(noun_chunk_buf) > 0:
self.noun_chunks.append(" ".join(noun_chunk_buf))
self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks))))
self.first_word = tokens[0]
self.last_word = tokens[-1]
self.last_char = self.text[-1]
self.ends_with_period = self.last_char == "."
self.ends_with_comma = self.last_char == ","
self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "."
self.eff_word_count = self.alpha_count - self.stop_word_count
self.check_ends_with_period()
self.first_char = self.text[0]
self.has_continuing_chars = not self.numbered_line and (
self.first_char.islower() or self.first_char in continuing_chars
)
self.last_continuing_char = self.last_char in continuing_chars
self.check_zipcode_or_pobox()
self.check_list_item()
self.check_header()
self.check_table_row()
self.separate_line = (
self.is_header
or self.is_table_row
or self.is_list_item
or self.is_zipcode_or_po
)
self.is_list_or_row = self.is_table_row or self.is_list_item
self.is_header_or_row = (
self.is_header or self.is_table_row or self.is_zipcode_or_po
)
self.ends_with_abbreviation = self.ends_with_period and (
(self.last_word.find(".") != len(self.last_word) - 1)
or self.last_word.lower() in abbreviations
or len(self.last_word) <= 3
)
self.incomplete_line = not self.is_header_or_row and (
not self.ends_with_period
or self.ends_with_abbreviation
or self.end_with_period_single_char
)
self.continuing_line = self.has_continuing_chars and not self.separate_line
self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8
self.set_line_type()
if self.is_header or self.is_header_without_comma:
if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2:
self.is_reference_author_name = True
self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list
# print(self.separate_line)
# self.continuing_line = not self.separate_line and
def to_json(self):
json_lp = dict(self.__dict__)
del json_lp["visual_line"]
words = []
for word in self.words:
words.append(word.__dict__)
json_lp["words"] = words
return json_lp
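# Illustrative usage sketch (hypothetical input): Line runs every heuristic above on a single piece
# of text; a bulleted line, for example, is expected to be typed as a "list_item" with a "circle"
# list char. The exact flags depend on the heuristics and word lists defined in this module.
def _example_line_classification():
    line = Line("• Crude oil and natural gas")
    return line.line_type, line.list_type, line.is_list_item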
class VisualLine:
def __init__(self, text_list=[], style_dict={}, page_stats={}):
self.text_list = text_list
self.start_x = None
self.start_y = None
self.end_x = None
self.end_y = None
self.fs = None
self.fw = None
self.start_fs = None
self.end_fs = None
self.diff_prev_y = None
self.diff_next_y = None
self.is_comparably_sized = False
self.is_comparably_bolded = False
self.is_prev_space_smallest = False
self.is_next_space_smallest = False
self.wrapped_page = False
self.text = " ".join(self.text_list)
if style_dict:
self.start_x = style_dict["start_x"][0]
self.start_y = style_dict["start_y"][0]
self.end_x = style_dict["end_x"][-1]
self.end_y = style_dict["end_y"][-1]
self.fs = style_dict["line_fs"][0]
self.fw = style_dict["line_fw"][0]
self.diff_prev_y = style_dict["diff_prev_y"][0]
self.diff_next_y = style_dict["diff_next_y"][0]
self.font_family = (
style_dict["font_family"][0] if len(style_dict["font_family"]) else None
)
self.font_style = (
style_dict["font_style"][0] if len(style_dict["font_style"]) else None
)
self.min_x = (
self.start_x
) # these variables are adjustable during line joins for line width
self.max_x = self.end_x
self.start_x_list = style_dict["start_x"] # joined ents
self.end_x_list = style_dict["end_x"] # joined ents
self.start_x_list_single_ent = style_dict["start_x_list"][0]
self.end_x_list_single_ent = style_dict["end_x_list"][0]
self.mode_fs = mode_of_list(style_dict["line_fs"])
self.tab_count = 0
# calculates tabs for when tika misses word split
if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent):
self.start_end_list = list(
zip(self.start_x_list_single_ent, self.end_x_list_single_ent),
)
for word_x, next_word_x in zip(
self.start_end_list[:-1],
self.start_end_list[1:],
):
word_start_x, word_end_x = word_x
next_word_start_x, next_word_end_x = next_word_x
word_distance = next_word_start_x - word_end_x
if word_distance > 20:
self.tab_count += 1
else:
self.start_end_list = []
self.tab_count_join = 0 # tab count after join in ptolines
# calculates tabs for when tika misses word split
if len(self.start_x_list) == len(self.end_x_list):
self.start_end_list_join = list(
zip(self.start_x_list, self.end_x_list),
)
for word_x, next_word_x in zip(
self.start_end_list_join[:-1],
self.start_end_list_join[1:],
):
word_start_x, word_end_x = word_x
next_word_start_x, next_word_end_x = next_word_x
word_distance = next_word_start_x - word_end_x
if word_distance > 20:
self.tab_count_join += 1
else:
self.start_end_list_join = []
if len(self.text.split()) == 2 and self.tab_count == 1:
self.text_list = self.text.split()
# Count tabs in text list, Eventually make it a function of font size
self.start_fs = round(style_dict["start_fs"][0], 1)
self.end_fs = round(style_dict["end_fs"][-1], 1)
self.compute_visual_features(page_stats)
def compute_visual_features(self, page_stats):
# compute font size relative to most common font
font_sizes_mode = page_stats["mode_fs"]
if self.fs > (4 / 3) * font_sizes_mode:
self.is_comparably_sized = True
else:
self.is_comparably_sized = False
# compute font weight relative to 600.0 which has generally
# been observed to correspond to bolding of some sort
font_weights_mode = page_stats["mode_fw"]
if font_weights_mode >= 600.0:
self.is_comparably_bolded = False
elif self.fw > 600.0:
self.is_comparably_bolded = True
# compare line height for similar type (same font) lines
if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2:
for k, v in page_stats["fs_and_diff_prev_y"].items():
if k == self.fs and 0 <= v < self.diff_prev_y:
break
else:
self.is_prev_space_smallest = True
if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2:
for k, v in page_stats["fs_and_diff_next_y"].items():
if k == self.fs and 0 <= v < self.diff_next_y:
break
else:
self.is_next_space_smallest = True
def should_join_table(self, next_line):
"""
Check if next line should be joined as a tr. This makes no assumption if the current line is a table
"""
# check list of spaced words
curr_line_ents = len(self.text_list)
next_line_ents = len(next_line.text_list)
ent_match = (
curr_line_ents == next_line_ents and curr_line_ents >= 2
) # tr should have at least two elements
# compare alignment of elements in both lists
if ent_match:
return
return False
def should_join_para(self):
return False
def should_join_header(self):
return False
def __str__(self):
output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest},"
output_str += f"\nfont_style = {self.font_style}"
return output_str
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor/processors.py<fim_prefix>import logging
import re
from collections import Counter
from collections import defaultdict
from . import formatter
from . import line_parser
from . import patterns
from nlm_ingestor.ingestor_utils import spell_utils
from nlm_ingestor.ingestor_utils.utils import sent_tokenize
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
su = spell_utils.SpellUtil()
def stem(line):
line = line.replace("'s", "")
line = line.replace("’s", "")
return line
def check_parentheses(text):
count = 0
for i in text:
if i == "(":
count += 1
elif i == ")":
count -= 1
return count == 0
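# Illustrative usage sketch (hypothetical strings): check_parentheses only verifies that opening
# and closing counts cancel out, so "see note (a)" passes while "see note (a" does not.
def _example_check_parentheses():
    return check_parentheses("see note (a)"), check_parentheses("see note (a")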
def nlm_tokenize(line):
# print(line)
tokens = []
if not line:
line = ""
line = line.lower()
trans_table = line.maketrans("-/", " ")
line = line.translate(trans_table)
line = line.translate(str.maketrans("", "", "�\\(*,.?•\\➢ƒ–\\)'\"—"))
# line = patterns.num_unit.sub(r"100 \1", line)
line = patterns.num_unit.sub(r"", line)
line = stem(line)
words = line.split()
for word in words:
if (
not word.isdigit()
and not word.endswith("%")
and not word.startswith("$")
and not word.endswith("$")
):
tokens.append(word)
if len(tokens) == 0:
tokens.append("unknown")
return tokens
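# Illustrative usage sketch (hypothetical text): nlm_tokenize lowercases the text, strips
# punctuation, and drops pure numbers, percentages and dollar amounts. Assuming patterns.num_unit
# leaves plain words untouched, the call below should reduce to
# ["threat", "of", "substitute", "products"].
def _example_nlm_tokenize():
    return nlm_tokenize("Threat of Substitute Products")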
# make sure that there is at least one word which is greater than two characters
def find_floating_chars(line):
words = line.split(" ")
for word in words:
if len(word) > 2:
return False
return True
def is_table_row(line):
line = line_parser.Line(line)
return line.is_table_row
def should_skip(line, xml=False):
return len(line) <= 2 if not xml else len(line) == 0
def clean_lines(lines, xml=False):
result = []
running_line = ""
line_buffer = []
line_type = "para"
header_block_idx = -1
block_idx = 0
line_set = set()
for line_str in lines:
# print(line_str)
line_str = clean_line(line_str)
if should_skip(line_str, xml=xml):
continue
line_without_numbers = re.sub(r"\d+", "", line_str)
if line_without_numbers in line_set:
continue
else:
line_set.add(line_without_numbers)
curr_line = line_parser.Line(line_str)
        # this converts strings like 'e x e c u t i v e summary' to 'executive summary'
if not xml and curr_line.has_spaced_characters:
line_str = fix_spaced_characters(line_str)
curr_line = line_parser.Line(line_str)
if len(line_buffer) > 0:
            # find out if the previous line was a discontinuous line
prev_line = line_buffer[-1]
logger.debug("========")
logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n")
logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n")
# keep connecting lines as long as they seem incomplete
is_incomplete = prev_line.incomplete_line or (
len(line_buffer) > 1 and not prev_line.ends_with_period
)
logger.debug(
f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}",
)
if<fim_suffix> (
is_incomplete
and not (curr_line.is_list_or_row or curr_line.line_type == "list_item")
) or curr_line.continuing_line:
logger.debug("connecting..")
running_line = formatter.connect(running_line, curr_line.text)
line_buffer.append(curr_line)
# if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers
if not line_type == "list_item":
line_type = "para"
else: # commit the line and start a new line
            # remove different types of bulleted list markers (for better formatting) but do not touch numbered lines
logger.debug("starting new line..")
# if line_type == "list_item":
# running_line = running_line[1:].lstrip()
if line_type == "header":
header_block_idx = block_idx
block = {
"block_idx": block_idx,
"block_text": running_line,
"block_type": line_type,
"text_group_start_idx": -1,
"block_list": [],
"header_block_idx": header_block_idx,
"level": 0,
}
result.append(block)
block_idx = block_idx + 1
running_line = curr_line.text
line_buffer = [curr_line]
line_type = curr_line.line_type
logger.debug("========")
else:
running_line = curr_line.text
line_type = curr_line.line_type
line_buffer = [curr_line]
if line_type == "list_item" and running_line[0] in "�\\*,.?•\\➢ƒ–\\'\"—":
running_line = running_line[1:].lstrip()
block = {
"block_idx": block_idx,
"block_text": running_line,
"block_type": line_type,
"text_group_start_idx": -1,
"block_list": [],
"header_block_idx": header_block_idx,
"level": 0,
}
result.append(block)
return result
def line_list_check(prev_line, curr_line, list_char):
# if prev_line is list_item and list_char matches curr_line
if list_char == curr_line.text[0] and list_char not in ["”", "'", '"', "("]:
return True
# same char is alpha
if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha():
if len(prev_line.text) >= 2 and prev_line.text[1].isupper():
# spell check first word
first_word = prev_line.text.split(" ")[0]
first_word = first_word.replace("'", "")
correct_word = su.segment(first_word)
if first_word[1:] == correct_word:
return True
# same char is not alpha but not digit
if prev_line.text[0] == curr_line.text[0] and not (
prev_line.text[0].isalpha()
or prev_line.text[0].isdigit()
or list_char not in ["”", "'", '"', "("]
):
return True
return False
def should_join_table(prev_line, curr_line, ents_aligned):
"""
Check if next line should be joined as a tr. This makes no assumption if the current line is a table
"""
# print()
# print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list)
# check list of spaced words
curr_line_ents = len(prev_line.visual_line.text_list)
next_line_ents = len(curr_line.visual_line.text_list)
ent_match = (
curr_line_ents == next_line_ents and curr_line_ents >= 2
) # tr should have at least two elements
# print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count)
tab_match = (
prev_line.visual_line.tab_count == curr_line.visual_line.tab_count
and curr_line.visual_line.tab_count > 0
)
# casing should also be the same
same_case = (
prev_line.text[0].islower() == curr_line.text[0].islower()
or prev_line.text[0].isupper() == curr_line.text[0].isupper()
)
colon_check = (
prev_line.hit_colon
and curr_line.hit_colon
and prev_line
and same_case
and not prev_line.incomplete_line
)
# if prev_line.hit_colon and curr_line.hit_colon:
# print()
# print("colon check")
# print(prev_line.visual_line.text_list)
# print(curr_line.visual_line.text_list)
# col_check
# print(tab_match, ent_match, colon_check)
tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count
return (
(tab_match and ent_match)
or colon_check
or (ents_aligned and ent_match and tab_check)
)
def check_page_spacing(prev_line, curr_line, spacing_dict):
# print("^"*50)
# print("checking page stats")
# print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text)
# print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text)
# print()
diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y)
# find best fs reference
prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs}
curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs}
same_fs = prev_line_fs.intersection(curr_line_fs)
fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs
min_check = (
spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None
)
max_check = (
spacing_dict[(fs, diff_top + 1)] if (fs, diff_top + 1) in spacing_dict else None
)
normal_check = (fs, diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3
if min_check or normal_check or max_check:
# get all fs in spacing dict
# see if the diff top is a min
# print("checking space dict")
distance_list = []
for val in spacing_dict:
if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2:
distance_list.append((val, val[1]))
# print(distance_list)
val = min(distance_list) if len(distance_list) else []
if len(val):
join_fs, join_top = val[0]
if val[0] == (fs, diff_top): # or close
# print("SHOULDJOIN")
return True
elif (
join_fs == fs
and ((diff_top - 1) == join_top)
or ((diff_top + 1) == join_top)
):
return True
return False
def compute_overlap(
start_x0: float,
end_x0: float,
start_x1: float,
end_x1: float,
divide_by_min=True,
) -> float:
"""
Computes the % of intersection (overlap) of two lines w.r.t. the shortest line
"""
width_x0 = abs(end_x0 - start_x0)
width_x1 = abs(end_x1 - start_x1)
if start_x0 <= start_x1 <= end_x0:
intersect = min(abs(end_x0 - start_x1), width_x1)
elif start_x0 <= end_x1 <= end_x0:
intersect = min(abs(end_x1 - start_x0), width_x1)
elif start_x1 <= start_x0 <= end_x0 <= end_x1:
intersect = abs(end_x0 - start_x0)
else:
intersect = 0.0
if divide_by_min:
intersect /= min(width_x0, width_x1) + 1e-5
else:
intersect /= max(width_x0, width_x1) + 1e-5
return intersect
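# Worked example (hypothetical coordinates): the spans [0, 10] and [5, 20] share 5 units, so the
# overlap is about 0.5 of the shorter span and about 0.33 of the longer one.
def _example_compute_overlap():
    by_min = compute_overlap(0.0, 10.0, 5.0, 20.0, divide_by_min=True)  # ~0.5
    by_max = compute_overlap(0.0, 10.0, 5.0, 20.0, divide_by_min=False)  # ~0.33
    return by_min, by_max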
def compute_overlap_top_bottom(
start_x0: float,
end_x0: float,
start_x1: float,
end_x1: float,
) -> float:
"""
This is different from the above function.
Finds percentage overlap of top to bottom.
Score of 100% is possible doesn't reference the shortest line
"""
width_x1 = abs(end_x1 - start_x1)
if width_x1 == 0:
return 0.0
if start_x0 <= start_x1:
# measure from left to right
if end_x1 <= end_x0:
# if start and end both less, full in subset
return 1.0
return (end_x1 - start_x0) / width_x1
else:
# measure from bottom start
if end_x1 <= start_x0:
return 0.0
return (end_x1 - start_x0) / width_x1
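# Worked example (hypothetical coordinates): when the bottom span [20, 60] sits entirely inside the
# top span [0, 100], the overlap is reported as 1.0 (100%).
def _example_compute_overlap_top_bottom():
    return compute_overlap_top_bottom(0.0, 100.0, 20.0, 60.0)  # expected 1.0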
def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1):
"""
This is different from the above function.
Finds percentage overlap of top to bottom.
Score of 100% is possible doesn't reference the shortest line
"""
# print(start_x0, end_x0)
# print(start_x1, end_x1)
if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line
# print()
# print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0))
return (end_x1 - start_x1) / (end_x0 - start_x0)
# other conditions
# elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line
# return
# else: #to the right of bottom line
return 1.0
# header check for lines with similar font
def visual_header_check(prev_line, curr_line, same_font):
# check top overlap (small) if the font size is bigger
# print()
# print("visual_header check:")
# print("prev", prev_line.text)
# print("checking", curr_line.text)
# top also has to be higher
# print("prev_line.visual_line.start_y, prev_line.visual_line.end_y")
# print(prev_line.visual_line.start_y, prev_line.visual_line.end_y)
# print(prev_line.visual_line.start_y, curr_line.visual_line.start_y)
if prev_line.visual_line.wrapped_page:
return False
if prev_line.visual_line.start_y < curr_line.visual_line.start_y:
prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x
curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x
# print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x")
# print(prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x)
# print("curr_line.visual_line.min_x, curr_line.visual_line.max_x")
# print(curr_line.visual_line.min_x, curr_line.visual_line.max_x)
# print("prev_line_width / curr_line_width")
# print(prev_line_width / curr_line_width)
# print("prev_line_width, curr_line_width")
# print(prev_line_width, curr_line_width)
if curr_line_width == 0:
return False
# print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x))
if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x):
if round(prev_line_width) == round(curr_line_width):
# print()
# print("NOT A HEADER1")
return False
offset = 0
# print(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
# print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x)
if prev_line.visual_line.min_x <= curr_line.visual_line.min_x:
offset = curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset
# print("(prev_line_width - offset) / curr_line_width")
# print((prev_line_width - offset) / curr_line_width)
overlap_percentage = (prev_line_width - offset) / curr_line_width
different_font_style = (
prev_line.visual_line.fw != curr_line.visual_line.fw
            or prev_line.visual_line.font_style != curr_line.visual_line.font_style
or prev_line.visual_line.fs > curr_line.visual_line.fs
)
if (
overlap_percentage < 0.3
or (different_font_style and overlap_percentage < 0.6)
or (prev_line.line_type == "header" and different_font_style)
# or (prev_line.is_header and different_font_style)
):
# print("HEADER INDENT", prev_line.is_header)
# print("overlap rule::", (prev_line_width - offset) / curr_line_width)
# print(True)
return True
# print(False)
# print()
# print("NOT A HEADER")
return False
def visual_header_from_stats(prev_line, curr_line, page_stats):
prev_fs = prev_line.visual_line.fs
curr_fs = curr_line.visual_line.fs
median_val = round(page_stats["median_fs"])
max_val = round(max(page_stats["fs_list"]))
max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if max_val != 0 else True
prev_fs_diff = round(prev_fs - median_val)
curr_fs_diff = (
round(curr_fs - median_val) if round(curr_fs - median_val) else 0.8
) # curr_fs is the median
varied_set = len(set(page_stats["fs_list"])) >= 4
rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]])
unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"])
prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff
# print("prev_fs, curr_fs", prev_fs, curr_fs)
# print("unique text")
# print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) )
# print("visual_header check", len(set(page_stats["fs_list"])))
# print("varied_set", varied_set, "unique_text", unique_text)
# print(rounded_fs_count)
# print()
# close from max or far enough from median
bigger_text = max_val_diff or (
prev_curr_ratio_from_median > 2
) # TODO text must also be relatively uncommon
if varied_set and (unique_text <= 0.08):
if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3:
# print(max_val_diff)
# print(prev_fs, prev_line.text)
# print(curr_fs, curr_line.text)
# print()
return True
# header join
if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1):
# print(max_val_diff)
# print(prev_fs, prev_line.text)
# print(curr_fs, curr_line.text)
# print()
return True
return False
# def visual_clean_lines(lines, page_stats={}, page_info_dict={}):
def check_tr_alignment(prev_line, curr_line):
# print("-=" * 50)
# print("check_tr_alignment!")
# print(prev_line.text)
# print(curr_line.text)
# print()
prev_ents = len(prev_line.visual_line.text_list)
curr_ents = len(curr_line.visual_line.text_list)
prev_positions = prev_line.visual_line.start_x_list
curr_positions = curr_line.visual_line.start_x_list
prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent
curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent
# print(prev_line_start_ents)
# print(curr_line_start_ents)
same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1
if len(prev_line_start_ents) == len(curr_line_start_ents):
prev_positions = prev_line_start_ents
curr_positions = curr_line_start_ents
if len(prev_line_start_ents) == len(curr_positions) and len(
prev_line_start_ents,
) != len(
prev_positions,
): # joined p_tags
prev_positions = prev_line_start_ents
if not same_ents:
# print("check_tr_alignment False1")
# print(prev_ents, curr_ents)
return False
# print("CHECKING POSITIONS")
# print(prev_positions)
# print(curr_positions)
for p_x, c_x in zip(prev_positions, curr_positions):
p_x = round(p_x)
c_x = round(c_x)
if abs(p_x - c_x) > 100:
# print("False")
# print("check_tr_alignment False3")
return False
# print("check_tr_alignment True")
return True
def check_layout(prev_line, curr_line, prev_above_curr):
prev_line_width = range(
int(prev_line.visual_line.min_x),
int(prev_line.visual_line.max_x),
)
# weird edge case
if not prev_line_width:
prev_line_width = range(
int(prev_line.visual_line.max_x),
int(prev_line.visual_line.min_x),
)
curr_line_width = range(
int(curr_line.visual_line.min_x),
int(curr_line.visual_line.max_x),
)
prev_line_width = set(prev_line_width)
prev_curr_overlap = prev_line_width.intersection(curr_line_width)
if prev_curr_overlap and not prev_above_curr:
# print(prev_line.text)
# print(curr_line.text)
# print("misplaced text group")
# print()
return True
return False
def order_blocks(blocks):
block_group_dict = defaultdict(list)
for idx, block in enumerate(blocks):
# print(idx, "block-group", block["group_id"], block["block_type"], block['block_text'])
group_id = block["group_id"]
block_group_dict[group_id].append(block)
block_group_list = [] # list that holds tuples (group_id, y_pos)
for block_group_id in block_group_dict:
block_group_list.append(
(block_group_id, block_group_dict[block_group_id][0]["y"]),
) # append starting y position of group
block_group_list = sorted(
block_group_list,
key=lambda x: x[1],
) # sort block groups by y position
# get list of ordered block group keys
ordered_blocks = []
for block_group_id, y in block_group_list:
ordered_blocks += block_group_dict[block_group_id]
# for b in original_blocks:
# re-index blocks and headers based off of new ordering
header_idx = 0
for idx, block in enumerate(ordered_blocks):
block["block_idx"] = idx
if block["block_type"] == "header":
header_idx = idx
ordered_blocks[idx]["header_block_idx"] = header_idx
return ordered_blocks
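# Illustrative usage sketch (hypothetical blocks): order_blocks sorts visual groups by their
# starting y position and re-indexes block_idx / header_block_idx accordingly, so the header group
# near the top of the page ends up first.
def _example_order_blocks():
    blocks = [
        {"group_id": 1, "y": 300.0, "block_type": "para", "block_text": "body text"},
        {"group_id": 0, "y": 50.0, "block_type": "header", "block_text": "Title"},
    ]
    ordered = order_blocks(blocks)
    return [b["block_text"] for b in ordered]  # expected ["Title", "body text"]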
def visual_clean_lines(
lines,
page_stats={},
page_info_dict={},
page_idx=0,
line_set={},
):
page_blocks = []
header_block_idx = -1
block_idx = 0
# block_idx = page_idx
style_dict = {}
join_font_spacing = False
prev_line = None
text_list = []
prev_ents = 0
curr_ents = 0
is_incomplete = False
colon_rule = False
text_group_start = True
text_group_start_idx = 0
prev_line = None
next_line = None
# for idx, line in enumerate(lines[12:14]):
sentence_visual_end = False
group_id = 0
for idx, line in enumerate(lines):
# print(idx)
line_str, style_dict, text_list = (
line["text"],
line["style"],
line["text_list"],
)
line_str = " ".join(line_str.split())
if should_skip(line_str):
continue
if line_str in line_set:
continue
if len(line_str.split()) > 8:
line_set.add(line_str)
curr_line = line_parser.Line(
line_str=line_str,
style_dict=style_dict,
text_list=text_list,
page_details=page_stats,
)
if prev_line is None:
# initialize memory of previous line.
# this will update with join decisions
list_char = ""
if curr_line.line_type == "list_item":
list_char = curr_line.text[0]
curr_line.text = curr_line.text[1:].lstrip()
if curr_line.line_type == "header":
header_block_idx = block_idx
block = {
"block_idx": block_idx,
"block_text": curr_line.text,
"block_type": curr_line.line_type,
"header_block_idx": header_block_idx,
"block_group": [curr_line.visual_line.text_list],
"list_char": list_char,
"fs": curr_line.visual_line.start_fs,
"text_group_start_idx": text_group_start_idx,
"block_list": curr_line.visual_line.text_list,
"line": curr_line,
"y": curr_line.visual_line.start_y,
"group_id": group_id,
}
prev_line = curr_line
block_idx += 1
# if (idx <= 3) or (idx >= len(lines) - 3):
# line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip()
# if line_without_numbers:
# # track block_idx for de-duplication
# line_set[line_without_numbers].append((page_idx, block_idx))
page_blocks.append(block)
continue
# print("--" * 50)
# print(prev_line.line_type, "\n", prev_line.text)
# print(prev_ents)
# print(prev_line.visual_line.fw_list)
# print(prev_line.visual_line.font_family)
# print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text)
# print(prev_line.visual_line.mode_fs)
# print(curr_line.line_type, "\n", curr_line.text)
# print(curr_ents)
# print()
# print(curr_line.visual_line.font_family)
# print(curr_line.visual_line.mode_fs)
# print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text)
if (
len(prev_line.text) > 1
and len(curr_line.text) > 1
and prev_line.text[:2] == curr_line.text[:2]
and prev_line.text[1] == " "
and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit())
and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha())
):
curr_line.line_type = "list_item"
curr_line.is_list_item = True
curr_line.is_list_or_row = True
if page_blocks[-1]["block_type"] != "list_item":
page_blocks[-1]["block_type"] = "list_item"
page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0]
page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][
1:
].lstrip()
same_start_fs = (
abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5
)
same_end_fs = (
abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5
)
same_end_start_fs = (
abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5
)
prev_above_curr = (
True
if prev_line.visual_line.end_y < curr_line.visual_line.start_y
else False
)
y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y
top_overlap = compute_overlap_top_bottom(
start_x0=prev_line.visual_line.start_x,
end_x0=prev_line.visual_line.end_x,
start_x1=curr_line.visual_line.start_x,
end_x1=curr_line.visual_line.end_x,
)
bottom_overlap = compute_bottom_top_overlap(
start_x0=prev_line.visual_line.start_x,
end_x0=prev_line.visual_line.end_x,
start_x1=curr_line.visual_line.start_x,
end_x1=curr_line.visual_line.end_x,
)
prev_overlap_curr = True if bottom_overlap or top_overlap else False
use_visual_join = True if prev_above_curr and prev_overlap_curr else False
if not use_visual_join and prev_line.incomplete_line:
join_font_spacing = True
if not (prev_line.is_table_row or curr_line.is_table_row):
if page_stats["n_lines"] <= 3:
join_font_spacing = True
else:
join_font_spacing = check_page_spacing(
prev_line,
curr_line,
page_stats["fs_and_diff_next_y"],
)
# if the font is different and font-family is different
different_font_family = (
curr_line.visual_line.font_family != prev_line.visual_line.font_family
)
different_common_fs = (
prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs
and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs
)
different_font = (
different_font_family and different_common_fs and not join_font_spacing
)
# start and end characters are same font or the mode of fonts of both lines is the same
same_font = (
(prev_line.visual_line.fs == curr_line.visual_line.fs)
or (same_start_fs and same_end_fs)
or same_end_start_fs
or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs
) and not different_font
prev_ents = (
len(prev_line.visual_line.text_list)
if not prev_line.line_type == "list_item"
else 0
)
curr_ents = (
len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0
)
ents_aligned = check_tr_alignment(prev_line, curr_line)
is_incomplete_sent = (
prev_line.incomplete_line
and not prev_line.ends_with_period
or prev_line.ends_with_comma
)
# logic using line after curr
if idx + 1 < len(lines):
            # this is inefficient as line_parser is called twice,
# once for next_line and once for curr_line.
next_line = lines[idx + 1]
# print("NEXT LINE\n", next_line['text'])
next_line_str, next_style_dict, next_text_list = (
next_line["text"],
next_line["style"],
next_line["text_list"],
)
next_line = line_parser.Line(
line_str=next_line_str,
style_dict=next_style_dict,
text_list=next_text_list,
page_details=page_stats,
)
# if the last line was not a table, check if the next line is a table to avoid single tr
if prev_line.line_type != "table_row" and not ents_aligned:
# check if the next line is a table and matches curr_line
next_line_tr = next_line.line_type == "table_row" or should_join_table(
curr_line,
next_line,
False,
)
if not next_line_tr and curr_line.line_type == "table_row":
curr_line.line_type = "para"
# if the next line is joinable by visual stats but prev and curr are not
# don't join the line (only true by x-span check and y is below for prev cur)
# if this is not true ignore the rule
prev_not_above_next = (
next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y
)
next_line_join = False
if next_line and check_layout(prev_line, next_line, prev_not_above_next):
next_line_join = check_page_spacing(
curr_line,
next_line,
page_stats["fs_and_diff_next_y"],
)
# if the prev line is not visually joinable and the curr_next is
# make sure the prev_line doesn't join the curr_line
curr_next_visual_join = not join_font_spacing and next_line_join
# print()
# print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line")
# print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line)
# print("join_font_spacing:,", join_font_spacing)
is_incomplete = (
is_incomplete_sent
or (join_font_spacing and not sentence_visual_end)
or curr_line.continuing_line
)
# print("is_incomplete", is_incomplete)
has_overlap_with_min = (
compute_overlap(
curr_line.visual_line.start_x,
curr_line.visual_line.end_x,
prev_line.visual_line.start_x,
prev_line.visual_line.end_x,
divide_by_min=True,
)
> 0.7
)
is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0
is_visually_apart = (has_overlap_with_min and not is_below) or (
not has_overlap_with_min and is_below
)
above_bold_below_not = (
prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0
)
has_overlap_with_max = (
compute_overlap(
curr_line.visual_line.start_x,
curr_line.visual_line.end_x,
prev_line.visual_line.start_x,
prev_line.visual_line.end_x,
divide_by_min=False,
)
> 0.3
)
is_not_header_over_para = True
if (
above_bold_below_not
and not has_overlap_with_max
and prev_line.line_type == "header"
and not prev_line.incomplete_line
):
is_not_header_over_para = False
# print("header over para check")
# print("""above_bold_below_not
# and not has_overlap_with_max
# and prev_line.line_type == "header"
# """)
# print(above_bold_below_not)
# print(has_overlap_with_max, j)
# print(prev_line.line_type == "header")
# print()
# print(is_not_header_over_para)
###########
# List item
if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]):
prev_line.line_type = "list_item"
curr_line.line_type = "list_item"
curr_line.is_list_item = True
# change prev_line to list item
if page_blocks[-1]["block_type"] != "list_item":
page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0]
page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][
1:
].lstrip()
page_blocks[-1]["block_type"] = "list_item"
close_text_y = (
curr_line.visual_line.start_y
- curr_line.visual_line.mode_fs
- prev_line.visual_line.start_y
- prev_line.visual_line.mode_fs
) <= 0
aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x
title_text = False
if len(lines) < 10:
title_text = top_overlap == 1.0 and close_text_y and aligned_text
visual_header = visual_header_check(prev_line, curr_line, same_font)
list_item_rule = curr_line.has_list_char or (
curr_line.numbered_line
and not (
(prev_line.incomplete_line and curr_line.continuing_line)
or join_font_spacing
)
)
last_2_block_tr = False
if len(page_blocks) >= 2:
last_block_tr = (
page_blocks[-1]["block_type"] == "table_row"
and page_blocks[-2]["block_type"] == "table_row"
)
if not last_block_tr and curr_line.line_type == "para":
# check to join
if prev_line.incomplete_line and curr_line.continuing_line:
last_2_block_tr = True
no_space_join = prev_line.ends_with_period and curr_line.text[0] != " "
visual_header_by_stats = visual_header_from_stats(
prev_line,
curr_line,
page_stats,
)
header_join = False
common_list = curr_line.has_list_char or prev_line.has_list_char
if (
visual_header_by_stats
and curr_line.incomplete_line
and same_font
and not (prev_line.is_table_row or curr_line.is_table_row or common_list)
):
header_join = True
# print("LINEJOIN CHECK")
# print("positive\n", "*" * 10)
# print(f"\nsame_font:{same_font}",
# f"\nis_incomplete:{is_incomplete}",
# f"\nis_not_header_over_para:{is_not_header_over_para}")
# print("join_font_spacing", join_font_spacing)
# print("header join", header_join)
# print()
# print("negative\n", "*" * 10)
# print(f"\nis_visually_apart:{is_visually_apart}",
# f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}",
# f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}",
# f"\ncurr_line table {curr_line.line_type == 'table_row'}",
# f"\ncurr_line list {curr_line.is_list_item}",
# f"\nvisual_header {visual_header}",
# f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}')
if (
same_font
and not should_join_table(prev_line, curr_line, ents_aligned)
and not (curr_line.line_type == "table_row" or list_item_rule)
and not (prev_line.line_type == "table_row" and not last_2_block_tr)
and is_incomplete
and not curr_next_visual_join # is_visually_apart
and not visual_header
or not check_parentheses(prev_line.text)
and is_not_header_over_para
and not no_space_join
or title_text
or header_join
):
# print("JOIN")
if not is_visually_apart and bottom_overlap < 0.5:
# this would signify end of paragraph
sentence_visual_end = True
else:
sentence_visual_end = False
if page_stats["n_lines"] <= 3:
page_blocks[-1]["block_type"] = "header"
elif (
not prev_line.line_type == "list_item"
): # and not curr_line.visual_line.is_header:
page_blocks[-1]["block_type"] = "para"
new_text = formatter.connect(
prev_line.text.rstrip(),
curr_line.text.lstrip(),
)
new_text_list = (
prev_line.visual_line.text_list + curr_line.visual_line.text_list
)
# print("Max ex min ex assignment")
            max_x = max(prev_line.visual_line.max_x, curr_line.visual_line.max_x)
min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
prev_line_type = prev_line.line_type
page_blocks[-1]["block_text"] = new_text
prev_start_y = prev_line.visual_line.start_y
curr_start_y = curr_line.visual_line.start_y
prev_end_y = prev_line.visual_line.end_y
wrapped_page = prev_line.visual_line.wrapped_page
# pass the line parser attributes
prev_line = curr_line
# add appended text and text_list, preserve the line type
prev_line.text = new_text
prev_line.visual_line.start_y = prev_start_y
prev_line.visual_line.text_list = new_text_list
prev_line.line_type = prev_line_type
prev_line.visual_line.min_x = min_x
prev_line.visual_line.max_x = max_x
prev_line.visual_line.wrapped_page = wrapped_page
if curr_start_y < prev_end_y:
prev_line.visual_line.wrapped_page = True
# print(prev_start_y)
# print("Join")
# print()
# print("-" * 50)
# print()
# new block
else:
# print("NEW block")
# print("*" * 50)
if not is_visually_apart and bottom_overlap < 0.5:
# this would signify end of paragraph
sentence_visual_end = True
else:
sentence_visual_end = False
# print("-"*50)
colon_rule = (
prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents
)
# normal case
tab_check_join = {
prev_line.visual_line.tab_count_join,
prev_line.visual_line.tab_count,
} & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count}
tab_check = sum(tab_check_join) > 0
# print("-+" * 50)
# print("TAB POSITIONS")
# print(prev_line.text)
# print(prev_line.visual_line.start_x_list)
# print(prev_line.visual_line.start_x_list_single_ent)
# print(prev_line.visual_line.tab_count)
# print(prev_line.visual_line.tab_count_join)
#
# print(curr_line.text)
# print(curr_line.visual_line.start_x_list)
# print(curr_line.visual_line.start_x_list_single_ent)
# print(curr_line.visual_line.tab_count)
# print(curr_line.visual_line.tab_count_join)
# print("tabcheck", tab_check)
# print("ents_aligned", ents_aligned)
# print(prev_ents, curr_ents)
# print(curr_line.visual_line.text_list)
# print("-+" * 50)
if visual_header_by_stats and prev_line.line_type != "table_row":
page_blocks[-1]["block_type"] = "header"
elif (
colon_rule
and prev_ents == 1
and prev_line.line_type != "list_item"
and not (prev_line.incomplete_line and curr_line.continuing_line)
):
# print("Table Conversion")
# print()
# print("colon check")
# print(prev_line.text.split(":"))
# print(curr_line.text.split(":"))
# print("TR1")
new_text_list = prev_line.text.split(":")
new_text_list = [new_text_list[0] + ":", new_text_list[1:]]
page_blocks[-1]["block_type"] = "table_row"
page_blocks[-1]["block_list"] = new_text_list
if text_group_start:
text_group_start = False
text_group_start_idx = page_blocks[-1]["block_idx"]
page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
curr_line.line_type = "table_row"
curr_line.is_list_or_row = True
# print("Table Conversion!")
# print(prev_ents, curr_ents)
# print(page_blocks[-1]["block_text"])
# print("TR3")
elif (
tab_check and ents_aligned and prev_line.line_type != "list_item"
) or (colon_rule and not prev_line.incomplete_line):
# print("Table Conversion")
# print(prev_ents, curr_ents)
# print(page_blocks[-1]["block_text"])
# print("TR2")
page_blocks[-1]["block_type"] = "table_row"
if text_group_start:
text_group_start = False
text_group_start_idx = page_blocks[-1]["block_idx"]
page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
curr_line.line_type = "table_row"
else:
text_group_start = True
text_group_start_idx = -1
list_char = ""
if curr_line.line_type == "list_item":
list_char = curr_line.text[0]
curr_line.text = curr_line.text[1:].lstrip()
if curr_line.line_type == "header":
header_block_idx = block_idx
if (visual_header or visual_header_by_stats) and not (
prev_line.line_type == "list_item"
or prev_line.line_type == "numbered_list_item"
):
page_blocks[-1]["block_type"] = "header"
# print()
# print("*" * 40)
# print("NEW BLOCK")
# print()
# print("*" * 40)
# print(curr_line.line_type, curr_line.text)
# group attribute
if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0:
group_id += 1
block = {
"block_idx": block_idx,
"block_text": curr_line.text,
"block_type": curr_line.line_type,
"header_block_idx": header_block_idx,
"block_group": [curr_line.visual_line.text_list],
"text_group_start_idx": text_group_start_idx,
"list_char": list_char,
"group_id": group_id,
"fs": curr_line.visual_line.start_fs,
"x": curr_line.visual_line.start_x,
"y": curr_line.visual_line.start_y,
"line": curr_line,
"block_list": curr_line.visual_line.text_list,
}
# This accounts for headers that are false positives #TODO improve header code
prev_text = page_blocks[-1]["block_text"]
if page_blocks[-1]["block_type"] == "header" and (
len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16
):
page_blocks[-1]["block_type"] = "para"
prev_line = curr_line
block_idx += 1
page_blocks.append(block)
# if there are not too many blocks, title text may have been missed
if len(page_blocks) <= 2:
for idx, block in enumerate(page_blocks):
if "." not in block["block_text"] and len(block["block_text"].split()) < 10:
page_blocks[idx]["block_type"] = "header"
page_blocks = order_blocks(page_blocks)
return page_blocks, line_set
def clean_line(line):
line = line.replace("\n", " ")
line = line.replace("\t", " ")
line = line.strip()
return line
def fix_spaced_characters(line_text):
line_text = re.sub(r"\s+", "", line_text)
return su.segment(line_text)
def connect(prev, curr):
has_space = prev.endswith(" ")
result = prev + ("" if has_space else " ") + curr
return result
def get_numbers(line):
# test = re.compile(r"[0-9]+\.?[0-9]?")
regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$")
return regex.search(line)
def check_block_join(prev_block, block):
prev_text = prev_block["block_text"]
curr_text = block["block_text"]
blocks_are_paras = (
prev_block["block_type"] == "para" and block["block_type"] == "para"
)
if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras:
prev_line = line_parser.Line(prev_block["block_text"])
curr_line = line_parser.Line(block["block_text"])
if prev_line.incomplete_line or curr_line.continuing_line:
return True
return False
def join_blocks(page_blocks, blocks):
prev_last_block = page_blocks[-1][-1]
# update page blocks and blocks
# prev_blocks = page_blocks[-1]
# last_prev_block = prev_blocks[-1]
# check to join last_prev_block with first blocks[0]
# if it's a join, pop the block and join, subtract block indexes
prev_last_block["block_text"] = (
prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip()
)
prev_last_block["block_list"].append(blocks[0]["block_list"])
# print(prev_block)
page_blocks[-1][-1] = prev_last_block
for block in blocks[1:]:
block["block_idx"] -= 1
return page_blocks, blocks[1:]
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>import datetime
import logging
import math
import re
import string
from nltk.corpus import stopwords
from .patterns import abbreviations
from .patterns import states
from .patterns import states_abbreviations
from .styling_utils import mode_of_list
try:
stop_words = set(stopwords.words("english"))
except Exception as e:
logging.error(e)
import nltk
stopwords = nltk.download("stopwords")
stop_words = set(stopwords.words("english"))
stop_words.add("per")
continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~"
list_chars = [
"•",
"➢",
"*",
"ƒ",
"",
"",
"",
"",
"»",
"☐",
"·",
"�",
"▪",
"▪",
"○",
"",
"–",
]
list_types = {
"•": "circle",
"➢": "wide_symbol_arrow",
"*": "star",
"ƒ": "f",
"": "clock",
"": "small_square",
"": "narrow_symbol_arrow",
"": "large_square",
"»": "double_arrow",
"☐": "hollow_square",
"·": "circle",
"�": "special_char",
"▪": "very_small_square",
"▪": "very_small_square",
"○": "hollow_circle",
"": "hollow_squere",
"–": "dash",
"‒": "another-dash",
"̶": "underscore",
}
unicode_list_types = {
"\\uf0b7": "•",
"\\uf0fc": "",
}
footnote_types = {
"©"
}
ambiguous_list_chars = ["+", "-"]
units = ["acres", "miles", "-"] # - could represent a null value in a row
punctuations = string.punctuation + "“"
start_quotations = ["'", '"', "“"]
end_quotations = ["'", '"', "”"]
"""
Quote Pattern details:
\\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly.
["“\'] ==> Quote patterns
(?!\\D\\s) ==> Negative Lookahead for single character following the quote.
Helps in removing words like Macy's, don't ...
(?!\\d+) ==> Negative Lookahead for one or more digits following the pattern.
Helps in removing words like '19, '2019
(.*?)[,;.]?[”"\'] ==> Match all other data.
"""
# Add / Modify Quotation pattern in ingestor_utils/utils.py also.
quote_pattern = re.compile(
r'(?:(?<=\W)|(?<=^))["“‘’\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[”"‘’\']+',
) # (r'["“\'](.*?)[,;.]?[”"\']')
single_char_pattern = re.compile(r'[a-zA-Z]')
multi_char_pattern = re.compile(r'[a-zA-Z]+')
roman_number_pattern = re.compile(r'[ixvIXV]+$')
ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"“‘’”\'\s]*$")
conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"]
class Word:
def __init__(self, token):
self.text = token
self.is_percent = False
self.is_number = False
self.is_year = False # year does not count as a number
self.is_dollar = False
self.is_million = False
self.is_billion = False
self.is_thousand = False
self.is_date_entry = False
self.is_negative = False
self.length = len(self.text)
self.is_stop_word = self.text.lower() in stop_words
self.is_number_range = False
self.parts = []
text_without_punct = self.text
while (
len(text_without_punct) > 1 and
(text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations)
):
text_without_punct = text_without_punct[0:-1]
# remove leading unbalanced punctuations
while (
len(text_without_punct) > 1 and
(text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations)
):
text_without_punct = text_without_punct[1:]
self.text_without_punct = text_without_punct
self.is_noun = self.text_without_punct[0].isupper()
n = self.check_numeric()
self.check_date()
try:
if n:
n = round(float(n))
i<fim_suffix>f n > 0:
digits = int(math.log10(n)) + 1
elif n == 0:
digits = 1
else:
digits = int(math.log10(-n)) + 2
self.num_digits = digits
if digits == 4 and self.text.replace(",", "") == self.text:
self.is_year = True
self.is_number = False
else:
self.num_digits = 0
except Exception as e:
logging.error(e)
self.num_digits = 0
def check_date(self):
if "/" in self.text or "-" in self.text:
text = self.text.replace("/", "-")
date_patterns = [
"%b-%d",
"%B-%d",
"%B-%d-%y",
"%B-%d-%Y",
"%b-%d-%Y",
"%b-%d-%y",
"%m-%d",
"%m-%d-%y",
"%m-%d-%Y",
]
for pat in date_patterns:
try:
datetime.datetime.strptime(text, pat)
self.is_date_entry = True
return
except ValueError:
pass
else:
self.is_date_entry = False
def check_numeric(self):
word = self.text.lower()
if not word.isalpha():
if word.isprintable():
if not word.isnumeric():
if word.startswith("(") and word.endswith(")"):
word = word[1:-1]
if word.startswith("-"):
self.is_negative = True
word = word[1:]
if word.startswith("$"):
self.is_dollar = True
word = word[1:]
elif word.endswith("$"):
self.is_dollar = True
word = word[0:-1]
elif word.endswith("%"):
self.is_percent = True
word = word[0:-1]
elif word.endswith("m"):
self.is_million = True
elif word.endswith("bn"):
self.is_billion = True
if word.startswith("(") and word.endswith(")"):
word = word[1:-1]
word = word.replace(",", "")
if word.isnumeric() or word.replace(".", "", 1).isnumeric():
self.is_number = True
parts = word.split("-")
if (
len(parts) == 2
and parts[0].isnumeric()
and parts[1].isnumeric()
):
self.is_number_range = True
self.parts = parts
else:
self.is_number = True
if self.is_number:
numeric_part = word
return numeric_part
class Line:
def __init__(
self,
line_str,
text_list=[],
style_dict={},
page_details={},
noun_chunk_ending_tokens=[],
):
self.text = line_str.strip()
self.visual_line = VisualLine(text_list, style_dict, page_details)
self.words = []
self.is_independent = False
self.is_header = False
self.is_header_without_comma = False
self.noun_chunks = []
self.quoted_words = quote_pattern.findall(self.text)
self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens}
self.parse_line()
def check_header(self):
# Section X, Article Y, Note 1 etc.
first_word_header = self.first_word.lower() in ["section", "article", "note"]
# If there are a certain percentage of title words (first letter capitalize)
title_ratio = (
self.title_word_count / self.eff_word_count
if self.eff_word_count > 0
else 1.0
)
# print(self.title_word_count, self.eff_word_count, title_ratio)
# Section 1 is a header but Section 1: Hello 3 is not
has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10
has_header_structure = (
(first_word_header or has_enough_titles) and self.number_count == 1
) or self.numbered_line or self.text.isupper()
# has_header_structure = has_header_structure and self.eff_word_count <
last_word_number = (
self.last_word.lower() in units
or self.last_word_number
and not has_header_structure
)
last_word_date = self.last_word_date and not has_header_structure
# Find lines ending with sentence delimiter. But exclude text like "L.P."
ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None
sentence_structure = self.ends_with_period and not (
has_header_structure and title_ratio > 0.9
) and ends_with_delim
last_letter_is_punctuation = (
self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and
ends_with_delim
)
self.is_header_without_comma = (
not sentence_structure
and not self.has_list_char
and not self.first_char in footnote_types
and has_enough_titles
and not last_word_number
and (
self.number_count == 0
or (has_header_structure and self.number_count <= 1)
)
and not self.has_continuing_chars
and not last_word_date
and self.first_word_title
and not self.last_word_is_stop_word
and not self.is_zipcode_or_po
and not last_letter_is_punctuation
and not "://" in self.text # url pattern
)
self.is_header = self.is_header_without_comma and \
((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True)
def check_ends_with_period(self):
# punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.']
last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."]
self.ends_with_period = self.last_char in ["."] and not last_word_is_title
def check_table_row(self):
if not self.is_header:
value_count = (
self.number_count
+ self.dollar_count
+ self.pct_count
+ self.text.count(" - ")
)
word_symbols = self.word_count - self.dollar_sign_count
if word_symbols == 0:
word_symbols = 1
word_ratio = (
value_count + self.title_word_count + self.date_entry_count
) / word_symbols
self.is_table_row = (
(
(value_count > 0 or self.date_entry_count > 0)
and word_ratio > 0.7
and not self.ends_with_period
and not self.is_zipcode_or_po
)
and not self.last_word_is_stop_word
or ("...." in self.text)
)
else:
self.is_table_row = False
def check_list_item(self):
text = self.text.strip()
self.has_list_char = text[0] in list_types.keys()
# if not self.has_list_char and text[0] in ambiguous_list_chars:
# self.has_list_char = text[1:].strip()[0].isalpha()
self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$"
if self.is_list_item:
self.list_type = list_types[text[0]]
# matches 1.1 1.2.1 1 etc.
def check_numbered_line(self, word):
trunc_word = word
ends_with_parens = word.endswith(")")
number_end_char = word.endswith(".") or ends_with_parens
number_start_char = word.startswith("(")
if number_start_char and not ends_with_parens:
return False
if word[-1] in ["%", "$", ","]:
return False
if number_end_char:
trunc_word = word[:-1]
if number_start_char:
trunc_word = trunc_word[1:]
# To handle scenarios like (ii)(A)
if ")(" in trunc_word:
trunc_word = trunc_word.split(")(")[0]
parts = trunc_word.split(".")
self.integer_numbered_line = False
self.roman_numbered_line = False
self.letter_numbered_line = False
self.dot_numbered_line = False
mixed_list_items = False
max_digits = 2
max_roman = 6
for idx, part in enumerate(parts):
# print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0)
if len(part) <= max_digits:
# (1), (2), (3)
self.integer_numbered_line = part.isdigit() and (
len(parts) > 1 or word.endswith(")")
)
# 1. 2. 3.
self.dot_numbered_line = part.isdigit() and (
len(parts) > 1 or word.endswith(".")
)
# a. b. c. or a) b) c)
# idx > 0 for patterns like 10.a
# a1 b1 c1 etc.
self.letter_numbered_line = (
True
if single_char_pattern.match(part)
and (
(number_end_char and len(part) == 1 and len(parts) == 1)
or multi_char_pattern.sub("", part).isdigit()
or idx > 0
)
else False
)
if len(part) <= max_roman:
# xi, i, iv
self.roman_numbered_line = (
True if roman_number_pattern.match(part) and idx == 0 else False
)
if part.endswith(")") and part[0].isalnum() and "(" in part:
mixed_list_items = True
# else:
# self.integer_numbered_line = False
# A-1
# self.letter_numbered_line = (
# True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False
# )
self.numbered_line = (
self.integer_numbered_line
or self.roman_numbered_line
or self.letter_numbered_line
or self.dot_numbered_line
) and not mixed_list_items
if not self.numbered_line:
break
if self.numbered_line:
self.start_number = trunc_word
self.line_without_number = self.text[len(word) + 1 :]
self.full_number = self.text[:len(word)]
# check if line is part of address
def check_zipcode_or_pobox(self):
# check if line matches format P.O. box xxxxx
pobox = (
self.word_count == 3
and self.last_word_number
and self.first_word.lower() in ["po", "p.o", "p.o."]
)
# check if line is last part of address, matching format "city, state zipcode"
zipcode = (
self.word_count
< 7 # ensure line is standalone address, not part of larger sentence
and (
self.contains_state # line contains comma followed by state name or abbreviation
# line ends in zipcode, with format xxxxx or xxxxx-xxxx
and (
(self.last_word_number or self.last_word[-4:].isdigit())
and (
(len(self.last_word) == 10 and self.last_word[-5] == "-")
or len(self.last_word) == 5
)
)
and not self.ends_with_period
)
)
self.is_zipcode_or_po = pobox or zipcode
def set_line_type(self):
line_type = "para"
if self.is_table_row:
line_type = "table_row"
elif self.is_header:
line_type = "header"
elif self.is_list_item or self.numbered_line:
line_type = "list_item"
else:
line_type = "para"
self.line_type = line_type
def parse_line(self):
self.words = []
self.title_word_count = 0
self.alpha_count = 0
self.list_type = ""
self.integer_numbered_line = False
self.roman_numbered_line = False
self.dot_numbered_line = False
self.numbered_line = False
self.stop_word_count = 0
self.dollar_count = 0
self.pct_count = 0
self.number_count = 0
self.last_word_number = False
self.first_word_title = False
self.letter_numbered_line = False
self.ends_with_hyphen = False
self.last_word_date = False
self.is_reference_author_name = False
self.date_entry_count = 0
self.last_word_is_stop_word = False # self.last_word in self.stopwords
self.hit_colon = False
self.is_zipcode_or_po = False
self.contains_state = False
self.addresses = []
# todo - this is a stopgap solution, need to make it more efficient
tokens = self.text.split()
self.length = len(self.text)
self.word_count = len(tokens)
self.dollar_sign_count = tokens.count("$")
last_idx = self.word_count - 1
first_alpha_found = False
prev_token_comma = False
self.eff_length = 0
single_letter_word_count = 0
noun_chunk_buf = []
if self.length == 0:
return
for idx, token in enumerate(tokens):
if token in unicode_list_types.keys():
token = unicode_list_types[token]
if token.__contains__(":"):
self.hit_colon = True
# remove punctuation unless (word) or unless it is the first token or if it has colon
last_char = token[-1]
# remove punctuation unless (word) or unless it is the first token
if (
(token[-1] in string.punctuation or token[-1] in end_quotations)
and not (token[0] in string.punctuation or token[0] in start_quotations)
and (not idx == 0 or token[-1] == ":")
):
token = token[0:-1]
if len(token) == 0:
continue
# if prev token contained comma, check if current token is state name
if prev_token_comma and (
token.lower() in states or token.lower() in states_abbreviations
):
self.contains_state = True
prev_token_comma = False
if prev_token_comma:
prev_token_comma = False
if last_char == ",":
prev_token_comma = True
if idx == 0 and not token.lower() == "i" and not token.lower() == "a":
self.check_numbered_line(token)
if token.istitle() or token.isupper(): # and not self.hit_colon:
self.title_word_count = self.title_word_count + 1
if token.isalpha():
# if not self.hit_colon:
self.alpha_count = self.alpha_count + 1
if not first_alpha_found:
first_alpha_found = True
if idx == 0:
self.first_word_title = token[0].isupper()
word = Word(token)
if word.is_number:
self.number_count = self.number_count + 1
if idx == last_idx:
self.last_word_number = True
if word.is_date_entry:
self.date_entry_count += 1
if idx == last_idx:
self.last_word_date = True
if word.is_dollar:
self.dollar_count = self.dollar_count + 1
if idx == last_idx:
self.last_word_number = True
if word.is_percent:
self.pct_count = self.pct_count + 1
if idx == last_idx:
self.last_word_number = True
self.eff_length += word.length
if word.length == 1:
single_letter_word_count += 1
if word.is_stop_word:
if not self.hit_colon:
self.stop_word_count = self.stop_word_count + 1
if idx == last_idx and len(token) != 1 and not token.isupper():
self.last_word_is_stop_word = True
if word.is_noun or word.text == "&":
noun = word.text_without_punct
prev_word = self.words[-1] if len(self.words) > 0 else None
if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf:
noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway
if noun.endswith("'s"):
noun = noun[0:-2]
noun_chunk_buf.append(noun)
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
elif (
"".join([x.lower() for x in noun if x not in {".", ","}])
in self.noun_chunk_ending_tokens
):
noun_chunk_buf.append(noun)
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
else:
noun_chunk_buf.append(noun)
elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]:
noun_chunk_buf.append(word.text_without_punct)
elif len(noun_chunk_buf):
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
self.words.append(word)
if len(noun_chunk_buf) > 0:
self.noun_chunks.append(" ".join(noun_chunk_buf))
self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks))))
self.first_word = tokens[0]
self.last_word = tokens[-1]
self.last_char = self.text[-1]
self.ends_with_period = self.last_char == "."
self.ends_with_comma = self.last_char == ","
self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "."
self.eff_word_count = self.alpha_count - self.stop_word_count
self.check_ends_with_period()
self.first_char = self.text[0]
self.has_continuing_chars = not self.numbered_line and (
self.first_char.islower() or self.first_char in continuing_chars
)
self.last_continuing_char = self.last_char in continuing_chars
self.check_zipcode_or_pobox()
self.check_list_item()
self.check_header()
self.check_table_row()
self.separate_line = (
self.is_header
or self.is_table_row
or self.is_list_item
or self.is_zipcode_or_po
)
self.is_list_or_row = self.is_table_row or self.is_list_item
self.is_header_or_row = (
self.is_header or self.is_table_row or self.is_zipcode_or_po
)
self.ends_with_abbreviation = self.ends_with_period and (
(self.last_word.find(".") != len(self.last_word) - 1)
or self.last_word.lower() in abbreviations
or len(self.last_word) <= 3
)
self.incomplete_line = not self.is_header_or_row and (
not self.ends_with_period
or self.ends_with_abbreviation
or self.end_with_period_single_char
)
self.continuing_line = self.has_continuing_chars and not self.separate_line
self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8
self.set_line_type()
if self.is_header or self.is_header_without_comma:
if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2:
self.is_reference_author_name = True
self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list
# print(self.separate_line)
# self.continuing_line = not self.separate_line and
def to_json(self):
json_lp = dict(self.__dict__)
del json_lp["visual_line"]
words = []
for word in self.words:
words.append(word.__dict__)
json_lp["words"] = words
return json_lp
class VisualLine:
def __init__(self, text_list=[], style_dict={}, page_stats={}):
self.text_list = text_list
self.start_x = None
self.start_y = None
self.end_x = None
self.end_y = None
self.fs = None
self.fw = None
self.start_fs = None
self.end_fs = None
self.diff_prev_y = None
self.diff_next_y = None
self.is_comparably_sized = False
self.is_comparably_bolded = False
self.is_prev_space_smallest = False
self.is_next_space_smallest = False
self.wrapped_page = False
self.text = " ".join(self.text_list)
if style_dict:
self.start_x = style_dict["start_x"][0]
self.start_y = style_dict["start_y"][0]
self.end_x = style_dict["end_x"][-1]
self.end_y = style_dict["end_y"][-1]
self.fs = style_dict["line_fs"][0]
self.fw = style_dict["line_fw"][0]
self.diff_prev_y = style_dict["diff_prev_y"][0]
self.diff_next_y = style_dict["diff_next_y"][0]
self.font_family = (
style_dict["font_family"][0] if len(style_dict["font_family"]) else None
)
self.font_style = (
style_dict["font_style"][0] if len(style_dict["font_style"]) else None
)
self.min_x = (
self.start_x
) # these variables are adjustable during line joins for line width
self.max_x = self.end_x
self.start_x_list = style_dict["start_x"] # joined ents
self.end_x_list = style_dict["end_x"] # joined ents
self.start_x_list_single_ent = style_dict["start_x_list"][0]
self.end_x_list_single_ent = style_dict["end_x_list"][0]
self.mode_fs = mode_of_list(style_dict["line_fs"])
self.tab_count = 0
# calculates tabs for when tika misses word split
if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent):
self.start_end_list = list(
zip(self.start_x_list_single_ent, self.end_x_list_single_ent),
)
for word_x, next_word_x in zip(
self.start_end_list[:-1],
self.start_end_list[1:],
):
word_start_x, word_end_x = word_x
next_word_start_x, next_word_end_x = next_word_x
word_distance = next_word_start_x - word_end_x
if word_distance > 20:
self.tab_count += 1
else:
self.start_end_list = []
self.tab_count_join = 0 # tab count after join in ptolines
# calculates tabs for when tika misses word split
if len(self.start_x_list) == len(self.end_x_list):
self.start_end_list_join = list(
zip(self.start_x_list, self.end_x_list),
)
for word_x, next_word_x in zip(
self.start_end_list_join[:-1],
self.start_end_list_join[1:],
):
word_start_x, word_end_x = word_x
next_word_start_x, next_word_end_x = next_word_x
word_distance = next_word_start_x - word_end_x
if word_distance > 20:
self.tab_count_join += 1
else:
self.start_end_list_join = []
if len(self.text.split()) == 2 and self.tab_count == 1:
self.text_list = self.text.split()
# Count tabs in text list, Eventually make it a function of font size
self.start_fs = round(style_dict["start_fs"][0], 1)
self.end_fs = round(style_dict["end_fs"][-1], 1)
self.compute_visual_features(page_stats)
def compute_visual_features(self, page_stats):
# compute font size relative to most common font
font_sizes_mode = page_stats["mode_fs"]
if self.fs > (4 / 3) * font_sizes_mode:
self.is_comparably_sized = True
else:
self.is_comparably_sized = False
# compute font weight relative to 600.0 which has generally
# been observed to correspond to bolding of some sort
font_weights_mode = page_stats["mode_fw"]
if font_weights_mode >= 600.0:
self.is_comparably_bolded = False
elif self.fw > 600.0:
self.is_comparably_bolded = True
# compare line height for similar type (same font) lines
if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2:
for k, v in page_stats["fs_and_diff_prev_y"].items():
if k == self.fs and 0 <= v < self.diff_prev_y:
break
else:
self.is_prev_space_smallest = True
if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2:
for k, v in page_stats["fs_and_diff_next_y"].items():
if k == self.fs and 0 <= v < self.diff_next_y:
break
else:
self.is_next_space_smallest = True
def should_join_table(self, next_line):
"""
Check if next line should be joined as a tr. This makes no assumption if the current line is a table
"""
# check list of spaced words
curr_line_ents = len(self.text_list)
next_line_ents = len(next_line.text_list)
ent_match = (
curr_line_ents == next_line_ents and curr_line_ents >= 2
) # tr should have at least two elements
# compare alignment of elements in both lists
if ent_match:
return
return False
def should_join_para(self):
return False
def should_join_header(self):
return False
def __str__(self):
output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest},"
output_str += f"\nfont_style = {self.font_style}"
return output_str
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>import datetime
import logging
import math
import re
import string
from nltk.corpus import stopwords
from .patterns import abbreviations
from .patterns import states
from .patterns import states_abbreviations
from .styling_utils import mode_of_list
try:
stop_words = set(stopwords.words("english"))
except Exception as e:
logging.error(e)
import nltk
stopwords = nltk.download("stopwords")
stop_words = set(stopwords.words("english"))
stop_words.add("per")
continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~"
list_chars = [
"•",
"➢",
"*",
"ƒ",
"",
"",
"",
"",
"»",
"☐",
"·",
"�",
"▪",
"▪",
"○",
"",
"–",
]
list_types = {
"•": "circle",
"➢": "wide_symbol_arrow",
"*": "star",
"ƒ": "f",
"": "clock",
"": "small_square",
"": "narrow_symbol_arrow",
"": "large_square",
"»": "double_arrow",
"☐": "hollow_square",
"·": "circle",
"�": "special_char",
"▪": "very_small_square",
"▪": "very_small_square",
"○": "hollow_circle",
"": "hollow_squere",
"–": "dash",
"‒": "another-dash",
"̶": "underscore",
}
unicode_list_types = {
"\\uf0b7": "•",
"\\uf0fc": "",
}
footnote_types = {
"©"
}
ambiguous_list_chars = ["+", "-"]
units = ["acres", "miles", "-"] # - could represent a null value in a row
punctuations = string.punctuation + "“"
start_quotations = ["'", '"', "“"]
end_quotations = ["'", '"', "”"]
"""
Quote Pattern details:
\\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly.
["“\'] ==> Quote patterns
(?!\\D\\s) ==> Negative Lookahead for single character following the quote.
Helps in removing words like Macy's, don't ...
(?!\\d+) ==> Negative Lookahead for one or more digits following the pattern.
Helps in removing words like '19, '2019
(.*?)[,;.]?[”"\'] ==> Match all other data.
"""
# Add / Modify Quotation pattern in ingestor_utils/utils.py also.
quote_pattern = re.compile(
r'(?:(?<=\W)|(?<=^))["“‘’\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[”"‘’\']+',
) # (r'["“\'](.*?)[,;.]?[”"\']')
single_char_pattern = re.compile(r'[a-zA-Z]')
multi_char_pattern = re.compile(r'[a-zA-Z]+')
roman_number_pattern = re.compile(r'[ixvIXV]+$')
ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"“‘’”\'\s]*$")
conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"]
class Word:
def __init__(self, token):
self.text = token
self.is_percent = False
self.is_number = False
self.is_year = False # year does not count as a number
self.is_dollar = False
self.is_million = False
self.is_billion = False
self.is_thousand = False
self.is_date_entry = False
self.is_negative = False
self.length = len(self.text)
self.is_stop_word = self.text.lower() in stop_words
self.is_number_range = False
self.parts = []
text_without_punct = self.text
while (
len(text_without_punct) > 1 and
(text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations)
):
text_without_punct = text_without_punct[0:-1]
# remove leading unbalanced punctuations
while (
len(text_without_punct) > 1 and
(text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations)
):
text_without_punct = text_without_punct[1:]
self.text_without_punct = text_without_punct
self.is_noun = self.text_without_punct[0].isupper()
n = self.check_numeric()
self.check_date()
try:
if n:
n = round(float(n))
if n > 0:
digits = int(math.log10(n)) + 1
elif n == 0:
digits = 1
else:
digits = int(math.log10(-n)) + 2
self.num_digits = digits
if digits == 4 and self.text.replace(",", "") == self.text:
self.is_year = True
self.is_number = False
else:
self.num_digits = 0
except Exception as e:
logging.error(e)
self.num_digits = 0
def check_date(self):
if "/" in self.text or "-" in self.text:
text = self.text.replace("/", "-")
date_patterns = [
"%b-%d",
"%B-%d",
"%B-%d-%y",
"%B-%d-%Y",
"%b-%d-%Y",
"%b-%d-%y",
"%m-%d",
"%m-%d-%y",
"%m-%d-%Y",
]
for pat in date_patterns:
try:
datetime.datetime.strptime(text, pat)
self.is_date_entry = True
return
except ValueError:
pass
else:
self.is_date_entry = False
def check_numeric(self):
word = self.text.lower()
if not word.isalpha():
if word.isprintable():
if not word.isnumeric():
if word.startswith("(") and word.endswith(")"):
word = word[1:-1]
if word.startswith("-"):
self.is_negative = True
word = word[1:]
if word.startswith("$"):
self.is_dollar = True
word = word[1:]
elif word.endswith("$"):
self.is_dollar = True
word = word[0:-1]
elif word.endswith("%"):
self.is_percent = True
word = word[0:-1]
elif word.endswith("m"):
self.is_million = True
elif word.endswith("bn"):
self.is_billion = True
if word.startswith("(") and word.endswith(")"):
word = word[1:-1]
word = word.replace(",", "")
if word.isnumeric() or word.replace(".", "", 1).isnumeric():
self.is_number = True
parts = word.split("-")
if (
len(parts) == 2
and parts[0].isnumeric()
and parts[1].isnumeric()
):
self.is_number_range = True
self.parts = parts
else:
self.is_number = True
if self.is_number:
numeric_part = word
return numeric_part
class Line:
def __init__(
self,
line_str,
text_list=[],
style_dict={},
page_details={},
noun_chunk_ending_tokens=[],
):
self.text = line_str.strip()
self.visual_line = VisualLine(text_list, style_dict, page_details)
self.words = []
self.is_independent = False
self.is_header = False
self.is_header_without_comma = False
self.noun_chunks = []
self.quoted_words = quote_pattern.findall(self.text)
self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens}
self.parse_line()
def check_header(self):
# Section X, Article Y, Note 1 etc.
first_word_header = self.first_word.lower() in ["section", "article", "note"]
# If there are a certain percentage of title words (first letter capitalize)
title_ratio = (
self.title_word_count / self.eff_word_count
if self.eff_word_count > 0
else 1.0
)
# print(self.title_word_count, self.eff_word_count, title_ratio)
# Section 1 is a header but Section 1: Hello 3 is not
has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10
has_header_structure = (
(first_word_header or has_enough_titles) and self.number_count == 1
) or self.numbered_line or self.text.isupper()
# has_header_structure = has_header_structure and self.eff_word_count <
last_word_number = (
self.last_word.lower() in units
or self.last_word_number
and not has_header_structure
)
last_word_date = self.last_word_date and not has_header_structure
# Find lines ending with sentence delimiter. But exclude text like "L.P."
ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None
sentence_structure = self.ends_with_period and not (
has_header_structure and title_ratio > 0.9
) and ends_with_delim
last_letter_is_punctuation = (
self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and
ends_with_delim
)
self.is_header_without_comma = (
not sentence_structure
and not self.has_list_char
and not self.first_char in footnote_types
and has_enough_titles
and not last_word_number
and (
self.number_count == 0
or (has_header_structure and self.number_count <= 1)
)
and not self.has_continuing_chars
and not last_word_date
and self.first_word_title
and not self.last_word_is_stop_word
and not self.is_zipcode_or_po
and not last_letter_is_punctuation
and not "://" in self.text # url pattern
)
self.is_header = self.is_header_without_comma and \
((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True)
def check_ends_with_period(self):
# punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.']
last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."]
self.ends_with_period = self.last_char in ["."] and not last_word_is_title
def check_table_row(self):
i<fim_suffix>f not self.is_header:
value_count = (
self.number_count
+ self.dollar_count
+ self.pct_count
+ self.text.count(" - ")
)
word_symbols = self.word_count - self.dollar_sign_count
if word_symbols == 0:
word_symbols = 1
word_ratio = (
value_count + self.title_word_count + self.date_entry_count
) / word_symbols
self.is_table_row = (
(
(value_count > 0 or self.date_entry_count > 0)
and word_ratio > 0.7
and not self.ends_with_period
and not self.is_zipcode_or_po
)
and not self.last_word_is_stop_word
or ("...." in self.text)
)
else:
self.is_table_row = False
def check_list_item(self):
text = self.text.strip()
self.has_list_char = text[0] in list_types.keys()
# if not self.has_list_char and text[0] in ambiguous_list_chars:
# self.has_list_char = text[1:].strip()[0].isalpha()
self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$"
if self.is_list_item:
self.list_type = list_types[text[0]]
# matches 1.1 1.2.1 1 etc.
def check_numbered_line(self, word):
trunc_word = word
ends_with_parens = word.endswith(")")
number_end_char = word.endswith(".") or ends_with_parens
number_start_char = word.startswith("(")
if number_start_char and not ends_with_parens:
return False
if word[-1] in ["%", "$", ","]:
return False
if number_end_char:
trunc_word = word[:-1]
if number_start_char:
trunc_word = trunc_word[1:]
# To handle scenarios like (ii)(A)
if ")(" in trunc_word:
trunc_word = trunc_word.split(")(")[0]
parts = trunc_word.split(".")
self.integer_numbered_line = False
self.roman_numbered_line = False
self.letter_numbered_line = False
self.dot_numbered_line = False
mixed_list_items = False
max_digits = 2
max_roman = 6
for idx, part in enumerate(parts):
# print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0)
if len(part) <= max_digits:
# (1), (2), (3)
self.integer_numbered_line = part.isdigit() and (
len(parts) > 1 or word.endswith(")")
)
# 1. 2. 3.
self.dot_numbered_line = part.isdigit() and (
len(parts) > 1 or word.endswith(".")
)
# a. b. c. or a) b) c)
# idx > 0 for patterns like 10.a
# a1 b1 c1 etc.
self.letter_numbered_line = (
True
if single_char_pattern.match(part)
and (
(number_end_char and len(part) == 1 and len(parts) == 1)
or multi_char_pattern.sub("", part).isdigit()
or idx > 0
)
else False
)
if len(part) <= max_roman:
# xi, i, iv
self.roman_numbered_line = (
True if roman_number_pattern.match(part) and idx == 0 else False
)
if part.endswith(")") and part[0].isalnum() and "(" in part:
mixed_list_items = True
# else:
# self.integer_numbered_line = False
# A-1
# self.letter_numbered_line = (
# True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False
# )
self.numbered_line = (
self.integer_numbered_line
or self.roman_numbered_line
or self.letter_numbered_line
or self.dot_numbered_line
) and not mixed_list_items
if not self.numbered_line:
break
if self.numbered_line:
self.start_number = trunc_word
self.line_without_number = self.text[len(word) + 1 :]
self.full_number = self.text[:len(word)]
# check if line is part of address
def check_zipcode_or_pobox(self):
# check if line matches format P.O. box xxxxx
pobox = (
self.word_count == 3
and self.last_word_number
and self.first_word.lower() in ["po", "p.o", "p.o."]
)
# check if line is last part of address, matching format "city, state zipcode"
zipcode = (
self.word_count
< 7 # ensure line is standalone address, not part of larger sentence
and (
self.contains_state # line contains comma followed by state name or abbreviation
# line ends in zipcode, with format xxxxx or xxxxx-xxxx
and (
(self.last_word_number or self.last_word[-4:].isdigit())
and (
(len(self.last_word) == 10 and self.last_word[-5] == "-")
or len(self.last_word) == 5
)
)
and not self.ends_with_period
)
)
self.is_zipcode_or_po = pobox or zipcode
def set_line_type(self):
line_type = "para"
if self.is_table_row:
line_type = "table_row"
elif self.is_header:
line_type = "header"
elif self.is_list_item or self.numbered_line:
line_type = "list_item"
else:
line_type = "para"
self.line_type = line_type
def parse_line(self):
self.words = []
self.title_word_count = 0
self.alpha_count = 0
self.list_type = ""
self.integer_numbered_line = False
self.roman_numbered_line = False
self.dot_numbered_line = False
self.numbered_line = False
self.stop_word_count = 0
self.dollar_count = 0
self.pct_count = 0
self.number_count = 0
self.last_word_number = False
self.first_word_title = False
self.letter_numbered_line = False
self.ends_with_hyphen = False
self.last_word_date = False
self.is_reference_author_name = False
self.date_entry_count = 0
self.last_word_is_stop_word = False # self.last_word in self.stopwords
self.hit_colon = False
self.is_zipcode_or_po = False
self.contains_state = False
self.addresses = []
# todo - this is a stopgap solution, need to make it more efficient
tokens = self.text.split()
self.length = len(self.text)
self.word_count = len(tokens)
self.dollar_sign_count = tokens.count("$")
last_idx = self.word_count - 1
first_alpha_found = False
prev_token_comma = False
self.eff_length = 0
single_letter_word_count = 0
noun_chunk_buf = []
if self.length == 0:
return
for idx, token in enumerate(tokens):
if token in unicode_list_types.keys():
token = unicode_list_types[token]
if token.__contains__(":"):
self.hit_colon = True
# remove punctuation unless (word) or unless it is the first token or if it has colon
last_char = token[-1]
# remove punctuation unless (word) or unless it is the first token
if (
(token[-1] in string.punctuation or token[-1] in end_quotations)
and not (token[0] in string.punctuation or token[0] in start_quotations)
and (not idx == 0 or token[-1] == ":")
):
token = token[0:-1]
if len(token) == 0:
continue
# if prev token contained comma, check if current token is state name
if prev_token_comma and (
token.lower() in states or token.lower() in states_abbreviations
):
self.contains_state = True
prev_token_comma = False
if prev_token_comma:
prev_token_comma = False
if last_char == ",":
prev_token_comma = True
if idx == 0 and not token.lower() == "i" and not token.lower() == "a":
self.check_numbered_line(token)
if token.istitle() or token.isupper(): # and not self.hit_colon:
self.title_word_count = self.title_word_count + 1
if token.isalpha():
# if not self.hit_colon:
self.alpha_count = self.alpha_count + 1
if not first_alpha_found:
first_alpha_found = True
if idx == 0:
self.first_word_title = token[0].isupper()
word = Word(token)
if word.is_number:
self.number_count = self.number_count + 1
if idx == last_idx:
self.last_word_number = True
if word.is_date_entry:
self.date_entry_count += 1
if idx == last_idx:
self.last_word_date = True
if word.is_dollar:
self.dollar_count = self.dollar_count + 1
if idx == last_idx:
self.last_word_number = True
if word.is_percent:
self.pct_count = self.pct_count + 1
if idx == last_idx:
self.last_word_number = True
self.eff_length += word.length
if word.length == 1:
single_letter_word_count += 1
if word.is_stop_word:
if not self.hit_colon:
self.stop_word_count = self.stop_word_count + 1
if idx == last_idx and len(token) != 1 and not token.isupper():
self.last_word_is_stop_word = True
if word.is_noun or word.text == "&":
noun = word.text_without_punct
prev_word = self.words[-1] if len(self.words) > 0 else None
if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf:
noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway
if noun.endswith("'s"):
noun = noun[0:-2]
noun_chunk_buf.append(noun)
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
elif (
"".join([x.lower() for x in noun if x not in {".", ","}])
in self.noun_chunk_ending_tokens
):
noun_chunk_buf.append(noun)
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
else:
noun_chunk_buf.append(noun)
elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]:
noun_chunk_buf.append(word.text_without_punct)
elif len(noun_chunk_buf):
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
self.words.append(word)
if len(noun_chunk_buf) > 0:
self.noun_chunks.append(" ".join(noun_chunk_buf))
self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks))))
self.first_word = tokens[0]
self.last_word = tokens[-1]
self.last_char = self.text[-1]
self.ends_with_period = self.last_char == "."
self.ends_with_comma = self.last_char == ","
self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "."
self.eff_word_count = self.alpha_count - self.stop_word_count
self.check_ends_with_period()
self.first_char = self.text[0]
self.has_continuing_chars = not self.numbered_line and (
self.first_char.islower() or self.first_char in continuing_chars
)
self.last_continuing_char = self.last_char in continuing_chars
self.check_zipcode_or_pobox()
self.check_list_item()
self.check_header()
self.check_table_row()
self.separate_line = (
self.is_header
or self.is_table_row
or self.is_list_item
or self.is_zipcode_or_po
)
self.is_list_or_row = self.is_table_row or self.is_list_item
self.is_header_or_row = (
self.is_header or self.is_table_row or self.is_zipcode_or_po
)
self.ends_with_abbreviation = self.ends_with_period and (
(self.last_word.find(".") != len(self.last_word) - 1)
or self.last_word.lower() in abbreviations
or len(self.last_word) <= 3
)
self.incomplete_line = not self.is_header_or_row and (
not self.ends_with_period
or self.ends_with_abbreviation
or self.end_with_period_single_char
)
self.continuing_line = self.has_continuing_chars and not self.separate_line
self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8
self.set_line_type()
if self.is_header or self.is_header_without_comma:
if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2:
self.is_reference_author_name = True
self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list
# print(self.separate_line)
# self.continuing_line = not self.separate_line and
def to_json(self):
json_lp = dict(self.__dict__)
del json_lp["visual_line"]
words = []
for word in self.words:
words.append(word.__dict__)
json_lp["words"] = words
return json_lp
class VisualLine:
def __init__(self, text_list=[], style_dict={}, page_stats={}):
self.text_list = text_list
self.start_x = None
self.start_y = None
self.end_x = None
self.end_y = None
self.fs = None
self.fw = None
self.start_fs = None
self.end_fs = None
self.diff_prev_y = None
self.diff_next_y = None
self.is_comparably_sized = False
self.is_comparably_bolded = False
self.is_prev_space_smallest = False
self.is_next_space_smallest = False
self.wrapped_page = False
self.text = " ".join(self.text_list)
if style_dict:
self.start_x = style_dict["start_x"][0]
self.start_y = style_dict["start_y"][0]
self.end_x = style_dict["end_x"][-1]
self.end_y = style_dict["end_y"][-1]
self.fs = style_dict["line_fs"][0]
self.fw = style_dict["line_fw"][0]
self.diff_prev_y = style_dict["diff_prev_y"][0]
self.diff_next_y = style_dict["diff_next_y"][0]
self.font_family = (
style_dict["font_family"][0] if len(style_dict["font_family"]) else None
)
self.font_style = (
style_dict["font_style"][0] if len(style_dict["font_style"]) else None
)
self.min_x = (
self.start_x
) # these variables are adjustable during line joins for line width
self.max_x = self.end_x
self.start_x_list = style_dict["start_x"] # joined ents
self.end_x_list = style_dict["end_x"] # joined ents
self.start_x_list_single_ent = style_dict["start_x_list"][0]
self.end_x_list_single_ent = style_dict["end_x_list"][0]
self.mode_fs = mode_of_list(style_dict["line_fs"])
self.tab_count = 0
# calculates tabs for when tika misses word split
if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent):
self.start_end_list = list(
zip(self.start_x_list_single_ent, self.end_x_list_single_ent),
)
for word_x, next_word_x in zip(
self.start_end_list[:-1],
self.start_end_list[1:],
):
word_start_x, word_end_x = word_x
next_word_start_x, next_word_end_x = next_word_x
word_distance = next_word_start_x - word_end_x
if word_distance > 20:
self.tab_count += 1
else:
self.start_end_list = []
self.tab_count_join = 0 # tab count after join in ptolines
# calculates tabs for when tika misses word split
if len(self.start_x_list) == len(self.end_x_list):
self.start_end_list_join = list(
zip(self.start_x_list, self.end_x_list),
)
for word_x, next_word_x in zip(
self.start_end_list_join[:-1],
self.start_end_list_join[1:],
):
word_start_x, word_end_x = word_x
next_word_start_x, next_word_end_x = next_word_x
word_distance = next_word_start_x - word_end_x
if word_distance > 20:
self.tab_count_join += 1
else:
self.start_end_list_join = []
if len(self.text.split()) == 2 and self.tab_count == 1:
self.text_list = self.text.split()
# Count tabs in text list, Eventually make it a function of font size
self.start_fs = round(style_dict["start_fs"][0], 1)
self.end_fs = round(style_dict["end_fs"][-1], 1)
self.compute_visual_features(page_stats)
def compute_visual_features(self, page_stats):
# compute font size relative to most common font
font_sizes_mode = page_stats["mode_fs"]
if self.fs > (4 / 3) * font_sizes_mode:
self.is_comparably_sized = True
else:
self.is_comparably_sized = False
# compute font weight relative to 600.0 which has generally
# been observed to correspond to bolding of some sort
font_weights_mode = page_stats["mode_fw"]
if font_weights_mode >= 600.0:
self.is_comparably_bolded = False
elif self.fw > 600.0:
self.is_comparably_bolded = True
# compare line height for similar type (same font) lines
if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2:
for k, v in page_stats["fs_and_diff_prev_y"].items():
if k == self.fs and 0 <= v < self.diff_prev_y:
break
else:
self.is_prev_space_smallest = True
if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2:
for k, v in page_stats["fs_and_diff_next_y"].items():
if k == self.fs and 0 <= v < self.diff_next_y:
break
else:
self.is_next_space_smallest = True
def should_join_table(self, next_line):
"""
Check if next line should be joined as a tr. This makes no assumption if the current line is a table
"""
# check list of spaced words
curr_line_ents = len(self.text_list)
next_line_ents = len(next_line.text_list)
ent_match = (
curr_line_ents == next_line_ents and curr_line_ents >= 2
) # tr should have at least two elements
# compare alignment of elements in both lists
if ent_match:
return
return False
def should_join_para(self):
return False
def should_join_header(self):
return False
def __str__(self):
output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest},"
output_str += f"\nfont_style = {self.font_style}"
return output_str
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/utils.py<fim_prefix>import json
import re
import numpy as np
from nltk import load
from nltk import PunktSentenceTokenizer
nltk_abbs = load("tokenizers/punkt/{}.pickle".format("english"))._params.abbrev_types
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
return super(NpEncoder, self).default(obj)
nlm_abbs = {
"u.s",
"u.s.a",
"n.w",
"p.o",
"po",
"st",
"ave",
"blvd",
"ctr",
"cir",
"ct",
"dr",
"mtn",
"apt",
"hwy",
"esq",
"fig",
"no",
"sec",
"n.a",
"s.a.b",
"non-u.s",
"cap",
'u.s.c',
"ste",
}
nlm_special_abbs = {
"inc",
}
abbs = nltk_abbs | nlm_abbs
nltk_tokenzier = PunktSentenceTokenizer()
rules = []
for abb in abbs:
# match start of the sentence
pattern = fr"^{abb}.\s"
replaced = f"{abb}_ "
# case insensitive replacement for synonyms
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
# match token in sentence
pattern = fr"\s{abb}.\s"
replaced = f" {abb}_ "
# case insensitive replacement for synonyms
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
for abb in nlm_special_abbs:
pattern = fr"{abb}\."
replaced = f"{abb}_"
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
# match content inside brackets
# (?<=\() ==> starts with "("
# ([^)]+) ==> repeat not ")"
# (?=\))") ==> ends with ")"
bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))")
space_rule = re.compile(r"\s([.'](?:\s|$|\D))", re.IGNORECASE) # Remove any space between punctuations (.')
quotation_pattern = re.compile(r'[”“"‘’\']')
def sent_tokenize(org_texts):
if not org_texts:
return org_texts
sents = []
# in case org_texts has \n, break it into multiple paragraph
# edge case for html and markdown
for org_text in <fim_suffix>org_texts.split("\n"):
org_text = space_rule.sub(r'\1', org_text)
modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text) # To handle bug https://github.com/nltk/nltk/issues/2925
orig_offset = abs(len(org_text) - len(modified_text))
# do not break bracket
for span_group in bracket_rule.finditer(modified_text):
start_byte, end_byte = span_group.span()
span = modified_text[start_byte:end_byte]
# skip this logic when span is too big? disabled for now
# if len(span.split()) >= 10:
# continue
modified_text = modified_text.replace(
f"({span})", f"_{span.replace('.','_')}_",
)
for rule, replaced in rules:
modified_text = rule.sub(replaced, modified_text)
# Normalize all the quotation.
modified_text = quotation_pattern.sub("\"", modified_text)
modified_sents = nltk_tokenzier.tokenize(modified_text)
offset = orig_offset
sent_idx = 0
while offset < len(modified_text) and sent_idx < len(modified_sents):
if modified_text[offset] == " ":
offset += 1
continue
# cut org_text based on lengths of modified_sent
modified_sent = modified_sents[sent_idx]
sents.append(org_text[offset: offset + len(modified_sent)])
offset += len(modified_sent)
sent_idx += 1
if len(sents) >= 2 and re.match(r"^.\.$", sents[0]):
sents[1] = sents[0] + " " + sents[1]
sents = sents[1:]
return sents
def divide_list_into_chunks(lst, n):
# looping till length l
for i in range(0, len(lst), n):
yield lst[i : i + n]
def normalize(X):
norms = np.einsum("ij,ij->i", X, X)
np.sqrt(norms, norms)
X /= norms[:, np.newaxis]
return X
def detect_block_center_aligned(block, page_width):
center_location = block["box_style"][1] + block["box_style"][3] / 2
center_aligned = abs(center_location - page_width / 2) < page_width * 0.01
width_check = block["box_style"][3] * 2 < page_width
return center_aligned and width_check
def detect_block_center_of_page(block, page_height):
bottom = block["box_style"][0] + block["box_style"][4]
center_of_page = (page_height / 3) <= bottom <= ((2 * page_height) / 3)
return center_of_page
def check_char_is_word_boundary(c):
if c.isalnum():
return False
if c in ['-', '_']:
return False
return True
def blocks_to_sents(blocks, flatten_merged_table=False, debug=False):
block_texts = []
block_info = []
header_block_idx = -1
header_match_idx = -1
header_match_idx_offset = -1
header_block_text = ""
is_rendering_table = False
is_rendering_merged_cells = False
table_idx = 0
levels = []
prev_header = None
block_idx = 0
for block_idx, block in enumerate(blocks):
block_type = block["block_type"]
if block_type == "header":
if debug:
print("---", block["level"], block["block_text"])
header_block_text = block["block_text"]
header_block_idx = block["block_idx"]
header_match_idx = header_match_idx_offset + 1
if prev_header and block["level"] <= prev_header['level'] and len(levels) > 0:
while len(levels) > 0 and levels[-1]["level"] >= block["level"]:
if debug:
print("<<", levels[-1]["level"], levels[-1]["block_text"])
levels.pop(-1)
if debug:
print(">>", block["block_text"])
levels.append(block)
prev_header = block
if debug:
print("-", [str(level['level']) + "-" + level['block_text'] for level in levels])
block["header_text"] = header_block_text
block["header_block_idx"] = header_block_idx
block["header_match_idx"] = header_match_idx
block["block_idx"] = block_idx
level_chain = []
for level in levels:
level_chain.append({"block_idx": level["block_idx"], "block_text": level["block_text"]})
# remove a level for header
if block_type == "header":
level_chain = level_chain[:-1]
level_chain.reverse()
block["level_chain"] = level_chain
# if block_type == "header" or block_type == "table_row":
if (
block_type == "header"
and not is_rendering_table and 'is_table_start' not in block
):
block_texts.append(block["block_text"])
# append text from next block to header block
# TODO: something happened here, it messed up the match_text
# if block_type == "header" and block_idx + 1 < len(blocks):
# block[
# "block_text"
# ] += blocks[block_idx+1]['block_text']
block_info.append(block)
header_match_idx_offset += 1
elif (
block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item"
) and not is_rendering_table:
block_sents = block["block_sents"]
header_match_idx_offset += len(block_sents)
for sent in block_sents:
block_texts.append(sent)
block_info.append(block)
elif 'is_table_start' in block:
is_rendering_table = True
if 'has_merged_cells' in block:
is_rendering_merged_cells = True
elif 'is_table_start' not in block and not is_rendering_table and block_type == "table_row":
block_info.append(block)
block_texts.append(block["block_text"])
header_match_idx_offset += 1
if is_rendering_table:
if is_rendering_merged_cells and "effective_para" in block and flatten_merged_table:
eff_header_block = block["effective_header"]
eff_para_block = block["effective_para"]
eff_header_block["header_text"] = block["header_text"]
eff_header_block["header_block_idx"] = block["block_idx"]
eff_header_block["header_match_idx"] = header_match_idx_offset + 1
eff_header_block["level"] = block["level"] + 1
eff_header_block["level_chain"] = block["level_chain"]
eff_para_block["header_block_idx"] = block["block_idx"]
eff_para_block["header_match_idx"] = header_match_idx_offset + 1
eff_para_block["level"] = block["level"] + 2
eff_para_block["level_chain"] = [
{
"block_idx": eff_header_block["block_idx"],
"block_text": eff_header_block["block_text"],
},
] + eff_header_block["level_chain"]
header_match_idx_offset += 1
block_info.append(block["effective_header"])
block_texts.append(block["effective_header"]["block_text"])
for sent in block["effective_para"]["block_sents"]:
block_texts.append(sent)
block_info.append(block["effective_para"])
header_match_idx_offset += len(block["effective_para"]["block_sents"])
else:
block["table_idx"] = table_idx
block_info.append(block)
block_texts.append(block["block_text"])
header_match_idx_offset += 1
if 'is_table_end' in block:
is_rendering_table = False
table_idx += 1
return block_texts, block_info
def get_block_texts(blocks):
block_texts = []
block_info = []
for block in blocks:
block_type = block["block_type"]
if (
block_type == "list_item"
or block_type == "para"
or block_type == "numbered_list_item"
or block_type == "header"
):
block_texts.append(block["block_text"])
block_info.append(block)
return block_texts, block_info<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/utils.py<fim_prefix>import json
import re
import numpy as np
from nltk import load
from nltk import PunktSentenceTokenizer
nltk_abbs = load("tokenizers/punkt/{}.pickle".format("english"))._params.abbrev_types
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
return super(NpEncoder, self).default(obj)
nlm_abbs = {
"u.s",
"u.s.a",
"n.w",
"p.o",
"po",
"st",
"ave",
"blvd",
"ctr",
"cir",
"ct",
"dr",
"mtn",
"apt",
"hwy",
"esq",
"fig",
"no",
"sec",
"n.a",
"s.a.b",
"non-u.s",
"cap",
'u.s.c',
"ste",
}
nlm_special_abbs = {
"inc",
}
abbs = nltk_abbs | nlm_abbs
nltk_tokenzier = PunktSentenceTokenizer()
rules = []
for abb in abbs:
# match start of the sentence
pattern = fr"^{abb}.\s"
replaced = f"{abb}_ "
# case-insensitive replacement for abbreviations
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
# match token in sentence
pattern = fr"\s{abb}.\s"
replaced = f" {abb}_ "
# case-insensitive replacement for abbreviations
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
for abb in nlm_special_abbs:
pattern = fr"{abb}\."
replaced = f"{abb}_"
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
# match content inside brackets
# (?<=\() ==> starts with "("
# ([^)]+) ==> repeat not ")"
# (?=\))") ==> ends with ")"
bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))")
space_rule = re.compile(r"\s([.'](?:\s|$|\D))", re.IGNORECASE) # Remove any space between punctuations (.')
quotation_pattern = re.compile(r'[”“"‘’\']')
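# Hypothetical example (illustrative only, not part of the original module): these rules
# let sent_tokenize hide sentence-breaking periods before calling the punkt tokenizer,
# e.g. the bracketed span in "(U.S. only)" is temporarily rewritten as "_U_S_ only_" so
# the tokenizer does not split inside the parentheses.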
def sent_tokenize(org_texts):
if not org_texts:
return org_texts
sents = []
# in case org_texts has \n, break it into multiple paragraphs
# edge case for html and markdown
for org_text in org_texts.split("\n"):
org_text = space_rule.sub(r'\1', org_text)
modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text) # To handle bug https://github.com/nltk/nltk/issues/2925
orig_offset = abs(len(org_text) - len(modified_text))
# do not break sentences inside brackets
for span_group in bracket_rule.finditer(modified_text):
start_byte, end_byte = span_group.span()
span = modified_text[start_byte:end_byte]
# skip this logic when span is too big? disabled for now
# if len(span.split()) >= 10:
# continue
modified_text = modified_text.replace(
f"({span})", f"_{span.replace('.','_')}_",
)
for<fim_suffix> rule, replaced in rules:
modified_text = rule.sub(replaced, modified_text)
# Normalize all quotation marks.
modified_text = quotation_pattern.sub("\"", modified_text)
modified_sents = nltk_tokenzier.tokenize(modified_text)
offset = orig_offset
sent_idx = 0
while offset < len(modified_text) and sent_idx < len(modified_sents):
if modified_text[offset] == " ":
offset += 1
continue
# cut org_text based on lengths of modified_sent
modified_sent = modified_sents[sent_idx]
sents.append(org_text[offset: offset + len(modified_sent)])
offset += len(modified_sent)
sent_idx += 1
if len(sents) >= 2 and re.match(r"^.\.$", sents[0]):
sents[1] = sents[0] + " " + sents[1]
sents = sents[1:]
return sents
def divide_list_into_chunks(lst, n):
# step through lst n items at a time, yielding one chunk per iteration
for i in range(0, len(lst), n):
yield lst[i : i + n]
def normalize(X):
norms = np.einsum("ij,ij->i", X, X)
np.sqrt(norms, norms)
X /= norms[:, np.newaxis]
return X
def detect_block_center_aligned(block, page_width):
center_location = block["box_style"][1] + block["box_style"][3] / 2
center_aligned = abs(center_location - page_width / 2) < page_width * 0.01
width_check = block["box_style"][3] * 2 < page_width
return center_aligned and width_check
def detect_block_center_of_page(block, page_height):
bottom = block["box_style"][0] + block["box_style"][4]
center_of_page = (page_height / 3) <= bottom <= ((2 * page_height) / 3)
return center_of_page
def check_char_is_word_boundary(c):
if c.isalnum():
return False
if c in ['-', '_']:
return False
return True
def blocks_to_sents(blocks, flatten_merged_table=False, debug=False):
block_texts = []
block_info = []
header_block_idx = -1
header_match_idx = -1
header_match_idx_offset = -1
header_block_text = ""
is_rendering_table = False
is_rendering_merged_cells = False
table_idx = 0
levels = []
prev_header = None
block_idx = 0
for block_idx, block in enumerate(blocks):
block_type = block["block_type"]
if block_type == "header":
if debug:
print("---", block["level"], block["block_text"])
header_block_text = block["block_text"]
header_block_idx = block["block_idx"]
header_match_idx = header_match_idx_offset + 1
if prev_header and block["level"] <= prev_header['level'] and len(levels) > 0:
while len(levels) > 0 and levels[-1]["level"] >= block["level"]:
if debug:
print("<<", levels[-1]["level"], levels[-1]["block_text"])
levels.pop(-1)
if debug:
print(">>", block["block_text"])
levels.append(block)
prev_header = block
if debug:
print("-", [str(level['level']) + "-" + level['block_text'] for level in levels])
block["header_text"] = header_block_text
block["header_block_idx"] = header_block_idx
block["header_match_idx"] = header_match_idx
block["block_idx"] = block_idx
level_chain = []
for level in levels:
level_chain.append({"block_idx": level["block_idx"], "block_text": level["block_text"]})
# remove a level for header
if block_type == "header":
level_chain = level_chain[:-1]
level_chain.reverse()
block["level_chain"] = level_chain
# if block_type == "header" or block_type == "table_row":
if (
block_type == "header"
and not is_rendering_table and 'is_table_start' not in block
):
block_texts.append(block["block_text"])
# append text from next block to header block
# TODO: something happened here, it messed up the match_text
# if block_type == "header" and block_idx + 1 < len(blocks):
# block[
# "block_text"
# ] += blocks[block_idx+1]['block_text']
block_info.append(block)
header_match_idx_offset += 1
elif (
block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item"
) and not is_rendering_table:
block_sents = block["block_sents"]
header_match_idx_offset += len(block_sents)
for sent in block_sents:
block_texts.append(sent)
block_info.append(block)
elif 'is_table_start' in block:
is_rendering_table = True
if 'has_merged_cells' in block:
is_rendering_merged_cells = True
elif 'is_table_start' not in block and not is_rendering_table and block_type == "table_row":
block_info.append(block)
block_texts.append(block["block_text"])
header_match_idx_offset += 1
if is_rendering_table:
if is_rendering_merged_cells and "effective_para" in block and flatten_merged_table:
eff_header_block = block["effective_header"]
eff_para_block = block["effective_para"]
eff_header_block["header_text"] = block["header_text"]
eff_header_block["header_block_idx"] = block["block_idx"]
eff_header_block["header_match_idx"] = header_match_idx_offset + 1
eff_header_block["level"] = block["level"] + 1
eff_header_block["level_chain"] = block["level_chain"]
eff_para_block["header_block_idx"] = block["block_idx"]
eff_para_block["header_match_idx"] = header_match_idx_offset + 1
eff_para_block["level"] = block["level"] + 2
eff_para_block["level_chain"] = [
{
"block_idx": eff_header_block["block_idx"],
"block_text": eff_header_block["block_text"],
},
] + eff_header_block["level_chain"]
header_match_idx_offset += 1
block_info.append(block["effective_header"])
block_texts.append(block["effective_header"]["block_text"])
for sent in block["effective_para"]["block_sents"]:
block_texts.append(sent)
block_info.append(block["effective_para"])
header_match_idx_offset += len(block["effective_para"]["block_sents"])
else:
block["table_idx"] = table_idx
block_info.append(block)
block_texts.append(block["block_text"])
header_match_idx_offset += 1
if 'is_table_end' in block:
is_rendering_table = False
table_idx += 1
return block_texts, block_info
def get_block_texts(blocks):
block_texts = []
block_info = []
for block in blocks:
block_type = block["block_type"]
if (
block_type == "list_item"
or block_type == "para"
or block_type == "numbered_list_item"
or block_type == "header"
):
block_texts.append(block["block_text"])
block_info.append(block)
return block_texts, block_info<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor/processors.py<fim_prefix>import logging
import re
from collections import Counter
from collections import defaultdict
from . import formatter
from . import line_parser
from . import patterns
from nlm_ingestor.ingestor_utils import spell_utils
from nlm_ingestor.ingestor_utils.utils import sent_tokenize
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
su = spell_utils.SpellUtil()
def stem(line):
line = line.replace("'s", "")
line = line.replace("’s", "")
return line
def check_parentheses(text):
count = 0
for i in text:
if i == "(":
count += 1
elif i == ")":
count -= 1
return count == 0
def nlm_tokenize(line):
# print(line)
tokens = []
if not line:
line = ""
line = line.lower()
trans_table = line.maketrans("-/", " ")
line = line.translate(trans_table)
line = line.translate(str.maketrans("", "", "�\\(*,.?•\\➢ƒ–\\)'\"—"))
# line = patterns.num_unit.sub(r"100 \1", line)
line = patterns.num_unit.sub(r"", line)
line = stem(line)
words = line.split()
for word in words:
if (
not word.isdigit()
and not word.endswith("%")
and not word.startswith("$")
and not word.endswith("$")
):
tokens.append(word)
if len(tokens) == 0:
tokens.append("unknown")
return tokens
# returns True only when no word in the line is longer than two characters (i.e. the line is just floating characters)
def find_floating_chars(line):
words = line.split(" ")
for word in words:
if len(word) > 2:
return False
return True
def is_table_row(line):
line = line_parser.Line(line)
return line.is_table_row
def should_skip(line, xml=False):
return len(line) <= 2 if not xml else len(line) == 0
def clean_lines(lines, xml=False):
result = []
running_line = ""
line_buffer = []
line_type = "para"
header_block_idx = -1
block_idx = 0
line_set = set()
for line_str in l<fim_suffix>ines:
# print(line_str)
line_str = clean_line(line_str)
if should_skip(line_str, xml=xml):
continue
line_without_numbers = re.sub(r"\d+", "", line_str)
if line_without_numbers in line_set:
continue
else:
line_set.add(line_without_numbers)
curr_line = line_parser.Line(line_str)
# this converts strings like 'e x e c u t i v e summary' to 'executive summary'
if not xml and curr_line.has_spaced_characters:
line_str = fix_spaced_characters(line_str)
curr_line = line_parser.Line(line_str)
if len(line_buffer) > 0:
# find out if previous line was a discontinuous line
prev_line = line_buffer[-1]
logger.debug("========")
logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n")
logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n")
# keep connecting lines as long as they seem incomplete
is_incomplete = prev_line.incomplete_line or (
len(line_buffer) > 1 and not prev_line.ends_with_period
)
logger.debug(
f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}",
)
if (
is_incomplete
and not (curr_line.is_list_or_row or curr_line.line_type == "list_item")
) or curr_line.continuing_line:
logger.debug("connecting..")
running_line = formatter.connect(running_line, curr_line.text)
line_buffer.append(curr_line)
# if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers
if not line_type == "list_item":
line_type = "para"
else: # commit the line and start a new line
# remove different types of bulleted lists (for better formatting) but do not touch numbered lines
logger.debug("starting new line..")
# if line_type == "list_item":
# running_line = running_line[1:].lstrip()
if line_type == "header":
header_block_idx = block_idx
block = {
"block_idx": block_idx,
"block_text": running_line,
"block_type": line_type,
"text_group_start_idx": -1,
"block_list": [],
"header_block_idx": header_block_idx,
"level": 0,
}
result.append(block)
block_idx = block_idx + 1
running_line = curr_line.text
line_buffer = [curr_line]
line_type = curr_line.line_type
logger.debug("========")
else:
running_line = curr_line.text
line_type = curr_line.line_type
line_buffer = [curr_line]
if line_type == "list_item" and running_line[0] in "�\\*,.?•\\➢ƒ–\\'\"—":
running_line = running_line[1:].lstrip()
block = {
"block_idx": block_idx,
"block_text": running_line,
"block_type": line_type,
"text_group_start_idx": -1,
"block_list": [],
"header_block_idx": header_block_idx,
"level": 0,
}
result.append(block)
return result
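# Hypothetical usage sketch (illustrative only, not part of the original module):
# clean_lines() takes a list of raw text lines and returns a list of block dicts
# (block_idx, block_text, block_type, header_block_idx, ...), merging lines that look
# like continuations of each other into a single "para" block.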
def line_list_check(prev_line, curr_line, list_char):
# if prev_line is list_item and list_char matches curr_line
if list_char == curr_line.text[0] and list_char not in ["”", "'", '"', "("]:
return True
# same char is alpha
if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha():
if len(prev_line.text) >= 2 and prev_line.text[1].isupper():
# spell check first word
first_word = prev_line.text.split(" ")[0]
first_word = first_word.replace("'", "")
correct_word = su.segment(first_word)
if first_word[1:] == correct_word:
return True
# same char is not alpha but not digit
if prev_line.text[0] == curr_line.text[0] and not (
prev_line.text[0].isalpha()
or prev_line.text[0].isdigit()
or list_char not in ["”", "'", '"', "("]
):
return True
return False
def should_join_table(prev_line, curr_line, ents_aligned):
"""
Check if the next line should be joined as a table row (tr). This makes no assumption about whether the current line is part of a table
"""
# print()
# print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list)
# check list of spaced words
curr_line_ents = len(prev_line.visual_line.text_list)
next_line_ents = len(curr_line.visual_line.text_list)
ent_match = (
curr_line_ents == next_line_ents and curr_line_ents >= 2
) # tr should have at least two elements
# print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count)
tab_match = (
prev_line.visual_line.tab_count == curr_line.visual_line.tab_count
and curr_line.visual_line.tab_count > 0
)
# casing should also be the same
same_case = (
prev_line.text[0].islower() == curr_line.text[0].islower()
or prev_line.text[0].isupper() == curr_line.text[0].isupper()
)
colon_check = (
prev_line.hit_colon
and curr_line.hit_colon
and prev_line
and same_case
and not prev_line.incomplete_line
)
# if prev_line.hit_colon and curr_line.hit_colon:
# print()
# print("colon check")
# print(prev_line.visual_line.text_list)
# print(curr_line.visual_line.text_list)
# col_check
# print(tab_match, ent_match, colon_check)
tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count
return (
(tab_match and ent_match)
or colon_check
or (ents_aligned and ent_match and tab_check)
)
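# Illustrative summary (an editorial reading of the conditions above, not original text):
# the join fires when (a) both lines share the same positive tab count and the same
# number of spaced entities, (b) both lines hit a colon with matching casing and the
# previous line is complete, or (c) the entities are column-aligned, the counts match,
# and at least one of the lines contains tabs.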
def check_page_spacing(prev_line, curr_line, spacing_dict):
# print("^"*50)
# print("checking page stats")
# print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text)
# print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text)
# print()
diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y)
# find best fs reference
prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs}
curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs}
same_fs = prev_line_fs.intersection(curr_line_fs)
fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs
min_check = (
spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None
)
max_check = (
spacing_dict[(fs, diff_top + 1)] if (fs, diff_top + 1) in spacing_dict else None
)
normal_check = (fs, diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3
if min_check or normal_check or max_check:
# get all fs in spacing dict
# see if the diff top is a min
# print("checking space dict")
distance_list = []
for val in spacing_dict:
if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2:
distance_list.append((val, val[1]))
# print(distance_list)
val = min(distance_list) if len(distance_list) else []
if len(val):
join_fs, join_top = val[0]
if val[0] == (fs, diff_top): # or close
# print("SHOULDJOIN")
return True
elif (
join_fs == fs
and ((diff_top - 1) == join_top)
or ((diff_top + 1) == join_top)
):
return True
return False
def compute_overlap(
start_x0: float,
end_x0: float,
start_x1: float,
end_x1: float,
divide_by_min=True,
) -> float:
"""
Computes the % of intersection (overlap) of two lines w.r.t. the shortest line
"""
width_x0 = abs(end_x0 - start_x0)
width_x1 = abs(end_x1 - start_x1)
if start_x0 <= start_x1 <= end_x0:
intersect = min(abs(end_x0 - start_x1), width_x1)
elif start_x0 <= end_x1 <= end_x0:
intersect = min(abs(end_x1 - start_x0), width_x1)
elif start_x1 <= start_x0 <= end_x0 <= end_x1:
intersect = abs(end_x0 - start_x0)
else:
intersect = 0.0
if divide_by_min:
intersect /= min(width_x0, width_x1) + 1e-5
else:
intersect /= max(width_x0, width_x1) + 1e-5
return intersect
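# Hypothetical worked example (illustrative only, not part of the original module):
# compute_overlap(0.0, 10.0, 5.0, 20.0) -> ~0.5 (intersection 5 over the shorter width 10)
# compute_overlap(0.0, 10.0, 5.0, 20.0, divide_by_min=False) -> ~0.33 (over the longer width 15)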
def compute_overlap_top_bottom(
start_x0: float,
end_x0: float,
start_x1: float,
end_x1: float,
) -> float:
"""
This is different from the above function.
Finds percentage overlap of top to bottom.
A score of 100% is possible; the overlap is not measured relative to the shortest line.
"""
width_x1 = abs(end_x1 - start_x1)
if width_x1 == 0:
return 0.0
if start_x0 <= start_x1:
# measure from left to right
if end_x1 <= end_x0:
# if start and end both less, full in subset
return 1.0
return (end_x1 - start_x0) / width_x1
else:
# measure from bottom start
if end_x1 <= start_x0:
return 0.0
return (end_x1 - start_x0) / width_x1
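# Hypothetical worked example (illustrative only, not part of the original module):
# compute_overlap_top_bottom(0.0, 20.0, 5.0, 15.0) -> 1.0 (second span fully inside the first)
# compute_overlap_top_bottom(10.0, 20.0, 0.0, 5.0) -> 0.0 (no horizontal overlap)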
def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1):
"""
This is different from the above function.
Finds percentage overlap of top to bottom.
A score of 100% is possible; the overlap is not measured relative to the shortest line.
"""
# print(start_x0, end_x0)
# print(start_x1, end_x1)
if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line
# print()
# print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0))
return (end_x1 - start_x1) / (end_x0 - start_x0)
# other conditions
# elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line
# return
# else: #to the right of bottom line
return 1.0
# header check for lines with similar font
def visual_header_check(prev_line, curr_line, same_font):
# check top overlap (small) if the font size is bigger
# print()
# print("visual_header check:")
# print("prev", prev_line.text)
# print("checking", curr_line.text)
# top also has to be higher
# print("prev_line.visual_line.start_y, prev_line.visual_line.end_y")
# print(prev_line.visual_line.start_y, prev_line.visual_line.end_y)
# print(prev_line.visual_line.start_y, curr_line.visual_line.start_y)
if prev_line.visual_line.wrapped_page:
return False
if prev_line.visual_line.start_y < curr_line.visual_line.start_y:
prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x
curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x
# print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x")
# print(prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x)
# print("curr_line.visual_line.min_x, curr_line.visual_line.max_x")
# print(curr_line.visual_line.min_x, curr_line.visual_line.max_x)
# print("prev_line_width / curr_line_width")
# print(prev_line_width / curr_line_width)
# print("prev_line_width, curr_line_width")
# print(prev_line_width, curr_line_width)
if curr_line_width == 0:
return False
# print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x))
if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x):
if round(prev_line_width) == round(curr_line_width):
# print()
# print("NOT A HEADER1")
return False
offset = 0
# print(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
# print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x)
if prev_line.visual_line.min_x <= curr_line.visual_line.min_x:
offset = curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset
# print("(prev_line_width - offset) / curr_line_width")
# print((prev_line_width - offset) / curr_line_width)
overlap_percentage = (prev_line_width - offset) / curr_line_width
different_font_style = (
prev_line.visual_line.fw != curr_line.visual_line.fw
or prev_line.visual_line[1] != curr_line.visual_line[1]
or prev_line.visual_line.fs > curr_line.visual_line.fs
)
if (
overlap_percentage < 0.3
or (different_font_style and overlap_percentage < 0.6)
or (prev_line.line_type == "header" and different_font_style)
# or (prev_line.is_header and different_font_style)
):
# print("HEADER INDENT", prev_line.is_header)
# print("overlap rule::", (prev_line_width - offset) / curr_line_width)
# print(True)
return True
# print(False)
# print()
# print("NOT A HEADER")
return False
def visual_header_from_stats(prev_line, curr_line, page_stats):
prev_fs = prev_line.visual_line.fs
curr_fs = curr_line.visual_line.fs
median_val = round(page_stats["median_fs"])
max_val = round(max(page_stats["fs_list"]))
max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if max_val != 0 else True
prev_fs_diff = round(prev_fs - median_val)
curr_fs_diff = (
round(curr_fs - median_val) if round(curr_fs - median_val) else 0.8
) # curr_fs is the median
varied_set = len(set(page_stats["fs_list"])) >= 4
rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]])
unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"])
prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff
# print("prev_fs, curr_fs", prev_fs, curr_fs)
# print("unique text")
# print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) )
# print("visual_header check", len(set(page_stats["fs_list"])))
# print("varied_set", varied_set, "unique_text", unique_text)
# print(rounded_fs_count)
# print()
# close from max or far enough from median
bigger_text = max_val_diff or (
prev_curr_ratio_from_median > 2
) # TODO text must also be relatively uncommon
if varied_set and (unique_text <= 0.08):
if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3:
# print(max_val_diff)
# print(prev_fs, prev_line.text)
# print(curr_fs, curr_line.text)
# print()
return True
# header join
if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1):
# print(max_val_diff)
# print(prev_fs, prev_line.text)
# print(curr_fs, curr_line.text)
# print()
return True
return False
# def visual_clean_lines(lines, page_stats={}, page_info_dict={}):
def check_tr_alignment(prev_line, curr_line):
# print("-=" * 50)
# print("check_tr_alignment!")
# print(prev_line.text)
# print(curr_line.text)
# print()
prev_ents = len(prev_line.visual_line.text_list)
curr_ents = len(curr_line.visual_line.text_list)
prev_positions = prev_line.visual_line.start_x_list
curr_positions = curr_line.visual_line.start_x_list
prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent
curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent
# print(prev_line_start_ents)
# print(curr_line_start_ents)
same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1
if len(prev_line_start_ents) == len(curr_line_start_ents):
prev_positions = prev_line_start_ents
curr_positions = curr_line_start_ents
if len(prev_line_start_ents) == len(curr_positions) and len(
prev_line_start_ents,
) != len(
prev_positions,
): # joined p_tags
prev_positions = prev_line_start_ents
if not same_ents:
# print("check_tr_alignment False1")
# print(prev_ents, curr_ents)
return False
# print("CHECKING POSITIONS")
# print(prev_positions)
# print(curr_positions)
for p_x, c_x in zip(prev_positions, curr_positions):
p_x = round(p_x)
c_x = round(c_x)
if abs(p_x - c_x) > 100:
# print("False")
# print("check_tr_alignment False3")
return False
# print("check_tr_alignment True")
return True
def check_layout(prev_line, curr_line, prev_above_curr):
prev_line_width = range(
int(prev_line.visual_line.min_x),
int(prev_line.visual_line.max_x),
)
# weird edge case
if not prev_line_width:
prev_line_width = range(
int(prev_line.visual_line.max_x),
int(prev_line.visual_line.min_x),
)
curr_line_width = range(
int(curr_line.visual_line.min_x),
int(curr_line.visual_line.max_x),
)
prev_line_width = set(prev_line_width)
prev_curr_overlap = prev_line_width.intersection(curr_line_width)
if prev_curr_overlap and not prev_above_curr:
# print(prev_line.text)
# print(curr_line.text)
# print("misplaced text group")
# print()
return True
return False
def order_blocks(blocks):
block_group_dict = defaultdict(list)
for idx, block in enumerate(blocks):
# print(idx, "block-group", block["group_id"], block["block_type"], block['block_text'])
group_id = block["group_id"]
block_group_dict[group_id].append(block)
block_group_list = [] # list that holds tuples (group_id, y_pos)
for block_group_id in block_group_dict:
block_group_list.append(
(block_group_id, block_group_dict[block_group_id][0]["y"]),
) # append starting y position of group
block_group_list = sorted(
block_group_list,
key=lambda x: x[1],
) # sort block groups by y position
# get list of ordered block group keys
ordered_blocks = []
for block_group_id, y in block_group_list:
ordered_blocks += block_group_dict[block_group_id]
# for b in original_blocks:
# re-index blocks and headers based off of new ordering
header_idx = 0
for idx, block in enumerate(ordered_blocks):
block["block_idx"] = idx
if block["block_type"] == "header":
header_idx = idx
ordered_blocks[idx]["header_block_idx"] = header_idx
return ordered_blocks
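# Illustrative note (an editorial reading of the function above, not original text):
# order_blocks() regroups blocks by group_id, orders the groups by the y position of each
# group's first block, then re-assigns block_idx and header_block_idx to the new order.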
def visual_clean_lines(
lines,
page_stats={},
page_info_dict={},
page_idx=0,
line_set={},
):
page_blocks = []
header_block_idx = -1
block_idx = 0
# block_idx = page_idx
style_dict = {}
join_font_spacing = False
prev_line = None
text_list = []
prev_ents = 0
curr_ents = 0
is_incomplete = False
colon_rule = False
text_group_start = True
text_group_start_idx = 0
prev_line = None
next_line = None
# for idx, line in enumerate(lines[12:14]):
sentence_visual_end = False
group_id = 0
for idx, line in enumerate(lines):
# print(idx)
line_str, style_dict, text_list = (
line["text"],
line["style"],
line["text_list"],
)
line_str = " ".join(line_str.split())
if should_skip(line_str):
continue
if line_str in line_set:
continue
if len(line_str.split()) > 8:
line_set.add(line_str)
curr_line = line_parser.Line(
line_str=line_str,
style_dict=style_dict,
text_list=text_list,
page_details=page_stats,
)
if prev_line is None:
# initialize memory of previous line.
# this will update with join decisions
list_char = ""
if curr_line.line_type == "list_item":
list_char = curr_line.text[0]
curr_line.text = curr_line.text[1:].lstrip()
if curr_line.line_type == "header":
header_block_idx = block_idx
block = {
"block_idx": block_idx,
"block_text": curr_line.text,
"block_type": curr_line.line_type,
"header_block_idx": header_block_idx,
"block_group": [curr_line.visual_line.text_list],
"list_char": list_char,
"fs": curr_line.visual_line.start_fs,
"text_group_start_idx": text_group_start_idx,
"block_list": curr_line.visual_line.text_list,
"line": curr_line,
"y": curr_line.visual_line.start_y,
"group_id": group_id,
}
prev_line = curr_line
block_idx += 1
# if (idx <= 3) or (idx >= len(lines) - 3):
# line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip()
# if line_without_numbers:
# # track block_idx for de-duplication
# line_set[line_without_numbers].append((page_idx, block_idx))
page_blocks.append(block)
continue
# print("--" * 50)
# print(prev_line.line_type, "\n", prev_line.text)
# print(prev_ents)
# print(prev_line.visual_line.fw_list)
# print(prev_line.visual_line.font_family)
# print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text)
# print(prev_line.visual_line.mode_fs)
# print(curr_line.line_type, "\n", curr_line.text)
# print(curr_ents)
# print()
# print(curr_line.visual_line.font_family)
# print(curr_line.visual_line.mode_fs)
# print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text)
if (
len(prev_line.text) > 1
and len(curr_line.text) > 1
and prev_line.text[:2] == curr_line.text[:2]
and prev_line.text[1] == " "
and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit())
and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha())
):
curr_line.line_type = "list_item"
curr_line.is_list_item = True
curr_line.is_list_or_row = True
if page_blocks[-1]["block_type"] != "list_item":
page_blocks[-1]["block_type"] = "list_item"
page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0]
page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][
1:
].lstrip()
same_start_fs = (
abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5
)
same_end_fs = (
abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5
)
same_end_start_fs = (
abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5
)
prev_above_curr = (
True
if prev_line.visual_line.end_y < curr_line.visual_line.start_y
else False
)
y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y
top_overlap = compute_overlap_top_bottom(
start_x0=prev_line.visual_line.start_x,
end_x0=prev_line.visual_line.end_x,
start_x1=curr_line.visual_line.start_x,
end_x1=curr_line.visual_line.end_x,
)
bottom_overlap = compute_bottom_top_overlap(
start_x0=prev_line.visual_line.start_x,
end_x0=prev_line.visual_line.end_x,
start_x1=curr_line.visual_line.start_x,
end_x1=curr_line.visual_line.end_x,
)
prev_overlap_curr = True if bottom_overlap or top_overlap else False
use_visual_join = True if prev_above_curr and prev_overlap_curr else False
if not use_visual_join and prev_line.incomplete_line:
join_font_spacing = True
if not (prev_line.is_table_row or curr_line.is_table_row):
if page_stats["n_lines"] <= 3:
join_font_spacing = True
else:
join_font_spacing = check_page_spacing(
prev_line,
curr_line,
page_stats["fs_and_diff_next_y"],
)
# if the font is different and font-family is different
different_font_family = (
curr_line.visual_line.font_family != prev_line.visual_line.font_family
)
different_common_fs = (
prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs
and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs
)
different_font = (
different_font_family and different_common_fs and not join_font_spacing
)
# start and end characters are same font or the mode of fonts of both lines is the same
same_font = (
(prev_line.visual_line.fs == curr_line.visual_line.fs)
or (same_start_fs and same_end_fs)
or same_end_start_fs
or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs
) and not different_font
prev_ents = (
len(prev_line.visual_line.text_list)
if not prev_line.line_type == "list_item"
else 0
)
curr_ents = (
len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0
)
ents_aligned = check_tr_alignment(prev_line, curr_line)
is_incomplete_sent = (
prev_line.incomplete_line
and not prev_line.ends_with_period
or prev_line.ends_with_comma
)
# logic using line after curr
if idx + 1 < len(lines):
# this is inefficient as line_parser is called twice,
# once for next_line and once for curr_line.
next_line = lines[idx + 1]
# print("NEXT LINE\n", next_line['text'])
next_line_str, next_style_dict, next_text_list = (
next_line["text"],
next_line["style"],
next_line["text_list"],
)
next_line = line_parser.Line(
line_str=next_line_str,
style_dict=next_style_dict,
text_list=next_text_list,
page_details=page_stats,
)
# if the last line was not a table, check if the next line is a table to avoid single tr
if prev_line.line_type != "table_row" and not ents_aligned:
# check if the next line is a table and matches curr_line
next_line_tr = next_line.line_type == "table_row" or should_join_table(
curr_line,
next_line,
False,
)
if not next_line_tr and curr_line.line_type == "table_row":
curr_line.line_type = "para"
# if the next line is joinable by visual stats but prev and curr are not
# don't join the line (only true by x-span check and y is below for prev cur)
# if this is not true ignore the rule
prev_not_above_next = (
next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y
)
next_line_join = False
if next_line and check_layout(prev_line, next_line, prev_not_above_next):
next_line_join = check_page_spacing(
curr_line,
next_line,
page_stats["fs_and_diff_next_y"],
)
# if the prev line is not visually joinable and the curr_next is
# make sure the prev_line doesn't join the curr_line
curr_next_visual_join = not join_font_spacing and next_line_join
# print()
# print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line")
# print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line)
# print("join_font_spacing:,", join_font_spacing)
is_incomplete = (
is_incomplete_sent
or (join_font_spacing and not sentence_visual_end)
or curr_line.continuing_line
)
# print("is_incomplete", is_incomplete)
has_overlap_with_min = (
compute_overlap(
curr_line.visual_line.start_x,
curr_line.visual_line.end_x,
prev_line.visual_line.start_x,
prev_line.visual_line.end_x,
divide_by_min=True,
)
> 0.7
)
is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0
is_visually_apart = (has_overlap_with_min and not is_below) or (
not has_overlap_with_min and is_below
)
above_bold_below_not = (
prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0
)
has_overlap_with_max = (
compute_overlap(
curr_line.visual_line.start_x,
curr_line.visual_line.end_x,
prev_line.visual_line.start_x,
prev_line.visual_line.end_x,
divide_by_min=False,
)
> 0.3
)
is_not_header_over_para = True
if (
above_bold_below_not
and not has_overlap_with_max
and prev_line.line_type == "header"
and not prev_line.incomplete_line
):
is_not_header_over_para = False
# print("header over para check")
# print("""above_bold_below_not
# and not has_overlap_with_max
# and prev_line.line_type == "header"
# """)
# print(above_bold_below_not)
# print(has_overlap_with_max, j)
# print(prev_line.line_type == "header")
# print()
# print(is_not_header_over_para)
###########
# List item
if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]):
prev_line.line_type = "list_item"
curr_line.line_type = "list_item"
curr_line.is_list_item = True
# change prev_line to list item
if page_blocks[-1]["block_type"] != "list_item":
page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0]
page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][
1:
].lstrip()
page_blocks[-1]["block_type"] = "list_item"
close_text_y = (
curr_line.visual_line.start_y
- curr_line.visual_line.mode_fs
- prev_line.visual_line.start_y
- prev_line.visual_line.mode_fs
) <= 0
aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x
title_text = False
if len(lines) < 10:
title_text = top_overlap == 1.0 and close_text_y and aligned_text
visual_header = visual_header_check(prev_line, curr_line, same_font)
list_item_rule = curr_line.has_list_char or (
curr_line.numbered_line
and not (
(prev_line.incomplete_line and curr_line.continuing_line)
or join_font_spacing
)
)
last_2_block_tr = False
if len(page_blocks) >= 2:
last_block_tr = (
page_blocks[-1]["block_type"] == "table_row"
and page_blocks[-2]["block_type"] == "table_row"
)
if not last_block_tr and curr_line.line_type == "para":
# check to join
if prev_line.incomplete_line and curr_line.continuing_line:
last_2_block_tr = True
no_space_join = prev_line.ends_with_period and curr_line.text[0] != " "
visual_header_by_stats = visual_header_from_stats(
prev_line,
curr_line,
page_stats,
)
header_join = False
common_list = curr_line.has_list_char or prev_line.has_list_char
if (
visual_header_by_stats
and curr_line.incomplete_line
and same_font
and not (prev_line.is_table_row or curr_line.is_table_row or common_list)
):
header_join = True
# print("LINEJOIN CHECK")
# print("positive\n", "*" * 10)
# print(f"\nsame_font:{same_font}",
# f"\nis_incomplete:{is_incomplete}",
# f"\nis_not_header_over_para:{is_not_header_over_para}")
# print("join_font_spacing", join_font_spacing)
# print("header join", header_join)
# print()
# print("negative\n", "*" * 10)
# print(f"\nis_visually_apart:{is_visually_apart}",
# f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}",
# f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}",
# f"\ncurr_line table {curr_line.line_type == 'table_row'}",
# f"\ncurr_line list {curr_line.is_list_item}",
# f"\nvisual_header {visual_header}",
# f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}')
if (
same_font
and not should_join_table(prev_line, curr_line, ents_aligned)
and not (curr_line.line_type == "table_row" or list_item_rule)
and not (prev_line.line_type == "table_row" and not last_2_block_tr)
and is_incomplete
and not curr_next_visual_join # is_visually_apart
and not visual_header
or not check_parentheses(prev_line.text)
and is_not_header_over_para
and not no_space_join
or title_text
or header_join
):
# print("JOIN")
if not is_visually_apart and bottom_overlap < 0.5:
# this would signify end of paragraph
sentence_visual_end = True
else:
sentence_visual_end = False
if page_stats["n_lines"] <= 3:
page_blocks[-1]["block_type"] = "header"
elif (
not prev_line.line_type == "list_item"
): # and not curr_line.visual_line.is_header:
page_blocks[-1]["block_type"] = "para"
new_text = formatter.connect(
prev_line.text.rstrip(),
curr_line.text.lstrip(),
)
new_text_list = (
prev_line.visual_line.text_list + curr_line.visual_line.text_list
)
# print("Max ex min ex assignment")
max_x = max(prev_line.visual_line.max_x, curr_line.visual_line.max_x)
min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
prev_line_type = prev_line.line_type
page_blocks[-1]["block_text"] = new_text
prev_start_y = prev_line.visual_line.start_y
curr_start_y = curr_line.visual_line.start_y
prev_end_y = prev_line.visual_line.end_y
wrapped_page = prev_line.visual_line.wrapped_page
# pass the line parser attributes
prev_line = curr_line
# add appended text and text_list, preserve the line type
prev_line.text = new_text
prev_line.visual_line.start_y = prev_start_y
prev_line.visual_line.text_list = new_text_list
prev_line.line_type = prev_line_type
prev_line.visual_line.min_x = min_x
prev_line.visual_line.max_x = max_x
prev_line.visual_line.wrapped_page = wrapped_page
if curr_start_y < prev_end_y:
prev_line.visual_line.wrapped_page = True
# print(prev_start_y)
# print("Join")
# print()
# print("-" * 50)
# print()
# new block
else:
# print("NEW block")
# print("*" * 50)
if not is_visually_apart and bottom_overlap < 0.5:
# this would signify end of paragraph
sentence_visual_end = True
else:
sentence_visual_end = False
# print("-"*50)
colon_rule = (
prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents
)
# normal case
tab_check_join = {
prev_line.visual_line.tab_count_join,
prev_line.visual_line.tab_count,
} & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count}
tab_check = sum(tab_check_join) > 0
# print("-+" * 50)
# print("TAB POSITIONS")
# print(prev_line.text)
# print(prev_line.visual_line.start_x_list)
# print(prev_line.visual_line.start_x_list_single_ent)
# print(prev_line.visual_line.tab_count)
# print(prev_line.visual_line.tab_count_join)
#
# print(curr_line.text)
# print(curr_line.visual_line.start_x_list)
# print(curr_line.visual_line.start_x_list_single_ent)
# print(curr_line.visual_line.tab_count)
# print(curr_line.visual_line.tab_count_join)
# print("tabcheck", tab_check)
# print("ents_aligned", ents_aligned)
# print(prev_ents, curr_ents)
# print(curr_line.visual_line.text_list)
# print("-+" * 50)
if visual_header_by_stats and prev_line.line_type != "table_row":
page_blocks[-1]["block_type"] = "header"
elif (
colon_rule
and prev_ents == 1
and prev_line.line_type != "list_item"
and not (prev_line.incomplete_line and curr_line.continuing_line)
):
# print("Table Conversion")
# print()
# print("colon check")
# print(prev_line.text.split(":"))
# print(curr_line.text.split(":"))
# print("TR1")
new_text_list = prev_line.text.split(":")
new_text_list = [new_text_list[0] + ":", new_text_list[1:]]
page_blocks[-1]["block_type"] = "table_row"
page_blocks[-1]["block_list"]: new_text_list
if text_group_start:
text_group_start = False
text_group_start_idx = page_blocks[-1]["block_idx"]
page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
curr_line.line_type = "table_row"
curr_line.is_list_or_row = True
# print("Table Conversion!")
# print(prev_ents, curr_ents)
# print(page_blocks[-1]["block_text"])
# print("TR3")
elif (
tab_check and ents_aligned and prev_line.line_type != "list_item"
) or (colon_rule and not prev_line.incomplete_line):
# print("Table Conversion")
# print(prev_ents, curr_ents)
# print(page_blocks[-1]["block_text"])
# print("TR2")
page_blocks[-1]["block_type"] = "table_row"
if text_group_start:
text_group_start = False
text_group_start_idx = page_blocks[-1]["block_idx"]
page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
curr_line.line_type = "table_row"
else:
text_group_start = True
text_group_start_idx = -1
list_char = ""
if curr_line.line_type == "list_item":
list_char = curr_line.text[0]
curr_line.text = curr_line.text[1:].lstrip()
if curr_line.line_type == "header":
header_block_idx = block_idx
if (visual_header or visual_header_by_stats) and not (
prev_line.line_type == "list_item"
or prev_line.line_type == "numbered_list_item"
):
page_blocks[-1]["block_type"] = "header"
# print()
# print("*" * 40)
# print("NEW BLOCK")
# print()
# print("*" * 40)
# print(curr_line.line_type, curr_line.text)
# group attribute
if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0:
group_id += 1
block = {
"block_idx": block_idx,
"block_text": curr_line.text,
"block_type": curr_line.line_type,
"header_block_idx": header_block_idx,
"block_group": [curr_line.visual_line.text_list],
"text_group_start_idx": text_group_start_idx,
"list_char": list_char,
"group_id": group_id,
"fs": curr_line.visual_line.start_fs,
"x": curr_line.visual_line.start_x,
"y": curr_line.visual_line.start_y,
"line": curr_line,
"block_list": curr_line.visual_line.text_list,
}
# This is to account for headers that are false positives. TODO: improve header code
prev_text = page_blocks[-1]["block_text"]
if page_blocks[-1]["block_type"] == "header" and (
len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16
):
page_blocks[-1]["block_type"] = "para"
prev_line = curr_line
block_idx += 1
page_blocks.append(block)
# not too many blocks there may be title text missed
if len(page_blocks) <= 2:
for idx, block in enumerate(page_blocks):
if "." not in block["block_text"] and len(block["block_text"].split()) < 10:
page_blocks[idx]["block_type"] = "header"
page_blocks = order_blocks(page_blocks)
return page_blocks, line_set
def clean_line(line):
line = line.replace("\n", " ")
line = line.replace("\t", " ")
line = line.strip()
return line
def fix_spaced_characters(line_text):
line_text = re.sub(r"\s+", "", line_text)
return su.segment(line_text)
def connect(prev, curr):
has_space = prev.endswith(" ")
result = prev + ("" if has_space else " ") + curr
return result
def get_numbers(line):
# test = re.compile(r"[0-9]+\.?[0-9]?")
regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$")
return regex.search(line)
def check_block_join(prev_block, block):
prev_text = prev_block["block_text"]
curr_text = block["block_text"]
blocks_are_paras = (
prev_block["block_type"] == "para" and block["block_type"] == "para"
)
if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras:
prev_line = line_parser.Line(prev_block["block_text"])
curr_line = line_parser.Line(block["block_text"])
if prev_line.incomplete_line or curr_line.continuing_line:
return True
return False
def join_blocks(page_blocks, blocks):
prev_last_block = page_blocks[-1][-1]
# update page blocks and blocks
# prev_blocks = page_blocks[-1]
# last_prev_block = prev_blocks[-1]
# check to join last_prev_block with first blocks[0]
# if it's a join, pop the block and join, subtract block indexes
prev_last_block["block_text"] = (
prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip()
)
prev_last_block["block_list"].append(blocks[0]["block_list"])
# print(prev_block)
page_blocks[-1][-1] = prev_last_block
for block in blocks[1:]:
block["block_idx"] -= 1
return page_blocks, blocks[1:]
<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor/processors.py<fim_prefix>import logging
import re
from collections import Counter
from collections import defaultdict
from . import formatter
from . import line_parser
from . import patterns
from nlm_ingestor.ingestor_utils import spell_utils
from nlm_ingestor.ingestor_utils.utils import sent_tokenize
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
su = spell_utils.SpellUtil()
def stem(line):
line = line.replace("'s", "")
line = line.replace("’s", "")
return line
def check_parentheses(text):
count = 0
for i in text:
if i == "(":
count += 1
elif i == ")":
count -= 1
return count == 0
def nlm_tokenize(line):
# print(line)
tokens = []
if not line:
line = ""
line = line.lower()
trans_table = line.maketrans("-/", " ")
line = line.translate(trans_table)
line = line.translate(str.maketrans("", "", "�\\(*,.?•\\➢ƒ–\\)'\"—"))
# line = patterns.num_unit.sub(r"100 \1", line)
line = patterns.num_unit.sub(r"", line)
line = stem(line)
words = line.split()
for word in words:
if (
not word.isdigit()
and not word.endswith("%")
and not word.startswith("$")
and not word.endswith("$")
):
tokens.append(word)
if len(tokens) == 0:
tokens.append("unknown")
return tokens
# returns True only when no word in the line is longer than two characters (i.e. the line is just floating characters)
def find_floating_chars(line):
words = line.split(" ")
for word in words:
if len(word) > 2:
return False
return True
def is_table_row(line):
line = line_parser.Line(line)
return line.is_table_row
def should_skip(line, xml=False):
return len(line) <= 2 if not xml else len(line) == 0
def clean_lines(lines, xml=False):
result = []
running_line = ""
line_buffer = []
line_type = "para"
header_block_idx = -1
block_idx = 0
line_set = set()
for line_str in lines:
# print(line_str)
line_str = clean_line(line_str)
if should_skip(line_str, xml=xml):
continue
line_without_numbers = re.sub(r"\d+", "", line_str)
if line_without_numbers in line_set:
continue
else:
line_set.add(line_without_numbers)
curr_line = line_parser.Line(line_str)
# this converts strings like 'e x e c u t i v e summary' to 'executive summary'
if not xml and curr_line.has_spaced_characters:
line_str = fix_spaced_characters(line_str)
curr_line = line_parser.Line(line_str)
if len(line_buffer) > 0:
# find out if previous line was a discontinuous line
prev_line = line_buffer[-1]
logger.debug("========")
logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n")
logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n")
# keep connecting lines as long as they seem incomplete
is_incomplete = prev_line.incomplete_line or (
len(line_buffer) > 1 and not prev_line.ends_with_period
)
logger.debug(
f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}",
)
if (
is_incomplete
and not (curr_line.is_list_or_row or curr_line.line_type == "list_item")
) or curr_line.continuing_line:
logger.debug("connecting..")
running_line = formatter.connect(running_line, curr_line.text)
line_buffer.append(curr_line)
# if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers
if not line_type == "list_item":
line_type = "para"
else: # commit the line and start a new line
# remove different types of bulleted lists (for better formatting) but do not touch numbered lines
logger.debug("starting new line..")
# if line_<fim_suffix>type == "list_item":
# running_line = running_line[1:].lstrip()
if line_type == "header":
header_block_idx = block_idx
block = {
"block_idx": block_idx,
"block_text": running_line,
"block_type": line_type,
"text_group_start_idx": -1,
"block_list": [],
"header_block_idx": header_block_idx,
"level": 0,
}
result.append(block)
block_idx = block_idx + 1
running_line = curr_line.text
line_buffer = [curr_line]
line_type = curr_line.line_type
logger.debug("========")
else:
running_line = curr_line.text
line_type = curr_line.line_type
line_buffer = [curr_line]
if line_type == "list_item" and running_line[0] in "�\\*,.?•\\➢ƒ–\\'\"—":
running_line = running_line[1:].lstrip()
block = {
"block_idx": block_idx,
"block_text": running_line,
"block_type": line_type,
"text_group_start_idx": -1,
"block_list": [],
"header_block_idx": header_block_idx,
"level": 0,
}
result.append(block)
return result
def line_list_check(prev_line, curr_line, list_char):
# if prev_line is list_item and list_char matches curr_line
if list_char == curr_line.text[0] and list_char not in ["”", "'", '"', "("]:
return True
# same char is alpha
if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha():
if len(prev_line.text) >= 2 and prev_line.text[1].isupper():
# spell check first word
first_word = prev_line.text.split(" ")[0]
first_word = first_word.replace("'", "")
correct_word = su.segment(first_word)
if first_word[1:] == correct_word:
return True
# same char is not alpha but not digit
if prev_line.text[0] == curr_line.text[0] and not (
prev_line.text[0].isalpha()
or prev_line.text[0].isdigit()
or list_char not in ["”", "'", '"', "("]
):
return True
return False
def should_join_table(prev_line, curr_line, ents_aligned):
"""
Check if the next line should be joined as a table row (tr). This makes no assumption about whether the current line is part of a table
"""
# print()
# print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list)
# check list of spaced words
curr_line_ents = len(prev_line.visual_line.text_list)
next_line_ents = len(curr_line.visual_line.text_list)
ent_match = (
curr_line_ents == next_line_ents and curr_line_ents >= 2
) # tr should have at least two elements
# print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count)
tab_match = (
prev_line.visual_line.tab_count == curr_line.visual_line.tab_count
and curr_line.visual_line.tab_count > 0
)
# casing should also be the same
same_case = (
prev_line.text[0].islower() == curr_line.text[0].islower()
or prev_line.text[0].isupper() == curr_line.text[0].isupper()
)
colon_check = (
prev_line.hit_colon
and curr_line.hit_colon
and prev_line
and same_case
and not prev_line.incomplete_line
)
# if prev_line.hit_colon and curr_line.hit_colon:
# print()
# print("colon check")
# print(prev_line.visual_line.text_list)
# print(curr_line.visual_line.text_list)
# col_check
# print(tab_match, ent_match, colon_check)
tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count
return (
(tab_match and ent_match)
or colon_check
or (ents_aligned and ent_match and tab_check)
)
def check_page_spacing(prev_line, curr_line, spacing_dict):
# print("^"*50)
# print("checking page stats")
# print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text)
# print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text)
# print()
diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y)
# find best fs reference
prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs}
curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs}
same_fs = prev_line_fs.intersection(curr_line_fs)
fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs
min_check = (
spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None
)
max_check = (
spacing_dict[(fs, diff_top + 1)] if (fs, diff_top + 1) in spacing_dict else None
)
normal_check = (fs, diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3
if min_check or normal_check or max_check:
# get all fs in spacing dict
# see if the diff top is a min
# print("checking space dict")
distance_list = []
for val in spacing_dict:
if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2:
distance_list.append((val, val[1]))
# print(distance_list)
val = min(distance_list) if len(distance_list) else []
if len(val):
join_fs, join_top = val[0]
if val[0] == (fs, diff_top): # or close
# print("SHOULDJOIN")
return True
elif (
join_fs == fs
and ((diff_top - 1) == join_top)
or ((diff_top + 1) == join_top)
):
return True
return False
def compute_overlap(
start_x0: float,
end_x0: float,
start_x1: float,
end_x1: float,
divide_by_min=True,
) -> float:
"""
Computes the % of intersection (overlap) of two lines w.r.t. the shortest line
"""
width_x0 = abs(end_x0 - start_x0)
width_x1 = abs(end_x1 - start_x1)
if start_x0 <= start_x1 <= end_x0:
intersect = min(abs(end_x0 - start_x1), width_x1)
elif start_x0 <= end_x1 <= end_x0:
intersect = min(abs(end_x1 - start_x0), width_x1)
elif start_x1 <= start_x0 <= end_x0 <= end_x1:
intersect = abs(end_x0 - start_x0)
else:
intersect = 0.0
if divide_by_min:
intersect /= min(width_x0, width_x1) + 1e-5
else:
intersect /= max(width_x0, width_x1) + 1e-5
return intersect
def compute_overlap_top_bottom(
start_x0: float,
end_x0: float,
start_x1: float,
end_x1: float,
) -> float:
"""
This is different from the above function.
Finds percentage overlap of top to bottom.
A score of 100% is possible; the overlap is not measured relative to the shortest line.
"""
width_x1 = abs(end_x1 - start_x1)
if width_x1 == 0:
return 0.0
if start_x0 <= start_x1:
# measure from left to right
if end_x1 <= end_x0:
# if start and end both less, full in subset
return 1.0
return (end_x1 - start_x0) / width_x1
else:
# measure from bottom start
if end_x1 <= start_x0:
return 0.0
return (end_x1 - start_x0) / width_x1
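# Worked example with hypothetical x-coordinates:
#     >>> compute_overlap_top_bottom(0, 100, 20, 80)    # bottom line sits fully inside the top line
#     1.0
#     >>> compute_overlap_top_bottom(50, 100, 0, 40)    # bottom line ends before the top line starts
#     0.0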
def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1):
"""
Variant of the checks above, comparing the bottom line (x1) against the top line (x0):
when both lines start at the same x, returns the bottom line's width relative to the
top line's width; in every other case it falls through to 1.0.
"""
# print(start_x0, end_x0)
# print(start_x1, end_x1)
if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line
# print()
# print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0))
return (end_x1 - start_x1) / (end_x0 - start_x0)
# other conditions
# elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line
# return
# else: #to the right of bottom line
return 1.0
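# Worked example: with both lines starting at x=0, a 60px-wide bottom line under a
# 100px-wide top line scores 0.6; any other arrangement falls through to 1.0.
#     >>> compute_bottom_top_overlap(0, 100, 0, 60)
#     0.6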
# header check for lines with similar font
def visual_header_check(prev_line, curr_line, same_font):
# check top overlap (small) if the font size is bigger
# print()
# print("visual_header check:")
# print("prev", prev_line.text)
# print("checking", curr_line.text)
# top also has to be higher
# print("prev_line.visual_line.start_y, prev_line.visual_line.end_y")
# print(prev_line.visual_line.start_y, prev_line.visual_line.end_y)
# print(prev_line.visual_line.start_y, curr_line.visual_line.start_y)
if prev_line.visual_line.wrapped_page:
return False
if prev_line.visual_line.start_y < curr_line.visual_line.start_y:
prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x
curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x
# print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x")
# print(prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x)
# print("curr_line.visual_line.min_x, curr_line.visual_line.max_x")
# print(curr_line.visual_line.min_x, curr_line.visual_line.max_x)
# print("prev_line_width / curr_line_width")
# print(prev_line_width / curr_line_width)
# print("prev_line_width, curr_line_width")
# print(prev_line_width, curr_line_width)
if curr_line_width == 0:
return False
# print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x))
if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x):
if round(prev_line_width) == round(curr_line_width):
# print()
# print("NOT A HEADER1")
return False
offset = 0
# print(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
# print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x)
if prev_line.visual_line.min_x <= curr_line.visual_line.min_x:
offset = curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset
# print("(prev_line_width - offset) / curr_line_width")
# print((prev_line_width - offset) / curr_line_width)
overlap_percentage = (prev_line_width - offset) / curr_line_width
different_font_style = (
prev_line.visual_line.fw != curr_line.visual_line.fw
or prev_line.visual_line[1] != curr_line.visual_line[1]
or prev_line.visual_line.fs > curr_line.visual_line.fs
)
if (
overlap_percentage < 0.3
or (different_font_style and overlap_percentage < 0.6)
or (prev_line.line_type == "header" and different_font_style)
# or (prev_line.is_header and different_font_style)
):
# print("HEADER INDENT", prev_line.is_header)
# print("overlap rule::", (prev_line_width - offset) / curr_line_width)
# print(True)
return True
# print(False)
# print()
# print("NOT A HEADER")
return False
def visual_header_from_stats(prev_line, curr_line, page_stats):
prev_fs = prev_line.visual_line.fs
curr_fs = curr_line.visual_line.fs
median_val = round(page_stats["median_fs"])
max_val = round(max(page_stats["fs_list"]))
max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if max_val != 0 else True
prev_fs_diff = round(prev_fs - median_val)
curr_fs_diff = (
round(curr_fs - median_val) if round(curr_fs - median_val) else 0.8
) # curr_fs is the median
varied_set = len(set(page_stats["fs_list"])) >= 4
rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]])
unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"])
prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff
# print("prev_fs, curr_fs", prev_fs, curr_fs)
# print("unique text")
# print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) )
# print("visual_header check", len(set(page_stats["fs_list"])))
# print("varied_set", varied_set, "unique_text", unique_text)
# print(rounded_fs_count)
# print()
# close from max or far enough from median
bigger_text = max_val_diff or (
prev_curr_ratio_from_median > 2
) # TODO text must also be relatively uncommon
if varied_set and (unique_text <= 0.08):
if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3:
# print(max_val_diff)
# print(prev_fs, prev_line.text)
# print(curr_fs, curr_line.text)
# print()
return True
# header join
if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1):
# print(max_val_diff)
# print(prev_fs, prev_line.text)
# print(curr_fs, curr_line.text)
# print()
return True
return False
# def visual_clean_lines(lines, page_stats={}, page_info_dict={}):
def check_tr_alignment(prev_line, curr_line):
# print("-=" * 50)
# print("check_tr_alignment!")
# print(prev_line.text)
# print(curr_line.text)
# print()
prev_ents = len(prev_line.visual_line.text_list)
curr_ents = len(curr_line.visual_line.text_list)
prev_positions = prev_line.visual_line.start_x_list
curr_positions = curr_line.visual_line.start_x_list
prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent
curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent
# print(prev_line_start_ents)
# print(curr_line_start_ents)
same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1
if len(prev_line_start_ents) == len(curr_line_start_ents):
prev_positions = prev_line_start_ents
curr_positions = curr_line_start_ents
if len(prev_line_start_ents) == len(curr_positions) and len(
prev_line_start_ents,
) != len(
prev_positions,
): # joined p_tags
prev_positions = prev_line_start_ents
if not same_ents:
# print("check_tr_alignment False1")
# print(prev_ents, curr_ents)
return False
# print("CHECKING POSITIONS")
# print(prev_positions)
# print(curr_positions)
for p_x, c_x in zip(prev_positions, curr_positions):
p_x = round(p_x)
c_x = round(c_x)
if abs(p_x - c_x) > 100:
# print("False")
# print("check_tr_alignment False3")
return False
# print("check_tr_alignment True")
return True
def check_layout(prev_line, curr_line, prev_above_curr):
prev_line_width = range(
int(prev_line.visual_line.min_x),
int(prev_line.visual_line.max_x),
)
# weird edge case
if not prev_line_width:
prev_line_width = range(
int(prev_line.visual_line.max_x),
int(prev_line.visual_line.min_x),
)
curr_line_width = range(
int(curr_line.visual_line.min_x),
int(curr_line.visual_line.max_x),
)
prev_line_width = set(prev_line_width)
prev_curr_overlap = prev_line_width.intersection(curr_line_width)
if prev_curr_overlap and not prev_above_curr:
# print(prev_line.text)
# print(curr_line.text)
# print("misplaced text group")
# print()
return True
return False
def order_blocks(blocks):
block_group_dict = defaultdict(list)
for idx, block in enumerate(blocks):
# print(idx, "block-group", block["group_id"], block["block_type"], block['block_text'])
group_id = block["group_id"]
block_group_dict[group_id].append(block)
block_group_list = [] # list that holds tuples (group_id, y_pos)
for block_group_id in block_group_dict:
block_group_list.append(
(block_group_id, block_group_dict[block_group_id][0]["y"]),
) # append starting y position of group
block_group_list = sorted(
block_group_list,
key=lambda x: x[1],
) # sort block groups by y position
# get list of ordered block group keys
ordered_blocks = []
for block_group_id, y in block_group_list:
ordered_blocks += block_group_dict[block_group_id]
# for b in original_blocks:
# re-index blocks and headers based off of new ordering
header_idx = 0
for idx, block in enumerate(ordered_blocks):
block["block_idx"] = idx
if block["block_type"] == "header":
header_idx = idx
ordered_blocks[idx]["header_block_idx"] = header_idx
return ordered_blocks
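# Sketch of the expected input, using minimal hypothetical block dicts:
#     order_blocks([
#         {"group_id": 1, "y": 300.0, "block_idx": 0, "block_type": "para", ...},
#         {"group_id": 0, "y": 120.0, "block_idx": 1, "block_type": "header", ...},
#     ])
# emits the group_id 0 block first (smaller starting y), re-assigns block_idx to match the
# new order, and points each block's header_block_idx at the most recently seen header.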
def visual_clean_lines(
lines,
page_stats={},
page_info_dict={},
page_idx=0,
line_set={},
):
page_blocks = []
header_block_idx = -1
block_idx = 0
# block_idx = page_idx
style_dict = {}
join_font_spacing = False
prev_line = None
text_list = []
prev_ents = 0
curr_ents = 0
is_incomplete = False
colon_rule = False
text_group_start = True
text_group_start_idx = 0
prev_line = None
next_line = None
# for idx, line in enumerate(lines[12:14]):
sentence_visual_end = False
group_id = 0
for idx, line in enumerate(lines):
# print(idx)
line_str, style_dict, text_list = (
line["text"],
line["style"],
line["text_list"],
)
line_str = " ".join(line_str.split())
if should_skip(line_str):
continue
if line_str in line_set:
continue
if len(line_str.split()) > 8:
line_set.add(line_str)
curr_line = line_parser.Line(
line_str=line_str,
style_dict=style_dict,
text_list=text_list,
page_details=page_stats,
)
if prev_line is None:
# initialize memory of previous line.
# this will update with join decisions
list_char = ""
if curr_line.line_type == "list_item":
list_char = curr_line.text[0]
curr_line.text = curr_line.text[1:].lstrip()
if curr_line.line_type == "header":
header_block_idx = block_idx
block = {
"block_idx": block_idx,
"block_text": curr_line.text,
"block_type": curr_line.line_type,
"header_block_idx": header_block_idx,
"block_group": [curr_line.visual_line.text_list],
"list_char": list_char,
"fs": curr_line.visual_line.start_fs,
"text_group_start_idx": text_group_start_idx,
"block_list": curr_line.visual_line.text_list,
"line": curr_line,
"y": curr_line.visual_line.start_y,
"group_id": group_id,
}
prev_line = curr_line
block_idx += 1
# if (idx <= 3) or (idx >= len(lines) - 3):
# line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip()
# if line_without_numbers:
# # track block_idx for de-duplication
# line_set[line_without_numbers].append((page_idx, block_idx))
page_blocks.append(block)
continue
# print("--" * 50)
# print(prev_line.line_type, "\n", prev_line.text)
# print(prev_ents)
# print(prev_line.visual_line.fw_list)
# print(prev_line.visual_line.font_family)
# print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text)
# print(prev_line.visual_line.mode_fs)
# print(curr_line.line_type, "\n", curr_line.text)
# print(curr_ents)
# print()
# print(curr_line.visual_line.font_family)
# print(curr_line.visual_line.mode_fs)
# print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text)
if (
len(prev_line.text) > 1
and len(curr_line.text) > 1
and prev_line.text[:2] == curr_line.text[:2]
and prev_line.text[1] == " "
and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit())
and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha())
):
curr_line.line_type = "list_item"
curr_line.is_list_item = True
curr_line.is_list_or_row = True
if page_blocks[-1]["block_type"] != "list_item":
page_blocks[-1]["block_type"] = "list_item"
page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0]
page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][
1:
].lstrip()
same_start_fs = (
abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5
)
same_end_fs = (
abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5
)
same_end_start_fs = (
abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5
)
prev_above_curr = (
True
if prev_line.visual_line.end_y < curr_line.visual_line.start_y
else False
)
y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y
top_overlap = compute_overlap_top_bottom(
start_x0=prev_line.visual_line.start_x,
end_x0=prev_line.visual_line.end_x,
start_x1=curr_line.visual_line.start_x,
end_x1=curr_line.visual_line.end_x,
)
bottom_overlap = compute_bottom_top_overlap(
start_x0=prev_line.visual_line.start_x,
end_x0=prev_line.visual_line.end_x,
start_x1=curr_line.visual_line.start_x,
end_x1=curr_line.visual_line.end_x,
)
prev_overlap_curr = True if bottom_overlap or top_overlap else False
use_visual_join = True if prev_above_curr and prev_overlap_curr else False
if not use_visual_join and prev_line.incomplete_line:
join_font_spacing = True
if not (prev_line.is_table_row or curr_line.is_table_row):
if page_stats["n_lines"] <= 3:
join_font_spacing = True
else:
join_font_spacing = check_page_spacing(
prev_line,
curr_line,
page_stats["fs_and_diff_next_y"],
)
# if the font is different and font-family is different
different_font_family = (
curr_line.visual_line.font_family != prev_line.visual_line.font_family
)
different_common_fs = (
prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs
and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs
)
different_font = (
different_font_family and different_common_fs and not join_font_spacing
)
# start and end characters are same font or the mode of fonts of both lines is the same
same_font = (
(prev_line.visual_line.fs == curr_line.visual_line.fs)
or (same_start_fs and same_end_fs)
or same_end_start_fs
or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs
) and not different_font
prev_ents = (
len(prev_line.visual_line.text_list)
if not prev_line.line_type == "list_item"
else 0
)
curr_ents = (
len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0
)
ents_aligned = check_tr_alignment(prev_line, curr_line)
is_incomplete_sent = (
prev_line.incomplete_line
and not prev_line.ends_with_period
or prev_line.ends_with_comma
)
# logic using line after curr
if idx + 1 < len(lines):
# this is inefficient as line_parser is called twice,
# once for next_line and once for curr_line.
next_line = lines[idx + 1]
# print("NEXT LINE\n", next_line['text'])
next_line_str, next_style_dict, next_text_list = (
next_line["text"],
next_line["style"],
next_line["text_list"],
)
next_line = line_parser.Line(
line_str=next_line_str,
style_dict=next_style_dict,
text_list=next_text_list,
page_details=page_stats,
)
# if the last line was not a table, check if the next line is a table to avoid single tr
if prev_line.line_type != "table_row" and not ents_aligned:
# check if the next line is a table and matches curr_line
next_line_tr = next_line.line_type == "table_row" or should_join_table(
curr_line,
next_line,
False,
)
if not next_line_tr and curr_line.line_type == "table_row":
curr_line.line_type = "para"
# if the next line is joinable by visual stats but prev and curr are not
# don't join the line (only true by x-span check and y is below for prev cur)
# if this is not true ignore the rule
prev_not_above_next = (
next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y
)
next_line_join = False
if next_line and check_layout(prev_line, next_line, prev_not_above_next):
next_line_join = check_page_spacing(
curr_line,
next_line,
page_stats["fs_and_diff_next_y"],
)
# if the prev line is not visually joinable and the curr_next is
# make sure the prev_line doesn't join the curr_line
curr_next_visual_join = not join_font_spacing and next_line_join
# print()
# print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line")
# print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line)
# print("join_font_spacing:,", join_font_spacing)
is_incomplete = (
is_incomplete_sent
or (join_font_spacing and not sentence_visual_end)
or curr_line.continuing_line
)
# print("is_incomplete", is_incomplete)
has_overlap_with_min = (
compute_overlap(
curr_line.visual_line.start_x,
curr_line.visual_line.end_x,
prev_line.visual_line.start_x,
prev_line.visual_line.end_x,
divide_by_min=True,
)
> 0.7
)
is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0
is_visually_apart = (has_overlap_with_min and not is_below) or (
not has_overlap_with_min and is_below
)
above_bold_below_not = (
prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0
)
has_overlap_with_max = (
compute_overlap(
curr_line.visual_line.start_x,
curr_line.visual_line.end_x,
prev_line.visual_line.start_x,
prev_line.visual_line.end_x,
divide_by_min=False,
)
> 0.3
)
is_not_header_over_para = True
if (
above_bold_below_not
and not has_overlap_with_max
and prev_line.line_type == "header"
and not prev_line.incomplete_line
):
is_not_header_over_para = False
# print("header over para check")
# print("""above_bold_below_not
# and not has_overlap_with_max
# and prev_line.line_type == "header"
# """)
# print(above_bold_below_not)
# print(has_overlap_with_max, j)
# print(prev_line.line_type == "header")
# print()
# print(is_not_header_over_para)
###########
# List item
if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]):
prev_line.line_type = "list_item"
curr_line.line_type = "list_item"
curr_line.is_list_item = True
# change prev_line to list item
if page_blocks[-1]["block_type"] != "list_item":
page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0]
page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][
1:
].lstrip()
page_blocks[-1]["block_type"] = "list_item"
close_text_y = (
curr_line.visual_line.start_y
- curr_line.visual_line.mode_fs
- prev_line.visual_line.start_y
- prev_line.visual_line.mode_fs
) <= 0
aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x
title_text = False
if len(lines) < 10:
title_text = top_overlap == 1.0 and close_text_y and aligned_text
visual_header = visual_header_check(prev_line, curr_line, same_font)
list_item_rule = curr_line.has_list_char or (
curr_line.numbered_line
and not (
(prev_line.incomplete_line and curr_line.continuing_line)
or join_font_spacing
)
)
last_2_block_tr = False
if len(page_blocks) >= 2:
last_block_tr = (
page_blocks[-1]["block_type"] == "table_row"
and page_blocks[-2]["block_type"] == "table_row"
)
if not last_block_tr and curr_line.line_type == "para":
# check to join
if prev_line.incomplete_line and curr_line.continuing_line:
last_2_block_tr = True
no_space_join = prev_line.ends_with_period and curr_line.text[0] != " "
visual_header_by_stats = visual_header_from_stats(
prev_line,
curr_line,
page_stats,
)
header_join = False
common_list = curr_line.has_list_char or prev_line.has_list_char
if (
visual_header_by_stats
and curr_line.incomplete_line
and same_font
and not (prev_line.is_table_row or curr_line.is_table_row or common_list)
):
header_join = True
# print("LINEJOIN CHECK")
# print("positive\n", "*" * 10)
# print(f"\nsame_font:{same_font}",
# f"\nis_incomplete:{is_incomplete}",
# f"\nis_not_header_over_para:{is_not_header_over_para}")
# print("join_font_spacing", join_font_spacing)
# print("header join", header_join)
# print()
# print("negative\n", "*" * 10)
# print(f"\nis_visually_apart:{is_visually_apart}",
# f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}",
# f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}",
# f"\ncurr_line table {curr_line.line_type == 'table_row'}",
# f"\ncurr_line list {curr_line.is_list_item}",
# f"\nvisual_header {visual_header}",
# f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}')
if (
same_font
and not should_join_table(prev_line, curr_line, ents_aligned)
and not (curr_line.line_type == "table_row" or list_item_rule)
and not (prev_line.line_type == "table_row" and not last_2_block_tr)
and is_incomplete
and not curr_next_visual_join # is_visually_apart
and not visual_header
or not check_parentheses(prev_line.text)
and is_not_header_over_para
and not no_space_join
or title_text
or header_join
):
# print("JOIN")
if not is_visually_apart and bottom_overlap < 0.5:
# this would signify end of paragraph
sentence_visual_end = True
else:
sentence_visual_end = False
if page_stats["n_lines"] <= 3:
page_blocks[-1]["block_type"] = "header"
elif (
not prev_line.line_type == "list_item"
): # and not curr_line.visual_line.is_header:
page_blocks[-1]["block_type"] = "para"
new_text = formatter.connect(
prev_line.text.rstrip(),
curr_line.text.lstrip(),
)
new_text_list = (
prev_line.visual_line.text_list + curr_line.visual_line.text_list
)
# print("Max ex min ex assignment")
max_x = max(prev_line.visual_line.max_x, curr_line.visual_line.max_x)
min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
prev_line_type = prev_line.line_type
page_blocks[-1]["block_text"] = new_text
prev_start_y = prev_line.visual_line.start_y
curr_start_y = curr_line.visual_line.start_y
prev_end_y = prev_line.visual_line.end_y
wrapped_page = prev_line.visual_line.wrapped_page
# pass the line parser attributes
prev_line = curr_line
# add appended text and text_list, preserve the line type
prev_line.text = new_text
prev_line.visual_line.start_y = prev_start_y
prev_line.visual_line.text_list = new_text_list
prev_line.line_type = prev_line_type
prev_line.visual_line.min_x = min_x
prev_line.visual_line.max_x = max_x
prev_line.visual_line.wrapped_page = wrapped_page
if curr_start_y < prev_end_y:
prev_line.visual_line.wrapped_page = True
# print(prev_start_y)
# print("Join")
# print()
# print("-" * 50)
# print()
# new block
else:
# print("NEW block")
# print("*" * 50)
if not is_visually_apart and bottom_overlap < 0.5:
# this would signify end of paragraph
sentence_visual_end = True
else:
sentence_visual_end = False
# print("-"*50)
colon_rule = (
prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents
)
# normal case
tab_check_join = {
prev_line.visual_line.tab_count_join,
prev_line.visual_line.tab_count,
} & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count}
tab_check = sum(tab_check_join) > 0
# print("-+" * 50)
# print("TAB POSITIONS")
# print(prev_line.text)
# print(prev_line.visual_line.start_x_list)
# print(prev_line.visual_line.start_x_list_single_ent)
# print(prev_line.visual_line.tab_count)
# print(prev_line.visual_line.tab_count_join)
#
# print(curr_line.text)
# print(curr_line.visual_line.start_x_list)
# print(curr_line.visual_line.start_x_list_single_ent)
# print(curr_line.visual_line.tab_count)
# print(curr_line.visual_line.tab_count_join)
# print("tabcheck", tab_check)
# print("ents_aligned", ents_aligned)
# print(prev_ents, curr_ents)
# print(curr_line.visual_line.text_list)
# print("-+" * 50)
if visual_header_by_stats and prev_line.line_type != "table_row":
page_blocks[-1]["block_type"] = "header"
elif (
colon_rule
and prev_ents == 1
and prev_line.line_type != "list_item"
and not (prev_line.incomplete_line and curr_line.continuing_line)
):
# print("Table Conversion")
# print()
# print("colon check")
# print(prev_line.text.split(":"))
# print(curr_line.text.split(":"))
# print("TR1")
new_text_list = prev_line.text.split(":")
new_text_list = [new_text_list[0] + ":"] + new_text_list[1:]
page_blocks[-1]["block_type"] = "table_row"
page_blocks[-1]["block_list"] = new_text_list  # was a bare annotation (a no-op); an assignment is intended
if text_group_start:
text_group_start = False
text_group_start_idx = page_blocks[-1]["block_idx"]
page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
curr_line.line_type = "table_row"
curr_line.is_list_or_row = True
# print("Table Conversion!")
# print(prev_ents, curr_ents)
# print(page_blocks[-1]["block_text"])
# print("TR3")
elif (
tab_check and ents_aligned and prev_line.line_type != "list_item"
) or (colon_rule and not prev_line.incomplete_line):
# print("Table Conversion")
# print(prev_ents, curr_ents)
# print(page_blocks[-1]["block_text"])
# print("TR2")
page_blocks[-1]["block_type"] = "table_row"
if text_group_start:
text_group_start = False
text_group_start_idx = page_blocks[-1]["block_idx"]
page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
curr_line.line_type = "table_row"
else:
text_group_start = True
text_group_start_idx = -1
list_char = ""
if curr_line.line_type == "list_item":
list_char = curr_line.text[0]
curr_line.text = curr_line.text[1:].lstrip()
if curr_line.line_type == "header":
header_block_idx = block_idx
if (visual_header or visual_header_by_stats) and not (
prev_line.line_type == "list_item"
or prev_line.line_type == "numbered_list_item"
):
page_blocks[-1]["block_type"] = "header"
# print()
# print("*" * 40)
# print("NEW BLOCK")
# print()
# print("*" * 40)
# print(curr_line.line_type, curr_line.text)
# group attribute
if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0:
group_id += 1
block = {
"block_idx": block_idx,
"block_text": curr_line.text,
"block_type": curr_line.line_type,
"header_block_idx": header_block_idx,
"block_group": [curr_line.visual_line.text_list],
"text_group_start_idx": text_group_start_idx,
"list_char": list_char,
"group_id": group_id,
"fs": curr_line.visual_line.start_fs,
"x": curr_line.visual_line.start_x,
"y": curr_line.visual_line.start_y,
"line": curr_line,
"block_list": curr_line.visual_line.text_list,
}
# This is to account for headers that are false positives. TODO: improve header code
prev_text = page_blocks[-1]["block_text"]
if page_blocks[-1]["block_type"] == "header" and (
len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16
):
page_blocks[-1]["block_type"] = "para"
prev_line = curr_line
block_idx += 1
page_blocks.append(block)
# not too many blocks there may be title text missed
if len(page_blocks) <= 2:
for idx, block in enumerate(page_blocks):
if "." not in block["block_text"] and len(block["block_text"].split()) < 10:
page_blocks[idx]["block_type"] = "header"
page_blocks = order_blocks(page_blocks)
return page_blocks, line_set
def clean_line(line):
line = line.replace("\n", " ")
line = line.replace("\t", " ")
line = line.strip()
return line
def fix_spaced_characters(line_text):
line_text = re.sub(r"\s+", "", line_text)
return su.segment(line_text)
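# Hedged sketch of the intent: "H e l l o   w o r l d" first collapses to "Helloworld",
# and su.segment (assumed here to be a wordsegment-style segmenter) is then asked to
# recover the word boundaries from the joined character run.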
def connect(prev, curr):
has_space = prev.endswith(" ")
result = prev + ("" if has_space else " ") + curr
return result
def get_numbers(line):
# test = re.compile(r"[0-9]+\.?[0-9]?")
regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$")
return regex.search(line)
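# Illustrative behaviour of the trailing-number pattern:
#     >>> get_numbers("Total $12.50").group(0)
#     '$12.50'
#     >>> get_numbers("12.50 units") is None    # nothing numeric at the end of the line
#     True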
def check_block_join(prev_block, block):
prev_text = prev_block["block_text"]
curr_text = block["block_text"]
blocks_are_paras = (
prev_block["block_type"] == "para" and block["block_type"] == "para"
)
if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras:
prev_line = line_parser.Line(prev_block["block_text"])
curr_line = line_parser.Line(block["block_text"])
if prev_line.incomplete_line or curr_line.continuing_line:
return True
return False
def join_blocks(page_blocks, blocks):
prev_last_block = page_blocks[-1][-1]
# update page blocks and blocks
# prev_blocks = page_blocks[-1]
# last_prev_block = prev_blocks[-1]
# check to join last_prev_block with first blocks[0]
# if it's a join, pop the block and join, subtract block indexes
prev_last_block["block_text"] = (
prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip()
)
prev_last_block["block_list"].append(blocks[0]["block_list"])
# print(prev_block)
page_blocks[-1][-1] = prev_last_block
for block in blocks[1:]:
block["block_idx"] -= 1
return page_blocks, blocks[1:]
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/utils.py<fim_prefix>import json
import re
import numpy as np
from nltk import load
from nltk import PunktSentenceTokenizer
nltk_abbs = load("tokenizers/punkt/{}.pickle".format("english"))._params.abbrev_types
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
return super(NpEncoder, self).default(obj)
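# Example usage: NpEncoder lets json.dumps serialize numpy scalars and arrays directly.
#     >>> json.dumps({"score": np.float32(0.5), "ids": np.array([1, 2])}, cls=NpEncoder)
#     '{"score": 0.5, "ids": [1, 2]}'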
nlm_abbs = {
"u.s",
"u.s.a",
"n.w",
"p.o",
"po",
"st",
"ave",
"blvd",
"ctr",
"cir",
"ct",
"dr",
"mtn",
"apt",
"hwy",
"esq",
"fig",
"no",
"sec",
"n.a",
"s.a.b",
"non-u.s",
"cap",
'u.s.c',
"ste",
}
nlm_special_abbs = {
"inc",
}
abbs = nltk_abbs | nlm_abbs
nltk_tokenzier = PunktSentenceTokenizer()
rules = []
for abb in abbs:
# match start of the sentence
pattern = fr"^{abb}.\s"
replaced = f"{abb}_ "
# case insensitive replacement for synonyms
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
# match token in sentence
pattern = fr"\s{abb}.\s"
replaced = f" {abb}_ "
# case insensitive replacement for synonyms
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
for abb in nlm_special_abbs:
pattern = fr"{abb}\."
replaced = f"{abb}_"
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
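# Illustration of the rules built above: with "no" and "fig" in abbs, the text
# "No. 5 applies. See Fig. 2." becomes "no_ 5 applies. See fig_ 2." before Punkt runs,
# so no sentence break is inserted after the abbreviations. The underscored copy is only
# used for splitting (see sent_tokenize below); the original text is what gets returned.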
# match content inside brackets
# (?<=\() ==> starts with "("
# ([^)]+) ==> repeat not ")"
# (?=\))") ==> ends with ")"
bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))")
space_rule = re.compile(r"\s([.'](?:\s|$|\D))", re.IGNORECASE) # Remove any space between punctuations (.')
quotation_pattern = re.compile(r'[”“"‘’\']')
def sent_tokenize(org_texts):
if not org_texts:
return org_texts
sents = []
# in case org_texts has \n, break it into multiple paragraph
# edge case for html and markdown
for org_text in org_texts.split("\n"):
org_text = space_rule.sub(r'\1', org_text)
modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text) # To handle bug https://github.com/nltk/nltk/issues/2925
orig_offset = abs(len(org_text) - len(modified_text))
# do not break bracket
for span_group in bracket_rule.finditer(modified_text):
start_byte, end_byte = span_group.span()
span = modified_text[start_byte:end_byte]
# skip this logic when span is too big? disabled for now
# if len(span.spli<fim_suffix>t()) >= 10:
# continue
modified_text = modified_text.replace(
f"({span})", f"_{span.replace('.','_')}_",
)
for rule, replaced in rules:
modified_text = rule.sub(replaced, modified_text)
# Normalize all the quotation.
modified_text = quotation_pattern.sub("\"", modified_text)
modified_sents = nltk_tokenzier.tokenize(modified_text)
offset = orig_offset
sent_idx = 0
while offset < len(modified_text) and sent_idx < len(modified_sents):
if modified_text[offset] == " ":
offset += 1
continue
# cut org_text based on lengths of modified_sent
modified_sent = modified_sents[sent_idx]
sents.append(org_text[offset: offset + len(modified_sent)])
offset += len(modified_sent)
sent_idx += 1
if len(sents) >= 2 and re.match(r"^.\.$", sents[0]):
sents[1] = sents[0] + " " + sents[1]
sents = sents[1:]
return sents
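# Illustrative behaviour (assuming the default English Punkt model):
#     sent_tokenize("The U.S. market grew. Prices rose 5%.\nSee Fig. 2 for details.")
# should yield three sentences, with "U.S." and "Fig." protected from splitting and each
# sentence returned as a slice of the original, unmodified text.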
def divide_list_into_chunks(lst, n):
# looping till length l
for i in range(0, len(lst), n):
yield lst[i : i + n]
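# e.g. list(divide_list_into_chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]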
def normalize(X):
norms = np.einsum("ij,ij->i", X, X)
np.sqrt(norms, norms)
X /= norms[:, np.newaxis]
return X
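# Row-wise L2 normalization, done in place: normalize(np.array([[3.0, 4.0]])) returns
# array([[0.6, 0.8]]). Note the input must be a float array for the in-place sqrt and
# divide to work.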
def detect_block_center_aligned(block, page_width):
center_location = block["box_style"][1] + block["box_style"][3] / 2
center_aligned = abs(center_location - page_width / 2) < page_width * 0.01
width_check = block["box_style"][3] * 2 < page_width
return center_aligned and width_check
def detect_block_center_of_page(block, page_height):
bottom = block["box_style"][0] + block["box_style"][4]
center_of_page = (page_height / 3) <= bottom <= ((2 * page_height) / 3)
return center_of_page
def check_char_is_word_boundary(c):
if c.isalnum():
return False
if c in ['-', '_']:
return False
return True
def blocks_to_sents(blocks, flatten_merged_table=False, debug=False):
block_texts = []
block_info = []
header_block_idx = -1
header_match_idx = -1
header_match_idx_offset = -1
header_block_text = ""
is_rendering_table = False
is_rendering_merged_cells = False
table_idx = 0
levels = []
prev_header = None
block_idx = 0
for block_idx, block in enumerate(blocks):
block_type = block["block_type"]
if block_type == "header":
if debug:
print("---", block["level"], block["block_text"])
header_block_text = block["block_text"]
header_block_idx = block["block_idx"]
header_match_idx = header_match_idx_offset + 1
if prev_header and block["level"] <= prev_header['level'] and len(levels) > 0:
while len(levels) > 0 and levels[-1]["level"] >= block["level"]:
if debug:
print("<<", levels[-1]["level"], levels[-1]["block_text"])
levels.pop(-1)
if debug:
print(">>", block["block_text"])
levels.append(block)
prev_header = block
if debug:
print("-", [str(level['level']) + "-" + level['block_text'] for level in levels])
block["header_text"] = header_block_text
block["header_block_idx"] = header_block_idx
block["header_match_idx"] = header_match_idx
block["block_idx"] = block_idx
level_chain = []
for level in levels:
level_chain.append({"block_idx": level["block_idx"], "block_text": level["block_text"]})
# remove a level for header
if block_type == "header":
level_chain = level_chain[:-1]
level_chain.reverse()
block["level_chain"] = level_chain
# if block_type == "header" or block_type == "table_row":
if (
block_type == "header"
and not is_rendering_table and 'is_table_start' not in block
):
block_texts.append(block["block_text"])
# append text from next block to header block
# TODO: something happened here, it messed up the match_text
# if block_type == "header" and block_idx + 1 < len(blocks):
# block[
# "block_text"
# ] += blocks[block_idx+1]['block_text']
block_info.append(block)
header_match_idx_offset += 1
elif (
block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item"
) and not is_rendering_table:
block_sents = block["block_sents"]
header_match_idx_offset += len(block_sents)
for sent in block_sents:
block_texts.append(sent)
block_info.append(block)
elif 'is_table_start' in block:
is_rendering_table = True
if 'has_merged_cells' in block:
is_rendering_merged_cells = True
elif 'is_table_start' not in block and not is_rendering_table and block_type == "table_row":
block_info.append(block)
block_texts.append(block["block_text"])
header_match_idx_offset += 1
if is_rendering_table:
if is_rendering_merged_cells and "effective_para" in block and flatten_merged_table:
eff_header_block = block["effective_header"]
eff_para_block = block["effective_para"]
eff_header_block["header_text"] = block["header_text"]
eff_header_block["header_block_idx"] = block["block_idx"]
eff_header_block["header_match_idx"] = header_match_idx_offset + 1
eff_header_block["level"] = block["level"] + 1
eff_header_block["level_chain"] = block["level_chain"]
eff_para_block["header_block_idx"] = block["block_idx"]
eff_para_block["header_match_idx"] = header_match_idx_offset + 1
eff_para_block["level"] = block["level"] + 2
eff_para_block["level_chain"] = [
{
"block_idx": eff_header_block["block_idx"],
"block_text": eff_header_block["block_text"],
},
] + eff_header_block["level_chain"]
header_match_idx_offset += 1
block_info.append(block["effective_header"])
block_texts.append(block["effective_header"]["block_text"])
for sent in block["effective_para"]["block_sents"]:
block_texts.append(sent)
block_info.append(block["effective_para"])
header_match_idx_offset += len(block["effective_para"]["block_sents"])
else:
block["table_idx"] = table_idx
block_info.append(block)
block_texts.append(block["block_text"])
header_match_idx_offset += 1
if 'is_table_end' in block:
is_rendering_table = False
table_idx += 1
return block_texts, block_info
def get_block_texts(blocks):
block_texts = []
block_info = []
for block in blocks:
block_type = block["block_type"]
if (
block_type == "list_item"
or block_type == "para"
or block_type == "numbered_list_item"
or block_type == "header"
):
block_texts.append(block["block_text"])
block_info.append(block)
return block_texts, block_info<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor/line_parser.py<fim_prefix>import datetime
import logging
import math
import re
import string
from nltk.corpus import stopwords
from .patterns import abbreviations
from .patterns import states
from .patterns import states_abbreviations
from .styling_utils import mode_of_list
try:
stop_words = set(stopwords.words("english"))
except Exception as e:
logging.error(e)
import nltk
stopwords = nltk.download("stopwords")
stop_words = set(stopwords.words("english"))
stop_words.add("per")
continuing_chars = "!\"&'+,./:;<=?@\\]^_`|}~"
list_chars = [
"•",
"➢",
"*",
"ƒ",
"",
"",
"",
"",
"»",
"☐",
"·",
"�",
"▪",
"▪",
"○",
"",
"–",
]
list_types = {
"•": "circle",
"➢": "wide_symbol_arrow",
"*": "star",
"ƒ": "f",
"": "clock",
"": "small_square",
"": "narrow_symbol_arrow",
"": "large_square",
"»": "double_arrow",
"☐": "hollow_square",
"·": "circle",
"�": "special_char",
"▪": "very_small_square",
"▪": "very_small_square",
"○": "hollow_circle",
"": "hollow_squere",
"–": "dash",
"‒": "another-dash",
"̶": "underscore",
}
unicode_list_types = {
"\\uf0b7": "•",
"\\uf0fc": "",
}
footnote_types = {
"©"
}
ambiguous_list_chars = ["+", "-"]
units = ["acres", "miles", "-"] # - could represent a null value in a row
punctuations = string.punctuation + "“"
start_quotations = ["'", '"', "“"]
end_quotations = ["'", '"', "”"]
"""
Quote Pattern details:
\\W ==> Match non-alphanumeric characters. Helps in mitigating words like O'Reilly.
["“\'] ==> Quote patterns
(?!\\D\\s) ==> Negative Lookahead for single character following the quote.
Helps in removing words like Macy's, don't ...
(?!\\d+) ==> Negative Lookahead for one or more digits following the pattern.
Helps in removing words like '19, '2019
(.*?)[,;.]?[”"\'] ==> Match all other data.
"""
# Add / Modify Quotation pattern in ingestor_utils/utils.py also.
quote_pattern = re.compile(
r'(?:(?<=\W)|(?<=^))["“‘’\']+(?!\D\s)(?!\d+)(.*?)[,;.]?[”"‘’\']+',
) # (r'["“\'](.*?)[,;.]?[”"\']')
single_char_pattern = re.compile(r'[a-zA-Z]')
multi_char_pattern = re.compile(r'[a-zA-Z]+')
roman_number_pattern = re.compile(r'[ixvIXV]+$')
ends_with_sentence_delimiter_pattern = re.compile(r"(?<![.;:][a-zA-Z0-9])(?<!INC|inc|Inc)[.;:]+(?![\w])[\"“‘’”\'\s]*$")
conjunction_list = ["for", "and", "not", "but", "or", "yet", "so", "between"]
class Word:
def __init__(self, token):
self.text = token
self.is_percent = False
self.is_number = False
self.is_year = False # year does not count<fim_suffix> as a number
self.is_dollar = False
self.is_million = False
self.is_billion = False
self.is_thousand = False
self.is_date_entry = False
self.is_negative = False
self.length = len(self.text)
self.is_stop_word = self.text.lower() in stop_words
self.is_number_range = False
self.parts = []
text_without_punct = self.text
while (
len(text_without_punct) > 1 and
(text_without_punct[-1] in string.punctuation or text_without_punct[-1] in end_quotations)
):
text_without_punct = text_without_punct[0:-1]
# remove leading unbalanced punctuation
while (
len(text_without_punct) > 1 and
(text_without_punct[0] in string.punctuation or text_without_punct[0] in start_quotations)
):
text_without_punct = text_without_punct[1:]
self.text_without_punct = text_without_punct
self.is_noun = self.text_without_punct[0].isupper()
n = self.check_numeric()
self.check_date()
try:
if n:
n = round(float(n))
if n > 0:
digits = int(math.log10(n)) + 1
elif n == 0:
digits = 1
else:
digits = int(math.log10(-n)) + 2
self.num_digits = digits
if digits == 4 and self.text.replace(",", "") == self.text:
self.is_year = True
self.is_number = False
else:
self.num_digits = 0
except Exception as e:
logging.error(e)
self.num_digits = 0
def check_date(self):
if "/" in self.text or "-" in self.text:
text = self.text.replace("/", "-")
date_patterns = [
"%b-%d",
"%B-%d",
"%B-%d-%y",
"%B-%d-%Y",
"%b-%d-%Y",
"%b-%d-%y",
"%m-%d",
"%m-%d-%y",
"%m-%d-%Y",
]
for pat in date_patterns:
try:
datetime.datetime.strptime(text, pat)
self.is_date_entry = True
return
except ValueError:
pass
else:
self.is_date_entry = False
def check_numeric(self):
word = self.text.lower()
if not word.isalpha():
if word.isprintable():
if not word.isnumeric():
if word.startswith("(") and word.endswith(")"):
word = word[1:-1]
if word.startswith("-"):
self.is_negative = True
word = word[1:]
if word.startswith("$"):
self.is_dollar = True
word = word[1:]
elif word.endswith("$"):
self.is_dollar = True
word = word[0:-1]
elif word.endswith("%"):
self.is_percent = True
word = word[0:-1]
elif word.endswith("m"):
self.is_million = True
elif word.endswith("bn"):
self.is_billion = True
if word.startswith("(") and word.endswith(")"):
word = word[1:-1]
word = word.replace(",", "")
if word.isnumeric() or word.replace(".", "", 1).isnumeric():
self.is_number = True
parts = word.split("-")
if (
len(parts) == 2
and parts[0].isnumeric()
and parts[1].isnumeric()
):
self.is_number_range = True
self.parts = parts
else:
self.is_number = True
if self.is_number:
numeric_part = word
return numeric_part
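# Illustrative Word parses under the rules above (a hedged sketch, not a test suite):
#     Word("$1,200") -> is_dollar=True, is_number=True
#     Word("45%")    -> is_percent=True, is_number=True
#     Word("2019")   -> is_year=True, is_number=False  (4 digits, no comma)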
class Line:
def __init__(
self,
line_str,
text_list=[],
style_dict={},
page_details={},
noun_chunk_ending_tokens=[],
):
self.text = line_str.strip()
self.visual_line = VisualLine(text_list, style_dict, page_details)
self.words = []
self.is_independent = False
self.is_header = False
self.is_header_without_comma = False
self.noun_chunks = []
self.quoted_words = quote_pattern.findall(self.text)
self.noun_chunk_ending_tokens = {x.lower() for x in noun_chunk_ending_tokens}
self.parse_line()
def check_header(self):
# Section X, Article Y, Note 1 etc.
first_word_header = self.first_word.lower() in ["section", "article", "note"]
# If there are a certain percentage of title words (first letter capitalize)
title_ratio = (
self.title_word_count / self.eff_word_count
if self.eff_word_count > 0
else 1.0
)
# print(self.title_word_count, self.eff_word_count, title_ratio)
# Section 1 is a header but Section 1: Hello 3 is not
has_enough_titles = title_ratio > 0.9 and self.eff_word_count < 10
has_header_structure = (
(first_word_header or has_enough_titles) and self.number_count == 1
) or self.numbered_line or self.text.isupper()
# has_header_structure = has_header_structure and self.eff_word_count <
last_word_number = (
self.last_word.lower() in units
or self.last_word_number
and not has_header_structure
)
last_word_date = self.last_word_date and not has_header_structure
# Find lines ending with sentence delimiter. But exclude text like "L.P."
ends_with_delim = ends_with_sentence_delimiter_pattern.search(self.text) is not None
sentence_structure = self.ends_with_period and not (
has_header_structure and title_ratio > 0.9
) and ends_with_delim
last_letter_is_punctuation = (
self.last_word[-1] in punctuations and self.last_word[-1] not in ":?.)]%" and
ends_with_delim
)
self.is_header_without_comma = (
not sentence_structure
and not self.has_list_char
and not self.first_char in footnote_types
and has_enough_titles
and not last_word_number
and (
self.number_count == 0
or (has_header_structure and self.number_count <= 1)
)
and not self.has_continuing_chars
and not last_word_date
and self.first_word_title
and not self.last_word_is_stop_word
and not self.is_zipcode_or_po
and not last_letter_is_punctuation
and not "://" in self.text # url pattern
)
self.is_header = self.is_header_without_comma and \
((not self.text.count(',') > 1) if not self.text.lower().startswith('section') else True)
def check_ends_with_period(self):
# punct_rule = self.last_char in string.punctuation and self.last_char not in [':', '.']
last_word_is_title = self.last_word in ["Mr.", "Dr.", "Mrs."]
self.ends_with_period = self.last_char in ["."] and not last_word_is_title
def check_table_row(self):
if not self.is_header:
value_count = (
self.number_count
+ self.dollar_count
+ self.pct_count
+ self.text.count(" - ")
)
word_symbols = self.word_count - self.dollar_sign_count
if word_symbols == 0:
word_symbols = 1
word_ratio = (
value_count + self.title_word_count + self.date_entry_count
) / word_symbols
self.is_table_row = (
(
(value_count > 0 or self.date_entry_count > 0)
and word_ratio > 0.7
and not self.ends_with_period
and not self.is_zipcode_or_po
)
and not self.last_word_is_stop_word
or ("...." in self.text)
)
else:
self.is_table_row = False
def check_list_item(self):
text = self.text.strip()
self.has_list_char = text[0] in list_types.keys()
# if not self.has_list_char and text[0] in ambiguous_list_chars:
# self.has_list_char = text[1:].strip()[0].isalpha()
self.is_list_item = self.has_list_char and self.first_word[-1] not in ":?.)]%$"
if self.is_list_item:
self.list_type = list_types[text[0]]
# matches 1.1 1.2.1 1 etc.
def check_numbered_line(self, word):
trunc_word = word
ends_with_parens = word.endswith(")")
number_end_char = word.endswith(".") or ends_with_parens
number_start_char = word.startswith("(")
if number_start_char and not ends_with_parens:
return False
if word[-1] in ["%", "$", ","]:
return False
if number_end_char:
trunc_word = word[:-1]
if number_start_char:
trunc_word = trunc_word[1:]
# To handle scenarios like (ii)(A)
if ")(" in trunc_word:
trunc_word = trunc_word.split(")(")[0]
parts = trunc_word.split(".")
self.integer_numbered_line = False
self.roman_numbered_line = False
self.letter_numbered_line = False
self.dot_numbered_line = False
mixed_list_items = False
max_digits = 2
max_roman = 6
for idx, part in enumerate(parts):
# print(">part: ", part, re.sub(r"[a-zA-Z]+", "", part).isdigit() or idx > 0)
if len(part) <= max_digits:
# (1), (2), (3)
self.integer_numbered_line = part.isdigit() and (
len(parts) > 1 or word.endswith(")")
)
# 1. 2. 3.
self.dot_numbered_line = part.isdigit() and (
len(parts) > 1 or word.endswith(".")
)
# a. b. c. or a) b) c)
# idx > 0 for patterns like 10.a
# a1 b1 c1 etc.
self.letter_numbered_line = (
True
if single_char_pattern.match(part)
and (
(number_end_char and len(part) == 1 and len(parts) == 1)
or multi_char_pattern.sub("", part).isdigit()
or idx > 0
)
else False
)
if len(part) <= max_roman:
# xi, i, iv
self.roman_numbered_line = (
True if roman_number_pattern.match(part) and idx == 0 else False
)
if part.endswith(")") and part[0].isalnum() and "(" in part:
mixed_list_items = True
# else:
# self.integer_numbered_line = False
# A-1
# self.letter_numbered_line = (
# True if re.match("[a-zA-Z]+-?[0-9]+$", part) else False
# )
self.numbered_line = (
self.integer_numbered_line
or self.roman_numbered_line
or self.letter_numbered_line
or self.dot_numbered_line
) and not mixed_list_items
if not self.numbered_line:
break
if self.numbered_line:
self.start_number = trunc_word
self.line_without_number = self.text[len(word) + 1 :]
self.full_number = self.text[:len(word)]
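# Illustrative first tokens accepted by the numbering rules above:
#     "1."   -> dot_numbered_line
#     "a)"   -> letter_numbered_line
#     "(ii)" -> roman_numbered_line
# while tokens ending in "%", "$" or "," (e.g. "2019,") are rejected up front.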
# check if line is part of address
def check_zipcode_or_pobox(self):
# check if line matches format P.O. box xxxxx
pobox = (
self.word_count == 3
and self.last_word_number
and self.first_word.lower() in ["po", "p.o", "p.o."]
)
# check if line is last part of address, matching format "city, state zipcode"
zipcode = (
self.word_count
< 7 # ensure line is standalone address, not part of larger sentence
and (
self.contains_state # line contains comma followed by state name or abbreviation
# line ends in zipcode, with format xxxxx or xxxxx-xxxx
and (
(self.last_word_number or self.last_word[-4:].isdigit())
and (
(len(self.last_word) == 10 and self.last_word[-5] == "-")
or len(self.last_word) == 5
)
)
and not self.ends_with_period
)
)
self.is_zipcode_or_po = pobox or zipcode
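# Illustrative positives (assuming "ny" appears in states_abbreviations):
#     "P.O. Box 12345"     -> pobox   (3 words, first word "p.o.", numeric last word)
#     "Brooklyn, NY 11201" -> zipcode (comma + state abbreviation + 5-digit ending)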
def set_line_type(self):
line_type = "para"
if self.is_table_row:
line_type = "table_row"
elif self.is_header:
line_type = "header"
elif self.is_list_item or self.numbered_line:
line_type = "list_item"
else:
line_type = "para"
self.line_type = line_type
def parse_line(self):
self.words = []
self.title_word_count = 0
self.alpha_count = 0
self.list_type = ""
self.integer_numbered_line = False
self.roman_numbered_line = False
self.dot_numbered_line = False
self.numbered_line = False
self.stop_word_count = 0
self.dollar_count = 0
self.pct_count = 0
self.number_count = 0
self.last_word_number = False
self.first_word_title = False
self.letter_numbered_line = False
self.ends_with_hyphen = False
self.last_word_date = False
self.is_reference_author_name = False
self.date_entry_count = 0
self.last_word_is_stop_word = False # self.last_word in self.stopwords
self.hit_colon = False
self.is_zipcode_or_po = False
self.contains_state = False
self.addresses = []
# todo - this is a stopgap solution, need to make it more efficient
tokens = self.text.split()
self.length = len(self.text)
self.word_count = len(tokens)
self.dollar_sign_count = tokens.count("$")
last_idx = self.word_count - 1
first_alpha_found = False
prev_token_comma = False
self.eff_length = 0
single_letter_word_count = 0
noun_chunk_buf = []
if self.length == 0:
return
for idx, token in enumerate(tokens):
if token in unicode_list_types.keys():
token = unicode_list_types[token]
if token.__contains__(":"):
self.hit_colon = True
# remove punctuation unless (word) or unless it is the first token or if it has colon
last_char = token[-1]
if (
(token[-1] in string.punctuation or token[-1] in end_quotations)
and not (token[0] in string.punctuation or token[0] in start_quotations)
and (not idx == 0 or token[-1] == ":")
):
token = token[0:-1]
if len(token) == 0:
continue
# if prev token contained comma, check if current token is state name
if prev_token_comma and (
token.lower() in states or token.lower() in states_abbreviations
):
self.contains_state = True
prev_token_comma = False
if prev_token_comma:
prev_token_comma = False
if last_char == ",":
prev_token_comma = True
if idx == 0 and not token.lower() == "i" and not token.lower() == "a":
self.check_numbered_line(token)
if token.istitle() or token.isupper(): # and not self.hit_colon:
self.title_word_count = self.title_word_count + 1
if token.isalpha():
# if not self.hit_colon:
self.alpha_count = self.alpha_count + 1
if not first_alpha_found:
first_alpha_found = True
if idx == 0:
self.first_word_title = token[0].isupper()
word = Word(token)
if word.is_number:
self.number_count = self.number_count + 1
if idx == last_idx:
self.last_word_number = True
if word.is_date_entry:
self.date_entry_count += 1
if idx == last_idx:
self.last_word_date = True
if word.is_dollar:
self.dollar_count = self.dollar_count + 1
if idx == last_idx:
self.last_word_number = True
if word.is_percent:
self.pct_count = self.pct_count + 1
if idx == last_idx:
self.last_word_number = True
self.eff_length += word.length
if word.length == 1:
single_letter_word_count += 1
if word.is_stop_word:
if not self.hit_colon:
self.stop_word_count = self.stop_word_count + 1
if idx == last_idx and len(token) != 1 and not token.isupper():
self.last_word_is_stop_word = True
if word.is_noun or word.text == "&":
noun = word.text_without_punct
prev_word = self.words[-1] if len(self.words) > 0 else None
if prev_word and (prev_word.is_number or prev_word.is_number_range) and not noun_chunk_buf:
noun_chunk_buf.append(prev_word.text_without_punct) # get stuff like 150 Broadway
if noun.endswith("'s"):
noun = noun[0:-2]
noun_chunk_buf.append(noun)
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
elif (
"".join([x.lower() for x in noun if x not in {".", ","}])
in self.noun_chunk_ending_tokens
):
noun_chunk_buf.append(noun)
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
else:
noun_chunk_buf.append(noun)
elif len(noun_chunk_buf) and word.is_number and word.text[0] not in ["$"]:
noun_chunk_buf.append(word.text_without_punct)
elif len(noun_chunk_buf):
self.noun_chunks.append(" ".join(noun_chunk_buf))
noun_chunk_buf = []
self.words.append(word)
if len(noun_chunk_buf) > 0:
self.noun_chunks.append(" ".join(noun_chunk_buf))
self.noun_chunks = sorted(list(set(filter(lambda x: x.lower() not in stop_words, self.noun_chunks))))
self.first_word = tokens[0]
self.last_word = tokens[-1]
self.last_char = self.text[-1]
self.ends_with_period = self.last_char == "."
self.ends_with_comma = self.last_char == ","
self.end_with_period_single_char = len(self.text) > 2 and self.text[-2] == "."
self.eff_word_count = self.alpha_count - self.stop_word_count
self.check_ends_with_period()
self.first_char = self.text[0]
self.has_continuing_chars = not self.numbered_line and (
self.first_char.islower() or self.first_char in continuing_chars
)
self.last_continuing_char = self.last_char in continuing_chars
self.check_zipcode_or_pobox()
self.check_list_item()
self.check_header()
self.check_table_row()
self.separate_line = (
self.is_header
or self.is_table_row
or self.is_list_item
or self.is_zipcode_or_po
)
self.is_list_or_row = self.is_table_row or self.is_list_item
self.is_header_or_row = (
self.is_header or self.is_table_row or self.is_zipcode_or_po
)
self.ends_with_abbreviation = self.ends_with_period and (
(self.last_word.find(".") != len(self.last_word) - 1)
or self.last_word.lower() in abbreviations
or len(self.last_word) <= 3
)
self.incomplete_line = not self.is_header_or_row and (
not self.ends_with_period
or self.ends_with_abbreviation
or self.end_with_period_single_char
)
self.continuing_line = self.has_continuing_chars and not self.separate_line
self.has_spaced_characters = single_letter_word_count / self.word_count > 0.8
self.set_line_type()
if self.is_header or self.is_header_without_comma:
if "," in self.text or self.last_word.isupper() and len(self.last_word) <= 2:
self.is_reference_author_name = True
self.last_word_is_co_ordinate_conjunction = self.ends_with_comma or self.last_word in conjunction_list
# print(self.separate_line)
# self.continuing_line = not self.separate_line and
def to_json(self):
json_lp = dict(self.__dict__)
del json_lp["visual_line"]
words = []
for word in self.words:
words.append(word.__dict__)
json_lp["words"] = words
return json_lp
class VisualLine:
def __init__(self, text_list=[], style_dict={}, page_stats={}):
self.text_list = text_list
self.start_x = None
self.start_y = None
self.end_x = None
self.end_y = None
self.fs = None
self.fw = None
self.start_fs = None
self.end_fs = None
self.diff_prev_y = None
self.diff_next_y = None
self.is_comparably_sized = False
self.is_comparably_bolded = False
self.is_prev_space_smallest = False
self.is_next_space_smallest = False
self.wrapped_page = False
self.text = " ".join(self.text_list)
if style_dict:
self.start_x = style_dict["start_x"][0]
self.start_y = style_dict["start_y"][0]
self.end_x = style_dict["end_x"][-1]
self.end_y = style_dict["end_y"][-1]
self.fs = style_dict["line_fs"][0]
self.fw = style_dict["line_fw"][0]
self.diff_prev_y = style_dict["diff_prev_y"][0]
self.diff_next_y = style_dict["diff_next_y"][0]
self.font_family = (
style_dict["font_family"][0] if len(style_dict["font_family"]) else None
)
self.font_style = (
style_dict["font_style"][0] if len(style_dict["font_style"]) else None
)
self.min_x = (
self.start_x
) # these variables are adjustable during line joins for line width
self.max_x = self.end_x
self.start_x_list = style_dict["start_x"] # joined ents
self.end_x_list = style_dict["end_x"] # joined ents
self.start_x_list_single_ent = style_dict["start_x_list"][0]
self.end_x_list_single_ent = style_dict["end_x_list"][0]
self.mode_fs = mode_of_list(style_dict["line_fs"])
self.tab_count = 0
# calculates tabs for when tika misses word split
if len(self.start_x_list_single_ent) == len(self.end_x_list_single_ent):
self.start_end_list = list(
zip(self.start_x_list_single_ent, self.end_x_list_single_ent),
)
for word_x, next_word_x in zip(
self.start_end_list[:-1],
self.start_end_list[1:],
):
word_start_x, word_end_x = word_x
next_word_start_x, next_word_end_x = next_word_x
word_distance = next_word_start_x - word_end_x
if word_distance > 20:
self.tab_count += 1
else:
self.start_end_list = []
self.tab_count_join = 0 # tab count after join in ptolines
# calculates tabs for when tika misses word split
if len(self.start_x_list) == len(self.end_x_list):
self.start_end_list_join = list(
zip(self.start_x_list, self.end_x_list),
)
for word_x, next_word_x in zip(
self.start_end_list_join[:-1],
self.start_end_list_join[1:],
):
word_start_x, word_end_x = word_x
next_word_start_x, next_word_end_x = next_word_x
word_distance = next_word_start_x - word_end_x
if word_distance > 20:
self.tab_count_join += 1
else:
self.start_end_list_join = []
if len(self.text.split()) == 2 and self.tab_count == 1:
self.text_list = self.text.split()
# Count tabs in text list, Eventually make it a function of font size
self.start_fs = round(style_dict["start_fs"][0], 1)
self.end_fs = round(style_dict["end_fs"][-1], 1)
self.compute_visual_features(page_stats)
def compute_visual_features(self, page_stats):
# compute font size relative to most common font
font_sizes_mode = page_stats["mode_fs"]
if self.fs > (4 / 3) * font_sizes_mode:
self.is_comparably_sized = True
else:
self.is_comparably_sized = False
# compute font weight relative to 600.0 which has generally
# been observed to correspond to bolding of some sort
font_weights_mode = page_stats["mode_fw"]
if font_weights_mode >= 600.0:
self.is_comparably_bolded = False
elif self.fw > 600.0:
self.is_comparably_bolded = True
# compare line height for similar type (same font) lines
if page_stats["fs_and_diff_prev_y"].get((self.fs, self.diff_prev_y), 0) > 2:
for k, v in page_stats["fs_and_diff_prev_y"].items():
if k == self.fs and 0 <= v < self.diff_prev_y:
break
else:
self.is_prev_space_smallest = True
if page_stats["fs_and_diff_next_y"].get((self.fs, self.diff_next_y), 0) > 2:
for k, v in page_stats["fs_and_diff_next_y"].items():
if k == self.fs and 0 <= v < self.diff_next_y:
break
else:
self.is_next_space_smallest = True
def should_join_table(self, next_line):
"""
Check if the next line should be joined as a table row (tr). This makes no assumption about whether the current line is a table.
"""
# check list of spaced words
curr_line_ents = len(self.text_list)
next_line_ents = len(next_line.text_list)
ent_match = (
curr_line_ents == next_line_ents and curr_line_ents >= 2
) # tr should have at least two elements
# compare alignment of elements in both lists
if ent_match:
return
return False
def should_join_para(self):
return False
def should_join_header(self):
return False
def __str__(self):
output_str = f"\ntext_list = {self.text_list},\nstart_x = {self.start_x}, \nstart_y = {self.start_y}\nend_x = {self.end_x},\nend_y = {self.end_y},\nfs = {self.fs},\nfw = {self.fw},\nstart_fs = {self.start_fs},\nend_fs = {self.end_fs},\ndiff_prev_y = {self.diff_prev_y},\ndiff_next_y = {self.diff_next_y},\nis_comparably_sized = {self.is_comparably_sized},\nis_comparably_bolded = {self.is_comparably_bolded},\nis_prev_space_small = {self.is_prev_space_smallest}\nis_next_space_small = {self.is_next_space_smallest},"
output_str += f"\nfont_style = {self.font_style}"
return output_str
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/utils.py<fim_prefix>import json
import re
import numpy as np
from nltk import load
from nltk import PunktSentenceTokenizer
nltk_abbs = load("tokenizers/punkt/{}.pickle".format("english"))._params.abbrev_types
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
return super(NpEncoder, self).default(obj)
nlm_abbs = {
"u.s",
"u.s.a",
"n.w",
"p.o",
"po",
"st",
"ave",
"blvd",
"ctr",
"cir",
"ct",
"dr",
"mtn",
"apt",
"hwy",
"esq",
"fig",
"no",
"sec",
"n.a",
"s.a.b",
"non-u.s",
"cap",
'u.s.c',
"ste",
}
nlm_special_abbs = {
"inc",
}
abbs = nltk_abbs | nlm_abbs
nltk_tokenzier = PunktSentenceTokenizer()
rules = []
for abb in abbs:
# match start of the sentence
pattern = fr"^{abb}.\s"
replaced = f"{abb}_ "
# case insensitive replacement for synonyms
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
# match token in sentence
pattern = fr"\s{abb}.\s"
replaced = f" {abb}_ "
# case insensitive replacement for synonyms
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
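# Example of the protection above: " u.s. market " becomes " u.s_ market ", so the
# Punkt tokenizer no longer treats the abbreviation's period as a sentence boundary;
# sent_tokenize below maps offsets back onto the original text.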
for abb in nlm_special_abbs:
pattern = fr"{abb}\."
replaced = f"{abb}_"
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
# match content inside brackets
# (?<=\() ==> starts with "("
# ([^)]+) ==> repeat not ")"
# (?=\)) ==> ends with ")"
bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))")
space_rule = re.compile(r"\s([.'](?:\s|$|\D))", re.IGNORECASE) # Remove any space between punctuations (.')
quotation_pattern = re.compile(r'[”“"‘’\']')
def sent_tokenize(org_texts):
if not org_texts:
return org_texts
sents = []
# in case org_texts has \n, break it into multiple paragraphs
# edge case <fim_suffix>for html and markdown
for org_text in org_texts.split("\n"):
org_text = space_rule.sub(r'\1', org_text)
modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text) # To handle bug https://github.com/nltk/nltk/issues/2925
orig_offset = abs(len(org_text) - len(modified_text))
# do not break bracket
for span_group in bracket_rule.finditer(modified_text):
start_byte, end_byte = span_group.span()
span = modified_text[start_byte:end_byte]
# skip this logic when span is too big? disabled for now
# if len(span.split()) >= 10:
# continue
modified_text = modified_text.replace(
f"({span})", f"_{span.replace('.','_')}_",
)
for rule, replaced in rules:
modified_text = rule.sub(replaced, modified_text)
# Normalize all the quotation.
modified_text = quotation_pattern.sub("\"", modified_text)
modified_sents = nltk_tokenzier.tokenize(modified_text)
offset = orig_offset
sent_idx = 0
while offset < len(modified_text) and sent_idx < len(modified_sents):
if modified_text[offset] == " ":
offset += 1
continue
# cut org_text based on lengths of modified_sent
modified_sent = modified_sents[sent_idx]
sents.append(org_text[offset: offset + len(modified_sent)])
offset += len(modified_sent)
sent_idx += 1
if len(sents) >= 2 and re.match(r"^.\.$", sents[0]):
sents[1] = sents[0] + " " + sents[1]
sents = sents[1:]
return sents
def divide_list_into_chunks(lst, n):
# loop over lst, yielding successive chunks of size n
for i in range(0, len(lst), n):
yield lst[i : i + n]
def normalize(X):
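# L2-normalize each row of X in place: einsum computes the row-wise squared norms,
# sqrt is applied in place, and each row is divided by its norm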
norms = np.einsum("ij,ij->i", X, X)
np.sqrt(norms, norms)
X /= norms[:, np.newaxis]
return X
def detect_block_center_aligned(block, page_width):
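# box_style appears to hold (top, left, ?, width, height): a block counts as center-aligned
# when its horizontal midpoint lies within 1% of the page center and it is narrower than half the page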
center_location = block["box_style"][1] + block["box_style"][3] / 2
center_aligned = abs(center_location - page_width / 2) < page_width * 0.01
width_check = block["box_style"][3] * 2 < page_width
return center_aligned and width_check
def detect_block_center_of_page(block, page_height):
bottom = block["box_style"][0] + block["box_style"][4]
center_of_page = (page_height / 3) <= bottom <= ((2 * page_height) / 3)
return center_of_page
def check_char_is_word_boundary(c):
if c.isalnum():
return False
if c in ['-', '_']:
return False
return True
def blocks_to_sents(blocks, flatten_merged_table=False, debug=False):
block_texts = []
block_info = []
header_block_idx = -1
header_match_idx = -1
header_match_idx_offset = -1
header_block_text = ""
is_rendering_table = False
is_rendering_merged_cells = False
table_idx = 0
levels = []
prev_header = None
block_idx = 0
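# `levels` is a stack of currently open header blocks: when a header of equal or higher level
# arrives, deeper headers are popped, so each block's `level_chain` records its header ancestry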
for block_idx, block in enumerate(blocks):
block_type = block["block_type"]
if block_type == "header":
if debug:
print("---", block["level"], block["block_text"])
header_block_text = block["block_text"]
header_block_idx = block["block_idx"]
header_match_idx = header_match_idx_offset + 1
if prev_header and block["level"] <= prev_header['level'] and len(levels) > 0:
while len(levels) > 0 and levels[-1]["level"] >= block["level"]:
if debug:
print("<<", levels[-1]["level"], levels[-1]["block_text"])
levels.pop(-1)
if debug:
print(">>", block["block_text"])
levels.append(block)
prev_header = block
if debug:
print("-", [str(level['level']) + "-" + level['block_text'] for level in levels])
block["header_text"] = header_block_text
block["header_block_idx"] = header_block_idx
block["header_match_idx"] = header_match_idx
block["block_idx"] = block_idx
level_chain = []
for level in levels:
level_chain.append({"block_idx": level["block_idx"], "block_text": level["block_text"]})
# remove a level for header
if block_type == "header":
level_chain = level_chain[:-1]
level_chain.reverse()
block["level_chain"] = level_chain
# if block_type == "header" or block_type == "table_row":
if (
block_type == "header"
and not is_rendering_table and 'is_table_start' not in block
):
block_texts.append(block["block_text"])
# append text from next block to header block
# TODO: something happened here, it messed up the match_text
# if block_type == "header" and block_idx + 1 < len(blocks):
# block[
# "block_text"
# ] += blocks[block_idx+1]['block_text']
block_info.append(block)
header_match_idx_offset += 1
elif (
block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item"
) and not is_rendering_table:
block_sents = block["block_sents"]
header_match_idx_offset += len(block_sents)
for sent in block_sents:
block_texts.append(sent)
block_info.append(block)
elif 'is_table_start' in block:
is_rendering_table = True
if 'has_merged_cells' in block:
is_rendering_merged_cells = True
elif 'is_table_start' not in block and not is_rendering_table and block_type == "table_row":
block_info.append(block)
block_texts.append(block["block_text"])
header_match_idx_offset += 1
if is_rendering_table:
if is_rendering_merged_cells and "effective_para" in block and flatten_merged_table:
eff_header_block = block["effective_header"]
eff_para_block = block["effective_para"]
eff_header_block["header_text"] = block["header_text"]
eff_header_block["header_block_idx"] = block["block_idx"]
eff_header_block["header_match_idx"] = header_match_idx_offset + 1
eff_header_block["level"] = block["level"] + 1
eff_header_block["level_chain"] = block["level_chain"]
eff_para_block["header_block_idx"] = block["block_idx"]
eff_para_block["header_match_idx"] = header_match_idx_offset + 1
eff_para_block["level"] = block["level"] + 2
eff_para_block["level_chain"] = [
{
"block_idx": eff_header_block["block_idx"],
"block_text": eff_header_block["block_text"],
},
] + eff_header_block["level_chain"]
header_match_idx_offset += 1
block_info.append(block["effective_header"])
block_texts.append(block["effective_header"]["block_text"])
for sent in block["effective_para"]["block_sents"]:
block_texts.append(sent)
block_info.append(block["effective_para"])
header_match_idx_offset += len(block["effective_para"]["block_sents"])
else:
block["table_idx"] = table_idx
block_info.append(block)
block_texts.append(block["block_text"])
header_match_idx_offset += 1
if 'is_table_end' in block:
is_rendering_table = False
table_idx += 1
return block_texts, block_info
def get_block_texts(blocks):
block_texts = []
block_info = []
for block in blocks:
block_type = block["block_type"]
if (
block_type == "list_item"
or block_type == "para"
or block_type == "numbered_list_item"
or block_type == "header"
):
block_texts.append(block["block_text"])
block_info.append(block)
return block_texts, block_info<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/utils.py<fim_prefix>import json
import re
import numpy as np
from nltk import load
from nltk import PunktSentenceTokenizer
nltk_abbs = load("tokenizers/punkt/{}.pickle".format("english"))._params.abbrev_types
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
return super(NpEncoder, self).default(obj)
nlm_abbs = {
"u.s",
"u.s.a",
"n.w",
"p.o",
"po",
"st",
"ave",
"blvd",
"ctr",
"cir",
"ct",
"dr",
"mtn",
"apt",
"hwy",
"esq",
"fig",
"no",
"sec",
"n.a",
"s.a.b",
"non-u.s",
"cap",
'u.s.c',
"ste",
}
nlm_special_abbs = {
"inc",
}
abbs = nltk_abbs | nlm_abbs
nltk_tokenzier = PunktSentenceTokenizer()
rules = []
for abb in abbs:
# match start of the sentence
pattern = fr"^{abb}.\s"
replaced = f"{abb}_ "
# case insensitive replacement for synonyms
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
# match token in sentence
pattern = fr"\s{abb}.\s"
replaced = f" {abb}_ "
# case insensitive replacement for synonyms
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
for abb in nlm_special_abbs:
pattern = fr"{abb}\."
replaced = f"{abb}_"
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
# match content inside brackets
# (?<=\() ==> starts with "("
# ([^)]+) ==> repeat not ")"
# (?=\)) ==> ends with ")"
bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))")
space_rule = re.compile(r"\s([.'](?:\s|$|\D))", re.IGNORECASE) # Remove any space between punctuations (.')
quotation_pattern = re.compile(r'[”“"‘’\']')
def sent_tokenize(org_texts):
if not org_texts:
return org_texts
sents = []
# in case org_texts has \n, break it into multiple paragraphs
# edge case for html and markdown
for org_text in org_texts.split("\n"):
org_text = space_rule.sub(r'\1', org_text)
modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text) # To handle bug https://github.com/nltk/nltk/issues/2925
orig_offset = abs(len(org_text) - len(modified_text))
# do not break bracket
for span_group in bracket_rule.finditer(modified_text):
start_byte, end_byte = span_group.span()
span = modified_text[start_byte:end_byte]
# skip this logic when span is too big? disabled for now
# if len(span.split()) >= 10:
# continue
modified_text = modified_text.replace(
f"({span})", f"_{span.replace('.','_')}_",
)
for rule, replaced in rules:
modified_text = rule.sub(replaced, modified_text)
# Normalize all the quotation.
modified_text = quotation_pattern.sub("\"", modified_text)
modified_sents = nltk_tokenzier.tokenize(modified_text)
offset = orig_offset
sent_idx = 0
while offset < len(modified_text) and sent_idx < len(modified_sents):
if modified_text[offset] == " ":
offset += 1
continue
# cut org_text based on lengths of mod<fim_suffix>ified_sent
modified_sent = modified_sents[sent_idx]
sents.append(org_text[offset: offset + len(modified_sent)])
offset += len(modified_sent)
sent_idx += 1
if len(sents) >= 2 and re.match(r"^.\.$", sents[0]):
sents[1] = sents[0] + " " + sents[1]
sents = sents[1:]
return sents
def divide_list_into_chunks(lst, n):
# loop over lst, yielding successive chunks of size n
for i in range(0, len(lst), n):
yield lst[i : i + n]
def normalize(X):
norms = np.einsum("ij,ij->i", X, X)
np.sqrt(norms, norms)
X /= norms[:, np.newaxis]
return X
def detect_block_center_aligned(block, page_width):
center_location = block["box_style"][1] + block["box_style"][3] / 2
center_aligned = abs(center_location - page_width / 2) < page_width * 0.01
width_check = block["box_style"][3] * 2 < page_width
return center_aligned and width_check
def detect_block_center_of_page(block, page_height):
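# a block is considered vertically centered when its bottom edge falls within the middle third of the page height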
bottom = block["box_style"][0] + block["box_style"][4]
center_of_page = (page_height / 3) <= bottom <= ((2 * page_height) / 3)
return center_of_page
def check_char_is_word_boundary(c):
if c.isalnum():
return False
if c in ['-', '_']:
return False
return True
def blocks_to_sents(blocks, flatten_merged_table=False, debug=False):
block_texts = []
block_info = []
header_block_idx = -1
header_match_idx = -1
header_match_idx_offset = -1
header_block_text = ""
is_rendering_table = False
is_rendering_merged_cells = False
table_idx = 0
levels = []
prev_header = None
block_idx = 0
for block_idx, block in enumerate(blocks):
block_type = block["block_type"]
if block_type == "header":
if debug:
print("---", block["level"], block["block_text"])
header_block_text = block["block_text"]
header_block_idx = block["block_idx"]
header_match_idx = header_match_idx_offset + 1
if prev_header and block["level"] <= prev_header['level'] and len(levels) > 0:
while len(levels) > 0 and levels[-1]["level"] >= block["level"]:
if debug:
print("<<", levels[-1]["level"], levels[-1]["block_text"])
levels.pop(-1)
if debug:
print(">>", block["block_text"])
levels.append(block)
prev_header = block
if debug:
print("-", [str(level['level']) + "-" + level['block_text'] for level in levels])
block["header_text"] = header_block_text
block["header_block_idx"] = header_block_idx
block["header_match_idx"] = header_match_idx
block["block_idx"] = block_idx
level_chain = []
for level in levels:
level_chain.append({"block_idx": level["block_idx"], "block_text": level["block_text"]})
# remove a level for header
if block_type == "header":
level_chain = level_chain[:-1]
level_chain.reverse()
block["level_chain"] = level_chain
# if block_type == "header" or block_type == "table_row":
if (
block_type == "header"
and not is_rendering_table and 'is_table_start' not in block
):
block_texts.append(block["block_text"])
# append text from next block to header block
# TODO: something happened here, it messed up the match_text
# if block_type == "header" and block_idx + 1 < len(blocks):
# block[
# "block_text"
# ] += blocks[block_idx+1]['block_text']
block_info.append(block)
header_match_idx_offset += 1
elif (
block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item"
) and not is_rendering_table:
block_sents = block["block_sents"]
header_match_idx_offset += len(block_sents)
for sent in block_sents:
block_texts.append(sent)
block_info.append(block)
elif 'is_table_start' in block:
is_rendering_table = True
if 'has_merged_cells' in block:
is_rendering_merged_cells = True
elif 'is_table_start' not in block and not is_rendering_table and block_type == "table_row":
block_info.append(block)
block_texts.append(block["block_text"])
header_match_idx_offset += 1
if is_rendering_table:
if is_rendering_merged_cells and "effective_para" in block and flatten_merged_table:
eff_header_block = block["effective_header"]
eff_para_block = block["effective_para"]
eff_header_block["header_text"] = block["header_text"]
eff_header_block["header_block_idx"] = block["block_idx"]
eff_header_block["header_match_idx"] = header_match_idx_offset + 1
eff_header_block["level"] = block["level"] + 1
eff_header_block["level_chain"] = block["level_chain"]
eff_para_block["header_block_idx"] = block["block_idx"]
eff_para_block["header_match_idx"] = header_match_idx_offset + 1
eff_para_block["level"] = block["level"] + 2
eff_para_block["level_chain"] = [
{
"block_idx": eff_header_block["block_idx"],
"block_text": eff_header_block["block_text"],
},
] + eff_header_block["level_chain"]
header_match_idx_offset += 1
block_info.append(block["effective_header"])
block_texts.append(block["effective_header"]["block_text"])
for sent in block["effective_para"]["block_sents"]:
block_texts.append(sent)
block_info.append(block["effective_para"])
header_match_idx_offset += len(block["effective_para"]["block_sents"])
else:
block["table_idx"] = table_idx
block_info.append(block)
block_texts.append(block["block_text"])
header_match_idx_offset += 1
if 'is_table_end' in block:
is_rendering_table = False
table_idx += 1
return block_texts, block_info
def get_block_texts(blocks):
block_texts = []
block_info = []
for block in blocks:
block_type = block["block_type"]
if (
block_type == "list_item"
or block_type == "para"
or block_type == "numbered_list_item"
or block_type == "header"
):
block_texts.append(block["block_text"])
block_info.append(block)
return block_texts, block_info<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor/processors.py<fim_prefix>import logging
import re
from collections import Counter
from collections import defaultdict
from . import formatter
from . import line_parser
from . import patterns
from nlm_ingestor.ingestor_utils import spell_utils
from nlm_ingestor.ingestor_utils.utils import sent_tokenize
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
su = spell_utils.SpellUtil()
def stem(line):
line = line.replace("'s", "")
line = line.replace("’s", "")
return line
def check_parentheses(text):
count = 0
for i in text:
if i == "(":
count += 1
elif i == ")":
count -= 1
return count == 0
def nlm_tokenize(line):
# print(line)
tokens = []
if not line:
line = ""
line = line.lower()
trans_table = line.maketrans("-/", " ")
line = line.translate(trans_table)
line = line.translate(str.maketrans("", "", "�\\(*,.?•\\➢ƒ–\\)'\"—"))
# line = patterns.num_unit.sub(r"100 \1", line)
line = patterns.num_unit.sub(r"", line)
line = stem(line)
words = line.split()
for word in words:
if (
not word.isdigit()
and not word.endswith("%")
and not word.startswith("$")
and not word.endswith("$")
):
tokens.append(word)
if len(tokens) == 0:
tokens.append("unknown")
return tokens
# returns True when no word in the line is longer than two characters (the line is all floating characters)
def find_floating_chars(line):
words = line.split(" ")
for word in words:
if len(word) > 2:
return False
return True
def is_table_row(line):
line = line_parser.Line(line)
return line.is_table_row
def should_skip(line, xml=False):
return len(line) <= 2 if not xml else len(line) == 0
def clean_lines(lines, xml=False):
result = []
running_line = ""
line_buffer = []
line_type = "para"
header_block_idx = -1
block_idx = 0
line_set = set()
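# line_set stores lines with digits stripped, so repeated lines that differ only in numbers
# (e.g. page headers/footers) are skipped on later occurrences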
for line_str in lines:
# print(line_str)
line_str = clean_line(line_str)
if should_skip(line_str, xml=xml):
continue
line_without_numbers = re.sub(r"\d+", "", line_str)
if line_without_numbers in line_set:
continue
else:
line_set.add(line_without_numbers)
curr_line = line_parser.Line(line_str)
# this converts strings like 'e x e c u t i v e summary' to 'executive summary'
if not xml and curr_line.has_spaced_characters:
line_str = fix_spaced_characters(line_str)
curr_line = line_parser.Line(line_str)
if len(line_buffer) > 0:
# find out if the previous line was a discontinuous line
prev_line = line_buffer[-1]
logger.debug("========")
logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n")
logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n")
# keep connecting lines as long as they seem incomplete
is_incomplete = prev_line.incomplete_line or (
len(line_buffer) > 1 and not prev_line.ends_with_period
)
logger.debug(
f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}",
)
if (
is_incomplete
and not (curr_line.is_list_or_row or curr_line.line_type == "list_item")
) or curr_line.continuing_line:
logger.debug("connecting..")
running_line = formatter.connect(running_line, curr_line.text)
line_buffer.append(curr_line)
# if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers
if not line_type == "list_item":
line_type = "para"
else: # commit the line and st<fim_suffix>art a new line
# remove different types of bulleted list markers (for better formatting) but do not touch numbered lines
logger.debug("starting new line..")
# if line_type == "list_item":
# running_line = running_line[1:].lstrip()
if line_type == "header":
header_block_idx = block_idx
block = {
"block_idx": block_idx,
"block_text": running_line,
"block_type": line_type,
"text_group_start_idx": -1,
"block_list": [],
"header_block_idx": header_block_idx,
"level": 0,
}
result.append(block)
block_idx = block_idx + 1
running_line = curr_line.text
line_buffer = [curr_line]
line_type = curr_line.line_type
logger.debug("========")
else:
running_line = curr_line.text
line_type = curr_line.line_type
line_buffer = [curr_line]
if line_type == "list_item" and running_line[0] in "�\\*,.?•\\➢ƒ–\\'\"—":
running_line = running_line[1:].lstrip()
block = {
"block_idx": block_idx,
"block_text": running_line,
"block_type": line_type,
"text_group_start_idx": -1,
"block_list": [],
"header_block_idx": header_block_idx,
"level": 0,
}
result.append(block)
return result
def line_list_check(prev_line, curr_line, list_char):
# if prev_line is list_item and list_char matches curr_line
if list_char == curr_line.text[0] and list_char not in ["”", "'", '"', "("]:
return True
# same char is alpha
if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha():
if len(prev_line.text) >= 2 and prev_line.text[1].isupper():
# spell check first word
first_word = prev_line.text.split(" ")[0]
first_word = first_word.replace("'", "")
correct_word = su.segment(first_word)
if first_word[1:] == correct_word:
return True
# same char is not alpha but not digit
if prev_line.text[0] == curr_line.text[0] and not (
prev_line.text[0].isalpha()
or prev_line.text[0].isdigit()
or list_char not in ["”", "'", '"', "("]
):
return True
return False
def should_join_table(prev_line, curr_line, ents_aligned):
"""
Check if the next line should be joined as a table row (tr). This makes no assumption about whether the current line is a table.
"""
# print()
# print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list)
# check list of spaced words
curr_line_ents = len(prev_line.visual_line.text_list)
next_line_ents = len(curr_line.visual_line.text_list)
ent_match = (
curr_line_ents == next_line_ents and curr_line_ents >= 2
) # tr should have at least two elements
# print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count)
tab_match = (
prev_line.visual_line.tab_count == curr_line.visual_line.tab_count
and curr_line.visual_line.tab_count > 0
)
# casing should also be the same
same_case = (
prev_line.text[0].islower() == curr_line.text[0].islower()
or prev_line.text[0].isupper() == curr_line.text[0].isupper()
)
colon_check = (
prev_line.hit_colon
and curr_line.hit_colon
and prev_line
and same_case
and not prev_line.incomplete_line
)
# if prev_line.hit_colon and curr_line.hit_colon:
# print()
# print("colon check")
# print(prev_line.visual_line.text_list)
# print(curr_line.visual_line.text_list)
# col_check
# print(tab_match, ent_match, colon_check)
tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count
return (
(tab_match and ent_match)
or colon_check
or (ents_aligned and ent_match and tab_check)
)
def check_page_spacing(prev_line, curr_line, spacing_dict):
# print("^"*50)
# print("checking page stats")
# print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text)
# print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text)
# print()
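# heuristic: join the two lines when their vertical gap matches (within +/- 1) the smallest gap
# that occurs repeatedly on the page for a font size shared by both lines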
diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y)
# find best fs reference
prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs}
curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs}
same_fs = prev_line_fs.intersection(curr_line_fs)
fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs
min_check = (
spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None
)
max_check = (
spacing_dict[(fs, diff_top + 1)] if (fs, diff_top + 1) in spacing_dict else None
)
normal_check = (fs, diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3
if min_check or normal_check or max_check:
# get all fs in spacing dict
# see if the diff top is a min
# print("checking space dict")
distance_list = []
for val in spacing_dict:
if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2:
distance_list.append((val, val[1]))
# print(distance_list)
val = min(distance_list) if len(distance_list) else []
if len(val):
join_fs, join_top = val[0]
if len(val):
join_fs, join_top = val[0]
if val[0] == (fs, diff_top): # or close
# print("SHOULDJOIN")
return True
elif (
join_fs == fs
and ((diff_top - 1) == join_top)
or ((diff_top + 1) == join_top)
):
return True
return False
def compute_overlap(
start_x0: float,
end_x0: float,
start_x1: float,
end_x1: float,
divide_by_min=True,
) -> float:
"""
Computes the % of intersection (overlap) of two lines w.r.t. the shortest line (or the longest line when divide_by_min is False)
"""
width_x0 = abs(end_x0 - start_x0)
width_x1 = abs(end_x1 - start_x1)
if start_x0 <= start_x1 <= end_x0:
intersect = min(abs(end_x0 - start_x1), width_x1)
elif start_x0 <= end_x1 <= end_x0:
intersect = min(abs(end_x1 - start_x0), width_x1)
elif start_x1 <= start_x0 <= end_x0 <= end_x1:
intersect = abs(end_x0 - start_x0)
else:
intersect = 0.0
if divide_by_min:
intersect /= min(width_x0, width_x1) + 1e-5
else:
intersect /= max(width_x0, width_x1) + 1e-5
return intersect
def compute_overlap_top_bottom(
start_x0: float,
end_x0: float,
start_x1: float,
end_x1: float,
) -> float:
"""
This is different from the function above.
Finds the percentage overlap measured from the top line to the bottom line.
A score of 100% is possible; the shortest line is not used as the reference.
"""
width_x1 = abs(end_x1 - start_x1)
if width_x1 == 0:
return 0.0
if start_x0 <= start_x1:
# measure from left to right
if end_x1 <= end_x0:
# if start and end both less, full in subset
return 1.0
return (end_x1 - start_x0) / width_x1
else:
# measure from bottom start
if end_x1 <= start_x0:
return 0.0
return (end_x1 - start_x0) / width_x1
def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1):
"""
Variant of the function above. When both lines share the same start_x,
returns the second line's width as a fraction of the first line's width;
otherwise it returns 1.0. The shortest line is not used as the reference.
"""
# print(start_x0, end_x0)
# print(start_x1, end_x1)
if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line
# print()
# print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0))
return (end_x1 - start_x1) / (end_x0 - start_x0)
# other conditions
# elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line
# return
# else: #to the right of bottom line
return 1.0
# header check for lines with similar font
def visual_header_check(prev_line, curr_line, same_font):
# check top overlap (small) if the font size is bigger
# print()
# print("visual_header check:")
# print("prev", prev_line.text)
# print("checking", curr_line.text)
# top also has to be higher
# print("prev_line.visual_line.start_y, prev_line.visual_line.end_y")
# print(prev_line.visual_line.start_y, prev_line.visual_line.end_y)
# print(prev_line.visual_line.start_y, curr_line.visual_line.start_y)
if prev_line.visual_line.wrapped_page:
return False
if prev_line.visual_line.start_y < curr_line.visual_line.start_y:
prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x
curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x
# print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x")
# print(prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x)
# print("curr_line.visual_line.min_x, curr_line.visual_line.max_x")
# print(curr_line.visual_line.min_x, curr_line.visual_line.max_x)
# print("prev_line_width / curr_line_width")
# print(prev_line_width / curr_line_width)
# print("prev_line_width, curr_line_width")
# print(prev_line_width, curr_line_width)
if curr_line_width == 0:
return False
# print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x))
if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x):
if round(prev_line_width) == round(curr_line_width):
# print()
# print("NOT A HEADER1")
return False
offset = 0
# print(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
# print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x)
if prev_line.visual_line.min_x <= curr_line.visual_line.min_x:
offset = curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset
# print("(prev_line_width - offset) / curr_line_width")
# print((prev_line_width - offset) / curr_line_width)
overlap_percentage = (prev_line_width - offset) / curr_line_width
different_font_style = (
prev_line.visual_line.fw != curr_line.visual_line.fw
or prev_line.visual_line[1] != curr_line.visual_line[1]
or prev_line.visual_line.fs > curr_line.visual_line.fs
)
if (
overlap_percentage < 0.3
or (different_font_style and overlap_percentage < 0.6)
or (prev_line.line_type == "header" and different_font_style)
# or (prev_line.is_header and different_font_style)
):
# print("HEADER INDENT", prev_line.is_header)
# print("overlap rule::", (prev_line_width - offset) / curr_line_width)
# print(True)
return True
# print(False)
# print()
# print("NOT A HEADER")
return False
def visual_header_from_stats(prev_line, curr_line, page_stats):
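# heuristic header detection from page-level font statistics: prev_line is flagged as a header
# when the page has a varied set of font sizes, prev_line's font size is rare on the page, and it is
# either close to the page maximum or clearly larger than the median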
prev_fs = prev_line.visual_line.fs
curr_fs = curr_line.visual_line.fs
median_val = round(page_stats["median_fs"])
max_val = round(max(page_stats["fs_list"]))
max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if max_val != 0 else True
prev_fs_diff = round(prev_fs - median_val)
curr_fs_diff = (
round(curr_fs - median_val) if round(curr_fs - median_val) else 0.8
) # curr_fs is the median
varied_set = len(set(page_stats["fs_list"])) >= 4
rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]])
unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"])
prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff
# print("prev_fs, curr_fs", prev_fs, curr_fs)
# print("unique text")
# print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) )
# print("visual_header check", len(set(page_stats["fs_list"])))
# print("varied_set", varied_set, "unique_text", unique_text)
# print(rounded_fs_count)
# print()
# close from max or far enough from median
bigger_text = max_val_diff or (
prev_curr_ratio_from_median > 2
) # TODO text must also be relatively uncommon
if varied_set and (unique_text <= 0.08):
if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3:
# print(max_val_diff)
# print(prev_fs, prev_line.text)
# print(curr_fs, curr_line.text)
# print()
return True
# header join
if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1):
# print(max_val_diff)
# print(prev_fs, prev_line.text)
# print(curr_fs, curr_line.text)
# print()
return True
return False
# def visual_clean_lines(lines, page_stats={}, page_info_dict={}):
def check_tr_alignment(prev_line, curr_line):
# print("-=" * 50)
# print("check_tr_alignment!")
# print(prev_line.text)
# print(curr_line.text)
# print()
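# heuristic: two lines are treated as aligned table rows when the previous line has more than one
# entity, the entity counts differ by at most one, and corresponding entity start-x positions
# differ by less than 100 units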
prev_ents = len(prev_line.visual_line.text_list)
curr_ents = len(curr_line.visual_line.text_list)
prev_positions = prev_line.visual_line.start_x_list
curr_positions = curr_line.visual_line.start_x_list
prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent
curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent
# print(prev_line_start_ents)
# print(curr_line_start_ents)
same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1
if len(prev_line_start_ents) == len(curr_line_start_ents):
prev_positions = prev_line_start_ents
curr_positions = curr_line_start_ents
if len(prev_line_start_ents) == len(curr_positions) and len(
prev_line_start_ents,
) != len(
prev_positions,
): # joined p_tags
prev_positions = prev_line_start_ents
if not same_ents:
# print("check_tr_alignment False1")
# print(prev_ents, curr_ents)
return False
# print("CHECKING POSITIONS")
# print(prev_positions)
# print(curr_positions)
for p_x, c_x in zip(prev_positions, curr_positions):
p_x = round(p_x)
c_x = round(c_x)
if abs(p_x - c_x) > 100:
# print("False")
# print("check_tr_alignment False3")
return False
# print("check_tr_alignment True")
return True
def check_layout(prev_line, curr_line, prev_above_curr):
prev_line_width = range(
int(prev_line.visual_line.min_x),
int(prev_line.visual_line.max_x),
)
# weird edge case
if not prev_line_width:
prev_line_width = range(
int(prev_line.visual_line.max_x),
int(prev_line.visual_line.min_x),
)
curr_line_width = range(
int(curr_line.visual_line.min_x),
int(curr_line.visual_line.max_x),
)
prev_line_width = set(prev_line_width)
prev_curr_overlap = prev_line_width.intersection(curr_line_width)
if prev_curr_overlap and not prev_above_curr:
# print(prev_line.text)
# print(curr_line.text)
# print("misplaced text group")
# print()
return True
return False
def order_blocks(blocks):
block_group_dict = defaultdict(list)
for idx, block in enumerate(blocks):
# print(idx, "block-group", block["group_id"], block["block_type"], block['block_text'])
group_id = block["group_id"]
block_group_dict[group_id].append(block)
block_group_list = [] # list that holds tuples (group_id, y_pos)
for block_group_id in block_group_dict:
block_group_list.append(
(block_group_id, block_group_dict[block_group_id][0]["y"]),
) # append starting y position of group
block_group_list = sorted(
block_group_list,
key=lambda x: x[1],
) # sort block groups by y position
# get list of ordered block group keys
ordered_blocks = []
for block_group_id, y in block_group_list:
ordered_blocks += block_group_dict[block_group_id]
# for b in original_blocks:
# re-index blocks and headers based off of new ordering
header_idx = 0
for idx, block in enumerate(ordered_blocks):
block["block_idx"] = idx
if block["block_type"] == "header":
header_idx = idx
ordered_blocks[idx]["header_block_idx"] = header_idx
return ordered_blocks
def visual_clean_lines(
lines,
page_stats={},
page_info_dict={},
page_idx=0,
line_set={},
):
page_blocks = []
header_block_idx = -1
block_idx = 0
# block_idx = page_idx
style_dict = {}
join_font_spacing = False
prev_line = None
text_list = []
prev_ents = 0
curr_ents = 0
is_incomplete = False
colon_rule = False
text_group_start = True
text_group_start_idx = 0
prev_line = None
next_line = None
# for idx, line in enumerate(lines[12:14]):
sentence_visual_end = False
group_id = 0
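# walk the page line by line: each line either joins the previous block (paragraph continuation)
# or starts a new block (header, list item, table row), based on font size/weight, vertical spacing
# and x/y layout overlap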
for idx, line in enumerate(lines):
# print(idx)
line_str, style_dict, text_list = (
line["text"],
line["style"],
line["text_list"],
)
line_str = " ".join(line_str.split())
if should_skip(line_str):
continue
if line_str in line_set:
continue
if len(line_str.split()) > 8:
line_set.add(line_str)
curr_line = line_parser.Line(
line_str=line_str,
style_dict=style_dict,
text_list=text_list,
page_details=page_stats,
)
if prev_line is None:
# initialize memory of previous line.
# this will update with join decisions
list_char = ""
if curr_line.line_type == "list_item":
list_char = curr_line.text[0]
curr_line.text = curr_line.text[1:].lstrip()
if curr_line.line_type == "header":
header_block_idx = block_idx
block = {
"block_idx": block_idx,
"block_text": curr_line.text,
"block_type": curr_line.line_type,
"header_block_idx": header_block_idx,
"block_group": [curr_line.visual_line.text_list],
"list_char": list_char,
"fs": curr_line.visual_line.start_fs,
"text_group_start_idx": text_group_start_idx,
"block_list": curr_line.visual_line.text_list,
"line": curr_line,
"y": curr_line.visual_line.start_y,
"group_id": group_id,
}
prev_line = curr_line
block_idx += 1
# if (idx <= 3) or (idx >= len(lines) - 3):
# line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip()
# if line_without_numbers:
# # track block_idx for de-duplication
# line_set[line_without_numbers].append((page_idx, block_idx))
page_blocks.append(block)
continue
# print("--" * 50)
# print(prev_line.line_type, "\n", prev_line.text)
# print(prev_ents)
# print(prev_line.visual_line.fw_list)
# print(prev_line.visual_line.font_family)
# print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text)
# print(prev_line.visual_line.mode_fs)
# print(curr_line.line_type, "\n", curr_line.text)
# print(curr_ents)
# print()
# print(curr_line.visual_line.font_family)
# print(curr_line.visual_line.mode_fs)
# print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text)
if (
len(prev_line.text) > 1
and len(curr_line.text) > 1
and prev_line.text[:2] == curr_line.text[:2]
and prev_line.text[1] == " "
and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit())
and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha())
):
curr_line.line_type = "list_item"
curr_line.is_list_item = True
curr_line.is_list_or_row = True
if page_blocks[-1]["block_type"] != "list_item":
page_blocks[-1]["block_type"] = "list_item"
page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0]
page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][
1:
].lstrip()
same_start_fs = (
abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5
)
same_end_fs = (
abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5
)
same_end_start_fs = (
abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5
)
prev_above_curr = (
True
if prev_line.visual_line.end_y < curr_line.visual_line.start_y
else False
)
y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y
top_overlap = compute_overlap_top_bottom(
start_x0=prev_line.visual_line.start_x,
end_x0=prev_line.visual_line.end_x,
start_x1=curr_line.visual_line.start_x,
end_x1=curr_line.visual_line.end_x,
)
bottom_overlap = compute_bottom_top_overlap(
start_x0=prev_line.visual_line.start_x,
end_x0=prev_line.visual_line.end_x,
start_x1=curr_line.visual_line.start_x,
end_x1=curr_line.visual_line.end_x,
)
prev_overlap_curr = True if bottom_overlap or top_overlap else False
use_visual_join = True if prev_above_curr and prev_overlap_curr else False
if not use_visual_join and prev_line.incomplete_line:
join_font_spacing = True
if not (prev_line.is_table_row or curr_line.is_table_row):
if page_stats["n_lines"] <= 3:
join_font_spacing = True
else:
join_font_spacing = check_page_spacing(
prev_line,
curr_line,
page_stats["fs_and_diff_next_y"],
)
# if the font is different and font-family is different
different_font_family = (
curr_line.visual_line.font_family != prev_line.visual_line.font_family
)
different_common_fs = (
prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs
and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs
)
different_font = (
different_font_family and different_common_fs and not join_font_spacing
)
# start and end characters are same font or the mode of fonts of both lines is the same
same_font = (
(prev_line.visual_line.fs == curr_line.visual_line.fs)
or (same_start_fs and same_end_fs)
or same_end_start_fs
or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs
) and not different_font
prev_ents = (
len(prev_line.visual_line.text_list)
if not prev_line.line_type == "list_item"
else 0
)
curr_ents = (
len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0
)
ents_aligned = check_tr_alignment(prev_line, curr_line)
is_incomplete_sent = (
prev_line.incomplete_line
and not prev_line.ends_with_period
or prev_line.ends_with_comma
)
# logic using line after curr
if idx + 1 < len(lines):
# this is inefficient as line_parser is called twice,
# once for next_line and once for curr_line.
next_line = lines[idx + 1]
# print("NEXT LINE\n", next_line['text'])
next_line_str, next_style_dict, next_text_list = (
next_line["text"],
next_line["style"],
next_line["text_list"],
)
next_line = line_parser.Line(
line_str=next_line_str,
style_dict=next_style_dict,
text_list=next_text_list,
page_details=page_stats,
)
# if the last line was not a table, check if the next line is a table to avoid single tr
if prev_line.line_type != "table_row" and not ents_aligned:
# check if the next line is a table and matches curr_line
next_line_tr = next_line.line_type == "table_row" or should_join_table(
curr_line,
next_line,
False,
)
if not next_line_tr and curr_line.line_type == "table_row":
curr_line.line_type = "para"
# if the next line is joinable by visual stats but prev and curr are not
# don't join the line (only true by x-span check and y is below for prev cur)
# if this is not true ignore the rule
prev_not_above_next = (
next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y
)
next_line_join = False
if next_line and check_layout(prev_line, next_line, prev_not_above_next):
next_line_join = check_page_spacing(
curr_line,
next_line,
page_stats["fs_and_diff_next_y"],
)
# if the prev line is not visually joinable and the curr_next is
# make sure the prev_line doesn't join the curr_line
curr_next_visual_join = not join_font_spacing and next_line_join
# print()
# print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line")
# print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line)
# print("join_font_spacing:,", join_font_spacing)
is_incomplete = (
is_incomplete_sent
or (join_font_spacing and not sentence_visual_end)
or curr_line.continuing_line
)
# print("is_incomplete", is_incomplete)
has_overlap_with_min = (
compute_overlap(
curr_line.visual_line.start_x,
curr_line.visual_line.end_x,
prev_line.visual_line.start_x,
prev_line.visual_line.end_x,
divide_by_min=True,
)
> 0.7
)
is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0
is_visually_apart = (has_overlap_with_min and not is_below) or (
not has_overlap_with_min and is_below
)
above_bold_below_not = (
prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0
)
has_overlap_with_max = (
compute_overlap(
curr_line.visual_line.start_x,
curr_line.visual_line.end_x,
prev_line.visual_line.start_x,
prev_line.visual_line.end_x,
divide_by_min=False,
)
> 0.3
)
is_not_header_over_para = True
if (
above_bold_below_not
and not has_overlap_with_max
and prev_line.line_type == "header"
and not prev_line.incomplete_line
):
is_not_header_over_para = False
# print("header over para check")
# print("""above_bold_below_not
# and not has_overlap_with_max
# and prev_line.line_type == "header"
# """)
# print(above_bold_below_not)
# print(has_overlap_with_max, j)
# print(prev_line.line_type == "header")
# print()
# print(is_not_header_over_para)
###########
# List item
if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]):
prev_line.line_type = "list_item"
curr_line.line_type = "list_item"
curr_line.is_list_item = True
# change prev_line to list item
if page_blocks[-1]["block_type"] != "list_item":
page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0]
page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][
1:
].lstrip()
page_blocks[-1]["block_type"] = "list_item"
close_text_y = (
curr_line.visual_line.start_y
- curr_line.visual_line.mode_fs
- prev_line.visual_line.start_y
- prev_line.visual_line.mode_fs
) <= 0
aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x
title_text = False
if len(lines) < 10:
title_text = top_overlap == 1.0 and close_text_y and aligned_text
visual_header = visual_header_check(prev_line, curr_line, same_font)
list_item_rule = curr_line.has_list_char or (
curr_line.numbered_line
and not (
(prev_line.incomplete_line and curr_line.continuing_line)
or join_font_spacing
)
)
last_2_block_tr = False
if len(page_blocks) >= 2:
last_block_tr = (
page_blocks[-1]["block_type"] == "table_row"
and page_blocks[-2]["block_type"] == "table_row"
)
if not last_block_tr and curr_line.line_type == "para":
# check to join
if prev_line.incomplete_line and curr_line.continuing_line:
last_2_block_tr = True
no_space_join = prev_line.ends_with_period and curr_line.text[0] != " "
visual_header_by_stats = visual_header_from_stats(
prev_line,
curr_line,
page_stats,
)
header_join = False
common_list = curr_line.has_list_char or prev_line.has_list_char
if (
visual_header_by_stats
and curr_line.incomplete_line
and same_font
and not (prev_line.is_table_row or curr_line.is_table_row or common_list)
):
header_join = True
# print("LINEJOIN CHECK")
# print("positive\n", "*" * 10)
# print(f"\nsame_font:{same_font}",
# f"\nis_incomplete:{is_incomplete}",
# f"\nis_not_header_over_para:{is_not_header_over_para}")
# print("join_font_spacing", join_font_spacing)
# print("header join", header_join)
# print()
# print("negative\n", "*" * 10)
# print(f"\nis_visually_apart:{is_visually_apart}",
# f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}",
# f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}",
# f"\ncurr_line table {curr_line.line_type == 'table_row'}",
# f"\ncurr_line list {curr_line.is_list_item}",
# f"\nvisual_header {visual_header}",
# f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}')
if (
same_font
and not should_join_table(prev_line, curr_line, ents_aligned)
and not (curr_line.line_type == "table_row" or list_item_rule)
and not (prev_line.line_type == "table_row" and not last_2_block_tr)
and is_incomplete
and not curr_next_visual_join # is_visually_apart
and not visual_header
or not check_parentheses(prev_line.text)
and is_not_header_over_para
and not no_space_join
or title_text
or header_join
):
# print("JOIN")
if not is_visually_apart and bottom_overlap < 0.5:
# this would signify end of paragraph
sentence_visual_end = True
else:
sentence_visual_end = False
if page_stats["n_lines"] <= 3:
page_blocks[-1]["block_type"] = "header"
elif (
not prev_line.line_type == "list_item"
): # and not curr_line.visual_line.is_header:
page_blocks[-1]["block_type"] = "para"
new_text = formatter.connect(
prev_line.text.rstrip(),
curr_line.text.lstrip(),
)
new_text_list = (
prev_line.visual_line.text_list + curr_line.visual_line.text_list
)
# print("Max ex min ex assignment")
max_x = max(prev_line.visual_line.max_x, curr_line.visual_line.max_x)
min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
prev_line_type = prev_line.line_type
page_blocks[-1]["block_text"] = new_text
prev_start_y = prev_line.visual_line.start_y
curr_start_y = curr_line.visual_line.start_y
prev_end_y = prev_line.visual_line.end_y
wrapped_page = prev_line.visual_line.wrapped_page
# pass the line parser attributes
prev_line = curr_line
# add appended text and text_list, preserve the line type
prev_line.text = new_text
prev_line.visual_line.start_y = prev_start_y
prev_line.visual_line.text_list = new_text_list
prev_line.line_type = prev_line_type
prev_line.visual_line.min_x = min_x
prev_line.visual_line.max_x = max_x
prev_line.visual_line.wrapped_page = wrapped_page
if curr_start_y < prev_end_y:
prev_line.visual_line.wrapped_page = True
# print(prev_start_y)
# print("Join")
# print()
# print("-" * 50)
# print()
# new block
else:
# print("NEW block")
# print("*" * 50)
if not is_visually_apart and bottom_overlap < 0.5:
# this would signify end of paragraph
sentence_visual_end = True
else:
sentence_visual_end = False
# print("-"*50)
colon_rule = (
prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents
)
# normal case
tab_check_join = {
prev_line.visual_line.tab_count_join,
prev_line.visual_line.tab_count,
} & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count}
tab_check = sum(tab_check_join) > 0
# print("-+" * 50)
# print("TAB POSITIONS")
# print(prev_line.text)
# print(prev_line.visual_line.start_x_list)
# print(prev_line.visual_line.start_x_list_single_ent)
# print(prev_line.visual_line.tab_count)
# print(prev_line.visual_line.tab_count_join)
#
# print(curr_line.text)
# print(curr_line.visual_line.start_x_list)
# print(curr_line.visual_line.start_x_list_single_ent)
# print(curr_line.visual_line.tab_count)
# print(curr_line.visual_line.tab_count_join)
# print("tabcheck", tab_check)
# print("ents_aligned", ents_aligned)
# print(prev_ents, curr_ents)
# print(curr_line.visual_line.text_list)
# print("-+" * 50)
if visual_header_by_stats and prev_line.line_type != "table_row":
page_blocks[-1]["block_type"] = "header"
elif (
colon_rule
and prev_ents == 1
and prev_line.line_type != "list_item"
and not (prev_line.incomplete_line and curr_line.continuing_line)
):
# print("Table Conversion")
# print()
# print("colon check")
# print(prev_line.text.split(":"))
# print(curr_line.text.split(":"))
# print("TR1")
new_text_list = prev_line.text.split(":")
new_text_list = [new_text_list[0] + ":", new_text_list[1:]]
page_blocks[-1]["block_type"] = "table_row"
page_blocks[-1]["block_list"]: new_text_list
if text_group_start:
text_group_start = False
text_group_start_idx = page_blocks[-1]["block_idx"]
page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
curr_line.line_type = "table_row"
curr_line.is_list_or_row = True
# print("Table Conversion!")
# print(prev_ents, curr_ents)
# print(page_blocks[-1]["block_text"])
# print("TR3")
elif (
tab_check and ents_aligned and prev_line.line_type != "list_item"
) or (colon_rule and not prev_line.incomplete_line):
# print("Table Conversion")
# print(prev_ents, curr_ents)
# print(page_blocks[-1]["block_text"])
# print("TR2")
page_blocks[-1]["block_type"] = "table_row"
if text_group_start:
text_group_start = False
text_group_start_idx = page_blocks[-1]["block_idx"]
page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
curr_line.line_type = "table_row"
else:
text_group_start = True
text_group_start_idx = -1
list_char = ""
if curr_line.line_type == "list_item":
list_char = curr_line.text[0]
curr_line.text = curr_line.text[1:].lstrip()
if curr_line.line_type == "header":
header_block_idx = block_idx
if (visual_header or visual_header_by_stats) and not (
prev_line.line_type == "list_item"
or prev_line.line_type == "numbered_list_item"
):
page_blocks[-1]["block_type"] = "header"
# print()
# print("*" * 40)
# print("NEW BLOCK")
# print()
# print("*" * 40)
# print(curr_line.line_type, curr_line.text)
# group attribute
if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0:
group_id += 1
block = {
"block_idx": block_idx,
"block_text": curr_line.text,
"block_type": curr_line.line_type,
"header_block_idx": header_block_idx,
"block_group": [curr_line.visual_line.text_list],
"text_group_start_idx": text_group_start_idx,
"list_char": list_char,
"group_id": group_id,
"fs": curr_line.visual_line.start_fs,
"x": curr_line.visual_line.start_x,
"y": curr_line.visual_line.start_y,
"line": curr_line,
"block_list": curr_line.visual_line.text_list,
}
# This accounts for false-positive headers. TODO: improve header detection
prev_text = page_blocks[-1]["block_text"]
if page_blocks[-1]["block_type"] == "header" and (
len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16
):
page_blocks[-1]["block_type"] = "para"
prev_line = curr_line
block_idx += 1
page_blocks.append(block)
# not too many blocks there may be title text missed
if len(page_blocks) <= 2:
for idx, block in enumerate(page_blocks):
if "." not in block["block_text"] and len(block["block_text"].split()) < 10:
page_blocks[idx]["block_type"] = "header"
page_blocks = order_blocks(page_blocks)
return page_blocks, line_set
def clean_line(line):
line = line.replace("\n", " ")
line = line.replace("\t", " ")
line = line.strip()
return line
def fix_spaced_characters(line_text):
line_text = re.sub(r"\s+", "", line_text)
return su.segment(line_text)
def connect(prev, curr):
has_space = prev.endswith(" ")
result = prev + ("" if has_space else " ") + curr
return result
def get_numbers(line):
# test = re.compile(r"[0-9]+\.?[0-9]?")
regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$")
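# matches a trailing number at the end of the line, optionally preceded by '$', e.g. '12', '12.50' or '.99'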
return regex.search(line)
def check_block_join(prev_block, block):
prev_text = prev_block["block_text"]
curr_text = block["block_text"]
blocks_are_paras = (
prev_block["block_type"] == "para" and block["block_type"] == "para"
)
if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras:
prev_line = line_parser.Line(prev_block["block_text"])
curr_line = line_parser.Line(block["block_text"])
if prev_line.incomplete_line or curr_line.continuing_line:
return True
return False
def join_blocks(page_blocks, blocks):
prev_last_block = page_blocks[-1][-1]
# update page blocks and blocks
# prev_blocks = page_blocks[-1]
# last_prev_block = prev_blocks[-1]
# check to join last_prev_block with first blocks[0]
# if it's a join, pop the block and join, subtract block indexes
prev_last_block["block_text"] = (
prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip()
)
prev_last_block["block_list"].append(blocks[0]["block_list"])
# print(prev_block)
page_blocks[-1][-1] = prev_last_block
for block in blocks[1:]:
block["block_idx"] -= 1
return page_blocks, blocks[1:]
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
<filename>nlm-ingestor/nlm_ingestor/ingestor_utils/utils.py<fim_prefix>import json
import re
import numpy as np
from nltk import load
from nltk import PunktSentenceTokenizer
nltk_abbs = load("tokenizers/punkt/{}.pickle".format("english"))._params.abbrev_types
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
return super(NpEncoder, self).default(obj)
nlm_abbs = {
"u.s",
"u.s.a",
"n.w",
"p.o",
"po",
"st",
"ave",
"blvd",
"ctr",
"cir",
"ct",
"dr",
"mtn",
"apt",
"hwy",
"esq",
"fig",
"no",
"sec",
"n.a",
"s.a.b",
"non-u.s",
"cap",
'u.s.c',
"ste",
}
nlm_special_abbs = {
"inc",
}
abbs = nltk_abbs | nlm_abbs
nltk_tokenzier = PunktSentenceTokenizer()
rules = []
for abb in abbs:
# match start of the sentence
pattern = fr"^{abb}.\s"
replaced = f"{abb}_ "
# case insensitive replacement for synonyms
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
# match token in sentence
pattern = fr"\s{abb}.\s"
replaced = f" {abb}_ "
# case insensitive replacement for synonyms
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
for abb in nlm_special_abbs:
pattern = fr"{abb}\."
replaced = f"{abb}_"
rule = re.compile(pattern, re.IGNORECASE)
rules.append((rule, replaced))
# match content inside brackets
# (?<=\() ==> starts with "("
# ([^)]+) ==> repeat not ")"
# (?=\)) ==> ends with ")"
bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))")
space_rule = re.compile(r"\s([.'](?:\s|$|\D))", re.IGNORECASE) # Remove any space between punctuations (.')
quotation_pattern = re.compile(r'[”“"‘’\']')
def sent_tokenize(org_texts):
if not org_texts:
return org_texts
sents = []
# in case org_texts has \n, break it into multiple paragraphs
# edge case for html and markdown
for org_text in org_texts.split("\n"):
org_text = space_rule.sub(r'\1', org_text)
modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text) # To handle bug https://github.com/nltk/nltk/issues/2925
orig_offset = abs(len(org_text) - len(modified_text))
# do not break bracket
for span_group in bracket_rule.finditer(modified_text):
start_byte, end_byte = span_group.span()
span = modified_text[start_byte:end_byte]
# skip this logic when span i<fim_suffix>s too big? disabled for now
# if len(span.split()) >= 10:
# continue
modified_text = modified_text.replace(
f"({span})", f"_{span.replace('.','_')}_",
)
for rule, replaced in rules:
modified_text = rule.sub(replaced, modified_text)
# Normalize all quotation marks to straight double quotes.
modified_text = quotation_pattern.sub("\"", modified_text)
modified_sents = nltk_tokenzier.tokenize(modified_text)
offset = orig_offset
sent_idx = 0
while offset < len(modified_text) and sent_idx < len(modified_sents):
if modified_text[offset] == " ":
offset += 1
continue
# cut org_text based on lengths of modified_sent
modified_sent = modified_sents[sent_idx]
sents.append(org_text[offset: offset + len(modified_sent)])
offset += len(modified_sent)
sent_idx += 1
if len(sents) >= 2 and re.match(r"^.\.$", sents[0]):
sents[1] = sents[0] + " " + sents[1]
sents = sents[1:]
return sents
def divide_list_into_chunks(lst, n):
# yield successive chunks of lst, each of size n
for i in range(0, len(lst), n):
yield lst[i : i + n]
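# normalize() rescales each row of X to unit L2 norm in place and returns it.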
def normalize(X):
norms = np.einsum("ij,ij->i", X, X)
np.sqrt(norms, norms)
X /= norms[:, np.newaxis]
return X
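# detect_block_center_aligned() treats a block as centered when its midpoint
# (box_style[1] + box_style[3] / 2) is within 1% of the page midline and its
# width is less than half the page width.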
def detect_block_center_aligned(block, page_width):
center_location = block["box_style"][1] + block["box_style"][3] / 2
center_aligned = abs(center_location - page_width / 2) < page_width * 0.01
width_check = block["box_style"][3] * 2 < page_width
return center_aligned and width_check
def detect_block_center_of_page(block, page_height):
bottom = block["box_style"][0] + block["box_style"][4]
center_of_page = (page_height / 3) <= bottom <= ((2 * page_height) / 3)
return center_of_page
def check_char_is_word_boundary(c):
if c.isalnum():
return False
if c in ['-', '_']:
return False
return True
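# blocks_to_sents() flattens parsed blocks into parallel lists of sentence texts and
# block metadata, tracking the active header chain and table-rendering state
# (including optionally flattened merged-cell tables).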
def blocks_to_sents(blocks, flatten_merged_table=False, debug=False):
block_texts = []
block_info = []
header_block_idx = -1
header_match_idx = -1
header_match_idx_offset = -1
header_block_text = ""
is_rendering_table = False
is_rendering_merged_cells = False
table_idx = 0
levels = []
prev_header = None
block_idx = 0
for block_idx, block in enumerate(blocks):
block_type = block["block_type"]
if block_type == "header":
if debug:
print("---", block["level"], block["block_text"])
header_block_text = block["block_text"]
header_block_idx = block["block_idx"]
header_match_idx = header_match_idx_offset + 1
if prev_header and block["level"] <= prev_header['level'] and len(levels) > 0:
while len(levels) > 0 and levels[-1]["level"] >= block["level"]:
if debug:
print("<<", levels[-1]["level"], levels[-1]["block_text"])
levels.pop(-1)
if debug:
print(">>", block["block_text"])
levels.append(block)
prev_header = block
if debug:
print("-", [str(level['level']) + "-" + level['block_text'] for level in levels])
block["header_text"] = header_block_text
block["header_block_idx"] = header_block_idx
block["header_match_idx"] = header_match_idx
block["block_idx"] = block_idx
level_chain = []
for level in levels:
level_chain.append({"block_idx": level["block_idx"], "block_text": level["block_text"]})
# remove a level for header
if block_type == "header":
level_chain = level_chain[:-1]
level_chain.reverse()
block["level_chain"] = level_chain
# if block_type == "header" or block_type == "table_row":
if (
block_type == "header"
and not is_rendering_table and 'is_table_start' not in block
):
block_texts.append(block["block_text"])
# append text from next block to header block
# TODO: something happened here, it messed up the match_text
# if block_type == "header" and block_idx + 1 < len(blocks):
# block[
# "block_text"
# ] += blocks[block_idx+1]['block_text']
block_info.append(block)
header_match_idx_offset += 1
elif (
block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item"
) and not is_rendering_table:
block_sents = block["block_sents"]
header_match_idx_offset += len(block_sents)
for sent in block_sents:
block_texts.append(sent)
block_info.append(block)
elif 'is_table_start' in block:
is_rendering_table = True
if 'has_merged_cells' in block:
is_rendering_merged_cells = True
elif 'is_table_start' not in block and not is_rendering_table and block_type == "table_row":
block_info.append(block)
block_texts.append(block["block_text"])
header_match_idx_offset += 1
if is_rendering_table:
if is_rendering_merged_cells and "effective_para" in block and flatten_merged_table:
eff_header_block = block["effective_header"]
eff_para_block = block["effective_para"]
eff_header_block["header_text"] = block["header_text"]
eff_header_block["header_block_idx"] = block["block_idx"]
eff_header_block["header_match_idx"] = header_match_idx_offset + 1
eff_header_block["level"] = block["level"] + 1
eff_header_block["level_chain"] = block["level_chain"]
eff_para_block["header_block_idx"] = block["block_idx"]
eff_para_block["header_match_idx"] = header_match_idx_offset + 1
eff_para_block["level"] = block["level"] + 2
eff_para_block["level_chain"] = [
{
"block_idx": eff_header_block["block_idx"],
"block_text": eff_header_block["block_text"],
},
] + eff_header_block["level_chain"]
header_match_idx_offset += 1
block_info.append(block["effective_header"])
block_texts.append(block["effective_header"]["block_text"])
for sent in block["effective_para"]["block_sents"]:
block_texts.append(sent)
block_info.append(block["effective_para"])
header_match_idx_offset += len(block["effective_para"]["block_sents"])
else:
block["table_idx"] = table_idx
block_info.append(block)
block_texts.append(block["block_text"])
header_match_idx_offset += 1
if 'is_table_end' in block:
is_rendering_table = False
table_idx += 1
return block_texts, block_info
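# get_block_texts() collects text and metadata only for list_item, para,
# numbered_list_item, and header blocks.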
def get_block_texts(blocks):
block_texts = []
block_info = []
for block in blocks:
block_type = block["block_type"]
if (
block_type == "list_item"
or block_type == "para"
or block_type == "numbered_list_item"
or block_type == "header"
):
block_texts.append(block["block_text"])
block_info.append(block)
return block_texts, block_info<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |