repository_name (stringlengths 5-67) | func_path_in_repository (stringlengths 4-234) | func_name (stringlengths 0-314) | whole_func_string (stringlengths 52-3.87M) | language (stringclasses, 6 values) | func_code_string (stringlengths 52-3.87M) | func_documentation_string (stringlengths 1-47.2k) | func_code_url (stringlengths 85-339) |
---|---|---|---|---|---|---|---|
bcbnz/pylabels | labels/sheet.py | Sheet._shade_missing_label | def _shade_missing_label(self):
"""Helper method to shade a missing label. Not intended for external use.
"""
# Start a drawing for the whole label.
label = Drawing(float(self._lw), float(self._lh))
label.add(self._clip_label)
# Fill with a rectangle; the clipping path will take care of the borders.
r = shapes.Rect(0, 0, float(self._lw), float(self._lh))
r.fillColor = self.shade_missing
r.strokeColor = None
label.add(r)
# Add the label to the page.
label.shift(*self._calculate_edges())
self._current_page.add(label) | python | def _shade_missing_label(self):
"""Helper method to shade a missing label. Not intended for external use.
"""
# Start a drawing for the whole label.
label = Drawing(float(self._lw), float(self._lh))
label.add(self._clip_label)
# Fill with a rectangle; the clipping path will take care of the borders.
r = shapes.Rect(0, 0, float(self._lw), float(self._lh))
r.fillColor = self.shade_missing
r.strokeColor = None
label.add(r)
# Add the label to the page.
label.shift(*self._calculate_edges())
self._current_page.add(label) | Helper method to shade a missing label. Not intended for external use. | https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L328-L344 |
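The helper above builds a stand-alone ReportLab `Drawing` clipped to the label outline and fills it with the `shade_missing` colour before shifting it onto the page. A minimal sketch of the same fill pattern outside pylabels, assuming only ReportLab is installed (the size and colour below are arbitrary stand-ins):

```python
from reportlab.graphics import renderPDF
from reportlab.graphics.shapes import Drawing, Rect
from reportlab.lib import colors

# A drawing the size of one label (in points), filled with a flat rectangle,
# mirroring what _shade_missing_label does before placing it on the page.
label = Drawing(180, 72)
shade = Rect(0, 0, 180, 72)
shade.fillColor = colors.lightgrey   # stand-in for self.shade_missing
shade.strokeColor = None             # in pylabels the clipping path handles the borders
label.add(shade)

renderPDF.drawToFile(label, "shaded_label.pdf")
```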
bcbnz/pylabels | labels/sheet.py | Sheet._shade_remaining_missing | def _shade_remaining_missing(self):
"""Helper method to shade any missing labels remaining on the current
page. Not intended for external use.
Note that this will modify the internal _position attribute and should
therefore only be used once all the 'real' labels have been drawn.
"""
# Sanity check.
if not self.shade_missing:
return
# Run through each missing label left in the current page and shade it.
missing = self._used.get(self.page_count, set())
for position in missing:
self._position = position
self._shade_missing_label() | python | def _shade_remaining_missing(self):
"""Helper method to shade any missing labels remaining on the current
page. Not intended for external use.
Note that this will modify the internal _position attribute and should
therefore only be used once all the 'real' labels have been drawn.
"""
# Sanity check.
if not self.shade_missing:
return
# Run through each missing label left in the current page and shade it.
missing = self._used.get(self.page_count, set())
for position in missing:
self._position = position
self._shade_missing_label() | Helper method to shade any missing labels remaining on the current
page. Not intended for external use.
Note that this will modify the internal _position attribute and should
therefore only be used once all the 'real' labels have been drawn. | https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L346-L362 |
bcbnz/pylabels | labels/sheet.py | Sheet._draw_label | def _draw_label(self, obj, count):
"""Helper method to draw on the current label. Not intended for external use.
"""
# Start a drawing for the whole label.
label = Drawing(float(self._lw), float(self._lh))
label.add(self._clip_label)
# And one for the available area (i.e., after padding).
available = Drawing(float(self._dw), float(self._dh))
available.add(self._clip_drawing)
# Call the drawing function.
self.drawing_callable(available, float(self._dw), float(self._dh), obj)
# Render the contents on the label.
available.shift(float(self._lp), float(self._bp))
label.add(available)
# Draw the border if requested.
if self.border:
label.add(self._border)
# Add however many copies we need to.
for i in range(count):
# Find the next available label.
self._next_unused_label()
# Have we been told to skip this page?
if self.pages_to_draw and self.page_count not in self.pages_to_draw:
continue
# Add the label to the page. ReportLab stores the added drawing by
# reference so we have to copy it N times.
thislabel = copy(label)
thislabel.shift(*self._calculate_edges())
self._current_page.add(thislabel) | python | def _draw_label(self, obj, count):
"""Helper method to draw on the current label. Not intended for external use.
"""
# Start a drawing for the whole label.
label = Drawing(float(self._lw), float(self._lh))
label.add(self._clip_label)
# And one for the available area (i.e., after padding).
available = Drawing(float(self._dw), float(self._dh))
available.add(self._clip_drawing)
# Call the drawing function.
self.drawing_callable(available, float(self._dw), float(self._dh), obj)
# Render the contents on the label.
available.shift(float(self._lp), float(self._bp))
label.add(available)
# Draw the border if requested.
if self.border:
label.add(self._border)
# Add however many copies we need to.
for i in range(count):
# Find the next available label.
self._next_unused_label()
# Have we been told to skip this page?
if self.pages_to_draw and self.page_count not in self.pages_to_draw:
continue
# Add the label to the page. ReportLab stores the added drawing by
# reference so we have to copy it N times.
thislabel = copy(label)
thislabel.shift(*self._calculate_edges())
self._current_page.add(thislabel) | Helper method to draw on the current label. Not intended for external use. | https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L364-L400 |
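`_draw_label` invokes `self.drawing_callable(available, width, height, obj)`, so user code supplies a four-argument callable that adds ReportLab shapes to the padded label area. A hedged sketch of such a callable (the positioning and font size are arbitrary choices, not part of the pylabels API):

```python
from reportlab.graphics.shapes import String

def write_name(label, width, height, obj):
    """Drawing callable: receives the available label area and the user object."""
    # Place the object's text near the lower-left corner of the drawable area.
    label.add(String(2, 2, str(obj), fontSize=10))
```

Because the rendered drawing is copied for repeated labels, the callable runs once per object no matter how many copies are requested.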
bcbnz/pylabels | labels/sheet.py | Sheet.add_labels | def add_labels(self, objects, count=1):
"""Add multiple labels to the sheet.
Parameters
----------
objects: iterable
An iterable of the objects to add. Each of these will be passed to
the add_label method. Note that if this is a generator it will be
consumed.
count: positive integer or iterable of positive integers, default 1
The number of copies of each label to add. If a single integer,
that many copies of every label are added. If an iterable, then
each value specifies how many copies of the corresponding label to
add. The iterables are advanced in parallel until one is exhausted;
extra values in the other one are ignored. This means that if there
are fewer count entries than objects, the objects corresponding to
the missing counts will not be added to the sheet.
Note that if this is a generator it will be consumed. Also note
that the drawing function will only be called once for each label
and the results copied for the repeats. If the drawing function
maintains any state internally then using this parameter may break
it.
"""
# If we can convert it to an int, do so and use the itertools.repeat()
# method to create an infinite iterator from it. Otherwise, assume it
# is an iterable or sequence.
try:
count = int(count)
except TypeError:
pass
else:
count = repeat(count)
# If it is not an iterable (e.g., a list or range object),
# create an iterator over it.
if not hasattr(count, 'next') and not hasattr(count, '__next__'):
count = iter(count)
# Go through the objects.
for obj in objects:
# Check we have a count for this one.
try:
thiscount = next(count)
except StopIteration:
break
# Draw it.
self._draw_label(obj, thiscount) | python | def add_labels(self, objects, count=1):
"""Add multiple labels to the sheet.
Parameters
----------
objects: iterable
An iterable of the objects to add. Each of these will be passed to
the add_label method. Note that if this is a generator it will be
consumed.
count: positive integer or iterable of positive integers, default 1
The number of copies of each label to add. If a single integer,
that many copies of every label are added. If an iterable, then
each value specifies how many copies of the corresponding label to
add. The iterables are advanced in parallel until one is exhausted;
extra values in the other one are ignored. This means that if there
are fewer count entries than objects, the objects corresponding to
the missing counts will not be added to the sheet.
Note that if this is a generator it will be consumed. Also note
that the drawing function will only be called once for each label
and the results copied for the repeats. If the drawing function
maintains any state internally then using this parameter may break
it.
"""
# If we can convert it to an int, do so and use the itertools.repeat()
# method to create an infinite iterator from it. Otherwise, assume it
# is an iterable or sequence.
try:
count = int(count)
except TypeError:
pass
else:
count = repeat(count)
# If it is not an iterable (e.g., a list or range object),
# create an iterator over it.
if not hasattr(count, 'next') and not hasattr(count, '__next__'):
count = iter(count)
# Go through the objects.
for obj in objects:
# Check we have a count for this one.
try:
thiscount = next(count)
except StopIteration:
break
# Draw it.
self._draw_label(obj, thiscount) | Add multiple labels to the sheet.
Parameters
----------
objects: iterable
An iterable of the objects to add. Each of these will be passed to
the add_label method. Note that if this is a generator it will be
consumed.
count: positive integer or iterable of positive integers, default 1
The number of copies of each label to add. If a single integer,
that many copies of every label are added. If an iterable, then
each value specifies how many copies of the corresponding label to
add. The iterables are advanced in parallel until one is exhausted;
extra values in the other one are ignored. This means that if there
are fewer count entries than objects, the objects corresponding to
the missing counts will not be added to the sheet.
Note that if this is a generator it will be consumed. Also note
that the drawing function will only be called once for each label
and the results copied for the repeats. If the drawing function
maintains any state internally then using this parameter may break
it. | https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L419-L468 |
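A short usage sketch for `add_labels`, assuming a `Sheet` named `sheet` has already been constructed with a drawing callable such as `write_name` above. The integer form of `count` repeats every label, while the iterable form pairs counts with objects and stops at the shorter of the two, as the docstring describes:

```python
names = ["Alice", "Bob", "Carol"]

# Three copies of every label.
sheet.add_labels(names, count=3)

# Per-object counts: two copies of "Alice", one of "Bob"; "Carol" is skipped
# because the count iterable is exhausted first.
sheet.add_labels(names, count=[2, 1])
```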
bcbnz/pylabels | labels/sheet.py | Sheet.save | def save(self, filelike):
"""Save the file as a PDF.
Parameters
----------
filelike: path or file-like object
The filename or file-like object to save the labels under. Any
existing contents will be overwritten.
"""
# Shade any remaining missing labels if desired.
self._shade_remaining_missing()
# Create a canvas.
canvas = Canvas(filelike, pagesize=self._pagesize)
# Render each created page onto the canvas.
for page in self._pages:
renderPDF.draw(page, canvas, 0, 0)
canvas.showPage()
# Done.
canvas.save() | python | def save(self, filelike):
"""Save the file as a PDF.
Parameters
----------
filelike: path or file-like object
The filename or file-like object to save the labels under. Any
existing contents will be overwritten.
"""
# Shade any remaining missing labels if desired.
self._shade_remaining_missing()
# Create a canvas.
canvas = Canvas(filelike, pagesize=self._pagesize)
# Render each created page onto the canvas.
for page in self._pages:
renderPDF.draw(page, canvas, 0, 0)
canvas.showPage()
# Done.
canvas.save() | Save the file as a PDF.
Parameters
----------
filelike: path or file-like object
The filename or file-like object to save the labels under. Any
existing contents will be overwritten. | https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L470-L492 |
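`save` hands `filelike` straight to ReportLab's `Canvas`, so both a path and a file-like object work. A sketch of the two call styles, again assuming a populated `sheet`:

```python
from io import BytesIO

# Write the PDF directly to disk.
sheet.save("labels.pdf")

# Or render into an in-memory buffer, e.g. to return from a web view.
buffer = BytesIO()
sheet.save(buffer)
pdf_bytes = buffer.getvalue()
```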
bcbnz/pylabels | labels/sheet.py | Sheet.preview | def preview(self, page, filelike, format='png', dpi=72, background_colour=0xFFFFFF):
"""Render a preview image of a page.
Parameters
----------
page: positive integer
Which page to render. Must be in the range [1, page_count]
filelike: path or file-like object
Can be a filename as a string, a Python file object, or something
which behaves like a Python file object. For example, if you were
using the Django web framework, an HttpResponse object could be
passed to render the preview to the browser (as long as you remember
to set the mimetype of the response). If you pass a filename, the
existing contents will be overwritten.
format: string
The image format to use for the preview. ReportLab uses the Python
Imaging Library (PIL) internally, so any PIL format should be
supported.
dpi: positive real
The dots-per-inch to use when rendering.
background_colour: Hex colour specification
What color background to use.
Notes
-----
If you are creating this sheet for a preview only, you can pass the
pages_to_draw parameter to the constructor to avoid the drawing function
being called for all the labels on pages you'll never look at. If you
preview a page you did not tell the sheet to draw, you will get a blank
image.
Raises
------
ValueError:
If the page number is not valid.
"""
# Check the page number.
if page < 1 or page > self.page_count:
raise ValueError("Invalid page number; should be between 1 and {0:d}.".format(self.page_count))
# Shade any remaining missing labels if desired.
self._shade_remaining_missing()
# Rendering to an image (as opposed to a PDF) requires any background
# to have an integer width and height if it is a ReportLab Image
# object. Drawing objects are exempt from this.
oldw, oldh = None, None
if isinstance(self._bgimage, Image):
oldw, oldh = self._bgimage.width, self._bgimage.height
self._bgimage.width = int(oldw) + 1
self._bgimage.height = int(oldh) + 1
# Let ReportLab do the heavy lifting.
renderPM.drawToFile(self._pages[page-1], filelike, format, dpi, background_colour)
# Restore the size of the background image if we changed it.
if oldw:
self._bgimage.width = oldw
self._bgimage.height = oldh | python | def preview(self, page, filelike, format='png', dpi=72, background_colour=0xFFFFFF):
"""Render a preview image of a page.
Parameters
----------
page: positive integer
Which page to render. Must be in the range [1, page_count]
filelike: path or file-like object
Can be a filename as a string, a Python file object, or something
which behaves like a Python file object. For example, if you were
using the Django web framework, an HttpResponse object could be
passed to render the preview to the browser (as long as you remember
to set the mimetype of the response). If you pass a filename, the
existing contents will be overwritten.
format: string
The image format to use for the preview. ReportLab uses the Python
Imaging Library (PIL) internally, so any PIL format should be
supported.
dpi: positive real
The dots-per-inch to use when rendering.
background_colour: Hex colour specification
What color background to use.
Notes
-----
If you are creating this sheet for a preview only, you can pass the
pages_to_draw parameter to the constructor to avoid the drawing function
being called for all the labels on pages you'll never look at. If you
preview a page you did not tell the sheet to draw, you will get a blank
image.
Raises
------
ValueError:
If the page number is not valid.
"""
# Check the page number.
if page < 1 or page > self.page_count:
raise ValueError("Invalid page number; should be between 1 and {0:d}.".format(self.page_count))
# Shade any remaining missing labels if desired.
self._shade_remaining_missing()
# Rendering to an image (as opposed to a PDF) requires any background
# to have an integer width and height if it is a ReportLab Image
# object. Drawing objects are exempt from this.
oldw, oldh = None, None
if isinstance(self._bgimage, Image):
oldw, oldh = self._bgimage.width, self._bgimage.height
self._bgimage.width = int(oldw) + 1
self._bgimage.height = int(oldh) + 1
# Let ReportLab do the heavy lifting.
renderPM.drawToFile(self._pages[page-1], filelike, format, dpi, background_colour)
# Restore the size of the background image if we changed it.
if oldw:
self._bgimage.width = oldw
self._bgimage.height = oldh | Render a preview image of a page.
Parameters
----------
page: positive integer
Which page to render. Must be in the range [1, page_count]
filelike: path or file-like object
Can be a filename as a string, a Python file object, or something
which behaves like a Python file object. For example, if you were
using the Django web framework, an HttpResponse object could be
passed to render the preview to the browser (as long as you remember
to set the mimetype of the response). If you pass a filename, the
existing contents will be overwritten.
format: string
The image format to use for the preview. ReportLab uses the Python
Imaging Library (PIL) internally, so any PIL format should be
supported.
dpi: positive real
The dots-per-inch to use when rendering.
background_colour: Hex colour specification
What color background to use.
Notes
-----
If you are creating this sheet for a preview only, you can pass the
pages_to_draw parameter to the constructor to avoid the drawing function
being called for all the labels on pages you'll never look at. If you
preview a page you did not tell the sheet to draw, you will get a blank
image.
Raises
------
ValueError:
If the page number is not valid. | https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L494-L553 |
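A usage sketch for `preview` based on the signature above; the page index is one-based and must lie within `[1, sheet.page_count]`:

```python
# Render the first page as a 150 dpi PNG preview.
sheet.preview(1, "page1.png", format="png", dpi=150)

# Asking for a page outside the valid range raises ValueError.
try:
    sheet.preview(sheet.page_count + 1, "missing.png")
except ValueError as err:
    print(err)
```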
bcbnz/pylabels | labels/sheet.py | Sheet.preview_string | def preview_string(self, page, format='png', dpi=72, background_colour=0xFFFFFF):
"""Render a preview image of a page as a string.
Parameters
----------
page: positive integer
Which page to render. Must be in the range [1, page_count]
format: string
The image format to use for the preview. ReportLab uses the Python
Imaging Library (PIL) internally, so any PIL format should be
supported.
dpi: positive real
The dots-per-inch to use when rendering.
background_colour: Hex colour specification
What color background to use.
Notes
-----
If you are creating this sheet for a preview only, you can pass the
pages_to_draw parameter to the constructor to avoid the drawing function
being called for all the labels on pages you'll never look at. If you
preview a page you did not tell the sheet to draw, you will get a blank
image.
Raises
------
ValueError:
If the page number is not valid.
"""
# Check the page number.
if page < 1 or page > self.page_count:
raise ValueError("Invalid page number; should be between 1 and {0:d}.".format(self.page_count))
# Shade any remaining missing labels if desired.
self._shade_remaining_missing()
# Rendering to an image (as opposed to a PDF) requires any background
# to have an integer width and height if it is a ReportLab Image
# object. Drawing objects are exempt from this.
oldw, oldh = None, None
if isinstance(self._bgimage, Image):
oldw, oldh = self._bgimage.width, self._bgimage.height
self._bgimage.width = int(oldw) + 1
self._bgimage.height = int(oldh) + 1
# Let ReportLab do the heavy lifting.
s = renderPM.drawToString(self._pages[page-1], format, dpi, background_colour)
# Restore the size of the background image if we changed it.
if oldw:
self._bgimage.width = oldw
self._bgimage.height = oldh
# Done.
return s | python | def preview_string(self, page, format='png', dpi=72, background_colour=0xFFFFFF):
"""Render a preview image of a page as a string.
Parameters
----------
page: positive integer
Which page to render. Must be in the range [1, page_count]
format: string
The image format to use for the preview. ReportLab uses the Python
Imaging Library (PIL) internally, so any PIL format should be
supported.
dpi: positive real
The dots-per-inch to use when rendering.
background_colour: Hex colour specification
What color background to use.
Notes
-----
If you are creating this sheet for a preview only, you can pass the
pages_to_draw parameter to the constructor to avoid the drawing function
being called for all the labels on pages you'll never look at. If you
preview a page you did not tell the sheet to draw, you will get a blank
image.
Raises
------
ValueError:
If the page number is not valid.
"""
# Check the page number.
if page < 1 or page > self.page_count:
raise ValueError("Invalid page number; should be between 1 and {0:d}.".format(self.page_count))
# Shade any remaining missing labels if desired.
self._shade_remaining_missing()
# Rendering to an image (as opposed to a PDF) requires any background
# to have an integer width and height if it is a ReportLab Image
# object. Drawing objects are exempt from this.
oldw, oldh = None, None
if isinstance(self._bgimage, Image):
oldw, oldh = self._bgimage.width, self._bgimage.height
self._bgimage.width = int(oldw) + 1
self._bgimage.height = int(oldh) + 1
# Let ReportLab do the heavy lifting.
s = renderPM.drawToString(self._pages[page-1], format, dpi, background_colour)
# Restore the size of the background image if we changed it.
if oldw:
self._bgimage.width = oldw
self._bgimage.height = oldh
# Done.
return s | Render a preview image of a page as a string.
Parameters
----------
page: positive integer
Which page to render. Must be in the range [1, page_count]
format: string
The image format to use for the preview. ReportLab uses the Python
Imaging Library (PIL) internally, so any PIL format should be
supported.
dpi: positive real
The dots-per-inch to use when rendering.
background_colour: Hex colour specification
What color background to use.
Notes
-----
If you are creating this sheet for a preview only, you can pass the
pages_to_draw parameter to the constructor to avoid the drawing function
being called for all the labels on pages you'll never look at. If you
preview a page you did not tell the sheet to draw, you will get a blank
image.
Raises
------
ValueError:
If the page number is not valid. | https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L555-L610 |
bcbnz/pylabels | labels/specifications.py | Specification._calculate | def _calculate(self):
"""Checks the dimensions of the sheet are valid and consistent.
NB: this is called internally when needed; there should be no need for
user code to call it.
"""
# Check the dimensions are larger than zero.
for dimension in ('_sheet_width', '_sheet_height', '_columns', '_rows', '_label_width', '_label_height'):
if getattr(self, dimension) <= 0:
name = dimension.replace('_', ' ').strip().capitalize()
raise InvalidDimension("{0:s} must be greater than zero.".format(name))
# Check margins / gaps are not smaller than zero if given.
# At the same time, force the values to decimals.
for margin in ('_left_margin', '_column_gap', '_right_margin', '_top_margin', '_row_gap', '_bottom_margin',
'_left_padding', '_right_padding', '_top_padding', '_bottom_padding'):
val = getattr(self, margin)
if val is not None:
if margin in self._autoset:
val = None
else:
val = Decimal(val)
if val < 0:
name = margin.replace('_', ' ').strip().capitalize()
raise InvalidDimension("{0:s} cannot be less than zero.".format(name))
setattr(self, margin, val)
else:
self._autoset.add(margin)
# Check the corner radius.
if self._corner_radius < 0:
raise InvalidDimension("Corner radius cannot be less than zero.")
if self._corner_radius > (self._label_width / 2):
raise InvalidDimension("Corner radius cannot be more than half the label width.")
if self._corner_radius > (self._label_height / 2):
raise InvalidDimension("Corner radius cannot be more than half the label height.")
# If there is no padding, we don't need the padding radius.
if (self._left_padding + self._right_padding + self._top_padding + self._bottom_padding) == 0:
if self._padding_radius != 0:
raise InvalidDimension("Padding radius must be zero if there is no padding.")
else:
if (self._left_padding + self._right_padding) >= self._label_width:
raise InvalidDimension("Sum of horizontal padding must be less than the label width.")
if (self._top_padding + self._bottom_padding) >= self._label_height:
raise InvalidDimension("Sum of vertical padding must be less than the label height.")
if self._padding_radius < 0:
raise InvalidDimension("Padding radius cannot be less than zero.")
# Calculate the amount of spare space.
hspace = self._sheet_width - (self._label_width * self._columns)
vspace = self._sheet_height - (self._label_height * self._rows)
# Cannot fit.
if hspace < 0:
raise InvalidDimension("Labels are too wide to fit on the sheet.")
if vspace < 0:
raise InvalidDimension("Labels are too tall to fit on the sheet.")
# Process the horizontal margins / gaps.
hcount = 1 + self._columns
if self._left_margin is not None:
hspace -= self._left_margin
if hspace < 0:
raise InvalidDimension("Left margin is too wide for the labels to fit on the sheet.")
hcount -= 1
if self._column_gap is not None:
hspace -= ((self._columns - 1) * self._column_gap)
if hspace < 0:
raise InvalidDimension("Column gap is too wide for the labels to fit on the sheet.")
hcount -= (self._columns - 1)
if self._right_margin is not None:
hspace -= self._right_margin
if hspace < 0.01 and hspace > -0.01:
self._right_margin += hspace
hspace = 0
if hspace < 0:
raise InvalidDimension("Right margin is too wide for the labels to fit on the sheet.")
hcount -= 1
# Process the vertical margins / gaps.
vcount = 1 + self._rows
if self._top_margin is not None:
vspace -= self._top_margin
if vspace < 0:
raise InvalidDimension("Top margin is too tall for the labels to fit on the sheet.")
vcount -= 1
if self._row_gap is not None:
vspace -= ((self._rows - 1) * self._row_gap)
if vspace < 0:
raise InvalidDimension("Row gap is too tall for the labels to fit on the sheet.")
vcount -= (self._rows - 1)
if self._bottom_margin is not None:
vspace -= self._bottom_margin
if vspace < 0.01 and vspace > -0.01:
self._bottom_margin += vspace
vspace = 0
if vspace < 0:
raise InvalidDimension("Bottom margin is too tall for the labels to fit on the sheet.")
vcount -= 1
# If all the margins are specified, they must use up all available space.
if hcount == 0 and hspace != 0:
raise InvalidDimension("Not all width used by manually specified margins/gaps; {}mm left.".format(hspace))
if vcount == 0 and vspace != 0:
raise InvalidDimension("Not all height used by manually specified margins/gaps; {}mm left.".format(vspace))
# Split any extra horizontal space and allocate it.
if hcount:
auto_margin = hspace / hcount
for margin in ('_left_margin', '_column_gap', '_right_margin'):
if getattr(self, margin) is None:
setattr(self, margin, auto_margin)
# And allocate any extra vertical space.
if vcount:
auto_margin = vspace / vcount
for margin in ('_top_margin', '_row_gap', '_bottom_margin'):
if getattr(self, margin) is None:
setattr(self, margin, auto_margin) | python | def _calculate(self):
"""Checks the dimensions of the sheet are valid and consistent.
NB: this is called internally when needed; there should be no need for
user code to call it.
"""
# Check the dimensions are larger than zero.
for dimension in ('_sheet_width', '_sheet_height', '_columns', '_rows', '_label_width', '_label_height'):
if getattr(self, dimension) <= 0:
name = dimension.replace('_', ' ').strip().capitalize()
raise InvalidDimension("{0:s} must be greater than zero.".format(name))
# Check margins / gaps are not smaller than zero if given.
# At the same time, force the values to decimals.
for margin in ('_left_margin', '_column_gap', '_right_margin', '_top_margin', '_row_gap', '_bottom_margin',
'_left_padding', '_right_padding', '_top_padding', '_bottom_padding'):
val = getattr(self, margin)
if val is not None:
if margin in self._autoset:
val = None
else:
val = Decimal(val)
if val < 0:
name = margin.replace('_', ' ').strip().capitalize()
raise InvalidDimension("{0:s} cannot be less than zero.".format(name))
setattr(self, margin, val)
else:
self._autoset.add(margin)
# Check the corner radius.
if self._corner_radius < 0:
raise InvalidDimension("Corner radius cannot be less than zero.")
if self._corner_radius > (self._label_width / 2):
raise InvalidDimension("Corner radius cannot be more than half the label width.")
if self._corner_radius > (self._label_height / 2):
raise InvalidDimension("Corner radius cannot be more than half the label height.")
# If there is no padding, we don't need the padding radius.
if (self._left_padding + self._right_padding + self._top_padding + self._bottom_padding) == 0:
if self._padding_radius != 0:
raise InvalidDimension("Padding radius must be zero if there is no padding.")
else:
if (self._left_padding + self._right_padding) >= self._label_width:
raise InvalidDimension("Sum of horizontal padding must be less than the label width.")
if (self._top_padding + self._bottom_padding) >= self._label_height:
raise InvalidDimension("Sum of vertical padding must be less than the label height.")
if self._padding_radius < 0:
raise InvalidDimension("Padding radius cannot be less than zero.")
# Calculate the amount of spare space.
hspace = self._sheet_width - (self._label_width * self._columns)
vspace = self._sheet_height - (self._label_height * self._rows)
# Cannot fit.
if hspace < 0:
raise InvalidDimension("Labels are too wide to fit on the sheet.")
if vspace < 0:
raise InvalidDimension("Labels are too tall to fit on the sheet.")
# Process the horizontal margins / gaps.
hcount = 1 + self._columns
if self._left_margin is not None:
hspace -= self._left_margin
if hspace < 0:
raise InvalidDimension("Left margin is too wide for the labels to fit on the sheet.")
hcount -= 1
if self._column_gap is not None:
hspace -= ((self._columns - 1) * self._column_gap)
if hspace < 0:
raise InvalidDimension("Column gap is too wide for the labels to fit on the sheet.")
hcount -= (self._columns - 1)
if self._right_margin is not None:
hspace -= self._right_margin
if hspace < 0.01 and hspace > -0.01:
self._right_margin += hspace
hspace = 0
if hspace < 0:
raise InvalidDimension("Right margin is too wide for the labels to fit on the sheet.")
hcount -= 1
# Process the vertical margins / gaps.
vcount = 1 + self._rows
if self._top_margin is not None:
vspace -= self._top_margin
if vspace < 0:
raise InvalidDimension("Top margin is too tall for the labels to fit on the sheet.")
vcount -= 1
if self._row_gap is not None:
vspace -= ((self._rows - 1) * self._row_gap)
if vspace < 0:
raise InvalidDimension("Row gap is too tall for the labels to fit on the sheet.")
vcount -= (self._rows - 1)
if self._bottom_margin is not None:
vspace -= self._bottom_margin
if vspace < 0.01 and vspace > -0.01:
self._bottom_margin += vspace
vspace = 0
if vspace < 0:
raise InvalidDimension("Bottom margin is too tall for the labels to fit on the sheet.")
vcount -= 1
# If all the margins are specified, they must use up all available space.
if hcount == 0 and hspace != 0:
raise InvalidDimension("Not all width used by manually specified margins/gaps; {}mm left.".format(hspace))
if vcount == 0 and vspace != 0:
raise InvalidDimension("Not all height used by manually specified margins/gaps; {}mm left.".format(vspace))
# Split any extra horizontal space and allocate it.
if hcount:
auto_margin = hspace / hcount
for margin in ('_left_margin', '_column_gap', '_right_margin'):
if getattr(self, margin) is None:
setattr(self, margin, auto_margin)
# And allocate any extra vertical space.
if vcount:
auto_margin = vspace / vcount
for margin in ('_top_margin', '_row_gap', '_bottom_margin'):
if getattr(self, margin) is None:
setattr(self, margin, auto_margin) | Checks the dimensions of the sheet are valid and consistent.
NB: this is called internally when needed; there should be no need for
user code to call it. | https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/specifications.py#L135-L255 |
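The spare-space arithmetic in `_calculate` is easy to follow with concrete numbers: on an A4 sheet (210 × 297 mm) holding 2 columns × 8 rows of 90 × 25 mm labels with no margins or gaps specified, the horizontal slack is 210 - 2*90 = 30 mm and `hcount` stays at 1 + columns = 3 (left margin, one column gap, right margin), so each is auto-set to 10 mm. A tiny stand-alone sketch of that split, using the same `Decimal` arithmetic as the code above:

```python
from decimal import Decimal

sheet_width, columns, label_width = Decimal(210), 2, Decimal(90)

hspace = sheet_width - label_width * columns   # 30 mm of horizontal slack
hcount = 1 + columns                           # left margin + (columns - 1) gaps + right margin
auto_margin = hspace / hcount                  # 10 mm each when nothing was specified
print(hspace, hcount, auto_margin)             # -> 30 3 10
```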
bcbnz/pylabels | labels/specifications.py | Specification.bounding_boxes | def bounding_boxes(self, mode='fraction', output='dict'):
"""Get the bounding boxes of the labels on a page.
Parameters
----------
mode: 'fraction', 'actual'
If 'fraction', the bounding boxes are expressed as a fraction of the
height and width of the sheet. If 'actual', they are the actual
position of the labels in millimetres from the top-left of the
sheet.
output: 'dict', 'json'
If 'dict', a dictionary with label identifier tuples (row, column)
as keys and a dictionary with 'left', 'right', 'top', and 'bottom'
entries as the values.
If 'json', a JSON encoded string which represents a dictionary with
keys of the string format 'rowxcolumn' and each value being a
bounding box dictionary with 'left', 'right', 'top', and 'bottom'
entries.
Returns
-------
The bounding boxes in the format set by the output parameter.
"""
boxes = {}
# Check the parameters.
if mode not in ('fraction', 'actual'):
raise ValueError("Unknown mode {0}.".format(mode))
if output not in ('dict', 'json'):
raise ValueError("Unknown output {0}.".format(output))
# Iterate over the rows.
for row in range(1, self.rows + 1):
# Top and bottom of all labels in the row.
top = self.top_margin + ((row - 1) * (self.label_height + self.row_gap))
bottom = top + self.label_height
# Now iterate over all columns in this row.
for column in range(1, self.columns + 1):
# Left and right position of this column.
left = self.left_margin + ((column - 1) * (self.label_width + self.column_gap))
right = left + self.label_width
# Output in the appropriate mode format.
if mode == 'fraction':
box = {
'top': top / self.sheet_height,
'bottom': bottom / self.sheet_height,
'left': left / self.sheet_width,
'right': right / self.sheet_width,
}
elif mode == 'actual':
box = {'top': top, 'bottom': bottom, 'left': left, 'right': right}
# Add to the collection.
if output == 'json':
boxes['{0:d}x{1:d}'.format(row, column)] = box
box['top'] = float(box['top'])
box['bottom'] = float(box['bottom'])
box['left'] = float(box['left'])
box['right'] = float(box['right'])
else:
boxes[(row, column)] = box
# Done.
if output == 'json':
return json.dumps(boxes)
return boxes | python | def bounding_boxes(self, mode='fraction', output='dict'):
"""Get the bounding boxes of the labels on a page.
Parameters
----------
mode: 'fraction', 'actual'
If 'fraction', the bounding boxes are expressed as a fraction of the
height and width of the sheet. If 'actual', they are the actual
position of the labels in millimetres from the top-left of the
sheet.
output: 'dict', 'json'
If 'dict', a dictionary with label identifier tuples (row, column)
as keys and a dictionary with 'left', 'right', 'top', and 'bottom'
entries as the values.
If 'json', a JSON encoded string which represents a dictionary with
keys of the string format 'rowxcolumn' and each value being a
bounding box dictionary with 'left', 'right', 'top', and 'bottom'
entries.
Returns
-------
The bounding boxes in the format set by the output parameter.
"""
boxes = {}
# Check the parameters.
if mode not in ('fraction', 'actual'):
raise ValueError("Unknown mode {0}.".format(mode))
if output not in ('dict', 'json'):
raise ValueError("Unknown output {0}.".format(output))
# Iterate over the rows.
for row in range(1, self.rows + 1):
# Top and bottom of all labels in the row.
top = self.top_margin + ((row - 1) * (self.label_height + self.row_gap))
bottom = top + self.label_height
# Now iterate over all columns in this row.
for column in range(1, self.columns + 1):
# Left and right position of this column.
left = self.left_margin + ((column - 1) * (self.label_width + self.column_gap))
right = left + self.label_width
# Output in the appropriate mode format.
if mode == 'fraction':
box = {
'top': top / self.sheet_height,
'bottom': bottom / self.sheet_height,
'left': left / self.sheet_width,
'right': right / self.sheet_width,
}
elif mode == 'actual':
box = {'top': top, 'bottom': bottom, 'left': left, 'right': right}
# Add to the collection.
if output == 'json':
boxes['{0:d}x{1:d}'.format(row, column)] = box
box['top'] = float(box['top'])
box['bottom'] = float(box['bottom'])
box['left'] = float(box['left'])
box['right'] = float(box['right'])
else:
boxes[(row, column)] = box
# Done.
if output == 'json':
return json.dumps(boxes)
return boxes | Get the bounding boxes of the labels on a page.
Parameters
----------
mode: 'fraction', 'actual'
If 'fraction', the bounding boxes are expressed as a fraction of the
height and width of the sheet. If 'actual', they are the actual
position of the labels in millimetres from the top-left of the
sheet.
output: 'dict', 'json'
If 'dict', a dictionary with label identifier tuples (row, column)
as keys and a dictionary with 'left', 'right', 'top', and 'bottom'
entries as the values.
If 'json', a JSON encoded string which represents a dictionary with
keys of the string format 'rowxcolumn' and each value being a
bounding box dictionary with 'left', 'right', 'top', and 'bottom'
entries.
Returns
-------
The bounding boxes in the format set by the output parameter. | https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/specifications.py#L257-L325 |
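A sketch of querying the bounding boxes. The `Specification` constructor arguments are assumed here to be sheet width/height, columns, rows, and label width/height in millimetres, which matches the attributes validated in `_calculate`, and the import path follows the package layout shown in the URLs; treat both as assumptions of this example rather than documented fact:

```python
from labels import Specification

# Assumed argument order: sheet width, sheet height, columns, rows, label width, label height (mm).
spec = Specification(210, 297, 2, 8, 90, 25)

boxes = spec.bounding_boxes(mode="actual", output="dict")
print(boxes[(1, 1)])   # {'top': ..., 'bottom': ..., 'left': ..., 'right': ...} in mm

as_json = spec.bounding_boxes(mode="fraction", output="json")   # keys like "1x1"
```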
estnltk/estnltk | estnltk/wiki/parser.py | templatesCollector | def templatesCollector(text, open, close):
"""leaves related articles and wikitables in place"""
others = []
spans = [i for i in findBalanced(text, open, close)]
spanscopy = copy(spans)
for i in range(len(spans)):
start, end = spans[i]
o = text[start:end]
ol = o.lower()
if 'vaata|' in ol or 'wikitable' in ol:
spanscopy.remove(spans[i])
continue
others.append(o)
text = dropSpans(spanscopy, text)
return text, others | python | def templatesCollector(text, open, close):
"""leaves related articles and wikitables in place"""
others = []
spans = [i for i in findBalanced(text, open, close)]
spanscopy = copy(spans)
for i in range(len(spans)):
start, end = spans[i]
o = text[start:end]
ol = o.lower()
if 'vaata|' in ol or 'wikitable' in ol:
spanscopy.remove(spans[i])
continue
others.append(o)
text = dropSpans(spanscopy, text)
return text, others | leaves related articles and wikitables in place | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wiki/parser.py#L66-L82 |
estnltk/estnltk | estnltk/prettyprinter/prettyprinter.py | assert_legal_arguments | def assert_legal_arguments(kwargs):
"""Assert that PrettyPrinter arguments are correct.
Raises
------
ValueError
In case there are unknown arguments or a single layer is mapped to more than one aesthetic.
"""
seen_layers = set()
for k, v in kwargs.items():
if k not in LEGAL_ARGUMENTS:
raise ValueError('Illegal argument <{0}>!'.format(k))
if k in AESTHETICS:
if v in seen_layers:
raise ValueError('Layer <{0}> mapped for more than a single aesthetic!'.format(v))
seen_layers.add(v)
if k in VALUES:
if not isinstance(v, six.string_types) and not isinstance(v, list):
raise ValueError('Value <{0}> must be either string or list'.format(k))
if isinstance(v, list):
if len(v) == 0:
raise ValueError('Rules cannot be empty list')
for rule_matcher, rule_value in v:
if not isinstance(rule_matcher, six.string_types) or not isinstance(rule_value, six.string_types):
raise ValueError('Rule tuple elements must be strings') | python | def assert_legal_arguments(kwargs):
"""Assert that PrettyPrinter arguments are correct.
Raises
------
ValueError
In case there are unknown arguments or a single layer is mapped to more than one aesthetic.
"""
seen_layers = set()
for k, v in kwargs.items():
if k not in LEGAL_ARGUMENTS:
raise ValueError('Illegal argument <{0}>!'.format(k))
if k in AESTHETICS:
if v in seen_layers:
raise ValueError('Layer <{0}> mapped for more than a single aesthetic!'.format(v))
seen_layers.add(v)
if k in VALUES:
if not isinstance(v, six.string_types) and not isinstance(v, list):
raise ValueError('Value <{0}> must be either string or list'.format(k))
if isinstance(v, list):
if len(v) == 0:
raise ValueError('Rules cannot be empty list')
for rule_matcher, rule_value in v:
if not isinstance(rule_matcher, six.string_types) or not isinstance(rule_value, six.string_types):
raise ValueError('Rule tuple elements must be strings') | Assert that PrettyPrinter arguments are correct.
Raises
------
ValueError
In case there are unknown arguments or a single layer is mapped to more than one aesthetic. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/prettyprinter.py#L17-L41 |
estnltk/estnltk | estnltk/prettyprinter/prettyprinter.py | parse_arguments | def parse_arguments(kwargs):
"""Function that parses PrettyPrinter arguments.
Detects which aesthetics are mapped to which layers
and collects user-provided values.
Parameters
----------
kwargs: dict
The keyword arguments to PrettyPrinter.
Returns
-------
dict, dict
First dictionary is aesthetic to layer mapping.
Second dictionary is aesthetic to user value mapping.
"""
aesthetics = {}
values = {}
for aes in AESTHETICS:
if aes in kwargs:
aesthetics[aes] = kwargs[aes]
val_name = AES_VALUE_MAP[aes]
# map the user-provided CSS value or use the default
values[aes] = kwargs.get(val_name, DEFAULT_VALUE_MAP[aes])
return aesthetics, values | python | def parse_arguments(kwargs):
"""Function that parses PrettyPrinter arguments.
Detects which aesthetics are mapped to which layers
and collects user-provided values.
Parameters
----------
kwargs: dict
The keyword arguments to PrettyPrinter.
Returns
-------
dict, dict
First dictionary is aesthetic to layer mapping.
Second dictionary is aesthetic to user value mapping.
"""
aesthetics = {}
values = {}
for aes in AESTHETICS:
if aes in kwargs:
aesthetics[aes] = kwargs[aes]
val_name = AES_VALUE_MAP[aes]
# map the user-provided CSS value or use the default
values[aes] = kwargs.get(val_name, DEFAULT_VALUE_MAP[aes])
return aesthetics, values | Function that parses PrettyPrinter arguments.
Detects which aesthetics are mapped to which layers
and collects user-provided values.
Parameters
----------
kwargs: dict
The keyword arguments to PrettyPrinter.
Returns
-------
dict, dict
First dictionary is aesthetic to layer mapping.
Second dictionary is aesthetic to user value mapping. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/prettyprinter.py#L44-L68 |
estnltk/estnltk | estnltk/prettyprinter/prettyprinter.py | PrettyPrinter.css | def css(self):
"""Returns
-------
str
The CSS.
"""
css_list = [DEFAULT_MARK_CSS]
for aes in self.aesthetics:
css_list.extend(get_mark_css(aes, self.values[aes]))
#print('\n'.join(css_list))
return '\n'.join(css_list) | python | def css(self):
"""Returns
-------
str
The CSS.
"""
css_list = [DEFAULT_MARK_CSS]
for aes in self.aesthetics:
css_list.extend(get_mark_css(aes, self.values[aes]))
#print('\n'.join(css_list))
return '\n'.join(css_list) | Returns
-------
str
The CSS. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/prettyprinter.py#L109-L119 |
estnltk/estnltk | estnltk/prettyprinter/prettyprinter.py | PrettyPrinter.render | def render(self, text, add_header=False):
"""Render the HTML.
Parameters
----------
add_header: boolean (default: False)
If True, add HTML5 header and footer.
Returns
-------
str
The rendered HTML.
"""
html = mark_text(text, self.aesthetics, self.rules)
html = html.replace('\n', '<br/>')
if add_header:
html = '\n'.join([HEADER, self.css, MIDDLE, html, FOOTER])
#print('\n'.join((HEADER, self.css, MIDDLE, html, FOOTER)))
return html | python | def render(self, text, add_header=False):
"""Render the HTML.
Parameters
----------
add_header: boolean (default: False)
If True, add HTML5 header and footer.
Returns
-------
str
The rendered HTML.
"""
html = mark_text(text, self.aesthetics, self.rules)
html = html.replace('\n', '<br/>')
if add_header:
html = '\n'.join([HEADER, self.css, MIDDLE, html, FOOTER])
#print('\n'.join((HEADER, self.css, MIDDLE, html, FOOTER)))
return html | Render the HTML.
Parameters
----------
add_header: boolean (default: False)
If True, add HTML5 header and footer.
Returns
-------
str
The rendered HTML. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/prettyprinter.py#L121-L140 |
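A hedged sketch of driving `PrettyPrinter` end to end. The `color` aesthetic and the estnltk 1.x `Text.tokenize_words()` call are assumptions as far as this excerpt goes; the general flow (map an aesthetic to a layer, then call `render`) is what the code above implements:

```python
from estnltk import Text
from estnltk.prettyprinter import PrettyPrinter

text = Text("Tere, maailm!")
text.tokenize_words()                      # assumed estnltk 1.x call that creates the 'words' layer

pp = PrettyPrinter(color="words")          # assumed: 'color' is a legal aesthetic mapped to the 'words' layer
html = pp.render(text, add_header=True)    # standalone HTML5 page, including pp.css in the header
```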
estnltk/estnltk | estnltk/estner/crfsuiteutil.py | Trainer.train | def train(self, nerdocs, mode_filename):
"""Train a CRF model using given documents.
Parameters
----------
nerdocs: list of estnltk.estner.ner.Document.
The documents for model training.
mode_filename: str
The filename under which to save the model.
"""
trainer = pycrfsuite.Trainer(algorithm=self.algorithm,
params={'c2': self.c2},
verbose=self.verbose)
for doc in nerdocs:
for snt in doc.sentences:
xseq = [t.feature_list() for t in snt]
yseq = [t.label for t in snt]
trainer.append(xseq, yseq)
trainer.train(mode_filename) | python | def train(self, nerdocs, mode_filename):
"""Train a CRF model using given documents.
Parameters
----------
nerdocs: list of estnltk.estner.ner.Document.
The documents for model training.
mode_filename: str
The filename under which to save the model.
"""
trainer = pycrfsuite.Trainer(algorithm=self.algorithm,
params={'c2': self.c2},
verbose=self.verbose)
for doc in nerdocs:
for snt in doc.sentences:
xseq = [t.feature_list() for t in snt]
yseq = [t.label for t in snt]
trainer.append(xseq, yseq)
trainer.train(mode_filename) | Train a CRF model using given documents.
Parameters
----------
nerdocs: list of estnltk.estner.ner.Document.
The documents for model training.
mode_filename: str
The filename under which to save the model. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/estner/crfsuiteutil.py#L28-L49 |
estnltk/estnltk | estnltk/estner/crfsuiteutil.py | Tagger.tag | def tag(self, nerdoc):
"""Tag the given document.
Parameters
----------
nerdoc: estnltk.estner.Document
The document to be tagged.
Returns
-------
labels: list of lists of str
Predicted token Labels for each sentence in the document
"""
labels = []
for snt in nerdoc.sentences:
xseq = [t.feature_list() for t in snt]
yseq = self.tagger.tag(xseq)
labels.append(yseq)
return labels | python | def tag(self, nerdoc):
"""Tag the given document.
Parameters
----------
nerdoc: estnltk.estner.Document
The document to be tagged.
Returns
-------
labels: list of lists of str
Predicted token Labels for each sentence in the document
"""
labels = []
for snt in nerdoc.sentences:
xseq = [t.feature_list() for t in snt]
yseq = self.tagger.tag(xseq)
labels.append(yseq)
return labels | Tag the given document.
Parameters
----------
nerdoc: estnltk.estner.Document
The document to be tagged.
Returns
-------
labels: list of lists of str
Predicted token Labels for each sentence in the document | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/estner/crfsuiteutil.py#L69-L87 |
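A heavily hedged sketch of the train-then-tag round trip. The `Trainer` and `Tagger` constructor arguments are not shown in this excerpt, so the calls below are assumptions; `nerdocs` stands for a list of `estnltk.estner.ner.Document` objects prepared elsewhere:

```python
from estnltk.estner.crfsuiteutil import Trainer, Tagger

trainer = Trainer()                       # assumed default constructor (algorithm, c2, verbose)
trainer.train(nerdocs, "ner.model")       # writes a pycrfsuite model file

tagger = Tagger("ner.model")              # assumed: the tagger is constructed from the model file
labels = tagger.tag(nerdocs[0])           # one list of predicted labels per sentence
```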
estnltk/estnltk | estnltk/wiki/wikiextra.py | balancedSlicer | def balancedSlicer(text, openDelim='[', closeDelim=']'):
"""
Assuming that text contains a properly balanced expression using
:param openDelim: as opening delimiters and
:param closeDelim: as closing delimiters.
:return: text between the delimiters
"""
openbr = 0
cur = 0
for char in text:
cur +=1
if char == openDelim:
openbr += 1
if char == closeDelim:
openbr -= 1
if openbr == 0:
break
return text[:cur], cur | python | def balancedSlicer(text, openDelim='[', closeDelim=']'):
"""
Assuming that text contains a properly balanced expression using
:param openDelim: as opening delimiters and
:param closeDelim: as closing delimiters.
:return: text between the delimiters
"""
openbr = 0
cur = 0
for char in text:
cur +=1
if char == openDelim:
openbr += 1
if char == closeDelim:
openbr -= 1
if openbr == 0:
break
return text[:cur], cur | Assuming that text contains a properly balanced expression using
:param openDelim: as opening delimiters and
:param closeDelim: as closing delimiters.
:return: text between the delimiters | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wiki/wikiextra.py#L7-L24 |
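The behaviour of `balancedSlicer` can be checked by hand: it walks the string, counts opening and closing delimiters, and stops as soon as the count returns to zero, returning the balanced prefix and its length:

```python
from estnltk.wiki.wikiextra import balancedSlicer

text = "[[Eesti|Estonia]] on riik Põhja-Euroopas."
prefix, length = balancedSlicer(text, openDelim="[", closeDelim="]")
print(prefix)   # [[Eesti|Estonia]]
print(length)   # 17
```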
estnltk/estnltk | estnltk/wiki/convert.py | json_2_text | def json_2_text(inp, out, verbose = False):
"""Convert a Wikipedia article to Text object.
Concatenates the sections in wikipedia file and rearranges other information so it
can be interpreted as a Text object.
Links and other elements with start and end positions are annotated
as layers.
Parameters
----------
inp: directory of parsed et.wikipedia articles in json format
out: output directory of .txt files
verbose: if True, prints every article title and total count of converted files
if False prints every 50th count
Returns
-------
estnltk.text.Text
The Text object.
"""
for root, dirs, filenames in os.walk(inp):
for f in filenames:
log = codecs.open(os.path.join(root, f), 'r')
j_obj = json.load(log)
j_obj = json_format(j_obj)
#not needed, cause the json_format takes care of the right structuring
#text = Text(j_obj)
textWriter(j_obj, out, verbose) | python | def json_2_text(inp, out, verbose = False):
"""Convert a Wikipedia article to Text object.
Concatenates the sections in wikipedia file and rearranges other information so it
can be interpreted as a Text object.
Links and other elements with start and end positions are annotated
as layers.
Parameters
----------
inp: directory of parsed et.wikipedia articles in json format
out: output directory of .txt files
verbose: if True, prints every article title and total count of converted files
if False prints every 50th count
Returns
-------
estnltk.text.Text
The Text object.
"""
for root, dirs, filenames in os.walk(inp):
for f in filenames:
log = codecs.open(os.path.join(root, f), 'r')
j_obj = json.load(log)
j_obj = json_format(j_obj)
#not needed, cause the json_format takes care of the right structuring
#text = Text(j_obj)
textWriter(j_obj, out, verbose) | Convert a Wikipedia article to a Text object.
Concatenates the sections in wikipedia file and rearranges other information so it
can be interpreted as a Text object.
Links and other elements with start and end positions are annotated
as layers.
Parameters
----------
inp: directory of parsed et.wikipedia articles in json format
out: output directory of .txt files
verbose: if True, prints every article title and total count of converted files
if False prints every 50th count
Returns
-------
estnltk.text.Text
The Text object. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wiki/convert.py#L95-L126 |
estnltk/estnltk | estnltk/grammar/match.py | concatenate_matches | def concatenate_matches(a, b, text, name):
"""Concatenate matches a and b.
All submatches will be copied to result."""
match = Match(a.start, b.end, text[a.start:b.end], name)
for k, v in a.matches.items():
match.matches[k] = v
for k, v in b.matches.items():
match.matches[k] = v
if a.name is not None:
aa = copy(a)
del aa[MATCHES]
match.matches[a.name] = aa
if b.name is not None:
bb = copy(b)
del bb[MATCHES]
match.matches[b.name] = bb
return match | python | def concatenate_matches(a, b, text, name):
"""Concatenate matches a and b.
All submatches will be copied to result."""
match = Match(a.start, b.end, text[a.start:b.end], name)
for k, v in a.matches.items():
match.matches[k] = v
for k, v in b.matches.items():
match.matches[k] = v
if a.name is not None:
aa = copy(a)
del aa[MATCHES]
match.matches[a.name] = aa
if b.name is not None:
bb = copy(b)
del bb[MATCHES]
match.matches[b.name] = bb
return match | Concatenate matches a and b.
All submatches will be copied to result. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/grammar/match.py#L81-L97 |
estnltk/estnltk | estnltk/grammar/match.py | Match.dict | def dict(self):
"""Dictionary representing this match and all child symbol matches."""
res = copy(self)
if MATCHES in res:
del res[MATCHES]
if NAME in res:
del res[NAME]
res = {self.name: res}
for k, v in self.matches.items():
res[k] = v
if NAME in res[k]:
del res[k][NAME]
return res | python | def dict(self):
"""Dictionary representing this match and all child symbol matches."""
res = copy(self)
if MATCHES in res:
del res[MATCHES]
if NAME in res:
del res[NAME]
res = {self.name: res}
for k, v in self.matches.items():
res[k] = v
if NAME in res[k]:
del res[k][NAME]
return res | Dictionary representing this match and all child symbol matches. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/grammar/match.py#L54-L66 |
estnltk/estnltk | estnltk/vabamorf/morf.py | regex_from_markers | def regex_from_markers(markers):
"""Given a string of characters, construct a regex that matches them.
Parameters
----------
markers: str
The list of string containing the markers
Returns
-------
regex
The regular expression matching the given markers.
"""
return re.compile('|'.join([re.escape(c) for c in markers])) | python | def regex_from_markers(markers):
"""Given a string of characters, construct a regex that matches them.
Parameters
----------
markers: str
The list of string containing the markers
Returns
-------
regex
The regular expression matching the given markers.
"""
return re.compile('|'.join([re.escape(c) for c in markers])) | Given a string of characters, construct a regex that matches them.
Parameters
----------
markers: str
The list of string containing the markers
Returns
-------
regex
The regular expression matching the given markers. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L45-L58 |
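`regex_from_markers` simply escapes each character and joins the alternatives, so the compiled pattern matches any single marker. A quick check; the marker string here is illustrative and not necessarily the module's actual marker set:

```python
from estnltk.vabamorf.morf import regex_from_markers

pattern = regex_from_markers("~?]<")          # illustrative marker characters
print(pattern.sub("", "k?at]us"))             # -> katus
```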
estnltk/estnltk | estnltk/vabamorf/morf.py | convert | def convert(word):
"""This method converts given `word` to UTF-8 encoding and `bytes` type for the
SWIG wrapper."""
if six.PY2:
if isinstance(word, unicode):
return word.encode('utf-8')
else:
return word.decode('utf-8').encode('utf-8') # make sure it is real utf8, otherwise complain
else: # ==> Py3
if isinstance(word, bytes):
return word.decode('utf-8') # bytes must be in utf8
return word | python | def convert(word):
"""This method converts given `word` to UTF-8 encoding and `bytes` type for the
SWIG wrapper."""
if six.PY2:
if isinstance(word, unicode):
return word.encode('utf-8')
else:
return word.decode('utf-8').encode('utf-8') # make sure it is real utf8, otherwise complain
else: # ==> Py3
if isinstance(word, bytes):
return word.decode('utf-8') # bytes must be in utf8
return word | This method converts given `word` to UTF-8 encoding and `bytes` type for the
SWIG wrapper. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L64-L75 |
estnltk/estnltk | estnltk/vabamorf/morf.py | postprocess_result | def postprocess_result(morphresult, trim_phonetic, trim_compound):
"""Postprocess vabamorf wrapper output."""
word, analysis = morphresult
return {
'text': deconvert(word),
'analysis': [postprocess_analysis(a, trim_phonetic, trim_compound) for a in analysis]
} | python | def postprocess_result(morphresult, trim_phonetic, trim_compound):
"""Postprocess vabamorf wrapper output."""
word, analysis = morphresult
return {
'text': deconvert(word),
'analysis': [postprocess_analysis(a, trim_phonetic, trim_compound) for a in analysis]
} | Postprocess vabamorf wrapper output. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L302-L308 |
estnltk/estnltk | estnltk/vabamorf/morf.py | trim_phonetics | def trim_phonetics(root):
"""Function that trims phonetic markup from the root.
Parameters
----------
root: str
The string to remove the phonetic markup.
Returns
-------
str
The string with phonetic markup removed.
"""
global phonetic_markers
global phonetic_regex
if root in phonetic_markers:
return root
else:
return phonetic_regex.sub('', root) | python | def trim_phonetics(root):
"""Function that trims phonetic markup from the root.
Parameters
----------
root: str
The string to remove the phonetic markup.
Returns
-------
str
The string with phonetic markup removed.
"""
global phonetic_markers
global phonetic_regex
if root in phonetic_markers:
return root
else:
return phonetic_regex.sub('', root) | Function that trims phonetic markup from the root.
Parameters
----------
root: str
The string to remove the phonetic markup.
Returns
-------
str
The string with phonetic markup removed. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L330-L348 |
estnltk/estnltk | estnltk/vabamorf/morf.py | get_root | def get_root(root, phonetic, compound):
"""Get the root form without markers.
Parameters
----------
root: str
The word root form.
phonetic: boolean
If True, add phonetic information to the root forms.
compound: boolean
if True, add compound word markers to root forms.
"""
global compound_regex
if not phonetic:
root = trim_phonetics(root)
if not compound:
root = trim_compounds(root)
return root | python | def get_root(root, phonetic, compound):
"""Get the root form without markers.
Parameters
----------
root: str
The word root form.
phonetic: boolean
If True, add phonetic information to the root forms.
compound: boolean
if True, add compound word markers to root forms.
"""
global compound_regex
if not phonetic:
root = trim_phonetics(root)
if not compound:
root = trim_compounds(root)
return root | Get the root form without markers.
Parameters
----------
root: str
The word root form.
phonetic: boolean
If True, add phonetic information to the root forms.
compound: boolean
if True, add compound word markers to root forms. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L370-L387 |
estnltk/estnltk | estnltk/vabamorf/morf.py | get_group_tokens | def get_group_tokens(root):
"""Function to extract tokens in hyphenated groups (saunameheks-tallimeheks).
Parameters
----------
root: str
The root form.
Returns
-------
list of (list of str)
List of grouped root tokens.
"""
global all_markers
if root in all_markers or root in ['-', '_']: # special case
return [[root]]
groups = []
for group in root.split('-'):
toks = [trim_phonetics(trim_compounds(tok)) for tok in group.split('_')]
groups.append(toks)
return groups | python | def get_group_tokens(root):
"""Function to extract tokens in hyphenated groups (saunameheks-tallimeheks).
Parameters
----------
root: str
The root form.
Returns
-------
list of (list of str)
List of grouped root tokens.
"""
global all_markers
if root in all_markers or root in ['-', '_']: # special case
return [[root]]
groups = []
for group in root.split('-'):
toks = [trim_phonetics(trim_compounds(tok)) for tok in group.split('_')]
groups.append(toks)
return groups | Function to extract tokens in hyphenated groups (saunameheks-tallimeheks).
Parameters
----------
root: str
The root form.
Returns
-------
list of (list of str)
List of grouped root tokens. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L390-L410 |
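A small usage sketch of the grouping behaviour described above (import path assumed from the file layout): hyphens separate groups, underscores separate compound parts within a group.
from estnltk.vabamorf.morf import get_group_tokens

print(get_group_tokens('sauna_mees-talli_mees'))
# expected: [['sauna', 'mees'], ['talli', 'mees']]
print(get_group_tokens('-'))
# expected: [['-']]   (special case: a bare marker stays as-is)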
estnltk/estnltk | estnltk/vabamorf/morf.py | fix_spelling | def fix_spelling(words, join=True, joinstring=' '):
"""Simple function for quickly correcting misspelled words.
Parameters
----------
words: list of str or str
Either a list of pretokenized words or a string. In case of a string, it will be split using
default behaviour of string.split() function.
join: boolean (default: True)
Should we join the list of words into a single string.
joinstring: str (default: ' ')
The string that will be used to join together the fixed words.
Returns
-------
str
In case join is True
list of str
In case join is False.
"""
return Vabamorf.instance().fix_spelling(words, join, joinstring) | python | def fix_spelling(words, join=True, joinstring=' '):
"""Simple function for quickly correcting misspelled words.
Parameters
----------
words: list of str or str
Either a list of pretokenized words or a string. In case of a string, it will be split using
default behaviour of string.split() function.
join: boolean (default: True)
Should we join the list of words into a single string.
joinstring: str (default: ' ')
The string that will be used to join together the fixed words.
Returns
-------
str
In case join is True
list of str
In case join is False.
"""
return Vabamorf.instance().fix_spelling(words, join, joinstring) | Simple function for quickly correcting misspelled words.
Parameters
----------
words: list of str or str
Either a list of pretokenized words or a string. In case of a string, it will be split using
default behaviour of string.split() function.
join: boolean (default: True)
Should we join the list of words into a single string.
joinstring: str (default: ' ')
The string that will be used to join together the fixed words.
Returns
-------
str
In case join is True
list of str
In case join is False. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L508-L528 |
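A quick usage sketch, assuming the module import path estnltk.vabamorf.morf; the exact corrections depend on the speller lexicon bundled with Vabamorf.
from estnltk.vabamorf.morf import fix_spelling

print(fix_spelling('üks kaks kolmm'))
# e.g. 'üks kaks kolm' (misspelled tokens are replaced by their first suggestion)
print(fix_spelling(['üks', 'kaks', 'kolmm'], join=False))
# e.g. ['üks', 'kaks', 'kolm']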
estnltk/estnltk | estnltk/vabamorf/morf.py | synthesize | def synthesize(lemma, form, partofspeech='', hint='', guess=True, phonetic=False):
"""Synthesize a single word based on given morphological attributes.
Note that spellchecker does not respect pre-tokenized words and concatenates
token sequences such as "New York".
Parameters
----------
lemma: str
The lemma of the word(s) to be synthesized.
form: str
The form of the word(s) to be synthesized.
partofspeech: str
Part-of-speech.
hint: str
Hint.
guess: boolean (default: True)
Use heuristics when synthesizing unknown words.
phonetic: boolean (default: False)
Add phonetic markup to synthesized words.
Returns
-------
list
List of synthesized words.
"""
return Vabamorf.instance().synthesize(lemma, form, partofspeech, hint, guess, phonetic) | python | def synthesize(lemma, form, partofspeech='', hint='', guess=True, phonetic=False):
"""Synthesize a single word based on given morphological attributes.
Note that spellchecker does not respect pre-tokenized words and concatenates
token sequences such as "New York".
Parameters
----------
lemma: str
The lemma of the word(s) to be synthesized.
form: str
The form of the word(s) to be synthesized.
partofspeech: str
Part-of-speech.
hint: str
Hint.
guess: boolean (default: True)
Use heuristics when synthesizing unknown words.
phonetic: boolean (default: False)
Add phonetic markup to synthesized words.
Returns
-------
list
List of synthesized words.
"""
return Vabamorf.instance().synthesize(lemma, form, partofspeech, hint, guess, phonetic) | Synthesize a single word based on given morphological attributes.
Note that spellchecker does not respect pre-tokenized words and concatenates
token sequences such as "New York".
Parameters
----------
lemma: str
The lemma of the word(s) to be synthesized.
form: str
The form of the word(s) to be synthesized.
partofspeech: str
Part-of-speech.
hint: str
Hint.
guess: boolean (default: True)
Use heuristics when synthesizing unknown words.
phonetic: boolean (default: False)
Add phonetic markup to synthesized words.
Returns
-------
list
List of synthesized words. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L531-L557 |
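A usage sketch (import path assumed as above; the generated forms depend on the bundled lexicon).
from estnltk.vabamorf.morf import synthesize

# Plural partitive forms of the noun 'pood' (shop).
print(synthesize('pood', 'pl p', partofspeech='S'))
# e.g. ['poode', 'poodisid']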
estnltk/estnltk | estnltk/vabamorf/morf.py | Vabamorf.instance | def instance():
"""Return an PyVabamorf instance.
It returns the previously initialized instance or creates a new
one if nothing exists. Also creates new instance in case the
process has been forked.
"""
if not hasattr(Vabamorf, 'pid') or Vabamorf.pid != os.getpid():
Vabamorf.pid = os.getpid()
Vabamorf.morf = Vabamorf()
return Vabamorf.morf | python | def instance():
"""Return an PyVabamorf instance.
It returns the previously initialized instance or creates a new
one if nothing exists. Also creates new instance in case the
process has been forked.
"""
if not hasattr(Vabamorf, 'pid') or Vabamorf.pid != os.getpid():
Vabamorf.pid = os.getpid()
Vabamorf.morf = Vabamorf()
return Vabamorf.morf | Return a PyVabamorf instance.
It returns the previously initialized instance or creates a new
one if nothing exists. Also creates a new instance in case the
process has been forked. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L101-L111 |
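The PID check above is what makes the singleton fork-safe: within one process the same instance is reused, while a forked child transparently gets a fresh one. A minimal sketch, import path assumed:
from estnltk.vabamorf.morf import Vabamorf

morf1 = Vabamorf.instance()
morf2 = Vabamorf.instance()
assert morf1 is morf2   # same process -> same cached instance
# After os.fork(), Vabamorf.pid no longer matches os.getpid() in the child,
# so the next instance() call there builds a new Vabamorf object.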
estnltk/estnltk | estnltk/vabamorf/morf.py | Vabamorf.analyze | def analyze(self, words, **kwargs):
"""Perform morphological analysis and disambiguation of given text.
Parameters
----------
words: list of str or str
Either a list of pretokenized words or a string. In case of a string, it will be split using
default behaviour of string.split() function.
disambiguate: boolean (default: True)
Disambiguate the output and remove inconsistent analyses.
guess: boolean (default: True)
Use guessing in case of unknown words
propername: boolean (default: True)
Perform additional analysis of proper names.
compound: boolean (default: True)
Add compound word markers to root forms.
phonetic: boolean (default: False)
Add phonetic information to root forms.
Returns
-------
list of (list of dict)
List of analysis for each word in input.
"""
# if input is a string, then tokenize it
if isinstance(words, six.string_types):
words = words.split()
# convert words to native strings
words = [convert(w) for w in words]
morfresults = self._morf.analyze(
vm.StringVector(words),
kwargs.get('disambiguate', True),
kwargs.get('guess', True),
True, # phonetic and compound information
kwargs.get('propername', True))
trim_phonetic = kwargs.get('phonetic', False)
trim_compound = kwargs.get('compound', True)
return [postprocess_result(mr, trim_phonetic, trim_compound) for mr in morfresults] | python | def analyze(self, words, **kwargs):
"""Perform morphological analysis and disambiguation of given text.
Parameters
----------
words: list of str or str
Either a list of pretokenized words or a string. In case of a string, it will be split using
default behaviour of string.split() function.
disambiguate: boolean (default: True)
Disambiguate the output and remove inconsistent analyses.
guess: boolean (default: True)
Use guessing in case of unknown words
propername: boolean (default: True)
Perform additional analysis of proper names.
compound: boolean (default: True)
Add compound word markers to root forms.
phonetic: boolean (default: False)
Add phonetic information to root forms.
Returns
-------
list of (list of dict)
List of analysis for each word in input.
"""
# if input is a string, then tokenize it
if isinstance(words, six.string_types):
words = words.split()
# convert words to native strings
words = [convert(w) for w in words]
morfresults = self._morf.analyze(
vm.StringVector(words),
kwargs.get('disambiguate', True),
kwargs.get('guess', True),
True, # phonetic and compound information
kwargs.get('propername', True))
trim_phonetic = kwargs.get('phonetic', False)
trim_compound = kwargs.get('compound', True)
return [postprocess_result(mr, trim_phonetic, trim_compound) for mr in morfresults] | Perform morphological analysis and disambiguation of given text.
Parameters
----------
words: list of str or str
Either a list of pretokenized words or a string. In case of a string, it will be split using
default behaviour of string.split() function.
disambiguate: boolean (default: True)
Disambiguate the output and remove inconsistent analyses.
guess: boolean (default: True)
Use guessing in case of unknown words
propername: boolean (default: True)
Perform additional analysis of proper names.
compound: boolean (default: True)
Add compound word markers to root forms.
phonetic: boolean (default: False)
Add phonetic information to root forms.
Returns
-------
list of (list of dict)
List of analysis for each word in input. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L129-L169 |
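A usage sketch of the analyzer via the shared instance (import path assumed; the concrete analyses depend on the lexicon version):
from estnltk.vabamorf.morf import Vabamorf

morf = Vabamorf.instance()
for word in morf.analyze('Tere maailm', disambiguate=True, guess=True):
    # each entry is {'text': ..., 'analysis': [...]} as built by postprocess_result
    print(word['text'], word['analysis'])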
estnltk/estnltk | estnltk/vabamorf/morf.py | Vabamorf.disambiguate | def disambiguate(self, words):
"""Disambiguate previously analyzed words.
Parameters
----------
words: list of dict
A sentence of words.
Returns
-------
list of dict
Sentence of disambiguated words.
"""
words = vm.SentenceAnalysis([as_wordanalysis(w) for w in words])
disambiguated = self._morf.disambiguate(words)
return [postprocess_result(mr, False, True) for mr in disambiguated] | python | def disambiguate(self, words):
"""Disambiguate previously analyzed words.
Parameters
----------
words: list of dict
A sentence of words.
Returns
-------
list of dict
Sentence of disambiguated words.
"""
words = vm.SentenceAnalysis([as_wordanalysis(w) for w in words])
disambiguated = self._morf.disambiguate(words)
return [postprocess_result(mr, False, True) for mr in disambiguated] | Disambiguate previously analyzed words.
Parameters
----------
words: list of dict
A sentence of words.
Returns
-------
list of dict
Sentence of disambiguated words. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L171-L186 |
estnltk/estnltk | estnltk/vabamorf/morf.py | Vabamorf.spellcheck | def spellcheck(self, words, suggestions=True):
"""Spellcheck given sentence.
Note that spellchecker does not respect pre-tokenized words and concatenates
token sequences such as "New York".
Parameters
----------
words: list of str or str
Either a list of pretokenized words or a string. In case of a string, it will be split using
default behaviour of string.split() function.
suggestions: boolean (default: True)
Add spell suggestions to result.
Returns
-------
list of dict
Each dictionary contains the following values:
'text': the original word
'spelling': True, if the word was spelled correctly
'suggestions': list of suggested strings in case of incorrect spelling
"""
if isinstance(words, six.string_types):
words = words.split()
# convert words to native strings
words = [convert(w) for w in words]
spellresults = self._morf.spellcheck(words, suggestions)
results = []
for spellresult in spellresults:
suggestions = [deconvert(s) for s in spellresult.suggestions]
result = {
'text': deconvert(spellresult.word),
'spelling': spellresult.spelling,
'suggestions': suggestions
}
results.append(result)
return results | python | def spellcheck(self, words, suggestions=True):
"""Spellcheck given sentence.
Note that spellchecker does not respect pre-tokenized words and concatenates
token sequences such as "New York".
Parameters
----------
words: list of str or str
Either a list of pretokenized words or a string. In case of a string, it will be split using
default behaviour of string.split() function.
suggestions: boolean (default: True)
Add spell suggestions to result.
Returns
-------
list of dict
Each dictionary contains the following values:
'text': the original word
'spelling': True, if the word was spelled correctly
'suggestions': list of suggested strings in case of incorrect spelling
"""
if isinstance(words, six.string_types):
words = words.split()
# convert words to native strings
words = [convert(w) for w in words]
spellresults = self._morf.spellcheck(words, suggestions)
results = []
for spellresult in spellresults:
suggestions = [deconvert(s) for s in spellresult.suggestions]
result = {
'text': deconvert(spellresult.word),
'spelling': spellresult.spelling,
'suggestions': suggestions
}
results.append(result)
return results | Spellcheck given sentence.
Note that spellchecker does not respect pre-tokenized words and concatenates
token sequences such as "New York".
Parameters
----------
words: list of str or str
Either a list of pretokenized words or a string. In case of a string, it will be split using
default behaviour of string.split() function.
suggestions: boolean (default: True)
Add spell suggestions to result.
Returns
-------
list of dict
Each dictionary contains the following values:
'text': the original word
'spelling': True, if the word was spelled correctly
'suggestions': list of suggested strings in case of incorrect spelling | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L188-L226 |
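A usage sketch (import path assumed; the suggestions depend on the speller lexicon):
from estnltk.vabamorf.morf import Vabamorf

morf = Vabamorf.instance()
for token in morf.spellcheck('üks kaks kolmm', suggestions=True):
    print(token['text'], token['spelling'], token['suggestions'])
# e.g.  üks True []
#       kaks True []
#       kolmm False ['kolm', ...]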
estnltk/estnltk | estnltk/vabamorf/morf.py | Vabamorf.fix_spelling | def fix_spelling(self, words, join=True, joinstring=' '):
"""Simple function for quickly correcting misspelled words.
Parameters
----------
words: list of str or str
Either a list of pretokenized words or a string. In case of a string, it will be split using
default behaviour of string.split() function.
join: boolean (default: True)
Should we join the list of words into a single string.
joinstring: str (default: ' ')
The string that will be used to join together the fixed words.
Returns
-------
str
In case join is True
list of str
In case join is False.
"""
fixed_words = []
for word in self.spellcheck(words, suggestions=True):
if word['spelling']:
fixed_words.append(word['text'])
else:
suggestions = word['suggestions']
if len(suggestions) > 0:
fixed_words.append(suggestions[0])
else:
fixed_words.append(word['text'])
if join:
return joinstring.join(fixed_words)
else:
return fixed_words | python | def fix_spelling(self, words, join=True, joinstring=' '):
"""Simple function for quickly correcting misspelled words.
Parameters
----------
words: list of str or str
Either a list of pretokenized words or a string. In case of a string, it will be split using
default behaviour of string.split() function.
join: boolean (default: True)
Should we join the list of words into a single string.
joinstring: str (default: ' ')
The string that will be used to join together the fixed words.
Returns
-------
str
In case join is True
list of str
In case join is False.
"""
fixed_words = []
for word in self.spellcheck(words, suggestions=True):
if word['spelling']:
fixed_words.append(word['text'])
else:
suggestions = word['suggestions']
if len(suggestions) > 0:
fixed_words.append(suggestions[0])
else:
fixed_words.append(word['text'])
if join:
return joinstring.join(fixed_words)
else:
return fixed_words | Simple function for quickly correcting misspelled words.
Parameters
----------
words: list of str or str
Either a list of pretokenized words or a string. In case of a string, it will be splitted using
default behaviour of string.split() function.
join: boolean (default: True)
Should we join the list of words into a single string.
joinstring: str (default: ' ')
The string that will be used to join together the fixed words.
Returns
-------
str
In case join is True
list of str
In case join is False. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L228-L262 |
estnltk/estnltk | estnltk/vabamorf/morf.py | Vabamorf.synthesize | def synthesize(self, lemma, form, partofspeech='', hint='', guess=True, phonetic=False):
"""Synthesize a single word based on given morphological attributes.
Note that spellchecker does not respect pre-tokenized words and concatenates
token sequences such as "New York".
Parameters
----------
lemma: str
The lemma of the word(s) to be synthesized.
form: str
The form of the word(s) to be synthesized.
partofspeech: str
Part-of-speech.
hint: str
Hint.
guess: boolean (default: True)
Use heuristics when synthesizing unknown words.
phonetic: boolean (default: False)
Add phonetic markup to synthesized words.
Returns
-------
list
List of synthesized words.
"""
words = self._morf.synthesize(
convert(lemma.strip()),
convert(form.strip()),
convert(partofspeech.strip()),
convert(hint.strip()),
guess,
phonetic
)
return [deconvert(w) for w in words] | python | def synthesize(self, lemma, form, partofspeech='', hint='', guess=True, phonetic=False):
"""Synthesize a single word based on given morphological attributes.
Note that spellchecker does not respect pre-tokenized words and concatenates
token sequences such as "New York".
Parameters
----------
lemma: str
The lemma of the word(s) to be synthesized.
form: str
The form of the word(s) to be synthesized.
partofspeech: str
Part-of-speech.
hint: str
Hint.
guess: boolean (default: True)
Use heuristics when synthesizing unknown words.
phonetic: boolean (default: False)
Add phonetic markup to synthesized words.
Returns
-------
list
List of synthesized words.
"""
words = self._morf.synthesize(
convert(lemma.strip()),
convert(form.strip()),
convert(partofspeech.strip()),
convert(hint.strip()),
guess,
phonetic
)
return [deconvert(w) for w in words] | Synthesize a single word based on given morphological attributes.
Note that spellchecker does not respect pre-tokenized words and concatenates
token sequences such as "New York".
Parameters
----------
lemma: str
The lemma of the word(s) to be synthesized.
form: str
The form of the word(s) to be synthesized.
partofspeech: str
Part-of-speech.
hint: str
Hint.
guess: boolean (default: True)
Use heuristics when synthesizing unknown words.
phonetic: boolean (default: False)
Add phonetic markup to synthesized words.
Returns
-------
list
List of synthesized words. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L265-L299 |
estnltk/estnltk | estnltk/clausesegmenter.py | ClauseSegmenter.prepare_sentence | def prepare_sentence(self, sentence):
"""Prepare the sentence for segment detection."""
# depending on how the morphological analysis was added, there may be
# phonetic markup. Remove it, if it exists.
for word in sentence:
for analysis in word[ANALYSIS]:
analysis[ROOT] = analysis[ROOT].replace('~', '')
analysis[ROOT] = re.sub('[?<\]]([aioueöäõü])', '\\1', analysis[ROOT])
return json.dumps({WORDS: sentence}) | python | def prepare_sentence(self, sentence):
"""Prepare the sentence for segment detection."""
# depending on how the morphological analysis was added, there may be
# phonetic markup. Remove it, if it exists.
for word in sentence:
for analysis in word[ANALYSIS]:
analysis[ROOT] = analysis[ROOT].replace('~', '')
analysis[ROOT] = re.sub('[?<\]]([aioueöäõü])', '\\1', analysis[ROOT])
return json.dumps({WORDS: sentence}) | Prepare the sentence for segment detection. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/clausesegmenter.py#L53-L61 |
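The phonetic cleanup itself is just two string operations on each root; a stand-alone check of the same steps:
import re

root = 'v?ana~maja'
root = root.replace('~', '')
root = re.sub(r'[?<\]]([aioueöäõü])', r'\1', root)
print(root)   # 'vanamaja'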
estnltk/estnltk | estnltk/clausesegmenter.py | ClauseSegmenter.annotate_indices | def annotate_indices(self, sentence):
"""Add clause indexes to already annotated sentence."""
max_index = 0
max_depth = 1
stack_of_indexes = [ max_index ]
for token in sentence:
if CLAUSE_ANNOT not in token:
token[CLAUSE_IDX] = stack_of_indexes[-1]
else:
# Alustavad märgendused
for annotation in token[CLAUSE_ANNOT]:
if annotation == "KIILU_ALGUS":
# Liigume sügavamale, alustame järgmist kiilu
max_index += 1
stack_of_indexes.append(max_index)
if (len(stack_of_indexes) > max_depth):
max_depth = len(stack_of_indexes)
token[CLAUSE_IDX] = stack_of_indexes[-1]
# Lõpetavad märgendused
for annotation in token[CLAUSE_ANNOT]:
if annotation == "KINDEL_PIIR":
# Liigume edasi samal tasandil, alustame järgmist osalauset
max_index += 1
stack_of_indexes[-1] = max_index
elif annotation == "KIILU_LOPP":
# Taandume sügavusest, sulgeme ühe kiilu
stack_of_indexes.pop()
return sentence | python | def annotate_indices(self, sentence):
"""Add clause indexes to already annotated sentence."""
max_index = 0
max_depth = 1
stack_of_indexes = [ max_index ]
for token in sentence:
if CLAUSE_ANNOT not in token:
token[CLAUSE_IDX] = stack_of_indexes[-1]
else:
# Alustavad märgendused
for annotation in token[CLAUSE_ANNOT]:
if annotation == "KIILU_ALGUS":
# Liigume sügavamale, alustame järgmist kiilu
max_index += 1
stack_of_indexes.append(max_index)
if (len(stack_of_indexes) > max_depth):
max_depth = len(stack_of_indexes)
token[CLAUSE_IDX] = stack_of_indexes[-1]
# Lõpetavad märgendused
for annotation in token[CLAUSE_ANNOT]:
if annotation == "KINDEL_PIIR":
# Liigume edasi samal tasandil, alustame järgmist osalauset
max_index += 1
stack_of_indexes[-1] = max_index
elif annotation == "KIILU_LOPP":
# Taandume sügavusest, sulgeme ühe kiilu
stack_of_indexes.pop()
return sentence | Add clause indexes to already annotated sentence. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/clausesegmenter.py#L64-L91 |
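To make the stack logic easier to follow, here is a stand-alone toy re-run of the same index assignment over plain annotation lists (the annotation strings are the ones the method checks; estnltk's token dicts are not needed for the illustration):
def toy_clause_indices(annotations):
    max_index = 0
    stack = [max_index]
    indices = []
    for annots in annotations:
        # opening markers push a new embedded-clause index
        for a in annots:
            if a == 'KIILU_ALGUS':
                max_index += 1
                stack.append(max_index)
        indices.append(stack[-1])
        # closing markers take effect after the current token
        for a in annots:
            if a == 'KINDEL_PIIR':
                max_index += 1
                stack[-1] = max_index
            elif a == 'KIILU_LOPP':
                stack.pop()
    return indices

print(toy_clause_indices(
    [[], ['KIILU_ALGUS'], [], ['KIILU_LOPP'], [], ['KINDEL_PIIR'], []]))
# [0, 1, 1, 1, 0, 0, 2]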
estnltk/estnltk | estnltk/clausesegmenter.py | ClauseSegmenter.rename_annotations | def rename_annotations(self, sentence):
"""Function that renames and restructures clause information."""
annotations = []
for token in sentence:
data = {CLAUSE_IDX: token[CLAUSE_IDX]}
if CLAUSE_ANNOT in token:
if 'KINDEL_PIIR' in token[CLAUSE_ANNOT]:
data[CLAUSE_ANNOTATION] = CLAUSE_BOUNDARY
elif 'KIILU_ALGUS' in token[CLAUSE_ANNOT]:
data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_START
elif 'KIILU_LOPP' in token[CLAUSE_ANNOT]:
data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_END
annotations.append(data)
return annotations | python | def rename_annotations(self, sentence):
"""Function that renames and restructures clause information."""
annotations = []
for token in sentence:
data = {CLAUSE_IDX: token[CLAUSE_IDX]}
if CLAUSE_ANNOT in token:
if 'KINDEL_PIIR' in token[CLAUSE_ANNOT]:
data[CLAUSE_ANNOTATION] = CLAUSE_BOUNDARY
elif 'KIILU_ALGUS' in token[CLAUSE_ANNOT]:
data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_START
elif 'KIILU_LOPP' in token[CLAUSE_ANNOT]:
data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_END
annotations.append(data)
return annotations | Function that renames and restructures clause information. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/clausesegmenter.py#L93-L106 |
estnltk/estnltk | estnltk/examples/split_large_koondkorpus_files.py | format_time | def format_time( sec ):
''' Re-formats time duration in seconds (*sec*) into more easily readable
form, where (days,) hours, minutes, and seconds are explicitly shown.
Returns the new duration as a formatted string.
'''
import time
if sec < 86400:
# Idea from: http://stackoverflow.com/a/1384565
return time.strftime('%H:%M:%S', time.gmtime(sec))
else:
days = int(sec / 86400)
secs = sec % 86400
return str(days)+'d, '+time.strftime('%H:%M:%S', time.gmtime(secs)) | python | def format_time( sec ):
''' Re-formats time duration in seconds (*sec*) into more easily readable
form, where (days,) hours, minutes, and seconds are explicitly shown.
Returns the new duration as a formatted string.
'''
import time
if sec < 86400:
# Idea from: http://stackoverflow.com/a/1384565
return time.strftime('%H:%M:%S', time.gmtime(sec))
else:
days = int(sec / 86400)
secs = sec % 86400
return str(days)+'d, '+time.strftime('%H:%M:%S', time.gmtime(secs)) | Re-formats time duration in seconds (*sec*) into more easily readable
form, where (days,) hours, minutes, and seconds are explicitly shown.
Returns the new duration as a formatted string. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/examples/split_large_koondkorpus_files.py#L35-L47 |
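A quick stand-alone check of the intended formatting, using 86400 seconds per day:
import time

def fmt(sec):
    if sec < 86400:
        return time.strftime('%H:%M:%S', time.gmtime(sec))
    days = int(sec / 86400)
    return str(days) + 'd, ' + time.strftime('%H:%M:%S', time.gmtime(sec % 86400))

print(fmt(3723))    # 01:02:03
print(fmt(90061))   # 1d, 01:01:01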
estnltk/estnltk | estnltk/examples/split_large_koondkorpus_files.py | split_Text | def split_Text( text, file_name, verbose = True ):
''' Tokenizes the *text* (from *file_name*) into sentences, and if the number of
sentences exceeds *max_sentences*, splits the text into smaller texts.
Returns a list containing the original text (if no splitting was required),
or a list containing results of the splitting (smaller texts);
'''
if verbose:
print(' processing '+file_name+' ... ', end="" )
# Tokenize text into sentences
start = timer()
text = text.tokenize_sentences()
all_sentences = len(text[SENTENCES])
end = timer()
if verbose:
print(' (tok time: '+format_time( end-start )+')', end="" )
if all_sentences > max_sentences:
# Acquire spans of length *max_sentences* from the text
start = timer()
i = 0
spans = []
len_total = 0
while i < all_sentences:
startSent = text[SENTENCES][i]
endSent = text[SENTENCES][min(i+(max_sentences-1), all_sentences-1)]
span = (startSent[START], endSent[END])
len_total += (span[1]-span[0])
spans.append(span)
i += max_sentences
# Divide the text into spans
text_spans = text.texts_from_spans(spans)
assert len(text.text) >= len_total, '(!) Total spans_len must be =< than text_len: '+str(len_total)+'/'+str(len(text.text))
new_texts = []
for i, small_text in enumerate( text_spans ):
newText = Text( small_text )
for key in text.keys():
if key != TEXT and key != SENTENCES and key != PARAGRAPHS:
newText[key] = text[key]
newText['_text_split_id'] = i
newText['_text_split_origin'] = str(spans[i]) # Convert it to string; Otherwise, split_by(*) may mistakenly consider
# it a layer and may run into error while trying to split it;
newText['_text_split_file'] = file_name
#print( json.dumps(newText) )
new_texts.append( newText )
end = timer()
if verbose:
print(' (split time: '+format_time( end-start )+')', end="" )
print(' (sents: '+str(all_sentences)+', new_texts:'+str(len(new_texts))+')', end="")
print()
return new_texts
else:
if verbose:
print(' (sents: '+str(all_sentences)+', no_split)', end=" \n")
return [text] | python | def split_Text( text, file_name, verbose = True ):
''' Tokenizes the *text* (from *file_name*) into sentences, and if the number of
sentences exceeds *max_sentences*, splits the text into smaller texts.
Returns a list containing the original text (if no splitting was required),
or a list containing results of the splitting (smaller texts);
'''
if verbose:
print(' processing '+file_name+' ... ', end="" )
# Tokenize text into sentences
start = timer()
text = text.tokenize_sentences()
all_sentences = len(text[SENTENCES])
end = timer()
if verbose:
print(' (tok time: '+format_time( end-start )+')', end="" )
if all_sentences > max_sentences:
# Acquire spans of length *max_sentences* from the text
start = timer()
i = 0
spans = []
len_total = 0
while i < all_sentences:
startSent = text[SENTENCES][i]
endSent = text[SENTENCES][min(i+(max_sentences-1), all_sentences-1)]
span = (startSent[START], endSent[END])
len_total += (span[1]-span[0])
spans.append(span)
i += max_sentences
# Divide the text into spans
text_spans = text.texts_from_spans(spans)
assert len(text.text) >= len_total, '(!) Total spans_len must be =< than text_len: '+str(len_total)+'/'+str(len(text.text))
new_texts = []
for i, small_text in enumerate( text_spans ):
newText = Text( small_text )
for key in text.keys():
if key != TEXT and key != SENTENCES and key != PARAGRAPHS:
newText[key] = text[key]
newText['_text_split_id'] = i
newText['_text_split_origin'] = str(spans[i]) # Convert it to string; Otherwise, split_by(*) may mistakenly consider
# it a layer and may run into error while trying to split it;
newText['_text_split_file'] = file_name
#print( json.dumps(newText) )
new_texts.append( newText )
end = timer()
if verbose:
print(' (split time: '+format_time( end-start )+')', end="" )
print(' (sents: '+str(all_sentences)+', new_texts:'+str(len(new_texts))+')', end="")
print()
return new_texts
else:
if verbose:
print(' (sents: '+str(all_sentences)+', no_split)', end=" \n")
return [text] | Tokenizes the *text* (from *file_name*) into sentences, and if the number of
sentences exceeds *max_sentences*, splits the text into smaller texts.
Returns a list containing the original text (if no splitting was required),
or a list containing results of the splitting (smaller texts); | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/examples/split_large_koondkorpus_files.py#L50-L103 |
estnltk/estnltk | estnltk/examples/split_large_koondkorpus_files.py | write_Text_into_file | def write_Text_into_file( text, old_file_name, out_dir, suffix='__split', verbose=True ):
''' Based on *old_file_name*, *suffix* and *out_dir*, constructs a new file name and
writes *text* (in the ascii normalised JSON format) into the new file.
'''
name = os.path.basename( old_file_name )
if '.' in name:
new_name = re.sub('\.([^.]+)$', suffix+'.\\1', name)
else:
new_name = name + suffix
new_path = os.path.join( out_dir, new_name )
start = timer()
#write_document( text, new_path ) # <--- this leaves indent=2 - takes too much extra space ...
o_f = codecs.open( new_path, mode='wb', encoding='ascii' )
o_f.write( json.dumps( text ) )
o_f.close()
end = timer()
timestamp = format_time( end-start )
if verbose:
print(' ==> '+new_path+' (file writing time: '+timestamp+')' ) | python | def write_Text_into_file( text, old_file_name, out_dir, suffix='__split', verbose=True ):
''' Based on *old_file_name*, *suffix* and *out_dir*, constructs a new file name and
writes *text* (in the ascii normalised JSON format) into the new file.
'''
name = os.path.basename( old_file_name )
if '.' in name:
new_name = re.sub('\.([^.]+)$', suffix+'.\\1', name)
else:
new_name = name + suffix
new_path = os.path.join( out_dir, new_name )
start = timer()
#write_document( text, new_path ) # <--- this leaves indent=2 - takes too much extra space ...
o_f = codecs.open( new_path, mode='wb', encoding='ascii' )
o_f.write( json.dumps( text ) )
o_f.close()
end = timer()
timestamp = format_time( end-start )
if verbose:
print(' ==> '+new_path+' (file writing time: '+timestamp+')' ) | Based on *old_file_name*, *suffix* and *out_dir*, constructs a new file name and
writes *text* (in the ascii normalised JSON format) into the new file. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/examples/split_large_koondkorpus_files.py#L106-L124 |
estnltk/estnltk | estnltk/teicorpus.py | parse_tei_corpora | def parse_tei_corpora(root, prefix='', suffix='.xml', target=['artikkel'], encoding=None):
"""Parse documents from TEI style XML files.
Gives each document a FILE attribute that denotes the original filename.
Parameters
----------
root: str
The directory path containing the TEI corpora XML files.
prefix: str
The prefix of filenames to include (default: '')
suffix: str
The suffix of filenames to include (default: '.xml')
target: list of str
List of <div> types, that are considered documents in the XML files (default: ["artikkel"]).
encoding: str
Encoding to be used for decoding the content of the XML file. If not specified (default),
then no separate decoding step is applied.
Returns
-------
list of estnltk.text.Text
Corpus containing parsed documents from all files. The file path
is stored in FILE attribute of the documents.
"""
documents = []
for fnm in get_filenames(root, prefix, suffix):
path = os.path.join(root, fnm)
docs = parse_tei_corpus(path, target, encoding)
for doc in docs:
doc[FILE] = fnm
documents.extend(docs)
return documents | python | def parse_tei_corpora(root, prefix='', suffix='.xml', target=['artikkel'], encoding=None):
"""Parse documents from TEI style XML files.
Gives each document a FILE attribute that denotes the original filename.
Parameters
----------
root: str
The directory path containing the TEI corpora XML files.
prefix: str
The prefix of filenames to include (default: '')
suffix: str
The suffix of filenames to include (default: '.xml')
target: list of str
List of <div> types, that are considered documents in the XML files (default: ["artikkel"]).
encoding: str
Encoding to be used for decoding the content of the XML file. If not specified (default),
then no separate decoding step is applied.
Returns
-------
list of estnltk.text.Text
Corpus containing parsed documents from all files. The file path
is stored in FILE attribute of the documents.
"""
documents = []
for fnm in get_filenames(root, prefix, suffix):
path = os.path.join(root, fnm)
docs = parse_tei_corpus(path, target, encoding)
for doc in docs:
doc[FILE] = fnm
documents.extend(docs)
return documents | Parse documents from TEI style XML files.
Gives each document a FILE attribute that denotes the original filename.
Parameters
----------
root: str
The directory path containing the TEI corpora XML files.
prefix: str
The prefix of filenames to include (default: '')
suffix: str
The suffix of filenames to include (default: '.xml')
target: list of str
List of <div> types, that are considered documents in the XML files (default: ["artikkel"]).
encoding: str
Encoding to be used for decoding the content of the XML file. If not specified (default),
then no separate decoding step is applied.
Returns
-------
list of estnltk.text.Text
Corpus containing parsed documents from all files. The file path
is stored in FILE attribute of the documents. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/teicorpus.py#L27-L59 |
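A usage sketch; the import path estnltk.teicorpus follows the file layout above, and the corpus directory is a placeholder path:
from estnltk.teicorpus import parse_tei_corpora

docs = parse_tei_corpora('/path/to/tei/xml', suffix='.xml', target=['artikkel'])
print(len(docs))
if docs:
    print(docs[0]['title'])   # metadata collected from the enclosing <div> elements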
estnltk/estnltk | estnltk/teicorpus.py | parse_tei_corpus | def parse_tei_corpus(path, target=['artikkel'], encoding=None):
"""Parse documents from a TEI style XML file.
Parameters
----------
path: str
The path of the XML file.
target: list of str
List of <div> types, that are considered documents in the XML files (default: ["artikkel"]).
encoding: str
Encoding to be used for decoding the content of the XML file. If not specified (default),
then no separate decoding step is applied.
Returns
-------
list of estnltk.text.Text
"""
with open(path, 'rb') as f:
html_doc = f.read()
if encoding:
html_doc = html_doc.decode( encoding )
soup = BeautifulSoup(html_doc, 'html5lib')
title = soup.find_all('title')[0].string
documents = []
for div1 in soup.find_all('div1'):
documents.extend(parse_div(div1, dict(), target))
return tokenize_documents(documents) | python | def parse_tei_corpus(path, target=['artikkel'], encoding=None):
"""Parse documents from a TEI style XML file.
Parameters
----------
path: str
The path of the XML file.
target: list of str
List of <div> types, that are considered documents in the XML files (default: ["artikkel"]).
encoding: str
Encoding to be used for decoding the content of the XML file. If not specified (default),
then no separate decoding step is applied.
Returns
-------
list of estnltk.text.Text
"""
with open(path, 'rb') as f:
html_doc = f.read()
if encoding:
html_doc = html_doc.decode( encoding )
soup = BeautifulSoup(html_doc, 'html5lib')
title = soup.find_all('title')[0].string
documents = []
for div1 in soup.find_all('div1'):
documents.extend(parse_div(div1, dict(), target))
return tokenize_documents(documents) | Parse documents from a TEI style XML file.
Parameters
----------
path: str
The path of the XML file.
target: list of str
List of <div> types, that are considered documents in the XML files (default: ["artikkel"]).
encoding: str
Encoding to be used for decoding the content of the XML file. If not specified (default),
then no separate decoding step is applied.
Returns
-------
list of estnltk.text.Text | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/teicorpus.py#L62-L89 |
estnltk/estnltk | estnltk/teicorpus.py | parse_div | def parse_div(soup, metadata, target):
"""Parse a <div> tag from the file.
The sections in XML files are given in <div1>, <div2> and <div3>
tags. Each such tag has a type and name (plus possibly more extra attributes).
If the div type is found in target variable, the div is parsed
into structured paragraphs, sentences and words.
Otherwise, the type and name are added as metadata to subdivs
and stored in them.
Parameters
----------
soup: bs4.BeautifulSoup
The parsed XML data.
metadata: dict
The metadata for parent divs.
target: list of str
List of <div> types, that are considered documents in the XML files.
"""
documents = []
div_type = soup.get('type', None)
div_title = list(soup.children)[0].string.strip()
if div_type in target:
div_authors = soup.find_all('author')
document = {
'type': div_type,
'title': div_title,
'paragraphs': parse_paragraphs(soup)
}
# add author, if it exists
if len(div_authors) > 0:
div_author = div_authors[0].text.strip()
document['author'] = div_author
# add collected metadata
for k, v in metadata.items():
document[k] = v
documents.append(document)
else:
metadata[div_type] = div_title
# recurse subdivs
subdiv_name = get_subdiv(soup.name)
subdivs = []
if subdiv_name is not None:
subdivs = soup.find_all(subdiv_name)
if len(subdivs) > 0:
for subdiv in subdivs:
documents.extend(parse_div(subdiv, deepcopy(metadata), target))
return documents | python | def parse_div(soup, metadata, target):
"""Parse a <div> tag from the file.
The sections in XML files are given in <div1>, <div2> and <div3>
tags. Each such tag has a type and name (plus possibly more extra attributes).
If the div type is found in target variable, the div is parsed
into structured paragraphs, sentences and words.
Otherwise, the type and name are added as metadata to subdivs
and stored in them.
Parameters
----------
soup: bs4.BeautifulSoup
The parsed XML data.
metadata: dict
The metadata for parent divs.
target: list of str
List of <div> types, that are considered documents in the XML files.
"""
documents = []
div_type = soup.get('type', None)
div_title = list(soup.children)[0].string.strip()
if div_type in target:
div_authors = soup.find_all('author')
document = {
'type': div_type,
'title': div_title,
'paragraphs': parse_paragraphs(soup)
}
# add author, if it exists
if len(div_authors) > 0:
div_author = div_authors[0].text.strip()
document['author'] = div_author
# add collected metadata
for k, v in metadata.items():
document[k] = v
documents.append(document)
else:
metadata[div_type] = div_title
# recurse subdivs
subdiv_name = get_subdiv(soup.name)
subdivs = []
if subdiv_name is not None:
subdivs = soup.find_all(subdiv_name)
if len(subdivs) > 0:
for subdiv in subdivs:
documents.extend(parse_div(subdiv, deepcopy(metadata), target))
return documents | Parse a <div> tag from the file.
The sections in XML files are given in <div1>, <div2> and <div3>
tags. Each such tag has a type and name (plus possibly more extra attributes).
If the div type is found in target variable, the div is parsed
into structured paragraphs, sentences and words.
Otherwise, the type and name are added as metadata to subdivs
and stored in them.
Parameters
----------
soup: bs4.BeautifulSoup
The parsed XML data.
metadata: dict
The metadata for parent divs.
target: list of str
List of <div> types, that are considered documents in the XML files. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/teicorpus.py#L97-L149 |
estnltk/estnltk | estnltk/teicorpus.py | parse_paragraphs | def parse_paragraphs(soup):
"""Parse sentences and paragraphs in the section.
Parameters
----------
soup: bs4.BeautifulSoup
The parsed XML data.
Returns
-------
list of (list of str)
List of paragraphs given as list of sentences.
"""
paragraphs = []
for para in soup.find_all('p'):
sentences = []
for sent in para.find_all('s'):
sentence = sent.text.strip()
if len(sentence) > 0:
sentences.append(sentence)
if len(sentences) > 0:
paragraphs.append({'sentences': sentences})
return paragraphs | python | def parse_paragraphs(soup):
"""Parse sentences and paragraphs in the section.
Parameters
----------
soup: bs4.BeautifulSoup
The parsed XML data.
Returns
-------
list of (list of str)
List of paragraphs given as list of sentences.
"""
paragraphs = []
for para in soup.find_all('p'):
sentences = []
for sent in para.find_all('s'):
sentence = sent.text.strip()
if len(sentence) > 0:
sentences.append(sentence)
if len(sentences) > 0:
paragraphs.append({'sentences': sentences})
return paragraphs | Parse sentences and paragraphs in the section.
Parameters
----------
soup: bs4.BeautifulSoup
The parsed XML data.
Returns
-------
list of (list of str)
List of paragraphs given as list of sentences. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/teicorpus.py#L152-L174 |
estnltk/estnltk | estnltk/teicorpus.py | tokenize_documents | def tokenize_documents(docs):
"""Convert the imported documents to :py:class:'~estnltk.text.Text' instances."""
sep = '\n\n'
texts = []
for doc in docs:
text = '\n\n'.join(['\n'.join(para[SENTENCES]) for para in doc[PARAGRAPHS]])
doc[TEXT] = text
del doc[PARAGRAPHS]
texts.append(Text(doc))
return texts | python | def tokenize_documents(docs):
"""Convert the imported documents to :py:class:'~estnltk.text.Text' instances."""
sep = '\n\n'
texts = []
for doc in docs:
text = '\n\n'.join(['\n'.join(para[SENTENCES]) for para in doc[PARAGRAPHS]])
doc[TEXT] = text
del doc[PARAGRAPHS]
texts.append(Text(doc))
return texts | Convert the imported documents to :py:class:'~estnltk.text.Text' instances. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/teicorpus.py#L186-L195 |
estnltk/estnltk | estnltk/tools/train_default_ner_model.py | train_default_model | def train_default_model():
"""Function for training the default NER model.
NB! It overwrites the default model, so do not use it unless
you know what are you doing.
The training data is in file estnltk/corpora/estner.json.bz2 .
The resulting model will be saved to estnltk/estner/models/default.bin
"""
docs = read_json_corpus(DEFAULT_NER_DATASET)
trainer = NerTrainer(default_nersettings)
trainer.train(docs, DEFAULT_NER_MODEL_DIR) | python | def train_default_model():
"""Function for training the default NER model.
NB! It overwrites the default model, so do not use it unless
you know what you are doing.
The training data is in file estnltk/corpora/estner.json.bz2 .
The resulting model will be saved to estnltk/estner/models/default.bin
"""
docs = read_json_corpus(DEFAULT_NER_DATASET)
trainer = NerTrainer(default_nersettings)
trainer.train(docs, DEFAULT_NER_MODEL_DIR) | Function for training the default NER model.
NB! It overwrites the default model, so do not use it unless
you know what you are doing.
The training data is in file estnltk/corpora/estner.json.bz2 .
The resulting model will be saved to estnltk/estner/models/default.bin | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/tools/train_default_ner_model.py#L10-L21 |
estnltk/estnltk | estnltk/javaprocess.py | JavaProcess.process_line | def process_line(self, line):
"""Process a line of data.
Sends the data through the pipe to the process and flushes it. Reads a resulting line
and returns it.
Parameters
----------
line: str
The data sent to process. Make sure it does not contain any newline characters.
Returns
-------
str: The line returned by the Java process
Raises
------
Exception
In case EOF is encountered.
IOError
In case it was impossible to read or write from the subprocess standard input / output.
"""
assert isinstance(line, str)
try:
self._process.stdin.write(as_binary(line))
self._process.stdin.write(as_binary('\n'))
self._process.stdin.flush()
result = as_unicode(self._process.stdout.readline())
if result == '':
stderr = as_unicode(self._process.stderr.read())
raise Exception('EOF encountered while reading stream. Stderr is {0}.'.format(stderr))
return result
except Exception:
self._process.terminate()
raise | python | def process_line(self, line):
"""Process a line of data.
Sends the data through the pipe to the process and flushes it. Reads a resulting line
and returns it.
Parameters
----------
line: str
The data sent to process. Make sure it does not contain any newline characters.
Returns
-------
str: The line returned by the Java process
Raises
------
Exception
In case EOF is encountered.
IOError
In case it was impossible to read or write from the subprocess standard input / output.
"""
assert isinstance(line, str)
try:
self._process.stdin.write(as_binary(line))
self._process.stdin.write(as_binary('\n'))
self._process.stdin.flush()
result = as_unicode(self._process.stdout.readline())
if result == '':
stderr = as_unicode(self._process.stderr.read())
raise Exception('EOF encountered while reading stream. Stderr is {0}.'.format(stderr))
return result
except Exception:
self._process.terminate()
raise | Process a line of data.
Sends the data through the pipe to the process and flushes it. Reads a resulting line
and returns it.
Parameters
----------
line: str
The data sent to process. Make sure it does not contain any newline characters.
Returns
-------
str: The line returned by the Java process
Raises
------
Exception
In case EOF is encountered.
IOError
In case it was impossible to read or write from the subprocess standard input / output. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/javaprocess.py#L52-L87 |
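The contract is a plain line-based pipe protocol: write one line, flush, read one line back. A stand-alone illustration of the same pattern using subprocess, with `cat` standing in for the Java process (illustrative only, not estnltk API; assumes a POSIX system where `cat` is available):
import subprocess

proc = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.stdin.write('{"words": []}\n'.encode('utf-8'))
proc.stdin.flush()
print(proc.stdout.readline().decode('utf-8').rstrip())   # the same line echoed back
proc.terminate()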
estnltk/estnltk | estnltk/wordnet/wn.py | _get_synset_offsets | def _get_synset_offsets(synset_idxes):
"""Returs pointer offset in the WordNet file for every synset index.
Notes
-----
Internal function. Do not call directly.
Preserves order -- for [x,y,z] returns [offset(x),offset(y),offset(z)].
Parameters
----------
synset_idxes : list of ints
Lists synset IDs, which need offset.
Returns
-------
list of ints
Lists pointer offsets in Wordnet file.
"""
offsets = {}
current_seeked_offset_idx = 0
ordered_synset_idxes = sorted(synset_idxes)
with codecs.open(_SOI,'rb', 'utf-8') as fin:
for line in fin:
split_line = line.split(':')
while current_seeked_offset_idx < len(ordered_synset_idxes) and split_line[0] == str(ordered_synset_idxes[current_seeked_offset_idx]):
# Looping on single line entries in case synset_indexes contains duplicates.
offsets[synset_idxes[current_seeked_offset_idx]] = int(split_line[1])
current_seeked_offset_idx += 1
if current_seeked_offset_idx >= len(synset_idxes):
break
return [offsets[synset_idx] for synset_idx in synset_idxes] | python | def _get_synset_offsets(synset_idxes):
"""Returs pointer offset in the WordNet file for every synset index.
Notes
-----
Internal function. Do not call directly.
Preserves order -- for [x,y,z] returns [offset(x),offset(y),offset(z)].
Parameters
----------
synset_idxes : list of ints
Lists synset IDs, which need offset.
Returns
-------
list of ints
Lists pointer offsets in Wordnet file.
"""
offsets = {}
current_seeked_offset_idx = 0
ordered_synset_idxes = sorted(synset_idxes)
with codecs.open(_SOI,'rb', 'utf-8') as fin:
for line in fin:
split_line = line.split(':')
while current_seeked_offset_idx < len(ordered_synset_idxes) and split_line[0] == str(ordered_synset_idxes[current_seeked_offset_idx]):
# Looping on single line entries in case synset_indexes contains duplicates.
offsets[synset_idxes[current_seeked_offset_idx]] = int(split_line[1])
current_seeked_offset_idx += 1
if current_seeked_offset_idx >= len(synset_idxes):
break
return [offsets[synset_idx] for synset_idx in synset_idxes] | Returs pointer offset in the WordNet file for every synset index.
Notes
-----
Internal function. Do not call directly.
Preserves order -- for [x,y,z] returns [offset(x),offset(y),offset(z)].
Parameters
----------
synset_idxes : list of ints
Lists synset IDs, which need offset.
Returns
-------
list of ints
Lists pointer offsets in Wordnet file. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L53-L88 |
estnltk/estnltk | estnltk/wordnet/wn.py | _get_synsets | def _get_synsets(synset_offsets):
"""Given synset offsets in the WordNet file, parses synset object for every offset.
Notes
-----
Internal function. Do not call directly.
Stores every parsed synset into global synset dictionary under two keys:
synset's name lemma.pos.sense_no and synset's id (unique integer).
Parameters
----------
synset_offsets : list of ints
Lists pointer offsets from which synset objects will be parsed.
Returns
-------
list of Synsets
Lists synset objects which synset_offsets point to.
"""
global parser
if parser is None:
parser = Parser(_WN_FILE)
synsets = []
for offset in synset_offsets:
raw_synset = parser.parse_synset(offset)
synset = Synset(raw_synset)
SYNSETS_DICT[_get_key_from_raw_synset(raw_synset)] = synset
SYNSETS_DICT[synset.id] = synset
synsets.append(synset)
return synsets | python | def _get_synsets(synset_offsets):
"""Given synset offsets in the WordNet file, parses synset object for every offset.
Notes
-----
Internal function. Do not call directly.
Stores every parsed synset into global synset dictionary under two keys:
synset's name lemma.pos.sense_no and synset's id (unique integer).
Parameters
----------
synset_offsets : list of ints
Lists pointer offsets from which synset objects will be parsed.
Returns
-------
list of Synsets
Lists synset objects which synset_offsets point to.
"""
global parser
if parser is None:
parser = Parser(_WN_FILE)
synsets = []
for offset in synset_offsets:
raw_synset = parser.parse_synset(offset)
synset = Synset(raw_synset)
SYNSETS_DICT[_get_key_from_raw_synset(raw_synset)] = synset
SYNSETS_DICT[synset.id] = synset
synsets.append(synset)
return synsets | Given synset offsets in the WordNet file, parses synset object for every offset.
Notes
-----
Internal function. Do not call directly.
Stores every parsed synset into global synset dictionary under two keys:
synset's name lemma.pos.sense_no and synset's id (unique integer).
Parameters
----------
synset_offsets : list of ints
Lists pointer offsets from which synset objects will be parsed.
Returns
-------
list of Synsets
Lists synset objects which synset_offsets point to. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L90-L124 |
estnltk/estnltk | estnltk/wordnet/wn.py | _get_key_from_raw_synset | def _get_key_from_raw_synset(raw_synset):
"""Derives synset key in the form of `lemma.pos.sense_no` from the provided eurown.py Synset class,
Notes
-----
Internal function. Do not call directly.
Parameters
----------
raw_synset : eurown.Synset
Synset representation from which lemma, part-of-speech and sense is derived.
Returns
-------
string
Key of the synset in the form of `lemma.pos.sense_no`.
"""
pos = raw_synset.pos
literal = raw_synset.variants[0].literal
sense = "%02d"%raw_synset.variants[0].sense
return '.'.join([literal,pos,sense]) | python | def _get_key_from_raw_synset(raw_synset):
"""Derives synset key in the form of `lemma.pos.sense_no` from the provided eurown.py Synset class,
Notes
-----
Internal function. Do not call directly.
Parameters
----------
raw_synset : eurown.Synset
Synset representation from which lemma, part-of-speech and sense is derived.
Returns
-------
string
Key of the synset in the form of `lemma.pos.sense_no`.
"""
pos = raw_synset.pos
literal = raw_synset.variants[0].literal
sense = "%02d"%raw_synset.variants[0].sense
return '.'.join([literal,pos,sense]) | Derives synset key in the form of `lemma.pos.sense_no` from the provided eurown.py Synset class,
Notes
-----
Internal function. Do not call directly.
Parameters
----------
raw_synset : eurown.Synset
Synset representation from which lemma, part-of-speech and sense is derived.
Returns
-------
string
Key of the synset in the form of `lemma.pos.sense_no`. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L126-L148 |
estnltk/estnltk | estnltk/wordnet/wn.py | synset | def synset(synset_key):
"""Returns synset object with the provided key.
Notes
-----
Uses lazy initialization - synsets will be fetched from a dictionary after the first request.
Parameters
----------
synset_key : string
Unique synset identifier in the form of `lemma.pos.sense_no`.
Returns
-------
Synset
Synset with key `synset_key`.
None, if no match was found.
"""
if synset_key in SYNSETS_DICT:
return SYNSETS_DICT[synset_key]
def _get_synset_idx(synset_key):
"""Returns synset index for the provided key.
Note
----
Internal function. Do not call directly.
"""
with codecs.open(_SENSE_FILE,'rb', 'utf-8') as fin:
for line in fin:
split_line = line.split(':')
if split_line[0] == synset_key:
return int(split_line[1].strip())
return None
synset_idx = _get_synset_idx(synset_key)
if synset_idx == None:
return None
synset_offset = _get_synset_offsets([synset_idx])
synset = _get_synsets(synset_offset)
return synset[0] | python | def synset(synset_key):
"""Returns synset object with the provided key.
Notes
-----
Uses lazy initialization - synsets will be fetched from a dictionary after the first request.
Parameters
----------
synset_key : string
Unique synset identifier in the form of `lemma.pos.sense_no`.
Returns
-------
Synset
Synset with key `synset_key`.
None, if no match was found.
"""
if synset_key in SYNSETS_DICT:
return SYNSETS_DICT[synset_key]
def _get_synset_idx(synset_key):
"""Returns synset index for the provided key.
Note
----
Internal function. Do not call directly.
"""
with codecs.open(_SENSE_FILE,'rb', 'utf-8') as fin:
for line in fin:
split_line = line.split(':')
if split_line[0] == synset_key:
return int(split_line[1].strip())
return None
synset_idx = _get_synset_idx(synset_key)
if synset_idx == None:
return None
synset_offset = _get_synset_offsets([synset_idx])
synset = _get_synsets(synset_offset)
return synset[0] | Returns synset object with the provided key.
Notes
-----
Uses lazy initialization - synsets will be fetched from a dictionary after the first request.
Parameters
----------
synset_key : string
Unique synset identifier in the form of `lemma.pos.sense_no`.
Returns
-------
Synset
Synset with key `synset_key`.
None, if no match was found. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L150-L196 |
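A usage sketch; the import path estnltk.wordnet.wn follows the file layout above, and 'koer' (dog) is assumed to be present in the Estonian WordNet:
from estnltk.wordnet import wn

ss = wn.synset('koer.n.01')   # key format: lemma.pos.sense_no, sense zero-padded to two digits
print(ss)                     # a Synset object, or None if the key is unknown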
estnltk/estnltk | estnltk/wordnet/wn.py | synsets | def synsets(lemma,pos=None):
"""Returns all synset objects which have lemma as one of the variant literals and fixed pos, if provided.
Notes
-----
Uses lazy initialization - parses only those synsets which are not yet initialized, others are fetched from a dictionary.
Parameters
----------
lemma : str
Lemma of the synset.
pos : str, optional
Part-of-speech specification of the searched synsets, defaults to None.
Returns
-------
list of Synsets
Synsets which contain `lemma` and of which part-of-speech is `pos`, if specified.
Empty list, if no match was found.
"""
def _get_synset_idxes(lemma,pos):
line_prefix_regexp = "%s:%s:(.*)"%(lemma,pos if pos else "\w+")
line_prefix = re.compile(line_prefix_regexp)
idxes = []
with codecs.open(_LIT_POS_FILE,'rb', 'utf-8') as fin:
for line in fin:
result = line_prefix.match(line)
if result:
res_indices = [int(x) for x in result.group(1).split(' ')]
idxes.extend(res_indices)
LEM_POS_2_SS_IDX[lemma][pos].extend(idxes)
return sorted(idxes)
synset_idxes = None
if lemma in LEM_POS_2_SS_IDX:
if pos in LEM_POS_2_SS_IDX[lemma]:
synset_idxes = LEM_POS_2_SS_IDX[lemma][pos]
else:
synset_idxes = [idx for pos in LEM_POS_2_SS_IDX[lemma] for idx in LEM_POS_2_SS_IDX[lemma][pos]]
if not synset_idxes:
synset_idxes = _get_synset_idxes(lemma,pos)
if len(synset_idxes) == 0:
return []
stored_synsets = [SYNSETS_DICT[synset_idxes[i]] for i in range(len(synset_idxes)) if synset_idxes[i] in SYNSETS_DICT]
unstored_synset_idxes = [synset_idxes[i] for i in range(len(synset_idxes)) if synset_idxes[i] not in SYNSETS_DICT]
synset_offsets = _get_synset_offsets(unstored_synset_idxes)
synsets = _get_synsets(synset_offsets)
return stored_synsets + synsets | python | def synsets(lemma,pos=None):
"""Returns all synset objects which have lemma as one of the variant literals and fixed pos, if provided.
Notes
-----
Uses lazy initialization - parses only those synsets which are not yet initialized, others are fetched from a dictionary.
Parameters
----------
lemma : str
Lemma of the synset.
pos : str, optional
Part-of-speech specification of the searched synsets, defaults to None.
Returns
-------
list of Synsets
Synsets which contain `lemma` and whose part of speech is `pos`, if specified.
Empty list, if no match was found.
"""
def _get_synset_idxes(lemma,pos):
line_prefix_regexp = "%s:%s:(.*)"%(lemma,pos if pos else "\w+")
line_prefix = re.compile(line_prefix_regexp)
idxes = []
with codecs.open(_LIT_POS_FILE,'rb', 'utf-8') as fin:
for line in fin:
result = line_prefix.match(line)
if result:
res_indices = [int(x) for x in result.group(1).split(' ')]
idxes.extend(res_indices)
LEM_POS_2_SS_IDX[lemma][pos].extend(idxes)
return sorted(idxes)
synset_idxes = None
if lemma in LEM_POS_2_SS_IDX:
if pos in LEM_POS_2_SS_IDX[lemma]:
synset_idxes = LEM_POS_2_SS_IDX[lemma][pos]
else:
synset_idxes = [idx for pos in LEM_POS_2_SS_IDX[lemma] for idx in LEM_POS_2_SS_IDX[lemma][pos]]
if not synset_idxes:
synset_idxes = _get_synset_idxes(lemma,pos)
if len(synset_idxes) == 0:
return []
stored_synsets = [SYNSETS_DICT[synset_idxes[i]] for i in range(len(synset_idxes)) if synset_idxes[i] in SYNSETS_DICT]
unstored_synset_idxes = [synset_idxes[i] for i in range(len(synset_idxes)) if synset_idxes[i] not in SYNSETS_DICT]
synset_offsets = _get_synset_offsets(unstored_synset_idxes)
synsets = _get_synsets(synset_offsets)
return stored_synsets + synsets | Returns all synset objects which have lemma as one of the variant literals and fixed pos, if provided.
Notes
-----
Uses lazy initialization - parses only those synsets which are not yet initialized, others are fetched from a dictionary.
Parameters
----------
lemma : str
Lemma of the synset.
pos : str, optional
Part-of-speech specification of the searched synsets, defaults to None.
Returns
-------
list of Synsets
Synsets which contain `lemma` and whose part of speech is `pos`, if specified.
Empty list, if no match was found. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L199-L256 |
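A short sketch of lemma-based lookup, again assuming the estnltk.wordnet.wn import path and that 'koer' is present in the Estonian WordNet; wn.NOUN is the part-of-speech constant referenced further below, and definition() is documented later in this file.

from estnltk.wordnet import wn

for ss in wn.synsets("koer", pos=wn.NOUN):   # returns [] if nothing matches
    print(ss.name, ss.definition())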
estnltk/estnltk | estnltk/wordnet/wn.py | all_synsets | def all_synsets(pos=None):
"""Return all the synsets which have the provided pos.
Notes
-----
Returns thousands or tens of thousands of synsets - first time will take significant time.
Useful for initializing synsets as each returned synset is also stored in a global dictionary for fast retrieval the next time.
Parameters
----------
pos : str
Part-of-speech of the sought synsets. Sensible alternatives are wn.ADJ, wn.ADV, wn.VERB, wn.NOUN and `*`.
If pos == `*`, all the synsets are retrieved and initialized for fast retrieval the next time.
Returns
-------
list of Synsets
Lists the Synsets which have `pos` as part-of-speech.
Empty list, if `pos` not in [wn.ADJ, wn.ADV, wn.VERB, wn.NOUN, `*`].
"""
def _get_unique_synset_idxes(pos):
idxes = []
with codecs.open(_LIT_POS_FILE,'rb', 'utf-8') as fin:
if pos is None:
for line in fin:
split_line = line.strip().split(':')
idxes.extend([int(x) for x in split_line[2].split()])
else:
for line in fin:
split_line = line.strip().split(':')
if split_line[1] == pos:
idxes.extend([int(x) for x in split_line[2].split()])
idxes = list(set(idxes))
idxes.sort()
return idxes
if pos in LOADED_POS:
return [SYNSETS_DICT[idx] for lemma in LEM_POS_2_SS_IDX for idx in LEM_POS_2_SS_IDX[lemma][pos]]
else:
synset_idxes = _get_unique_synset_idxes(pos)
if len(synset_idxes) == 0:
return []
stored_synsets = [SYNSETS_DICT[synset_idxes[i]] for i in range(len(synset_idxes)) if synset_idxes[i] in SYNSETS_DICT]
unstored_synset_idxes = [synset_idxes[i] for i in range(len(synset_idxes)) if synset_idxes[i] not in SYNSETS_DICT]
synset_offsets = _get_synset_offsets(unstored_synset_idxes)
synsets = _get_synsets(synset_offsets)
for synset in synsets:
for variant in synset.get_variants():
LEM_POS_2_SS_IDX[variant.literal][synset.pos].append(synset.id)
LOADED_POS.add(pos)
return stored_synsets + synsets | python | def all_synsets(pos=None):
"""Return all the synsets which have the provided pos.
Notes
-----
Returns thousands or tens of thousands of synsets - first time will take significant time.
Useful for initializing synsets as each returned synset is also stored in a global dictionary for fast retrieval the next time.
Parameters
----------
pos : str
Part-of-speech of the sought synsets. Sensible alternatives are wn.ADJ, wn.ADV, wn.VERB, wn.NOUN and `*`.
If pos == `*`, all the synsets are retrieved and initialized for fast retrieval the next time.
Returns
-------
list of Synsets
Lists the Synsets which have `pos` as part-of-speech.
Empty list, if `pos` not in [wn.ADJ, wn.ADV, wn.VERB, wn.NOUN, `*`].
"""
def _get_unique_synset_idxes(pos):
idxes = []
with codecs.open(_LIT_POS_FILE,'rb', 'utf-8') as fin:
if pos is None:
for line in fin:
split_line = line.strip().split(':')
idxes.extend([int(x) for x in split_line[2].split()])
else:
for line in fin:
split_line = line.strip().split(':')
if split_line[1] == pos:
idxes.extend([int(x) for x in split_line[2].split()])
idxes = list(set(idxes))
idxes.sort()
return idxes
if pos in LOADED_POS:
return [SYNSETS_DICT[idx] for lemma in LEM_POS_2_SS_IDX for idx in LEM_POS_2_SS_IDX[lemma][pos]]
else:
synset_idxes = _get_unique_synset_idxes(pos)
if len(synset_idxes) == 0:
return []
stored_synsets = [SYNSETS_DICT[synset_idxes[i]] for i in range(len(synset_idxes)) if synset_idxes[i] in SYNSETS_DICT]
unstored_synset_idxes = [synset_idxes[i] for i in range(len(synset_idxes)) if synset_idxes[i] not in SYNSETS_DICT]
synset_offsets = _get_synset_offsets(unstored_synset_idxes)
synsets = _get_synsets(synset_offsets)
for synset in synsets:
for variant in synset.get_variants():
LEM_POS_2_SS_IDX[variant.literal][synset.pos].append(synset.id)
LOADED_POS.add(pos)
return stored_synsets + synsets | Return all the synsets which have the provided pos.
Notes
-----
Returns thousands or tens of thousands of synsets - first time will take significant time.
Useful for initializing synsets as each returned synset is also stored in a global dictionary for fast retrieval the next time.
Parameters
----------
pos : str
Part-of-speech of the sought synsets. Sensible alternatives are wn.ADJ, wn.ADV, wn.VERB, wn.NOUN and `*`.
If pos == `*`, all the synsets are retrieved and initialized for fast retrieval the next time.
Returns
-------
list of Synsets
Lists the Synsets which have `pos` as part-of-speech.
Empty list, if `pos` not in [wn.ADJ, wn.ADV, wn.VERB, wn.NOUN, `*`]. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L258-L316 |
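A sketch of bulk loading; as the note above says, the first call parses the index files and can take significant time, after which the synsets are cached. The import path is an assumption.

from estnltk.wordnet import wn

verb_synsets = wn.all_synsets(pos=wn.VERB)   # slow on first call, cached afterwards
print(len(verb_synsets), "verb synsets")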
estnltk/estnltk | estnltk/wordnet/wn.py | lemma | def lemma(lemma_key):
"""Returns the Lemma object with the given key.
Parameters
----------
lemma_key : str
Key of the returned lemma.
Returns
-------
Lemma
Lemma matching the `lemma_key`.
"""
if lemma_key in LEMMAS_DICT:
return LEMMAS_DICT[lemma_key]
split_lemma_key = lemma_key.split('.')
synset_key = '.'.join(split_lemma_key[:3])
lemma_literal = split_lemma_key[3]
lemma_obj = Lemma(synset_key,lemma_literal)
LEMMAS_DICT[lemma_key] = lemma_obj
return lemma_obj | python | def lemma(lemma_key):
"""Returns the Lemma object with the given key.
Parameters
----------
lemma_key : str
Key of the returned lemma.
Returns
-------
Lemma
Lemma matching the `lemma_key`.
"""
if lemma_key in LEMMAS_DICT:
return LEMMAS_DICT[lemma_key]
split_lemma_key = lemma_key.split('.')
synset_key = '.'.join(split_lemma_key[:3])
lemma_literal = split_lemma_key[3]
lemma_obj = Lemma(synset_key,lemma_literal)
LEMMAS_DICT[lemma_key] = lemma_obj
return lemma_obj | Returns the Lemma object with the given key.
Parameters
----------
lemma_key : str
Key of the returned lemma.
Returns
-------
Lemma
Lemma matching the `lemma_key`. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L318-L341 |
estnltk/estnltk | estnltk/wordnet/wn.py | lemmas | def lemmas(lemma,pos=None):
"""Returns all the Lemma objects of which name is `lemma` and which have `pos` as part
of speech.
Parameters
----------
lemma : str
Literal of the sought Lemma objects.
pos : str, optional
Part of speech of the sought Lemma objects. If None, matches any part of speech.
Defaults to None
Returns
-------
list of Lemmas
Lists all the matched Lemmas.
"""
lemma = lemma.lower()
return [lemma_obj
for synset in synsets(lemma,pos)
for lemma_obj in synset.lemmas()
if lemma_obj.name.lower() == lemma] | python | def lemmas(lemma,pos=None):
"""Returns all the Lemma objects of which name is `lemma` and which have `pos` as part
of speech.
Parameters
----------
lemma : str
Literal of the sought Lemma objects.
pos : str, optional
Part of speech of the sought Lemma objects. If None, matches any part of speech.
Defaults to None
Returns
-------
list of Lemmas
Lists all the matched Lemmas.
"""
lemma = lemma.lower()
return [lemma_obj
for synset in synsets(lemma,pos)
for lemma_obj in synset.lemmas()
if lemma_obj.name.lower() == lemma] | Returns all the Lemma objects whose name is `lemma` and which have `pos` as part
of speech.
Parameters
----------
lemma : str
Literal of the sought Lemma objects.
pos : str, optional
Part of speech of the sought Lemma objects. If None, matches any part of speech.
Defaults to None
Returns
-------
list of Lemmas
Lists all the matched Lemmas. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L348-L369 |
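A sketch tying lemma() and lemmas() together; 'koer' is an assumed example literal, and the full lemma key below is built as synset_key plus the literal, as the parsing inside lemma() implies.

from estnltk.wordnet import wn

for lem in wn.lemmas("koer"):                # every Lemma spelled 'koer', any POS
    print(lem.name, "->", lem.synset().name)

lem = wn.lemma("koer.n.01.koer")             # assumed key: lemma.pos.sense_no.literal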
estnltk/estnltk | estnltk/wordnet/wn.py | Synset._recursive_hypernyms | def _recursive_hypernyms(self, hypernyms):
"""Finds all the hypernyms of the synset transitively.
Notes
-----
Internal method. Do not call directly.
Parameters
----------
hypernyms : set of Synsets
A set of hypernyms met so far.
Returns
-------
set of Synsets
Returns the input set.
"""
hypernyms |= set(self.hypernyms())
for synset in self.hypernyms():
hypernyms |= synset._recursive_hypernyms(hypernyms)
return hypernyms | python | def _recursive_hypernyms(self, hypernyms):
"""Finds all the hypernyms of the synset transitively.
Notes
-----
Internal method. Do not call directly.
Parameters
----------
hypernyms : set of Synsets
A set of hypernyms met so far.
Returns
-------
set of Synsets
Returns the input set.
"""
hypernyms |= set(self.hypernyms())
for synset in self.hypernyms():
hypernyms |= synset._recursive_hypernyms(hypernyms)
return hypernyms | Finds all the hypernyms of the synset transitively.
Notes
-----
Internal method. Do not call directly.
Parameters
----------
hypernyms : set of Synsets
A set of hypernyms met so far.
Returns
-------
set of Synsets
Returns the input set. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L427-L449 |
estnltk/estnltk | estnltk/wordnet/wn.py | Synset._min_depth | def _min_depth(self):
"""Finds minimum path length from the root.
Notes
-----
Internal method. Do not call directly.
Returns
-------
int
Minimum path length from the root.
"""
if "min_depth" in self.__dict__:
return self.__dict__["min_depth"]
min_depth = 0
hypernyms = self.hypernyms()
if hypernyms:
min_depth = 1 + min(h._min_depth() for h in hypernyms)
self.__dict__["min_depth"] = min_depth
return min_depth | python | def _min_depth(self):
"""Finds minimum path length from the root.
Notes
-----
Internal method. Do not call directly.
Returns
-------
int
Minimum path length from the root.
"""
if "min_depth" in self.__dict__:
return self.__dict__["min_depth"]
min_depth = 0
hypernyms = self.hypernyms()
if hypernyms:
min_depth = 1 + min(h._min_depth() for h in hypernyms)
self.__dict__["min_depth"] = min_depth
return min_depth | Finds minimum path length from the root.
Notes
-----
Internal method. Do not call directly.
Returns
-------
int
Minimum path length from the root. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L451-L473 |
estnltk/estnltk | estnltk/wordnet/wn.py | Synset.get_related_synsets | def get_related_synsets(self,relation):
"""Retrieves all the synsets which are related by given relation.
Parameters
----------
relation : str
Name of the relation via which the sought synsets are linked.
Returns
-------
list of Synsets
Synsets which are related via `relation`.
"""
results = []
for relation_candidate in self._raw_synset.internalLinks:
if relation_candidate.name == relation:
linked_synset = synset(_get_key_from_raw_synset(relation_candidate.target_concept))
relation_candidate.target_concept = linked_synset._raw_synset
results.append(linked_synset)
return results | python | def get_related_synsets(self,relation):
"""Retrieves all the synsets which are related by given relation.
Parameters
----------
relation : str
Name of the relation via which the sought synsets are linked.
Returns
-------
list of Synsets
Synsets which are related via `relation`.
"""
results = []
for relation_candidate in self._raw_synset.internalLinks:
if relation_candidate.name == relation:
linked_synset = synset(_get_key_from_raw_synset(relation_candidate.target_concept))
relation_candidate.target_concept = linked_synset._raw_synset
results.append(linked_synset)
return results | Retrieves all the synsets which are related by given relation.
Parameters
----------
relation : str
Name of the relation via which the sought synsets are linked.
Returns
-------
list of Synsets
Synsets which are related via `relation`. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L543-L564 |
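A sketch of relation-based lookup; the relation name below is purely hypothetical, since this record does not list the relation names used by the underlying Estonian WordNet files, so check the names in your data before relying on it.

from estnltk.wordnet import wn

ss = wn.synsets("koer")[0]              # assumed example lemma
relation = "has_hyponym"                # hypothetical relation name; verify against your data
print([r.name for r in ss.get_related_synsets(relation)])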
estnltk/estnltk | estnltk/wordnet/wn.py | Synset.closure | def closure(self, relation, depth=float('inf')):
"""Finds all the ancestors of the synset using provided relation.
Parameters
----------
relation : str
Name of the relation which is recursively used to fetch the ancestors.
Returns
-------
list of Synsets
Returns the ancestors of the synset via given relations.
"""
ancestors = []
unvisited_ancestors = [(synset,1) for synset in self.get_related_synsets(relation)]
while len(unvisited_ancestors) > 0:
ancestor_depth = unvisited_ancestors.pop()
if ancestor_depth[1] > depth:
continue
unvisited_ancestors.extend([(synset,ancestor_depth[1]+1) for synset in ancestor_depth[0].get_related_synsets(relation)])
ancestors.append(ancestor_depth[0])
return list(set(ancestors)) | python | def closure(self, relation, depth=float('inf')):
"""Finds all the ancestors of the synset using provided relation.
Parameters
----------
relation : str
Name of the relation which is recursively used to fetch the ancestors.
Returns
-------
list of Synsets
Returns the ancestors of the synset via given relations.
"""
ancestors = []
unvisited_ancestors = [(synset,1) for synset in self.get_related_synsets(relation)]
while len(unvisited_ancestors) > 0:
ancestor_depth = unvisited_ancestors.pop()
if ancestor_depth[1] > depth:
continue
unvisited_ancestors.extend([(synset,ancestor_depth[1]+1) for synset in ancestor_depth[0].get_related_synsets(relation)])
ancestors.append(ancestor_depth[0])
return list(set(ancestors)) | Finds all the ancestors of the synset using provided relation.
Parameters
----------
relation : str
Name of the relation which is recursively used to fetch the ancestors.
Returns
-------
list of Synsets
Returns the ancestors of the synset via given relations. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L566-L591 |
estnltk/estnltk | estnltk/wordnet/wn.py | Synset.root_hypernyms | def root_hypernyms(self):
"""Retrieves all the root hypernyms.
Returns
-------
list of Synsets
Roots via hypernymy relation.
"""
visited = set()
hypernyms_next_level = set(self.hypernyms())
current_hypernyms = set(hypernyms_next_level)
while len(hypernyms_next_level) > 0:
current_hypernyms = set(hypernyms_next_level)
hypernyms_next_level = set()
for synset in current_hypernyms:
if synset in visited:
continue
visited.add(synset)
hypernyms_next_level |= set(synset.hypernyms())
return list(current_hypernyms) | python | def root_hypernyms(self):
"""Retrieves all the root hypernyms.
Returns
-------
list of Synsets
Roots via hypernymy relation.
"""
visited = set()
hypernyms_next_level = set(self.hypernyms())
current_hypernyms = set(hypernyms_next_level)
while len(hypernyms_next_level) > 0:
current_hypernyms = set(hypernyms_next_level)
hypernyms_next_level = set()
for synset in current_hypernyms:
if synset in visited:
continue
visited.add(synset)
hypernyms_next_level |= set(synset.hypernyms())
return list(current_hypernyms) | Retrieves all the root hypernyms.
Returns
-------
list of Synsets
Roots via hypernymy relation. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L648-L671 |
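A sketch contrasting direct hypernyms with the taxonomy roots; hypernyms() is the method this code calls internally, and 'koer' is an assumed example lemma.

from estnltk.wordnet import wn

ss = wn.synsets("koer", pos=wn.NOUN)[0]
print([h.name for h in ss.hypernyms()])        # one step up the hierarchy
print([r.name for r in ss.root_hypernyms()])   # the topmost ancestors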
estnltk/estnltk | estnltk/wordnet/wn.py | Synset.lch_similarity | def lch_similarity(self, synset):
"""Calculates Leacock and Chodorow's similarity between the two synsets.
Notes
-----
Similarity is calculated using the formula -log( (dist(synset1,synset2)+1) / (2*maximum taxonomy depth) ).
Parameters
----------
synset : Synset
Synset from which the similarity is calculated.
Returns
-------
float
Leacock and Chodorow's similarity from `synset`.
None, if the synsets are not connected via hypernymy/hyponymy relations; this is obviously the case if their parts of speech differ.
"""
if self._raw_synset.pos != synset._raw_synset.pos:
return None
depth = MAX_TAXONOMY_DEPTHS[self._raw_synset.pos]
distance = self._shortest_path_distance(synset)
if distance >= 0:
return -math.log((distance + 1) / (2.0 * depth))
else:
return None | python | def lch_similarity(self, synset):
"""Calculates Leacock and Chodorow's similarity between the two synsets.
Notes
-----
Similarity is calculated using the formula -log( (dist(synset1,synset2)+1) / (2*maximum taxonomy depth) ).
Parameters
----------
synset : Synset
Synset from which the similarity is calculated.
Returns
-------
float
Leacock and Chodorow's similarity from `synset`.
None, if the synsets are not connected via hypernymy/hyponymy relations; this is obviously the case if their parts of speech differ.
"""
if self._raw_synset.pos != synset._raw_synset.pos:
return None
depth = MAX_TAXONOMY_DEPTHS[self._raw_synset.pos]
distance = self._shortest_path_distance(synset)
if distance >= 0:
return -math.log((distance + 1) / (2.0 * depth))
else:
return None | Calculates Leacock and Chodorow's similarity between the two synsets.
Notes
-----
Similarity is calculated using the formula -log( (dist(synset1,synset2)+1) / (2*maximum taxonomy depth) ).
Parameters
----------
synset : Synset
Synset from which the similarity is calculated.
Returns
-------
float
Leacock and Chodorow's similarity from `synset`.
None, if the synsets are not connected via hypernymy/hyponymy relations; this is obviously the case if their parts of speech differ. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L694-L724 |
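A sketch of the Leacock-Chodorow measure; 'koer' (dog) and 'kass' (cat) are assumed example lemmas and the import path is an assumption.

from estnltk.wordnet import wn

dog = wn.synsets("koer", pos=wn.NOUN)[0]
cat = wn.synsets("kass", pos=wn.NOUN)[0]
print(dog.lch_similarity(cat))   # None if the parts of speech differ or no path exists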
estnltk/estnltk | estnltk/wordnet/wn.py | Synset.wup_similarity | def wup_similarity(self, target_synset):
"""Calculates Wu and Palmer's similarity between the two synsets.
Notes
-----
Similarity is calculated using the formula ( 2*depth(least_common_subsumer(synset1,synset2)) ) / ( depth(synset1) + depth(synset2) )
Parameters
----------
target_synset : Synset
Synset from which the similarity is calculated.
Returns
-------
float
Wu and Palmer's similarity from `synset`.
"""
lchs = self.lowest_common_hypernyms(target_synset)
lcs_depth = lchs[0]._min_depth() if lchs and len(lchs) else None
self_depth = self._min_depth()
other_depth = target_synset._min_depth()
if lcs_depth is None or self_depth is None or other_depth is None:
return None
return (2.0 * lcs_depth) / (self_depth + other_depth) | python | def wup_similarity(self, target_synset):
"""Calculates Wu and Palmer's similarity between the two synsets.
Notes
-----
Similarity is calculated using the formula ( 2*depth(least_common_subsumer(synset1,synset2)) ) / ( depth(synset1) + depth(synset2) )
Parameters
----------
target_synset : Synset
Synset from which the similarity is calculated.
Returns
-------
float
Wu and Palmer's similarity from `synset`.
"""
lchs = self.lowest_common_hypernyms(target_synset)
lcs_depth = lchs[0]._min_depth() if lchs and len(lchs) else None
self_depth = self._min_depth()
other_depth = target_synset._min_depth()
if lcs_depth is None or self_depth is None or other_depth is None:
return None
return (2.0 * lcs_depth) / (self_depth + other_depth) | Calculates Wu and Palmer's similarity between the two synsets.
Notes
-----
Similarity is calculated using the formula ( 2*depth(least_common_subsumer(synset1,synset2)) ) / ( depth(synset1) + depth(synset2) )
Parameters
----------
target_synset : Synset
Synset from which the similarity is calculated.
Returns
-------
float
Wu and Palmer's similarity from `synset`. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L726-L751 |
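The same assumed pair of lemmas run through the Wu-Palmer measure, following the formula stated in the notes above.

from estnltk.wordnet import wn

dog = wn.synsets("koer", pos=wn.NOUN)[0]
cat = wn.synsets("kass", pos=wn.NOUN)[0]
print(dog.wup_similarity(cat))   # 2*depth(lcs) / (depth(dog) + depth(cat)), or None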
estnltk/estnltk | estnltk/wordnet/wn.py | Synset.definition | def definition(self):
"""Returns the definition of the synset.
Returns
-------
str
Definition of the synset as a new-line separated concatenated string from all its variants' definitions.
"""
return '\n'.join([variant.gloss for variant in self._raw_synset.variants if variant.gloss]) | python | def definition(self):
"""Returns the definition of the synset.
Returns
-------
str
Definition of the synset as a new-line separated concatenated string from all its variants' definitions.
"""
return '\n'.join([variant.gloss for variant in self._raw_synset.variants if variant.gloss]) | Returns the definition of the synset.
Returns
-------
str
Definition of the synset as a new-line separated concatenated string from all its variants' definitions. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L764-L773 |
estnltk/estnltk | estnltk/wordnet/wn.py | Synset.examples | def examples(self):
"""Returns the examples of the synset.
Returns
-------
list of str
List of its variants' examples.
"""
examples = []
for example in [variant.examples for variant in self._raw_synset.variants if len(variant.examples)]:
examples.extend(example)
return examples | python | def examples(self):
"""Returns the examples of the synset.
Returns
-------
list of str
List of its variants' examples.
"""
examples = []
for example in [variant.examples for variant in self._raw_synset.variants if len(variant.examples)]:
examples.extend(example)
return examples | Returns the examples of the synset.
Returns
-------
list of str
List of its variants' examples. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L775-L787 |
estnltk/estnltk | estnltk/wordnet/wn.py | Synset.lemmas | def lemmas(self):
"""Returns the synset's lemmas/variants' literal represantions.
Returns
-------
list of Lemmas
List of its variations' literals as Lemma objects.
"""
return [lemma("%s.%s"%(self.name,variant.literal)) for variant in self._raw_synset.variants] | python | def lemmas(self):
"""Returns the synset's lemmas/variants' literal represantions.
Returns
-------
list of Lemmas
List of its variations' literals as Lemma objects.
"""
return [lemma("%s.%s"%(self.name,variant.literal)) for variant in self._raw_synset.variants] | Returns the synset's lemmas/variants' literal representations.
Returns
-------
list of Lemmas
List of its variations' literals as Lemma objects. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L789-L798 |
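A sketch that prints the gloss, examples and member lemmas of one synset; 'koer' is an assumed example lemma and the import path is an assumption.

from estnltk.wordnet import wn

ss = wn.synsets("koer")[0]
print(ss.definition())                     # newline-joined variant glosses
print(ss.examples())                       # usage examples collected from the variants
print([lem.name for lem in ss.lemmas()])   # member literals as Lemma objects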
estnltk/estnltk | estnltk/wordnet/wn.py | Synset.lowest_common_hypernyms | def lowest_common_hypernyms(self,target_synset):
"""Returns the common hypernyms of the synset and the target synset, which are furthest from the closest roots.
Parameters
----------
target_synset : Synset
Synset with which the common hypernyms are sought.
Returns
-------
list of Synsets
Common synsets which are the furthest from the closest roots.
"""
self_hypernyms = self._recursive_hypernyms(set())
other_hypernyms = target_synset._recursive_hypernyms(set())
common_hypernyms = self_hypernyms.intersection(other_hypernyms)
annot_common_hypernyms = [(hypernym, hypernym._min_depth()) for hypernym in common_hypernyms]
annot_common_hypernyms.sort(key = lambda annot_hypernym: annot_hypernym[1],reverse=True)
max_depth = annot_common_hypernyms[0][1] if len(annot_common_hypernyms) > 0 else None
if max_depth is not None:
return [annot_common_hypernym[0] for annot_common_hypernym in annot_common_hypernyms if annot_common_hypernym[1] == max_depth]
else:
return None | python | def lowest_common_hypernyms(self,target_synset):
"""Returns the common hypernyms of the synset and the target synset, which are furthest from the closest roots.
Parameters
----------
target_synset : Synset
Synset with which the common hypernyms are sought.
Returns
-------
list of Synsets
Common synsets which are the furthest from the closest roots.
"""
self_hypernyms = self._recursive_hypernyms(set())
other_hypernyms = target_synset._recursive_hypernyms(set())
common_hypernyms = self_hypernyms.intersection(other_hypernyms)
annot_common_hypernyms = [(hypernym, hypernym._min_depth()) for hypernym in common_hypernyms]
annot_common_hypernyms.sort(key = lambda annot_hypernym: annot_hypernym[1],reverse=True)
max_depth = annot_common_hypernyms[0][1] if len(annot_common_hypernyms) > 0 else None
if max_depth is not None:
return [annot_common_hypernym[0] for annot_common_hypernym in annot_common_hypernyms if annot_common_hypernym[1] == max_depth]
else:
return None | Returns the common hypernyms of the synset and the target synset, which are furthest from the closest roots.
Parameters
----------
target_synset : Synset
Synset with which the common hypernyms are sought.
Returns
-------
list of Synsets
Common synsets which are the furthest from the closest roots. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L800-L827 |
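A sketch of finding the deepest shared ancestor of two synsets; the two lemmas are assumed examples.

from estnltk.wordnet import wn

dog = wn.synsets("koer", pos=wn.NOUN)[0]
cat = wn.synsets("kass", pos=wn.NOUN)[0]
lchs = dog.lowest_common_hypernyms(cat)    # deepest common hypernyms, or None
if lchs:
    print([ss.name for ss in lchs])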
estnltk/estnltk | estnltk/wordnet/wn.py | Lemma.synset | def synset(self):
"""Returns synset into which the given lemma belongs to.
Returns
-------
Synset
Synset to which the given lemma belongs.
"""
return synset('%s.%s.%s.%s'%(self.synset_literal,self.synset_pos,self.synset_sense,self.literal)) | python | def synset(self):
"""Returns synset into which the given lemma belongs to.
Returns
-------
Synset
Synset to which the given lemma belongs.
"""
return synset('%s.%s.%s.%s'%(self.synset_literal,self.synset_pos,self.synset_sense,self.literal)) | Returns the synset to which the given lemma belongs.
Returns
-------
Synset
Synset to which the given lemma belongs. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L864-L873 |
estnltk/estnltk | estnltk/syntax/syntax_preprocessing.py | convert_vm_json_to_mrf | def convert_vm_json_to_mrf( vabamorf_json ):
''' Converts from vabamorf's JSON output, given as dict, into pre-syntactic mrf
format, given as a list of lines, as in the output of etmrf.
The aimed format looks something like this:
<s>
Kolmandaks
kolmandaks+0 //_D_ //
kolmas+ks //_O_ sg tr //
kihutas
kihuta+s //_V_ s //
end
end+0 //_Y_ ? //
ise+0 //_P_ sg p //
soomlane
soomlane+0 //_S_ sg n //
</s>
'''
if not isinstance( vabamorf_json, dict ):
raise Exception(' Expected dict as an input argument! ')
json_sentences = []
# 1) flatten paragraphs
if 'paragraphs' in vabamorf_json:
for pr in vabamorf_json['paragraphs']:
if 'sentences' in pr:
for sent in pr['sentences']:
json_sentences.append( sent )
# 2) flatten sentences
elif 'sentences' in vabamorf_json:
for sent in vabamorf_json['sentences']:
json_sentences.append( sent )
# 3) Iterate over sentences and perform conversion
results = []
for sentJson in json_sentences:
results.append('<s>')
for wordJson in sentJson['words']:
if wordJson['text'] == '<s>' or wordJson['text'] == '</s>':
continue
wordStr = wordJson['text']
# Escape double quotation marks
wordStr = _esc_double_quotes( wordStr )
results.append( wordStr )
for analysisJson in wordJson['analysis']:
root = analysisJson['root']
root = _esc_double_quotes( root )
# NB! ending="0" erineb ending=""-st:
# 1) eestlane (ending="0");
# 2) Rio (ending="") de (ending="") Jaineros;
ending = analysisJson[ENDING]
pos = analysisJson['partofspeech']
clitic = analysisJson['clitic']
form = analysisJson['form']
if pos == 'Z':
results.append( ''.join([' ',root,' //_Z_ //']) )
else:
results.append( ''.join([' ',root,'+',ending,clitic,' //', '_',pos,'_ ',form,' //']) )
if 'analysis' not in wordJson:
results.append( ' '+'####' )
results.append('</s>')
return results | python | def convert_vm_json_to_mrf( vabamorf_json ):
''' Converts from vabamorf's JSON output, given as dict, into pre-syntactic mrf
format, given as a list of lines, as in the output of etmrf.
The aimed format looks something like this:
<s>
Kolmandaks
kolmandaks+0 //_D_ //
kolmas+ks //_O_ sg tr //
kihutas
kihuta+s //_V_ s //
end
end+0 //_Y_ ? //
ise+0 //_P_ sg p //
soomlane
soomlane+0 //_S_ sg n //
</s>
'''
if not isinstance( vabamorf_json, dict ):
raise Exception(' Expected dict as an input argument! ')
json_sentences = []
# 1) flatten paragraphs
if 'paragraphs' in vabamorf_json:
for pr in vabamorf_json['paragraphs']:
if 'sentences' in pr:
for sent in pr['sentences']:
json_sentences.append( sent )
# 2) flatten sentences
elif 'sentences' in vabamorf_json:
for sent in vabamorf_json['sentences']:
json_sentences.append( sent )
# 3) Iterate over sentences and perform conversion
results = []
for sentJson in json_sentences:
results.append('<s>')
for wordJson in sentJson['words']:
if wordJson['text'] == '<s>' or wordJson['text'] == '</s>':
continue
wordStr = wordJson['text']
# Escape double quotation marks
wordStr = _esc_double_quotes( wordStr )
results.append( wordStr )
for analysisJson in wordJson['analysis']:
root = analysisJson['root']
root = _esc_double_quotes( root )
# NB! ending="0" erineb ending=""-st:
# 1) eestlane (ending="0");
# 2) Rio (ending="") de (ending="") Jaineros;
ending = analysisJson[ENDING]
pos = analysisJson['partofspeech']
clitic = analysisJson['clitic']
form = analysisJson['form']
if pos == 'Z':
results.append( ''.join([' ',root,' //_Z_ //']) )
else:
results.append( ''.join([' ',root,'+',ending,clitic,' //', '_',pos,'_ ',form,' //']) )
if 'analysis' not in wordJson:
results.append( ' '+'####' )
results.append('</s>')
return results | Converts from vabamorf's JSON output, given as dict, into pre-syntactic mrf
format, given as a list of lines, as in the output of etmrf.
The aimed format looks something like this:
<s>
Kolmandaks
kolmandaks+0 //_D_ //
kolmas+ks //_O_ sg tr //
kihutas
kihuta+s //_V_ s //
end
end+0 //_Y_ ? //
ise+0 //_P_ sg p //
soomlane
soomlane+0 //_S_ sg n //
</s> | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L95-L153 |
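A sketch of feeding this converter a hand-built dict with the structure it actually reads ('sentences' -> 'words' -> 'analysis'); the import path follows the file path above and the analysis values are toy examples.

from estnltk.syntax.syntax_preprocessing import convert_vm_json_to_mrf   # assumed import path

vm_json = {
    "sentences": [
        {"words": [
            {"text": "soomlane",
             "analysis": [{"root": "soomlane", "ending": "0", "clitic": "",
                           "partofspeech": "S", "form": "sg n"}]}
        ]}
    ]
}
for line in convert_vm_json_to_mrf(vm_json):
    print(line)   # '<s>', the word, its analysis line(s), '</s>'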
estnltk/estnltk | estnltk/syntax/syntax_preprocessing.py | convert_Text_to_mrf | def convert_Text_to_mrf( text ):
''' Converts from Text object into pre-syntactic mrf format, given as a list of
lines, as in the output of etmrf.
*) If the input Text has already been morphologically analysed, uses the existing
analysis;
*) If the input has not been analysed, performs the analysis with required settings:
word guessing is turned on, proper-name analyses are turned off;
'''
from estnltk.text import Text
if not isinstance( text, Text ):
raise Exception(' Expected estnltk\'s Text as an input argument! ')
if not text.is_tagged( ANALYSIS ):
# If morphological analysis has not been performed yet, set the right arguments and
# perform the analysis
kwargs = text.get_kwargs()
kwargs['vabamorf'] = True
kwargs['guess'] = True
kwargs['propername'] = False
kwargs['disambiguate'] = False
text.__kwargs = kwargs
text = text.tag_analysis()
# Iterate over sentences and perform conversion
results = []
for sentence in text.divide( layer=WORDS, by=SENTENCES ):
results.append('<s>')
for i in range(len(sentence)):
wordJson = sentence[i]
wordStr = wordJson[TEXT]
# Escape double quotation marks
wordStr = _esc_double_quotes( wordStr )
results.append( wordStr )
for analysisJson in wordJson[ANALYSIS]:
root = analysisJson[ROOT]
root = _esc_double_quotes( root )
# NB! ending="0" erineb ending=""-st:
# 1) eestlane (ending="0");
# 2) Rio (ending="") de (ending="") Jaineros;
ending = analysisJson[ENDING]
pos = analysisJson[POSTAG]
clitic = analysisJson[CLITIC]
form = analysisJson[FORM]
if pos == 'Z':
results.append( ''.join([' ',root,' //_Z_ //']) )
else:
results.append( ''.join([' ',root,'+',ending,clitic,' //', '_',pos,'_ ',form,' //']) )
if ANALYSIS not in wordJson:
results.append( ' '+'####' )
results.append('</s>')
return results | python | def convert_Text_to_mrf( text ):
''' Converts from Text object into pre-syntactic mrf format, given as a list of
lines, as in the output of etmrf.
*) If the input Text has already been morphologically analysed, uses the existing
analysis;
*) If the input has not been analysed, performs the analysis with required settings:
word guessing is turned on, proper-name analyses are turned off;
'''
from estnltk.text import Text
if not isinstance( text, Text ):
raise Exception(' Expected estnltk\'s Text as an input argument! ')
if not text.is_tagged( ANALYSIS ):
# If morphological analysis has not been performed yet, set the right arguments and
# perform the analysis
kwargs = text.get_kwargs()
kwargs['vabamorf'] = True
kwargs['guess'] = True
kwargs['propername'] = False
kwargs['disambiguate'] = False
text.__kwargs = kwargs
text = text.tag_analysis()
# Iterate over sentences and perform conversion
results = []
for sentence in text.divide( layer=WORDS, by=SENTENCES ):
results.append('<s>')
for i in range(len(sentence)):
wordJson = sentence[i]
wordStr = wordJson[TEXT]
# Escape double quotation marks
wordStr = _esc_double_quotes( wordStr )
results.append( wordStr )
for analysisJson in wordJson[ANALYSIS]:
root = analysisJson[ROOT]
root = _esc_double_quotes( root )
# NB! ending="0" erineb ending=""-st:
# 1) eestlane (ending="0");
# 2) Rio (ending="") de (ending="") Jaineros;
ending = analysisJson[ENDING]
pos = analysisJson[POSTAG]
clitic = analysisJson[CLITIC]
form = analysisJson[FORM]
if pos == 'Z':
results.append( ''.join([' ',root,' //_Z_ //']) )
else:
results.append( ''.join([' ',root,'+',ending,clitic,' //', '_',pos,'_ ',form,' //']) )
if ANALYSIS not in wordJson:
results.append( ' '+'####' )
results.append('</s>')
return results | Converts from Text object into pre-syntactic mrf format, given as a list of
lines, as in the output of etmrf.
*) If the input Text has already been morphologically analysed, uses the existing
analysis;
*) If the input has not been analysed, performs the analysis with required settings:
word guessing is turned on, proper-name analyses are turned off; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L156-L204 |
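A sketch of the Text-based entry point; it analyses the text itself when no analysis layer is present, as described above. The import paths are assumptions based on the file paths, and the sentence is a toy built from the example words shown earlier.

from estnltk import Text
from estnltk.syntax.syntax_preprocessing import convert_Text_to_mrf   # assumed import path

text = Text("Kolmandaks kihutas end ise soomlane.")
print("\n".join(convert_Text_to_mrf(text)))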
estnltk/estnltk | estnltk/syntax/syntax_preprocessing.py | load_fs_mrf_to_syntax_mrf_translation_rules | def load_fs_mrf_to_syntax_mrf_translation_rules( rulesFile ):
''' Loads rules that can be used to convert from Filosoft's mrf format to
syntactic analyzer's format. Returns a dict containing rules.
Expects that each line in the input file contains a single rule, and that
different parts of the rule separated by @ symbols, e.g.
1@_S_ ?@Substantiiv apellatiiv@_S_ com @Noun common@Nc@NCSX@kesk-
32@_H_ ?@Substantiiv prooprium@_S_ prop @Noun proper@Np@NPCSX@Kesk-
313@_A_@Adjektiiv positiiv@_A_ pos@Adjective positive@A-p@ASX@salkus
Only 2nd element and 4th element are extracted from each line; 2nd element
will be the key of the dict entry, and 4th element will be added to the
value of the dict entry (the value is a list of strings);
A list is used for storing values because one Filosoft's analysis could
be mapped to multiple syntactic analyzer's analyses;
Lines that have ¤ in the beginning of the line will be skipped;
'''
rules = {}
in_f = codecs.open(rulesFile, mode='r', encoding='utf-8')
for line in in_f:
line = line.rstrip()
if line.startswith('¤'):
continue
parts = line.split('@')
if len(parts) < 4:
raise Exception(' Unexpected format of the line: ', line)
if parts[1] not in rules:
rules[parts[1]] = []
rules[parts[1]].append( parts[3] )
in_f.close()
return rules | python | def load_fs_mrf_to_syntax_mrf_translation_rules( rulesFile ):
''' Loads rules that can be used to convert from Filosoft's mrf format to
syntactic analyzer's format. Returns a dict containing rules.
Expects that each line in the input file contains a single rule, and that
different parts of the rule separated by @ symbols, e.g.
1@_S_ ?@Substantiiv apellatiiv@_S_ com @Noun common@Nc@NCSX@kesk-
32@_H_ ?@Substantiiv prooprium@_S_ prop @Noun proper@Np@NPCSX@Kesk-
313@_A_@Adjektiiv positiiv@_A_ pos@Adjective positive@A-p@ASX@salkus
Only 2nd element and 4th element are extracted from each line; 2nd element
will be the key of the dict entry, and 4th element will be added to the
value of the dict entry (the value is a list of strings);
A list is used for storing values because one Filosoft's analysis could
be mapped to multiple syntactic analyzer's analyses;
Lines that have ¤ in the beginning of the line will be skipped;
'''
rules = {}
in_f = codecs.open(rulesFile, mode='r', encoding='utf-8')
for line in in_f:
line = line.rstrip()
if line.startswith('¤'):
continue
parts = line.split('@')
if len(parts) < 4:
raise Exception(' Unexpected format of the line: ', line)
if parts[1] not in rules:
rules[parts[1]] = []
rules[parts[1]].append( parts[3] )
in_f.close()
return rules | Loads rules that can be used to convert from Filosoft's mrf format to
syntactic analyzer's format. Returns a dict containing rules.
Expects that each line in the input file contains a single rule, and that
different parts of the rule separated by @ symbols, e.g.
1@_S_ ?@Substantiiv apellatiiv@_S_ com @Noun common@Nc@NCSX@kesk-
32@_H_ ?@Substantiiv prooprium@_S_ prop @Noun proper@Np@NPCSX@Kesk-
313@_A_@Adjektiiv positiiv@_A_ pos@Adjective positive@A-p@ASX@salkus
Only 2nd element and 4th element are extracted from each line; 2nd element
will be the key of the dict entry, and 4th element will be added to the
value of the dict entry (the value is a list of strings);
A list is used for storing values because one Filosoft's analysis could
be mapped to multiple syntactic analyzer's analyses;
Lines that have ¤ in the beginning of the line will be skipped; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L214-L248 |
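A sketch of loading the rule table; 'tmorftrtabel.txt' is the file name mentioned by the conversion routine further below, but its location on disk is an assumption.

from estnltk.syntax.syntax_preprocessing import load_fs_mrf_to_syntax_mrf_translation_rules

rules = load_fs_mrf_to_syntax_mrf_translation_rules("tmorftrtabel.txt")   # path assumed
for key, values in list(rules.items())[:3]:
    print(key, "->", values)   # one Filosoft category mapped to one or more target analyses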
estnltk/estnltk | estnltk/syntax/syntax_preprocessing.py | _convert_punctuation | def _convert_punctuation( line ):
''' Converts given analysis line if it describes punctuation; Uses the set
of predefined punctuation conversion rules from _punctConversions;
_punctConversions should be a list of lists, where each outer list stands
for a single conversion rule and inner list contains a pair of elements:
first is the regexp pattern and the second is the replacement, used in
re.sub( pattern, replacement, line )
Returns the converted line (same as input, if no conversion was
performed);
'''
for [pattern, replacement] in _punctConversions:
lastline = line
line = re.sub(pattern, replacement, line)
if lastline != line:
break
return line | python | def _convert_punctuation( line ):
''' Converts given analysis line if it describes punctuation; Uses the set
of predefined punctuation conversion rules from _punctConversions;
_punctConversions should be a list of lists, where each outer list stands
for a single conversion rule and inner list contains a pair of elements:
first is the regexp pattern and the second is the replacement, used in
re.sub( pattern, replacement, line )
Returns the converted line (same as input, if no conversion was
performed);
'''
for [pattern, replacement] in _punctConversions:
lastline = line
line = re.sub(pattern, replacement, line)
if lastline != line:
break
return line | Converts given analysis line if it describes punctuation; Uses the set
of predefined punctuation conversion rules from _punctConversions;
_punctConversions should be a list of lists, where each outer list stands
for a single conversion rule and inner list contains a pair of elements:
first is the regexp pattern and the second is the replacement, used in
re.sub( pattern, replacement, line )
Returns the converted line (same as input, if no conversion was
performed); | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L280-L297 |
estnltk/estnltk | estnltk/syntax/syntax_preprocessing.py | convert_mrf_to_syntax_mrf | def convert_mrf_to_syntax_mrf( mrf_lines, conversion_rules ):
''' Converts given lines from Filosoft's mrf format to syntactic analyzer's
format, using the morph-category conversion rules from conversion_rules,
and punctuation via method _convert_punctuation();
As a result of conversion, the input list mrf_lines will be modified,
and also returned after a successful conversion;
Morph-category conversion rules should be loaded via method
load_fs_mrf_to_syntax_mrf_translation_rules( rulesFile ),
usually from a file named 'tmorftrtabel.txt';
Note that the resulting list of lines likely has more lines than the
original list had, because the conversion often requires that the
original Filosoft's analysis is expanded into multiple analyses
suitable for the syntactic analyzer;
'''
i = 0
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if line.startswith(' '): # only consider lines of analysis
# 1) Convert punctuation
if _punctOrAbbrev.search(line):
mrf_lines[i] = _convert_punctuation( line )
if '_Y_' not in line:
i += 1
continue
# 2) Convert morphological analyses that have a form specified
withFormMatch = _morfWithForm.search(line)
if withFormMatch:
root = withFormMatch.group(1)
pos = withFormMatch.group(2)
formStr = withFormMatch.group(3)
forms = formStr.split(',')
all_new_lines = []
for form in forms:
morphKey = pos+' '+form.strip()
if morphKey in conversion_rules:
newlines = [ ' '+root+' //'+_esc_que_mark(r)+' //' for r in conversion_rules[morphKey] ]
all_new_lines.extend( newlines )
if all_new_lines:
del mrf_lines[i]
for newline in all_new_lines:
mrf_lines.insert(i, newline)
i += len(newlines)
continue
else:
withoutFormMatch = _morfWithoutForm.search(line)
if withoutFormMatch:
# 3) Convert morphological analyses that have only POS specified
root = withoutFormMatch.group(1)
pos = withoutFormMatch.group(2)
morphKey = pos
all_new_lines = []
if morphKey in conversion_rules:
newlines = [ ' '+root+' //'+_esc_que_mark(r)+' //' for r in conversion_rules[morphKey] ]
all_new_lines.extend( newlines )
if all_new_lines:
del mrf_lines[i]
for newline in all_new_lines:
mrf_lines.insert(i, newline)
i += len(newlines)
continue
i += 1
return mrf_lines | python | def convert_mrf_to_syntax_mrf( mrf_lines, conversion_rules ):
''' Converts given lines from Filosoft's mrf format to syntactic analyzer's
format, using the morph-category conversion rules from conversion_rules,
and punctuation via method _convert_punctuation();
As a result of conversion, the input list mrf_lines will be modified,
and also returned after a successful conversion;
Morph-category conversion rules should be loaded via method
load_fs_mrf_to_syntax_mrf_translation_rules( rulesFile ),
usually from a file named 'tmorftrtabel.txt';
Note that the resulting list of lines likely has more lines than the
original list had, because the conversion often requires that the
original Filosoft's analysis is expanded into multiple analyses
suitable for the syntactic analyzer;
'''
i = 0
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if line.startswith(' '): # only consider lines of analysis
# 1) Convert punctuation
if _punctOrAbbrev.search(line):
mrf_lines[i] = _convert_punctuation( line )
if '_Y_' not in line:
i += 1
continue
# 2) Convert morphological analyses that have a form specified
withFormMatch = _morfWithForm.search(line)
if withFormMatch:
root = withFormMatch.group(1)
pos = withFormMatch.group(2)
formStr = withFormMatch.group(3)
forms = formStr.split(',')
all_new_lines = []
for form in forms:
morphKey = pos+' '+form.strip()
if morphKey in conversion_rules:
newlines = [ ' '+root+' //'+_esc_que_mark(r)+' //' for r in conversion_rules[morphKey] ]
all_new_lines.extend( newlines )
if all_new_lines:
del mrf_lines[i]
for newline in all_new_lines:
mrf_lines.insert(i, newline)
i += len(newlines)
continue
else:
withoutFormMatch = _morfWithoutForm.search(line)
if withoutFormMatch:
# 3) Convert morphological analyses that have only POS specified
root = withoutFormMatch.group(1)
pos = withoutFormMatch.group(2)
morphKey = pos
all_new_lines = []
if morphKey in conversion_rules:
newlines = [ ' '+root+' //'+_esc_que_mark(r)+' //' for r in conversion_rules[morphKey] ]
all_new_lines.extend( newlines )
if all_new_lines:
del mrf_lines[i]
for newline in all_new_lines:
mrf_lines.insert(i, newline)
i += len(newlines)
continue
i += 1
return mrf_lines | Converts given lines from Filosoft's mrf format to syntactic analyzer's
format, using the morph-category conversion rules from conversion_rules,
and punctuation via method _convert_punctuation();
As a result of conversion, the input list mrf_lines will be modified,
and also returned after a successful conversion;
Morph-category conversion rules should be loaded via method
load_fs_mrf_to_syntax_mrf_translation_rules( rulesFile ),
usually from a file named 'tmorftrtabel.txt';
Note that the resulting list of lines likely has more lines than the
original list had, because the conversion often requires that the
original Filosoft's analysis is expanded into multiple analyses
suitable for the syntactic analyzer; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L306-L369 |
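A sketch chaining the steps documented so far: Text-based analysis, loading the translation table, and the expansion performed by this routine. File locations and import paths are assumptions.

from estnltk import Text
from estnltk.syntax.syntax_preprocessing import (convert_Text_to_mrf,
    load_fs_mrf_to_syntax_mrf_translation_rules, convert_mrf_to_syntax_mrf)

lines = convert_Text_to_mrf(Text("Kolmandaks kihutas end ise soomlane."))
rules = load_fs_mrf_to_syntax_mrf_translation_rules("tmorftrtabel.txt")   # assumed path
lines = convert_mrf_to_syntax_mrf(lines, rules)   # modifies and returns the same list
print("\n".join(lines))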
estnltk/estnltk | estnltk/syntax/syntax_preprocessing.py | convert_pronouns | def convert_pronouns( mrf_lines ):
''' Converts pronouns (analysis lines with '_P_') from Filosoft's mrf to
syntactic analyzer's mrf format;
Uses the set of predefined pronoun conversion rules from _pronConversions;
_pronConversions should be a list of lists, where each outer list stands
for a single conversion rule and inner list contains a pair of elements:
first is the regexp pattern and the second is the replacement, used in
re.sub( pattern, replacement, line )
Returns the input mrf list, with the lines converted from one format
to another;
'''
i = 0
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if '_P_' in line: # only consider lines containing pronoun analyses
for [pattern, replacement] in _pronConversions:
lastline = line
line = re.sub(pattern, replacement, line)
if lastline != line:
mrf_lines[i] = line
break
i += 1
return mrf_lines | python | def convert_pronouns( mrf_lines ):
''' Converts pronouns (analysis lines with '_P_') from Filosoft's mrf to
syntactic analyzer's mrf format;
Uses the set of predefined pronoun conversion rules from _pronConversions;
_pronConversions should be a list of lists, where each outer list stands
for a single conversion rule and inner list contains a pair of elements:
first is the regexp pattern and the second is the replacement, used in
re.sub( pattern, replacement, line )
Returns the input mrf list, with the lines converted from one format
to another;
'''
i = 0
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if '_P_' in line: # only consider lines containing pronoun analyses
for [pattern, replacement] in _pronConversions:
lastline = line
line = re.sub(pattern, replacement, line)
if lastline != line:
mrf_lines[i] = line
break
i += 1
return mrf_lines | Converts pronouns (analysis lines with '_P_') from Filosoft's mrf to
syntactic analyzer's mrf format;
Uses the set of predefined pronoun conversion rules from _pronConversions;
_pronConversions should be a list of lists, where each outer list stands
for a single conversion rule and inner list contains a pair of elements:
first is the regexp pattern and the second is the replacement, used in
re.sub( pattern, replacement, line )
Returns the input mrf list, with the lines converted from one format
to another; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L493-L517 |
estnltk/estnltk | estnltk/syntax/syntax_preprocessing.py | remove_duplicate_analyses | def remove_duplicate_analyses( mrf_lines, allow_to_delete_all = True ):
''' Removes duplicate analysis lines from mrf_lines.
Uses special logic for handling adposition analyses ('_K_ pre' && '_K_ post')
that do not have subcategorization information:
*) If a word has both adposition analyses, removes '_K_ pre';
*) If a word has '_K_ post', removes it;
Note that '_K_ pre' and '_K_ post' with subcategorization information will
be kept.
The parameter allow_to_delete_all specifies whether it is allowed to delete
all analyses or not. If allow_to_delete_all == False, then one last analysis
won't be deleted, regardless of whether it should be deleted considering the
adposition-deletion rules;
The original implementation corresponds to the settings allow_to_delete_all=True
(and this is also the default value of the parameter);
Returns the input list where the removals have been applied;
'''
i = 0
seen_analyses = []
analyses_count = 0
to_delete = []
Kpre_index = -1
Kpost_index = -1
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if not line.startswith(' '):
if Kpre_index != -1 and Kpost_index != -1:
# If there was both _K_pre and _K_post, add _K_pre to removables;
to_delete.append( Kpre_index )
elif Kpost_index != -1:
# If there was only _K_post, add _K_post to removables;
to_delete.append( Kpost_index )
# Delete found duplicates
if to_delete:
for k, j in enumerate(sorted(to_delete, reverse=True)):
# If we must preserve at least one analysis, and
# it has been found that all should be deleted, then
# keep the last one
if not allow_to_delete_all and \
analyses_count == len(to_delete) and \
k == len(to_delete) - 1:
continue
# Delete the analysis line
del mrf_lines[j]
i -= 1
# Reset the memory for each new word/token
seen_analyses = []
analyses_count = 0
to_delete = []
Kpre_index = -1
Kpost_index = -1
elif line.startswith(' '): # the line of analysis
analyses_count += 1
if line in seen_analyses:
# Remember line that has been already seen as a duplicate
to_delete.append( i )
else:
# Remember '_K pre' and '_K_ post' indices
if re.search('/_K_\s+pre\s+//', line):
Kpre_index = i
elif re.search('/_K_\s+post\s+//', line):
Kpost_index = i
# Remember that the line has already been seen
seen_analyses.append( line )
i += 1
return mrf_lines | python | def remove_duplicate_analyses( mrf_lines, allow_to_delete_all = True ):
''' Removes duplicate analysis lines from mrf_lines.
Uses special logic for handling adposition analyses ('_K_ pre' && '_K_ post')
that do not have subcategorization information:
*) If a word has both adposition analyses, removes '_K_ pre';
*) If a word has '_K_ post', removes it;
Note that '_K_ pre' and '_K_ post' with subcategorization information will
be kept.
The parameter allow_to_delete_all specifies whether it is allowed to delete
all analyses or not. If allow_to_delete_all == False, then one last analysis
won't be deleted, regardless of whether it should be deleted considering the
adposition-deletion rules;
The original implementation corresponds to the settings allow_to_delete_all=True
(and this is also the default value of the parameter);
Returns the input list where the removals have been applied;
'''
i = 0
seen_analyses = []
analyses_count = 0
to_delete = []
Kpre_index = -1
Kpost_index = -1
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if not line.startswith(' '):
if Kpre_index != -1 and Kpost_index != -1:
# If there was both _K_pre and _K_post, add _K_pre to removables;
to_delete.append( Kpre_index )
elif Kpost_index != -1:
# If there was only _K_post, add _K_post to removables;
to_delete.append( Kpost_index )
# Delete found duplicates
if to_delete:
for k, j in enumerate(sorted(to_delete, reverse=True)):
# If we must preserve at least one analysis, and
# it has been found that all should be deleted, then
# keep the last one
if not allow_to_delete_all and \
analyses_count == len(to_delete) and \
k == len(to_delete) - 1:
continue
# Delete the analysis line
del mrf_lines[j]
i -= 1
# Reset the memory for each new word/token
seen_analyses = []
analyses_count = 0
to_delete = []
Kpre_index = -1
Kpost_index = -1
elif line.startswith(' '): # the line of analysis
analyses_count += 1
if line in seen_analyses:
# Remember line that has been already seen as a duplicate
to_delete.append( i )
else:
# Remember '_K pre' and '_K_ post' indices
if re.search('/_K_\s+pre\s+//', line):
Kpre_index = i
elif re.search('/_K_\s+post\s+//', line):
Kpost_index = i
# Remember that the line has already been seen
seen_analyses.append( line )
i += 1
return mrf_lines | Removes duplicate analysis lines from mrf_lines.
Uses special logic for handling adposition analyses ('_K_ pre' && '_K_ post')
that do not have subcategorization information:
*) If a word has both adposition analyses, removes '_K_ pre';
*) If a word has '_K_ post', removes it;
Note that '_K_ pre' and '_K_ post' with subcategorization information will
be kept.
The parameter allow_to_delete_all specifies whether it is allowed to delete
all analyses or not. If allow_to_delete_all == False, then one last analysis
won't be deleted, regardless of whether it should be deleted considering the
adposition-deletion rules;
The original implementation corresponds to the settings allow_to_delete_all=True
(and this is also the default value of the parameter);
Returns the input list where the removals have been applied; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L528-L595 |
estnltk/estnltk | estnltk/syntax/syntax_preprocessing.py | add_hashtag_info | def add_hashtag_info( mrf_lines ):
''' Augments analysis lines with various hashtag information:
*) marks words with capital beginning with #cap;
*) marks finite verbs with #FinV;
*) marks nud/tud/mine/nu/tu/v/tav/mata/ja forms;
Hashtags are added at the end of the analysis content (just before the
last '//');
Returns the input list where the augmentation has been applied;
'''
i = 0
cap = False
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if not line.startswith(' ') and len(line) > 0:
cap = (line[0]).isupper()
elif line.startswith(' '):
if cap:
line = re.sub('(//.+\S)\s+//', '\\1 #cap //', line)
if _morfFinV.search( line ) and not _morfNotFinV.search( line ):
line = re.sub('(//.+\S)\s+//', '\\1 #FinV //', line)
for [pattern, replacement] in _mrfHashTagConversions:
line = re.sub(pattern, replacement, line)
mrf_lines[i] = line
i += 1
return mrf_lines | python | def add_hashtag_info( mrf_lines ):
''' Augments analysis lines with various hashtag information:
*) marks words with capital beginning with #cap;
*) marks finite verbs with #FinV;
*) marks nud/tud/mine/nu/tu/v/tav/mata/ja forms;
Hashtags are added at the end of the analysis content (just before the
last '//');
Returns the input list where the augmentation has been applied;
'''
i = 0
cap = False
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if not line.startswith(' ') and len(line) > 0:
cap = (line[0]).isupper()
elif line.startswith(' '):
if cap:
line = re.sub('(//.+\S)\s+//', '\\1 #cap //', line)
if _morfFinV.search( line ) and not _morfNotFinV.search( line ):
line = re.sub('(//.+\S)\s+//', '\\1 #FinV //', line)
for [pattern, replacement] in _mrfHashTagConversions:
line = re.sub(pattern, replacement, line)
mrf_lines[i] = line
i += 1
return mrf_lines | Augments analysis lines with various hashtag information:
*) marks words with capital beginning with #cap;
*) marks finite verbs with #FinV;
*) marks nud/tud/mine/nu/tu/v/tav/mata/ja forms;
Hashtags are added at the end of the analysis content (just before the
last '//');
Returns the input list where the augmentation has been applied; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L622-L647 |
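A sketch of the clean-up and tagging steps on a tiny hand-written line list in the mrf layout shown earlier ('<s>', the word, indented analysis lines, '</s>'); the duplicated analysis line is deliberate, and the analysis content itself is a toy example.

from estnltk.syntax.syntax_preprocessing import remove_duplicate_analyses, add_hashtag_info

lines = ['<s>',
         'soomlane',
         '    soomlane+0 //_S_ com sg nom //',
         '    soomlane+0 //_S_ com sg nom //',   # duplicate on purpose
         '</s>']
lines = remove_duplicate_analyses(lines, allow_to_delete_all=False)
lines = add_hashtag_info(lines)   # adds #cap / #FinV / form hashtags where they apply
print('\n'.join(lines))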
estnltk/estnltk | estnltk/syntax/syntax_preprocessing.py | load_subcat_info | def load_subcat_info( subcat_lex_file ):
''' Loads subcategorization rules (for verbs and adpositions) from a text
file.
It is expected that the rules are given as pairs, where the first item is
the lemma (of verb/adposition), followed on the next line by the
subcategorization rule, in the following form:
on the left side of '>' is the condition (POS-tag requirement for the
lemma),
and
on the right side is the listing of subcategorization settings (hashtag
items, e.g. names of morphological cases of nominals);
If there are multiple subcategorization rules to be associated with a
single lemma, different rules are separated by '&'.
Example, an excerpt from the rules file:
läbi
_V_ >#Part &_K_ post >#gen |#nom |#el &_K_ pre >#gen
läbista
_V_ >#NGP-P
läbistu
_V_ >#Intr
Returns a dict of lemma to a-list-of-subcatrules mappings.
'''
rules = {}
nonSpacePattern = re.compile('^\S+$')
posTagPattern = re.compile('_._')
in_f = codecs.open(subcat_lex_file, mode='r', encoding='utf-8')
lemma = ''
subcatRules = ''
for line in in_f:
line = line.rstrip()
if nonSpacePattern.match(line) and not posTagPattern.search(line):
lemma = line
elif posTagPattern.search(line):
subcatRules = line
if len(lemma) > 0 and len(subcatRules) > 0:
if lemma not in rules:
rules[lemma] = []
parts = subcatRules.split('&')
for part in parts:
part = part.strip()
rules[lemma].append( part )
lemma = ''
subcatRules = ''
in_f.close()
#print( len(rules.keys()) ) # 4484
return rules | python | def load_subcat_info( subcat_lex_file ):
''' Loads subcategorization rules (for verbs and adpositions) from a text
file.
It is expected that the rules are given as pairs, where the first item is
the lemma (of verb/adposition), followed on the next line by the
subcategorization rule, in the following form:
on the left side of '>' is the condition (POS-tag requirement for the
lemma),
and
on the right side is the listing of subcategorization settings (hashtag
items, e.g. names of morphological cases of nominals);
If there are multiple subcategorization rules to be associated with a
single lemma, different rules are separated by '&'.
Example, an excerpt from the rules file:
läbi
_V_ >#Part &_K_ post >#gen |#nom |#el &_K_ pre >#gen
läbista
_V_ >#NGP-P
läbistu
_V_ >#Intr
Returns a dict of lemma to a-list-of-subcatrules mappings.
'''
rules = {}
nonSpacePattern = re.compile('^\S+$')
posTagPattern = re.compile('_._')
in_f = codecs.open(subcat_lex_file, mode='r', encoding='utf-8')
lemma = ''
subcatRules = ''
for line in in_f:
line = line.rstrip()
if nonSpacePattern.match(line) and not posTagPattern.search(line):
lemma = line
elif posTagPattern.search(line):
subcatRules = line
if len(lemma) > 0 and len(subcatRules) > 0:
if lemma not in rules:
rules[lemma] = []
parts = subcatRules.split('&')
for part in parts:
part = part.strip()
rules[lemma].append( part )
lemma = ''
subcatRules = ''
in_f.close()
#print( len(rules.keys()) ) # 4484
return rules | Loads subcategorization rules (for verbs and adpositions) from a text
file.
It is expected that the rules are given as pairs, where the first item is
the lemma (of verb/adposition), followed on the next line by the
subcategorization rule, in the following form:
on the left side of '>' is the condition (POS-tag requirement for the
lemma),
and
on the right side is the listing of subcategorization settings (hashtag
items, e.g. names of morphological cases of nominals);
If there are multiple subcategorization rules to be associated with a
single lemma, different rules are separated by '&'.
Example, an excerpt from the rules file:
läbi
_V_ >#Part &_K_ post >#gen |#nom |#el &_K_ pre >#gen
läbista
_V_ >#NGP-P
läbistu
_V_ >#Intr
Returns a dict of lemma to a-list-of-subcatrules mappings. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L657-L705 |
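A standalone sketch of the same parsing logic applied to the rule excerpt quoted in the docstring, using an in-memory list of lines instead of a file (load_subcat_info above does the file-based equivalent):

import re

lines = ['läbi',
         '_V_ >#Part &_K_ post >#gen |#nom |#el &_K_ pre >#gen',
         'läbista',
         '_V_ >#NGP-P']
rules = {}
nonSpacePattern = re.compile(r'^\S+$')   # a lemma line: a single token without spaces
posTagPattern   = re.compile(r'_._')     # a rule line: contains a POS tag such as _V_ or _K_
lemma, subcatRules = '', ''
for line in lines:
    line = line.rstrip()
    if nonSpacePattern.match(line) and not posTagPattern.search(line):
        lemma = line
    elif posTagPattern.search(line):
        subcatRules = line
    if lemma and subcatRules:
        rules.setdefault(lemma, []).extend(p.strip() for p in subcatRules.split('&'))
        lemma, subcatRules = '', ''
print(rules)
# {'läbi': ['_V_ >#Part', '_K_ post >#gen |#nom |#el', '_K_ pre >#gen'], 'läbista': ['_V_ >#NGP-P']}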
estnltk/estnltk | estnltk/syntax/syntax_preprocessing.py | tag_subcat_info | def tag_subcat_info( mrf_lines, subcat_rules ):
''' Adds subcategorization information (hashtags) to verbs and adpositions;
Argument subcat_rules must be a dict containing subcategorization information,
loaded via method load_subcat_info();
Performs word lemma lookups in subcat_rules, and in case of a match, checks
word part-of-speech conditions. If the POS conditions match, adds subcategorization
information either to a single analysis line, or to multiple analysis lines
(depending on the exact conditions in the rule);
Returns the input list where verb/adposition analyses have been augmented
with available subcategorization information;
'''
i = 0
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if line.startswith(' '):
lemma_match = analysisLemmaPat.match(line)
if lemma_match:
lemma = lemma_match.group(1)
# Find whether there is subcategorization info associated
# with the lemma
if lemma in subcat_rules:
analysis_match = analysisPat.search(line)
if not analysis_match:
raise Exception(' Could not find analysis from the line:',line)
analysis = analysis_match.group(1)
for rule in subcat_rules[lemma]:
condition, addition = rule.split('>')
# Check the condition string; If there are multiple conditions,
# all must be satisfied for the rule to fire
condition = condition.strip()
conditions = condition.split()
satisfied1 = [ _check_condition(c, analysis) for c in conditions ]
if all( satisfied1 ):
#
# There can be multiple additions:
# 1) additions without '|' must be added to a single analysis line;
# 2) additions separated by '|' must be placed on separate analysis
# lines;
#
additions = addition.split('|')
j = i
# Add new line or lines
for a in additions:
line_copy = line if i == j else line[:]
items_to_add = a.split()
for item in items_to_add:
if not _check_condition(item, analysis):
line_copy = \
re.sub('(//.+\S)\s+//', '\\1 '+item+' //', line_copy)
if j == i:
# 1) replace the existing line
mrf_lines[i] = line_copy
else:
# 2) add a new line
mrf_lines.insert(i, line_copy)
j += 1
i = j - 1
# No need to search forward
break
i += 1
return mrf_lines | python | def tag_subcat_info( mrf_lines, subcat_rules ):
''' Adds subcategorization information (hashtags) to verbs and adpositions;
Argument subcat_rules must be a dict containing subcategorization information,
loaded via method load_subcat_info();
Performs word lemma lookups in subcat_rules, and in case of a match, checks
word part-of-speech conditions. If the POS conditions match, adds subcategorization
information either to a single analysis line, or to multiple analysis lines
(depending on the exact conditions in the rule);
Returns the input list where verb/adposition analyses have been augmented
with available subcategorization information;
'''
i = 0
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if line.startswith(' '):
lemma_match = analysisLemmaPat.match(line)
if lemma_match:
lemma = lemma_match.group(1)
# Find whether there is subcategorization info associated
# with the lemma
if lemma in subcat_rules:
analysis_match = analysisPat.search(line)
if not analysis_match:
raise Exception(' Could not find analysis from the line:',line)
analysis = analysis_match.group(1)
for rule in subcat_rules[lemma]:
condition, addition = rule.split('>')
# Check the condition string; If there are multiple conditions,
# all must be satisfied for the rule to fire
condition = condition.strip()
conditions = condition.split()
satisfied1 = [ _check_condition(c, analysis) for c in conditions ]
if all( satisfied1 ):
#
# There can be multiple additions:
# 1) additions without '|' must be added to a single analysis line;
# 2) additions separated by '|' must be placed on separate analysis
# lines;
#
additions = addition.split('|')
j = i
# Add new line or lines
for a in additions:
line_copy = line if i == j else line[:]
items_to_add = a.split()
for item in items_to_add:
if not _check_condition(item, analysis):
line_copy = \
re.sub('(//.+\S)\s+//', '\\1 '+item+' //', line_copy)
if j == i:
# 1) replace the existing line
mrf_lines[i] = line_copy
else:
# 2) add a new line
mrf_lines.insert(i, line_copy)
j += 1
i = j - 1
# No need to search forward
break
i += 1
return mrf_lines | Adds subcategorization information (hashtags) to verbs and adpositions;
Argument subcat_rules must be a dict containing subcategorization information,
loaded via method load_subcat_info();
Performs word lemma lookups in subcat_rules, and in case of a match, checks
word part-of-speech conditions. If the POS conditions match, adds subcategorization
information either to a single analysis line, or to multiple analysis lines
(depending on the exact conditions in the rule);
Returns the input list where verb/adposition analyses have been augmented
with available subcategorization information; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L721-L784 |
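A small sketch of how tag_subcat_info above splits a single rule string: the part before '>' lists POS conditions that must all hold, and '|'-separated additions are spread over separate analysis lines (the rule string comes from the lexicon excerpt above):

rule = '_K_ post >#gen |#nom |#el'
condition, addition = rule.split('>')
conditions = condition.strip().split()              # ['_K_', 'post']  - every item must match the analysis
additions  = [a.strip() for a in addition.split('|')]
print(conditions, additions)                        # ['_K_', 'post'] ['#gen', '#nom', '#el']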
estnltk/estnltk | estnltk/syntax/syntax_preprocessing.py | convert_to_cg3_input | def convert_to_cg3_input( mrf_lines ):
''' Converts given mrf lines from syntax preprocessing format to cg3 input
format:
*) surrounds words/tokens with "< and >"
*) surrounds word lemmas with " in analysis;
*) separates word endings from lemmas in analysis, and adds prefix 'L';
*) removes '//' and '//' from analysis;
*) converts hashtags to tags surrounded by < and >;
... and provides other various fix-ups;
Returns the input list, where elements (tokens/analyses) have been converted
into the new format;
'''
i = 0
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if not line.startswith(' ') and len(line) > 0:
#
# A line containing word/token
#
# a. surround the word with "< and >"
line = re.sub('^(\S.*)([\n\r]*)$','"<\\1>"\\2', line)
# b. fix the sentence begin/end tags
line = re.sub('<<(s|/s)>>', '<\\1>', line)
mrf_lines[i] = line
elif line.startswith(' '):
#
# A line containing analysis
#
# 1. perform various fixes:
line = re.sub('#cap #cap','cap', line)
line = re.sub('#cap','cap', line)
line = re.sub('\*\*CLB','CLB', line)
line = re.sub('#Correct!','<Correct!>', line)
line = re.sub('####','', line)
line = re.sub('#(\S+)','<\\1>', line)
line = re.sub('\$([,.;!?:<]+)','\\1', line)
line = re.sub('_Y_\s+\? _Z_','_Z_', line)
line = re.sub('_Y_\s+\?\s+_Z_','_Z_', line)
line = re.sub('_Y_\s+_Z_','_Z_', line)
line = re.sub('_Z_\s+\?','_Z_', line)
# 2. convert analysis line \w word ending
line = re.sub('^\s+(\S+)(.*)\+(\S+)\s*//_(\S)_ (.*)//(.*)$', \
' "\\1\\2" L\\3 \\4 \\5 \\6', line)
# 3. convert analysis line \wo word ending
line = re.sub('^\s+(\S+)(.*)\s+//_(\S)_ (.*)//(.*)$', \
' "\\1\\2" \\3 \\4 \\5', line)
mrf_lines[i] = line
i += 1
return mrf_lines | python | def convert_to_cg3_input( mrf_lines ):
''' Converts given mrf lines from syntax preprocessing format to cg3 input
format:
*) surrounds words/tokens with "< and >"
*) surrounds word lemmas with " in analysis;
*) separates word endings from lemmas in analysis, and adds prefix 'L';
*) removes '//' and '//' from analysis;
*) converts hashtags to tags surrounded by < and >;
... and provides other various fix-ups;
Returns the input list, where elements (tokens/analyses) have been converted
into the new format;
'''
i = 0
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if not line.startswith(' ') and len(line) > 0:
#
# A line containing word/token
#
# a. surround the word with "< and >"
line = re.sub('^(\S.*)([\n\r]*)$','"<\\1>"\\2', line)
# b. fix the sentence begin/end tags
line = re.sub('<<(s|/s)>>', '<\\1>', line)
mrf_lines[i] = line
elif line.startswith(' '):
#
# A line containing analysis
#
# 1. perform various fixes:
line = re.sub('#cap #cap','cap', line)
line = re.sub('#cap','cap', line)
line = re.sub('\*\*CLB','CLB', line)
line = re.sub('#Correct!','<Correct!>', line)
line = re.sub('####','', line)
line = re.sub('#(\S+)','<\\1>', line)
line = re.sub('\$([,.;!?:<]+)','\\1', line)
line = re.sub('_Y_\s+\? _Z_','_Z_', line)
line = re.sub('_Y_\s+\?\s+_Z_','_Z_', line)
line = re.sub('_Y_\s+_Z_','_Z_', line)
line = re.sub('_Z_\s+\?','_Z_', line)
# 2. convert analysis line \w word ending
line = re.sub('^\s+(\S+)(.*)\+(\S+)\s*//_(\S)_ (.*)//(.*)$', \
' "\\1\\2" L\\3 \\4 \\5 \\6', line)
# 3. convert analysis line \wo word ending
line = re.sub('^\s+(\S+)(.*)\s+//_(\S)_ (.*)//(.*)$', \
' "\\1\\2" \\3 \\4 \\5', line)
mrf_lines[i] = line
i += 1
return mrf_lines | Converts given mrf lines from syntax preprocessing format to cg3 input
format:
*) surrounds words/tokens with "< and >"
*) surrounds word lemmas with " in analysis;
*) separates word endings from lemmas in analysis, and adds prefix 'L';
*) removes '//' and '//' from analysis;
*) converts hashtags to tags surrounded by < and >;
... and provides other various fix-ups;
Returns the input list, where elements (tokens/analyses) have been converted
into the new format; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L794-L843 |
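A short sketch of the two conversions done above, applied to illustrative lines (a token line and an analysis line that already carries the #cap hashtag):

import re

word = 'Mees'
print(re.sub(r'^(\S.*)([\n\r]*)$', r'"<\1>"\2', word))     # '"<Mees>"'

analysis = '    mees+0 //_S_ sg n, #cap //'
cg3 = re.sub(r'^\s+(\S+)(.*)\+(\S+)\s*//_(\S)_ (.*)//(.*)$',
             r'    "\1\2" L\3 \4 \5 \6', analysis)
print(cg3)   # '    "mees" L0 S sg n, #cap' (plus trailing whitespace): lemma quoted, ending prefixed with 'L'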
estnltk/estnltk | estnltk/syntax/syntax_preprocessing.py | SyntaxPreprocessing.process_vm_json | def process_vm_json( self, json_dict, **kwargs ):
''' Executes the preprocessing pipeline on vabamorf's JSON, given as a dict;
Returns a list: lines of analyses in the VISL CG3 input format;
'''
mrf_lines = convert_vm_json_to_mrf( json_dict )
return self.process_mrf_lines( mrf_lines, **kwargs ) | python | def process_vm_json( self, json_dict, **kwargs ):
''' Executes the preprocessing pipeline on vabamorf's JSON, given as a dict;
Returns a list: lines of analyses in the VISL CG3 input format;
'''
mrf_lines = convert_vm_json_to_mrf( json_dict )
return self.process_mrf_lines( mrf_lines, **kwargs ) | Executes the preprocessing pipeline on vabamorf's JSON, given as a dict;
Returns a list: lines of analyses in the VISL CG3 input format; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L945-L951 |
estnltk/estnltk | estnltk/syntax/syntax_preprocessing.py | SyntaxPreprocessing.process_Text | def process_Text( self, text, **kwargs ):
''' Executes the preprocessing pipeline on estnltk's Text object.
Returns a list: lines of analyses in the VISL CG3 input format;
'''
mrf_lines = convert_Text_to_mrf( text )
return self.process_mrf_lines( mrf_lines, **kwargs ) | python | def process_Text( self, text, **kwargs ):
''' Executes the preprocessing pipeline on estnltk's Text object.
Returns a list: lines of analyses in the VISL CG3 input format;
'''
mrf_lines = convert_Text_to_mrf( text )
return self.process_mrf_lines( mrf_lines, **kwargs ) | Executes the preprocessing pipeline on estnltk's Text object.
Returns a list: lines of analyses in the VISL CG3 input format; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L954-L960 |
estnltk/estnltk | estnltk/syntax/syntax_preprocessing.py | SyntaxPreprocessing.process_mrf_lines | def process_mrf_lines( self, mrf_lines, **kwargs ):
''' Executes the preprocessing pipeline on mrf_lines.
The input should be an analysis of the text in Filosoft's old mrf format;
Returns the input list, where elements (tokens/analyses) have been converted
into the new format;
'''
converted1 = convert_mrf_to_syntax_mrf( mrf_lines, self.fs_to_synt_rules )
converted2 = convert_pronouns( converted1 )
converted3 = remove_duplicate_analyses( converted2, allow_to_delete_all=self.allow_to_remove_all )
converted4 = add_hashtag_info( converted3 )
converted5 = tag_subcat_info( converted4, self.subcat_rules )
converted6 = remove_duplicate_analyses( converted5, allow_to_delete_all=self.allow_to_remove_all )
converted7 = convert_to_cg3_input( converted6 )
return converted7 | python | def process_mrf_lines( self, mrf_lines, **kwargs ):
''' Executes the preprocessing pipeline on mrf_lines.
The input should be an analysis of the text in Filosoft's old mrf format;
Returns the input list, where elements (tokens/analyses) have been converted
into the new format;
'''
converted1 = convert_mrf_to_syntax_mrf( mrf_lines, self.fs_to_synt_rules )
converted2 = convert_pronouns( converted1 )
converted3 = remove_duplicate_analyses( converted2, allow_to_delete_all=self.allow_to_remove_all )
converted4 = add_hashtag_info( converted3 )
converted5 = tag_subcat_info( converted4, self.subcat_rules )
converted6 = remove_duplicate_analyses( converted5, allow_to_delete_all=self.allow_to_remove_all )
converted7 = convert_to_cg3_input( converted6 )
return converted7 | Executes the preprocessing pipeline on mrf_lines.
The input should be an analysis of the text in Filosoft's old mrf format;
Returns the input list, where elements (tokens/analyses) have been converted
into the new format; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L963-L978 |
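A hedged end-to-end usage sketch. Assumptions not shown in the rows above: estnltk is installed, SyntaxPreprocessing() can be constructed with its bundled default rule files, and the Text object needs morphological analysis beforehand; only the module path, class name and pipeline order are taken from the rows themselves.

from estnltk import Text
from estnltk.syntax.syntax_preprocessing import SyntaxPreprocessing

text = Text('Ta pidi koju minema.')      # illustrative Estonian sentence
text.tag_analysis()                      # assumption: morphological analysis is required beforehand
pipeline = SyntaxPreprocessing()         # assumption: default rule files ship with the package
cg3_lines = pipeline.process_Text(text)  # runs the 7-step pipeline shown in process_mrf_lines
print('\n'.join(cg3_lines))              # VISL CG3 input lines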
estnltk/estnltk | setup.py | get_sources | def get_sources(src_dir='src', ending='.cpp'):
"""Function to get a list of files ending with `ending` in `src_dir`."""
return [os.path.join(src_dir, fnm) for fnm in os.listdir(src_dir) if fnm.endswith(ending)] | python | def get_sources(src_dir='src', ending='.cpp'):
"""Function to get a list of files ending with `ending` in `src_dir`."""
return [os.path.join(src_dir, fnm) for fnm in os.listdir(src_dir) if fnm.endswith(ending)] | Function to get a list of files ending with `ending` in `src_dir`. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/setup.py#L14-L16 |
estnltk/estnltk | estnltk/prettyprinter/terminalprettyprinter.py | _get_ANSI_colored_font | def _get_ANSI_colored_font( color ):
''' Returns an ANSI escape code (a string) corresponding to switching the font
to given color, or None, if the given color could not be associated with
the available colors.
See also:
https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
'''
color = (color.replace('-','')).lower()
#
# Bright colors:
#
if color == 'white':
return '\033[97m'
elif color in ['cyan', 'aqua']:
return '\033[96m'
elif color in ['purple', 'magneta', 'fuchsia']:
return '\033[95m'
elif color == 'blue':
return '\033[94m'
elif color in ['yellow', 'gold']:
return '\033[93m'
elif color in ['green', 'lime']:
return '\033[92m'
elif color == 'red':
return '\033[91m'
#
# Dark colors:
#
elif color in ['grey', 'gray', 'silver']:
return '\033[37m'
elif color in ['darkcyan', 'teal']:
return '\033[36m'
elif color in ['darkpurple', 'darkmagneta']:
return '\033[35m'
elif color in ['darkblue', 'navy']:
return '\033[34m'
elif color in ['darkyellow', 'olive']:
return '\033[33m'
elif color == 'darkgreen':
return '\033[32m'
elif color in ['darkred', 'maroon']:
return '\033[31m'
return None | python | def _get_ANSI_colored_font( color ):
''' Returns an ANSI escape code (a string) corresponding to switching the font
to given color, or None, if the given color could not be associated with
the available colors.
See also:
https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
'''
color = (color.replace('-','')).lower()
#
# Bright colors:
#
if color == 'white':
return '\033[97m'
elif color in ['cyan', 'aqua']:
return '\033[96m'
elif color in ['purple', 'magneta', 'fuchsia']:
return '\033[95m'
elif color == 'blue':
return '\033[94m'
elif color in ['yellow', 'gold']:
return '\033[93m'
elif color in ['green', 'lime']:
return '\033[92m'
elif color == 'red':
return '\033[91m'
#
# Dark colors:
#
elif color in ['grey', 'gray', 'silver']:
return '\033[37m'
elif color in ['darkcyan', 'teal']:
return '\033[36m'
elif color in ['darkpurple', 'darkmagneta']:
return '\033[35m'
elif color in ['darkblue', 'navy']:
return '\033[34m'
elif color in ['darkyellow', 'olive']:
return '\033[33m'
elif color == 'darkgreen':
return '\033[32m'
elif color in ['darkred', 'maroon']:
return '\033[31m'
return None | Returns an ANSI escape code (a string) corresponding to switching the font
to given color, or None, if the given color could not be associated with
the available colors.
See also:
https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/terminalprettyprinter.py#L34-L78 |
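A tiny sketch showing how the returned escape codes are used on an ANSI-capable terminal; '\033[0m' is the standard reset code, also used by the markup helpers later in this file:

RED = '\033[91m'        # the value returned above for 'red'
UNDER = '\033[4m'       # underline, as used by _construct_start_index below
RESET = '\033[0m'       # reset all attributes
print(RED + 'error' + RESET + ' plain ' + UNDER + 'underlined' + RESET)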
estnltk/estnltk | estnltk/prettyprinter/terminalprettyprinter.py | _construct_start_index | def _construct_start_index(text, layer, markup_settings, spansStartingFrom=None):
''' Creates an index which stores all annotations of given text layer,
indexed by the start position of the annotation (annotation[START]).
Alternatively, if the index spansStartingFrom is already provided
as an input argument, obtains annotation information from the given
text layer, and stores into the index;
The method also creates an ( ANSI-terminal compatible ) mark-up of
the annotations (generates start and end tags), following the
specification in markup_settings. The markup_settings should be a
dict, setting at least one of the following visualisation options:
* 'bracket' : True -- annotations will be surrounded with brackets;
This works in any terminal;
* 'underline' : True -- annotations will be underlined; This works in
an ANSI compatible terminal;
* 'color' : ('red', 'green', 'blue' etc. ) -- annotated text will be
displayed in given color;
this works in an ANSI
compatible terminal;
Each start position (in the index) is associated with a list of
annotation objects (annotations starting from that position).
An annotation object is a list containing the following information:
*) START position (of the annotation),
*) END position (of the annotation),
*) layer (name),
*) startTags -- graphic or textual formatting of the start tag,
*) endTags -- graphic or textual formatting of the end tag,
*) graphicFormatting -- boolean (whether graphic formatting was used?),
*) bracketing -- boolean (whether bracketing was used?),
Multiple annotation objects starting from the same position are sorted by
their length: longer annotations preceding the shorter ones;
The method returns created (or augmented) index (a dict object indexed
by START positions);
'''
if not markup_settings or not isinstance(markup_settings, dict):
raise Exception('Error: markup_settings should be a dict containing markup specification;')
# ----------------------------
# 1) Construct start and end tags, considering the formatting settings
startTags = ''
endTags = ''
graphicFormatting = False
bracketing = False
# -- Underlining
if ('u' in markup_settings and markup_settings['u']) or \
('underline' in markup_settings and markup_settings['underline']):
startTags += '\033[4m'
endTags += '\033[0m'
graphicFormatting = True
colorName = markup_settings['c'] if 'c' in markup_settings else None
colorName = markup_settings['color'] if 'color' in markup_settings else colorName
# -- Coloring
if colorName:
color = _get_ANSI_colored_font( colorName )
if color:
startTags += color
endTags += '\033[0m'
graphicFormatting = True
else:
raise Exception('Unknown color:', colorName)
# -- Bracketing
if ('b' in markup_settings and markup_settings['b']) or \
('bracket' in markup_settings and markup_settings['bracket']):
startTags += '['
# Add ending bracket before graphics ends (otherwise the
# graphics have no effect on the ending bracket)
endTags = ']'+endTags
bracketing = True
# Hack: if both bracketing and graphic formatting are used, add graphic
# formatting before the closing bracket of the endTag (to ensure
# that graphic formatting of the ending bracket is not overwritten
# mistakenly);
if graphicFormatting and bracketing:
startTags2 = startTags.rstrip('[')
endTags = startTags2+endTags
# ----------------------------
# 2) Get extractor for the elements of given layer
# >>> The following code borrows from estnltk.prettyprinter.marker :
# decide which extractor to use
# first just assume we need to use a multi layer text extractor
extractor = lambda t: texts_multi(t, layer)
# if user has specified his/her own callable, use it
if hasattr(layer, '__call__'):
extractor = layer
elif text.is_simple(layer):
# the given layer is simple, so use simple text extractor
extractor = lambda t: texts_simple(t, layer)
# >>>
# ----------------------------
# 3) Store an annotation for each span of given layer
if not spansStartingFrom:
spansStartingFrom = {}
for elem in extractor(text):
if elem[START] not in spansStartingFrom:
spansStartingFrom[elem[START]] = []
span1 = [elem[START], elem[END], layer, startTags, endTags, graphicFormatting, bracketing]
# Insert the span into the index
if not spansStartingFrom[elem[START]]:
spansStartingFrom[elem[START]].append( span1 )
else:
# Make sure that spans are inserted in the order of decreasing length:
# longer spans preceding the shorter ones;
inserted = False
for i in range( len(spansStartingFrom[elem[START]]) ):
span2 = spansStartingFrom[elem[START]][i]
# If an existing span is shorter than the current span, insert the
# current span before the existing span ...
if span1[1] > span2[1]:
spansStartingFrom[elem[START]].insert( i, span1 )
inserted = True
break
elif span1[1] == span2[1] and span1[2] < span2[2]:
# If both spans have equal length, order the spans in the alphabetical
# order of layer names:
spansStartingFrom[elem[START]].insert( i, span1 )
inserted = True
break
if not inserted:
spansStartingFrom[elem[START]].append(span1)
return spansStartingFrom | python | def _construct_start_index(text, layer, markup_settings, spansStartingFrom=None):
''' Creates an index which stores all annotations of given text layer,
indexed by the start position of the annotation (annotation[START]).
Alternatively, if the index spansStartingFrom is already provided
as an input argument, obtains annotation information from the given
text layer, and stores into the index;
The method also creates an ( ANSI-terminal compatible ) mark-up of
the annotations (generates start and end tags), following the
specification in markup_settings. The markup_settings should be a
dict, setting at least one of the following visualisation options:
* 'bracket' : True -- annotations will be surrounded with brackets;
This works in any terminal;
* 'underline' : True -- annotations will be underlined; This works in
an ANSI compatible terminal;
* 'color' : ('red', 'green', 'blue' etc. ) -- annotated text will be
displayed in given color;
this works in an ANSI
compatible terminal;
Each start position (in the index) is associated with a list of
annotation objects (annotations starting from that position).
An annotation object is a list containing the following information:
*) START position (of the annotation),
*) END position (of the annotation),
*) layer (name),
*) startTags -- graphic or textual formatting of the start tag,
*) endTags -- graphic or textual formatting of the end tag,
*) graphicFormatting -- boolean (whether graphic formatting was used?),
*) bracketing -- boolean (whether bracketing was used?),
Multiple annotation objects starting from the same position are sorted by
their length: longer annotations preceding the shorter ones;
The method returns created (or augmented) index (a dict object indexed
by START positions);
'''
if not markup_settings or not isinstance(markup_settings, dict):
raise Exception('Error: markup_settings should be a dict containing markup specification;')
# ----------------------------
# 1) Construct start and end tags, considering the formatting settings
startTags = ''
endTags = ''
graphicFormatting = False
bracketing = False
# -- Underlining
if ('u' in markup_settings and markup_settings['u']) or \
('underline' in markup_settings and markup_settings['underline']):
startTags += '\033[4m'
endTags += '\033[0m'
graphicFormatting = True
colorName = markup_settings['c'] if 'c' in markup_settings else None
colorName = markup_settings['color'] if 'color' in markup_settings else colorName
# -- Coloring
if colorName:
color = _get_ANSI_colored_font( colorName )
if color:
startTags += color
endTags += '\033[0m'
graphicFormatting = True
else:
raise Exception('Unknown color:', colorName)
# -- Bracketing
if ('b' in markup_settings and markup_settings['b']) or \
('bracket' in markup_settings and markup_settings['bracket']):
startTags += '['
# Add ending bracket before graphics ends (otherwise the
# graphics have no effect on the ending bracket)
endTags = ']'+endTags
bracketing = True
# Hack: if both bracketing and graphic formatting are used, add graphic
# formatting before the closing bracket of the endTag (to ensure
# that graphic formatting of the ending bracket is not overwritten
# mistakenly);
if graphicFormatting and bracketing:
startTags2 = startTags.rstrip('[')
endTags = startTags2+endTags
# ----------------------------
# 2) Get extractor for the elements of given layer
# >>> The following code borrows from estnltk.prettyprinter.marker :
# decide which extractor to use
# first just assume we need to use a multi layer text extractor
extractor = lambda t: texts_multi(t, layer)
# if user has specified his/her own callable, use it
if hasattr(layer, '__call__'):
extractor = layer
elif text.is_simple(layer):
# the given layer is simple, so use simple text extractor
extractor = lambda t: texts_simple(t, layer)
# >>>
# ----------------------------
# 3) Store an annotation for each span of given layer
if not spansStartingFrom:
spansStartingFrom = {}
for elem in extractor(text):
if elem[START] not in spansStartingFrom:
spansStartingFrom[elem[START]] = []
span1 = [elem[START], elem[END], layer, startTags, endTags, graphicFormatting, bracketing]
# Insert the span into the index
if not spansStartingFrom[elem[START]]:
spansStartingFrom[elem[START]].append( span1 )
else:
# Make sure that spans are inserted in the order of decreasing length:
# longer spans preceding the shorter ones;
inserted = False
for i in range( len(spansStartingFrom[elem[START]]) ):
span2 = spansStartingFrom[elem[START]][i]
# If an existing span is shorter than the current span, insert the
# current span before the existing span ...
if span1[1] > span2[1]:
spansStartingFrom[elem[START]].insert( i, span1 )
inserted = True
break
elif span1[1] == span2[1] and span1[2] < span2[2]:
# If both spans have equal length, order the spans in the alphabetical
# order of layer names:
spansStartingFrom[elem[START]].insert( i, span1 )
inserted = True
break
if not inserted:
spansStartingFrom[elem[START]].append(span1)
return spansStartingFrom | Creates an index which stores all annotations of given text layer,
indexed by the start position of the annotation (annotation[START]).
Alternatively, if the index spansStartingFrom is already provided
as an input argument, obtains annotation information from the given
text layer, and stores into the index;
The method also creates an ( ANSI-terminal compatible ) mark-up of
the annotations (generates start and end tags), following the
specification in markup_settings. The markup_settings should be a
dict, setting at least one of the following visualisation options:
* 'bracket' : True -- annotations will be surrounded with brackets;
This works in any terminal;
* 'underline' : True -- annotations will be underlined; This works in
an ANSI compatible terminal;
* 'color' : ('red', 'green', 'blue' etc. ) -- annotated text will be
displayed in given color;
this works in an ANSI
compatible terminal;
Each start position (in the index) is associated with a list of
annotation objects (annotations starting from that position).
An annotation object is a list containing the following information:
*) START position (of the annotation),
*) END position (of the annotation),
*) layer (name),
*) startTags -- graphic or textual formatting of the start tag,
*) endTags -- graphic or textual formatting of the end tag,
*) graphicFormatting -- boolean (whether graphic formatting was used?),
*) bracketing -- boolean (whether bracketing was used?),
Multiple annotation objects starting from the same position are sorted by
their length: longer annotations preceding the shorter ones;
The method returns created (or augmented) index (a dict object indexed
by START positions); | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/terminalprettyprinter.py#L84-L207 |
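A standalone sketch of the start/end tag strings that the settings dict {'underline': True, 'color': 'red', 'bracket': True} produces in _construct_start_index above (escape codes copied from the code; the last assignment reproduces the restart-before-closing-bracket hack):

startTags, endTags = '', ''
startTags += '\033[4m';  endTags += '\033[0m'      # underline
startTags += '\033[91m'; endTags += '\033[0m'      # color 'red'
startTags += '[';        endTags = ']' + endTags   # bracketing
endTags = startTags.rstrip('[') + endTags          # restart graphics before the closing bracket
print(repr(startTags))   # '\x1b[4m\x1b[91m['
print(repr(endTags))     # '\x1b[4m\x1b[91m]\x1b[0m\x1b[0m'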
estnltk/estnltk | estnltk/prettyprinter/terminalprettyprinter.py | _construct_end_index | def _construct_end_index( spansStartingFrom ):
''' Creates an index which stores all annotations (from spansStartingFrom)
by their end position in text (annotation[END]).
Each start position (in the index) is associated with a list of
annotation objects (annotations ending at that position).
An annotation object is also a list containing the following information:
*) endTags -- graphic or textual formatting of the end tag,
*) START position (of the annotation);
*) layer name;
Multiple annotation objects ending at the same position are sorted by
their length: shorter annotations preceding the longer ones;
'''
endIndex = {}
for i in spansStartingFrom:
for span1 in spansStartingFrom[i]:
# keep the record of endTags, start positions (for determining the length)
# and layer names
endSpan1 = [ span1[4], span1[0], span1[2] ]
endLoc1 = span1[1]
if endLoc1 not in endIndex:
endIndex[endLoc1] = []
endIndex[endLoc1].append( endSpan1 )
else:
# Make sure that spans are inserted in the order of increasing length:
# shorter spans preceding the longer ones;
inserted = False
for i in range( len(endIndex[endLoc1]) ):
endSpan2 = endIndex[endLoc1][i]
# If an existing span is longer than the current span, insert the
# current span before the existing span ...
if endSpan2[1] < endSpan1[1]:
endIndex[endLoc1].insert( i, endSpan1 )
inserted = True
break
elif endSpan2[1] == endSpan1[1] and endSpan2[2] < endSpan1[2]:
# If both spans have equal length, order the spans in the
# alphabetical order of layer names:
endIndex[endLoc1].insert( i, endSpan1 )
inserted = True
break
if not inserted:
endIndex[endLoc1].append( endSpan1 )
return endIndex | python | def _construct_end_index( spansStartingFrom ):
''' Creates an index which stores all annotations (from spansStartingFrom)
by their end position in text (annotation[END]).
Each start position (in the index) is associated with a list of
annotation objects (annotations ending at that position).
An annotation object is also a list containing the following information:
*) endTags -- graphic or textual formatting of the end tag,
*) START position (of the annotation);
*) layer name;
Multiple annotation objects ending at the same position are sorted by
their length: shorter annotations preceding the longer ones;
'''
endIndex = {}
for i in spansStartingFrom:
for span1 in spansStartingFrom[i]:
# keep the record of endTags, start positions (for determining the length)
# and layer names
endSpan1 = [ span1[4], span1[0], span1[2] ]
endLoc1 = span1[1]
if endLoc1 not in endIndex:
endIndex[endLoc1] = []
endIndex[endLoc1].append( endSpan1 )
else:
# Make sure that spans are inserted in the order of increasing length:
# shorter spans preceding the longer ones;
inserted = False
for i in range( len(endIndex[endLoc1]) ):
endSpan2 = endIndex[endLoc1][i]
# If an existing span is longer than the current span, insert the
# current span before the existing span ...
if endSpan2[1] < endSpan1[1]:
endIndex[endLoc1].insert( i, endSpan1 )
inserted = True
break
elif endSpan2[1] == endSpan1[1] and endSpan2[2] < endSpan1[2]:
# If both spans have equal length, order the spans in the
# alphabetical order of layer names:
endIndex[endLoc1].insert( i, endSpan1 )
inserted = True
break
if not inserted:
endIndex[endLoc1].append( endSpan1 )
return endIndex | Creates an index which stores all annotations (from spansStartingFrom)
by their end position in text (annotation[END]).
Each start position (in the index) is associated with a list of
annotation objects (annotations ending at that position).
An annotation object is also a list containing the following information:
*) endTags -- graphic or textual formatting of the end tag,
*) START position (of the annotation);
*) layer name;
Multiple annotation objects ending at the same position are sorted by
their length: shorter annotations preceding the longer ones; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/terminalprettyprinter.py#L210-L254 |
estnltk/estnltk | estnltk/prettyprinter/terminalprettyprinter.py | _fix_overlapping_graphics | def _fix_overlapping_graphics( spansStartingFrom ):
''' Provides a fix for overlapping annotations that are formatted graphically
(underlined or printed in non-default color).
If two graphically formatted annotations overlap, and if one annotation,
say A, ends within another annotation, say B, then ending of graphics of A
also causes graphics of B to end, and so, the end of A should restart
the graphics of B for a continuous visualisation;
This method modifies ending tags in a way that annotations ending within
other annotations will also contain restarts of the corresponding (super)-
annotations, so that a continuous formatting is ensured.
'''
for startIndex in sorted( spansStartingFrom.keys() ):
for span1 in spansStartingFrom[startIndex]:
# If the span is not graphic, we have nothing to worry about - we can just skip it
if not span1[5]:
continue
# Otherwise: check for other graphic spans that overlap with the given span
span1Start = span1[0]
span1End = span1[1]
for i in range( span1Start, span1End ):
if i in spansStartingFrom:
for span2 in spansStartingFrom[i]:
span2Start = span2[0]
span2End = span2[1]
# If the spans are not the same, and the span2 is graphic
if span2 != span1 and span2[5]:
# if the overlapping graphic span ends before the current span,
# we have to restart the graphic formatting of given span after
# the end of the overlapping span
if span2End <= span1End:
if not span1[6]:
# If span1 is not bracketed, just add it at the end of
# the overlapping span
span2[4] += span1[3]
else:
# If span1 is bracketed, add it at the end of the
# overlapping span without brackets
wb = span1[3].rstrip('[')
span2[4] += wb | python | def _fix_overlapping_graphics( spansStartingFrom ):
''' Provides a fix for overlapping annotations that are formatted graphically
(underlined or printed in non-default color).
If two graphically formatted annotations overlap, and if one annotation,
say A, ends within another annotation, say B, then ending of graphics of A
also causes graphics of B to end, and so, the end of A should restart
the graphics of B for a continuous visualisation;
This method modifies ending tags in a way that annotations ending within
other annotations will also contain restarts of the corresponding (super)-
annotations, so that a continuous formatting is ensured.
'''
for startIndex in sorted( spansStartingFrom.keys() ):
for span1 in spansStartingFrom[startIndex]:
# If the span is not graphic, we have nothing to worry about - we can just skip it
if not span1[5]:
continue
# Otherwise: check for other graphic spans that overlap with the given span
span1Start = span1[0]
span1End = span1[1]
for i in range( span1Start, span1End ):
if i in spansStartingFrom:
for span2 in spansStartingFrom[i]:
span2Start = span2[0]
span2End = span2[1]
# If the spans are not the same, and the span2 is graphic
if span2 != span1 and span2[5]:
# if the overlapping graphic span ends before the current span,
# we have to restart the graphic formatting of given span after
# the end of the overlapping span
if span2End <= span1End:
if not span1[6]:
# If span1 is not bracketed, just add it at the end of
# the overlapping span
span2[4] += span1[3]
else:
# If span1 is bracketed, add it at the end of the
# overlapping span without brackets
wb = span1[3].rstrip('[')
span2[4] += wb | Provides a fix for overlapping annotations that are formatted graphically
(underlined or printed in non-default color).
If two graphically formatted annotations overlap, and if one annotation,
say A, ends within another annotation, say B, then ending of graphics of A
also causes graphics of B to end, and so, the end of A should restart
the graphics of B for a continuous visualisation;
This method modifies ending tags in a way that annotations ending within
other annotations will also contain restarts of the corresponding (super)-
annotations, so that a continuous formatting is ensured. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/terminalprettyprinter.py#L257-L296 |
estnltk/estnltk | estnltk/prettyprinter/terminalprettyprinter.py | _preformat | def _preformat( text, layers, markup_settings = None ):
''' Formats given text, adding a special ( ANSI-terminal compatible ) markup
to the annotations of given layers, and returns formatted text as a
string.
*) layers is a list containing names of the layers to be preformatted in
the text (these layers must be present in Text);
*) markup_settings should be a list containing annotation options for each
layer: one dict with options per layer;
One dict can contain the following visualization options:
* 'bracket' : True -- annotations will be surrounded with brackets; This
works in any terminal;
* 'underline' : True -- annotations will be underlined; This works in ANSI
compatible terminal;
* 'color' : ('red', 'green', 'blue' etc. ) -- annotated text will be
displayed in given color; this works in ANSI compatible
terminal;
*) Alternatively, if markup_settings is undefined, up to 12 layers can be
visualized following the default settings;
Parameters
----------
text: Text
a text object. Must contain given layers;
layers: list of str
list of layer names to be visualised;
markup_settings: list of dict
list of dictionaries containing user-defined visualization options;
(one dict per layer)
Returns
-------
text: str
preformatted text, where elements of given layers have been marked up, using
an ANSI-terminal compatible markup;
'''
if markup_settings and len(layers) != len(markup_settings):
raise Exception(' Input arguments layers and markup_settings should be equal size lists.')
elif not markup_settings and len(layers) <= len(default_markup_settings):
# Use default markup settings
markup_settings = default_markup_settings[0:len(layers)]
elif not markup_settings:
raise Exception(' Input argument markup_settings not defined.')
#
# 1) Construct the index of annotations (for each layer);
# Annotations are indexed by their start positions;
# The index also contains start and end tags of each annotation;
spansStartingFrom = {}
for i in range( len(layers) ):
layer = layers[i]
settings = markup_settings[i]
spansStartingFrom = _construct_start_index(text, layer, settings, spansStartingFrom)
#
# 2) Fix overlapping graphic annotations in the index
# (to ensure continuous formatting of annotations)
_fix_overlapping_graphics( spansStartingFrom )
#
# 3) Index the annotations by their end positions
endTags = _construct_end_index( spansStartingFrom )
#
# 4) Construct the output string
return_str = []
for i in range( len(text[TEXT]) ):
c = text[TEXT][i]
emptyTags = []
if i in endTags:
for tag in endTags[i]:
if tag[1] != i:
# Non-empty tag
return_str.append( tag[0] )
else:
# Empty tag
emptyTags.append( tag )
if i in spansStartingFrom:
for span in spansStartingFrom[i]:
return_str.append( span[3] )
if span[0] == span[1]:
# Empty tag: Add the closing tag
for emptyEndTag in emptyTags:
if span[2] == emptyEndTag[2]:
return_str.append( emptyEndTag[0] )
return_str.append( c )
if len(text[TEXT]) in spansStartingFrom:
for span in spansStartingFrom[len(text[TEXT])]:
return_str.append( span[3] )
if len(text[TEXT]) in endTags:
for tag in endTags[len(text[TEXT])]:
return_str.append( tag[0] )
# Hack: fix for a potential overflow / unclosed graphics
if return_str and '\033' in return_str[-1] and \
not return_str[-1].endswith('\033[0m'):
return_str.append( '\033[0m' )
return ''.join(return_str) | python | def _preformat( text, layers, markup_settings = None ):
''' Formats given text, adding a special ( ANSI-terminal compatible ) markup
to the annotations of given layers, and returns formatted text as a
string.
*) layers is a list containing names of the layers to be preformatted in
the text (these layers must be present in Text);
*) markup_settings should be a list containing annotation options for each
layer: one dict with options per layer;
One dict can contain the following visualization options:
* 'bracket' : True -- annotations will be surrounded with brackets; This
works in any terminal;
* 'underline' : True -- annotations will be underlined; This works in ANSI
compatible terminal;
* 'color' : ('red', 'green', 'blue' etc. ) -- annotated text will be
displayed in given color; this works in ANSI compatible
terminal;
*) Alternatively, if markup_settings is undefined, up to 12 layers can be
visualized following the default settings;
Parameters
----------
text: Text
a text object. Must contain given layers;
layers: list of str
list of layer names to be visualised;
markup_settings: list of dict
list of dictionaries containing user-defined visualization options;
(one dict per layer)
Returns
-------
text: str
preformatted text, where elements of given layers have been marked up, using
an ANSI-terminal compatible markup;
'''
if markup_settings and len(layers) != len(markup_settings):
raise Exception(' Input arguments layers and markup_settings should be equal size lists.')
elif not markup_settings and len(layers) <= len(default_markup_settings):
# Use default markup settings
markup_settings = default_markup_settings[0:len(layers)]
elif not markup_settings:
raise Exception(' Input argument markup_settings not defined.')
#
# 1) Construct the index of annotations (for each layer);
# Annotations are indexed by their start positions;
# The index also contains start and end tags of each annotation;
spansStartingFrom = {}
for i in range( len(layers) ):
layer = layers[i]
settings = markup_settings[i]
spansStartingFrom = _construct_start_index(text, layer, settings, spansStartingFrom)
#
# 2) Fix overlapping graphic annotations in the index
# (to ensure continuous formatting of annotations)
_fix_overlapping_graphics( spansStartingFrom )
#
# 3) Index the annotations by their end positions
endTags = _construct_end_index( spansStartingFrom )
#
# 4) Construct the output string
return_str = []
for i in range( len(text[TEXT]) ):
c = text[TEXT][i]
emptyTags = []
if i in endTags:
for tag in endTags[i]:
if tag[1] != i:
# Non-empty tag
return_str.append( tag[0] )
else:
# Empty tag
emptyTags.append( tag )
if i in spansStartingFrom:
for span in spansStartingFrom[i]:
return_str.append( span[3] )
if span[0] == span[1]:
# Empty tag: Add the closing tag
for emptyEndTag in emptyTags:
if span[2] == emptyEndTag[2]:
return_str.append( emptyEndTag[0] )
return_str.append( c )
if len(text[TEXT]) in spansStartingFrom:
for span in spansStartingFrom[len(text[TEXT])]:
return_str.append( span[3] )
if len(text[TEXT]) in endTags:
for tag in endTags[len(text[TEXT])]:
return_str.append( tag[0] )
# Hack: fix for a potential overflow / unclosed graphics
if return_str and '\033' in return_str[-1] and \
not return_str[-1].endswith('\033[0m'):
return_str.append( '\033[0m' )
return ''.join(return_str) | Formats given text, adding a special ( ANSI-terminal compatible ) markup
to the annotations of given layers, and returns formatted text as a
string.
*) layers is a list containing names of the layers to be preformatted in
the text (these layers must be present in Text);
*) markup_settings should be a list containing annotation options for each
layer: one dict with options per layer;
One dict can contain the following visualization options:
* 'bracket' : True -- annotations will be surrounded with brackets; This
works in any terminal;
* 'underline' : True -- annotations will be underlined; This works in ANSI
compatible terminal;
* 'color' : ('red', 'green', 'blue' etc. ) -- annotated text will be
displayed in given color; this works in ANSI compatible
terminal;
*) Alternatively, if markup_settings is undefined, up to 12 layers can be
visualized following the default settings;
Parameters
----------
text: Text
a text object. Must contain given layers;
layers: list of str
list of layer names to be visualised;
markup_settings: list of dict
list of dictionaries containing user-defined visualization options;
(one dict per layer)
Returns
-------
text: str
preformatted text, where elements of given layers have been marked up, using
an ANSI-terminal compatible markup; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/terminalprettyprinter.py#L317-L409 |
estnltk/estnltk | estnltk/prettyprinter/terminalprettyprinter.py | tprint | def tprint( text, layers, markup_settings = None ):
''' Formats given text, adding a special ( ANSI-terminal compatible ) markup
to the annotations of given layers, and prints the formatted text to the
screen.
*) layers is a list containing names of the layers to be preformatted in
the text (these layers must be present in Text);
*) markup_settings should be a list containing annotation options for each
layer: one dict with options per layer;
One dict can contain the following visualization options:
* 'bracket' : True -- annotations will be surrounded with brackets; This
works in any terminal;
* 'underline' : True -- annotations will be underlined; This works in ANSI
compatible terminal;
* 'color' : ('red', 'green', 'blue' etc. ) -- annotated text will be
displayed in given color; this works in ANSI compatible
terminal;
*) Alternatively, if markup_settings is undefined, up to 12 layers can be
visualized following the default settings;
Parameters
----------
text: Text
a text object. Must contain given layers;
layers: list of str
list of layer names to be visualised;
markup_settings: list of dict
list of dictionaries containing user-defined visualization options;
(one dict per layer)
'''
if markup_settings and len(layers) != len(markup_settings):
raise Exception(' Input arguments layers and markup_settings should be equal size lists.')
elif not markup_settings and len(layers) <= len(default_markup_settings):
# Use a subset from default markup settings
markup_settings = default_markup_settings[0:len(layers)]
elif not markup_settings:
raise Exception(' Input argument markup_settings not defined.')
print( _preformat(text, layers, markup_settings=markup_settings) ) | python | def tprint( text, layers, markup_settings = None ):
''' Formats given text, adding a special ( ANSI-terminal compatible ) markup
to the annotations of given layers, and prints the formatted text to the
screen.
*) layers is a list containing names of the layers to be preformatted in
the text (these layers must be present in Text);
*) markup_settings should be a list containing annotation options for each
layer: one dict with options per layer;
One dict can contain the following visualization options:
* 'bracket' : True -- annotations will be surrounded with brackets; This
works in any terminal;
* 'underline' : True -- annotations will be underlined; This works in ANSI
compatible terminal;
* 'color' : ('red', 'green', 'blue' etc. ) -- annotated text will be
displayed in given color; this works in ANSI compatible
terminal;
*) Alternatively, if markup_settings is undefined, up to 12 layers can be
visualized following the default settings;
Parameters
----------
text: Text
a text object. Must contain given layers;
layers: list of str
list of layer names to be visualised;
markup_settings: list of dict
list of dictionaries containing user-defined visualization options;
(one dict per layer)
'''
if markup_settings and len(layers) != len(markup_settings):
raise Exception(' Input arguments layers and markup_settings should be equal size lists.')
elif not markup_settings and len(layers) <= len(default_markup_settings):
# Use a subset from default markup settings
markup_settings = default_markup_settings[0:len(layers)]
elif not markup_settings:
raise Exception(' Input argument markup_settings not defined.')
print( _preformat(text, layers, markup_settings=markup_settings) ) | Formats given text, adding a special ( ANSI-terminal compatible ) markup
to the annotations of given layers, and prints the formatted text to the
screen.
*) layers is a list containing names of the layers to be preformatted in
the text (these layers must be present in Text);
*) markup_settings should be a list containing annotation options for each
layer: one dict with options per layer;
One dict can contain the following visualization options:
* 'bracket' : True -- annotations will be surrounded with brackets; This
works in any terminal;
* 'underline' : True -- annotations will be underlined; This works in ANSI
compatible terminal;
* 'color' : ('red', 'green', 'blue' etc. ) -- annotated text will be
displayed in given color; this works in ANSI compatible
terminal;
*) Alternatively, if markup_settings is undefined, up to 12 layers can be
visualized following the default settings;
Parameters
----------
text: Text
a text object. Must contain given layers;
layers: list of str
list of layer names to be visualised;
markup_settings: list of dict
list of dictionaries containing user-defined visualization options;
(one dict per layer) | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/terminalprettyprinter.py#L418-L454 |
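A hedged usage sketch for tprint. Assumptions: estnltk is installed and the Text object carries a layer named 'verb_chains' (the layer name, tagger call and sentence are illustrative; only the import path and the markup_settings format come from the rows above).

from estnltk import Text
from estnltk.prettyprinter.terminalprettyprinter import tprint

text = Text('Ta oleks pidanud koju minema.')   # illustrative sentence
text.tag_verb_chains()                         # assumption: this tagger exists in the installed version
tprint(text, ['verb_chains'], [{'color': 'red', 'underline': True}])
# plain-terminal alternative: tprint(text, ['verb_chains'], [{'bracket': True}])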
estnltk/estnltk | estnltk/prettyprinter/templates.py | get_mark_css | def get_mark_css(aes_name, css_value):
"""Generate CSS class for <mark> tag.
Parameters
----------
aes_name: str
The name of the class.
css_value: str
The value for the CSS property defined by aes_name.
Returns
-------
list of str
The CSS codeblocks
"""
css_prop = AES_CSS_MAP[aes_name]
if isinstance(css_value, list):
return get_mark_css_for_rules(aes_name, css_prop, css_value)
else:
return get_mark_simple_css(aes_name, css_prop, css_value) | python | def get_mark_css(aes_name, css_value):
"""Generate CSS class for <mark> tag.
Parameters
----------
aes_name: str
The name of the class.
css_value: str
The value for the CSS property defined by aes_name.
Returns
-------
list of str
The CSS codeblocks
"""
css_prop = AES_CSS_MAP[aes_name]
if isinstance(css_value, list):
return get_mark_css_for_rules(aes_name, css_prop, css_value)
else:
return get_mark_simple_css(aes_name, css_prop, css_value) | Generate CSS class for <mark> tag.
Parameters
----------
aes_name: str
The name of the class.
css_value: str
The value for the CSS property defined by aes_name.
Returns
-------
list of str
The CSS codeblocks | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/templates.py#L44-L63 |
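A hedged usage sketch; it assumes 'background' is a key of AES_CSS_MAP (mapping to a CSS property such as 'background-color') and that the returned class targets mark elements - neither is shown in the row above.

from estnltk.prettyprinter.templates import get_mark_css

blocks = get_mark_css('background', 'yellow')   # one CSS block per returned list item
print('\n'.join(blocks))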
estnltk/estnltk | estnltk/mw_verbs/verbchain_nom_vinf_extender.py | VerbChainNomVInfExtender._loadSubcatRelations | def _loadSubcatRelations( self, inputFile ):
''' Loads verb-nom/adv-vinf government (subcategorization) relation patterns from the input file (inputFile).
Each pattern must be given on a separate line of the file, in the form:
(verb description)\TAB(nom/adv description)\TAB(vinf description)
e.g.
leid NEG aeg;S;((sg|pl) (p)|adt) da
leid POS võimalus;S;(sg|pl) (n|p|g) da
Stores the loaded results into the class variables nomAdvWordTemplates, verbRules
and verbToVinf;
'''
self.nomAdvWordTemplates = dict()
self.verbRules = dict()
self.verbToVinf = dict()
in_f = codecs.open(inputFile, mode='r', encoding='utf-8')
for line in in_f:
line = line.rstrip()
if len(line) > 0 and not re.match("^#.+$", line):
items = line.split('\t')
if len(items) == 3:
verb = items[0]
nounAdv = items[1]
vinf = items[2]
if nounAdv not in self.nomAdvWordTemplates:
(root,pos,form) = nounAdv.split(';')
if not root.startswith('^') and not root.endswith('$'):
root = '^'+root+'$'
constraints = {ROOT:root, POSTAG:pos}
if form:
constraints[FORM] = form
self.nomAdvWordTemplates[nounAdv] = WordTemplate(constraints)
if verb not in self.verbRules:
self.verbRules[verb] = []
if verb not in self.verbToVinf:
self.verbToVinf[verb] = []
self.verbRules[verb].append( (nounAdv, 'V_'+vinf) )
if 'V_'+vinf not in self.verbToVinf[verb]:
self.verbToVinf[verb].append( 'V_'+vinf )
else:
raise Exception(' Unexpected number of items in the input lexicon line: '+line)
in_f.close() | python | def _loadSubcatRelations( self, inputFile ):
''' Loads verb-nom/adv-vinf government (subcategorization) relation patterns from the input file (inputFile).
Each pattern must be given on a separate line of the file, in the form:
(verb description)\TAB(nom/adv description)\TAB(vinf description)
e.g.
leid NEG aeg;S;((sg|pl) (p)|adt) da
leid POS võimalus;S;(sg|pl) (n|p|g) da
Stores the loaded results into the class variables nomAdvWordTemplates, verbRules
and verbToVinf;
'''
self.nomAdvWordTemplates = dict()
self.verbRules = dict()
self.verbToVinf = dict()
in_f = codecs.open(inputFile, mode='r', encoding='utf-8')
for line in in_f:
line = line.rstrip()
if len(line) > 0 and not re.match("^#.+$", line):
items = line.split('\t')
if len(items) == 3:
verb = items[0]
nounAdv = items[1]
vinf = items[2]
if nounAdv not in self.nomAdvWordTemplates:
(root,pos,form) = nounAdv.split(';')
if not root.startswith('^') and not root.endswith('$'):
root = '^'+root+'$'
constraints = {ROOT:root, POSTAG:pos}
if form:
constraints[FORM] = form
self.nomAdvWordTemplates[nounAdv] = WordTemplate(constraints)
if verb not in self.verbRules:
self.verbRules[verb] = []
if verb not in self.verbToVinf:
self.verbToVinf[verb] = []
self.verbRules[verb].append( (nounAdv, 'V_'+vinf) )
if 'V_'+vinf not in self.verbToVinf[verb]:
self.verbToVinf[verb].append( 'V_'+vinf )
else:
raise Exception(' Unexpected number of items in the input lexicon line: '+line)
in_f.close() | Loads the verb-nom/adv-vinf government relation patterns from the input file (inputFile).
Each pattern must be on a separate line of the file, in the form:
(verb description)\TAB(nom/adv description)\TAB(vinf description)
e.g.
leid NEG aeg;S;((sg|pl) (p)|adt) da
leid POS võimalus;S;(sg|pl) (n|p|g) da
Stores the loaded results in the class variables nomAdvWordTemplates, verbRules
and verbToVinf; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/verbchain_nom_vinf_extender.py#L66-L105 |
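The documentation above describes a tab-separated rule file; the parsing logic boils down to splitting each line into three fields and turning the middle field into word-template constraints. A standalone sketch of that step, with plain dictionary keys standing in for estnltk's ROOT/POSTAG/FORM constants:
# Follows the parsing shown in _loadSubcatRelations; not part of estnltk itself.
line = "leid NEG\taeg;S;((sg|pl) (p)|adt)\tda"

verb, noun_adv, vinf = line.split('\t')   # 'leid NEG', 'aeg;S;((sg|pl) (p)|adt)', 'da'
root, pos, form = noun_adv.split(';')     # 'aeg', 'S', '((sg|pl) (p)|adt)'
if not root.startswith('^') and not root.endswith('$'):
    root = '^' + root + '$'               # anchor the root so it matches the whole lemma
constraints = {'root': root, 'partofspeech': pos}
if form:
    constraints['form'] = form            # the form pattern is optional
print(verb, constraints, 'V_' + vinf)
# leid NEG {'root': '^aeg$', 'partofspeech': 'S', 'form': '((sg|pl) (p)|adt)'} V_da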
estnltk/estnltk | estnltk/mw_verbs/verbchain_nom_vinf_extender.py | VerbChainNomVInfExtender.tokenMatchesNomAdvVinf | def tokenMatchesNomAdvVinf( self, token, verb, vinf):
''' Teeb kindlaks, kas etteantud token v6iks olla verb'i alluv ning vinf'i ylemus (st
paikneda nende vahel). Kui see nii on, tagastab j2rjendi vahele sobiva s6na morf
analyysidega (meetodi _getMatchingAnalysisIDs abil), vastasel juhul tagastab tyhja
j2rjendi;
'''
if verb in self.verbRules:
for (nounAdv, vinf1) in self.verbRules[verb]:
if vinf == vinf1 and (self.nomAdvWordTemplates[nounAdv]).matches(token):
return _getMatchingAnalysisIDs( token, self.nomAdvWordTemplates[nounAdv] )
return [] | python | def tokenMatchesNomAdvVinf( self, token, verb, vinf):
''' Teeb kindlaks, kas etteantud token v6iks olla verb'i alluv ning vinf'i ylemus (st
paikneda nende vahel). Kui see nii on, tagastab j2rjendi vahele sobiva s6na morf
analyysidega (meetodi _getMatchingAnalysisIDs abil), vastasel juhul tagastab tyhja
j2rjendi;
'''
if verb in self.verbRules:
for (nounAdv, vinf1) in self.verbRules[verb]:
if vinf == vinf1 and (self.nomAdvWordTemplates[nounAdv]).matches(token):
return _getMatchingAnalysisIDs( token, self.nomAdvWordTemplates[nounAdv] )
return [] | Determines whether the given token could be a dependent of the verb and the head of the vinf (i.e.
could be located between them). If so, returns a list of the matching morphological
analyses of the word (obtained via the method _getMatchingAnalysisIDs); otherwise returns an
empty list; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/verbchain_nom_vinf_extender.py#L107-L117 |
estnltk/estnltk | estnltk/mw_verbs/verbchain_nom_vinf_extender.py | VerbChainNomVInfExtender.extendChainsInSentence | def extendChainsInSentence( self, sentence, foundChains ):
''' Rakendab meetodit self.extendChainsInClause() antud lause igal osalausel.
'''
# 1) Preprocessing
clauses = getClausesByClauseIDs( sentence )
# 2) Extend verb chains in each clause
allDetectedVerbChains = []
for clauseID in clauses:
clause = clauses[clauseID]
self.extendChainsInClause(clause, clauseID, foundChains) | python | def extendChainsInSentence( self, sentence, foundChains ):
''' Rakendab meetodit self.extendChainsInClause() antud lause igal osalausel.
'''
# 1) Preprocessing
clauses = getClausesByClauseIDs( sentence )
# 2) Extend verb chains in each clause
allDetectedVerbChains = []
for clauseID in clauses:
clause = clauses[clauseID]
self.extendChainsInClause(clause, clauseID, foundChains) | Applies the method self.extendChainsInClause() to every clause of the given sentence. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/verbchain_nom_vinf_extender.py#L120-L130 |
estnltk/estnltk | estnltk/mw_verbs/verbchain_nom_vinf_extender.py | VerbChainNomVInfExtender._isLikelyNotPhrase | def _isLikelyNotPhrase( self, headVerbRoot, headVerbWID, nomAdvWID, widToToken):
''' Kontrollib, et nom/adv ei kuuluks mingi suurema fraasi kooseisu (poleks fraasi peas6na).
Tagastab True, kui:
*) nom/adv j2rgneb vahetult peaverbile
*) või nom/adv on vahetult osalause alguses
*) või nom-ile eelneb vahetult selline s6na, mis kindlasti ei saa olla
eestäiendiks
*) või nom/adv puhul on tegemist olema-verbi adv-ga;
'''
minWID = min(widToToken.keys())
nomAdvToken = widToToken[nomAdvWID]
isNom = self.wtNom.matches(nomAdvToken)
if headVerbWID+1 == nomAdvWID:
# 1) Kui nom/adv j2rgneb vahetult verbile, siis on ysna turvaline arvata,
# et need kuuluvad kokku, nt:
# ja seda tunnet on_0 raske_0 millegi vastu vahetada_0 .
# mida nad peavad_0 vajalikuks_0 läände müüa_0
return True
elif minWID == nomAdvWID:
# 2) Kui nom/adv on vahetult osalause alguses, siis on samuti üsna turvaline
# eeldada, et see kuulub verbiga kokku, nt:
# Tarvis_0 on_0 muretseda_0 veel auhinnafond 250 000 dollarit .
# Raske_0 on_0 temaga vaielda_0 .
return True
elif isNom and nomAdvWID-1 in widToToken:
prevToken = widToToken[nomAdvWID-1]
if self.wtNotSyntAttrib.matches(prevToken):
#
# 3.1) Kui nom-ile eelneb vahetult adverb, mis tavaliselt allub otse
# verbile ning ei funktsioneeri eest2iendina (nt 'ju', 'ikka',
# 'vist', 'veel' jms), siis on ysna turvaline eeldada, et nom
# ei ole mingi fraasi osa:
# Kaudseid näitajaid on_0 aga võimalik_0 analüüsida_0
# Pole_0 ju mõtet_0 hakata_0 teile ette laduma asjaolusid
# on_0 veel raske_0 kommenteerida_0
#
return True
elif self.wtNom.matches(prevToken):
if self.wtNomSemCase.matches(prevToken):
if not self.wtNomSemCase.matches(nomAdvToken):
#
# 3.2) Kui nom-ile vahetult eelnev s6na on semantilises k22ndes, aga nom
# mitte, pole nad suure t6en2osusega seotud, nt:
# Siis jääb_0 ootajal võimalus_0 öelda_0
# vahendajate juurdehindlust on_0 riigil võimalik_0 kontrollida_0 .
# Ka üürnikul on_0 selle seadusega õigus_0 maksta_0 vähem üüri
#
return True
else:
#
# 3.3) Kui nii nom kui vahetult eelnev s6na on m6lemad semantilises k22ndes,
# aga k22nded on erinevad, ei moodusta nad t6en2oliselt yhte fraasi, nt:
# pole_0 ettevõttel plaanis_0 Tartus kaugküttesooja hinda tõsta_0 .
# Ginrichi teatel on_0 vabariiklastel kavas_0 luua_0 erikomisjon ,
# et ühegi parkimismaja rajamist pole_0 linnal kavas_0 toetada_0 .
#
analyses1 = self.wtNomSemCase.matchingAnalyses(prevToken)
analyses2 = self.wtNomSemCase.matchingAnalyses(nomAdvToken)
forms1 = set([a[FORM] for a in analyses1])
forms2 = set([a[FORM] for a in analyses2])
if len(forms1.intersection(forms2))==0:
return True
elif not isNom and headVerbRoot.startswith('ole '):
#
# X) Kui tegemist on olema-ga liituva adv-ga, eeldame, et see on suurema t6en2osusega yksik,
# st pole mingi fraasi koosseisus:
# Theresel polnud_0 raskeid seasöögiämbreid tarvis_0 ubida_0 .
# Seepärast pole_0 meil ka häbi vaja_0 tunda_0
#
# NB! Alati see siiski nii ei ole, st võib liituda tähendust intensiivistav 'väga',
# 'pisut', 'palju' jms adverb, nt:
# Meil pole_0 siin palju vaja_0 pingutada_0
#
return True
return False | python | def _isLikelyNotPhrase( self, headVerbRoot, headVerbWID, nomAdvWID, widToToken):
''' Kontrollib, et nom/adv ei kuuluks mingi suurema fraasi kooseisu (poleks fraasi peas6na).
Tagastab True, kui:
*) nom/adv j2rgneb vahetult peaverbile
*) või nom/adv on vahetult osalause alguses
*) või nom-ile eelneb vahetult selline s6na, mis kindlasti ei saa olla
eestäiendiks
*) või nom/adv puhul on tegemist olema-verbi adv-ga;
'''
minWID = min(widToToken.keys())
nomAdvToken = widToToken[nomAdvWID]
isNom = self.wtNom.matches(nomAdvToken)
if headVerbWID+1 == nomAdvWID:
# 1) Kui nom/adv j2rgneb vahetult verbile, siis on ysna turvaline arvata,
# et need kuuluvad kokku, nt:
# ja seda tunnet on_0 raske_0 millegi vastu vahetada_0 .
# mida nad peavad_0 vajalikuks_0 läände müüa_0
return True
elif minWID == nomAdvWID:
# 2) Kui nom/adv on vahetult osalause alguses, siis on samuti üsna turvaline
# eeldada, et see kuulub verbiga kokku, nt:
# Tarvis_0 on_0 muretseda_0 veel auhinnafond 250 000 dollarit .
# Raske_0 on_0 temaga vaielda_0 .
return True
elif isNom and nomAdvWID-1 in widToToken:
prevToken = widToToken[nomAdvWID-1]
if self.wtNotSyntAttrib.matches(prevToken):
#
# 3.1) Kui nom-ile eelneb vahetult adverb, mis tavaliselt allub otse
# verbile ning ei funktsioneeri eest2iendina (nt 'ju', 'ikka',
# 'vist', 'veel' jms), siis on ysna turvaline eeldada, et nom
# ei ole mingi fraasi osa:
# Kaudseid näitajaid on_0 aga võimalik_0 analüüsida_0
# Pole_0 ju mõtet_0 hakata_0 teile ette laduma asjaolusid
# on_0 veel raske_0 kommenteerida_0
#
return True
elif self.wtNom.matches(prevToken):
if self.wtNomSemCase.matches(prevToken):
if not self.wtNomSemCase.matches(nomAdvToken):
#
# 3.2) Kui nom-ile vahetult eelnev s6na on semantilises k22ndes, aga nom
# mitte, pole nad suure t6en2osusega seotud, nt:
# Siis jääb_0 ootajal võimalus_0 öelda_0
# vahendajate juurdehindlust on_0 riigil võimalik_0 kontrollida_0 .
# Ka üürnikul on_0 selle seadusega õigus_0 maksta_0 vähem üüri
#
return True
else:
#
# 3.3) Kui nii nom kui vahetult eelnev s6na on m6lemad semantilises k22ndes,
# aga k22nded on erinevad, ei moodusta nad t6en2oliselt yhte fraasi, nt:
# pole_0 ettevõttel plaanis_0 Tartus kaugküttesooja hinda tõsta_0 .
# Ginrichi teatel on_0 vabariiklastel kavas_0 luua_0 erikomisjon ,
# et ühegi parkimismaja rajamist pole_0 linnal kavas_0 toetada_0 .
#
analyses1 = self.wtNomSemCase.matchingAnalyses(prevToken)
analyses2 = self.wtNomSemCase.matchingAnalyses(nomAdvToken)
forms1 = set([a[FORM] for a in analyses1])
forms2 = set([a[FORM] for a in analyses2])
if len(forms1.intersection(forms2))==0:
return True
elif not isNom and headVerbRoot.startswith('ole '):
#
# X) Kui tegemist on olema-ga liituva adv-ga, eeldame, et see on suurema t6en2osusega yksik,
# st pole mingi fraasi koosseisus:
# Theresel polnud_0 raskeid seasöögiämbreid tarvis_0 ubida_0 .
# Seepärast pole_0 meil ka häbi vaja_0 tunda_0
#
# NB! Alati see siiski nii ei ole, st võib liituda tähendust intensiivistav 'väga',
# 'pisut', 'palju' jms adverb, nt:
# Meil pole_0 siin palju vaja_0 pingutada_0
#
return True
return False | Checks that the nom/adv does not belong to some larger phrase (i.e. is not the head word of a phrase).
Returns True if:
*) the nom/adv immediately follows the head verb
*) or the nom/adv is at the very beginning of the clause
*) or the nom is immediately preceded by a word that certainly cannot act as a
premodifier
*) or the nom/adv is an adverb attached to the verb 'olema' (to be); | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/verbchain_nom_vinf_extender.py#L133-L207 |
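The first two heuristics listed above are purely positional; isolated from the estnltk data structures they look like the simplified sketch below, using plain integer word IDs:
def likely_not_phrase_by_position(head_verb_wid, nom_adv_wid, clause_wids):
    # 1) nom/adv immediately follows the head verb
    if head_verb_wid + 1 == nom_adv_wid:
        return True
    # 2) nom/adv is the first word of the clause
    if min(clause_wids) == nom_adv_wid:
        return True
    return False

# "Raske_0 on_0 temaga vaielda_0": 'Raske' (wid 0) is clause-initial
print(likely_not_phrase_by_position(head_verb_wid=1, nom_adv_wid=0, clause_wids=[0, 1, 2, 3]))  # True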
estnltk/estnltk | estnltk/mw_verbs/verbchain_nom_vinf_extender.py | VerbChainNomVInfExtender._canBeExpanded | def _canBeExpanded( self, headVerbRoot, headVerbWID, suitableNomAdvExpansions, expansionVerbs, widToToken ):
''' Teeb kindlaks, kas kontekst on verbiahela laiendamiseks piisavalt selge/yhene:
1) Nii 'nom/adv' kandidaate kui ka Vinf kandidaate on täpselt üks;
2) Nom/adv ei kuulu mingi suurema fraasi kooseisu (meetodi _isLikelyNotPhrase() abil);
Kui tingimused täidetud, tagastab lisatava verbi listist expansionVerbs, vastasel juhul
tagastab None;
'''
if len(suitableNomAdvExpansions)==1 and expansionVerbs:
# Kontrollime, kas leidub t2pselt yks laiendiks sobiv verb (kui leidub
# rohkem, on kontekst kahtlane ja raske otsustada, kas tasub laiendada
# v6i mitte)
suitableExpansionVerbs = \
[expVerb for expVerb in expansionVerbs if expVerb[2] == suitableNomAdvExpansions[0][2]]
if len( suitableExpansionVerbs ) == 1:
# Kontrollime, et nom/adv ei kuuluks mingi suurema fraasi kooseisu (ei oleks fraasi
# peas6na);
nomAdvWID = suitableNomAdvExpansions[0][0]
if self._isLikelyNotPhrase( headVerbRoot, headVerbWID, nomAdvWID, widToToken ):
return suitableExpansionVerbs[0]
return None | python | def _canBeExpanded( self, headVerbRoot, headVerbWID, suitableNomAdvExpansions, expansionVerbs, widToToken ):
''' Teeb kindlaks, kas kontekst on verbiahela laiendamiseks piisavalt selge/yhene:
1) Nii 'nom/adv' kandidaate kui ka Vinf kandidaate on täpselt üks;
2) Nom/adv ei kuulu mingi suurema fraasi kooseisu (meetodi _isLikelyNotPhrase() abil);
Kui tingimused täidetud, tagastab lisatava verbi listist expansionVerbs, vastasel juhul
tagastab None;
'''
if len(suitableNomAdvExpansions)==1 and expansionVerbs:
# Kontrollime, kas leidub t2pselt yks laiendiks sobiv verb (kui leidub
# rohkem, on kontekst kahtlane ja raske otsustada, kas tasub laiendada
# v6i mitte)
suitableExpansionVerbs = \
[expVerb for expVerb in expansionVerbs if expVerb[2] == suitableNomAdvExpansions[0][2]]
if len( suitableExpansionVerbs ) == 1:
# Kontrollime, et nom/adv ei kuuluks mingi suurema fraasi kooseisu (ei oleks fraasi
# peas6na);
nomAdvWID = suitableNomAdvExpansions[0][0]
if self._isLikelyNotPhrase( headVerbRoot, headVerbWID, nomAdvWID, widToToken ):
return suitableExpansionVerbs[0]
return None | Determines whether the context is clear/unambiguous enough for extending the verb chain:
1) there is exactly one 'nom/adv' candidate and exactly one Vinf candidate;
2) the nom/adv does not belong to some larger phrase (checked via the method _isLikelyNotPhrase());
If the conditions are met, returns the verb to be added from the list expansionVerbs; otherwise
returns None; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/verbchain_nom_vinf_extender.py#L210-L229 |
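The check above reduces to: exactly one nom/adv candidate, exactly one infinitive candidate whose form matches the one the nom/adv requires, plus the phrase test. A toy sketch of the candidate filtering, with plain tuples in place of estnltk tokens:
def pick_unique_expansion(nom_adv_candidates, expansion_verbs):
    # nom_adv_candidates: [(wid, required_vinf)]; expansion_verbs: [(wid, vinf_form)]
    if len(nom_adv_candidates) != 1 or not expansion_verbs:
        return None
    required = nom_adv_candidates[0][1]
    compatible = [v for v in expansion_verbs if v[1] == required]
    return compatible[0] if len(compatible) == 1 else None

# 'võimalus' requires V_da and only one V_da verb is available -> unambiguous
print(pick_unique_expansion([(3, 'V_da')], [(5, 'V_da')]))              # (5, 'V_da')
print(pick_unique_expansion([(3, 'V_da')], [(5, 'V_da'), (8, 'V_da')])) # None (ambiguous)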
estnltk/estnltk | estnltk/mw_verbs/verbchain_nom_vinf_extender.py | VerbChainNomVInfExtender.extendChainsInClause | def extendChainsInClause( self, clause, clauseID, foundChains ):
''' Proovime etteantud osalauses leiduvaid verbiahelaid täiendada 'verb-nom/adv-vinf'
rektsiooniseostega, nt:
andma + võimalus + Vda : talle anti_0 võimalus_0 olukorda parandada_0
olema + vaja + Vda : nüüd on_0 küll vaja_0 asi lõpetada_0
Teeme seda kahel moel:
1) kui mingi olemasoleva verbiahela keskelt on puudu 'nom/adv' (nt 'andma', 'jätma'
verbide vinf rektsiooniseoste leidmisel võib tekkida selliseid lünki), siis
lisame ahela keskele 'nom/adv' sõna.
2) kui verbiahela lõpus on verb, mis on sageli ülemuseks 'nom/adv' sõnale, millest
omakorda sõltub mingi Vinf verb (Vma, Vda), ning need on osalausekontekstis olemas,
lisame need verbiahela lõppu;
'''
expansionPerformed = False
# J22dvustame s6nad, mis kuuluvad juba mingi tuvastatud verbifraasi koosseisu
annotatedWords = []
for verbObj in foundChains:
if clauseID == verbObj[CLAUSE_IDX] and (len(verbObj[PATTERN])==1 and \
re.match('^(ei|ära|ega)$', verbObj[PATTERN][0])):
# V2lja j22vad yksikuna esinevad ei/ära/ega, kuna need tõenäoliselt ei sega
continue
annotatedWords.extend( verbObj[PHRASE] )
widToToken = { token[WORD_ID] : token for token in clause }
verbDaMa = WordTemplate({POSTAG:'V', FORM:'^(da|ma)$'})
verbOle = WordTemplate({ROOT:'^ole$',POSTAG:'V'})
#
# 1) Yritame leida, millised juba tuvastatud verbiahelatest on sellised, kust on
# vahelt puudu nom/adv s6na, nt:
# annab_0 kunstnik jälle põhjuse endast kirjutada_0
# see annab_0 võimaluse laua tagant tõusta_0 ja_0 minema jalutada_0
# Kui leiame ahelast puuduoleva s6na ja lisame selle ahelasse ...
#
for verbObj in foundChains:
if clauseID == verbObj[CLAUSE_IDX]:
headVerb = ''
headVerbWID = -1
dependentVerb = ''
dependentVerbWIDs = []
firstDependentVerbID = -1
# Leiame ahela l6pust ylemus-verbi ja sellele alluva verbi
if len(verbObj[PATTERN]) > 3 and verbObj[PATTERN][-2] == '&':
headVerb = verbObj[ROOTS][-4]+" "+verbObj[POLARITY]
dependentVerb = verbObj[MORPH][-3]
headVerbWID = verbObj[PHRASE][-4]
dependentVerbWIDs.append( verbObj[PHRASE][-3] )
dependentVerbWIDs.append( verbObj[PHRASE][-1] )
firstDependentVerbID = len(verbObj[PHRASE])-3
elif len(verbObj[PATTERN]) > 1 and verbObj[PATTERN][-2]=='verb':
headVerb = verbObj[ROOTS][-2]+" "+verbObj[POLARITY]
dependentVerb = verbObj[MORPH][-1]
headVerbWID = verbObj[PHRASE][-2]
dependentVerbWIDs.append(verbObj[PHRASE][-1])
firstDependentVerbID = len(verbObj[PHRASE])-1
# Kontrollime, kas ylemusverb ja sellele alluv verb v6iksid olla yhendatud
# mingi nom/adv s6na kaudu
if headVerb in self.verbRules and headVerb in self.verbToVinf and \
dependentVerb in self.verbToVinf[headVerb]:
# Teeme kindlaks, kas s6nade vahele j22b puuduolev nom/adv
minInd = min(min(dependentVerbWIDs), headVerbWID-1)
maxInd = max(max(dependentVerbWIDs)-1, headVerbWID)
if minInd < maxInd:
for i in range(minInd, maxInd+1):
if i in widToToken and i not in annotatedWords:
token = widToToken[i]
matchingAnalyses = self.tokenMatchesNomAdvVinf( token, headVerb, dependentVerb )
if matchingAnalyses and not expansionPerformed:
# Kontrollime, kas vaheletorgatav sõna paikneb nii, et see on suure
# tõenäosusega üksiksõna, mitte fraas.
if self._isLikelyNotPhrase( headVerb, headVerbWID, token[WORD_ID], widToToken ):
# Torkame nimis6na/adverbi vahele
verbObj[PHRASE].insert( firstDependentVerbID, token[WORD_ID] )
verbObj[PATTERN].insert( firstDependentVerbID, 'nom/adv' )
verbObj[ANALYSIS_IDS].insert( firstDependentVerbID, matchingAnalyses )
annotatedWords.append( token[WORD_ID] )
expansionPerformed = True
else:
# Kui me ei saa olla kindlad, et vaheletorgatav sõna pole fraas, paneme
# küsimärgi, näitamaks, et verbiahelast on suure tõenäosusega midagi
# puudu ...
verbObj[OTHER_VERBS] = True
#_debugPrint( ' '+('+'.join(verbObj[PATTERN]))+' | '+_getJsonAsTextString(clause, markTokens = [ verbObj[PHRASE] ] ))
#
# 2) Yritame luua uusi ahelaid, laiendades verbe olemasolevate ahelate l6pus:
#
# millega antakse_0 võimalus_1 sõlmida_1 uus kokkulepe .
# puudub_0 võimalus_1 spetsialiste täistööajaga rakendada_1 .
# kui on_0 võimalus_1 rahulikult vooluga kaasa minna_1
#
clauseMaxWID = max( list(widToToken.keys()) )
for verbObj in foundChains:
if clauseID == verbObj[CLAUSE_IDX] and verbObj[OTHER_VERBS]:
if (len(verbObj[PATTERN])==1 or (len(verbObj[PATTERN])>1 and \
verbObj[PATTERN][-2] != '&')):
headVerb = verbObj[ROOTS][-1]+" "+verbObj[POLARITY]
headVerbWID = verbObj[PHRASE][-1]
#
# 2.1) Esimeses l2henduses vaatame tavalisi verbe (mitte-olema);
#
if headVerb in self.verbRules and not headVerb.startswith('ole '):
minInd = headVerbWID-1 if verbObj[PATTERN][0]!='ega' else headVerbWID
suitableNomAdvExpansions = []
expansionVerbs = []
for i in range(minInd, clauseMaxWID+1):
if i in widToToken and i not in annotatedWords:
token = widToToken[i]
if _isFollowedByComma( i, clause ):
# Katkestame, kui satume koma otsa (kuna ei saa kindel olla,
# et teisel pool koma on olevad jupid kuuluvad ikka verbi
# juurde)
break
if verbDaMa.matches( token ):
analysisIDs = _getMatchingAnalysisIDs( token, verbDaMa )
form = token[ANALYSIS][analysisIDs[0]][FORM]
expansionVerbs.append( [i, token, "V_"+form ] )
else:
for (nounAdv, vinf1) in self.verbRules[headVerb]:
if (self.nomAdvWordTemplates[nounAdv]).matches(token):
suitableNomAdvExpansions.append( [i, token, vinf1, \
(self.nomAdvWordTemplates[nounAdv]), nounAdv ] )
# Teeme kindlaks, kas kontekst on laiendamiseks piisavalt yhene/selge ...
suitableExpansionVerb = \
self._canBeExpanded( headVerb, headVerbWID, suitableNomAdvExpansions, \
expansionVerbs, widToToken )
if suitableExpansionVerb:
phraseExt = [suitableNomAdvExpansions[0][0], suitableExpansionVerb[0]]
expIsOle = verbOle.matches(suitableExpansionVerb[1])
patternExt = ['nom/adv', 'ole' if expIsOle else 'verb']
analysisIDsExt = [ \
_getMatchingAnalysisIDs( suitableNomAdvExpansions[0][1], \
suitableNomAdvExpansions[0][3] ), \
_getMatchingAnalysisIDs( suitableExpansionVerb[1], verbDaMa ) ]
# Lisame ahelale pikendused
verbObj[PHRASE].extend( phraseExt )
verbObj[PATTERN].extend( patternExt )
verbObj[ANALYSIS_IDS].extend( analysisIDsExt )
annotatedWords.extend( phraseExt )
expansionPerformed = True
#if headVerb.startswith('and '):
# _debugPrint( ('+'.join(verbObj[PATTERN]))+' | '+getJsonAsTextString(clause, markTokens = [ verbObj[PHRASE] ] ))
#_debugPrint( ('+'.join(verbObj[PATTERN]))+' | '+getJsonAsTextString(clause, markTokens = [ verbObj[PHRASE] ] ))
elif headVerb in self.verbRules and headVerb.startswith('ole '):
#
# 2.2) Vaatame olema-verbi rektsiooniseoseid;
#
minInd = headVerbWID-1 if verbObj[PATTERN][0]!='ega' else headVerbWID
suitableNomAdvExpansions = []
expansionVerbs = []
for i in range(minInd, clauseMaxWID+1):
if i in widToToken and i not in annotatedWords:
token = widToToken[i]
if verbDaMa.matches( token ):
analysisIDs = _getMatchingAnalysisIDs( token, verbDaMa )
form = token[ANALYSIS][analysisIDs[0]][FORM]
expansionVerbs.append( [i, token, "V_"+form ] )
else:
for (nounAdv, vinf1) in self.verbRules[headVerb]:
if (self.nomAdvWordTemplates[nounAdv]).matches(token):
suitableNomAdvExpansions.append( [i, token, vinf1, \
(self.nomAdvWordTemplates[nounAdv]), nounAdv] )
if _isFollowedByComma( i, clause ):
# Katkestame, kui satume koma otsa (kuna ei saa kindel olla,
# et teisel pool koma on olevad jupid kuuluvad ikka verbi
# juurde)
break
# Teeme kindlaks, kas kontekst on laiendamiseks piisavalt yhene/selge ...
suitableExpansionVerb = \
self._canBeExpanded( headVerb, headVerbWID, suitableNomAdvExpansions, \
expansionVerbs, widToToken )
if suitableExpansionVerb:
phraseExt = [suitableNomAdvExpansions[0][0], suitableExpansionVerb[0]]
expIsOle = verbOle.matches(suitableExpansionVerb[1])
patternExt = ['nom/adv', 'ole' if expIsOle else 'verb']
analysisIDsExt = [ \
_getMatchingAnalysisIDs( suitableNomAdvExpansions[0][1], \
suitableNomAdvExpansions[0][3] ), \
_getMatchingAnalysisIDs( suitableExpansionVerb[1], verbDaMa ) ]
# Lisame ahelale pikendused
verbObj[PHRASE].extend( phraseExt )
verbObj[PATTERN].extend( patternExt )
verbObj[ANALYSIS_IDS].extend( analysisIDsExt )
annotatedWords.extend( phraseExt )
expansionPerformed = True
#_debugPrint( ('+'.join(verbObj[PATTERN]))+' | '+getJsonAsTextString(clause, markTokens = [ verbObj[PHRASE] ] ))
#if suitableNomAdvExpansions[0][4].startswith('aeg;'):
# _debugPrint( ('+'.join(verbObj[PATTERN]))+' | '+_getJsonAsTextString(clause, markTokens = [ verbObj[PHRASE] ] ))
return expansionPerformed | python | def extendChainsInClause( self, clause, clauseID, foundChains ):
''' Proovime etteantud osalauses leiduvaid verbiahelaid täiendada 'verb-nom/adv-vinf'
rektsiooniseostega, nt:
andma + võimalus + Vda : talle anti_0 võimalus_0 olukorda parandada_0
olema + vaja + Vda : nüüd on_0 küll vaja_0 asi lõpetada_0
Teeme seda kahel moel:
1) kui mingi olemasoleva verbiahela keskelt on puudu 'nom/adv' (nt 'andma', 'jätma'
verbide vinf rektsiooniseoste leidmisel võib tekkida selliseid lünki), siis
lisame ahela keskele 'nom/adv' sõna.
2) kui verbiahela lõpus on verb, mis on sageli ülemuseks 'nom/adv' sõnale, millest
omakorda sõltub mingi Vinf verb (Vma, Vda), ning need on osalausekontekstis olemas,
lisame need verbiahela lõppu;
'''
expansionPerformed = False
# J22dvustame s6nad, mis kuuluvad juba mingi tuvastatud verbifraasi koosseisu
annotatedWords = []
for verbObj in foundChains:
if clauseID == verbObj[CLAUSE_IDX] and (len(verbObj[PATTERN])==1 and \
re.match('^(ei|ära|ega)$', verbObj[PATTERN][0])):
# V2lja j22vad yksikuna esinevad ei/ära/ega, kuna need tõenäoliselt ei sega
continue
annotatedWords.extend( verbObj[PHRASE] )
widToToken = { token[WORD_ID] : token for token in clause }
verbDaMa = WordTemplate({POSTAG:'V', FORM:'^(da|ma)$'})
verbOle = WordTemplate({ROOT:'^ole$',POSTAG:'V'})
#
# 1) Yritame leida, millised juba tuvastatud verbiahelatest on sellised, kust on
# vahelt puudu nom/adv s6na, nt:
# annab_0 kunstnik jälle põhjuse endast kirjutada_0
# see annab_0 võimaluse laua tagant tõusta_0 ja_0 minema jalutada_0
# Kui leiame ahelast puuduoleva s6na ja lisame selle ahelasse ...
#
for verbObj in foundChains:
if clauseID == verbObj[CLAUSE_IDX]:
headVerb = ''
headVerbWID = -1
dependentVerb = ''
dependentVerbWIDs = []
firstDependentVerbID = -1
# Leiame ahela l6pust ylemus-verbi ja sellele alluva verbi
if len(verbObj[PATTERN]) > 3 and verbObj[PATTERN][-2] == '&':
headVerb = verbObj[ROOTS][-4]+" "+verbObj[POLARITY]
dependentVerb = verbObj[MORPH][-3]
headVerbWID = verbObj[PHRASE][-4]
dependentVerbWIDs.append( verbObj[PHRASE][-3] )
dependentVerbWIDs.append( verbObj[PHRASE][-1] )
firstDependentVerbID = len(verbObj[PHRASE])-3
elif len(verbObj[PATTERN]) > 1 and verbObj[PATTERN][-2]=='verb':
headVerb = verbObj[ROOTS][-2]+" "+verbObj[POLARITY]
dependentVerb = verbObj[MORPH][-1]
headVerbWID = verbObj[PHRASE][-2]
dependentVerbWIDs.append(verbObj[PHRASE][-1])
firstDependentVerbID = len(verbObj[PHRASE])-1
# Kontrollime, kas ylemusverb ja sellele alluv verb v6iksid olla yhendatud
# mingi nom/adv s6na kaudu
if headVerb in self.verbRules and headVerb in self.verbToVinf and \
dependentVerb in self.verbToVinf[headVerb]:
# Teeme kindlaks, kas s6nade vahele j22b puuduolev nom/adv
minInd = min(min(dependentVerbWIDs), headVerbWID-1)
maxInd = max(max(dependentVerbWIDs)-1, headVerbWID)
if minInd < maxInd:
for i in range(minInd, maxInd+1):
if i in widToToken and i not in annotatedWords:
token = widToToken[i]
matchingAnalyses = self.tokenMatchesNomAdvVinf( token, headVerb, dependentVerb )
if matchingAnalyses and not expansionPerformed:
# Kontrollime, kas vaheletorgatav sõna paikneb nii, et see on suure
# tõenäosusega üksiksõna, mitte fraas.
if self._isLikelyNotPhrase( headVerb, headVerbWID, token[WORD_ID], widToToken ):
# Torkame nimis6na/adverbi vahele
verbObj[PHRASE].insert( firstDependentVerbID, token[WORD_ID] )
verbObj[PATTERN].insert( firstDependentVerbID, 'nom/adv' )
verbObj[ANALYSIS_IDS].insert( firstDependentVerbID, matchingAnalyses )
annotatedWords.append( token[WORD_ID] )
expansionPerformed = True
else:
# Kui me ei saa olla kindlad, et vaheletorgatav sõna pole fraas, paneme
# küsimärgi, näitamaks, et verbiahelast on suure tõenäosusega midagi
# puudu ...
verbObj[OTHER_VERBS] = True
#_debugPrint( ' '+('+'.join(verbObj[PATTERN]))+' | '+_getJsonAsTextString(clause, markTokens = [ verbObj[PHRASE] ] ))
#
# 2) Yritame luua uusi ahelaid, laiendades verbe olemasolevate ahelate l6pus:
#
# millega antakse_0 võimalus_1 sõlmida_1 uus kokkulepe .
# puudub_0 võimalus_1 spetsialiste täistööajaga rakendada_1 .
# kui on_0 võimalus_1 rahulikult vooluga kaasa minna_1
#
clauseMaxWID = max( list(widToToken.keys()) )
for verbObj in foundChains:
if clauseID == verbObj[CLAUSE_IDX] and verbObj[OTHER_VERBS]:
if (len(verbObj[PATTERN])==1 or (len(verbObj[PATTERN])>1 and \
verbObj[PATTERN][-2] != '&')):
headVerb = verbObj[ROOTS][-1]+" "+verbObj[POLARITY]
headVerbWID = verbObj[PHRASE][-1]
#
# 2.1) Esimeses l2henduses vaatame tavalisi verbe (mitte-olema);
#
if headVerb in self.verbRules and not headVerb.startswith('ole '):
minInd = headVerbWID-1 if verbObj[PATTERN][0]!='ega' else headVerbWID
suitableNomAdvExpansions = []
expansionVerbs = []
for i in range(minInd, clauseMaxWID+1):
if i in widToToken and i not in annotatedWords:
token = widToToken[i]
if _isFollowedByComma( i, clause ):
# Katkestame, kui satume koma otsa (kuna ei saa kindel olla,
# et teisel pool koma on olevad jupid kuuluvad ikka verbi
# juurde)
break
if verbDaMa.matches( token ):
analysisIDs = _getMatchingAnalysisIDs( token, verbDaMa )
form = token[ANALYSIS][analysisIDs[0]][FORM]
expansionVerbs.append( [i, token, "V_"+form ] )
else:
for (nounAdv, vinf1) in self.verbRules[headVerb]:
if (self.nomAdvWordTemplates[nounAdv]).matches(token):
suitableNomAdvExpansions.append( [i, token, vinf1, \
(self.nomAdvWordTemplates[nounAdv]), nounAdv ] )
# Teeme kindlaks, kas kontekst on laiendamiseks piisavalt yhene/selge ...
suitableExpansionVerb = \
self._canBeExpanded( headVerb, headVerbWID, suitableNomAdvExpansions, \
expansionVerbs, widToToken )
if suitableExpansionVerb:
phraseExt = [suitableNomAdvExpansions[0][0], suitableExpansionVerb[0]]
expIsOle = verbOle.matches(suitableExpansionVerb[1])
patternExt = ['nom/adv', 'ole' if expIsOle else 'verb']
analysisIDsExt = [ \
_getMatchingAnalysisIDs( suitableNomAdvExpansions[0][1], \
suitableNomAdvExpansions[0][3] ), \
_getMatchingAnalysisIDs( suitableExpansionVerb[1], verbDaMa ) ]
# Lisame ahelale pikendused
verbObj[PHRASE].extend( phraseExt )
verbObj[PATTERN].extend( patternExt )
verbObj[ANALYSIS_IDS].extend( analysisIDsExt )
annotatedWords.extend( phraseExt )
expansionPerformed = True
#if headVerb.startswith('and '):
# _debugPrint( ('+'.join(verbObj[PATTERN]))+' | '+getJsonAsTextString(clause, markTokens = [ verbObj[PHRASE] ] ))
#_debugPrint( ('+'.join(verbObj[PATTERN]))+' | '+getJsonAsTextString(clause, markTokens = [ verbObj[PHRASE] ] ))
elif headVerb in self.verbRules and headVerb.startswith('ole '):
#
# 2.2) Vaatame olema-verbi rektsiooniseoseid;
#
minInd = headVerbWID-1 if verbObj[PATTERN][0]!='ega' else headVerbWID
suitableNomAdvExpansions = []
expansionVerbs = []
for i in range(minInd, clauseMaxWID+1):
if i in widToToken and i not in annotatedWords:
token = widToToken[i]
if verbDaMa.matches( token ):
analysisIDs = _getMatchingAnalysisIDs( token, verbDaMa )
form = token[ANALYSIS][analysisIDs[0]][FORM]
expansionVerbs.append( [i, token, "V_"+form ] )
else:
for (nounAdv, vinf1) in self.verbRules[headVerb]:
if (self.nomAdvWordTemplates[nounAdv]).matches(token):
suitableNomAdvExpansions.append( [i, token, vinf1, \
(self.nomAdvWordTemplates[nounAdv]), nounAdv] )
if _isFollowedByComma( i, clause ):
# Katkestame, kui satume koma otsa (kuna ei saa kindel olla,
# et teisel pool koma on olevad jupid kuuluvad ikka verbi
# juurde)
break
# Teeme kindlaks, kas kontekst on laiendamiseks piisavalt yhene/selge ...
suitableExpansionVerb = \
self._canBeExpanded( headVerb, headVerbWID, suitableNomAdvExpansions, \
expansionVerbs, widToToken )
if suitableExpansionVerb:
phraseExt = [suitableNomAdvExpansions[0][0], suitableExpansionVerb[0]]
expIsOle = verbOle.matches(suitableExpansionVerb[1])
patternExt = ['nom/adv', 'ole' if expIsOle else 'verb']
analysisIDsExt = [ \
_getMatchingAnalysisIDs( suitableNomAdvExpansions[0][1], \
suitableNomAdvExpansions[0][3] ), \
_getMatchingAnalysisIDs( suitableExpansionVerb[1], verbDaMa ) ]
# Lisame ahelale pikendused
verbObj[PHRASE].extend( phraseExt )
verbObj[PATTERN].extend( patternExt )
verbObj[ANALYSIS_IDS].extend( analysisIDsExt )
annotatedWords.extend( phraseExt )
expansionPerformed = True
#_debugPrint( ('+'.join(verbObj[PATTERN]))+' | '+getJsonAsTextString(clause, markTokens = [ verbObj[PHRASE] ] ))
#if suitableNomAdvExpansions[0][4].startswith('aeg;'):
# _debugPrint( ('+'.join(verbObj[PATTERN]))+' | '+_getJsonAsTextString(clause, markTokens = [ verbObj[PHRASE] ] ))
return expansionPerformed | Attempts to extend the verb chains found in the given clause with 'verb-nom/adv-vinf'
government relations, e.g.:
andma + võimalus + Vda : talle anti_0 võimalus_0 olukorda parandada_0
olema + vaja + Vda : nüüd on_0 küll vaja_0 asi lõpetada_0
This is done in two ways:
1) if a 'nom/adv' word is missing from the middle of an existing verb chain (such gaps can
appear when detecting the vinf government relations of verbs like 'andma', 'jätma'),
the 'nom/adv' word is inserted into the middle of the chain.
2) if the chain ends with a verb that frequently governs a 'nom/adv' word on which, in turn,
some Vinf verb (Vma, Vda) depends, and these are present in the clause context,
they are appended to the end of the chain; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/verbchain_nom_vinf_extender.py#L232-L418 |
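When the second strategy above fires, the detected chain record grows by two entries in each of its parallel lists (word IDs, pattern labels, analysis IDs). A toy illustration, with plain string keys standing in for estnltk's PHRASE/PATTERN/ANALYSIS_IDS constants:
# Chain for "on_0" in "kui on_0 võimalus_1 rahulikult vooluga kaasa minna_1"
verb_obj = {'phrase': [1], 'pattern': ['ole'], 'analysis_ids': [[0]]}

# Extension found by the clause scan: 'võimalus' (wid 2) + 'minna' (wid 6, form V_da)
verb_obj['phrase'].extend([2, 6])
verb_obj['pattern'].extend(['nom/adv', 'verb'])
verb_obj['analysis_ids'].extend([[0], [0]])

print(verb_obj['pattern'])   # ['ole', 'nom/adv', 'verb']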
estnltk/estnltk | estnltk/wiki/sections.py | sectionsParser | def sectionsParser(text):
"""
:param text: the whole text of a Wikipedia article
:return: a list of nested section objects
[{title: "Rahvaarv",
text: "Eestis elab..."},
{title: "Ajalugu",
text: "..."},
sections: [{title: "Rahvaarv",
text: "Eestis elab..."},
{title: "Ajalugu",
text: "..."}],],
"""
textStart = 0
#Split the text in sections. Hackish part, but seems to work fine.
entries = re.split("\n=", text[textStart:])
stack = [[]]
intro = {}
sectionTitleRegEx = re.compile(r'={1,}.+={2,}')
section = {}
section['text'] = entries[0]
counts = []
#Presumes the first section is always marked with 2 =
#First count is always 3. (\n=)= First Section of an Article ==
#Parens is omitted. Leaves three = marks.
counts.append(3)
sections = []
sections.append(section)
for i in entries[1:]:
section = {}
title = re.match(sectionTitleRegEx, i)
if title:
titleEnd = title.end()
title = title.group()
text = i[titleEnd:]
level = title.count('=')
section['title']=title.strip('= ')
section['text']=text
sections.append(section.copy())
counts.append(level)
#add images, links, references, tables
for section in sections:
text = section['text']
if 'wikitable' in text or '</table>' in text.lower():
section['text'], section['tables'] = tableCollector(text)
section = relatedArticles(section)
if '<ref' in text:
section = reffinder(section)
if imageRegEx.search(text):
section = imageParser(section)
section['text'] = section['text'].strip()
if ExtLinkBracketedRegex.search(text):
section = addExternalLinks(section)
if '[[' in text:
section = addIntLinks(section)
#clean uneven brackets and whatnot
#take extlink start:end w regex.
el = 'external_links'
if el in section.keys():
#section['text'] = section['text'].replace('[', '').replace(']', '')
text = section['text']
for link in section[el]:
label = link['label']
label = re.compile(re.escape(label))
m = label.search(text)
#if there are unbalanced brackets in the external
#links label inside text then it fails to mark the start and end
try:
link['start'] = m.start()
link['end'] = m.end()
except AttributeError:
print('Problem with external links start:end position!')
print(label)
print(text)
#datastructure nesting thanks to Timo!
if counts:
assert len(counts) == len(sections)
n = len(sections)
pos = 0
levels = [counts[0]]
while pos < n:
count = counts[pos]
elem = sections[pos]
level = levels[-1]
if count == level:
stack[-1].append(elem)
elif count >= level:
stack.append([elem])
levels.append(count)
else:
group = stack.pop()
stack[-1][-1]['sections'] = group
levels.pop()
continue
pos += 1
while len(stack) > 1:
group = stack.pop()
stack[-1][-1]['sections'] = group
stack = stack[0]
return stack | python | def sectionsParser(text):
"""
:param text: the whole text of a Wikipedia article
:return: a list of nested section objects
[{title: "Rahvaarv",
text: "Eestis elab..."},
{title: "Ajalugu",
text: "..."},
sections: [{title: "Rahvaarv",
text: "Eestis elab..."},
{title: "Ajalugu",
text: "..."}],],
"""
textStart = 0
#Split the text in sections. Hackish part, but seems to work fine.
entries = re.split("\n=", text[textStart:])
stack = [[]]
intro = {}
sectionTitleRegEx = re.compile(r'={1,}.+={2,}')
section = {}
section['text'] = entries[0]
counts = []
#Presumes the first section is always marked with 2 =
#First count is always 3. (\n=)= First Section of an Article ==
#Parens is omitted. Leaves three = marks.
counts.append(3)
sections = []
sections.append(section)
for i in entries[1:]:
section = {}
title = re.match(sectionTitleRegEx, i)
if title:
titleEnd = title.end()
title = title.group()
text = i[titleEnd:]
level = title.count('=')
section['title']=title.strip('= ')
section['text']=text
sections.append(section.copy())
counts.append(level)
#add images, links, references, tables
for section in sections:
text = section['text']
if 'wikitable' in text or '</table>' in text.lower():
section['text'], section['tables'] = tableCollector(text)
section = relatedArticles(section)
if '<ref' in text:
section = reffinder(section)
if imageRegEx.search(text):
section = imageParser(section)
section['text'] = section['text'].strip()
if ExtLinkBracketedRegex.search(text):
section = addExternalLinks(section)
if '[[' in text:
section = addIntLinks(section)
#clean uneven brackets and whatnot
#take extlink start:end w regex.
el = 'external_links'
if el in section.keys():
#section['text'] = section['text'].replace('[', '').replace(']', '')
text = section['text']
for link in section[el]:
label = link['label']
label = re.compile(re.escape(label))
m = label.search(text)
#if there are unbalanced brackets in the external
#links label inside text then it fails to mark the start and end
try:
link['start'] = m.start()
link['end'] = m.end()
except AttributeError:
print('Problem with external links start:end position!')
print(label)
print(text)
#datastructure nesting thanks to Timo!
if counts:
assert len(counts) == len(sections)
n = len(sections)
pos = 0
levels = [counts[0]]
while pos < n:
count = counts[pos]
elem = sections[pos]
level = levels[-1]
if count == level:
stack[-1].append(elem)
elif count >= level:
stack.append([elem])
levels.append(count)
else:
group = stack.pop()
stack[-1][-1]['sections'] = group
levels.pop()
continue
pos += 1
while len(stack) > 1:
group = stack.pop()
stack[-1][-1]['sections'] = group
stack = stack[0]
return stack | :param text: the whole text of a Wikipedia article
:return: a list of nested section objects
[{title: "Rahvaarv",
text: "Eestis elab..."},
{title: "Ajalugu",
text: "..."},
sections: [{title: "Rahvaarv",
text: "Eestis elab..."},
{title: "Ajalugu",
text: "..."}],], | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wiki/sections.py#L13-L144 |
estnltk/estnltk | estnltk/textcleaner.py | TextCleaner.clean | def clean(self, text):
"""Remove all unwanted characters from text."""
return ''.join([c for c in text if c in self.alphabet]) | python | def clean(self, text):
"""Remove all unwanted characters from text."""
return ''.join([c for c in text if c in self.alphabet]) | Remove all unwanted characters from text. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/textcleaner.py#L36-L38 |
estnltk/estnltk | estnltk/textcleaner.py | TextCleaner.invalid_characters | def invalid_characters(self, text):
"""Give simple list of invalid characters present in text."""
return ''.join(sorted(set([c for c in text if c not in self.alphabet]))) | python | def invalid_characters(self, text):
"""Give simple list of invalid characters present in text."""
return ''.join(sorted(set([c for c in text if c not in self.alphabet]))) | Give a simple list of invalid characters present in text. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/textcleaner.py#L49-L51 |
estnltk/estnltk | estnltk/textcleaner.py | TextCleaner.find_invalid_chars | def find_invalid_chars(self, text, context_size=20):
"""Find invalid characters in text and store information about
the findings.
Parameters
----------
context_size: int
How many characters to return as the context.
"""
result = defaultdict(list)
for idx, char in enumerate(text):
if char not in self.alphabet:
start = max(0, idx-context_size)
end = min(len(text), idx+context_size)
result[char].append(text[start:end])
return result | python | def find_invalid_chars(self, text, context_size=20):
"""Find invalid characters in text and store information about
the findings.
Parameters
----------
context_size: int
How many characters to return as the context.
"""
result = defaultdict(list)
for idx, char in enumerate(text):
if char not in self.alphabet:
start = max(0, idx-context_size)
end = min(len(text), idx+context_size)
result[char].append(text[start:end])
return result | Find invalid characters in text and store information about
the findings.
Parameters
----------
context_size: int
How many characters to return as the context. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/textcleaner.py#L53-L69 |
estnltk/estnltk | estnltk/textcleaner.py | TextCleaner.compute_report | def compute_report(self, texts, context_size=10):
"""Compute statistics of invalid characters on given texts.
Parameters
----------
texts: list of str
The texts to search for invalid characters.
context_size: int
How many characters to return as the context.
Returns
-------
dict of (char -> list of tuple (index, context))
Returns a dictionary, where keys are invalid characters.
Values are lists containing tuples with character indices
and context strings.
"""
result = defaultdict(list)
for text in texts:
for char, examples in self.find_invalid_chars(text, context_size).items():
result[char].extend(examples)
return result | python | def compute_report(self, texts, context_size=10):
"""Compute statistics of invalid characters on given texts.
Parameters
----------
texts: list of str
The texts to search for invalid characters.
context_size: int
How many characters to return as the context.
Returns
-------
dict of (char -> list of tuple (index, context))
Returns a dictionary, where keys are invalid characters.
Values are lists containing tuples with character indices
and context strings.
"""
result = defaultdict(list)
for text in texts:
for char, examples in self.find_invalid_chars(text, context_size).items():
result[char].extend(examples)
return result | Compute statistics of invalid characters on given texts.
Parameters
----------
texts: list of str
The texts to search for invalid characters.
context_size: int
How many characters to return as the context.
Returns
-------
dict of (char -> list of tuple (index, context))
Returns a dictionary, where keys are invalid characters.
Values are lists containing tuples with character indices
and context strings. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/textcleaner.py#L71-L92 |
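Together, the TextCleaner methods above support a simple workflow: strip characters outside an allowed alphabet, or report which characters fall outside it and where. Since the class constructor is not shown in these rows, the sketch below re-implements two of the methods on a small stand-in class rather than guessing the real signature:
class TinyCleaner:
    # Same logic as TextCleaner.clean / invalid_characters above, with a toy alphabet.
    def __init__(self, alphabet):
        self.alphabet = set(alphabet)

    def clean(self, text):
        return ''.join([c for c in text if c in self.alphabet])

    def invalid_characters(self, text):
        return ''.join(sorted(set([c for c in text if c not in self.alphabet])))

cleaner = TinyCleaner('abcdefghijklmnopqrstuvwxyzõäöü ')
print(cleaner.clean('tere, maailm!'))               # 'tere maailm'
print(cleaner.invalid_characters('tere, maailm!'))  # '!,'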