repository_name (string, 5–67) | func_path_in_repository (string, 4–234) | func_name (string, 0–314) | whole_func_string (string, 52–3.87M) | language (6 classes) | func_code_string (string, 52–3.87M) | func_documentation_string (string, 1–47.2k) | func_code_url (string, 85–339) |
---|---|---|---|---|---|---|---|
estnltk/estnltk | estnltk/syntax/utils.py | Tree.add_child_to_subtree | def add_child_to_subtree( self, parent_word_id, tree ):
''' Searches for the tree with *parent_word_id* from the current subtree
(from this tree and from all of its subtrees). If the parent tree is
found, attaches the given *tree* as its child. If the parent tree is
not found, the current tree is not changed.
'''
if (self.word_id == parent_word_id):
self.add_child_to_self( tree )
elif (self.children):
for child in self.children:
child.add_child_to_subtree(parent_word_id, tree) | python | def add_child_to_subtree( self, parent_word_id, tree ):
''' Searches for the tree with *parent_word_id* from the current subtree
(from this tree and from all of its subtrees). If the parent tree is
found, attaches the given *tree* as its child. If the parent tree is
not found, the current tree is not changed.
'''
if (self.word_id == parent_word_id):
self.add_child_to_self( tree )
elif (self.children):
for child in self.children:
child.add_child_to_subtree(parent_word_id, tree) | Searches for the tree with *parent_word_id* from the current subtree
(from this tree and from all of its subtrees). If the parent tree is
found, attaches the given *tree* as its child. If the parent tree is
not found, the current tree is not changed. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/utils.py#L583-L593 |
estnltk/estnltk | estnltk/syntax/utils.py | Tree.get_root | def get_root( self, **kwargs ):
''' Returns this tree if it has no parents, or, alternatively, moves
up via the parent links of this tree until reaching the tree with
no parents, and returns the parentless tree as the root.
'''
if self.parent == None:
return self
else:
return self.parent.get_root( **kwargs ) | python | def get_root( self, **kwargs ):
''' Returns this tree if it has no parents, or, alternatively, moves
up via the parent links of this tree until reaching the tree with
no parents, and returns the parentless tree as the root.
'''
if self.parent == None:
return self
else:
return self.parent.get_root( **kwargs ) | Returns this tree if it has no parents, or, alternatively, moves
up via the parent links of this tree until reaching the tree with
no parents, and returns the parentless tree as the root. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/utils.py#L596-L604 |
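The two Tree methods above follow a simple recursive pattern: `add_child_to_subtree` does a depth-first search for the parent node, and `get_root` walks parent links upward. The sketch below reproduces that pattern on a toy node class; it is illustrative only and is not estnltk's `Tree` (whose constructor is not shown in this excerpt).

```python
# Toy stand-in for the Tree logic above (hypothetical class, not estnltk's).
class Node:
    def __init__(self, word_id, text):
        self.word_id = word_id
        self.text = text
        self.parent = None
        self.children = []

    def add_child_to_self(self, node):
        node.parent = self
        self.children.append(node)

    def add_child_to_subtree(self, parent_word_id, node):
        # Depth-first search for the parent; attach the child only if found.
        if self.word_id == parent_word_id:
            self.add_child_to_self(node)
        else:
            for child in self.children:
                child.add_child_to_subtree(parent_word_id, node)

    def get_root(self):
        # Follow parent links until the parentless node is reached.
        return self if self.parent is None else self.parent.get_root()

root = Node(0, 'loeb')
root.add_child_to_subtree(0, Node(1, 'raamatut'))
root.add_child_to_subtree(1, Node(2, 'paksu'))
leaf = root.children[0].children[0]
print(leaf.text, '->', leaf.get_root().text)   # paksu -> loeb
```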
estnltk/estnltk | estnltk/syntax/utils.py | Tree._satisfies_conditions | def _satisfies_conditions( self, tree_node, **kwargs ):
''' Check whether given *tree_node* satisfies the conditions given
as arguments in *kwargs*.
By default (if no conditions are given in *kwargs*), returns
True.
If there are multiple conditions listed (e.g. 'label_regexp'
and 'word_template'), *True* is returned only when the node
satisfies all the conditions.
Following conditions are supported:
-----------------------------------
label : str
Syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node
must have within its analysis; If the node does not have the
label, the node will be discarded;
label_regexp : str
A regular expression pattern (as string) describing the
syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node
must have within its analysis;
If none of the node's labels matches the pattern, the node
will be discarded;
word_template : estnltk.mw_verbs.utils.WordTemplate
A WordTemplate describing morphological constraints imposed
on the word of the node;
If the word's morphological features do not match the template,
the node will be discarded;
'''
matches = []
# A) Check syntactic label by matching a string
syntactic_label = kwargs.get('label', None)
if syntactic_label:
matches.append( bool(tree_node.labels and syntactic_label in tree_node.labels) )
# B) Check syntactic label by matching a regular expression
synt_label_regexp = kwargs.get('label_regexp', None)
if synt_label_regexp:
if isinstance(synt_label_regexp, basestring):
# Compile the regexp (if it hasn't been compiled yet)
synt_label_regexp = re.compile(synt_label_regexp)
kwargs['label_regexp'] = synt_label_regexp
if isinstance(synt_label_regexp, RE_TYPE):
# Apply the pre-compiled regexp
if tree_node.labels:
matches.append( any([synt_label_regexp.match(label) != None for label in tree_node.labels]) )
else:
matches.append( False )
# C) Check whether the word token of the node matches a word template
word_template = kwargs.get('word_template', None)
if word_template:
if isinstance(word_template, WordTemplate):
matches.append( word_template.matches( tree_node.token ) )
else:
raise Exception('(!) Unexpected word_template. Should be from class WordTemplate.')
return len(matches) == 0 or all(matches) | python | def _satisfies_conditions( self, tree_node, **kwargs ):
''' Check whether given *tree_node* satisfies the conditions given
as arguments in *kwargs*.
By default (if no conditions are given in *kwargs*), returns
True.
If there are multiple conditions listed (e.g. 'label_regexp'
and 'word_template'), *True* is returned only when the node
satisfies all the conditions.
Following conditions are supported:
-----------------------------------
label : str
Syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node
must have within its analysis; If the node does not have the
label, the node will be discarded;
label_regexp : str
A regular expression pattern (as string) describing the
syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node
must have within its analysis;
If none of the node's labels matches the pattern, the node
will be discarded;
word_template : estnltk.mw_verbs.utils.WordTemplate
A WordTemplate describing morphological constraints imposed
on the word of the node;
If the word's morphological features do not match the template,
the node will be discarded;
'''
matches = []
# A) Check syntactic label by matching a string
syntactic_label = kwargs.get('label', None)
if syntactic_label:
matches.append( bool(tree_node.labels and syntactic_label in tree_node.labels) )
# B) Check syntactic label by matching a regular expression
synt_label_regexp = kwargs.get('label_regexp', None)
if synt_label_regexp:
if isinstance(synt_label_regexp, basestring):
# Compile the regexp (if it hasn't been compiled yet)
synt_label_regexp = re.compile(synt_label_regexp)
kwargs['label_regexp'] = synt_label_regexp
if isinstance(synt_label_regexp, RE_TYPE):
# Apply the pre-compiled regexp
if tree_node.labels:
matches.append( any([synt_label_regexp.match(label) != None for label in tree_node.labels]) )
else:
matches.append( False )
# C) Check whether the word token of the node matches a word template
word_template = kwargs.get('word_template', None)
if word_template:
if isinstance(word_template, WordTemplate):
matches.append( word_template.matches( tree_node.token ) )
else:
raise Exception('(!) Unexpected word_template. Should be from class WordTemplate.')
return len(matches) == 0 or all(matches) | Check whether given *tree_node* satisfies the conditions given
as arguments in *kwargs*.
By default (if no conditions are given in *kwargs*), returns
True.
If there are multiple conditions listed (e.g. 'label_regexp'
and 'word_template'), *True* is returned only when the node
satisfies all the conditions.
Following conditions are supported:
-----------------------------------
label : str
Syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node
must have within its analysis; If the node does not have the
label, the node will be discarded;
label_regexp : str
A regular expression pattern (as string) describing the
syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node
must have within its analysis;
If none of the node's labels matches the pattern, the node
will be discarded;
word_template : estnltk.mw_verbs.utils.WordTemplate
A WordTemplate describing morphological constraints imposed
on the word of the node;
If the word's morphological features do not match the template,
the node will be discarded; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/utils.py#L607-L666 |
estnltk/estnltk | estnltk/syntax/utils.py | Tree.get_children | def get_children( self, **kwargs ):
''' Recursively collects and returns all subtrees of given tree (if no
arguments are given), or, alternatively, collects and returns subtrees
satisfying some specific criteria (pre-specified in the arguments);
Parameters
-----------
depth_limit : int
Specifies how deep into the subtrees of this tree the search goes;
Examples:
depth_limit=2 -- children of this node, and also children's
direct children are considered as collectibles;
depth_limit=1 -- only children of this node are considered;
depth_limit=0 -- the end of search (only this node is considered);
Default: unbounded ( the search is not limited by depth )
include_self : bool
Specifies whether this tree should also be included as a collectible
subtree. If this tree is included, it still must satisfy all the
criteria before it is added to the collection;
Default: False
sorted : bool
Specifies whether returned trees should be sorted in the ascending order of
word_ids (basically: by the order of words in the text);
If sorting is not applied, there is no guarantee that resulting trees
follow the order of words in text;
Default: False
Following parameters can be used to set conditions for subtrees:
-----------------------------------------------------------------
label : str
Syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node
must have within its analysis; If the node does not have the
label, the node will be discarded;
label_regexp : str
A regular expression pattern (as string) describing the
syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node
must have within its analysis;
If none of the node's labels matches the pattern, the node
will be discarded;
word_template : estnltk.mw_verbs.utils.WordTemplate
A WordTemplate describing morphological constraints imposed
on the word of the node;
If the word's morphological features do not match the template,
the node will be discarded;
'''
depth_limit = kwargs.get('depth_limit', 922337203685477580) # Just a nice big number to
# assure that by default,
# there is no depth limit ...
include_self = kwargs.get('include_self', False)
sorted_by_word_ids = kwargs.get('sorted', False)
subtrees = []
if include_self:
if self._satisfies_conditions( self, **kwargs ):
subtrees.append( self )
if depth_limit >= 1 and self.children:
# 1) Add children of given tree
for child in self.children:
if self._satisfies_conditions( child, **kwargs ):
subtrees.append(child)
# 2) Collect children of given tree's children
kwargs['include_self'] = False
kwargs['depth_limit'] = depth_limit - 1
for child in self.children:
childs_results = child.get_children( **kwargs )
if childs_results:
subtrees.extend(childs_results)
if sorted_by_word_ids:
# Sort by word_id-s, in ascending order
subtrees = sorted(subtrees, key=lambda x: x.word_id)
return subtrees | python | def get_children( self, **kwargs ):
''' Recursively collects and returns all subtrees of given tree (if no
arguments are given), or, alternatively, collects and returns subtrees
satisfying some specific criteria (pre-specified in the arguments);
Parameters
-----------
depth_limit : int
Specifies how deep into the subtrees of this tree the search goes;
Examples:
depth_limit=2 -- children of this node, and also children's
direct children are considered as collectibles;
depth_limit=1 -- only children of this node are considered;
depth_limit=0 -- the end of search (only this node is considered);
Default: unbounded ( the search is not limited by depth )
include_self : bool
Specifies whether this tree should also be included as a collectible
subtree. If this tree is included, it still must satisfy all the
criteria before it is added to the collection;
Default: False
sorted : bool
Specifies whether returned trees should be sorted in the ascending order of
word_ids (basically: by the order of words in the text);
If sorting is not applied, there is no guarantee that resulting trees
follow the order of words in text;
Default: False
Following parameters can be used to set conditions for subtrees:
-----------------------------------------------------------------
label : str
Syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node
must have within its analysis; If the node does not have the
label, the node will be discarded;
label_regexp : str
A regular expression pattern (as string) describing the
syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node
must have within its analysis;
If none of the node's labels matches the pattern, the node
will be discarded;
word_template : estnltk.mw_verbs.utils.WordTemplate
A WordTemplate describing morphological constraints imposed
on the word of the node;
If the word's morphological features do not match the template,
the node will be discarded;
'''
depth_limit = kwargs.get('depth_limit', 922337203685477580) # Just a nice big number to
# assure that by default,
# there is no depth limit ...
include_self = kwargs.get('include_self', False)
sorted_by_word_ids = kwargs.get('sorted', False)
subtrees = []
if include_self:
if self._satisfies_conditions( self, **kwargs ):
subtrees.append( self )
if depth_limit >= 1 and self.children:
# 1) Add children of given tree
for child in self.children:
if self._satisfies_conditions( child, **kwargs ):
subtrees.append(child)
# 2) Collect children of given tree's children
kwargs['include_self'] = False
kwargs['depth_limit'] = depth_limit - 1
for child in self.children:
childs_results = child.get_children( **kwargs )
if childs_results:
subtrees.extend(childs_results)
if sorted_by_word_ids:
# Sort by word_id-s, in ascending order
subtrees = sorted(subtrees, key=lambda x: x.word_id)
return subtrees | Recursively collects and returns all subtrees of given tree (if no
arguments are given), or, alternatively, collects and returns subtrees
satisfying some specific criteria (pre-specified in the arguments);
Parameters
-----------
depth_limit : int
Specifies how deep into the subtrees of this tree the search goes;
Examples:
depth_limit=2 -- children of this node, and also children's
direct children are considered as collectibles;
depth_limit=1 -- only children of this node are considered;
depth_limit=0 -- the end of search (only this node is considered);
Default: unbounded ( the search is not limited by depth )
include_self : bool
Specifies whether this tree should also be included as a collectible
subtree. If this tree is included, it still must satisfy all the
criteria before it is added to the collection;
Default: False
sorted : bool
Specifies whether returned trees should be sorted in the ascending order of
word_ids (basically: by the order of words in the text);
If sorting is not applied, there is no guarantee that resulting trees
follow the order of words in text;
Default: False
Following parameters can be used to set conditions for subtrees:
-----------------------------------------------------------------
label : str
Syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node
must have within its analysis; If the node does not have the
label, the node will be discarded;
label_regexp : str
A regular expression pattern (as string) describing the
syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node
must have within its analysis;
If none of the node's labels matches the pattern, the node
will be discarded;
word_template : estnltk.mw_verbs.utils.WordTemplate
A WordTemplate describing morphological constraints imposed
on the word of the node;
If the word's morphological features do not match the template,
the node will be discarded; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/utils.py#L669-L743 |
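`get_children` is a depth-limited recursive collector: it first gathers direct children that pass the condition check, then recurses into each child with a decremented `depth_limit`. The stand-alone sketch below mirrors that recursion on nested dicts (not estnltk's `Tree`).

```python
def collect(node, depth_limit=float('inf'), include_self=False,
            predicate=lambda n: True):
    # Gather descendants down to depth_limit, keeping nodes that satisfy
    # the predicate; optionally include the starting node itself.
    found = []
    if include_self and predicate(node):
        found.append(node)
    if depth_limit >= 1:
        for child in node.get('children', []):   # 1) direct children first
            if predicate(child):
                found.append(child)
        for child in node.get('children', []):   # 2) then recurse deeper
            found.extend(collect(child, depth_limit - 1))
    return found

tree = {'id': 0, 'children': [
    {'id': 1, 'children': [{'id': 3, 'children': []}]},
    {'id': 2, 'children': []},
]}
print([n['id'] for n in collect(tree, depth_limit=1)])   # [1, 2]
print([n['id'] for n in collect(tree)])                  # [1, 2, 3]
```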
estnltk/estnltk | estnltk/syntax/utils.py | Tree.as_dependencygraph | def as_dependencygraph( self, keep_dummy_root=False, add_morph=True ):
''' Returns this tree as NLTK's DependencyGraph object.
Note that this method constructs 'zero_based' graph,
where counting of the words starts from 0 and the
root index is -1 (not 0, as in Malt-TAB format);
Parameters
-----------
add_morph : bool
Specifies whether the morphological information
(information about word lemmas, part-of-speech, and
features) should be added to graph nodes.
Note that even if **add_morph==True**, morphological
information is only added if it is available via
estnltk's layer token['analysis'];
Default: True
keep_dummy_root : bool
Specifies whether the graph should include a dummy
TOP / ROOT node, which does not refer to any word,
and yet is the topmost node of the tree.
If the dummy root node is not used, then the root
node is the word node headed by -1;
Default: False
For more information about NLTK's DependencyGraph, see:
http://www.nltk.org/_modules/nltk/parse/dependencygraph.html
'''
from nltk.parse.dependencygraph import DependencyGraph
graph = DependencyGraph( zero_based = True )
all_tree_nodes = [self] + self.get_children()
#
# 0) Fix the root
#
if keep_dummy_root:
# Note: we have to re-construct the root node manually,
# as DependencyGraph's current interface seems to provide
# no easy/convenient means for fixing the root node;
graph.nodes[-1] = graph.nodes[0]
graph.nodes[-1].update( { 'address': -1 } )
graph.root = graph.nodes[-1]
del graph.nodes[0]
#
# 1) Update / Add nodes of the graph
#
for child in all_tree_nodes:
rel = 'xxx' if not child.labels else '|'.join(child.labels)
address = child.word_id
word = child.text
graph.nodes[address].update(
{
'address': address,
'word': child.text,
'rel': rel,
} )
if not keep_dummy_root and child == self:
# If we do not keep the dummy root node, set this tree
# as the root node
graph.root = graph.nodes[address]
if add_morph and child.morph:
# Add morphological information, if possible
lemmas = set([analysis[LEMMA] for analysis in child.morph])
postags = set([analysis[POSTAG] for analysis in child.morph])
feats = set([analysis[FORM] for analysis in child.morph])
lemma = ('|'.join( list(lemmas) )).replace(' ','_')
postag = ('|'.join( list(postags) )).replace(' ','_')
feats = ('|'.join( list(feats) )).replace(' ','_')
graph.nodes[address].update(
{
'tag': postag,
'ctag' : postag,
'feats': feats,
'lemma': lemma
} )
#
# 2) Update / Add arcs of the graph
#
for child in all_tree_nodes:
# Connect children of given word
deps = [] if not child.children else [c.word_id for c in child.children]
head_address = child.word_id
for dep in deps:
graph.add_arc( head_address, dep )
if child.parent == None and keep_dummy_root:
graph.add_arc( -1, head_address )
# Connect the parent of given node
head = -1 if not child.parent else child.parent.word_id
graph.nodes[head_address].update(
{
'head': head,
} )
return graph | python | def as_dependencygraph( self, keep_dummy_root=False, add_morph=True ):
''' Returns this tree as NLTK's DependencyGraph object.
Note that this method constructs 'zero_based' graph,
where counting of the words starts from 0 and the
root index is -1 (not 0, as in Malt-TAB format);
Parameters
-----------
add_morph : bool
Specifies whether the morphological information
(information about word lemmas, part-of-speech, and
features) should be added to graph nodes.
Note that even if **add_morph==True**, morphological
information is only added if it is available via
estnltk's layer token['analysis'];
Default: True
keep_dummy_root : bool
Specifies whether the graph should include a dummy
TOP / ROOT node, which does not refer to any word,
and yet is the topmost node of the tree.
If the dummy root node is not used, then the root
node is the word node headed by -1;
Default: False
For more information about NLTK's DependencyGraph, see:
http://www.nltk.org/_modules/nltk/parse/dependencygraph.html
'''
from nltk.parse.dependencygraph import DependencyGraph
graph = DependencyGraph( zero_based = True )
all_tree_nodes = [self] + self.get_children()
#
# 0) Fix the root
#
if keep_dummy_root:
# Note: we have to re-construct the root node manually,
# as DependencyGraph's current interface seems to provide
# no easy/convenient means for fixing the root node;
graph.nodes[-1] = graph.nodes[0]
graph.nodes[-1].update( { 'address': -1 } )
graph.root = graph.nodes[-1]
del graph.nodes[0]
#
# 1) Update / Add nodes of the graph
#
for child in all_tree_nodes:
rel = 'xxx' if not child.labels else '|'.join(child.labels)
address = child.word_id
word = child.text
graph.nodes[address].update(
{
'address': address,
'word': child.text,
'rel': rel,
} )
if not keep_dummy_root and child == self:
# If we do not keep the dummy root node, set this tree
# as the root node
graph.root = graph.nodes[address]
if add_morph and child.morph:
# Add morphological information, if possible
lemmas = set([analysis[LEMMA] for analysis in child.morph])
postags = set([analysis[POSTAG] for analysis in child.morph])
feats = set([analysis[FORM] for analysis in child.morph])
lemma = ('|'.join( list(lemmas) )).replace(' ','_')
postag = ('|'.join( list(postags) )).replace(' ','_')
feats = ('|'.join( list(feats) )).replace(' ','_')
graph.nodes[address].update(
{
'tag': postag,
'ctag' : postag,
'feats': feats,
'lemma': lemma
} )
#
# 2) Update / Add arcs of the graph
#
for child in all_tree_nodes:
# Connect children of given word
deps = [] if not child.children else [c.word_id for c in child.children]
head_address = child.word_id
for dep in deps:
graph.add_arc( head_address, dep )
if child.parent == None and keep_dummy_root:
graph.add_arc( -1, head_address )
# Connect the parent of given node
head = -1 if not child.parent else child.parent.word_id
graph.nodes[head_address].update(
{
'head': head,
} )
return graph | Returns this tree as NLTK's DependencyGraph object.
Note that this method constructs 'zero_based' graph,
where counting of the words starts from 0 and the
root index is -1 (not 0, as in Malt-TAB format);
Parameters
-----------
add_morph : bool
Specifies whether the morphological information
(information about word lemmas, part-of-speech, and
features) should be added to graph nodes.
Note that even if **add_morph==True**, morphological
information is only added if it is available via
estnltk's layer token['analysis'];
Default: True
keep_dummy_root : bool
Specifies whether the graph should include a dummy
TOP / ROOT node, which does not refer to any word,
and yet is the topmost node of the tree.
If the dummy root node is not used, then the root
node is the word node headed by -1;
Default: False
For more information about NLTK's DependencyGraph, see:
http://www.nltk.org/_modules/nltk/parse/dependencygraph.html | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/utils.py#L746-L838 |
estnltk/estnltk | estnltk/syntax/utils.py | Tree.get_tree_depth | def get_tree_depth( self ):
''' Finds depth of this tree. '''
if (self.children):
depth = 1
childDepths = []
for child in self.children:
childDepths.append( child.get_tree_depth() )
return depth + max(childDepths)
else:
return 0 | python | def get_tree_depth( self ):
''' Finds depth of this tree. '''
if (self.children):
depth = 1
childDepths = []
for child in self.children:
childDepths.append( child.get_tree_depth() )
return depth + max(childDepths)
else:
return 0 | Finds depth of this tree. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/utils.py#L851-L860 |
estnltk/estnltk | estnltk/syntax/utils.py | Tree.debug_print_tree | def debug_print_tree( self, spacing='' ):
''' *Debug only* method for outputting the tree. '''
print (spacing+" "+str(self.word_id)+" "+str(self.text))
if (self.children):
spacing=spacing+" "
for child in self.children:
child.debug_print_tree(spacing) | python | def debug_print_tree( self, spacing='' ):
''' *Debug only* method for outputting the tree. '''
print (spacing+" "+str(self.word_id)+" "+str(self.text))
if (self.children):
spacing=spacing+" "
for child in self.children:
child.debug_print_tree(spacing) | *Debug only* method for outputting the tree. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/utils.py#L863-L869 |
estnltk/estnltk | estnltk/mw_verbs/utils.py | WordTemplate.addRule | def addRule(self, field, regExpPattern):
'''Adds new rule for checking whether a value of the field matches given regular
expression regExpPattern;
Parameters
----------
field: str
keyword, e.g. 'partofspeech', 'root', 'text' etc
regExpPattern: str
a regular expression that the value of the field must match (using method
re.match( regExpPattern, token[field]) ).
'''
compiled = re.compile( regExpPattern )
if field in self.analysisFields:
if self.analysisRules == None:
self.analysisRules = dict()
self.analysisRules[field] = compiled
else:
if self.otherRules == None:
self.otherRules = dict()
self.otherRules[field] = compiled | python | def addRule(self, field, regExpPattern):
'''Adds new rule for checking whether a value of the field matches given regular
expression regExpPattern;
Parameters
----------
field: str
keyword, e.g. 'partofspeech', 'root', 'text' etc
regExpPattern: str
a regular expression that the value of the field must match (using method
re.match( regExpPattern, token[field]) ).
'''
compiled = re.compile( regExpPattern )
if field in self.analysisFields:
if self.analysisRules == None:
self.analysisRules = dict()
self.analysisRules[field] = compiled
else:
if self.otherRules == None:
self.otherRules = dict()
self.otherRules[field] = compiled | Adds new rule for checking whether a value of the field matches given regular
expression regExpPattern;
Parameters
----------
field: str
keyword, e.g. 'partofspeech', 'root', 'text' etc
regExpPattern: str
a regular expression that the value of the field must match (using method
re.match( regExpPattern, token[field]) ). | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/utils.py#L101-L121 |
estnltk/estnltk | estnltk/mw_verbs/utils.py | WordTemplate.matches | def matches(self, tokenJson):
'''Determines whether given token (tokenJson) satisfies all the rules listed
in the WordTemplate. If the rules describe tokenJson[ANALYSIS], it is
required that at least one item in the list tokenJson[ANALYSIS] satisfies
all the rules (but it is not required that all the items should satisfy).
Returns a boolean value.
Parameters
----------
tokenJson: pyvabamorf's analysis of a single word token;
'''
if self.otherRules != None:
otherMatches = []
for field in self.otherRules:
match = field in tokenJson and ((self.otherRules[field]).match(tokenJson[field]) != None)
otherMatches.append( match )
if not otherMatches or not all(otherMatches):
return False
elif self.analysisRules == None and all(otherMatches):
return True
if self.analysisRules != None:
assert ANALYSIS in tokenJson, "No ANALYSIS found within token: "+str(tokenJson)
totalMatches = []
for analysis in tokenJson[ANALYSIS]:
# Check whether this analysis satisfies all the rules
# (if not, discard the analysis)
matches = []
for field in self.analysisRules:
value = analysis[field] if field in analysis else ""
match = (self.analysisRules[field]).match(value) != None
matches.append( match )
if not match:
break
totalMatches.append( all(matches) )
# Return True iff there was at least one analysis that
# satisfied all the rules;
return any(totalMatches)
return False | python | def matches(self, tokenJson):
'''Determines whether given token (tokenJson) satisfies all the rules listed
in the WordTemplate. If the rules describe tokenJson[ANALYSIS], it is
required that at least one item in the list tokenJson[ANALYSIS] satisfies
all the rules (but it is not required that all the items should satisfy).
Returns a boolean value.
Parameters
----------
tokenJson: pyvabamorf's analysis of a single word token;
'''
if self.otherRules != None:
otherMatches = []
for field in self.otherRules:
match = field in tokenJson and ((self.otherRules[field]).match(tokenJson[field]) != None)
otherMatches.append( match )
if not otherMatches or not all(otherMatches):
return False
elif self.analysisRules == None and all(otherMatches):
return True
if self.analysisRules != None:
assert ANALYSIS in tokenJson, "No ANALYSIS found within token: "+str(tokenJson)
totalMatches = []
for analysis in tokenJson[ANALYSIS]:
# Check whether this analysis satisfies all the rules
# (if not, discard the analysis)
matches = []
for field in self.analysisRules:
value = analysis[field] if field in analysis else ""
match = (self.analysisRules[field]).match(value) != None
matches.append( match )
if not match:
break
totalMatches.append( all(matches) )
# Return True iff there was at least one analysis that
# satisfied all the rules;
return any(totalMatches)
return False | Determines whether given token (tokenJson) satisfies all the rules listed
in the WordTemplate. If the rules describe tokenJson[ANALYSIS], it is
required that at least one item in the list tokenJson[ANALYSIS] satisfies
all the rules (but it is not required that all the items should satisfy).
Returns a boolean value.
Parameters
----------
tokenJson: pyvabamorf's analysis of a single word token; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/utils.py#L127-L164 |
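`matches` combines two kinds of rules: token-level rules (`otherRules`) that must all hold on the token itself, and analysis-level rules that must all hold for at least one entry of `token[ANALYSIS]`. A compact, self-contained restatement of that logic (a sketch, not the estnltk class):

```python
import re

def matches(token, analysis_rules, other_rules):
    # 1) Every token-level rule must match the corresponding token field.
    for field, pattern in other_rules.items():
        if field not in token or re.match(pattern, token[field]) is None:
            return False
    # 2) At least one analysis must satisfy every analysis-level rule.
    if not analysis_rules:
        return True
    return any(all(re.match(p, analysis.get(f, '')) is not None
                   for f, p in analysis_rules.items())
               for analysis in token.get('analysis', []))

token = {'text': 'lugenud',
         'analysis': [{'partofspeech': 'V', 'form': 'nud'},
                      {'partofspeech': 'A', 'form': ''}]}
print(matches(token, {'partofspeech': '^V$', 'form': 'nud'}, {}))   # True
print(matches(token, {'partofspeech': '^S$'}, {}))                  # False
```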
estnltk/estnltk | estnltk/mw_verbs/utils.py | WordTemplate.matchingAnalyses | def matchingAnalyses(self, tokenJson):
'''Determines whether given token (tokenJson) satisfies all the rules listed
in the WordTemplate and returns a list of analyses (elements of
tokenJson[ANALYSIS]) that are matching all the rules. An empty list is
returned if none of the analyses match (all the rules), or (!) if none of
the rules are describing the ANALYSIS part of the token;
Parameters
----------
tokenJson: pyvabamorf's analysis of a single word token;
'''
matchingResults = []
if self.otherRules != None:
otherMatches = []
for field in self.otherRules:
match = field in tokenJson and ((self.otherRules[field]).match(tokenJson[field]) != None)
otherMatches.append( match )
if not otherMatches or not all(otherMatches):
return matchingResults
if self.analysisRules != None:
assert ANALYSIS in tokenJson, "No ANALYSIS found within token: "+str(tokenJson)
for analysis in tokenJson[ANALYSIS]:
# Check whether this analysis satisfies all the rules
# (if not, discard the analysis)
matches = []
for field in self.analysisRules:
value = analysis[field] if field in analysis else ""
match = (self.analysisRules[field]).match(value) != None
matches.append( match )
if matches and all(matches):
matchingResults.append( analysis )
# Return the list of analyses that satisfied
# all the rules (empty list if none matched);
return matchingResults
return matchingResults | python | def matchingAnalyses(self, tokenJson):
'''Determines whether given token (tokenJson) satisfies all the rules listed
in the WordTemplate and returns a list of analyses (elements of
tokenJson[ANALYSIS]) that are matching all the rules. An empty list is
returned if none of the analyses match (all the rules), or (!) if none of
the rules are describing the ANALYSIS part of the token;
Parameters
----------
tokenJson: pyvabamorf's analysis of a single word token;
'''
matchingResults = []
if self.otherRules != None:
otherMatches = []
for field in self.otherRules:
match = field in tokenJson and ((self.otherRules[field]).match(tokenJson[field]) != None)
otherMatches.append( match )
if not otherMatches or not all(otherMatches):
return matchingResults
if self.analysisRules != None:
assert ANALYSIS in tokenJson, "No ANALYSIS found within token: "+str(tokenJson)
for analysis in tokenJson[ANALYSIS]:
# Check whether this analysis satisfies all the rules
# (if not, discard the analysis)
matches = []
for field in self.analysisRules:
value = analysis[field] if field in analysis else ""
match = (self.analysisRules[field]).match(value) != None
matches.append( match )
if matches and all(matches):
matchingResults.append( analysis )
# Return the list of analyses that satisfied
# all the rules (empty list if none matched);
return matchingResults
return matchingResults | Determines whether given token (tokenJson) satisfies all the rules listed
in the WordTemplate and returns a list of analyses (elements of
tokenJson[ANALYSIS]) that are matching all the rules. An empty list is
returned if none of the analyses match (all the rules), or (!) if none of
the rules are describing the ANALYSIS part of the token;
Parameters
----------
tokenJson: pyvabamorf's analysis of a single word token; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/utils.py#L166-L200 |
estnltk/estnltk | estnltk/mw_verbs/utils.py | WordTemplate.matchingAnalyseIndexes | def matchingAnalyseIndexes(self, tokenJson):
'''Determines whether given token (tokenJson) satisfies all the rules listed
in the WordTemplate and returns a list of analysis indexes that correspond
to tokenJson[ANALYSIS] elements that are matching all the rules.
An empty list is returned if none of the analyses match (all the rules),
or (!) if none of the rules are describing the ANALYSIS part of the
token;
Parameters
----------
tokenJson: pyvabamorf's analysis of a single word token;
'''
matchingResults = self.matchingAnalyses(tokenJson)
if matchingResults:
indexes = [ tokenJson[ANALYSIS].index(analysis) for analysis in matchingResults ]
return indexes
return matchingResults | python | def matchingAnalyseIndexes(self, tokenJson):
'''Determines whether given token (tokenJson) satisfies all the rules listed
in the WordTemplate and returns a list of analysis indexes that correspond
to tokenJson[ANALYSIS] elements that are matching all the rules.
An empty list is returned if none of the analyses match (all the rules),
or (!) if none of the rules are describing the ANALYSIS part of the
token;
Parameters
----------
tokenJson: pyvabamorf's analysis of a single word token;
'''
matchingResults = self.matchingAnalyses(tokenJson)
if matchingResults:
indexes = [ tokenJson[ANALYSIS].index(analysis) for analysis in matchingResults ]
return indexes
return matchingResults | Determines whether given token (tokenJson) satisfies all the rules listed
in the WordTemplate and returns a list of analyse indexes that correspond
to tokenJson[ANALYSIS] elements that are matching all the rules.
An empty list is returned if none of the analyses match (all the rules),
or (!) if none of the rules are describing the ANALYSIS part of the
token;
Parameters
----------
tokenJson: pyvabamorf's analysis of a single word token; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/utils.py#L202-L218 |
estnltk/estnltk | estnltk/mw_verbs/utils.py | WordTemplate.matchingPositions | def matchingPositions(self, tokenArray):
'''Returns a list of positions (indexes) in the tokenArray where this WordTemplate
matches (the method self.matches(token) returns True). Returns an empty list if
no matching tokens appear in the input list.
Parameters
----------
tokenArray: list of word tokens;
A list of word tokens along with their pyvabamorf's analyses;
'''
assert isinstance(tokenArray, list), "tokenArray should be list "+str(tokenArray)
matchingPos = []
for i in range( len(tokenArray) ):
token = tokenArray[i]
if self.matches(token):
matchingPos.append( i )
return matchingPos | python | def matchingPositions(self, tokenArray):
'''Returns a list of positions (indexes) in the tokenArray where this WordTemplate
matches (the method self.matches(token) returns True). Returns an empty list if
no matching tokens appear in the input list.
Parameters
----------
tokenArray: list of word tokens;
A list of word tokens along with their pyvabamorf's analyses;
'''
assert isinstance(tokenArray, list), "tokenArray should be list "+str(tokenArray)
matchingPos = []
for i in range( len(tokenArray) ):
token = tokenArray[i]
if self.matches(token):
matchingPos.append( i )
return matchingPos | Returns a list of positions (indexes) in the tokenArray where this WordTemplate
matches (the method self.matches(token) returns True). Returns an empty list if
no matching tokens appear in the input list.
Parameters
----------
tokenArray: list of word tokens;
A list of word tokens along with their pyvabamorf's analyses; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/utils.py#L224-L240 |
estnltk/estnltk | estnltk/mw_verbs/utils.py | WordTemplate.matchingTokens | def matchingTokens(self, tokenArray):
'''Returns a list of tokens in the tokenArray that match this WordTemplate (the
method self.matches(token) returns True). Returns an empty list if no matching
tokens appear in the input list.
Parameters
----------
tokenArray: list of word tokens;
A list of word tokens along with their pyvabamorf's analyses;
'''
assert isinstance(tokenArray, list), "tokenArray should be list "+str(tokenArray)
matchingTok = []
for i in range( len(tokenArray) ):
token = tokenArray[i]
if self.matches(token):
matchingTok.append( token )
return matchingTok | python | def matchingTokens(self, tokenArray):
'''Returns a list of tokens in the tokenArray that match this WordTemplate (the
method self.matches(token) returns True). Returns an empty list if no matching
tokens appear in the input list.
Parameters
----------
tokenArray: list of word tokens;
A list of word tokens along with their pyvabamorf's analyses;
'''
assert isinstance(tokenArray, list), "tokenArray should be list "+str(tokenArray)
matchingTok = []
for i in range( len(tokenArray) ):
token = tokenArray[i]
if self.matches(token):
matchingTok.append( token )
return matchingTok | Returns a list of tokens in the tokenArray that match this WordTemplate (the
method self.matches(token) returns True). Returns an empty list if no matching
tokens appear in the input list.
Parameters
----------
tokenArray: list of word tokens;
A list of word tokens along with their pyvabamorf's analyses; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/utils.py#L242-L258 |
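`matchingPositions` and `matchingTokens` are plain filters over a token list, so they are equivalent to list comprehensions driven by the template's `matches()` check. A sketch with a stand-in predicate (not estnltk code):

```python
def looks_like_verb(token):
    # Stand-in for WordTemplate.matches(): any analysis tagged as a verb.
    return any(a.get('partofspeech') == 'V' for a in token.get('analysis', []))

tokens = [{'text': 'ta',   'analysis': [{'partofspeech': 'P'}]},
          {'text': 'loeb', 'analysis': [{'partofspeech': 'V'}]}]

positions = [i for i, tok in enumerate(tokens) if looks_like_verb(tok)]  # indexes
matching  = [tok for tok in tokens if looks_like_verb(tok)]              # tokens
print(positions, [t['text'] for t in matching])   # [1] ['loeb']
```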
estnltk/estnltk | estnltk/mw_verbs/utils.py | WordTemplate.annotateText | def annotateText(self, text, layer, addEmptyAnnotations = True):
''' Applies this WordTemplate ( more specifically: its method self.matchingTokens() )
on all words of given text, and adds results of the matching to the text as
a new annotation layer. Returns the input text (which is augmented with a new
layer).
Parameters
----------
text: Text;
A text where matching should be performed;
layer: str;
name of the new layer;
addEmptyAnnotations: boolean, optional
Whether the new layer should still be added if no matches were found;
default: True
'''
from estnltk.text import Text
assert isinstance(text, Text), "the input should be Text, but it is: "+str(text)
# 1) Find words in text that match the given pattern
tokens = self.matchingTokens( text[WORDS] )
if not addEmptyAnnotations and not tokens:
# if we are not interested in empty annotations
return text
# 2) Annotate the given text with found matches
if tokens:
# Matches found: add to the layer
text[layer] = [{START: t[START], END: t[END], TEXT:t[TEXT]} for t in tokens]
else:
# No matches found: add an empty layer
text[layer] = []
return text | python | def annotateText(self, text, layer, addEmptyAnnotations = True):
''' Applies this WordTemplate ( more specifically: its method self.matchingTokens() )
on all words of given text, and adds results of the matching to the text as
a new annotation layer. Returns the input text (which is augmented with a new
layer).
Parameters
----------
text: Text;
A text where matching should be performed;
layer: str;
name of the new layer;
addEmptyAnnotations: boolean, optional
Whether the new layer should still be added if no matches were found;
default: True
'''
from estnltk.text import Text
assert isinstance(text, Text), "the input should be Text, but it is: "+str(text)
# 1) Find words in text that match the given pattern
tokens = self.matchingTokens( text[WORDS] )
if not addEmptyAnnotations and not tokens:
# if we are not interested in empty annotations
return text
# 2) Annotate the given text with found matches
if tokens:
# Matches found: add to the layer
text[layer] = [{START: t[START], END: t[END], TEXT:t[TEXT]} for t in tokens]
else:
# No matches found: add an empty layer
text[layer] = []
return text | Applies this WordTemplate ( more specifically: its method self.matchingTokens() )
on all words of given text, and adds results of the matching to the text as
a new annotation layer. Returns the input text (which is augmented with a new
layer).
Parameters
----------
text: Text;
A text where matching should be performed;
layer: str;
name of the new layer;
addEmptyAnnotations: boolean, optional
Whether the new layer should still be added if no matches were found;
default: True | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/utils.py#L265-L295 |
estnltk/estnltk | estnltk/grammar/grammar.py | Symbol.get_matches | def get_matches(self, text, cache=None, conflict_resolver=resolve_using_maximal_coverage):
"""Get the matches of the symbol on given text."""
is_root_node = False
if cache is None:
cache = {}
is_root_node = True
if id(self) in cache:
return cache[id(self)]
matches = self.get_matches_without_cache(text, cache=cache)
cache[id(self)] = matches
# if this is the root node, resolve the matches
if is_root_node and conflict_resolver is not None:
return conflict_resolver(matches)
return matches | python | def get_matches(self, text, cache=None, conflict_resolver=resolve_using_maximal_coverage):
"""Get the matches of the symbol on given text."""
is_root_node = False
if cache is None:
cache = {}
is_root_node = True
if id(self) in cache:
return cache[id(self)]
matches = self.get_matches_without_cache(text, cache=cache)
cache[id(self)] = matches
# if this is the root node, resolve the matches
if is_root_node and conflict_resolver is not None:
return conflict_resolver(matches)
return matches | Get the matches of the symbol on given text. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/grammar/grammar.py#L45-L59 |
estnltk/estnltk | estnltk/wiki/references.py | reffinder | def reffinder(sectionObj):
"""
add reference indices to sectionObj['references']
:param sectionObj: a section object
:return: the section object with a 'references' field
"""
text = sectionObj['text']
reftags = [x for x in refTagRegEx.finditer(text)]
if reftags:
references = []
for tag in reftags:
references.append(int(tag.group(1)))
sectionObj['references'] = references
text = refTagRegEx.sub('', text)
sectionObj['text'] = text
return sectionObj | python | def reffinder(sectionObj):
"""
add reference indices to sectionObj['references']
:param sectionObj: a section object
:return: the section object with a 'references' field
"""
text = sectionObj['text']
reftags = [x for x in refTagRegEx.finditer(text)]
if reftags:
references = []
for tag in reftags:
references.append(int(tag.group(1)))
sectionObj['references'] = references
text = refTagRegEx.sub('', text)
sectionObj['text'] = text
return sectionObj | add reference indeces to sectionobj['references']
:param sectionObj
:return: a section obj w references: field | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wiki/references.py#L42-L60 |
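`reffinder` pulls `<ref N/>` markers out of a section's text and records their indices. The regex `refTagRegEx` itself is not part of this excerpt, so the pattern below is an assumption based on the tags that `referencesFinder` (next row) inserts:

```python
import re

ref_tag = re.compile(r'<ref (\d+)/>')   # assumed shape of refTagRegEx

section = {'text': 'Suurjärv on sügavaim.<ref 0/> Pindala on 13 ha.<ref 2/>'}
refs = [int(m.group(1)) for m in ref_tag.finditer(section['text'])]
if refs:
    section['references'] = refs
    section['text'] = ref_tag.sub('', section['text'])
print(section)
# {'text': 'Suurjärv on sügavaim. Pindala on 13 ha.', 'references': [0, 2]}
```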
estnltk/estnltk | estnltk/wiki/references.py | referencesFinder | def referencesFinder(text):
"""
:param text: takes the whole text of an article, searches for references, cleans the text,
marks the reference indices from zero inside the text.
:return: the tagged text and a tag:reference dictionary to be used in sectionParser
"""
references = referencesRegEx.finditer(text)
count = 0
refs = []
spans = []
for i in references:
refs.append(i.group())
spans.append(i.span())
count += 1
done = set()
nameRegEx = re.compile(r"""(name=["']*.*?["']*)(\s|/|>)""")
for index, obj in enumerate(refs):
if obj.startswith('<ref name='):
nameTag = re.escape(nameRegEx.search(obj).group(1))
if nameTag not in done:
nameTag = re.escape(nameRegEx.search(obj).group(1))
indeces = [i for i, x in enumerate(refs) if re.search(nameTag, x)]
matches = [refs[i] for i in indeces]
full = max(matches, key=len)
for i in indeces:
refs[i] = full
done.add(nameTag)
#eliminate <ref tag or other rudiments from the ref string
for i in range(len(refs)):
#print('SIIT', refs[i])
lastindex = refs[i].rindex('<')
firstindex = refs[i].index('>')+1
refs[i]=refs[i][firstindex:lastindex]
#Handle cite-references
for i in range(len(refs)):
if 'cite' in refs[i].lower():
newText = ''
values = refs[i].split('|')
for j in values:
if '=' in j:
first = j.index('=')
newText += j[first+1:].strip() + ';'
refs[i] = newText
#a ref string:position int dictionary
refspos = {}
c = 0
for i in refs:
if i not in refspos.keys():
refspos[i] = c
c +=1
else:
continue
#print(refspos)
#eliminate old, bad <ref> tags and insert clean ones <ref 1..2..3/> to the same spot.
newText = ''
assert len(spans) == len(refs)
#Could happen... hasn't happened yet.
next = 0
for i in range(len(spans)):
start = spans[i][0]
newText+=text[next:start]+'<ref '+str(refspos[refs[i]])+'/>'
next = spans[i][1]
newText+=text[next:]
#switch keys:values in the dictionary for use in sectionsParser
#positiontag:ref
newDict = {y:x for x,y in refspos.items()}
return newText, newDict | python | def referencesFinder(text):
"""
:param text: takes the whole text of an article, searches for references, cleans the text,
marks the reference indices from zero inside the text.
:return: the tagged text and a tag:reference dictionary to be used in sectionParser
"""
references = referencesRegEx.finditer(text)
count = 0
refs = []
spans = []
for i in references:
refs.append(i.group())
spans.append(i.span())
count += 1
done = set()
nameRegEx = re.compile(r"""(name=["']*.*?["']*)(\s|/|>)""")
for index, obj in enumerate(refs):
if obj.startswith('<ref name='):
nameTag = re.escape(nameRegEx.search(obj).group(1))
if nameTag not in done:
nameTag = re.escape(nameRegEx.search(obj).group(1))
indeces = [i for i, x in enumerate(refs) if re.search(nameTag, x)]
matches = [refs[i] for i in indeces]
full = max(matches, key=len)
for i in indeces:
refs[i] = full
done.add(nameTag)
#eliminate <ref tag or other rudiments from the ref string
for i in range(len(refs)):
#print('SIIT', refs[i])
lastindex = refs[i].rindex('<')
firstindex = refs[i].index('>')+1
refs[i]=refs[i][firstindex:lastindex]
#Handle cite-references
for i in range(len(refs)):
if 'cite' in refs[i].lower():
newText = ''
values = refs[i].split('|')
for j in values:
if '=' in j:
first = j.index('=')
newText += j[first+1:].strip() + ';'
refs[i] = newText
#a ref string:position int dictionary
refspos = {}
c = 0
for i in refs:
if i not in refspos.keys():
refspos[i] = c
c +=1
else:
continue
#print(refspos)
#eliminate old, bad <ref> tags and insert clean ones <ref 1..2..3/> to the same spot.
newText = ''
assert len(spans) == len(refs)
#Could happen... hasn't happened yet.
next = 0
for i in range(len(spans)):
start = spans[i][0]
newText+=text[next:start]+'<ref '+str(refspos[refs[i]])+'/>'
next = spans[i][1]
newText+=text[next:]
#switch keys:values in the dictionary for use in sectionsParser
#positiontag:ref
newDict = {y:x for x,y in refspos.items()}
return newText, newDict | :param text: takes the whole text of an article, searches for references, cleans the text,
marks the reference indices from zero inside the text.
:return: the tagged text and a tag:reference dictionary to be used in sectionParser | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wiki/references.py#L62-L142 |
estnltk/estnltk | estnltk/wiki/images.py | imageParser | def imageParser(sectionObj):
"""return a sectionObj with image data added
[
{
image_url = "http://upload.wikimedia.org/wikipedia/commons/thumb/e/e0/R%C3%B5uge_Suurj%C3%A4rv_2011_10.jpg/1024px-R%C3%B5uge_Suurj%C3%A4rv_2011_10.jpg"
text: "Rõuge Suurjärv on Eesti sügavaim järv (38 m)."
links: [ ...] // same logic as section-level links.
links: [ ...] // same logic as section-level links.
}
]"""
text = ''
lastEnd = 0
ends = []
text = sectionObj['text']
imageStarts = [x.start() for x in imageRegEx.finditer(text)]
if imageStarts:
images = []
for start in imageStarts:
imgText, end = balancedSlicer(text[start:])
end = start + end
ends.append(end)
#imgText = image.group(0).replace('[[', '').replace(']]', '')
img = {'text':imgText}
imgText = imgText.split('|')
#t= imgText[-1].replace(']]', '')
t = imgText[-1][:-2]
url = urlBegin + imgText[0].replace(' ', '_').replace('[[', '')
img['text'] = t
img['url'] = url
if ExtLinkBracketedRegex.search(t):
img = addExternalLinks(img)
intlinks = [x for x in findBalanced(t, openDelim='[[', closeDelim=']]')]
if intlinks:
img = addIntLinks(img)
images.append(img)
sectionObj['images'] = images
spans = []
for i, j in zip(imageStarts, ends):
spans.append((i, j))
sectionObj['text'] = dropSpans(spans, text)
return sectionObj | python | def imageParser(sectionObj):
"""return a sectionObj with image data added
[
{
image_url = "http://upload.wikimedia.org/wikipedia/commons/thumb/e/e0/R%C3%B5uge_Suurj%C3%A4rv_2011_10.jpg/1024px-R%C3%B5uge_Suurj%C3%A4rv_2011_10.jpg"
text: "Rõuge Suurjärv on Eesti sügavaim järv (38 m)."
links: [ ...] // same logic as section-level links.
links: [ ...] // same logic as section-level links.
}
]"""
text = ''
lastEnd = 0
ends = []
text = sectionObj['text']
imageStarts = [x.start() for x in imageRegEx.finditer(text)]
if imageStarts:
images = []
for start in imageStarts:
imgText, end = balancedSlicer(text[start:])
end = start + end
ends.append(end)
#imgText = image.group(0).replace('[[', '').replace(']]', '')
img = {'text':imgText}
imgText = imgText.split('|')
#t= imgText[-1].replace(']]', '')
t = imgText[-1][:-2]
url = urlBegin + imgText[0].replace(' ', '_').replace('[[', '')
img['text'] = t
img['url'] = url
if ExtLinkBracketedRegex.search(t):
img = addExternalLinks(img)
intlinks = [x for x in findBalanced(t, openDelim='[[', closeDelim=']]')]
if intlinks:
img = addIntLinks(img)
images.append(img)
sectionObj['images'] = images
spans = []
for i, j in zip(imageStarts, ends):
spans.append((i, j))
sectionObj['text'] = dropSpans(spans, text)
return sectionObj | return a sectionObj with image data added
[
{
image_url = "http://upload.wikimedia.org/wikipedia/commons/thumb/e/e0/R%C3%B5uge_Suurj%C3%A4rv_2011_10.jpg/1024px-R%C3%B5uge_Suurj%C3%A4rv_2011_10.jpg"
text: "Rõuge Suurjärv on Eesti sügavaim järv (38 m)."
links: [ ...] // same logic as section-level links.
links: [ ...] // same logic as section-level links.
}
] | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wiki/images.py#L13-L63 |
estnltk/estnltk | estnltk/text.py | Text.is_tagged | def is_tagged(self, layer):
"""Is the given element tokenized/tagged?"""
# we have a number of special names that are not layers but instead
# attributes of "words" layer
if layer == ANALYSIS:
if WORDS in self and len(self[WORDS]) > 0:
return ANALYSIS in self[WORDS][0]
elif layer == LAYER_CONLL:
if LAYER_CONLL in self and len(self[LAYER_CONLL]) > 0:
return PARSER_OUT in self[LAYER_CONLL][0]
elif layer == LAYER_VISLCG3:
if LAYER_VISLCG3 in self and len(self[LAYER_VISLCG3]) > 0:
return PARSER_OUT in self[LAYER_VISLCG3][0]
elif layer == LABEL:
if WORDS in self and len(self[WORDS]) > 0:
return LABEL in self[WORDS][0]
elif layer == CLAUSE_ANNOTATION:
if WORDS in self and len(self[WORDS]) > 0:
return CLAUSE_ANNOTATION in self[WORDS][0]
elif layer == WORDNET:
if WORDS in self and len(self[WORDS]) > 0:
if ANALYSIS in self[WORDS][0] and len(self[WORDS][0][ANALYSIS]) > 0:
return WORDNET in self[WORDS][0][ANALYSIS][0]
else:
return layer in self
return False | python | def is_tagged(self, layer):
"""Is the given element tokenized/tagged?"""
# we have a number of special names that are not layers but instead
# attributes of "words" layer
if layer == ANALYSIS:
if WORDS in self and len(self[WORDS]) > 0:
return ANALYSIS in self[WORDS][0]
elif layer == LAYER_CONLL:
if LAYER_CONLL in self and len(self[LAYER_CONLL]) > 0:
return PARSER_OUT in self[LAYER_CONLL][0]
elif layer == LAYER_VISLCG3:
if LAYER_VISLCG3 in self and len(self[LAYER_VISLCG3]) > 0:
return PARSER_OUT in self[LAYER_VISLCG3][0]
elif layer == LABEL:
if WORDS in self and len(self[WORDS]) > 0:
return LABEL in self[WORDS][0]
elif layer == CLAUSE_ANNOTATION:
if WORDS in self and len(self[WORDS]) > 0:
return CLAUSE_ANNOTATION in self[WORDS][0]
elif layer == WORDNET:
if WORDS in self and len(self[WORDS]) > 0:
if ANALYSIS in self[WORDS][0] and len(self[WORDS][0][ANALYSIS]) > 0:
return WORDNET in self[WORDS][0][ANALYSIS][0]
else:
return layer in self
return False | Is the given element tokenized/tagged? | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L168-L193 |
estnltk/estnltk | estnltk/text.py | Text.texts | def texts(self, layer, sep=' '):
"""Retrieve texts for given layer.
Parameters
----------
sep: str
Separator for multilayer elements (default: ' ').
Returns
-------
list of str
List of strings that make up given layer.
"""
return self.texts_from_spans(self.spans(layer), sep) | python | def texts(self, layer, sep=' '):
"""Retrieve texts for given layer.
Parameters
----------
sep: str
Separator for multilayer elements (default: ' ').
Returns
-------
list of str
List of strings that make up given layer.
"""
return self.texts_from_spans(self.spans(layer), sep) | Retrieve texts for given layer.
Parameters
----------
sep: str
Separator for multilayer elements (default: ' ').
Returns
-------
list of str
List of strings that make up given layer. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L199-L213 |
estnltk/estnltk | estnltk/text.py | Text.texts_from_spans | def texts_from_spans(self, spans, sep=' '):
"""Retrieve texts from a list of (start, end) position spans.
Parameters
----------
sep: str
Separator for multilayer elements (default: ' ').
Returns
-------
list of str
List of strings that correspond to given spans.
"""
text = self.text
texts = []
for start, end in spans:
if isinstance(start, list):
texts.append(sep.join(text[s:e] for s, e in zip(start, end)))
else:
texts.append(text[start:end])
return texts | python | def texts_from_spans(self, spans, sep=' '):
"""Retrieve texts from a list of (start, end) position spans.
Parameters
----------
sep: str
Separator for multilayer elements (default: ' ').
Returns
-------
list of str
List of strings that correspond to given spans.
"""
text = self.text
texts = []
for start, end in spans:
if isinstance(start, list):
texts.append(sep.join(text[s:e] for s, e in zip(start, end)))
else:
texts.append(text[start:end])
return texts | Retrieve texts from a list of (start, end) position spans.
Parameters
----------
sep: str
Separator for multilayer elements (default: ' ').
Returns
-------
list of str
List of strings that correspond to given spans. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L215-L236 |
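`texts_from_spans` slices the raw text with each span; a multilayer span is a pair of lists whose pieces get joined with the separator. The same logic outside the `Text` class, as a self-contained sketch:

```python
def texts_from_spans(text, spans, sep=' '):
    out = []
    for start, end in spans:
        if isinstance(start, list):             # multilayer element
            out.append(sep.join(text[s:e] for s, e in zip(start, end)))
        else:                                   # plain (start, end) span
            out.append(text[start:end])
    return out

text = 'ära jäta mind maha'
print(texts_from_spans(text, [(4, 8), ([0, 14], [3, 18])]))
# ['jäta', 'ära maha']
```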
estnltk/estnltk | estnltk/text.py | Text.spans | def spans(self, layer):
"""Retrieve (start, end) tuples denoting the spans of given layer elements.
Returns
-------
list of (int, int)
List of (start, end) tuples.
"""
spans = []
for data in self[layer]:
spans.append((data[START], data[END]))
return spans | python | def spans(self, layer):
"""Retrieve (start, end) tuples denoting the spans of given layer elements.
Returns
-------
list of (int, int)
List of (start, end) tuples.
"""
spans = []
for data in self[layer]:
spans.append((data[START], data[END]))
return spans | Retrieve (start, end) tuples denoting the spans of given layer elements.
Returns
-------
list of (int, int)
List of (start, end) tuples. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L238-L249 |
estnltk/estnltk | estnltk/text.py | Text.starts | def starts(self, layer):
"""Retrieve start positions of elements if given layer."""
starts = []
for data in self[layer]:
starts.append(data[START])
return starts | python | def starts(self, layer):
"""Retrieve start positions of elements if given layer."""
starts = []
for data in self[layer]:
starts.append(data[START])
return starts | Retrieve start positions of elements if given layer. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L251-L256 |
estnltk/estnltk | estnltk/text.py | Text.ends | def ends(self, layer):
"""Retrieve end positions of elements if given layer."""
ends = []
for data in self[layer]:
ends.append(data[END])
return ends | python | def ends(self, layer):
"""Retrieve end positions of elements if given layer."""
ends = []
for data in self[layer]:
ends.append(data[END])
return ends | Retrieve end positions of elements of given layer. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L258-L263 |
estnltk/estnltk | estnltk/text.py | Text.layer_tagger_mapping | def layer_tagger_mapping(self):
"""Dictionary that maps layer names to taggers that can create that layer."""
return {
PARAGRAPHS: self.tokenize_paragraphs,
SENTENCES: self.tokenize_sentences,
WORDS: self.tokenize_words,
ANALYSIS: self.tag_analysis,
TIMEXES: self.tag_timexes,
NAMED_ENTITIES: self.tag_named_entities,
CLAUSE_ANNOTATION: self.tag_clause_annotations,
CLAUSES: self.tag_clauses,
LAYER_CONLL: self.tag_syntax_maltparser,
LAYER_VISLCG3: self.tag_syntax_vislcg3,
WORDNET: self.tag_wordnet
} | python | def layer_tagger_mapping(self):
"""Dictionary that maps layer names to taggers that can create that layer."""
return {
PARAGRAPHS: self.tokenize_paragraphs,
SENTENCES: self.tokenize_sentences,
WORDS: self.tokenize_words,
ANALYSIS: self.tag_analysis,
TIMEXES: self.tag_timexes,
NAMED_ENTITIES: self.tag_named_entities,
CLAUSE_ANNOTATION: self.tag_clause_annotations,
CLAUSES: self.tag_clauses,
LAYER_CONLL: self.tag_syntax_maltparser,
LAYER_VISLCG3: self.tag_syntax_vislcg3,
WORDNET: self.tag_wordnet
} | Dictionary that maps layer names to taggers that can create that layer. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L346-L360 |
estnltk/estnltk | estnltk/text.py | Text.tag | def tag(self, layer):
"""Tag the annotations of given layer. It can automatically tag any built-in layer type."""
mapping = self.layer_tagger_mapping
if layer in mapping:
mapping[layer]()
return self | python | def tag(self, layer):
"""Tag the annotations of given layer. It can automatically tag any built-in layer type."""
mapping = self.layer_tagger_mapping
if layer in mapping:
mapping[layer]()
return self | Tag the annotations of given layer. It can automatically tag any built-in layer type. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L362-L367 |
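A minimal usage sketch of the dispatch above, assuming the estnltk 1.x package is installed; the layer-name strings below are the assumed values of the SENTENCES and NAMED_ENTITIES constants referenced in layer_tagger_mapping and may differ from the actual constants:
from estnltk import Text

t = Text('Tere! Kuidas läheb?')
t.tag('sentences')        # dispatches to tokenize_sentences via layer_tagger_mapping
t.tag('named_entities')   # dispatches to tag_named_entities, tagging dependencies first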
estnltk/estnltk | estnltk/text.py | Text.tokenize_paragraphs | def tokenize_paragraphs(self):
"""Apply paragraph tokenization to this Text instance. Creates ``paragraphs`` layer."""
tok = self.__paragraph_tokenizer
spans = tok.span_tokenize(self.text)
dicts = []
for start, end in spans:
dicts.append({'start': start, 'end': end})
self[PARAGRAPHS] = dicts
return self | python | def tokenize_paragraphs(self):
"""Apply paragraph tokenization to this Text instance. Creates ``paragraphs`` layer."""
tok = self.__paragraph_tokenizer
spans = tok.span_tokenize(self.text)
dicts = []
for start, end in spans:
dicts.append({'start': start, 'end': end})
self[PARAGRAPHS] = dicts
return self | Apply paragraph tokenization to this Text instance. Creates ``paragraphs`` layer. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L369-L377 |
estnltk/estnltk | estnltk/text.py | Text.paragraph_texts | def paragraph_texts(self):
"""The list of texts representing ``paragraphs`` layer elements."""
if not self.is_tagged(PARAGRAPHS):
self.tokenize_paragraphs()
return self.texts(PARAGRAPHS) | python | def paragraph_texts(self):
"""The list of texts representing ``paragraphs`` layer elements."""
if not self.is_tagged(PARAGRAPHS):
self.tokenize_paragraphs()
return self.texts(PARAGRAPHS) | The list of texts representing ``paragraphs`` layer elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L387-L391 |
estnltk/estnltk | estnltk/text.py | Text.paragraph_spans | def paragraph_spans(self):
"""The list of spans representing ``paragraphs`` layer elements."""
if not self.is_tagged(PARAGRAPHS):
self.tokenize_paragraphs()
return self.spans(PARAGRAPHS) | python | def paragraph_spans(self):
"""The list of spans representing ``paragraphs`` layer elements."""
if not self.is_tagged(PARAGRAPHS):
self.tokenize_paragraphs()
return self.spans(PARAGRAPHS) | The list of spans representing ``paragraphs`` layer elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L394-L398 |
estnltk/estnltk | estnltk/text.py | Text.paragraph_starts | def paragraph_starts(self):
"""The start positions of ``paragraphs`` layer elements."""
if not self.is_tagged(PARAGRAPHS):
self.tokenize_paragraphs()
return self.starts(PARAGRAPHS) | python | def paragraph_starts(self):
"""The start positions of ``paragraphs`` layer elements."""
if not self.is_tagged(PARAGRAPHS):
self.tokenize_paragraphs()
return self.starts(PARAGRAPHS) | The start positions of ``paragraphs`` layer elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L401-L405 |
estnltk/estnltk | estnltk/text.py | Text.paragraph_ends | def paragraph_ends(self):
"""The end positions of ``paragraphs`` layer elements."""
if not self.is_tagged(PARAGRAPHS):
self.tokenize_paragraphs()
return self.ends(PARAGRAPHS) | python | def paragraph_ends(self):
"""The end positions of ``paragraphs`` layer elements."""
if not self.is_tagged(PARAGRAPHS):
self.tokenize_paragraphs()
return self.ends(PARAGRAPHS) | The end positions of ``paragraphs`` layer elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L408-L412 |
estnltk/estnltk | estnltk/text.py | Text.tokenize_sentences | def tokenize_sentences(self):
"""Apply sentence tokenization to this Text instance. Creates ``sentences`` layer.
Automatically tokenizes paragraphs, if they are not already tokenized.
Also, if word tokenization has already been performed, tries to fit
the sentence tokenization into the existing word tokenization;
"""
if not self.is_tagged(PARAGRAPHS):
self.tokenize_paragraphs()
tok = self.__sentence_tokenizer
text = self.text
dicts = []
for paragraph in self[PARAGRAPHS]:
para_start, para_end = paragraph[START], paragraph[END]
para_text = text[para_start:para_end]
if not self.is_tagged(WORDS):
# Non-hack variant: word tokenization has not been applied yet,
# so we proceed in natural order (first sentences, then words)
spans = tok.span_tokenize(para_text)
for start, end in spans:
dicts.append({'start': start+para_start, 'end': end+para_start})
else:
# A hack variant: word tokenization has already been made, so
# we try to use existing word tokenization (first words, then sentences)
para_words = \
[ w for w in self[WORDS] if w[START]>=para_start and w[END]<=para_end ]
para_word_texts = \
[ w[TEXT] for w in para_words ]
try:
# Apply sentences_from_tokens method (if available)
sents = tok.sentences_from_tokens( para_word_texts )
except AttributeError as e:
raise
# Align result of the sentence tokenization with the initial word tokenization
# in order to determine the sentence boundaries
i = 0
for sentence in sents:
j = 0
firstToken = None
lastToken = None
while i < len(para_words):
if para_words[i][TEXT] != sentence[j]:
raise Exception('Error on aligning: ', para_word_texts,' and ',sentence,' at positions ',i,j)
if j == 0:
firstToken = para_words[i]
if j == len(sentence) - 1:
lastToken = para_words[i]
i+=1
break
j+=1
i+=1
sentenceDict = \
{'start': firstToken[START], 'end': lastToken[END]}
dicts.append( sentenceDict )
# Note: We also need to invalidate the cached properties providing the
# sentence information, as otherwise, if the properties have been
# called already, new calls would return the old state of sentence
# tokenization;
for sentence_attrib in ['sentences', 'sentence_texts', 'sentence_spans', \
'sentence_starts', 'sentence_ends']:
try:
# invalidate the cache
delattr(self, sentence_attrib)
except AttributeError:
# it's ok, if the cached property has not been called yet
pass
self[SENTENCES] = dicts
return self | python | def tokenize_sentences(self):
"""Apply sentence tokenization to this Text instance. Creates ``sentences`` layer.
Automatically tokenizes paragraphs, if they are not already tokenized.
Also, if word tokenization has already been performed, tries to fit
the sentence tokenization into the existing word tokenization;
"""
if not self.is_tagged(PARAGRAPHS):
self.tokenize_paragraphs()
tok = self.__sentence_tokenizer
text = self.text
dicts = []
for paragraph in self[PARAGRAPHS]:
para_start, para_end = paragraph[START], paragraph[END]
para_text = text[para_start:para_end]
if not self.is_tagged(WORDS):
# Non-hack variant: word tokenization has not been applied yet,
# so we proceed in natural order (first sentences, then words)
spans = tok.span_tokenize(para_text)
for start, end in spans:
dicts.append({'start': start+para_start, 'end': end+para_start})
else:
# A hack variant: word tokenization has already been made, so
# we try to use existing word tokenization (first words, then sentences)
para_words = \
[ w for w in self[WORDS] if w[START]>=para_start and w[END]<=para_end ]
para_word_texts = \
[ w[TEXT] for w in para_words ]
try:
# Apply sentences_from_tokens method (if available)
sents = tok.sentences_from_tokens( para_word_texts )
except AttributeError as e:
raise
# Align result of the sentence tokenization with the initial word tokenization
# in order to determine the sentence boundaries
i = 0
for sentence in sents:
j = 0
firstToken = None
lastToken = None
while i < len(para_words):
if para_words[i][TEXT] != sentence[j]:
raise Exception('Error on aligning: ', para_word_texts,' and ',sentence,' at positions ',i,j)
if j == 0:
firstToken = para_words[i]
if j == len(sentence) - 1:
lastToken = para_words[i]
i+=1
break
j+=1
i+=1
sentenceDict = \
{'start': firstToken[START], 'end': lastToken[END]}
dicts.append( sentenceDict )
# Note: We also need to invalidate the cached properties providing the
# sentence information, as otherwise, if the properties have been
# called already, new calls would return the old state of sentence
# tokenization;
for sentence_attrib in ['sentences', 'sentence_texts', 'sentence_spans', \
'sentence_starts', 'sentence_ends']:
try:
# invalidate the cache
delattr(self, sentence_attrib)
except AttributeError:
# it's ok, if the cached property has not been called yet
pass
self[SENTENCES] = dicts
return self | Apply sentence tokenization to this Text instance. Creates ``sentences`` layer.
Automatically tokenizes paragraphs, if they are not already tokenized.
Also, if word tokenization has already been performed, tries to fit
the sentence tokenization into the existing word tokenization; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L414-L480 |
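A simplified standalone sketch of the alignment step in the "hack variant" above: given existing word spans and sentences expressed as token lists, sentence boundaries are recovered from the first and last token of each sentence. The sketch assumes tokens match exactly and skips the word-by-word check of the original; no estnltk required.
words = [{'text': 'Esimene', 'start': 0, 'end': 7},
         {'text': 'lause', 'start': 8, 'end': 13},
         {'text': '.', 'start': 13, 'end': 14},
         {'text': 'Teine', 'start': 15, 'end': 20},
         {'text': '!', 'start': 20, 'end': 21}]
sents = [['Esimene', 'lause', '.'], ['Teine', '!']]

spans, i = [], 0
for sent in sents:
    first, last = words[i], words[i + len(sent) - 1]
    spans.append({'start': first['start'], 'end': last['end']})
    i += len(sent)
print(spans)   # [{'start': 0, 'end': 14}, {'start': 15, 'end': 21}]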
estnltk/estnltk | estnltk/text.py | Text.sentence_texts | def sentence_texts(self):
"""The list of texts representing ``sentences`` layer elements."""
if not self.is_tagged(SENTENCES):
self.tokenize_sentences()
return self.texts(SENTENCES) | python | def sentence_texts(self):
"""The list of texts representing ``sentences`` layer elements."""
if not self.is_tagged(SENTENCES):
self.tokenize_sentences()
return self.texts(SENTENCES) | The list of texts representing ``sentences`` layer elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L490-L494 |
estnltk/estnltk | estnltk/text.py | Text.sentence_spans | def sentence_spans(self):
"""The list of spans representing ``sentences`` layer elements."""
if not self.is_tagged(SENTENCES):
self.tokenize_sentences()
return self.spans(SENTENCES) | python | def sentence_spans(self):
"""The list of spans representing ``sentences`` layer elements."""
if not self.is_tagged(SENTENCES):
self.tokenize_sentences()
return self.spans(SENTENCES) | The list of spans representing ``sentences`` layer elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L497-L501 |
estnltk/estnltk | estnltk/text.py | Text.sentence_starts | def sentence_starts(self):
"""The list of start positions representing ``sentences`` layer elements."""
if not self.is_tagged(SENTENCES):
self.tokenize_sentences()
return self.starts(SENTENCES) | python | def sentence_starts(self):
"""The list of start positions representing ``sentences`` layer elements."""
if not self.is_tagged(SENTENCES):
self.tokenize_sentences()
return self.starts(SENTENCES) | The list of start positions representing ``sentences`` layer elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L504-L508 |
estnltk/estnltk | estnltk/text.py | Text.sentence_ends | def sentence_ends(self):
"""The list of end positions representing ``sentences`` layer elements."""
if not self.is_tagged(SENTENCES):
self.tokenize_sentences()
return self.ends(SENTENCES) | python | def sentence_ends(self):
"""The list of end positions representing ``sentences`` layer elements."""
if not self.is_tagged(SENTENCES):
self.tokenize_sentences()
return self.ends(SENTENCES) | The list of end positions representing ``sentences`` layer elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L511-L515 |
estnltk/estnltk | estnltk/text.py | Text.tokenize_words | def tokenize_words(self):
"""Apply word tokenization and create ``words`` layer.
Automatically creates ``paragraphs`` and ``sentences`` layers.
"""
if not self.is_tagged(SENTENCES):
self.tokenize_sentences()
tok = self.__word_tokenizer
text = self.text
dicts = []
for sentence in self[SENTENCES]:
sent_start, sent_end = sentence[START], sentence[END]
sent_text = text[sent_start:sent_end]
spans = tok.span_tokenize(sent_text)
for start, end in spans:
dicts.append({START: start+sent_start, END: end+sent_start, TEXT: sent_text[start:end]})
self[WORDS] = dicts
return self | python | def tokenize_words(self):
"""Apply word tokenization and create ``words`` layer.
Automatically creates ``paragraphs`` and ``sentences`` layers.
"""
if not self.is_tagged(SENTENCES):
self.tokenize_sentences()
tok = self.__word_tokenizer
text = self.text
dicts = []
for sentence in self[SENTENCES]:
sent_start, sent_end = sentence[START], sentence[END]
sent_text = text[sent_start:sent_end]
spans = tok.span_tokenize(sent_text)
for start, end in spans:
dicts.append({START: start+sent_start, END: end+sent_start, TEXT: sent_text[start:end]})
self[WORDS] = dicts
return self | Apply word tokenization and create ``words`` layer.
Automatically creates ``paragraphs`` and ``sentences`` layers. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L517-L534 |
estnltk/estnltk | estnltk/text.py | Text.tag_analysis | def tag_analysis(self):
"""Tag ``words`` layer with morphological analysis attributes."""
if not self.is_tagged(WORDS):
self.tokenize_words()
sentences = self.divide(WORDS, SENTENCES)
for sentence in sentences:
texts = [word[TEXT] for word in sentence]
all_analysis = vabamorf.analyze(texts, **self.__kwargs)
for word, analysis in zip(sentence, all_analysis):
word[ANALYSIS] = analysis[ANALYSIS]
word[TEXT] = analysis[TEXT]
return self | python | def tag_analysis(self):
"""Tag ``words`` layer with morphological analysis attributes."""
if not self.is_tagged(WORDS):
self.tokenize_words()
sentences = self.divide(WORDS, SENTENCES)
for sentence in sentences:
texts = [word[TEXT] for word in sentence]
all_analysis = vabamorf.analyze(texts, **self.__kwargs)
for word, analysis in zip(sentence, all_analysis):
word[ANALYSIS] = analysis[ANALYSIS]
word[TEXT] = analysis[TEXT]
return self | Tag ``words`` layer with morphological analysis attributes. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L536-L547 |
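An illustrative usage sketch, assuming estnltk 1.x with its Vabamorf bindings installed; the printed values are indicative only.
from estnltk import Text

t = Text('Koerad jooksevad pargis.')
t.tag_analysis()
print(t.lemmas)    # e.g. ['koer', 'jooksma', 'park', '.']
print(t.postags)   # e.g. ['S', 'V', 'S', 'Z']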
estnltk/estnltk | estnltk/text.py | Text.word_texts | def word_texts(self):
"""The list of words representing ``words`` layer elements."""
if not self.is_tagged(WORDS):
self.tokenize_words()
return [word[TEXT] for word in self[WORDS]] | python | def word_texts(self):
"""The list of words representing ``words`` layer elements."""
if not self.is_tagged(WORDS):
self.tokenize_words()
return [word[TEXT] for word in self[WORDS]] | The list of words representing ``words`` layer elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L557-L561 |
estnltk/estnltk | estnltk/text.py | Text.word_spans | def word_spans(self):
"""The list of spans representing ``words`` layer elements."""
if not self.is_tagged(WORDS):
self.tokenize_words()
return self.spans(WORDS) | python | def word_spans(self):
"""The list of spans representing ``words`` layer elements."""
if not self.is_tagged(WORDS):
self.tokenize_words()
return self.spans(WORDS) | The list of spans representing ``words`` layer elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L564-L568 |
estnltk/estnltk | estnltk/text.py | Text.word_starts | def word_starts(self):
"""The list of start positions representing ``words`` layer elements."""
if not self.is_tagged(WORDS):
self.tokenize_words()
return self.starts(WORDS) | python | def word_starts(self):
"""The list of start positions representing ``words`` layer elements."""
if not self.is_tagged(WORDS):
self.tokenize_words()
return self.starts(WORDS) | The list of start positions representing ``words`` layer elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L571-L575 |
estnltk/estnltk | estnltk/text.py | Text.word_ends | def word_ends(self):
"""The list of end positions representing ``words`` layer elements."""
if not self.is_tagged(WORDS):
self.tokenize_words()
return self.ends(WORDS) | python | def word_ends(self):
"""The list of end positions representing ``words`` layer elements."""
if not self.is_tagged(WORDS):
self.tokenize_words()
return self.ends(WORDS) | The list of end positions representing ``words`` layer elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L578-L582 |
estnltk/estnltk | estnltk/text.py | Text.analysis | def analysis(self):
"""The list of analysis of ``words`` layer elements."""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return [word[ANALYSIS] for word in self.words] | python | def analysis(self):
"""The list of analysis of ``words`` layer elements."""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return [word[ANALYSIS] for word in self.words] | The list of analysis of ``words`` layer elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L585-L589 |
estnltk/estnltk | estnltk/text.py | Text.get_analysis_element | def get_analysis_element(self, element, sep='|'):
"""The list of analysis elements of ``words`` layer.
Parameters
----------
element: str
The name of the element, for example "lemma", "postag".
sep: str
The separator for ambiguous analysis (default: "|").
As morphological analysis cannot always yield unambiguous results, we
return ambiguous values separated by the pipe character by default.
"""
return [self.__get_key(word[ANALYSIS], element, sep) for word in self.words] | python | def get_analysis_element(self, element, sep='|'):
"""The list of analysis elements of ``words`` layer.
Parameters
----------
element: str
The name of the element, for example "lemma", "postag".
sep: str
The separator for ambiguous analysis (default: "|").
As morphological analysis cannot always yield unambiguous results, we
return ambiguous values separated by the pipe character by default.
"""
return [self.__get_key(word[ANALYSIS], element, sep) for word in self.words] | The list of analysis elements of ``words`` layer.
Parameters
----------
element: str
The name of the element, for example "lemma", "postag".
sep: str
The separator for ambiguous analysis (default: "|").
As morphological analysis cannot always yield unambiguous results, we
return ambiguous values separated by the pipe character by default. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L603-L615 |
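A standalone sketch of collapsing ambiguous analyses into one separator-joined string, mirroring the behaviour described above; the helper name and the deduplication/ordering details are illustrative, not the library's private implementation.
def get_element(analyses, element, sep='|'):
    # join the distinct values of one analysis attribute with the separator
    return sep.join(sorted(set(a[element] for a in analyses)))

word_analyses = [{'lemma': 'olema', 'partofspeech': 'V'},
                 {'lemma': 'ole', 'partofspeech': 'V'}]
print(get_element(word_analyses, 'lemma'))          # 'ole|olema'
print(get_element(word_analyses, 'partofspeech'))   # 'V'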
estnltk/estnltk | estnltk/text.py | Text.roots | def roots(self):
"""The list of word roots.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
"""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(ROOT) | python | def roots(self):
"""The list of word roots.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
"""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(ROOT) | The list of word roots.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L618-L626 |
estnltk/estnltk | estnltk/text.py | Text.lemmas | def lemmas(self):
"""The list of lemmas.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
"""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(LEMMA) | python | def lemmas(self):
"""The list of lemmas.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
"""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(LEMMA) | The list of lemmas.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L629-L637 |
estnltk/estnltk | estnltk/text.py | Text.lemma_lists | def lemma_lists(self):
"""Lemma lists.
Ambiguous cases are separate list elements.
"""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return [[an[LEMMA] for an in word[ANALYSIS]] for word in self[WORDS]] | python | def lemma_lists(self):
"""Lemma lists.
Ambiguous cases are separate list elements.
"""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return [[an[LEMMA] for an in word[ANALYSIS]] for word in self[WORDS]] | Lemma lists.
Ambiguous cases are separate list elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L640-L647 |
estnltk/estnltk | estnltk/text.py | Text.endings | def endings(self):
"""The list of word endings.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
"""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(ENDING) | python | def endings(self):
"""The list of word endings.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
"""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(ENDING) | The list of word endings.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L650-L658 |
estnltk/estnltk | estnltk/text.py | Text.forms | def forms(self):
"""Tthe list of word forms.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
"""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(FORM) | python | def forms(self):
"""Tthe list of word forms.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
"""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(FORM) | The list of word forms.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L661-L669 |
estnltk/estnltk | estnltk/text.py | Text.postags | def postags(self):
"""The list of word part-of-speech tags.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
"""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(POSTAG) | python | def postags(self):
"""The list of word part-of-speech tags.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
"""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(POSTAG) | The list of word part-of-speech tags.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L672-L680 |
estnltk/estnltk | estnltk/text.py | Text.postag_descriptions | def postag_descriptions(self):
"""Human-readable POS-tag descriptions."""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return [POSTAG_DESCRIPTIONS.get(tag, '') for tag in self.get_analysis_element(POSTAG)] | python | def postag_descriptions(self):
"""Human-readable POS-tag descriptions."""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return [POSTAG_DESCRIPTIONS.get(tag, '') for tag in self.get_analysis_element(POSTAG)] | Human-readable POS-tag descriptions. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L689-L693 |
estnltk/estnltk | estnltk/text.py | Text.root_tokens | def root_tokens(self):
"""Root tokens of word roots."""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(ROOT_TOKENS) | python | def root_tokens(self):
"""Root tokens of word roots."""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(ROOT_TOKENS) | Root tokens of word roots. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L696-L700 |
estnltk/estnltk | estnltk/text.py | Text.descriptions | def descriptions(self):
"""Human readable word descriptions."""
descs = []
for postag, form in zip(self.postags, self.forms):
desc = VERB_TYPES.get(form, '')
if len(desc) == 0:
toks = form.split(' ')
if len(toks) == 2:
plur_desc = PLURALITY.get(toks[0], None)
case_desc = CASES.get(toks[1], None)
toks = []
if plur_desc is not None:
toks.append(plur_desc)
if case_desc is not None:
toks.append(case_desc)
desc = ' '.join(toks)
descs.append(desc)
return descs | python | def descriptions(self):
"""Human readable word descriptions."""
descs = []
for postag, form in zip(self.postags, self.forms):
desc = VERB_TYPES.get(form, '')
if len(desc) == 0:
toks = form.split(' ')
if len(toks) == 2:
plur_desc = PLURALITY.get(toks[0], None)
case_desc = CASES.get(toks[1], None)
toks = []
if plur_desc is not None:
toks.append(plur_desc)
if case_desc is not None:
toks.append(case_desc)
desc = ' '.join(toks)
descs.append(desc)
return descs | Human readable word descriptions. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L703-L720 |
estnltk/estnltk | estnltk/text.py | Text.tag_syntax_vislcg3 | def tag_syntax_vislcg3(self):
""" Changes default syntactic parser to VISLCG3Parser, performs syntactic analysis,
and stores the results in the layer named LAYER_VISLCG3."""
if not self.__syntactic_parser or not isinstance(self.__syntactic_parser, VISLCG3Parser):
self.__syntactic_parser = VISLCG3Parser()
return self.tag_syntax() | python | def tag_syntax_vislcg3(self):
""" Changes default syntactic parser to VISLCG3Parser, performs syntactic analysis,
and stores the results in the layer named LAYER_VISLCG3."""
if not self.__syntactic_parser or not isinstance(self.__syntactic_parser, VISLCG3Parser):
self.__syntactic_parser = VISLCG3Parser()
return self.tag_syntax() | Changes default syntactic parser to VISLCG3Parser, performs syntactic analysis,
and stores the results in the layer named LAYER_VISLCG3. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L722-L727 |
estnltk/estnltk | estnltk/text.py | Text.tag_syntax_maltparser | def tag_syntax_maltparser(self):
""" Changes default syntactic parser to MaltParser, performs syntactic analysis,
and stores the results in the layer named LAYER_CONLL."""
if not self.__syntactic_parser or not isinstance(self.__syntactic_parser, MaltParser):
self.__syntactic_parser = MaltParser()
return self.tag_syntax() | python | def tag_syntax_maltparser(self):
""" Changes default syntactic parser to MaltParser, performs syntactic analysis,
and stores the results in the layer named LAYER_CONLL."""
if not self.__syntactic_parser or not isinstance(self.__syntactic_parser, MaltParser):
self.__syntactic_parser = MaltParser()
return self.tag_syntax() | Changes default syntactic parser to MaltParser, performs syntactic analysis,
and stores the results in the layer named LAYER_CONLL. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L729-L734 |
estnltk/estnltk | estnltk/text.py | Text.tag_syntax | def tag_syntax(self):
""" Parses this text with the syntactic analyzer (``self.__syntactic_parser``),
and stores the found syntactic analyses into the layer LAYER_CONLL (if MaltParser
is used, default), or into the layer LAYER_VISLCG3 (if VISLCG3Parser is used).
"""
# Load default Syntactic tagger:
if self.__syntactic_parser is None:
self.__syntactic_parser = load_default_syntactic_parser()
if not self.is_tagged(ANALYSIS):
if isinstance(self.__syntactic_parser, MaltParser):
# By default: Use disambiguation for MaltParser's input
if 'disambiguate' not in self.__kwargs:
self.__kwargs['disambiguate'] = True
self.tag_analysis()
elif isinstance(self.__syntactic_parser, VISLCG3Parser):
# By default: Do not use disambiguation for VISLCG3Parser's input
# (VISLCG3 already does its own rule-based disambiguation)
if 'disambiguate' not in self.__kwargs:
self.__kwargs['disambiguate'] = False
self.tag_analysis()
return self.__syntactic_parser.parse_text( self, **self.__kwargs ) | python | def tag_syntax(self):
""" Parses this text with the syntactic analyzer (``self.__syntactic_parser``),
and stores the found syntactic analyses into the layer LAYER_CONLL (if MaltParser
is used, default), or into the layer LAYER_VISLCG3 (if VISLCG3Parser is used).
"""
# Load default Syntactic tagger:
if self.__syntactic_parser is None:
self.__syntactic_parser = load_default_syntactic_parser()
if not self.is_tagged(ANALYSIS):
if isinstance(self.__syntactic_parser, MaltParser):
# By default: Use disambiguation for MaltParser's input
if 'disambiguate' not in self.__kwargs:
self.__kwargs['disambiguate'] = True
self.tag_analysis()
elif isinstance(self.__syntactic_parser, VISLCG3Parser):
# By default: Do not use disambiguation for VISLCG3Parser's input
# (VISLCG3 already does its own rule-based disambiguation)
if 'disambiguate' not in self.__kwargs:
self.__kwargs['disambiguate'] = False
self.tag_analysis()
return self.__syntactic_parser.parse_text( self, **self.__kwargs ) | Parses this text with the syntactic analyzer (``self.__syntactic_parser``),
and stores the found syntactic analyses into the layer LAYER_CONLL (if MaltParser
is used, default), or into the layer LAYER_VISLCG3 (if VISLCG3Parser is used). | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L736-L756 |
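An illustrative usage sketch of the parser selection described above, assuming estnltk 1.x with MaltParser and/or VISLCG3 installed and configured.
from estnltk import Text

t = Text('Mees luges raamatut.')
t.tag_syntax_maltparser()   # disambiguation on by default; results stored under LAYER_CONLL
t.tag_syntax_vislcg3()      # disambiguation off by default; results stored under LAYER_VISLCG3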
estnltk/estnltk | estnltk/text.py | Text.syntax_trees | def syntax_trees( self, layer=None ):
""" Builds syntactic trees (estnltk.syntax.utils.Tree objects) from
syntactic annotations and returns as a list.
If the input argument *layer* is not specified, the type of the
syntactic parser is used to decide, which syntactic analysis layer
should be produced and taken as basis for building syntactic trees;
If a syntactic parser is not available, then a missing *layer* name
is replaced by the first syntactic layer available (1st LAYER_CONLL,
then LAYER_VISLCG3);
Otherwise, the *layer* must be provided by the user and it must be
either LAYER_CONLL or LAYER_VISLCG3.
"""
# If no layer specified, decide the layer based on the type of syntactic
# analyzer used:
if not layer and self.__syntactic_parser:
if isinstance(self.__syntactic_parser, MaltParser):
layer = LAYER_CONLL
elif isinstance(self.__syntactic_parser, VISLCG3Parser):
layer = LAYER_VISLCG3
# If no syntactic analyzer available, pick the layer as the first syntactic
# layer available:
if not layer and self.is_tagged(LAYER_CONLL):
layer = LAYER_CONLL
elif not layer and self.is_tagged(LAYER_VISLCG3):
layer = LAYER_VISLCG3
# Based on the chosen layer, perform the syntactic analysis (if necessary)
# and return the results packaged as tree objects;
if layer:
if layer==LAYER_CONLL:
if not self.is_tagged(layer):
self.tag_syntax_maltparser()
return self.syntax_trees_conll
elif layer==LAYER_VISLCG3:
if not self.is_tagged(layer):
self.tag_syntax_vislcg3()
return self.syntax_trees_vislcg3
else:
raise ValueError('(!) Unexpected layer name: '+str(layer))
else:
raise ValueError('(!) Missing layer name! ') | python | def syntax_trees( self, layer=None ):
""" Builds syntactic trees (estnltk.syntax.utils.Tree objects) from
syntactic annotations and returns as a list.
If the input argument *layer* is not specified, the type of the
syntactic parser is used to decide, which syntactic analysis layer
should be produced and taken as basis for building syntactic trees;
If a syntactic parser is not available, then a missing *layer* name
is replaced by the first syntactic layer available (1st LAYER_CONLL,
then LAYER_VISLCG3);
Otherwise, the *layer* must be provided by the user and it must be
either LAYER_CONLL or LAYER_VISLCG3.
"""
# If no layer specified, decide the layer based on the type of syntactic
# analyzer used:
if not layer and self.__syntactic_parser:
if isinstance(self.__syntactic_parser, MaltParser):
layer = LAYER_CONLL
elif isinstance(self.__syntactic_parser, VISLCG3Parser):
layer = LAYER_VISLCG3
# If no syntactic analyzer available, pick the layer as the first syntactic
# layer available:
if not layer and self.is_tagged(LAYER_CONLL):
layer = LAYER_CONLL
elif not layer and self.is_tagged(LAYER_VISLCG3):
layer = LAYER_VISLCG3
# Based on the chosen layer, perform the syntactic analysis (if necessary)
# and return the results packaged as tree objects;
if layer:
if layer==LAYER_CONLL:
if not self.is_tagged(layer):
self.tag_syntax_maltparser()
return self.syntax_trees_conll
elif layer==LAYER_VISLCG3:
if not self.is_tagged(layer):
self.tag_syntax_vislcg3()
return self.syntax_trees_vislcg3
else:
raise ValueError('(!) Unexpected layer name: '+str(layer))
else:
raise ValueError('(!) Missing layer name! ') | Builds syntactic trees (estnltk.syntax.utils.Tree objects) from
syntactic annotations and returns as a list.
If the input argument *layer* is not specified, the type of the
syntactic parser is used to decide, which syntactic analysis layer
should be produced and taken as basis for building syntactic trees;
If a syntactic parser is not available, then a missing *layer* name
is replaced by the first syntactic layer available (1st LAYER_CONLL,
then LAYER_VISLCG3);
Otherwise, the *layer* must be provided by the user and it must be
either LAYER_CONLL or LAYER_VISLCG3. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L758-L798 |
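A usage sketch of the layer-selection behaviour above, assuming estnltk 1.x with a syntactic parser available; when no layer is given, the active parser (or the first tagged syntax layer) decides.
from estnltk import Text

t = Text('Mees luges raamatut.')
trees = t.syntax_trees()   # layer inferred from the active parser or existing layers
# a layer name (LAYER_CONLL or LAYER_VISLCG3) may also be passed explicitly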
estnltk/estnltk | estnltk/text.py | Text.tag_labels | def tag_labels(self):
"""Tag named entity labels in the ``words`` layer."""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
if self.__ner_tagger is None:
self.__ner_tagger = load_default_ner_tagger()
self.__ner_tagger.tag_document(self)
return self | python | def tag_labels(self):
"""Tag named entity labels in the ``words`` layer."""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
if self.__ner_tagger is None:
self.__ner_tagger = load_default_ner_tagger()
self.__ner_tagger.tag_document(self)
return self | Tag named entity labels in the ``words`` layer. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L812-L819 |
estnltk/estnltk | estnltk/text.py | Text.labels | def labels(self):
"""Named entity labels."""
if not self.is_tagged(LABEL):
self.tag_labels()
return [word[LABEL] for word in self.words] | python | def labels(self):
"""Named entity labels."""
if not self.is_tagged(LABEL):
self.tag_labels()
return [word[LABEL] for word in self.words] | Named entity labels. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L822-L826 |
estnltk/estnltk | estnltk/text.py | Text.tag_named_entities | def tag_named_entities(self):
"""Tag ``named_entities`` layer.
This automatically performs morphological analysis along with all dependencies.
"""
if not self.is_tagged(LABEL):
self.tag_labels()
nes = []
word_start = -1
labels = self.labels + ['O'] # last is sentinel
words = self.words
label = 'O'
for i, l in enumerate(labels):
if l.startswith('B-') or l == 'O':
if word_start != -1:
nes.append({START: words[word_start][START],
END: words[i-1][END],
LABEL: label})
if l.startswith('B-'):
word_start = i
label = l[2:]
else:
word_start = -1
self[NAMED_ENTITIES] = nes
return self | python | def tag_named_entities(self):
"""Tag ``named_entities`` layer.
This automatically performs morphological analysis along with all dependencies.
"""
if not self.is_tagged(LABEL):
self.tag_labels()
nes = []
word_start = -1
labels = self.labels + ['O'] # last is sentinel
words = self.words
label = 'O'
for i, l in enumerate(labels):
if l.startswith('B-') or l == 'O':
if word_start != -1:
nes.append({START: words[word_start][START],
END: words[i-1][END],
LABEL: label})
if l.startswith('B-'):
word_start = i
label = l[2:]
else:
word_start = -1
self[NAMED_ENTITIES] = nes
return self | Tag ``named_entities`` layer.
This automatically performs morphological analysis along with all dependencies. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L828-L852 |
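A standalone sketch of the BIO-grouping loop above (no estnltk needed): per-word B-/I-/O labels collapse into (first word index, last word index, label) triples; the sample labels are illustrative.
labels = ['B-PER', 'I-PER', 'O', 'B-LOC', 'O']

entities, start, label = [], -1, 'O'
for i, l in enumerate(labels + ['O']):       # trailing 'O' acts as a sentinel
    if l.startswith('B-') or l == 'O':
        if start != -1:
            entities.append((start, i - 1, label))
        start, label = (i, l[2:]) if l.startswith('B-') else (-1, 'O')
print(entities)   # [(0, 1, 'PER'), (3, 3, 'LOC')]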
estnltk/estnltk | estnltk/text.py | Text.named_entities | def named_entities(self):
"""The elements of ``named_entities`` layer."""
if not self.is_tagged(NAMED_ENTITIES):
self.tag_named_entities()
phrases = self.split_by(NAMED_ENTITIES)
return [' '.join(phrase.lemmas) for phrase in phrases] | python | def named_entities(self):
"""The elements of ``named_entities`` layer."""
if not self.is_tagged(NAMED_ENTITIES):
self.tag_named_entities()
phrases = self.split_by(NAMED_ENTITIES)
return [' '.join(phrase.lemmas) for phrase in phrases] | The elements of ``named_entities`` layer. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L855-L860 |
estnltk/estnltk | estnltk/text.py | Text.named_entity_texts | def named_entity_texts(self):
"""The texts representing named entities."""
if not self.is_tagged(NAMED_ENTITIES):
self.tag_named_entities()
return self.texts(NAMED_ENTITIES) | python | def named_entity_texts(self):
"""The texts representing named entities."""
if not self.is_tagged(NAMED_ENTITIES):
self.tag_named_entities()
return self.texts(NAMED_ENTITIES) | The texts representing named entities. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L863-L867 |
estnltk/estnltk | estnltk/text.py | Text.named_entity_spans | def named_entity_spans(self):
"""The spans of named entities."""
if not self.is_tagged(NAMED_ENTITIES):
self.tag_named_entities()
return self.spans(NAMED_ENTITIES) | python | def named_entity_spans(self):
"""The spans of named entities."""
if not self.is_tagged(NAMED_ENTITIES):
self.tag_named_entities()
return self.spans(NAMED_ENTITIES) | The spans of named entities. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L870-L874 |
estnltk/estnltk | estnltk/text.py | Text.named_entity_labels | def named_entity_labels(self):
"""The named entity labels without BIO prefixes."""
if not self.is_tagged(NAMED_ENTITIES):
self.tag_named_entities()
return [ne[LABEL] for ne in self[NAMED_ENTITIES]] | python | def named_entity_labels(self):
"""The named entity labels without BIO prefixes."""
if not self.is_tagged(NAMED_ENTITIES):
self.tag_named_entities()
return [ne[LABEL] for ne in self[NAMED_ENTITIES]] | The named entity labels without BIO prefixes. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L877-L881 |
estnltk/estnltk | estnltk/text.py | Text.tag_timexes | def tag_timexes(self):
"""Create ``timexes`` layer.
Depends on morphological analysis data in ``words`` layer
and tags it automatically, if it is not present."""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
if not self.is_tagged(TIMEXES):
if self.__timex_tagger is None:
self.__timex_tagger = load_default_timex_tagger()
self.__timex_tagger.tag_document(self, **self.__kwargs)
return self | python | def tag_timexes(self):
"""Create ``timexes`` layer.
Depends on morphological analysis data in ``words`` layer
and tags it automatically, if it is not present."""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
if not self.is_tagged(TIMEXES):
if self.__timex_tagger is None:
self.__timex_tagger = load_default_timex_tagger()
self.__timex_tagger.tag_document(self, **self.__kwargs)
return self | Create ``timexes`` layer.
Depends on morphological analysis data in ``words`` layer
and tags it automatically, if it is not present. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L883-L893 |
estnltk/estnltk | estnltk/text.py | Text.timex_starts | def timex_starts(self):
"""The list of start positions of ``timexes`` layer elements."""
if not self.is_tagged(TIMEXES):
self.tag_timexes()
return self.starts(TIMEXES) | python | def timex_starts(self):
"""The list of start positions of ``timexes`` layer elements."""
if not self.is_tagged(TIMEXES):
self.tag_timexes()
return self.starts(TIMEXES) | The list of start positions of ``timexes`` layer elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L923-L927 |
estnltk/estnltk | estnltk/text.py | Text.timex_ends | def timex_ends(self):
"""The list of end positions of ``timexes`` layer elements."""
if not self.is_tagged(TIMEXES):
self.tag_timexes()
return self.ends(TIMEXES) | python | def timex_ends(self):
"""The list of end positions of ``timexes`` layer elements."""
if not self.is_tagged(TIMEXES):
self.tag_timexes()
return self.ends(TIMEXES) | The list of end positions of ``timexes`` layer elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L930-L934 |
estnltk/estnltk | estnltk/text.py | Text.timex_spans | def timex_spans(self):
"""The list of spans of ``timexes`` layer elements."""
if not self.is_tagged(TIMEXES):
self.tag_timexes()
return self.spans(TIMEXES) | python | def timex_spans(self):
"""The list of spans of ``timexes`` layer elements."""
if not self.is_tagged(TIMEXES):
self.tag_timexes()
return self.spans(TIMEXES) | The list of spans of ``timexes`` layer elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L937-L941 |
estnltk/estnltk | estnltk/text.py | Text.tag_clause_annotations | def tag_clause_annotations(self):
"""Tag clause annotations in ``words`` layer.
Depends on morphological analysis.
"""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
if self.__clause_segmenter is None:
self.__clause_segmenter = load_default_clausesegmenter()
return self.__clause_segmenter.tag(self) | python | def tag_clause_annotations(self):
"""Tag clause annotations in ``words`` layer.
Depends on morphological analysis.
"""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
if self.__clause_segmenter is None:
self.__clause_segmenter = load_default_clausesegmenter()
return self.__clause_segmenter.tag(self) | Tag clause annotations in ``words`` layer.
Depends on morphological analysis. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L943-L951 |
estnltk/estnltk | estnltk/text.py | Text.clause_annotations | def clause_annotations(self):
"""The list of clause annotations in ``words`` layer."""
if not self.is_tagged(CLAUSE_ANNOTATION):
self.tag_clause_annotations()
return [word.get(CLAUSE_ANNOTATION, None) for word in self[WORDS]] | python | def clause_annotations(self):
"""The list of clause annotations in ``words`` layer."""
if not self.is_tagged(CLAUSE_ANNOTATION):
self.tag_clause_annotations()
return [word.get(CLAUSE_ANNOTATION, None) for word in self[WORDS]] | The list of clause annotations in ``words`` layer. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L954-L958 |
estnltk/estnltk | estnltk/text.py | Text.clause_indices | def clause_indices(self):
"""The list of clause indices in ``words`` layer.
The indices are unique only in the boundary of a single sentence.
"""
if not self.is_tagged(CLAUSE_ANNOTATION):
self.tag_clause_annotations()
return [word.get(CLAUSE_IDX, None) for word in self[WORDS]] | python | def clause_indices(self):
"""The list of clause indices in ``words`` layer.
The indices are unique only in the boundary of a single sentence.
"""
if not self.is_tagged(CLAUSE_ANNOTATION):
self.tag_clause_annotations()
return [word.get(CLAUSE_IDX, None) for word in self[WORDS]] | The list of clause indices in ``words`` layer.
The indices are unique only in the boundary of a single sentence. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L961-L967 |
estnltk/estnltk | estnltk/text.py | Text.tag_clauses | def tag_clauses(self):
"""Create ``clauses`` multilayer.
Depends on clause annotations."""
if not self.is_tagged(CLAUSE_ANNOTATION):
self.tag_clause_annotations()
def from_sentence(words):
"""Function that extracts clauses from a signle sentence."""
clauses = defaultdict(list)
start = words[0][START]
end = words[0][END]
clause = words[0][CLAUSE_IDX]
for word in words:
if word[CLAUSE_IDX] != clause:
clauses[clause].append((start, end))
start, clause = word[START], word[CLAUSE_IDX]
end = word[END]
clauses[clause].append((start, words[-1][END]))
clauses = [(key, {START: [s for s, e in clause], END: [e for s, e in clause]}) for key, clause in clauses.items()]
return [v for k, v in sorted(clauses)]
clauses = []
sentences = self.divide()
for sentence in sentences:
clauses.extend(from_sentence(sentence))
self[CLAUSES] = clauses
return self | python | def tag_clauses(self):
"""Create ``clauses`` multilayer.
Depends on clause annotations."""
if not self.is_tagged(CLAUSE_ANNOTATION):
self.tag_clause_annotations()
def from_sentence(words):
"""Function that extracts clauses from a signle sentence."""
clauses = defaultdict(list)
start = words[0][START]
end = words[0][END]
clause = words[0][CLAUSE_IDX]
for word in words:
if word[CLAUSE_IDX] != clause:
clauses[clause].append((start, end))
start, clause = word[START], word[CLAUSE_IDX]
end = word[END]
clauses[clause].append((start, words[-1][END]))
clauses = [(key, {START: [s for s, e in clause], END: [e for s, e in clause]}) for key, clause in clauses.items()]
return [v for k, v in sorted(clauses)]
clauses = []
sentences = self.divide()
for sentence in sentences:
clauses.extend(from_sentence(sentence))
self[CLAUSES] = clauses
return self | Create ``clauses`` multilayer.
Depends on clause annotations. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L969-L997 |
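A standalone sketch of the clause-grouping idea above: consecutive words sharing a clause index form one (start, end) run, and each clause collects its runs into parallel start/end lists. The sample dicts and key names are illustrative, and the original additionally closes the final run at the sentence's last word.
from collections import defaultdict

words = [{'start': 0, 'end': 4, 'clause_index': 0},
         {'start': 5, 'end': 9, 'clause_index': 1},
         {'start': 10, 'end': 14, 'clause_index': 0}]

clauses = defaultdict(lambda: {'start': [], 'end': []})
run_start, run_end, current = words[0]['start'], words[0]['end'], words[0]['clause_index']
for word in words[1:]:
    if word['clause_index'] != current:
        # close the current run and start a new one for the next clause index
        clauses[current]['start'].append(run_start)
        clauses[current]['end'].append(run_end)
        run_start, current = word['start'], word['clause_index']
    run_end = word['end']
clauses[current]['start'].append(run_start)
clauses[current]['end'].append(run_end)
print(dict(clauses))   # {0: {'start': [0, 10], 'end': [4, 14]}, 1: {'start': [5], 'end': [9]}}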
estnltk/estnltk | estnltk/text.py | Text.clause_texts | def clause_texts(self):
"""The texts of ``clauses`` multilayer elements.
Non-consecutive spans are concatenated with a space character by default.
Use :py:meth:`~estnltk.text.Text.texts` method to supply custom separators.
"""
if not self.is_tagged(CLAUSES):
self.tag_clauses()
return self.texts(CLAUSES) | python | def clause_texts(self):
"""The texts of ``clauses`` multilayer elements.
Non-consecutive spans are concatenated with a space character by default.
Use :py:meth:`~estnltk.text.Text.texts` method to supply custom separators.
"""
if not self.is_tagged(CLAUSES):
self.tag_clauses()
return self.texts(CLAUSES) | The texts of ``clauses`` multilayer elements.
Non-consecutive spans are concatenated with a space character by default.
Use :py:meth:`~estnltk.text.Text.texts` method to supply custom separators. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1007-L1014 |
estnltk/estnltk | estnltk/text.py | Text.tag_verb_chains | def tag_verb_chains(self):
"""Create ``verb_chains`` layer.
Depends on ``clauses`` layer.
"""
if not self.is_tagged(CLAUSES):
self.tag_clauses()
if self.__verbchain_detector is None:
self.__verbchain_detector = load_default_verbchain_detector()
sentences = self.divide()
verbchains = []
for sentence in sentences:
chains = self.__verbchain_detector.detectVerbChainsFromSent( sentence )
for chain in chains:
# 1) Get spans for all words of the phrase
word_spans = [ ( sentence[idx][START], sentence[idx][END] ) \
for idx in sorted( chain[PHRASE] ) ]
# 2) Assign to the chain
chain[START] = [ span[0] for span in word_spans ]
chain[END] = [ span[1] for span in word_spans ]
verbchains.extend(chains)
self[VERB_CHAINS] = verbchains
return self | python | def tag_verb_chains(self):
"""Create ``verb_chains`` layer.
Depends on ``clauses`` layer.
"""
if not self.is_tagged(CLAUSES):
self.tag_clauses()
if self.__verbchain_detector is None:
self.__verbchain_detector = load_default_verbchain_detector()
sentences = self.divide()
verbchains = []
for sentence in sentences:
chains = self.__verbchain_detector.detectVerbChainsFromSent( sentence )
for chain in chains:
# 1) Get spans for all words of the phrase
word_spans = [ ( sentence[idx][START], sentence[idx][END] ) \
for idx in sorted( chain[PHRASE] ) ]
# 2) Assign to the chain
chain[START] = [ span[0] for span in word_spans ]
chain[END] = [ span[1] for span in word_spans ]
verbchains.extend(chains)
self[VERB_CHAINS] = verbchains
return self | Create ``verb_chains`` layer.
Depends on ``clauses`` layer. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1016-L1037 |
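An illustrative usage sketch, assuming estnltk 1.x with the verb chain detector resources installed; the printed value is indicative only.
from estnltk import Text

t = Text('Ta ei olnud seda teinud.')
t.tag_verb_chains()
print(t.verb_chain_texts)   # e.g. ['ei olnud teinud']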
estnltk/estnltk | estnltk/text.py | Text.verb_chain_texts | def verb_chain_texts(self):
"""The list of texts of ``verb_chains`` layer elements."""
if not self.is_tagged(VERB_CHAINS):
self.tag_verb_chains()
return self.texts(VERB_CHAINS) | python | def verb_chain_texts(self):
"""The list of texts of ``verb_chains`` layer elements."""
if not self.is_tagged(VERB_CHAINS):
self.tag_verb_chains()
return self.texts(VERB_CHAINS) | The list of texts of ``verb_chains`` layer elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1047-L1051 |
estnltk/estnltk | estnltk/text.py | Text.verb_chain_starts | def verb_chain_starts(self):
"""The start positions of ``verb_chains`` elements."""
if not self.is_tagged(VERB_CHAINS):
self.tag_verb_chains()
return self.starts(VERB_CHAINS) | python | def verb_chain_starts(self):
"""The start positions of ``verb_chains`` elements."""
if not self.is_tagged(VERB_CHAINS):
self.tag_verb_chains()
return self.starts(VERB_CHAINS) | The start positions of ``verb_chains`` elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1094-L1098 |
estnltk/estnltk | estnltk/text.py | Text.verb_chain_ends | def verb_chain_ends(self):
"""The end positions of ``verb_chains`` elements."""
if not self.is_tagged(VERB_CHAINS):
self.tag_verb_chains()
return self.ends(VERB_CHAINS) | python | def verb_chain_ends(self):
"""The end positions of ``verb_chains`` elements."""
if not self.is_tagged(VERB_CHAINS):
self.tag_verb_chains()
return self.ends(VERB_CHAINS) | The end positions of ``verb_chains`` elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1101-L1105 |
estnltk/estnltk | estnltk/text.py | Text.tag_wordnet | def tag_wordnet(self, **kwargs):
"""Create wordnet attribute in ``words`` layer.
See :py:meth:`~estnltk.text.wordnet_tagger.WordnetTagger.tag_text` method
for applicable keyword arguments.
"""
global wordnet_tagger
if wordnet_tagger is None: # cached wn tagger
wordnet_tagger = WordnetTagger()
self.__wordnet_tagger = wordnet_tagger
if len(kwargs) > 0:
return self.__wordnet_tagger.tag_text(self, **kwargs)
return self.__wordnet_tagger.tag_text(self, **self.__kwargs) | python | def tag_wordnet(self, **kwargs):
"""Create wordnet attribute in ``words`` layer.
See :py:meth:`~estnltk.text.wordnet_tagger.WordnetTagger.tag_text` method
for applicable keyword arguments.
"""
global wordnet_tagger
if wordnet_tagger is None: # cached wn tagger
wordnet_tagger = WordnetTagger()
self.__wordnet_tagger = wordnet_tagger
if len(kwargs) > 0:
return self.__wordnet_tagger.tag_text(self, **kwargs)
return self.__wordnet_tagger.tag_text(self, **self.__kwargs) | Create wordnet attribute in ``words`` layer.
See :py:meth:`~estnltk.text.wordnet_tagger.WordnetTagger.tag_text` method
for applicable keyword arguments. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1112-L1124 |
estnltk/estnltk | estnltk/text.py | Text.wordnet_annotations | def wordnet_annotations(self):
"""The list of wordnet annotations of ``words`` layer."""
if not self.is_tagged(WORDNET):
self.tag_wordnet()
return [[a[WORDNET] for a in analysis] for analysis in self.analysis] | python | def wordnet_annotations(self):
"""The list of wordnet annotations of ``words`` layer."""
if not self.is_tagged(WORDNET):
self.tag_wordnet()
return [[a[WORDNET] for a in analysis] for analysis in self.analysis] | The list of wordnet annotations of ``words`` layer. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1127-L1131 |
estnltk/estnltk | estnltk/text.py | Text.synsets | def synsets(self):
"""The list of annotated synsets of ``words`` layer."""
synsets = []
for wn_annots in self.wordnet_annotations:
word_synsets = []
for wn_annot in wn_annots:
for synset in wn_annot.get(SYNSETS, []):
word_synsets.append(deepcopy(synset))
synsets.append(word_synsets)
return synsets | python | def synsets(self):
"""The list of annotated synsets of ``words`` layer."""
synsets = []
for wn_annots in self.wordnet_annotations:
word_synsets = []
for wn_annot in wn_annots:
for synset in wn_annot.get(SYNSETS, []):
word_synsets.append(deepcopy(synset))
synsets.append(word_synsets)
return synsets | The list of annotated synsets of ``words`` layer. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1134-L1143 |
estnltk/estnltk | estnltk/text.py | Text.word_literals | def word_literals(self):
"""The list of literals per word in ``words`` layer."""
literals = []
for word_synsets in self.synsets:
word_literals = set()
for synset in word_synsets:
for variant in synset.get(SYN_VARIANTS):
if LITERAL in variant:
word_literals.add(variant[LITERAL])
literals.append(list(sorted(word_literals)))
return literals | python | def word_literals(self):
"""The list of literals per word in ``words`` layer."""
literals = []
for word_synsets in self.synsets:
word_literals = set()
for synset in word_synsets:
for variant in synset.get(SYN_VARIANTS):
if LITERAL in variant:
word_literals.add(variant[LITERAL])
literals.append(list(sorted(word_literals)))
return literals | The list of literals per word in ``words`` layer. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1146-L1156 |
estnltk/estnltk | estnltk/text.py | Text.spelling | def spelling(self):
"""Flag incorrectly spelled words.
Returns a list of booleans, where the element at each position denotes whether the word at the same position
is spelled correctly.
"""
if not self.is_tagged(WORDS):
self.tokenize_words()
return [data[SPELLING] for data in vabamorf.spellcheck(self.word_texts, suggestions=False)] | python | def spelling(self):
"""Flag incorrectly spelled words.
Returns a list of booleans, where the element at each position denotes whether the word at the same position
is spelled correctly.
"""
if not self.is_tagged(WORDS):
self.tokenize_words()
return [data[SPELLING] for data in vabamorf.spellcheck(self.word_texts, suggestions=False)] | Flag incorrectly spelled words.
Returns a list of booleans, where the element at each position denotes whether the word at the same position
is spelled correctly. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1163-L1170 |
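A small sketch of the spelling-related properties above, assuming the estnltk 1.x Text API; the example word and the printed values are illustrative.

from estnltk import Text

text = Text('Ta on palju õpinud.')   # 'õpinud' is intentionally misspelled
print(text.word_texts)               # tokenized words
print(text.spelling)                 # one boolean per word (illustrative values)
print(text.spelling_suggestions)     # suggestions per word; empty list for correctly spelled words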
estnltk/estnltk | estnltk/text.py | Text.spelling_suggestions | def spelling_suggestions(self):
"""The list of spelling suggestions per misspelled word."""
if not self.is_tagged(WORDS):
self.tokenize_words()
return [data[SUGGESTIONS] for data in vabamorf.spellcheck(self.word_texts, suggestions=True)] | python | def spelling_suggestions(self):
"""The list of spelling suggestions per misspelled word."""
if not self.is_tagged(WORDS):
self.tokenize_words()
return [data[SUGGESTIONS] for data in vabamorf.spellcheck(self.word_texts, suggestions=True)] | The list of spelling suggestions per misspelled word. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1173-L1177 |
estnltk/estnltk | estnltk/text.py | Text.spellcheck_results | def spellcheck_results(self):
"""The list of True/False values denoting the correct spelling of words."""
if not self.is_tagged(WORDS):
self.tokenize_words()
return vabamorf.spellcheck(self.word_texts, suggestions=True) | python | def spellcheck_results(self):
"""The list of True/False values denoting the correct spelling of words."""
if not self.is_tagged(WORDS):
self.tokenize_words()
return vabamorf.spellcheck(self.word_texts, suggestions=True) | The raw spellcheck results (spelling flag and suggestions) for each word. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1180-L1184 |
estnltk/estnltk | estnltk/text.py | Text.fix_spelling | def fix_spelling(self):
"""Fix spelling of the text.
Note that this method uses the first suggestion that is given for each misspelled word.
It does not perform any sophisticated analysis to determine which one of the suggestions
fits best into the context.
Returns
-------
Text
A copy of this instance with automatically fixed spelling.
"""
if not self.is_tagged(WORDS):
self.tokenize_words()
text = self.text
fixed = vabamorf.fix_spelling(self.word_texts, join=False)
spans = self.word_spans
assert len(fixed) == len(spans)
if len(spans) > 0:
newtoks = []
lastend = 0
for fix, (start, end) in zip(fixed, spans):
newtoks.append(text[lastend:start])
newtoks.append(fix)
lastend = end
newtoks.append(text[lastend:])
return Text(''.join(newtoks), **self.__kwargs)
return self | python | def fix_spelling(self):
"""Fix spelling of the text.
Note that this method uses the first suggestion that is given for each misspelled word.
It does not perform any sophisticated analysis to determine which one of the suggestions
fits best into the context.
Returns
-------
Text
A copy of this instance with automatically fixed spelling.
"""
if not self.is_tagged(WORDS):
self.tokenize_words()
text = self.text
fixed = vabamorf.fix_spelling(self.word_texts, join=False)
spans = self.word_spans
assert len(fixed) == len(spans)
if len(spans) > 0:
newtoks = []
lastend = 0
for fix, (start, end) in zip(fixed, spans):
newtoks.append(text[lastend:start])
newtoks.append(fix)
lastend = end
newtoks.append(text[lastend:])
return Text(''.join(newtoks), **self.__kwargs)
return self | Fix spelling of the text.
Note that this method uses the first suggestion that is given for each misspelled word.
It does not perform any sophisticated analysis to determine which one of the suggestions
fits best into the context.
Returns
-------
Text
A copy of this instance with automatically fixed spelling. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1186-L1213 |
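A usage sketch for fix_spelling(), assuming the estnltk 1.x Text API shown above; note that it simply substitutes the first suggestion for each misspelled word.

from estnltk import Text

fixed = Text('Ta on palju õpinud.').fix_spelling()   # returns a new Text instance
print(fixed.text)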
estnltk/estnltk | estnltk/text.py | Text.clean | def clean(self):
"""Return a copy of this Text instance with invalid characters removed."""
return Text(self.__text_cleaner.clean(self[TEXT]), **self.__kwargs) | python | def clean(self):
"""Return a copy of this Text instance with invalid characters removed."""
return Text(self.__text_cleaner.clean(self[TEXT]), **self.__kwargs) | Return a copy of this Text instance with invalid characters removed. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1230-L1232 |
estnltk/estnltk | estnltk/text.py | Text.split_given_spans | def split_given_spans(self, spans, sep=' '):
"""Split the text into several pieces.
Resulting texts have all the layers that are present in the text instance that is split.
The elements are copied to the resulting pieces whose spans cover them.
However, this can result in empty layers if no element of a split layer fits into
a span of a particular output piece.
The positions of layer elements that are copied are translated according to the container span,
so they are consistent with returned text lengths.
Parameters
----------
spans: list of spans.
The positions determining the regions that will end up as individual pieces.
Spans themselves can be lists of spans, which denote multilayer-style text regions.
sep: str
The separator that is used to join together text pieces of multilayer spans.
Returns
-------
list of Text
One instance of text per span.
"""
N = len(spans)
results = [{TEXT: text} for text in self.texts_from_spans(spans, sep=sep)]
for elem in self:
if isinstance(self[elem], list):
splits = divide_by_spans(self[elem], spans, translate=True, sep=sep)
for idx in range(N):
results[idx][elem] = splits[idx]
return [Text(res) for res in results] | python | def split_given_spans(self, spans, sep=' '):
"""Split the text into several pieces.
Resulting texts have all the layers that are present in the text instance that is split.
The elements are copied to the resulting pieces whose spans cover them.
However, this can result in empty layers if no element of a split layer fits into
a span of a particular output piece.
The positions of layer elements that are copied are translated according to the container span,
so they are consistent with returned text lengths.
Parameters
----------
spans: list of spans.
The positions determining the regions that will end up as individual pieces.
Spans themselves can be lists of spans, which denote multilayer-style text regions.
sep: str
The separator that is used to join together text pieces of multilayer spans.
Returns
-------
list of Text
One instance of text per span.
"""
N = len(spans)
results = [{TEXT: text} for text in self.texts_from_spans(spans, sep=sep)]
for elem in self:
if isinstance(self[elem], list):
splits = divide_by_spans(self[elem], spans, translate=True, sep=sep)
for idx in range(N):
results[idx][elem] = splits[idx]
return [Text(res) for res in results] | Split the text into several pieces.
Resulting texts have all the layers that are present in the text instance that is split.
The elements are copied to the resulting pieces whose spans cover them.
However, this can result in empty layers if no element of a split layer fits into
a span of a particular output piece.
The positions of layer elements that are copied are translated according to the container span,
so they are consistent with returned text lengths.
Parameters
----------
spans: list of spans.
The positions determining the regions that will end up as individual pieces.
Spans themselves can be lists of spans, which denote multilayer-style text regions.
sep: str
The separator that is used to join together text pieces of multilayer spans.
Returns
-------
list of Text
One instance of text per span. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1262-L1294 |
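A sketch of split_given_spans() with hand-picked character spans, assuming the estnltk 1.x Text API; layer elements falling inside a span are copied into that piece with translated positions.

from estnltk import Text

text = Text('Esimene lause. Teine lause.')
pieces = text.split_given_spans([(0, 14), (15, 27)])   # spans chosen by hand for this example
for piece in pieces:
    print(piece.text)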
estnltk/estnltk | estnltk/text.py | Text.split_by | def split_by(self, layer, sep=' '):
"""Split the text into multiple instances defined by elements of given layer.
The spans for layer elements are extracted and feed to :py:meth:`~estnltk.text.Text.split_given_spans`
method.
Parameters
----------
layer: str
String determining the layer that is used to define the start and end positions of resulting splits.
sep: str (default: ' ')
The separator to use to join texts of multilayer elements.
Returns
-------
list of Text
"""
if not self.is_tagged(layer):
self.tag(layer)
return self.split_given_spans(self.spans(layer), sep=sep) | python | def split_by(self, layer, sep=' '):
"""Split the text into multiple instances defined by elements of given layer.
The spans for layer elements are extracted and feed to :py:meth:`~estnltk.text.Text.split_given_spans`
method.
Parameters
----------
layer: str
String determining the layer that is used to define the start and end positions of resulting splits.
sep: str (default: ' ')
The separator to use to join texts of multilayer elements.
Returns
-------
list of Text
"""
if not self.is_tagged(layer):
self.tag(layer)
return self.split_given_spans(self.spans(layer), sep=sep) | Split the text into multiple instances defined by elements of the given layer.
The spans for layer elements are extracted and fed to :py:meth:`~estnltk.text.Text.split_given_spans`
method.
Parameters
----------
layer: str
String determining the layer that is used to define the start and end positions of resulting splits.
sep: str (default: ' ')
The separator to use to join texts of multilayer elements.
Returns
-------
list of Text | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1296-L1315 |
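A sketch of split_by(), assuming the estnltk 1.x Text API and the default 'sentences' layer name; the layer is tagged on demand before splitting.

from estnltk import Text

text = Text('Esimene lause. Teine lause.')
for sentence in text.split_by('sentences'):
    print(sentence.text)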
estnltk/estnltk | estnltk/text.py | Text.split_by_regex | def split_by_regex(self, regex_or_pattern, flags=re.U, gaps=True):
"""Split the text into multiple instances using a regex.
Parameters
----------
regex_or_pattern: str or compiled pattern
The regular expression to use for splitting.
flags: int (default: re.U)
The regular expression flags (only used when the user has not supplied a compiled regex).
gaps: boolean (default: True)
If True, then regions matched by the regex are not included in the resulting Text instances, which
is expected behaviour.
If False, then only regions matched by the regex are included in the result.
Returns
-------
list of Text
The Text instances obtained by splitting.
"""
text = self[TEXT]
regex = regex_or_pattern
if isinstance(regex, six.string_types):
regex = re.compile(regex_or_pattern, flags=flags)
# else is assumed pattern
last_end = 0
spans = []
if gaps: # tag gap spans
for mo in regex.finditer(text):
start, end = mo.start(), mo.end()
if start > last_end:
spans.append((last_end, start))
last_end = end
if last_end < len(text):
spans.append((last_end, len(text)))
else: # use matched regions
spans = [(mo.start(), mo.end()) for mo in regex.finditer(text)]
return self.split_given_spans(spans) | python | def split_by_regex(self, regex_or_pattern, flags=re.U, gaps=True):
"""Split the text into multiple instances using a regex.
Parameters
----------
regex_or_pattern: str or compiled pattern
The regular expression to use for splitting.
flags: int (default: re.U)
The regular expression flags (only used when the user has not supplied a compiled regex).
gaps: boolean (default: True)
If True, then regions matched by the regex are not included in the resulting Text instances, which
is expected behaviour.
If False, then only regions matched by the regex are included in the result.
Returns
-------
list of Text
The Text instances obtained by splitting.
"""
text = self[TEXT]
regex = regex_or_pattern
if isinstance(regex, six.string_types):
regex = re.compile(regex_or_pattern, flags=flags)
# else is assumed pattern
last_end = 0
spans = []
if gaps: # tag gap spans
for mo in regex.finditer(text):
start, end = mo.start(), mo.end()
if start > last_end:
spans.append((last_end, start))
last_end = end
if last_end < len(text):
spans.append((last_end, len(text)))
else: # use matched regions
spans = [(mo.start(), mo.end()) for mo in regex.finditer(text)]
return self.split_given_spans(spans) | Split the text into multiple instances using a regex.
Parameters
----------
regex_or_pattern: str or compiled pattern
The regular expression to use for splitting.
flags: int (default: re.U)
The regular expression flags (only used when the user has not supplied a compiled regex).
gaps: boolean (default: True)
If True, then regions matched by the regex are not included in the resulting Text instances, which
is expected behaviour.
If False, then only regions matched by the regex are included in the result.
Returns
-------
list of Text
The Text instances obtained by splitting. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1325-L1362 |
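A sketch of split_by_regex(), assuming the estnltk 1.x Text API; with the default gaps=True the separator matches are dropped, with gaps=False only the matched regions are kept.

from estnltk import Text

text = Text('esimene;teine;kolmas')
print([piece.text for piece in text.split_by_regex(';')])                   # split on the separators
print([piece.text for piece in text.split_by_regex('[a-z]+', gaps=False)])  # keep only the matches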
estnltk/estnltk | estnltk/text.py | Text.divide | def divide(self, layer=WORDS, by=SENTENCES):
"""Divide the Text into pieces by keeping references to original elements, when possible.
The only case where this is not possible is if the element is a multispan.
Parameters
----------
layer: str
The layer whose elements are collected and distributed into the resulting bins.
by: str
Each resulting bin is defined by spans of this element.
Returns
-------
list of (list of dict)
"""
if not self.is_tagged(layer):
self.tag(layer)
if not self.is_tagged(by):
self.tag(by)
return divide(self[layer], self[by]) | python | def divide(self, layer=WORDS, by=SENTENCES):
"""Divide the Text into pieces by keeping references to original elements, when possible.
The only case where this is not possible is if the element is a multispan.
Parameters
----------
layer: str
The layer whose elements are collected and distributed into the resulting bins.
by: str
Each resulting bin is defined by spans of this element.
Returns
-------
list of (list of dict)
"""
if not self.is_tagged(layer):
self.tag(layer)
if not self.is_tagged(by):
self.tag(by)
return divide(self[layer], self[by]) | Divide the Text into pieces by keeping references to original elements, when possible.
The only case where this is not possible is if the element is a multispan.
Parameters
----------
layer: str
The layer whose elements are collected and distributed into the resulting bins.
by: str
Each resulting bin is defined by spans of this element.
Returns
-------
list of (list of dict) | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1368-L1388 |
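A sketch of divide(), assuming the estnltk 1.x Text API, the default layer names ('words', 'sentences'), and that word dicts carry a 'text' key; each bin holds references to the original word dicts of one sentence.

from estnltk import Text

text = Text('Esimene lause. Teine lause.')
bins = text.divide(layer='words', by='sentences')
print(len(bins))                           # one bin per sentence
print([word['text'] for word in bins[0]])  # words of the first sentence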
estnltk/estnltk | estnltk/prettyprinter/marker.py | create_tags_with_concatenated_css_classes | def create_tags_with_concatenated_css_classes(tags):
"""Function that creates <mark> tags such that they are not overlapping.
In order to do this, it concatenates the css classes and stores the concatenated
result in new tags.
"""
current_classes = set()
result = []
for pos, group in group_tags_at_same_position(tags):
opening, closing = get_opening_closing_tags(group)
# handle closing tags at current position
closing_added = False
if len(closing) > 0:
closing_tag = Tag(pos, False, '')
for tag in closing:
current_classes.remove(tag.css_class)
result.append(closing_tag)
closing_added = True
# handle opening tags at current position
opening_added = False
if len(opening) > 0:
# handle the begin of an overlap
if not closing_added and len(current_classes) > 0:
result.append(Tag(pos, False, ''))
for tag in opening:
current_classes.add(tag.css_class)
opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
result.append(opening_tag)
opening_added = True
# handle the end of an overlap
if closing_added and not opening_added and len(current_classes) > 0:
opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
result.append(opening_tag)
return result | python | def create_tags_with_concatenated_css_classes(tags):
"""Function that creates <mark> tags such that they are not overlapping.
In order to do this, it concatenates the css classes and stores the concatenated
result in new tags.
"""
current_classes = set()
result = []
for pos, group in group_tags_at_same_position(tags):
opening, closing = get_opening_closing_tags(group)
# handle closing tags at current position
closing_added = False
if len(closing) > 0:
closing_tag = Tag(pos, False, '')
for tag in closing:
current_classes.remove(tag.css_class)
result.append(closing_tag)
closing_added = True
# handle opening tags at current position
opening_added = False
if len(opening) > 0:
# handle the begin of an overlap
if not closing_added and len(current_classes) > 0:
result.append(Tag(pos, False, ''))
for tag in opening:
current_classes.add(tag.css_class)
opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
result.append(opening_tag)
opening_added = True
# handle the end of an overlap
if closing_added and not opening_added and len(current_classes) > 0:
opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
result.append(opening_tag)
return result | Function that creates <mark> tags such that they are not overlapping.
In order to do this, it concatenates the css classes and stores the concatenated
result in new tags. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/marker.py#L100-L132 |
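To illustrate the flattening performed above, a simplified stand-in sketch; the Tag fields are guessed from the usage in the function and this is not the actual estnltk class.

from collections import namedtuple

Tag = namedtuple('Tag', ['pos', 'is_opening', 'css_class'])   # hypothetical stand-in

# annotation 'a' covers [0, 10), annotation 'b' covers [5, 15)
tags = [Tag(0, True, 'a'), Tag(5, True, 'b'), Tag(10, False, 'a'), Tag(15, False, 'b')]
# expected flattened runs: [0, 5) -> 'a', [5, 10) -> 'a b', [10, 15) -> 'b',
# i.e. the overlapping region carries the concatenated class string 'a b'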
estnltk/estnltk | estnltk/grammar/conflictresolver.py | resolve_using_maximal_coverage | def resolve_using_maximal_coverage(matches):
"""Given a list of matches, select a subset of matches
such that there are no overlaps and the total number of
covered characters is maximal.
Parameters
----------
matches: list of Match
Returns
--------
list of Match
"""
if len(matches) == 0:
return matches
matches.sort()
N = len(matches)
scores = [len(match) for match in matches]
prev = [-1] * N
for i in range(1, N):
bestscore = -1
bestprev = -1
j = i
while j >= 0:
# if matches do not overlap
if matches[j].is_before(matches[i]):
l = scores[j] + len(matches[i])
if l >= bestscore:
bestscore = l
bestprev = j
else:
# in case of overlapping matches
l = scores[j] - len(matches[j]) + len(matches[i])
if l >= bestscore:
bestscore = l
bestprev = prev[j]
j = j - 1
scores[i] = bestscore
prev[i] = bestprev
# first find the matching with highest combined score
bestscore = max(scores)
bestidx = len(scores) - scores[-1::-1].index(bestscore) -1
# then backtrack the non-conflicting matchings that should be kept
keepidxs = [bestidx]
bestidx = prev[bestidx]
while bestidx != -1:
keepidxs.append(bestidx)
bestidx = prev[bestidx]
# filter the matches
return [matches[idx] for idx in reversed(keepidxs)] | python | def resolve_using_maximal_coverage(matches):
"""Given a list of matches, select a subset of matches
such that there are no overlaps and the total number of
covered characters is maximal.
Parameters
----------
matches: list of Match
Returns
--------
list of Match
"""
if len(matches) == 0:
return matches
matches.sort()
N = len(matches)
scores = [len(match) for match in matches]
prev = [-1] * N
for i in range(1, N):
bestscore = -1
bestprev = -1
j = i
while j >= 0:
# if matches do not overlap
if matches[j].is_before(matches[i]):
l = scores[j] + len(matches[i])
if l >= bestscore:
bestscore = l
bestprev = j
else:
# in case of overlapping matches
l = scores[j] - len(matches[j]) + len(matches[i])
if l >= bestscore:
bestscore = l
bestprev = prev[j]
j = j - 1
scores[i] = bestscore
prev[i] = bestprev
# first find the matching with highest combined score
bestscore = max(scores)
bestidx = len(scores) - scores[-1::-1].index(bestscore) -1
# then backtrack the non-conflicting matchings that should be kept
keepidxs = [bestidx]
bestidx = prev[bestidx]
while bestidx != -1:
keepidxs.append(bestidx)
bestidx = prev[bestidx]
# filter the matches
return [matches[idx] for idx in reversed(keepidxs)] | Given a list of matches, select a subset of matches
such that there are no overlaps and the total number of
covered characters is maximal.
Parameters
----------
matches: list of Match
Returns
--------
list of Match | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/grammar/conflictresolver.py#L6-L55 |
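The selection above is essentially weighted interval scheduling where the weight of a match is its character length. A self-contained sketch of the same idea on plain (start, end) tuples, not using the estnltk Match class:

def max_coverage(spans):
    # pick non-overlapping spans whose total covered length is maximal (simple O(n^2) DP)
    if not spans:
        return []
    spans = sorted(spans, key=lambda s: (s[1], s[0]))   # sort by end position
    best = [0] * len(spans)    # best total length of a selection ending with spans[i]
    prev = [-1] * len(spans)   # previously chosen span in that selection
    for i, (start, end) in enumerate(spans):
        best[i] = end - start
        for j in range(i):
            if spans[j][1] <= start and best[j] + (end - start) > best[i]:
                best[i] = best[j] + (end - start)
                prev[i] = j
    i = max(range(len(spans)), key=lambda k: best[k])
    chosen = []
    while i != -1:
        chosen.append(spans[i])
        i = prev[i]
    return list(reversed(chosen))

print(max_coverage([(0, 5), (4, 8), (8, 12)]))   # -> [(0, 5), (8, 12)]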
estnltk/estnltk | estnltk/mw_verbs/basic_verbchain_detection.py | _isSeparatedByPossibleClauseBreakers | def _isSeparatedByPossibleClauseBreakers( tokens, wordID1, wordID2, punctForbidden = True, \
commaForbidden = True, \
conjWordsForbidden = True ):
'''
Teeb kindlaks, kas j2rjendi tokens s6naindeksite vahemikus [wordID1, wordID2) (vahemiku
algus on inklusiivne) leidub sides6nu (ja/ning/ega/v6i), punktuatsiooni (koma,
sidekriipsud, koolon, kolm j2rjestikkust punkti) v6i adverbe-sidendeid aga/kuid/vaid;
Lippudega saab kontrolli l6dvendada:
*) punctForbidden=False lylitab v2lja punktuatsiooni ( kirjavahem2rgid v.a koma )
kontrolli;
*) commaForbidden=False lylitab v2lja koma kontrolli ( ei puuduta teisi kirjavahem2rke )
kontrolli;
*) conjWordsForbidden=False lylitab v2lja sides6nade ja adverb-sidendite kontrolli;
Tagastab True, kui leidub kasv6i yks eelnimetatud juhtudest, vastasel juhul False;
'''
global _breakerJaNingEgaVoi, _breakerAgaKuidVaid, _breakerKomaLopus, _breakerPunktuats
minWID = min(wordID1, wordID2)
maxWID = max(wordID1, wordID2)
insideCheckArea = False
for i in range(len(tokens)):
token = tokens[i]
if token[WORD_ID] >= minWID:
insideCheckArea = True
if token[WORD_ID] >= maxWID:
insideCheckArea = False
if insideCheckArea:
if punctForbidden and _breakerPunktuats.matches(token):
return True
if commaForbidden and _breakerKomaLopus.matches(token):
return True
if conjWordsForbidden and (_breakerAgaKuidVaid.matches(token) or \
_breakerJaNingEgaVoi.matches(token)):
return True
return False | python | def _isSeparatedByPossibleClauseBreakers( tokens, wordID1, wordID2, punctForbidden = True, \
commaForbidden = True, \
conjWordsForbidden = True ):
'''
Teeb kindlaks, kas j2rjendi tokens s6naindeksite vahemikus [wordID1, wordID2) (vahemiku
algus on inklusiivne) leidub sides6nu (ja/ning/ega/v6i), punktuatsiooni (koma,
sidekriipsud, koolon, kolm j2rjestikkust punkti) v6i adverbe-sidendeid aga/kuid/vaid;
Lippudega saab kontrolli l6dvendada:
*) punctForbidden=False lylitab v2lja punktuatsiooni ( kirjavahem2rgid v.a koma )
kontrolli;
*) commaForbidden=False lylitab v2lja koma kontrolli ( ei puuduta teisi kirjavahem2rke )
kontrolli;
*) conjWordsForbidden=False lylitab v2lja sides6nade ja adverb-sidendite kontrolli;
Tagastab True, kui leidub kasv6i yks eelnimetatud juhtudest, vastasel juhul False;
'''
global _breakerJaNingEgaVoi, _breakerAgaKuidVaid, _breakerKomaLopus, _breakerPunktuats
minWID = min(wordID1, wordID2)
maxWID = max(wordID1, wordID2)
insideCheckArea = False
for i in range(len(tokens)):
token = tokens[i]
if token[WORD_ID] >= minWID:
insideCheckArea = True
if token[WORD_ID] >= maxWID:
insideCheckArea = False
if insideCheckArea:
if punctForbidden and _breakerPunktuats.matches(token):
return True
if commaForbidden and _breakerKomaLopus.matches(token):
return True
if conjWordsForbidden and (_breakerAgaKuidVaid.matches(token) or \
_breakerJaNingEgaVoi.matches(token)):
return True
return False | Determines whether, in the word-index range [wordID1, wordID2) of the list tokens (the start of
the range is inclusive), there are conjunctions (ja/ning/ega/või), punctuation (comma,
dashes, colon, three consecutive dots) or the adverbial connectives aga/kuid/vaid;
The flags can be used to relax the check:
*) punctForbidden=False disables the punctuation check ( punctuation marks other than the comma );
*) commaForbidden=False disables the comma check ( does not affect other punctuation marks );
*) conjWordsForbidden=False disables the check for conjunctions and adverbial connectives;
Returns True if at least one of the above cases is found, otherwise False; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/basic_verbchain_detection.py#L35-L68 |
estnltk/estnltk | estnltk/mw_verbs/basic_verbchain_detection.py | _isClauseFinal | def _isClauseFinal( wordID, clauseTokens ):
'''
Teeb kindlaks, kas etteantud ID-ga s6na on osalause l6pus:
-- s6nale ei j2rgne ykski teine s6na;
-- s6nale j2rgnevad vaid punktuatsioonim2rgid ja/v6i sidendid JA/NING/EGA/VÕI;
Tagastab True, kui eeltoodud tingimused on t2idetud, vastasel juhul False;
'''
jaNingEgaVoi = WordTemplate({ROOT:'^(ja|ning|ega|v[\u014D\u00F5]i)$',POSTAG:'[DJ]'})
punktuatsioon = WordTemplate({POSTAG:'Z'})
for i in range(len(clauseTokens)):
token = clauseTokens[i]
if token[WORD_ID] == wordID:
if i+1 == len(clauseTokens):
return True
else:
for j in range(i+1, len(clauseTokens)):
token2 = clauseTokens[j]
if not (jaNingEgaVoi.matches(token2) or punktuatsioon.matches(token2)):
return False
return True
return False | python | def _isClauseFinal( wordID, clauseTokens ):
'''
Teeb kindlaks, kas etteantud ID-ga s6na on osalause l6pus:
-- s6nale ei j2rgne ykski teine s6na;
-- s6nale j2rgnevad vaid punktuatsioonim2rgid ja/v6i sidendid JA/NING/EGA/VÕI;
Tagastab True, kui eeltoodud tingimused on t2idetud, vastasel juhul False;
'''
jaNingEgaVoi = WordTemplate({ROOT:'^(ja|ning|ega|v[\u014D\u00F5]i)$',POSTAG:'[DJ]'})
punktuatsioon = WordTemplate({POSTAG:'Z'})
for i in range(len(clauseTokens)):
token = clauseTokens[i]
if token[WORD_ID] == wordID:
if i+1 == len(clauseTokens):
return True
else:
for j in range(i+1, len(clauseTokens)):
token2 = clauseTokens[j]
if not (jaNingEgaVoi.matches(token2) or punktuatsioon.matches(token2)):
return False
return True
return False | Determines whether the word with the given ID is at the end of its clause:
-- the word is not followed by any other word;
-- the word is followed only by punctuation marks and/or the connectives JA/NING/EGA/VÕI;
Returns True if the above conditions are met, otherwise False; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/basic_verbchain_detection.py#L70-L90 |
estnltk/estnltk | estnltk/mw_verbs/basic_verbchain_detection.py | _isFollowedByComma | def _isFollowedByComma( wordID, clauseTokens ):
'''
Teeb kindlaks, kas etteantud ID-ga s6nale j2rgneb vahetult koma;
Tagastab True, kui eeltoodud tingimus on t2idetud, vastasel juhul False;
'''
koma = WordTemplate({ROOT:'^,+$', POSTAG:'Z'})
for i in range(len(clauseTokens)):
token = clauseTokens[i]
if token[WORD_ID] == wordID:
if re.match('^.*,$', token[TEXT]):
return True
elif i+1 < len(clauseTokens) and koma.matches(clauseTokens[i+1]):
return True
break
return False | python | def _isFollowedByComma( wordID, clauseTokens ):
'''
Teeb kindlaks, kas etteantud ID-ga s6nale j2rgneb vahetult koma;
Tagastab True, kui eeltoodud tingimus on t2idetud, vastasel juhul False;
'''
koma = WordTemplate({ROOT:'^,+$', POSTAG:'Z'})
for i in range(len(clauseTokens)):
token = clauseTokens[i]
if token[WORD_ID] == wordID:
if re.match('^.*,$', token[TEXT]):
return True
elif i+1 < len(clauseTokens) and koma.matches(clauseTokens[i+1]):
return True
break
return False | Determines whether the word with the given ID is immediately followed by a comma;
Returns True if this condition is met, otherwise False; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/basic_verbchain_detection.py#L92-L106 |
estnltk/estnltk | estnltk/mw_verbs/basic_verbchain_detection.py | _canFormAraPhrase | def _canFormAraPhrase( araVerb, otherVerb ):
''' Teeb kindlaks, kas etteantud 'ära' verb (araVerb) yhildub teise verbiga;
Arvestab järgimisi ühilduvusi:
ains 2. pööre: ära_neg.o + V_o
ains 3. pööre: ära_neg.gu + V_gu
mitm 1. pööre: ära_neg.me + V_me
ära_neg.me + V_o
ära_neg.gem + V_gem
mitm 2. pööre: ära_neg.ge + V_ge
mitm 3. pööre: ära_neg.gu + V_gu
passiiv: ära_neg.gu + V_tagu
Kui yhildub, tagastab listide listi, vastasel juhul tagastab tyhja listi.
Tagastatava listi esimene liige on 'ära' verbi analüüside indeksite list
(millised analüüsid vastavad 'ära' verbile) ning listi teine liige on yhilduva
verbi analüüside indeksite list (millised analüüsid vastavad ühilduvale verbile);
Indeksite listid on sellised, nagu neid leitakse meetodi
wordtemplate.matchingAnalyseIndexes(token) abil;
'''
global _verbAraAgreements
for i in range(0, len(_verbAraAgreements), 2):
araVerbTemplate = _verbAraAgreements[i]
otherVerbTemplate = _verbAraAgreements[i+1]
matchingAraAnalyses = araVerbTemplate.matchingAnalyseIndexes(araVerb)
if matchingAraAnalyses:
matchingVerbAnalyses = otherVerbTemplate.matchingAnalyseIndexes(otherVerb)
if matchingVerbAnalyses:
return [matchingAraAnalyses, matchingVerbAnalyses]
return [] | python | def _canFormAraPhrase( araVerb, otherVerb ):
''' Teeb kindlaks, kas etteantud 'ära' verb (araVerb) yhildub teise verbiga;
Arvestab järgimisi ühilduvusi:
ains 2. pööre: ära_neg.o + V_o
ains 3. pööre: ära_neg.gu + V_gu
mitm 1. pööre: ära_neg.me + V_me
ära_neg.me + V_o
ära_neg.gem + V_gem
mitm 2. pööre: ära_neg.ge + V_ge
mitm 3. pööre: ära_neg.gu + V_gu
passiiv: ära_neg.gu + V_tagu
Kui yhildub, tagastab listide listi, vastasel juhul tagastab tyhja listi.
Tagastatava listi esimene liige on 'ära' verbi analüüside indeksite list
(millised analüüsid vastavad 'ära' verbile) ning listi teine liige on yhilduva
verbi analüüside indeksite list (millised analüüsid vastavad ühilduvale verbile);
Indeksite listid on sellised, nagu neid leitakse meetodi
wordtemplate.matchingAnalyseIndexes(token) abil;
'''
global _verbAraAgreements
for i in range(0, len(_verbAraAgreements), 2):
araVerbTemplate = _verbAraAgreements[i]
otherVerbTemplate = _verbAraAgreements[i+1]
matchingAraAnalyses = araVerbTemplate.matchingAnalyseIndexes(araVerb)
if matchingAraAnalyses:
matchingVerbAnalyses = otherVerbTemplate.matchingAnalyseIndexes(otherVerb)
if matchingVerbAnalyses:
return [matchingAraAnalyses, matchingVerbAnalyses]
return [] | Determines whether the given 'ära' verb (araVerb) agrees with the other verb;
The following agreement patterns are considered:
2nd person singular: ära_neg.o + V_o
3rd person singular: ära_neg.gu + V_gu
1st person plural: ära_neg.me + V_me
ära_neg.me + V_o
ära_neg.gem + V_gem
2nd person plural: ära_neg.ge + V_ge
3rd person plural: ära_neg.gu + V_gu
passive: ära_neg.gu + V_tagu
If they agree, returns a list of lists, otherwise returns an empty list.
The first member of the returned list is the list of analysis indexes of the 'ära' verb
(which analyses correspond to the 'ära' verb) and the second member is the list of analysis
indexes of the agreeing verb (which analyses correspond to the agreeing verb);
The index lists are the same as those found by the method
wordtemplate.matchingAnalyseIndexes(token); | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/basic_verbchain_detection.py#L133-L161 |
estnltk/estnltk | estnltk/mw_verbs/basic_verbchain_detection.py | _extractBasicPredicateFromClause | def _extractBasicPredicateFromClause( clauseTokens, clauseID ):
'''
Meetod, mis tuvastab antud osalausest kesksed verbid + nendega otseselt seotud
esmased verbifraasid:
1) predikaadiga seotud eituse(d): (ei/ära/pole) + sobiv verb;
2) olema-verbifraasid: olema; olema + sobiv verb;
3) tavalised (mitte-olema) verbid, mis peaksid olema osalause keskmeks;
Sisend 'clauseTokens' on list, mis sisaldab yhe osalause k6iki s6nu (pyvabamorfi poolt tehtud
s6na-analyyse);
Tagastab listi tuvastatud fraasidest, kus iga liige (dict) on kujul:
{ PHRASE: list, -- tuvastatud fraasi positsioon lauses (WORD_ID indeksid);
PATTERN: list, -- yldine muster, mille alusel tuvastamine toimus;
POLARITY: str, -- polaarsus ('NEG', 'POS', '??')
OTHER_VERBS: bool -- kas kontekstis on veel verbe, mida v6iks potentsiaalselt
s6naga liita?
}
Eraldatakse järgmised üldised mustrid (PATTERN j2rjendid):
verb
ole
ei+V
ole+V
pole
ei+ole
pole+V
ole+ole
ei
ära+V
pole+ole
ära
ära+ole
NB! Kui osalauses on veel verbe, mis v6iksid (potentsiaalselt) eraldatud mustriga liituda,
siis m22ratakse mustris otherVerbs = True;
'''
global _phraseBreakerAdvs
# Verbieituse indikaatorid
verbEi = WordTemplate({ROOT:'^ei$',FORM:'neg',POSTAG:'V'})
verbEi2 = WordTemplate({ROOT:'^ei$',POSTAG:'D'}) # juhuks, kui morf yhestamises valitakse vale analyys
verbAra = WordTemplate({ROOT:'^ära$',FORM:'neg.*',POSTAG:'V'})
verbPole = WordTemplate({ROOT:'^ole$',FORM:'neg.*',POSTAG:'V'})
# Eituse sisuverbi osad
verbEiJarel = WordTemplate({POSTAG:'V',FORM:'o|nud|tud|nuks|nuvat|vat|ks|ta|taks|tavat$'})
verbEiJarel2 = WordTemplate({POSTAG:'V',FORM:'neg o$'})
# Infiniitverb, olema ja verbid, mis v6ivad olema-le j2rgneda
verbInf = WordTemplate({POSTAG:'V', FORM:'^(da|des|ma|tama|ta|maks|mas|mast|nud|tud|v|mata)$'})
verbOle = WordTemplate({ROOT:'^ole$',POSTAG:'V'})
verbOleJarel = WordTemplate({POSTAG:'V',FORM:'nud$'})
verbOleJarelHeur1 = WordTemplate({POSTAG:'V',FORM:'^(tud|da|mas)$'})
verbOleJarelHeur2 = WordTemplate({POSTAG:'V',FORM:'^(tud|mas)$'})
# Muud
verb = WordTemplate({POSTAG:'V'})
verbid = verb.matchingPositions( clauseTokens )
sonaEga = WordTemplate({ROOT:'^ega$',POSTAG:'[DJ]'})
# Eraldamise tulemused: eraldatud (verbi)fraasid ja kasutatud reeglid
foundMatches = []
negPhraseWIDs = []
posPhraseWIDs = []
for i in range(len(clauseTokens)):
tokenJson = clauseTokens[i]
matchFound = False
# ===================================================================
# V e r b i e i t u s
# ===================================================================
if verbEi.matches(tokenJson) or verbEi2.matches(tokenJson):
#
# 1. "Ei" + Verb (käskivas, -nud, -tud, -nuks, -nuvat, -vat,
# -ks, -ta, -taks, tavat)
#
if i+1 < len(clauseTokens):
tokenJson2 = clauseTokens[i+1]
if verbEiJarel.matches(tokenJson2):
wid1 = tokenJson[WORD_ID]
wid2 = tokenJson2[WORD_ID]
matchobj = { PHRASE: [wid1, wid2], PATTERN: ["ei", "verb"] }
matchobj[CLAUSE_IDX] = clauseID
if verbOle.matches(tokenJson2):
matchobj[PATTERN][1] = 'ole'
matchobj[OTHER_VERBS] = (len(verbid) > 2)
matchobj[POLARITY] = 'NEG'
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, [verbEi, verbEi2] ) )
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson2, verbEiJarel ) )
foundMatches.append( matchobj )
negPhraseWIDs.extend( [wid1, wid2] )
matchFound = True
#
# Lisamuster:
# -neg o: Ainult "lähe" korral, kuna selle s6na käskiv
# ("mine") ei lange kokku eituse vormiga;
#
if not matchFound and verbEiJarel2.matches(tokenJson2):
wid1 = tokenJson[WORD_ID]
wid2 = tokenJson2[WORD_ID]
matchobj = { PHRASE: [wid1, wid2], PATTERN: ["ei", "verb"] }
matchobj[CLAUSE_IDX] = clauseID
matchobj[OTHER_VERBS] = (len(verbid) > 2)
matchobj[POLARITY] = 'NEG'
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, [verbEi, verbEi2] ) )
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson2, verbEiJarel2 ) )
foundMatches.append( matchobj )
negPhraseWIDs.extend( [wid1, wid2] )
matchFound = True
#
# 1.2. Kui "ei" on (osa)lause alguses, ja lauses ongi vaid kaks verbi, siis
# võib eituse ja verbi vahel olla ka teisi sõnu ...
# Nt. Ei_0 ta ole_0 , ütlesin selge sõnaga .
# Hävita vaenlane - ei_0 sammugi tagane_0 .
#
if not matchFound and verbEi.matches(tokenJson) and i+1 < len(clauseTokens):
# Leiame k6ik verbid: kui ongi vaid kaks verbi, esimene 'ei'
# ja teine sellega sobiv verb ning kehtivad kitsendused:
# ** teine verb j2rgneb 'ei'-le;
# ** vahetult p2rast 'ei'-d pole koma (Nt 'Aga ei_0, sõna antud_0.')
# ** teine verb on osalause l6pus;
if len(verbid)==2 and verbid[0]==i:
if verbEiJarel.matches(clauseTokens[verbid[1]]):
if not _isFollowedByComma( tokenJson[WORD_ID], clauseTokens ) and \
_isClauseFinal( clauseTokens[verbid[1]][WORD_ID], clauseTokens ):
wid1 = tokenJson[WORD_ID]
wid2 = clauseTokens[verbid[1]][WORD_ID]
matchobj = { PHRASE: [wid1, wid2], PATTERN: ["ei", "verb"] }
matchobj[CLAUSE_IDX] = clauseID
if verbOle.matches(clauseTokens[verbid[1]]):
matchobj[PATTERN][1] = 'ole'
matchobj[OTHER_VERBS] = False
matchobj[POLARITY] = 'NEG'
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, verbEi ) )
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( clauseTokens[verbid[1]], verbEiJarel ) )
foundMatches.append( matchobj )
negPhraseWIDs.extend( [wid1, wid2] )
matchFound = True
#
# 1.X. Ei oska "ei" predikaadikonteksti m22rata (v6imalik, et ei eitatagi verbi,
# vaid hoopis nimis6nafraasi vms).
#
if not matchFound:
wid1 = tokenJson[WORD_ID]
matchobj = { PHRASE: [wid1], PATTERN: ["ei"] }
matchobj[CLAUSE_IDX] = clauseID
# Leiame, kas j2rgneb s6nu, millega potentsiaalselt saaks eituse moodustada
matchobj[OTHER_VERBS] = \
any([ verbEiJarel.matches(clauseTokens[j]) for j in range(i+1, len(clauseTokens)) ])
matchobj[POLARITY] = 'NEG'
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, [verbEi, verbEi2] ) )
foundMatches.append( matchobj )
negPhraseWIDs.extend( [wid1] )
matchFound = True
elif verbAra.matches(tokenJson):
#
# 2. "Ära" + Verb (käskivas, -ge, -gem, -gu, -tagu, -me)
#
# Kui "ära"-le järgneb (osa)lauses veel teisi verbe, proovime
# moodustada ära-fraasi esimese järgneva verbiga, mis ühildub:
# Nt. Ärme_0 enam nii tee_0 !
# Ärge_0 palun minge_0 .
# Ärge_0 ainult naerma puhkege_0 .
#
if i+1 < len(clauseTokens) and len(verbid) >= 2:
for verbIndex in verbid:
tokenJson2 = clauseTokens[ verbIndex ]
if tokenJson[WORD_ID] < tokenJson2[WORD_ID]:
# Teeme kindlaks, kas järgnev verb võib ühilduda 'ära'-ga:
analyses = _canFormAraPhrase( tokenJson, tokenJson2 )
if analyses:
wid1 = tokenJson[WORD_ID]
wid2 = tokenJson2[WORD_ID]
matchobj = { PHRASE: [wid1, wid2], PATTERN: ["ära", "verb"] }
matchobj[CLAUSE_IDX] = clauseID
if verbOle.matches(tokenJson2):
matchobj[PATTERN][1] = 'ole'
matchobj[OTHER_VERBS] = (len(verbid) > 2)
matchobj[POLARITY] = 'NEG'
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( analyses[0] )
matchobj[ANALYSIS_IDS].append( analyses[1] )
foundMatches.append( matchobj )
negPhraseWIDs.extend( [wid1, wid2] )
matchFound = True
break
#
# Teadaolevad veakohad:
# yks koma vahel: Ära_0 piina ennast , jäta_0 laps mulle.
# (aga siin on p6hiliseks veaks puudulik morf analyys)
#
#
# 2.X. Ei oska "ära" predikaadikonteksti m22rata ...
#
if not matchFound:
wid1 = tokenJson[WORD_ID]
matchobj = { PHRASE: [wid1], PATTERN: ["ära"] }
matchobj[CLAUSE_IDX] = clauseID
matchobj[OTHER_VERBS] = (len(verbid) > 1)
# Kui kontekstis on ka teisi verbe, võib ära täita hoopis määrsõna rolli, ja
# kuna eitusmustrid on välistatud, pole enam kindel, et tegu on eitusega;
matchobj[POLARITY] = '??'
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, verbAra ) )
foundMatches.append( matchobj )
negPhraseWIDs.extend( [wid1] )
matchFound = True
elif verbPole.matches(tokenJson):
#
# 3. "Pole" + Verb (-nud)
#
if i+1 < len(clauseTokens):
tokenJson2 = clauseTokens[i+1]
if verbOleJarel.matches(tokenJson2):
wid1 = tokenJson[WORD_ID]
wid2 = tokenJson2[WORD_ID]
matchobj = { PHRASE: [wid1, wid2], PATTERN: ["pole", "verb"] }
matchobj[CLAUSE_IDX] = clauseID
if verbOle.matches(tokenJson2):
matchobj[PATTERN][1] = 'ole'
matchobj[OTHER_VERBS] = (len(verbid) > 2)
matchobj[POLARITY] = 'NEG'
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, verbPole ) )
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson2, verbOleJarel ) )
foundMatches.append( matchobj )
negPhraseWIDs.extend( [wid1, wid2] )
matchFound = True
if not matchFound and i+1 < len(clauseTokens):
tokenJson2 = clauseTokens[i+1]
#
# 3.2. Heuristik: Kui "pole" j2rel on vahetult verb (-tud, -mas), ning m6lemad
# paiknevad (osa)lause l6pus ning osalauses ongi vaid kaks verbi, loeme
# selle ka eituse fraasiks:
# Nt. Autode ostuhinda pole avalikustatud .
# Skriptid näitavad veateateid , kui tingimused pole täidetud .
# Aktsia- ja rahaturud on rahutud ning stabiilsust pole näha .
# ... kas ehk kedagi liikumas_0 pole_0 , keda võiks asjasse pühendada ...
#
if len(verbid)==2 and verbOleJarelHeur2.matches(tokenJson2) and \
_isClauseFinal( tokenJson2[WORD_ID], clauseTokens ):
wid1 = tokenJson[WORD_ID]
wid2 = tokenJson2[WORD_ID]
matchobj = { PHRASE: [wid1, wid2], PATTERN: ["pole", "verb"] }
matchobj[CLAUSE_IDX] = clauseID
if verbOle.matches(tokenJson2):
matchobj[PATTERN][1] = 'ole'
matchobj[OTHER_VERBS] = False
matchobj[POLARITY] = 'NEG'
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, verbPole ) )
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson2, verbOleJarelHeur2 ) )
foundMatches.append( matchobj )
negPhraseWIDs.extend( [wid1, wid2] )
matchFound = True
#
# 3.3. Heuristik: Kui ongi vaid kaks verbi, ning "pole" j2rel on osalause l6pus
# "nud", loeme selle samuti yheks fraasiks.
# Nt.
# Ladina keel pole välja surnud .
# Nii odavalt pole Eesti oma laevu kunagi välja andnud .
# Tööga pole keegi rikkaks saanud .
#
if not matchFound and len(verbid)==2 and verbid[0] == i:
if verbOleJarel.matches( clauseTokens[verbid[1]] ) and \
_isClauseFinal( clauseTokens[verbid[1]][WORD_ID], clauseTokens ):
wid1 = tokenJson[WORD_ID]
wid2 = clauseTokens[verbid[1]][WORD_ID]
matchobj = { PHRASE: [wid1, wid2], PATTERN: ["pole", "verb"] }
matchobj[CLAUSE_IDX] = clauseID
matchobj[OTHER_VERBS] = False
if verbOle.matches( clauseTokens[verbid[1]] ):
matchobj[PATTERN][1] = 'ole'
matchobj[POLARITY] = 'NEG'
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, verbPole ) )
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( clauseTokens[verbid[1]], verbOleJarel ) )
foundMatches.append( matchobj )
negPhraseWIDs.extend( [wid1, wid2] )
matchFound = True
if not matchFound:
#
# 3.4. Heuristik: Kui 'pole'-le j2rgneb osalauses kusagil kaugemal -nud,
# mis ei saa olla fraasi eestäiend, siis loeme selle olema-verbiga
# kokkukuuluvaks;
#
seenNudVerbs = 0
for k in range(i+1, len(clauseTokens)):
tokenJson2 = clauseTokens[k]
if verb.matches(tokenJson2) and not verbInf.matches(tokenJson2):
# Kui j6uame finiitverbini, siis katkestame otsingu
break
if sonaEga.matches(tokenJson2):
# Kui j6uame 'ega'-ni, siis katkestame otsingu
break
if verbOleJarel.matches(tokenJson2):
seenNudVerbs += 1
#
# Kui -nud verb eelneb vahetult m6nele teisele infiniitverbile,
# on v2ga t6en2oline, et -nud on peaverb "pole" otsene alluv ning
# pole eestäiend, nt:
#
# kuid ta polnud_0 ka teatris õppinud_1 improviseerima ning
# Ega me pole_0 siia tulnud_1 paastuma ja palvetama , "
# ja Raul poleks_0 teda härrasmehena kodust välja tahtnud_1 ajada .
# ma pole_0 iial kasutanud_1 keelatud aineid .
#
# Kontrollime, et nud-ile j2rgneks infiniitverb, ning
# vahel poleks teisi nud-verbe ...
#
if k+1 in verbid and verbInf.matches(clauseTokens[k+1]) and \
seenNudVerbs < 2:
wid1 = tokenJson[WORD_ID]
wid2 = tokenJson2[WORD_ID]
matchobj = { PHRASE: [wid1, wid2], PATTERN: ["pole", "verb"] }
matchobj[CLAUSE_IDX] = clauseID
if verbOle.matches(tokenJson2):
matchobj[PATTERN][1] = 'ole'
matchobj[OTHER_VERBS] = True
matchobj[POLARITY] = 'NEG'
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, verbPole, discardAnalyses = verbInf ) )
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson2, [verbOleJarel] ) )
foundMatches.append( matchobj )
negPhraseWIDs.extend( [wid1, wid2] )
matchFound = True
#_debugPrint( (('+'.join(matchobj[PATTERN]))+' | '+_getJsonAsTextString(clauseTokens, markTokens = [ matchobj[PHRASE] ] )))
break
#
# Kui -nud verb eelneb vahetult m6nele adverbile, mis t6en2oliselt
# on lauses iseseisev s6na (nt 'ka', 'siis', 'veel', 'juba' jms), siis ei
# saa -nud olla eest2iend ning peaks olema peaverb "pole" otsene alluv, nt:
#
# Varem pole_0 ma kirjandile jõudnud_0 aga sellepärast ,
# " Ma pole_0 Belgias saanud_0 isegi parkimistrahvi ! "
# Polnud_0 õllelembid saanud_0 veel õieti jõuluvaimu sisse elada ,
# mulle polnud_0 Väike jõudnud_0 ju veel rääkida ,
#
# Lisaks kontrollime, et vahel poleks teisi -nud verbe;
#
elif k+1<len(clauseTokens) and _phraseBreakerAdvs.matches(clauseTokens[k+1]) and \
seenNudVerbs < 2:
wid1 = tokenJson[WORD_ID]
wid2 = tokenJson2[WORD_ID]
matchobj = { PHRASE: [wid1, wid2], PATTERN: ["pole", "verb"] }
matchobj[CLAUSE_IDX] = clauseID
if verbOle.matches(tokenJson2):
matchobj[PATTERN][1] = 'ole'
matchobj[OTHER_VERBS] = (len(verbid) > 2)
matchobj[POLARITY] = 'NEG'
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, verbPole, discardAnalyses = verbInf ) )
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson2, [verbOleJarel] ) )
foundMatches.append( matchobj )
negPhraseWIDs.extend( [wid1, wid2] )
matchFound = True
#_debugPrint( (('+'.join(matchobj[PATTERN]))+' | '+_getJsonAsTextString(clauseTokens, markTokens = [ matchobj[PHRASE] ] )))
break
if not matchFound and _isClauseFinal( tokenJson[WORD_ID], clauseTokens ):
#
# 3.5. Heuristik: Kui "pole" on osalause l6pus, ning sellele eelneb vahetult
# "nud", v6i eelneb vahetult tud/da/mas ning osalauses pole teisi verbe,
# loeme liiteituseks:
# Nt.
# Huvitav ainult , miks ta mulle helistanud_0 pole_0 .
# Mingit kuulsust ma küll kogunud_0 pole_0 .
#
if i-1 > -1:
tokenJson2 = clauseTokens[i-1]
if verbOleJarel.matches(tokenJson2) or (len(verbid)==2 and \
verbOleJarelHeur2.matches(tokenJson2)):
wid1 = tokenJson[WORD_ID]
wid2 = tokenJson2[WORD_ID]
matchobj = { PHRASE: [wid1, wid2], PATTERN: ["pole", "verb"] }
matchobj[CLAUSE_IDX] = clauseID
matchobj[OTHER_VERBS] = (len(verbid) > 2)
if verbOle.matches( tokenJson2 ):
matchobj[PATTERN][1] = 'ole'
matchobj[POLARITY] = 'NEG'
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, verbPole ) )
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson2, [verbOleJarel, verbOleJarelHeur2] ) )
foundMatches.append( matchobj )
negPhraseWIDs.extend( [wid1, wid2] )
matchFound = True
#
# 3.X. Ei oska "pole" predikaadikonteksti m22rata ...
#
if not matchFound:
wid1 = tokenJson[WORD_ID]
matchobj = { PHRASE: [wid1], POLARITY: 'NEG', PATTERN: ["pole"] }
matchobj[CLAUSE_IDX] = clauseID
matchobj[OTHER_VERBS] = (len(verbid) > 1)
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, verbPole ) )
foundMatches.append( matchobj )
negPhraseWIDs.extend( [wid1] )
matchFound = True
# ===================================================================
# V e r b i j a a t u s
# ===================================================================
elif tokenJson[WORD_ID] not in negPhraseWIDs and verb.matches(tokenJson) and \
not verbInf.matches(tokenJson):
#
# Tavaline verb ( mitte olema-verb )
#
if not verbOle.matches( tokenJson ):
wid1 = tokenJson[WORD_ID]
matchobj = { PHRASE: [wid1], POLARITY: 'POS', PATTERN: ["verb"] }
matchobj[CLAUSE_IDX] = clauseID
matchobj[OTHER_VERBS] = (len(verbid) > 1)
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, verb, discardAnalyses = verbInf ) )
foundMatches.append( matchobj )
posPhraseWIDs.extend( [wid1] )
matchFound = True
#
# Olema-verb
#
else:
if (len(verbid) == 1):
# Yksik olema-verb
wid1 = tokenJson[WORD_ID]
matchobj = { PHRASE: [wid1], POLARITY: 'POS', PATTERN: ["ole"] }
matchobj[CLAUSE_IDX] = clauseID
matchobj[OTHER_VERBS] = False
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, verbOle, discardAnalyses = verbInf ) )
foundMatches.append( matchobj )
posPhraseWIDs.extend( [wid1] )
matchFound = True
else:
#
# Lauses on veel verbe: yritame teha kindlaks, kas tegu on liitkonstruktsiooniga
#
if i+1 < len(clauseTokens):
if verbOleJarel.matches(clauseTokens[i+1]) and \
clauseTokens[i+1][WORD_ID] not in negPhraseWIDs:
#
# Vahetult j2rgnev '-nud':
# Ta ise on_0 kasutanud_0 mitme turvafima teenuseid .
# Luule on_0 võtnud_0 Linnutee kuju .
# Õhtul oli_0 olnud_0 org , aga hommikul järv .
#
tokenJson2 = clauseTokens[i+1]
wid1 = tokenJson[WORD_ID]
wid2 = tokenJson2[WORD_ID]
matchobj = { PHRASE: [wid1, wid2], PATTERN: ["ole", "verb"] }
matchobj[CLAUSE_IDX] = clauseID
if verbOle.matches(tokenJson2):
matchobj[PATTERN][1] = 'ole'
matchobj[OTHER_VERBS] = (len(verbid) > 2)
matchobj[POLARITY] = 'POS'
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, verbOle, discardAnalyses = verbInf ) )
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson2, verbOleJarel ) )
#matchobj[PATTERN][1] += '??'
foundMatches.append( matchobj )
posPhraseWIDs.extend( [wid1, wid2] )
matchFound = True
#
# NB! See reegel võib eksida eksistentsiaallausete korral,
# mis, tõsi kyll, tunduvad olevat mittesagedased:
# Põlvamaal oli_0 möödunud_0 nädala teine pool traagiline .
# Kevadisel läbivaatusel oli_0 kogenud_0 mesinik abiks .
#
elif len(verbid)==2:
otherVerbIndex = verbid[1] if verbid[0]==i else verbid[0]
otherVerb = clauseTokens[ otherVerbIndex ]
#
# Osalauses ongi vaid kaks verbi ning 'nud/tud/mas' on osalause
# l6pus:
# Söögimaja ja kiriku uksed olid_0 suletud_0 ,
# Nööp on_0 olemas_0 , kunagi õmmeldakse mantel ka külge !
# Naine oli_0 Kalevi selleni viinud_0 .
# Etnofuturismi esivanemaid on_0 veel vähe uuritud_0 .
#
if (verbOleJarel.matches(otherVerb) or verbOleJarelHeur2.matches(otherVerb)) and \
_isClauseFinal( otherVerb[WORD_ID], clauseTokens ) and \
otherVerb[WORD_ID] not in negPhraseWIDs:
wid1 = tokenJson[WORD_ID]
wid2 = otherVerb[WORD_ID]
#
# Siin v6ib tekkida vigu/kaheldavaid kohti, kui kahe s6na vahel on
# sides6nu/punktuatsiooni/teatud_adverbe, n2iteks:
# on_0 kaasasündinud või elu jooksul omandatud_0
# Mariboris oli_0 äkki medal käes ja Tallinnas 240kilone Ruano võidetud_0
# Mina olen_0 päritolult põhjaeestlane , 50 aastat Põhja-Eestis elanud_0 .
# J2tame sellistel puhkudel yhtse verbifraasina eraldamata ...
#
if not _isSeparatedByPossibleClauseBreakers( clauseTokens, tokenJson[WORD_ID], otherVerb[WORD_ID], True, True, True):
matchobj = { PHRASE: [wid1, wid2], PATTERN: ["ole", "verb"] }
matchobj[CLAUSE_IDX] = clauseID
if verbOle.matches(otherVerb):
matchobj[PATTERN][1] = 'ole'
matchobj[OTHER_VERBS] = (len(verbid) > 2)
matchobj[POLARITY] = 'POS'
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, verbOle, discardAnalyses = verbInf ) )
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( otherVerb, [verbOleJarel, verbOleJarelHeur2] ) )
foundMatches.append( matchobj )
posPhraseWIDs.extend( [wid1, wid2] )
matchFound = True
elif (verbOleJarel.matches(otherVerb) or verbOleJarelHeur2.matches(otherVerb)) and \
otherVerb[WORD_ID] not in negPhraseWIDs and \
i+1 == otherVerbIndex:
#
# Osalauses ongi vaid kaks verbi ning 'nud/tud/mas' j2rgneb vahetult
# olema verbile (umbisikuline kõneviis):
# Oktoobris-detsembris 1944. a on_0 registreeritud_0 318 haigusjuhtu .
# Enamik uuringuid on_0 korraldatud_0 täiskasvanutel .
# Graafikud on_0 tehtud_0 programmis Exel 2003 .
# Üsna sagedane just teadustekstides;
#
wid1 = tokenJson[WORD_ID]
wid2 = otherVerb[WORD_ID]
matchobj = { PHRASE: [wid1, wid2], PATTERN: ["ole", "verb"] }
matchobj[CLAUSE_IDX] = clauseID
if verbOle.matches(otherVerb):
matchobj[PATTERN][1] = 'ole'
matchobj[OTHER_VERBS] = (len(verbid) > 2)
matchobj[POLARITY] = 'POS'
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, verbOle, discardAnalyses = verbInf ) )
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( otherVerb, [verbOleJarel, verbOleJarelHeur2] ) )
foundMatches.append( matchobj )
posPhraseWIDs.extend( [wid1, wid2] )
matchFound = True
#
# Kuna '-tud' võib potentsiaalselt olla ka eestäiend, võib tekkida
# ka vigu:
# Tema tegevuses on_0 teatud_0 plaan ehk tegevuskava .
# Kõige tähtsam võistlusala on_0 kombineeritud_0 võistlus .
# Lõpetuseks on_0 grillitud_0 mereandide valik .
#
#
# Kui olema-verbile j2rgneb osalauses kusagil kaugemal -nud, mis ei saa
# olla fraasi eestäiend, siis loeme selle olema-verbiga kokkukuuluvaks;
#
if not matchFound:
seenNudVerbs = 0
for k in range(i+1, len(clauseTokens)):
tokenJson2 = clauseTokens[k]
if verb.matches(tokenJson2) and not verbInf.matches(tokenJson2):
# Kui j6uame finiitverbini, siis katkestame otsingu
break
if sonaEga.matches(tokenJson2):
# Kui j6uame 'ega'-ni, siis katkestame otsingu
break
if verbOleJarel.matches(tokenJson2):
seenNudVerbs += 1
#
# Kui -nud verb eelneb vahetult m6nele teisele infiniitverbile,
# on v2ga t6en2oline, et -nud on peaverb "olema" otsene alluv ning
# pole eestäiend, nt:
#
# Midagi niisugust olin_0 ma kogu aeg lootnud_1 leida .
# siis varem või hiljem on_0 ta pidanud_1 taanduma
# siis oleks_0 ta mingisuguse plaani tõesti võinud_1 koostada
# jälle on_0 ühest investeeringust saanud_1 surnud kapital
#
# Kontrollime, et nud-ile j2rgneks infiniitverb, ning
# vahel poleks teisi nud-verbe ...
#
if k+1 in verbid and verbInf.matches(clauseTokens[k+1]) and \
seenNudVerbs < 2:
wid1 = tokenJson[WORD_ID]
wid2 = tokenJson2[WORD_ID]
matchobj = { PHRASE: [wid1, wid2], PATTERN: ["ole", "verb"] }
matchobj[CLAUSE_IDX] = clauseID
if verbOle.matches(tokenJson2):
matchobj[PATTERN][1] = 'ole'
matchobj[OTHER_VERBS] = True
matchobj[POLARITY] = 'POS'
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, verbOle, discardAnalyses = verbInf ) )
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson2, [verbOleJarel] ) )
foundMatches.append( matchobj )
posPhraseWIDs.extend( [wid1, wid2] )
matchFound = True
#_debugPrint( (('+'.join(matchobj[PATTERN]))+' | '+_getJsonAsTextString(clauseTokens, markTokens = [ matchobj[PHRASE] ] )))
break
#
# Probleemset:
# *) Kui kaks -nud-i on kõrvuti, võib minna valesti, kui pimesi siduda
# esimene, näiteks:
# küllap ta oli_0 siis ka alati armunud_0 olnud .
# Eksamikomisjon oli_0 veidi ehmunud_0 olnud ,
# Samuti on_0 mitmed TTga õnnetuses osalenud_0 lausunud ,
#
#
# Kui -nud verb eelneb vahetult m6nele adverbile, mis t6en2oliselt
# on lauses iseseisev s6na (nt 'ka', 'siis', 'veel', 'juba' jms), siis ei
# saa -nud olla eest2iend ning peaks olema peaverb "olema" otsene alluv, nt:
#
# Kasiinodega on_0 rikkaks saanud_1 siiski vaid hõimud ,
# See näitaja on_0 jällegi tõusnud_1 ainult Hansapangal .
# Me oleme_0 tegelikult Kristiga ikka laval laulnud_1 ka .
# Georg Ots on_0 noorte hulgas tõusnud_1 küll sümboli staatusesse
#
# Lisaks kontrollime, et vahel poleks teisi -nud verbe;
#
elif k+1<len(clauseTokens) and _phraseBreakerAdvs.matches(clauseTokens[k+1]) and \
seenNudVerbs < 2:
wid1 = tokenJson[WORD_ID]
wid2 = tokenJson2[WORD_ID]
matchobj = { PHRASE: [wid1, wid2], PATTERN: ["ole", "verb"] }
matchobj[CLAUSE_IDX] = clauseID
if verbOle.matches(tokenJson2):
matchobj[PATTERN][1] = 'ole'
matchobj[OTHER_VERBS] = (len(verbid) > 2)
matchobj[POLARITY] = 'POS'
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, verbOle, discardAnalyses = verbInf ) )
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson2, [verbOleJarel] ) )
foundMatches.append( matchobj )
posPhraseWIDs.extend( [wid1, wid2] )
matchFound = True
#_debugPrint( (('+'.join(matchobj[PATTERN]))+' | '+_getJsonAsTextString(clauseTokens, markTokens = [ matchobj[PHRASE] ] )))
break
#
# Probleemset:
# *) kui -nud-ile vahetult eelneb sidend, v6ib -nud kuuluda v2ljaj2ttelise
# olema verbi juurde:
# Mart Timmi on_0 maakonna üks edukamaid talupidajaid ja olnud_1 ka taluseltsi esimees .
# Ulvi oli_0 ometigi loov kunstnik ega võinud_1 ka eraelus esineda epigoonina .
#
if i-1 > -1 and not matchFound:
if _isClauseFinal( tokenJson[WORD_ID], clauseTokens ) and \
clauseTokens[i-1][WORD_ID] not in negPhraseWIDs and \
(verbOleJarel.matches(clauseTokens[i-1]) or (len(verbid)==2 and \
verbOleJarelHeur2.matches(clauseTokens[i-1]))) and \
clauseTokens[i-1][WORD_ID] not in negPhraseWIDs:
#
# Vahetult eelnev '-nud':
# Ma õpetan õievalemeid , mida ma ise viiendas klassis vihanud_0 olin_0 .
# ... siis kui nemad juba ära sõitnud_0 on_0 ...
# Vahetult eelnev 'tud/mas' ning osalauses pole rohkem verbe:
# Ja sellepärast jäigi kõik nii , nagu kirjutatud_0 oli_0 .
#
tokenJson2 = clauseTokens[i-1]
wid1 = tokenJson[WORD_ID]
wid2 = tokenJson2[WORD_ID]
matchobj = { PHRASE: [wid1, wid2], PATTERN: ["ole", "verb"] }
matchobj[CLAUSE_IDX] = clauseID
if verbOle.matches(tokenJson2):
matchobj[PATTERN][1] = 'ole'
matchobj[OTHER_VERBS] = (len(verbid) > 2)
matchobj[POLARITY] = 'POS'
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, verbOle, discardAnalyses = verbInf ) )
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson2, [verbOleJarel, verbOleJarelHeur2] ) )
#matchobj[PATTERN][1] += '??'
foundMatches.append( matchobj )
posPhraseWIDs.extend( [wid1, wid2] )
matchFound = True
if not matchFound:
#
# Ei oska m22rata, millega t2pselt "olema" verb seotud on ...
#
wid1 = tokenJson[WORD_ID]
matchobj = { PHRASE: [wid1], POLARITY: 'POS', PATTERN: ["ole"] }
matchobj[CLAUSE_IDX] = clauseID
matchobj[OTHER_VERBS] = True
matchobj[ANALYSIS_IDS] = []
matchobj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( tokenJson, verbOle, discardAnalyses = verbInf ) )
#matchobj[PATTERN][0]+='??'
foundMatches.append( matchobj )
posPhraseWIDs.extend( [wid1] )
matchFound = True
return foundMatches | python |
A method that detects, in the given clause, the central verbs and the basic
verb phrases directly attached to them:
 1) negation(s) attached to the predicate: (ei/ära/pole) + a suitable verb;
 2) olema-verb phrases: olema; olema + a suitable verb;
 3) ordinary (non-olema) verbs, which should form the core of the clause;
The input 'clauseTokens' is a list containing all words of one clause (the
word analyses produced by pyvabamorf);
Returns a list of the detected phrases, where each member (dict) has the form:
 { PHRASE: list,      -- position of the detected phrase in the sentence (WORD_ID indices);
   PATTERN: list,     -- the general pattern on which the detection was based;
   POLARITY: str,     -- polarity ('NEG', 'POS', '??')
   OTHER_VERBS: bool  -- are there further verbs in the context that could
                         potentially be joined to the word?
 }
The following general patterns (PATTERN sequences) are extracted:
 verb
 ole
 ei+V
 ole+V
 pole
 ei+ole
 pole+V
 ole+ole
 ei
 ära+V
 pole+ole
 ära
 ära+ole
NB! If the clause contains further verbs that could (potentially) attach to the
extracted pattern, then otherVerbs = True is set on the pattern; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/basic_verbchain_detection.py#L170-L835 |
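To make the returned structure concrete, here is a minimal usage sketch, assuming a variable clause_tokens that already holds the pyvabamorf analyses of one clause; it simply prints the keys and values of each detected match dict.

from estnltk.mw_verbs.basic_verbchain_detection import _extractBasicPredicateFromClause

# 'clause_tokens' is assumed to hold the pyvabamorf word analyses of a single clause.
matches = _extractBasicPredicateFromClause(clause_tokens, 0)
for match in matches:
    # Each match is a plain dict whose keys are the module constants
    # PHRASE, PATTERN, POLARITY, OTHER_VERBS, CLAUSE_IDX and ANALYSIS_IDS.
    for key, value in match.items():
        print(key, '->', value)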
estnltk/estnltk | estnltk/mw_verbs/basic_verbchain_detection.py | _expandOlemaVerbChains | def _expandOlemaVerbChains( clauseTokens, clauseID, foundChains ):
'''
Meetod, mis proovib laiendada 'olema'-l6pulisi (predikaadi) verbiahelaid, lisades
võimalusel nende otsa teisi verbe, nt
"on olnud" + "tehtud", "ei olnud" + "tehtud", "ei oleks" + "arvatud";
Vastavalt leitud laiendustele t2iendab andmeid sisendlistis foundChains;
'''
verbOle = WordTemplate({ROOT:'^ole$',POSTAG:'V'})
verbOleJarel1 = WordTemplate({POSTAG:'V',FORM:'(nud)$'})
verbOleJarel2 = WordTemplate({POSTAG:'V',FORM:'^(mas|tud)$'})
verbMata = WordTemplate({POSTAG:'V',FORM:'^(mata)$'})
verbMaDa = WordTemplate({POSTAG:'V',FORM:'^(da|ma)$'})
# J22dvustame s6nad, mis kuuluvad juba mingi tuvastatud verbifraasi koosseisu
annotatedWords = []
for verbObj in foundChains:
if verbObj[CLAUSE_IDX] != clauseID:
continue
if (len(verbObj[PATTERN])==1 and re.match('^(ei|ära|ega)$', verbObj[PATTERN][0])):
# V2lja j22vad yksikuna esinevad ei/ära/ega, kuna need tõenäoliselt ei sega
continue
annotatedWords.extend( verbObj[PHRASE] )
for verbObj in foundChains:
if verbObj[CLAUSE_IDX] != clauseID:
continue
if verbObj[PATTERN][-1] == 'ole' and verbObj[OTHER_VERBS]:
#
# Kui on tegemist 'olema' l6pulise verbiahelaga, mille kontekstis on teisi verbe,
# st saab veel laiendada ...
#
eiOlePattern = (len(verbObj[PATTERN])==2 and verbObj[PATTERN][0] == 'ei')
lastVerbWID = verbObj[PHRASE][-1]
lastTokIndex = [i for i in range(len(clauseTokens)) if clauseTokens[i][WORD_ID] == lastVerbWID]
lastTokIndex = lastTokIndex[0]
expansion = None
appliedRule = 0
if not _isClauseFinal( lastVerbWID, clauseTokens ):
maDaVerbsBetween = 0
oleInfFollowing = 0
for i in range(lastTokIndex + 1, len(clauseTokens)):
token = clauseTokens[i]
tokenWID = token[WORD_ID]
if tokenWID in annotatedWords:
break
if verbMaDa.matches(token):
maDaVerbsBetween += 1
if (verbOleJarel1.matches(token)) or verbOleJarel2.matches(token):
#
# Heuristik:
# Kui olema j2rel, osalause l6pus on nud/tud/mas ja nende vahel pole yhtegi
# punktuatsioonim2rki, sides6na, adverbe aga/kuid/vaid, juba m2rgendatud verbiahelat
# ega teist nud/tud/mas s6na, loeme selle s6na olema-fraasi laienduseks:
#
# Pere ei_0 ole_0 Eestis toimuvast vaimustatud_0 .
# " Viimasel ajal on_0 see asi jälle susisema hakanud_0 " ,
# Esiteks ei_0 olnud_0 vajalikul ajal tavaliselt bussi tulemas_0
#
if _isClauseFinal(tokenWID, clauseTokens ) and \
not _isSeparatedByPossibleClauseBreakers( clauseTokens, verbObj[PHRASE][-1], \
tokenWID, True, True, True):
expansion = token
# Veakoht: kui -mas j2rel on da/ma, pole kindel, et tegu otsese rektsiooniseosega:
# Islamlannale on_0 harjumatu näha meest midagi maast korjamas_0 ,
elif verbOleJarel1.matches(token) and eiOlePattern and i-lastTokIndex<=2:
#
# Heuristik: "ei"+"ole"-ahela j2rel "nud" ning nende vahel pole rohkem kui
# yks muu s6na:
# Tagantjärele mõeldes ei_0 oleks_0 ma pidanud_0 seda tegema .
# Mina ei_0 ole_0 suutnud_0 siiani maad osta .
#
expansion = token
oleInfFollowing += 1
break
elif verbMata.matches(token) and maDaVerbsBetween == 0:
#
# Heuristik:
# Kui olema j2rel, osalause l6pus on mata ja nende vahel pole yhtegi
# punktuatsioonim2rki, sides6na, adverbe aga/kuid/vaid, juba m2rgendatud
# verbiahelat, m6nd nud/tud/mas/ma/da verbi, loeme selle s6na olema-fraasi
# laienduseks:
#
# Maanaine on_0 veel leidmata_0 .
# linnaarhitekti koht oli_0 aasta aega täitmata_0
#
if _isClauseFinal(tokenWID, clauseTokens ) and \
not _isSeparatedByPossibleClauseBreakers( clauseTokens, verbObj[PHRASE][-1], \
tokenWID, True, True, True):
expansion = token
break
# Veakoht: kui vahel on 'ilma', siis see heuristik eksib t6en2oliselt:
# on_0 lihtsalt tõlgendatavad ka ilma situatsioonis osalemata_0
oleInfFollowing += 1
#
# Heuristik:
# Kui osalauses ei j2rgne 'olema'-verbiga yhilduvaid verbe, kyll aga eelneb vahetult
# m6ni selline ning seda pole veel m2rgendatud, loeme selle potentsiaalselt olema-verbiga
# yhilduvaks, nt:
#
# Unustatud_0 ei_0 ole_0 ka mänge .
# Tõhustatud_0 on_0 ka turvameetmeid .
# milleks looja ta maailma loonud_0 on_0 , nimelt soo jätkamiseks .
#
if oleInfFollowing == 0 and not expansion:
minWID = min( verbObj[PHRASE] )
lastTokIndex = [i for i in range(len(clauseTokens)) if clauseTokens[i][WORD_ID] == minWID]
lastTokIndex = lastTokIndex[0]
token = clauseTokens[lastTokIndex-1]
if lastTokIndex-1 > -1 and token[WORD_ID] not in annotatedWords:
if (verbOleJarel1.matches(token) or verbOleJarel2.matches(token)):
expansion = token
appliedRule = 1
#
# Eituse (aga ka vastavates jaatuse) fraasides j22vad siin eraldamata
# ei + ole + Adv/Nom + Verb_da
# mustrid, nt:
# Ei_0 ole_0 mõtet teha sellist söögikohta .
# Ei_0 ole_0 võimalik väiksema vastu vahetada .
# Ei_0 ole_0 pankuril vaja teada .
# Nendega proovime tegeleda hiljem.
#
else:
#
# Leiame ahela alguspunkti (minimaalse ID-ga verbi)
#
minWID = min( verbObj[PHRASE] )
lastTokIndex = [i for i in range(len(clauseTokens)) if clauseTokens[i][WORD_ID] == minWID]
if lastTokIndex:
lastTokIndex = lastTokIndex[0]
if lastTokIndex-1 > -1 and clauseTokens[lastTokIndex-1][WORD_ID] not in annotatedWords:
#
# Heuristik:
# Kui "olema"-l6puline ahel on osalause l6pus, ning vahetult eelneb nud/tud/mas,
# siis loeme selle olema juurde kuuluvaks, nt:
# mis juba olnud ja veel tulemas_0 on_0 ,
# Eesti selle alamprojektiga seotud_0 ei_0 ole_0 .
# trombootilisi episoode kordunud_0 ei_0 ole_0 .
# (Yldiselt paistab suhteliselt v2heproduktiivne reegel olevat)
#
token = clauseTokens[lastTokIndex-1]
if (verbOleJarel1.matches(token) or verbOleJarel2.matches(token)):
expansion = token
if expansion:
tokenWID = expansion[WORD_ID]
verbObj[PHRASE].append( tokenWID )
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( expansion, [verbOleJarel1, verbOleJarel2, verbMata] ) )
if verbOle.matches(expansion):
verbObj[PATTERN].append('ole')
else:
verbObj[PATTERN].append('verb')
annotatedWords.append( tokenWID ) | python |
A method that tries to extend 'olema'-final (predicate) verb chains by
appending further verbs to them where possible, e.g.
"on olnud" + "tehtud", "ei olnud" + "tehtud", "ei oleks" + "arvatud";
Updates the data in the input list foundChains according to the extensions found; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/basic_verbchain_detection.py#L838-L987 |
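A hedged sketch of how this step could follow the basic detector; the variable names and the two-step call order are assumptions, only the function signatures come from the code above.

# Detect the basic chains first, then extend the 'ole'-final ones in place.
found_chains = _extractBasicPredicateFromClause(clause_tokens, 0)
_expandOlemaVerbChains(clause_tokens, 0, found_chains)
# For a clause like "ei olnud tehtud", a chain whose PATTERN was ['ei', 'ole']
# may now end in ['ei', 'ole', 'verb'], with the WORD_ID of "tehtud" appended
# to its PHRASE list; nothing is returned, found_chains is modified in place.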
estnltk/estnltk | estnltk/mw_verbs/basic_verbchain_detection.py | _loadVerbSubcatRelations | def _loadVerbSubcatRelations(infile):
'''
Meetod, mis loeb failist sisse verbide rektsiooniseosed infiniitverbidega;
Eeldab, et rektsiooniseosed on tekstifailis, kujul:
häbene da mast
igatse da
St rea alguses on verbilemma ning TAB-iga on sellest eraldatud võimalike
rektsioonide (käändeliste verbide vormitunnuste) loetelu, tähtsuse
järjekorras;
Tagastab rektsiooniseosed sõnastikuna, mille võtmeteks lemmad ning väärtusteks
vastavad vormitunnuste loetelud.
'''
relations = dict()
in_f = codecs.open(infile, mode='r', encoding='utf-8')
for line in in_f:
line = line.rstrip()
if len(line) > 0 and not re.match("^#.+$", line):
(verb, forms) = line.split('\t')
relations[verb] = forms.split()
in_f.close()
return relations | python |
A method that reads in, from a file, the subcategorization relations between
verbs and non-finite verbs;
Expects the relations to be in a text file, in the form:
häbene da mast
igatse da
I.e. each line starts with a verb lemma, followed (separated by a TAB) by the
list of possible governed forms (form markers of the non-finite verbs), in
order of importance;
Returns the relations as a dictionary whose keys are the lemmas and whose
values are the corresponding lists of form markers. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/basic_verbchain_detection.py#L990-L1011 |
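A small sketch of the expected file format and the resulting dictionary; the file name is hypothetical, and the two data lines are the ones quoted in the docstring.

# verb_subcat_rules.txt (a TAB between the lemma and the form list):
# häbene<TAB>da mast
# igatse<TAB>da
relations = _loadVerbSubcatRelations('verb_subcat_rules.txt')
assert relations == {'häbene': ['da', 'mast'], 'igatse': ['da']}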
estnltk/estnltk | estnltk/mw_verbs/basic_verbchain_detection.py | _isVerbExpansible | def _isVerbExpansible( verbObj, clauseTokens, clauseID ):
'''
Kontrollib, kas tavaline verb on laiendatav etteantud osalauses:
*) verbi kontekstis (osalauses) on veel teisi verbe;
*) verb kuulub etteantud osalausesse;
*) tegemist ei ole olema-verbiga (neid vaatame mujal eraldi);
*) tegemist pole maks|mas|mast|mata-verbiga;
*) tegemist pole verbiahelaga, mille l6pus on ja/ning/ega/v6i-fraas;
Tagastab True, kui k6ik tingimused t2idetud;
'''
global _verbInfNonExpansible
# Leiame, kas fraas kuulub antud osalausesse ning on laiendatav
if verbObj[OTHER_VERBS] and verbObj[CLAUSE_IDX] == clauseID and \
re.match('^(verb)$', verbObj[PATTERN][-1], re.I):
# Leiame viimasele s6nale vastava token'i
lastToken = [token for token in clauseTokens if token[WORD_ID] == verbObj[PHRASE][-1]]
if not lastToken:
raise Exception(' Last token not found for '+str(verbObj)+' in '+str( getJsonAsTextString(clauseTokens) ))
lastToken = lastToken[0]
# Leiame, ega tegu pole maks/mas/mast/mata verbidega (neid esialgu ei laienda edasi)
# NB! Tegelikult peaks v2hemalt -mas verbe saama siiski laiendada:
# Ma ei_0 käinud_0 teda palumas_0 ümber otsustada_0 .
# Aga kuidas seda teha v6imalikult v2heste vigadega, vajab edasist uurimist ...
if not _verbInfNonExpansible.matches(lastToken):
# Kontrollime, et fraasi l6pus poleks ja/ning/ega/v6i fraasi:
# kui on, siis esialgu targu seda fraasi laiendama ei hakka:
if len(verbObj[PATTERN]) >=3 and verbObj[PATTERN][-2] == '&':
return False
return True
#
# TODO: siin tuleks ilmselt keelata ka 'saama + Verb_tud' konstruktsioonide laiendused,
# kuna need kipuvad olema pigem vigased (kuigi haruldased); Nt.
#
# ringi hääletades sai_0 rongidega jänest sõita_0 ja vagunisaatjatest neidudega öösiti napsu võetud_0 .
#
return False | python |
Checks whether an ordinary verb is expandable in the given clause:
*) there are further verbs in the verb's context (the clause);
*) the verb belongs to the given clause;
*) it is not an olema-verb (those are handled separately elsewhere);
*) it is not a maks|mas|mast|mata verb;
*) it is not a verb chain ending in a ja/ning/ega/või phrase;
Returns True if all of the conditions are met; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/basic_verbchain_detection.py#L1015-L1050 |
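A hypothetical filtering step showing how this check might be applied to the chains found earlier; the variable names are assumptions.

# Keep only the chains that may still be extended within clause number 0.
expandable = [chain for chain in found_chains
              if _isVerbExpansible(chain, clause_tokens, 0)]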