def manhattan(src, tar, qval=2, normalized=False, alphabet=None):
"""Return the Manhattan distance between two strings.
This is a wrapper for :py:meth:`Manhattan.dist_abs`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
normalized : bool
Normalizes to [0, 1] if True
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The Manhattan distance
Examples
--------
>>> manhattan('cat', 'hat')
4.0
>>> manhattan('Niall', 'Neil')
7.0
>>> manhattan('Colin', 'Cuilen')
9.0
>>> manhattan('ATCG', 'TAGC')
10.0
"""
return Manhattan().dist_abs(src, tar, qval, normalized, alphabet)
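Under the hood, the distance is taken over q-gram multisets of the two strings. A minimal sketch of that computation, assuming start/stop-padded q-grams like those produced by abydos' QGrams class (the '$' and '#' padding symbols are an assumption of this sketch):

from collections import Counter

def manhattan_sketch(src, tar, q=2):
    def qgrams(s):
        # pad so edge characters also appear in q-grams (assumed scheme)
        s = '$' * (q - 1) + s + '#' * (q - 1)
        return Counter(s[i:i + q] for i in range(len(s) - q + 1))
    a, b = qgrams(src), qgrams(tar)
    # L1 distance: sum of absolute count differences over observed q-grams
    return float(sum(abs(a[g] - b[g]) for g in set(a) | set(b)))

assert manhattan_sketch('cat', 'hat') == 4.0  # matches the doctest above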
def dist_manhattan(src, tar, qval=2, alphabet=None):
"""Return the normalized Manhattan distance between two strings.
This is a wrapper for :py:meth:`Manhattan.dist`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The normalized Manhattan distance
Examples
--------
>>> dist_manhattan('cat', 'hat')
0.5
>>> round(dist_manhattan('Niall', 'Neil'), 12)
0.636363636364
>>> round(dist_manhattan('Colin', 'Cuilen'), 12)
0.692307692308
>>> dist_manhattan('ATCG', 'TAGC')
1.0
"""
return Manhattan().dist(src, tar, qval, alphabet)
def sim_manhattan(src, tar, qval=2, alphabet=None):
"""Return the normalized Manhattan similarity of two strings.
This is a wrapper for :py:meth:`Manhattan.sim`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The normalized Manhattan similarity
Examples
--------
>>> sim_manhattan('cat', 'hat')
0.5
>>> round(sim_manhattan('Niall', 'Neil'), 12)
0.363636363636
>>> round(sim_manhattan('Colin', 'Cuilen'), 12)
0.307692307692
>>> sim_manhattan('ATCG', 'TAGC')
0.0
"""
return Manhattan().sim(src, tar, qval, alphabet)
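As the doctests illustrate, the normalized similarity and distance are complements on [0, 1], so either one determines the other:

# sim_manhattan(src, tar) == 1.0 - dist_manhattan(src, tar)
assert abs(sim_manhattan('cat', 'hat') + dist_manhattan('cat', 'hat') - 1.0) < 1e-12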
def sim_jaro_winkler(
src,
tar,
qval=1,
mode='winkler',
long_strings=False,
boost_threshold=0.7,
scaling_factor=0.1,
):
"""Return the Jaro or Jaro-Winkler similarity of two strings.
This is a wrapper for :py:meth:`JaroWinkler.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
qval : int
The length of each q-gram (defaults to 1: character-wise matching)
mode : str
Indicates which variant of this distance metric to compute:
- ``winkler`` -- computes the Jaro-Winkler distance (default) which
increases the score for matches near the start of the word
- ``jaro`` -- computes the Jaro distance
long_strings : bool
Set to True to "Increase the probability of a match when the number of
matched characters is large. This option allows for a little more
tolerance when the strings are large. It is not an appropriate test
when comparing fixed length fields such as phone and social security
numbers." (Used in 'winkler' mode only.)
boost_threshold : float
A value between 0 and 1, below which the Winkler boost is not applied
(defaults to 0.7). (Used in 'winkler' mode only.)
scaling_factor : float
A value between 0 and 0.25, indicating by how much to boost scores for
matching prefixes (defaults to 0.1). (Used in 'winkler' mode only.)
Returns
-------
float
Jaro or Jaro-Winkler similarity
Examples
--------
>>> round(sim_jaro_winkler('cat', 'hat'), 12)
0.777777777778
>>> round(sim_jaro_winkler('Niall', 'Neil'), 12)
0.805
>>> round(sim_jaro_winkler('aluminum', 'Catalan'), 12)
0.60119047619
>>> round(sim_jaro_winkler('ATCG', 'TAGC'), 12)
0.833333333333
>>> round(sim_jaro_winkler('cat', 'hat', mode='jaro'), 12)
0.777777777778
>>> round(sim_jaro_winkler('Niall', 'Neil', mode='jaro'), 12)
0.783333333333
>>> round(sim_jaro_winkler('aluminum', 'Catalan', mode='jaro'), 12)
0.60119047619
>>> round(sim_jaro_winkler('ATCG', 'TAGC', mode='jaro'), 12)
0.833333333333
"""
return JaroWinkler().sim(
src, tar, qval, mode, long_strings, boost_threshold, scaling_factor
)
def dist_jaro_winkler(
src,
tar,
qval=1,
mode='winkler',
long_strings=False,
boost_threshold=0.7,
scaling_factor=0.1,
):
"""Return the Jaro or Jaro-Winkler distance between two strings.
This is a wrapper for :py:meth:`JaroWinkler.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
qval : int
The length of each q-gram (defaults to 1: character-wise matching)
mode : str
Indicates which variant of this distance metric to compute:
- ``winkler`` -- computes the Jaro-Winkler distance (default) which
increases the score for matches near the start of the word
- ``jaro`` -- computes the Jaro distance
long_strings : bool
Set to True to "Increase the probability of a match when the number of
matched characters is large. This option allows for a little more
tolerance when the strings are large. It is not an appropriate test
when comparing fixed length fields such as phone and social security
numbers." (Used in 'winkler' mode only.)
boost_threshold : float
A value between 0 and 1, below which the Winkler boost is not applied
(defaults to 0.7). (Used in 'winkler' mode only.)
scaling_factor : float
A value between 0 and 0.25, indicating by how much to boost scores for
matching prefixes (defaults to 0.1). (Used in 'winkler' mode only.)
Returns
-------
float
Jaro or Jaro-Winkler distance
Examples
--------
>>> round(dist_jaro_winkler('cat', 'hat'), 12)
0.222222222222
>>> round(dist_jaro_winkler('Niall', 'Neil'), 12)
0.195
>>> round(dist_jaro_winkler('aluminum', 'Catalan'), 12)
0.39880952381
>>> round(dist_jaro_winkler('ATCG', 'TAGC'), 12)
0.166666666667
>>> round(dist_jaro_winkler('cat', 'hat', mode='jaro'), 12)
0.222222222222
>>> round(dist_jaro_winkler('Niall', 'Neil', mode='jaro'), 12)
0.216666666667
>>> round(dist_jaro_winkler('aluminum', 'Catalan', mode='jaro'), 12)
0.39880952381
>>> round(dist_jaro_winkler('ATCG', 'TAGC', mode='jaro'), 12)
0.166666666667
"""
return JaroWinkler().dist(
src, tar, qval, mode, long_strings, boost_threshold, scaling_factor
)
def sim(
self,
src,
tar,
qval=1,
mode='winkler',
long_strings=False,
boost_threshold=0.7,
scaling_factor=0.1,
):
"""Return the Jaro or Jaro-Winkler similarity of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
qval : int
The length of each q-gram (defaults to 1: character-wise matching)
mode : str
Indicates which variant of this distance metric to compute:
- ``winkler`` -- computes the Jaro-Winkler distance (default)
which increases the score for matches near the start of the
word
- ``jaro`` -- computes the Jaro distance
long_strings : bool
Set to True to "Increase the probability of a match when the number
of matched characters is large. This option allows for a little
more tolerance when the strings are large. It is not an appropriate
test when comparing fixed length fields such as phone and social
security numbers." (Used in 'winkler' mode only.)
boost_threshold : float
A value between 0 and 1, below which the Winkler boost is not
applied (defaults to 0.7). (Used in 'winkler' mode only.)
scaling_factor : float
A value between 0 and 0.25, indicating by how much to boost scores
for matching prefixes (defaults to 0.1). (Used in 'winkler' mode
only.)
Returns
-------
float
Jaro or Jaro-Winkler similarity
Raises
------
ValueError
Unsupported boost_threshold assignment; boost_threshold must be
between 0 and 1.
ValueError
Unsupported scaling_factor assignment; scaling_factor must be
between 0 and 0.25.
Examples
--------
>>> cmp = JaroWinkler()
>>> round(cmp.sim('cat', 'hat'), 12)
0.777777777778
>>> round(cmp.sim('Niall', 'Neil'), 12)
0.805
>>> round(cmp.sim('aluminum', 'Catalan'), 12)
0.60119047619
>>> round(cmp.sim('ATCG', 'TAGC'), 12)
0.833333333333
>>> round(cmp.sim('cat', 'hat', mode='jaro'), 12)
0.777777777778
>>> round(cmp.sim('Niall', 'Neil', mode='jaro'), 12)
0.783333333333
>>> round(cmp.sim('aluminum', 'Catalan', mode='jaro'), 12)
0.60119047619
>>> round(cmp.sim('ATCG', 'TAGC', mode='jaro'), 12)
0.833333333333
"""
if mode == 'winkler':
if boost_threshold > 1 or boost_threshold < 0:
raise ValueError(
'Unsupported boost_threshold assignment; '
+ 'boost_threshold must be between 0 and 1.'
)
if scaling_factor > 0.25 or scaling_factor < 0:
raise ValueError(
'Unsupported scaling_factor assignment; '
+ 'scaling_factor must be between 0 and 0.25.'
)
if src == tar:
return 1.0
src = QGrams(src.strip(), qval)._ordered_list
tar = QGrams(tar.strip(), qval)._ordered_list
lens = len(src)
lent = len(tar)
# If either string is blank - return - added in Version 2
if lens == 0 or lent == 0:
return 0.0
if lens > lent:
search_range = lens
minv = lent
else:
search_range = lent
minv = lens
# Zero out the flags
src_flag = [0] * search_range
tar_flag = [0] * search_range
search_range = max(0, search_range // 2 - 1)
# Looking only within the search range,
# count and flag the matched pairs.
num_com = 0
yl1 = lent - 1
for i in range(lens):
low_lim = (i - search_range) if (i >= search_range) else 0
hi_lim = (i + search_range) if ((i + search_range) <= yl1) else yl1
for j in range(low_lim, hi_lim + 1):
if (tar_flag[j] == 0) and (tar[j] == src[i]):
tar_flag[j] = 1
src_flag[i] = 1
num_com += 1
break
# If no characters in common - return
if num_com == 0:
return 0.0
# Count the number of transpositions
k = n_trans = 0
for i in range(lens):
if src_flag[i] != 0:
j = 0
for j in range(k, lent): # pragma: no branch
if tar_flag[j] != 0:
k = j + 1
break
if src[i] != tar[j]:
n_trans += 1
n_trans //= 2
# Main weight computation for Jaro distance
weight = (
num_com / lens + num_com / lent + (num_com - n_trans) / num_com
)
weight /= 3.0
# Continue to boost the weight if the strings are similar
# This is the Winkler portion of Jaro-Winkler distance
if mode == 'winkler' and weight > boost_threshold:
# Adjust for having up to the first 4 characters in common
j = 4 if (minv >= 4) else minv
i = 0
while (i < j) and (src[i] == tar[i]):
i += 1
weight += i * scaling_factor * (1.0 - weight)
# Optionally adjust for long strings.
# After agreeing beginning chars, at least two more must agree and
# the agreeing characters must be > .5 of remaining characters.
if (
long_strings
and (minv > 4)
and (num_com > i + 1)
and (2 * num_com >= minv + i)
):
weight += (1.0 - weight) * (
(num_com - i - 1) / (lens + lent - i * 2 + 2)
)
return weight
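To make the flow above concrete, here is the 'Niall'/'Neil' doctest worked by hand with character-wise matching (qval=1):

# search_range = max(5, 4) // 2 - 1 = 1
# matches found within that window: N-N, i-i, l-l  ->  num_com = 3
# the matched characters occur in the same order   ->  n_trans = 0
# Jaro weight = (3/5 + 3/4 + 3/3) / 3              =  0.783333...
# Winkler boost: common prefix 'N' gives i = 1, and the weight exceeds
# boost_threshold, so weight += 1 * 0.1 * (1 - 0.783333...)  =  0.805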
def hamming(src, tar, diff_lens=True):
"""Return the Hamming distance between two strings.
This is a wrapper for :py:meth:`Hamming.dist_abs`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
diff_lens : bool
If True (default), this returns the Hamming distance for those
characters that have a matching character in both strings plus the
difference in the strings' lengths. This is equivalent to extending the
shorter string with obligatorily non-matching characters. If False, an
exception is raised in the case of strings of unequal lengths.
Returns
-------
int
The Hamming distance between src & tar
Examples
--------
>>> hamming('cat', 'hat')
1
>>> hamming('Niall', 'Neil')
3
>>> hamming('aluminum', 'Catalan')
8
>>> hamming('ATCG', 'TAGC')
4
"""
return Hamming().dist_abs(src, tar, diff_lens)
def dist_hamming(src, tar, diff_lens=True):
"""Return the normalized Hamming distance between two strings.
This is a wrapper for :py:meth:`Hamming.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
diff_lens : bool
If True (default), this returns the Hamming distance for those
characters that have a matching character in both strings plus the
difference in the strings' lengths. This is equivalent to extending the
shorter string with obligatorily non-matching characters. If False, an
exception is raised in the case of strings of unequal lengths.
Returns
-------
float
The normalized Hamming distance
Examples
--------
>>> round(dist_hamming('cat', 'hat'), 12)
0.333333333333
>>> dist_hamming('Niall', 'Neil')
0.6
>>> dist_hamming('aluminum', 'Catalan')
1.0
>>> dist_hamming('ATCG', 'TAGC')
1.0
"""
return Hamming().dist(src, tar, diff_lens)
def sim_hamming(src, tar, diff_lens=True):
"""Return the normalized Hamming similarity of two strings.
This is a wrapper for :py:meth:`Hamming.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
diff_lens : bool
If True (default), this returns the Hamming distance for those
characters that have a matching character in both strings plus the
difference in the strings' lengths. This is equivalent to extending the
shorter string with obligatorily non-matching characters. If False, an
exception is raised in the case of strings of unequal lengths.
Returns
-------
float
The normalized Hamming similarity
Examples
--------
>>> round(sim_hamming('cat', 'hat'), 12)
0.666666666667
>>> sim_hamming('Niall', 'Neil')
0.4
>>> sim_hamming('aluminum', 'Catalan')
0.0
>>> sim_hamming('ATCG', 'TAGC')
0.0
"""
return Hamming().sim(src, tar, diff_lens)
def dist_abs(self, src, tar, diff_lens=True):
"""Return the Hamming distance between two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
diff_lens : bool
If True (default), this returns the Hamming distance for those
characters that have a matching character in both strings plus the
difference in the strings' lengths. This is equivalent to extending
the shorter string with obligatorily non-matching characters. If
False, an exception is raised in the case of strings of unequal
lengths.
Returns
-------
int
The Hamming distance between src & tar
Raises
------
ValueError
Undefined for sequences of unequal length; set diff_lens to True
for Hamming distance between strings of unequal lengths.
Examples
--------
>>> cmp = Hamming()
>>> cmp.dist_abs('cat', 'hat')
1
>>> cmp.dist_abs('Niall', 'Neil')
3
>>> cmp.dist_abs('aluminum', 'Catalan')
8
>>> cmp.dist_abs('ATCG', 'TAGC')
4
"""
if not diff_lens and len(src) != len(tar):
raise ValueError(
'Undefined for sequences of unequal length; set diff_lens '
+ 'to True for Hamming distance between strings of unequal '
+ 'lengths.'
)
hdist = 0
if diff_lens:
hdist += abs(len(src) - len(tar))
hdist += sum(c1 != c2 for c1, c2 in zip(src, tar))
return hdist
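An equivalent formulation, as a minimal sketch using itertools.zip_longest: padding the shorter string with a sentinel that never equals a character yields exactly the diff_lens=True behavior above.

from itertools import zip_longest

def hamming_sketch(src, tar):
    # the sentinel object compares unequal to every character
    return sum(c1 != c2 for c1, c2 in zip_longest(src, tar, fillvalue=object()))

assert hamming_sketch('Niall', 'Neil') == 3  # matches the doctest above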
def dist(self, src, tar, diff_lens=True):
"""Return the normalized Hamming distance between two strings.
Hamming distance normalized to the interval [0, 1].
The Hamming distance is normalized by dividing it
by the greater of the number of characters in src & tar (unless
diff_lens is set to False, in which case an exception is raised).
The arguments are identical to those of the hamming() function.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
diff_lens : bool
If True (default), this returns the Hamming distance for those
characters that have a matching character in both strings plus the
difference in the strings' lengths. This is equivalent to extending
the shorter string with obligatorily non-matching characters. If
False, an exception is raised in the case of strings of unequal
lengths.
Returns
-------
float
Normalized Hamming distance
Examples
--------
>>> cmp = Hamming()
>>> round(cmp.dist('cat', 'hat'), 12)
0.333333333333
>>> cmp.dist('Niall', 'Neil')
0.6
>>> cmp.dist('aluminum', 'Catalan')
1.0
>>> cmp.dist('ATCG', 'TAGC')
1.0
"""
if src == tar:
return 0.0
return self.dist_abs(src, tar, diff_lens) / max(len(src), len(tar))
def encode(self, word, max_length=-1):
"""Return the Metaphone code for a word.
Based on Lawrence Philips' Pick BASIC code from 1990
:cite:`Philips:1990`, as described in :cite:`Philips:1990b`.
This incorporates some corrections to the above code, particularly
some of those suggested by Michael Kuhn in :cite:`Kuhn:1995`.
Parameters
----------
word : str
The word to transform
max_length : int
The maximum length of the returned Metaphone code (defaults to 64,
but in Philips' original implementation this was 4)
Returns
-------
str
The Metaphone value
Examples
--------
>>> pe = Metaphone()
>>> pe.encode('Christopher')
'KRSTFR'
>>> pe.encode('Niall')
'NL'
>>> pe.encode('Smith')
'SM0'
>>> pe.encode('Schmidt')
'SKMTT'
"""
# Require a max_length of at least 4
if max_length != -1:
max_length = max(4, max_length)
else:
max_length = 64
# Delete non-alphanumeric characters and make all caps
ename = ''.join(c for c in word.upper() if c.isalnum())
ename = ename.replace('ß', 'SS')
if not ename:
return ''
if ename[0:2] in {'PN', 'AE', 'KN', 'GN', 'WR'}:
ename = ename[1:]
elif ename[0] == 'X':
ename = 'S' + ename[1:]
elif ename[0:2] == 'WH':
ename = 'W' + ename[2:]
# Convert to metaphone
elen = len(ename) - 1
metaph = ''
for i in range(len(ename)):
if len(metaph) >= max_length:
break
if (
ename[i] not in {'G', 'T'}
and i > 0
and ename[i - 1] == ename[i]
):
continue
if ename[i] in self._uc_v_set and i == 0:
metaph = ename[i]
elif ename[i] == 'B':
if i != elen or ename[i - 1] != 'M':
metaph += ename[i]
elif ename[i] == 'C':
if not (
i > 0
and ename[i - 1] == 'S'
and ename[i + 1 : i + 2] in self._frontv
):
if ename[i + 1 : i + 3] == 'IA':
metaph += 'X'
elif ename[i + 1 : i + 2] in self._frontv:
metaph += 'S'
elif i > 0 and ename[i - 1 : i + 2] == 'SCH':
metaph += 'K'
elif ename[i + 1 : i + 2] == 'H':
if (
i == 0
and i + 1 < elen
and ename[i + 2 : i + 3] not in self._uc_v_set
):
metaph += 'K'
else:
metaph += 'X'
else:
metaph += 'K'
elif ename[i] == 'D':
if (
ename[i + 1 : i + 2] == 'G'
and ename[i + 2 : i + 3] in self._frontv
):
metaph += 'J'
else:
metaph += 'T'
elif ename[i] == 'G':
if ename[i + 1 : i + 2] == 'H' and not (
i + 1 == elen or ename[i + 2 : i + 3] not in self._uc_v_set
):
continue
elif i > 0 and (
(i + 1 == elen and ename[i + 1] == 'N')
or (i + 3 == elen and ename[i + 1 : i + 4] == 'NED')
):
continue
elif (
i - 1 > 0
and i + 1 <= elen
and ename[i - 1] == 'D'
and ename[i + 1] in self._frontv
):
continue
elif ename[i + 1 : i + 2] == 'G':
continue
elif ename[i + 1 : i + 2] in self._frontv:
if i == 0 or ename[i - 1] != 'G':
metaph += 'J'
else:
metaph += 'K'
else:
metaph += 'K'
elif ename[i] == 'H':
if (
i > 0
and ename[i - 1] in self._uc_v_set
and ename[i + 1 : i + 2] not in self._uc_v_set
):
continue
elif i > 0 and ename[i - 1] in self._varson:
continue
else:
metaph += 'H'
elif ename[i] in {'F', 'J', 'L', 'M', 'N', 'R'}:
metaph += ename[i]
elif ename[i] == 'K':
if i > 0 and ename[i - 1] == 'C':
continue
else:
metaph += 'K'
elif ename[i] == 'P':
if ename[i + 1 : i + 2] == 'H':
metaph += 'F'
else:
metaph += 'P'
elif ename[i] == 'Q':
metaph += 'K'
elif ename[i] == 'S':
if (
i > 0
and i + 2 <= elen
and ename[i + 1] == 'I'
and ename[i + 2] in 'OA'
):
metaph += 'X'
elif ename[i + 1 : i + 2] == 'H':
metaph += 'X'
else:
metaph += 'S'
elif ename[i] == 'T':
if (
i > 0
and i + 2 <= elen
and ename[i + 1] == 'I'
and ename[i + 2] in {'A', 'O'}
):
metaph += 'X'
elif ename[i + 1 : i + 2] == 'H':
metaph += '0'
elif ename[i + 1 : i + 3] != 'CH':
if ename[i - 1 : i] != 'T':
metaph += 'T'
elif ename[i] == 'V':
metaph += 'F'
elif ename[i] in 'WY':
if ename[i + 1 : i + 2] in self._uc_v_set:
metaph += ename[i]
elif ename[i] == 'X':
metaph += 'KS'
elif ename[i] == 'Z':
metaph += 'S'
return metaph
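As the docstring notes, Philips' original implementation truncated codes to 4 characters; passing max_length=4 reproduces that behavior (a usage sketch):

pe = Metaphone()
pe.encode('Christopher', max_length=4)  # -> 'KRST' rather than 'KRSTFR'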
def dolby(word, max_length=-1, keep_vowels=False, vowel_char='*'):
r"""Return the Dolby Code of a name.
This is a wrapper for :py:meth:`Dolby.encode`.
Parameters
----------
word : str
The word to transform
max_length : int
Maximum length of the returned Dolby code -- this also activates the
fixed-length code mode if it is greater than 0
keep_vowels : bool
If True, retains all vowel markers
vowel_char : str
The vowel marker character (defaults to \*)
Returns
-------
str
The Dolby Code
Examples
--------
>>> dolby('Hansen')
'H*NSN'
>>> dolby('Larsen')
'L*RSN'
>>> dolby('Aagaard')
'*GR'
>>> dolby('Braaten')
'BR*DN'
>>> dolby('Sandvik')
'S*NVK'
>>> dolby('Hansen', max_length=6)
'H*NS*N'
>>> dolby('Larsen', max_length=6)
'L*RS*N'
>>> dolby('Aagaard', max_length=6)
'*G*R '
>>> dolby('Braaten', max_length=6)
'BR*D*N'
>>> dolby('Sandvik', max_length=6)
'S*NF*K'
>>> dolby('Smith')
'SM*D'
>>> dolby('Waters')
'W*DRS'
>>> dolby('James')
'J*MS'
>>> dolby('Schmidt')
'SM*D'
>>> dolby('Ashcroft')
'*SKRFD'
>>> dolby('Smith', max_length=6)
'SM*D '
>>> dolby('Waters', max_length=6)
'W*D*RS'
>>> dolby('James', max_length=6)
'J*M*S '
>>> dolby('Schmidt', max_length=6)
'SM*D '
>>> dolby('Ashcroft', max_length=6)
'*SKRFD'
"""
return Dolby().encode(word, max_length, keep_vowels, vowel_char)
def encode(self, word, max_length=-1, keep_vowels=False, vowel_char='*'):
r"""Return the Dolby Code of a name.
Parameters
----------
word : str
The word to transform
max_length : int
Maximum length of the returned Dolby code -- this also activates
the fixed-length code mode if it is greater than 0
keep_vowels : bool
If True, retains all vowel markers
vowel_char : str
The vowel marker character (defaults to \*)
Returns
-------
str
The Dolby Code
Examples
--------
>>> pe = Dolby()
>>> pe.encode('Hansen')
'H*NSN'
>>> pe.encode('Larsen')
'L*RSN'
>>> pe.encode('Aagaard')
'*GR'
>>> pe.encode('Braaten')
'BR*DN'
>>> pe.encode('Sandvik')
'S*NVK'
>>> pe.encode('Hansen', max_length=6)
'H*NS*N'
>>> pe.encode('Larsen', max_length=6)
'L*RS*N'
>>> pe.encode('Aagaard', max_length=6)
'*G*R '
>>> pe.encode('Braaten', max_length=6)
'BR*D*N'
>>> pe.encode('Sandvik', max_length=6)
'S*NF*K'
>>> pe.encode('Smith')
'SM*D'
>>> pe.encode('Waters')
'W*DRS'
>>> pe.encode('James')
'J*MS'
>>> pe.encode('Schmidt')
'SM*D'
>>> pe.encode('Ashcroft')
'*SKRFD'
>>> pe.encode('Smith', max_length=6)
'SM*D '
>>> pe.encode('Waters', max_length=6)
'W*D*RS'
>>> pe.encode('James', max_length=6)
'J*M*S '
>>> pe.encode('Schmidt', max_length=6)
'SM*D '
>>> pe.encode('Ashcroft', max_length=6)
'*SKRFD'
"""
# uppercase, normalize, decompose, and filter non-A-Z out
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.replace('ß', 'SS')
word = ''.join(c for c in word if c in self._uc_set)
# Rule 1 (FL2)
if word[:3] in {'MCG', 'MAG', 'MAC'}:
word = 'MK' + word[3:]
elif word[:2] == 'MC':
word = 'MK' + word[2:]
# Rule 2 (FL3)
pos = len(word) - 2
while pos > -1:
if word[pos : pos + 2] in {
'DT',
'LD',
'ND',
'NT',
'RC',
'RD',
'RT',
'SC',
'SK',
'ST',
}:
word = word[: pos + 1] + word[pos + 2 :]
pos += 1
pos -= 1
# Rule 3 (FL4)
# Although the rule indicates "after the first letter", the test cases
# make it clear that these apply to the first letter also.
word = word.replace('X', 'KS')
word = word.replace('CE', 'SE')
word = word.replace('CI', 'SI')
word = word.replace('CY', 'SI')
# not in the rule set, but they seem to have intended it
word = word.replace('TCH', 'CH')
pos = word.find('CH', 1)
while pos != -1:
if word[pos - 1 : pos] not in self._uc_vy_set:
word = word[:pos] + 'S' + word[pos + 1 :]
pos = word.find('CH', pos + 1)
word = word.replace('C', 'K')
word = word.replace('Z', 'S')
word = word.replace('WR', 'R')
word = word.replace('DG', 'G')
word = word.replace('QU', 'K')
word = word.replace('T', 'D')
word = word.replace('PH', 'F')
# Rule 4 (FL5)
# Although the rule indicates "after the first letter", the test cases
# make it clear that these apply to the first letter also.
pos = word.find('K', 0)
while pos != -1:
if pos > 1 and word[pos - 1 : pos] not in self._uc_vy_set | {
'L',
'N',
'R',
}:
word = word[: pos - 1] + word[pos:]
pos -= 1
pos = word.find('K', pos + 1)
# Rule FL6
if max_length > 0 and word[-1:] == 'E':
word = word[:-1]
# Rule 5 (FL7)
word = self._delete_consecutive_repeats(word)
# Rule 6 (FL8)
if word[:2] == 'PF':
word = word[1:]
if word[-2:] == 'PF':
word = word[:-1]
elif word[-2:] == 'GH':
if word[-3:-2] in self._uc_vy_set:
word = word[:-2] + 'F'
else:
word = word[:-2] + 'G'
word = word.replace('GH', '')
# Rule FL9
if max_length > 0:
word = word.replace('V', 'F')
# Rules 7-9 (FL10-FL12)
first = 1 + (1 if max_length > 0 else 0)
code = ''
for pos, char in enumerate(word):
if char in self._uc_vy_set:
if first or keep_vowels:
code += vowel_char
first -= 1
elif pos > 0 and char in {'W', 'H'}:
continue
else:
code += char
if max_length > 0:
# Rule FL13
if len(code) > max_length and code[-1:] == 'S':
code = code[:-1]
if keep_vowels:
code = code[:max_length]
else:
# Rule FL14
code = code[: max_length + 2]
# Rule FL15
while len(code) > max_length:
vowels = len(code) - max_length
excess = vowels - 1
word = code
code = ''
for char in word:
if char == vowel_char:
if vowels:
code += char
vowels -= 1
else:
code += char
code = code[: max_length + excess]
# Rule FL16
code += ' ' * (max_length - len(code))
return code
def pshp_soundex_last(lname, max_length=4, german=False):
"""Calculate the PSHP Soundex/Viewex Coding of a last name.
This is a wrapper for :py:meth:`PSHPSoundexLast.encode`.
Parameters
----------
lname : str
The last name to encode
max_length : int
The length of the code returned (defaults to 4)
german : bool
Set to True if the name is German (different rules apply)
Returns
-------
str
The PSHP Soundex/Viewex Coding
Examples
--------
>>> pshp_soundex_last('Smith')
'S530'
>>> pshp_soundex_last('Waters')
'W350'
>>> pshp_soundex_last('James')
'J500'
>>> pshp_soundex_last('Schmidt')
'S530'
>>> pshp_soundex_last('Ashcroft')
'A225'
"""
return PSHPSoundexLast().encode(lname, max_length, german)
def encode(self, lname, max_length=4, german=False):
"""Calculate the PSHP Soundex/Viewex Coding of a last name.
Parameters
----------
lname : str
The last name to encode
max_length : int
The length of the code returned (defaults to 4)
german : bool
Set to True if the name is German (different rules apply)
Returns
-------
str
The PSHP Soundex/Viewex Coding
Examples
--------
>>> pe = PSHPSoundexLast()
>>> pe.encode('Smith')
'S530'
>>> pe.encode('Waters')
'W350'
>>> pe.encode('James')
'J500'
>>> pe.encode('Schmidt')
'S530'
>>> pe.encode('Ashcroft')
'A225'
"""
lname = unicode_normalize('NFKD', text_type(lname.upper()))
lname = lname.replace('ß', 'SS')
lname = ''.join(c for c in lname if c in self._uc_set)
# A. Prefix treatment
if lname[:3] == 'VON' or lname[:3] == 'VAN':
lname = lname[3:].strip()
# The rule implemented below says "MC, MAC become 1". I believe it
# meant to say they become M except in German data (where superscripted
# 1 indicates "except in German data"). It doesn't make sense for them
# to become 1 (BPFV -> 1) or to apply only to German data. Unfortunately,
# both articles have this error(?).
if not german:
if lname[:3] == 'MAC':
lname = 'M' + lname[3:]
elif lname[:2] == 'MC':
lname = 'M' + lname[2:]
# The non-German-only rule to strip ' is unnecessary due to filtering
if lname[:1] in {'E', 'I', 'O', 'U'}:
lname = 'A' + lname[1:]
elif lname[:2] in {'GE', 'GI', 'GY'}:
lname = 'J' + lname[1:]
elif lname[:2] in {'CE', 'CI', 'CY'}:
lname = 'S' + lname[1:]
elif lname[:3] == 'CHR':
lname = 'K' + lname[1:]
elif lname[:1] == 'C' and lname[:2] != 'CH':
lname = 'K' + lname[1:]
if lname[:2] == 'KN':
lname = 'N' + lname[1:]
elif lname[:2] == 'PH':
lname = 'F' + lname[1:]
elif lname[:3] in {'WIE', 'WEI'}:
lname = 'V' + lname[1:]
if german and lname[:1] in {'W', 'M', 'Y', 'Z'}:
lname = {'W': 'V', 'M': 'N', 'Y': 'J', 'Z': 'S'}[lname[0]] + lname[
1:
]
code = lname[:1]
# B. Postfix treatment
if german: # moved from end of postfix treatment due to blocking
if lname[-3:] == 'TES':
lname = lname[:-3]
elif lname[-2:] == 'TS':
lname = lname[:-2]
if lname[-3:] == 'TZE':
lname = lname[:-3]
elif lname[-2:] == 'ZE':
lname = lname[:-2]
if lname[-1:] == 'Z':
lname = lname[:-1]
elif lname[-2:] == 'TE':
lname = lname[:-2]
if lname[-1:] == 'R':
lname = lname[:-1] + 'N'
elif lname[-2:] in {'SE', 'CE'}:
lname = lname[:-2]
if lname[-2:] == 'SS':
lname = lname[:-2]
elif lname[-1:] == 'S':
lname = lname[:-1]
if not german:
l5_repl = {'STOWN': 'SAWON', 'MPSON': 'MASON'}
l4_repl = {
'NSEN': 'ASEN',
'MSON': 'ASON',
'STEN': 'SAEN',
'STON': 'SAON',
}
if lname[-5:] in l5_repl:
lname = lname[:-5] + l5_repl[lname[-5:]]
elif lname[-4:] in l4_repl:
lname = lname[:-4] + l4_repl[lname[-4:]]
if lname[-2:] in {'NG', 'ND'}:
lname = lname[:-1]
if not german and lname[-3:] in {'GAN', 'GEN'}:
lname = lname[:-3] + 'A' + lname[-2:]
# C. Infix Treatment
lname = lname.replace('CK', 'C')
lname = lname.replace('SCH', 'S')
lname = lname.replace('DT', 'T')
lname = lname.replace('ND', 'N')
lname = lname.replace('NG', 'N')
lname = lname.replace('LM', 'M')
lname = lname.replace('MN', 'M')
lname = lname.replace('WIE', 'VIE')
lname = lname.replace('WEI', 'VEI')
# D. Soundexing
# codes for X & Y are unspecified, but presumably are 2 & 0
lname = lname.translate(self._trans)
lname = self._delete_consecutive_repeats(lname)
code += lname[1:]
code = code.replace('0', '') # rule 1
if max_length != -1:
if len(code) < max_length:
code += '0' * (max_length - len(code))
else:
code = code[:max_length]
return code
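The class-level translation table self._trans is not shown in this excerpt. For illustration only, a Soundex-style table can be built with str.maketrans; the digit string below uses the classic Soundex assignments as a stand-in, since PSHP's actual assignments differ in places (note the comment about X & Y above):

# classic Soundex digits as a stand-in; PSHP's real table differs in places
_trans = str.maketrans(
    'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
    '01230120022455012623010202',
)
'MITH'.translate(_trans)  # -> '5030'; rule 1 above then strips the 0s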
def fingerprint(self, word):
"""Return the skeleton key.
Parameters
----------
word : str
The word to transform into its skeleton key
Returns
-------
str
The skeleton key
Examples
--------
>>> sk = SkeletonKey()
>>> sk.fingerprint('The quick brown fox jumped over the lazy dog.')
'THQCKBRWNFXJMPDVLZYGEUIOA'
>>> sk.fingerprint('Christopher')
'CHRSTPIOE'
>>> sk.fingerprint('Niall')
'NLIA'
"""
word = unicode_normalize('NFKD', text_type(word.upper()))
word = ''.join(c for c in word if c in self._letters)
start = word[0:1]
consonant_part = ''
vowel_part = ''
# add consonants & vowels to separate strings
# (omitting the first char & duplicates)
for char in word[1:]:
if char != start:
if char in self._vowels:
if char not in vowel_part:
vowel_part += char
elif char not in consonant_part:
consonant_part += char
# return the first char followed by consonants followed by vowels
return start + consonant_part + vowel_part
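A typical use of such fingerprints is blocking near-duplicates: words that share a skeleton key become candidates for the same cluster. A small usage sketch:

from collections import defaultdict

sk = SkeletonKey()
clusters = defaultdict(list)
for name in ['Niall', 'Nial', 'Christopher']:
    clusters[sk.fingerprint(name)].append(name)
# 'Niall' and 'Nial' both map to 'NLIA', so they land in the same cluster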
def nysiis(word, max_length=6, modified=False):
"""Return the NYSIIS code for a word.
This is a wrapper for :py:meth:`NYSIIS.encode`.
Parameters
----------
word : str
The word to transform
max_length : int
The maximum length (default 6) of the code to return
modified : bool
Indicates whether to use USDA modified NYSIIS
Returns
-------
str
The NYSIIS value
Examples
--------
>>> nysiis('Christopher')
'CRASTA'
>>> nysiis('Niall')
'NAL'
>>> nysiis('Smith')
'SNAT'
>>> nysiis('Schmidt')
'SNAD'
>>> nysiis('Christopher', max_length=-1)
'CRASTAFAR'
>>> nysiis('Christopher', max_length=8, modified=True)
'CRASTAFA'
>>> nysiis('Niall', max_length=8, modified=True)
'NAL'
>>> nysiis('Smith', max_length=8, modified=True)
'SNAT'
>>> nysiis('Schmidt', max_length=8, modified=True)
'SNAD'
"""
return NYSIIS().encode(word, max_length, modified)
def encode(self, word, max_length=6, modified=False):
"""Return the NYSIIS code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The maximum length (default 6) of the code to return
modified : bool
Indicates whether to use USDA modified NYSIIS
Returns
-------
str
The NYSIIS value
Examples
--------
>>> pe = NYSIIS()
>>> pe.encode('Christopher')
'CRASTA'
>>> pe.encode('Niall')
'NAL'
>>> pe.encode('Smith')
'SNAT'
>>> pe.encode('Schmidt')
'SNAD'
>>> pe.encode('Christopher', max_length=-1)
'CRASTAFAR'
>>> pe.encode('Christopher', max_length=8, modified=True)
'CRASTAFA'
>>> pe.encode('Niall', max_length=8, modified=True)
'NAL'
>>> pe.encode('Smith', max_length=8, modified=True)
'SNAT'
>>> pe.encode('Schmidt', max_length=8, modified=True)
'SNAD'
"""
# Require a max_length of at least 6
if max_length > -1:
max_length = max(6, max_length)
word = ''.join(c for c in word.upper() if c.isalpha())
word = word.replace('ß', 'SS')
# exit early if there are no alphas
if not word:
return ''
original_first_char = word[0]
if word[:3] == 'MAC':
word = 'MCC' + word[3:]
elif word[:2] == 'KN':
word = 'NN' + word[2:]
elif word[:1] == 'K':
word = 'C' + word[1:]
elif word[:2] in {'PH', 'PF'}:
word = 'FF' + word[2:]
elif word[:3] == 'SCH':
word = 'SSS' + word[3:]
elif modified:
if word[:2] == 'WR':
word = 'RR' + word[2:]
elif word[:2] == 'RH':
word = 'RR' + word[2:]
elif word[:2] == 'DG':
word = 'GG' + word[2:]
elif word[:1] in self._uc_v_set:
word = 'A' + word[1:]
if modified and word[-1:] in {'S', 'Z'}:
word = word[:-1]
if (
word[-2:] == 'EE'
or word[-2:] == 'IE'
or (modified and word[-2:] == 'YE')
):
word = word[:-2] + 'Y'
elif word[-2:] in {'DT', 'RT', 'RD'}:
word = word[:-2] + 'D'
elif word[-2:] in {'NT', 'ND'}:
word = word[:-2] + ('N' if modified else 'D')
elif modified:
if word[-2:] == 'IX':
word = word[:-2] + 'ICK'
elif word[-2:] == 'EX':
word = word[:-2] + 'ECK'
elif word[-2:] in {'JR', 'SR'}:
return 'ERROR'
key = word[:1]
skip = 0
for i in range(1, len(word)):
if i >= len(word):
continue
elif skip:
skip -= 1
continue
elif word[i : i + 2] == 'EV':
word = word[:i] + 'AF' + word[i + 2 :]
skip = 1
elif word[i] in self._uc_v_set:
word = word[:i] + 'A' + word[i + 1 :]
elif modified and i != len(word) - 1 and word[i] == 'Y':
word = word[:i] + 'A' + word[i + 1 :]
elif word[i] == 'Q':
word = word[:i] + 'G' + word[i + 1 :]
elif word[i] == 'Z':
word = word[:i] + 'S' + word[i + 1 :]
elif word[i] == 'M':
word = word[:i] + 'N' + word[i + 1 :]
elif word[i : i + 2] == 'KN':
word = word[:i] + 'N' + word[i + 2 :]
elif word[i] == 'K':
word = word[:i] + 'C' + word[i + 1 :]
elif modified and i == len(word) - 3 and word[i : i + 3] == 'SCH':
word = word[:i] + 'SSA'
skip = 2
elif word[i : i + 3] == 'SCH':
word = word[:i] + 'SSS' + word[i + 3 :]
skip = 2
elif modified and i == len(word) - 2 and word[i : i + 2] == 'SH':
word = word[:i] + 'SA'
skip = 1
elif word[i : i + 2] == 'SH':
word = word[:i] + 'SS' + word[i + 2 :]
skip = 1
elif word[i : i + 2] == 'PH':
word = word[:i] + 'FF' + word[i + 2 :]
skip = 1
elif modified and word[i : i + 3] == 'GHT':
word = word[:i] + 'TTT' + word[i + 3 :]
skip = 2
elif modified and word[i : i + 2] == 'DG':
word = word[:i] + 'GG' + word[i + 2 :]
skip = 1
elif modified and word[i : i + 2] == 'WR':
word = word[:i] + 'RR' + word[i + 2 :]
skip = 1
elif word[i] == 'H' and (
word[i - 1] not in self._uc_v_set
or word[i + 1 : i + 2] not in self._uc_v_set
):
word = word[:i] + word[i - 1] + word[i + 1 :]
elif word[i] == 'W' and word[i - 1] in self._uc_v_set:
word = word[:i] + word[i - 1] + word[i + 1 :]
if word[i : i + skip + 1] != key[-1:]:
key += word[i : i + skip + 1]
key = self._delete_consecutive_repeats(key)
if key[-1:] == 'S':
key = key[:-1]
if key[-2:] == 'AY':
key = key[:-2] + 'Y'
if key[-1:] == 'A':
key = key[:-1]
if modified and key[:1] == 'A':
key = original_first_char + key[1:]
if max_length > 0:
key = key[:max_length]
return key
def chebyshev(src, tar, qval=2, alphabet=None):
r"""Return the Chebyshev distance between two strings.
This is a wrapper for :py:meth:`Chebyshev.dist_abs`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The Chebyshev distance
Examples
--------
>>> chebyshev('cat', 'hat')
1.0
>>> chebyshev('Niall', 'Neil')
1.0
>>> chebyshev('Colin', 'Cuilen')
1.0
>>> chebyshev('ATCG', 'TAGC')
1.0
>>> chebyshev('ATCG', 'TAGC', qval=1)
0.0
>>> chebyshev('ATCGATTCGGAATTTC', 'TAGCATAATCGCCG', qval=1)
3.0
"""
return Chebyshev().dist_abs(src, tar, qval, alphabet)
def dist_abs(self, src, tar, qval=2, alphabet=None):
r"""Return the Chebyshev distance between two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The Chebyshev distance
Examples
--------
>>> cmp = Chebyshev()
>>> cmp.dist_abs('cat', 'hat')
1.0
>>> cmp.dist_abs('Niall', 'Neil')
1.0
>>> cmp.dist_abs('Colin', 'Cuilen')
1.0
>>> cmp.dist_abs('ATCG', 'TAGC')
1.0
>>> cmp.dist_abs('ATCG', 'TAGC', qval=1)
0.0
>>> cmp.dist_abs('ATCGATTCGGAATTTC', 'TAGCATAATCGCCG', qval=1)
3.0
"""
return super(self.__class__, self).dist_abs(
src, tar, qval, float('inf'), False, alphabet
)
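The call above parameterizes a shared Minkowski implementation with p = infinity: Chebyshev distance is simply the largest single q-gram count difference. A minimal sketch for qval=1, where unigram counts need no padding:

from collections import Counter

def chebyshev_sketch(src, tar):
    a, b = Counter(src), Counter(tar)
    return float(max(abs(a[g] - b[g]) for g in set(a) | set(b)))

# 'T' occurs six times in the first string but only three in the second:
assert chebyshev_sketch('ATCGATTCGGAATTTC', 'TAGCATAATCGCCG') == 3.0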
def mean_pairwise_similarity(
collection, metric=sim, mean_func=hmean, symmetric=False
):
"""Calculate the mean pairwise similarity of a collection of strings.
Takes the mean of the pairwise similarity between each member of a
collection, optionally in both directions (for asymmetric similarity
metrics).
Parameters
----------
collection : list
A collection of terms or a string that can be split
metric : function
A similarity metric function
mean_func : function
A mean function that takes a list of values and returns a float
symmetric : bool
Set to True if all pairwise similarities should be calculated in both
directions
Returns
-------
float
The mean pairwise similarity of a collection of strings
Raises
------
ValueError
mean_func must be a function
ValueError
metric must be a function
ValueError
collection is neither a string nor iterable type
ValueError
collection has fewer than two members
Examples
--------
>>> round(mean_pairwise_similarity(['Christopher', 'Kristof',
... 'Christobal']), 12)
0.519801980198
>>> round(mean_pairwise_similarity(['Niall', 'Neal', 'Neil']), 12)
0.545454545455
"""
if not callable(mean_func):
raise ValueError('mean_func must be a function')
if not callable(metric):
raise ValueError('metric must be a function')
if hasattr(collection, 'split'):
collection = collection.split()
if not hasattr(collection, '__iter__'):
raise ValueError('collection is neither a string nor iterable type')
elif len(collection) < 2:
raise ValueError('collection has fewer than two members')
collection = list(collection)
pairwise_values = []
for i in range(len(collection)):
for j in range(i + 1, len(collection)):
pairwise_values.append(metric(collection[i], collection[j]))
if symmetric:
pairwise_values.append(metric(collection[j], collection[i]))
return mean_func(pairwise_values)
def pairwise_similarity_statistics(
src_collection,
tar_collection,
metric=sim,
mean_func=amean,
symmetric=False,
):
"""Calculate the pairwise similarity statistics a collection of strings.
Calculate pairwise similarities among members of two collections,
returning the maximum, minimum, mean (according to a supplied function,
arithmetic mean, by default), and (population) standard deviation
of those similarities.
Parameters
----------
src_collection : list
A collection of terms or a string that can be split
tar_collection : list
A collection of terms or a string that can be split
metric : function
A similarity metric function
mean_func : function
A mean function that takes a list of values and returns a float
symmetric : bool
Set to True if all pairwise similarities should be calculated in both
directions
Returns
-------
tuple
The max, min, mean, and standard deviation of similarities
Raises
------
ValueError
mean_func must be a function
ValueError
metric must be a function
ValueError
src_collection is neither a string nor iterable
ValueError
tar_collection is neither a string nor iterable
Examples
--------
>>> tuple(round(_, 12) for _ in pairwise_similarity_statistics(
... ['Christopher', 'Kristof', 'Christobal'], ['Niall', 'Neal', 'Neil']))
(0.2, 0.0, 0.118614718615, 0.075070477184)
"""
if not callable(mean_func):
raise ValueError('mean_func must be a function')
if not callable(metric):
raise ValueError('metric must be a function')
if hasattr(src_collection, 'split'):
src_collection = src_collection.split()
if not hasattr(src_collection, '__iter__'):
raise ValueError('src_collection is neither a string nor iterable')
if hasattr(tar_collection, 'split'):
tar_collection = tar_collection.split()
if not hasattr(tar_collection, '__iter__'):
raise ValueError('tar_collection is neither a string nor iterable')
src_collection = list(src_collection)
tar_collection = list(tar_collection)
pairwise_values = []
for src in src_collection:
for tar in tar_collection:
pairwise_values.append(metric(src, tar))
if symmetric:
pairwise_values.append(metric(tar, src))
return (
max(pairwise_values),
min(pairwise_values),
mean_func(pairwise_values),
std(pairwise_values, mean_func, 0),
)
def stem(self, word, early_english=False):
"""Return the Porter2 (Snowball English) stem.
Parameters
----------
word : str
The word to stem
early_english : bool
Set to True in order to remove -eth & -est (2nd & 3rd person
singular verbal agreement suffixes)
Returns
-------
str
Word stem
Examples
--------
>>> stmr = Porter2()
>>> stmr.stem('reading')
'read'
>>> stmr.stem('suspension')
'suspens'
>>> stmr.stem('elusiveness')
'elus'
>>> stmr.stem('eateth', early_english=True)
'eat'
"""
# lowercase, normalize, and compose
word = normalize('NFC', text_type(word.lower()))
# replace apostrophe-like characters with U+0027, per
# http://snowball.tartarus.org/texts/apostrophe.html
word = word.replace('’', '\'')
# Exceptions 1
if word in self._exception1dict:
return self._exception1dict[word]
elif word in self._exception1set:
return word
# Return word if stem is shorter than 3
if len(word) < 3:
return word
# Remove initial ', if present.
while word and word[0] == '\'':
word = word[1:]
# Return word if stem is shorter than 2
if len(word) < 2:
return word
# Re-map vocalic Y to y (Y will be C, y will be V)
if word[0] == 'y':
word = 'Y' + word[1:]
for i in range(1, len(word)):
if word[i] == 'y' and word[i - 1] in self._vowels:
word = word[:i] + 'Y' + word[i + 1 :]
r1_start = self._sb_r1(word, self._r1_prefixes)
r2_start = self._sb_r2(word, self._r1_prefixes)
# Step 0
if word[-3:] == '\'s\'':
word = word[:-3]
elif word[-2:] == '\'s':
word = word[:-2]
elif word[-1:] == '\'':
word = word[:-1]
# Return word if stem is shorter than 3
if len(word) < 3:
return word
# Step 1a
if word[-4:] == 'sses':
word = word[:-2]
elif word[-3:] in {'ied', 'ies'}:
if len(word) > 4:
word = word[:-2]
else:
word = word[:-1]
elif word[-2:] in {'us', 'ss'}:
pass
elif word[-1] == 's':
if self._sb_has_vowel(word[:-2]):
word = word[:-1]
# Exceptions 2
if word in self._exception2set:
return word
# Step 1b
step1b_flag = False
if word[-5:] == 'eedly':
if len(word[r1_start:]) >= 5:
word = word[:-3]
elif word[-5:] == 'ingly':
if self._sb_has_vowel(word[:-5]):
word = word[:-5]
step1b_flag = True
elif word[-4:] == 'edly':
if self._sb_has_vowel(word[:-4]):
word = word[:-4]
step1b_flag = True
elif word[-3:] == 'eed':
if len(word[r1_start:]) >= 3:
word = word[:-1]
elif word[-3:] == 'ing':
if self._sb_has_vowel(word[:-3]):
word = word[:-3]
step1b_flag = True
elif word[-2:] == 'ed':
if self._sb_has_vowel(word[:-2]):
word = word[:-2]
step1b_flag = True
elif early_english:
if word[-3:] == 'est':
if self._sb_has_vowel(word[:-3]):
word = word[:-3]
step1b_flag = True
elif word[-3:] == 'eth':
if self._sb_has_vowel(word[:-3]):
word = word[:-3]
step1b_flag = True
if step1b_flag:
if word[-2:] in {'at', 'bl', 'iz'}:
word += 'e'
elif word[-2:] in self._doubles:
word = word[:-1]
elif self._sb_short_word(word, self._r1_prefixes):
word += 'e'
# Step 1c
if (
len(word) > 2
and word[-1] in {'Y', 'y'}
and word[-2] not in self._vowels
):
word = word[:-1] + 'i'
# Step 2
if word[-2] == 'a':
if word[-7:] == 'ational':
if len(word[r1_start:]) >= 7:
word = word[:-5] + 'e'
elif word[-6:] == 'tional':
if len(word[r1_start:]) >= 6:
word = word[:-2]
elif word[-2] == 'c':
if word[-4:] in {'enci', 'anci'}:
if len(word[r1_start:]) >= 4:
word = word[:-1] + 'e'
elif word[-2] == 'e':
if word[-4:] == 'izer':
if len(word[r1_start:]) >= 4:
word = word[:-1]
elif word[-2] == 'g':
if word[-3:] == 'ogi':
if (
r1_start >= 1
and len(word[r1_start:]) >= 3
and word[-4] == 'l'
):
word = word[:-1]
elif word[-2] == 'l':
if word[-6:] == 'lessli':
if len(word[r1_start:]) >= 6:
word = word[:-2]
elif word[-5:] in {'entli', 'fulli', 'ousli'}:
if len(word[r1_start:]) >= 5:
word = word[:-2]
elif word[-4:] == 'abli':
if len(word[r1_start:]) >= 4:
word = word[:-1] + 'e'
elif word[-4:] == 'alli':
if len(word[r1_start:]) >= 4:
word = word[:-2]
elif word[-3:] == 'bli':
if len(word[r1_start:]) >= 3:
word = word[:-1] + 'e'
elif word[-2:] == 'li':
if (
r1_start >= 1
and len(word[r1_start:]) >= 2
and word[-3] in self._li
):
word = word[:-2]
elif word[-2] == 'o':
if word[-7:] == 'ization':
if len(word[r1_start:]) >= 7:
word = word[:-5] + 'e'
elif word[-5:] == 'ation':
if len(word[r1_start:]) >= 5:
word = word[:-3] + 'e'
elif word[-4:] == 'ator':
if len(word[r1_start:]) >= 4:
word = word[:-2] + 'e'
elif word[-2] == 's':
if word[-7:] in {'fulness', 'ousness', 'iveness'}:
if len(word[r1_start:]) >= 7:
word = word[:-4]
elif word[-5:] == 'alism':
if len(word[r1_start:]) >= 5:
word = word[:-3]
elif word[-2] == 't':
if word[-6:] == 'biliti':
if len(word[r1_start:]) >= 6:
word = word[:-5] + 'le'
elif word[-5:] == 'aliti':
if len(word[r1_start:]) >= 5:
word = word[:-3]
elif word[-5:] == 'iviti':
if len(word[r1_start:]) >= 5:
word = word[:-3] + 'e'
# Step 3
if word[-7:] == 'ational':
if len(word[r1_start:]) >= 7:
word = word[:-5] + 'e'
elif word[-6:] == 'tional':
if len(word[r1_start:]) >= 6:
word = word[:-2]
elif word[-5:] in {'alize', 'icate', 'iciti'}:
if len(word[r1_start:]) >= 5:
word = word[:-3]
elif word[-5:] == 'ative':
if len(word[r2_start:]) >= 5:
word = word[:-5]
elif word[-4:] == 'ical':
if len(word[r1_start:]) >= 4:
word = word[:-2]
elif word[-4:] == 'ness':
if len(word[r1_start:]) >= 4:
word = word[:-4]
elif word[-3:] == 'ful':
if len(word[r1_start:]) >= 3:
word = word[:-3]
# Step 4
for suffix in (
'ement',
'ance',
'ence',
'able',
'ible',
'ment',
'ant',
'ent',
'ism',
'ate',
'iti',
'ous',
'ive',
'ize',
'al',
'er',
'ic',
):
if word[-len(suffix) :] == suffix:
if len(word[r2_start:]) >= len(suffix):
word = word[: -len(suffix)]
break
else:
if word[-3:] == 'ion':
if (
len(word[r2_start:]) >= 3
and len(word) >= 4
and word[-4] in tuple('st')
):
word = word[:-3]
# Step 5
if word[-1] == 'e':
if len(word[r2_start:]) >= 1 or (
len(word[r1_start:]) >= 1
and not self._sb_ends_in_short_syllable(word[:-1])
):
word = word[:-1]
elif word[-1] == 'l':
if len(word[r2_start:]) >= 1 and word[-2] == 'l':
word = word[:-1]
# Change 'Y' back to 'y' if it survived stemming
for i in range(0, len(word)):
if word[i] == 'Y':
word = word[:i] + 'y' + word[i + 1 :]
return word
def synoname_toolcode(lname, fname='', qual='', normalize=0):
"""Build the Synoname toolcode.
This is a wrapper for :py:meth:`SynonameToolcode.fingerprint`.
Parameters
----------
lname : str
Last name
fname : str
First name (can be blank)
qual : str
Qualifier
normalize : int
Normalization mode (0, 1, or 2)
Returns
-------
tuple
The transformed names and the synoname toolcode
Examples
--------
>>> synoname_toolcode('hat')
('hat', '', '0000000003$$h')
>>> synoname_toolcode('niall')
('niall', '', '0000000005$$n')
>>> synoname_toolcode('colin')
('colin', '', '0000000005$$c')
>>> synoname_toolcode('atcg')
('atcg', '', '0000000004$$a')
>>> synoname_toolcode('entreatment')
('entreatment', '', '0000000011$$e')
>>> synoname_toolcode('Ste.-Marie', 'Count John II', normalize=2)
('ste.-marie ii', 'count john', '0200491310$015b049a127c$smcji')
>>> synoname_toolcode('Michelangelo IV', '', 'Workshop of')
('michelangelo iv', '', '3000550015$055b$mi')
"""
return SynonameToolcode().fingerprint(lname, fname, qual, normalize)
def fingerprint(self, lname, fname='', qual='', normalize=0):
"""Build the Synoname toolcode.
Parameters
----------
lname : str
Last name
fname : str
First name (can be blank)
qual : str
Qualifier
normalize : int
Normalization mode (0, 1, or 2)
Returns
-------
tuple
The transformed names and the synoname toolcode
Examples
--------
>>> st = SynonameToolcode()
>>> st.fingerprint('hat')
('hat', '', '0000000003$$h')
>>> st.fingerprint('niall')
('niall', '', '0000000005$$n')
>>> st.fingerprint('colin')
('colin', '', '0000000005$$c')
>>> st.fingerprint('atcg')
('atcg', '', '0000000004$$a')
>>> st.fingerprint('entreatment')
('entreatment', '', '0000000011$$e')
>>> st.fingerprint('Ste.-Marie', 'Count John II', normalize=2)
('ste.-marie ii', 'count john', '0200491310$015b049a127c$smcji')
>>> st.fingerprint('Michelangelo IV', '', 'Workshop of')
('michelangelo iv', '', '3000550015$055b$mi')
"""
lname = lname.lower()
fname = fname.lower()
qual = qual.lower()
# Start with the basic code
toolcode = ['0', '0', '0', '000', '00', '00', '$', '', '$', '']
full_name = ' '.join((lname, fname))
if qual in self._qual_3:
toolcode[0] = '3'
elif qual in self._qual_2:
toolcode[0] = '2'
elif qual in self._qual_1:
toolcode[0] = '1'
# Fill field 1 (punctuation)
if '.' in full_name:
toolcode[1] = '2'
else:
for punct in ',-/:;"&\'()!{|}?$%*+<=>[\\]^_`~':
if punct in full_name:
toolcode[1] = '1'
break
elderyounger = '' # save elder/younger for possible movement later
for gen in self._gen_1:
if gen in full_name:
toolcode[2] = '1'
elderyounger = gen
break
else:
for gen in self._gen_2:
if gen in full_name:
toolcode[2] = '2'
elderyounger = gen
break
# do comma flip
if normalize:
comma = lname.find(',')
if comma != -1:
lname_end = lname[comma + 1 :]
while lname_end[0] in {' ', ','}:
lname_end = lname_end[1:]
fname = lname_end + ' ' + fname
lname = lname[:comma].strip()
# do elder/younger move
if normalize == 2 and elderyounger:
elderyounger_loc = fname.find(elderyounger)
if elderyounger_loc != -1:
lname = ' '.join((lname, elderyounger.strip()))
fname = ' '.join(
(
fname[:elderyounger_loc].strip(),
fname[elderyounger_loc + len(elderyounger) :],
)
).strip()
toolcode[4] = '{:02d}'.format(len(fname))
toolcode[5] = '{:02d}'.format(len(lname))
# strip punctuation
for char in ',/:;"&()!{|}?$%*+<=>[\\]^_`~':
full_name = full_name.replace(char, '')
for pos, char in enumerate(full_name):
if char == '-' and full_name[pos - 1 : pos + 2] != 'b-g':
full_name = full_name[:pos] + ' ' + full_name[pos + 1 :]
# Fill field 9 (search range)
for letter in [_[0] for _ in full_name.split()]:
if letter not in toolcode[9]:
toolcode[9] += letter
if len(toolcode[9]) == 15:
break
def roman_check(numeral, fname, lname):
"""Move Roman numerals from first name to last.
Parameters
----------
numeral : str
Roman numeral
fname : str
First name
lname : str
Last name
Returns
-------
tuple
First and last names with Roman numeral moved
"""
loc = fname.find(numeral)
if fname and (
loc != -1
and (
(len(fname[loc:]) == len(numeral))
or fname[loc + len(numeral)] in {' ', ','}
)
):
lname = ' '.join((lname, numeral))
fname = ' '.join(
(
fname[:loc].strip(),
fname[loc + len(numeral) :].lstrip(' ,'),
)
)
return fname.strip(), lname.strip()
# Fill fields 7 (specials) and 3 (roman numerals)
for num, special in enumerate(self._synoname_special_table):
roman, match, extra, method = special
if method & self._method_dict['end']:
match_context = ' ' + match
loc = full_name.find(match_context)
if (len(full_name) > len(match_context)) and (
loc == len(full_name) - len(match_context)
):
if roman:
if not any(
abbr in fname for abbr in ('i.', 'v.', 'x.')
):
full_name = full_name[:loc]
toolcode[7] += '{:03d}'.format(num) + 'a'
if toolcode[3] == '000':
toolcode[3] = '{:03d}'.format(num)
if normalize == 2:
fname, lname = roman_check(match, fname, lname)
else:
full_name = full_name[:loc]
toolcode[7] += '{:03d}'.format(num) + 'a'
if method & self._method_dict['middle']:
match_context = ' ' + match + ' '
loc = 0
while loc != -1:
loc = full_name.find(match_context, loc + 1)
if loc > 0:
if roman:
if not any(
abbr in fname for abbr in ('i.', 'v.', 'x.')
):
full_name = (
full_name[:loc]
+ full_name[loc + len(match) + 1 :]
)
toolcode[7] += '{:03d}'.format(num) + 'b'
if toolcode[3] == '000':
toolcode[3] = '{:03d}'.format(num)
if normalize == 2:
fname, lname = roman_check(
match, fname, lname
)
else:
full_name = (
full_name[:loc]
+ full_name[loc + len(match) + 1 :]
)
toolcode[7] += '{:03d}'.format(num) + 'b'
if method & self._method_dict['beginning']:
match_context = match + ' '
loc = full_name.find(match_context)
if loc == 0:
full_name = full_name[len(match) + 1 :]
toolcode[7] += '{:03d}'.format(num) + 'c'
if method & self._method_dict['beginning_no_space']:
loc = full_name.find(match)
if loc == 0:
toolcode[7] += '{:03d}'.format(num) + 'd'
if full_name[: len(match)] not in toolcode[9]:
toolcode[9] += full_name[: len(match)]
if extra:
loc = full_name.find(extra)
if loc != -1:
toolcode[7] += '{:03d}'.format(num) + 'X'
# Since extras are unique, we only look for each of them once, and
# since they include characters otherwise impossible in this field,
# the following check could never have been false.
# if full_name[loc:loc+len(extra)] not in toolcode[9]:
toolcode[9] += full_name[loc : loc + len(match)]
return lname, fname, ''.join(toolcode)
def uealite(
word,
max_word_length=20,
max_acro_length=8,
return_rule_no=False,
var='standard',
):
"""Return UEA-Lite stem.
This is a wrapper for :py:meth:`UEALite.stem`.
Parameters
----------
word : str
The word to stem
max_word_length : int
The maximum word length allowed
max_acro_length : int
The maximum acronym length allowed
return_rule_no : bool
If True, returns the stem along with rule number
var : str
Variant rules to use:
- ``Adams`` to use Jason Adams' rules
- ``Perl`` to use the original Perl rules
Returns
-------
str or (str, int)
Word stem
Examples
--------
>>> uealite('readings')
'read'
>>> uealite('insulted')
'insult'
>>> uealite('cussed')
'cuss'
>>> uealite('fancies')
'fancy'
>>> uealite('eroded')
'erode'
"""
return UEALite().stem(
word, max_word_length, max_acro_length, return_rule_no, var
)
def stem(
self,
word,
max_word_length=20,
max_acro_length=8,
return_rule_no=False,
var='standard',
):
"""Return UEA-Lite stem.
Parameters
----------
word : str
The word to stem
max_word_length : int
The maximum word length allowed
max_acro_length : int
The maximum acronym length allowed
return_rule_no : bool
If True, returns the stem along with rule number
var : str
Variant rules to use:
- ``Adams`` to use Jason Adams' rules
- ``Perl`` to use the original Perl rules
Returns
-------
str or (str, int)
Word stem
Examples
--------
>>> stmr = UEALite()
>>> stmr.stem('readings')
'read'
>>> stmr.stem('insulted')
'insult'
>>> stmr.stem('cussed')
'cuss'
>>> stmr.stem('fancies')
'fancy'
>>> stmr.stem('eroded')
'erode'
"""
def _stem_with_duplicate_character_check(word, del_len):
if word[-1] == 's':
del_len += 1
stemmed_word = word[:-del_len]
if re_match(r'.*(\w)\1$', stemmed_word):
stemmed_word = stemmed_word[:-1]
return stemmed_word
def _stem(word):
stemmed_word = word
rule_no = 0
if not word:
return word, 0
if word in self._problem_words or (
word == 'menses' and var == 'Adams'
):
return word, 90
if max_word_length and len(word) > max_word_length:
return word, 95
if "'" in word:
if word[-2:] in {"'s", "'S"}:
stemmed_word = word[:-2]
if word[-1:] == "'":
stemmed_word = word[:-1]
stemmed_word = stemmed_word.replace("n't", 'not')
stemmed_word = stemmed_word.replace("'ve", 'have')
stemmed_word = stemmed_word.replace("'re", 'are')
stemmed_word = stemmed_word.replace("'m", 'am')
return stemmed_word, 94
if word.isdigit():
return word, 90.3
else:
hyphen = word.find('-')
if len(word) > hyphen > 0:
if (
word[:hyphen].isalpha()
and word[hyphen + 1 :].isalpha()
):
return word, 90.2
else:
return word, 90.1
elif '_' in word:
return word, 90
elif word[-1] == 's' and word[:-1].isupper():
if var == 'Adams' and len(word) - 1 > max_acro_length:
return word, 96
return word[:-1], 91.1
elif word.isupper():
if var == 'Adams' and len(word) > max_acro_length:
return word, 96
return word, 91
elif re_match(r'^.*[A-Z].*[A-Z].*$', word):
return word, 92
elif word[0].isupper():
return word, 93
elif var == 'Adams' and re_match(
r'^[a-z](|[rl])(ing|ed)$', word
):
return word, 97
for n in range(7, 1, -1):
if word[-n:] in self._rules[var][n]:
rule_no, del_len, add_str = self._rules[var][n][word[-n:]]
if del_len:
stemmed_word = word[:-del_len]
else:
stemmed_word = word
if add_str:
stemmed_word += add_str
break
if not rule_no:
if re_match(r'.*\w\wings?$', word): # rule 58
stemmed_word = _stem_with_duplicate_character_check(
word, 3
)
rule_no = 58
elif re_match(r'.*\w\weds?$', word): # rule 62
stemmed_word = _stem_with_duplicate_character_check(
word, 2
)
rule_no = 62
elif word[-1] == 's': # rule 68
stemmed_word = word[:-1]
rule_no = 68
return stemmed_word, rule_no
stem, rule_no = _stem(word)
if return_rule_no:
return stem, rule_no
return stem
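For intuition about the duplicate-character check: assuming a word such as 'running' falls through the suffix table to rule 58 (as 'readings' evidently does in the doctests above), the helper strips the suffix and then collapses a doubled final consonant:

# 'running' -> strip 'ing' (del_len=3) -> 'runn' -> r'.*(\w)\1$' matches
#           -> drop the duplicated letter -> 'run'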
def _sb_r1(self, term, r1_prefixes=None):
"""Return the R1 region, as defined in the Porter2 specification.
Parameters
----------
term : str
The term to examine
r1_prefixes : set
Prefixes to consider
Returns
-------
int
Length of the R1 region
"""
vowel_found = False
if hasattr(r1_prefixes, '__iter__'):
for prefix in r1_prefixes:
if term[: len(prefix)] == prefix:
return len(prefix)
for i in range(len(term)):
if not vowel_found and term[i] in self._vowels:
vowel_found = True
elif vowel_found and term[i] not in self._vowels:
return i + 1
return len(term)
|
def _sb_r2(self, term, r1_prefixes=None):
"""Return the R2 region, as defined in the Porter2 specification.
Parameters
----------
term : str
The term to examine
r1_prefixes : set
Prefixes to consider
Returns
-------
int
Length of the R2 region
"""
r1_start = self._sb_r1(term, r1_prefixes)
return r1_start + self._sb_r1(term[r1_start:])
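# Worked example (the Porter2 specification's own illustration): for
# 'beautiful', the first non-vowel following a vowel is 't' at index 4, so
# _sb_r1 returns 5 and R1 is 'iful'; rescanning 'iful' returns 2 more, so
# _sb_r2 returns 7 and R2 is 'ul'.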
|
def _sb_ends_in_short_syllable(self, term):
"""Return True iff term ends in a short syllable.
(...according to the Porter2 specification.)
NB: This is akin to the CVC test from the Porter stemmer. The
description is unfortunately poor/ambiguous.
Parameters
----------
term : str
The term to examine
Returns
-------
bool
True iff term ends in a short syllable
"""
if not term:
return False
if len(term) == 2:
if term[-2] in self._vowels and term[-1] not in self._vowels:
return True
elif len(term) >= 3:
if (
term[-3] not in self._vowels
and term[-2] in self._vowels
and term[-1] in self._codanonvowels
):
return True
return False
|
def _sb_short_word(self, term, r1_prefixes=None):
"""Return True iff term is a short word.
(...according to the Porter2 specification.)
Parameters
----------
term : str
The term to examine
r1_prefixes : set
Prefixes to consider
Returns
-------
bool
True iff term is a short word
"""
if self._sb_r1(term, r1_prefixes) == len(
term
) and self._sb_ends_in_short_syllable(term):
return True
return False
|
def _sb_has_vowel(self, term):
"""Return Porter helper function _sb_has_vowel value.
Parameters
----------
term : str
The term to examine
Returns
-------
bool
True iff a vowel exists in the term (as defined in the Porter
stemmer definition)
"""
for letter in term:
if letter in self._vowels:
return True
return False
|
def encode(self, word, max_length=8):
"""Return the eudex phonetic hash of a word.
Parameters
----------
word : str
The word to transform
max_length : int
The length in bits of the code returned (default 8)
Returns
-------
int
The eudex hash
Examples
--------
>>> pe = Eudex()
>>> pe.encode('Colin')
432345564238053650
>>> pe.encode('Christopher')
433648490138894409
>>> pe.encode('Niall')
648518346341351840
>>> pe.encode('Smith')
720575940412906756
>>> pe.encode('Schmidt')
720589151732307997
"""
# Lowercase input & filter unknown characters
word = ''.join(
char for char in word.lower() if char in self._initial_phones
)
if not word:
word = '÷'
# Perform initial eudex coding of each character
values = [self._initial_phones[word[0]]]
values += [self._trailing_phones[char] for char in word[1:]]
# Right-shift by one to determine if second instance should be skipped
shifted_values = [_ >> 1 for _ in values]
condensed_values = [values[0]]
for n in range(1, len(shifted_values)):
if shifted_values[n] != shifted_values[n - 1]:
condensed_values.append(values[n])
# Add padding after first character & trim beyond max_length
values = (
[condensed_values[0]]
+ [0] * max(0, max_length - len(condensed_values))
+ condensed_values[1:max_length]
)
# Combine individual character values into eudex hash
hash_value = 0
for val in values:
hash_value = (hash_value << 8) | val
return hash_value
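# Hedged sketch: eudex hashes are meant to be compared bitwise, so a natural
# (illustrative, not source-defined) distance is the Hamming weight of the
# XOR of two hashes:
#
# def _eudex_hamming_sketch(hash1, hash2):
#     """Count the differing bits between two eudex hashes."""
#     return bin(hash1 ^ hash2).count('1')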
|
def roger_root(word, max_length=5, zero_pad=True):
"""Return the Roger Root code for a word.
This is a wrapper for :py:meth:`RogerRoot.encode`.
Parameters
----------
word : str
The word to transform
max_length : int
The maximum length (default 5) of the code to return
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length string
Returns
-------
str
The Roger Root code
Examples
--------
>>> roger_root('Christopher')
'06401'
>>> roger_root('Niall')
'02500'
>>> roger_root('Smith')
'00310'
>>> roger_root('Schmidt')
'06310'
"""
return RogerRoot().encode(word, max_length, zero_pad)
|
def encode(self, word, max_length=5, zero_pad=True):
"""Return the Roger Root code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The maximum length (default 5) of the code to return
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length
string
Returns
-------
str
The Roger Root code
Examples
--------
>>> pe = RogerRoot()
>>> pe.encode('Christopher')
'06401'
>>> pe.encode('Niall')
'02500'
>>> pe.encode('Smith')
'00310'
>>> pe.encode('Schmidt')
'06310'
"""
# uppercase, normalize, decompose, and filter non-A-Z out
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.replace('ß', 'SS')
word = ''.join(c for c in word if c in self._uc_set)
code = ''
pos = 0
# Do first digit(s) first
for num in range(4, 0, -1):
if word[:num] in self._init_patterns[num]:
code = self._init_patterns[num][word[:num]]
pos += num
break
# Then code subsequent digits
while pos < len(word):
for num in range(4, 0, -1): # pragma: no branch
if word[pos : pos + num] in self._med_patterns[num]:
code += self._med_patterns[num][word[pos : pos + num]]
pos += num
break
code = self._delete_consecutive_repeats(code)
code = code.replace('*', '')
if zero_pad:
code += '0' * max_length
return code[:max_length]
|
def encode(self, word):
"""Return the Kölner Phonetik (numeric output) code for a word.
While the output code is numeric, it is still a str because 0s can lead
the code.
Parameters
----------
word : str
The word to transform
Returns
-------
str
The Kölner Phonetik value as a numeric string
Example
-------
>>> pe = Koelner()
>>> pe.encode('Christopher')
'478237'
>>> pe.encode('Niall')
'65'
>>> pe.encode('Smith')
'862'
>>> pe.encode('Schmidt')
'862'
>>> pe.encode('Müller')
'657'
>>> pe.encode('Zimmermann')
'86766'
"""
def _after(word, pos, letters):
"""Return True if word[pos] follows one of the supplied letters.
Parameters
----------
word : str
The word to check
pos : int
Position within word to check
letters : str
Letters to confirm precede word[pos]
Returns
-------
bool
True if word[pos] follows a value in letters
"""
return pos > 0 and word[pos - 1] in letters
def _before(word, pos, letters):
"""Return True if word[pos] precedes one of the supplied letters.
Parameters
----------
word : str
The word to check
pos : int
Position within word to check
letters : str
Letters to confirm follow word[pos]
Returns
-------
bool
True if word[pos] precedes a value in letters
"""
return pos + 1 < len(word) and word[pos + 1] in letters
sdx = ''
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.replace('ß', 'SS')
word = word.replace('Ä', 'AE')
word = word.replace('Ö', 'OE')
word = word.replace('Ü', 'UE')
word = ''.join(c for c in word if c in self._uc_set)
# Nothing to convert, return base case
if not word:
return sdx
for i in range(len(word)):
if word[i] in self._uc_v_set:
sdx += '0'
elif word[i] == 'B':
sdx += '1'
elif word[i] == 'P':
if _before(word, i, {'H'}):
sdx += '3'
else:
sdx += '1'
elif word[i] in {'D', 'T'}:
if _before(word, i, {'C', 'S', 'Z'}):
sdx += '8'
else:
sdx += '2'
elif word[i] in {'F', 'V', 'W'}:
sdx += '3'
elif word[i] in {'G', 'K', 'Q'}:
sdx += '4'
elif word[i] == 'C':
if _after(word, i, {'S', 'Z'}):
sdx += '8'
elif i == 0:
if _before(
word, i, {'A', 'H', 'K', 'L', 'O', 'Q', 'R', 'U', 'X'}
):
sdx += '4'
else:
sdx += '8'
elif _before(word, i, {'A', 'H', 'K', 'O', 'Q', 'U', 'X'}):
sdx += '4'
else:
sdx += '8'
elif word[i] == 'X':
if _after(word, i, {'C', 'K', 'Q'}):
sdx += '8'
else:
sdx += '48'
elif word[i] == 'L':
sdx += '5'
elif word[i] in {'M', 'N'}:
sdx += '6'
elif word[i] == 'R':
sdx += '7'
elif word[i] in {'S', 'Z'}:
sdx += '8'
sdx = self._delete_consecutive_repeats(sdx)
if sdx:
sdx = sdx[:1] + sdx[1:].replace('0', '')
return sdx
|
def _to_alpha(self, num):
"""Convert a Kölner Phonetik code from numeric to alphabetic.
Parameters
----------
num : str or int
A numeric Kölner Phonetik representation
Returns
-------
str
An alphabetic representation of the same word
Examples
--------
>>> pe = Koelner()
>>> pe._to_alpha('862')
'SNT'
>>> pe._to_alpha('657')
'NLR'
>>> pe._to_alpha('86766')
'SNRNN'
"""
num = ''.join(c for c in text_type(num) if c in self._num_set)
return num.translate(self._num_trans)
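# The numeric and alphabetic forms compose directly; per the docstring
# examples above:
#
#     >>> pe = Koelner()
#     >>> pe._to_alpha(pe.encode('Müller'))
#     'NLR'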
|
def main(argv):
"""Read input file and write to output.
Parameters
----------
argv : list
Arguments to the script
"""
first_col = 3
last_col = -1
def print_usage():
"""Print usage statement."""
sys.stdout.write(
'features_csv_to_dict.py -i <inputfile> ' + '[-o <outputfile>]\n'
)
sys.exit(2)
def binarize(num):
"""Replace 0, -1, 1, 2 with 00, 10, 01, 11.
Parameters
----------
num : str
The number to binarize
Returns
-------
str
A binarized number
"""
if num == '0': # 0
return '00'
elif num == '-1': # -
return '10'
elif num == '1': # +
return '01'
elif num == '2': # ± (segmental) or copy from base (non-segmental)
return '11'
def init_termdicts():
"""Initialize the terms dict.
Returns
-------
(dict, dict)
Term & feature mask dictionaries
"""
ifile = codecs.open('features_terms.csv', 'r', 'utf-8')
feature_mask = {}
keyline = ifile.readline().strip().split(',')[first_col:last_col]
mag = len(keyline)
for i in range(len(keyline)):
features = '0b' + ('00' * i) + '11' + ('00' * (mag - i - 1))
feature_mask[keyline[i]] = int(features, 2)
termdict = {}
for line in ifile:
line = line.strip().rstrip(',')
if '#' in line:
line = line[: line.find('#')].strip()
if line:
line = line.split(',')
term = line[last_col]
features = '0b' + ''.join(
[binarize(val) for val in line[first_col:last_col]]
)
termdict[term] = int(features, 2)
return termdict, feature_mask
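# Bit-layout note (illustrative arithmetic, derived from the code above):
# each feature occupies two bits, so for a key line of mag features the
# mask for feature i is '11' shifted into position. E.g., with mag=3 and
# i=1 the string built above is '0b001100', i.e. the integer 12.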
def check_terms(sym, features, name, termdict):
"""Check terms.
Check each term of the phone name to confirm that it matches
the expected features implied by that term.
Parameters
----------
sym : str
Symbol to check
features : int
Phone features
name : str
Phone name
termdict : dict
Dictionary of terms
"""
if '#' in name:
name = name[: name.find('#')].strip()
for term in name.split():
if term in termdict:
if termdict[term] & features != termdict[term]:
sys.stdout.write(
'Feature mismatch for term "'
+ term
+ '" in '
+ sym
+ '\n'
)
else:
sys.stdout.write(
'Unknown term "'
+ term
+ '" in '
+ name
+ ' : '
+ sym
+ '\n'
)
def check_entailments(sym, features, feature_mask):
"""Check entailments.
Check for necessary feature assignments (entailments)
For example, [+round] necessitates [+labial].
Parameters
----------
sym : str
Symbol to check
features : int
Phone features
feature_mask : dict
The feature mask
"""
entailments = {
'+labial': ('±round', '±protruded', '±compressed', '±labiodental'),
'-labial': ('0round', '0protruded', '0compressed', '0labiodental'),
'+coronal': ('±anterior', '±distributed'),
'-coronal': ('0anterior', '0distributed'),
'+dorsal': ('±high', '±low', '±front', '±back', '±tense'),
'-dorsal': ('0high', '0low', '0front', '0back', '0tense'),
'+pharyngeal': ('±atr', '±rtr'),
'-pharyngeal': ('0atr', '0rtr'),
'+protruded': ('+labial', '+round', '-compressed'),
'+compressed': ('+labial', '+round', '-protruded'),
'+glottalic_suction': ('-velaric_suction',),
'+velaric_suction': ('-glottalic_suction',),
}
for feature in entailments:
fname = feature[1:]
if feature[0] == '+':
fm = (feature_mask[fname] >> 1) & feature_mask[fname]
else:
fm = (feature_mask[fname] << 1) & feature_mask[fname]
if (features & fm) == fm:
for ent in entailments[feature]:
ename = ent[1:]
if ent[0] == '+':
efm = (feature_mask[ename] >> 1) & feature_mask[ename]
elif ent[0] == '-':
efm = (feature_mask[ename] << 1) & feature_mask[ename]
elif ent[0] == '0':
efm = 0
elif ent[0] == '±':
efm = feature_mask[ename]
if ent[0] == '±':
if (features & efm) == 0:
sys.stdout.write(
'Incorrect entailment for '
+ sym
+ ' for feature '
+ fname
+ ' and entailment '
+ ename
)
else:
if (features & efm) != efm:
sys.stdout.write(
'Incorrect entailment for '
+ sym
+ ' for feature '
+ fname
+ ' and entailment '
+ ename
)
checkdict = {} # a mapping of symbol to feature
checkset_s = set() # a set of the symbols seen
checkset_f = set() # a set of the feature values seen
termdict, feature_mask = init_termdicts()
ifile = ''
ofile = ''
try:
opts = getopt.getopt(argv, 'hi:o:', ['ifile=', 'ofile='])[0]
except getopt.GetoptError:
print_usage()
for opt, arg in opts:
if opt == '-h':
print_usage()
elif opt in ('-i', '--ifile'):
ifile = codecs.open(arg, 'r', 'utf-8')
elif opt in ('-o', '--ofile'):
ofile = codecs.open(arg, 'w', 'utf-8')
if not ifile:
print_usage()
oline = 'PHONETIC_FEATURES = {'
if not ofile:
ofile = sys.stdout
ofile.write(oline + '\n')
keyline = ifile.readline().strip().split(',')[first_col:last_col]
for line in ifile:
line = line.strip().rstrip(',')
if line.startswith('####'):
break
line = unicodedata.normalize('NFC', line)
if not line or line.startswith('#'):
oline = ' ' + line
else:
line = line.strip().split(',')
if '#' in line:
    # line is a list at this point, so use list.index rather than str.find
    line = line[: line.index('#')]
symbol = line[0]
variant = int(line[1])
segmental = bool(line[2])
features = '0b' + ''.join(
[binarize(val) for val in line[first_col:last_col]]
)
name = line[-1].strip()
if not segmental:
features = '-' + features
featint = int(features, 2)
check_terms(symbol, featint, name, termdict)
check_entailments(symbol, featint, feature_mask)
if symbol in checkset_s:
sys.stdout.write(
'Symbol ' + symbol + ' appears twice in CSV.\n'
)
else:
checkset_s.add(symbol)
if variant < 2:
if featint in checkset_f:
sys.stdout.write(
'Feature set '
+ str(featint)
+ ' appears in CSV for two primary IPA '
+ 'symbols: '
+ symbol
+ ' and '
+ checkdict[featint]
)
else:
checkdict[featint] = symbol
checkset_f.add(featint)
if variant < 5:
oline = ' \'{}\': {},'.format(
symbol, featint
)
else:
oline = ''
if oline:
ofile.write(oline + '\n')
ofile.write(' }\n\nFEATURE_MASK = {')
mag = len(keyline)
for i in range(len(keyline)):
features = int('0b' + ('00' * i) + '11' + ('00' * (mag - i - 1)), 2)
oline = ' \'{}\': {},'.format(keyline[i], features)
ofile.write(oline + '\n')
ofile.write(' }\n')
|
def lein(word, max_length=4, zero_pad=True):
"""Return the Lein code for a word.
This is a wrapper for :py:meth:`Lein.encode`.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 4)
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length string
Returns
-------
str
The Lein code
Examples
--------
>>> lein('Christopher')
'C351'
>>> lein('Niall')
'N300'
>>> lein('Smith')
'S210'
>>> lein('Schmidt')
'S521'
"""
return Lein().encode(word, max_length, zero_pad)
|
def encode(self, word, max_length=4, zero_pad=True):
"""Return the Lein code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 4)
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length
string
Returns
-------
str
The Lein code
Examples
--------
>>> pe = Lein()
>>> pe.encode('Christopher')
'C351'
>>> pe.encode('Niall')
'N300'
>>> pe.encode('Smith')
'S210'
>>> pe.encode('Schmidt')
'S521'
"""
# uppercase, normalize, decompose, and filter non-A-Z out
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.replace('ß', 'SS')
word = ''.join(c for c in word if c in self._uc_set)
code = word[:1] # Rule 1
word = word[1:].translate(self._del_trans) # Rule 2
word = self._delete_consecutive_repeats(word) # Rule 3
code += word.translate(self._trans) # Rule 4
if zero_pad:
code += '0' * max_length # Rule 4
return code[:max_length]
|
def _get_qgrams(self, src, tar, qval=0, skip=0):
"""Return the Q-Grams in src & tar.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
skip : int
The number of characters to skip (only works when src and tar are
strings)
Returns
-------
tuple of Counters
Q-Grams
Examples
--------
>>> pe = _TokenDistance()
>>> pe._get_qgrams('AT', 'TT', qval=2)
(QGrams({'$A': 1, 'AT': 1, 'T#': 1}),
QGrams({'$T': 1, 'TT': 1, 'T#': 1}))
"""
if isinstance(src, Counter) and isinstance(tar, Counter):
return src, tar
if qval > 0:
return QGrams(src, qval, '$#', skip), QGrams(tar, qval, '$#', skip)
return Counter(src.strip().split()), Counter(tar.strip().split())
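# Behavioral note (derived from the code above): when qval is 0, no q-gram
# decomposition happens; each string is split on whitespace and compared as
# a bag of tokens, e.g. Counter('the cat'.strip().split()) ==
# Counter({'the': 1, 'cat': 1}).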
|
def rle_encode(text, use_bwt=True):
r"""Perform encoding of run-length-encoding (RLE).
This is a wrapper for :py:meth:`RLE.encode`.
Parameters
----------
text : str
A text string to encode
use_bwt : bool
Indicates whether to perform BWT encoding before RLE encoding
Returns
-------
str
Word encoded by RLE
Examples
--------
>>> rle_encode('align')
'n\x00ilag'
>>> rle_encode('align', use_bwt=False)
'align'
>>> rle_encode('banana')
'annb\x00aa'
>>> rle_encode('banana', use_bwt=False)
'banana'
>>> rle_encode('aaabaabababa')
'ab\x00abbab5a'
>>> rle_encode('aaabaabababa', False)
'3abaabababa'
"""
if use_bwt:
text = BWT().encode(text)
return RLE().encode(text)
|
def rle_decode(text, use_bwt=True):
r"""Perform decoding of run-length-encoding (RLE).
This is a wrapper for :py:meth:`RLE.decode`.
Parameters
----------
text : str
A text string to decode
use_bwt : bool
Indicates whether to perform BWT decoding after RLE decoding
Returns
-------
str
Word decoded by RLE
Examples
--------
>>> rle_decode('n\x00ilag')
'align'
>>> rle_decode('align', use_bwt=False)
'align'
>>> rle_decode('annb\x00aa')
'banana'
>>> rle_decode('banana', use_bwt=False)
'banana'
>>> rle_decode('ab\x00abbab5a')
'aaabaabababa'
>>> rle_decode('3abaabababa', False)
'aaabaabababa'
"""
text = RLE().decode(text)
if use_bwt:
text = BWT().decode(text)
return text
|
def encode(self, text):
r"""Perform encoding of run-length-encoding (RLE).
Parameters
----------
text : str
A text string to encode
Returns
-------
str
Word encoded by RLE
Examples
--------
>>> rle = RLE()
>>> bwt = BWT()
>>> rle.encode(bwt.encode('align'))
'n\x00ilag'
>>> rle.encode('align')
'align'
>>> rle.encode(bwt.encode('banana'))
'annb\x00aa'
>>> rle.encode('banana')
'banana'
>>> rle.encode(bwt.encode('aaabaabababa'))
'ab\x00abbab5a'
>>> rle.encode('aaabaabababa')
'3abaabababa'
"""
if text:
text = ((len(list(g)), k) for k, g in groupby(text))
text = (
(str(n) + k if n > 2 else (k if n == 1 else 2 * k))
for n, k in text
)
return ''.join(text)
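# Worked trace (derived from the code above): groupby('aaabaabababa') yields
# (3,'a'),(1,'b'),(2,'a'),(1,'b'),(1,'a'),(1,'b'),(1,'a'),(1,'b'),(1,'a');
# runs of 3+ become str(n)+k and runs of 2 are written out literally, giving
# '3a'+'b'+'aa'+'b'+'a'+'b'+'a'+'b'+'a' == '3abaabababa', as in the doctest.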
|
def decode(self, text):
r"""Perform decoding of run-length-encoding (RLE).
Parameters
----------
text : str
A text string to decode
Returns
-------
str
Word decoded by RLE
Examples
--------
>>> rle = RLE()
>>> bwt = BWT()
>>> bwt.decode(rle.decode('n\x00ilag'))
'align'
>>> rle.decode('align')
'align'
>>> bwt.decode(rle.decode('annb\x00aa'))
'banana'
>>> rle.decode('banana')
'banana'
>>> bwt.decode(rle.decode('ab\x00abbab5a'))
'aaabaabababa'
>>> rle.decode('3abaabababa')
'aaabaabababa'
"""
mult = ''
decoded = []
for letter in list(text):
if not letter.isdigit():
if mult:
decoded.append(int(mult) * letter)
mult = ''
else:
decoded.append(letter)
else:
mult += letter
text = ''.join(decoded)
return text
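# Worked trace (derived from the code above): decoding '3abaabababa', the
# digit '3' accumulates in mult, then 'a' emits 'aaa'; every subsequent
# letter sees an empty mult and is appended as-is, reproducing
# 'aaabaabababa'.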
|
def encode(self, word, max_length=4):
"""Return the SoundD code.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 4)
Returns
-------
str
The SoundD code
Examples
--------
>>> pe = SoundD()
>>> pe.encode('Gough')
'2000'
>>> pe.encode('pneuma')
'5500'
>>> pe.encode('knight')
'5300'
>>> pe.encode('trice')
'3620'
>>> pe.encode('judge')
'2200'
"""
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.replace('ß', 'SS')
word = ''.join(c for c in word if c in self._uc_set)
if word[:2] in {'KN', 'GN', 'PN', 'AC', 'WR'}:
word = word[1:]
elif word[:1] == 'X':
word = 'S' + word[1:]
elif word[:2] == 'WH':
word = 'W' + word[2:]
word = (
word.replace('DGE', '20').replace('DGI', '20').replace('GH', '0')
)
word = word.translate(self._trans)
word = self._delete_consecutive_repeats(word)
word = word.replace('0', '')
if max_length != -1:
if len(word) < max_length:
word += '0' * (max_length - len(word))
else:
word = word[:max_length]
return word
|
def dist(self, src, tar):
"""Return the NCD between two strings using LZMA compression.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Compression distance
Raises
------
ValueError
Install the PylibLZMA module in order to use LZMA
Examples
--------
>>> cmp = NCDlzma()
>>> cmp.dist('cat', 'hat')
0.08695652173913043
>>> cmp.dist('Niall', 'Neil')
0.16
>>> cmp.dist('aluminum', 'Catalan')
0.16
>>> cmp.dist('ATCG', 'TAGC')
0.08695652173913043
"""
if src == tar:
return 0.0
src = src.encode('utf-8')
tar = tar.encode('utf-8')
if lzma is not None:
src_comp = lzma.compress(src)[14:]
tar_comp = lzma.compress(tar)[14:]
concat_comp = lzma.compress(src + tar)[14:]
concat_comp2 = lzma.compress(tar + src)[14:]
else: # pragma: no cover
raise ValueError(
'Install the PylibLZMA module in order to use LZMA'
)
return (
min(len(concat_comp), len(concat_comp2))
- min(len(src_comp), len(tar_comp))
) / max(len(src_comp), len(tar_comp))
|
def smith_waterman(src, tar, gap_cost=1, sim_func=sim_ident):
"""Return the Smith-Waterman score of two strings.
This is a wrapper for :py:meth:`SmithWaterman.dist_abs`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
gap_cost : float
The cost of an alignment gap (1 by default)
sim_func : function
A function that returns the similarity of two characters (identity
similarity by default)
Returns
-------
float
Smith-Waterman score
Examples
--------
>>> smith_waterman('cat', 'hat')
2.0
>>> smith_waterman('Niall', 'Neil')
1.0
>>> smith_waterman('aluminum', 'Catalan')
0.0
>>> smith_waterman('ATCG', 'TAGC')
1.0
"""
return SmithWaterman().dist_abs(src, tar, gap_cost, sim_func)
|
def dist_abs(self, src, tar, gap_cost=1, sim_func=sim_ident):
"""Return the Smith-Waterman score of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
gap_cost : float
The cost of an alignment gap (1 by default)
sim_func : function
A function that returns the similarity of two characters (identity
similarity by default)
Returns
-------
float
Smith-Waterman score
Examples
--------
>>> cmp = SmithWaterman()
>>> cmp.dist_abs('cat', 'hat')
2.0
>>> cmp.dist_abs('Niall', 'Neil')
1.0
>>> cmp.dist_abs('aluminum', 'Catalan')
0.0
>>> cmp.dist_abs('ATCG', 'TAGC')
1.0
"""
d_mat = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_float32)
for i in range(len(src) + 1):
d_mat[i, 0] = 0
for j in range(len(tar) + 1):
d_mat[0, j] = 0
for i in range(1, len(src) + 1):
for j in range(1, len(tar) + 1):
match = d_mat[i - 1, j - 1] + sim_func(src[i - 1], tar[j - 1])
delete = d_mat[i - 1, j] - gap_cost
insert = d_mat[i, j - 1] - gap_cost
d_mat[i, j] = max(0, match, delete, insert)
return d_mat[d_mat.shape[0] - 1, d_mat.shape[1] - 1]
|
def levenshtein(src, tar, mode='lev', cost=(1, 1, 1, 1)):
"""Return the Levenshtein distance between two strings.
This is a wrapper of :py:meth:`Levenshtein.dist_abs`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
mode : str
Specifies a mode for computing the Levenshtein distance:
- ``lev`` (default) computes the ordinary Levenshtein distance, in
which edits may include inserts, deletes, and substitutions
- ``osa`` computes the Optimal String Alignment distance, in which
edits may include inserts, deletes, substitutions, and
transpositions but substrings may only be edited once
cost : tuple
A 4-tuple representing the cost of the four possible edits: inserts,
deletes, substitutions, and transpositions, respectively (by default:
(1, 1, 1, 1))
Returns
-------
int (may return a float if cost has float values)
The Levenshtein distance between src & tar
Examples
--------
>>> levenshtein('cat', 'hat')
1
>>> levenshtein('Niall', 'Neil')
3
>>> levenshtein('aluminum', 'Catalan')
7
>>> levenshtein('ATCG', 'TAGC')
3
>>> levenshtein('ATCG', 'TAGC', mode='osa')
2
>>> levenshtein('ACTG', 'TAGC', mode='osa')
4
"""
return Levenshtein().dist_abs(src, tar, mode, cost)
|
def dist_levenshtein(src, tar, mode='lev', cost=(1, 1, 1, 1)):
"""Return the normalized Levenshtein distance between two strings.
This is a wrapper of :py:meth:`Levenshtein.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
mode : str
Specifies a mode for computing the Levenshtein distance:
- ``lev`` (default) computes the ordinary Levenshtein distance, in
which edits may include inserts, deletes, and substitutions
- ``osa`` computes the Optimal String Alignment distance, in which
edits may include inserts, deletes, substitutions, and
transpositions but substrings may only be edited once
cost : tuple
A 4-tuple representing the cost of the four possible edits: inserts,
deletes, substitutions, and transpositions, respectively (by default:
(1, 1, 1, 1))
Returns
-------
float
The Levenshtein distance between src & tar
Examples
--------
>>> round(dist_levenshtein('cat', 'hat'), 12)
0.333333333333
>>> round(dist_levenshtein('Niall', 'Neil'), 12)
0.6
>>> dist_levenshtein('aluminum', 'Catalan')
0.875
>>> dist_levenshtein('ATCG', 'TAGC')
0.75
"""
return Levenshtein().dist(src, tar, mode, cost)
|
def sim_levenshtein(src, tar, mode='lev', cost=(1, 1, 1, 1)):
"""Return the Levenshtein similarity of two strings.
This is a wrapper of :py:meth:`Levenshtein.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
mode : str
Specifies a mode for computing the Levenshtein distance:
- ``lev`` (default) computes the ordinary Levenshtein distance, in
which edits may include inserts, deletes, and substitutions
- ``osa`` computes the Optimal String Alignment distance, in which
edits may include inserts, deletes, substitutions, and
transpositions but substrings may only be edited once
cost : tuple
A 4-tuple representing the cost of the four possible edits: inserts,
deletes, substitutions, and transpositions, respectively (by default:
(1, 1, 1, 1))
Returns
-------
float
The Levenshtein similarity between src & tar
Examples
--------
>>> round(sim_levenshtein('cat', 'hat'), 12)
0.666666666667
>>> round(sim_levenshtein('Niall', 'Neil'), 12)
0.4
>>> sim_levenshtein('aluminum', 'Catalan')
0.125
>>> sim_levenshtein('ATCG', 'TAGC')
0.25
"""
return Levenshtein().sim(src, tar, mode, cost)
|
def dist_abs(self, src, tar, mode='lev', cost=(1, 1, 1, 1)):
"""Return the Levenshtein distance between two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
mode : str
Specifies a mode for computing the Levenshtein distance:
- ``lev`` (default) computes the ordinary Levenshtein distance,
in which edits may include inserts, deletes, and
substitutions
- ``osa`` computes the Optimal String Alignment distance, in
which edits may include inserts, deletes, substitutions, and
transpositions but substrings may only be edited once
cost : tuple
A 4-tuple representing the cost of the four possible edits:
inserts, deletes, substitutions, and transpositions, respectively
(by default: (1, 1, 1, 1))
Returns
-------
int (may return a float if cost has float values)
The Levenshtein distance between src & tar
Examples
--------
>>> cmp = Levenshtein()
>>> cmp.dist_abs('cat', 'hat')
1
>>> cmp.dist_abs('Niall', 'Neil')
3
>>> cmp.dist_abs('aluminum', 'Catalan')
7
>>> cmp.dist_abs('ATCG', 'TAGC')
3
>>> cmp.dist_abs('ATCG', 'TAGC', mode='osa')
2
>>> cmp.dist_abs('ACTG', 'TAGC', mode='osa')
4
"""
ins_cost, del_cost, sub_cost, trans_cost = cost
if src == tar:
return 0
if not src:
return len(tar) * ins_cost
if not tar:
return len(src) * del_cost
d_mat = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_int)
for i in range(len(src) + 1):
d_mat[i, 0] = i * del_cost
for j in range(len(tar) + 1):
d_mat[0, j] = j * ins_cost
for i in range(len(src)):
for j in range(len(tar)):
d_mat[i + 1, j + 1] = min(
d_mat[i + 1, j] + ins_cost, # ins
d_mat[i, j + 1] + del_cost, # del
d_mat[i, j]
+ (sub_cost if src[i] != tar[j] else 0), # sub/==
)
if mode == 'osa':
if (
i + 1 > 1
and j + 1 > 1
and src[i] == tar[j - 1]
and src[i - 1] == tar[j]
):
# transposition
d_mat[i + 1, j + 1] = min(
d_mat[i + 1, j + 1],
d_mat[i - 1, j - 1] + trans_cost,
)
return d_mat[len(src), len(tar)]
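# Worked example of the 'osa' branch (consistent with the doctests above):
# 'ATCG' -> 'TAGC' costs 2 because the adjacent pairs AT and CG can each be
# transposed once; 'ACTG' -> 'TAGC' costs 4 because OSA may not edit a
# substring more than once, so the overlapping swaps cannot be chained.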
|
def dist(self, src, tar, mode='lev', cost=(1, 1, 1, 1)):
"""Return the normalized Levenshtein distance between two strings.
The Levenshtein distance is normalized by dividing the Levenshtein
distance (calculated by any of the three supported methods) by the
greater of the number of characters in src times the cost of a delete
and the number of characters in tar times the cost of an insert.
For the case in which all operations have :math:`cost = 1`, this is
equivalent to the greater of the length of the two strings src & tar.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
mode : str
Specifies a mode for computing the Levenshtein distance:
- ``lev`` (default) computes the ordinary Levenshtein distance,
in which edits may include inserts, deletes, and
substitutions
- ``osa`` computes the Optimal String Alignment distance, in
which edits may include inserts, deletes, substitutions, and
transpositions but substrings may only be edited once
cost : tuple
A 4-tuple representing the cost of the four possible edits:
inserts, deletes, substitutions, and transpositions, respectively
(by default: (1, 1, 1, 1))
Returns
-------
float
The normalized Levenshtein distance between src & tar
Examples
--------
>>> cmp = Levenshtein()
>>> round(cmp.dist('cat', 'hat'), 12)
0.333333333333
>>> round(cmp.dist('Niall', 'Neil'), 12)
0.6
>>> cmp.dist('aluminum', 'Catalan')
0.875
>>> cmp.dist('ATCG', 'TAGC')
0.75
"""
if src == tar:
return 0
ins_cost, del_cost = cost[:2]
return levenshtein(src, tar, mode, cost) / (
max(len(src) * del_cost, len(tar) * ins_cost)
)
|
def fingerprint(self, word):
"""Return the omission key.
Parameters
----------
word : str
The word to transform into its omission key
Returns
-------
str
The omission key
Examples
--------
>>> ok = OmissionKey()
>>> ok.fingerprint('The quick brown fox jumped over the lazy dog.')
'JKQXZVWYBFMGPDHCLNTREUIOA'
>>> ok.fingerprint('Christopher')
'PHCTSRIOE'
>>> ok.fingerprint('Niall')
'LNIA'
"""
word = unicode_normalize('NFKD', text_type(word.upper()))
word = ''.join(c for c in word if c in self._letters)
key = ''
# add consonants in order supplied by _consonants (no duplicates)
for char in self._consonants:
if char in word:
key += char
# add vowels in order they appeared in the word (no duplicates)
for char in word:
if char not in self._consonants and char not in key:
key += char
return key
|
def minkowski(src, tar, qval=2, pval=1, normalized=False, alphabet=None):
"""Return the Minkowski distance (:math:`L^p`-norm) of two strings.
This is a wrapper for :py:meth:`Minkowski.dist_abs`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
pval : int or float
The :math:`p`-value of the :math:`L^p`-space
normalized : bool
Normalizes to [0, 1] if True
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The Minkowski distance
Examples
--------
>>> minkowski('cat', 'hat')
4.0
>>> minkowski('Niall', 'Neil')
7.0
>>> minkowski('Colin', 'Cuilen')
9.0
>>> minkowski('ATCG', 'TAGC')
10.0
"""
return Minkowski().dist_abs(src, tar, qval, pval, normalized, alphabet)
|
def dist_minkowski(src, tar, qval=2, pval=1, alphabet=None):
"""Return normalized Minkowski distance of two strings.
This is a wrapper for :py:meth:`Minkowski.dist`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
pval : int or float
The :math:`p`-value of the :math:`L^p`-space
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The normalized Minkowski distance
Examples
--------
>>> dist_minkowski('cat', 'hat')
0.5
>>> round(dist_minkowski('Niall', 'Neil'), 12)
0.636363636364
>>> round(dist_minkowski('Colin', 'Cuilen'), 12)
0.692307692308
>>> dist_minkowski('ATCG', 'TAGC')
1.0
"""
return Minkowski().dist(src, tar, qval, pval, alphabet)
|
def sim_minkowski(src, tar, qval=2, pval=1, alphabet=None):
"""Return normalized Minkowski similarity of two strings.
This is a wrapper for :py:meth:`Minkowski.sim`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
pval : int or float
The :math:`p`-value of the :math:`L^p`-space
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The normalized Minkowski similarity
Examples
--------
>>> sim_minkowski('cat', 'hat')
0.5
>>> round(sim_minkowski('Niall', 'Neil'), 12)
0.363636363636
>>> round(sim_minkowski('Colin', 'Cuilen'), 12)
0.307692307692
>>> sim_minkowski('ATCG', 'TAGC')
0.0
"""
return Minkowski().sim(src, tar, qval, pval, alphabet)
|
def dist_abs(
self, src, tar, qval=2, pval=1, normalized=False, alphabet=None
):
"""Return the Minkowski distance (:math:`L^p`-norm) of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
pval : int or float
The :math:`p`-value of the :math:`L^p`-space
normalized : bool
Normalizes to [0, 1] if True
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The Minkowski distance
Examples
--------
>>> cmp = Minkowski()
>>> cmp.dist_abs('cat', 'hat')
4.0
>>> cmp.dist_abs('Niall', 'Neil')
7.0
>>> cmp.dist_abs('Colin', 'Cuilen')
9.0
>>> cmp.dist_abs('ATCG', 'TAGC')
10.0
"""
q_src, q_tar = self._get_qgrams(src, tar, qval)
diffs = ((q_src - q_tar) + (q_tar - q_src)).values()
normalizer = 1
if normalized:
totals = (q_src + q_tar).values()
if alphabet is not None:
# noinspection PyTypeChecker
normalizer = (
alphabet if isinstance(alphabet, Number) else len(alphabet)
)
elif pval == 0:
normalizer = len(totals)
else:
normalizer = sum(_ ** pval for _ in totals) ** (1 / pval)
if len(diffs) == 0:
return 0.0
if pval == float('inf'):
# Chebyshev distance
return max(diffs) / normalizer
if pval == 0:
# This is the l_0 "norm" as developed by David Donoho
return len(diffs) / normalizer
return sum(_ ** pval for _ in diffs) ** (1 / pval) / normalizer
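# Parameter sketch (illustrative arithmetic): pval selects the norm —
# pval=1 gives the Manhattan distance, pval=2 the Euclidean distance, and
# pval=float('inf') the Chebyshev distance handled explicitly above. For
# 'cat' vs. 'hat' the four differing bigrams give 4.0 at pval=1 but
# 4 ** (1 / 2) == 2.0 at pval=2.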
|
def dist(self, src, tar, qval=2, pval=1, alphabet=None):
"""Return normalized Minkowski distance of two strings.
The normalized Minkowski distance :cite:`Minkowski:1910` is a distance
metric in :math:`L^p`-space, normalized to [0, 1].
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
pval : int or float
The :math:`p`-value of the :math:`L^p`-space
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The normalized Minkowski distance
Examples
--------
>>> cmp = Minkowski()
>>> cmp.dist('cat', 'hat')
0.5
>>> round(cmp.dist('Niall', 'Neil'), 12)
0.636363636364
>>> round(cmp.dist('Colin', 'Cuilen'), 12)
0.692307692308
>>> cmp.dist('ATCG', 'TAGC')
1.0
"""
return self.dist_abs(src, tar, qval, pval, True, alphabet)
|
def occurrence_halved_fingerprint(
word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG
):
"""Return the occurrence halved fingerprint.
This is a wrapper for :py:meth:`OccurrenceHalved.fingerprint`.
Parameters
----------
word : str
The word to fingerprint
n_bits : int
Number of bits in the fingerprint returned
most_common : list
The most common tokens in the target language, ordered by frequency
Returns
-------
int
The occurrence halved fingerprint
Examples
--------
>>> bin(occurrence_halved_fingerprint('hat'))
'0b1010000000010'
>>> bin(occurrence_halved_fingerprint('niall'))
'0b10010100000'
>>> bin(occurrence_halved_fingerprint('colin'))
'0b1001010000'
>>> bin(occurrence_halved_fingerprint('atcg'))
'0b10100000000000'
>>> bin(occurrence_halved_fingerprint('entreatment'))
'0b1111010000110000'
"""
return OccurrenceHalved().fingerprint(word, n_bits, most_common)
|
def fingerprint(self, word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG):
"""Return the occurrence halved fingerprint.
Based on the occurrence halved fingerprint from :cite:`Cislak:2017`.
Parameters
----------
word : str
The word to fingerprint
n_bits : int
Number of bits in the fingerprint returned
most_common : list
The most common tokens in the target language, ordered by frequency
Returns
-------
int
The occurrence halved fingerprint
Examples
--------
>>> ohf = OccurrenceHalved()
>>> bin(ohf.fingerprint('hat'))
'0b1010000000010'
>>> bin(ohf.fingerprint('niall'))
'0b10010100000'
>>> bin(ohf.fingerprint('colin'))
'0b1001010000'
>>> bin(ohf.fingerprint('atcg'))
'0b10100000000000'
>>> bin(ohf.fingerprint('entreatment'))
'0b1111010000110000'
"""
if n_bits % 2:
n_bits += 1
w_len = len(word) // 2
w_1 = set(word[:w_len])
w_2 = set(word[w_len:])
fingerprint = 0
for letter in most_common:
if n_bits:
fingerprint <<= 1
if letter in w_1:
fingerprint += 1
fingerprint <<= 1
if letter in w_2:
fingerprint += 1
n_bits -= 2
else:
break
if n_bits > 0:
fingerprint <<= n_bits
return fingerprint
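# Bit-layout sketch (hypothetical inputs, not from the source): each
# most_common letter contributes two bits — presence in the first half of
# the word, then in the second half. With most_common=['a', 'e', 't'] and
# n_bits=6, fingerprint('hat') splits into w_1={'h'}, w_2={'a', 't'} and
# yields 0b010001: 'a' -> 01, 'e' -> 00, 't' -> 01.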
|
def encode(self, word, max_length=3):
"""Calculate the early version of the Henry code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 3)
Returns
-------
str
The early Henry code
Examples
--------
>>> pe = HenryEarly()
>>> pe.encode('Marchand')
'MRC'
>>> pe.encode('Beaulieu')
'BL'
>>> pe.encode('Beaumont')
'BM'
>>> pe.encode('Legrand')
'LGR'
>>> pe.encode('Pelletier')
'PLT'
"""
word = unicode_normalize('NFKD', text_type(word.upper()))
word = ''.join(c for c in word if c in self._uc_set)
if not word:
return ''
# Rule Ia seems to be covered entirely in II
# Rule Ib
if word[0] in self._uc_vy_set:
# Ib1
if (
word[1:2] in self._uc_c_set - {'M', 'N'}
and word[2:3] in self._uc_c_set
) or (
word[1:2] in self._uc_c_set and word[2:3] not in self._uc_c_set
):
if word[0] == 'Y':
word = 'I' + word[1:]
# Ib2
elif word[1:2] in {'M', 'N'} and word[2:3] in self._uc_c_set:
if word[0] == 'E':
word = 'A' + word[1:]
elif word[0] in {'I', 'U', 'Y'}:
word = 'E' + word[1:]
# Ib3
elif word[:2] in self._diph:
word = self._diph[word[:2]] + word[2:]
# Ib4
elif word[1:2] in self._uc_vy_set and word[0] == 'Y':
word = 'I' + word[1:]
code = ''
skip = 0
# Rule II
for pos, char in enumerate(word):
nxch = word[pos + 1 : pos + 2]
prev = word[pos - 1 : pos]
if skip:
skip -= 1
elif char in self._uc_vy_set:
code += char
# IIc
elif char == nxch:
skip = 1
code += char
elif word[pos : pos + 2] in {'CQ', 'DT', 'SC'}:
continue
# IIb
elif char in self._simple:
code += self._simple[char]
elif char in {'C', 'G', 'P', 'Q', 'S'}:
if char == 'C':
if nxch in {'A', 'O', 'U', 'L', 'R'}:
code += 'K'
elif nxch in {'E', 'I', 'Y'}:
code += 'S'
elif nxch == 'H':
if word[pos + 2 : pos + 3] in self._uc_vy_set:
code += 'C'
else: # CHR, CHL, etc.
code += 'K'
else:
code += 'C'
elif char == 'G':
if nxch in {'A', 'O', 'U', 'L', 'R'}:
code += 'G'
elif nxch in {'E', 'I', 'Y'}:
code += 'J'
elif nxch == 'N':
code += 'N'
elif char == 'P':
if nxch != 'H':
code += 'P'
else:
code += 'F'
elif char == 'Q':
if word[pos + 1 : pos + 3] in {'UE', 'UI', 'UY'}:
code += 'G'
else: # QUA, QUO, etc.
code += 'K'
else: # S...
if word[pos : pos + 6] == 'SAINTE':
code += 'X'
skip = 5
elif word[pos : pos + 5] == 'SAINT':
code += 'X'
skip = 4
elif word[pos : pos + 3] == 'STE':
code += 'X'
skip = 2
elif word[pos : pos + 2] == 'ST':
code += 'X'
skip = 1
elif nxch in self._uc_c_set:
continue
else:
code += 'S'
# IId
elif char == 'H' and prev in self._uc_c_set:
continue
elif char in self._uc_c_set - {
'L',
'R',
} and nxch in self._uc_c_set - {'L', 'R'}:
continue
elif char == 'L' and nxch in {'M', 'N'}:
continue
elif (
char in {'M', 'N'}
and prev in self._uc_vy_set
and nxch in self._uc_c_set
):
continue
# IIa
else:
code += char
# IIe1
if code[-4:] in {'AULT', 'EULT', 'OULT'}:
code = code[:-2]
# The following are blocked by rules above
# elif code[-4:-3] in _vows and code[-3:] == 'MPS':
# code = code[:-3]
# elif code[-3:-2] in _vows and code[-2:] in {'MB', 'MP', 'ND',
# 'NS', 'NT'}:
# code = code[:-2]
elif code[-2:-1] == 'R' and code[-1:] in self._uc_c_set:
code = code[:-1]
# IIe2
elif code[-2:-1] in self._uc_vy_set and code[-1:] in {
'D',
'M',
'N',
'S',
'T',
}:
code = code[:-1]
elif code[-2:] == 'ER':
code = code[:-1]
# Drop non-initial vowels
code = code[:1] + code[1:].translate(
{65: '', 69: '', 73: '', 79: '', 85: '', 89: ''}
)
if max_length != -1:
code = code[:max_length]
return code
|
def pshp_soundex_first(fname, max_length=4, german=False):
"""Calculate the PSHP Soundex/Viewex Coding of a first name.
This is a wrapper for :py:meth:`PSHPSoundexFirst.encode`.
Parameters
----------
fname : str
The first name to encode
max_length : int
The length of the code returned (defaults to 4)
german : bool
Set to True if the name is German (different rules apply)
Returns
-------
str
The PSHP Soundex/Viewex Coding
Examples
--------
>>> pshp_soundex_first('Smith')
'S530'
>>> pshp_soundex_first('Waters')
'W352'
>>> pshp_soundex_first('James')
'J700'
>>> pshp_soundex_first('Schmidt')
'S500'
>>> pshp_soundex_first('Ashcroft')
'A220'
>>> pshp_soundex_first('John')
'J500'
>>> pshp_soundex_first('Colin')
'K400'
>>> pshp_soundex_first('Niall')
'N400'
>>> pshp_soundex_first('Sally')
'S400'
>>> pshp_soundex_first('Jane')
'J500'
"""
return PSHPSoundexFirst().encode(fname, max_length, german)
|
def encode(self, fname, max_length=4, german=False):
"""Calculate the PSHP Soundex/Viewex Coding of a first name.
Parameters
----------
fname : str
The first name to encode
max_length : int
The length of the code returned (defaults to 4)
german : bool
Set to True if the name is German (different rules apply)
Returns
-------
str
The PSHP Soundex/Viewex Coding
Examples
--------
>>> pe = PSHPSoundexFirst()
>>> pe.encode('Smith')
'S530'
>>> pe.encode('Waters')
'W352'
>>> pe.encode('James')
'J700'
>>> pe.encode('Schmidt')
'S500'
>>> pe.encode('Ashcroft')
'A220'
>>> pe.encode('John')
'J500'
>>> pe.encode('Colin')
'K400'
>>> pe.encode('Niall')
'N400'
>>> pe.encode('Sally')
'S400'
>>> pe.encode('Jane')
'J500'
"""
fname = unicode_normalize('NFKD', text_type(fname.upper()))
fname = fname.replace('ß', 'SS')
fname = ''.join(c for c in fname if c in self._uc_set)
# special rules
if fname == 'JAMES':
code = 'J7'
elif fname == 'PAT':
code = 'P7'
else:
# A. Prefix treatment
if fname[:2] in {'GE', 'GI', 'GY'}:
fname = 'J' + fname[1:]
elif fname[:2] in {'CE', 'CI', 'CY'}:
fname = 'S' + fname[1:]
elif fname[:3] == 'CHR':
fname = 'K' + fname[1:]
elif fname[:1] == 'C' and fname[:2] != 'CH':
fname = 'K' + fname[1:]
if fname[:2] == 'KN':
fname = 'N' + fname[1:]
elif fname[:2] == 'PH':
fname = 'F' + fname[1:]
elif fname[:3] in {'WIE', 'WEI'}:
fname = 'V' + fname[1:]
if german and fname[:1] in {'W', 'M', 'Y', 'Z'}:
fname = {'W': 'V', 'M': 'N', 'Y': 'J', 'Z': 'S'}[
fname[0]
] + fname[1:]
code = fname[:1]
# B. Soundex coding
# code for Y unspecified, but presumably is 0
fname = fname.translate(self._trans)
fname = self._delete_consecutive_repeats(fname)
code += fname[1:]
syl_ptr = code.find('0')
syl2_ptr = code[syl_ptr + 1 :].find('0')
if syl_ptr != -1 and syl2_ptr != -1 and syl2_ptr - syl_ptr > -1:
code = code[: syl_ptr + 2]
code = code.replace('0', '') # rule 1
if max_length != -1:
if len(code) < max_length:
code += '0' * (max_length - len(code))
else:
code = code[:max_length]
return code
|
def c2u(name):
"""Convert camelCase (used in PHP) to Python-standard snake_case.
Src:
https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case
Parameters
----------
name : str
    A function or variable name in camelCase
Returns
-------
str
    The name in snake_case
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
s1 = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
return s1
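# Quick checks (illustrative):
#
#     >>> c2u('languageIndex')
#     'language_index'
#     >>> c2u('approxCommon')
#     'approx_common'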
|
def pythonize(line, fn='', subdir='gen'):
"""Convert a line of BMPM code from PHP to Python.
Parameters
----------
line : str
A line of code
fn : str
A filename
subdir : str
The file's subdirectory
Returns
-------
str
    The code in Python
"""
global array_seen, nl, sd
if '$all' in line:
return ''
if 'make the sum of all languages be visible in the function' in line:
return ''
line = line.strip()
if 'array' in line and not line.startswith('//'):
array_seen = True
line = re.sub('//+', '#', line)
# line = re.sub('"\.\((\$.+?)\)\."', r'\1', line)
if line and re.search(r'array\("[^"]+?"\)', line):
# print("### " + line)
line = ''
line = line.replace('array', '')
line = re.sub(r'^\s*', '', line)
line = re.sub(';$', '', line)
line = re.sub('^include_.+', '', line)
line = re.sub(
r'\$(approx|rules|exact)\[LanguageIndex\("([^"]+)", '
+ r'\$languages\)\] = \$([a-zA-Z]+)',
lambda m: (
"BMDATA['"
+ subdir
+ "']['"
+ m.group(1)
+ "'][L_"
+ m.group(2).upper()
+ '] = _'
+ subdir.upper()
+ '_'
+ c2u(m.group(3)).upper()
),
line,
)
line = re.sub(
r'\$(approx|rules|exact|hebrew)([A-Za-z]+) = _merge'
+ r'\(\$([a-zA-Z]+), \$([a-zA-Z]+)\)',
lambda m: (
"BMDATA['"
+ subdir
+ "']['"
+ m.group(1)
+ "'][L_"
+ c2u(m.group(2)).upper()
+ '] = _'
+ subdir.upper()
+ '_'
+ c2u(m.group(3)).upper()
+ ' + _'
+ subdir.upper()
+ '_'
+ c2u(m.group(4)).upper()
),
line,
)
line = re.sub(
r'\$(approx|rules|exact)\[LanguageIndex\("([^"]+)", '
+ r'\$languages\)\] = _merge\(\$([a-zA-Z]+), \$([a-zA-Z]+)\)',
lambda m: (
"BMDATA['"
+ subdir
+ "']['"
+ m.group(1)
+ "'][L_"
+ c2u(m.group(2)).upper()
+ '] = _'
+ subdir.upper()
+ '_'
+ c2u(m.group(3)).upper()
+ ' + _'
+ subdir.upper()
+ '_'
+ c2u(m.group(4)).upper()
),
line,
)
line = re.sub(
r'^\$([a-zA-Z]+)',
lambda m: '_' + sd.upper() + '_' + c2u(m.group(1)).upper(),
line,
)
for _ in range(len(lang_tuple)):
# '$' must be escaped to match PHP variable sigils literally
line = re.sub(r'(\$[a-zA-Z]+) *\+ *(\$[a-zA-Z]+)', r'\1+\2', line)
line = re.sub(
r'\$([a-zA-Z]+)',
lambda m: (
'L_' + m.group(1).upper()
if m.group(1) in lang_dict
else '$' + m.group(1)
),
line,
)
line = re.sub(r'\[\"\.\((L_[A-Z_+]+)\)\.\"\]', r'[\1]', line)
line = re.sub(
'L_([A-Z]+)', lambda m: str(lang_dict[m.group(1).lower()]), line
)
for _ in range(4):
line = re.sub(
r'([0-9]+) *\+ *([0-9]+)',
lambda m: str(int(m.group(1)) + int(m.group(2))),
line,
)
if fn == 'lang':
if len(line.split(',')) >= 3:
parts = line.split(',')
parts[0] = re.sub('/(.+?)/', r'\1', parts[0])
# parts[1] = re.sub('\$', 'L_', parts[1])
# parts[1] = re.sub(' *\+ *', '|', parts[1])
parts[2] = parts[2].title()
line = ','.join(parts)
if 'languagenames' in fn:
line = line.replace('"', "'")
line = line.replace("','", "', '")
if line and line[0] == "'":
line = ' ' * 14 + line
# fix upstream
# line = line.replace('ë', 'ü')
comment = ''
if '#' in line:
hashsign = line.find('#')
comment = line[hashsign:]
code = line[:hashsign]
else:
code = line
code = code.rstrip()
comment = comment.strip()
if not re.match(r'^\s*$', code):
comment = ' ' + comment
if '(' in code and ')' in code:
prefix = code[: code.find('(') + 1]
suffix = code[code.rfind(')') :]
tuplecontent = code[len(prefix) : len(code) - len(suffix)]
elts = tuplecontent.split(',')
for i in range(len(elts)):
elts[i] = elts[i].strip()
if elts[i][0] == '"' and elts[i][-1] == '"':
elts[i] = "'" + elts[i][1:-1].replace("'", "\\'") + "'"
tuplecontent = ', '.join(elts)
code = prefix + tuplecontent + suffix
line = code + comment
line = re.sub('# *', '# ', line)
if line:
nl = False
if array_seen and not (line[0] == '_' or line.startswith('BMDATA')):
line = ' ' * 4 + line
return line + '\n'
elif not nl:
nl = True
return '\n'
else:
return ''
|
def sim(self, src, tar, qval=2):
r"""Return the cosine similarity of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
Returns
-------
float
Cosine similarity
Examples
--------
>>> cmp = Cosine()
>>> cmp.sim('cat', 'hat')
0.5
>>> cmp.sim('Niall', 'Neil')
0.3651483716701107
>>> cmp.sim('aluminum', 'Catalan')
0.11785113019775793
>>> cmp.sim('ATCG', 'TAGC')
0.0
"""
if src == tar:
return 1.0
if not src or not tar:
return 0.0
q_src, q_tar = self._get_qgrams(src, tar, qval)
q_src_mag = sum(q_src.values())
q_tar_mag = sum(q_tar.values())
q_intersection_mag = sum((q_src & q_tar).values())
return q_intersection_mag / sqrt(q_src_mag * q_tar_mag)
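# Worked example (consistent with the doctests above): with qval=2 and the
# default '$#' padding, 'cat' -> {$c, ca, at, t#} and 'hat' -> {$h, ha, at,
# t#}; the intersection magnitude is 2 and each bag has magnitude 4, so the
# similarity is 2 / sqrt(4 * 4) == 0.5.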
|
def sim_monge_elkan(src, tar, sim_func=sim_levenshtein, symmetric=False):
"""Return the Monge-Elkan similarity of two strings.
This is a wrapper for :py:meth:`MongeElkan.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
sim_func : function
The internal similarity metric to employ
symmetric : bool
Return a symmetric similarity measure
Returns
-------
float
Monge-Elkan similarity
Examples
--------
>>> sim_monge_elkan('cat', 'hat')
0.75
>>> round(sim_monge_elkan('Niall', 'Neil'), 12)
0.666666666667
>>> round(sim_monge_elkan('aluminum', 'Catalan'), 12)
0.388888888889
>>> sim_monge_elkan('ATCG', 'TAGC')
0.5
"""
return MongeElkan().sim(src, tar, sim_func, symmetric)
|
def dist_monge_elkan(src, tar, sim_func=sim_levenshtein, symmetric=False):
"""Return the Monge-Elkan distance between two strings.
This is a wrapper for :py:meth:`MongeElkan.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
sim_func : function
The internal similarity metric to employ
symmetric : bool
Return a symmetric similarity measure
Returns
-------
float
Monge-Elkan distance
Examples
--------
>>> dist_monge_elkan('cat', 'hat')
0.25
>>> round(dist_monge_elkan('Niall', 'Neil'), 12)
0.333333333333
>>> round(dist_monge_elkan('aluminum', 'Catalan'), 12)
0.611111111111
>>> dist_monge_elkan('ATCG', 'TAGC')
0.5
"""
return MongeElkan().dist(src, tar, sim_func, symmetric)
|
def sim(self, src, tar, sim_func=sim_levenshtein, symmetric=False):
"""Return the Monge-Elkan similarity of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
sim_func : function
The internal similarity metric to employ
symmetric : bool
Return a symmetric similarity measure
Returns
-------
float
Monge-Elkan similarity
Examples
--------
>>> cmp = MongeElkan()
>>> cmp.sim('cat', 'hat')
0.75
>>> round(cmp.sim('Niall', 'Neil'), 12)
0.666666666667
>>> round(cmp.sim('aluminum', 'Catalan'), 12)
0.388888888889
>>> cmp.sim('ATCG', 'TAGC')
0.5
"""
if src == tar:
return 1.0
q_src = sorted(QGrams(src).elements())
q_tar = sorted(QGrams(tar).elements())
if not q_src or not q_tar:
return 0.0
sum_of_maxes = 0
for q_s in q_src:
max_sim = float('-inf')
for q_t in q_tar:
max_sim = max(max_sim, sim_func(q_s, q_t))
sum_of_maxes += max_sim
sim_em = sum_of_maxes / len(q_src)
if symmetric:
sim_em = (sim_em + self.sim(tar, src, sim_func, False)) / 2
return sim_em
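# Note on symmetry (derived from the code above): the base measure averages,
# for each q-gram of src, its best match in tar, so sim(src, tar) need not
# equal sim(tar, src); passing symmetric=True returns the mean of the two
# directed scores.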
|
def encode(self, word):
"""Return the Phonem code for a word.
Parameters
----------
word : str
The word to transform
Returns
-------
str
The Phonem value
Examples
--------
>>> pe = Phonem()
>>> pe.encode('Christopher')
'CRYSDOVR'
>>> pe.encode('Niall')
'NYAL'
>>> pe.encode('Smith')
'SMYD'
>>> pe.encode('Schmidt')
'CMYD'
"""
word = unicode_normalize('NFC', text_type(word.upper()))
for i, j in self._substitutions:
word = word.replace(i, j)
word = word.translate(self._trans)
return ''.join(
c
for c in self._delete_consecutive_repeats(word)
if c in self._uc_set
)
|
def stem(self, word):
"""Return CLEF Swedish stem.
Parameters
----------
word : str
The word to stem
Returns
-------
str
Word stem
Examples
--------
>>> stmr = CLEFSwedish()
>>> stmr.stem('undervisa')
'undervis'
>>> stmr.stem('suspension')
'suspensio'
>>> stmr.stem('visshet')
'viss'
"""
wlen = len(word) - 2
if wlen > 2 and word[-1] == 's':
word = word[:-1]
wlen -= 1
_endings = {
5: {'elser', 'heten'},
4: {'arne', 'erna', 'ande', 'else', 'aste', 'orna', 'aren'},
3: {'are', 'ast', 'het'},
2: {'ar', 'er', 'or', 'en', 'at', 'te', 'et'},
1: {'a', 'e', 'n', 't'},
}
for end_len in range(5, 0, -1):
if wlen > end_len and word[-end_len:] in _endings[end_len]:
return word[:-end_len]
return word
|
def _undouble(self, word):
"""Undouble endings -kk, -dd, and -tt.
Parameters
----------
word : str
The word to stem
Returns
-------
str
The word with doubled endings undoubled
"""
if (
len(word) > 1
and word[-1] == word[-2]
and word[-1] in {'d', 'k', 't'}
):
return word[:-1]
return word
|
def stem(self, word):
"""Return Snowball Dutch stem.
Parameters
----------
word : str
The word to stem
Returns
-------
str
Word stem
Examples
--------
>>> stmr = SnowballDutch()
>>> stmr.stem('lezen')
'lez'
>>> stmr.stem('opschorting')
'opschort'
>>> stmr.stem('ongrijpbaarheid')
'ongrijp'
"""
# lowercase, normalize, decompose, filter umlauts & acutes out, and
# compose
word = normalize('NFC', text_type(word.lower()))
word = word.translate(self._accented)
for i in range(len(word)):
if i == 0 and word[0] == 'y':
word = 'Y' + word[1:]
elif word[i] == 'y' and word[i - 1] in self._vowels:
word = word[:i] + 'Y' + word[i + 1 :]
elif (
word[i] == 'i'
and word[i - 1] in self._vowels
and i + 1 < len(word)
and word[i + 1] in self._vowels
):
word = word[:i] + 'I' + word[i + 1 :]
r1_start = max(3, self._sb_r1(word))
r2_start = self._sb_r2(word)
# Step 1
if word[-5:] == 'heden':
if len(word[r1_start:]) >= 5:
word = word[:-3] + 'id'
elif word[-3:] == 'ene':
if len(word[r1_start:]) >= 3 and (
word[-4] not in self._vowels and word[-6:-3] != 'gem'
):
word = self._undouble(word[:-3])
elif word[-2:] == 'en':
if len(word[r1_start:]) >= 2 and (
word[-3] not in self._vowels and word[-5:-2] != 'gem'
):
word = self._undouble(word[:-2])
elif word[-2:] == 'se':
if (
len(word[r1_start:]) >= 2
and word[-3] not in self._not_s_endings
):
word = word[:-2]
elif word[-1:] == 's':
if (
len(word[r1_start:]) >= 1
and word[-2] not in self._not_s_endings
):
word = word[:-1]
# Step 2
e_removed = False
if word[-1:] == 'e':
if len(word[r1_start:]) >= 1 and word[-2] not in self._vowels:
word = self._undouble(word[:-1])
e_removed = True
# Step 3a
if word[-4:] == 'heid':
if len(word[r2_start:]) >= 4 and word[-5] != 'c':
word = word[:-4]
if word[-2:] == 'en':
if len(word[r1_start:]) >= 2 and (
word[-3] not in self._vowels and word[-5:-2] != 'gem'
):
word = self._undouble(word[:-2])
# Step 3b
if word[-4:] == 'lijk':
if len(word[r2_start:]) >= 4:
word = word[:-4]
# Repeat step 2
if word[-1:] == 'e':
if (
len(word[r1_start:]) >= 1
and word[-2] not in self._vowels
):
word = self._undouble(word[:-1])
elif word[-4:] == 'baar':
if len(word[r2_start:]) >= 4:
word = word[:-4]
elif word[-3:] in ('end', 'ing'):
if len(word[r2_start:]) >= 3:
word = word[:-3]
if (
word[-2:] == 'ig'
and len(word[r2_start:]) >= 2
and word[-3] != 'e'
):
word = word[:-2]
else:
word = self._undouble(word)
elif word[-3:] == 'bar':
if len(word[r2_start:]) >= 3 and e_removed:
word = word[:-3]
elif word[-2:] == 'ig':
if len(word[r2_start:]) >= 2 and word[-3] != 'e':
word = word[:-2]
# Step 4
if (
len(word) >= 4
and word[-3] == word[-2]
and word[-2] in {'a', 'e', 'o', 'u'}
and word[-4] not in self._vowels
and word[-1] not in self._vowels
and word[-1] != 'I'
):
word = word[:-2] + word[-1]
# Change 'Y' and 'I' back to lowercase if they survived stemming
for i in range(0, len(word)):
if word[i] == 'Y':
word = word[:i] + 'y' + word[i + 1 :]
elif word[i] == 'I':
word = word[:i] + 'i' + word[i + 1 :]
return word
|
def soundex(word, max_length=4, var='American', reverse=False, zero_pad=True):
"""Return the Soundex code for a word.
This is a wrapper for :py:meth:`Soundex.encode`.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 4)
var : str
The variant of the algorithm to employ (defaults to ``American``):
- ``American`` follows the American Soundex algorithm, as described
at :cite:`US:2007` and in :cite:`Knuth:1998`; this is also called
Miracode
- ``special`` follows the rules from the 1880-1910 US Census
retrospective re-analysis, in which h & w are not treated as
blocking consonants but as vowels. Cf. :cite:`Repici:2013`.
- ``Census`` follows the rules laid out in GIL 55 :cite:`US:1997`
by the US Census, including coding prefixed and unprefixed
versions of some names
reverse : bool
Reverse the word before computing the selected Soundex (defaults to
False); This results in "Reverse Soundex", which is useful for blocking
in cases where the initial elements may be in error.
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length string
Returns
-------
str or tuple
The Soundex value; in ``Census`` mode, a tuple of two codes (with and
without the recognized name prefix) is returned when such a prefix is
present
Examples
--------
>>> soundex("Christopher")
'C623'
>>> soundex("Niall")
'N400'
>>> soundex('Smith')
'S530'
>>> soundex('Schmidt')
'S530'
>>> soundex('Christopher', max_length=-1)
'C623160000000000000000000000000000000000000000000000000000000000'
>>> soundex('Christopher', max_length=-1, zero_pad=False)
'C62316'
>>> soundex('Christopher', reverse=True)
'R132'
>>> soundex('Ashcroft')
'A261'
>>> soundex('Asicroft')
'A226'
>>> soundex('Ashcroft', var='special')
'A226'
>>> soundex('Asicroft', var='special')
'A226'
"""
return Soundex().encode(word, max_length, var, reverse, zero_pad)
|
def encode(
self, word, max_length=4, var='American', reverse=False, zero_pad=True
):
"""Return the Soundex code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 4)
var : str
The variant of the algorithm to employ (defaults to ``American``):
- ``American`` follows the American Soundex algorithm, as
described at :cite:`US:2007` and in :cite:`Knuth:1998`; this
is also called Miracode
- ``special`` follows the rules from the 1880-1910 US Census
retrospective re-analysis, in which h & w are not treated as
blocking consonants but as vowels. Cf. :cite:`Repici:2013`.
- ``Census`` follows the rules laid out in GIL 55
:cite:`US:1997` by the US Census, including coding prefixed
and unprefixed versions of some names
reverse : bool
Reverse the word before computing the selected Soundex (defaults to
False). This results in "Reverse Soundex", which is useful for
blocking in cases where the initial elements may be in error.
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length
string
Returns
-------
str or tuple
The Soundex value; in ``Census`` mode, a tuple of two codes (with and
without the recognized name prefix) is returned when such a prefix is
present
Examples
--------
>>> pe = Soundex()
>>> pe.encode("Christopher")
'C623'
>>> pe.encode("Niall")
'N400'
>>> pe.encode('Smith')
'S530'
>>> pe.encode('Schmidt')
'S530'
>>> pe.encode('Christopher', max_length=-1)
'C623160000000000000000000000000000000000000000000000000000000000'
>>> pe.encode('Christopher', max_length=-1, zero_pad=False)
'C62316'
>>> pe.encode('Christopher', reverse=True)
'R132'
>>> pe.encode('Ashcroft')
'A261'
>>> pe.encode('Asicroft')
'A226'
>>> pe.encode('Ashcroft', var='special')
'A226'
>>> pe.encode('Asicroft', var='special')
'A226'
"""
# Require a max_length of at least 4 and not more than 64
if max_length != -1:
max_length = min(max(4, max_length), 64)
else:
max_length = 64
# uppercase, normalize, decompose, and filter non-A-Z out
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.replace('ß', 'SS')
if var == 'Census':
if word[:3] in {'VAN', 'CON'} and len(word) > 4:
return (
soundex(word, max_length, 'American', reverse, zero_pad),
soundex(
word[3:], max_length, 'American', reverse, zero_pad
),
)
if word[:2] in {'DE', 'DI', 'LA', 'LE'} and len(word) > 3:
return (
soundex(word, max_length, 'American', reverse, zero_pad),
soundex(
word[2:], max_length, 'American', reverse, zero_pad
),
)
# Otherwise, proceed as usual (var='American' mode, ostensibly)
word = ''.join(c for c in word if c in self._uc_set)
# Nothing to convert, return base case
if not word:
if zero_pad:
return '0' * max_length
return '0'
# Reverse word if computing Reverse Soundex
if reverse:
word = word[::-1]
# apply the Soundex algorithm
sdx = word.translate(self._trans)
if var == 'special':
sdx = sdx.replace('9', '0') # special rule for 1880-1910 census
else:
sdx = sdx.replace('9', '') # rule 1
sdx = self._delete_consecutive_repeats(sdx) # rule 3
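# rule 2: the first letter is output as itself rather than coded; H and
# W map to the discarded code 9, so the letter is prepended, while any
# other initial replaces the first code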
if word[0] in 'HW':
sdx = word[0] + sdx
else:
sdx = word[0] + sdx[1:]
sdx = sdx.replace('0', '') # rule 1
if zero_pad:
sdx += '0' * max_length # rule 4
return sdx[:max_length]
|
def fingerprint(self, phrase, joiner=' '):
"""Return string fingerprint.
Parameters
----------
phrase : str
The string from which to calculate the fingerprint
joiner : str
The string that will be placed between each word
Returns
-------
str
The fingerprint of the phrase
Example
-------
>>> sf = String()
>>> sf.fingerprint('The quick brown fox jumped over the lazy dog.')
'brown dog fox jumped lazy over quick the'
"""
phrase = unicode_normalize('NFKD', text_type(phrase.strip().lower()))
phrase = ''.join([c for c in phrase if c.isalnum() or c.isspace()])
phrase = joiner.join(sorted(list(set(phrase.split()))))
return phrase
|
def encode(self, word):
"""Return the Russell Index (integer output) of a word.
Parameters
----------
word : str
The word to transform
Returns
-------
int
The Russell Index value (NaN if the word contains no codable
characters)
Examples
--------
>>> pe = RussellIndex()
>>> pe.encode('Christopher')
3813428
>>> pe.encode('Niall')
715
>>> pe.encode('Smith')
3614
>>> pe.encode('Schmidt')
3614
"""
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.replace('ß', 'SS')
word = word.replace('GH', '') # discard gh (rule 3)
word = word.rstrip('SZ') # discard /[sz]$/ (rule 3)
# translate according to Russell's mapping
word = ''.join(c for c in word if c in self._uc_set)
sdx = word.translate(self._trans)
# remove any 1s after the first occurrence
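# (str.find() returns -1 when '1' is absent, so one is 0 and falsy)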
one = sdx.find('1') + 1
if one:
sdx = sdx[:one] + ''.join(c for c in sdx[one:] if c != '1')
# remove repeating characters
sdx = self._delete_consecutive_repeats(sdx)
# return as an int
return int(sdx) if sdx else float('NaN')
|
def ipa_to_features(ipa):
"""Convert IPA to features.
This translates an IPA string of one or more phones to a list of ints
representing the features of the string.
Parameters
----------
ipa : str
The IPA representation of a phone or series of phones
Returns
-------
list of ints
A representation of the features of the input string
Examples
--------
>>> ipa_to_features('mut')
[2709662981243185770, 1825831513894594986, 2783230754502126250]
>>> ipa_to_features('fon')
[2781702983095331242, 1825831531074464170, 2711173160463936106]
>>> ipa_to_features('telz')
[2783230754502126250, 1826957430176000426, 2693158761954453926,
2783230754501863834]
"""
features = []
pos = 0
ipa = normalize('NFD', text_type(ipa.lower()))
maxsymlen = max(len(_) for _ in _PHONETIC_FEATURES)
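# greedy longest-match: at each position, try feature-table keys from
# the longest down to single characters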
while pos < len(ipa):
found_match = False
for i in range(maxsymlen, 0, -1):
if (
pos + i - 1 <= len(ipa)
and ipa[pos : pos + i] in _PHONETIC_FEATURES
):
features.append(_PHONETIC_FEATURES[ipa[pos : pos + i]])
pos += i
found_match = True
break  # stop at the longest match; rescan from the new position
if not found_match:
features.append(-1)
pos += 1
return features
|
def get_feature(vector, feature):
"""Get a feature vector.
This returns a list of ints, equal in length to the vector input,
representing presence/absence/neutrality with respect to a particular
phonetic feature.
Parameters
----------
vector : list
A tuple or list of ints representing the phonetic features of a phone
or series of phones (such as is returned by the ipa_to_features
function)
feature : str
A feature name from the set:
- ``consonantal``
- ``sonorant``
- ``syllabic``
- ``labial``
- ``round``
- ``coronal``
- ``anterior``
- ``distributed``
- ``dorsal``
- ``high``
- ``low``
- ``back``
- ``tense``
- ``pharyngeal``
- ``ATR``
- ``voice``
- ``spread_glottis``
- ``constricted_glottis``
- ``continuant``
- ``strident``
- ``lateral``
- ``delayed_release``
- ``nasal``
Returns
-------
list of ints
A list indicating presence/absence/neutrality with respect to the
feature
Raises
------
AttributeError
feature must be one of ...
Examples
--------
>>> tails = ipa_to_features('telz')
>>> get_feature(tails, 'consonantal')
[1, -1, 1, 1]
>>> get_feature(tails, 'sonorant')
[-1, 1, 1, -1]
>>> get_feature(tails, 'nasal')
[-1, -1, -1, -1]
>>> get_feature(tails, 'coronal')
[1, -1, 1, 1]
"""
# :param bool binary: if False, -1, 0, & 1 represent -, 0, & +
# if True, only binary oppositions are allowed:
# 0 & 1 represent - & + and 0s are mapped to -
if feature not in _FEATURE_MASK:
raise AttributeError(
"feature must be one of: '"
+ "', '".join(
(
'consonantal',
'sonorant',
'syllabic',
'labial',
'round',
'coronal',
'anterior',
'distributed',
'dorsal',
'high',
'low',
'back',
'tense',
'pharyngeal',
'ATR',
'voice',
'spread_glottis',
'constricted_glottis',
'continuant',
'strident',
'lateral',
'delayed_release',
'nasal',
)
)
+ "'"
)
# each feature mask contains two bits, one each for - and +
mask = _FEATURE_MASK[feature]
# the lower bit represents +
pos_mask = mask >> 1
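# decode each phone: NaN for unknown (-1) bundles; otherwise the two
# mask bits yield 0 (unspecified), 2 (both + and -), 1 (+), or -1 (-)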
retvec = []
for char in vector:
if char < 0:
retvec.append(float('NaN'))
else:
masked = char & mask
if masked == 0:
retvec.append(0) # 0
elif masked == mask:
retvec.append(2) # +/-
elif masked & pos_mask:
retvec.append(1) # +
else:
retvec.append(-1) # -
return retvec
|
def cmp_features(feat1, feat2):
"""Compare features.
This returns a number in the range [0, 1] representing a comparison of two
feature bundles.
If one of the bundles is negative, -1 is returned (for unknown values)
If the bundles are identical, 1 is returned.
If they are inverses of one another, 0 is returned.
Otherwise, a float representing their similarity is returned.
Parameters
----------
feat1 : int
A feature bundle
feat2 : int
A feature bundle
Returns
-------
float
A comparison of the feature bundles
Examples
--------
>>> cmp_features(ipa_to_features('l')[0], ipa_to_features('l')[0])
1.0
>>> cmp_features(ipa_to_features('l')[0], ipa_to_features('n')[0])
0.8709677419354839
>>> cmp_features(ipa_to_features('l')[0], ipa_to_features('z')[0])
0.8709677419354839
>>> cmp_features(ipa_to_features('l')[0], ipa_to_features('i')[0])
0.564516129032258
"""
if feat1 < 0 or feat2 < 0:
return -1.0
if feat1 == feat2:
return 1.0
magnitude = len(_FEATURE_MASK)
featxor = feat1 ^ feat2
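# count differing bits; each of the magnitude features occupies two
# bits (one for -, one for +), hence the 2 * magnitude denominator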
diffbits = 0
while featxor:
if featxor & 0b1:
diffbits += 1
featxor >>= 1
return 1 - (diffbits / (2 * magnitude))
|
def sim(self, src, tar):
"""Return the length similarity of two strings.
Length similarity is the ratio of the length of the shorter string to
the longer.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Length similarity
Examples
--------
>>> cmp = Length()
>>> cmp.sim('cat', 'hat')
1.0
>>> cmp.sim('Niall', 'Neil')
0.8
>>> cmp.sim('aluminum', 'Catalan')
0.875
>>> cmp.sim('ATCG', 'TAGC')
1.0
"""
if src == tar:
return 1.0
if not src or not tar:
return 0.0
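# ratio of the shorter string's length to the longer's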
return (
len(src) / len(tar) if len(src) < len(tar) else len(tar) / len(src)
)
|
def encode(self, word, version=2):
"""Return the Caverphone code for a word.
Parameters
----------
word : str
The word to transform
version : int
The version of Caverphone to employ for encoding (defaults to 2)
Returns
-------
str
The Caverphone value
Examples
--------
>>> pe = Caverphone()
>>> pe.encode('Christopher')
'KRSTFA1111'
>>> pe.encode('Niall')
'NA11111111'
>>> pe.encode('Smith')
'SMT1111111'
>>> pe.encode('Schmidt')
'SKMT111111'
>>> pe.encode('Christopher', 1)
'KRSTF1'
>>> pe.encode('Niall', 1)
'N11111'
>>> pe.encode('Smith', 1)
'SMT111'
>>> pe.encode('Schmidt', 1)
'SKMT11'
"""
word = word.lower()
word = ''.join(c for c in word if c in self._lc_set)
def _squeeze_replace(word, char):
"""Convert strings of char in word to one instance.
Parameters
----------
word : str
The partially converted word
char : str
A character to 'squeeze'
Returns
-------
str
The word with instances of char squeezed down to one
"""
while char * 2 in word:
word = word.replace(char * 2, char)
return word.replace(char, char.upper())
# the main replacement algorithm
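# during rewriting, '2' marks a segment to be deleted, '3' is a vowel
# placeholder, and uppercase letters are finalized output symbols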
if version != 1 and word[-1:] == 'e':
word = word[:-1]
if word:
if word[:5] == 'cough':
word = 'cou2f' + word[5:]
if word[:5] == 'rough':
word = 'rou2f' + word[5:]
if word[:5] == 'tough':
word = 'tou2f' + word[5:]
if word[:6] == 'enough':
word = 'enou2f' + word[6:]
if version != 1 and word[:6] == 'trough':
word = 'trou2f' + word[6:]
if word[:2] == 'gn':
word = '2n' + word[2:]
if word[-2:] == 'mb':
word = word[:-1] + '2'
for src, tar in (
('cq', '2q'),
('ci', 'si'),
('ce', 'se'),
('cy', 'sy'),
('tch', '2ch'),
('c', 'k'),
('q', 'k'),
('x', 'k'),
('v', 'f'),
('dg', '2g'),
('tio', 'sio'),
('tia', 'sia'),
('d', 't'),
('ph', 'fh'),
('b', 'p'),
('sh', 's2'),
('z', 's'),
):
word = word.replace(src, tar)
if word[0] in self._lc_v_set:
word = 'A' + word[1:]
for vowel in 'aeiou':
word = word.replace(vowel, '3')
if version != 1:
word = word.replace('j', 'y')
if word[:2] == 'y3':
word = 'Y3' + word[2:]
if word[:1] == 'y':
word = 'A' + word[1:]
word = word.replace('y', '3')
for src, tar in (('3gh3', '3kh3'), ('gh', '22'), ('g', 'k')):
word = word.replace(src, tar)
for char in 'stpkfmn':
word = _squeeze_replace(word, char)
word = word.replace('w3', 'W3')
if version == 1:
word = word.replace('wy', 'Wy')
word = word.replace('wh3', 'Wh3')
if version == 1:
word = word.replace('why', 'Why')
if version != 1 and word[-1:] == 'w':
word = word[:-1] + '3'
word = word.replace('w', '2')
if word[:1] == 'h':
word = 'A' + word[1:]
word = word.replace('h', '2')
word = word.replace('r3', 'R3')
if version == 1:
word = word.replace('ry', 'Ry')
if version != 1 and word[-1:] == 'r':
word = word[:-1] + '3'
word = word.replace('r', '2')
word = word.replace('l3', 'L3')
if version == 1:
word = word.replace('ly', 'Ly')
if version != 1 and word[-1:] == 'l':
word = word[:-1] + '3'
word = word.replace('l', '2')
if version == 1:
word = word.replace('j', 'y')
word = word.replace('y3', 'Y3')
word = word.replace('y', '2')
word = word.replace('2', '')
if version != 1 and word[-1:] == '3':
word = word[:-1] + 'A'
word = word.replace('3', '')
# pad with 1s, then extract the necessary length of code
word += '1' * 10
if version != 1:
word = word[:10]
else:
word = word[:6]
return word
|
def hmean(nums):
r"""Return harmonic mean.
The harmonic mean is defined as:
:math:`\frac{|nums|}{\sum\limits_{i}\frac{1}{nums_i}}`
Following the behavior of Wolfram|Alpha:
- If one of the values in nums is 0, return 0.
- If more than one value in nums is 0, return NaN.
Cf. https://en.wikipedia.org/wiki/Harmonic_mean
Parameters
----------
nums : list
A series of numbers
Returns
-------
float
The harmonic mean of nums
Raises
------
AttributeError
hmean requires at least one value
Examples
--------
>>> hmean([1, 2, 3, 4])
1.9200000000000004
>>> hmean([1, 2])
1.3333333333333333
>>> hmean([0, 5, 1000])
0
"""
if len(nums) < 1:
raise AttributeError('hmean requires at least one value')
elif len(nums) == 1:
return nums[0]
else:
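# for-else: the else clause runs only if the loop never breaks, i.e.
# when all values are equal, in which case that value is the mean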
for i in range(1, len(nums)):
if nums[0] != nums[i]:
break
else:
return nums[0]
if 0 in nums:
if nums.count(0) > 1:
return float('nan')
return 0
return len(nums) / sum(1 / i for i in nums)
|
def lmean(nums):
r"""Return logarithmic mean.
The logarithmic mean of an arbitrarily long series is defined by
http://www.survo.fi/papers/logmean.pdf
as:
:math:`L(x_1, x_2, ..., x_n) =
(n-1)! \sum\limits_{i=1}^n \frac{x_i}
{\prod\limits_{\substack{j = 1\\j \ne i}}^n
\ln \frac{x_i}{x_j}}`
Cf. https://en.wikipedia.org/wiki/Logarithmic_mean
Parameters
----------
nums : list
A series of numbers
Returns
-------
float
The logarithmic mean of nums
Raises
------
AttributeError
No two values in the nums list may be equal
Examples
--------
>>> lmean([1, 2, 3, 4])
2.2724242417489258
>>> lmean([1, 2])
1.4426950408889634
"""
if len(nums) != len(set(nums)):
raise AttributeError('No two values in the nums list may be equal')
rolling_sum = 0
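# accumulate x_i / prod_{j != i} ln(x_i / x_j); scale by (n-1)! at the end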
for i in range(len(nums)):
rolling_prod = 1
for j in range(len(nums)):
if i != j:
rolling_prod *= math.log(nums[i] / nums[j])
rolling_sum += nums[i] / rolling_prod
return math.factorial(len(nums) - 1) * rolling_sum
|
def imean(nums):
r"""Return identric (exponential) mean.
The identric mean of two numbers x and y is:
x if x = y
otherwise :math:`\frac{1}{e} \sqrt[x-y]{\frac{x^x}{y^y}}`
Cf. https://en.wikipedia.org/wiki/Identric_mean
Parameters
----------
nums : list
A series of numbers
Returns
-------
float
The identric mean of nums
Raises
------
AttributeError
imean supports no more than two values
Examples
--------
>>> imean([1, 2])
1.4715177646857693
>>> imean([1, 0])
nan
>>> imean([2, 4])
2.9430355293715387
"""
if len(nums) == 1:
return nums[0]
if len(nums) > 2:
raise AttributeError('imean supports no more than two values')
if nums[0] <= 0 or nums[1] <= 0:
return float('NaN')
elif nums[0] == nums[1]:
return nums[0]
return (1 / math.e) * (nums[0] ** nums[0] / nums[1] ** nums[1]) ** (
1 / (nums[0] - nums[1])
)
|
def seiffert_mean(nums):
r"""Return Seiffert's mean.
Seiffert's mean of two numbers x and y is:
:math:`\frac{x - y}{4 \cdot arctan \sqrt{\frac{x}{y}} - \pi}`
It is defined in :cite:`Seiffert:1993`.
Parameters
----------
nums : list
A series of numbers
Returns
-------
float
Sieffert's mean of nums
Raises
------
AttributeError
seiffert_mean supports no more than two values
Examples
--------
>>> seiffert_mean([1, 2])
1.4712939827611637
>>> seiffert_mean([1, 0])
0.3183098861837907
>>> seiffert_mean([2, 4])
2.9425879655223275
>>> seiffert_mean([2, 1000])
336.84053300118825
"""
if len(nums) == 1:
return nums[0]
if len(nums) > 2:
raise AttributeError('seiffert_mean supports no more than two values')
if nums[0] + nums[1] == 0 or nums[0] - nums[1] == 0:
return float('NaN')
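# equivalent arcsin form of the definition above:
# (x - y) / (2 * arcsin((x - y) / (x + y)))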
return (nums[0] - nums[1]) / (
2 * math.asin((nums[0] - nums[1]) / (nums[0] + nums[1]))
)
|
def lehmer_mean(nums, exp=2):
r"""Return Lehmer mean.
The Lehmer mean is:
:math:`\frac{\sum\limits_i{x_i^p}}{\sum\limits_i{x_i^{p-1}}}`
Cf. https://en.wikipedia.org/wiki/Lehmer_mean
Parameters
----------
nums : list
A series of numbers
exp : numeric
The exponent of the Lehmer mean
Returns
-------
float
The Lehmer mean of nums for the given exponent
Examples
--------
>>> lehmer_mean([1, 2, 3, 4])
3.0
>>> lehmer_mean([1, 2])
1.6666666666666667
>>> lehmer_mean([0, 5, 1000])
995.0497512437811
"""
return sum(x ** exp for x in nums) / sum(x ** (exp - 1) for x in nums)
|
def heronian_mean(nums):
r"""Return Heronian mean.
The Heronian mean is:
:math:`\frac{\sum\limits_{i, j}\sqrt{{x_i \cdot x_j}}}
{|nums| \cdot \frac{|nums| + 1}{2}}`
for :math:`j \ge i`
Cf. https://en.wikipedia.org/wiki/Heronian_mean
Parameters
----------
nums : list
A series of numbers
Returns
-------
float
The Heronian mean of nums
Examples
--------
>>> heronian_mean([1, 2, 3, 4])
2.3888282852609093
>>> heronian_mean([1, 2])
1.4714045207910316
>>> heronian_mean([0, 5, 1000])
179.28511301977582
"""
mag = len(nums)
rolling_sum = 0
for i in range(mag):
for j in range(i, mag):
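# equal values contribute themselves directly, since sqrt(x * x) == x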
if nums[i] == nums[j]:
rolling_sum += nums[i]
else:
rolling_sum += (nums[i] * nums[j]) ** 0.5
return rolling_sum * 2 / (mag * (mag + 1))
|
def hoelder_mean(nums, exp=2):
r"""Return Hölder (power/generalized) mean.
The Hölder mean is defined as:
:math:`\sqrt[p]{\frac{1}{|nums|} \cdot \sum\limits_i{x_i^p}}`
for :math:`p \ne 0`, and the geometric mean for :math:`p = 0`
Cf. https://en.wikipedia.org/wiki/Generalized_mean
Parameters
----------
nums : list
A series of numbers
exp : numeric
The exponent of the Hölder mean
Returns
-------
float
The Hölder mean of nums for the given exponent
Examples
--------
>>> hoelder_mean([1, 2, 3, 4])
2.7386127875258306
>>> hoelder_mean([1, 2])
1.5811388300841898
>>> hoelder_mean([0, 5, 1000])
577.3574860228857
"""
if exp == 0:
return gmean(nums)
return ((1 / len(nums)) * sum(i ** exp for i in nums)) ** (1 / exp)
|
def agmean(nums):
"""Return arithmetic-geometric mean.
Iterates between arithmetic & geometric means until they converge to
a single value (rounded to 12 digits).
Cf. https://en.wikipedia.org/wiki/Arithmetic-geometric_mean
Parameters
----------
nums : list
A series of numbers
Returns
-------
float
The arithmetic-geometric mean of nums
Examples
--------
>>> agmean([1, 2, 3, 4])
2.3545004777751077
>>> agmean([1, 2])
1.4567910310469068
>>> agmean([0, 5, 1000])
2.9753977059954195e-13
"""
m_a = amean(nums)
m_g = gmean(nums)
if math.isnan(m_a) or math.isnan(m_g):
return float('nan')
while round(m_a, 12) != round(m_g, 12):
m_a, m_g = (m_a + m_g) / 2, (m_a * m_g) ** (1 / 2)
return m_a
|
def ghmean(nums):
"""Return geometric-harmonic mean.
Iterates between geometric & harmonic means until they converge to
a single value (rounded to 12 digits).
Cf. https://en.wikipedia.org/wiki/Geometric-harmonic_mean
Parameters
----------
nums : list
A series of numbers
Returns
-------
float
The geometric-harmonic mean of nums
Examples
--------
>>> ghmean([1, 2, 3, 4])
2.058868154613003
>>> ghmean([1, 2])
1.3728805006183502
>>> ghmean([0, 5, 1000])
0.0
>>> ghmean([0, 0])
0.0
>>> ghmean([0, 0, 5])
nan
"""
m_g = gmean(nums)
m_h = hmean(nums)
if math.isnan(m_g) or math.isnan(m_h):
return float('nan')
while round(m_h, 12) != round(m_g, 12):
m_g, m_h = (m_g * m_h) ** (1 / 2), (2 * m_g * m_h) / (m_g + m_h)
return m_g
|
def aghmean(nums):
"""Return arithmetic-geometric-harmonic mean.
Iterates over arithmetic, geometric, & harmonic means until they
converge to a single value (rounded to 12 digits), following the
method described in :cite:`Raissouli:2009`.
Parameters
----------
nums : list
A series of numbers
Returns
-------
float
The arithmetic-geometric-harmonic mean of nums
Examples
--------
>>> aghmean([1, 2, 3, 4])
2.198327159900212
>>> aghmean([1, 2])
1.4142135623731884
>>> aghmean([0, 5, 1000])
335.0
"""
m_a = amean(nums)
m_g = gmean(nums)
m_h = hmean(nums)
if math.isnan(m_a) or math.isnan(m_g) or math.isnan(m_h):
return float('nan')
# iterate until all three means agree to 12 digits (`or`, not `and`,
# so that one pair matching cannot end the loop before full convergence)
while round(m_a, 12) != round(m_g, 12) or round(m_g, 12) != round(
m_h, 12
):
m_a, m_g, m_h = (
(m_a + m_g + m_h) / 3,
(m_a * m_g * m_h) ** (1 / 3),
3 / (1 / m_a + 1 / m_g + 1 / m_h),
)
return m_a
|
def median(nums):
"""Return median.
With numbers sorted by value, the median is the middle value (if there is
an odd number of values) or the arithmetic mean of the two middle values
(if there is an even number of values).
Cf. https://en.wikipedia.org/wiki/Median
Parameters
----------
nums : list
A series of numbers
Returns
-------
int or float
The median of nums
Examples
--------
>>> median([1, 2, 3])
2
>>> median([1, 2, 3, 4])
2.5
>>> median([1, 2, 2, 4])
2
"""
nums = sorted(nums)
mag = len(nums)
if mag % 2:
mag = int((mag - 1) / 2)
return nums[mag]
mag = int(mag / 2)
med = (nums[mag - 1] + nums[mag]) / 2
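# report a whole-number average as an int (e.g. median([1, 2, 2, 4]) == 2)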
return med if not med.is_integer() else int(med)
|
def var(nums, mean_func=amean, ddof=0):
r"""Calculate the variance.
The variance (:math:`\sigma^2`) of a series of numbers (:math:`x_i`) with
mean :math:`\mu` and population :math:`N` is:
:math:`\sigma^2 = \frac{1}{N}\sum_{i=1}^{N}(x_i-\mu)^2`.
Cf. https://en.wikipedia.org/wiki/Variance
Parameters
----------
nums : list
A series of numbers
mean_func : function
A mean function (amean by default)
ddof : int
The degrees of freedom (0 by default)
Returns
-------
float
The variance of the values in the series
Examples
--------
>>> var([1, 1, 1, 1])
0.0
>>> var([1, 2, 3, 4])
1.25
>>> round(var([1, 2, 3, 4], ddof=1), 12)
1.666666666667
"""
x_bar = mean_func(nums)
return sum((x - x_bar) ** 2 for x in nums) / (len(nums) - ddof)
|
def stem(self, word):
"""Return the stem of a word according to the Schinke stemmer.
Parameters
----------
word : str
The word to stem
Returns
-------
str
Word stem
Examples
--------
>>> stmr = Schinke()
>>> stmr.stem('atque')
{'n': 'atque', 'v': 'atque'}
>>> stmr.stem('census')
{'n': 'cens', 'v': 'censu'}
>>> stmr.stem('virum')
{'n': 'uir', 'v': 'uiru'}
>>> stmr.stem('populusque')
{'n': 'popul', 'v': 'populu'}
>>> stmr.stem('senatus')
{'n': 'senat', 'v': 'senatu'}
"""
word = normalize('NFKD', text_type(word.lower()))
# retain only the 26 basic Latin letters
word = ''.join(c for c in word if c in 'abcdefghijklmnopqrstuvwxyz')
# Rule 2
word = word.replace('j', 'i').replace('v', 'u')
# Rule 3
if word[-3:] == 'que':
# This diverges from the paper by also returning 'que' itself
# unstemmed
if word[:-3] in self._keep_que or word == 'que':
return {'n': word, 'v': word}
else:
word = word[:-3]
# Base case will mean returning the words as is
noun = word
verb = word
# Rule 4
for endlen in range(4, 0, -1):
if word[-endlen:] in self._n_endings[endlen]:
if len(word) - 2 >= endlen:
noun = word[:-endlen]
else:
noun = word
break
for endlen in range(6, 0, -1):
if word[-endlen:] in self._v_endings_strip[endlen]:
if len(word) - 2 >= endlen:
verb = word[:-endlen]
else:
verb = word
break
if word[-endlen:] in self._v_endings_alter[endlen]:
if word[-endlen:] in {
'iuntur',
'erunt',
'untur',
'iunt',
'unt',
}:
new_word = word[:-endlen] + 'i'
addlen = 1
elif word[-endlen:] in {'beris', 'bor', 'bo'}:
new_word = word[:-endlen] + 'bi'
addlen = 2
else:
new_word = word[:-endlen] + 'eri'
addlen = 3
# Technically this diverges from the paper by considering the
# length of the stem without the new suffix
if len(new_word) >= 2 + addlen:
verb = new_word
else:
verb = word
break
return {'n': noun, 'v': verb}
|
def onca(word, max_length=4, zero_pad=True):
"""Return the Oxford Name Compression Algorithm (ONCA) code for a word.
This is a wrapper for :py:meth:`ONCA.encode`.
Parameters
----------
word : str
The word to transform
max_length : int
The maximum length (default 4) of the code to return
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length string
Returns
-------
str
The ONCA code
Examples
--------
>>> onca('Christopher')
'C623'
>>> onca('Niall')
'N400'
>>> onca('Smith')
'S530'
>>> onca('Schmidt')
'S530'
"""
return ONCA().encode(word, max_length, zero_pad)
|
def encode(self, word, max_length=4, zero_pad=True):
"""Return the Oxford Name Compression Algorithm (ONCA) code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The maximum length (default 4) of the code to return
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length
string
Returns
-------
str
The ONCA code
Examples
--------
>>> pe = ONCA()
>>> pe.encode('Christopher')
'C623'
>>> pe.encode('Niall')
'N400'
>>> pe.encode('Smith')
'S530'
>>> pe.encode('Schmidt')
'S530'
"""
# In the most extreme case, 3 characters of NYSIIS input can be
# compressed to one character of output, so give it triple the
# max_length.
return self._soundex.encode(
self._nysiis.encode(word, max_length=max_length * 3),
max_length,
zero_pad=zero_pad,
)
|