def phonix(word, max_length=4, zero_pad=True):
"""Return the Phonix code for a word.
This is a wrapper for :py:meth:`Phonix.encode`.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 4)
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length string
Returns
-------
str
The Phonix value
Examples
--------
>>> phonix('Christopher')
'K683'
>>> phonix('Niall')
'N400'
>>> phonix('Smith')
'S530'
>>> phonix('Schmidt')
'S530'
"""
return Phonix().encode(word, max_length, zero_pad)
|
def encode(self, word, max_length=4, zero_pad=True):
"""Return the Phonix code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 4)
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length
string
Returns
-------
str
The Phonix value
Examples
--------
>>> pe = Phonix()
>>> pe.encode('Christopher')
'K683'
>>> pe.encode('Niall')
'N400'
>>> pe.encode('Smith')
'S530'
>>> pe.encode('Schmidt')
'S530'
"""
def _start_repl(word, src, tar, post=None):
"""Replace src with tar at the start of word.
Parameters
----------
word : str
The word to modify
src : str
Substring to match
tar : str
Substring to substitute
post : set
Following characters
Returns
-------
str
Modified string
"""
if post:
for i in post:
if word.startswith(src + i):
return tar + word[len(src) :]
elif word.startswith(src):
return tar + word[len(src) :]
return word
def _end_repl(word, src, tar, pre=None):
"""Replace src with tar at the end of word.
Parameters
----------
word : str
The word to modify
src : str
Substring to match
tar : str
Substring to substitute
pre : set
Preceding characters
Returns
-------
str
Modified string
"""
if pre:
for i in pre:
if word.endswith(i + src):
return word[: -len(src)] + tar
elif word.endswith(src):
return word[: -len(src)] + tar
return word
def _mid_repl(word, src, tar, pre=None, post=None):
"""Replace src with tar in the middle of word.
Parameters
----------
word : str
The word to modify
src : str
Substring to match
tar : str
Substring to substitute
pre : set
Preceding characters
post : set
Following characters
Returns
-------
str
Modified string
"""
if pre or post:
if not pre:
return word[0] + _all_repl(word[1:], src, tar, pre, post)
elif not post:
return _all_repl(word[:-1], src, tar, pre, post) + word[-1]
return _all_repl(word, src, tar, pre, post)
return (
word[0] + _all_repl(word[1:-1], src, tar, pre, post) + word[-1]
)
def _all_repl(word, src, tar, pre=None, post=None):
"""Replace src with tar anywhere in word.
Parameters
----------
word : str
The word to modify
src : str
Substring to match
tar : str
Substring to substitute
pre : set
Preceding characters
post : set
Following characters
Returns
-------
str
Modified string
"""
if pre or post:
if not post:
post = frozenset(('',))
if not pre:
pre = frozenset(('',))
for i, j in ((i, j) for i in pre for j in post):
word = word.replace(i + src + j, i + tar + j)
return word
else:
return word.replace(src, tar)
repl_at = (_start_repl, _end_repl, _mid_repl, _all_repl)
sdx = ''
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.replace('ß', 'SS')
word = ''.join(c for c in word if c in self._uc_set)
if word:
for trans in self._substitutions:
word = repl_at[trans[0]](word, *trans[1:])
if word[0] in self._uc_vy_set:
sdx = 'v' + word[1:].translate(self._trans)
else:
sdx = word[0] + word[1:].translate(self._trans)
sdx = self._delete_consecutive_repeats(sdx)
sdx = sdx.replace('0', '')
# Clamp max_length to [4, 64]
if max_length != -1:
max_length = min(max(4, max_length), 64)
else:
max_length = 64
if zero_pad:
sdx += '0' * max_length
if not sdx:
sdx = '0'
return sdx[:max_length]
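# The encoder above calls self._delete_consecutive_repeats, a base-class
# helper not shown in this excerpt. A minimal sketch of that helper,
# assuming it does what its name and its uses here suggest:
from itertools import groupby

def _delete_consecutive_repeats(word):
    """Collapse each run of identical characters to a single character."""
    return ''.join(char for char, _ in groupby(word))

# e.g. _delete_consecutive_repeats('S530033') == 'S5303'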
|
def editex(src, tar, cost=(0, 1, 2), local=False):
"""Return the Editex distance between two strings.
This is a wrapper for :py:meth:`Editex.dist_abs`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
cost : tuple
A 3-tuple representing the cost of the three possible edit types: match,
same-group, and mismatch respectively (by default: (0, 1, 2))
local : bool
If True, the local variant of Editex is used
Returns
-------
int
Editex distance
Examples
--------
>>> editex('cat', 'hat')
2
>>> editex('Niall', 'Neil')
2
>>> editex('aluminum', 'Catalan')
12
>>> editex('ATCG', 'TAGC')
6
"""
return Editex().dist_abs(src, tar, cost, local)
|
def dist_editex(src, tar, cost=(0, 1, 2), local=False):
"""Return the normalized Editex distance between two strings.
This is a wrapper for :py:meth:`Editex.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
cost : tuple
A 3-tuple representing the cost of the three possible edit types: match,
same-group, and mismatch respectively (by default: (0, 1, 2))
local : bool
If True, the local variant of Editex is used
Returns
-------
float
Normalized Editex distance
Examples
--------
>>> round(dist_editex('cat', 'hat'), 12)
0.333333333333
>>> round(dist_editex('Niall', 'Neil'), 12)
0.2
>>> dist_editex('aluminum', 'Catalan')
0.75
>>> dist_editex('ATCG', 'TAGC')
0.75
"""
return Editex().dist(src, tar, cost, local)
|
def sim_editex(src, tar, cost=(0, 1, 2), local=False):
"""Return the normalized Editex similarity of two strings.
This is a wrapper for :py:meth:`Editex.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
cost : tuple
A 3-tuple representing the cost of the three possible edit types: match,
same-group, and mismatch respectively (by default: (0, 1, 2))
local : bool
If True, the local variant of Editex is used
Returns
-------
float
Normalized Editex similarity
Examples
--------
>>> round(sim_editex('cat', 'hat'), 12)
0.666666666667
>>> round(sim_editex('Niall', 'Neil'), 12)
0.8
>>> sim_editex('aluminum', 'Catalan')
0.25
>>> sim_editex('ATCG', 'TAGC')
0.25
"""
return Editex().sim(src, tar, cost, local)
|
def dist_abs(self, src, tar, cost=(0, 1, 2), local=False):
"""Return the Editex distance between two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
cost : tuple
A 3-tuple representing the cost of the three possible edit types: match,
same-group, and mismatch respectively (by default: (0, 1, 2))
local : bool
If True, the local variant of Editex is used
Returns
-------
int
Editex distance
Examples
--------
>>> cmp = Editex()
>>> cmp.dist_abs('cat', 'hat')
2
>>> cmp.dist_abs('Niall', 'Neil')
2
>>> cmp.dist_abs('aluminum', 'Catalan')
12
>>> cmp.dist_abs('ATCG', 'TAGC')
6
"""
match_cost, group_cost, mismatch_cost = cost
def r_cost(ch1, ch2):
"""Return r(a,b) according to Zobel & Dart's definition.
Parameters
----------
ch1 : str
The first character to compare
ch2 : str
The second character to compare
Returns
-------
int
r(a,b) according to Zobel & Dart's definition
"""
if ch1 == ch2:
return match_cost
if ch1 in self._all_letters and ch2 in self._all_letters:
for group in self._letter_groups:
if ch1 in group and ch2 in group:
return group_cost
return mismatch_cost
def d_cost(ch1, ch2):
"""Return d(a,b) according to Zobel & Dart's definition.
Parameters
----------
ch1 : str
The first character to compare
ch2 : str
The second character to compare
Returns
-------
int
d(a,b) according to Zobel & Dart's definition
"""
if ch1 != ch2 and (ch1 == 'H' or ch1 == 'W'):
return group_cost
return r_cost(ch1, ch2)
# convert both src & tar to NFKD normalized unicode
src = unicode_normalize('NFKD', text_type(src.upper()))
tar = unicode_normalize('NFKD', text_type(tar.upper()))
# convert ß to SS (for Python2)
src = src.replace('ß', 'SS')
tar = tar.replace('ß', 'SS')
if src == tar:
return 0
if not src:
return len(tar) * mismatch_cost
if not tar:
return len(src) * mismatch_cost
d_mat = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_int)
lens = len(src)
lent = len(tar)
src = ' ' + src
tar = ' ' + tar
if not local:
for i in range(1, lens + 1):
d_mat[i, 0] = d_mat[i - 1, 0] + d_cost(src[i - 1], src[i])
for j in range(1, lent + 1):
d_mat[0, j] = d_mat[0, j - 1] + d_cost(tar[j - 1], tar[j])
for i in range(1, lens + 1):
for j in range(1, lent + 1):
d_mat[i, j] = min(
d_mat[i - 1, j] + d_cost(src[i - 1], src[i]),
d_mat[i, j - 1] + d_cost(tar[j - 1], tar[j]),
d_mat[i - 1, j - 1] + r_cost(src[i], tar[j]),
)
return d_mat[lens, lent]
|
def dist(self, src, tar, cost=(0, 1, 2), local=False):
"""Return the normalized Editex distance between two strings.
The Editex distance is normalized by dividing the Editex distance
(calculated by any of the three supported methods) by the greater of
the number of characters in src times the cost of a delete and
the number of characters in tar times the cost of an insert.
For the case in which all operations have :math:`cost = 1`, this is
equivalent to the greater of the length of the two strings src & tar.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
cost : tuple
A 3-tuple representing the cost of the three possible edit types: match,
same-group, and mismatch respectively (by default: (0, 1, 2))
local : bool
If True, the local variant of Editex is used
Returns
-------
float
Normalized Editex distance
Examples
--------
>>> cmp = Editex()
>>> round(cmp.dist('cat', 'hat'), 12)
0.333333333333
>>> round(cmp.dist('Niall', 'Neil'), 12)
0.2
>>> cmp.dist('aluminum', 'Catalan')
0.75
>>> cmp.dist('ATCG', 'TAGC')
0.75
"""
if src == tar:
return 0.0
mismatch_cost = cost[2]
return self.dist_abs(src, tar, cost, local) / (
max(len(src) * mismatch_cost, len(tar) * mismatch_cost)
)
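# Worked example of the normalization above with the default costs
# (0, 1, 2): dist_abs('cat', 'hat') is 2, both strings have length 3, and
# the mismatch cost is 2, so the denominator is max(3 * 2, 3 * 2) == 6.
cmp = Editex()
assert cmp.dist_abs('cat', 'hat') == 2
assert round(cmp.dist('cat', 'hat'), 12) == round(2 / 6, 12)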
|
def position_fingerprint(
word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG, bits_per_letter=3
):
"""Return the position fingerprint.
This is a wrapper for :py:meth:`Position.fingerprint`.
Parameters
----------
word : str
The word to fingerprint
n_bits : int
Number of bits in the fingerprint returned
most_common : list
The most common tokens in the target language, ordered by frequency
bits_per_letter : int
The bits to assign for letter position
Returns
-------
int
The position fingerprint
Examples
--------
>>> bin(position_fingerprint('hat'))
'0b1110100011111111'
>>> bin(position_fingerprint('niall'))
'0b1111110101110010'
>>> bin(position_fingerprint('colin'))
'0b1111111110010111'
>>> bin(position_fingerprint('atcg'))
'0b1110010001111111'
>>> bin(position_fingerprint('entreatment'))
'0b101011111111'
"""
return Position().fingerprint(word, n_bits, most_common, bits_per_letter)
|
def fingerprint(
self,
word,
n_bits=16,
most_common=MOST_COMMON_LETTERS_CG,
bits_per_letter=3,
):
"""Return the position fingerprint.
Parameters
----------
word : str
The word to fingerprint
n_bits : int
Number of bits in the fingerprint returned
most_common : list
The most common tokens in the target language, ordered by frequency
bits_per_letter : int
The bits to assign for letter position
Returns
-------
int
The position fingerprint
Examples
--------
>>> pf = Position()
>>> bin(pf.fingerprint('hat'))
'0b1110100011111111'
>>> bin(pf.fingerprint('niall'))
'0b1111110101110010'
>>> bin(pf.fingerprint('colin'))
'0b1111111110010111'
>>> bin(pf.fingerprint('atcg'))
'0b1110010001111111'
>>> bin(pf.fingerprint('entreatment'))
'0b101011111111'
"""
position = {}
for pos, letter in enumerate(word):
if letter not in position and letter in most_common:
position[letter] = min(pos, 2 ** bits_per_letter - 1)
fingerprint = 0
for letter in most_common:
if n_bits:
fingerprint <<= min(bits_per_letter, n_bits)
if letter in position:
fingerprint += min(position[letter], 2 ** n_bits - 1)
else:
fingerprint += min(
2 ** bits_per_letter - 1, 2 ** n_bits - 1
)
n_bits -= min(bits_per_letter, n_bits)
else:
break
for _ in range(n_bits):
fingerprint <<= 1
fingerprint += 1
return fingerprint
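# Worked example of the packing loop above with a hypothetical two-letter
# alphabet (most_common=['a', 'b'], n_bits=6, bits_per_letter=3) and the
# word 'ba', where 'b' first occurs at position 0 and 'a' at position 1:
#   letter 'a': fingerprint <<= 3 -> 0b000;    += min(1, 63) -> 0b000001
#   letter 'b': fingerprint <<= 3 -> 0b001000; += min(0, 7)  -> 0b001000
pf = Position()
assert pf.fingerprint('ba', n_bits=6, most_common=['a', 'b'],
                      bits_per_letter=3) == 0b001000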
|
def stem(self, word):
"""Return Caumanns German stem.
Parameters
----------
word : str
The word to stem
Returns
-------
str
Word stem
Examples
--------
>>> stmr = Caumanns()
>>> stmr.stem('lesen')
'les'
>>> stmr.stem('graues')
'grau'
>>> stmr.stem('buchstabieren')
'buchstabier'
"""
if not word:
return ''
upper_initial = word[0].isupper()
word = normalize('NFC', text_type(word.lower()))
# # Part 2: Substitution
# 1. Change umlauts to corresponding vowels & ß to ss
word = word.translate(self._umlauts)
word = word.replace('ß', 'ss')
# 2. Change second of doubled characters to *
new_word = word[0]
for i in range(1, len(word)):
if new_word[i - 1] == word[i]:
new_word += '*'
else:
new_word += word[i]
word = new_word
# 3. Replace sch, ch, ei, ie, ig, st with $, §, %, &, #, !
word = word.replace('sch', '$')
word = word.replace('ch', '§')
word = word.replace('ei', '%')
word = word.replace('ie', '&')
word = word.replace('ig', '#')
word = word.replace('st', '!')
# # Part 1: Recursive Context-Free Stripping
# 1. Remove the following 7 suffixes recursively
while len(word) > 3:
if (len(word) > 4 and word[-2:] in {'em', 'er'}) or (
len(word) > 5 and word[-2:] == 'nd'
):
word = word[:-2]
elif (word[-1] in {'e', 's', 'n'}) or (
not upper_initial and word[-1] in {'t', '!'}
):
word = word[:-1]
else:
break
# Additional optimizations:
if len(word) > 5 and word[-5:] == 'erin*':
word = word[:-1]
if word[-1] == 'z':
word = word[:-1] + 'x'
# Reverse substitutions:
word = word.replace('$', 'sch')
word = word.replace('§', 'ch')
word = word.replace('%', 'ei')
word = word.replace('&', 'ie')
word = word.replace('#', 'ig')
word = word.replace('!', 'st')
# Expand doubled
word = ''.join(
[word[0]]
+ [
word[i - 1] if word[i] == '*' else word[i]
for i in range(1, len(word))
]
)
# Finally, convert gege to ge
if len(word) > 4:
word = word.replace('gege', 'ge', 1)
return word
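# Worked trace of the pipeline above for 'lesen': no umlauts, doubled
# characters, or digraphs are marked, so substitution is a no-op; the
# stripping loop removes final 'n' then 'e' ('lesen' -> 'lese' -> 'les'),
# stops at length 3, and no reverse substitutions apply.
stmr = Caumanns()
assert stmr.stem('lesen') == 'les'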
|
def dist(self, src, tar, probs=None):
"""Return the NCD between two strings using arithmetic coding.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
probs : dict
A dictionary trained with :py:meth:`Arithmetic.train`
Returns
-------
float
Compression distance
Examples
--------
>>> cmp = NCDarith()
>>> cmp.dist('cat', 'hat')
0.5454545454545454
>>> cmp.dist('Niall', 'Neil')
0.6875
>>> cmp.dist('aluminum', 'Catalan')
0.8275862068965517
>>> cmp.dist('ATCG', 'TAGC')
0.6923076923076923
"""
if src == tar:
return 0.0
if probs is None:
# lacking a reasonable dictionary, train on the strings themselves
self._coder.train(src + tar)
else:
self._coder.set_probs(probs)
src_comp = self._coder.encode(src)[1]
tar_comp = self._coder.encode(tar)[1]
concat_comp = self._coder.encode(src + tar)[1]
concat_comp2 = self._coder.encode(tar + src)[1]
return (
min(concat_comp, concat_comp2) - min(src_comp, tar_comp)
) / max(src_comp, tar_comp)
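# The return statement above is the standard normalized compression
# distance, NCD(x, y) = (C(xy) - min(C(x), C(y))) / max(C(x), C(y)), with
# the arithmetic coder as the compressor C. A minimal sketch of the same
# formula with zlib standing in for C (illustrative only, not this class):
import zlib

def ncd_zlib(src, tar):
    """Normalized compression distance using zlib-compressed lengths."""
    if src == tar:
        return 0.0
    src_b, tar_b = src.encode('utf-8'), tar.encode('utf-8')
    src_c, tar_c = len(zlib.compress(src_b)), len(zlib.compress(tar_b))
    concat_c = min(len(zlib.compress(src_b + tar_b)),
                   len(zlib.compress(tar_b + src_b)))
    return (concat_c - min(src_c, tar_c)) / max(src_c, tar_c)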
|
def readfile(fn):
"""Read fn and return the contents.
Parameters
----------
fn : str
A filename
Returns
-------
str
The content of the file
"""
with open(path.join(HERE, fn), 'r', encoding='utf-8') as f:
return f.read()
|
def spanish_metaphone(word, max_length=6, modified=False):
"""Return the Spanish Metaphone of a word.
This is a wrapper for :py:meth:`SpanishMetaphone.encode`.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 6)
modified : bool
Set to True to use del Pilar Angeles & Bailón-Miguel's modified version
of the algorithm
Returns
-------
str
The Spanish Metaphone code
Examples
--------
>>> spanish_metaphone('Perez')
'PRZ'
>>> spanish_metaphone('Martinez')
'MRTNZ'
>>> spanish_metaphone('Gutierrez')
'GTRRZ'
>>> spanish_metaphone('Santiago')
'SNTG'
>>> spanish_metaphone('Nicolás')
'NKLS'
"""
return SpanishMetaphone().encode(word, max_length, modified)
|
def encode(self, word, max_length=6, modified=False):
"""Return the Spanish Metaphone of a word.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 6)
modified : bool
Set to True to use del Pilar Angeles & Bailón-Miguel's modified
version of the algorithm
Returns
-------
str
The Spanish Metaphone code
Examples
--------
>>> pe = SpanishMetaphone()
>>> pe.encode('Perez')
'PRZ'
>>> pe.encode('Martinez')
'MRTNZ'
>>> pe.encode('Gutierrez')
'GTRRZ'
>>> pe.encode('Santiago')
'SNTG'
>>> pe.encode('Nicolás')
'NKLS'
"""
def _is_vowel(pos):
"""Return True if the character at word[pos] is a vowel.
Parameters
----------
pos : int
Position to check for a vowel
Returns
-------
bool
True if word[pos] is a vowel
"""
return pos < len(word) and word[pos] in {'A', 'E', 'I', 'O', 'U'}
word = unicode_normalize('NFC', text_type(word.upper()))
meta_key = ''
pos = 0
# do some replacements for the modified version
if modified:
word = word.replace('MB', 'NB')
word = word.replace('MP', 'NP')
word = word.replace('BS', 'S')
if word[:2] == 'PS':
word = word[1:]
# simple replacements
word = word.replace('Á', 'A')
word = word.replace('CH', 'X')
word = word.replace('Ç', 'S')
word = word.replace('É', 'E')
word = word.replace('Í', 'I')
word = word.replace('Ó', 'O')
word = word.replace('Ú', 'U')
word = word.replace('Ñ', 'NY')
word = word.replace('GÜ', 'W')
word = word.replace('Ü', 'U')
word = word.replace('B', 'V')
word = word.replace('LL', 'Y')
while len(meta_key) < max_length:
if pos >= len(word):
break
# get the next character
current_char = word[pos]
# if a vowel in pos 0, add to key
if _is_vowel(pos) and pos == 0:
meta_key += current_char
pos += 1
# otherwise, do consonant rules
else:
# simple consonants (unmutated)
if current_char in {
'D',
'F',
'J',
'K',
'M',
'N',
'P',
'T',
'V',
'L',
'Y',
}:
meta_key += current_char
# skip doubled consonants
if word[pos + 1 : pos + 2] == current_char:
pos += 2
else:
pos += 1
else:
if current_char == 'C':
# special case 'acción', 'reacción', etc.
if word[pos + 1 : pos + 2] == 'C':
meta_key += 'X'
pos += 2
# special case 'cesar', 'cien', 'cid', 'conciencia'
elif word[pos + 1 : pos + 2] in {'E', 'I'}:
meta_key += 'Z'
pos += 2
# base case
else:
meta_key += 'K'
pos += 1
elif current_char == 'G':
# special case 'gente', 'ecologia', etc.
if word[pos + 1 : pos + 2] in {'E', 'I'}:
meta_key += 'J'
pos += 2
# base case
else:
meta_key += 'G'
pos += 1
elif current_char == 'H':
# since the letter 'H' is silent in Spanish,
# set the meta key to the vowel after the letter 'H'
if _is_vowel(pos + 1):
meta_key += word[pos + 1]
pos += 2
else:
meta_key += 'H'
pos += 1
elif current_char == 'Q':
if word[pos + 1 : pos + 2] == 'U':
pos += 2
else:
pos += 1
meta_key += 'K'
elif current_char == 'W':
meta_key += 'U'
pos += 1
elif current_char == 'R':
meta_key += 'R'
pos += 1
elif current_char == 'S':
if not _is_vowel(pos + 1) and pos == 0:
meta_key += 'ES'
pos += 1
else:
meta_key += 'S'
pos += 1
elif current_char == 'Z':
meta_key += 'Z'
pos += 1
elif current_char == 'X':
if (
len(word) > 1
and pos == 0
and not _is_vowel(pos + 1)
):
meta_key += 'EX'
pos += 1
else:
meta_key += 'X'
pos += 1
else:
pos += 1
# Final change from S to Z in modified version
if modified:
meta_key = meta_key.replace('S', 'Z')
return meta_key
|
def encode(self, word):
"""Return the FONEM code of a word.
Parameters
----------
word : str
The word to transform
Returns
-------
str
The FONEM code
Examples
--------
>>> pe = FONEM()
>>> pe.encode('Marchand')
'MARCHEN'
>>> pe.encode('Beaulieu')
'BOLIEU'
>>> pe.encode('Beaumont')
'BOMON'
>>> pe.encode('Legrand')
'LEGREN'
>>> pe.encode('Pelletier')
'PELETIER'
"""
# normalize, upper-case, and filter non-French letters
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.translate({198: 'AE', 338: 'OE'})
word = ''.join(c for c in word if c in self._uc_set)
for rule in self._rule_order:
regex, repl = self._rule_table[rule]
if isinstance(regex, text_type):
word = word.replace(regex, repl)
else:
word = regex.sub(repl, word)
return word
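# The loop above dispatches on rule type: plain strings are applied with
# str.replace, compiled patterns with re.sub. A minimal sketch of a rule
# table in that shape (rule names and contents are hypothetical, not
# FONEM's published rule set):
import re

_rule_table = {
    'V-1': ('EAU', 'O'),                      # plain-string rule
    'C-1': (re.compile('GU([EI])'), r'G\1'),  # regex rule
}
_rule_order = ('V-1', 'C-1')

word = 'BEAUGUE'
for rule in _rule_order:
    regex, repl = _rule_table[rule]
    if isinstance(regex, str):
        word = word.replace(regex, repl)
    else:
        word = regex.sub(repl, word)
assert word == 'BOGE'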
|
def encode(self, word, max_length=4):
"""Return the Statistics Canada code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The maximum length (default 4) of the code to return
Returns
-------
str
The Statistics Canada name code value
Examples
--------
>>> pe = StatisticsCanada()
>>> pe.encode('Christopher')
'CHRS'
>>> pe.encode('Niall')
'NL'
>>> pe.encode('Smith')
'SMTH'
>>> pe.encode('Schmidt')
'SCHM'
"""
# uppercase, normalize, decompose, and filter non-A-Z out
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.replace('ß', 'SS')
word = ''.join(c for c in word if c in self._uc_set)
if not word:
return ''
code = word[1:]
for vowel in self._uc_vy_set:
code = code.replace(vowel, '')
code = word[0] + code
code = self._delete_consecutive_repeats(code)
code = code.replace(' ', '')
return code[:max_length]
|
def synoname(
src,
tar,
word_approx_min=0.3,
char_approx_min=0.73,
tests=2 ** 12 - 1,
ret_name=False,
):
"""Return the Synoname similarity type of two words.
This is a wrapper for :py:meth:`Synoname.dist_abs`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
word_approx_min : float
The minimum word approximation value to signal a 'word_approx' match
char_approx_min : float
The minimum character approximation value to signal a 'char_approx'
match
tests : int or Iterable
Either an integer indicating tests to perform or a list of test names
to perform (defaults to performing all tests)
ret_name : bool
If True, returns the match name rather than its integer equivalent
Returns
-------
int (or str if ret_name is True)
Synoname value
Examples
--------
>>> synoname(('Breghel', 'Pieter', ''), ('Brueghel', 'Pieter', ''))
2
>>> synoname(('Breghel', 'Pieter', ''), ('Brueghel', 'Pieter', ''),
... ret_name=True)
'omission'
>>> synoname(('Dore', 'Gustave', ''),
... ('Dore', 'Paul Gustave Louis Christophe', ''), ret_name=True)
'inclusion'
>>> synoname(('Pereira', 'I. R.', ''), ('Pereira', 'I. Smith', ''),
... ret_name=True)
'word_approx'
"""
return Synoname().dist_abs(
src, tar, word_approx_min, char_approx_min, tests, ret_name
)
|
def _synoname_strip_punct(self, word):
"""Return a word with punctuation stripped out.
Parameters
----------
word : str
A word to strip punctuation from
Returns
-------
str
The word stripped of punctuation
Examples
--------
>>> pe = Synoname()
>>> pe._synoname_strip_punct('AB;CD EF-GH$IJ')
'ABCD EFGHIJ'
"""
stripped = ''
for char in word:
if char not in set(',-./:;"&\'()!{|}?$%*+<=>[\\]^_`~'):
stripped += char
return stripped.strip()
|
def _synoname_word_approximation(
self, src_ln, tar_ln, src_fn='', tar_fn='', features=None
):
"""Return the Synoname word approximation score for two names.
Parameters
----------
src_ln : str
Last name of the source
tar_ln : str
Last name of the target
src_fn : str
First name of the source (optional)
tar_fn : str
First name of the target (optional)
features : dict
A dict containing special features calculated using
:py:class:`fingerprint.SynonameToolcode` (optional)
Returns
-------
float
The word approximation score
Examples
--------
>>> pe = Synoname()
>>> pe._synoname_word_approximation('Smith Waterman', 'Waterman',
... 'Tom Joe Bob', 'Tom Joe')
0.6
"""
if features is None:
features = {}
if 'src_specials' not in features:
features['src_specials'] = []
if 'tar_specials' not in features:
features['tar_specials'] = []
src_len_specials = len(features['src_specials'])
tar_len_specials = len(features['tar_specials'])
# 1
if ('gen_conflict' in features and features['gen_conflict']) or (
'roman_conflict' in features and features['roman_conflict']
):
return 0
# 3 & 7
full_tar1 = ' '.join((tar_ln, tar_fn)).replace('-', ' ').strip()
for s_pos, s_type in features['tar_specials']:
if s_type == 'a':
full_tar1 = full_tar1[
: -(
1
+ len(
self._stc._synoname_special_table[ # noqa: SF01
s_pos
][1]
)
)
]
elif s_type == 'b':
loc = (
full_tar1.find(
' '
+ self._stc._synoname_special_table[ # noqa: SF01
s_pos
][1]
+ ' '
)
+ 1
)
full_tar1 = (
full_tar1[:loc]
+ full_tar1[
loc
+ len(
self._stc._synoname_special_table[ # noqa: SF01
s_pos
][1]
) :
]
)
elif s_type == 'c':
full_tar1 = full_tar1[
1
+ len(
self._stc._synoname_special_table[s_pos][ # noqa: SF01
1
]
) :
]
full_src1 = ' '.join((src_ln, src_fn)).replace('-', ' ').strip()
for s_pos, s_type in features['src_specials']:
if s_type == 'a':
full_src1 = full_src1[
: -(
1
+ len(
self._stc._synoname_special_table[ # noqa: SF01
s_pos
][1]
)
)
]
elif s_type == 'b':
loc = (
full_src1.find(
' '
+ self._stc._synoname_special_table[ # noqa: SF01
s_pos
][1]
+ ' '
)
+ 1
)
full_src1 = (
full_src1[:loc]
+ full_src1[
loc
+ len(
self._stc._synoname_special_table[ # noqa: SF01
s_pos
][1]
) :
]
)
elif s_type == 'c':
full_src1 = full_src1[
1
+ len(
self._stc._synoname_special_table[s_pos][ # noqa: SF01
1
]
) :
]
full_tar2 = full_tar1
for s_pos, s_type in features['tar_specials']:
if s_type == 'd':
full_tar2 = full_tar2[
len(
self._stc._synoname_special_table[s_pos][ # noqa: SF01
1
]
) :
]
elif (
s_type == 'X'
and self._stc._synoname_special_table[s_pos][1] # noqa: SF01
in full_tar2
):
loc = full_tar2.find(
' '
+ self._stc._synoname_special_table[s_pos][1] # noqa: SF01
)
full_tar2 = (
full_tar2[:loc]
+ full_tar2[
loc
+ len(
self._stc._synoname_special_table[ # noqa: SF01
s_pos
][1]
) :
]
)
full_src2 = full_src1
for s_pos, s_type in features['src_specials']:
if s_type == 'd':
full_src2 = full_src2[
len(
self._stc._synoname_special_table[s_pos][ # noqa: SF01
1
]
) :
]
elif (
s_type == 'X'
and self._stc._synoname_special_table[s_pos][1] # noqa: SF01
in full_src2
):
loc = full_src2.find(
' '
+ self._stc._synoname_special_table[s_pos][1] # noqa: SF01
)
full_src2 = (
full_src2[:loc]
+ full_src2[
loc
+ len(
self._stc._synoname_special_table[ # noqa: SF01
s_pos
][1]
) :
]
)
full_tar1 = self._synoname_strip_punct(full_tar1)
tar1_words = full_tar1.split()
tar1_num_words = len(tar1_words)
full_src1 = self._synoname_strip_punct(full_src1)
src1_words = full_src1.split()
src1_num_words = len(src1_words)
full_tar2 = self._synoname_strip_punct(full_tar2)
tar2_words = full_tar2.split()
tar2_num_words = len(tar2_words)
full_src2 = self._synoname_strip_punct(full_src2)
src2_words = full_src2.split()
src2_num_words = len(src2_words)
# 2
if (
src1_num_words < 2
and src_len_specials == 0
and src2_num_words < 2
and tar_len_specials == 0
):
return 0
# 4
if (
tar1_num_words == 1
and src1_num_words == 1
and tar1_words[0] == src1_words[0]
):
return 1
if tar1_num_words < 2 and tar_len_specials == 0:
return 0
# 5
last_found = False
for word in tar1_words:
if src_ln.endswith(word) or word + ' ' in src_ln:
last_found = True
if not last_found:
for word in src1_words:
if tar_ln.endswith(word) or word + ' ' in tar_ln:
last_found = True
# 6
matches = 0
if last_found:
for i, s_word in enumerate(src1_words):
for j, t_word in enumerate(tar1_words):
if s_word == t_word:
src1_words[i] = '@'
tar1_words[j] = '@'
matches += 1
w_ratio = matches / max(tar1_num_words, src1_num_words)
if matches > 1 or (
matches == 1
and src1_num_words == 1
and tar1_num_words == 1
and (tar_len_specials > 0 or src_len_specials > 0)
):
return w_ratio
# 8
if (
tar2_num_words == 1
and src2_num_words == 1
and tar2_words[0] == src2_words[0]
):
return 1
# I see no way that the following can be True if the equivalent in
# #4 was False.
if tar2_num_words < 2 and tar_len_specials == 0: # pragma: no cover
return 0
# 9
last_found = False
for word in tar2_words:
if src_ln.endswith(word) or word + ' ' in src_ln:
last_found = True
if not last_found:
for word in src2_words:
if tar_ln.endswith(word) or word + ' ' in tar_ln:
last_found = True
if not last_found:
return 0
# 10
matches = 0
if last_found:
for i, s_word in enumerate(src2_words):
for j, t_word in enumerate(tar2_words):
if s_word == t_word:
src2_words[i] = '@'
tar2_words[j] = '@'
matches += 1
w_ratio = matches / max(tar2_num_words, src2_num_words)
if matches > 1 or (
matches == 1
and src2_num_words == 1
and tar2_num_words == 1
and (tar_len_specials > 0 or src_len_specials > 0)
):
return w_ratio
return 0
|
def dist_abs(
self,
src,
tar,
word_approx_min=0.3,
char_approx_min=0.73,
tests=2 ** 12 - 1,
ret_name=False,
):
"""Return the Synoname similarity type of two words.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
word_approx_min : float
The minimum word approximation value to signal a 'word_approx'
match
char_approx_min : float
The minimum character approximation value to signal a 'char_approx'
match
tests : int or Iterable
Either an integer indicating tests to perform or a list of test
names to perform (defaults to performing all tests)
ret_name : bool
If True, returns the match name rather than its integer equivalent
Returns
-------
int (or str if ret_name is True)
Synoname value
Examples
--------
>>> cmp = Synoname()
>>> cmp.dist_abs(('Breghel', 'Pieter', ''), ('Brueghel', 'Pieter', ''))
2
>>> cmp.dist_abs(('Breghel', 'Pieter', ''), ('Brueghel', 'Pieter', ''),
... ret_name=True)
'omission'
>>> cmp.dist_abs(('Dore', 'Gustave', ''),
... ('Dore', 'Paul Gustave Louis Christophe', ''), ret_name=True)
'inclusion'
>>> cmp.dist_abs(('Pereira', 'I. R.', ''), ('Pereira', 'I. Smith', ''),
... ret_name=True)
'word_approx'
"""
if isinstance(tests, Iterable):
new_tests = 0
for term in tests:
if term in self._test_dict:
new_tests += self._test_dict[term]
tests = new_tests
if isinstance(src, tuple):
src_ln, src_fn, src_qual = src
elif '#' in src:
src_ln, src_fn, src_qual = src.split('#')[-3:]
else:
src_ln, src_fn, src_qual = src, '', ''
if isinstance(tar, tuple):
tar_ln, tar_fn, tar_qual = tar
elif '#' in tar:
tar_ln, tar_fn, tar_qual = tar.split('#')[-3:]
else:
tar_ln, tar_fn, tar_qual = tar, '', ''
def _split_special(spec):
spec_list = []
while spec:
spec_list.append((int(spec[:3]), spec[3:4]))
spec = spec[4:]
return spec_list
def _fmt_retval(val):
if ret_name:
return self._match_name[val]
return val
# 1. Preprocessing
# Lowercasing
src_fn = src_fn.strip().lower()
src_ln = src_ln.strip().lower()
src_qual = src_qual.strip().lower()
tar_fn = tar_fn.strip().lower()
tar_ln = tar_ln.strip().lower()
tar_qual = tar_qual.strip().lower()
# Create toolcodes
src_ln, src_fn, src_tc = self._stc.fingerprint(
src_ln, src_fn, src_qual
)
tar_ln, tar_fn, tar_tc = self._stc.fingerprint(
tar_ln, tar_fn, tar_qual
)
src_generation = int(src_tc[2])
src_romancode = int(src_tc[3:6])
src_len_fn = int(src_tc[6:8])
src_tc = src_tc.split('$')
src_specials = _split_special(src_tc[1])
tar_generation = int(tar_tc[2])
tar_romancode = int(tar_tc[3:6])
tar_len_fn = int(tar_tc[6:8])
tar_tc = tar_tc.split('$')
tar_specials = _split_special(tar_tc[1])
gen_conflict = (src_generation != tar_generation) and bool(
src_generation or tar_generation
)
roman_conflict = (src_romancode != tar_romancode) and bool(
src_romancode or tar_romancode
)
ln_equal = src_ln == tar_ln
fn_equal = src_fn == tar_fn
# approx_c
def _approx_c():
if gen_conflict or roman_conflict:
return False, 0
full_src = ' '.join((src_ln, src_fn))
if full_src.startswith('master '):
full_src = full_src[len('master ') :]
for intro in [
'of the ',
'of ',
'known as the ',
'with the ',
'with ',
]:
if full_src.startswith(intro):
full_src = full_src[len(intro) :]
full_tar = ' '.join((tar_ln, tar_fn))
if full_tar.startswith('master '):
full_tar = full_tar[len('master ') :]
for intro in [
'of the ',
'of ',
'known as the ',
'with the ',
'with ',
]:
if full_tar.startswith(intro):
full_tar = full_tar[len(intro) :]
loc_ratio = sim_ratcliff_obershelp(full_src, full_tar)
return loc_ratio >= char_approx_min, loc_ratio
approx_c_result, ca_ratio = _approx_c()
if tests & self._test_dict['exact'] and fn_equal and ln_equal:
return _fmt_retval(self._match_type_dict['exact'])
if tests & self._test_dict['omission']:
if (
fn_equal
and levenshtein(src_ln, tar_ln, cost=(1, 1, 99, 99)) == 1
):
if not roman_conflict:
return _fmt_retval(self._match_type_dict['omission'])
elif (
ln_equal
and levenshtein(src_fn, tar_fn, cost=(1, 1, 99, 99)) == 1
):
return _fmt_retval(self._match_type_dict['omission'])
if tests & self._test_dict['substitution']:
if (
fn_equal
and levenshtein(src_ln, tar_ln, cost=(99, 99, 1, 99)) == 1
):
return _fmt_retval(self._match_type_dict['substitution'])
elif (
ln_equal
and levenshtein(src_fn, tar_fn, cost=(99, 99, 1, 99)) == 1
):
return _fmt_retval(self._match_type_dict['substitution'])
if tests & self._test_dict['transposition']:
if fn_equal and (
levenshtein(src_ln, tar_ln, mode='osa', cost=(99, 99, 99, 1))
== 1
):
return _fmt_retval(self._match_type_dict['transposition'])
elif ln_equal and (
levenshtein(src_fn, tar_fn, mode='osa', cost=(99, 99, 99, 1))
== 1
):
return _fmt_retval(self._match_type_dict['transposition'])
if tests & self._test_dict['punctuation']:
np_src_fn = self._synoname_strip_punct(src_fn)
np_tar_fn = self._synoname_strip_punct(tar_fn)
np_src_ln = self._synoname_strip_punct(src_ln)
np_tar_ln = self._synoname_strip_punct(tar_ln)
if (np_src_fn == np_tar_fn) and (np_src_ln == np_tar_ln):
return _fmt_retval(self._match_type_dict['punctuation'])
np_src_fn = self._synoname_strip_punct(src_fn.replace('-', ' '))
np_tar_fn = self._synoname_strip_punct(tar_fn.replace('-', ' '))
np_src_ln = self._synoname_strip_punct(src_ln.replace('-', ' '))
np_tar_ln = self._synoname_strip_punct(tar_ln.replace('-', ' '))
if (np_src_fn == np_tar_fn) and (np_src_ln == np_tar_ln):
return _fmt_retval(self._match_type_dict['punctuation'])
if tests & self._test_dict['initials'] and ln_equal:
if src_fn and tar_fn:
src_initials = self._synoname_strip_punct(src_fn).split()
tar_initials = self._synoname_strip_punct(tar_fn).split()
initials = bool(
(len(src_initials) == len(''.join(src_initials)))
or (len(tar_initials) == len(''.join(tar_initials)))
)
if initials:
src_initials = ''.join(_[0] for _ in src_initials)
tar_initials = ''.join(_[0] for _ in tar_initials)
if src_initials == tar_initials:
return _fmt_retval(self._match_type_dict['initials'])
initial_diff = abs(len(src_initials) - len(tar_initials))
if initial_diff and (
(
initial_diff
== levenshtein(
src_initials,
tar_initials,
cost=(1, 99, 99, 99),
)
)
or (
initial_diff
== levenshtein(
tar_initials,
src_initials,
cost=(1, 99, 99, 99),
)
)
):
return _fmt_retval(self._match_type_dict['initials'])
if tests & self._test_dict['extension']:
if src_ln[1] == tar_ln[1] and (
src_ln.startswith(tar_ln) or tar_ln.startswith(src_ln)
):
if (
(not src_len_fn and not tar_len_fn)
or (tar_fn and src_fn.startswith(tar_fn))
or (src_fn and tar_fn.startswith(src_fn))
) and not roman_conflict:
return _fmt_retval(self._match_type_dict['extension'])
if tests & self._test_dict['inclusion'] and ln_equal:
if (src_fn and src_fn in tar_fn) or (tar_fn and tar_fn in src_fn):
return _fmt_retval(self._match_type_dict['inclusion'])
if tests & self._test_dict['no_first'] and ln_equal:
if src_fn == '' or tar_fn == '':
return _fmt_retval(self._match_type_dict['no_first'])
if tests & self._test_dict['word_approx']:
ratio = self._synoname_word_approximation(
src_ln,
tar_ln,
src_fn,
tar_fn,
{
'gen_conflict': gen_conflict,
'roman_conflict': roman_conflict,
'src_specials': src_specials,
'tar_specials': tar_specials,
},
)
if ratio == 1 and tests & self._test_dict['confusions']:
if (
' '.join((src_fn, src_ln)).strip()
== ' '.join((tar_fn, tar_ln)).strip()
):
return _fmt_retval(self._match_type_dict['confusions'])
if ratio >= word_approx_min:
return _fmt_retval(self._match_type_dict['word_approx'])
if tests & self._test_dict['char_approx']:
if ca_ratio >= char_approx_min:
return _fmt_retval(self._match_type_dict['char_approx'])
return _fmt_retval(self._match_type_dict['no_match'])
|
def dist(
self,
src,
tar,
word_approx_min=0.3,
char_approx_min=0.73,
tests=2 ** 12 - 1,
):
"""Return the normalized Synoname distance between two words.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
word_approx_min : float
The minimum word approximation value to signal a 'word_approx'
match
char_approx_min : float
The minimum character approximation value to signal a 'char_approx'
match
tests : int or Iterable
Either an integer indicating tests to perform or a list of test
names to perform (defaults to performing all tests)
Returns
-------
float
Normalized Synoname distance
"""
return (
synoname(src, tar, word_approx_min, char_approx_min, tests, False)
/ 14
)
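# Worked example of the normalization above: the dist_abs examples classify
# the 'Breghel'/'Brueghel' pair as match type 2 ('omission'), so the
# normalized distance is 2 / 14.
cmp = Synoname()
assert cmp.dist(('Breghel', 'Pieter', ''),
                ('Brueghel', 'Pieter', '')) == 2 / 14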
|
def sim(self, src, tar, qval=2):
r"""Return the Jaccard similarity of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
Returns
-------
float
Jaccard similarity
Examples
--------
>>> cmp = Jaccard()
>>> cmp.sim('cat', 'hat')
0.3333333333333333
>>> cmp.sim('Niall', 'Neil')
0.2222222222222222
>>> cmp.sim('aluminum', 'Catalan')
0.0625
>>> cmp.sim('ATCG', 'TAGC')
0.0
"""
return super(self.__class__, self).sim(src, tar, qval, 1, 1)
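# Worked example of cmp.sim('cat', 'hat') == 1/3 above, assuming the q-gram
# tokenizer pads with start/stop symbols ('$' and '#') as elsewhere in this
# library:
#   Q('cat') = {'$c', 'ca', 'at', 't#'}
#   Q('hat') = {'$h', 'ha', 'at', 't#'}
#   |intersection| = 2 and |union| = 6, so sim = 2 / 6 = 0.333...
assert Jaccard().sim('cat', 'hat') == 2 / 6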
|
def tanimoto_coeff(self, src, tar, qval=2):
"""Return the Tanimoto distance between two strings.
Tanimoto distance :cite:`Tanimoto:1958` is
:math:`-log_{2} sim_{Tanimoto}(X, Y)`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
Returns
-------
float
Base-2 logarithm of the Tanimoto coefficient
Examples
--------
>>> cmp = Jaccard()
>>> cmp.tanimoto_coeff('cat', 'hat')
-1.5849625007211563
>>> cmp.tanimoto_coeff('Niall', 'Neil')
-2.1699250014423126
>>> cmp.tanimoto_coeff('aluminum', 'Catalan')
-4.0
>>> cmp.tanimoto_coeff('ATCG', 'TAGC')
-inf
"""
coeff = self.sim(src, tar, qval)
if coeff != 0:
return log(coeff, 2)
return float('-inf')
|
def sift4_common(src, tar, max_offset=5, max_distance=0):
"""Return the "common" Sift4 distance between two terms.
This is a wrapper for :py:meth:`Sift4.dist_abs`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
max_offset : int
The number of characters to search for matching letters
max_distance : int
The distance at which to stop and exit
Returns
-------
int
The Sift4 distance according to the common formula
Examples
--------
>>> sift4_common('cat', 'hat')
1
>>> sift4_common('Niall', 'Neil')
2
>>> sift4_common('Colin', 'Cuilen')
3
>>> sift4_common('ATCG', 'TAGC')
2
"""
return Sift4().dist_abs(src, tar, max_offset, max_distance)
|
def dist_sift4(src, tar, max_offset=5, max_distance=0):
"""Return the normalized "common" Sift4 distance between two terms.
This is a wrapper for :py:meth:`Sift4.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
max_offset : int
The number of characters to search for matching letters
max_distance : int
The distance at which to stop and exit
Returns
-------
float
The normalized Sift4 distance
Examples
--------
>>> round(dist_sift4('cat', 'hat'), 12)
0.333333333333
>>> dist_sift4('Niall', 'Neil')
0.4
>>> dist_sift4('Colin', 'Cuilen')
0.5
>>> dist_sift4('ATCG', 'TAGC')
0.5
"""
return Sift4().dist(src, tar, max_offset, max_distance)
|
def sim_sift4(src, tar, max_offset=5, max_distance=0):
"""Return the normalized "common" Sift4 similarity of two terms.
This is a wrapper for :py:meth:`Sift4.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
max_offset : int
The number of characters to search for matching letters
max_distance : int
The distance at which to stop and exit
Returns
-------
float
The normalized Sift4 similarity
Examples
--------
>>> round(sim_sift4('cat', 'hat'), 12)
0.666666666667
>>> sim_sift4('Niall', 'Neil')
0.6
>>> sim_sift4('Colin', 'Cuilen')
0.5
>>> sim_sift4('ATCG', 'TAGC')
0.5
"""
return Sift4().sim(src, tar, max_offset, max_distance)
|
def dist_abs(self, src, tar, max_offset=5, max_distance=0):
"""Return the "common" Sift4 distance between two terms.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
max_offset : int
The number of characters to search for matching letters
max_distance : int
The distance at which to stop and exit
Returns
-------
int
The Sift4 distance according to the common formula
Examples
--------
>>> cmp = Sift4()
>>> cmp.dist_abs('cat', 'hat')
1
>>> cmp.dist_abs('Niall', 'Neil')
2
>>> cmp.dist_abs('Colin', 'Cuilen')
3
>>> cmp.dist_abs('ATCG', 'TAGC')
2
"""
if not src:
return len(tar)
if not tar:
return len(src)
src_len = len(src)
tar_len = len(tar)
src_cur = 0
tar_cur = 0
lcss = 0
local_cs = 0
trans = 0
offset_arr = []
while (src_cur < src_len) and (tar_cur < tar_len):
if src[src_cur] == tar[tar_cur]:
local_cs += 1
is_trans = False
i = 0
while i < len(offset_arr):
ofs = offset_arr[i]
if src_cur <= ofs['src_cur'] or tar_cur <= ofs['tar_cur']:
is_trans = abs(tar_cur - src_cur) >= abs(
ofs['tar_cur'] - ofs['src_cur']
)
if is_trans:
trans += 1
elif not ofs['trans']:
ofs['trans'] = True
trans += 1
break
elif src_cur > ofs['tar_cur'] and tar_cur > ofs['src_cur']:
del offset_arr[i]
else:
i += 1
offset_arr.append(
{'src_cur': src_cur, 'tar_cur': tar_cur, 'trans': is_trans}
)
else:
lcss += local_cs
local_cs = 0
if src_cur != tar_cur:
src_cur = tar_cur = min(src_cur, tar_cur)
for i in range(max_offset):
if not (
(src_cur + i < src_len) or (tar_cur + i < tar_len)
):
break
if (src_cur + i < src_len) and (
src[src_cur + i] == tar[tar_cur]
):
src_cur += i - 1
tar_cur -= 1
break
if (tar_cur + i < tar_len) and (
src[src_cur] == tar[tar_cur + i]
):
src_cur -= 1
tar_cur += i - 1
break
src_cur += 1
tar_cur += 1
if max_distance:
temporary_distance = max(src_cur, tar_cur) - lcss + trans
if temporary_distance >= max_distance:
return round(temporary_distance)
if (src_cur >= src_len) or (tar_cur >= tar_len):
lcss += local_cs
local_cs = 0
src_cur = tar_cur = min(src_cur, tar_cur)
lcss += local_cs
return round(max(src_len, tar_len) - lcss + trans)
|
def dist(self, src, tar, max_offset=5, max_distance=0):
"""Return the normalized "common" Sift4 distance between two terms.
This is Sift4 distance, normalized to [0, 1].
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
max_offset : int
The number of characters to search for matching letters
max_distance : int
The distance at which to stop and exit
Returns
-------
float
The normalized Sift4 distance
Examples
--------
>>> cmp = Sift4()
>>> round(cmp.dist('cat', 'hat'), 12)
0.333333333333
>>> cmp.dist('Niall', 'Neil')
0.4
>>> cmp.dist('Colin', 'Cuilen')
0.5
>>> cmp.dist('ATCG', 'TAGC')
0.5
"""
return self.dist_abs(src, tar, max_offset, max_distance) / (
max(len(src), len(tar), 1)
)
|
def dist(self, src, tar):
"""Return the NCD between two strings using bzip2 compression.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Compression distance
Examples
--------
>>> cmp = NCDbz2()
>>> cmp.dist('cat', 'hat')
0.06666666666666667
>>> cmp.dist('Niall', 'Neil')
0.03125
>>> cmp.dist('aluminum', 'Catalan')
0.17647058823529413
>>> cmp.dist('ATCG', 'TAGC')
0.03125
"""
if src == tar:
return 0.0
src = src.encode('utf-8')
tar = tar.encode('utf-8')
src_comp = bz2.compress(src, self._level)[10:]
tar_comp = bz2.compress(tar, self._level)[10:]
concat_comp = bz2.compress(src + tar, self._level)[10:]
concat_comp2 = bz2.compress(tar + src, self._level)[10:]
return (
min(len(concat_comp), len(concat_comp2))
- min(len(src_comp), len(tar_comp))
) / max(len(src_comp), len(tar_comp))
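# The [10:] slices above strip the constant bzip2 stream header ('BZh',
# the level digit, and the six-byte block magic), so the header does not
# inflate every compressed length by the same amount. A quick check:
import bz2
assert bz2.compress(b'cat', 9)[:10] == bz2.compress(b'dog', 9)[:10]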
|
def encode(self, word, lang='en'):
"""Return the MetaSoundex code for a word.
Parameters
----------
word : str
The word to transform
lang : str
Either ``en`` for English or ``es`` for Spanish
Returns
-------
str
The MetaSoundex code
Examples
--------
>>> pe = MetaSoundex()
>>> pe.encode('Smith')
'4500'
>>> pe.encode('Waters')
'7362'
>>> pe.encode('James')
'1520'
>>> pe.encode('Schmidt')
'4530'
>>> pe.encode('Ashcroft')
'0261'
>>> pe.encode('Perez', lang='es')
'094'
>>> pe.encode('Martinez', lang='es')
'69364'
>>> pe.encode('Gutierrez', lang='es')
'83994'
>>> pe.encode('Santiago', lang='es')
'4638'
>>> pe.encode('Nicolás', lang='es')
'6754'
"""
if lang == 'es':
return self._phonetic_spanish.encode(
self._spanish_metaphone.encode(word)
)
word = self._soundex.encode(self._metaphone.encode(word))
word = word[0].translate(self._trans) + word[1:]
return word
|
def stem(self, word):
"""Return the S-stemmed form of a word.
Parameters
----------
word : str
The word to stem
Returns
-------
str
Word stem
Examples
--------
>>> stmr = SStemmer()
>>> stmr.stem('summaries')
'summary'
>>> stmr.stem('summary')
'summary'
>>> stmr.stem('towers')
'tower'
>>> stmr.stem('reading')
'reading'
>>> stmr.stem('census')
'census'
"""
lowered = word.lower()
if lowered[-3:] == 'ies' and lowered[-4:-3] not in {'e', 'a'}:
return word[:-3] + ('Y' if word[-1:].isupper() else 'y')
if lowered[-2:] == 'es' and lowered[-3:-2] not in {'a', 'e', 'o'}:
return word[:-1]
if lowered[-1:] == 's' and lowered[-2:-1] not in {'u', 's'}:
return word[:-1]
return word
|
def sim(self, src, tar):
"""Return the Ratcliff-Obershelp similarity of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Ratcliff-Obershelp similarity
Examples
--------
>>> cmp = RatcliffObershelp()
>>> round(cmp.sim('cat', 'hat'), 12)
0.666666666667
>>> round(cmp.sim('Niall', 'Neil'), 12)
0.666666666667
>>> round(cmp.sim('aluminum', 'Catalan'), 12)
0.4
>>> cmp.sim('ATCG', 'TAGC')
0.5
"""
def _lcsstr_stl(src, tar):
"""Return start positions & length for Ratcliff-Obershelp.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
tuple
The start position in the source string, start position in the
target string, and length of the longest common substring of
strings src and tar.
"""
lengths = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_int)
longest, src_longest, tar_longest = 0, 0, 0
for i in range(1, len(src) + 1):
for j in range(1, len(tar) + 1):
if src[i - 1] == tar[j - 1]:
lengths[i, j] = lengths[i - 1, j - 1] + 1
if lengths[i, j] > longest:
longest = lengths[i, j]
src_longest = i
tar_longest = j
else:
lengths[i, j] = 0
return src_longest - longest, tar_longest - longest, longest
def _sstr_matches(src, tar):
"""Return the sum of substring match lengths.
This follows the Ratcliff-Obershelp algorithm
:cite:`Ratcliff:1988`:
1. Find the length of the longest common substring in src &
tar.
2. Recurse on the strings to the left & right of each this
substring in src & tar.
3. Base case is a 0 length common substring, in which case,
return 0.
4. Return the sum.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
int
Sum of substring match lengths
"""
src_start, tar_start, length = _lcsstr_stl(src, tar)
if length == 0:
return 0
return (
_sstr_matches(src[:src_start], tar[:tar_start])
+ length
+ _sstr_matches(
src[src_start + length :], tar[tar_start + length :]
)
)
if src == tar:
return 1.0
elif not src or not tar:
return 0.0
return 2 * _sstr_matches(src, tar) / (len(src) + len(tar))
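# Worked example of the 2 * matches / (len(src) + len(tar)) formula above:
# for 'Niall' vs 'Neil' the recursion matches 'N', then 'i', then 'l'
# (three one-character common substrings), so sim = 2 * 3 / (5 + 4).
cmp = RatcliffObershelp()
assert round(cmp.sim('Niall', 'Neil'), 12) == round(2 * 3 / 9, 12)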
|
def refined_soundex(word, max_length=-1, zero_pad=False, retain_vowels=False):
"""Return the Refined Soundex code for a word.
This is a wrapper for :py:meth:`RefinedSoundex.encode`.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to unlimited)
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length string
retain_vowels : bool
Retain vowels (as 0) in the resulting code
Returns
-------
str
The Refined Soundex value
Examples
--------
>>> refined_soundex('Christopher')
'C393619'
>>> refined_soundex('Niall')
'N87'
>>> refined_soundex('Smith')
'S386'
>>> refined_soundex('Schmidt')
'S386'
"""
return RefinedSoundex().encode(word, max_length, zero_pad, retain_vowels)
|
def encode(self, word, max_length=-1, zero_pad=False, retain_vowels=False):
"""Return the Refined Soundex code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to unlimited)
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length
string
retain_vowels : bool
Retain vowels (as 0) in the resulting code
Returns
-------
str
The Refined Soundex value
Examples
--------
>>> pe = RefinedSoundex()
>>> pe.encode('Christopher')
'C393619'
>>> pe.encode('Niall')
'N87'
>>> pe.encode('Smith')
'S386'
>>> pe.encode('Schmidt')
'S386'
"""
# uppercase, normalize, decompose, and filter non-A-Z out
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.replace('ß', 'SS')
word = ''.join(c for c in word if c in self._uc_set)
# apply the Soundex algorithm
sdx = word[:1] + word.translate(self._trans)
sdx = self._delete_consecutive_repeats(sdx)
if not retain_vowels:
sdx = sdx.replace('0', '') # Delete vowels, H, W, Y
if max_length > 0:
if zero_pad:
sdx += '0' * max_length
sdx = sdx[:max_length]
return sdx
|
def dist_abs(self, src, tar):
"""Return the MRA comparison rating of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
int
MRA comparison rating
Examples
--------
>>> cmp = MRA()
>>> cmp.dist_abs('cat', 'hat')
5
>>> cmp.dist_abs('Niall', 'Neil')
6
>>> cmp.dist_abs('aluminum', 'Catalan')
0
>>> cmp.dist_abs('ATCG', 'TAGC')
5
"""
if src == tar:
return 6
if src == '' or tar == '':
return 0
src = list(mra(src))
tar = list(mra(tar))
if abs(len(src) - len(tar)) > 2:
return 0
length_sum = len(src) + len(tar)
if length_sum < 5:
min_rating = 5
elif length_sum < 8:
min_rating = 4
elif length_sum < 12:
min_rating = 3
else:
min_rating = 2
for _ in range(2):
new_src = []
new_tar = []
minlen = min(len(src), len(tar))
for i in range(minlen):
if src[i] != tar[i]:
new_src.append(src[i])
new_tar.append(tar[i])
src = new_src + src[minlen:]
tar = new_tar + tar[minlen:]
src.reverse()
tar.reverse()
similarity = 6 - max(len(src), len(tar))
if similarity >= min_rating:
return similarity
return 0
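# Worked example of the rating above, assuming mra('cat') == 'CT' and
# mra('hat') == 'HT' (MRA drops vowels after the first letter):
#   length_sum = 4 < 5, so min_rating = 5
#   'T' matches and is removed from both codes; 'C' vs 'H' remain
#   similarity = 6 - max(1, 1) = 5 >= min_rating, so the rating is 5
cmp = MRA()
assert cmp.dist_abs('cat', 'hat') == 5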
|
def encode(self, word):
"""Return the Parmar-Kumbharana encoding of a word.
Parameters
----------
word : str
The word to transform
Returns
-------
str
The Parmar-Kumbharana encoding
Examples
--------
>>> pe = ParmarKumbharana()
>>> pe.encode('Gough')
'GF'
>>> pe.encode('pneuma')
'NM'
>>> pe.encode('knight')
'NT'
>>> pe.encode('trice')
'TRS'
>>> pe.encode('judge')
'JJ'
"""
word = word.upper() # Rule 3
word = self._delete_consecutive_repeats(word) # Rule 4
# Rule 5
i = 0
while i < len(word):
for match_len in range(4, 1, -1):
if word[i : i + match_len] in self._rules[match_len]:
repl = self._rules[match_len][word[i : i + match_len]]
word = word[:i] + repl + word[i + match_len :]
i += len(repl)
break
else:
i += 1
word = word[:1] + word[1:].translate(self._del_trans) # Rule 6
return word
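# The matching loop above expects self._rules to be keyed by match length,
# so 4-, then 3-, then 2-character patterns are tried at each position. A
# hypothetical table in that shape (illustrative values only, not the
# published Parmar-Kumbharana rule set, though these reproduce the
# 'Gough' -> 'GF' and 'pneuma' -> 'NM' examples):
_rules = {
    4: {'OUGH': 'F'},
    3: {'DGE': 'J'},
    2: {'PN': 'N', 'KN': 'N'},
}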
|
def eudex_hamming(
src, tar, weights='exponential', max_length=8, normalized=False
):
"""Calculate the Hamming distance between the Eudex hashes of two terms.
This is a wrapper for :py:meth:`Eudex.dist_abs`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
weights : str, iterable, or generator function
The weights or weights generator function
max_length : int
The number of characters to encode as a eudex hash
normalized : bool
Normalizes to [0, 1] if True
Returns
-------
int
The Eudex Hamming distance
Examples
--------
>>> eudex_hamming('cat', 'hat')
128
>>> eudex_hamming('Niall', 'Neil')
2
>>> eudex_hamming('Colin', 'Cuilen')
10
>>> eudex_hamming('ATCG', 'TAGC')
403
>>> eudex_hamming('cat', 'hat', weights='fibonacci')
34
>>> eudex_hamming('Niall', 'Neil', weights='fibonacci')
2
>>> eudex_hamming('Colin', 'Cuilen', weights='fibonacci')
7
>>> eudex_hamming('ATCG', 'TAGC', weights='fibonacci')
117
>>> eudex_hamming('cat', 'hat', weights=None)
1
>>> eudex_hamming('Niall', 'Neil', weights=None)
1
>>> eudex_hamming('Colin', 'Cuilen', weights=None)
2
>>> eudex_hamming('ATCG', 'TAGC', weights=None)
9
>>> # Using the OEIS A000142:
>>> eudex_hamming('cat', 'hat', [1, 1, 2, 6, 24, 120, 720, 5040])
1
>>> eudex_hamming('Niall', 'Neil', [1, 1, 2, 6, 24, 120, 720, 5040])
720
>>> eudex_hamming('Colin', 'Cuilen', [1, 1, 2, 6, 24, 120, 720, 5040])
744
>>> eudex_hamming('ATCG', 'TAGC', [1, 1, 2, 6, 24, 120, 720, 5040])
6243
"""
return Eudex().dist_abs(src, tar, weights, max_length, normalized)
|
def dist_eudex(src, tar, weights='exponential', max_length=8):
"""Return normalized Hamming distance between Eudex hashes of two terms.
This is a wrapper for :py:meth:`Eudex.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
weights : str, iterable, or generator function
The weights or weights generator function
max_length : int
The number of characters to encode as a eudex hash
Returns
-------
float
The normalized Eudex Hamming distance
Examples
--------
>>> round(dist_eudex('cat', 'hat'), 12)
0.062745098039
>>> round(dist_eudex('Niall', 'Neil'), 12)
0.000980392157
>>> round(dist_eudex('Colin', 'Cuilen'), 12)
0.004901960784
>>> round(dist_eudex('ATCG', 'TAGC'), 12)
0.197549019608
"""
return Eudex().dist(src, tar, weights, max_length)
|
def sim_eudex(src, tar, weights='exponential', max_length=8):
"""Return normalized Hamming similarity between Eudex hashes of two terms.
This is a wrapper for :py:meth:`Eudex.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
weights : str, iterable, or generator function
The weights or weights generator function
max_length : int
The number of characters to encode as a eudex hash
Returns
-------
float
The normalized Eudex Hamming similarity
Examples
--------
>>> round(sim_eudex('cat', 'hat'), 12)
0.937254901961
>>> round(sim_eudex('Niall', 'Neil'), 12)
0.999019607843
>>> round(sim_eudex('Colin', 'Cuilen'), 12)
0.995098039216
>>> round(sim_eudex('ATCG', 'TAGC'), 12)
0.802450980392
"""
return Eudex().sim(src, tar, weights, max_length)
|
def gen_fibonacci():
"""Yield the next Fibonacci number.
Based on https://www.python-course.eu/generators.php
Starts from the second 1 of the sequence, yielding 1, 2, 3, 5, 8, ...
Yields
------
int
The next Fibonacci number
"""
num_a, num_b = 1, 2
while True:
yield num_a
num_a, num_b = num_b, num_a + num_b
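# Eudex.dist_abs below also calls Eudex.gen_exponential for its default
# weights. That generator is not shown in this excerpt; a minimal sketch,
# assuming the powers-of-2 decay described in dist_abs's docstring and
# mirroring gen_fibonacci above:
def gen_exponential(base=2):
    """Yield successive powers of base: 1, 2, 4, 8, ...."""
    exp = 0
    while True:
        yield base ** exp
        exp += 1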
|
def dist_abs(
self, src, tar, weights='exponential', max_length=8, normalized=False
):
"""Calculate the distance between the Eudex hashes of two terms.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
weights : str, iterable, or generator function
The weights or weights generator function
- If set to ``None``, a simple Hamming distance is calculated.
- If set to ``exponential``, weight decays by powers of 2, as
proposed in the eudex specification:
https://github.com/ticki/eudex.
- If set to ``fibonacci``, weight decays through the Fibonacci
series, as in the eudex reference implementation.
- If set to a callable function, this assumes it creates a
generator and the generator is used to populate a series of
weights.
- If set to an iterable, the iterable's values should be
integers and will be used as the weights.
max_length : int
The number of characters to encode as a eudex hash
normalized : bool
Normalizes to [0, 1] if True
Returns
-------
int
The Eudex Hamming distance
Examples
--------
>>> cmp = Eudex()
>>> cmp.dist_abs('cat', 'hat')
128
>>> cmp.dist_abs('Niall', 'Neil')
2
>>> cmp.dist_abs('Colin', 'Cuilen')
10
>>> cmp.dist_abs('ATCG', 'TAGC')
403
>>> cmp.dist_abs('cat', 'hat', weights='fibonacci')
34
>>> cmp.dist_abs('Niall', 'Neil', weights='fibonacci')
2
>>> cmp.dist_abs('Colin', 'Cuilen', weights='fibonacci')
7
>>> cmp.dist_abs('ATCG', 'TAGC', weights='fibonacci')
117
>>> cmp.dist_abs('cat', 'hat', weights=None)
1
>>> cmp.dist_abs('Niall', 'Neil', weights=None)
1
>>> cmp.dist_abs('Colin', 'Cuilen', weights=None)
2
>>> cmp.dist_abs('ATCG', 'TAGC', weights=None)
9
>>> # Using the OEIS A000142:
>>> cmp.dist_abs('cat', 'hat', [1, 1, 2, 6, 24, 120, 720, 5040])
1
>>> cmp.dist_abs('Niall', 'Neil', [1, 1, 2, 6, 24, 120, 720, 5040])
720
>>> cmp.dist_abs('Colin', 'Cuilen',
... [1, 1, 2, 6, 24, 120, 720, 5040])
744
>>> cmp.dist_abs('ATCG', 'TAGC', [1, 1, 2, 6, 24, 120, 720, 5040])
6243
"""
# Calculate the eudex hashes and XOR them
xored = eudex(src, max_length=max_length) ^ eudex(
tar, max_length=max_length
)
# Simple Hamming distance (all bits weighted equally)
if not weights:
binary = bin(xored)
distance = binary.count('1')
if normalized:
return distance / (len(binary) - 2)
return distance
# If weights is a function, it should create a generator,
# which we now use to populate a list
if callable(weights):
weights = weights()
elif weights == 'exponential':
weights = Eudex.gen_exponential()
elif weights == 'fibonacci':
weights = Eudex.gen_fibonacci()
if isinstance(weights, GeneratorType):
weights = [next(weights) for _ in range(max_length)][::-1]
# Sum the weighted hamming distance
distance = 0
max_distance = 0
while (xored or normalized) and weights:
max_distance += 8 * weights[-1]
distance += bin(xored & 0xFF).count('1') * weights.pop()
xored >>= 8
if normalized:
distance /= max_distance
return distance
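# A hedged sketch of the callable-weights path above: any zero-argument
# callable returning a generator works; constant weights of 1 reduce the
# measure to a plain per-byte popcount, so the result matches weights=None.
def gen_ones():
    while True:
        yield 1
Eudex().dist_abs('Niall', 'Neil', weights=gen_ones)  # 1, as with weights=None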
|
def dist(self, src, tar, weights='exponential', max_length=8):
"""Return normalized distance between the Eudex hashes of two terms.
This is Eudex distance normalized to [0, 1].
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
weights : str, iterable, or generator function
The weights or weights generator function
max_length : int
The number of characters to encode as a eudex hash
Returns
-------
        float
The normalized Eudex Hamming distance
Examples
--------
>>> cmp = Eudex()
>>> round(cmp.dist('cat', 'hat'), 12)
0.062745098039
>>> round(cmp.dist('Niall', 'Neil'), 12)
0.000980392157
>>> round(cmp.dist('Colin', 'Cuilen'), 12)
0.004901960784
>>> round(cmp.dist('ATCG', 'TAGC'), 12)
0.197549019608
"""
return self.dist_abs(src, tar, weights, max_length, True)
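# Worked normalization, following the loop in dist_abs: with exponential
# weights over max_length=8 bytes the weights are 1, 2, 4, ..., 128, so the
# maximum weighted distance is 8 * (1 + 2 + ... + 128) = 2040. For 'cat' vs
# 'hat', dist_abs is 128, giving 128 / 2040 ~= 0.0627 as in the example above.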
|
def euclidean(src, tar, qval=2, normalized=False, alphabet=None):
"""Return the Euclidean distance between two strings.
This is a wrapper for :py:meth:`Euclidean.dist_abs`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
normalized : bool
Normalizes to [0, 1] if True
alphabet : collection or int
The values or size of the alphabet
Returns
-------
    float
        The Euclidean distance
Examples
--------
>>> euclidean('cat', 'hat')
2.0
>>> round(euclidean('Niall', 'Neil'), 12)
2.645751311065
>>> euclidean('Colin', 'Cuilen')
3.0
>>> round(euclidean('ATCG', 'TAGC'), 12)
3.162277660168
"""
return Euclidean().dist_abs(src, tar, qval, normalized, alphabet)
|
def dist_euclidean(src, tar, qval=2, alphabet=None):
"""Return the normalized Euclidean distance between two strings.
This is a wrapper for :py:meth:`Euclidean.dist`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The normalized Euclidean distance
Examples
--------
>>> round(dist_euclidean('cat', 'hat'), 12)
0.57735026919
>>> round(dist_euclidean('Niall', 'Neil'), 12)
0.683130051064
>>> round(dist_euclidean('Colin', 'Cuilen'), 12)
0.727606875109
>>> dist_euclidean('ATCG', 'TAGC')
1.0
"""
return Euclidean().dist(src, tar, qval, alphabet)
|
def sim_euclidean(src, tar, qval=2, alphabet=None):
"""Return the normalized Euclidean similarity of two strings.
This is a wrapper for :py:meth:`Euclidean.sim`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The normalized Euclidean similarity
Examples
--------
>>> round(sim_euclidean('cat', 'hat'), 12)
0.42264973081
>>> round(sim_euclidean('Niall', 'Neil'), 12)
0.316869948936
>>> round(sim_euclidean('Colin', 'Cuilen'), 12)
0.272393124891
>>> sim_euclidean('ATCG', 'TAGC')
0.0
"""
return Euclidean().sim(src, tar, qval, alphabet)
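# The similarity and distance wrappers are complements on [0, 1]:
# sim_euclidean(src, tar) == 1 - dist_euclidean(src, tar), e.g.
# 0.42264973081 == 1 - 0.57735026919 for ('cat', 'hat').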
|
def _cond_k(self, word, suffix_len):
"""Return Lovins' condition K.
Parameters
----------
word : str
Word to check
suffix_len : int
Suffix length
Returns
-------
bool
True if condition is met
"""
return (len(word) - suffix_len >= 3) and (
word[-suffix_len - 1] in {'i', 'l'}
or (word[-suffix_len - 3] == 'u' and word[-suffix_len - 1] == 'e')
)
|
def _cond_n(self, word, suffix_len):
"""Return Lovins' condition N.
Parameters
----------
word : str
Word to check
suffix_len : int
Suffix length
Returns
-------
bool
True if condition is met
"""
if len(word) - suffix_len >= 3:
if word[-suffix_len - 3] == 's':
if len(word) - suffix_len >= 4:
return True
else:
return True
return False
|
def _cond_s(self, word, suffix_len):
"""Return Lovins' condition S.
Parameters
----------
word : str
Word to check
suffix_len : int
Suffix length
Returns
-------
bool
True if condition is met
"""
return word[-suffix_len - 2 : -suffix_len] == 'dr' or (
word[-suffix_len - 1] == 't'
and word[-suffix_len - 2 : -suffix_len] != 'tt'
)
|
def _cond_x(self, word, suffix_len):
"""Return Lovins' condition X.
Parameters
----------
word : str
Word to check
suffix_len : int
Suffix length
Returns
-------
bool
True if condition is met
"""
        return word[-suffix_len - 1] in {'i', 'l'} or (
            len(word) - suffix_len >= 3
            and word[-suffix_len - 3] == 'u'  # u*e: 'u', any letter, then 'e'
            and word[-suffix_len - 1] == 'e'
        )
|
def _cond_bb(self, word, suffix_len):
"""Return Lovins' condition BB.
Parameters
----------
word : str
Word to check
suffix_len : int
Suffix length
Returns
-------
bool
True if condition is met
"""
return (
len(word) - suffix_len >= 3
and word[-suffix_len - 3 : -suffix_len] != 'met'
and word[-suffix_len - 4 : -suffix_len] != 'ryst'
)
|
def stem(self, word):
"""Return Lovins stem.
Parameters
----------
word : str
The word to stem
Returns
-------
str
Word stem
Examples
--------
>>> stmr = Lovins()
>>> stmr.stem('reading')
'read'
>>> stmr.stem('suspension')
'suspens'
>>> stmr.stem('elusiveness')
'elus'
"""
# lowercase, normalize, and compose
word = normalize('NFC', text_type(word.lower()))
for suffix_len in range(11, 0, -1):
ending = word[-suffix_len:]
if (
ending in self._suffix
and len(word) - suffix_len >= 2
and (
self._suffix[ending] is None
or self._suffix[ending](word, suffix_len)
)
):
word = word[:-suffix_len]
break
if word[-2:] in {
'bb',
'dd',
'gg',
'll',
'mm',
'nn',
'pp',
'rr',
'ss',
'tt',
}:
word = word[:-1]
for ending, replacement in self._recode:
if word.endswith(ending):
if callable(replacement):
word = replacement(word)
else:
word = word[: -len(ending)] + replacement
return word
|
def dist_abs(self, src, tar, cost=(1, 1, 1, 1)):
"""Return the Damerau-Levenshtein distance between two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
cost : tuple
A 4-tuple representing the cost of the four possible edits:
inserts, deletes, substitutions, and transpositions, respectively
(by default: (1, 1, 1, 1))
Returns
-------
int (may return a float if cost has float values)
The Damerau-Levenshtein distance between src & tar
Raises
------
ValueError
Unsupported cost assignment; the cost of two transpositions must
not be less than the cost of an insert plus a delete.
Examples
--------
>>> cmp = DamerauLevenshtein()
>>> cmp.dist_abs('cat', 'hat')
1
>>> cmp.dist_abs('Niall', 'Neil')
3
>>> cmp.dist_abs('aluminum', 'Catalan')
7
>>> cmp.dist_abs('ATCG', 'TAGC')
2
"""
ins_cost, del_cost, sub_cost, trans_cost = cost
if src == tar:
return 0
if not src:
return len(tar) * ins_cost
if not tar:
return len(src) * del_cost
if 2 * trans_cost < ins_cost + del_cost:
raise ValueError(
'Unsupported cost assignment; the cost of two transpositions '
+ 'must not be less than the cost of an insert plus a delete.'
)
        d_mat = np_zeros((len(src), len(tar)), dtype=np_int)
if src[0] != tar[0]:
d_mat[0, 0] = min(sub_cost, ins_cost + del_cost)
src_index_by_character = {src[0]: 0}
for i in range(1, len(src)):
del_distance = d_mat[i - 1, 0] + del_cost
ins_distance = (i + 1) * del_cost + ins_cost
match_distance = i * del_cost + (
0 if src[i] == tar[0] else sub_cost
)
d_mat[i, 0] = min(del_distance, ins_distance, match_distance)
for j in range(1, len(tar)):
del_distance = (j + 1) * ins_cost + del_cost
ins_distance = d_mat[0, j - 1] + ins_cost
match_distance = j * ins_cost + (
0 if src[0] == tar[j] else sub_cost
)
d_mat[0, j] = min(del_distance, ins_distance, match_distance)
for i in range(1, len(src)):
max_src_letter_match_index = 0 if src[i] == tar[0] else -1
for j in range(1, len(tar)):
candidate_swap_index = (
-1
if tar[j] not in src_index_by_character
else src_index_by_character[tar[j]]
)
j_swap = max_src_letter_match_index
del_distance = d_mat[i - 1, j] + del_cost
ins_distance = d_mat[i, j - 1] + ins_cost
match_distance = d_mat[i - 1, j - 1]
if src[i] != tar[j]:
match_distance += sub_cost
else:
max_src_letter_match_index = j
if candidate_swap_index != -1 and j_swap != -1:
i_swap = candidate_swap_index
if i_swap == 0 and j_swap == 0:
pre_swap_cost = 0
else:
pre_swap_cost = d_mat[
max(0, i_swap - 1), max(0, j_swap - 1)
]
swap_distance = (
pre_swap_cost
+ (i - i_swap - 1) * del_cost
+ (j - j_swap - 1) * ins_cost
+ trans_cost
)
else:
swap_distance = maxsize
d_mat[i, j] = min(
del_distance, ins_distance, match_distance, swap_distance
)
src_index_by_character[src[i]] = i
return d_mat[len(src) - 1, len(tar) - 1]
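# A hedged sketch of non-default costs: when a substitution costs more than
# an insert plus a delete, the cheaper pair wins (here 1 + 1 = 2 < 5).
DamerauLevenshtein().dist_abs('cat', 'hat', cost=(1, 1, 5, 1))  # 2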
|
def dist(self, src, tar):
"""Return the NCD between two strings using zlib compression.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Compression distance
Examples
--------
>>> cmp = NCDzlib()
>>> cmp.dist('cat', 'hat')
0.3333333333333333
>>> cmp.dist('Niall', 'Neil')
0.45454545454545453
>>> cmp.dist('aluminum', 'Catalan')
0.5714285714285714
>>> cmp.dist('ATCG', 'TAGC')
0.4
"""
if src == tar:
return 0.0
src = src.encode('utf-8')
tar = tar.encode('utf-8')
self._compressor.compress(src)
src_comp = self._compressor.flush(zlib.Z_FULL_FLUSH)
self._compressor.compress(tar)
tar_comp = self._compressor.flush(zlib.Z_FULL_FLUSH)
self._compressor.compress(src + tar)
concat_comp = self._compressor.flush(zlib.Z_FULL_FLUSH)
self._compressor.compress(tar + src)
concat_comp2 = self._compressor.flush(zlib.Z_FULL_FLUSH)
return (
min(len(concat_comp), len(concat_comp2))
- min(len(src_comp), len(tar_comp))
) / max(len(src_comp), len(tar_comp))
|
def phonex(word, max_length=4, zero_pad=True):
"""Return the Phonex code for a word.
This is a wrapper for :py:meth:`Phonex.encode`.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 4)
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length string
Returns
-------
str
The Phonex value
Examples
--------
>>> phonex('Christopher')
'C623'
>>> phonex('Niall')
'N400'
>>> phonex('Schmidt')
'S253'
>>> phonex('Smith')
'S530'
"""
return Phonex().encode(word, max_length, zero_pad)
|
def encode(self, word, max_length=4, zero_pad=True):
"""Return the Phonex code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 4)
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length
string
Returns
-------
str
The Phonex value
Examples
--------
>>> pe = Phonex()
>>> pe.encode('Christopher')
'C623'
>>> pe.encode('Niall')
'N400'
>>> pe.encode('Schmidt')
'S253'
>>> pe.encode('Smith')
'S530'
"""
name = unicode_normalize('NFKD', text_type(word.upper()))
name = name.replace('ß', 'SS')
# Clamp max_length to [4, 64]
if max_length != -1:
max_length = min(max(4, max_length), 64)
else:
max_length = 64
name_code = last = ''
# Deletions effected by replacing with next letter which
# will be ignored due to duplicate handling of Soundex code.
# This is faster than 'moving' all subsequent letters.
# Remove any trailing Ss
while name[-1:] == 'S':
name = name[:-1]
# Phonetic equivalents of first 2 characters
# Works since duplicate letters are ignored
if name[:2] == 'KN':
name = 'N' + name[2:] # KN.. == N..
elif name[:2] == 'PH':
name = 'F' + name[2:] # PH.. == F.. (H ignored anyway)
elif name[:2] == 'WR':
name = 'R' + name[2:] # WR.. == R..
if name:
# Special case, ignore H first letter (subsequent Hs ignored
# anyway)
# Works since duplicate letters are ignored
if name[0] == 'H':
name = name[1:]
if name:
# Phonetic equivalents of first character
if name[0] in self._uc_vy_set:
name = 'A' + name[1:]
elif name[0] in {'B', 'P'}:
name = 'B' + name[1:]
elif name[0] in {'V', 'F'}:
name = 'F' + name[1:]
elif name[0] in {'C', 'K', 'Q'}:
name = 'C' + name[1:]
elif name[0] in {'G', 'J'}:
name = 'G' + name[1:]
elif name[0] in {'S', 'Z'}:
name = 'S' + name[1:]
name_code = last = name[0]
# Modified Soundex code
for i in range(1, len(name)):
code = '0'
if name[i] in {'B', 'F', 'P', 'V'}:
code = '1'
elif name[i] in {'C', 'G', 'J', 'K', 'Q', 'S', 'X', 'Z'}:
code = '2'
elif name[i] in {'D', 'T'}:
if name[i + 1 : i + 2] != 'C':
code = '3'
elif name[i] == 'L':
if name[i + 1 : i + 2] in self._uc_vy_set or i + 1 == len(
name
):
code = '4'
elif name[i] in {'M', 'N'}:
if name[i + 1 : i + 2] in {'D', 'G'}:
name = name[: i + 1] + name[i] + name[i + 2 :]
code = '5'
elif name[i] == 'R':
if name[i + 1 : i + 2] in self._uc_vy_set or i + 1 == len(
name
):
code = '6'
if code != last and code != '0' and i != 0:
name_code += code
last = name_code[-1]
if zero_pad:
name_code += '0' * max_length
if not name_code:
name_code = '0'
return name_code[:max_length]
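# A hedged sketch of the zero_pad switch: without padding, the raw code is
# returned truncated to max_length.
Phonex().encode('Niall', zero_pad=False)  # 'N4' rather than 'N400'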
|
def pylint_color(score):
"""Return Pylint badge color.
Parameters
----------
score : float
A Pylint score
Returns
-------
str
Badge color
"""
# These are the score cutoffs for each color above.
# I.e. score==10 -> brightgreen, down to 7.5 > score >= 5 -> orange
score_cutoffs = (10, 9.5, 8.5, 7.5, 5)
for i in range(len(score_cutoffs)):
if score >= score_cutoffs[i]:
return BADGE_COLORS[i]
# and score < 5 -> red
return BADGE_COLORS[-1]
|
def pydocstyle_color(score):
"""Return pydocstyle badge color.
Parameters
----------
score : float
A pydocstyle score
Returns
-------
str
Badge color
"""
# These are the score cutoffs for each color above.
    # I.e. score==0 -> brightgreen, down to 50 < score <= 100 -> orange
score_cutoffs = (0, 10, 25, 50, 100)
for i in range(len(score_cutoffs)):
if score <= score_cutoffs[i]:
return BADGE_COLORS[i]
    # and score > 100 -> red
return BADGE_COLORS[-1]
|
def flake8_color(score):
"""Return flake8 badge color.
Parameters
----------
score : float
A flake8 score
Returns
-------
str
Badge color
"""
# These are the score cutoffs for each color above.
# I.e. score==0 -> brightgreen, down to 100 < score <= 200 -> orange
score_cutoffs = (0, 20, 50, 100, 200)
for i in range(len(score_cutoffs)):
if score <= score_cutoffs[i]:
return BADGE_COLORS[i]
# and score > 200 -> red
return BADGE_COLORS[-1]
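# Illustrative mapping, assuming BADGE_COLORS is ordered best to worst, e.g.
# ('brightgreen', 'green', 'yellowgreen', 'yellow', 'orange', 'red'):
flake8_color(0)    # 'brightgreen' (score <= 0)
flake8_color(35)   # 'yellowgreen' (20 < score <= 50)
flake8_color(500)  # 'red' (score > 200)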
|
def gotoh(src, tar, gap_open=1, gap_ext=0.4, sim_func=sim_ident):
"""Return the Gotoh score of two strings.
This is a wrapper for :py:meth:`Gotoh.dist_abs`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
gap_open : float
The cost of an open alignment gap (1 by default)
gap_ext : float
The cost of an alignment gap extension (0.4 by default)
sim_func : function
A function that returns the similarity of two characters (identity
similarity by default)
Returns
-------
float
Gotoh score
Examples
--------
>>> gotoh('cat', 'hat')
2.0
>>> gotoh('Niall', 'Neil')
1.0
>>> round(gotoh('aluminum', 'Catalan'), 12)
-0.4
"""
return Gotoh().dist_abs(src, tar, gap_open, gap_ext, sim_func)
|
def dist_abs(self, src, tar, gap_open=1, gap_ext=0.4, sim_func=sim_ident):
"""Return the Gotoh score of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
gap_open : float
The cost of an open alignment gap (1 by default)
gap_ext : float
The cost of an alignment gap extension (0.4 by default)
sim_func : function
A function that returns the similarity of two characters (identity
similarity by default)
Returns
-------
float
Gotoh score
Examples
--------
>>> cmp = Gotoh()
>>> cmp.dist_abs('cat', 'hat')
2.0
>>> cmp.dist_abs('Niall', 'Neil')
1.0
>>> round(cmp.dist_abs('aluminum', 'Catalan'), 12)
-0.4
"""
d_mat = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_float32)
p_mat = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_float32)
q_mat = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_float32)
d_mat[0, 0] = 0
p_mat[0, 0] = float('-inf')
q_mat[0, 0] = float('-inf')
for i in range(1, len(src) + 1):
d_mat[i, 0] = float('-inf')
p_mat[i, 0] = -gap_open - gap_ext * (i - 1)
q_mat[i, 0] = float('-inf')
q_mat[i, 1] = -gap_open
for j in range(1, len(tar) + 1):
d_mat[0, j] = float('-inf')
p_mat[0, j] = float('-inf')
p_mat[1, j] = -gap_open
q_mat[0, j] = -gap_open - gap_ext * (j - 1)
for i in range(1, len(src) + 1):
for j in range(1, len(tar) + 1):
sim_val = sim_func(src[i - 1], tar[j - 1])
d_mat[i, j] = max(
d_mat[i - 1, j - 1] + sim_val,
p_mat[i - 1, j - 1] + sim_val,
q_mat[i - 1, j - 1] + sim_val,
)
p_mat[i, j] = max(
d_mat[i - 1, j] - gap_open, p_mat[i - 1, j] - gap_ext
)
q_mat[i, j] = max(
d_mat[i, j - 1] - gap_open, q_mat[i, j - 1] - gap_ext
)
i, j = (n - 1 for n in d_mat.shape)
return max(d_mat[i, j], p_mat[i, j], q_mat[i, j])
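# A hedged sketch of a custom scoring scheme passed via sim_func; sim_nw is a
# hypothetical match/mismatch function, not part of the library.
def sim_nw(a, b):
    return 2.0 if a == b else -1.0
Gotoh().dist_abs('cat', 'hat', sim_func=sim_nw)  # -1 + 2 + 2 = 3.0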
|
def dist_abs(self, src, tar):
"""Return the bag distance between two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
int
Bag distance
Examples
--------
>>> cmp = Bag()
>>> cmp.dist_abs('cat', 'hat')
1
>>> cmp.dist_abs('Niall', 'Neil')
2
>>> cmp.dist_abs('aluminum', 'Catalan')
5
>>> cmp.dist_abs('ATCG', 'TAGC')
0
>>> cmp.dist_abs('abcdefg', 'hijklm')
7
>>> cmp.dist_abs('abcdefg', 'hijklmno')
8
"""
if tar == src:
return 0
elif not src:
return len(tar)
elif not tar:
return len(src)
src_bag = Counter(src)
tar_bag = Counter(tar)
return max(
sum((src_bag - tar_bag).values()),
sum((tar_bag - src_bag).values()),
)
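# The two multiset differences behind dist_abs('Niall', 'Neil') above:
# Counter('Niall') - Counter('Neil') -> Counter({'a': 1, 'l': 1}), sum 2
# Counter('Neil') - Counter('Niall') -> Counter({'e': 1}),         sum 1
# max(2, 1) == 2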
|
def dist(self, src, tar):
"""Return the normalized bag distance between two strings.
Bag distance is normalized by dividing by :math:`max( |src|, |tar| )`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Normalized bag distance
Examples
--------
>>> cmp = Bag()
>>> cmp.dist('cat', 'hat')
0.3333333333333333
>>> cmp.dist('Niall', 'Neil')
0.4
>>> cmp.dist('aluminum', 'Catalan')
0.625
>>> cmp.dist('ATCG', 'TAGC')
0.0
"""
if tar == src:
return 0.0
if not src or not tar:
return 1.0
max_length = max(len(src), len(tar))
return self.dist_abs(src, tar) / max_length
|
def stem(self, word):
"""Return 'CLEF German stemmer plus' stem.
Parameters
----------
word : str
The word to stem
Returns
-------
str
Word stem
Examples
--------
>>> stmr = CLEFGermanPlus()
        >>> stmr.stem('lesen')
        'les'
        >>> stmr.stem('graues')
        'grau'
        >>> stmr.stem('buchstabieren')
        'buchstabi'
"""
# lowercase, normalize, and compose
word = normalize('NFC', text_type(word.lower()))
# remove umlauts
word = word.translate(self._accents)
# Step 1
wlen = len(word) - 1
if wlen > 4 and word[-3:] == 'ern':
word = word[:-3]
elif wlen > 3 and word[-2:] in {'em', 'en', 'er', 'es'}:
word = word[:-2]
elif wlen > 2 and (
word[-1] == 'e'
or (word[-1] == 's' and word[-2] in self._st_ending)
):
word = word[:-1]
# Step 2
wlen = len(word) - 1
if wlen > 4 and word[-3:] == 'est':
word = word[:-3]
elif wlen > 3 and (
word[-2:] in {'er', 'en'}
or (word[-2:] == 'st' and word[-3] in self._st_ending)
):
word = word[:-2]
return word
|
def sim_mlipns(src, tar, threshold=0.25, max_mismatches=2):
"""Return the MLIPNS similarity of two strings.
This is a wrapper for :py:meth:`MLIPNS.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
threshold : float
A number [0, 1] indicating the maximum similarity score, below which
the strings are considered 'similar' (0.25 by default)
max_mismatches : int
A number indicating the allowable number of mismatches to remove before
declaring two strings not similar (2 by default)
Returns
-------
float
MLIPNS similarity
Examples
--------
>>> sim_mlipns('cat', 'hat')
1.0
>>> sim_mlipns('Niall', 'Neil')
0.0
>>> sim_mlipns('aluminum', 'Catalan')
0.0
>>> sim_mlipns('ATCG', 'TAGC')
0.0
"""
return MLIPNS().sim(src, tar, threshold, max_mismatches)
|
def dist_mlipns(src, tar, threshold=0.25, max_mismatches=2):
"""Return the MLIPNS distance between two strings.
This is a wrapper for :py:meth:`MLIPNS.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
threshold : float
A number [0, 1] indicating the maximum similarity score, below which
the strings are considered 'similar' (0.25 by default)
max_mismatches : int
A number indicating the allowable number of mismatches to remove before
declaring two strings not similar (2 by default)
Returns
-------
float
MLIPNS distance
Examples
--------
>>> dist_mlipns('cat', 'hat')
0.0
>>> dist_mlipns('Niall', 'Neil')
1.0
>>> dist_mlipns('aluminum', 'Catalan')
1.0
>>> dist_mlipns('ATCG', 'TAGC')
1.0
"""
return MLIPNS().dist(src, tar, threshold, max_mismatches)
|
def sim(self, src, tar, threshold=0.25, max_mismatches=2):
"""Return the MLIPNS similarity of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
threshold : float
A number [0, 1] indicating the maximum similarity score, below
which the strings are considered 'similar' (0.25 by default)
max_mismatches : int
A number indicating the allowable number of mismatches to remove
before declaring two strings not similar (2 by default)
Returns
-------
float
MLIPNS similarity
Examples
--------
        >>> cmp = MLIPNS()
        >>> cmp.sim('cat', 'hat')
        1.0
        >>> cmp.sim('Niall', 'Neil')
        0.0
        >>> cmp.sim('aluminum', 'Catalan')
        0.0
        >>> cmp.sim('ATCG', 'TAGC')
        0.0
"""
if tar == src:
return 1.0
if not src or not tar:
return 0.0
mismatches = 0
ham = Hamming().dist_abs(src, tar, diff_lens=True)
max_length = max(len(src), len(tar))
while src and tar and mismatches <= max_mismatches:
if (
max_length < 1
or (1 - (max_length - ham) / max_length) <= threshold
):
return 1.0
else:
mismatches += 1
ham -= 1
max_length -= 1
if max_length < 1:
return 1.0
return 0.0
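# Worked trace of sim('cat', 'hat'): ham = 1, max_length = 3.
# Pass 1: 1 - (3 - 1) / 3 = 0.333 > 0.25, so strip one mismatch
#         (mismatches = 1, ham = 0, max_length = 2).
# Pass 2: 1 - (2 - 0) / 2 = 0.0 <= 0.25, so the strings are similar -> 1.0.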
|
def sim(src, tar, method=sim_levenshtein):
"""Return a similarity of two strings.
This is a generalized function for calling other similarity functions.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
method : function
Specifies the similarity metric (:py:func:`sim_levenshtein` by default)
Returns
-------
float
Similarity according to the specified function
Raises
------
AttributeError
        Unknown similarity function
Examples
--------
>>> round(sim('cat', 'hat'), 12)
0.666666666667
>>> round(sim('Niall', 'Neil'), 12)
0.4
>>> sim('aluminum', 'Catalan')
0.125
>>> sim('ATCG', 'TAGC')
0.25
"""
if callable(method):
return method(src, tar)
else:
raise AttributeError('Unknown similarity function: ' + str(method))
|
def dist(src, tar, method=sim_levenshtein):
"""Return a distance between two strings.
This is a generalized function for calling other distance functions.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
method : function
Specifies the similarity metric (:py:func:`sim_levenshtein` by default)
-- Note that this takes a similarity metric function, not a distance
metric function.
Returns
-------
float
Distance according to the specified function
Raises
------
AttributeError
Unknown distance function
Examples
--------
>>> round(dist('cat', 'hat'), 12)
0.333333333333
>>> round(dist('Niall', 'Neil'), 12)
0.6
>>> dist('aluminum', 'Catalan')
0.875
>>> dist('ATCG', 'TAGC')
0.75
"""
if callable(method):
return 1 - method(src, tar)
else:
raise AttributeError('Unknown distance function: ' + str(method))
|
def encode(self, word):
"""Return the MRA personal numeric identifier (PNI) for a word.
Parameters
----------
word : str
The word to transform
Returns
-------
str
The MRA PNI
Examples
--------
>>> pe = MRA()
>>> pe.encode('Christopher')
'CHRPHR'
>>> pe.encode('Niall')
'NL'
>>> pe.encode('Smith')
'SMTH'
>>> pe.encode('Schmidt')
'SCHMDT'
"""
if not word:
return word
word = word.upper()
word = word.replace('ß', 'SS')
word = word[0] + ''.join(
c for c in word[1:] if c not in self._uc_v_set
)
word = self._delete_consecutive_repeats(word)
if len(word) > 6:
word = word[:3] + word[-3:]
return word
|
def _m_degree(self, term):
"""Return Porter helper function _m_degree value.
m-degree is equal to the number of V to C transitions
Parameters
----------
term : str
The word for which to calculate the m-degree
Returns
-------
int
The m-degree as defined in the Porter stemmer definition
"""
mdeg = 0
last_was_vowel = False
for letter in term:
if letter in self._vowels:
last_was_vowel = True
else:
if last_was_vowel:
mdeg += 1
last_was_vowel = False
return mdeg
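# Worked m-degree values (Porter's [C](VC)^m[V] decomposition), computed by
# counting vowel-to-consonant transitions as above:
# 'tr' -> 0, 'trouble' -> 1, 'oaten' -> 2, 'private' -> 2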
|
def _has_vowel(self, term):
"""Return Porter helper function _has_vowel value.
Parameters
----------
term : str
The word to scan for vowels
Returns
-------
bool
True iff a vowel exists in the term (as defined in the Porter
stemmer definition)
"""
for letter in term:
if letter in self._vowels:
return True
return False
|
def _ends_in_doubled_cons(self, term):
"""Return Porter helper function _ends_in_doubled_cons value.
Parameters
----------
term : str
The word to check for a final doubled consonant
Returns
-------
bool
True iff the stem ends in a doubled consonant (as defined in the
Porter stemmer definition)
"""
return (
len(term) > 1
and term[-1] not in self._vowels
and term[-2] == term[-1]
)
|
def _ends_in_cvc(self, term):
"""Return Porter helper function _ends_in_cvc value.
Parameters
----------
term : str
The word to scan for cvc
Returns
-------
bool
True iff the stem ends in cvc (as defined in the Porter stemmer
definition)
"""
return len(term) > 2 and (
term[-1] not in self._vowels
and term[-2] in self._vowels
and term[-3] not in self._vowels
and term[-1] not in tuple('wxY')
)
|
def stem(self, word, early_english=False):
"""Return Porter stem.
Parameters
----------
word : str
The word to stem
early_english : bool
Set to True in order to remove -eth & -est (2nd & 3rd person
singular verbal agreement suffixes)
Returns
-------
str
Word stem
Examples
--------
>>> stmr = Porter()
>>> stmr.stem('reading')
'read'
>>> stmr.stem('suspension')
'suspens'
>>> stmr.stem('elusiveness')
'elus'
>>> stmr.stem('eateth', early_english=True)
'eat'
"""
# lowercase, normalize, and compose
word = normalize('NFC', text_type(word.lower()))
# Return word if stem is shorter than 2
if len(word) < 3:
return word
# Re-map consonantal y to Y (Y will be C, y will be V)
if word[0] == 'y':
word = 'Y' + word[1:]
for i in range(1, len(word)):
if word[i] == 'y' and word[i - 1] in self._vowels:
word = word[:i] + 'Y' + word[i + 1 :]
# Step 1a
if word[-1] == 's':
if word[-4:] == 'sses':
word = word[:-2]
elif word[-3:] == 'ies':
word = word[:-2]
elif word[-2:] == 'ss':
pass
else:
word = word[:-1]
# Step 1b
step1b_flag = False
if word[-3:] == 'eed':
if self._m_degree(word[:-3]) > 0:
word = word[:-1]
elif word[-2:] == 'ed':
if self._has_vowel(word[:-2]):
word = word[:-2]
step1b_flag = True
elif word[-3:] == 'ing':
if self._has_vowel(word[:-3]):
word = word[:-3]
step1b_flag = True
elif early_english:
if word[-3:] == 'est':
if self._has_vowel(word[:-3]):
word = word[:-3]
step1b_flag = True
elif word[-3:] == 'eth':
if self._has_vowel(word[:-3]):
word = word[:-3]
step1b_flag = True
if step1b_flag:
if word[-2:] in {'at', 'bl', 'iz'}:
word += 'e'
elif self._ends_in_doubled_cons(word) and word[-1] not in {
'l',
's',
'z',
}:
word = word[:-1]
elif self._m_degree(word) == 1 and self._ends_in_cvc(word):
word += 'e'
# Step 1c
if word[-1] in {'Y', 'y'} and self._has_vowel(word[:-1]):
word = word[:-1] + 'i'
# Step 2
if len(word) > 1:
if word[-2] == 'a':
if word[-7:] == 'ational':
if self._m_degree(word[:-7]) > 0:
word = word[:-5] + 'e'
elif word[-6:] == 'tional':
if self._m_degree(word[:-6]) > 0:
word = word[:-2]
elif word[-2] == 'c':
if word[-4:] in {'enci', 'anci'}:
if self._m_degree(word[:-4]) > 0:
word = word[:-1] + 'e'
elif word[-2] == 'e':
if word[-4:] == 'izer':
if self._m_degree(word[:-4]) > 0:
word = word[:-1]
elif word[-2] == 'g':
if word[-4:] == 'logi':
if self._m_degree(word[:-4]) > 0:
word = word[:-1]
elif word[-2] == 'l':
if word[-3:] == 'bli':
if self._m_degree(word[:-3]) > 0:
word = word[:-1] + 'e'
elif word[-4:] == 'alli':
if self._m_degree(word[:-4]) > 0:
word = word[:-2]
elif word[-5:] == 'entli':
if self._m_degree(word[:-5]) > 0:
word = word[:-2]
elif word[-3:] == 'eli':
if self._m_degree(word[:-3]) > 0:
word = word[:-2]
elif word[-5:] == 'ousli':
if self._m_degree(word[:-5]) > 0:
word = word[:-2]
elif word[-2] == 'o':
if word[-7:] == 'ization':
if self._m_degree(word[:-7]) > 0:
word = word[:-5] + 'e'
elif word[-5:] == 'ation':
if self._m_degree(word[:-5]) > 0:
word = word[:-3] + 'e'
elif word[-4:] == 'ator':
if self._m_degree(word[:-4]) > 0:
word = word[:-2] + 'e'
elif word[-2] == 's':
if word[-5:] == 'alism':
if self._m_degree(word[:-5]) > 0:
word = word[:-3]
elif word[-7:] in {'iveness', 'fulness', 'ousness'}:
if self._m_degree(word[:-7]) > 0:
word = word[:-4]
elif word[-2] == 't':
if word[-5:] == 'aliti':
if self._m_degree(word[:-5]) > 0:
word = word[:-3]
elif word[-5:] == 'iviti':
if self._m_degree(word[:-5]) > 0:
word = word[:-3] + 'e'
elif word[-6:] == 'biliti':
if self._m_degree(word[:-6]) > 0:
word = word[:-5] + 'le'
# Step 3
        if word[-5:] == 'icate':
if self._m_degree(word[:-5]) > 0:
word = word[:-3]
elif word[-5:] == 'ative':
if self._m_degree(word[:-5]) > 0:
word = word[:-5]
elif word[-5:] in {'alize', 'iciti'}:
if self._m_degree(word[:-5]) > 0:
word = word[:-3]
elif word[-4:] == 'ical':
if self._m_degree(word[:-4]) > 0:
word = word[:-2]
elif word[-3:] == 'ful':
if self._m_degree(word[:-3]) > 0:
word = word[:-3]
elif word[-4:] == 'ness':
if self._m_degree(word[:-4]) > 0:
word = word[:-4]
# Step 4
if word[-2:] == 'al':
if self._m_degree(word[:-2]) > 1:
word = word[:-2]
elif word[-4:] in {'ance', 'ence'}:
if self._m_degree(word[:-4]) > 1:
word = word[:-4]
elif word[-2:] in {'er', 'ic'}:
if self._m_degree(word[:-2]) > 1:
word = word[:-2]
elif word[-4:] in {'able', 'ible'}:
if self._m_degree(word[:-4]) > 1:
word = word[:-4]
elif word[-3:] == 'ant':
if self._m_degree(word[:-3]) > 1:
word = word[:-3]
elif word[-5:] == 'ement':
if self._m_degree(word[:-5]) > 1:
word = word[:-5]
elif word[-4:] == 'ment':
if self._m_degree(word[:-4]) > 1:
word = word[:-4]
elif word[-3:] == 'ent':
if self._m_degree(word[:-3]) > 1:
word = word[:-3]
elif word[-4:] in {'sion', 'tion'}:
if self._m_degree(word[:-3]) > 1:
word = word[:-3]
elif word[-2:] == 'ou':
if self._m_degree(word[:-2]) > 1:
word = word[:-2]
elif word[-3:] in {'ism', 'ate', 'iti', 'ous', 'ive', 'ize'}:
if self._m_degree(word[:-3]) > 1:
word = word[:-3]
# Step 5a
if word[-1] == 'e':
if self._m_degree(word[:-1]) > 1:
word = word[:-1]
elif self._m_degree(word[:-1]) == 1 and not self._ends_in_cvc(
word[:-1]
):
word = word[:-1]
# Step 5b
if word[-2:] == 'll' and self._m_degree(word) > 1:
word = word[:-1]
# Change 'Y' back to 'y' if it survived stemming
for i in range(len(word)):
if word[i] == 'Y':
word = word[:i] + 'y' + word[i + 1 :]
return word
|
def soundex_br(word, max_length=4, zero_pad=True):
"""Return the SoundexBR encoding of a word.
This is a wrapper for :py:meth:`SoundexBR.encode`.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 4)
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length string
Returns
-------
str
The SoundexBR code
Examples
--------
>>> soundex_br('Oliveira')
'O416'
>>> soundex_br('Almeida')
'A453'
>>> soundex_br('Barbosa')
'B612'
>>> soundex_br('Araújo')
'A620'
>>> soundex_br('Gonçalves')
'G524'
>>> soundex_br('Goncalves')
'G524'
"""
return SoundexBR().encode(word, max_length, zero_pad)
|
def encode(self, word, max_length=4, zero_pad=True):
"""Return the SoundexBR encoding of a word.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 4)
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length
string
Returns
-------
str
The SoundexBR code
Examples
--------
        >>> pe = SoundexBR()
        >>> pe.encode('Oliveira')
        'O416'
        >>> pe.encode('Almeida')
        'A453'
        >>> pe.encode('Barbosa')
        'B612'
        >>> pe.encode('Araújo')
        'A620'
        >>> pe.encode('Gonçalves')
        'G524'
        >>> pe.encode('Goncalves')
        'G524'
"""
word = unicode_normalize('NFKD', text_type(word.upper()))
word = ''.join(c for c in word if c in self._uc_set)
if word[:2] == 'WA':
first = 'V'
elif word[:1] == 'K' and word[1:2] in {'A', 'O', 'U'}:
first = 'C'
elif word[:1] == 'C' and word[1:2] in {'I', 'E'}:
first = 'S'
elif word[:1] == 'G' and word[1:2] in {'E', 'I'}:
first = 'J'
elif word[:1] == 'Y':
first = 'I'
elif word[:1] == 'H':
first = word[1:2]
word = word[1:]
else:
first = word[:1]
sdx = first + word[1:].translate(self._trans)
sdx = self._delete_consecutive_repeats(sdx)
sdx = sdx.replace('0', '')
if zero_pad:
sdx += '0' * max_length
return sdx[:max_length]
|
def filter_symlog(y, base=10.0):
"""Symmetrical logarithmic scale.
Optional arguments:
*base*:
The base of the logarithm.
"""
log_base = np.log(base)
sign = np.sign(y)
    # Change of base: log_base(|y|) = ln(|y|) / ln(base)
    logs = np.log(np.abs(y)) / log_base
return sign * logs
|
def filter_savitzky_golay(y, window_size=5, order=2, deriv=0, rate=1):
"""Smooth (and optionally differentiate) with a Savitzky-Golay filter."""
    try:
        window_size = abs(int(window_size))
        order = abs(int(order))
    except ValueError:
        raise ValueError('window_size and order must be integers')
if window_size % 2 != 1 or window_size < 1:
raise ValueError('window_size size must be a positive odd number')
if window_size < order + 2:
raise ValueError('window_size is too small for the polynomials order')
order_range = range(order + 1)
half_window = (window_size - 1) // 2
# precompute limits
minimum = np.min(y)
maximum = np.max(y)
# precompute coefficients
    b = np.array([
        [k ** i for i in order_range]
        for k in range(-half_window, half_window + 1)
    ])
    m = np.linalg.pinv(b)[deriv] * rate ** deriv * math.factorial(deriv)
# pad the signal at the extremes with values taken from the original signal
firstvals = y[0] - np.abs(y[1:half_window+1][::-1] - y[0])
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.clip(
np.convolve(m[::-1], y, mode='valid'),
minimum,
maximum,
)
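# A minimal usage sketch (the input values are illustrative): smooth a short
# noisy series with a 5-point quadratic fit; deriv=1 would instead return the
# smoothed first derivative.
noisy = np.array([0.0, 1.2, 0.8, 2.1, 1.9, 3.2, 2.8, 4.1])
smooth = filter_savitzky_golay(noisy, window_size=5, order=2)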
|
def usage_function(parser):
"""Show usage and available curve functions."""
parser.print_usage()
print('')
print('available functions:')
for function in sorted(FUNCTION):
doc = FUNCTION[function].__doc__.strip().splitlines()[0]
print(' %-12s %s' % (function + ':', doc))
return 0
|
def usage_palette(parser):
"""Show usage and available palettes."""
parser.print_usage()
print('')
print('available palettes:')
for palette in sorted(PALETTE):
print(' %-12s' % (palette,))
return 0
|
def run():
"""Main entrypoint if invoked via the command line."""
import argparse
parser = argparse.ArgumentParser(
description=(
'Text mode diagrams using UTF-8 characters and fancy colors.'
),
epilog="""
(1): only works for the horizontal bar graph, the first argument is the key
and the second value is the data point.
""",
)
group = parser.add_argument_group('optional drawing mode')
group.add_argument(
'-G', '--graph',
dest='mode', action='store_const', const='g',
help='axis drawing mode (default)',
)
group.add_argument(
'-H', '--horizontal-bars',
dest='mode', action='store_const', const='h',
help='horizontal drawing mode',
)
group.add_argument(
'-V', '--vertical-bars',
dest='mode', action='store_const', const='v',
help='vertical drawing mode',
)
group = parser.add_argument_group('optional drawing arguments')
group.add_argument(
'-a', '--axis',
dest='axis', action='store_const', const=True, default=True,
help='draw axis (default: yes)',
)
group.add_argument(
'-A', '--no-axis',
dest='axis', action='store_const', const=False,
help="don't draw axis",
)
group.add_argument(
'-c', '--color',
dest='color', action='store_const', const=True, default=True,
help='use colors (default: yes)',
)
group.add_argument(
'-C', '--no-color',
dest='color', action='store_const', const=False,
help="don't use colors",
)
group.add_argument(
'-l', '--legend',
dest='legend', action='store_const', const=True, default=True,
help='draw y-axis legend (default: yes)',
)
group.add_argument(
'-L', '--no-legend',
dest='legend', action='store_const', const=False,
help="don't draw y-axis legend",
)
group.add_argument(
'-f', '--function',
default=None, metavar='function',
help='curve manipulation function, use "help" for a list',
)
group.add_argument(
'-p', '--palette',
default='default', metavar='palette',
help='palette name, use "help" for a list',
)
group.add_argument(
'-x', '--width',
default=0, type=int, metavar='characters',
help='drawing width (default: auto)',
)
group.add_argument(
'-y', '--height',
default=0, type=int, metavar='characters',
help='drawing height (default: auto)',
)
group.add_argument(
'-r', '--reverse',
default=False, action='store_true',
help='reverse draw graph',
)
group.add_argument(
'--sort-by-column',
default=0, type=int, metavar='index',
help='sort input data based on given column',
)
group = parser.add_argument_group('optional input and output arguments')
group.add_argument(
'-b', '--batch',
default=False, action='store_true',
help='batch mode (default: no)',
)
group.add_argument(
'-k', '--keys',
default=False, action='store_true',
help='input are key-value pairs (default: no) (1)',
)
group.add_argument(
'-s', '--sleep',
default=0, type=float,
help='batch poll sleep time (default: none)',
)
group.add_argument(
'-i', '--input',
default='-', metavar='file',
help='input file (default: stdin)',
)
group.add_argument(
'-o', '--output',
default='-', metavar='file',
help='output file (default: stdout)',
)
group.add_argument(
'-e', '--encoding',
dest='encoding', default='',
help='output encoding (default: auto)',
)
option = parser.parse_args()
if option.function == 'help':
return usage_function(parser)
if option.palette == 'help':
return usage_palette(parser)
option.mode = option.mode or 'g'
option.size = Point((option.width, option.height))
if option.input in ['-', 'stdin']:
istream = sys.stdin
else:
istream = open(option.input, 'r')
if option.output in ['-', 'stdout']:
try:
ostream = sys.stdout.buffer
except AttributeError:
ostream = sys.stdout
else:
ostream = open(option.output, 'wb')
option.encoding = option.encoding or Terminal().encoding
if option.mode == 'g':
engine = AxisGraph(option.size, option)
elif option.mode == 'h':
engine = HorizontalBarGraph(option.size, option)
elif option.mode == 'v':
engine = VerticalBarGraph(option.size, option)
else:
parser.error('invalid mode')
return 1
engine.consume(istream, ostream, batch=option.batch)
|
def size(self):
"""Get the current terminal size."""
for fd in range(3):
cr = self._ioctl_GWINSZ(fd)
if cr:
break
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = self._ioctl_GWINSZ(fd)
os.close(fd)
except Exception:
pass
if not cr:
env = os.environ
cr = (env.get('LINES', 25), env.get('COLUMNS', 80))
return int(cr[1]), int(cr[0])
|
def color(self, index):
"""Get the escape sequence for indexed color ``index``.
The ``index`` is a color index in the 256 color space. The color space
consists of:
* 0x00-0x0f: default EGA colors
* 0x10-0xe7: 6x6x6 RGB cubes
* 0xe8-0xff: gray scale ramp
"""
if self.colors == 16:
if index >= 8:
return self.csi('bold') + self.csi('setaf', index - 8)
else:
return self.csi('sgr0') + self.csi('setaf', index)
else:
return self.csi('setaf', index)
|
def csi(self, capname, *args):
"""Return the escape sequence for the selected Control Sequence."""
value = curses.tigetstr(capname)
if value is None:
return b''
else:
return curses.tparm(value, *args)
|
def csi_wrap(self, value, capname, *args):
"""Return a value wrapped in the selected CSI and does a reset."""
if isinstance(value, str):
value = value.encode('utf-8')
return b''.join([
self.csi(capname, *args),
value,
self.csi('sgr0'),
])
|
def consume(self, istream, ostream, batch=False):
"""Read points from istream and output to ostream."""
datapoints = [] # List of 2-tuples
if batch:
sleep = max(0.01, self.option.sleep)
fd = istream.fileno()
while True:
try:
                    # select returns a 3-tuple of lists; test the read list,
                    # since a non-empty tuple is always truthy
                    if select.select([fd], [], [], sleep)[0]:
try:
line = istream.readline()
if line == '':
break
datapoints.append(self.consume_line(line))
except ValueError:
continue
if self.option.sort_by_column:
datapoints = sorted(datapoints, key=itemgetter(self.option.sort_by_column - 1))
if len(datapoints) > 1:
datapoints = datapoints[-self.maximum_points:]
self.update([dp[0] for dp in datapoints], [dp[1] for dp in datapoints])
self.render(ostream)
time.sleep(sleep)
except KeyboardInterrupt:
break
else:
for line in istream:
try:
datapoints.append(self.consume_line(line))
except ValueError:
pass
if self.option.sort_by_column:
datapoints = sorted(datapoints, key=itemgetter(self.option.sort_by_column - 1))
self.update([dp[0] for dp in datapoints], [dp[1] for dp in datapoints])
self.render(ostream)
|
def consume_line(self, line):
"""Consume data from a line."""
data = RE_VALUE_KEY.split(line.strip(), 1)
if len(data) == 1:
return float(data[0]), None
else:
return float(data[0]), data[1].strip()
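# Assuming RE_VALUE_KEY splits the numeric value from an optional trailing
# key (these example lines are hypothetical):
# consume_line('3.14 cpu0') -> (3.14, 'cpu0')
# consume_line('42')        -> (42.0, None)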
|
def update(self, points, values=None):
"""Add a set of data points."""
self.values = values or [None] * len(points)
if np is None:
if self.option.function:
warnings.warn('numpy not available, function ignored')
self.points = points
self.minimum = min(self.points)
self.maximum = max(self.points)
self.current = self.points[-1]
else:
self.points = self.apply_function(points)
self.minimum = np.min(self.points)
self.maximum = np.max(self.points)
self.current = self.points[-1]
        if self.maximum == self.minimum:
            self.extents = 1
        else:
            self.extents = (self.maximum - self.minimum)
|
def color_ramp(self, size):
"""Generate a color ramp for the current screen height."""
color = PALETTE.get(self.option.palette, {})
color = color.get(self.term.colors, None)
color_ramp = []
if color is not None:
ratio = len(color) / float(size)
for i in range(int(size)):
color_ramp.append(self.term.color(color[int(ratio * i)]))
return color_ramp
|
def human(self, size, base=1000, units=' kMGTZ'):
"""Convert the input ``size`` to human readable, short form."""
sign = '+' if size >= 0 else '-'
size = abs(size)
if size < 1000:
return '%s%d' % (sign, size)
for i, suffix in enumerate(units):
unit = 1000 ** (i + 1)
if size < unit:
return ('%s%.01f%s' % (
sign,
size / float(unit) * base,
suffix,
)).strip()
raise OverflowError
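# Illustrative values:
# human(512)     -> '+512'
# human(12345)   -> '+12.3k'
# human(-2.5e9)  -> '-2.5G'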
|
def apply_function(self, points):
"""Run the filter function on the provided points."""
if not self.option.function:
return points
if np is None:
raise ImportError('numpy is not available')
if ':' in self.option.function:
function, arguments = self.option.function.split(':', 1)
arguments = arguments.split(',')
else:
function = self.option.function
arguments = []
# Resolve arguments
arguments = list(map(self._function_argument, arguments))
# Resolve function
filter_function = FUNCTION.get(function)
if filter_function is None:
raise TypeError('Invalid function "%s"' % (function,))
else:
# We wrap in ``list()`` to consume generators and iterators, as
# ``np.array`` doesn't do this for us.
return filter_function(np.array(list(points)), *arguments)
|
def line(self, p1, p2, resolution=1):
"""Resolve the points to make a line between two points."""
xdiff = max(p1.x, p2.x) - min(p1.x, p2.x)
ydiff = max(p1.y, p2.y) - min(p1.y, p2.y)
xdir = [-1, 1][int(p1.x <= p2.x)]
ydir = [-1, 1][int(p1.y <= p2.y)]
r = int(round(max(xdiff, ydiff)))
if r == 0:
return
for i in range((r + 1) * resolution):
x = p1.x
y = p1.y
if xdiff:
x += (float(i) * xdiff) / r * xdir / resolution
if ydiff:
y += (float(i) * ydiff) / r * ydir / resolution
yield Point((x, y))
|
def set_text(self, point, text):
"""Set a text value in the screen canvas."""
if not self.option.legend:
return
if not isinstance(point, Point):
point = Point(point)
for offset, char in enumerate(str(text)):
self.screen.canvas[point.y][point.x + offset] = char
|
def render(self, stream):
"""Render graph to stream."""
encoding = self.option.encoding or self.term.encoding or "utf8"
if self.option.color:
ramp = self.color_ramp(self.size.y)[::-1]
else:
ramp = None
if self.cycle >= 1 and self.lines:
stream.write(self.term.csi('cuu', self.lines))
zero = int(self.null / 4) # Zero crossing
lines = 0
for y in range(self.screen.size.y):
if y == zero and self.size.y > 1:
stream.write(self.term.csi('smul'))
if ramp:
stream.write(ramp[y])
for x in range(self.screen.size.x):
point = Point((x, y))
if point in self.screen:
value = self.screen[point]
if isinstance(value, int):
stream.write(chr(self.base + value).encode(encoding))
else:
stream.write(self.term.csi('sgr0'))
stream.write(self.term.csi_wrap(
value.encode(encoding),
'bold'
))
if y == zero and self.size.y > 1:
stream.write(self.term.csi('smul'))
if ramp:
stream.write(ramp[y])
else:
stream.write(b' ')
if y == zero and self.size.y > 1:
stream.write(self.term.csi('rmul'))
if ramp:
stream.write(self.term.csi('sgr0'))
stream.write(b'\n')
lines += 1
stream.flush()
self.cycle = self.cycle + 1
self.lines = lines
|
def _normalised_numpy(self):
"""Normalised data points using numpy."""
dx = (self.screen.width / float(len(self.points)))
oy = (self.screen.height)
points = np.array(self.points) - self.minimum
points = points * 4.0 / self.extents * self.size.y
for x, y in enumerate(points):
yield Point((
dx * x,
min(oy, oy - y),
))
|
def _normalised_python(self):
"""Normalised data points using pure Python."""
dx = (self.screen.width / float(len(self.points)))
oy = (self.screen.height)
for x, point in enumerate(self.points):
y = (point - self.minimum) * 4.0 / self.extents * self.size.y
yield Point((
dx * x,
min(oy, oy - y),
))
|
def null(self):
"""Zero crossing value."""
if not self.option.axis:
return -1
else:
return self.screen.height - (
-self.minimum * 4.0 / self.extents * self.size.y
)
|
def set(self, point):
"""Set pixel at (x, y) point."""
if not isinstance(point, Point):
point = Point(point)
rx = self.round(point.x)
ry = self.round(point.y)
item = Point((rx >> 1, min(ry >> 2, self.size.y)))
self.screen[item] |= self.pixels[ry & 3][rx & 1]
|
def unset(self, point):
        """Unset pixel at (x, y) point."""
        if not isinstance(point, Point):
            point = Point(point)
        rx = self.round(point.x)
        ry = self.round(point.y)
        x, y = rx >> 1, ry >> 2
        if (x, y) not in self.screen:
            return
        if isinstance(self.screen[(x, y)], int):
            # Clear the same sub-pixel bit that set() sets
            self.screen[(x, y)] &= ~self.pixels[ry & 3][rx & 1]
        else:
            del self.screen[(x, y)]
        if not self.screen.canvas.get(y):
            del self.screen[y]
|
def mem_size(self):
        """Return the memory size used when allocating this document in-game."""
        data_len = self._data_mem_size
        node_count = len(list(self.xml_doc.iter(tag=etree.Element)))
        if self.compressed:
            size = 52 * node_count + data_len + 630
        else:
            tags_len = 0
            for e in self.xml_doc.iter(tag=etree.Element):
                e_len = max(len(e.tag), 8)
                e_len = (e_len + 3) & ~3  # pad tag length to a multiple of 4
                tags_len += e_len
            size = 56 * node_count + data_len + 630 + tags_len
        return (size + 8) & ~7  # round the total up to a multiple of 8
|