def davidson(lname, fname='.', omit_fname=False): """Return Davidson's Consonant Code. This is a wrapper for :py:meth:`Davidson.encode`. Parameters ---------- lname : str Last name (or word) to be encoded fname : str First name (optional), of which the first character is included in the code. omit_fname : bool Set to True to completely omit the first character of the first name Returns ------- str Davidson's Consonant Code Example ------- >>> davidson('Gough') 'G   .' >>> davidson('pneuma') 'PNM .' >>> davidson('knight') 'KNGT.' >>> davidson('trice') 'TRC .' >>> davidson('judge') 'JDG .' >>> davidson('Smith', 'James') 'SMT J' >>> davidson('Wasserman', 'Tabitha') 'WSRMT' """ return Davidson().encode(lname, fname, omit_fname)
def encode(self, lname, fname='.', omit_fname=False): """Return Davidson's Consonant Code. Parameters ---------- lname : str Last name (or word) to be encoded fname : str First name (optional), of which the first character is included in the code. omit_fname : bool Set to True to completely omit the first character of the first name Returns ------- str Davidson's Consonant Code Example ------- >>> pe = Davidson() >>> pe.encode('Gough') 'G   .' >>> pe.encode('pneuma') 'PNM .' >>> pe.encode('knight') 'KNGT.' >>> pe.encode('trice') 'TRC .' >>> pe.encode('judge') 'JDG .' >>> pe.encode('Smith', 'James') 'SMT J' >>> pe.encode('Wasserman', 'Tabitha') 'WSRMT' """ lname = text_type(lname.upper()) code = self._delete_consecutive_repeats( lname[:1] + lname[1:].translate(self._trans) ) code = code[:4] + (4 - len(code)) * ' ' if not omit_fname: code += fname[:1].upper() return code
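# A minimal, self-contained sketch of the encode() logic above. The
# translation table here is an assumption (plain deletion of
# A, E, I, O, U, H, W, Y), not the class's actual `_trans` object.
from itertools import groupby

_DROP = str.maketrans('', '', 'AEIOUHWY')  # hypothetical stand-in

def davidson_sketch(lname, fname='.', omit_fname=False):
    lname = lname.upper()
    # keep the first letter; strip vowels (and H/W/Y) from the rest
    code = lname[:1] + lname[1:].translate(_DROP)
    # collapse consecutive repeats, then truncate/pad to 4 characters
    code = ''.join(ch for ch, _ in groupby(code))
    code = (code + '    ')[:4]
    if not omit_fname:
        code += fname[:1].upper()
    return code

assert davidson_sketch('Gough') == 'G   .'
assert davidson_sketch('Wasserman', 'Tabitha') == 'WSRMT'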
def ac_encode(text, probs): """Encode a text using arithmetic coding with the provided probabilities. This is a wrapper for :py:meth:`Arithmetic.encode`. Parameters ---------- text : str A string to encode probs : dict A probability statistics dictionary generated by :py:meth:`Arithmetic.train` Returns ------- tuple The arithmetically coded text Example ------- >>> pr = ac_train('the quick brown fox jumped over the lazy dog') >>> ac_encode('align', pr) (16720586181, 34) """ coder = Arithmetic() coder.set_probs(probs) return coder.encode(text)
def ac_decode(longval, nbits, probs): """Decode the number to a string using the given statistics. This is a wrapper for :py:meth:`Arithmetic.decode`. Parameters ---------- longval : int The first part of an encoded tuple from ac_encode nbits : int The second part of an encoded tuple from ac_encode probs : dict A probability statistics dictionary generated by :py:meth:`Arithmetic.train` Returns ------- str The arithmetically decoded text Example ------- >>> pr = ac_train('the quick brown fox jumped over the lazy dog') >>> ac_decode(16720586181, 34, pr) 'align' """ coder = Arithmetic() coder.set_probs(probs) return coder.decode(longval, nbits)
def train(self, text): r"""Generate a probability dict from the provided text. Text to 0-order probability statistics as a dict Parameters ---------- text : str The text data over which to calculate probability statistics. This must not contain the NUL (0x00) character because that is used to indicate the end of data. Example ------- >>> ac = Arithmetic() >>> ac.train('the quick brown fox jumped over the lazy dog') >>> ac.get_probs() {' ': (Fraction(0, 1), Fraction(8, 45)), 'o': (Fraction(8, 45), Fraction(4, 15)), 'e': (Fraction(4, 15), Fraction(16, 45)), 'u': (Fraction(16, 45), Fraction(2, 5)), 't': (Fraction(2, 5), Fraction(4, 9)), 'r': (Fraction(4, 9), Fraction(22, 45)), 'h': (Fraction(22, 45), Fraction(8, 15)), 'd': (Fraction(8, 15), Fraction(26, 45)), 'z': (Fraction(26, 45), Fraction(3, 5)), 'y': (Fraction(3, 5), Fraction(28, 45)), 'x': (Fraction(28, 45), Fraction(29, 45)), 'w': (Fraction(29, 45), Fraction(2, 3)), 'v': (Fraction(2, 3), Fraction(31, 45)), 'q': (Fraction(31, 45), Fraction(32, 45)), 'p': (Fraction(32, 45), Fraction(11, 15)), 'n': (Fraction(11, 15), Fraction(34, 45)), 'm': (Fraction(34, 45), Fraction(7, 9)), 'l': (Fraction(7, 9), Fraction(4, 5)), 'k': (Fraction(4, 5), Fraction(37, 45)), 'j': (Fraction(37, 45), Fraction(38, 45)), 'i': (Fraction(38, 45), Fraction(13, 15)), 'g': (Fraction(13, 15), Fraction(8, 9)), 'f': (Fraction(8, 9), Fraction(41, 45)), 'c': (Fraction(41, 45), Fraction(14, 15)), 'b': (Fraction(14, 15), Fraction(43, 45)), 'a': (Fraction(43, 45), Fraction(44, 45)), '\x00': (Fraction(44, 45), Fraction(1, 1))} """ text = text_type(text) if '\x00' in text: text = text.replace('\x00', ' ') counts = Counter(text) counts['\x00'] = 1 tot_letters = sum(counts.values()) tot = 0 self._probs = {} prev = Fraction(0) for char, count in sorted( counts.items(), key=lambda x: (x[1], x[0]), reverse=True ): follow = Fraction(tot + count, tot_letters) self._probs[char] = (prev, follow) prev = follow tot = tot + count
def encode(self, text): """Encode a text using arithmetic coding. Text and the 0-order probability statistics -> longval, nbits The encoded number is Fraction(longval, 2**nbits) Parameters ---------- text : str A string to encode Returns ------- tuple The arithmetically coded text Example ------- >>> ac = Arithmetic('the quick brown fox jumped over the lazy dog') >>> ac.encode('align') (16720586181, 34) """ text = text_type(text) if '\x00' in text: text = text.replace('\x00', ' ') minval = Fraction(0) maxval = Fraction(1) for char in text + '\x00': prob_range = self._probs[char] delta = maxval - minval maxval = minval + prob_range[1] * delta minval = minval + prob_range[0] * delta # I tried without the /2 just to check. Doesn't work. # Keep scaling up until the error range is >= 1. That # gives me the minimum number of bits needed to resolve # down to the end-of-data character. delta = (maxval - minval) / 2 nbits = long(0) while delta < 1: nbits += 1 delta *= 2 # The below condition shouldn't ever be true if nbits == 0: # pragma: no cover return 0, 0 # using -1 instead of /2 avg = (maxval + minval) * 2 ** (nbits - 1) # Could return a rational instead ... # the division truncation is deliberate return avg.numerator // avg.denominator, nbits
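# Toy illustration of the interval narrowing performed in encode()
# above, using a hypothetical two-symbol model rather than
# probabilities learned by train().
from fractions import Fraction

toy_probs = {
    'a': (Fraction(0), Fraction(2, 3)),       # assumed model
    '\x00': (Fraction(2, 3), Fraction(1)),    # end-of-data marker
}

minval, maxval = Fraction(0), Fraction(1)
for ch in 'aa' + '\x00':
    lo, hi = toy_probs[ch]
    delta = maxval - minval
    minval, maxval = minval + lo * delta, minval + hi * delta

# Any number inside [minval, maxval) encodes the message 'aa'.
print(minval, maxval)  # -> 8/27 4/9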
def decode(self, longval, nbits): """Decode the number to a string using the given statistics. Parameters ---------- longval : int The first part of an encoded tuple from encode nbits : int The second part of an encoded tuple from encode Returns ------- str The arithmetically decoded text Example ------- >>> ac = Arithmetic('the quick brown fox jumped over the lazy dog') >>> ac.decode(16720586181, 34) 'align' """ val = Fraction(longval, long(1) << nbits) letters = [] probs_items = [ (char, minval, maxval) for (char, (minval, maxval)) in self._probs.items() ] char = '\x00' while True: for (char, minval, maxval) in probs_items: # noqa: B007 if minval <= val < maxval: break if char == '\x00': break letters.append(char) delta = maxval - minval val = (val - minval) / delta return ''.join(letters)
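# Round-trip sanity check using the module-level wrappers documented
# earlier (ac_train is assumed available alongside ac_encode/ac_decode,
# as in their docstrings). The expected tuple comes from those doctests.
pr = ac_train('the quick brown fox jumped over the lazy dog')
val, bits = ac_encode('align', pr)
assert (val, bits) == (16720586181, 34)
assert ac_decode(val, bits, pr) == 'align'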
def fuzzy_soundex(word, max_length=5, zero_pad=True): """Return the Fuzzy Soundex code for a word. This is a wrapper for :py:meth:`FuzzySoundex.encode`. Parameters ---------- word : str The word to transform max_length : int The length of the code returned (defaults to 5) zero_pad : bool Pad the end of the return value with 0s to achieve a max_length string Returns ------- str The Fuzzy Soundex value Examples -------- >>> fuzzy_soundex('Christopher') 'K6931' >>> fuzzy_soundex('Niall') 'N4000' >>> fuzzy_soundex('Smith') 'S5300' >>> fuzzy_soundex('Schmidt') 'S5300' """ return FuzzySoundex().encode(word, max_length, zero_pad)
def encode(self, word, max_length=5, zero_pad=True): """Return the Fuzzy Soundex code for a word. Parameters ---------- word : str The word to transform max_length : int The length of the code returned (defaults to 5) zero_pad : bool Pad the end of the return value with 0s to achieve a max_length string Returns ------- str The Fuzzy Soundex value Examples -------- >>> pe = FuzzySoundex() >>> pe.encode('Christopher') 'K6931' >>> pe.encode('Niall') 'N4000' >>> pe.encode('Smith') 'S5300' >>> pe.encode('Schmidt') 'S5300' """ word = unicode_normalize('NFKD', text_type(word.upper())) word = word.replace('ß', 'SS') # Clamp max_length to [4, 64] if max_length != -1: max_length = min(max(4, max_length), 64) else: max_length = 64 if not word: if zero_pad: return '0' * max_length return '0' if word[:2] in {'CS', 'CZ', 'TS', 'TZ'}: word = 'SS' + word[2:] elif word[:2] == 'GN': word = 'NN' + word[2:] elif word[:2] in {'HR', 'WR'}: word = 'RR' + word[2:] elif word[:2] == 'HW': word = 'WW' + word[2:] elif word[:2] in {'KN', 'NG'}: word = 'NN' + word[2:] if word[-2:] == 'CH': word = word[:-2] + 'KK' elif word[-2:] == 'NT': word = word[:-2] + 'TT' elif word[-2:] == 'RT': word = word[:-2] + 'RR' elif word[-3:] == 'RDT': word = word[:-3] + 'RR' word = word.replace('CA', 'KA') word = word.replace('CC', 'KK') word = word.replace('CK', 'KK') word = word.replace('CE', 'SE') word = word.replace('CHL', 'KL') word = word.replace('CL', 'KL') word = word.replace('CHR', 'KR') word = word.replace('CR', 'KR') word = word.replace('CI', 'SI') word = word.replace('CO', 'KO') word = word.replace('CU', 'KU') word = word.replace('CY', 'SY') word = word.replace('DG', 'GG') word = word.replace('GH', 'HH') word = word.replace('MAC', 'MK') word = word.replace('MC', 'MK') word = word.replace('NST', 'NSS') word = word.replace('PF', 'FF') word = word.replace('PH', 'FF') word = word.replace('SCH', 'SSS') word = word.replace('TIO', 'SIO') word = word.replace('TIA', 'SIO') word = word.replace('TCH', 'CHH') sdx = word.translate(self._trans) sdx = sdx.replace('-', '') # remove repeating characters sdx = self._delete_consecutive_repeats(sdx) if word[0] in {'H', 'W', 'Y'}: sdx = word[0] + sdx else: sdx = word[0] + sdx[1:] sdx = sdx.replace('0', '') if zero_pad: sdx += '0' * max_length return sdx[:max_length]
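# The clamping above coerces out-of-range lengths: a request for 2
# characters is treated as 4, and -1 lifts the cap to 64. These are
# hypothetical calls whose expected values follow from the 'K6931'
# doctest above.
pe = FuzzySoundex()
assert pe.encode('Christopher', max_length=2) == 'K693'  # clamped to 4
assert pe.encode('Christopher', max_length=-1).startswith('K6931')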
def corpus_importer(self, corpus, n_val=1, bos='_START_', eos='_END_'): r"""Fill in self.ngcorpus from a Corpus argument. Parameters ---------- corpus : Corpus The Corpus from which to initialize the n-gram corpus n_val : int Maximum n value for n-grams bos : str String to insert as an indicator of beginning of sentence eos : str String to insert as an indicator of end of sentence Raises ------ TypeError Corpus argument of the Corpus class required. Example ------- >>> tqbf = 'The quick brown fox jumped over the lazy dog.\n' >>> tqbf += 'And then it slept.\n And the dog ran off.' >>> ngcorp = NGramCorpus() >>> ngcorp.corpus_importer(Corpus(tqbf)) """ if not corpus or not isinstance(corpus, Corpus): raise TypeError('Corpus argument of the Corpus class required.') sentences = corpus.sents() for sent in sentences: ngs = Counter(sent) for key in ngs.keys(): self._add_to_ngcorpus(self.ngcorpus, [key], ngs[key]) if n_val > 1: if bos and bos != '': sent = [bos] + sent if eos and eos != '': sent += [eos] for i in range(2, n_val + 1): for j in range(len(sent) - i + 1): self._add_to_ngcorpus( self.ngcorpus, sent[j : j + i], 1 )
def get_count(self, ngram, corpus=None): r"""Get the count of an n-gram in the corpus. Parameters ---------- ngram : str The n-gram to retrieve the count of from the n-gram corpus corpus : Corpus The corpus Returns ------- int The n-gram count Examples -------- >>> tqbf = 'The quick brown fox jumped over the lazy dog.\n' >>> tqbf += 'And then it slept.\n And the dog ran off.' >>> ngcorp = NGramCorpus(Corpus(tqbf)) >>> NGramCorpus(Corpus(tqbf)).get_count('the') 2 >>> NGramCorpus(Corpus(tqbf)).get_count('fox') 1 """ if not corpus: corpus = self.ngcorpus # if ngram is empty, we're at our leaf node and should return the # value in None if not ngram: return corpus[None] # support strings or lists/tuples by splitting strings if isinstance(ngram, (text_type, str)): ngram = text_type(ngram).split() # if ngram is not empty, check whether the next element is in the # corpus; if so, recurse--if not, return 0 if ngram[0] in corpus: return self.get_count(ngram[1:], corpus[ngram[0]]) return 0
def _add_to_ngcorpus(self, corpus, words, count): """Build up a corpus entry recursively. Parameters ---------- corpus : Corpus The corpus words : [str] Words to add to the corpus count : int Count of words """ if words[0] not in corpus: corpus[words[0]] = Counter() if len(words) == 1: corpus[words[0]][None] += count else: self._add_to_ngcorpus(corpus[words[0]], words[1:], count)
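# What the recursive insert above builds: a trie of Counters in which
# the count of a complete n-gram lives under the None key. A standalone
# rendering of the same logic:
from collections import Counter

def add_to_ngcorpus(corpus, words, count):
    if words[0] not in corpus:
        corpus[words[0]] = Counter()
    if len(words) == 1:
        corpus[words[0]][None] += count
    else:
        add_to_ngcorpus(corpus[words[0]], words[1:], count)

ngcorpus = Counter()
add_to_ngcorpus(ngcorpus, ['the', 'dog'], 2)
add_to_ngcorpus(ngcorpus, ['the'], 5)
assert ngcorpus['the'][None] == 5          # count of unigram 'the'
assert ngcorpus['the']['dog'][None] == 2   # count of bigram 'the dog'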
def gng_importer(self, corpus_file): """Fill in self.ngcorpus from a Google NGram corpus file. Parameters ---------- corpus_file : file The Google NGram file from which to initialize the n-gram corpus """ with c_open(corpus_file, 'r', encoding='utf-8') as gng: for line in gng: line = line.rstrip().split('\t') words = line[0].split() self._add_to_ngcorpus(self.ngcorpus, words, int(line[2]))
def tf(self, term): r"""Return term frequency. Parameters ---------- term : str The term for which to calculate tf Returns ------- float The term frequency (tf) Raises ------ ValueError tf can only calculate the frequency of individual words Examples -------- >>> tqbf = 'The quick brown fox jumped over the lazy dog.\n' >>> tqbf += 'And then it slept.\n And the dog ran off.' >>> ngcorp = NGramCorpus(Corpus(tqbf)) >>> NGramCorpus(Corpus(tqbf)).tf('the') 1.3010299956639813 >>> NGramCorpus(Corpus(tqbf)).tf('fox') 1.0 """ if ' ' in term: raise ValueError( 'tf can only calculate the term frequency of individual words' ) tcount = self.get_count(term) if tcount == 0: return 0.0 return 1 + log10(tcount)
def encode(self, word): """Return the Standardized Phonetic Frequency Code (SPFC) of a word. Parameters ---------- word : str The word to transform Returns ------- str The SPFC value Raises ------ AttributeError Word attribute must be a string with a space or period dividing the first and last names or a tuple/list consisting of the first and last names Examples -------- >>> pe = SPFC() >>> pe.encode('Christopher Smith') '01160' >>> pe.encode('Christopher Schmidt') '01160' >>> pe.encode('Niall Smith') '01660' >>> pe.encode('Niall Schmidt') '01660' >>> pe.encode('L.Smith') '01960' >>> pe.encode('R.Miller') '65490' >>> pe.encode(('L', 'Smith')) '01960' >>> pe.encode(('R', 'Miller')) '65490' """ def _raise_word_ex(): """Raise an AttributeError. Raises ------ AttributeError Word attribute must be a string with a space or period dividing the first and last names or a tuple/list consisting of the first and last names """ raise AttributeError( 'Word attribute must be a string with a space or period ' + 'dividing the first and last names or a tuple/list ' + 'consisting of the first and last names' ) if not word: return '' names = [] if isinstance(word, (str, text_type)): names = word.split('.', 1) if len(names) != 2: names = word.split(' ', 1) if len(names) != 2: _raise_word_ex() elif hasattr(word, '__iter__'): if len(word) != 2: _raise_word_ex() names = word else: _raise_word_ex() names = [ unicode_normalize( 'NFKD', text_type(_.strip().replace('ß', 'SS').upper()) ) for _ in names ] code = '' def _steps_one_to_three(name): """Perform the first three steps of SPFC. Parameters ---------- name : str Name to transform Returns ------- str Transformed name """ # filter out non A-Z name = ''.join(_ for _ in name if _ in self._uc_set) # 1. In the field, convert DK to K, DT to T, SC to S, KN to N, # and MN to N for subst in self._substitutions: name = name.replace(subst[0], subst[1]) # 2. In the name field, replace multiple letters with a single # letter name = self._delete_consecutive_repeats(name) # 3. Remove vowels, W, H, and Y, but keep the first letter in the # name field. if name: name = name[0] + ''.join( _ for _ in name[1:] if _ not in {'A', 'E', 'H', 'I', 'O', 'U', 'W', 'Y'} ) return name names = [_steps_one_to_three(_) for _ in names] # 4. The first digit of the code is obtained using PF1 and the first # letter of the name field. Remove this letter after coding. if names[1]: code += names[1][0].translate(self._pf1) names[1] = names[1][1:] # 5. Using the last letters of the name, use Table PF3 to obtain the # second digit of the code. Use as many letters as possible and remove # after coding. if names[1]: if names[1][-3:] == 'STN' or names[1][-3:] == 'PRS': code += '8' names[1] = names[1][:-3] elif names[1][-2:] == 'SN': code += '8' names[1] = names[1][:-2] elif names[1][-3:] == 'STR': code += '9' names[1] = names[1][:-3] elif names[1][-2:] in {'SR', 'TN', 'TD'}: code += '9' names[1] = names[1][:-2] elif names[1][-3:] == 'DRS': code += '7' names[1] = names[1][:-3] elif names[1][-2:] in {'TR', 'MN'}: code += '7' names[1] = names[1][:-2] else: code += names[1][-1].translate(self._pf3) names[1] = names[1][:-1] # 6. The third digit is found using Table PF2 and the first character # of the first name. Remove after coding. if names[0]: code += names[0][0].translate(self._pf2) names[0] = names[0][1:] # 7. The fourth digit is found using Table PF2 and the first character # of the name field. If no letters remain use zero. After coding remove # the letter. # 8. The fifth digit is found in the same manner as the fourth using # the remaining characters of the name field if any. for _ in range(2): if names[1]: code += names[1][0].translate(self._pf2) names[1] = names[1][1:] else: code += '0' return code
def encode(self, word, terminator='\0'): r"""Return the Burrows-Wheeler transformed form of a word. Parameters ---------- word : str The word to transform using BWT terminator : str A character added to signal the end of the string Returns ------- str Word encoded by BWT Raises ------ ValueError Specified terminator already in word. Examples -------- >>> bwt = BWT() >>> bwt.encode('align') 'n\x00ilag' >>> bwt.encode('banana') 'annb\x00aa' >>> bwt.encode('banana', '@') 'annb@aa' """ if word: if terminator in word: raise ValueError( 'Specified terminator, {}, already in word.'.format( terminator if terminator != '\0' else '\\0' ) ) else: word += terminator wordlist = sorted( word[i:] + word[:i] for i in range(len(word)) ) return ''.join([w[-1] for w in wordlist]) else: return terminator
def decode(self, code, terminator='\0'): r"""Return a word decoded from BWT form. Parameters ---------- code : str The word to transform from BWT form terminator : str A character added to signal the end of the string Returns ------- str Word decoded by BWT Raises ------ ValueError Specified terminator absent from code. Examples -------- >>> bwt = BWT() >>> bwt.decode('n\x00ilag') 'align' >>> bwt.decode('annb\x00aa') 'banana' >>> bwt.decode('annb@aa', '@') 'banana' """ if code: if terminator not in code: raise ValueError( 'Specified terminator, {}, absent from code.'.format( terminator if terminator != '\0' else '\\0' ) ) else: wordlist = [''] * len(code) for i in range(len(code)): wordlist = sorted( code[i] + wordlist[i] for i in range(len(code)) ) rows = [w for w in wordlist if w[-1] == terminator][0] return rows.rstrip(terminator) else: return ''
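# encode() and decode() above are inverses as long as the terminator
# does not occur in the input:
bwt = BWT()
for word in ('align', 'banana', 'mississippi'):
    assert bwt.decode(bwt.encode(word)) == word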
def dist_abs(self, src, tar): """Return the indel distance between two strings. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison Returns ------- int Indel distance Examples -------- >>> cmp = Indel() >>> cmp.dist_abs('cat', 'hat') 2 >>> cmp.dist_abs('Niall', 'Neil') 3 >>> cmp.dist_abs('Colin', 'Cuilen') 5 >>> cmp.dist_abs('ATCG', 'TAGC') 4 """ return self._lev.dist_abs( src, tar, mode='lev', cost=(1, 1, 9999, 9999) )
def dist(self, src, tar): """Return the normalized indel distance between two strings. This is equivalent to normalized Levenshtein distance, when only inserts and deletes are possible. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison Returns ------- float Normalized indel distance Examples -------- >>> cmp = Indel() >>> round(cmp.dist('cat', 'hat'), 12) 0.333333333333 >>> round(cmp.dist('Niall', 'Neil'), 12) 0.333333333333 >>> round(cmp.dist('Colin', 'Cuilen'), 12) 0.454545454545 >>> cmp.dist('ATCG', 'TAGC') 0.5 """ if src == tar: return 0.0 return self.dist_abs(src, tar) / (len(src) + len(tar))
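# The normalization above divides the absolute distance by the sum of
# the string lengths, e.g. for 'cat'/'hat': 2 / (3 + 3) = 1/3.
cmp = Indel()
assert cmp.dist_abs('cat', 'hat') == 2
assert abs(cmp.dist('cat', 'hat') - 2 / 6) < 1e-12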
def encode(self, word, primary_only=False): """Return the Haase Phonetik (numeric output) code for a word. While the output code is numeric, it is nevertheless a str. Parameters ---------- word : str The word to transform primary_only : bool If True, only the primary code is returned Returns ------- tuple The Haase Phonetik value as a numeric string Examples -------- >>> pe = Haase() >>> pe.encode('Joachim') ('9496',) >>> pe.encode('Christoph') ('4798293', '8798293') >>> pe.encode('Jörg') ('974',) >>> pe.encode('Smith') ('8692',) >>> pe.encode('Schmidt') ('8692', '4692') """ def _after(word, pos, letters): """Return True if word[pos] follows one of the supplied letters. Parameters ---------- word : str Word to modify pos : int Position to examine letters : set Letters to check for Returns ------- bool True if word[pos] follows one of letters """ if pos > 0 and word[pos - 1] in letters: return True return False def _before(word, pos, letters): """Return True if word[pos] precedes one of the supplied letters. Parameters ---------- word : str Word to modify pos : int Position to examine letters : set Letters to check for Returns ------- bool True if word[pos] precedes one of letters """ if pos + 1 < len(word) and word[pos + 1] in letters: return True return False word = unicode_normalize('NFKD', text_type(word.upper())) word = word.replace('ß', 'SS') word = word.replace('Ä', 'AE') word = word.replace('Ö', 'OE') word = word.replace('Ü', 'UE') word = ''.join(c for c in word if c in self._uc_set) variants = [] if primary_only: variants = [word] else: pos = 0 if word[:2] == 'CH': variants.append(('CH', 'SCH')) pos += 2 len_3_vars = { 'OWN': 'AUN', 'WSK': 'RSK', 'SCH': 'CH', 'GLI': 'LI', 'AUX': 'O', 'EUX': 'O', } while pos < len(word): if word[pos : pos + 4] == 'ILLE': variants.append(('ILLE', 'I')) pos += 4 elif word[pos : pos + 3] in len_3_vars: variants.append( (word[pos : pos + 3], len_3_vars[word[pos : pos + 3]]) ) pos += 3 elif word[pos : pos + 2] == 'RB': variants.append(('RB', 'RW')) pos += 2 elif len(word[pos:]) == 3 and word[pos:] == 'EAU': variants.append(('EAU', 'O')) pos += 3 elif len(word[pos:]) == 1 and word[pos:] in {'A', 'O'}: if word[pos:] == 'O': variants.append(('O', 'OW')) else: variants.append(('A', 'AR')) pos += 1 else: variants.append((word[pos],)) pos += 1 variants = [''.join(letters) for letters in product(*variants)] def _haase_code(word): sdx = '' for i in range(len(word)): if word[i] in self._uc_v_set: sdx += '9' elif word[i] == 'B': sdx += '1' elif word[i] == 'P': if _before(word, i, {'H'}): sdx += '3' else: sdx += '1' elif word[i] in {'D', 'T'}: if _before(word, i, {'C', 'S', 'Z'}): sdx += '8' else: sdx += '2' elif word[i] in {'F', 'V', 'W'}: sdx += '3' elif word[i] in {'G', 'K', 'Q'}: sdx += '4' elif word[i] == 'C': if _after(word, i, {'S', 'Z'}): sdx += '8' elif i == 0: if _before( word, i, {'A', 'H', 'K', 'L', 'O', 'Q', 'R', 'U', 'X'}, ): sdx += '4' else: sdx += '8' elif _before(word, i, {'A', 'H', 'K', 'O', 'Q', 'U', 'X'}): sdx += '4' else: sdx += '8' elif word[i] == 'X': if _after(word, i, {'C', 'K', 'Q'}): sdx += '8' else: sdx += '48' elif word[i] == 'L': sdx += '5' elif word[i] in {'M', 'N'}: sdx += '6' elif word[i] == 'R': sdx += '7' elif word[i] in {'S', 'Z'}: sdx += '8' sdx = self._delete_consecutive_repeats(sdx) return sdx encoded = tuple(_haase_code(word) for word in variants) if len(encoded) > 1: encoded_set = set() encoded_single = [] for code in encoded: if code not in encoded_set: encoded_set.add(code) encoded_single.append(code) return tuple(encoded_single) return encoded
def sim(self, src, tar, *args, **kwargs): """Return similarity. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison *args Variable length argument list. **kwargs Arbitrary keyword arguments. Returns ------- float Similarity """ return 1.0 - self.dist(src, tar, *args, **kwargs)
def dist_abs(self, src, tar, *args, **kwargs): """Return absolute distance. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison *args Variable length argument list. **kwargs Arbitrary keyword arguments. Returns ------- int Absolute distance """ return self.dist(src, tar, *args, **kwargs)
def occurrence_fingerprint( word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG ): """Return the occurrence fingerprint. This is a wrapper for :py:meth:`Occurrence.fingerprint`. Parameters ---------- word : str The word to fingerprint n_bits : int Number of bits in the fingerprint returned most_common : list The most common tokens in the target language, ordered by frequency Returns ------- int The occurrence fingerprint Examples -------- >>> bin(occurrence_fingerprint('hat')) '0b110000100000000' >>> bin(occurrence_fingerprint('niall')) '0b10110000100000' >>> bin(occurrence_fingerprint('colin')) '0b1110000110000' >>> bin(occurrence_fingerprint('atcg')) '0b110000000010000' >>> bin(occurrence_fingerprint('entreatment')) '0b1110010010000100' """ return Occurrence().fingerprint(word, n_bits, most_common)
def fingerprint(self, word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG): """Return the occurrence fingerprint. Parameters ---------- word : str The word to fingerprint n_bits : int Number of bits in the fingerprint returned most_common : list The most common tokens in the target language, ordered by frequency Returns ------- int The occurrence fingerprint Examples -------- >>> of = Occurrence() >>> bin(of.fingerprint('hat')) '0b110000100000000' >>> bin(of.fingerprint('niall')) '0b10110000100000' >>> bin(of.fingerprint('colin')) '0b1110000110000' >>> bin(of.fingerprint('atcg')) '0b110000000010000' >>> bin(of.fingerprint('entreatment')) '0b1110010010000100' """ word = set(word) fingerprint = 0 for letter in most_common: if letter in word: fingerprint += 1 n_bits -= 1 if n_bits: fingerprint <<= 1 else: break n_bits -= 1 if n_bits > 0: fingerprint <<= n_bits return fingerprint
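# Standalone sketch of the packing above: reading most_common from most
# to least frequent, bit i (counting from the high end) is set iff that
# letter occurs in the word. The ordering below is a hypothetical
# stand-in for MOST_COMMON_LETTERS_CG, though it does reproduce the
# doctest values.
ASSUMED_ORDER = list('etaoinshrdlcumwf')  # 16 assumed letters

def occurrence_sketch(word, n_bits=16, most_common=ASSUMED_ORDER):
    letters = set(word)
    fp = 0
    for letter in most_common[:n_bits]:
        fp = (fp << 1) | int(letter in letters)
    # pad on the right if there are fewer common letters than bits
    return fp << max(0, n_bits - len(most_common))

assert bin(occurrence_sketch('hat')) == '0b110000100000000'
assert bin(occurrence_sketch('niall')) == '0b10110000100000'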
def sim_baystat(src, tar, min_ss_len=None, left_ext=None, right_ext=None): """Return the Baystat similarity. This is a wrapper for :py:meth:`Baystat.sim`. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison min_ss_len : int Minimum substring length to be considered left_ext : int Left-side extension length right_ext : int Right-side extension length Returns ------- float The Baystat similarity Examples -------- >>> round(sim_baystat('cat', 'hat'), 12) 0.666666666667 >>> sim_baystat('Niall', 'Neil') 0.4 >>> round(sim_baystat('Colin', 'Cuilen'), 12) 0.166666666667 >>> sim_baystat('ATCG', 'TAGC') 0.0 """ return Baystat().sim(src, tar, min_ss_len, left_ext, right_ext)
def dist_baystat(src, tar, min_ss_len=None, left_ext=None, right_ext=None): """Return the Baystat distance. This is a wrapper for :py:meth:`Baystat.dist`. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison min_ss_len : int Minimum substring length to be considered left_ext : int Left-side extension length right_ext : int Right-side extension length Returns ------- float The Baystat distance Examples -------- >>> round(dist_baystat('cat', 'hat'), 12) 0.333333333333 >>> dist_baystat('Niall', 'Neil') 0.6 >>> round(dist_baystat('Colin', 'Cuilen'), 12) 0.833333333333 >>> dist_baystat('ATCG', 'TAGC') 1.0 """ return Baystat().dist(src, tar, min_ss_len, left_ext, right_ext)
def sim(self, src, tar, min_ss_len=None, left_ext=None, right_ext=None): """Return the Baystat similarity. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison min_ss_len : int Minimum substring length to be considered left_ext : int Left-side extension length right_ext : int Right-side extension length Returns ------- float The Baystat similarity Examples -------- >>> cmp = Baystat() >>> round(cmp.sim('cat', 'hat'), 12) 0.666666666667 >>> cmp.sim('Niall', 'Neil') 0.4 >>> round(cmp.sim('Colin', 'Cuilen'), 12) 0.166666666667 >>> cmp.sim('ATCG', 'TAGC') 0.0 """ if src == tar: return 1.0 if not src or not tar: return 0.0 max_len = max(len(src), len(tar)) if not (min_ss_len and left_ext and right_ext): # These can be set via arguments to the function. Otherwise they # are set automatically based on values from the article. if max_len >= 7: min_ss_len = 2 left_ext = 2 right_ext = 2 else: # The paper suggests that for short names, (exclusively) one or # the other of left_ext and right_ext can be 1, with good # results. I use 0 & 0 as the default in this case. min_ss_len = 1 left_ext = 0 right_ext = 0 pos = 0 match_len = 0 while True: if pos + min_ss_len > len(src): return match_len / max_len hit_len = 0 ix = 1 substring = src[pos : pos + min_ss_len] search_begin = pos - left_ext if search_begin < 0: search_begin = 0 left_ext_len = pos else: left_ext_len = left_ext if pos + min_ss_len + right_ext >= len(tar): right_ext_len = len(tar) - pos - min_ss_len else: right_ext_len = right_ext if ( search_begin + left_ext_len + min_ss_len + right_ext_len > search_begin ): search_val = tar[ search_begin : ( search_begin + left_ext_len + min_ss_len + right_ext_len ) ] else: search_val = '' flagged_tar = '' while substring in search_val and pos + ix <= len(src): hit_len = len(substring) flagged_tar = tar.replace(substring, '#' * hit_len) if pos + min_ss_len + ix <= len(src): substring = src[pos : pos + min_ss_len + ix] if pos + min_ss_len + right_ext_len + 1 <= len(tar): right_ext_len += 1 # The following is unnecessary, I think # if (search_begin + left_ext_len + min_ss_len + right_ext_len # <= len(tar)): search_val = tar[ search_begin : ( search_begin + left_ext_len + min_ss_len + right_ext_len ) ] ix += 1 if hit_len > 0: tar = flagged_tar match_len += hit_len pos += ix
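# The automatic parameters above depend on the longer string's length:
# (min_ss_len, left_ext, right_ext) = (2, 2, 2) when max_len >= 7,
# otherwise (1, 0, 0). Passing the short-name defaults explicitly
# therefore reproduces the automatic behaviour:
cmp = Baystat()
assert cmp.sim('Niall', 'Neil') == cmp.sim('Niall', 'Neil', 1, 0, 0)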
def sim_tversky(src, tar, qval=2, alpha=1, beta=1, bias=None): """Return the Tversky index of two strings. This is a wrapper for :py:meth:`Tversky.sim`. Parameters ---------- src : str Source string (or QGrams/Counter objects) for comparison tar : str Target string (or QGrams/Counter objects) for comparison qval : int The length of each q-gram; 0 for non-q-gram version alpha : float Tversky index parameter as described above beta : float Tversky index parameter as described above bias : float The symmetric Tversky index bias parameter Returns ------- float Tversky similarity Examples -------- >>> sim_tversky('cat', 'hat') 0.3333333333333333 >>> sim_tversky('Niall', 'Neil') 0.2222222222222222 >>> sim_tversky('aluminum', 'Catalan') 0.0625 >>> sim_tversky('ATCG', 'TAGC') 0.0 """ return Tversky().sim(src, tar, qval, alpha, beta, bias)
def dist_tversky(src, tar, qval=2, alpha=1, beta=1, bias=None): """Return the Tversky distance between two strings. This is a wrapper for :py:meth:`Tversky.dist`. Parameters ---------- src : str Source string (or QGrams/Counter objects) for comparison tar : str Target string (or QGrams/Counter objects) for comparison qval : int The length of each q-gram; 0 for non-q-gram version alpha : float Tversky index parameter as described above beta : float Tversky index parameter as described above bias : float The symmetric Tversky index bias parameter Returns ------- float Tversky distance Examples -------- >>> dist_tversky('cat', 'hat') 0.6666666666666667 >>> dist_tversky('Niall', 'Neil') 0.7777777777777778 >>> dist_tversky('aluminum', 'Catalan') 0.9375 >>> dist_tversky('ATCG', 'TAGC') 1.0 """ return Tversky().dist(src, tar, qval, alpha, beta, bias)
def sim(self, src, tar, qval=2, alpha=1, beta=1, bias=None): """Return the Tversky index of two strings. Parameters ---------- src : str Source string (or QGrams/Counter objects) for comparison tar : str Target string (or QGrams/Counter objects) for comparison qval : int The length of each q-gram; 0 for non-q-gram version alpha : float Tversky index parameter as described above beta : float Tversky index parameter as described above bias : float The symmetric Tversky index bias parameter Returns ------- float Tversky similarity Raises ------ ValueError Unsupported weight assignment; alpha and beta must be greater than or equal to 0. Examples -------- >>> cmp = Tversky() >>> cmp.sim('cat', 'hat') 0.3333333333333333 >>> cmp.sim('Niall', 'Neil') 0.2222222222222222 >>> cmp.sim('aluminum', 'Catalan') 0.0625 >>> cmp.sim('ATCG', 'TAGC') 0.0 """ if alpha < 0 or beta < 0: raise ValueError( 'Unsupported weight assignment; alpha and beta ' + 'must be greater than or equal to 0.' ) if src == tar: return 1.0 elif not src or not tar: return 0.0 q_src, q_tar = self._get_qgrams(src, tar, qval) q_src_mag = sum(q_src.values()) q_tar_mag = sum(q_tar.values()) q_intersection_mag = sum((q_src & q_tar).values()) if not q_src or not q_tar: return 0.0 if bias is None: return q_intersection_mag / ( q_intersection_mag + alpha * (q_src_mag - q_intersection_mag) + beta * (q_tar_mag - q_intersection_mag) ) a_val = min( q_src_mag - q_intersection_mag, q_tar_mag - q_intersection_mag ) b_val = max( q_src_mag - q_intersection_mag, q_tar_mag - q_intersection_mag ) c_val = q_intersection_mag + bias return c_val / (beta * (alpha * a_val + (1 - alpha) * b_val) + c_val)
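# Setting alpha = beta = 1 above reduces the Tversky index to Jaccard,
# and alpha = beta = 0.5 to Sørensen-Dice (values follow from the
# 'cat'/'hat' doctest, which shares 2 of 6 total q-grams):
cmp = Tversky()
assert abs(cmp.sim('cat', 'hat', alpha=1, beta=1) - 1 / 3) < 1e-12
assert abs(cmp.sim('cat', 'hat', alpha=0.5, beta=0.5) - 0.5) < 1e-12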
def lcsseq(self, src, tar): """Return the longest common subsequence of two strings. Based on the dynamic programming algorithm from http://rosettacode.org/wiki/Longest_common_subsequence :cite:`rosettacode:2018b`. This is licensed GFDL 1.2. Modifications include: conversion to a numpy array in place of a list of lists Parameters ---------- src : str Source string for comparison tar : str Target string for comparison Returns ------- str The longest common subsequence Examples -------- >>> sseq = LCSseq() >>> sseq.lcsseq('cat', 'hat') 'at' >>> sseq.lcsseq('Niall', 'Neil') 'Nil' >>> sseq.lcsseq('aluminum', 'Catalan') 'aln' >>> sseq.lcsseq('ATCG', 'TAGC') 'AC' """ lengths = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_int) # row 0 and column 0 are initialized to 0 already for i, src_char in enumerate(src): for j, tar_char in enumerate(tar): if src_char == tar_char: lengths[i + 1, j + 1] = lengths[i, j] + 1 else: lengths[i + 1, j + 1] = max( lengths[i + 1, j], lengths[i, j + 1] ) # read the substring out from the matrix result = '' i, j = len(src), len(tar) while i != 0 and j != 0: if lengths[i, j] == lengths[i - 1, j]: i -= 1 elif lengths[i, j] == lengths[i, j - 1]: j -= 1 else: result = src[i - 1] + result i -= 1 j -= 1 return result
def sim(self, src, tar): r"""Return the longest common subsequence similarity of two strings. Longest common subsequence similarity (:math:`sim_{LCSseq}`). This employs the LCSseq function to derive a similarity metric: :math:`sim_{LCSseq}(s,t) = \frac{|LCSseq(s,t)|}{max(|s|, |t|)}` Parameters ---------- src : str Source string for comparison tar : str Target string for comparison Returns ------- float LCSseq similarity Examples -------- >>> sseq = LCSseq() >>> sseq.sim('cat', 'hat') 0.6666666666666666 >>> sseq.sim('Niall', 'Neil') 0.6 >>> sseq.sim('aluminum', 'Catalan') 0.375 >>> sseq.sim('ATCG', 'TAGC') 0.5 """ if src == tar: return 1.0 elif not src or not tar: return 0.0 return len(self.lcsseq(src, tar)) / max(len(src), len(tar))
def sim(self, src, tar): """Return the prefix similarity of two strings. Prefix similarity is the ratio of the length of the shorter term that exactly matches the longer term to the length of the shorter term, beginning at the start of both terms. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison Returns ------- float Prefix similarity Examples -------- >>> cmp = Prefix() >>> cmp.sim('cat', 'hat') 0.0 >>> cmp.sim('Niall', 'Neil') 0.25 >>> cmp.sim('aluminum', 'Catalan') 0.0 >>> cmp.sim('ATCG', 'TAGC') 0.0 """ if src == tar: return 1.0 if not src or not tar: return 0.0 min_word, max_word = (src, tar) if len(src) < len(tar) else (tar, src) min_len = len(min_word) for i in range(min_len, 0, -1): if min_word[:i] == max_word[:i]: return i / min_len return 0.0
def count_fingerprint(word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG): """Return the count fingerprint. This is a wrapper for :py:meth:`Count.fingerprint`. Parameters ---------- word : str The word to fingerprint n_bits : int Number of bits in the fingerprint returned most_common : list The most common tokens in the target language, ordered by frequency Returns ------- int The count fingerprint Examples -------- >>> bin(count_fingerprint('hat')) '0b1010000000001' >>> bin(count_fingerprint('niall')) '0b10001010000' >>> bin(count_fingerprint('colin')) '0b101010000' >>> bin(count_fingerprint('atcg')) '0b1010000000000' >>> bin(count_fingerprint('entreatment')) '0b1111010000100000' """ return Count().fingerprint(word, n_bits, most_common)
def fingerprint(self, word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG): """Return the count fingerprint. Parameters ---------- word : str The word to fingerprint n_bits : int Number of bits in the fingerprint returned most_common : list The most common tokens in the target language, ordered by frequency Returns ------- int The count fingerprint Examples -------- >>> cf = Count() >>> bin(cf.fingerprint('hat')) '0b1010000000001' >>> bin(cf.fingerprint('niall')) '0b10001010000' >>> bin(cf.fingerprint('colin')) '0b101010000' >>> bin(cf.fingerprint('atcg')) '0b1010000000000' >>> bin(cf.fingerprint('entreatment')) '0b1111010000100000' """ if n_bits % 2: n_bits += 1 word = Counter(word) fingerprint = 0 for letter in most_common: if n_bits: fingerprint <<= 2 fingerprint += word[letter] & 3 n_bits -= 2 else: break if n_bits: fingerprint <<= n_bits return fingerprint
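# Standalone sketch of the layout above: each common letter gets a
# 2-bit field holding its count mod 4 (count & 3). The ordering below
# is a hypothetical stand-in for MOST_COMMON_LETTERS_CG, though it
# reproduces the doctest values for 16-bit fingerprints.
from collections import Counter

ASSUMED_ORDER = list('etaoinsh')  # 8 assumed letters -> 16 bits

def count_sketch(word, most_common=ASSUMED_ORDER):
    counts = Counter(word)
    fp = 0
    for letter in most_common:
        fp = (fp << 2) | (counts[letter] & 3)
    return fp

assert bin(count_sketch('hat')) == '0b1010000000001'
assert bin(count_sketch('niall')) == '0b10001010000'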
def phonetic_fingerprint( phrase, phonetic_algorithm=double_metaphone, joiner=' ', *args, **kwargs ): """Return the phonetic fingerprint of a phrase. This is a wrapper for :py:meth:`Phonetic.fingerprint`. Parameters ---------- phrase : str The string from which to calculate the phonetic fingerprint phonetic_algorithm : function A phonetic algorithm that takes a string and returns a string (presumably a phonetic representation of the original string). By default, this function uses :py:func:`.double_metaphone`. joiner : str The string that will be placed between each word *args Variable length argument list **kwargs Arbitrary keyword arguments Returns ------- str The phonetic fingerprint of the phrase Examples -------- >>> phonetic_fingerprint('The quick brown fox jumped over the lazy dog.') '0 afr fks jmpt kk ls prn tk' >>> from abydos.phonetic import soundex >>> phonetic_fingerprint('The quick brown fox jumped over the lazy dog.', ... phonetic_algorithm=soundex) 'b650 d200 f200 j513 l200 o160 q200 t000' """ return Phonetic().fingerprint( phrase, phonetic_algorithm, joiner, *args, **kwargs )
def fingerprint( self, phrase, phonetic_algorithm=double_metaphone, joiner=' ', *args, **kwargs ): """Return the phonetic fingerprint of a phrase. Parameters ---------- phrase : str The string from which to calculate the phonetic fingerprint phonetic_algorithm : function A phonetic algorithm that takes a string and returns a string (presumably a phonetic representation of the original string). By default, this function uses :py:func:`.double_metaphone`. joiner : str The string that will be placed between each word *args Variable length argument list **kwargs Arbitrary keyword arguments Returns ------- str The phonetic fingerprint of the phrase Examples -------- >>> pf = Phonetic() >>> pf.fingerprint('The quick brown fox jumped over the lazy dog.') '0 afr fks jmpt kk ls prn tk' >>> from abydos.phonetic import soundex >>> pf.fingerprint('The quick brown fox jumped over the lazy dog.', ... phonetic_algorithm=soundex) 'b650 d200 f200 j513 l200 o160 q200 t000' """ phonetic = '' for word in phrase.split(): word = phonetic_algorithm(word, *args, **kwargs) if not isinstance(word, text_type) and hasattr(word, '__iter__'): word = word[0] phonetic += word + joiner phonetic = phonetic[: -len(joiner)] return super(self.__class__, self).fingerprint(phonetic)
def docs_of_words(self): r"""Return the docs in the corpus, with sentences flattened. Each list within the corpus represents all the words of that document. Thus the sentence level of lists has been flattened. Returns ------- [[str]] The docs in the corpus as a list of list of strs Example ------- >>> tqbf = 'The quick brown fox jumped over the lazy dog.\n' >>> tqbf += 'And then it slept.\n And the dog ran off.' >>> corp = Corpus(tqbf) >>> corp.docs_of_words() [['The', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', 'dog.', 'And', 'then', 'it', 'slept.', 'And', 'the', 'dog', 'ran', 'off.']] >>> len(corp.docs_of_words()) 1 """ return [ [words for sents in doc for words in sents] for doc in self.corpus ]
def raw(self): r"""Return the raw corpus. This is reconstructed by joining sub-components with the corpus' split characters Returns ------- str The raw corpus Example ------- >>> tqbf = 'The quick brown fox jumped over the lazy dog.\n' >>> tqbf += 'And then it slept.\n And the dog ran off.' >>> corp = Corpus(tqbf) >>> print(corp.raw()) The quick brown fox jumped over the lazy dog. And then it slept. And the dog ran off. >>> len(corp.raw()) 85 """ doc_list = [] for doc in self.corpus: sent_list = [] for sent in doc: sent_list.append(' '.join(sent)) doc_list.append(self.sent_split.join(sent_list)) del sent_list return self.doc_split.join(doc_list)
def idf(self, term, transform=None): r"""Calculate the Inverse Document Frequency of a term in the corpus. Parameters ---------- term : str The term to calculate the IDF of transform : function A function to apply to each document term before checking for the presence of term Returns ------- float The IDF Examples -------- >>> tqbf = 'The quick brown fox jumped over the lazy dog.\n\n' >>> tqbf += 'And then it slept.\n\n And the dog ran off.' >>> corp = Corpus(tqbf) >>> print(corp.docs()) [[['The', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', 'dog.']], [['And', 'then', 'it', 'slept.']], [['And', 'the', 'dog', 'ran', 'off.']]] >>> round(corp.idf('dog'), 10) 0.4771212547 >>> round(corp.idf('the'), 10) 0.1760912591 """ docs_with_term = 0 docs = self.docs_of_words() for doc in docs: doc_set = set(doc) if transform: transformed_doc = [] for word in doc_set: transformed_doc.append(transform(word)) doc_set = set(transformed_doc) if term in doc_set: docs_with_term += 1 if docs_with_term == 0: return float('inf') return log10(len(docs) / docs_with_term)
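# Worked check of the doctests above: 'the' appears in 2 of the 3
# documents, so idf('the') = log10(3/2). Matching is exact: the first
# document contains 'dog.' (with the period), so only one document
# matches 'dog' and idf('dog') = log10(3/1).
from math import log10

assert round(log10(3 / 2), 10) == 0.1760912591
assert round(log10(3 / 1), 10) == 0.4771212547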
def stem(self, word): """Return Paice-Husk stem. Parameters ---------- word : str The word to stem Returns ------- str Word stem Examples -------- >>> stmr = PaiceHusk() >>> stmr.stem('assumption') 'assum' >>> stmr.stem('verifiable') 'ver' >>> stmr.stem('fancies') 'fant' >>> stmr.stem('fanciful') 'fancy' >>> stmr.stem('torment') 'tor' """ terminate = False intact = True while not terminate: for n in range(6, 0, -1): if word[-n:] in self._rule_table[n]: accept = False if len(self._rule_table[n][word[-n:]]) < 4: for rule in self._rule_table[n][word[-n:]]: ( word, accept, intact, terminate, ) = self._apply_rule(word, rule, intact, terminate) if accept: break else: rule = self._rule_table[n][word[-n:]] (word, accept, intact, terminate) = self._apply_rule( word, rule, intact, terminate ) if accept: break else: break return word
def encode(self, word): """Return Reth-Schek Phonetik code for a word. Parameters ---------- word : str The word to transform Returns ------- str The Reth-Schek Phonetik code Examples -------- >>> pe = RethSchek() >>> pe.encode('Joachim') 'JOAGHIM' >>> pe.encode('Christoph') 'GHRISDOF' >>> pe.encode('Jörg') 'JOERG' >>> pe.encode('Smith') 'SMID' >>> pe.encode('Schmidt') 'SCHMID' """ # Uppercase word = word.upper() # Replace umlauts/eszett word = word.replace('Ä', 'AE') word = word.replace('Ö', 'OE') word = word.replace('Ü', 'UE') word = word.replace('ß', 'SS') # Main loop, using above replacements table pos = 0 while pos < len(word): for num in range(3, 0, -1): if word[pos : pos + num] in self._replacements[num]: word = ( word[:pos] + self._replacements[num][word[pos : pos + num]] + word[pos + num :] ) pos += 1 break else: pos += 1 # Advance if nothing is recognized # Change 'CH' back(?) to 'SCH' word = word.replace('CH', 'SCH') # Replace final sequences if word[-2:] == 'ER': word = word[:-2] + 'R' elif word[-2:] == 'EL': word = word[:-2] + 'L' elif word[-1:] == 'H': word = word[:-1] return word
def encode(self, word, max_length=-1): """Return the SfinxBis code for a word. Parameters ---------- word : str The word to transform max_length : int The length of the code returned (defaults to unlimited) Returns ------- tuple The SfinxBis value Examples -------- >>> pe = SfinxBis() >>> pe.encode('Christopher') ('K68376',) >>> pe.encode('Niall') ('N4',) >>> pe.encode('Smith') ('S53',) >>> pe.encode('Schmidt') ('S53',) >>> pe.encode('Johansson') ('J585',) >>> pe.encode('Sjöberg') ('#162',) """ def _foersvensker(lokal_ordet): """Return the Swedish-ized form of the word. Parameters ---------- lokal_ordet : str Word to transform Returns ------- str Transformed word """ lokal_ordet = lokal_ordet.replace('STIERN', 'STJÄRN') lokal_ordet = lokal_ordet.replace('HIE', 'HJ') lokal_ordet = lokal_ordet.replace('SIÖ', 'SJÖ') lokal_ordet = lokal_ordet.replace('SCH', 'SH') lokal_ordet = lokal_ordet.replace('QU', 'KV') lokal_ordet = lokal_ordet.replace('IO', 'JO') lokal_ordet = lokal_ordet.replace('PH', 'F') for i in self._harde_vokaler: lokal_ordet = lokal_ordet.replace(i + 'Ü', i + 'J') lokal_ordet = lokal_ordet.replace(i + 'Y', i + 'J') lokal_ordet = lokal_ordet.replace(i + 'I', i + 'J') for i in self._mjuka_vokaler: lokal_ordet = lokal_ordet.replace(i + 'Ü', i + 'J') lokal_ordet = lokal_ordet.replace(i + 'Y', i + 'J') lokal_ordet = lokal_ordet.replace(i + 'I', i + 'J') if 'H' in lokal_ordet: for i in self._uc_c_set: lokal_ordet = lokal_ordet.replace('H' + i, i) lokal_ordet = lokal_ordet.translate(self._substitutions) lokal_ordet = lokal_ordet.replace('Ð', 'ETH') lokal_ordet = lokal_ordet.replace('Þ', 'TH') lokal_ordet = lokal_ordet.replace('ß', 'SS') return lokal_ordet def _koda_foersta_ljudet(lokal_ordet): """Return the word with the first sound coded. Parameters ---------- lokal_ordet : str Word to transform Returns ------- str Transformed word """ if ( lokal_ordet[0:1] in self._mjuka_vokaler or lokal_ordet[0:1] in self._harde_vokaler ): lokal_ordet = '$' + lokal_ordet[1:] elif lokal_ordet[0:2] in ('DJ', 'GJ', 'HJ', 'LJ'): lokal_ordet = 'J' + lokal_ordet[2:] elif ( lokal_ordet[0:1] == 'G' and lokal_ordet[1:2] in self._mjuka_vokaler ): lokal_ordet = 'J' + lokal_ordet[1:] elif lokal_ordet[0:1] == 'Q': lokal_ordet = 'K' + lokal_ordet[1:] elif lokal_ordet[0:2] == 'CH' and lokal_ordet[2:3] in frozenset( self._mjuka_vokaler | self._harde_vokaler ): lokal_ordet = '#' + lokal_ordet[2:] elif ( lokal_ordet[0:1] == 'C' and lokal_ordet[1:2] in self._harde_vokaler ): lokal_ordet = 'K' + lokal_ordet[1:] elif ( lokal_ordet[0:1] == 'C' and lokal_ordet[1:2] in self._uc_c_set ): lokal_ordet = 'K' + lokal_ordet[1:] elif lokal_ordet[0:1] == 'X': lokal_ordet = 'S' + lokal_ordet[1:] elif ( lokal_ordet[0:1] == 'C' and lokal_ordet[1:2] in self._mjuka_vokaler ): lokal_ordet = 'S' + lokal_ordet[1:] elif lokal_ordet[0:3] in ('SKJ', 'STJ', 'SCH'): lokal_ordet = '#' + lokal_ordet[3:] elif lokal_ordet[0:2] in ('SH', 'KJ', 'TJ', 'SJ'): lokal_ordet = '#' + lokal_ordet[2:] elif ( lokal_ordet[0:2] == 'SK' and lokal_ordet[2:3] in self._mjuka_vokaler ): lokal_ordet = '#' + lokal_ordet[2:] elif ( lokal_ordet[0:1] == 'K' and lokal_ordet[1:2] in self._mjuka_vokaler ): lokal_ordet = '#' + lokal_ordet[1:] return lokal_ordet # Step 1, Uppercase word = unicode_normalize('NFC', text_type(word.upper())) word = word.replace('ß', 'SS') word = word.replace('-', ' ') # Step 2, Remove nobility prefixes for adelstitel in self._adelstitler: while adelstitel in word: word = word.replace(adelstitel, ' ') if word.startswith(adelstitel[1:]): word = word[len(adelstitel) - 1 :] # Split word into tokens ordlista = word.split() # Step 3, Remove doubled letters at the beginning of the name ordlista = [ self._delete_consecutive_repeats(ordet) for ordet in ordlista ] if not ordlista: # noinspection PyRedundantParentheses return ('',) # Step 4, Swedishization ordlista = [_foersvensker(ordet) for ordet in ordlista] # Step 5, Remove all characters that are not A-Ö (65-90,196,197,214) ordlista = [ ''.join(c for c in ordet if c in self._uc_set) for ordet in ordlista ] # Step 6, Code the first sound ordlista = [_koda_foersta_ljudet(ordet) for ordet in ordlista] # Step 7, Split the name into two parts rest = [ordet[1:] for ordet in ordlista] # Step 8, Apply phonetic transformation to the remainder rest = [ordet.replace('DT', 'T') for ordet in rest] rest = [ordet.replace('X', 'KS') for ordet in rest] # Step 9, Code the remainder as digits for vokal in self._mjuka_vokaler: rest = [ordet.replace('C' + vokal, '8' + vokal) for ordet in rest] rest = [ordet.translate(self._trans) for ordet in rest] # Step 10, Remove adjacent duplicates rest = [self._delete_consecutive_repeats(ordet) for ordet in rest] # Step 11, Remove all "9"s rest = [ordet.replace('9', '') for ordet in rest] # Step 12, Reassemble the parts ordlista = [ ''.join(ordet) for ordet in zip((_[0:1] for _ in ordlista), rest) ] # truncate, if max_length is set if max_length > 0: ordlista = [ordet[:max_length] for ordet in ordlista] return tuple(ordlista)
def bmpm( word, language_arg=0, name_mode='gen', match_mode='approx', concat=False, filter_langs=False, ): """Return the Beider-Morse Phonetic Matching encoding(s) of a term. This is a wrapper for :py:meth:`BeiderMorse.encode`. Parameters ---------- word : str The word to transform language_arg : str The language of the term; supported values include: - ``any`` - ``arabic`` - ``cyrillic`` - ``czech`` - ``dutch`` - ``english`` - ``french`` - ``german`` - ``greek`` - ``greeklatin`` - ``hebrew`` - ``hungarian`` - ``italian`` - ``latvian`` - ``polish`` - ``portuguese`` - ``romanian`` - ``russian`` - ``spanish`` - ``turkish`` name_mode : str The name mode of the algorithm: - ``gen`` -- general (default) - ``ash`` -- Ashkenazi - ``sep`` -- Sephardic match_mode : str Matching mode: ``approx`` or ``exact`` concat : bool Concatenation mode filter_langs : bool Filter out incompatible languages Returns ------- tuple The Beider-Morse phonetic value(s) Examples -------- >>> bmpm('Christopher') 'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir xristYfir xristopi xritopir xritopi xristofi xritofir xritofi tzristopir tzristofir zristopir zristopi zritopir zritopi zristofir zristofi zritofir zritofi' >>> bmpm('Niall') 'nial niol' >>> bmpm('Smith') 'zmit' >>> bmpm('Schmidt') 'zmit stzmit' >>> bmpm('Christopher', language_arg='German') 'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir xristYfir' >>> bmpm('Christopher', language_arg='English') 'tzristofir tzrQstofir tzristafir tzrQstafir xristofir xrQstofir xristafir xrQstafir' >>> bmpm('Christopher', language_arg='German', name_mode='ash') 'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir xristYfir' >>> bmpm('Christopher', language_arg='German', match_mode='exact') 'xriStopher xriStofer xristopher xristofer' """ return BeiderMorse().encode( word, language_arg, name_mode, match_mode, concat, filter_langs )
def _language(self, name, name_mode): """Return the best guess language ID for the word and language choices. Parameters ---------- name : str The term to guess the language of name_mode : str The name mode of the algorithm: ``gen`` (default), ``ash`` (Ashkenazi), or ``sep`` (Sephardic) Returns ------- int Language ID """ name = name.strip().lower() rules = BMDATA[name_mode]['language_rules'] all_langs = ( sum(_LANG_DICT[_] for _ in BMDATA[name_mode]['languages']) - 1 ) choices_remaining = all_langs for rule in rules: letters, languages, accept = rule if search(letters, name) is not None: if accept: choices_remaining &= languages else: choices_remaining &= (~languages) % (all_langs + 1) if choices_remaining == L_NONE: choices_remaining = L_ANY return choices_remaining
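# The language sets above are bitmasks. With hypothetical IDs
# english = 1, french = 2, german = 4 (so all_langs = 7), a rule that
# rejects French clears its bit via the modular complement used above:
all_langs = 7
choices_remaining = all_langs
french = 2
choices_remaining &= (~french) % (all_langs + 1)
assert choices_remaining == 5  # english | german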
def _redo_language( self, term, name_mode, rules, final_rules1, final_rules2, concat ): """Reassess the language of the terms and call the phonetic encoder. Uses a split multi-word term. Parameters ---------- term : str The term to encode via Beider-Morse name_mode : str The name mode of the algorithm: ``gen`` (default), ``ash`` (Ashkenazi), or ``sep`` (Sephardic) rules : tuple The set of initial phonetic transform regexps final_rules1 : tuple The common set of final phonetic transform regexps final_rules2 : tuple The specific set of final phonetic transform regexps concat : bool A flag to indicate concatenation Returns ------- str A Beider-Morse phonetic code """ language_arg = self._language(term, name_mode) return self._phonetic( term, name_mode, rules, final_rules1, final_rules2, language_arg, concat, )
def _phonetic( self, term, name_mode, rules, final_rules1, final_rules2, language_arg=0, concat=False, ): """Return the Beider-Morse encoding(s) of a term. Parameters ---------- term : str The term to encode via Beider-Morse name_mode : str The name mode of the algorithm: ``gen`` (default), ``ash`` (Ashkenazi), or ``sep`` (Sephardic) rules : tuple The set of initial phonetic transform regexps final_rules1 : tuple The common set of final phonetic transform regexps final_rules2 : tuple The specific set of final phonetic transform regexps language_arg : int The language of the term concat : bool A flag to indicate concatenation Returns ------- str A Beider-Morse phonetic code """ term = term.replace('-', ' ').strip() if name_mode == 'gen': # generic case # discard and concatenate certain words if at the start of the name for pfx in BMDATA['gen']['discards']: if term.startswith(pfx): remainder = term[len(pfx) :] combined = pfx[:-1] + remainder result = ( self._redo_language( remainder, name_mode, rules, final_rules1, final_rules2, concat, ) + '-' + self._redo_language( combined, name_mode, rules, final_rules1, final_rules2, concat, ) ) return result words = ( term.split() ) # create array of the individual words in the name words2 = [] if name_mode == 'sep': # Sephardic case # for each word in the name, delete portions of word preceding # apostrophe # ex: d'avila d'aguilar --> avila aguilar # also discard certain words in the name # note that we can never get a match on "de la" because we are # checking single words below # this is a bug, but I won't try to fix it now for word in words: word = word[word.rfind('\'') + 1 :] if word not in BMDATA['sep']['discards']: words2.append(word) elif name_mode == 'ash': # Ashkenazic case # discard certain words if at the start of the name if len(words) > 1 and words[0] in BMDATA['ash']['discards']: words2 = words[1:] else: words2 = list(words) else: words2 = list(words) if concat: # concatenate the separate words of a multi-word name # (normally used for exact matches) term = ' '.join(words2) elif len(words2) == 1: # not a multi-word name term = words2[0] else: # encode each word in a multi-word name separately # (normally used for approx matches) result = '-'.join( [ self._redo_language( w, name_mode, rules, final_rules1, final_rules2, concat ) for w in words2 ] ) return result term_length = len(term) # apply language rules to map to phonetic alphabet phonetic = '' skip = 0 for i in range(term_length): if skip: skip -= 1 continue found = False for rule in rules: pattern = rule[_PATTERN_POS] pattern_length = len(pattern) lcontext = rule[_LCONTEXT_POS] rcontext = rule[_RCONTEXT_POS] # check to see if next sequence in input matches the string in # the rule if (pattern_length > term_length - i) or ( term[i : i + pattern_length] != pattern ): # no match continue right = '^' + rcontext left = lcontext + '$' # check that right context is satisfied if rcontext != '': if not search(right, term[i + pattern_length :]): continue # check that left context is satisfied if lcontext != '': if not search(left, term[:i]): continue # check for incompatible attributes candidate = self._apply_rule_if_compat( phonetic, rule[_PHONETIC_POS], language_arg ) # The below condition shouldn't ever be false if candidate is not None: # pragma: no branch phonetic = candidate found = True break if ( not found ): # character in name that is not in table -- e.g., space pattern_length = 1 skip = pattern_length - 1 # apply final rules on phonetic-alphabet, # doing a substitution of certain characters phonetic = self._apply_final_rules( phonetic, final_rules1, language_arg, False ) # apply common rules # final_rules1 are the common approx rules, # final_rules2 are approx rules for specific language phonetic = self._apply_final_rules( phonetic, final_rules2, language_arg, True ) # apply lang specific rules return phonetic
def _apply_final_rules(self, phonetic, final_rules, language_arg, strip): """Apply a set of final rules to the phonetic encoding. Parameters ---------- phonetic : str The term to which to apply the final rules final_rules : tuple The set of final phonetic transform regexps language_arg : int An integer representing the target language of the phonetic encoding strip : bool Flag to indicate whether to normalize the language attributes Returns ------- str A Beider-Morse phonetic code """ # optimization to save time if not final_rules: return phonetic # expand the result phonetic = self._expand_alternates(phonetic) phonetic_array = phonetic.split('|') for k in range(len(phonetic_array)): phonetic = phonetic_array[k] phonetic2 = '' phoneticx = self._normalize_lang_attrs(phonetic, True) i = 0 while i < len(phonetic): found = False if phonetic[i] == '[': # skip over language attribute attrib_start = i i += 1 while True: if phonetic[i] == ']': i += 1 phonetic2 += phonetic[attrib_start:i] break i += 1 continue for rule in final_rules: pattern = rule[_PATTERN_POS] pattern_length = len(pattern) lcontext = rule[_LCONTEXT_POS] rcontext = rule[_RCONTEXT_POS] right = '^' + rcontext left = lcontext + '$' # check to see if next sequence in phonetic matches the # string in the rule if (pattern_length > len(phoneticx) - i) or phoneticx[ i : i + pattern_length ] != pattern: continue # check that right context is satisfied if rcontext != '': if not search(right, phoneticx[i + pattern_length :]): continue # check that left context is satisfied if lcontext != '': if not search(left, phoneticx[:i]): continue # check for incompatible attributes candidate = self._apply_rule_if_compat( phonetic2, rule[_PHONETIC_POS], language_arg ) # The below condition shouldn't ever be false if candidate is not None: # pragma: no branch phonetic2 = candidate found = True break if not found: # character in name for which there is no substitution in # the table phonetic2 += phonetic[i] pattern_length = 1 i += pattern_length phonetic_array[k] = self._expand_alternates(phonetic2) phonetic = '|'.join(phonetic_array) if strip: phonetic = self._normalize_lang_attrs(phonetic, True) if '|' in phonetic: phonetic = '(' + self._remove_dupes(phonetic) + ')' return phonetic
def _expand_alternates(self, phonetic): """Expand phonetic alternates separated by |s. Parameters ---------- phonetic : str A Beider-Morse phonetic encoding Returns ------- str A Beider-Morse phonetic code """ alt_start = phonetic.find('(') if alt_start == -1: return self._normalize_lang_attrs(phonetic, False) prefix = phonetic[:alt_start] alt_start += 1 # get past the ( alt_end = phonetic.find(')', alt_start) alt_string = phonetic[alt_start:alt_end] alt_end += 1 # get past the ) suffix = phonetic[alt_end:] alt_array = alt_string.split('|') result = '' for i in range(len(alt_array)): alt = alt_array[i] alternate = self._expand_alternates(prefix + alt + suffix) if alternate != '' and alternate != '[0]': if result != '': result += '|' result += alternate return result
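Since the expansion above is easiest to follow on a concrete string, here is a minimal standalone sketch of the same recursive idea, ignoring the bracketed language attributes that the real method also normalizes (the function name is illustrative, not part of the library):

def expand_alternates_sketch(phonetic):
    # find the first parenthesized alternation and recurse on each choice
    alt_start = phonetic.find('(')
    if alt_start == -1:
        return phonetic
    prefix = phonetic[:alt_start]
    alt_end = phonetic.find(')', alt_start)
    suffix = phonetic[alt_end + 1:]
    alts = phonetic[alt_start + 1:alt_end].split('|')
    return '|'.join(
        expand_alternates_sketch(prefix + alt + suffix) for alt in alts
    )

# expand_alternates_sketch('zmi(t|d)') == 'zmit|zmid'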
def _pnums_with_leading_space(self, phonetic): """Join prefixes & suffixes in cases of alternate phonetic values. Parameters ---------- phonetic : str A Beider-Morse phonetic encoding Returns ------- str A Beider-Morse phonetic code """ alt_start = phonetic.find('(') if alt_start == -1: return ' ' + self._phonetic_number(phonetic) prefix = phonetic[:alt_start] alt_start += 1 # get past the ( alt_end = phonetic.find(')', alt_start) alt_string = phonetic[alt_start:alt_end] alt_end += 1 # get past the ) suffix = phonetic[alt_end:] alt_array = alt_string.split('|') result = '' for alt in alt_array: result += self._pnums_with_leading_space(prefix + alt + suffix) return result
def _phonetic_numbers(self, phonetic): """Prepare & join phonetic numbers. Split phonetic value on '-', run through _pnums_with_leading_space, and join with ' ' Parameters ---------- phonetic : str A Beider-Morse phonetic encoding Returns ------- str A Beider-Morse phonetic code """ phonetic_array = phonetic.split('-') # for names with spaces in them result = ' '.join( [self._pnums_with_leading_space(i)[1:] for i in phonetic_array] ) return result
def _remove_dupes(self, phonetic): """Remove duplicates from a phonetic encoding list. Parameters ---------- phonetic : str A Beider-Morse phonetic encoding Returns ------- str A Beider-Morse phonetic code """ alt_string = phonetic alt_array = alt_string.split('|') result = '|' for i in range(len(alt_array)): alt = alt_array[i] if alt and '|' + alt + '|' not in result: result += alt + '|' return result[1:-1]
def _normalize_lang_attrs(self, text, strip): """Remove embedded bracketed attributes. This (potentially) bitwise-ands bracketed attributes together and adds to the end. This is applied to a single alternative at a time -- not to a parenthesized list. It removes all embedded bracketed attributes, logically-ands them together, and places them at the end. However if strip is true, this can indeed remove embedded bracketed attributes from a parenthesized list. Parameters ---------- text : str A Beider-Morse phonetic encoding (in progress) strip : bool Remove the bracketed attributes (and throw away) Returns ------- str A Beider-Morse phonetic code Raises ------ ValueError No closing square bracket """ uninitialized = -1 # all 1's attrib = uninitialized while '[' in text: bracket_start = text.find('[') bracket_end = text.find(']', bracket_start) if bracket_end == -1: raise ValueError( 'No closing square bracket: text=(' + text + ') strip=(' + text_type(strip) + ')' ) attrib &= int(text[bracket_start + 1 : bracket_end]) text = text[:bracket_start] + text[bracket_end + 1 :] if attrib == uninitialized or strip: return text elif attrib == 0: # means that the attributes were incompatible and there is no # alternative here return '[0]' return text + '[' + str(attrib) + ']'
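A worked illustration of the attribute folding described above, using hypothetical bitmask values:

# as if the input were 'abc[12]def[6]': each bracketed bitmask is ANDed in
attrib = -1            # "uninitialized", i.e. all bits set
for mask in (12, 6):
    attrib &= mask
# 12 & 6 == 4, so with strip=False the result is 'abcdef[4]';
# had the AND come to 0, the attributes would be incompatible and the
# method would return '[0]' instead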
def _apply_rule_if_compat(self, phonetic, target, language_arg):
    """Apply a phonetic regex if compatible.

    This tests whether the rule is compatible with the supplied language
    attributes. To do so, it applies the rule, expands the result, and
    detects alternatives with incompatible attributes. Each alternative
    with incompatible attributes is dropped and those that are compatible
    are kept. If no compatible alternative remains, None is returned;
    otherwise the compatible alternatives are returned.

    Parameters
    ----------
    phonetic : str
        The Beider-Morse phonetic encoding (so far)
    target : str
        A proposed addition to the phonetic encoding
    language_arg : int
        An integer representing the target language of the phonetic
        encoding

    Returns
    -------
    str
        A candidate encoding, or None if no alternative is compatible

    """
    candidate = phonetic + target
    if '[' not in candidate:  # no attributes so we need test no further
        return candidate

    # expand the result, converting incompatible attributes to [0]
    candidate = self._expand_alternates(candidate)
    candidate_array = candidate.split('|')

    # drop each alternative that has incompatible attributes
    candidate = ''
    found = False

    for i in range(len(candidate_array)):
        this_candidate = candidate_array[i]
        if language_arg != 1:
            this_candidate = self._normalize_lang_attrs(
                this_candidate + '[' + str(language_arg) + ']', False
            )
        if this_candidate != '[0]':
            found = True
            if candidate:
                candidate += '|'
            candidate += this_candidate

    # return None if no compatible alternatives remain
    if not found:
        return None

    # return the result of applying the rule
    if '|' in candidate:
        candidate = '(' + candidate + ')'
    return candidate
def _language_index_from_code(self, code, name_mode): """Return the index value for a language code. This returns l_any if more than one code is specified or the code is out of bounds. Parameters ---------- code : int The language code to interpret name_mode : str The name mode of the algorithm: ``gen`` (default), ``ash`` (Ashkenazi), or ``sep`` (Sephardic) Returns ------- int Language code index """ if code < 1 or code > sum( _LANG_DICT[_] for _ in BMDATA[name_mode]['languages'] ): # code out of range return L_ANY if ( code & (code - 1) ) != 0: # choice was more than one language; use any return L_ANY return code
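The `(code & (code - 1)) != 0` test above is the standard single-bit (power-of-two) check; a quick illustration:

for code in (4, 8, 12):
    print(code, code & (code - 1) == 0)
# 4 True, 8 True, 12 False -- 12 == 4 | 8 sets two bits, i.e. two
# languages were chosen, so the method falls back to L_ANY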
def encode( self, word, language_arg=0, name_mode='gen', match_mode='approx', concat=False, filter_langs=False, ): """Return the Beider-Morse Phonetic Matching encoding(s) of a term. Parameters ---------- word : str The word to transform language_arg : int The language of the term; supported values include: - ``any`` - ``arabic`` - ``cyrillic`` - ``czech`` - ``dutch`` - ``english`` - ``french`` - ``german`` - ``greek`` - ``greeklatin`` - ``hebrew`` - ``hungarian`` - ``italian`` - ``latvian`` - ``polish`` - ``portuguese`` - ``romanian`` - ``russian`` - ``spanish`` - ``turkish`` name_mode : str The name mode of the algorithm: - ``gen`` -- general (default) - ``ash`` -- Ashkenazi - ``sep`` -- Sephardic match_mode : str Matching mode: ``approx`` or ``exact`` concat : bool Concatenation mode filter_langs : bool Filter out incompatible languages Returns ------- tuple The Beider-Morse phonetic value(s) Raises ------ ValueError Unknown language Examples -------- >>> pe = BeiderMorse() >>> pe.encode('Christopher') 'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir xristYfir xristopi xritopir xritopi xristofi xritofir xritofi tzristopir tzristofir zristopir zristopi zritopir zritopi zristofir zristofi zritofir zritofi' >>> pe.encode('Niall') 'nial niol' >>> pe.encode('Smith') 'zmit' >>> pe.encode('Schmidt') 'zmit stzmit' >>> pe.encode('Christopher', language_arg='German') 'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir xristYfir' >>> pe.encode('Christopher', language_arg='English') 'tzristofir tzrQstofir tzristafir tzrQstafir xristofir xrQstofir xristafir xrQstafir' >>> pe.encode('Christopher', language_arg='German', name_mode='ash') 'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir xristYfir' >>> pe.encode('Christopher', language_arg='German', match_mode='exact') 'xriStopher xriStofer xristopher xristofer' """ word = normalize('NFC', text_type(word.strip().lower())) name_mode = name_mode.strip().lower()[:3] if name_mode not in {'ash', 'sep', 'gen'}: name_mode = 'gen' if match_mode != 'exact': match_mode = 'approx' # Translate the supplied language_arg value into an integer # representing a set of languages all_langs = ( sum(_LANG_DICT[_] for _ in BMDATA[name_mode]['languages']) - 1 ) lang_choices = 0 if isinstance(language_arg, (int, float, long)): lang_choices = int(language_arg) elif language_arg != '' and isinstance(language_arg, (text_type, str)): for lang in text_type(language_arg).lower().split(','): if lang in _LANG_DICT and (_LANG_DICT[lang] & all_langs): lang_choices += _LANG_DICT[lang] elif not filter_langs: raise ValueError( 'Unknown \'' + name_mode + '\' language: \'' + lang + '\'' ) # Language choices are either all incompatible with the name mode or # no choices were given, so try to autodetect if lang_choices == 0: language_arg = self._language(word, name_mode) else: language_arg = lang_choices language_arg2 = self._language_index_from_code(language_arg, name_mode) rules = BMDATA[name_mode]['rules'][language_arg2] final_rules1 = BMDATA[name_mode][match_mode]['common'] final_rules2 = BMDATA[name_mode][match_mode][language_arg2] result = self._phonetic( word, name_mode, rules, final_rules1, final_rules2, language_arg, concat, ) result = self._phonetic_numbers(result) return result
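Since encode() returns its codes as a space-separated string, a common usage pattern, sketched here as a hypothetical helper rather than a library API, is to treat two names as a match when they share any code:

pe = BeiderMorse()

def bm_match(name1, name2):
    # compare the sets of generated codes
    return bool(set(pe.encode(name1).split()) & set(pe.encode(name2).split()))

# per the doctests above, encode('Smith') and encode('Schmidt') both
# produce the code 'zmit', so bm_match('Smith', 'Schmidt') is True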
def sim_strcmp95(src, tar, long_strings=False): """Return the strcmp95 similarity of two strings. This is a wrapper for :py:meth:`Strcmp95.sim`. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison long_strings : bool Set to True to increase the probability of a match when the number of matched characters is large. This option allows for a little more tolerance when the strings are large. It is not an appropriate test when comparing fixed length fields such as phone and social security numbers. Returns ------- float Strcmp95 similarity Examples -------- >>> sim_strcmp95('cat', 'hat') 0.7777777777777777 >>> sim_strcmp95('Niall', 'Neil') 0.8454999999999999 >>> sim_strcmp95('aluminum', 'Catalan') 0.6547619047619048 >>> sim_strcmp95('ATCG', 'TAGC') 0.8333333333333334 """ return Strcmp95().sim(src, tar, long_strings)
def dist_strcmp95(src, tar, long_strings=False): """Return the strcmp95 distance between two strings. This is a wrapper for :py:meth:`Strcmp95.dist`. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison long_strings : bool Set to True to increase the probability of a match when the number of matched characters is large. This option allows for a little more tolerance when the strings are large. It is not an appropriate test when comparing fixed length fields such as phone and social security numbers. Returns ------- float Strcmp95 distance Examples -------- >>> round(dist_strcmp95('cat', 'hat'), 12) 0.222222222222 >>> round(dist_strcmp95('Niall', 'Neil'), 12) 0.1545 >>> round(dist_strcmp95('aluminum', 'Catalan'), 12) 0.345238095238 >>> round(dist_strcmp95('ATCG', 'TAGC'), 12) 0.166666666667 """ return Strcmp95().dist(src, tar, long_strings)
def sim(self, src, tar, long_strings=False): """Return the strcmp95 similarity of two strings. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison long_strings : bool Set to True to increase the probability of a match when the number of matched characters is large. This option allows for a little more tolerance when the strings are large. It is not an appropriate test when comparing fixed length fields such as phone and social security numbers. Returns ------- float Strcmp95 similarity Examples -------- >>> cmp = Strcmp95() >>> cmp.sim('cat', 'hat') 0.7777777777777777 >>> cmp.sim('Niall', 'Neil') 0.8454999999999999 >>> cmp.sim('aluminum', 'Catalan') 0.6547619047619048 >>> cmp.sim('ATCG', 'TAGC') 0.8333333333333334 """ def _in_range(char): """Return True if char is in the range (0, 91). Parameters ---------- char : str The character to check Returns ------- bool True if char is in the range (0, 91) """ return 91 > ord(char) > 0 ying = src.strip().upper() yang = tar.strip().upper() if ying == yang: return 1.0 # If either string is blank - return - added in Version 2 if not ying or not yang: return 0.0 adjwt = defaultdict(int) # Initialize the adjwt array on the first call to the function only. # The adjwt array is used to give partial credit for characters that # may be errors due to known phonetic or character recognition errors. # A typical example is to match the letter "O" with the number "0" for i in self._sp_mx: adjwt[(i[0], i[1])] = 3 adjwt[(i[1], i[0])] = 3 if len(ying) > len(yang): search_range = len(ying) minv = len(yang) else: search_range = len(yang) minv = len(ying) # Blank out the flags ying_flag = [0] * search_range yang_flag = [0] * search_range search_range = max(0, search_range // 2 - 1) # Looking only within the search range, # count and flag the matched pairs. num_com = 0 yl1 = len(yang) - 1 for i in range(len(ying)): low_lim = (i - search_range) if (i >= search_range) else 0 hi_lim = (i + search_range) if ((i + search_range) <= yl1) else yl1 for j in range(low_lim, hi_lim + 1): if (yang_flag[j] == 0) and (yang[j] == ying[i]): yang_flag[j] = 1 ying_flag[i] = 1 num_com += 1 break # If no characters in common - return if num_com == 0: return 0.0 # Count the number of transpositions k = n_trans = 0 for i in range(len(ying)): if ying_flag[i] != 0: j = 0 for j in range(k, len(yang)): # pragma: no branch if yang_flag[j] != 0: k = j + 1 break if ying[i] != yang[j]: n_trans += 1 n_trans //= 2 # Adjust for similarities in unmatched characters n_simi = 0 if minv > num_com: for i in range(len(ying)): if ying_flag[i] == 0 and _in_range(ying[i]): for j in range(len(yang)): if yang_flag[j] == 0 and _in_range(yang[j]): if (ying[i], yang[j]) in adjwt: n_simi += adjwt[(ying[i], yang[j])] yang_flag[j] = 2 break num_sim = n_simi / 10.0 + num_com # Main weight computation weight = ( num_sim / len(ying) + num_sim / len(yang) + (num_com - n_trans) / num_com ) weight /= 3.0 # Continue to boost the weight if the strings are similar if weight > 0.7: # Adjust for having up to the first 4 characters in common j = 4 if (minv >= 4) else minv i = 0 while (i < j) and (ying[i] == yang[i]) and (not ying[i].isdigit()): i += 1 if i: weight += i * 0.1 * (1.0 - weight) # Optionally adjust for long strings. # After agreeing beginning chars, at least two more must agree and # the agreeing characters must be > .5 of remaining characters. 
if ( long_strings and (minv > 4) and (num_com > i + 1) and (2 * num_com >= minv + i) ): if not ying[0].isdigit(): weight += (1.0 - weight) * ( (num_com - i - 1) / (len(ying) + len(yang) - i * 2 + 2) ) return weight
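To make the weight computation above concrete, here is the arithmetic behind the doctest sim('cat', 'hat'), assuming (as the result confirms) that 'c'/'h' earns no phonetic-similarity credit:

num_com, n_trans, length = 2, 0, 3   # 'a' and 't' match; no transpositions
weight = (num_com / length + num_com / length
          + (num_com - n_trans) / num_com) / 3
# weight == 7/9 == 0.7777..., matching the doctest; the prefix bonus never
# fires because the first characters ('C' vs 'H') already differ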
def encode(self, word): """Return the Naval Research Laboratory phonetic encoding of a word. Parameters ---------- word : str The word to transform Returns ------- str The NRL phonetic encoding Examples -------- >>> pe = NRL() >>> pe.encode('the') 'DHAX' >>> pe.encode('round') 'rAWnd' >>> pe.encode('quick') 'kwIHk' >>> pe.encode('eaten') 'IYtEHn' >>> pe.encode('Smith') 'smIHTH' >>> pe.encode('Larsen') 'lAArsEHn' """ def _to_regex(pattern, left_match=True): new_pattern = '' replacements = { '#': '[AEIOU]+', ':': '[BCDFGHJKLMNPQRSTVWXYZ]*', '^': '[BCDFGHJKLMNPQRSTVWXYZ]', '.': '[BDVGJLMNTWZ]', '%': '(ER|E|ES|ED|ING|ELY)', '+': '[EIY]', ' ': '^', } for char in pattern: new_pattern += ( replacements[char] if char in replacements else char ) if left_match: new_pattern += '$' if '^' not in pattern: new_pattern = '^.*' + new_pattern else: new_pattern = '^' + new_pattern.replace('^', '$') if '$' not in new_pattern: new_pattern += '.*$' return new_pattern word = word.upper() pron = '' pos = 0 while pos < len(word): left_orig = word[:pos] right_orig = word[pos:] first = word[pos] if word[pos] in self._rules else ' ' for rule in self._rules[first]: left, match, right, out = rule if right_orig.startswith(match): if left: l_pattern = _to_regex(left, left_match=True) if right: r_pattern = _to_regex(right, left_match=False) if (not left or re_match(l_pattern, left_orig)) and ( not right or re_match(r_pattern, right_orig[len(match) :]) ): pron += out pos += len(match) break else: pron += word[pos] pos += 1 return pron
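For reference, this is how the nested _to_regex helper above would expand a typical left-context pattern such as '#:' (one or more vowels followed by zero or more consonants); the trace is derived from the replacement table shown in the code:

import re

# '#' -> '[AEIOU]+', ':' -> '[BCDFGHJKLMNPQRSTVWXYZ]*', then '$' is
# appended and, since the pattern contains no '^', '^.*' is prefixed:
left_pattern = '^.*[AEIOU]+[BCDFGHJKLMNPQRSTVWXYZ]*$'
assert re.match(left_pattern, 'EAT')     # a vowel run, then a consonant
assert not re.match(left_pattern, 'TR')  # no vowel anywhere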
def lcsstr(self, src, tar): """Return the longest common substring of two strings. Longest common substring (LCSstr). Based on the code from https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Longest_common_substring :cite:`Wikibooks:2018`. This is licensed Creative Commons: Attribution-ShareAlike 3.0. Modifications include: - conversion to a numpy array in place of a list of lists - conversion to Python 2/3-safe range from xrange via six Parameters ---------- src : str Source string for comparison tar : str Target string for comparison Returns ------- str The longest common substring Examples -------- >>> sstr = LCSstr() >>> sstr.lcsstr('cat', 'hat') 'at' >>> sstr.lcsstr('Niall', 'Neil') 'N' >>> sstr.lcsstr('aluminum', 'Catalan') 'al' >>> sstr.lcsstr('ATCG', 'TAGC') 'A' """ lengths = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_int) longest, i_longest = 0, 0 for i in range(1, len(src) + 1): for j in range(1, len(tar) + 1): if src[i - 1] == tar[j - 1]: lengths[i, j] = lengths[i - 1, j - 1] + 1 if lengths[i, j] > longest: longest = lengths[i, j] i_longest = i else: lengths[i, j] = 0 return src[i_longest - longest : i_longest]
def sim(self, src, tar):
    r"""Return the longest common substring similarity of two strings.

    Longest common substring similarity (:math:`sim_{LCSstr}`).

    This employs the LCS function to derive a similarity metric:
    :math:`sim_{LCSstr}(s,t) = \frac{|LCSstr(s,t)|}{max(|s|, |t|)}`

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison

    Returns
    -------
    float
        LCSstr similarity

    Examples
    --------
    >>> sstr = LCSstr()
    >>> sstr.sim('cat', 'hat')
    0.6666666666666666
    >>> sstr.sim('Niall', 'Neil')
    0.2
    >>> sstr.sim('aluminum', 'Catalan')
    0.25
    >>> sstr.sim('ATCG', 'TAGC')
    0.25

    """
    if src == tar:
        return 1.0
    elif not src or not tar:
        return 0.0
    return len(self.lcsstr(src, tar)) / max(len(src), len(tar))
def needleman_wunsch(src, tar, gap_cost=1, sim_func=sim_ident): """Return the Needleman-Wunsch score of two strings. This is a wrapper for :py:meth:`NeedlemanWunsch.dist_abs`. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison gap_cost : float The cost of an alignment gap (1 by default) sim_func : function A function that returns the similarity of two characters (identity similarity by default) Returns ------- float Needleman-Wunsch score Examples -------- >>> needleman_wunsch('cat', 'hat') 2.0 >>> needleman_wunsch('Niall', 'Neil') 1.0 >>> needleman_wunsch('aluminum', 'Catalan') -1.0 >>> needleman_wunsch('ATCG', 'TAGC') 0.0 """ return NeedlemanWunsch().dist_abs(src, tar, gap_cost, sim_func)
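For readers who want the underlying dynamic program, here is a minimal sketch consistent with the defaults above (gap cost subtracted, identity similarity scoring 1 for a match and 0 otherwise); it reproduces the doctest values but is illustrative, not the library's implementation:

def nw_sketch(src, tar, gap_cost=1):
    sim = lambda a, b: 1.0 if a == b else 0.0  # identity similarity
    d = [[0.0] * (len(tar) + 1) for _ in range(len(src) + 1)]
    for i in range(1, len(src) + 1):
        d[i][0] = -i * gap_cost
    for j in range(1, len(tar) + 1):
        d[0][j] = -j * gap_cost
    for i in range(1, len(src) + 1):
        for j in range(1, len(tar) + 1):
            d[i][j] = max(
                d[i - 1][j - 1] + sim(src[i - 1], tar[j - 1]),  # substitute
                d[i - 1][j] - gap_cost,  # gap in tar
                d[i][j - 1] - gap_cost,  # gap in src
            )
    return d[-1][-1]

# nw_sketch('cat', 'hat') == 2.0, matching the doctest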
def sim_matrix( src, tar, mat=None, mismatch_cost=0, match_cost=1, symmetric=True, alphabet=None, ): """Return the matrix similarity of two strings. With the default parameters, this is identical to sim_ident. It is possible for sim_matrix to return values outside of the range :math:`[0, 1]`, if values outside that range are present in mat, mismatch_cost, or match_cost. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison mat : dict A dict mapping tuples to costs; the tuples are (src, tar) pairs of symbols from the alphabet parameter mismatch_cost : float The value returned if (src, tar) is absent from mat when src does not equal tar match_cost : float The value returned if (src, tar) is absent from mat when src equals tar symmetric : bool True if the cost of src not matching tar is identical to the cost of tar not matching src; in this case, the values in mat need only contain (src, tar) or (tar, src), not both alphabet : str A collection of tokens from which src and tar are drawn; if this is defined a ValueError is raised if either tar or src is not found in alphabet Returns ------- float Matrix similarity Raises ------ ValueError src value not in alphabet ValueError tar value not in alphabet Examples -------- >>> NeedlemanWunsch.sim_matrix('cat', 'hat') 0 >>> NeedlemanWunsch.sim_matrix('hat', 'hat') 1 """ if alphabet: alphabet = tuple(alphabet) for i in src: if i not in alphabet: raise ValueError('src value not in alphabet') for i in tar: if i not in alphabet: raise ValueError('tar value not in alphabet') if src == tar: if mat and (src, src) in mat: return mat[(src, src)] return match_cost if mat and (src, tar) in mat: return mat[(src, tar)] elif symmetric and mat and (tar, src) in mat: return mat[(tar, src)] return mismatch_cost
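A brief usage sketch for the mat parameter with hypothetical costs, leaning on symmetric=True so each pair need only be listed once:

dna_mat = {('A', 'G'): 0.5, ('C', 'T'): 0.5}  # hypothetical partial matrix
NeedlemanWunsch.sim_matrix('A', 'G', mat=dna_mat)  # 0.5 (direct lookup)
NeedlemanWunsch.sim_matrix('G', 'A', mat=dna_mat)  # 0.5 (symmetric lookup)
NeedlemanWunsch.sim_matrix('A', 'C', mat=dna_mat)  # 0 (the mismatch_cost)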
def encode(self, word, max_length=14): """Return the IBM Alpha Search Inquiry System code for a word. A collection is necessary as the return type since there can be multiple values for a single word. But the collection must be ordered since the first value is the primary coding. Parameters ---------- word : str The word to transform max_length : int The length of the code returned (defaults to 14) Returns ------- tuple The Alpha-SIS value Examples -------- >>> pe = AlphaSIS() >>> pe.encode('Christopher') ('06401840000000', '07040184000000', '04018400000000') >>> pe.encode('Niall') ('02500000000000',) >>> pe.encode('Smith') ('03100000000000',) >>> pe.encode('Schmidt') ('06310000000000',) """ alpha = [''] pos = 0 word = unicode_normalize('NFKD', text_type(word.upper())) word = word.replace('ß', 'SS') word = ''.join(c for c in word if c in self._uc_set) # Clamp max_length to [4, 64] if max_length != -1: max_length = min(max(4, max_length), 64) else: max_length = 64 # Do special processing for initial substrings for k in self._alpha_sis_initials_order: if word.startswith(k): alpha[0] += self._alpha_sis_initials[k] pos += len(k) break # Add a '0' if alpha is still empty if not alpha[0]: alpha[0] += '0' # Whether or not any special initial codes were encoded, iterate # through the length of the word in the main encoding loop while pos < len(word): orig_pos = pos for k in self._alpha_sis_basic_order: if word[pos:].startswith(k): if isinstance(self._alpha_sis_basic[k], tuple): newalpha = [] for i in range(len(self._alpha_sis_basic[k])): newalpha += [ _ + self._alpha_sis_basic[k][i] for _ in alpha ] alpha = newalpha else: alpha = [_ + self._alpha_sis_basic[k] for _ in alpha] pos += len(k) break if pos == orig_pos: alpha = [_ + '_' for _ in alpha] pos += 1 # Trim doublets and placeholders for i in range(len(alpha)): pos = 1 while pos < len(alpha[i]): if alpha[i][pos] == alpha[i][pos - 1]: alpha[i] = alpha[i][:pos] + alpha[i][pos + 1 :] pos += 1 alpha = (_.replace('_', '') for _ in alpha) # Trim codes and return tuple alpha = ((_ + ('0' * max_length))[:max_length] for _ in alpha) return tuple(alpha)
def encode(self, word, max_length=-1):
    """Return the PhoneticSpanish coding of word.

    Parameters
    ----------
    word : str
        The word to transform
    max_length : int
        The length of the code returned (defaults to unlimited)

    Returns
    -------
    str
        The PhoneticSpanish code

    Examples
    --------
    >>> pe = PhoneticSpanish()
    >>> pe.encode('Perez')
    '094'
    >>> pe.encode('Martinez')
    '69364'
    >>> pe.encode('Gutierrez')
    '83994'
    >>> pe.encode('Santiago')
    '4638'
    >>> pe.encode('Nicolás')
    '6454'

    """
    # uppercase, normalize, and decompose, filter to A-Z minus vowels & W
    word = unicode_normalize('NFKD', text_type(word.upper()))
    word = ''.join(c for c in word if c in self._uc_set)

    # merge repeated Ls; repeated Rs are not merged, so a doubled R encodes
    # as two digits (cf. 'Gutierrez' -> '83994' above)
    word = word.replace('LL', 'L')

    # apply the Soundex algorithm
    sdx = word.translate(self._trans)

    if max_length > 0:
        sdx = (sdx + ('0' * max_length))[:max_length]

    return sdx
def qgram_fingerprint(phrase, qval=2, start_stop='', joiner=''): """Return Q-Gram fingerprint. This is a wrapper for :py:meth:`QGram.fingerprint`. Parameters ---------- phrase : str The string from which to calculate the q-gram fingerprint qval : int The length of each q-gram (by default 2) start_stop : str The start & stop symbol(s) to concatenate on either end of the phrase, as defined in :py:class:`tokenizer.QGrams` joiner : str The string that will be placed between each word Returns ------- str The q-gram fingerprint of the phrase Examples -------- >>> qgram_fingerprint('The quick brown fox jumped over the lazy dog.') 'azbrckdoedeleqerfoheicjukblampnfogovowoxpequrortthuiumvewnxjydzy' >>> qgram_fingerprint('Christopher') 'cherhehrisopphristto' >>> qgram_fingerprint('Niall') 'aliallni' """ return QGram().fingerprint(phrase, qval, start_stop, joiner)
def fingerprint(self, phrase, qval=2, start_stop='', joiner=''): """Return Q-Gram fingerprint. Parameters ---------- phrase : str The string from which to calculate the q-gram fingerprint qval : int The length of each q-gram (by default 2) start_stop : str The start & stop symbol(s) to concatenate on either end of the phrase, as defined in :py:class:`tokenizer.QGrams` joiner : str The string that will be placed between each word Returns ------- str The q-gram fingerprint of the phrase Examples -------- >>> qf = QGram() >>> qf.fingerprint('The quick brown fox jumped over the lazy dog.') 'azbrckdoedeleqerfoheicjukblampnfogovowoxpequrortthuiumvewnxjydzy' >>> qf.fingerprint('Christopher') 'cherhehrisopphristto' >>> qf.fingerprint('Niall') 'aliallni' """ phrase = unicode_normalize('NFKD', text_type(phrase.strip().lower())) phrase = ''.join(c for c in phrase if c.isalnum()) phrase = QGrams(phrase, qval, start_stop) phrase = joiner.join(sorted(phrase)) return phrase
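A worked trace of the doctest fingerprint('Niall'): after lowercasing, the bigrams of 'niall' are ni, ia, al, ll; sorted and joined, they yield the fingerprint (all bigrams here are distinct, so the bag-of-q-grams detail does not matter):

bigrams = sorted('niall'[i:i + 2] for i in range(len('niall') - 1))
''.join(bigrams)  # 'al' + 'ia' + 'll' + 'ni' == 'aliallni'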
def dist(self, src, tar): """Return the NCD between two strings using BWT plus RLE. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison Returns ------- float Compression distance Examples -------- >>> cmp = NCDbwtrle() >>> cmp.dist('cat', 'hat') 0.75 >>> cmp.dist('Niall', 'Neil') 0.8333333333333334 >>> cmp.dist('aluminum', 'Catalan') 1.0 >>> cmp.dist('ATCG', 'TAGC') 0.8 """ if src == tar: return 0.0 src_comp = self._rle.encode(self._bwt.encode(src)) tar_comp = self._rle.encode(self._bwt.encode(tar)) concat_comp = self._rle.encode(self._bwt.encode(src + tar)) concat_comp2 = self._rle.encode(self._bwt.encode(tar + src)) return ( min(len(concat_comp), len(concat_comp2)) - min(len(src_comp), len(tar_comp)) ) / max(len(src_comp), len(tar_comp))
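The expression above is the generic normalized compression distance; as a point of comparison, here is the same formula sketched with zlib standing in for the BWT+RLE compressor (illustrative only, not the class's method):

import zlib

def ncd_zlib(src, tar):
    if src == tar:
        return 0.0
    src_c = len(zlib.compress(src.encode()))
    tar_c = len(zlib.compress(tar.encode()))
    both = min(
        len(zlib.compress((src + tar).encode())),
        len(zlib.compress((tar + src).encode())),
    )
    return (both - min(src_c, tar_c)) / max(src_c, tar_c)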
def dm_soundex(word, max_length=6, zero_pad=True):
    """Return the Daitch-Mokotoff Soundex code for a word.

    This is a wrapper for :py:meth:`DaitchMokotoff.encode`.

    Parameters
    ----------
    word : str
        The word to transform
    max_length : int
        The length of the code returned (defaults to 6; must be between 6
        and 64)
    zero_pad : bool
        Pad the end of the return value with 0s to achieve a max_length
        string

    Returns
    -------
    set
        The Daitch-Mokotoff Soundex values

    Examples
    --------
    >>> sorted(dm_soundex('Christopher'))
    ['494379', '594379']
    >>> dm_soundex('Niall')
    {'680000'}
    >>> dm_soundex('Smith')
    {'463000'}
    >>> dm_soundex('Schmidt')
    {'463000'}

    >>> sorted(dm_soundex('The quick brown fox', max_length=20,
    ... zero_pad=False))
    ['35457976754', '3557976754']

    """
    return DaitchMokotoff().encode(word, max_length, zero_pad)
def encode(self, word, max_length=6, zero_pad=True):
    """Return the Daitch-Mokotoff Soundex code for a word.

    Parameters
    ----------
    word : str
        The word to transform
    max_length : int
        The length of the code returned (defaults to 6; must be between 6
        and 64)
    zero_pad : bool
        Pad the end of the return value with 0s to achieve a max_length
        string

    Returns
    -------
    set
        The Daitch-Mokotoff Soundex values

    Examples
    --------
    >>> pe = DaitchMokotoff()
    >>> sorted(pe.encode('Christopher'))
    ['494379', '594379']
    >>> pe.encode('Niall')
    {'680000'}
    >>> pe.encode('Smith')
    {'463000'}
    >>> pe.encode('Schmidt')
    {'463000'}

    >>> sorted(pe.encode('The quick brown fox', max_length=20,
    ... zero_pad=False))
    ['35457976754', '3557976754']

    """
    dms = ['']  # initialize empty code list

    # Require a max_length of at least 6 and not more than 64
    if max_length != -1:
        max_length = min(max(6, max_length), 64)
    else:
        max_length = 64

    # uppercase, normalize, decompose, and filter non-A-Z
    word = unicode_normalize('NFKD', text_type(word.upper()))
    word = word.replace('ß', 'SS')
    word = ''.join(c for c in word if c in self._uc_set)

    # Nothing to convert, return base case
    if not word:
        if zero_pad:
            return {'0' * max_length}
        return {'0'}

    pos = 0
    while pos < len(word):
        # Iterate through _dms_order, which specifies the possible
        # substrings for which codes exist in the Daitch-Mokotoff coding
        for sstr in self._dms_order[word[pos]]:  # pragma: no branch
            if word[pos:].startswith(sstr):
                # Having determined a valid substring start, retrieve the
                # code
                dm_val = self._dms_table[sstr]

                # Having retrieved the code (triple), determine the
                # correct positional variant (first, pre-vocalic,
                # elsewhere)
                if pos == 0:
                    dm_val = dm_val[0]
                elif (
                    pos + len(sstr) < len(word)
                    and word[pos + len(sstr)] in self._uc_v_set
                ):
                    dm_val = dm_val[1]
                else:
                    dm_val = dm_val[2]

                # Build the code strings
                if isinstance(dm_val, tuple):
                    dms = [_ + text_type(dm_val[0]) for _ in dms] + [
                        _ + text_type(dm_val[1]) for _ in dms
                    ]
                else:
                    dms = [_ + text_type(dm_val) for _ in dms]
                pos += len(sstr)
                break

    # Filter out double letters and _ placeholders
    dms = (
        ''.join(c for c in self._delete_consecutive_repeats(_) if c != '_')
        for _ in dms
    )

    # Trim codes and return set
    if zero_pad:
        dms = ((_ + ('0' * max_length))[:max_length] for _ in dms)
    else:
        dms = (_[:max_length] for _ in dms)
    return set(dms)
def encode(self, word): """Return the Norphone code. Parameters ---------- word : str The word to transform Returns ------- str The Norphone code Examples -------- >>> pe = Norphone() >>> pe.encode('Hansen') 'HNSN' >>> pe.encode('Larsen') 'LRSN' >>> pe.encode('Aagaard') 'ÅKRT' >>> pe.encode('Braaten') 'BRTN' >>> pe.encode('Sandvik') 'SNVK' """ word = word.upper() code = '' skip = 0 if word[0:2] == 'AA': code = 'Å' skip = 2 elif word[0:2] == 'GI': code = 'J' skip = 2 elif word[0:3] == 'SKY': code = 'X' skip = 3 elif word[0:2] == 'EI': code = 'Æ' skip = 2 elif word[0:2] == 'KY': code = 'X' skip = 2 elif word[:1] == 'C': code = 'K' skip = 1 elif word[:1] == 'Ä': code = 'Æ' skip = 1 elif word[:1] == 'Ö': code = 'Ø' skip = 1 if word[-2:] == 'DT': word = word[:-2] + 'T' # Though the rules indicate this rule applies in all positions, the # reference implementation indicates it applies only in final position. elif word[-2:-1] in self._uc_v_set and word[-1:] == 'D': word = word[:-2] for pos, char in enumerate(word): if skip: skip -= 1 else: for length in sorted(self._replacements, reverse=True): if word[pos : pos + length] in self._replacements[length]: code += self._replacements[length][ word[pos : pos + length] ] skip = length - 1 break else: if not pos or char not in self._uc_v_set: code += char code = self._delete_consecutive_repeats(code) return code
def to_tuple(self): """Cast to tuple. Returns ------- tuple The confusion table as a 4-tuple (tp, tn, fp, fn) Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.to_tuple() (120, 60, 20, 30) """ return self._tp, self._tn, self._fp, self._fn
def to_dict(self): """Cast to dict. Returns ------- dict The confusion table as a dict Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> import pprint >>> pprint.pprint(ct.to_dict()) {'fn': 30, 'fp': 20, 'tn': 60, 'tp': 120} """ return {'tp': self._tp, 'tn': self._tn, 'fp': self._fp, 'fn': self._fn}
def population(self): """Return population, N. Returns ------- int The population (N) of the confusion table Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.population() 230 """ return self._tp + self._tn + self._fp + self._fn
def precision(self): r"""Return precision. Precision is defined as :math:`\frac{tp}{tp + fp}` AKA positive predictive value (PPV) Cf. https://en.wikipedia.org/wiki/Precision_and_recall Cf. https://en.wikipedia.org/wiki/Information_retrieval#Precision Returns ------- float The precision of the confusion table Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.precision() 0.8571428571428571 """ if self._tp + self._fp == 0: return float('NaN') return self._tp / (self._tp + self._fp)
def precision_gain(self): r"""Return gain in precision. The gain in precision is defined as: :math:`G(precision) = \frac{precision}{random~ precision}` Cf. https://en.wikipedia.org/wiki/Gain_(information_retrieval) Returns ------- float The gain in precision of the confusion table Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.precision_gain() 1.3142857142857143 """ if self.population() == 0: return float('NaN') random_precision = self.cond_pos_pop() / self.population() return self.precision() / random_precision
def recall(self): r"""Return recall. Recall is defined as :math:`\frac{tp}{tp + fn}` AKA sensitivity AKA true positive rate (TPR) Cf. https://en.wikipedia.org/wiki/Precision_and_recall Cf. https://en.wikipedia.org/wiki/Sensitivity_(test) Cf. https://en.wikipedia.org/wiki/Information_retrieval#Recall Returns ------- float The recall of the confusion table Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.recall() 0.8 """ if self._tp + self._fn == 0: return float('NaN') return self._tp / (self._tp + self._fn)
def specificity(self): r"""Return specificity. Specificity is defined as :math:`\frac{tn}{tn + fp}` AKA true negative rate (TNR) Cf. https://en.wikipedia.org/wiki/Specificity_(tests) Returns ------- float The specificity of the confusion table Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.specificity() 0.75 """ if self._tn + self._fp == 0: return float('NaN') return self._tn / (self._tn + self._fp)
def npv(self): r"""Return negative predictive value (NPV). NPV is defined as :math:`\frac{tn}{tn + fn}` Cf. https://en.wikipedia.org/wiki/Negative_predictive_value Returns ------- float The negative predictive value of the confusion table Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.npv() 0.6666666666666666 """ if self._tn + self._fn == 0: return float('NaN') return self._tn / (self._tn + self._fn)
def fallout(self): r"""Return fall-out. Fall-out is defined as :math:`\frac{fp}{fp + tn}` AKA false positive rate (FPR) Cf. https://en.wikipedia.org/wiki/Information_retrieval#Fall-out Returns ------- float The fall-out of the confusion table Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.fallout() 0.25 """ if self._fp + self._tn == 0: return float('NaN') return self._fp / (self._fp + self._tn)
def fdr(self): r"""Return false discovery rate (FDR). False discovery rate is defined as :math:`\frac{fp}{fp + tp}` Cf. https://en.wikipedia.org/wiki/False_discovery_rate Returns ------- float The false discovery rate of the confusion table Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.fdr() 0.14285714285714285 """ if self._fp + self._tp == 0: return float('NaN') return self._fp / (self._fp + self._tp)
def accuracy(self): r"""Return accuracy. Accuracy is defined as :math:`\frac{tp + tn}{population}` Cf. https://en.wikipedia.org/wiki/Accuracy Returns ------- float The accuracy of the confusion table Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.accuracy() 0.782608695652174 """ if self.population() == 0: return float('NaN') return (self._tp + self._tn) / self.population()
def accuracy_gain(self): r"""Return gain in accuracy. The gain in accuracy is defined as: :math:`G(accuracy) = \frac{accuracy}{random~ accuracy}` Cf. https://en.wikipedia.org/wiki/Gain_(information_retrieval) Returns ------- float The gain in accuracy of the confusion table Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.accuracy_gain() 1.4325259515570934 """ if self.population() == 0: return float('NaN') random_accuracy = (self.cond_pos_pop() / self.population()) ** 2 + ( self.cond_neg_pop() / self.population() ) ** 2 return self.accuracy() / random_accuracy
def pr_lmean(self): r"""Return logarithmic mean of precision & recall. The logarithmic mean is: 0 if either precision or recall is 0, the precision if they are equal, otherwise :math:`\frac{precision - recall} {ln(precision) - ln(recall)}` Cf. https://en.wikipedia.org/wiki/Logarithmic_mean Returns ------- float The logarithmic mean of the confusion table's precision & recall Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.pr_lmean() 0.8282429171492667 """ precision = self.precision() recall = self.recall() if not precision or not recall: return 0.0 elif precision == recall: return precision return (precision - recall) / (math.log(precision) - math.log(recall))
def fbeta_score(self, beta=1.0): r"""Return :math:`F_{\beta}` score. :math:`F_{\beta}` for a positive real value :math:`\beta` "measures the effectiveness of retrieval with respect to a user who attaches :math:`\beta` times as much importance to recall as precision" (van Rijsbergen 1979) :math:`F_{\beta}` score is defined as: :math:`(1 + \beta^2) \cdot \frac{precision \cdot recall} {((\beta^2 \cdot precision) + recall)}` Cf. https://en.wikipedia.org/wiki/F1_score Parameters ---------- beta : float The :math:`\beta` parameter in the above formula Returns ------- float The :math:`F_{\beta}` of the confusion table Raises ------ AttributeError Beta must be a positive real value Examples -------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.fbeta_score() 0.8275862068965518 >>> ct.fbeta_score(beta=0.1) 0.8565371024734982 """ if beta <= 0: raise AttributeError('Beta must be a positive real value.') precision = self.precision() recall = self.recall() return ( (1 + beta ** 2) * precision * recall / ((beta ** 2 * precision) + recall) )
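Numeric check of the formula for the doctest table ConfusionTable(120, 60, 20, 30) with the default beta = 1:

precision, recall = 120 / 140, 120 / 150  # 0.8571..., 0.8
f1 = 2 * precision * recall / (precision + recall)
# f1 == 0.8275862068965518, matching ct.fbeta_score()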
def mcc(self): r"""Return Matthews correlation coefficient (MCC). The Matthews correlation coefficient is defined in :cite:`Matthews:1975` as: :math:`\frac{(tp \cdot tn) - (fp \cdot fn)} {\sqrt{(tp + fp)(tp + fn)(tn + fp)(tn + fn)}}` This is equivalent to the geometric mean of informedness and markedness, defined above. Cf. https://en.wikipedia.org/wiki/Matthews_correlation_coefficient Returns ------- float The Matthews correlation coefficient of the confusion table Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.mcc() 0.5367450401216932 """ if ( ( (self._tp + self._fp) * (self._tp + self._fn) * (self._tn + self._fp) * (self._tn + self._fn) ) ) == 0: return float('NaN') return ((self._tp * self._tn) - (self._fp * self._fn)) / math.sqrt( (self._tp + self._fp) * (self._tp + self._fn) * (self._tn + self._fp) * (self._tn + self._fn) )
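And the corresponding arithmetic for the MCC doctest:

import math

mcc = (120 * 60 - 20 * 30) / math.sqrt(140 * 150 * 80 * 90)
# (7200 - 600) / sqrt(151200000) == 0.5367..., matching ct.mcc()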
def significance(self):
    r"""Return the significance, :math:`\chi^{2}`.

    Significance is defined as:
    :math:`\chi^{2} = \frac{(tp \cdot tn - fp \cdot fn)^{2}
    (tp + tn + fp + fn)}
    {(tp + fp)(tp + fn)(tn + fp)(tn + fn)}`

    Also: :math:`\chi^{2} = MCC^{2} \cdot n`

    Cf. https://en.wikipedia.org/wiki/Pearson%27s_chi-square_test

    Returns
    -------
    float
        The significance of the confusion table

    Example
    -------
    >>> ct = ConfusionTable(120, 60, 20, 30)
    >>> ct.significance()
    66.26190476190476

    """
    if (
        (
            (self._tp + self._fp)
            * (self._tp + self._fn)
            * (self._tn + self._fp)
            * (self._tn + self._fn)
        )
    ) == 0:
        return float('NaN')
    return (
        (self._tp * self._tn - self._fp * self._fn) ** 2
        * (self._tp + self._tn + self._fp + self._fn)
    ) / (
        (self._tp + self._fp)
        * (self._tp + self._fn)
        * (self._tn + self._fp)
        * (self._tn + self._fn)
    )
def kappa_statistic(self): r"""Return κ statistic. The κ statistic is defined as: :math:`\kappa = \frac{accuracy - random~ accuracy} {1 - random~ accuracy}` The κ statistic compares the performance of the classifier relative to the performance of a random classifier. :math:`\kappa` = 0 indicates performance identical to random. :math:`\kappa` = 1 indicates perfect predictive success. :math:`\kappa` = -1 indicates perfect predictive failure. Returns ------- float The κ statistic of the confusion table Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.kappa_statistic() 0.5344129554655871 """ if self.population() == 0: return float('NaN') random_accuracy = ( (self._tn + self._fp) * (self._tn + self._fn) + (self._fn + self._tp) * (self._fp + self._tp) ) / self.population() ** 2 return (self.accuracy() - random_accuracy) / (1 - random_accuracy)
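Worked arithmetic for the kappa doctest, using the accuracy and random-accuracy definitions above:

accuracy = 180 / 230                                # (tp + tn) / population
random_accuracy = (80 * 90 + 150 * 140) / 230 ** 2  # 282 / 529
kappa = (accuracy - random_accuracy) / (1 - random_accuracy)
# kappa == 132/247 == 0.5344129554655871, matching ct.kappa_statistic()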
def encode(self, word, max_length=-1): """Return the Double Metaphone code for a word. Parameters ---------- word : str The word to transform max_length : int The maximum length of the returned Double Metaphone codes (defaults to unlmited, but in Philips' original implementation this was 4) Returns ------- tuple The Double Metaphone value(s) Examples -------- >>> pe = DoubleMetaphone() >>> pe.encode('Christopher') ('KRSTFR', '') >>> pe.encode('Niall') ('NL', '') >>> pe.encode('Smith') ('SM0', 'XMT') >>> pe.encode('Schmidt') ('XMT', 'SMT') """ # Require a max_length of at least 4 if max_length != -1: max_length = max(4, max_length) primary = '' secondary = '' def _slavo_germanic(): """Return True if the word appears to be Slavic or Germanic. Returns ------- bool True if the word appears to be Slavic or Germanic """ if 'W' in word or 'K' in word or 'CZ' in word: return True return False def _metaph_add(pri, sec=''): """Return a new metaphone tuple with the supplied elements. Parameters ---------- pri : str The primary element sec : str The secondary element Returns ------- tuple A new metaphone tuple with the supplied elements """ newpri = primary newsec = secondary if pri: newpri += pri if sec: if sec != ' ': newsec += sec else: newsec += pri return newpri, newsec def _is_vowel(pos): """Return True if the character at word[pos] is a vowel. Parameters ---------- pos : int Position in the word Returns ------- bool True if the character is a vowel """ if pos >= 0 and word[pos] in {'A', 'E', 'I', 'O', 'U', 'Y'}: return True return False def _get_at(pos): """Return the character at word[pos]. Parameters ---------- pos : int Position in the word Returns ------- str Character at word[pos] """ return word[pos] def _string_at(pos, slen, substrings): """Return True if word[pos:pos+slen] is in substrings. Parameters ---------- pos : int Position in the word slen : int Substring length substrings : set Substrings to search Returns ------- bool True if word[pos:pos+slen] is in substrings """ if pos < 0: return False return word[pos : pos + slen] in substrings current = 0 length = len(word) if length < 1: return '', '' last = length - 1 word = word.upper() word = word.replace('ß', 'SS') # Pad the original string so that we can index beyond the edge of the # world word += ' ' # Skip these when at start of word if word[0:2] in {'GN', 'KN', 'PN', 'WR', 'PS'}: current += 1 # Initial 'X' is pronounced 'Z' e.g. 'Xavier' if _get_at(0) == 'X': primary, secondary = _metaph_add('S') # 'Z' maps to 'S' current += 1 # Main loop while True: if current >= length: break if _get_at(current) in {'A', 'E', 'I', 'O', 'U', 'Y'}: if current == 0: # All init vowels now map to 'A' primary, secondary = _metaph_add('A') current += 1 continue elif _get_at(current) == 'B': # "-mb", e.g", "dumb", already skipped over... 
primary, secondary = _metaph_add('P') if _get_at(current + 1) == 'B': current += 2 else: current += 1 continue elif _get_at(current) == 'Ç': primary, secondary = _metaph_add('S') current += 1 continue elif _get_at(current) == 'C': # Various Germanic if ( current > 1 and not _is_vowel(current - 2) and _string_at((current - 1), 3, {'ACH'}) and ( (_get_at(current + 2) != 'I') and ( (_get_at(current + 2) != 'E') or _string_at( (current - 2), 6, {'BACHER', 'MACHER'} ) ) ) ): primary, secondary = _metaph_add('K') current += 2 continue # Special case 'caesar' elif current == 0 and _string_at(current, 6, {'CAESAR'}): primary, secondary = _metaph_add('S') current += 2 continue # Italian 'chianti' elif _string_at(current, 4, {'CHIA'}): primary, secondary = _metaph_add('K') current += 2 continue elif _string_at(current, 2, {'CH'}): # Find 'Michael' if current > 0 and _string_at(current, 4, {'CHAE'}): primary, secondary = _metaph_add('K', 'X') current += 2 continue # Greek roots e.g. 'chemistry', 'chorus' elif ( current == 0 and ( _string_at((current + 1), 5, {'HARAC', 'HARIS'}) or _string_at( (current + 1), 3, {'HOR', 'HYM', 'HIA', 'HEM'} ) ) and not _string_at(0, 5, {'CHORE'}) ): primary, secondary = _metaph_add('K') current += 2 continue # Germanic, Greek, or otherwise 'ch' for 'kh' sound elif ( ( _string_at(0, 4, {'VAN ', 'VON '}) or _string_at(0, 3, {'SCH'}) ) or # 'architect but not 'arch', 'orchestra', 'orchid' _string_at( (current - 2), 6, {'ORCHES', 'ARCHIT', 'ORCHID'} ) or _string_at((current + 2), 1, {'T', 'S'}) or ( ( _string_at( (current - 1), 1, {'A', 'O', 'U', 'E'} ) or (current == 0) ) and # e.g., 'wachtler', 'wechsler', but not 'tichner' _string_at( (current + 2), 1, { 'L', 'R', 'N', 'M', 'B', 'H', 'F', 'V', 'W', ' ', }, ) ) ): primary, secondary = _metaph_add('K') else: if current > 0: if _string_at(0, 2, {'MC'}): # e.g., "McHugh" primary, secondary = _metaph_add('K') else: primary, secondary = _metaph_add('X', 'K') else: primary, secondary = _metaph_add('X') current += 2 continue # e.g, 'czerny' elif _string_at(current, 2, {'CZ'}) and not _string_at( (current - 2), 4, {'WICZ'} ): primary, secondary = _metaph_add('S', 'X') current += 2 continue # e.g., 'focaccia' elif _string_at((current + 1), 3, {'CIA'}): primary, secondary = _metaph_add('X') current += 3 # double 'C', but not if e.g. 'McClellan' elif _string_at(current, 2, {'CC'}) and not ( (current == 1) and (_get_at(0) == 'M') ): # 'bellocchio' but not 'bacchus' if _string_at( (current + 2), 1, {'I', 'E', 'H'} ) and not _string_at((current + 2), 2, {'HU'}): # 'accident', 'accede' 'succeed' if ( (current == 1) and _get_at(current - 1) == 'A' ) or _string_at((current - 1), 5, {'UCCEE', 'UCCES'}): primary, secondary = _metaph_add('KS') # 'bacci', 'bertucci', other italian else: primary, secondary = _metaph_add('X') current += 3 continue else: # Pierce's rule primary, secondary = _metaph_add('K') current += 2 continue elif _string_at(current, 2, {'CK', 'CG', 'CQ'}): primary, secondary = _metaph_add('K') current += 2 continue elif _string_at(current, 2, {'CI', 'CE', 'CY'}): # Italian vs. 
English if _string_at(current, 3, {'CIO', 'CIE', 'CIA'}): primary, secondary = _metaph_add('S', 'X') else: primary, secondary = _metaph_add('S') current += 2 continue # else else: primary, secondary = _metaph_add('K') # name sent in 'mac caffrey', 'mac gregor if _string_at((current + 1), 2, {' C', ' Q', ' G'}): current += 3 elif _string_at( (current + 1), 1, {'C', 'K', 'Q'} ) and not _string_at((current + 1), 2, {'CE', 'CI'}): current += 2 else: current += 1 continue elif _get_at(current) == 'D': if _string_at(current, 2, {'DG'}): if _string_at((current + 2), 1, {'I', 'E', 'Y'}): # e.g. 'edge' primary, secondary = _metaph_add('J') current += 3 continue else: # e.g. 'edgar' primary, secondary = _metaph_add('TK') current += 2 continue elif _string_at(current, 2, {'DT', 'DD'}): primary, secondary = _metaph_add('T') current += 2 continue # else else: primary, secondary = _metaph_add('T') current += 1 continue elif _get_at(current) == 'F': if _get_at(current + 1) == 'F': current += 2 else: current += 1 primary, secondary = _metaph_add('F') continue elif _get_at(current) == 'G': if _get_at(current + 1) == 'H': if (current > 0) and not _is_vowel(current - 1): primary, secondary = _metaph_add('K') current += 2 continue # 'ghislane', ghiradelli elif current == 0: if _get_at(current + 2) == 'I': primary, secondary = _metaph_add('J') else: primary, secondary = _metaph_add('K') current += 2 continue # Parker's rule (with some further refinements) - # e.g., 'hugh' elif ( ( (current > 1) and _string_at((current - 2), 1, {'B', 'H', 'D'}) ) or # e.g., 'bough' ( (current > 2) and _string_at((current - 3), 1, {'B', 'H', 'D'}) ) or # e.g., 'broughton' ( (current > 3) and _string_at((current - 4), 1, {'B', 'H'}) ) ): current += 2 continue else: # e.g. 'laugh', 'McLaughlin', 'cough', # 'gough', 'rough', 'tough' if ( (current > 2) and (_get_at(current - 1) == 'U') and ( _string_at( (current - 3), 1, {'C', 'G', 'L', 'R', 'T'} ) ) ): primary, secondary = _metaph_add('F') elif (current > 0) and _get_at(current - 1) != 'I': primary, secondary = _metaph_add('K') current += 2 continue elif _get_at(current + 1) == 'N': if ( (current == 1) and _is_vowel(0) and not _slavo_germanic() ): primary, secondary = _metaph_add('KN', 'N') # not e.g. 
'cagney' elif ( not _string_at((current + 2), 2, {'EY'}) and (_get_at(current + 1) != 'Y') and not _slavo_germanic() ): primary, secondary = _metaph_add('N', 'KN') else: primary, secondary = _metaph_add('KN') current += 2 continue # 'tagliaro' elif ( _string_at((current + 1), 2, {'LI'}) and not _slavo_germanic() ): primary, secondary = _metaph_add('KL', 'L') current += 2 continue # -ges-, -gep-, -gel-, -gie- at beginning elif (current == 0) and ( (_get_at(current + 1) == 'Y') or _string_at( (current + 1), 2, { 'ES', 'EP', 'EB', 'EL', 'EY', 'IB', 'IL', 'IN', 'IE', 'EI', 'ER', }, ) ): primary, secondary = _metaph_add('K', 'J') current += 2 continue # -ger-, -gy- elif ( ( _string_at((current + 1), 2, {'ER'}) or (_get_at(current + 1) == 'Y') ) and not _string_at(0, 6, {'DANGER', 'RANGER', 'MANGER'}) and not _string_at((current - 1), 1, {'E', 'I'}) and not _string_at((current - 1), 3, {'RGY', 'OGY'}) ): primary, secondary = _metaph_add('K', 'J') current += 2 continue # italian e.g, 'biaggi' elif _string_at( (current + 1), 1, {'E', 'I', 'Y'} ) or _string_at((current - 1), 4, {'AGGI', 'OGGI'}): # obvious germanic if ( _string_at(0, 4, {'VAN ', 'VON '}) or _string_at(0, 3, {'SCH'}) ) or _string_at((current + 1), 2, {'ET'}): primary, secondary = _metaph_add('K') elif _string_at((current + 1), 4, {'IER '}): primary, secondary = _metaph_add('J') else: primary, secondary = _metaph_add('J', 'K') current += 2 continue else: if _get_at(current + 1) == 'G': current += 2 else: current += 1 primary, secondary = _metaph_add('K') continue elif _get_at(current) == 'H': # only keep if first & before vowel or btw. 2 vowels if ((current == 0) or _is_vowel(current - 1)) and _is_vowel( current + 1 ): primary, secondary = _metaph_add('H') current += 2 else: # also takes care of 'HH' current += 1 continue elif _get_at(current) == 'J': # obvious spanish, 'jose', 'san jacinto' if _string_at(current, 4, {'JOSE'}) or _string_at( 0, 4, {'SAN '} ): if ( (current == 0) and (_get_at(current + 4) == ' ') ) or _string_at(0, 4, {'SAN '}): primary, secondary = _metaph_add('H') else: primary, secondary = _metaph_add('J', 'H') current += 1 continue elif (current == 0) and not _string_at(current, 4, {'JOSE'}): # Yankelovich/Jankelowicz primary, secondary = _metaph_add('J', 'A') # Spanish pron. of e.g. 'bajador' elif ( _is_vowel(current - 1) and not _slavo_germanic() and ( (_get_at(current + 1) == 'A') or (_get_at(current + 1) == 'O') ) ): primary, secondary = _metaph_add('J', 'H') elif current == last: primary, secondary = _metaph_add('J', ' ') elif not _string_at( (current + 1), 1, {'L', 'T', 'K', 'S', 'N', 'M', 'B', 'Z'} ) and not _string_at((current - 1), 1, {'S', 'K', 'L'}): primary, secondary = _metaph_add('J') if _get_at(current + 1) == 'J': # it could happen! current += 2 else: current += 1 continue elif _get_at(current) == 'K': if _get_at(current + 1) == 'K': current += 2 else: current += 1 primary, secondary = _metaph_add('K') continue elif _get_at(current) == 'L': if _get_at(current + 1) == 'L': # Spanish e.g. 
'cabrillo', 'gallegos' if ( (current == (length - 3)) and _string_at( (current - 1), 4, {'ILLO', 'ILLA', 'ALLE'} ) ) or ( ( _string_at((last - 1), 2, {'AS', 'OS'}) or _string_at(last, 1, {'A', 'O'}) ) and _string_at((current - 1), 4, {'ALLE'}) ): primary, secondary = _metaph_add('L', ' ') current += 2 continue current += 2 else: current += 1 primary, secondary = _metaph_add('L') continue elif _get_at(current) == 'M': if ( ( _string_at((current - 1), 3, {'UMB'}) and ( ((current + 1) == last) or _string_at((current + 2), 2, {'ER'}) ) ) or # 'dumb', 'thumb' (_get_at(current + 1) == 'M') ): current += 2 else: current += 1 primary, secondary = _metaph_add('M') continue elif _get_at(current) == 'N': if _get_at(current + 1) == 'N': current += 2 else: current += 1 primary, secondary = _metaph_add('N') continue elif _get_at(current) == 'Ñ': current += 1 primary, secondary = _metaph_add('N') continue elif _get_at(current) == 'P': if _get_at(current + 1) == 'H': primary, secondary = _metaph_add('F') current += 2 continue # also account for "campbell", "raspberry" elif _string_at((current + 1), 1, {'P', 'B'}): current += 2 else: current += 1 primary, secondary = _metaph_add('P') continue elif _get_at(current) == 'Q': if _get_at(current + 1) == 'Q': current += 2 else: current += 1 primary, secondary = _metaph_add('K') continue elif _get_at(current) == 'R': # french e.g. 'rogier', but exclude 'hochmeier' if ( (current == last) and not _slavo_germanic() and _string_at((current - 2), 2, {'IE'}) and not _string_at((current - 4), 2, {'ME', 'MA'}) ): primary, secondary = _metaph_add('', 'R') else: primary, secondary = _metaph_add('R') if _get_at(current + 1) == 'R': current += 2 else: current += 1 continue elif _get_at(current) == 'S': # special cases 'island', 'isle', 'carlisle', 'carlysle' if _string_at((current - 1), 3, {'ISL', 'YSL'}): current += 1 continue # special case 'sugar-' elif (current == 0) and _string_at(current, 5, {'SUGAR'}): primary, secondary = _metaph_add('X', 'S') current += 1 continue elif _string_at(current, 2, {'SH'}): # Germanic if _string_at( (current + 1), 4, {'HEIM', 'HOEK', 'HOLM', 'HOLZ'} ): primary, secondary = _metaph_add('S') else: primary, secondary = _metaph_add('X') current += 2 continue # Italian & Armenian elif _string_at(current, 3, {'SIO', 'SIA'}) or _string_at( current, 4, {'SIAN'} ): if not _slavo_germanic(): primary, secondary = _metaph_add('S', 'X') else: primary, secondary = _metaph_add('S') current += 3 continue # German & anglicisations, e.g. 'smith' match 'schmidt', # 'snider' match 'schneider' # also, -sz- in Slavic language although in Hungarian it is # pronounced 's' elif ( (current == 0) and _string_at((current + 1), 1, {'M', 'N', 'L', 'W'}) ) or _string_at((current + 1), 1, {'Z'}): primary, secondary = _metaph_add('S', 'X') if _string_at((current + 1), 1, {'Z'}): current += 2 else: current += 1 continue elif _string_at(current, 2, {'SC'}): # Schlesinger's rule if _get_at(current + 2) == 'H': # dutch origin, e.g. 
'school', 'schooner' if _string_at( (current + 3), 2, {'OO', 'ER', 'EN', 'UY', 'ED', 'EM'}, ): # 'schermerhorn', 'schenker' if _string_at((current + 3), 2, {'ER', 'EN'}): primary, secondary = _metaph_add('X', 'SK') else: primary, secondary = _metaph_add('SK') current += 3 continue else: if ( (current == 0) and not _is_vowel(3) and (_get_at(3) != 'W') ): primary, secondary = _metaph_add('X', 'S') else: primary, secondary = _metaph_add('X') current += 3 continue elif _string_at((current + 2), 1, {'I', 'E', 'Y'}): primary, secondary = _metaph_add('S') current += 3 continue # else else: primary, secondary = _metaph_add('SK') current += 3 continue else: # french e.g. 'resnais', 'artois' if (current == last) and _string_at( (current - 2), 2, {'AI', 'OI'} ): primary, secondary = _metaph_add('', 'S') else: primary, secondary = _metaph_add('S') if _string_at((current + 1), 1, {'S', 'Z'}): current += 2 else: current += 1 continue elif _get_at(current) == 'T': if _string_at(current, 4, {'TION'}): primary, secondary = _metaph_add('X') current += 3 continue elif _string_at(current, 3, {'TIA', 'TCH'}): primary, secondary = _metaph_add('X') current += 3 continue elif _string_at(current, 2, {'TH'}) or _string_at( current, 3, {'TTH'} ): # special case 'thomas', 'thames' or germanic if ( _string_at((current + 2), 2, {'OM', 'AM'}) or _string_at(0, 4, {'VAN ', 'VON '}) or _string_at(0, 3, {'SCH'}) ): primary, secondary = _metaph_add('T') else: primary, secondary = _metaph_add('0', 'T') current += 2 continue elif _string_at((current + 1), 1, {'T', 'D'}): current += 2 else: current += 1 primary, secondary = _metaph_add('T') continue elif _get_at(current) == 'V': if _get_at(current + 1) == 'V': current += 2 else: current += 1 primary, secondary = _metaph_add('F') continue elif _get_at(current) == 'W': # can also be in middle of word if _string_at(current, 2, {'WR'}): primary, secondary = _metaph_add('R') current += 2 continue elif (current == 0) and ( _is_vowel(current + 1) or _string_at(current, 2, {'WH'}) ): # Wasserman should match Vasserman if _is_vowel(current + 1): primary, secondary = _metaph_add('A', 'F') else: # need Uomo to match Womo primary, secondary = _metaph_add('A') # Arnow should match Arnoff if ( ((current == last) and _is_vowel(current - 1)) or _string_at( (current - 1), 5, {'EWSKI', 'EWSKY', 'OWSKI', 'OWSKY'} ) or _string_at(0, 3, {'SCH'}) ): primary, secondary = _metaph_add('', 'F') current += 1 continue # Polish e.g. 'filipowicz' elif _string_at(current, 4, {'WICZ', 'WITZ'}): primary, secondary = _metaph_add('TS', 'FX') current += 4 continue # else skip it else: current += 1 continue elif _get_at(current) == 'X': # French e.g. breaux if not ( (current == last) and ( _string_at((current - 3), 3, {'IAU', 'EAU'}) or _string_at((current - 2), 2, {'AU', 'OU'}) ) ): primary, secondary = _metaph_add('KS') if _string_at((current + 1), 1, {'C', 'X'}): current += 2 else: current += 1 continue elif _get_at(current) == 'Z': # Chinese Pinyin e.g. 'zhao' if _get_at(current + 1) == 'H': primary, secondary = _metaph_add('J') current += 2 continue elif _string_at((current + 1), 2, {'ZO', 'ZI', 'ZA'}) or ( _slavo_germanic() and ((current > 0) and _get_at(current - 1) != 'T') ): primary, secondary = _metaph_add('S', 'TS') else: primary, secondary = _metaph_add('S') if _get_at(current + 1) == 'Z': current += 2 else: current += 1 continue else: current += 1 if max_length > 0: primary = primary[:max_length] secondary = secondary[:max_length] if primary == secondary: secondary = '' return primary, secondary
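A usage sketch (a hypothetical helper, not part of the class): names are commonly treated as matching when any of their Double Metaphone codes coincide, with the secondary code catching cross-language variants:

pe = DoubleMetaphone()

def dm_match(a, b):
    # the empty string marks an absent secondary code, so exclude it
    return bool((set(pe.encode(a)) & set(pe.encode(b))) - {''})

# per the doctests above, encode('Smith') == ('SM0', 'XMT') and
# encode('Schmidt') == ('XMT', 'SMT'), so dm_match('Smith', 'Schmidt') is True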
def stem(self, word):
    """Return CLEF German stem.

    Parameters
    ----------
    word : str
        The word to stem

    Returns
    -------
    str
        Word stem

    Examples
    --------
    >>> stmr = CLEFGerman()
    >>> stmr.stem('lesen')
    'lese'
    >>> stmr.stem('graues')
    'grau'
    >>> stmr.stem('buchstabieren')
    'buchstabier'

    """
    # lowercase, normalize, and compose
    word = normalize('NFC', text_type(word.lower()))

    # remove umlauts
    word = word.translate(self._umlauts)

    # remove plurals
    wlen = len(word) - 1

    if wlen > 3:
        if wlen > 5:
            if word[-3:] == 'nen':
                return word[:-3]
        if wlen > 4:
            if word[-2:] in {'en', 'se', 'es', 'er'}:
                return word[:-2]
        if word[-1] in {'e', 'n', 'r', 's'}:
            return word[:-1]

    return word
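# The CLEF German stemmer above is a single-pass suffix cascade: after
# umlaut folding it strips at most one plural/inflectional ending, trying
# longer suffixes first and only on sufficiently long words. A standalone
# sketch of just that cascade (lowercasing and umlaut handling omitted):


def clef_german_suffix_sketch(word):
    wlen = len(word) - 1
    if wlen > 3:
        if wlen > 5 and word[-3:] == 'nen':
            return word[:-3]
        if wlen > 4 and word[-2:] in {'en', 'se', 'es', 'er'}:
            return word[:-2]
        if word[-1] in {'e', 'n', 'r', 's'}:
            return word[:-1]
    return word


assert clef_german_suffix_sketch('lesen') == 'lese'
assert clef_german_suffix_sketch('graues') == 'grau'
assert clef_german_suffix_sketch('buchstabieren') == 'buchstabier'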
def encode(self, word, mode=1, lang='de'):
    """Return the phonet code for a word.

    Parameters
    ----------
    word : str
        The word to transform
    mode : int
        The phonet variant to employ (1 or 2)
    lang : str
        ``de`` (default) for German, ``none`` for no language

    Returns
    -------
    str
        The phonet value

    Examples
    --------
    >>> pe = Phonet()
    >>> pe.encode('Christopher')
    'KRISTOFA'
    >>> pe.encode('Niall')
    'NIAL'
    >>> pe.encode('Smith')
    'SMIT'
    >>> pe.encode('Schmidt')
    'SHMIT'
    >>> pe.encode('Christopher', mode=2)
    'KRIZTUFA'
    >>> pe.encode('Niall', mode=2)
    'NIAL'
    >>> pe.encode('Smith', mode=2)
    'ZNIT'
    >>> pe.encode('Schmidt', mode=2)
    'ZNIT'
    >>> pe.encode('Christopher', lang='none')
    'CHRISTOPHER'
    >>> pe.encode('Niall', lang='none')
    'NIAL'
    >>> pe.encode('Smith', lang='none')
    'SMITH'
    >>> pe.encode('Schmidt', lang='none')
    'SCHMIDT'

    """
    phonet_hash = Counter()
    alpha_pos = Counter()

    phonet_hash_1 = Counter()
    phonet_hash_2 = Counter()

    def _initialize_phonet(lang):
        """Initialize phonet variables.

        Parameters
        ----------
        lang : str
            Language to use for rules

        """
        if lang == 'none':
            _phonet_rules = self._rules_no_lang
        else:
            _phonet_rules = self._rules_german

        phonet_hash[''] = -1

        # German and international umlauts
        for j in {
            'À', 'Á', 'Â', 'Ã', 'Ä', 'Å', 'Æ', 'Ç', 'È', 'É', 'Ê', 'Ë',
            'Ì', 'Í', 'Î', 'Ï', 'Ð', 'Ñ', 'Ò', 'Ó', 'Ô', 'Õ', 'Ö', 'Ø',
            'Ù', 'Ú', 'Û', 'Ü', 'Ý', 'Þ', 'ß', 'Œ', 'Š', 'Ÿ',
        }:
            alpha_pos[j] = 1
            phonet_hash[j] = -1

        # "normal" letters ('A'-'Z')
        for i, j in enumerate('ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
            alpha_pos[j] = i + 2
            phonet_hash[j] = -1

        for i in range(26):
            for j in range(28):
                phonet_hash_1[i, j] = -1
                phonet_hash_2[i, j] = -1

        # for each phonetic rule
        for i in range(len(_phonet_rules)):
            rule = _phonet_rules[i]

            if rule and i % 3 == 0:
                # calculate first hash value
                k = _phonet_rules[i][0]

                if phonet_hash[k] < 0 and (
                    _phonet_rules[i + 1] or _phonet_rules[i + 2]
                ):
                    phonet_hash[k] = i

                # calculate second hash values
                if k and alpha_pos[k] >= 2:
                    k = alpha_pos[k]

                    j = k - 2
                    rule = rule[1:]

                    if not rule:
                        rule = ' '
                    elif rule[0] == '(':
                        rule = rule[1:]
                    else:
                        rule = rule[0]

                    while rule and (rule[0] != ')'):
                        k = alpha_pos[rule[0]]

                        if k > 0:
                            # add hash value for this letter
                            if phonet_hash_1[j, k] < 0:
                                phonet_hash_1[j, k] = i
                                phonet_hash_2[j, k] = i

                            if phonet_hash_2[j, k] >= (i - 30):
                                phonet_hash_2[j, k] = i
                            else:
                                k = -1

                        if k <= 0:
                            # add hash value for all letters
                            if phonet_hash_1[j, 0] < 0:
                                phonet_hash_1[j, 0] = i

                            phonet_hash_2[j, 0] = i

                        rule = rule[1:]

    def _phonet(term, mode, lang):
        """Return the phonet coded form of a term.

        Parameters
        ----------
        term : str
            Term to transform
        mode : int
            The phonet variant to employ (1 or 2)
        lang : str
            ``de`` (default) for German, ``none`` for no language

        Returns
        -------
        str
            The phonet value

        """
        if lang == 'none':
            _phonet_rules = self._rules_no_lang
        else:
            _phonet_rules = self._rules_german

        char0 = ''
        dest = term

        if not term:
            return ''

        term_length = len(term)

        # convert input string to upper-case
        src = term.translate(self._upper_trans)

        # check "src"
        i = 0
        j = 0
        zeta = 0

        while i < len(src):
            char = src[i]

            pos = alpha_pos[char]

            if pos >= 2:
                xpos = pos - 2

                if i + 1 == len(src):
                    pos = alpha_pos['']
                else:
                    pos = alpha_pos[src[i + 1]]

                start1 = phonet_hash_1[xpos, pos]
                start2 = phonet_hash_1[xpos, 0]
                end1 = phonet_hash_2[xpos, pos]
                end2 = phonet_hash_2[xpos, 0]

                # preserve rule priorities
                if (start2 >= 0) and ((start1 < 0) or (start2 < start1)):
                    pos = start1
                    start1 = start2
                    start2 = pos
                    pos = end1
                    end1 = end2
                    end2 = pos

                if (end1 >= start2) and (start2 >= 0):
                    if end2 > end1:
                        end1 = end2

                    start2 = -1
                    end2 = -1
            else:
                pos = phonet_hash[char]
                start1 = pos
                end1 = 10000
                start2 = -1
                end2 = -1

            pos = start1
            zeta0 = 0

            if pos >= 0:
                # check rules for this char
                while (_phonet_rules[pos] is None) or (
                    _phonet_rules[pos][0] == char
                ):
                    if pos > end1:
                        if start2 > 0:
                            pos = start2
                            start1 = start2
                            start2 = -1
                            end1 = end2
                            end2 = -1
                            continue

                        break

                    if (_phonet_rules[pos] is None) or (
                        _phonet_rules[pos + mode] is None
                    ):
                        # no conversion rule available
                        pos += 3
                        continue

                    # check whole string
                    matches = 1  # number of matching letters
                    priority = 5  # default priority
                    rule = _phonet_rules[pos]
                    rule = rule[1:]

                    while (
                        rule
                        and (len(src) > (i + matches))
                        and (src[i + matches] == rule[0])
                        and not rule[0].isdigit()
                        and (rule not in '(-<^$')
                    ):
                        matches += 1
                        rule = rule[1:]

                    if rule and (rule[0] == '('):
                        # check an array of letters
                        if (
                            (len(src) > (i + matches))
                            and src[i + matches].isalpha()
                            and (src[i + matches] in rule[1:])
                        ):
                            matches += 1

                            while rule and rule[0] != ')':
                                rule = rule[1:]

                            # if rule[0] == ')':
                            rule = rule[1:]

                    if rule:
                        priority0 = ord(rule[0])
                    else:
                        priority0 = 0

                    matches0 = matches

                    while rule and rule[0] == '-' and matches > 1:
                        matches -= 1
                        rule = rule[1:]

                    if rule and rule[0] == '<':
                        rule = rule[1:]

                    if rule and rule[0].isdigit():
                        # read priority
                        priority = int(rule[0])
                        rule = rule[1:]

                    if rule and rule[0:2] == '^^':
                        rule = rule[1:]

                    if (
                        not rule
                        or (
                            (rule[0] == '^')
                            and ((i == 0) or not src[i - 1].isalpha())
                            and (
                                (rule[1:2] != '$')
                                or (
                                    not (
                                        src[
                                            i + matches0 : i + matches0 + 1
                                        ].isalpha()
                                    )
                                    and (
                                        src[i + matches0 : i + matches0 + 1]
                                        != '.'
                                    )
                                )
                            )
                        )
                        or (
                            (rule[0] == '$')
                            and (i > 0)
                            and src[i - 1].isalpha()
                            and (
                                (
                                    not src[
                                        i + matches0 : i + matches0 + 1
                                    ].isalpha()
                                )
                                and (
                                    src[i + matches0 : i + matches0 + 1]
                                    != '.'
                                )
                            )
                        )
                    ):
                        # look for continuation, if:
                        # matches > 1 and NO '-' in first string
                        pos0 = -1

                        start3 = 0
                        start4 = 0
                        end3 = 0
                        end4 = 0

                        if (
                            (matches > 1)
                            and src[i + matches : i + matches + 1]
                            and (priority0 != ord('-'))
                        ):
                            char0 = src[i + matches - 1]
                            pos0 = alpha_pos[char0]

                            if pos0 >= 2 and src[i + matches]:
                                xpos = pos0 - 2
                                pos0 = alpha_pos[src[i + matches]]
                                start3 = phonet_hash_1[xpos, pos0]
                                start4 = phonet_hash_1[xpos, 0]
                                end3 = phonet_hash_2[xpos, pos0]
                                end4 = phonet_hash_2[xpos, 0]

                                # preserve rule priorities
                                if (start4 >= 0) and (
                                    (start3 < 0) or (start4 < start3)
                                ):
                                    pos0 = start3
                                    start3 = start4
                                    start4 = pos0
                                    pos0 = end3
                                    end3 = end4
                                    end4 = pos0

                                if (end3 >= start4) and (start4 >= 0):
                                    if end4 > end3:
                                        end3 = end4

                                    start4 = -1
                                    end4 = -1
                            else:
                                pos0 = phonet_hash[char0]
                                start3 = pos0
                                end3 = 10000
                                start4 = -1
                                end4 = -1

                            pos0 = start3

                        # check continuation rules for src[i+matches]
                        if pos0 >= 0:
                            while (_phonet_rules[pos0] is None) or (
                                _phonet_rules[pos0][0] == char0
                            ):
                                if pos0 > end3:
                                    if start4 > 0:
                                        pos0 = start4
                                        start3 = start4
                                        start4 = -1
                                        end3 = end4
                                        end4 = -1
                                        continue

                                    priority0 = -1

                                    # important
                                    break

                                if (_phonet_rules[pos0] is None) or (
                                    _phonet_rules[pos0 + mode] is None
                                ):
                                    # no conversion rule available
                                    pos0 += 3
                                    continue

                                # check whole string
                                matches0 = matches
                                priority0 = 5
                                rule = _phonet_rules[pos0]
                                rule = rule[1:]

                                while (
                                    rule
                                    and (
                                        src[i + matches0 : i + matches0 + 1]
                                        == rule[0]
                                    )
                                    and (
                                        not rule[0].isdigit()
                                        or (rule in '(-<^$')
                                    )
                                ):
                                    matches0 += 1
                                    rule = rule[1:]

                                if rule and rule[0] == '(':
                                    # check an array of letters
                                    if src[
                                        i + matches0 : i + matches0 + 1
                                    ].isalpha() and (
                                        src[i + matches0] in rule[1:]
                                    ):
                                        matches0 += 1

                                        while rule and rule[0] != ')':
                                            rule = rule[1:]

                                        # if rule[0] == ')':
                                        rule = rule[1:]

                                while rule and rule[0] == '-':
                                    # "matches0" is NOT decremented
                                    # because of
                                    # "if (matches0 == matches)"
                                    rule = rule[1:]

                                if rule and rule[0] == '<':
                                    rule = rule[1:]

                                if rule and rule[0].isdigit():
                                    priority0 = int(rule[0])
                                    rule = rule[1:]

                                if (
                                    not rule
                                    # rule == '^' is not possible here
                                    or (
                                        (rule[0] == '$')
                                        and not src[
                                            i + matches0 : i + matches0 + 1
                                        ].isalpha()
                                        and (
                                            src[
                                                i + matches0 : i + matches0 + 1
                                            ]
                                            != '.'
                                        )
                                    )
                                ):
                                    if matches0 == matches:
                                        # this is only a partial string
                                        pos0 += 3
                                        continue

                                    if priority0 < priority:
                                        # priority is too low
                                        pos0 += 3
                                        continue

                                    # continuation rule found
                                    break

                                pos0 += 3

                            # end of "while"
                            if (priority0 >= priority) and (
                                (_phonet_rules[pos0] is not None)
                                and (_phonet_rules[pos0][0] == char0)
                            ):
                                pos += 3
                                continue

                        # replace string
                        if _phonet_rules[pos] and (
                            '<' in _phonet_rules[pos][1:]
                        ):
                            priority0 = 1
                        else:
                            priority0 = 0

                        rule = _phonet_rules[pos + mode]

                        if (priority0 == 1) and (zeta == 0):
                            # rule with '<' is applied
                            if (
                                (j > 0)
                                and rule
                                and (
                                    (dest[j - 1] == char)
                                    or (dest[j - 1] == rule[0])
                                )
                            ):
                                j -= 1

                            zeta0 = 1
                            zeta += 1
                            matches0 = 0

                            while rule and src[i + matches0]:
                                src = (
                                    src[0 : i + matches0]
                                    + rule[0]
                                    + src[i + matches0 + 1 :]
                                )
                                matches0 += 1
                                rule = rule[1:]

                            if matches0 < matches:
                                src = (
                                    src[0 : i + matches0]
                                    + src[i + matches :]
                                )

                            char = src[i]
                        else:
                            i = i + matches - 1
                            zeta = 0

                            while len(rule) > 1:
                                if (j == 0) or (dest[j - 1] != rule[0]):
                                    dest = (
                                        dest[0:j]
                                        + rule[0]
                                        + dest[min(len(dest), j + 1) :]
                                    )
                                    j += 1

                                rule = rule[1:]

                            # new "current char"
                            if not rule:
                                rule = ''
                                char = ''
                            else:
                                char = rule[0]

                            if (
                                _phonet_rules[pos]
                                and '^^' in _phonet_rules[pos][1:]
                            ):
                                if char:
                                    dest = (
                                        dest[0:j]
                                        + char
                                        + dest[min(len(dest), j + 1) :]
                                    )
                                    j += 1

                                src = src[i + 1 :]
                                i = 0
                                zeta0 = 1

                        break

                    pos += 3

                    if pos > end1 and start2 > 0:
                        pos = start2
                        start1 = start2
                        end1 = end2
                        start2 = -1
                        end2 = -1

            if zeta0 == 0:
                if char and ((j == 0) or (dest[j - 1] != char)):
                    # delete multiple letters only
                    dest = (
                        dest[0:j] + char + dest[min(j + 1, term_length) :]
                    )
                    j += 1

                i += 1
                zeta = 0

        dest = dest[0:j]

        return dest

    _initialize_phonet(lang)

    word = unicode_normalize('NFKC', text_type(word))
    return _phonet(word, mode, lang)
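# The rule tables consumed by `_initialize_phonet` and `_phonet` above are
# flat lists laid out in triplets: a source pattern followed by its mode-1
# and mode-2 replacements, which is why the code indexes
# `_phonet_rules[pos + mode]` and steps through the table with `pos += 3`.
# A toy illustration of that layout and lookup; the two rules here are
# invented for the example, not taken from the real German table:

toy_rules = [
    'SCH', 'SH', 'Z',  # pattern, mode-1 replacement, mode-2 replacement
    'PH', 'F', 'F',
]


def toy_rule_lookup(pattern, mode=1):
    for pos in range(0, len(toy_rules), 3):
        if toy_rules[pos] == pattern:
            return toy_rules[pos + mode]
    return pattern


assert toy_rule_lookup('SCH', mode=1) == 'SH'
assert toy_rule_lookup('SCH', mode=2) == 'Z'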
def stem(self, word):
    """Return Snowball Danish stem.

    Parameters
    ----------
    word : str
        The word to stem

    Returns
    -------
    str
        Word stem

    Examples
    --------
    >>> stmr = SnowballDanish()
    >>> stmr.stem('underviser')
    'undervis'
    >>> stmr.stem('suspension')
    'suspension'
    >>> stmr.stem('sikkerhed')
    'sikker'

    """
    # lowercase, normalize, and compose
    word = normalize('NFC', text_type(word.lower()))

    r1_start = min(max(3, self._sb_r1(word)), len(word))

    # Step 1
    _r1 = word[r1_start:]
    if _r1[-7:] == 'erendes':
        word = word[:-7]
    elif _r1[-6:] in {'erende', 'hedens'}:
        word = word[:-6]
    elif _r1[-5:] in {
        'ethed',
        'erede',
        'heden',
        'heder',
        'endes',
        'ernes',
        'erens',
        'erets',
    }:
        word = word[:-5]
    elif _r1[-4:] in {
        'ered',
        'ende',
        'erne',
        'eren',
        'erer',
        'heds',
        'enes',
        'eres',
        'eret',
    }:
        word = word[:-4]
    elif _r1[-3:] in {'hed', 'ene', 'ere', 'ens', 'ers', 'ets'}:
        word = word[:-3]
    elif _r1[-2:] in {'en', 'er', 'es', 'et'}:
        word = word[:-2]
    elif _r1[-1:] == 'e':
        word = word[:-1]
    elif _r1[-1:] == 's':
        if len(word) > 1 and word[-2] in self._s_endings:
            word = word[:-1]

    # Step 2
    if word[r1_start:][-2:] in {'gd', 'dt', 'gt', 'kt'}:
        word = word[:-1]

    # Step 3
    if word[-4:] == 'igst':
        word = word[:-2]

    _r1 = word[r1_start:]
    repeat_step2 = False
    if _r1[-4:] == 'elig':
        word = word[:-4]
        repeat_step2 = True
    elif _r1[-4:] == 'løst':
        word = word[:-1]
    elif _r1[-3:] in {'lig', 'els'}:
        word = word[:-3]
        repeat_step2 = True
    elif _r1[-2:] == 'ig':
        word = word[:-2]
        repeat_step2 = True

    if repeat_step2:
        if word[r1_start:][-2:] in {'gd', 'dt', 'gt', 'kt'}:
            word = word[:-1]

    # Step 4
    if (
        len(word[r1_start:]) >= 1
        and len(word) >= 2
        and word[-1] == word[-2]
        and word[-1] not in self._vowels
    ):
        word = word[:-1]

    return word
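# `r1_start` above is the standard Snowball R1: the region after the first
# non-vowel that follows a vowel, clamped so that it never begins before
# position 3 and never runs past the end of the word. A minimal sketch of
# that computation, assuming the Danish vowel set a e i o u y æ å ø:

_DANISH_VOWELS = set('aeiouyæåø')


def sb_r1_sketch(word):
    for i in range(1, len(word)):
        if word[i] not in _DANISH_VOWELS and word[i - 1] in _DANISH_VOWELS:
            return i + 1
    return len(word)


word = 'underviser'
r1_start = min(max(3, sb_r1_sketch(word)), len(word))
# only this region is eligible for suffix removal; 'er' is in R1, so
# step 1 strips it and yields the docstring's 'undervis'
assert word[r1_start:] == 'erviser'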
def stem(self, word, alternate_vowels=False):
    """Return Snowball German stem.

    Parameters
    ----------
    word : str
        The word to stem
    alternate_vowels : bool
        Composes ae as ä, oe as ö, and ue as ü before running the algorithm

    Returns
    -------
    str
        Word stem

    Examples
    --------
    >>> stmr = SnowballGerman()
    >>> stmr.stem('lesen')
    'les'
    >>> stmr.stem('graues')
    'grau'
    >>> stmr.stem('buchstabieren')
    'buchstabi'

    """
    # lowercase, normalize, and compose
    word = normalize('NFC', word.lower())
    word = word.replace('ß', 'ss')

    if len(word) > 2:
        for i in range(2, len(word)):
            if word[i] in self._vowels and word[i - 2] in self._vowels:
                if word[i - 1] == 'u':
                    word = word[: i - 1] + 'U' + word[i:]
                elif word[i - 1] == 'y':
                    word = word[: i - 1] + 'Y' + word[i:]

    if alternate_vowels:
        word = word.replace('ae', 'ä')
        word = word.replace('oe', 'ö')
        word = word.replace('que', 'Q')
        word = word.replace('ue', 'ü')
        word = word.replace('Q', 'que')

    r1_start = max(3, self._sb_r1(word))
    r2_start = self._sb_r2(word)

    # Step 1
    niss_flag = False
    if word[-3:] == 'ern':
        if len(word[r1_start:]) >= 3:
            word = word[:-3]
    elif word[-2:] == 'em':
        if len(word[r1_start:]) >= 2:
            word = word[:-2]
    elif word[-2:] == 'er':
        if len(word[r1_start:]) >= 2:
            word = word[:-2]
    elif word[-2:] == 'en':
        if len(word[r1_start:]) >= 2:
            word = word[:-2]
            niss_flag = True
    elif word[-2:] == 'es':
        if len(word[r1_start:]) >= 2:
            word = word[:-2]
            niss_flag = True
    elif word[-1:] == 'e':
        if len(word[r1_start:]) >= 1:
            word = word[:-1]
            niss_flag = True
    elif word[-1:] == 's':
        if (
            len(word[r1_start:]) >= 1
            and len(word) >= 2
            and word[-2] in self._s_endings
        ):
            word = word[:-1]

    if niss_flag and word[-4:] == 'niss':
        word = word[:-1]

    # Step 2
    if word[-3:] == 'est':
        if len(word[r1_start:]) >= 3:
            word = word[:-3]
    elif word[-2:] == 'en':
        if len(word[r1_start:]) >= 2:
            word = word[:-2]
    elif word[-2:] == 'er':
        if len(word[r1_start:]) >= 2:
            word = word[:-2]
    elif word[-2:] == 'st':
        if (
            len(word[r1_start:]) >= 2
            and len(word) >= 6
            and word[-3] in self._st_endings
        ):
            word = word[:-2]

    # Step 3
    if word[-4:] == 'isch':
        if len(word[r2_start:]) >= 4 and word[-5] != 'e':
            word = word[:-4]
    elif word[-4:] in {'lich', 'heit'}:
        if len(word[r2_start:]) >= 4:
            word = word[:-4]
            if word[-2:] in {'er', 'en'} and len(word[r1_start:]) >= 2:
                word = word[:-2]
    elif word[-4:] == 'keit':
        if len(word[r2_start:]) >= 4:
            word = word[:-4]
            if word[-4:] == 'lich' and len(word[r2_start:]) >= 4:
                word = word[:-4]
            elif word[-2:] == 'ig' and len(word[r2_start:]) >= 2:
                word = word[:-2]
    elif word[-3:] in {'end', 'ung'}:
        if len(word[r2_start:]) >= 3:
            word = word[:-3]
            if (
                word[-2:] == 'ig'
                and len(word[r2_start:]) >= 2
                and word[-3] != 'e'
            ):
                word = word[:-2]
    elif word[-2:] in {'ig', 'ik'}:
        if len(word[r2_start:]) >= 2 and word[-3] != 'e':
            word = word[:-2]

    # Change 'Y' and 'U' back to lowercase if survived stemming
    for i in range(0, len(word)):
        if word[i] == 'Y':
            word = word[:i] + 'y' + word[i + 1 :]
        elif word[i] == 'U':
            word = word[:i] + 'u' + word[i + 1 :]

    # Remove umlauts
    _umlauts = dict(zip((ord(_) for _ in 'äöü'), 'aou'))
    word = word.translate(_umlauts)

    return word
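# The 'U'/'Y' uppercasing pass above temporarily marks u and y that sit
# between two vowels so the suffix rules treat them as consonants; the loop
# near the end lowercases any that survive stemming. A minimal sketch of
# the marking pass, assuming the German vowel set a e i o u y ä ö ü:

_GERMAN_VOWELS = set('aeiouyäöü')


def mark_consonantal_uy(word):
    chars = list(word)
    for i in range(2, len(chars)):
        if chars[i] in _GERMAN_VOWELS and chars[i - 2] in _GERMAN_VOWELS:
            if chars[i - 1] == 'u':
                chars[i - 1] = 'U'
            elif chars[i - 1] == 'y':
                chars[i - 1] = 'Y'
    return ''.join(chars)


# the 'u' in 'bauen' is flanked by vowels, so it is protected
assert mark_consonantal_uy('bauen') == 'baUen'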
def dist_abs(self, src, tar, max_offset=5):
    """Return the "simplest" Sift4 distance between two terms.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    max_offset : int
        The number of characters to search for matching letters

    Returns
    -------
    int
        The Sift4 distance according to the simplest formula

    Examples
    --------
    >>> cmp = Sift4Simplest()
    >>> cmp.dist_abs('cat', 'hat')
    1
    >>> cmp.dist_abs('Niall', 'Neil')
    2
    >>> cmp.dist_abs('Colin', 'Cuilen')
    3
    >>> cmp.dist_abs('ATCG', 'TAGC')
    2

    """
    if not src:
        return len(tar)

    if not tar:
        return len(src)

    src_len = len(src)
    tar_len = len(tar)

    src_cur = 0
    tar_cur = 0
    lcss = 0
    local_cs = 0

    while (src_cur < src_len) and (tar_cur < tar_len):
        if src[src_cur] == tar[tar_cur]:
            local_cs += 1
        else:
            lcss += local_cs
            local_cs = 0
            if src_cur != tar_cur:
                src_cur = tar_cur = max(src_cur, tar_cur)
            for i in range(max_offset):
                if not (
                    (src_cur + i < src_len) or (tar_cur + i < tar_len)
                ):
                    break
                if (src_cur + i < src_len) and (
                    src[src_cur + i] == tar[tar_cur]
                ):
                    src_cur += i
                    local_cs += 1
                    break
                if (tar_cur + i < tar_len) and (
                    src[src_cur] == tar[tar_cur + i]
                ):
                    tar_cur += i
                    local_cs += 1
                    break

        src_cur += 1
        tar_cur += 1

    lcss += local_cs
    return round(max(src_len, tar_len) - lcss)
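# Sift4 approximates edit distance in linear time: two cursors advance in
# lock-step, runs of matches accumulate in `local_cs` toward an LCSS-like
# total (`lcss`), and on a mismatch the loop searches up to `max_offset`
# characters ahead on either side before giving up. The result is
# max(len(src), len(tar)) - lcss, so shrinking the window can only raise
# the distance. A usage sketch reusing the docstring examples:

cmp = Sift4Simplest()
assert cmp.dist_abs('Niall', 'Neil') == 2
assert cmp.dist_abs('ATCG', 'TAGC') == 2
# with no real lookahead the transposed pairs in 'ATCG'/'TAGC' never match
assert cmp.dist_abs('ATCG', 'TAGC', max_offset=1) == 4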
def typo(src, tar, metric='euclidean', cost=(1, 1, 0.5, 0.5), layout='QWERTY'):
    """Return the typo distance between two strings.

    This is a wrapper for :py:meth:`Typo.dist_abs`.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    metric : str
        Supported values include: ``euclidean``, ``manhattan``,
        ``log-euclidean``, and ``log-manhattan``
    cost : tuple
        A 4-tuple representing the cost of the four possible edits: inserts,
        deletes, substitutions, and shift, respectively (by default:
        (1, 1, 0.5, 0.5)). The substitution & shift costs should be
        significantly less than the cost of an insertion & deletion unless a
        log metric is used.
    layout : str
        Name of the keyboard layout to use (Currently supported:
        ``QWERTY``, ``Dvorak``, ``AZERTY``, ``QWERTZ``)

    Returns
    -------
    float
        Typo distance

    Examples
    --------
    >>> typo('cat', 'hat')
    1.5811388
    >>> typo('Niall', 'Neil')
    2.8251407
    >>> typo('Colin', 'Cuilen')
    3.4142137
    >>> typo('ATCG', 'TAGC')
    2.5
    >>> typo('cat', 'hat', metric='manhattan')
    2.0
    >>> typo('Niall', 'Neil', metric='manhattan')
    3.0
    >>> typo('Colin', 'Cuilen', metric='manhattan')
    3.5
    >>> typo('ATCG', 'TAGC', metric='manhattan')
    2.5
    >>> typo('cat', 'hat', metric='log-manhattan')
    0.804719
    >>> typo('Niall', 'Neil', metric='log-manhattan')
    2.2424533
    >>> typo('Colin', 'Cuilen', metric='log-manhattan')
    2.2424533
    >>> typo('ATCG', 'TAGC', metric='log-manhattan')
    2.3465736

    """
    return Typo().dist_abs(src, tar, metric, cost, layout)
def dist_typo(
    src, tar, metric='euclidean', cost=(1, 1, 0.5, 0.5), layout='QWERTY'
):
    """Return the normalized typo distance between two strings.

    This is a wrapper for :py:meth:`Typo.dist`.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    metric : str
        Supported values include: ``euclidean``, ``manhattan``,
        ``log-euclidean``, and ``log-manhattan``
    cost : tuple
        A 4-tuple representing the cost of the four possible edits: inserts,
        deletes, substitutions, and shift, respectively (by default:
        (1, 1, 0.5, 0.5)). The substitution & shift costs should be
        significantly less than the cost of an insertion & deletion unless a
        log metric is used.
    layout : str
        Name of the keyboard layout to use (Currently supported:
        ``QWERTY``, ``Dvorak``, ``AZERTY``, ``QWERTZ``)

    Returns
    -------
    float
        Normalized typo distance

    Examples
    --------
    >>> round(dist_typo('cat', 'hat'), 12)
    0.527046283086
    >>> round(dist_typo('Niall', 'Neil'), 12)
    0.565028142929
    >>> round(dist_typo('Colin', 'Cuilen'), 12)
    0.569035609563
    >>> dist_typo('ATCG', 'TAGC')
    0.625

    """
    return Typo().dist(src, tar, metric, cost, layout)
def sim_typo(
    src, tar, metric='euclidean', cost=(1, 1, 0.5, 0.5), layout='QWERTY'
):
    """Return the normalized typo similarity between two strings.

    This is a wrapper for :py:meth:`Typo.sim`.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    metric : str
        Supported values include: ``euclidean``, ``manhattan``,
        ``log-euclidean``, and ``log-manhattan``
    cost : tuple
        A 4-tuple representing the cost of the four possible edits: inserts,
        deletes, substitutions, and shift, respectively (by default:
        (1, 1, 0.5, 0.5)). The substitution & shift costs should be
        significantly less than the cost of an insertion & deletion unless a
        log metric is used.
    layout : str
        Name of the keyboard layout to use (Currently supported:
        ``QWERTY``, ``Dvorak``, ``AZERTY``, ``QWERTZ``)

    Returns
    -------
    float
        Normalized typo similarity

    Examples
    --------
    >>> round(sim_typo('cat', 'hat'), 12)
    0.472953716914
    >>> round(sim_typo('Niall', 'Neil'), 12)
    0.434971857071
    >>> round(sim_typo('Colin', 'Cuilen'), 12)
    0.430964390437
    >>> sim_typo('ATCG', 'TAGC')
    0.375

    """
    return Typo().sim(src, tar, metric, cost, layout)
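# dist_typo and sim_typo are complements (sim = 1 - dist), as the docstring
# values show: 0.527046... + 0.472953... = 1. A quick check:

for pair in [('cat', 'hat'), ('Niall', 'Neil'), ('ATCG', 'TAGC')]:
    assert abs(dist_typo(*pair) + sim_typo(*pair) - 1.0) < 1e-12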
def dist_abs(
    self,
    src,
    tar,
    metric='euclidean',
    cost=(1, 1, 0.5, 0.5),
    layout='QWERTY',
):
    """Return the typo distance between two strings.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    metric : str
        Supported values include: ``euclidean``, ``manhattan``,
        ``log-euclidean``, and ``log-manhattan``
    cost : tuple
        A 4-tuple representing the cost of the four possible edits: inserts,
        deletes, substitutions, and shift, respectively (by default:
        (1, 1, 0.5, 0.5)). The substitution & shift costs should be
        significantly less than the cost of an insertion & deletion unless a
        log metric is used.
    layout : str
        Name of the keyboard layout to use (Currently supported:
        ``QWERTY``, ``Dvorak``, ``AZERTY``, ``QWERTZ``)

    Returns
    -------
    float
        Typo distance

    Raises
    ------
    ValueError
        char not found in any keyboard layouts

    Examples
    --------
    >>> cmp = Typo()
    >>> cmp.dist_abs('cat', 'hat')
    1.5811388
    >>> cmp.dist_abs('Niall', 'Neil')
    2.8251407
    >>> cmp.dist_abs('Colin', 'Cuilen')
    3.4142137
    >>> cmp.dist_abs('ATCG', 'TAGC')
    2.5
    >>> cmp.dist_abs('cat', 'hat', metric='manhattan')
    2.0
    >>> cmp.dist_abs('Niall', 'Neil', metric='manhattan')
    3.0
    >>> cmp.dist_abs('Colin', 'Cuilen', metric='manhattan')
    3.5
    >>> cmp.dist_abs('ATCG', 'TAGC', metric='manhattan')
    2.5
    >>> cmp.dist_abs('cat', 'hat', metric='log-manhattan')
    0.804719
    >>> cmp.dist_abs('Niall', 'Neil', metric='log-manhattan')
    2.2424533
    >>> cmp.dist_abs('Colin', 'Cuilen', metric='log-manhattan')
    2.2424533
    >>> cmp.dist_abs('ATCG', 'TAGC', metric='log-manhattan')
    2.3465736

    """
    ins_cost, del_cost, sub_cost, shift_cost = cost

    if src == tar:
        return 0.0
    if not src:
        return len(tar) * ins_cost
    if not tar:
        return len(src) * del_cost

    keyboard = self._keyboard[layout]
    lowercase = {item for sublist in keyboard[0] for item in sublist}
    uppercase = {item for sublist in keyboard[1] for item in sublist}

    def _kb_array_for_char(char):
        """Return the keyboard layout that contains char.

        Parameters
        ----------
        char : str
            The character to look up

        Returns
        -------
        tuple
            A keyboard

        Raises
        ------
        ValueError
            char not found in any keyboard layouts

        """
        if char in lowercase:
            return keyboard[0]
        elif char in uppercase:
            return keyboard[1]
        raise ValueError(char + ' not found in any keyboard layouts')

    def _substitution_cost(char1, char2):
        cost = sub_cost
        cost *= metric_dict[metric](char1, char2) + shift_cost * (
            _kb_array_for_char(char1) != _kb_array_for_char(char2)
        )
        return cost

    def _get_char_coord(char, kb_array):
        """Return the row & column of char in the keyboard.

        Parameters
        ----------
        char : str
            The character to search for
        kb_array : tuple of tuples
            The array of key positions

        Returns
        -------
        tuple
            The row & column of the key

        """
        for row in kb_array:  # pragma: no branch
            if char in row:
                return kb_array.index(row), row.index(char)

    def _euclidean_keyboard_distance(char1, char2):
        row1, col1 = _get_char_coord(char1, _kb_array_for_char(char1))
        row2, col2 = _get_char_coord(char2, _kb_array_for_char(char2))
        return ((row1 - row2) ** 2 + (col1 - col2) ** 2) ** 0.5

    def _manhattan_keyboard_distance(char1, char2):
        row1, col1 = _get_char_coord(char1, _kb_array_for_char(char1))
        row2, col2 = _get_char_coord(char2, _kb_array_for_char(char2))
        return abs(row1 - row2) + abs(col1 - col2)

    def _log_euclidean_keyboard_distance(char1, char2):
        return log(1 + _euclidean_keyboard_distance(char1, char2))

    def _log_manhattan_keyboard_distance(char1, char2):
        return log(1 + _manhattan_keyboard_distance(char1, char2))

    metric_dict = {
        'euclidean': _euclidean_keyboard_distance,
        'manhattan': _manhattan_keyboard_distance,
        'log-euclidean': _log_euclidean_keyboard_distance,
        'log-manhattan': _log_manhattan_keyboard_distance,
    }

    d_mat = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_float32)
    for i in range(len(src) + 1):
        d_mat[i, 0] = i * del_cost
    for j in range(len(tar) + 1):
        d_mat[0, j] = j * ins_cost

    for i in range(len(src)):
        for j in range(len(tar)):
            d_mat[i + 1, j + 1] = min(
                d_mat[i + 1, j] + ins_cost,  # ins
                d_mat[i, j + 1] + del_cost,  # del
                d_mat[i, j]
                + (
                    _substitution_cost(src[i], tar[j])
                    if src[i] != tar[j]
                    else 0
                ),  # sub/==
            )

    return d_mat[len(src), len(tar)]
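# The substitution cost above is the geometric distance between the two
# keys, plus a flat shift penalty when one character is on the shifted
# layer and the other is not. A self-contained sketch of the coordinate
# lookup and Euclidean metric on a simplified lowercase QWERTY grid (three
# letter rows only; the real layout tuples live in `self._keyboard`):

QWERTY_LETTER_ROWS = ('qwertyuiop', 'asdfghjkl', 'zxcvbnm')


def char_coord_sketch(char):
    for r, row in enumerate(QWERTY_LETTER_ROWS):
        if char in row:
            return r, row.index(char)
    raise ValueError(char + ' not found in any keyboard layouts')


def euclidean_key_dist_sketch(ch1, ch2):
    row1, col1 = char_coord_sketch(ch1)
    row2, col2 = char_coord_sketch(ch2)
    return ((row1 - row2) ** 2 + (col1 - col2) ** 2) ** 0.5


# 'c' and 'h' are one row and three columns apart, giving sqrt(10);
# halved by the default sub_cost of 0.5, this is the 1.5811388 that
# dist_abs('cat', 'hat') reports in the docstring above
assert round(0.5 * euclidean_key_dist_sketch('c', 'h'), 7) == 1.5811388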
def dist(
    self,
    src,
    tar,
    metric='euclidean',
    cost=(1, 1, 0.5, 0.5),
    layout='QWERTY',
):
    """Return the normalized typo distance between two strings.

    This is typo distance, normalized to [0, 1].

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    metric : str
        Supported values include: ``euclidean``, ``manhattan``,
        ``log-euclidean``, and ``log-manhattan``
    cost : tuple
        A 4-tuple representing the cost of the four possible edits: inserts,
        deletes, substitutions, and shift, respectively (by default:
        (1, 1, 0.5, 0.5)). The substitution & shift costs should be
        significantly less than the cost of an insertion & deletion unless a
        log metric is used.
    layout : str
        Name of the keyboard layout to use (Currently supported:
        ``QWERTY``, ``Dvorak``, ``AZERTY``, ``QWERTZ``)

    Returns
    -------
    float
        Normalized typo distance

    Examples
    --------
    >>> cmp = Typo()
    >>> round(cmp.dist('cat', 'hat'), 12)
    0.527046283086
    >>> round(cmp.dist('Niall', 'Neil'), 12)
    0.565028142929
    >>> round(cmp.dist('Colin', 'Cuilen'), 12)
    0.569035609563
    >>> cmp.dist('ATCG', 'TAGC')
    0.625

    """
    if src == tar:
        return 0.0
    ins_cost, del_cost = cost[:2]
    return self.dist_abs(src, tar, metric, cost, layout) / (
        max(len(src) * del_cost, len(tar) * ins_cost)
    )
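# The normalization above divides the absolute distance by the costliest
# trivial path, max(len(src) * del_cost, len(tar) * ins_cost). With the
# default costs this reproduces the docstring value for 'cat'/'hat':
# 1.5811388 / max(3 * 1, 3 * 1) ~= 0.527046283086. A quick consistency
# check:

cmp = Typo()
assert abs(cmp.dist('cat', 'hat') - cmp.dist_abs('cat', 'hat') / 3) < 1e-12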