repository_name (string, 5-67 chars) | func_path_in_repository (string, 4-234 chars) | func_name (string, 0-314 chars) | whole_func_string (string, 52-3.87M chars) | language (string, 6 classes) | func_code_string (string, 52-3.87M chars) | func_documentation_string (string, 1-47.2k chars) | func_code_url (string, 85-339 chars) |
---|---|---|---|---|---|---|---|
estnltk/estnltk | estnltk/mw_verbs/basic_verbchain_detection.py | _suitableVerbExpansion | def _suitableVerbExpansion( foundSubcatChain ):
'''
V6tab etteantud jadast osa, mis sobib:
*) kui liikmeid on 3, keskmine on konjuktsioon ning esimene ja viimane
klapivad, tagastab selle kolmiku;
Nt. ei_0 saa_0 lihtsalt välja astuda_? ja_? uttu tõmmata_?
=> astuda ja tõmmata
*) kui liikmeid on rohkem kui 3, teine on konjuktsioon ning esimene ja
kolmas klapivad, ning l6pus pole verbe, tagastab esikolmiku;
*) kui liikmeid on rohkem kui yks, v6tab liikmeks esimese mitte-
konjunktsiooni (kui selline leidub);
Kui need tingimused pole t2idetud, tagastab tyhis6ne;
'''
markings = []
tokens = []
nonConjTokens = []
for (marking, token) in foundSubcatChain:
markings.append( marking )
tokens.append( token )
if marking != '&':
nonConjTokens.append( token )
if (len(markings) == 3 and markings[0]==markings[2] and markings[0]!='&' and markings[1]=='&'):
return tokens
elif (len(markings) > 3 and markings[0]==markings[2] and markings[0]!='&' and markings[1]=='&' and \
all([m == '&' for m in markings[3:]]) ):
return tokens[:3]
elif (len(nonConjTokens) > 0):
return nonConjTokens[:1]
return [] | python | def _suitableVerbExpansion( foundSubcatChain ):
'''
V6tab etteantud jadast osa, mis sobib:
*) kui liikmeid on 3, keskmine on konjuktsioon ning esimene ja viimane
klapivad, tagastab selle kolmiku;
Nt. ei_0 saa_0 lihtsalt välja astuda_? ja_? uttu tõmmata_?
=> astuda ja tõmmata
*) kui liikmeid on rohkem kui 3, teine on konjuktsioon ning esimene ja
kolmas klapivad, ning l6pus pole verbe, tagastab esikolmiku;
*) kui liikmeid on rohkem kui yks, v6tab liikmeks esimese mitte-
konjunktsiooni (kui selline leidub);
Kui need tingimused pole t2idetud, tagastab tyhis6ne;
'''
markings = []
tokens = []
nonConjTokens = []
for (marking, token) in foundSubcatChain:
markings.append( marking )
tokens.append( token )
if marking != '&':
nonConjTokens.append( token )
if (len(markings) == 3 and markings[0]==markings[2] and markings[0]!='&' and markings[1]=='&'):
return tokens
elif (len(markings) > 3 and markings[0]==markings[2] and markings[0]!='&' and markings[1]=='&' and \
all([m == '&' for m in markings[3:]]) ):
return tokens[:3]
elif (len(nonConjTokens) > 0):
return nonConjTokens[:1]
return [] | V6tab etteantud jadast osa, mis sobib:
*) kui liikmeid on 3, keskmine on konjuktsioon ning esimene ja viimane
klapivad, tagastab selle kolmiku;
Nt. ei_0 saa_0 lihtsalt välja astuda_? ja_? uttu tõmmata_?
=> astuda ja tõmmata
*) kui liikmeid on rohkem kui 3, teine on konjuktsioon ning esimene ja
kolmas klapivad, ning l6pus pole verbe, tagastab esikolmiku;
*) kui liikmeid on rohkem kui yks, v6tab liikmeks esimese mitte-
konjunktsiooni (kui selline leidub);
Kui need tingimused pole t2idetud, tagastab tyhis6ne; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/basic_verbchain_detection.py#L1052-L1080 |
estnltk/estnltk | estnltk/mw_verbs/basic_verbchain_detection.py | _expandSaamaWithTud | def _expandSaamaWithTud( clauseTokens, clauseID, foundChains ):
'''
Meetod, mis määrab spetsiifilised rektsiooniseosed: täiendab 'saama'-verbiga lõppevaid
verbijadasid, lisades (v6imalusel) nende l6ppu 'tud'-infiniitverbi
(nt. sai tehtud, sai käidud ujumas);
Vastavalt leitud laiendustele t2iendab andmeid sisendlistis foundChains;
'''
verbTud = WordTemplate({POSTAG:'V', FORM:'^(tud|dud)$'})
verb = WordTemplate({POSTAG:'V'})
verbOlema = WordTemplate({POSTAG:'V', ROOT:'^(ole)$'})
for verbObj in foundChains:
# Leiame, kas fraas kuulub antud osalausesse ning on laiendatav
if _isVerbExpansible(verbObj, clauseTokens, clauseID):
lastVerbWID = verbObj[PHRASE][-1]
lastToken = [token for token in clauseTokens if token[WORD_ID] == lastVerbWID]
lastIndex = [i for i in range(len(clauseTokens)) if clauseTokens[i][WORD_ID] == lastVerbWID]
lastToken = lastToken[0]
lastIndex = lastIndex[0]
mainVerb = [analysis[ROOT] for analysis in verb.matchingAnalyses(lastToken)]
mainVerbLemma = mainVerb[0]
# Leiame, kas tegemist on 'saama' verbiga
if mainVerbLemma == 'saa':
#
# Saama + 'tud', lubame eraldada verbiahelana vaid siis, kui:
# *) 'tud' on osalause l6pus ning vahel pole punktuatsioonim2rke, nt:
# Kord sai_0 laadalt isegi aprikoosipuu koduaeda viidud_0 .
# *) 'saama' on osalause l6pus ning vahetult eelneb 'tud', nt:
# Ja et see vajaduse korral avalikustatud_1 saaks_1 .
#
expansion = None
if not _isClauseFinal(lastVerbWID, clauseTokens ):
for i in range(lastIndex + 1, len(clauseTokens)):
token = clauseTokens[i]
tokenWID = token[WORD_ID]
if verbTud.matches(token) and _isClauseFinal(tokenWID, clauseTokens ) and \
not _isSeparatedByPossibleClauseBreakers( clauseTokens, verbObj[PHRASE][-1], tokenWID, True, True, False):
expansion = token
break
elif lastIndex-1 > -1:
if verbTud.matches(clauseTokens[lastIndex-1]):
expansion = clauseTokens[lastIndex-1]
if expansion:
tokenWID = expansion[WORD_ID]
verbObj[PHRASE].append( tokenWID )
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( expansion, verbTud ) )
if verbOlema.matches(expansion):
verbObj[PATTERN].append('ole')
else:
verbObj[PATTERN].append('verb') | python | def _expandSaamaWithTud( clauseTokens, clauseID, foundChains ):
'''
Meetod, mis määrab spetsiifilised rektsiooniseosed: täiendab 'saama'-verbiga lõppevaid
verbijadasid, lisades (v6imalusel) nende l6ppu 'tud'-infiniitverbi
(nt. sai tehtud, sai käidud ujumas);
Vastavalt leitud laiendustele t2iendab andmeid sisendlistis foundChains;
'''
verbTud = WordTemplate({POSTAG:'V', FORM:'^(tud|dud)$'})
verb = WordTemplate({POSTAG:'V'})
verbOlema = WordTemplate({POSTAG:'V', ROOT:'^(ole)$'})
for verbObj in foundChains:
# Leiame, kas fraas kuulub antud osalausesse ning on laiendatav
if _isVerbExpansible(verbObj, clauseTokens, clauseID):
lastVerbWID = verbObj[PHRASE][-1]
lastToken = [token for token in clauseTokens if token[WORD_ID] == lastVerbWID]
lastIndex = [i for i in range(len(clauseTokens)) if clauseTokens[i][WORD_ID] == lastVerbWID]
lastToken = lastToken[0]
lastIndex = lastIndex[0]
mainVerb = [analysis[ROOT] for analysis in verb.matchingAnalyses(lastToken)]
mainVerbLemma = mainVerb[0]
# Leiame, kas tegemist on 'saama' verbiga
if mainVerbLemma == 'saa':
#
# Saama + 'tud', lubame eraldada verbiahelana vaid siis, kui:
# *) 'tud' on osalause l6pus ning vahel pole punktuatsioonim2rke, nt:
# Kord sai_0 laadalt isegi aprikoosipuu koduaeda viidud_0 .
# *) 'saama' on osalause l6pus ning vahetult eelneb 'tud', nt:
# Ja et see vajaduse korral avalikustatud_1 saaks_1 .
#
expansion = None
if not _isClauseFinal(lastVerbWID, clauseTokens ):
for i in range(lastIndex + 1, len(clauseTokens)):
token = clauseTokens[i]
tokenWID = token[WORD_ID]
if verbTud.matches(token) and _isClauseFinal(tokenWID, clauseTokens ) and \
not _isSeparatedByPossibleClauseBreakers( clauseTokens, verbObj[PHRASE][-1], tokenWID, True, True, False):
expansion = token
break
elif lastIndex-1 > -1:
if verbTud.matches(clauseTokens[lastIndex-1]):
expansion = clauseTokens[lastIndex-1]
if expansion:
tokenWID = expansion[WORD_ID]
verbObj[PHRASE].append( tokenWID )
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( expansion, verbTud ) )
if verbOlema.matches(expansion):
verbObj[PATTERN].append('ole')
else:
verbObj[PATTERN].append('verb') | Meetod, mis määrab spetsiifilised rektsiooniseosed: täiendab 'saama'-verbiga lõppevaid
verbijadasid, lisades (v6imalusel) nende l6ppu 'tud'-infiniitverbi
(nt. sai tehtud, sai käidud ujumas);
Vastavalt leitud laiendustele t2iendab andmeid sisendlistis foundChains; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/basic_verbchain_detection.py#L1082-L1130 |
estnltk/estnltk | estnltk/mw_verbs/basic_verbchain_detection.py | _expandVerbChainsBySubcat | def _expandVerbChainsBySubcat( clauseTokens, clauseID, foundChains, verbSubcat, \
skipQuestionable=False, \
breakOnPunctuation=True ):
'''
Meetod, mis proovib laiendada (mitte-'olema') verbidega l6ppevaid predikaadifraase,
lisades nende lõppu rektsiooniseoste järgi uusi infiniitverbe,
nt "kutsub" + "langetama"
"püütakse" + "keelustada" "või" "takistada"
"ei julgenud" + "arvata",
"ei hakka" + "tülitama";
Sisend 'clauseTokens' on list, mis sisaldab yhe osalause k6iki s6nu (pyvabamorfi poolt
tehtud s6na-analyyse); Sisend 'verbSubcat' sisaldab andmeid verb-infiniitverb
rektsiooniseoste kohta;
Tulemusena t2iendatakse olemasolevat verbijadade listi (foundChains), pikendades seal
olevaid verbiga lõppevaid fraase, millal võimalik;
'''
global _breakerJaNingEgaVoi, _breakerKomaLopus, _breakerPunktuats
verb = WordTemplate({POSTAG:'V'})
verbInf1 = WordTemplate({POSTAG:'V', FORM:'^(da|ma|maks|mas|mast|mata)$'})
verbOlema = WordTemplate({POSTAG:'V', ROOT:'^(ole)$'})
sonaMitte = WordTemplate({ROOT:'^mitte$',POSTAG:'D'})
# J22dvustame s6nad, mis kuuluvad juba mingi tuvastatud verbifraasi koosseisu
annotatedWords = []
for verbObj in foundChains:
if (len(verbObj[PATTERN])==1 and re.match('^(ei|ära|ega)$', verbObj[PATTERN][0])):
# V2lja j22vad yksikuna esinevad ei/ära/ega, kuna need tõenäoliselt ei sega
continue
annotatedWords.extend( verbObj[PHRASE] )
# Leiame, millised verbid on veel vabad (st v6ivad potentsiaalselt liituda)
freeVerbsWIDs = [t[WORD_ID] for t in clauseTokens if verbInf1.matches(t) and t[WORD_ID] not in annotatedWords]
for verbObj in foundChains:
# Leiame, kas fraas kuulub antud osalausesse ning on laiendatav
if _isVerbExpansible(verbObj, clauseTokens, clauseID):
# Leiame viimasele s6nale vastava token'i, selle lemma ja vormitunnuse
lastToken = [token for token in clauseTokens if token[WORD_ID] == verbObj[PHRASE][-1]]
lastIndex = [i for i in range(len(clauseTokens)) if clauseTokens[i][WORD_ID] == verbObj[PHRASE][-1]]
lastToken = lastToken[0]
lastIndex = lastIndex[0]
mainVerb = [(analysis[ROOT], analysis[FORM]) for analysis in verb.matchingAnalyses(lastToken)]
mainVerbLemma = mainVerb[0][0]
mainVerbForm = mainVerb[0][1]
positivePhrase = (verbObj[POLARITY] == 'POS')
egaPhrase = (verbObj[PATTERN][0] == 'ega')
# Teeme kindlaks, kas verbi lemma on ylesm2rgitud rektsiooniseoste leksikoni
if mainVerbLemma in verbSubcat:
subcatForms = verbSubcat[ mainVerbLemma ]
for subcatForm in subcatForms:
foundSubcatChain = []
addingCompleted = False
# Kui on tegu vaieldava rektsiooniseosega: j2tame vahele v6i, kui vaieldavad
# on lubatud, eemaldame vaieldavuse m2rgi
if re.match("^.+\?$", subcatForm):
if skipQuestionable:
continue
else:
subcatForm = subcatForm.replace('?', '')
#
# 1) Otsime sobivat verbi v6i verbifraasi s6na tagant, osalause teisest poolest
#
j = lastIndex + 1
while (j < len(clauseTokens)):
token = clauseTokens[j]
tokenWID = token[WORD_ID]
# Katkestame kui:
# *) satume juba m2rgendatud s6nale;
# *) satume punktuatsioonile;
if tokenWID in annotatedWords:
break
if breakOnPunctuation and _breakerPunktuats.matches(token):
break
# Lisame kui:
# *) satume konjunktsioonile;
# *) satume sobivas vormis verbile;
if _breakerJaNingEgaVoi.matches(token):
foundSubcatChain.append(('&', token))
if verb.matches(token):
tokenForms = [analysis[FORM] for analysis in verb.matchingAnalyses(token)]
if subcatForm in tokenForms:
foundSubcatChain.append( (subcatForm, token) )
# Katkestame kui:
# *) satume komale (kuna koma v6ib kinnituda sobiva s6na l6ppu);
if _breakerKomaLopus.matches(token):
break
j += 1
#
# Kui osalause teisest poolest midagi ei leidnud, vaatame
# osalause esimest poolt:
#
# 2) Otsime sobivat verbi v6i verbifraasi vahetult s6na algusest
# (seda vaid siis, kui tegemist pole nö 'ega'-verbifraasiga -
# nondele midagi eelneda ei saagi);
# ( NB! 'ega' fraaside puhul tuleks tegelikult ka tagasi vaadata,
# aga ainult verbi ja 'ega' vahele, ja mitte mingil juhul
# 'ega'-st ettepoole );
#
if not _suitableVerbExpansion( foundSubcatChain ) and not egaPhrase:
minWid = min( verbObj[PHRASE] )
j = lastIndex - 1
while (j > -1):
token = clauseTokens[j]
tokenWID = token[WORD_ID]
# Katkestame kui:
# *) satume juba m2rgendatud s6nale (mis pole sellest fraasist);
# *) satume komale v6i muule punktuatsioonile;
# *) satume s6nale, mis on k6ige esimesest fraasiliikmest tagapool kui 2 s6na;
if tokenWID in annotatedWords and tokenWID not in verbObj[PHRASE]:
break
if _breakerKomaLopus.matches(token) or (breakOnPunctuation and _breakerPunktuats.matches(token)):
break
if token[WORD_ID]+1 < minWid:
break
# Lisame kui:
# *) satume konjunktsioonile;
# *) satume sobivas vormis verbile;
if _breakerJaNingEgaVoi.matches(token):
foundSubcatChain.append(('&', token))
if verb.matches(token):
tokenForms = [analysis[FORM] for analysis in verb.matchingAnalyses(token)]
if subcatForm in tokenForms:
foundSubcatChain.append( (subcatForm, token) )
j -= 1
suitablePhrase = _suitableVerbExpansion( foundSubcatChain )
if suitablePhrase:
#
# Kui sobiv fraasikandidaat leidus, teostamine liitmise
#
for token in suitablePhrase:
tokenWID = token[WORD_ID]
verbObj[PHRASE].append( tokenWID )
annotatedWords.append( tokenWID )
if _breakerJaNingEgaVoi.matches(token):
verbObj[PATTERN].append('&')
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( token, _breakerJaNingEgaVoi ) )
elif len(suitablePhrase) == 1 and verbOlema.matches(token):
verbObj[PATTERN].append('ole')
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( token, verbOlema ) )
freeVerbsWIDs.remove( tokenWID )
else:
verbObj[PATTERN].append('verb')
analysisIDs = [i for i in range(len(token[ANALYSIS])) if subcatForm == token[ANALYSIS][i][FORM]]
assert len(analysisIDs) > 0
verbObj[ANALYSIS_IDS].append( analysisIDs )
freeVerbsWIDs.remove( tokenWID )
if not freeVerbsWIDs:
verbObj[OTHER_VERBS] = False
addingCompleted = True
if addingCompleted:
break | python | def _expandVerbChainsBySubcat( clauseTokens, clauseID, foundChains, verbSubcat, \
skipQuestionable=False, \
breakOnPunctuation=True ):
'''
Meetod, mis proovib laiendada (mitte-'olema') verbidega l6ppevaid predikaadifraase,
lisades nende lõppu rektsiooniseoste järgi uusi infiniitverbe,
nt "kutsub" + "langetama"
"püütakse" + "keelustada" "või" "takistada"
"ei julgenud" + "arvata",
"ei hakka" + "tülitama";
Sisend 'clauseTokens' on list, mis sisaldab yhe osalause k6iki s6nu (pyvabamorfi poolt
tehtud s6na-analyyse); Sisend 'verbSubcat' sisaldab andmeid verb-infiniitverb
rektsiooniseoste kohta;
Tulemusena t2iendatakse olemasolevat verbijadade listi (foundChains), pikendades seal
olevaid verbiga lõppevaid fraase, millal võimalik;
'''
global _breakerJaNingEgaVoi, _breakerKomaLopus, _breakerPunktuats
verb = WordTemplate({POSTAG:'V'})
verbInf1 = WordTemplate({POSTAG:'V', FORM:'^(da|ma|maks|mas|mast|mata)$'})
verbOlema = WordTemplate({POSTAG:'V', ROOT:'^(ole)$'})
sonaMitte = WordTemplate({ROOT:'^mitte$',POSTAG:'D'})
# J22dvustame s6nad, mis kuuluvad juba mingi tuvastatud verbifraasi koosseisu
annotatedWords = []
for verbObj in foundChains:
if (len(verbObj[PATTERN])==1 and re.match('^(ei|ära|ega)$', verbObj[PATTERN][0])):
# V2lja j22vad yksikuna esinevad ei/ära/ega, kuna need tõenäoliselt ei sega
continue
annotatedWords.extend( verbObj[PHRASE] )
# Leiame, millised verbid on veel vabad (st v6ivad potentsiaalselt liituda)
freeVerbsWIDs = [t[WORD_ID] for t in clauseTokens if verbInf1.matches(t) and t[WORD_ID] not in annotatedWords]
for verbObj in foundChains:
# Leiame, kas fraas kuulub antud osalausesse ning on laiendatav
if _isVerbExpansible(verbObj, clauseTokens, clauseID):
# Leiame viimasele s6nale vastava token'i, selle lemma ja vormitunnuse
lastToken = [token for token in clauseTokens if token[WORD_ID] == verbObj[PHRASE][-1]]
lastIndex = [i for i in range(len(clauseTokens)) if clauseTokens[i][WORD_ID] == verbObj[PHRASE][-1]]
lastToken = lastToken[0]
lastIndex = lastIndex[0]
mainVerb = [(analysis[ROOT], analysis[FORM]) for analysis in verb.matchingAnalyses(lastToken)]
mainVerbLemma = mainVerb[0][0]
mainVerbForm = mainVerb[0][1]
positivePhrase = (verbObj[POLARITY] == 'POS')
egaPhrase = (verbObj[PATTERN][0] == 'ega')
# Teeme kindlaks, kas verbi lemma on ylesm2rgitud rektsiooniseoste leksikoni
if mainVerbLemma in verbSubcat:
subcatForms = verbSubcat[ mainVerbLemma ]
for subcatForm in subcatForms:
foundSubcatChain = []
addingCompleted = False
# Kui on tegu vaieldava rektsiooniseosega: j2tame vahele v6i, kui vaieldavad
# on lubatud, eemaldame vaieldavuse m2rgi
if re.match("^.+\?$", subcatForm):
if skipQuestionable:
continue
else:
subcatForm = subcatForm.replace('?', '')
#
# 1) Otsime sobivat verbi v6i verbifraasi s6na tagant, osalause teisest poolest
#
j = lastIndex + 1
while (j < len(clauseTokens)):
token = clauseTokens[j]
tokenWID = token[WORD_ID]
# Katkestame kui:
# *) satume juba m2rgendatud s6nale;
# *) satume punktuatsioonile;
if tokenWID in annotatedWords:
break
if breakOnPunctuation and _breakerPunktuats.matches(token):
break
# Lisame kui:
# *) satume konjunktsioonile;
# *) satume sobivas vormis verbile;
if _breakerJaNingEgaVoi.matches(token):
foundSubcatChain.append(('&', token))
if verb.matches(token):
tokenForms = [analysis[FORM] for analysis in verb.matchingAnalyses(token)]
if subcatForm in tokenForms:
foundSubcatChain.append( (subcatForm, token) )
# Katkestame kui:
# *) satume komale (kuna koma v6ib kinnituda sobiva s6na l6ppu);
if _breakerKomaLopus.matches(token):
break
j += 1
#
# Kui osalause teisest poolest midagi ei leidnud, vaatame
# osalause esimest poolt:
#
# 2) Otsime sobivat verbi v6i verbifraasi vahetult s6na algusest
# (seda vaid siis, kui tegemist pole nö 'ega'-verbifraasiga -
# nondele midagi eelneda ei saagi);
# ( NB! 'ega' fraaside puhul tuleks tegelikult ka tagasi vaadata,
# aga ainult verbi ja 'ega' vahele, ja mitte mingil juhul
# 'ega'-st ettepoole );
#
if not _suitableVerbExpansion( foundSubcatChain ) and not egaPhrase:
minWid = min( verbObj[PHRASE] )
j = lastIndex - 1
while (j > -1):
token = clauseTokens[j]
tokenWID = token[WORD_ID]
# Katkestame kui:
# *) satume juba m2rgendatud s6nale (mis pole sellest fraasist);
# *) satume komale v6i muule punktuatsioonile;
# *) satume s6nale, mis on k6ige esimesest fraasiliikmest tagapool kui 2 s6na;
if tokenWID in annotatedWords and tokenWID not in verbObj[PHRASE]:
break
if _breakerKomaLopus.matches(token) or (breakOnPunctuation and _breakerPunktuats.matches(token)):
break
if token[WORD_ID]+1 < minWid:
break
# Lisame kui:
# *) satume konjunktsioonile;
# *) satume sobivas vormis verbile;
if _breakerJaNingEgaVoi.matches(token):
foundSubcatChain.append(('&', token))
if verb.matches(token):
tokenForms = [analysis[FORM] for analysis in verb.matchingAnalyses(token)]
if subcatForm in tokenForms:
foundSubcatChain.append( (subcatForm, token) )
j -= 1
suitablePhrase = _suitableVerbExpansion( foundSubcatChain )
if suitablePhrase:
#
# Kui sobiv fraasikandidaat leidus, teostamine liitmise
#
for token in suitablePhrase:
tokenWID = token[WORD_ID]
verbObj[PHRASE].append( tokenWID )
annotatedWords.append( tokenWID )
if _breakerJaNingEgaVoi.matches(token):
verbObj[PATTERN].append('&')
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( token, _breakerJaNingEgaVoi ) )
elif len(suitablePhrase) == 1 and verbOlema.matches(token):
verbObj[PATTERN].append('ole')
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( token, verbOlema ) )
freeVerbsWIDs.remove( tokenWID )
else:
verbObj[PATTERN].append('verb')
analysisIDs = [i for i in range(len(token[ANALYSIS])) if subcatForm == token[ANALYSIS][i][FORM]]
assert len(analysisIDs) > 0
verbObj[ANALYSIS_IDS].append( analysisIDs )
freeVerbsWIDs.remove( tokenWID )
if not freeVerbsWIDs:
verbObj[OTHER_VERBS] = False
addingCompleted = True
if addingCompleted:
break | Meetod, mis proovib laiendada (mitte-'olema') verbidega l6ppevaid predikaadifraase,
lisades nende lõppu rektsiooniseoste järgi uusi infiniitverbe,
nt "kutsub" + "langetama"
"püütakse" + "keelustada" "või" "takistada"
"ei julgenud" + "arvata",
"ei hakka" + "tülitama";
Sisend 'clauseTokens' on list, mis sisaldab yhe osalause k6iki s6nu (pyvabamorfi poolt
tehtud s6na-analyyse); Sisend 'verbSubcat' sisaldab andmeid verb-infiniitverb
rektsiooniseoste kohta;
Tulemusena t2iendatakse olemasolevat verbijadade listi (foundChains), pikendades seal
olevaid verbiga lõppevaid fraase, millal võimalik; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/basic_verbchain_detection.py#L1134-L1282 |
estnltk/estnltk | estnltk/mw_verbs/basic_verbchain_detection.py | _extractEgaNegFromSent | def _extractEgaNegFromSent( sentTokens, clausesDict, foundChains ):
''' Meetod, mis tuvastab antud lausest 'ega'-predikaadiga seotud eituse(d): ega + sobiv verb.
*) Juhtudel kui 'ega'-le j2rgneb juba tuvastatud, positiivse polaarsusega verbiahel (nt
ahel mille alguses on käskivas kõneviisis verb), liidetakse 'ega' olemasoleva ahela ette
ning muudetakse ahela polaarsus negatiivseks;
*) Muudel juhtudel otsitakse 'ega'-le j2rgnevat 'ei'-ga sobivat verbi (enamasti on selleks
'nud'-verb) ning liidetakse see (teatud heuristikute j2rgi) 'ega'-ga yheks fraasiks;
Tagastab True, kui uute 'ega' fraaside seas leidus m6ni selline, mida potentsiaalselt saaks
veel m6ne teise verbi liitmisega laiendada, muudel juhtudel tagastab False;
Miks see meetod opereerib tervel lausel, mitte yksikul osalausel?
>> Kuna 'ega' on sageli m2rgitud osalause piiriks (p2rast 'ega'-t v6ib l6ppeda osalause),
ei saa 'ega'-le j2rgnevaid verbe alati otsida yhe osalause seest, vaid tuleb vaadata
korraga mitut k6rvutiolevat osalauset; k2esolevalt lihtsustame ja vaatame tervet
lauset.
'''
sonaEga = WordTemplate({ROOT:'^ega$',POSTAG:'[DJ]'})
verbEiJarel = WordTemplate({POSTAG:'V',FORM:'(o|nud|tud|nuks|nuvat|vat|ks|ta|taks|tavat)$'})
verbEiJarel2 = WordTemplate({ROOT:'^mine$', POSTAG:'V',FORM:'neg o$'})
verbTud = WordTemplate({POSTAG:'V',FORM:'(tud)$'})
verb = WordTemplate({POSTAG:'V'})
verbOlema = WordTemplate({POSTAG:'V', ROOT:'^(ole)$'})
# J22dvustame s6nad, mis kuuluvad juba mingi tuvastatud verbifraasi koosseisu
annotatedWords = []
for verbObj in foundChains:
if (len(verbObj[PATTERN])==1 and re.match('^(ei|ära|ega)$', verbObj[PATTERN][0])):
# V2lja j22vad yksikuna esinevad ei/ära/ega, kuna need tõenäoliselt ei sega
continue
annotatedWords.extend( verbObj[PHRASE] )
expandableEgaFound = False
for i in range(len(sentTokens)):
token = sentTokens[i]
if sonaEga.matches(token) and token[WORD_ID] not in annotatedWords:
matchFound = False
if i+1 < len(sentTokens) and sentTokens[i+1][WORD_ID] in annotatedWords:
#
# K6ige lihtsam juht: eelnevalt on verbifraas juba tuvastatud (ja
# eeldatavasti maksimaalses pikkuses), seega pole teha muud, kui sellele
# ega ette panna ning polaarsuse negatiivseks muuta:
# Te saate kaebusi palju ega_0 jõua_0 nendele reageerida_0 .
# vene keelt ta ei mõista ega_0 või_0 seepärast olla_0 vene spioon
# NB! Lisamist ei teosta siiski juhtudel kui:
# *) J2rgnev fraas on juba negatiivse polaarsusega (selline laiendamine
# tekitaks lihtsalt mustreid juurde, aga sisulist infot juurde ei
# annaks);
# *) J2rgnev s6na pole 'ei'-ga yhilduv verb (t6en2oliselt on mingi jama,
# nt morf yhestamisel);
# *) J2rgnev s6na kuulub verbiahelasse, mis algab enne 'ega'-t (see viitab
# tegelikult sellele, et k6nealune verbiahel on katkiselt eraldatud);
#
for verbObj in foundChains:
if sentTokens[i+1][WORD_ID] in verbObj[PHRASE] and verbObj[POLARITY] != 'NEG' and \
(verbEiJarel.matches( sentTokens[i+1] ) or verbEiJarel2.matches( sentTokens[i+1] )) \
and i < min( verbObj[PHRASE] ):
verbObj[PHRASE].insert(0, token[WORD_ID])
verbObj[PATTERN].insert(0, 'ega')
verbObj[POLARITY] = 'NEG'
verbObj[ANALYSIS_IDS].insert(0, _getMatchingAnalysisIDs( token, sonaEga ) )
annotatedWords.append( token[WORD_ID] )
matchFound = True
break
elif i+1 < len(sentTokens) and verbEiJarel.matches( sentTokens[i+1] ) and \
sentTokens[i+1][WORD_ID] not in annotatedWords:
#
# Heuristik:
# kui 'ega'-le j2rgneb vahetult 'ei'-ga sobiv verb (peaks olema
# infiniitne nud/tud verb, kuna finiitsed leitakse ja seotakse
# t6en2oliselt eelmises harus), siis eraldame uue fraasina:
#
# Hakkasin Ainikiga rääkima ega_0 pööranud_0 Ivole enam tähelepanu .
# Tereese oli tükk aega vait ega_0 teadnud_0 , kas tõtt rääkida või mitte .
#
# >> clauseID-iks saab j2rgneva verbi ID, kuna 'ega' j2relt l2heb sageli
# osalausepiir ning ega-le eelnevad verbid kindlasti sellega seotud olla
# ei saa.
clauseID = sentTokens[i+1][CLAUSE_IDX]
wid1 = sentTokens[i][WORD_ID]
wid2 = sentTokens[i+1][WORD_ID]
verbObj = { PHRASE: [wid1, wid2], PATTERN: ["ega", "verb"] }
verbObj[CLAUSE_IDX] = clauseID
if verbOlema.matches(sentTokens[i+1]):
verbObj[PATTERN][1] = 'ole'
verbObj[POLARITY] = 'NEG'
verbObj[ANALYSIS_IDS] = []
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( sentTokens[i], sonaEga ) )
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( sentTokens[i+1], verbEiJarel ) )
# Teeme kindlaks, kas j2rgneb veel verbe, mis v6iksid potentsiaalselt liituda
verbObj[OTHER_VERBS] = False
if i+2 < len(sentTokens):
for j in range(i+2, len(sentTokens)):
token2 = sentTokens[j]
if token2[CLAUSE_IDX] == clauseID and verb.matches(token2):
verbObj[OTHER_VERBS] = True
break
if verbObj[OTHER_VERBS]:
expandableEgaFound = True
else:
#
# Kui osalausest on tuvastatud teisi predikaadieituseid ning need
# eelnevad praegusele 'ega'-eitusele , nt:
# Ei lükka ma ümber ega kinnita.
# Ta ei oota ega looda_0 ( enam ).
# V6ib olla tegu keerukama tervikfraasiga, nt:
# Ta ise pole kuidagi saanud ega tahnud_0 end samastada nendega.
# Sellistel juhtudel m2rgime konteksti mitmeseks, kuna 'ega'-fraas
# v6ib toetuda varasemale verbiahelale;
#
for j in range(i-1, -1, -1):
token2 = sentTokens[j]
if token2[CLAUSE_IDX] == clauseID:
for verbObj2 in foundChains:
if token2[WORD_ID] in verbObj2[PHRASE] and verbObj2[POLARITY] != 'POS':
verbObj[OTHER_VERBS] = True
break
foundChains.append( verbObj )
annotatedWords.extend( verbObj[PHRASE] )
matchFound = True
if not matchFound:
#
# 2. 'ega' + kaugemal järgnev verb
#
# 2.1 Kui 'ega'-le ei j2rgne ega eelne yhtegi eitust, kyll aga j2rgneb
# (osalause piires) 'ei'-le sobiv verb, loeme teatud juhtudel, et
# tegu on sobiva eitusfraasiga.
# Nt.
# Nii et ega_0 Diana jõulureedel sünnitanudki .
# Ega_0 ta tahtnud algul rääkida .
# Yldiselt paistab see muster olevat sage just ilukirjanduses ja
# suulise k6ne l2hedases keelekasutuses, harvem ajakirjanduses ning
# veel v2hem kasutusel teaduskirjanduses;
#
egaClauseID = sentTokens[i][CLAUSE_IDX]
precedingNeg = False
followingNeg = False
followingPos = None
for verbObj1 in foundChains:
if verbObj1[CLAUSE_IDX] == egaClauseID:
if verbObj1[POLARITY] != 'POS':
if any([ wid < sentTokens[i][WORD_ID] for wid in verbObj1[PHRASE] ]):
precedingNeg = True
if any([ wid > sentTokens[i][WORD_ID] for wid in verbObj1[PHRASE] ]):
followingNeg = True
elif verbObj1[POLARITY] == 'POS' and \
all([wid > sentTokens[i][WORD_ID] for wid in verbObj1[PHRASE]]):
followingPos = verbObj1
if not precedingNeg and not followingNeg:
if followingPos:
#
# K6ige lihtsam juht: kui j2rgneb positiivne verbiahel (ja eeldatavasti
# maksimaalses pikkuses) ning:
# *) ahelverbi ja 'ega' vahel pole punktuatsiooni;
# *) ahelverb sisaldab 'ei'-ga yhilduvat verbivormi;
# liidame ahelale 'ega' ette ning muudame polaarsuse negatiivseks:
# Ega_0 neil seal kerge ole_0 . "
# Ega_0 70 eluaastat ole_0 naljaasi !
# Ega_0 sa puusärgis paugutama_0 hakka_0 . "
#
minWID = min(followingPos[PHRASE])
phraseTokens = [t for t in sentTokens if t[WORD_ID] in followingPos[PHRASE]]
if any( [verbEiJarel.matches( t ) for t in phraseTokens] ) and \
not _isSeparatedByPossibleClauseBreakers( sentTokens, token[WORD_ID], minWID, True, True, False):
followingPos[PHRASE].insert(0, token[WORD_ID])
followingPos[PATTERN].insert(0, 'ega')
followingPos[POLARITY] = 'NEG'
followingPos[ANALYSIS_IDS].insert(0, _getMatchingAnalysisIDs( token, sonaEga ) )
annotatedWords.append( token[WORD_ID] )
matchFound = True
#
# Veakoht - vahel on 'kui':
# " Ega_0 muud kui pista_0 heinad põlema_0
#
elif i+1 < len(sentTokens):
#
# Heuristik:
# Kui 'ega'-le j2rgneb samas osalauses 'ei'-ga sobiv verb ning:
# *) see verb ei ole 'tud'-verb (seega t6en2oliselt on 'nud');
# *) see verb asub osalause l6pus v6i pole 'ega'-st kaugemal kui
# 2 s6na;
# *) see verb ei kuulu juba m2rgendatud verbiahelate sisse;
# siis eraldame uue 'ega'-fraasina, nt:
#
# Ega_0 poiss teda enam vahtinudki_0 .
# Ega_0 keegi sellist tulemust ju soovinud_0 ,
# Ja ega_0 ta soovinudki_0 Semperi kombel ümber õppida .
#
for j in range(i+1, len(sentTokens)):
token2 = sentTokens[j]
if token2[CLAUSE_IDX] == egaClauseID and verbEiJarel.matches(token2) and \
not verbTud.matches(token2) and token2[WORD_ID] not in annotatedWords and \
(_isClauseFinal( token2[WORD_ID], clausesDict[token2[CLAUSE_IDX]] ) or \
j-i <= 2):
wid1 = sentTokens[i][WORD_ID]
wid2 = token2[WORD_ID]
verbObj = { PHRASE: [wid1, wid2], PATTERN: ["ega", "verb"] }
verbObj[CLAUSE_IDX] = token2[CLAUSE_IDX]
if verbOlema.matches(token2):
verbObj[PATTERN][1] = 'ole'
verbObj[POLARITY] = 'NEG'
verbObj[ANALYSIS_IDS] = []
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( sentTokens[i], sonaEga ) )
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( token2, verbEiJarel ) )
# Teeme kindlaks, kas osalauses on veel verbe, mis v6iksid potentsiaalselt liituda
verbObj[OTHER_VERBS] = False
if i+2 < len(sentTokens):
for j in range(i+2, len(sentTokens)):
token3 = sentTokens[j]
if token3[CLAUSE_IDX] == verbObj[CLAUSE_IDX] and \
token2 != token3 and verb.matches(token3):
verbObj[OTHER_VERBS] = True
break
if verbObj[OTHER_VERBS]:
expandableEgaFound = True
foundChains.append( verbObj )
annotatedWords.extend( verbObj[PHRASE] )
matchFound = True
break
return expandableEgaFound | python | def _extractEgaNegFromSent( sentTokens, clausesDict, foundChains ):
''' Meetod, mis tuvastab antud lausest 'ega'-predikaadiga seotud eituse(d): ega + sobiv verb.
*) Juhtudel kui 'ega'-le j2rgneb juba tuvastatud, positiivse polaarsusega verbiahel (nt
ahel mille alguses on käskivas kõneviisis verb), liidetakse 'ega' olemasoleva ahela ette
ning muudetakse ahela polaarsus negatiivseks;
*) Muudel juhtudel otsitakse 'ega'-le j2rgnevat 'ei'-ga sobivat verbi (enamasti on selleks
'nud'-verb) ning liidetakse see (teatud heuristikute j2rgi) 'ega'-ga yheks fraasiks;
Tagastab True, kui uute 'ega' fraaside seas leidus m6ni selline, mida potentsiaalselt saaks
veel m6ne teise verbi liitmisega laiendada, muudel juhtudel tagastab False;
Miks see meetod opereerib tervel lausel, mitte yksikul osalausel?
>> Kuna 'ega' on sageli m2rgitud osalause piiriks (p2rast 'ega'-t v6ib l6ppeda osalause),
ei saa 'ega'-le j2rgnevaid verbe alati otsida yhe osalause seest, vaid tuleb vaadata
korraga mitut k6rvutiolevat osalauset; k2esolevalt lihtsustame ja vaatame tervet
lauset.
'''
sonaEga = WordTemplate({ROOT:'^ega$',POSTAG:'[DJ]'})
verbEiJarel = WordTemplate({POSTAG:'V',FORM:'(o|nud|tud|nuks|nuvat|vat|ks|ta|taks|tavat)$'})
verbEiJarel2 = WordTemplate({ROOT:'^mine$', POSTAG:'V',FORM:'neg o$'})
verbTud = WordTemplate({POSTAG:'V',FORM:'(tud)$'})
verb = WordTemplate({POSTAG:'V'})
verbOlema = WordTemplate({POSTAG:'V', ROOT:'^(ole)$'})
# J22dvustame s6nad, mis kuuluvad juba mingi tuvastatud verbifraasi koosseisu
annotatedWords = []
for verbObj in foundChains:
if (len(verbObj[PATTERN])==1 and re.match('^(ei|ära|ega)$', verbObj[PATTERN][0])):
# V2lja j22vad yksikuna esinevad ei/ära/ega, kuna need tõenäoliselt ei sega
continue
annotatedWords.extend( verbObj[PHRASE] )
expandableEgaFound = False
for i in range(len(sentTokens)):
token = sentTokens[i]
if sonaEga.matches(token) and token[WORD_ID] not in annotatedWords:
matchFound = False
if i+1 < len(sentTokens) and sentTokens[i+1][WORD_ID] in annotatedWords:
#
# K6ige lihtsam juht: eelnevalt on verbifraas juba tuvastatud (ja
# eeldatavasti maksimaalses pikkuses), seega pole teha muud, kui sellele
# ega ette panna ning polaarsuse negatiivseks muuta:
# Te saate kaebusi palju ega_0 jõua_0 nendele reageerida_0 .
# vene keelt ta ei mõista ega_0 või_0 seepärast olla_0 vene spioon
# NB! Lisamist ei teosta siiski juhtudel kui:
# *) J2rgnev fraas on juba negatiivse polaarsusega (selline laiendamine
# tekitaks lihtsalt mustreid juurde, aga sisulist infot juurde ei
# annaks);
# *) J2rgnev s6na pole 'ei'-ga yhilduv verb (t6en2oliselt on mingi jama,
# nt morf yhestamisel);
# *) J2rgnev s6na kuulub verbiahelasse, mis algab enne 'ega'-t (see viitab
# tegelikult sellele, et k6nealune verbiahel on katkiselt eraldatud);
#
for verbObj in foundChains:
if sentTokens[i+1][WORD_ID] in verbObj[PHRASE] and verbObj[POLARITY] != 'NEG' and \
(verbEiJarel.matches( sentTokens[i+1] ) or verbEiJarel2.matches( sentTokens[i+1] )) \
and i < min( verbObj[PHRASE] ):
verbObj[PHRASE].insert(0, token[WORD_ID])
verbObj[PATTERN].insert(0, 'ega')
verbObj[POLARITY] = 'NEG'
verbObj[ANALYSIS_IDS].insert(0, _getMatchingAnalysisIDs( token, sonaEga ) )
annotatedWords.append( token[WORD_ID] )
matchFound = True
break
elif i+1 < len(sentTokens) and verbEiJarel.matches( sentTokens[i+1] ) and \
sentTokens[i+1][WORD_ID] not in annotatedWords:
#
# Heuristik:
# kui 'ega'-le j2rgneb vahetult 'ei'-ga sobiv verb (peaks olema
# infiniitne nud/tud verb, kuna finiitsed leitakse ja seotakse
# t6en2oliselt eelmises harus), siis eraldame uue fraasina:
#
# Hakkasin Ainikiga rääkima ega_0 pööranud_0 Ivole enam tähelepanu .
# Tereese oli tükk aega vait ega_0 teadnud_0 , kas tõtt rääkida või mitte .
#
# >> clauseID-iks saab j2rgneva verbi ID, kuna 'ega' j2relt l2heb sageli
# osalausepiir ning ega-le eelnevad verbid kindlasti sellega seotud olla
# ei saa.
clauseID = sentTokens[i+1][CLAUSE_IDX]
wid1 = sentTokens[i][WORD_ID]
wid2 = sentTokens[i+1][WORD_ID]
verbObj = { PHRASE: [wid1, wid2], PATTERN: ["ega", "verb"] }
verbObj[CLAUSE_IDX] = clauseID
if verbOlema.matches(sentTokens[i+1]):
verbObj[PATTERN][1] = 'ole'
verbObj[POLARITY] = 'NEG'
verbObj[ANALYSIS_IDS] = []
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( sentTokens[i], sonaEga ) )
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( sentTokens[i+1], verbEiJarel ) )
# Teeme kindlaks, kas j2rgneb veel verbe, mis v6iksid potentsiaalselt liituda
verbObj[OTHER_VERBS] = False
if i+2 < len(sentTokens):
for j in range(i+2, len(sentTokens)):
token2 = sentTokens[j]
if token2[CLAUSE_IDX] == clauseID and verb.matches(token2):
verbObj[OTHER_VERBS] = True
break
if verbObj[OTHER_VERBS]:
expandableEgaFound = True
else:
#
# Kui osalausest on tuvastatud teisi predikaadieituseid ning need
# eelnevad praegusele 'ega'-eitusele , nt:
# Ei lükka ma ümber ega kinnita.
# Ta ei oota ega looda_0 ( enam ).
# V6ib olla tegu keerukama tervikfraasiga, nt:
# Ta ise pole kuidagi saanud ega tahnud_0 end samastada nendega.
# Sellistel juhtudel m2rgime konteksti mitmeseks, kuna 'ega'-fraas
# v6ib toetuda varasemale verbiahelale;
#
for j in range(i-1, -1, -1):
token2 = sentTokens[j]
if token2[CLAUSE_IDX] == clauseID:
for verbObj2 in foundChains:
if token2[WORD_ID] in verbObj2[PHRASE] and verbObj2[POLARITY] != 'POS':
verbObj[OTHER_VERBS] = True
break
foundChains.append( verbObj )
annotatedWords.extend( verbObj[PHRASE] )
matchFound = True
if not matchFound:
#
# 2. 'ega' + kaugemal järgnev verb
#
# 2.1 Kui 'ega'-le ei j2rgne ega eelne yhtegi eitust, kyll aga j2rgneb
# (osalause piires) 'ei'-le sobiv verb, loeme teatud juhtudel, et
# tegu on sobiva eitusfraasiga.
# Nt.
# Nii et ega_0 Diana jõulureedel sünnitanudki .
# Ega_0 ta tahtnud algul rääkida .
# Yldiselt paistab see muster olevat sage just ilukirjanduses ja
# suulise k6ne l2hedases keelekasutuses, harvem ajakirjanduses ning
# veel v2hem kasutusel teaduskirjanduses;
#
egaClauseID = sentTokens[i][CLAUSE_IDX]
precedingNeg = False
followingNeg = False
followingPos = None
for verbObj1 in foundChains:
if verbObj1[CLAUSE_IDX] == egaClauseID:
if verbObj1[POLARITY] != 'POS':
if any([ wid < sentTokens[i][WORD_ID] for wid in verbObj1[PHRASE] ]):
precedingNeg = True
if any([ wid > sentTokens[i][WORD_ID] for wid in verbObj1[PHRASE] ]):
followingNeg = True
elif verbObj1[POLARITY] == 'POS' and \
all([wid > sentTokens[i][WORD_ID] for wid in verbObj1[PHRASE]]):
followingPos = verbObj1
if not precedingNeg and not followingNeg:
if followingPos:
#
# K6ige lihtsam juht: kui j2rgneb positiivne verbiahel (ja eeldatavasti
# maksimaalses pikkuses) ning:
# *) ahelverbi ja 'ega' vahel pole punktuatsiooni;
# *) ahelverb sisaldab 'ei'-ga yhilduvat verbivormi;
# liidame ahelale 'ega' ette ning muudame polaarsuse negatiivseks:
# Ega_0 neil seal kerge ole_0 . "
# Ega_0 70 eluaastat ole_0 naljaasi !
# Ega_0 sa puusärgis paugutama_0 hakka_0 . "
#
minWID = min(followingPos[PHRASE])
phraseTokens = [t for t in sentTokens if t[WORD_ID] in followingPos[PHRASE]]
if any( [verbEiJarel.matches( t ) for t in phraseTokens] ) and \
not _isSeparatedByPossibleClauseBreakers( sentTokens, token[WORD_ID], minWID, True, True, False):
followingPos[PHRASE].insert(0, token[WORD_ID])
followingPos[PATTERN].insert(0, 'ega')
followingPos[POLARITY] = 'NEG'
followingPos[ANALYSIS_IDS].insert(0, _getMatchingAnalysisIDs( token, sonaEga ) )
annotatedWords.append( token[WORD_ID] )
matchFound = True
#
# Veakoht - vahel on 'kui':
# " Ega_0 muud kui pista_0 heinad põlema_0
#
elif i+1 < len(sentTokens):
#
# Heuristik:
# Kui 'ega'-le j2rgneb samas osalauses 'ei'-ga sobiv verb ning:
# *) see verb ei ole 'tud'-verb (seega t6en2oliselt on 'nud');
# *) see verb asub osalause l6pus v6i pole 'ega'-st kaugemal kui
# 2 s6na;
# *) see verb ei kuulu juba m2rgendatud verbiahelate sisse;
# siis eraldame uue 'ega'-fraasina, nt:
#
# Ega_0 poiss teda enam vahtinudki_0 .
# Ega_0 keegi sellist tulemust ju soovinud_0 ,
# Ja ega_0 ta soovinudki_0 Semperi kombel ümber õppida .
#
for j in range(i+1, len(sentTokens)):
token2 = sentTokens[j]
if token2[CLAUSE_IDX] == egaClauseID and verbEiJarel.matches(token2) and \
not verbTud.matches(token2) and token2[WORD_ID] not in annotatedWords and \
(_isClauseFinal( token2[WORD_ID], clausesDict[token2[CLAUSE_IDX]] ) or \
j-i <= 2):
wid1 = sentTokens[i][WORD_ID]
wid2 = token2[WORD_ID]
verbObj = { PHRASE: [wid1, wid2], PATTERN: ["ega", "verb"] }
verbObj[CLAUSE_IDX] = token2[CLAUSE_IDX]
if verbOlema.matches(token2):
verbObj[PATTERN][1] = 'ole'
verbObj[POLARITY] = 'NEG'
verbObj[ANALYSIS_IDS] = []
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( sentTokens[i], sonaEga ) )
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( token2, verbEiJarel ) )
# Teeme kindlaks, kas osalauses on veel verbe, mis v6iksid potentsiaalselt liituda
verbObj[OTHER_VERBS] = False
if i+2 < len(sentTokens):
for j in range(i+2, len(sentTokens)):
token3 = sentTokens[j]
if token3[CLAUSE_IDX] == verbObj[CLAUSE_IDX] and \
token2 != token3 and verb.matches(token3):
verbObj[OTHER_VERBS] = True
break
if verbObj[OTHER_VERBS]:
expandableEgaFound = True
foundChains.append( verbObj )
annotatedWords.extend( verbObj[PHRASE] )
matchFound = True
break
return expandableEgaFound | Meetod, mis tuvastab antud lausest 'ega'-predikaadiga seotud eituse(d): ega + sobiv verb.
*) Juhtudel kui 'ega'-le j2rgneb juba tuvastatud, positiivse polaarsusega verbiahel (nt
ahel mille alguses on käskivas kõneviisis verb), liidetakse 'ega' olemasoleva ahela ette
ning muudetakse ahela polaarsus negatiivseks;
*) Muudel juhtudel otsitakse 'ega'-le j2rgnevat 'ei'-ga sobivat verbi (enamasti on selleks
'nud'-verb) ning liidetakse see (teatud heuristikute j2rgi) 'ega'-ga yheks fraasiks;
Tagastab True, kui uute 'ega' fraaside seas leidus m6ni selline, mida potentsiaalselt saaks
veel m6ne teise verbi liitmisega laiendada, muudel juhtudel tagastab False;
Miks see meetod opereerib tervel lausel, mitte yksikul osalausel?
>> Kuna 'ega' on sageli m2rgitud osalause piiriks (p2rast 'ega'-t v6ib l6ppeda osalause),
ei saa 'ega'-le j2rgnevaid verbe alati otsida yhe osalause seest, vaid tuleb vaadata
korraga mitut k6rvutiolevat osalauset; k2esolevalt lihtsustame ja vaatame tervet
lauset. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/basic_verbchain_detection.py#L1340-L1557 |
estnltk/estnltk | estnltk/mw_verbs/basic_verbchain_detection.py | _determineVerbChainContextualAmbiguity | def _determineVerbChainContextualAmbiguity( clauseTokens, clauseID, foundChains ):
'''
Meetod, mis püüab otsustada iga leitud verbiahela (foundChains liikme) puhul, kas
osalauses leidub veel vabu verbe, millega verbiahelat oleks võimalik täiendada;
Kui vabu verbe ei leidu, muudab verbiahela OTHER_VERBS väärtuse negatiivseks, vastasel
juhul ei tee midagi.
Sisend 'clauseTokens' on list, mis sisaldab yhe osalause k6iki s6nu (pyvabamorfi poolt
tehtud s6na-analyyse), clauseID on vastava osalause indentifikaator;
'''
verb = WordTemplate({POSTAG:'V'})
verbOlema = WordTemplate({POSTAG:'V', ROOT:'^(ole)$'})
verbSaama = WordTemplate({POSTAG:'V', ROOT:'^(saa)$'})
verbEiAra = WordTemplate({ROOT:'^(ära|ei)$',FORM:'neg.*',POSTAG:'V'})
verbInf = WordTemplate({POSTAG:'V', FORM:'^(da|des|ma|tama|ta|mas|mast|nud|tud|v|mata)$'})
regularVerbInf = WordTemplate({POSTAG:'V', FORM:'^(da|ma|maks|mas|mast|mata)$'})
olemaVerbInf = WordTemplate({POSTAG:'V', FORM:'^(nud|tud|da|ma|mas|mata)$'})
saamaVerbInf = WordTemplate({POSTAG:'V', FORM:'^(tud|da|ma)$'})
sonaMitte = WordTemplate({ROOT:'^mitte$',POSTAG:'D'})
# J22dvustame s6nad, mis kuuluvad juba mingi tuvastatud verbifraasi koosseisu
annotatedWords = []
for verbObj in foundChains:
if (len(verbObj[PATTERN])==1 and re.match('^(ei|ära|ega)$', verbObj[PATTERN][0])):
# V2lja j22vad yksikuna esinevad ei/ära/ega, kuna need tõenäoliselt ei sega
continue
annotatedWords.extend( verbObj[PHRASE] )
finVerbs = [t for t in clauseTokens if verb.matches(t) and not verbInf.matches(t) ]
negFinVerbs = [t for t in finVerbs if verbEiAra.matches(t)]
looseNegVerbs = [t for t in negFinVerbs if t[WORD_ID] not in annotatedWords]
#
# Kontrollime, milline on osalause finiitverbiline kontekst. Kui seal on mingi potentsiaalne
# segadus, jätamegi küsimärgid alles / kustutamata.
#
if len(negFinVerbs)==0 and len(finVerbs) >= 2:
# *) Kui negatiivseid finiitverbe pole, aga positiivseid on rohkem kui 2, jääb
# praegu lahtiseks, mis seal kontekstis toimub. Jätame kõik küsimärgiga;
return
elif len(looseNegVerbs) > 0:
# *) Kui leidub negatiivseid finiitverbe, mida pole 6nnestunud pikemaks ahelaks
# pikendada, jääb samuti lahtiseks, mis seal kontekstis toimub. Jätame kõik
# küsimärgiga;
return
elif not looseNegVerbs and negFinVerbs and len(negFinVerbs)-len(finVerbs)>0:
# *) Kui negatiivseid verbe leidub rohkem, kui positiivseid, j22b ka lahtiseks,
# mis seal kontekstis täpselt toimub. NB! Mineviku eitus jääb paraku samuti
# lahtiseks, aga praegu ei oska selle vältimiseks lihtsat reeglit anda;
return
rVerbFreeVerbs = None
olemaFreeVerbs = None
saamaFreeVerbs = None
for verbObj in foundChains:
#
# Vaatame verbe, mille osalausekontekstis on teisi (infiniit)verbe; Kui kontekstis
# ei leidu vabu (potentsiaalselt liituda võivaid verbe), märgime konteksti vabaks.
# Nt. alltoodud kontekstis ei ole märgitud verbidele enam yhtki teist verbi
# liita (kuigi kontekstis leidub teisi infiniitverbe):
#
# 1920 vastu võetud Tallinna tehnikumi põhikiri kaotas_0 kehtivuse .
# kuid häirest haaratud õunad nakatuvad_0 kiiresti mädanikesse ,
#
if verbObj[CLAUSE_IDX] == clauseID and verbObj[OTHER_VERBS]:
contextClear = False
#
# Leiame viimasele s6nale vastava token'i, selle lemma ja vormitunnuse
#
lastToken = [ token for token in clauseTokens if token[WORD_ID] == verbObj[PHRASE][-1] ]
lastToken = lastToken[0]
analyses = [ lastToken[ANALYSIS][j] for j in range(len(lastToken[ANALYSIS])) if j in verbObj[ANALYSIS_IDS][-1] ]
mainVerb = [ analysis[ROOT] for analysis in analyses ]
mainVerbLemma = mainVerb[0]
#
# Leiame, millised verbid on veel vabad (st v6ivad potentsiaalselt rektsiooni-
# seoses liituda); 'saama' ja 'olema' verbide puhul on potentsiaalsed liitujad
# natuke teistsugused kui ylej22nud verbidel;
#
if 'saa' == mainVerbLemma:
if saamaFreeVerbs == None:
saamaFreeVerbs = [t[WORD_ID] for t in clauseTokens if saamaVerbInf.matches(t) and t[WORD_ID] not in annotatedWords]
if not saamaFreeVerbs:
contextClear = True
elif 'ole' == mainVerbLemma:
if olemaFreeVerbs == None:
olemaFreeVerbs = [t[WORD_ID] for t in clauseTokens if olemaVerbInf.matches(t) and t[WORD_ID] not in annotatedWords]
if not olemaFreeVerbs:
contextClear = True
else:
if rVerbFreeVerbs == None:
rVerbFreeVerbs = [t[WORD_ID] for t in clauseTokens if regularVerbInf.matches(t) and t[WORD_ID] not in annotatedWords]
if not rVerbFreeVerbs:
contextClear = True
#
# Kui yhtegi vaba verbi ei leidunud, märgime konteksti puhtaks
#
if contextClear:
verbObj[OTHER_VERBS] = False | python | def _determineVerbChainContextualAmbiguity( clauseTokens, clauseID, foundChains ):
'''
Meetod, mis püüab otsustada iga leitud verbiahela (foundChains liikme) puhul, kas
osalauses leidub veel vabu verbe, millega verbiahelat oleks võimalik täiendada;
Kui vabu verbe ei leidu, muudab verbiahela OTHER_VERBS väärtuse negatiivseks, vastasel
juhul ei tee midagi.
Sisend 'clauseTokens' on list, mis sisaldab yhe osalause k6iki s6nu (pyvabamorfi poolt
tehtud s6na-analyyse), clauseID on vastava osalause indentifikaator;
'''
verb = WordTemplate({POSTAG:'V'})
verbOlema = WordTemplate({POSTAG:'V', ROOT:'^(ole)$'})
verbSaama = WordTemplate({POSTAG:'V', ROOT:'^(saa)$'})
verbEiAra = WordTemplate({ROOT:'^(ära|ei)$',FORM:'neg.*',POSTAG:'V'})
verbInf = WordTemplate({POSTAG:'V', FORM:'^(da|des|ma|tama|ta|mas|mast|nud|tud|v|mata)$'})
regularVerbInf = WordTemplate({POSTAG:'V', FORM:'^(da|ma|maks|mas|mast|mata)$'})
olemaVerbInf = WordTemplate({POSTAG:'V', FORM:'^(nud|tud|da|ma|mas|mata)$'})
saamaVerbInf = WordTemplate({POSTAG:'V', FORM:'^(tud|da|ma)$'})
sonaMitte = WordTemplate({ROOT:'^mitte$',POSTAG:'D'})
# J22dvustame s6nad, mis kuuluvad juba mingi tuvastatud verbifraasi koosseisu
annotatedWords = []
for verbObj in foundChains:
if (len(verbObj[PATTERN])==1 and re.match('^(ei|ära|ega)$', verbObj[PATTERN][0])):
# V2lja j22vad yksikuna esinevad ei/ära/ega, kuna need tõenäoliselt ei sega
continue
annotatedWords.extend( verbObj[PHRASE] )
finVerbs = [t for t in clauseTokens if verb.matches(t) and not verbInf.matches(t) ]
negFinVerbs = [t for t in finVerbs if verbEiAra.matches(t)]
looseNegVerbs = [t for t in negFinVerbs if t[WORD_ID] not in annotatedWords]
#
# Kontrollime, milline on osalause finiitverbiline kontekst. Kui seal on mingi potentsiaalne
# segadus, jätamegi küsimärgid alles / kustutamata.
#
if len(negFinVerbs)==0 and len(finVerbs) >= 2:
# *) Kui negatiivseid finiitverbe pole, aga positiivseid on rohkem kui 2, jääb
# praegu lahtiseks, mis seal kontekstis toimub. Jätame kõik küsimärgiga;
return
elif len(looseNegVerbs) > 0:
# *) Kui leidub negatiivseid finiitverbe, mida pole 6nnestunud pikemaks ahelaks
# pikendada, jääb samuti lahtiseks, mis seal kontekstis toimub. Jätame kõik
# küsimärgiga;
return
elif not looseNegVerbs and negFinVerbs and len(negFinVerbs)-len(finVerbs)>0:
# *) Kui negatiivseid verbe leidub rohkem, kui positiivseid, j22b ka lahtiseks,
# mis seal kontekstis täpselt toimub. NB! Mineviku eitus jääb paraku samuti
# lahtiseks, aga praegu ei oska selle vältimiseks lihtsat reeglit anda;
return
rVerbFreeVerbs = None
olemaFreeVerbs = None
saamaFreeVerbs = None
for verbObj in foundChains:
#
# Vaatame verbe, mille osalausekontekstis on teisi (infiniit)verbe; Kui kontekstis
# ei leidu vabu (potentsiaalselt liituda võivaid verbe), märgime konteksti vabaks.
# Nt. alltoodud kontekstis ei ole märgitud verbidele enam yhtki teist verbi
# liita (kuigi kontekstis leidub teisi infiniitverbe):
#
# 1920 vastu võetud Tallinna tehnikumi põhikiri kaotas_0 kehtivuse .
# kuid häirest haaratud õunad nakatuvad_0 kiiresti mädanikesse ,
#
if verbObj[CLAUSE_IDX] == clauseID and verbObj[OTHER_VERBS]:
contextClear = False
#
# Leiame viimasele s6nale vastava token'i, selle lemma ja vormitunnuse
#
lastToken = [ token for token in clauseTokens if token[WORD_ID] == verbObj[PHRASE][-1] ]
lastToken = lastToken[0]
analyses = [ lastToken[ANALYSIS][j] for j in range(len(lastToken[ANALYSIS])) if j in verbObj[ANALYSIS_IDS][-1] ]
mainVerb = [ analysis[ROOT] for analysis in analyses ]
mainVerbLemma = mainVerb[0]
#
# Leiame, millised verbid on veel vabad (st v6ivad potentsiaalselt rektsiooni-
# seoses liituda); 'saama' ja 'olema' verbide puhul on potentsiaalsed liitujad
# natuke teistsugused kui ylej22nud verbidel;
#
if 'saa' == mainVerbLemma:
if saamaFreeVerbs == None:
saamaFreeVerbs = [t[WORD_ID] for t in clauseTokens if saamaVerbInf.matches(t) and t[WORD_ID] not in annotatedWords]
if not saamaFreeVerbs:
contextClear = True
elif 'ole' == mainVerbLemma:
if olemaFreeVerbs == None:
olemaFreeVerbs = [t[WORD_ID] for t in clauseTokens if olemaVerbInf.matches(t) and t[WORD_ID] not in annotatedWords]
if not olemaFreeVerbs:
contextClear = True
else:
if rVerbFreeVerbs == None:
rVerbFreeVerbs = [t[WORD_ID] for t in clauseTokens if regularVerbInf.matches(t) and t[WORD_ID] not in annotatedWords]
if not rVerbFreeVerbs:
contextClear = True
#
# Kui yhtegi vaba verbi ei leidunud, märgime konteksti puhtaks
#
if contextClear:
verbObj[OTHER_VERBS] = False | Meetod, mis püüab otsustada iga leitud verbiahela (foundChains liikme) puhul, kas
osalauses leidub veel vabu verbe, millega verbiahelat oleks võimalik täiendada;
Kui vabu verbe ei leidu, muudab verbiahela OTHER_VERBS väärtuse negatiivseks, vastasel
juhul ei tee midagi.
Sisend 'clauseTokens' on list, mis sisaldab yhe osalause k6iki s6nu (pyvabamorfi poolt
tehtud s6na-analyyse), clauseID on vastava osalause indentifikaator; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/basic_verbchain_detection.py#L1560-L1653 |
estnltk/estnltk | estnltk/mw_verbs/basic_verbchain_detection.py | _getMatchingAnalysisIDs | def _getMatchingAnalysisIDs( tokenJson, requiredWordTemplate, discardAnalyses = None ):
''' Tagastab listi tokenJson'i analyysidest, mis sobivad etteantud yksiku
sõnamalli või sõnamallide listi mõne elemendiga (requiredWordTemplate võib
olla üks WordTemplate või list WordTemplate elementidega);
Kui discardAnalyses on defineeritud (ning on WordTemplate), visatakse minema
analyysid, mis vastavad sellele s6namallile;
'''
final_ids = set()
if isinstance(requiredWordTemplate, list):
for wt in requiredWordTemplate:
ids = wt.matchingAnalyseIndexes(tokenJson)
if ids:
final_ids.update(ids)
elif isinstance(requiredWordTemplate, WordTemplate):
ids = requiredWordTemplate.matchingAnalyseIndexes(tokenJson)
final_ids = set(ids)
if discardAnalyses:
if isinstance(discardAnalyses, WordTemplate):
ids2 = discardAnalyses.matchingAnalyseIndexes(tokenJson)
if ids2:
final_ids = final_ids.difference(set(ids2))
else:
raise Exception(' The parameter discardAnalyses should be WordTemplate.')
if len(final_ids) == 0:
raise Exception(' Unable to find matching analyse IDs for: '+str(tokenJson))
return list(final_ids) | python | def _getMatchingAnalysisIDs( tokenJson, requiredWordTemplate, discardAnalyses = None ):
''' Tagastab listi tokenJson'i analyysidest, mis sobivad etteantud yksiku
sõnamalli või sõnamallide listi mõne elemendiga (requiredWordTemplate võib
olla üks WordTemplate või list WordTemplate elementidega);
Kui discardAnalyses on defineeritud (ning on WordTemplate), visatakse minema
analyysid, mis vastavad sellele s6namallile;
'''
final_ids = set()
if isinstance(requiredWordTemplate, list):
for wt in requiredWordTemplate:
ids = wt.matchingAnalyseIndexes(tokenJson)
if ids:
final_ids.update(ids)
elif isinstance(requiredWordTemplate, WordTemplate):
ids = requiredWordTemplate.matchingAnalyseIndexes(tokenJson)
final_ids = set(ids)
if discardAnalyses:
if isinstance(discardAnalyses, WordTemplate):
ids2 = discardAnalyses.matchingAnalyseIndexes(tokenJson)
if ids2:
final_ids = final_ids.difference(set(ids2))
else:
raise Exception(' The parameter discardAnalyses should be WordTemplate.')
if len(final_ids) == 0:
raise Exception(' Unable to find matching analyse IDs for: '+str(tokenJson))
return list(final_ids) | Tagastab listi tokenJson'i analyysidest, mis sobivad etteantud yksiku
sõnamalli või sõnamallide listi mõne elemendiga (requiredWordTemplate võib
olla üks WordTemplate või list WordTemplate elementidega);
Kui discardAnalyses on defineeritud (ning on WordTemplate), visatakse minema
analyysid, mis vastavad sellele s6namallile; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/basic_verbchain_detection.py#L1665-L1691 |
estnltk/estnltk | estnltk/grammar/common.py | is_valid_regex | def is_valid_regex(regex):
"""Function for checking a valid regex."""
if len(regex) == 0:
return False
try:
re.compile(regex)
return True
except sre_constants.error:
return False | python | def is_valid_regex(regex):
"""Function for checking a valid regex."""
if len(regex) == 0:
return False
try:
re.compile(regex)
return True
except sre_constants.error:
return False | Function for checking a valid regex. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/grammar/common.py#L29-L37 |
estnltk/estnltk | estnltk/single_layer_operations/layer_positions.py | delete_left | def delete_left(elem1, elem2):
"""
xxxxx
yyyyy
---------
xxx
yyyyy
"""
# assert not (nested(elem1, elem2) or nested(elem2, elem1)), 'deletion not defined for nested elements'
if overlapping_right(elem1, elem2):
elem1['end'] = elem2['start']
return elem1, elem2 | python | def delete_left(elem1, elem2):
"""
xxxxx
yyyyy
---------
xxx
yyyyy
"""
# assert not (nested(elem1, elem2) or nested(elem2, elem1)), 'deletion not defined for nested elements'
if overlapping_right(elem1, elem2):
elem1['end'] = elem2['start']
return elem1, elem2 | xxxxx
yyyyy
---------
xxx
yyyyy | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/single_layer_operations/layer_positions.py#L105-L116 |
estnltk/estnltk | estnltk/single_layer_operations/layer_positions.py | delete_right | def delete_right(elem1, elem2):
"""
xxxxx
yyyyy
---------
xxxxx
yyy
"""
# assert not (nested(elem1, elem2) or nested(elem2, elem1)), 'deletion not defined for nested elements'
if overlapping_left(elem1, elem2):
elem2['start'] = elem1['end']
return elem1, elem2 | python | def delete_right(elem1, elem2):
"""
xxxxx
yyyyy
---------
xxxxx
yyy
"""
# assert not (nested(elem1, elem2) or nested(elem2, elem1)), 'deletion not defined for nested elements'
if overlapping_left(elem1, elem2):
elem2['start'] = elem1['end']
return elem1, elem2 | xxxxx
yyyyy
---------
xxxxx
yyy | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/single_layer_operations/layer_positions.py#L119-L130 |
estnltk/estnltk | estnltk/single_layer_operations/layer_positions.py | iterate_intersecting_pairs | def iterate_intersecting_pairs(layer):
"""
Given a layer of estntltk objects, yields pairwise intersecting elements.
Breaks when the layer is changed or deleted after initializing the iterator.
"""
yielded = set()
ri = layer[:] # Shallow copy the layer
for i1, elem1 in enumerate(ri):
for i2, elem2 in enumerate(ri):
if i1 != i2 and elem1['start'] <= elem2['start'] < elem1['end']:
inds = (i1, i2) if i1 < i2 else (i2, i1)
if inds not in yielded and in_by_identity(layer, elem1) and in_by_identity(layer, elem2):
yielded.add(inds)
yield elem1, elem2 | python | def iterate_intersecting_pairs(layer):
"""
Given a layer of estntltk objects, yields pairwise intersecting elements.
Breaks when the layer is changed or deleted after initializing the iterator.
"""
yielded = set()
ri = layer[:] # Shallow copy the layer
for i1, elem1 in enumerate(ri):
for i2, elem2 in enumerate(ri):
if i1 != i2 and elem1['start'] <= elem2['start'] < elem1['end']:
inds = (i1, i2) if i1 < i2 else (i2, i1)
if inds not in yielded and in_by_identity(layer, elem1) and in_by_identity(layer, elem2):
yielded.add(inds)
yield elem1, elem2 | Given a layer of estntltk objects, yields pairwise intersecting elements.
Breaks when the layer is changed or deleted after initializing the iterator. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/single_layer_operations/layer_positions.py#L153-L166 |
estnltk/estnltk | estnltk/np_chunker.py | NounPhraseChunker.analyze_text | def analyze_text( self, text, **kwargs ):
'''
Analyzes given Text for noun phrase chunks.
As result of analysis, a layer NOUN_CHUNKS will be attached to the input
Text object, containing a noun phrases detected from the Text;
Note: for preprocessing the Text, MaltParser is used by default. In order
to obtain a decent performance with MaltParser, it is advisable to analyse
texts at their full extent with this method. Splitting a text into smaller
chunks, such as clauses or sentences, and analysing one-small-chunk-at-time
may be rather demanding in terms of performance, because a file-based
preprocessing is used for obtaining the dependency relations.
Parameters
----------
text: estnltk.text.Text
The input text that should be analysed for noun phrases;
force_parsing : bool
If True, uses the *self.parser* to parse the given *text*, and overrides
the syntactic annotations in *text* with the new layer obtained from the
parser;
(default: False)
syntax_layer : str
Specifies which layer of syntactic annotations should be used as a
basis for NP chunking; If the *syntax_layer* exists within the *text*
(and force_parsing==False), uses the syntactic annotations from
*text[syntax_layer]*;
(default: LAYER_CONLL)
cutPhrases: bool
If True, all phrases exceeding the cutMaxThreshold will be
cut into single word phrases, consisting only of part-of-speech
categories 'S', 'Y', 'H';
(default: True)
cutMaxThreshold: int
Threshold indicating the maximum number of words allowed in a
phrase.
If cutPhrases is set, all phrases exceeding the threshold will be
cut into single word phrases, consisting only of part-of-speech
categories 'S', 'Y', 'H';
Automatic analysis of the Balanced Corpus of Estonian suggests
that 97% of all NP chunks are likely chunks of length 1-3, thus
the default threshold is set to 3;
(default value: 3)
return_type: string
If return_type=="text" (Default),
returns the input Text object;
If return_type=="labels",
returns a list of NP labels (strings), containing a label for
each word token in Text, indicating whether the word is at the
beginning of a phrase ('B'), inside a phrase ('I') or does
not belong to any phrase ('O').
If return_type=="tokens",
returns a list of phrases, where each phrase is a list of
tokens, and each token is a dictionary representing word;
If return_type=="strings",
returns a list of text strings, where each string is phrase's
text;
Regardless of the return type, a layer named NOUN_CHUNKS will be added
to the input Text containing noun phrase annotations;
'''
# 0) Parse given arguments
#
# Output specifics
all_return_types = ["text", "labels", "tokens", "strings"]
return_type = all_return_types[0]
cutPhrases = True
cutMaxThreshold = 3
annotate_text = True
# Syntax layer & Parsing specifics
syntax_layer_name = LAYER_CONLL
force_parsing = False
for argName, argVal in kwargs.items():
if argName == 'cutPhrases':
cutPhrases = bool(argVal)
elif argName == 'force_parsing':
force_parsing = bool(argVal)
elif argName == 'syntax_layer':
syntax_layer_name = argVal
elif argName == 'cutMaxThreshold':
cutMaxThreshold = int(argVal)
elif argName == 'return_type':
if argVal.lower() in all_return_types:
return_type = argVal.lower()
else:
raise Exception(' Unexpected return type: ', argVal)
else:
raise Exception(' Unsupported argument given: '+argName)
#
# 1) Acquire the layers of morphological & syntactic annotations:
#
if not syntax_layer_name in text or force_parsing:
# No existing layer found: produce a new layer with the parser
self.parser.parse_text( text )
if isinstance(self.parser, MaltParser):
syntax_layer_name = LAYER_CONLL
elif isinstance(self.parser, VISLCG3Parser):
syntax_layer_name = LAYER_VISLCG3
else:
raise Exception(' (!) Unknown type of syntactic parser: ',self.parser)
if not text.is_tagged(ANALYSIS):
# If missing, add the layer of morphological analyses
text = text.tag_analysis()
# 2) Process text sentence by sentence
all_np_labels = []
for sentence_text in text.split_by( SENTENCES ):
tokens = sentence_text[WORDS]
syntax_layer = sentence_text[syntax_layer_name]
# Find phrases
np_labels = self._find_phrases( tokens, syntax_layer, cutPhrases, cutMaxThreshold )
# Normalize labels
np_labels = [ 'O' if not l in ['B', 'I'] else l for l in np_labels ]
# Collect results
all_np_labels.extend( np_labels )
# 3) Return input text, labels, phrases or phrase texts
if annotate_text:
self.annotateText( text, NOUN_CHUNKS, all_np_labels )
if return_type == "text":
return text
elif return_type == "labels":
return all_np_labels
elif return_type == "tokens":
return self.get_phrases(text, all_np_labels)
else:
return self.get_phrase_texts(text, all_np_labels) | python | def analyze_text( self, text, **kwargs ):
'''
Analyzes the given Text for noun phrase chunks.
As a result of the analysis, a layer NOUN_CHUNKS will be attached to the input
Text object, containing the noun phrases detected from the Text;
Note: for preprocessing the Text, MaltParser is used by default. In order
to obtain a decent performance with MaltParser, it is advisable to analyse
texts at their full extent with this method. Splitting a text into smaller
chunks, such as clauses or sentences, and analysing one-small-chunk-at-time
may be rather demanding in terms of performance, because a file-based
preprocessing is used for obtaining the dependency relations.
Parameters
----------
text: estnltk.text.Text
The input text that should be analysed for noun phrases;
force_parsing : bool
If True, uses the *self.parser* to parse the given *text*, and overrides
the syntactic annotations in *text* with the new layer obtained from the
parser;
(default: False)
syntax_layer : str
Specifies which layer of syntactic annotations should be used as a
basis for NP chunking; If the *syntax_layer* exists within the *text*
(and force_parsing==False), uses the syntactic annotations from
*text[syntax_layer]*;
(default: LAYER_CONLL)
cutPhrases: bool
If True, all phrases exceeding the cutMaxThreshold will be
cut into single word phrases, consisting only of part-of-speech
categories 'S', 'Y', 'H';
(default: True)
cutMaxThreshold: int
Threshold indicating the maximum number of words allowed in a
phrase.
If cutPhrases is set, all phrases exceeding the threshold will be
cut into single word phrases, consisting only of part-of-speech
categories 'S', 'Y', 'H';
Automatic analysis of the Balanced Corpus of Estonian suggests
that 97% of all NP chunks are likely chunks of length 1-3, thus
the default threshold is set to 3;
(default value: 3)
return_type: string
If return_type=="text" (Default),
returns the input Text object;
If return_type=="labels",
returns a list of NP labels (strings), containing a label for
each word token in Text, indicating whether the word is at the
beginning of a phrase ('B'), inside a phrase ('I') or does
not belong to any phrase ('O').
If return_type=="tokens",
returns a list of phrases, where each phrase is a list of
tokens, and each token is a dictionary representing word;
If return_type=="strings",
returns a list of text strings, where each string is phrase's
text;
Regardless of the return type, a layer named NOUN_CHUNKS will be added
to the input Text containing noun phrase annotations;
'''
# 0) Parse given arguments
#
# Output specifics
all_return_types = ["text", "labels", "tokens", "strings"]
return_type = all_return_types[0]
cutPhrases = True
cutMaxThreshold = 3
annotate_text = True
# Syntax layer & Parsing specifics
syntax_layer_name = LAYER_CONLL
force_parsing = False
for argName, argVal in kwargs.items():
if argName == 'cutPhrases':
cutPhrases = bool(argVal)
elif argName == 'force_parsing':
force_parsing = bool(argVal)
elif argName == 'syntax_layer':
syntax_layer_name = argVal
elif argName == 'cutMaxThreshold':
cutMaxThreshold = int(argVal)
elif argName == 'return_type':
if argVal.lower() in all_return_types:
return_type = argVal.lower()
else:
raise Exception(' Unexpected return type: ', argVal)
else:
raise Exception(' Unsupported argument given: '+argName)
#
# 1) Acquire the layers of morphological & syntactic annotations:
#
if not syntax_layer_name in text or force_parsing:
# No existing layer found: produce a new layer with the parser
self.parser.parse_text( text )
if isinstance(self.parser, MaltParser):
syntax_layer_name = LAYER_CONLL
elif isinstance(self.parser, VISLCG3Parser):
syntax_layer_name = LAYER_VISLCG3
else:
raise Exception(' (!) Unknown type of syntactic parser: ',self.parser)
if not text.is_tagged(ANALYSIS):
# If missing, add the layer of morphological analyses
text = text.tag_analysis()
# 2) Process text sentence by sentence
all_np_labels = []
for sentence_text in text.split_by( SENTENCES ):
tokens = sentence_text[WORDS]
syntax_layer = sentence_text[syntax_layer_name]
# Find phrases
np_labels = self._find_phrases( tokens, syntax_layer, cutPhrases, cutMaxThreshold )
# Normalize labels
np_labels = [ 'O' if not l in ['B', 'I'] else l for l in np_labels ]
# Collect results
all_np_labels.extend( np_labels )
# 3) Return input text, labels, phrases or phrase texts
if annotate_text:
self.annotateText( text, NOUN_CHUNKS, all_np_labels )
if return_type == "text":
return text
elif return_type == "labels":
return all_np_labels
elif return_type == "tokens":
return self.get_phrases(text, all_np_labels)
else:
return self.get_phrase_texts(text, all_np_labels) | Analyzes the given Text for noun phrase chunks.
As a result of the analysis, a layer NOUN_CHUNKS will be attached to the input
Text object, containing the noun phrases detected from the Text;
Note: for preprocessing the Text, MaltParser is used by default. In order
to obtain a decent performance with MaltParser, it is advisable to analyse
texts at their full extent with this method. Splitting a text into smaller
chunks, such as clauses or sentences, and analysing one-small-chunk-at-time
may be rather demanding in terms of performance, because a file-based
preprocessing is used for obtaining the dependency relations.
Parameters
----------
text: estnltk.text.Text
The input text that should be analysed for noun phrases;
force_parsing : bool
If True, uses the *self.parser* to parse the given *text*, and overrides
the syntactic annotations in *text* with the new layer obtained from the
parser;
(default: False)
syntax_layer : str
Specifies which layer of syntactic annotations should be used as a
basis for NP chunking; If the *syntax_layer* exists within the *text*
(and force_parsing==False), uses the syntactic annotations from
*text[syntax_layer]*;
(default: LAYER_CONLL)
cutPhrases: bool
If True, all phrases exceeding the cutMaxThreshold will be
cut into single word phrases, consisting only of part-of-speech
categories 'S', 'Y', 'H';
(default: True)
cutMaxThreshold: int
Threshold indicating the maximum number of words allowed in a
phrase.
If cutPhrases is set, all phrases exceeding the threshold will be
cut into single word phrases, consisting only of part-of-speech
categories 'S', 'Y', 'H';
Automatic analysis of the Balanced Corpus of Estonian suggests
that 97% of all NP chunks are likely chunks of length 1-3, thus
the default threshold is set to 3;
(default value: 3)
return_type: string
If return_type=="text" (Default),
returns the input Text object;
If return_type=="labels",
returns a list of NP labels (strings), containing a label for
each word token in Text, indicating whether the word is at the
beginning of a phrase ('B'), inside a phrase ('I') or does
not belong to any phrase ('O').
If return_type=="tokens",
returns a list of phrases, where each phrase is a list of
tokens, and each token is a dictionary representing word;
If return_type=="strings",
returns a list of text strings, where each string is phrase's
text;
Regardless of the return type, a layer named NOUN_CHUNKS will be added
to the input Text containing noun phrase annotations; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/np_chunker.py#L50-L184 |
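To make the interface above concrete, a minimal usage sketch follows. It assumes that NounPhraseChunker() can be constructed without arguments (i.e. that it wires up its own parser) and that a working MaltParser/Java setup is available; neither assumption is confirmed by this record:

from estnltk import Text
from estnltk.np_chunker import NounPhraseChunker

chunker = NounPhraseChunker()   # assumption: default constructor creates its own parser
text = Text('Eelmise nädala reedel leidsid kolleegid uue lahenduse.')

annotated = chunker.analyze_text(text)                        # default: returns the annotated Text
labels = chunker.analyze_text(text, return_type="labels")     # one 'B'/'I'/'O' label per word
strings = chunker.analyze_text(text, return_type="strings")   # phrase texts as plain strings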
estnltk/estnltk | estnltk/np_chunker.py | NounPhraseChunker._getPOS | def _getPOS( self, token, onlyFirst = True ):
''' Returns POS of the current token.
'''
if onlyFirst:
return token[ANALYSIS][0][POSTAG]
else:
return [ a[POSTAG] for a in token[ANALYSIS] ] | python | def _getPOS( self, token, onlyFirst = True ):
''' Returns POS of the current token.
'''
if onlyFirst:
return token[ANALYSIS][0][POSTAG]
else:
return [ a[POSTAG] for a in token[ANALYSIS] ] | Returns POS of the current token. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/np_chunker.py#L201-L207 |
estnltk/estnltk | estnltk/np_chunker.py | NounPhraseChunker._getPhrase | def _getPhrase( self, i, sentence, NPlabels ):
''' Fetches the full length phrase from the position i
based on the existing NP phrase annotations (from
NPlabels);
Returns list of sentence tokens in the phrase, and
indices of the phrase;
'''
phrase = []
indices = []
if 0 <= i and i < len(sentence) and NPlabels[i] == 'B':
phrase = [ sentence[i] ]
indices = [ i ]
j = i + 1
while ( j < len(sentence) ):
if NPlabels[j] in ['B', '']:
break
else:
phrase.append( sentence[j] )
indices.append( j )
j += 1
return phrase, indices | python | def _getPhrase( self, i, sentence, NPlabels ):
''' Fetches the full length phrase from the position i
based on the existing NP phrase annotations (from
NPlabels);
Returns list of sentence tokens in the phrase, and
indices of the phrase;
'''
phrase = []
indices = []
if 0 <= i and i < len(sentence) and NPlabels[i] == 'B':
phrase = [ sentence[i] ]
indices = [ i ]
j = i + 1
while ( j < len(sentence) ):
if NPlabels[j] in ['B', '']:
break
else:
phrase.append( sentence[j] )
indices.append( j )
j += 1
return phrase, indices | Fetches the full length phrase from the position i
based on the existing NP phrase annotations (from
NPlabels);
Returns list of sentence tokens in the phrase, and
indices of the phrase; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/np_chunker.py#L209-L229 |
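The B/I walk performed by _getPhrase can be illustrated with a standalone sketch that uses plain strings instead of word tokens (independent of the class; not a call into the library):

def collect_phrase(i, tokens, labels):
    # Start at a 'B' and extend over the following 'I' labels, as documented above.
    phrase, indices = [], []
    if 0 <= i < len(tokens) and labels[i] == 'B':
        phrase, indices = [tokens[i]], [i]
        j = i + 1
        while j < len(tokens) and labels[j] not in ('B', ''):
            phrase.append(tokens[j])
            indices.append(j)
            j += 1
    return phrase, indices

tokens = ['eelmise', 'nädala', 'reedel', ',', 'kolleegid']
labels = ['B', 'I', 'I', '', 'B']
print(collect_phrase(0, tokens, labels))   # (['eelmise', 'nädala', 'reedel'], [0, 1, 2])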
estnltk/estnltk | estnltk/np_chunker.py | NounPhraseChunker._getCaseAgreement | def _getCaseAgreement(self, token1, token2):
''' Detects whether there is a morphological case agreement
between two consecutive nominals (token1 and token2), and
returns the common case, or None if no agreement exists;
Applies a special set of rules for detecting agreement on
the word in genitive followed by the word in ter, es, ab or
kom.
'''
forms1 = set( [a[FORM] for a in token1[ANALYSIS]] )
forms2 = set( [a[FORM] for a in token2[ANALYSIS]] )
if len(forms1.intersection(forms2))==0:
# Kontrollime ka ni-na-ta-ga k22ndeid:
if 'sg g' in forms1:
if 'sg ter' in forms2:
return 'sg ter'
elif 'sg es' in forms2:
return 'sg es'
elif 'sg ab' in forms2:
return 'sg ab'
elif 'sg kom' in forms2:
return 'sg kom'
elif 'pl g' in forms1:
if 'pl ter' in forms2:
return 'pl ter'
elif 'pl es' in forms2:
return 'pl es'
elif 'pl ab' in forms2:
return 'pl ab'
elif 'pl kom' in forms2:
return 'pl kom'
return None
else:
return list(forms1.intersection(forms2))[0] | python | def _getCaseAgreement(self, token1, token2):
''' Detects whether there is a morphological case agreement
between two consecutive nominals (token1 and token2), and
returns the common case, or None if no agreement exists;
Applies a special set of rules for detecting agreement on
the word in genitive followed by the word in ter, es, ab or
kom.
'''
forms1 = set( [a[FORM] for a in token1[ANALYSIS]] )
forms2 = set( [a[FORM] for a in token2[ANALYSIS]] )
if len(forms1.intersection(forms2))==0:
# Kontrollime ka ni-na-ta-ga k22ndeid:
if 'sg g' in forms1:
if 'sg ter' in forms2:
return 'sg ter'
elif 'sg es' in forms2:
return 'sg es'
elif 'sg ab' in forms2:
return 'sg ab'
elif 'sg kom' in forms2:
return 'sg kom'
elif 'pl g' in forms1:
if 'pl ter' in forms2:
return 'pl ter'
elif 'pl es' in forms2:
return 'pl es'
elif 'pl ab' in forms2:
return 'pl ab'
elif 'pl kom' in forms2:
return 'pl kom'
return None
else:
return list(forms1.intersection(forms2))[0] | Detects whether there is a morphological case agreement
between two consecutive nominals (token1 and token2), and
returns the common case, or None if no agreement exists;
Applies a special set of rules for detecting agreement on
the word in genitive followed by the word in ter, es, ab or
kom. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/np_chunker.py#L231-L263 |
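The plain-intersection branch of the agreement check can be sketched standalone on hand-made analyses. The key names 'analysis' and 'form' follow estnltk's usual constants but are assumptions here, and the ter/es/ab/kom special cases are left out:

def case_agreement(token1, token2):
    # Return a morphological form shared by both tokens, or None.
    forms1 = {a['form'] for a in token1['analysis']}
    forms2 = {a['form'] for a in token2['analysis']}
    common = forms1 & forms2
    return next(iter(common)) if common else None

adjective = {'analysis': [{'form': 'sg ad'}]}   # e.g. 'suurel'
noun      = {'analysis': [{'form': 'sg ad'}]}   # e.g. 'laual'
print(case_agreement(adjective, noun))          # 'sg ad'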
estnltk/estnltk | estnltk/np_chunker.py | NounPhraseChunker._find_phrases | def _find_phrases( self, sentence, syntax_layer, cutPhrases, cutMaxThreshold ):
''' Detects NP phrases by relying on local dependency relations:
1) Identifies potential heads of NP phrases;
2) Identifies consecutive words that can form an NP phrase:
2.1) potential attribute + potential head;
2.2) quantity word or phrase + nominal;
3) Identifies non-consecutive words (word1 __ wordN) that
can form a complete phrase (including the gap part);
4) Applies post-corrections;
Returns a list of tags, which contains a B-I-O style phrase
tag for each word in the sentence ('B'-begins phrase, 'I'-
inside phrase, or ''-not in phrase);
'''
NPattribPos = [ 'Y', 'S', 'A', 'C', 'G', 'H', 'N', 'O', 'K', 'D', 'P' ]
NPheadPos = [ 'S', 'Y', 'H' ]
NPlabels = [ '' for i in range(len(sentence)) ]
#
# 0 Faas: M2rgistame k6ik s6nad, mis v6iksid olla nimis6nafraasi peas6nad, nt:
# Kas [venelaste] [ambitsioon] on [siiras] ?
# [Raskused] algasid [aastal] 2009 , mil oli [majanduskriis] .
# [Põllumaad] on ka , aga [kartulikasvatamisega] on üksjagu [jamamist] .
#
for i in range(len(sentence)):
pos1 = self._getPOS(sentence[i])
if pos1 in NPheadPos:
NPlabels[i] = 'B'
# Lisaks märgistame ka peasõnadena ka üksikud pronoomenid, kuigi
# eeldame, et need on peasõnadena vaid üksikutes fraasides;
elif pos1 == 'P':
NPlabels[i] = 'B'
#
# I Faas: liidame yheks nimis6nafraasiks k6ik k6rvutiseisvad nimis6nafraasi
# sobivad s6nad, kui esimene on järgmise vahetu alluv, nt:
# [Venemaa otsus] alustada Süürias õhurünnakuid ...
# Kas [venelaste ambitsioon] on siiras ?
# [Järgmine hoop] tuli 2012-2013 ...
# [Eelmise nädala reedel] , [19. septembril] leidsid [kolleegid] ...
#
for i in range(len(sentence)):
label1 = i
parent1 = syntax_layer[i][PARSER_OUT][0][1]
#label1 = sentence[i][SYNTAX_LABEL]
#parent1 = sentence[i][SYNTAX_HEAD]
if i+1 < len(sentence):
label2 = i+1
parent2 = syntax_layer[i+1][PARSER_OUT][0][1]
#label2 = sentence[i+1][SYNTAX_LABEL]
#parent2 = sentence[i+1][SYNTAX_HEAD]
pos1 = self._getPOS(sentence[i])
pos2 = self._getPOS(sentence[i+1])
if int(parent1) == int(label2) and pos1 in NPattribPos and \
pos2 in NPheadPos:
if 'K' in pos1 and NPlabels[i] == '':
#
# 1) erandjuht:
# K fraasi alguses viitab peaaegu alati mingile jamale,
# seega katkestame, et vältida selliseid asju nagu:
# ... , mille [kohta Eestiski] piisavalt näiteid .
# ... lähedaste majandussidemete [tõttu Brasiiliaga] .
# ... kultuuri allutamise [vastu rahavõimule] , vaid töötavad
#
pass
elif 'D' in pos1 and not self._AdvStartingPhrase.matches(sentence[i]):
#
# 2) erandjuht:
# Lubame ainult teatud adverbe fraasi algusesse, et vältida selliseid
# juhte nagu nt:
# ... said sõna [ka lapsed] , aga kingitust
# ... kättesaamatuks [nii Nehatu hamburgeriputka] ,
# ... osta [ka hinnaga 1 kr]
# ... ette [just Los Angelese autonäitusel] ,
# TODO: M6nikord me ikkagi tahame, et D ka sees oleks, nt:
# ... filmis " Tagasi [tulevikku] " ...
pass
else:
if NPlabels[i] == '':
NPlabels[i] = 'B'
NPlabels[i+1] = 'I'
#
# II Faas: Koondame kõrvutipaiknevad ja üksteisega alluvussuhtes olevad arvsõnad/numbrid
# üheks arvudest koosnevaks "NP-fraasiks", nt:
# [Sada nelikümmend viis]
# [10 405 miljonit]
# [kaheksa miljardit]
#
for i in range(len(sentence)):
label1 = i
parent1 = syntax_layer[i][PARSER_OUT][0][1]
#label1 = sentence[i][SYNTAX_LABEL]
#parent1 = sentence[i][SYNTAX_HEAD]
pos1 = self._getPOS(sentence[i])
if pos1 in ['N', 'O'] and i+1 < len(sentence):
label2 = i+1
parent2 = syntax_layer[i+1][PARSER_OUT][0][1]
#label2 = sentence[i+1][SYNTAX_LABEL]
#parent2 = sentence[i+1][SYNTAX_HEAD]
pos2 = self._getPOS(sentence[i+1])
if pos2 in ['N', 'O'] and ( int(parent2) == int(label1) or \
int(parent1) == int(label2) ):
if NPlabels[i] == '':
NPlabels[i] = 'B'
NPlabels[i+1] = 'I'
#
# III Faas: Kleebime otsa NP-fraaside otsa järeltäienditena esinevad numbrilised
# arvud, nt:
# Üritus kandis nime [Rahu Missioon 2007.]
# Allkirjastas [1. jaanuaril 2004.]
# Saabus [uus Peugeot 307] , millest [aastatel 1987-1997] ei osatud unistadagi .
# [Perioodil 1997-2001] oli neid rohkem , [vt tabel 1.]
#
for i in range(len(sentence)):
label1 = i
parent1 = syntax_layer[i][PARSER_OUT][0][1]
#label1 = sentence[i][SYNTAX_LABEL]
#parent1 = sentence[i][SYNTAX_HEAD]
pos1 = self._getPOS(sentence[i])
if pos1 in ['N', 'O'] and i-1 > -1:
label2 = i-1
parent2 = syntax_layer[i-1][PARSER_OUT][0][1]
#label2 = sentence[i-1][SYNTAX_LABEL]
#parent2 = sentence[i-1][SYNTAX_HEAD]
if int(parent1) == int(label2) and NPlabels[i-1] != '':
NPlabels[i] = 'I'
#
# IV Faas: Kleebime arvufraaside(hulgafraaside) otsa järeltäienditena esinevad
# nimisõnad, nt:
# Meri laius [100 kilomeetri] pikkusena .
# Aasta alguseks oli mahust järel [30-40 protsenti] .
# Jah , [kümne aasta] tagusega ei maksa siin üldse võrrelda .
# Mees nõudis [10 miljonit dollarit] kahjutasu [10-15 cm] kaugusele .
# Eelmisel nädalal võttis endalt elu veel [kaks politseiametnikku] .
# Kujutlesin [kaheksa miljonit aastat] vana küpressimetsa [mitukümmend aastat] nooremana .
#
for i in range(len(sentence)):
label1 = i
parent1 = syntax_layer[i][PARSER_OUT][0][1]
#label1 = sentence[i][SYNTAX_LABEL]
#parent1 = sentence[i][SYNTAX_HEAD]
pos1 = self._getPOS(sentence[i])
if pos1 in ['N', 'O'] and i+1 < len(sentence):
label2 = i+1
parent2 = syntax_layer[i+1][PARSER_OUT][0][1]
#label2 = sentence[i+1][SYNTAX_LABEL]
#parent2 = sentence[i+1][SYNTAX_HEAD]
pos2 = self._getPOS(sentence[i+1])
if int(parent2) == int(label1) and NPlabels[i+1] != '' and pos2 != 'P':
if NPlabels[i]=='':
NPlabels[i] = 'B'
NPlabels[i+1] = 'I'
#
# V Faas: Kui NP-fraasi l6pus on arvu/numbrifraas ( N_Y voi N_S ), siis t6stame arvufraasi
# lahku, isegi kui see teatud m22ral l6huks NP-fraasi, nt
# [pindala 48 ha] ==> [pindala] [48 ha]
# [Järvamaal 36 283 inimest] ==> [Järvamaal] [36 283 inimest]
# [kasahhid 3 %] ==> [kasahhid] [3 %]
# [Tallinna Lihatööstuse aktsiatest 80 protsenti] ==>
# [Tallinna Lihatööstuse aktsiatest] [80 protsenti]
#
for i in range( len(sentence) ):
if NPlabels[i] == 'B':
phrase, indices = self._getPhrase( i, sentence, NPlabels )
posTags = [ self._getPOS(tok) for tok in phrase ]
if len(phrase)>2 and posTags[-1] in ['S','Y'] and posTags[-2]=='N' and \
posTags[0] not in ['N', 'O']:
#
# Lisakontroll: tegu ei tohiks olla aastarvuga, nt:
# [Eesti Tervishoiuprojekt] [2015 Lisaks]
# [Prantsusmaa loobumine EXPO] [2004 korraldamisest]
#
yearCheck = re.match('.*\d\d\d\d.*', phrase[-2][TEXT])
#
# Lisakontroll: kui eelneb rohkem kui yks arv, siis tuleb
# poolitamispunkti nihutada, nt:
# [Viinis 170] [000 USA dollari]
# [Järvamaal 36] [283 inimest]
#
breakPoint = indices[-2]
j = -3
while posTags[j] == 'N':
breakPoint = indices[j]
j -= 1
if not yearCheck:
NPlabels[breakPoint] = 'B'
#
# VI Faas: Kui NP-fraasi sobiva s6na vanem on +2 v6i rohkema s6na kaugusel s6na j2rel,
# siis pole s6na veel fraasi arvatud;
# Arvame ta fraasi j2rgmistel juhtudel:
# Eelnev j2rgarv, nt:
# ... pühendus meenutab [teisigi] [Põhjala suurmehi] , nagu ...
# ... enamikus jäid [esimese] [Eesti Vabariigi aegadesse] ...
# Eelnev omaduss6na, nt:
# ... makett ühest [olulisest] [jõeäärsest tänavast] sellisena ,
# ... soojendades ja [suures] soojaks [köetud telgis] kuuma teed ...
#
for i in range(len(sentence)-1, -1, -1):
label1 = i
parent1 = syntax_layer[i][PARSER_OUT][0][1]
#label1 = sentence[i][SYNTAX_LABEL]
#parent1 = sentence[i][SYNTAX_HEAD]
pos1 = self._getPOS(sentence[i])
parentRelativeLoc = int(parent1) - int(label1)
if pos1 in NPattribPos and NPlabels[i]=='' and parentRelativeLoc > 1 and \
i+parentRelativeLoc < len(sentence):
label2 = i+parentRelativeLoc
parent2 = syntax_layer[i+parentRelativeLoc][PARSER_OUT][0][1]
#label2 = sentence[i+parentRelativeLoc][SYNTAX_LABEL]
#parent2 = sentence[i+parentRelativeLoc][SYNTAX_HEAD]
if int(parent1) == int(label2) and NPlabels[i+parentRelativeLoc] != '':
#
# Kogume kokku k6ik kahe s6na vahele j22vad token'id:
#
interveningTokens = []
interveningTokenIDs = []
j = i + 1
while ( j < i + parentRelativeLoc ):
interveningTokens.append( sentence[j] )
interveningTokenIDs.append( j )
j += 1
#
# Eemaldame neist tokenid, mis juba kuuluvad fraasi:
#
if NPlabels[i+parentRelativeLoc] == 'I':
while ( len(interveningTokenIDs) > 0 ):
lastID = interveningTokenIDs.pop()
lastToken = interveningTokens.pop()
if NPlabels[lastID] == 'B':
# Kui j6udsime fraasi alguseni, siis l6petame
break
#
# Kontroll1: s6na ja j2rgneva s6na vahele ei tohi j22da
# punktuatsiooni ega sidendeid, kuna need j2tame alati
# fraasist v2lja;
#
punctIntervening = False
jaNingEgaVoi = False
for interToken in interveningTokens:
if self._punctPos.matches( interToken ):
punctIntervening = True
if self._jaNingEgaVoi.matches( interToken ):
jaNingEgaVoi = True
#
# Leiame s6na ja tema ylema vahelise k22ndeyhilduvuse;
#
caseAgreement = \
self._getCaseAgreement(sentence[i], sentence[i+parentRelativeLoc])
if pos1 == 'O' and not punctIntervening and not jaNingEgaVoi and \
caseAgreement != None:
if len(interveningTokenIDs) == 0:
#
# VI.a. Eelnev s6na on k22ndes yhilduv j2rgarv, nt:
# ... nagu ka teised [Eesti pered] , iga ...
# ... mil esimene [Tšetšeenia sõda] käis täie ...
# ... ka mõnedel teistel [mineraalsetel kütetel] peale autobensiini ...
# ... on pärit kolmandast [Moosese raamatust] ehk leviitide ...
#
NPlabels[i] = 'B'
NPlabels[i+1] = 'I'
else:
#
# VI.b. Eelnev s6na on k22ndes yhilduv j2rgarv, ning vahele j22vad
# ainult k22ndes yhilduvad s6nad, nt:
# ... Teised sõjavastased [Euroopa riigid] ilmselt avaldavad ...
# ... tõi ära esimesed pesuehtsad [punased värsid] . ...
# ... Esimene üleriigiline [automatiseeritud haiguseregister] - vähiregister ...
#
agreements = [self._getCaseAgreement(interTok, sentence[i+parentRelativeLoc]) for interTok in interveningTokens]
if all(agreements):
NPlabels[i] = 'B'
j = i + 1
while ( j <= i + parentRelativeLoc ):
NPlabels[j] = 'I'
j += 1
if pos1 in ['A','G'] and not punctIntervening and not jaNingEgaVoi and \
caseAgreement != None:
#
# Lisakontroll 1:
# Jätame algusesse lisamata kesksõnadena esinevad sõnad, kuna
# nende puhul on tõenäoliselt tegemist millegi keerukamaga (nn
# lauselühendiga):
# ... Pingilt sekkunud [Chris Anstey] viskas ...
# ... NBA meistriks tüürinud [Phil Jackson] ...
# ... kaasaegsele maailmale mittevastav [teoreetiline lähenemine] ...
#
isVerbParticle = self._verbParticle.matches(sentence[i])
#
# Lisakontroll 2:
# Kui omaduss6na ja fraasi vahele j22b ka teisi s6nu, teeme
# kindlaks, et need s6nad poleks s6naliikidest V, D, J, mis
# on probleemsed, nt:
# D : ... skreipi nii [pärilik] kui ka [nakkav haigus] ...
# V : ... 2002. aasta [keskmine] purunenud [terade saak] ...
# J : ... oleks maakondadele [sobilik] kuni [14-rühmaline loend] Eesti ...
#
interveningProblematicPOS = False
if len(interveningTokenIDs) > 0:
iPosTags = [ a[POSTAG] for t1 in interveningTokens for a in t1[ANALYSIS] ]
interveningProblematicPOS = \
'V' in iPosTags or 'D' in iPosTags or 'J' in iPosTags or 'Z' in iPosTags
if not isVerbParticle and len(interveningTokenIDs) == 0:
#
# VI.c. Eelnev s6na on k22ndes yhilduv ja vahetult eelnev
# omaduss6na (v.a. kesks6na), nt:
# ... peeti pidu karmi [vene korra] ajal ning ...
# ... äravajunud , arhailisest [Suurbritannia nurgast] ...
# ... , võimaldades uutel [vapratel riikidel] kahel pool ...
#
NPlabels[i] = 'B'
NPlabels[i+1] = 'I'
elif not isVerbParticle and len(interveningTokenIDs) > 0 and \
not interveningProblematicPOS:
#
# VI.d. Eelnev s6na on k22ndes yhilduv omaduss6na (v.a. kesks6na)
# ning vahele j22b veel v2hemalt yks sobiva POS tag'iga
# s6na, nt:
# ... korral on [tavaline] tugev [päevane unisus] , ...
# ... mõjus silmadele [vana] mustvalge pisike [ekraan] ...
# ... on enesekindel [valgete] higiste [kätega intelligent] ...
#
NPlabels[i] = 'B'
j = i + 1
while ( j <= i + parentRelativeLoc ):
NPlabels[j] = 'I'
j += 1
if pos1 in ['C'] and not punctIntervening and not jaNingEgaVoi and \
caseAgreement != None:
if i - 1 > -1 and self._k6ige.matches( sentence[i - 1] ):
#
# VI.e. Eelnev s6na on k22ndes yhilduv keskv6rde omaduss6na,
# millele eelneb yliv6rde tunnus 'k6ige', nt:
# ... juhib perekonda kõige noorem [täiskasvanud naine] . ...
# ... Kõige suurem [akustiline erinevus] oli vokaalide ...
# ... on kõige levinumad [antikolinergilised ravimid] ...
#
NPlabels[i-1] = 'B'
j = i
while ( j <= i + parentRelativeLoc ):
NPlabels[j] = 'I'
j += 1
elif re.match('^(pl\s.+|sg\s(ab|abl|ad|all|el|es|ill|in|kom|ter|tr))$', \
caseAgreement):
#
# VI.f. Eelnev s6na on k22ndes yhilduv keskv6rde omaduss6na,
# mis on kas mitmuses v6i yhildub semantilise k22ndega, nt:
# ... olnud üks aktiivsemaid [NATO rahupartnereid] . ...
# ... meestel lisandub halvemale [füüsilisele tervisele] veel ...
# ... Varasemates [samalaadsetes uurimustes] on laste ...
# (grammatilise ainsusek22nde puhul ei pruugi nii kindel
# olla, et kuulub just nimis6nafraasi juurde: v6ib kuuluda
# ka (olema) verbifraasi juurde)
#
NPlabels[i] = 'B'
j = i + 1
while ( j <= i + parentRelativeLoc ):
NPlabels[j] = 'I'
j += 1
#ex = self.__debug_extract_NP_from_pos(sentence, NPlabels, i-1, i+parentRelativeLoc)
#try:
# print(sentence[i][TEXT]+' | '+sentence[i+parentRelativeLoc][TEXT]+' | '+pos1+" | "+ex)
#except:
# print(' ### Err ###')
#
# Viimane faas: rakendame nn j2relparandusi, proovime pahna v2lja visata ...
#
self._apply_post_fixes( sentence, NPlabels, cutPhrases, cutMaxThreshold )
return NPlabels | python | def _find_phrases( self, sentence, syntax_layer, cutPhrases, cutMaxThreshold ):
''' Detects NP phrases by relying on local dependency relations:
1) Identifies potential heads of NP phrases;
2) Identifies consecutive words that can form an NP phrase:
2.1) potential attribute + potential head;
2.2) quantity word or phrase + nominal;
3) Identifies non-consecutive words (word1 __ wordN) that
can form a complete phrase (including the gap part);
4) Applies post-corrections;
Returns a list of tags, which contains a B-I-O style phrase
tag for each word in the sentence ('B'-begins phrase, 'I'-
inside phrase, or ''-not in phrase);
'''
NPattribPos = [ 'Y', 'S', 'A', 'C', 'G', 'H', 'N', 'O', 'K', 'D', 'P' ]
NPheadPos = [ 'S', 'Y', 'H' ]
NPlabels = [ '' for i in range(len(sentence)) ]
#
# 0 Faas: M2rgistame k6ik s6nad, mis v6iksid olla nimis6nafraasi peas6nad, nt:
# Kas [venelaste] [ambitsioon] on [siiras] ?
# [Raskused] algasid [aastal] 2009 , mil oli [majanduskriis] .
# [Põllumaad] on ka , aga [kartulikasvatamisega] on üksjagu [jamamist] .
#
for i in range(len(sentence)):
pos1 = self._getPOS(sentence[i])
if pos1 in NPheadPos:
NPlabels[i] = 'B'
# Lisaks märgistame ka peasõnadena ka üksikud pronoomenid, kuigi
# eeldame, et need on peasõnadena vaid üksikutes fraasides;
elif pos1 == 'P':
NPlabels[i] = 'B'
#
# I Faas: liidame yheks nimis6nafraasiks k6ik k6rvutiseisvad nimis6nafraasi
# sobivad s6nad, kui esimene on järgmise vahetu alluv, nt:
# [Venemaa otsus] alustada Süürias õhurünnakuid ...
# Kas [venelaste ambitsioon] on siiras ?
# [Järgmine hoop] tuli 2012-2013 ...
# [Eelmise nädala reedel] , [19. septembril] leidsid [kolleegid] ...
#
for i in range(len(sentence)):
label1 = i
parent1 = syntax_layer[i][PARSER_OUT][0][1]
#label1 = sentence[i][SYNTAX_LABEL]
#parent1 = sentence[i][SYNTAX_HEAD]
if i+1 < len(sentence):
label2 = i+1
parent2 = syntax_layer[i+1][PARSER_OUT][0][1]
#label2 = sentence[i+1][SYNTAX_LABEL]
#parent2 = sentence[i+1][SYNTAX_HEAD]
pos1 = self._getPOS(sentence[i])
pos2 = self._getPOS(sentence[i+1])
if int(parent1) == int(label2) and pos1 in NPattribPos and \
pos2 in NPheadPos:
if 'K' in pos1 and NPlabels[i] == '':
#
# 1) erandjuht:
# K fraasi alguses viitab peaaegu alati mingile jamale,
# seega katkestame, et vältida selliseid asju nagu:
# ... , mille [kohta Eestiski] piisavalt näiteid .
# ... lähedaste majandussidemete [tõttu Brasiiliaga] .
# ... kultuuri allutamise [vastu rahavõimule] , vaid töötavad
#
pass
elif 'D' in pos1 and not self._AdvStartingPhrase.matches(sentence[i]):
#
# 2) erandjuht:
# Lubame ainult teatud adverbe fraasi algusesse, et vältida selliseid
# juhte nagu nt:
# ... said sõna [ka lapsed] , aga kingitust
# ... kättesaamatuks [nii Nehatu hamburgeriputka] ,
# ... osta [ka hinnaga 1 kr]
# ... ette [just Los Angelese autonäitusel] ,
# TODO: M6nikord me ikkagi tahame, et D ka sees oleks, nt:
# ... filmis " Tagasi [tulevikku] " ...
pass
else:
if NPlabels[i] == '':
NPlabels[i] = 'B'
NPlabels[i+1] = 'I'
#
# II Faas: Koondame kõrvutipaiknevad ja üksteisega alluvussuhtes olevad arvsõnad/numbrid
# üheks arvudest koosnevaks "NP-fraasiks", nt:
# [Sada nelikümmend viis]
# [10 405 miljonit]
# [kaheksa miljardit]
#
for i in range(len(sentence)):
label1 = i
parent1 = syntax_layer[i][PARSER_OUT][0][1]
#label1 = sentence[i][SYNTAX_LABEL]
#parent1 = sentence[i][SYNTAX_HEAD]
pos1 = self._getPOS(sentence[i])
if pos1 in ['N', 'O'] and i+1 < len(sentence):
label2 = i+1
parent2 = syntax_layer[i+1][PARSER_OUT][0][1]
#label2 = sentence[i+1][SYNTAX_LABEL]
#parent2 = sentence[i+1][SYNTAX_HEAD]
pos2 = self._getPOS(sentence[i+1])
if pos2 in ['N', 'O'] and ( int(parent2) == int(label1) or \
int(parent1) == int(label2) ):
if NPlabels[i] == '':
NPlabels[i] = 'B'
NPlabels[i+1] = 'I'
#
# III Faas: Kleebime otsa NP-fraaside otsa järeltäienditena esinevad numbrilised
# arvud, nt:
# Üritus kandis nime [Rahu Missioon 2007.]
# Allkirjastas [1. jaanuaril 2004.]
# Saabus [uus Peugeot 307] , millest [aastatel 1987-1997] ei osatud unistadagi .
# [Perioodil 1997-2001] oli neid rohkem , [vt tabel 1.]
#
for i in range(len(sentence)):
label1 = i
parent1 = syntax_layer[i][PARSER_OUT][0][1]
#label1 = sentence[i][SYNTAX_LABEL]
#parent1 = sentence[i][SYNTAX_HEAD]
pos1 = self._getPOS(sentence[i])
if pos1 in ['N', 'O'] and i-1 > -1:
label2 = i-1
parent2 = syntax_layer[i-1][PARSER_OUT][0][1]
#label2 = sentence[i-1][SYNTAX_LABEL]
#parent2 = sentence[i-1][SYNTAX_HEAD]
if int(parent1) == int(label2) and NPlabels[i-1] != '':
NPlabels[i] = 'I'
#
# IV Faas: Kleebime arvufraaside(hulgafraaside) otsa järeltäienditena esinevad
# nimisõnad, nt:
# Meri laius [100 kilomeetri] pikkusena .
# Aasta alguseks oli mahust järel [30-40 protsenti] .
# Jah , [kümne aasta] tagusega ei maksa siin üldse võrrelda .
# Mees nõudis [10 miljonit dollarit] kahjutasu [10-15 cm] kaugusele .
# Eelmisel nädalal võttis endalt elu veel [kaks politseiametnikku] .
# Kujutlesin [kaheksa miljonit aastat] vana küpressimetsa [mitukümmend aastat] nooremana .
#
for i in range(len(sentence)):
label1 = i
parent1 = syntax_layer[i][PARSER_OUT][0][1]
#label1 = sentence[i][SYNTAX_LABEL]
#parent1 = sentence[i][SYNTAX_HEAD]
pos1 = self._getPOS(sentence[i])
if pos1 in ['N', 'O'] and i+1 < len(sentence):
label2 = i+1
parent2 = syntax_layer[i+1][PARSER_OUT][0][1]
#label2 = sentence[i+1][SYNTAX_LABEL]
#parent2 = sentence[i+1][SYNTAX_HEAD]
pos2 = self._getPOS(sentence[i+1])
if int(parent2) == int(label1) and NPlabels[i+1] != '' and pos2 != 'P':
if NPlabels[i]=='':
NPlabels[i] = 'B'
NPlabels[i+1] = 'I'
#
# V Faas: Kui NP-fraasi l6pus on arvu/numbrifraas ( N_Y voi N_S ), siis t6stame arvufraasi
# lahku, isegi kui see teatud m22ral l6huks NP-fraasi, nt
# [pindala 48 ha] ==> [pindala] [48 ha]
# [Järvamaal 36 283 inimest] ==> [Järvamaal] [36 283 inimest]
# [kasahhid 3 %] ==> [kasahhid] [3 %]
# [Tallinna Lihatööstuse aktsiatest 80 protsenti] ==>
# [Tallinna Lihatööstuse aktsiatest] [80 protsenti]
#
for i in range( len(sentence) ):
if NPlabels[i] == 'B':
phrase, indices = self._getPhrase( i, sentence, NPlabels )
posTags = [ self._getPOS(tok) for tok in phrase ]
if len(phrase)>2 and posTags[-1] in ['S','Y'] and posTags[-2]=='N' and \
posTags[0] not in ['N', 'O']:
#
# Lisakontroll: tegu ei tohiks olla aastarvuga, nt:
# [Eesti Tervishoiuprojekt] [2015 Lisaks]
# [Prantsusmaa loobumine EXPO] [2004 korraldamisest]
#
yearCheck = re.match('.*\d\d\d\d.*', phrase[-2][TEXT])
#
# Lisakontroll: kui eelneb rohkem kui yks arv, siis tuleb
# poolitamispunkti nihutada, nt:
# [Viinis 170] [000 USA dollari]
# [Järvamaal 36] [283 inimest]
#
breakPoint = indices[-2]
j = -3
while posTags[j] == 'N':
breakPoint = indices[j]
j -= 1
if not yearCheck:
NPlabels[breakPoint] = 'B'
#
# VI Faas: Kui NP-fraasi sobiva s6na vanem on +2 v6i rohkema s6na kaugusel s6na j2rel,
# siis pole s6na veel fraasi arvatud;
# Arvame ta fraasi j2rgmistel juhtudel:
# Eelnev j2rgarv, nt:
# ... pühendus meenutab [teisigi] [Põhjala suurmehi] , nagu ...
# ... enamikus jäid [esimese] [Eesti Vabariigi aegadesse] ...
# Eelnev omaduss6na, nt:
# ... makett ühest [olulisest] [jõeäärsest tänavast] sellisena ,
# ... soojendades ja [suures] soojaks [köetud telgis] kuuma teed ...
#
for i in range(len(sentence)-1, -1, -1):
label1 = i
parent1 = syntax_layer[i][PARSER_OUT][0][1]
#label1 = sentence[i][SYNTAX_LABEL]
#parent1 = sentence[i][SYNTAX_HEAD]
pos1 = self._getPOS(sentence[i])
parentRelativeLoc = int(parent1) - int(label1)
if pos1 in NPattribPos and NPlabels[i]=='' and parentRelativeLoc > 1 and \
i+parentRelativeLoc < len(sentence):
label2 = i+parentRelativeLoc
parent2 = syntax_layer[i+parentRelativeLoc][PARSER_OUT][0][1]
#label2 = sentence[i+parentRelativeLoc][SYNTAX_LABEL]
#parent2 = sentence[i+parentRelativeLoc][SYNTAX_HEAD]
if int(parent1) == int(label2) and NPlabels[i+parentRelativeLoc] != '':
#
# Kogume kokku k6ik kahe s6na vahele j22vad token'id:
#
interveningTokens = []
interveningTokenIDs = []
j = i + 1
while ( j < i + parentRelativeLoc ):
interveningTokens.append( sentence[j] )
interveningTokenIDs.append( j )
j += 1
#
# Eemaldame neist tokenid, mis juba kuuluvad fraasi:
#
if NPlabels[i+parentRelativeLoc] == 'I':
while ( len(interveningTokenIDs) > 0 ):
lastID = interveningTokenIDs.pop()
lastToken = interveningTokens.pop()
if NPlabels[lastID] == 'B':
# Kui j6udsime fraasi alguseni, siis l6petame
break
#
# Kontroll1: s6na ja j2rgneva s6na vahele ei tohi j22da
# punktuatsiooni ega sidendeid, kuna need j2tame alati
# fraasist v2lja;
#
punctIntervening = False
jaNingEgaVoi = False
for interToken in interveningTokens:
if self._punctPos.matches( interToken ):
punctIntervening = True
if self._jaNingEgaVoi.matches( interToken ):
jaNingEgaVoi = True
#
# Leiame s6na ja tema ylema vahelise k22ndeyhilduvuse;
#
caseAgreement = \
self._getCaseAgreement(sentence[i], sentence[i+parentRelativeLoc])
if pos1 == 'O' and not punctIntervening and not jaNingEgaVoi and \
caseAgreement != None:
if len(interveningTokenIDs) == 0:
#
# VI.a. Eelnev s6na on k22ndes yhilduv j2rgarv, nt:
# ... nagu ka teised [Eesti pered] , iga ...
# ... mil esimene [Tšetšeenia sõda] käis täie ...
# ... ka mõnedel teistel [mineraalsetel kütetel] peale autobensiini ...
# ... on pärit kolmandast [Moosese raamatust] ehk leviitide ...
#
NPlabels[i] = 'B'
NPlabels[i+1] = 'I'
else:
#
# VI.b. Eelnev s6na on k22ndes yhilduv j2rgarv, ning vahele j22vad
# ainult k22ndes yhilduvad s6nad, nt:
# ... Teised sõjavastased [Euroopa riigid] ilmselt avaldavad ...
# ... tõi ära esimesed pesuehtsad [punased värsid] . ...
# ... Esimene üleriigiline [automatiseeritud haiguseregister] - vähiregister ...
#
agreements = [self._getCaseAgreement(interTok, sentence[i+parentRelativeLoc]) for interTok in interveningTokens]
if all(agreements):
NPlabels[i] = 'B'
j = i + 1
while ( j <= i + parentRelativeLoc ):
NPlabels[j] = 'I'
j += 1
if pos1 in ['A','G'] and not punctIntervening and not jaNingEgaVoi and \
caseAgreement != None:
#
# Lisakontroll 1:
# Jätame algusesse lisamata kesksõnadena esinevad sõnad, kuna
# nende puhul on tõenäoliselt tegemist millegi keerukamaga (nn
# lauselühendiga):
# ... Pingilt sekkunud [Chris Anstey] viskas ...
# ... NBA meistriks tüürinud [Phil Jackson] ...
# ... kaasaegsele maailmale mittevastav [teoreetiline lähenemine] ...
#
isVerbParticle = self._verbParticle.matches(sentence[i])
#
# Lisakontroll 2:
# Kui omaduss6na ja fraasi vahele j22b ka teisi s6nu, teeme
# kindlaks, et need s6nad poleks s6naliikidest V, D, J, mis
# on probleemsed, nt:
# D : ... skreipi nii [pärilik] kui ka [nakkav haigus] ...
# V : ... 2002. aasta [keskmine] purunenud [terade saak] ...
# J : ... oleks maakondadele [sobilik] kuni [14-rühmaline loend] Eesti ...
#
interveningProblematicPOS = False
if len(interveningTokenIDs) > 0:
iPosTags = [ a[POSTAG] for t1 in interveningTokens for a in t1[ANALYSIS] ]
interveningProblematicPOS = \
'V' in iPosTags or 'D' in iPosTags or 'J' in iPosTags or 'Z' in iPosTags
if not isVerbParticle and len(interveningTokenIDs) == 0:
#
# VI.c. Eelnev s6na on k22ndes yhilduv ja vahetult eelnev
# omaduss6na (v.a. kesks6na), nt:
# ... peeti pidu karmi [vene korra] ajal ning ...
# ... äravajunud , arhailisest [Suurbritannia nurgast] ...
# ... , võimaldades uutel [vapratel riikidel] kahel pool ...
#
NPlabels[i] = 'B'
NPlabels[i+1] = 'I'
elif not isVerbParticle and len(interveningTokenIDs) > 0 and \
not interveningProblematicPOS:
#
# VI.d. Eelnev s6na on k22ndes yhilduv omaduss6na (v.a. kesks6na)
# ning vahele j22b veel v2hemalt yks sobiva POS tag'iga
# s6na, nt:
# ... korral on [tavaline] tugev [päevane unisus] , ...
# ... mõjus silmadele [vana] mustvalge pisike [ekraan] ...
# ... on enesekindel [valgete] higiste [kätega intelligent] ...
#
NPlabels[i] = 'B'
j = i + 1
while ( j <= i + parentRelativeLoc ):
NPlabels[j] = 'I'
j += 1
if pos1 in ['C'] and not punctIntervening and not jaNingEgaVoi and \
caseAgreement != None:
if i - 1 > -1 and self._k6ige.matches( sentence[i - 1] ):
#
# VI.e. Eelnev s6na on k22ndes yhilduv keskv6rde omaduss6na,
# millele eelneb yliv6rde tunnus 'k6ige', nt:
# ... juhib perekonda kõige noorem [täiskasvanud naine] . ...
# ... Kõige suurem [akustiline erinevus] oli vokaalide ...
# ... on kõige levinumad [antikolinergilised ravimid] ...
#
NPlabels[i-1] = 'B'
j = i
while ( j <= i + parentRelativeLoc ):
NPlabels[j] = 'I'
j += 1
elif re.match('^(pl\s.+|sg\s(ab|abl|ad|all|el|es|ill|in|kom|ter|tr))$', \
caseAgreement):
#
# VI.f. Eelnev s6na on k22ndes yhilduv keskv6rde omaduss6na,
# mis on kas mitmuses v6i yhildub semantilise k22ndega, nt:
# ... olnud üks aktiivsemaid [NATO rahupartnereid] . ...
# ... meestel lisandub halvemale [füüsilisele tervisele] veel ...
# ... Varasemates [samalaadsetes uurimustes] on laste ...
# (grammatilise ainsusek22nde puhul ei pruugi nii kindel
# olla, et kuulub just nimis6nafraasi juurde: v6ib kuuluda
# ka (olema) verbifraasi juurde)
#
NPlabels[i] = 'B'
j = i + 1
while ( j <= i + parentRelativeLoc ):
NPlabels[j] = 'I'
j += 1
#ex = self.__debug_extract_NP_from_pos(sentence, NPlabels, i-1, i+parentRelativeLoc)
#try:
# print(sentence[i][TEXT]+' | '+sentence[i+parentRelativeLoc][TEXT]+' | '+pos1+" | "+ex)
#except:
# print(' ### Err ###')
#
# Viimane faas: rakendame nn j2relparandusi, proovime pahna v2lja visata ...
#
self._apply_post_fixes( sentence, NPlabels, cutPhrases, cutMaxThreshold )
return NPlabels | Detects NP phrases by relying on local dependency relations:
1) Identifies potential heads of NP phrases;
2) Identifies consecutive words that can form an NP phrase:
2.1) potential attribute + potential head;
2.2) quantity word or phrase + nominal;
3) Identifies non-consecutive words (word1 __ wordN) that
can form a complete phrase (including the gap part);
4) Applies post-corrections;
Returns a list of tags, which contains a B-I-O style phrase
tag for each word in the sentence ('B'-begins phrase, 'I'-
inside phrase, or ''-not in phrase); | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/np_chunker.py#L270-L647 |
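To read the returned tags, it helps to render them as bracketed phrases; the small standalone helper below does that for hand-made example labels (a visualisation sketch, not part of the chunker):

def bracket(words, labels):
    # Render per-word 'B'/'I'/'' labels as bracketed phrases.
    out, open_phrase = [], False
    for word, label in zip(words, labels):
        if label == 'B':
            if open_phrase:
                out.append(']')
            out.append('[')
            open_phrase = True
        elif label == '' and open_phrase:
            out.append(']')
            open_phrase = False
        out.append(word)
    if open_phrase:
        out.append(']')
    return ' '.join(out)

words = ['Venemaa', 'otsus', 'alustada', 'Süürias', 'õhurünnakuid']
labels = ['B', 'I', '', 'B', 'B']
print(bracket(words, labels))   # [ Venemaa otsus ] alustada [ Süürias ] [ õhurünnakuid ]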
estnltk/estnltk | estnltk/np_chunker.py | NounPhraseChunker._apply_post_fixes | def _apply_post_fixes( self, sentence, NPlabels, cutPhrases, cutMaxThreshold ):
''' Post-corrections of the phrase detector:
*) Removal of letter-spaced text (spaced-out text is not necessarily
a phrase; it may be, e.g., a whole sentence);
*) Removal of problematic participles from the beginning of a phrase;
*) Removal of phrases consisting only of numerals;
*) ...
*) Correction of B/I labels;
*) Cutting phrases to a suitable length (if cutPhrases==True and
cutMaxThreshold is set);
'''
for i in range( len(sentence) ):
if NPlabels[i] == 'B':
phrase, indices = self._getPhrase( i, sentence, NPlabels )
posTags = [ self._getPOS(tok) for tok in phrase ]
#
# 1) Eemaldame tekstis6rendused, mis kogemata kombel on loetud
# eraldi s6nadeks ja s6nade liitmise abil saadud fraasid, nt:
# et [õ i g e k e e l s u s r a a m a t u i s] sisaldub
# , [k u s v õ i m i l l a l] ma
#
if len(posTags) > 1 and len( set(posTags).difference(set(['Y'])) )==0:
# Kustutame fraasi
for k in indices:
NPlabels[k] = ''
if len(posTags) > 1 and posTags[0] == 'A':
forms = [ a[FORM] for a in phrase[0][ANALYSIS] ]
if 'nud' in forms or 'tud' in forms or 'dud' in forms:
#
# 2) Eemaldame nud/tud fraasi algusest, kui nud/tud
# moodustavad toenaolisel liitoeldise, nt:
# täpselt on [jälgitud seadust] .
# Töötud on [kutsunud protestiga] liituma ka töölisi
# ise ei [saanud naeru] pidama . "
#
if i - 1 > -1 and ( \
self._verbEi.matches(sentence[i-1]) or \
self._verbOle.matches(sentence[i-1]) ):
NPlabels[i] = ''
#print(self.__debug_extract_NP_from_pos(sentence, NPlabels, i))
if len(phrase) > 1 and set(posTags).issubset(set(['O', 'N'])):
#
# 3) Eemaldame vaid arvs6nadest koosnevad fraasid, nt:
# , vaid [800 miljonit] .
# põhjustel umbes [100 000.]
# kolmandat ja [kolmas neljandat] .
# üleeuroopalisel konkursil [esimese kolme] hulka .
# 1990. aastate [teisel poolel] .
# võitis küll [esimese veerandi 31] : 13 ,
#
for k in indices:
NPlabels[k] = ''
if posTags.count( 'N' ) > 7:
#
# 4) Eemaldame ylipikaks veninud numbrifraasid (kuna on kahtlus,
# et sellisel juhul pole tegu mitte numbrifraasiga, vaid
# mingi loetelu/tabeli vms-ga), nt:
# Vaip , [1 1 1 1 1 0 1 1 1 1 Vaip] [ : 2
# [1 0 0 0 0 1 1 1 1 B] Ühes Eesti ettevõttes
#
for k in range( len(indices) ):
ind = indices[k]
pos = posTags[k]
if pos == 'N' and ( k==0 or (k>0 and NPlabels[ind-1]=='') ):
NPlabels[ind] = ''
elif ( k > 0 and NPlabels[ind-1] == '' ):
NPlabels[ind] = 'B'
elif ( k > 0 and NPlabels[ind-1] != '' ):
NPlabels[ind] = 'I'
# Kontrollime, kas fraasis eelneb suurt2helisele s6nale
# mineviku kesks6na, mis pole suurt2heline;
verbPartFollowedByTitle = -1
for j in range( len(phrase) ):
if self._verbPastParticle.matches( phrase[j] ) and \
not phrase[j][TEXT].istitle() and \
j+1 < len(phrase) and \
phrase[j+1][TEXT].istitle():
verbPartFollowedByTitle = j
if verbPartFollowedByTitle == 0:
#
# 5) P2risnimele eelneva kesks6na kustutamine:
# P2risnimele eelnev kesks6na on sageli probleemne, st v6ib olla:
# a) seotud eelneva verbiga, nt:
# ... Hiibus ei [jätnud Elviiret] kiitmata ...
# ... on isa-ema kodu [vahetanud Kohila] vastu ...
# ... on aastaid [olnud Valgemäe perenaine] ...
# b) olla osa keerukamast nimis6nafraasist (lauselyhendist), nt:
# ... revolutsiooni ellu [viinud Reagan] oli ametist lahkudes ...
# ... olümpiamängude A-normi [täitnud Uusorg] ...
# Seet6ttu kustutame teatud tingimustel eelneva kesks6na maha;
#
NPlabels[indices[verbPartFollowedByTitle]] = ''
NPlabels[indices[verbPartFollowedByTitle]+1] = 'B'
if posTags[0] == 'C' and i + 1 > -1 and NPlabels[i-1] == '':
#
# 6) Puuduva 'kõige' lisamine fraasile, mille alguses on C, nt:
# ... Kõige [selgemal päeval] läksin ma taas ...
# ... Ka kõige [avarama ruumiihalusega eurooplane] talub Hiinas ...
# ... Kõige [nõrgema toimega] olid harilik puju ...
#
if self._k6ige.matches( sentence[i-1] ):
NPlabels[i-1] = 'B'
NPlabels[i] = 'I'
if posTags[0] == 'C' and len( posTags ) == 2 and posTags[1] == 'H' and \
NPlabels[i] == 'B':
#
# 7) Empiiriline tähelepanek - kui pärisnime ees on komparatiiv-
# omadussõna, siis enamasti on tegu katkise fraasiga, nt:
# ... nähtavas tulevikus [tähtsam Balkanitel] toimuv kui ...
# ... oma eluasemekuludeks [varasema Tallinnas] kehtinud ...
# ... 30 kraadi [paremal Jedinstvost] ( Ühtsusest ) ...
# Seetõttu eemaldame fraasist C;
#
NPlabels[i] = ''
NPlabels[i+1] = 'B'
# X) Kui kogemata on sattunud m6ni iseseisev 'I' (ilma eelneva 'I' v6i 'B'-ta),
# muudame selle 'B'-ks
for i in range( len(sentence) ):
if NPlabels[i] == 'I':
if i == 0 or (i-1>-1 and NPlabels[i-1] not in ['I','B']):
NPlabels[i] = 'B'
#
# Y) Kui on n6utud fraaside l6ikamine pikkuse j2rgi (j2tta alles vaid fraasid
# pikkusega N), l6ikame pikkust N yletavad fraasid juppideks nii, et alles
# j22vad vaid fraasi peas6naks sobivad s6nad;
#
if cutPhrases and cutMaxThreshold > 0:
NPheadPos = [ 'S', 'Y', 'H' ]
for i in range( len(sentence) ):
if NPlabels[i] == 'B':
phrase, indices = self._getPhrase( i, sentence, NPlabels )
posTags = [ self._getPOS(tok) for tok in phrase ]
if len(phrase) > cutMaxThreshold:
for j in range(len(phrase)):
posTag = posTags[j]
if posTag in NPheadPos:
# J2tame alles vaid nimis6nafraasi peas6nadeks
# sobivad s6nad, yksikute s6nadena;
NPlabels[indices[j]] = 'B'
else:
# Kui s6na ei sobi peas6naks, kustutame sellelt
# yldse m2rgenduse;
NPlabels[indices[j]] = '' | python | def _apply_post_fixes( self, sentence, NPlabels, cutPhrases, cutMaxThreshold ):
''' Post-corrections of the phrase detector:
*) Removal of letter-spaced text (spaced-out text is not necessarily
a phrase; it may be, e.g., a whole sentence);
*) Removal of problematic participles from the beginning of a phrase;
*) Removal of phrases consisting only of numerals;
*) ...
*) Correction of B/I labels;
*) Cutting phrases to a suitable length (if cutPhrases==True and
cutMaxThreshold is set);
'''
for i in range( len(sentence) ):
if NPlabels[i] == 'B':
phrase, indices = self._getPhrase( i, sentence, NPlabels )
posTags = [ self._getPOS(tok) for tok in phrase ]
#
# 1) Eemaldame tekstis6rendused, mis kogemata kombel on loetud
# eraldi s6nadeks ja s6nade liitmise abil saadud fraasid, nt:
# et [õ i g e k e e l s u s r a a m a t u i s] sisaldub
# , [k u s v õ i m i l l a l] ma
#
if len(posTags) > 1 and len( set(posTags).difference(set(['Y'])) )==0:
# Kustutame fraasi
for k in indices:
NPlabels[k] = ''
if len(posTags) > 1 and posTags[0] == 'A':
forms = [ a[FORM] for a in phrase[0][ANALYSIS] ]
if 'nud' in forms or 'tud' in forms or 'dud' in forms:
#
# 2) Eemaldame nud/tud fraasi algusest, kui nud/tud
# moodustavad toenaolisel liitoeldise, nt:
# täpselt on [jälgitud seadust] .
# Töötud on [kutsunud protestiga] liituma ka töölisi
# ise ei [saanud naeru] pidama . "
#
if i - 1 > -1 and ( \
self._verbEi.matches(sentence[i-1]) or \
self._verbOle.matches(sentence[i-1]) ):
NPlabels[i] = ''
#print(self.__debug_extract_NP_from_pos(sentence, NPlabels, i))
if len(phrase) > 1 and set(posTags).issubset(set(['O', 'N'])):
#
# 3) Eemaldame vaid arvs6nadest koosnevad fraasid, nt:
# , vaid [800 miljonit] .
# põhjustel umbes [100 000.]
# kolmandat ja [kolmas neljandat] .
# üleeuroopalisel konkursil [esimese kolme] hulka .
# 1990. aastate [teisel poolel] .
# võitis küll [esimese veerandi 31] : 13 ,
#
for k in indices:
NPlabels[k] = ''
if posTags.count( 'N' ) > 7:
#
# 4) Eemaldame ylipikaks veninud numbrifraasid (kuna on kahtlus,
# et sellisel juhul pole tegu mitte numbrifraasiga, vaid
# mingi loetelu/tabeli vms-ga), nt:
# Vaip , [1 1 1 1 1 0 1 1 1 1 Vaip] [ : 2
# [1 0 0 0 0 1 1 1 1 B] Ühes Eesti ettevõttes
#
for k in range( len(indices) ):
ind = indices[k]
pos = posTags[k]
if pos == 'N' and ( k==0 or (k>0 and NPlabels[ind-1]=='') ):
NPlabels[ind] = ''
elif ( k > 0 and NPlabels[ind-1] == '' ):
NPlabels[ind] = 'B'
elif ( k > 0 and NPlabels[ind-1] != '' ):
NPlabels[ind] = 'I'
# Kontrollime, kas fraasis eelneb suurt2helisele s6nale
# mineviku kesks6na, mis pole suurt2heline;
verbPartFollowedByTitle = -1
for j in range( len(phrase) ):
if self._verbPastParticle.matches( phrase[j] ) and \
not phrase[j][TEXT].istitle() and \
j+1 < len(phrase) and \
phrase[j+1][TEXT].istitle():
verbPartFollowedByTitle = j
if verbPartFollowedByTitle == 0:
#
# 5) P2risnimele eelneva kesks6na kustutamine:
# P2risnimele eelnev kesks6na on sageli probleemne, st v6ib olla:
# a) seotud eelneva verbiga, nt:
# ... Hiibus ei [jätnud Elviiret] kiitmata ...
# ... on isa-ema kodu [vahetanud Kohila] vastu ...
# ... on aastaid [olnud Valgemäe perenaine] ...
# b) olla osa keerukamast nimis6nafraasist (lauselyhendist), nt:
# ... revolutsiooni ellu [viinud Reagan] oli ametist lahkudes ...
# ... olümpiamängude A-normi [täitnud Uusorg] ...
# Seet6ttu kustutame teatud tingimustel eelneva kesks6na maha;
#
NPlabels[indices[verbPartFollowedByTitle]] = ''
NPlabels[indices[verbPartFollowedByTitle]+1] = 'B'
if posTags[0] == 'C' and i + 1 > -1 and NPlabels[i-1] == '':
#
# 6) Puuduva 'kõige' lisamine fraasile, mille alguses on C, nt:
# ... Kõige [selgemal päeval] läksin ma taas ...
# ... Ka kõige [avarama ruumiihalusega eurooplane] talub Hiinas ...
# ... Kõige [nõrgema toimega] olid harilik puju ...
#
if self._k6ige.matches( sentence[i-1] ):
NPlabels[i-1] = 'B'
NPlabels[i] = 'I'
if posTags[0] == 'C' and len( posTags ) == 2 and posTags[1] == 'H' and \
NPlabels[i] == 'B':
#
# 7) Empiiriline tähelepanek - kui pärisnime ees on komparatiiv-
# omadussõna, siis enamasti on tegu katkise fraasiga, nt:
# ... nähtavas tulevikus [tähtsam Balkanitel] toimuv kui ...
# ... oma eluasemekuludeks [varasema Tallinnas] kehtinud ...
# ... 30 kraadi [paremal Jedinstvost] ( Ühtsusest ) ...
# Seetõttu eemaldame fraasist C;
#
NPlabels[i] = ''
NPlabels[i+1] = 'B'
# X) Kui kogemata on sattunud m6ni iseseisev 'I' (ilma eelneva 'I' v6i 'B'-ta),
# muudame selle 'B'-ks
for i in range( len(sentence) ):
if NPlabels[i] == 'I':
if i == 0 or (i-1>-1 and NPlabels[i-1] not in ['I','B']):
NPlabels[i] = 'B'
#
# Y) Kui on n6utud fraaside l6ikamine pikkuse j2rgi (j2tta alles vaid fraasid
# pikkusega N), l6ikame pikkust N yletavad fraasid juppideks nii, et alles
# j22vad vaid fraasi peas6naks sobivad s6nad;
#
if cutPhrases and cutMaxThreshold > 0:
NPheadPos = [ 'S', 'Y', 'H' ]
for i in range( len(sentence) ):
if NPlabels[i] == 'B':
phrase, indices = self._getPhrase( i, sentence, NPlabels )
posTags = [ self._getPOS(tok) for tok in phrase ]
if len(phrase) > cutMaxThreshold:
for j in range(len(phrase)):
posTag = posTags[j]
if posTag in NPheadPos:
# J2tame alles vaid nimis6nafraasi peas6nadeks
# sobivad s6nad, yksikute s6nadena;
NPlabels[indices[j]] = 'B'
else:
# Kui s6na ei sobi peas6naks, kustutame sellelt
# yldse m2rgenduse;
NPlabels[indices[j]] = '' | Post-corrections of the phrase detector:
*) Removal of letter-spaced text (spaced-out text is not necessarily
a phrase; it may be, e.g., a whole sentence);
*) Removal of problematic participles from the beginning of a phrase;
*) Removal of phrases consisting only of numerals;
*) ...
*) Correction of B/I labels;
*) Cutting phrases to a suitable length (if cutPhrases==True and
cutMaxThreshold is set); | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/np_chunker.py#L653-L799
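The last post-fix (phrase cutting) is the easiest to isolate; below is a standalone sketch of the rule "phrases longer than cutMaxThreshold keep only their head-word candidates (S/Y/H), each as a single-word phrase" (a re-implementation for illustration, not the library code):

def cut_long_phrases(labels, pos_tags, max_len=3, head_pos=('S', 'Y', 'H')):
    labels = list(labels)
    i = 0
    while i < len(labels):
        if labels[i] == 'B':
            j = i + 1
            while j < len(labels) and labels[j] == 'I':
                j += 1
            if j - i > max_len:
                # Overlong phrase: keep only head-word candidates as single-word phrases.
                for k in range(i, j):
                    labels[k] = 'B' if pos_tags[k] in head_pos else ''
            i = j
        else:
            i += 1
    return labels

print(cut_long_phrases(['B', 'I', 'I', 'I', 'I'], ['A', 'S', 'A', 'S', 'S']))   # ['', 'B', '', 'B', 'B']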
estnltk/estnltk | estnltk/np_chunker.py | NounPhraseChunker.get_phrases | def get_phrases(self, text, np_labels):
''' Given a Text and BIO labels (one label for each word in the Text),
extracts phrases and returns them as a list of phrases, where each phrase
is a list of word tokens belonging to the phrase;
Parameters
----------
text: estnltk.text.Text
The input Text, or a list consecutive words (dict objects).
The method attempts to automatically determine the type of
the input;
np_labels : list of str
A list of strings, containing a B-I-O label for each word in
*text*;
Returns
-------
list of (list of tokens)
List of phrases, where each phrase is a list of word tokens
belonging to the phrase;
'''
# 1) Take different inputs to common list of words format:
input_words = []
if isinstance(text, Text):
# input is Text
input_words = text.words
elif isinstance(text, list) and len(text)>0 and isinstance(text[0], dict) and \
TEXT in text[0]:
# input is a list of words
input_words = text
elif text:
raise Exception('Unexpected input text:', text)
if len(input_words) != len(np_labels):
raise Exception(' (!) Number of words ('+str(len(input_words))+\
') does not match number of labels '+str(len(np_labels)))
# 2) Extract phrases from input words:
phrases = []
for i, word in enumerate(input_words):
label = np_labels[i]
if label == 'B':
phrases.append([])
if label in ['B', 'I']:
phrases[-1].append( word )
return phrases | python | def get_phrases(self, text, np_labels):
''' Given a Text and BIO labels (one label for each word in the Text),
extracts phrases and returns them as a list of phrases, where each phrase
is a list of word tokens belonging to the phrase;
Parameters
----------
text: estnltk.text.Text
The input Text, or a list consecutive words (dict objects).
The method attempts to automatically determine the type of
the input;
np_labels : list of str
A list of strings, containing a B-I-O label for each word in
*text*;
Returns
-------
list of (list of tokens)
List of phrases, where each phrase is a list of word tokens
belonging to the phrase;
'''
# 1) Take different inputs to common list of words format:
input_words = []
if isinstance(text, Text):
# input is Text
input_words = text.words
elif isinstance(text, list) and len(text)>0 and isinstance(text[0], dict) and \
TEXT in text[0]:
# input is a list of words
input_words = text
elif text:
raise Exception('Unexpected input text:', text)
if len(input_words) != len(np_labels):
raise Exception(' (!) Number of words ('+str(len(input_words))+\
') does not match number of labels '+str(len(np_labels)))
# 2) Extract phrases from input words:
phrases = []
for i, word in enumerate(input_words):
label = np_labels[i]
if label == 'B':
phrases.append([])
if label in ['B', 'I']:
phrases[-1].append( word )
        return phrases | Given a Text and BIO labels (one label for each word in the Text),
            extracts phrases and returns them as a list of phrases, where each phrase
is a list of word tokens belonging to the phrase;
Parameters
----------
text: estnltk.text.Text
            The input Text, or a list of consecutive words (dict objects).
The method attempts to automatically determine the type of
the input;
np_labels : list of str
A list of strings, containing a B-I-O label for each word in
*text*;
Returns
-------
list of (list of tokens)
List of phrases, where each phrase is a list of word tokens
belonging to the phrase; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/np_chunker.py#L807-L852 |
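A standalone sketch of the B-I-O grouping that get_phrases() performs; the word dicts below are simplified stand-ins for estnltk's word tokens (only a 'text' key), not a real Text object:

# Group consecutive B/I-labelled words into phrases; 'O' words belong to no phrase.
words = [{'text': 'Suur'}, {'text': 'karu'}, {'text': 'magas'},
         {'text': 'pimedas'}, {'text': 'koopas'}]
np_labels = ['B', 'I', 'O', 'B', 'I']

phrases = []
for word, label in zip(words, np_labels):
    if label == 'B':                 # a new phrase starts here
        phrases.append([])
    if label in ('B', 'I'):          # attach the word to the current phrase
        phrases[-1].append(word)

print([[w['text'] for w in p] for p in phrases])
# -> [['Suur', 'karu'], ['pimedas', 'koopas']]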
estnltk/estnltk | estnltk/np_chunker.py | NounPhraseChunker.get_phrase_texts | def get_phrase_texts(self, text, np_labels):
        ''' Given a Text and a list describing text annotations in the
            B-I-O format (*np_labels*), extracts phrases and returns them as a
list of phrase texts;
Assumes that the input is same as the input acceptable for
the method NounPhraseChunker.get_phrases();
Returns
-------
list of string
Returns a list of phrase texts;
'''
phrases = self.get_phrases(text, np_labels)
texts = []
for phrase in phrases:
phrase_str = ' '.join([word[TEXT] for word in phrase])
texts.append( phrase_str )
return texts | python | def get_phrase_texts(self, text, np_labels):
        ''' Given a Text and a list describing text annotations in the
            B-I-O format (*np_labels*), extracts phrases and returns them as a
list of phrase texts;
Assumes that the input is same as the input acceptable for
the method NounPhraseChunker.get_phrases();
Returns
-------
list of string
Returns a list of phrase texts;
'''
phrases = self.get_phrases(text, np_labels)
texts = []
for phrase in phrases:
phrase_str = ' '.join([word[TEXT] for word in phrase])
texts.append( phrase_str )
        return texts | Given a Text and a list describing text annotations in the
            B-I-O format (*np_labels*), extracts phrases and returns them as a
list of phrase texts;
Assumes that the input is same as the input acceptable for
the method NounPhraseChunker.get_phrases();
Returns
-------
list of string
Returns a list of phrase texts; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/np_chunker.py#L855-L873 |
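The extracted phrases can then be turned into surface strings the way get_phrase_texts() does, by joining the word texts; again with simplified word dicts:

# Join each phrase's word texts into one string.
phrases = [[{'text': 'Suur'}, {'text': 'karu'}],
           [{'text': 'pimedas'}, {'text': 'koopas'}]]
texts = [' '.join(w['text'] for w in phrase) for phrase in phrases]
print(texts)   # -> ['Suur karu', 'pimedas koopas']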
estnltk/estnltk | estnltk/np_chunker.py | NounPhraseChunker.annotateText | def annotateText(self, text, layer, np_labels = None):
        ''' Applies this chunker to the given Text and adds the results of
the chunking as a new annotation layer to the text.
If the NP annotations are provided (via the input list
*np_labels*), uses the given NP annotations, otherwise
produces new NP_LABEL annotations via the method
self.analyze_text();
Parameters
----------
text: estnltk.text.Text
The input text where the new layer of NP chunking
annotations is to be added;
layer: str
Name of the new layer;
np_labels : list of str
Optional: A list of strings, containing a B-I-O label
for each word in *text*; If provided, uses annotations
from *np_labels*, otherwise creates new annotations
with this chunker;
Returns
-------
text
The input text where a new layer (containing NP
annotations) has been added;
'''
input_words = None
if isinstance(text, Text):
# input is Text
input_words = text.words
else:
raise Exception(' Input text should be of type Text, but it is ', text)
phrases = []
# If NP_LABEL-s are not provided, text needs to be analyzed first:
if not np_labels:
np_labels = self.analyze_text( text, return_type="labels" )
if len(input_words) != len(np_labels):
raise Exception(' (!) Number of words ('+str(len(input_words))+\
') does not match number of labels '+str(len(np_labels)))
# Fetch NP chunks
phrases = self.get_phrases( text, np_labels )
# Create and attach annotations to the Text object
annotations = []
if phrases:
for phrase in phrases:
phrase_annotation = {}
phrase_annotation[START] = phrase[0][START]
phrase_annotation[END] = phrase[-1][END]
phrase_annotation[TEXT] = ' '.join([word[TEXT] for word in phrase ])
annotations.append( phrase_annotation )
text[layer] = annotations
return text | python | def annotateText(self, text, layer, np_labels = None):
        ''' Applies this chunker to the given Text and adds the results of
the chunking as a new annotation layer to the text.
If the NP annotations are provided (via the input list
*np_labels*), uses the given NP annotations, otherwise
produces new NP_LABEL annotations via the method
self.analyze_text();
Parameters
----------
text: estnltk.text.Text
The input text where the new layer of NP chunking
annotations is to be added;
layer: str
Name of the new layer;
np_labels : list of str
Optional: A list of strings, containing a B-I-O label
for each word in *text*; If provided, uses annotations
from *np_labels*, otherwise creates new annotations
with this chunker;
Returns
-------
text
The input text where a new layer (containing NP
annotations) has been added;
'''
input_words = None
if isinstance(text, Text):
# input is Text
input_words = text.words
else:
raise Exception(' Input text should be of type Text, but it is ', text)
phrases = []
# If NP_LABEL-s are not provided, text needs to be analyzed first:
if not np_labels:
np_labels = self.analyze_text( text, return_type="labels" )
if len(input_words) != len(np_labels):
raise Exception(' (!) Number of words ('+str(len(input_words))+\
') does not match number of labels '+str(len(np_labels)))
# Fetch NP chunks
phrases = self.get_phrases( text, np_labels )
# Create and attach annotations to the Text object
annotations = []
if phrases:
for phrase in phrases:
phrase_annotation = {}
phrase_annotation[START] = phrase[0][START]
phrase_annotation[END] = phrase[-1][END]
phrase_annotation[TEXT] = ' '.join([word[TEXT] for word in phrase ])
annotations.append( phrase_annotation )
text[layer] = annotations
        return text | Applies this chunker to the given Text and adds the results of
the chunking as a new annotation layer to the text.
If the NP annotations are provided (via the input list
*np_labels*), uses the given NP annotations, otherwise
produces new NP_LABEL annotations via the method
self.analyze_text();
Parameters
----------
text: estnltk.text.Text
The input text where the new layer of NP chunking
annotations is to be added;
layer: str
Name of the new layer;
np_labels : list of str
Optional: A list of strings, containing a B-I-O label
for each word in *text*; If provided, uses annotations
from *np_labels*, otherwise creates new annotations
with this chunker;
Returns
-------
text
The input text where a new layer (containing NP
annotations) has been added; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/np_chunker.py#L879-L931 |
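A sketch of the layer-building step inside annotateText(): each phrase becomes one annotation spanning from the first word's start to the last word's end. The dicts below are simplified stand-ins for estnltk word tokens; constructing a real chunker and Text object is omitted here:

# Build one annotation (start, end, text) per extracted phrase.
phrases = [[{'text': 'Suur', 'start': 0, 'end': 4},
            {'text': 'karu', 'start': 5, 'end': 9}]]
annotations = []
for phrase in phrases:
    annotations.append({
        'start': phrase[0]['start'],                       # span begins at the first word
        'end':   phrase[-1]['end'],                        # ... and ends at the last word
        'text':  ' '.join(w['text'] for w in phrase),
    })
print(annotations)   # -> [{'start': 0, 'end': 9, 'text': 'Suur karu'}]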
estnltk/estnltk | estnltk/dividing.py | divide | def divide(elements, by, translate=False, sep=' '):
"""Divide lists `elements` and `by`.
    All elements are grouped into N bins, where N is the number of elements in the `by` list.
Parameters
----------
elements: list of dict
Elements to be grouped into bins.
by: list of dict
Elements defining the bins.
translate: bool (default: False)
When dividing, also translate start and end positions of elements.
sep: str (default ' ')
In case of multispans, what is the default text separator.
This is required in order to tag correct start, end positions of elements.
"""
outer_spans = [spans(elem) for elem in by]
return divide_by_spans(elements, outer_spans, translate=translate, sep=sep) | python | def divide(elements, by, translate=False, sep=' '):
"""Divide lists `elements` and `by`.
    All elements are grouped into N bins, where N is the number of elements in the `by` list.
Parameters
----------
elements: list of dict
Elements to be grouped into bins.
by: list of dict
Elements defining the bins.
translate: bool (default: False)
When dividing, also translate start and end positions of elements.
sep: str (default ' ')
In case of multispans, what is the default text separator.
This is required in order to tag correct start, end positions of elements.
"""
outer_spans = [spans(elem) for elem in by]
return divide_by_spans(elements, outer_spans, translate=translate, sep=sep) | Divide lists `elements` and `by`.
All elements are grouped into N bins, where N denotes the elements in `by` list.
Parameters
----------
elements: list of dict
Elements to be grouped into bins.
by: list of dict
Elements defining the bins.
translate: bool (default: False)
When dividing, also translate start and end positions of elements.
sep: str (default ' ')
In case of multispans, what is the default text separator.
This is required in order to tag correct start, end positions of elements. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/dividing.py#L338-L355 |
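A conceptual sketch of what divide() computes: words are grouped into the sentence spans that contain them. The real implementation goes through spans()/divide_by_spans() and also handles multispans and the translate/sep options; the simple start/end dicts below are assumptions for illustration only:

# Bin word spans into the sentence spans that contain them.
words = [{'text': 'Tere',   'start': 0,  'end': 4},
         {'text': '!',      'start': 4,  'end': 5},
         {'text': 'Kuidas', 'start': 6,  'end': 12},
         {'text': 'läheb',  'start': 13, 'end': 18}]
sentences = [{'start': 0, 'end': 5}, {'start': 6, 'end': 19}]

bins = [[] for _ in sentences]
for word in words:
    for i, sent in enumerate(sentences):
        if sent['start'] <= word['start'] and word['end'] <= sent['end']:
            bins[i].append(word['text'])
            break
print(bins)   # -> [['Tere', '!'], ['Kuidas', 'läheb']]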
estnltk/estnltk | estnltk/wiki/cleaner.py | dropNested | def dropNested(text, openDelim, closeDelim):
"""
A matching function for nested expressions, e.g. namespaces and tables.
"""
openRE = re.compile(openDelim)
closeRE = re.compile(closeDelim)
# partition text in separate blocks { } { }
spans = [] # pairs (s, e) for each partition
nest = 0 # nesting level
start = openRE.search(text, 0)
if not start:
return text
end = closeRE.search(text, start.end())
next = start
while end:
next = openRE.search(text, next.end())
if not next: # termination
while nest: # close all pending
nest -=1
end0 = closeRE.search(text, end.end())
if end0:
end = end0
else:
break
spans.append((start.start(), end.end()))
break
while end.end() < next.start():
# { } {
if nest:
nest -= 1
# try closing more
last = end.end()
end = closeRE.search(text, end.end())
if not end: # unbalanced
if spans:
span = (spans[0][0], last)
else:
span = (start.start(), last)
spans = [span]
break
else:
spans.append((start.start(), end.end()))
# advance start, find next close
start = next
end = closeRE.search(text, next.end())
break # { }
if next != start:
# { { }
nest += 1
# collect text outside partitions
return dropSpans(spans, text) | python | def dropNested(text, openDelim, closeDelim):
"""
A matching function for nested expressions, e.g. namespaces and tables.
"""
openRE = re.compile(openDelim)
closeRE = re.compile(closeDelim)
# partition text in separate blocks { } { }
spans = [] # pairs (s, e) for each partition
nest = 0 # nesting level
start = openRE.search(text, 0)
if not start:
return text
end = closeRE.search(text, start.end())
next = start
while end:
next = openRE.search(text, next.end())
if not next: # termination
while nest: # close all pending
nest -=1
end0 = closeRE.search(text, end.end())
if end0:
end = end0
else:
break
spans.append((start.start(), end.end()))
break
while end.end() < next.start():
# { } {
if nest:
nest -= 1
# try closing more
last = end.end()
end = closeRE.search(text, end.end())
if not end: # unbalanced
if spans:
span = (spans[0][0], last)
else:
span = (start.start(), last)
spans = [span]
break
else:
spans.append((start.start(), end.end()))
# advance start, find next close
start = next
end = closeRE.search(text, next.end())
break # { }
if next != start:
# { { }
nest += 1
# collect text outside partitions
return dropSpans(spans, text) | A matching function for nested expressions, e.g. namespaces and tables. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wiki/cleaner.py#L72-L122 |
estnltk/estnltk | estnltk/wiki/cleaner.py | dropSpans | def dropSpans(spans, text):
"""
Drop from text the blocks identified in :param spans:, possibly nested.
"""
spans.sort()
res = ''
offset = 0
for s, e in spans:
if offset <= s: # handle nesting
if offset < s:
res += text[offset:s]
offset = e
res += text[offset:]
return res | python | def dropSpans(spans, text):
"""
Drop from text the blocks identified in :param spans:, possibly nested.
"""
spans.sort()
res = ''
offset = 0
for s, e in spans:
if offset <= s: # handle nesting
if offset < s:
res += text[offset:s]
offset = e
res += text[offset:]
return res | Drop from text the blocks identified in :param spans:, possibly nested. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wiki/cleaner.py#L124-L137 |
estnltk/estnltk | estnltk/wiki/cleaner.py | clean | def clean(text):
"""
Transforms wiki markup.
@see https://www.mediawiki.org/wiki/Help:Formatting
"""
text = bold_italic.sub(r'\1', text)
text = bold.sub(r'\1', text)
text = italic_quote.sub(r'"\1"', text)
text = italic.sub(r'"\1"', text)
text = quote_quote.sub(r'"\1"', text)
# residuals of unbalanced quotes
text = text.replace("'''", '').replace("''", '"')
text = newlines.sub(r'\n', text)
text = bulletlist.sub(r'', text)
# Collect spans
spans = []
# Drop HTML comments
for m in comment.finditer(text):
spans.append((m.start(), m.end()))
# Drop self-closing tags
for pattern in selfClosing_tag_patterns:
for m in pattern.finditer(text):
spans.append((m.start(), m.end()))
# Drop ignored tags
for left, right in ignored_tag_patterns:
for m in left.finditer(text):
spans.append((m.start(), m.end()))
for m in right.finditer(text):
spans.append((m.start(), m.end()))
# Bulk remove all spans
text = dropSpans(spans, text)
# Drop discarded elements
for tag in discardElements:
text = dropNested(text, r'<\s*%s\b[^>/]*>' % tag, r'<\s*/\s*%s>' % tag)
# Expand placeholders
for pattern, placeholder in placeholder_tag_patterns:
index = 1
for match in pattern.finditer(text):
text = text.replace(match.group(), '%s_%d' % (placeholder, index))
index += 1
text = text.replace('<<', u'«').replace('>>', u'»')
#############################################
# Cleanup text
text = text.replace('\t', ' ')
text = spaces.sub(' ', text)
text = dots.sub('...', text)
text = re.sub(u' (,:\.\)\]»)', r'\1', text)
text = re.sub(u'(\[\(«) ', r'\1', text)
text = re.sub(r'\n\W+?\n', '\n', text, flags=re.U) # lines with only punctuations
text = text.replace(',,', ',').replace(',.', '.')
return text | python | def clean(text):
"""
Transforms wiki markup.
@see https://www.mediawiki.org/wiki/Help:Formatting
"""
text = bold_italic.sub(r'\1', text)
text = bold.sub(r'\1', text)
text = italic_quote.sub(r'"\1"', text)
text = italic.sub(r'"\1"', text)
text = quote_quote.sub(r'"\1"', text)
# residuals of unbalanced quotes
text = text.replace("'''", '').replace("''", '"')
text = newlines.sub(r'\n', text)
text = bulletlist.sub(r'', text)
# Collect spans
spans = []
# Drop HTML comments
for m in comment.finditer(text):
spans.append((m.start(), m.end()))
# Drop self-closing tags
for pattern in selfClosing_tag_patterns:
for m in pattern.finditer(text):
spans.append((m.start(), m.end()))
# Drop ignored tags
for left, right in ignored_tag_patterns:
for m in left.finditer(text):
spans.append((m.start(), m.end()))
for m in right.finditer(text):
spans.append((m.start(), m.end()))
# Bulk remove all spans
text = dropSpans(spans, text)
# Drop discarded elements
for tag in discardElements:
text = dropNested(text, r'<\s*%s\b[^>/]*>' % tag, r'<\s*/\s*%s>' % tag)
# Expand placeholders
for pattern, placeholder in placeholder_tag_patterns:
index = 1
for match in pattern.finditer(text):
text = text.replace(match.group(), '%s_%d' % (placeholder, index))
index += 1
text = text.replace('<<', u'«').replace('>>', u'»')
#############################################
# Cleanup text
text = text.replace('\t', ' ')
text = spaces.sub(' ', text)
text = dots.sub('...', text)
text = re.sub(u' (,:\.\)\]»)', r'\1', text)
text = re.sub(u'(\[\(«) ', r'\1', text)
text = re.sub(r'\n\W+?\n', '\n', text, flags=re.U) # lines with only punctuations
text = text.replace(',,', ',').replace(',.', '.')
return text | Transforms wiki markup.
@see https://www.mediawiki.org/wiki/Help:Formatting | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wiki/cleaner.py#L139-L203 |
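A usage sketch for clean(), assuming it is importable from estnltk.wiki.cleaner together with the module-level regexes it depends on; since those definitions are not shown here, the expected output below is only approximate:

from estnltk.wiki.cleaner import clean   # assumed import path

markup = "'''Tallinn''' on ''Eesti'' pealinn <!-- peidetud kommentaar --> ja suurim linn."
print(clean(markup))
# roughly: 'Tallinn on "Eesti" pealinn ja suurim linn.'
#          (bold/italic quotes and the HTML comment are stripped, whitespace collapsed)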
estnltk/estnltk | estnltk/disambiguator.py | Disambiguator.disambiguate | def disambiguate(self, docs, **kwargs):
""" Performs morphological analysis along with different morphological
disambiguation steps (pre-disambiguation, vabamorf's disambiguation
and post-disambiguation) in the input document collection `docs`.
Note
----
It is assumed that the documents in the input document collection `docs`
have some similarities, e.g. they are parts of the same story, they are
on the same topic etc., so that morphologically ambiguous words (for
example: proper names) reoccur in different parts of the collection.
The information about reoccurring ambiguous words is then used in
pre-disambiguation and post-disambiguation steps for improving the overall
quality of morphological disambiguation.
Additionally, the input collection `docs` can have two levels: it can be
list of list of estnltk.text.Text . For example, if we have a corpus of
daily newspaper issues from one month, and each issue consists of articles
(published on a single day), we can place the issues to the outer list,
and the articles of the issues to the inner lists.
Parameters
----------
docs: list of estnltk.text.Text
List of texts (documents) in which the morphological disambiguation
is performed. Additionally, the list can have two levels: it can be
list of list of estnltk.text.Text (this can improve quality of the
post-disambiguation);
post_disambiguate : boolean, optional
Applies the lemma-based post-disambiguation on the collection.
Default: True;
disambiguate : boolean, optional
Applies vabamorf's statistical disambiguation on the collection.
Default: True;
Note: this step shouldn't be turned off, unless for testing purposes;
pre_disambiguate : boolean, optional
Applies the pre-disambiguation of proper names on the collection.
Default: True;
vabamorf : boolean, optional
Applies vabamorf's morphological analyzer on the collection.
Default: True;
Note: this step shouldn't be turned off, unless for testing purposes.
Returns
-------
list of estnltk.text.Text
List of morphologically disambiguated texts (documents). Preserves the
structure, if the input was list of list of estnltk.text.Text;
"""
# For testing purposes, morph analysis and morph disambiguation can both
# be switched off:
use_vabamorf = kwargs.get('vabamorf', True)
use_vabamorf_disambiguate = kwargs.get('disambiguate', True)
# Configuration for pre- and post disambiguation:
use_pre_disambiguation = kwargs.get('pre_disambiguate', True)
use_post_disambiguation = kwargs.get('post_disambiguate', True)
kwargs = kwargs
# Inner/default configuration for text objects:
kwargs['disambiguate'] = False # do not use vabamorf disambiguation at first place
kwargs['guess'] = True # should be set for the morph analyzer
kwargs['propername'] = True # should be set for the morph analyzer
# Check, whether the input is a list of lists of docs, or just a list of docs
if not self.__isListOfLists( docs ):
if not self.__isListOfTexts( docs ):
raise Exception("Unexpected input argument 'docs': should be a list of strings or Text-s;")
collections = [ docs ]
else:
collections = docs
# I. perform morphological analysis, pre_disambiguation, and
# statistical (vabamorf) disambiguation with-in a single
# document collection;
for i in range(len(collections)):
docs = [Text(doc, **kwargs) for doc in collections[i]]
# morf.analysis without disambiguation
if use_vabamorf:
docs = [doc.tag_analysis() for doc in docs]
if use_pre_disambiguation:
docs = self.pre_disambiguate(docs)
if use_vabamorf_disambiguate:
docs = self.__vabamorf_disambiguate(docs)
collections[i] = docs
#
# II. perform post disambiguation over all document collections;
#
if use_post_disambiguation:
collections = self.post_disambiguate( collections )
return collections if len(collections)>1 else collections[0] | python | def disambiguate(self, docs, **kwargs):
""" Performs morphological analysis along with different morphological
disambiguation steps (pre-disambiguation, vabamorf's disambiguation
and post-disambiguation) in the input document collection `docs`.
Note
----
It is assumed that the documents in the input document collection `docs`
have some similarities, e.g. they are parts of the same story, they are
on the same topic etc., so that morphologically ambiguous words (for
example: proper names) reoccur in different parts of the collection.
The information about reoccurring ambiguous words is then used in
pre-disambiguation and post-disambiguation steps for improving the overall
quality of morphological disambiguation.
Additionally, the input collection `docs` can have two levels: it can be
list of list of estnltk.text.Text . For example, if we have a corpus of
daily newspaper issues from one month, and each issue consists of articles
(published on a single day), we can place the issues to the outer list,
and the articles of the issues to the inner lists.
Parameters
----------
docs: list of estnltk.text.Text
List of texts (documents) in which the morphological disambiguation
is performed. Additionally, the list can have two levels: it can be
list of list of estnltk.text.Text (this can improve quality of the
post-disambiguation);
post_disambiguate : boolean, optional
Applies the lemma-based post-disambiguation on the collection.
Default: True;
disambiguate : boolean, optional
Applies vabamorf's statistical disambiguation on the collection.
Default: True;
Note: this step shouldn't be turned off, unless for testing purposes;
pre_disambiguate : boolean, optional
Applies the pre-disambiguation of proper names on the collection.
Default: True;
vabamorf : boolean, optional
Applies vabamorf's morphological analyzer on the collection.
Default: True;
Note: this step shouldn't be turned off, unless for testing purposes.
Returns
-------
list of estnltk.text.Text
List of morphologically disambiguated texts (documents). Preserves the
structure, if the input was list of list of estnltk.text.Text;
"""
# For testing purposes, morph analysis and morph disambiguation can both
# be switched off:
use_vabamorf = kwargs.get('vabamorf', True)
use_vabamorf_disambiguate = kwargs.get('disambiguate', True)
# Configuration for pre- and post disambiguation:
use_pre_disambiguation = kwargs.get('pre_disambiguate', True)
use_post_disambiguation = kwargs.get('post_disambiguate', True)
kwargs = kwargs
# Inner/default configuration for text objects:
kwargs['disambiguate'] = False # do not use vabamorf disambiguation at first place
kwargs['guess'] = True # should be set for the morph analyzer
kwargs['propername'] = True # should be set for the morph analyzer
# Check, whether the input is a list of lists of docs, or just a list of docs
if not self.__isListOfLists( docs ):
if not self.__isListOfTexts( docs ):
raise Exception("Unexpected input argument 'docs': should be a list of strings or Text-s;")
collections = [ docs ]
else:
collections = docs
# I. perform morphological analysis, pre_disambiguation, and
# statistical (vabamorf) disambiguation with-in a single
# document collection;
for i in range(len(collections)):
docs = [Text(doc, **kwargs) for doc in collections[i]]
# morf.analysis without disambiguation
if use_vabamorf:
docs = [doc.tag_analysis() for doc in docs]
if use_pre_disambiguation:
docs = self.pre_disambiguate(docs)
if use_vabamorf_disambiguate:
docs = self.__vabamorf_disambiguate(docs)
collections[i] = docs
#
# II. perform post disambiguation over all document collections;
#
if use_post_disambiguation:
collections = self.post_disambiguate( collections )
return collections if len(collections)>1 else collections[0] | Performs morphological analysis along with different morphological
disambiguation steps (pre-disambiguation, vabamorf's disambiguation
and post-disambiguation) in the input document collection `docs`.
Note
----
It is assumed that the documents in the input document collection `docs`
have some similarities, e.g. they are parts of the same story, they are
on the same topic etc., so that morphologically ambiguous words (for
example: proper names) reoccur in different parts of the collection.
The information about reoccurring ambiguous words is then used in
pre-disambiguation and post-disambiguation steps for improving the overall
quality of morphological disambiguation.
Additionally, the input collection `docs` can have two levels: it can be
list of list of estnltk.text.Text . For example, if we have a corpus of
daily newspaper issues from one month, and each issue consists of articles
(published on a single day), we can place the issues to the outer list,
and the articles of the issues to the inner lists.
Parameters
----------
docs: list of estnltk.text.Text
List of texts (documents) in which the morphological disambiguation
is performed. Additionally, the list can have two levels: it can be
list of list of estnltk.text.Text (this can improve quality of the
post-disambiguation);
post_disambiguate : boolean, optional
Applies the lemma-based post-disambiguation on the collection.
Default: True;
disambiguate : boolean, optional
Applies vabamorf's statistical disambiguation on the collection.
Default: True;
Note: this step shouldn't be turned off, unless for testing purposes;
pre_disambiguate : boolean, optional
Applies the pre-disambiguation of proper names on the collection.
Default: True;
vabamorf : boolean, optional
Applies vabamorf's morphological analyzer on the collection.
Default: True;
Note: this step shouldn't be turned off, unless for testing purposes.
Returns
-------
list of estnltk.text.Text
List of morphologically disambiguated texts (documents). Preserves the
structure, if the input was list of list of estnltk.text.Text; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L26-L121 |
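A usage sketch for collection-level disambiguation, assuming Disambiguator can be constructed without arguments and that plain strings are accepted as documents (as the docstring and type checks above indicate); the .lemmas accessor is assumed from estnltk's Text API:

from estnltk.disambiguator import Disambiguator   # assumed import path

docs = ["Peeti nähti hommikul turul.",
        "Peeti kiideti koosolekul. Peet oli rahul."]
disambiguator = Disambiguator()
disambiguated = disambiguator.disambiguate(docs)   # returns a list of Text objects
for doc in disambiguated:
    print(doc.lemmas)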
estnltk/estnltk | estnltk/disambiguator.py | Disambiguator.__isListOfTexts | def __isListOfTexts(self, docs):
""" Checks whether the input is a list of strings or Text-s;
"""
return isinstance(docs, list) and \
all(isinstance(d, (basestring, Text)) for d in docs) | python | def __isListOfTexts(self, docs):
""" Checks whether the input is a list of strings or Text-s;
"""
return isinstance(docs, list) and \
all(isinstance(d, (basestring, Text)) for d in docs) | Checks whether the input is a list of strings or Text-s; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L135-L139 |
estnltk/estnltk | estnltk/disambiguator.py | Disambiguator.__isListOfLists | def __isListOfLists(self, docs):
""" Checks whether the input is a list of list of strings/Text-s;
"""
return isinstance(docs, list) and \
all(self.__isListOfTexts(ds) for ds in docs) | python | def __isListOfLists(self, docs):
""" Checks whether the input is a list of list of strings/Text-s;
"""
return isinstance(docs, list) and \
all(self.__isListOfTexts(ds) for ds in docs) | Checks whether the input is a list of list of strings/Text-s; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L142-L146 |
estnltk/estnltk | estnltk/disambiguator.py | Disambiguator.__create_proper_names_lexicon | def __create_proper_names_lexicon(self, docs):
""" Moodustab dokumendikollektsiooni põhjal pärisnimede sagedussõnastiku
(mis kirjeldab, mitu korda iga pärisnimelemma esines);
"""
lemmaFreq = dict()
for doc in docs:
for word in doc[WORDS]:
# 1) Leiame k6ik s6naga seotud unikaalsed pärisnimelemmad
# (kui neid on)
uniqLemmas = set()
for analysis in word[ANALYSIS]:
if analysis[POSTAG] == 'H':
uniqLemmas.add( analysis[ROOT] )
# 2) Jäädvustame lemmade sagedused
for lemma in uniqLemmas:
if lemma not in lemmaFreq:
lemmaFreq[lemma] = 1
else:
lemmaFreq[lemma] += 1
return lemmaFreq | python | def __create_proper_names_lexicon(self, docs):
""" Moodustab dokumendikollektsiooni põhjal pärisnimede sagedussõnastiku
(mis kirjeldab, mitu korda iga pärisnimelemma esines);
"""
lemmaFreq = dict()
for doc in docs:
for word in doc[WORDS]:
# 1) Leiame k6ik s6naga seotud unikaalsed pärisnimelemmad
# (kui neid on)
uniqLemmas = set()
for analysis in word[ANALYSIS]:
if analysis[POSTAG] == 'H':
uniqLemmas.add( analysis[ROOT] )
# 2) Jäädvustame lemmade sagedused
for lemma in uniqLemmas:
if lemma not in lemmaFreq:
lemmaFreq[lemma] = 1
else:
lemmaFreq[lemma] += 1
return lemmaFreq | Moodustab dokumendikollektsiooni põhjal pärisnimede sagedussõnastiku
(mis kirjeldab, mitu korda iga pärisnimelemma esines); | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L159-L178 |
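The Estonian docstring says: builds, over the document collection, a frequency lexicon of proper names, recording in how many words each proper-name lemma occurred. A standalone sketch of that counting with simplified analysis dicts (the 'root'/'partofspeech' keys mirror estnltk's usual constants but are assumptions here):

# Count, per lemma, the number of words carrying a proper-name ('H') analysis with it.
words = [
    {'analysis': [{'root': 'Peet', 'partofspeech': 'H'},
                  {'root': 'peet', 'partofspeech': 'S'}]},
    {'analysis': [{'root': 'Peet', 'partofspeech': 'H'}]},
]
lemma_freq = {}
for word in words:
    uniq = {a['root'] for a in word['analysis'] if a['partofspeech'] == 'H'}
    for lemma in uniq:
        lemma_freq[lemma] = lemma_freq.get(lemma, 0) + 1
print(lemma_freq)   # -> {'Peet': 2}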
estnltk/estnltk | estnltk/disambiguator.py | Disambiguator.__disambiguate_proper_names_1 | def __disambiguate_proper_names_1(self, docs, lexicon):
""" Teeme esmase yleliigsete analyyside kustutamise: kui sõnal on mitu
erineva sagedusega pärisnimeanalüüsi, siis jätame alles vaid
suurima sagedusega analyysi(d) ...
"""
for doc in docs:
for word in doc[WORDS]:
# Vaatame vaid s6nu, millele on pakutud rohkem kui yks analyys:
if len(word[ANALYSIS]) > 1:
# 1) Leiame kõigi pärisnimede sagedused sagedusleksikonist
highestFreq = 0
properNameAnalyses = []
for analysis in word[ANALYSIS]:
if analysis[POSTAG] == 'H':
if analysis[ROOT] in lexicon:
properNameAnalyses.append( analysis )
if lexicon[analysis[ROOT]] > highestFreq:
highestFreq = lexicon[analysis[ROOT]]
else:
raise Exception(' Unable to find lemma ',analysis[ROOT], \
' from the lexicon. ')
# 2) J2tame alles vaid suurima lemmasagedusega pärisnimeanalyysid,
# ylejaanud kustutame maha
if highestFreq > 0:
toDelete = []
for analysis in properNameAnalyses:
if lexicon[analysis[ROOT]] < highestFreq:
toDelete.append(analysis)
for analysis in toDelete:
word[ANALYSIS].remove(analysis) | python | def __disambiguate_proper_names_1(self, docs, lexicon):
""" Teeme esmase yleliigsete analyyside kustutamise: kui sõnal on mitu
erineva sagedusega pärisnimeanalüüsi, siis jätame alles vaid
suurima sagedusega analyysi(d) ...
"""
for doc in docs:
for word in doc[WORDS]:
# Vaatame vaid s6nu, millele on pakutud rohkem kui yks analyys:
if len(word[ANALYSIS]) > 1:
# 1) Leiame kõigi pärisnimede sagedused sagedusleksikonist
highestFreq = 0
properNameAnalyses = []
for analysis in word[ANALYSIS]:
if analysis[POSTAG] == 'H':
if analysis[ROOT] in lexicon:
properNameAnalyses.append( analysis )
if lexicon[analysis[ROOT]] > highestFreq:
highestFreq = lexicon[analysis[ROOT]]
else:
raise Exception(' Unable to find lemma ',analysis[ROOT], \
' from the lexicon. ')
# 2) J2tame alles vaid suurima lemmasagedusega pärisnimeanalyysid,
# ylejaanud kustutame maha
if highestFreq > 0:
toDelete = []
for analysis in properNameAnalyses:
if lexicon[analysis[ROOT]] < highestFreq:
toDelete.append(analysis)
for analysis in toDelete:
word[ANALYSIS].remove(analysis) | Teeme esmase yleliigsete analyyside kustutamise: kui sõnal on mitu
erineva sagedusega pärisnimeanalüüsi, siis jätame alles vaid
suurima sagedusega analyysi(d) ... | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L181-L210 |
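The Estonian docstring says: as a first pruning step, if a word has several proper-name analyses whose lemmas have different corpus frequencies, only the highest-frequency one(s) are kept. A standalone sketch with simplified analysis dicts:

# Keep only the proper-name analyses with the highest lemma frequency;
# non-proper-name analyses are left untouched at this stage.
lexicon = {'Peet': 5, 'Peedu': 1}
analyses = [{'root': 'Peet',  'partofspeech': 'H'},
            {'root': 'Peedu', 'partofspeech': 'H'},
            {'root': 'peet',  'partofspeech': 'S'}]

top = max(lexicon[a['root']] for a in analyses if a['partofspeech'] == 'H')
analyses = [a for a in analyses
            if a['partofspeech'] != 'H' or lexicon[a['root']] == top]
print([a['root'] for a in analyses])   # -> ['Peet', 'peet']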
estnltk/estnltk | estnltk/disambiguator.py | Disambiguator.__find_certain_proper_names | def __find_certain_proper_names(self, docs):
""" Moodustame kindlate pärisnimede loendi: vaatame sõnu, millel ongi
ainult pärisnimeanalüüsid ning võtame sealt loendisse unikaalsed
pärisnimed;
"""
certainNames = set()
for doc in docs:
for word in doc[WORDS]:
# Vaatame vaid pärisnimeanalüüsidest koosnevaid sõnu
if all([ a[POSTAG] == 'H' for a in word[ANALYSIS] ]):
# Jäädvustame kõik unikaalsed lemmad kui kindlad pärisnimed
for analysis in word[ANALYSIS]:
certainNames.add( analysis[ROOT] )
return certainNames | python | def __find_certain_proper_names(self, docs):
""" Moodustame kindlate pärisnimede loendi: vaatame sõnu, millel ongi
ainult pärisnimeanalüüsid ning võtame sealt loendisse unikaalsed
pärisnimed;
"""
certainNames = set()
for doc in docs:
for word in doc[WORDS]:
# Vaatame vaid pärisnimeanalüüsidest koosnevaid sõnu
if all([ a[POSTAG] == 'H' for a in word[ANALYSIS] ]):
# Jäädvustame kõik unikaalsed lemmad kui kindlad pärisnimed
for analysis in word[ANALYSIS]:
certainNames.add( analysis[ROOT] )
return certainNames | Moodustame kindlate pärisnimede loendi: vaatame sõnu, millel ongi
ainult pärisnimeanalüüsid ning võtame sealt loendisse unikaalsed
pärisnimed; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L213-L226 |
estnltk/estnltk | estnltk/disambiguator.py | Disambiguator.__find_sentence_initial_proper_names | def __find_sentence_initial_proper_names(self, docs):
""" Moodustame lausealguliste pärisnimede loendi: vaatame sõnu, millel nii
pärisnimeanalüüs(id) kui ka mittepärisnimeanalüüs(id) ning mis esinevad
lause või nummerdatud loendi alguses - jäädvustame selliste sõnade
unikaalsed lemmad;
"""
sentInitialNames = set()
for doc in docs:
for sentence in doc.divide( layer=WORDS, by=SENTENCES ):
sentencePos = 0 # Tavaline lausealgus
for i in range(len(sentence)):
word = sentence[i]
# Täiendavad heuristikud lausealguspositsioonide leidmiseks:
# 1) kirjavahemärk, mis pole koma ega semikoolon, on lausealgus:
if all([ a[POSTAG] == 'Z' for a in word[ANALYSIS] ]) and \
not re.match('^[,;]+$', word[TEXT]):
sentencePos = 0
#self.__debug_print_word_in_sentence_str(sentence, word)
continue
# 2) potentsiaalne loendi algus (arv, millele järgneb punkt või
# sulg ja mis ei ole kuupäev);
if not re.match('^[1234567890]*$', word[TEXT] ) and \
not re.match('^[1234567890]{1,2}.[1234567890]{1,2}.[1234567890]{4}$', word[TEXT] ) and \
re.match("^[1234567890.()]*$", word[TEXT]):
sentencePos = 0
#self.__debug_print_word_in_sentence_str(sentence, word)
continue
if sentencePos == 0:
# Vaatame lausealgulisi sõnu, millel on nii pärisnimeanalüüs(e)
# kui ka mitte-pärisnimeanalüüs(e)
h_postags = [ a[POSTAG] == 'H' for a in word[ANALYSIS] ]
if any( h_postags ) and not all( h_postags ):
for analysis in word[ANALYSIS]:
# Jätame meelde kõik unikaalsed pärisnimelemmad
if analysis[POSTAG] == 'H':
sentInitialNames.add( analysis[ROOT] )
sentencePos += 1
return sentInitialNames | python | def __find_sentence_initial_proper_names(self, docs):
""" Moodustame lausealguliste pärisnimede loendi: vaatame sõnu, millel nii
pärisnimeanalüüs(id) kui ka mittepärisnimeanalüüs(id) ning mis esinevad
lause või nummerdatud loendi alguses - jäädvustame selliste sõnade
unikaalsed lemmad;
"""
sentInitialNames = set()
for doc in docs:
for sentence in doc.divide( layer=WORDS, by=SENTENCES ):
sentencePos = 0 # Tavaline lausealgus
for i in range(len(sentence)):
word = sentence[i]
# Täiendavad heuristikud lausealguspositsioonide leidmiseks:
# 1) kirjavahemärk, mis pole koma ega semikoolon, on lausealgus:
if all([ a[POSTAG] == 'Z' for a in word[ANALYSIS] ]) and \
not re.match('^[,;]+$', word[TEXT]):
sentencePos = 0
#self.__debug_print_word_in_sentence_str(sentence, word)
continue
# 2) potentsiaalne loendi algus (arv, millele järgneb punkt või
# sulg ja mis ei ole kuupäev);
if not re.match('^[1234567890]*$', word[TEXT] ) and \
not re.match('^[1234567890]{1,2}.[1234567890]{1,2}.[1234567890]{4}$', word[TEXT] ) and \
re.match("^[1234567890.()]*$", word[TEXT]):
sentencePos = 0
#self.__debug_print_word_in_sentence_str(sentence, word)
continue
if sentencePos == 0:
# Vaatame lausealgulisi sõnu, millel on nii pärisnimeanalüüs(e)
# kui ka mitte-pärisnimeanalüüs(e)
h_postags = [ a[POSTAG] == 'H' for a in word[ANALYSIS] ]
if any( h_postags ) and not all( h_postags ):
for analysis in word[ANALYSIS]:
# Jätame meelde kõik unikaalsed pärisnimelemmad
if analysis[POSTAG] == 'H':
sentInitialNames.add( analysis[ROOT] )
sentencePos += 1
return sentInitialNames | Moodustame lausealguliste pärisnimede loendi: vaatame sõnu, millel nii
pärisnimeanalüüs(id) kui ka mittepärisnimeanalüüs(id) ning mis esinevad
lause või nummerdatud loendi alguses - jäädvustame selliste sõnade
unikaalsed lemmad; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L229-L266 |
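The Estonian docstring says: collects lemmas of words that have both proper-name and non-proper-name analyses and occur at the start of a sentence or of a numbered list item. A standalone sketch of the two extra "sentence start" heuristics used in the loop above (the unescaped dots in the date pattern are kept as in the original):

# (1) any punctuation other than ',' or ';' restarts the position counter,
# (2) a list-item number like '3.' or '(2)' does too, unless it looks like a date.
import re

def restarts_sentence(token, is_punct):
    if is_punct and not re.match(r'^[,;]+$', token):
        return True
    if (not re.match(r'^[0-9]*$', token)
            and not re.match(r'^[0-9]{1,2}.[0-9]{1,2}.[0-9]{4}$', token)
            and re.match(r'^[0-9.()]*$', token)):
        return True
    return False

print(restarts_sentence('3.', False))          # -> True  (numbered list item)
print(restarts_sentence('02.05.2015', False))  # -> False (looks like a date)
print(restarts_sentence(';', True))            # -> False (clause-internal punctuation)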
estnltk/estnltk | estnltk/disambiguator.py | Disambiguator.__remove_redundant_proper_names | def __remove_redundant_proper_names(self, docs, lemma_set):
""" Eemaldame yleliigsed pärisnimeanalüüsid etteantud sõnalemmade
loendi (hulga) põhjal;
"""
for doc in docs:
for word in doc[WORDS]:
# Vaatame vaid s6nu, millele on pakutud rohkem kui yks analyys:
if len(word[ANALYSIS]) > 1:
# 1) Leiame analyysid, mis tuleks loendi järgi eemaldada
toDelete = []
for analysis in word[ANALYSIS]:
if analysis[POSTAG] == 'H' and analysis[ROOT] in lemma_set:
toDelete.append( analysis )
# 2) Eemaldame yleliigsed analyysid
if toDelete:
for analysis in toDelete:
word[ANALYSIS].remove(analysis) | python | def __remove_redundant_proper_names(self, docs, lemma_set):
""" Eemaldame yleliigsed pärisnimeanalüüsid etteantud sõnalemmade
loendi (hulga) põhjal;
"""
for doc in docs:
for word in doc[WORDS]:
# Vaatame vaid s6nu, millele on pakutud rohkem kui yks analyys:
if len(word[ANALYSIS]) > 1:
# 1) Leiame analyysid, mis tuleks loendi järgi eemaldada
toDelete = []
for analysis in word[ANALYSIS]:
if analysis[POSTAG] == 'H' and analysis[ROOT] in lemma_set:
toDelete.append( analysis )
# 2) Eemaldame yleliigsed analyysid
if toDelete:
for analysis in toDelete:
word[ANALYSIS].remove(analysis) | Eemaldame yleliigsed pärisnimeanalüüsid etteantud sõnalemmade
loendi (hulga) põhjal; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L306-L322 |
estnltk/estnltk | estnltk/disambiguator.py | Disambiguator.__disambiguate_proper_names_2 | def __disambiguate_proper_names_2(self, docs, lexicon):
""" Kustutame üleliigsed mitte-pärisnimeanalüüsid:
-- kui lause keskel on pärisnimeanalüüsiga sõna, jätamegi alles vaid
pärisnimeanalyys(id);
-- kui lause alguses on pärisnimeanalüüsiga s6na, ning pärisnimelemma
esineb korpuses suurema sagedusega kui 1, jätamegi alles vaid
pärisnimeanalyys(id); vastasel juhul ei kustuta midagi;
"""
for doc in docs:
for sentence in doc.divide( layer=WORDS, by=SENTENCES ):
sentencePos = 0 # Tavaline lausealgus
for i in range(len(sentence)):
word = sentence[i]
# Täiendavad heuristikud lausealguspositsioonide leidmiseks:
# 1) kirjavahemärk, mis pole koma ega semikoolon, on lausealgus:
if all([ a[POSTAG] == 'Z' for a in word[ANALYSIS] ]) and \
not re.match('^[,;]+$', word[TEXT]):
sentencePos = 0
continue
#
# Vaatame ainult mitmeseid s6nu, mis sisaldavad ka p2risnimeanalyysi
#
if len(word[ANALYSIS]) > 1 and \
any([ a[POSTAG] == 'H' for a in word[ANALYSIS] ]):
if sentencePos != 0:
# 1) Kui oleme lause keskel, valime alati vaid nimeanalyysid
# (eeldades, et nyyseks on järgi jäänud vaid korrektsed nimed)
toDelete = []
for analysis in word[ANALYSIS]:
if analysis[POSTAG] not in ['H', 'G']:
toDelete.append( analysis )
for analysis in toDelete:
word[ANALYSIS].remove(analysis)
#if toDelete:
# self.__debug_print_word_in_sentence_str(sentence, word)
else:
# 2) Kui oleme lause alguses, siis valime ainult nimeanalyysid
# juhul, kui vastav lemma esines ka mujal (st lemma esinemis-
# sagedus on suurem kui 1);
# Kas m6ni lemma esineb p2risnimede leksikonis sagedusega > 1 ?
hasRecurringProperName = False
toDelete = []
for analysis in word[ANALYSIS]:
if analysis[ROOT] in lexicon and lexicon[analysis[ROOT]] > 1:
hasRecurringProperName = True
if analysis[POSTAG] not in ['H', 'G']:
toDelete.append( analysis )
if hasRecurringProperName and toDelete:
# Kui p2risnimi esines ka mujal, j2tame alles vaid p2risnime-
# analyysid:
for analysis in toDelete:
word[ANALYSIS].remove(analysis)
#self.__debug_print_word_in_sentence_str(sentence, word)
sentencePos += 1 | python | def __disambiguate_proper_names_2(self, docs, lexicon):
""" Kustutame üleliigsed mitte-pärisnimeanalüüsid:
-- kui lause keskel on pärisnimeanalüüsiga sõna, jätamegi alles vaid
pärisnimeanalyys(id);
-- kui lause alguses on pärisnimeanalüüsiga s6na, ning pärisnimelemma
esineb korpuses suurema sagedusega kui 1, jätamegi alles vaid
pärisnimeanalyys(id); vastasel juhul ei kustuta midagi;
"""
for doc in docs:
for sentence in doc.divide( layer=WORDS, by=SENTENCES ):
sentencePos = 0 # Tavaline lausealgus
for i in range(len(sentence)):
word = sentence[i]
# Täiendavad heuristikud lausealguspositsioonide leidmiseks:
# 1) kirjavahemärk, mis pole koma ega semikoolon, on lausealgus:
if all([ a[POSTAG] == 'Z' for a in word[ANALYSIS] ]) and \
not re.match('^[,;]+$', word[TEXT]):
sentencePos = 0
continue
#
# Vaatame ainult mitmeseid s6nu, mis sisaldavad ka p2risnimeanalyysi
#
if len(word[ANALYSIS]) > 1 and \
any([ a[POSTAG] == 'H' for a in word[ANALYSIS] ]):
if sentencePos != 0:
# 1) Kui oleme lause keskel, valime alati vaid nimeanalyysid
# (eeldades, et nyyseks on järgi jäänud vaid korrektsed nimed)
toDelete = []
for analysis in word[ANALYSIS]:
if analysis[POSTAG] not in ['H', 'G']:
toDelete.append( analysis )
for analysis in toDelete:
word[ANALYSIS].remove(analysis)
#if toDelete:
# self.__debug_print_word_in_sentence_str(sentence, word)
else:
# 2) Kui oleme lause alguses, siis valime ainult nimeanalyysid
# juhul, kui vastav lemma esines ka mujal (st lemma esinemis-
# sagedus on suurem kui 1);
# Kas m6ni lemma esineb p2risnimede leksikonis sagedusega > 1 ?
hasRecurringProperName = False
toDelete = []
for analysis in word[ANALYSIS]:
if analysis[ROOT] in lexicon and lexicon[analysis[ROOT]] > 1:
hasRecurringProperName = True
if analysis[POSTAG] not in ['H', 'G']:
toDelete.append( analysis )
if hasRecurringProperName and toDelete:
# Kui p2risnimi esines ka mujal, j2tame alles vaid p2risnime-
# analyysid:
for analysis in toDelete:
word[ANALYSIS].remove(analysis)
#self.__debug_print_word_in_sentence_str(sentence, word)
sentencePos += 1 | Kustutame üleliigsed mitte-pärisnimeanalüüsid:
-- kui lause keskel on pärisnimeanalüüsiga sõna, jätamegi alles vaid
pärisnimeanalyys(id);
-- kui lause alguses on pärisnimeanalüüsiga s6na, ning pärisnimelemma
esineb korpuses suurema sagedusega kui 1, jätamegi alles vaid
pärisnimeanalyys(id); vastasel juhul ei kustuta midagi; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L325-L378 |
estnltk/estnltk | estnltk/disambiguator.py | Disambiguator.pre_disambiguate | def pre_disambiguate(self, docs):
""" Teostab pärisnimede eelühestamine. Üldiseks eesmärgiks on vähendada mitmesust
suurtähega algavate sonade morf analüüsil, nt eemaldada pärisnime analüüs, kui
suurtäht tähistab tõenäoliselt lausealgust.
"""
# 1) Leiame pärisnimelemmade sagedusleksikoni
lexicon = self.__create_proper_names_lexicon(docs)
# 2) Teeme esialgse kustutamise: kui sõnal on mitu erineva korpuse-
# sagedusega pärisnimeanalüüsi, siis jätame alles vaid kõige
# sagedasema analyysi ...
self.__disambiguate_proper_names_1(docs, lexicon)
# 3) Eemaldame yleliigsed lause alguse pärisnimeanalüüsid;
# Kõigepealt leiame: kindlad pärisnimed, lause alguses esinevad
# p2risnimed ja lause keskel esinevad pärisnimed
certainNames = self.__find_certain_proper_names(docs)
sentInitialNames = self.__find_sentence_initial_proper_names(docs)
sentCentralNames = self.__find_sentence_central_proper_names(docs)
# 3.1) Võrdleme lause alguses ja keskel esinevaid lemmasid: leiame
# lemmad, mis esinesid ainult lause alguses ...
onlySentenceInitial = sentInitialNames.difference(sentCentralNames)
# 3.2) Võrdleme ainult lause alguses esinevaid ning kindlaid pärisnime-
# lemmasid: kui sõna esines vaid lause alguses ega ole kindel
# pärisnimelemma, pole tõenäoliselt tegu pärisnimega ...
notProperNames = onlySentenceInitial.difference(certainNames)
# 3.3) Eemaldame yleliigsed p2risnimeanalyysid (kui selliseid leidus)
if len(notProperNames) > 0:
self.__remove_redundant_proper_names(docs, notProperNames)
# 4) Leiame uue pärisnimelemmade sagedusleksikoni (sagedused on
# tõenäoliselt vahepeal muutunud);
lexicon = self.__create_proper_names_lexicon(docs)
# 5) Teeme üleliigsete mittepärisnimeanalüüside kustutamise sõnadelt,
# millel on lisaks pärisnimeanalüüsidele ka teisi analüüse:
# lausealgusesse jätame alles vaid pärisnimeanalüüsid, kui neid
# esineb korpuses ka mujal;
# lause keskele jätame igal juhul alles vaid pärisnimeanalüüsid;
self.__disambiguate_proper_names_2(docs, lexicon)
return docs | python | def pre_disambiguate(self, docs):
""" Teostab pärisnimede eelühestamine. Üldiseks eesmärgiks on vähendada mitmesust
suurtähega algavate sonade morf analüüsil, nt eemaldada pärisnime analüüs, kui
suurtäht tähistab tõenäoliselt lausealgust.
"""
# 1) Leiame pärisnimelemmade sagedusleksikoni
lexicon = self.__create_proper_names_lexicon(docs)
# 2) Teeme esialgse kustutamise: kui sõnal on mitu erineva korpuse-
# sagedusega pärisnimeanalüüsi, siis jätame alles vaid kõige
# sagedasema analyysi ...
self.__disambiguate_proper_names_1(docs, lexicon)
# 3) Eemaldame yleliigsed lause alguse pärisnimeanalüüsid;
# Kõigepealt leiame: kindlad pärisnimed, lause alguses esinevad
# p2risnimed ja lause keskel esinevad pärisnimed
certainNames = self.__find_certain_proper_names(docs)
sentInitialNames = self.__find_sentence_initial_proper_names(docs)
sentCentralNames = self.__find_sentence_central_proper_names(docs)
# 3.1) Võrdleme lause alguses ja keskel esinevaid lemmasid: leiame
# lemmad, mis esinesid ainult lause alguses ...
onlySentenceInitial = sentInitialNames.difference(sentCentralNames)
# 3.2) Võrdleme ainult lause alguses esinevaid ning kindlaid pärisnime-
# lemmasid: kui sõna esines vaid lause alguses ega ole kindel
# pärisnimelemma, pole tõenäoliselt tegu pärisnimega ...
notProperNames = onlySentenceInitial.difference(certainNames)
# 3.3) Eemaldame yleliigsed p2risnimeanalyysid (kui selliseid leidus)
if len(notProperNames) > 0:
self.__remove_redundant_proper_names(docs, notProperNames)
# 4) Leiame uue pärisnimelemmade sagedusleksikoni (sagedused on
# tõenäoliselt vahepeal muutunud);
lexicon = self.__create_proper_names_lexicon(docs)
# 5) Teeme üleliigsete mittepärisnimeanalüüside kustutamise sõnadelt,
# millel on lisaks pärisnimeanalüüsidele ka teisi analüüse:
# lausealgusesse jätame alles vaid pärisnimeanalüüsid, kui neid
# esineb korpuses ka mujal;
# lause keskele jätame igal juhul alles vaid pärisnimeanalüüsid;
self.__disambiguate_proper_names_2(docs, lexicon)
return docs | Teostab pärisnimede eelühestamine. Üldiseks eesmärgiks on vähendada mitmesust
suurtähega algavate sonade morf analüüsil, nt eemaldada pärisnime analüüs, kui
suurtäht tähistab tõenäoliselt lausealgust. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L381-L421 |
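The Estonian docstring says: performs pre-disambiguation of proper names, aiming to reduce ambiguity of capitalised words, e.g. dropping the proper-name reading when the capital letter most likely just marks a sentence start. A usage sketch, assuming the Texts are prepared the way disambiguate() prepares them (analysis with guessing and proper-name guessing on, statistical disambiguation off):

from estnltk import Text
from estnltk.disambiguator import Disambiguator   # assumed import paths

kwargs = {'disambiguate': False, 'guess': True, 'propername': True}
docs = [Text("Peeti nähti turul.", **kwargs).tag_analysis(),
        Text("Peet müüs kartuleid.", **kwargs).tag_analysis()]
docs = Disambiguator().pre_disambiguate(docs)   # prunes proper-name ambiguity in place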
estnltk/estnltk | estnltk/disambiguator.py | Disambiguator.__analyses_match | def __analyses_match(self, analysisA, analysisB):
""" Leiame, kas tegu on duplikaatidega ehk täpselt üht ja sama
morfoloogilist infot sisaldavate analüüsidega. """
return POSTAG in analysisA and POSTAG in analysisB and \
analysisA[POSTAG]==analysisB[POSTAG] and \
ROOT in analysisA and ROOT in analysisB and \
analysisA[ROOT]==analysisB[ROOT] and \
FORM in analysisA and FORM in analysisB and \
analysisA[FORM]==analysisB[FORM] and \
CLITIC in analysisA and CLITIC in analysisB and \
analysisA[CLITIC]==analysisB[CLITIC] and \
ENDING in analysisA and ENDING in analysisB and \
analysisA[ENDING]==analysisB[ENDING] | python | def __analyses_match(self, analysisA, analysisB):
""" Leiame, kas tegu on duplikaatidega ehk täpselt üht ja sama
morfoloogilist infot sisaldavate analüüsidega. """
return POSTAG in analysisA and POSTAG in analysisB and \
analysisA[POSTAG]==analysisB[POSTAG] and \
ROOT in analysisA and ROOT in analysisB and \
analysisA[ROOT]==analysisB[ROOT] and \
FORM in analysisA and FORM in analysisB and \
analysisA[FORM]==analysisB[FORM] and \
CLITIC in analysisA and CLITIC in analysisB and \
analysisA[CLITIC]==analysisB[CLITIC] and \
ENDING in analysisA and ENDING in analysisB and \
analysisA[ENDING]==analysisB[ENDING] | Leiame, kas tegu on duplikaatidega ehk täpselt üht ja sama
morfoloogilist infot sisaldavate analüüsidega. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L434-L446 |
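The Estonian docstring says: two analyses count as duplicates only if they carry exactly the same morphological information. A small illustration with simplified analysis dicts (the keys mirror estnltk's constants, an assumption here):

# Two analyses match only if POS, root, form, clitic and ending are all identical.
a = {'partofspeech': 'S', 'root': 'palk', 'form': 'sg n', 'clitic': '', 'ending': '0'}
b = dict(a)                    # exact copy -> duplicate
c = dict(a, form='sg g')       # different form -> not a duplicate
keys = ('partofspeech', 'root', 'form', 'clitic', 'ending')
print(all(a[k] == b[k] for k in keys), all(a[k] == c[k] for k in keys))   # -> True False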
estnltk/estnltk | estnltk/disambiguator.py | Disambiguator.__remove_duplicate_and_problematic_analyses | def __remove_duplicate_and_problematic_analyses(self, docs):
""" 1) Eemaldab sisendkorpuse kõigi sõnade morf analüüsidest duplikaadid
ehk siis korduvad analüüsid; Nt sõna 'palk' saab kaks analyysi:
'palk' (mis käändub 'palk\palgi') ja 'palk' (mis käändub 'palk\palga'),
aga pärast duplikaatide eemaldamist jääb alles vaid üks;
2) Kui verbi analüüside hulgas on alles nii '-tama' kui ka '-ma', siis
jätta alles vaid '-ma' analüüsid;
"""
for doc in docs:
for word in doc[WORDS]:
# 1) Leiame k6ik analyysi-duplikaadid (kui neid on)
toDelete = []
for i in range(len(word[ANALYSIS])):
if i+1 < len(word[ANALYSIS]):
for j in range(i+1, len(word[ANALYSIS])):
analysisI = word[ANALYSIS][i]
analysisJ = word[ANALYSIS][j]
if self.__analyses_match(analysisI, analysisJ):
if j not in toDelete:
toDelete.append(j)
# 2) Kustutame yleliigsed analyysid
if toDelete:
for a in sorted(toDelete, reverse=True):
del word[ANALYSIS][a]
#
# *) Kui verbi analüüside puhul on olemas nii '-tama' kui ka '-ma'
# lõpp, siis jätta alles vaid -ma, ülejäänud kustutada;
# Nt lõpetama: lõp+tama, lõppe+tama, lõpeta+ma
# teatama: tead+tama, teata+ma
#
if any([ a[POSTAG]=='V' and a[ENDING]=='tama' for a in word[ANALYSIS] ]) and \
any([ a[POSTAG]=='V' and a[ENDING]=='ma' for a in word[ANALYSIS] ]):
toDelete = []
for a in range( len(word[ANALYSIS]) ):
if word[ANALYSIS][a][POSTAG]=='V' and \
word[ANALYSIS][a][ENDING]=='tama':
toDelete.append(a)
if toDelete:
for a in sorted(toDelete, reverse=True):
del word[ANALYSIS][a] | python | def __remove_duplicate_and_problematic_analyses(self, docs):
""" 1) Eemaldab sisendkorpuse kõigi sõnade morf analüüsidest duplikaadid
ehk siis korduvad analüüsid; Nt sõna 'palk' saab kaks analyysi:
'palk' (mis käändub 'palk\palgi') ja 'palk' (mis käändub 'palk\palga'),
aga pärast duplikaatide eemaldamist jääb alles vaid üks;
2) Kui verbi analüüside hulgas on alles nii '-tama' kui ka '-ma', siis
jätta alles vaid '-ma' analüüsid;
"""
for doc in docs:
for word in doc[WORDS]:
# 1) Leiame k6ik analyysi-duplikaadid (kui neid on)
toDelete = []
for i in range(len(word[ANALYSIS])):
if i+1 < len(word[ANALYSIS]):
for j in range(i+1, len(word[ANALYSIS])):
analysisI = word[ANALYSIS][i]
analysisJ = word[ANALYSIS][j]
if self.__analyses_match(analysisI, analysisJ):
if j not in toDelete:
toDelete.append(j)
# 2) Kustutame yleliigsed analyysid
if toDelete:
for a in sorted(toDelete, reverse=True):
del word[ANALYSIS][a]
#
# *) Kui verbi analüüside puhul on olemas nii '-tama' kui ka '-ma'
# lõpp, siis jätta alles vaid -ma, ülejäänud kustutada;
# Nt lõpetama: lõp+tama, lõppe+tama, lõpeta+ma
# teatama: tead+tama, teata+ma
#
if any([ a[POSTAG]=='V' and a[ENDING]=='tama' for a in word[ANALYSIS] ]) and \
any([ a[POSTAG]=='V' and a[ENDING]=='ma' for a in word[ANALYSIS] ]):
toDelete = []
for a in range( len(word[ANALYSIS]) ):
if word[ANALYSIS][a][POSTAG]=='V' and \
word[ANALYSIS][a][ENDING]=='tama':
toDelete.append(a)
if toDelete:
for a in sorted(toDelete, reverse=True):
del word[ANALYSIS][a] | 1) Eemaldab sisendkorpuse kõigi sõnade morf analüüsidest duplikaadid
ehk siis korduvad analüüsid; Nt sõna 'palk' saab kaks analyysi:
'palk' (mis käändub 'palk\palgi') ja 'palk' (mis käändub 'palk\palga'),
aga pärast duplikaatide eemaldamist jääb alles vaid üks;
2) Kui verbi analüüside hulgas on alles nii '-tama' kui ka '-ma', siis
jätta alles vaid '-ma' analüüsid; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L449-L488 |
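The Estonian docstring says: (1) duplicate analyses are removed from every word (e.g. the two inflection-class readings of 'palk' collapse into one), and (2) if a verb has both '-tama' and '-ma' analyses, only the '-ma' ones are kept (e.g. 'lõpetama'). A standalone sketch of the second rule with simplified analysis dicts:

# If a verb carries both '-tama' and '-ma' endings, drop the '-tama' analyses.
analyses = [{'root': 'lõp',    'partofspeech': 'V', 'ending': 'tama'},
            {'root': 'lõppe',  'partofspeech': 'V', 'ending': 'tama'},
            {'root': 'lõpeta', 'partofspeech': 'V', 'ending': 'ma'}]

has_tama = any(a['partofspeech'] == 'V' and a['ending'] == 'tama' for a in analyses)
has_ma   = any(a['partofspeech'] == 'V' and a['ending'] == 'ma'   for a in analyses)
if has_tama and has_ma:
    analyses = [a for a in analyses
                if not (a['partofspeech'] == 'V' and a['ending'] == 'tama')]
print([(a['root'], a['ending']) for a in analyses])   # -> [('lõpeta', 'ma')]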
estnltk/estnltk | estnltk/disambiguator.py | Disambiguator.__find_hidden_analyses | def __find_hidden_analyses(self, docs):
""" Jätab meelde, millised analüüsid on nn peidetud ehk siis mida ei
tule arvestada lemmade järelühestamisel:
*) kesksõnade nud, dud, tud mitmesused;
*) muutumatute sõnade sõnaliigi mitmesus;
*) oleviku 'olema' mitmesus ('nad on' vs 'ta on');
*) asesõnade ainsuse-mitmuse mitmesus;
*) arv- ja asesõnade vaheline mitmesus;
Tagastab sõnastiku peidetud analüüse sisaldanud sõnade asukohtadega,
iga võti kujul (doc_index, word_index); """
hidden = dict()
nudTudLopud = re.compile('^.*[ntd]ud$')
for d in range(len(docs)):
for w in range(len(docs[d][WORDS])):
word = docs[d][WORDS][w]
if ANALYSIS in word and len(word[ANALYSIS]) > 1:
#
# 1) Kui enamus analüüse on nud/tud/dud analüüsid, peida mitmesus:
# kõla+nud //_V_ nud, // kõla=nud+0 //_A_ // kõla=nud+0 //_A_ sg n, // kõla=nud+d //_A_ pl n, //
nudTud = [ nudTudLopud.match(a[ROOT]) != None or \
nudTudLopud.match(a[ENDING]) != None \
for a in word[ANALYSIS] ]
if nudTud.count( True ) > 1:
hidden[(d, w)] = 1
#
# 2) Kui analyysidel on sama lemma ja puudub vormitunnus, siis peida mitmesused ära:
# Nt kui+0 //_D_ // kui+0 //_J_ //
# nagu+0 //_D_ // nagu+0 //_J_ //
lemmas = set([ a[ROOT] for a in word[ANALYSIS] ])
forms = set([ a[FORM] for a in word[ANALYSIS] ])
if len(lemmas) == 1 and len(forms) == 1 and (list(forms))[0] == '':
hidden[(d, w)] = 1
#
# 3) Kui 'olema'-analyysidel on sama lemma ning sama l6pp, peida mitmesused:
# Nt 'nad on' vs 'ta on' saavad sama olema-analyysi, mis jääb mitmeseks;
endings = set([ a[ENDING] for a in word[ANALYSIS] ])
if len(lemmas) == 1 and (list(lemmas))[0] == 'ole' and len(endings) == 1 \
and (list(endings))[0] == '0':
hidden[(d, w)] = 1
#
# 4) Kui asesõnadel on sama lemma ja lõpp, peida ainsuse/mitmuse mitmesus:
# Nt kõik+0 //_P_ sg n // kõik+0 //_P_ pl n //
# kes+0 //_P_ sg n // kes+0 //_P_ pl n //
postags = set([ a[POSTAG] for a in word[ANALYSIS] ])
if len(lemmas) == 1 and len(postags) == 1 and 'P' in postags and \
len(endings) == 1:
hidden[(d, w)] = 1
#
# 5) Kui on sama lemma ja lõpp, peida arv- ja asesõnadevaheline mitmesus:
# Nt teine+0 //_O_ pl n, // teine+0 //_P_ pl n, //
# üks+l //_N_ sg ad, // üks+l //_P_ sg ad, //
if len(lemmas) == 1 and 'P' in postags and ('O' in postags or \
'N' in postags) and len(endings) == 1:
hidden[(d, w)] = 1
return hidden | python | def __find_hidden_analyses(self, docs):
""" Jätab meelde, millised analüüsid on nn peidetud ehk siis mida ei
tule arvestada lemmade järelühestamisel:
*) kesksõnade nud, dud, tud mitmesused;
*) muutumatute sõnade sõnaliigi mitmesus;
*) oleviku 'olema' mitmesus ('nad on' vs 'ta on');
*) asesõnade ainsuse-mitmuse mitmesus;
*) arv- ja asesõnade vaheline mitmesus;
Tagastab sõnastiku peidetud analüüse sisaldanud sõnade asukohtadega,
iga võti kujul (doc_index, word_index); """
hidden = dict()
nudTudLopud = re.compile('^.*[ntd]ud$')
for d in range(len(docs)):
for w in range(len(docs[d][WORDS])):
word = docs[d][WORDS][w]
if ANALYSIS in word and len(word[ANALYSIS]) > 1:
#
# 1) Kui enamus analüüse on nud/tud/dud analüüsid, peida mitmesus:
# kõla+nud //_V_ nud, // kõla=nud+0 //_A_ // kõla=nud+0 //_A_ sg n, // kõla=nud+d //_A_ pl n, //
nudTud = [ nudTudLopud.match(a[ROOT]) != None or \
nudTudLopud.match(a[ENDING]) != None \
for a in word[ANALYSIS] ]
if nudTud.count( True ) > 1:
hidden[(d, w)] = 1
#
# 2) Kui analyysidel on sama lemma ja puudub vormitunnus, siis peida mitmesused ära:
# Nt kui+0 //_D_ // kui+0 //_J_ //
# nagu+0 //_D_ // nagu+0 //_J_ //
lemmas = set([ a[ROOT] for a in word[ANALYSIS] ])
forms = set([ a[FORM] for a in word[ANALYSIS] ])
if len(lemmas) == 1 and len(forms) == 1 and (list(forms))[0] == '':
hidden[(d, w)] = 1
#
# 3) Kui 'olema'-analyysidel on sama lemma ning sama l6pp, peida mitmesused:
# Nt 'nad on' vs 'ta on' saavad sama olema-analyysi, mis jääb mitmeseks;
endings = set([ a[ENDING] for a in word[ANALYSIS] ])
if len(lemmas) == 1 and (list(lemmas))[0] == 'ole' and len(endings) == 1 \
and (list(endings))[0] == '0':
hidden[(d, w)] = 1
#
# 4) Kui asesõnadel on sama lemma ja lõpp, peida ainsuse/mitmuse mitmesus:
# Nt kõik+0 //_P_ sg n // kõik+0 //_P_ pl n //
# kes+0 //_P_ sg n // kes+0 //_P_ pl n //
postags = set([ a[POSTAG] for a in word[ANALYSIS] ])
if len(lemmas) == 1 and len(postags) == 1 and 'P' in postags and \
len(endings) == 1:
hidden[(d, w)] = 1
#
# 5) Kui on sama lemma ja lõpp, peida arv- ja asesõnadevaheline mitmesus:
# Nt teine+0 //_O_ pl n, // teine+0 //_P_ pl n, //
# üks+l //_N_ sg ad, // üks+l //_P_ sg ad, //
if len(lemmas) == 1 and 'P' in postags and ('O' in postags or \
'N' in postags) and len(endings) == 1:
hidden[(d, w)] = 1
return hidden | Jätab meelde, millised analüüsid on nn peidetud ehk siis mida ei
tule arvestada lemmade järelühestamisel:
*) kesksõnade nud, dud, tud mitmesused;
*) muutumatute sõnade sõnaliigi mitmesus;
*) oleviku 'olema' mitmesus ('nad on' vs 'ta on');
*) asesõnade ainsuse-mitmuse mitmesus;
*) arv- ja asesõnade vaheline mitmesus;
Tagastab sõnastiku peidetud analüüse sisaldanud sõnade asukohtadega,
iga võti kujul (doc_index, word_index); | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L491-L545 |
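The Estonian docstring says: this records "hidden" ambiguities that lemma-based post-disambiguation should ignore (nud/tud/dud participles, POS ambiguity of uninflected words, present-tense 'olema', singular/plural ambiguity of pronouns, numeral vs pronoun ambiguity), returning a dict keyed by (doc_index, word_index). A standalone sketch of the first check with simplified analysis dicts:

# If more than one analysis ends in nud/tud/dud, the word's ambiguity is "hidden".
import re

nud_tud = re.compile(r'^.*[ntd]ud$')
analyses = [{'root': 'kõla',     'ending': 'nud'},
            {'root': 'kõla=nud', 'ending': '0'},
            {'root': 'kõla=nud', 'ending': 'd'}]
flags = [bool(nud_tud.match(a['root'])) or bool(nud_tud.match(a['ending']))
         for a in analyses]
print(flags.count(True) > 1)   # -> True: skip this word during post-disambiguation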
estnltk/estnltk | estnltk/disambiguator.py | Disambiguator.__supplement_lemma_frequency_lexicon | def __supplement_lemma_frequency_lexicon(self, docs, hiddenWords, lexicon, amb_lexicon):
""" Täiendab etteantud sagedusleksikone antud korpuse (docs) põhjal:
*) yldist sagedusleksikoni, kus on k6ik lemmad, v.a. lemmad,
mis kuuluvad nn peidetud sõnade hulka (hiddenWords);
*) mitmeste sagedusleksikoni, kus on vaid mitmeste analyysidega
s6nades esinenud lemmad, v.a. (hiddenWords) lemmad, koos
nende yldiste esinemissagedustega (esimesest leksikonist);
"""
for d in range(len(docs)):
for w in range(len(docs[d][WORDS])):
word = docs[d][WORDS][w]
# Jätame vahele nn peidetud sõnad
if (d, w) in hiddenWords:
continue
isAmbiguous = (len(word[ANALYSIS])>1)
# Jäädvustame sagedused, verbide omad eraldiseisva märkega:
for a in word[ANALYSIS]:
lemma = a[ROOT]+'ma' if a[POSTAG]=='V' else a[ROOT]
# 1) Jäädvustame üldise sageduse
if lemma not in lexicon:
lexicon[lemma] = 1
else:
lexicon[lemma] += 1
# 2) Jäädvustame mitmeste sõnade esinemise
if isAmbiguous:
amb_lexicon[lemma] = 1
# Kanname yldisest sagedusleksikonist sagedused yle mitmeste lemmade
# sagedusleksikoni
for lemma in amb_lexicon.keys():
amb_lexicon[lemma] = lexicon[lemma] | python | def __supplement_lemma_frequency_lexicon(self, docs, hiddenWords, lexicon, amb_lexicon):
""" Täiendab etteantud sagedusleksikone antud korpuse (docs) põhjal:
*) yldist sagedusleksikoni, kus on k6ik lemmad, v.a. lemmad,
mis kuuluvad nn peidetud sõnade hulka (hiddenWords);
*) mitmeste sagedusleksikoni, kus on vaid mitmeste analyysidega
s6nades esinenud lemmad, v.a. (hiddenWords) lemmad, koos
nende yldiste esinemissagedustega (esimesest leksikonist);
"""
for d in range(len(docs)):
for w in range(len(docs[d][WORDS])):
word = docs[d][WORDS][w]
# Jätame vahele nn peidetud sõnad
if (d, w) in hiddenWords:
continue
isAmbiguous = (len(word[ANALYSIS])>1)
# Jäädvustame sagedused, verbide omad eraldiseisva märkega:
for a in word[ANALYSIS]:
lemma = a[ROOT]+'ma' if a[POSTAG]=='V' else a[ROOT]
# 1) Jäädvustame üldise sageduse
if lemma not in lexicon:
lexicon[lemma] = 1
else:
lexicon[lemma] += 1
# 2) Jäädvustame mitmeste sõnade esinemise
if isAmbiguous:
amb_lexicon[lemma] = 1
# Kanname yldisest sagedusleksikonist sagedused yle mitmeste lemmade
# sagedusleksikoni
for lemma in amb_lexicon.keys():
amb_lexicon[lemma] = lexicon[lemma] | Täiendab etteantud sagedusleksikone antud korpuse (docs) põhjal:
*) yldist sagedusleksikoni, kus on k6ik lemmad, v.a. lemmad,
mis kuuluvad nn peidetud sõnade hulka (hiddenWords);
*) mitmeste sagedusleksikoni, kus on vaid mitmeste analyysidega
s6nades esinenud lemmad, v.a. (hiddenWords) lemmad, koos
nende yldiste esinemissagedustega (esimesest leksikonist); | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L548-L577 |
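The two counters built above can be illustrated without estnltk's data structures. The following self-contained sketch mirrors the same logic over toy word records; the 'root'/'partofspeech' keys stand in for the ROOT/POSTAG constants and are an assumption of this sketch, not the library's API.

# Each word is a list of candidate analyses.
words = [
    [{'root': 'tee', 'partofspeech': 'S'}],                                         # unambiguous
    [{'root': 'tee', 'partofspeech': 'S'}, {'root': 'tege', 'partofspeech': 'V'}],  # ambiguous
    [{'root': 'tege', 'partofspeech': 'V'}],                                        # unambiguous
]

lexicon, amb_lexicon = {}, {}
for analyses in words:
    ambiguous = len(analyses) > 1
    for a in analyses:
        # Verbs are keyed by their 'ma'-infinitive, as in the method above.
        lemma = a['root'] + 'ma' if a['partofspeech'] == 'V' else a['root']
        lexicon[lemma] = lexicon.get(lemma, 0) + 1
        if ambiguous:
            amb_lexicon[lemma] = 1

# Copy the corpus-wide frequencies onto the ambiguous lemmas.
for lemma in amb_lexicon:
    amb_lexicon[lemma] = lexicon[lemma]

print(lexicon)      # {'tee': 2, 'tegema': 2}
print(amb_lexicon)  # {'tee': 2, 'tegema': 2}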
estnltk/estnltk | estnltk/disambiguator.py | Disambiguator.__disambiguate_with_lexicon | def __disambiguate_with_lexicon(self, docs, lexicon, hiddenWords):
""" Teostab lemmade leksikoni järgi mitmeste morf analüüside
ühestamise - eemaldab üleliigsed analüüsid;
Toetub ideele "üks tähendus teksti kohta": kui mitmeseks jäänud
lemma esineb tekstis/korpuses ka mujal ning lõppkokkuvõttes
esineb sagedamini kui alternatiivsed analüüsid, siis tõenäoliselt
see ongi õige lemma/analüüs;
"""
for d in range(len(docs)):
for w in range(len(docs[d][WORDS])):
word = docs[d][WORDS][w]
# Jätame vahele nn peidetud sõnad
if (d, w) in hiddenWords:
continue
# Vaatame vaid mitmeseks jäänud analüüsidega sõnu
if len(word[ANALYSIS]) > 1:
# 1) Leiame suurima esinemissageduse mitmeste lemmade seas
highestFreq = 0
for analysis in word[ANALYSIS]:
lemma = analysis[ROOT]+'ma' if analysis[POSTAG]=='V' else analysis[ROOT]
if lemma in lexicon and lexicon[lemma] > highestFreq:
highestFreq = lexicon[lemma]
if highestFreq > 0:
# 2) Jätame välja kõik analüüsid, mille lemma esinemissagedus
# on väiksem kui suurim esinemissagedus;
toDelete = []
for analysis in word[ANALYSIS]:
lemma = analysis[ROOT]+'ma' if analysis[POSTAG]=='V' else analysis[ROOT]
freq = lexicon[lemma] if lemma in lexicon else 0
if freq < highestFreq:
toDelete.append(analysis)
for analysis in toDelete:
word[ANALYSIS].remove(analysis) | python | def __disambiguate_with_lexicon(self, docs, lexicon, hiddenWords):
""" Teostab lemmade leksikoni järgi mitmeste morf analüüside
ühestamise - eemaldab üleliigsed analüüsid;
Toetub ideele "üks tähendus teksti kohta": kui mitmeseks jäänud
lemma esineb tekstis/korpuses ka mujal ning lõppkokkuvõttes
esineb sagedamini kui alternatiivsed analüüsid, siis tõenäoliselt
see ongi õige lemma/analüüs;
"""
for d in range(len(docs)):
for w in range(len(docs[d][WORDS])):
word = docs[d][WORDS][w]
# Jätame vahele nn peidetud sõnad
if (d, w) in hiddenWords:
continue
# Vaatame vaid mitmeseks jäänud analüüsidega sõnu
if len(word[ANALYSIS]) > 1:
# 1) Leiame suurima esinemissageduse mitmeste lemmade seas
highestFreq = 0
for analysis in word[ANALYSIS]:
lemma = analysis[ROOT]+'ma' if analysis[POSTAG]=='V' else analysis[ROOT]
if lemma in lexicon and lexicon[lemma] > highestFreq:
highestFreq = lexicon[lemma]
if highestFreq > 0:
# 2) Jätame välja kõik analüüsid, mille lemma esinemissagedus
# on väiksem kui suurim esinemissagedus;
toDelete = []
for analysis in word[ANALYSIS]:
lemma = analysis[ROOT]+'ma' if analysis[POSTAG]=='V' else analysis[ROOT]
freq = lexicon[lemma] if lemma in lexicon else 0
if freq < highestFreq:
toDelete.append(analysis)
for analysis in toDelete:
word[ANALYSIS].remove(analysis) | Teostab lemmade leksikoni järgi mitmeste morf analüüside
ühestamise - eemaldab üleliigsed analüüsid;
Toetub ideele "üks tähendus teksti kohta": kui mitmeseks jäänud
lemma esineb tekstis/korpuses ka mujal ning lõppkokkuvõttes
esineb sagedamini kui alternatiivsed analüüsid, siis tõenäoliselt
see ongi õige lemma/analüüs; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L580-L612 |
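Given such a frequency lexicon, the pruning rule itself is short. A self-contained sketch of the "one sense per discourse" decision for a single ambiguous word (again with plain dicts, not estnltk's own structures):

lexicon = {'tee': 7, 'tegema': 2}   # toy corpus-wide lemma frequencies

# One ambiguous word with two candidate readings.
analyses = [{'root': 'tee',  'partofspeech': 'S'},
            {'root': 'tege', 'partofspeech': 'V'}]

def lemma_of(a):
    return a['root'] + 'ma' if a['partofspeech'] == 'V' else a['root']

highest = max(lexicon.get(lemma_of(a), 0) for a in analyses)
if highest > 0:
    # Keep only the readings whose lemma is (jointly) the most frequent one.
    analyses = [a for a in analyses if lexicon.get(lemma_of(a), 0) >= highest]

print([lemma_of(a) for a in analyses])  # ['tee'] -- the more frequent reading wins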
estnltk/estnltk | estnltk/disambiguator.py | Disambiguator.post_disambiguate | def post_disambiguate(self, collections):
""" Teostab mitmeste analüüside lemma-põhise järelühestamise. Järelühestamine
toimub kahes etapis: kõigepealt ühe dokumendikollektsiooni piires ning
seejärel üle kõigi dokumendikollektsioonide (kui sisendis on rohkem kui 1
dokumendikollektsioon);
Sisuliselt kasutatakse ühestamisel "üks tähendus teksti kohta" idee laiendust:
kui mitmeseks jäänud lemma esineb ka mujal (samas kollektsioonis või kõigis
kollektsioonides) ning lõppkokkuvõttes esineb sagedamini kui alternatiivsed
analüüsid, siis tõenäoliselt see ongi õige lemma/analüüs;
"""
#
# I etapp: ühestame ühe dokumendikollektsiooni piires
# (nt üle kõigi samal päeval ilmunud ajaleheartiklite);
#
for docs in collections:
# 1) Eemaldame analüüside seast duplikaadid ja probleemsed
self.__remove_duplicate_and_problematic_analyses(docs)
# 2) Leiame sõnad, mis sisaldavad nn ignoreeritavaid mitmesusi
# (selliseid mitmesusi, mida me ühestamisel ei arvesta);
hiddenWords = self.__find_hidden_analyses(docs)
# 3) Leiame kaks lemmade sagedusleksikoni: üldise lemmade sagedus-
# leksikoni ja mitmeseks jäänud sonade lemmade sagedusleksikoni;
# Mitmeste lemmade leksikoni läheb kirja vastavate lemmade yldine
# sagedus korpuses (kuhu arvatud ka sagedus ühestatud sõnades);
genLemmaLex = dict()
ambLemmaLex = dict()
self.__supplement_lemma_frequency_lexicon(docs, hiddenWords, ambLemmaLex, genLemmaLex)
# 4) Teostame lemmade-p6hise yhestamise: mitmeseks j22nud analyyside
# puhul j2tame alles analyysid, mille lemma esinemisagedus on suurim
# (ja kui k6igi esinemissagedus on v6rdne, siis ei tee midagi)
self.__disambiguate_with_lexicon(docs, ambLemmaLex, hiddenWords)
#
# II etapp: ühestame üle kõikide dokumendikollektsioonide
# (nt üle kõigi ühe aasta ajalehenumbrite, kus
# üks ajalehenumber sisaldab kõiki sama päeva artikleid);
#
if len(collections) > 1:
# Genereerime mitmeste sagedusleksikoni
genLemmaLex = dict()
ambLemmaLex = dict()
for docs in collections:
# *) Leiame sõnad, mis sisaldavad nn ignoreeritavaid mitmesusi
hiddenWords = self.__find_hidden_analyses(docs)
# *) Täiendame üldist lemmade sagedusleksikoni ja mitmeseks jäänud
# lemmade sagedusleksikoni;
self.__supplement_lemma_frequency_lexicon(docs, hiddenWords, ambLemmaLex, genLemmaLex)
# Teostame järelühestamise
for docs in collections:
# *) Leiame sõnad, mis sisaldavad nn ignoreeritavaid mitmesusi
hiddenWords = self.__find_hidden_analyses(docs)
# *) Teostame lemmade-p6hise yhestamise;
self.__disambiguate_with_lexicon(docs, ambLemmaLex, hiddenWords)
return collections | python | def post_disambiguate(self, collections):
""" Teostab mitmeste analüüside lemma-põhise järelühestamise. Järelühestamine
toimub kahes etapis: kõigepealt ühe dokumendikollektsiooni piires ning
seejärel üle kõigi dokumendikollektsioonide (kui sisendis on rohkem kui 1
dokumendikollektsioon);
Sisuliselt kasutatakse ühestamisel "üks tähendus teksti kohta" idee laiendust:
kui mitmeseks jäänud lemma esineb ka mujal (samas kollektsioonis või kõigis
kollektsioonides) ning lõppkokkuvõttes esineb sagedamini kui alternatiivsed
analüüsid, siis tõenäoliselt see ongi õige lemma/analüüs;
"""
#
# I etapp: ühestame ühe dokumendikollektsiooni piires
# (nt üle kõigi samal päeval ilmunud ajaleheartiklite);
#
for docs in collections:
# 1) Eemaldame analüüside seast duplikaadid ja probleemsed
self.__remove_duplicate_and_problematic_analyses(docs)
# 2) Leiame sõnad, mis sisaldavad nn ignoreeritavaid mitmesusi
# (selliseid mitmesusi, mida me ühestamisel ei arvesta);
hiddenWords = self.__find_hidden_analyses(docs)
# 3) Leiame kaks lemmade sagedusleksikoni: üldise lemmade sagedus-
# leksikoni ja mitmeseks jäänud sonade lemmade sagedusleksikoni;
# Mitmeste lemmade leksikoni läheb kirja vastavate lemmade yldine
# sagedus korpuses (kuhu arvatud ka sagedus ühestatud sõnades);
genLemmaLex = dict()
ambLemmaLex = dict()
self.__supplement_lemma_frequency_lexicon(docs, hiddenWords, ambLemmaLex, genLemmaLex)
# 4) Teostame lemmade-p6hise yhestamise: mitmeseks j22nud analyyside
# puhul j2tame alles analyysid, mille lemma esinemisagedus on suurim
# (ja kui k6igi esinemissagedus on v6rdne, siis ei tee midagi)
self.__disambiguate_with_lexicon(docs, ambLemmaLex, hiddenWords)
#
# II etapp: ühestame üle kõikide dokumendikollektsioonide
# (nt üle kõigi ühe aasta ajalehenumbrite, kus
# üks ajalehenumber sisaldab kõiki sama päeva artikleid);
#
if len(collections) > 1:
# Genereerime mitmeste sagedusleksikoni
genLemmaLex = dict()
ambLemmaLex = dict()
for docs in collections:
# *) Leiame sõnad, mis sisaldavad nn ignoreeritavaid mitmesusi
hiddenWords = self.__find_hidden_analyses(docs)
# *) Täiendame üldist lemmade sagedusleksikoni ja mitmeseks jäänud
# lemmade sagedusleksikoni;
self.__supplement_lemma_frequency_lexicon(docs, hiddenWords, ambLemmaLex, genLemmaLex)
# Teostame järelühestamise
for docs in collections:
# *) Leiame sõnad, mis sisaldavad nn ignoreeritavaid mitmesusi
hiddenWords = self.__find_hidden_analyses(docs)
# *) Teostame lemmade-p6hise yhestamise;
self.__disambiguate_with_lexicon(docs, ambLemmaLex, hiddenWords)
return collections | Teostab mitmeste analüüside lemma-põhise järelühestamise. Järelühestamine
toimub kahes etapis: kõigepealt ühe dokumendikollektsiooni piires ning
seejärel üle kõigi dokumendikollektsioonide (kui sisendis on rohkem kui 1
dokumendikollektsioon);
Sisuliselt kasutatakse ühestamisel "üks tähendus teksti kohta" idee laiendust:
kui mitmeseks jäänud lemma esineb ka mujal (samas kollektsioonis või kõigis
kollektsioonides) ning lõppkokkuvõttes esineb sagedamini kui alternatiivsed
analüüsid, siis tõenäoliselt see ongi õige lemma/analüüs; | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L615-L667 |
estnltk/estnltk | estnltk/wordnet_tagger.py | WordnetTagger.tag_text | def tag_text(self, text, **kwargs):
"""Annotates `analysis` entries in `corpus` with a list of lemmas` synsets and queried WordNet data in a 'wordnet' entry.
Note
----
Annotates every `analysis` entry with a `wordnet`:{`synsets`:[..]}.
Parameters
----------
text: estnltk.text.Text
Representation of a corpus in a disassembled form for automatic text analysis with word-level `analysis` entry.
E.g. corpus disassembled into paragraphs, sentences, words ({'paragraphs':[{'sentences':[{'words':[{'analysis':{...}},..]},..]},..]}).
pos : boolean, optional
            If True, annotates each synset with a corresponding `pos` (part-of-speech) tag.
variants : boolean, optional
If True, annotates each synset with a list of all its variants' (lemmas') literals.
var_sense : boolean, optional
If True and `variants` is True, annotates each variant/lemma with its sense number.
var_definition : boolean, optional
If True and `variants` is True, annotates each variant/lemma with its definition. Definitions often missing in WordNet.
var_examples : boolean, optional
If True and `variants` is True, annotates each variant/lemma with a list of its examples. Examples often missing in WordNet.
relations : list of str, optional
Holds interested relations. Legal relations are as follows:
`antonym`, `be_in_state`, `belongs_to_class`, `causes`, `fuzzynym`, `has_holo_location`, `has_holo_madeof`, `has_holo_member`,
`has_holo_part`, `has_holo_portion`, `has_holonym`, `has_hyperonym`, `has_hyponym`, `has_instance`, `has_mero_location`,
`has_mero_madeof`, `has_mero_member`, `has_mero_part`, `has_mero_portion`, `has_meronym`, `has_subevent`, `has_xpos_hyperonym`,
`has_xpos_hyponym`, `involved`, `involved_agent`, `involved_instrument`, `involved_location`, `involved_patient`,
`involved_target_direction`, `is_caused_by`, `is_subevent_of`, `near_antonym`, `near_synonym`, `role`, `role_agent`, `role_instrument`,
`role_location`, `role_patient`, `role_target_direction`, `state_of`, `xpos_fuzzynym`, `xpos_near_antonym`, `xpos_near_synonym`.
Annotates each synset with related synsets' indices with respect to queried relations.
Returns
-------
estnltk.text.Text
In-place annotated `text`.
"""
for analysis_match in text.analysis:
for candidate in analysis_match:
if candidate['partofspeech'] in PYVABAMORF_TO_WORDNET_POS_MAP:
# Wordnet contains data about the given lemma and pos combination - will annotate.
wordnet_obj = {}
tag_synsets(wordnet_obj, candidate, **kwargs)
return text | python | def tag_text(self, text, **kwargs):
"""Annotates `analysis` entries in `corpus` with a list of lemmas` synsets and queried WordNet data in a 'wordnet' entry.
Note
----
Annotates every `analysis` entry with a `wordnet`:{`synsets`:[..]}.
Parameters
----------
text: estnltk.text.Text
Representation of a corpus in a disassembled form for automatic text analysis with word-level `analysis` entry.
E.g. corpus disassembled into paragraphs, sentences, words ({'paragraphs':[{'sentences':[{'words':[{'analysis':{...}},..]},..]},..]}).
pos : boolean, optional
            If True, annotates each synset with a corresponding `pos` (part-of-speech) tag.
variants : boolean, optional
If True, annotates each synset with a list of all its variants' (lemmas') literals.
var_sense : boolean, optional
If True and `variants` is True, annotates each variant/lemma with its sense number.
var_definition : boolean, optional
If True and `variants` is True, annotates each variant/lemma with its definition. Definitions often missing in WordNet.
var_examples : boolean, optional
If True and `variants` is True, annotates each variant/lemma with a list of its examples. Examples often missing in WordNet.
relations : list of str, optional
Holds interested relations. Legal relations are as follows:
`antonym`, `be_in_state`, `belongs_to_class`, `causes`, `fuzzynym`, `has_holo_location`, `has_holo_madeof`, `has_holo_member`,
`has_holo_part`, `has_holo_portion`, `has_holonym`, `has_hyperonym`, `has_hyponym`, `has_instance`, `has_mero_location`,
`has_mero_madeof`, `has_mero_member`, `has_mero_part`, `has_mero_portion`, `has_meronym`, `has_subevent`, `has_xpos_hyperonym`,
`has_xpos_hyponym`, `involved`, `involved_agent`, `involved_instrument`, `involved_location`, `involved_patient`,
`involved_target_direction`, `is_caused_by`, `is_subevent_of`, `near_antonym`, `near_synonym`, `role`, `role_agent`, `role_instrument`,
`role_location`, `role_patient`, `role_target_direction`, `state_of`, `xpos_fuzzynym`, `xpos_near_antonym`, `xpos_near_synonym`.
Annotates each synset with related synsets' indices with respect to queried relations.
Returns
-------
estnltk.text.Text
In-place annotated `text`.
"""
for analysis_match in text.analysis:
for candidate in analysis_match:
if candidate['partofspeech'] in PYVABAMORF_TO_WORDNET_POS_MAP:
# Wordnet contains data about the given lemma and pos combination - will annotate.
wordnet_obj = {}
tag_synsets(wordnet_obj, candidate, **kwargs)
        return text | Annotates `analysis` entries in `corpus` with a list of lemmas' synsets and queried WordNet data in a 'wordnet' entry.
Note
----
Annotates every `analysis` entry with a `wordnet`:{`synsets`:[..]}.
Parameters
----------
text: estnltk.text.Text
Representation of a corpus in a disassembled form for automatic text analysis with word-level `analysis` entry.
E.g. corpus disassembled into paragraphs, sentences, words ({'paragraphs':[{'sentences':[{'words':[{'analysis':{...}},..]},..]},..]}).
pos : boolean, optional
            If True, annotates each synset with a corresponding `pos` (part-of-speech) tag.
variants : boolean, optional
If True, annotates each synset with a list of all its variants' (lemmas') literals.
var_sense : boolean, optional
If True and `variants` is True, annotates each variant/lemma with its sense number.
var_definition : boolean, optional
If True and `variants` is True, annotates each variant/lemma with its definition. Definitions often missing in WordNet.
var_examples : boolean, optional
If True and `variants` is True, annotates each variant/lemma with a list of its examples. Examples often missing in WordNet.
relations : list of str, optional
Holds interested relations. Legal relations are as follows:
`antonym`, `be_in_state`, `belongs_to_class`, `causes`, `fuzzynym`, `has_holo_location`, `has_holo_madeof`, `has_holo_member`,
`has_holo_part`, `has_holo_portion`, `has_holonym`, `has_hyperonym`, `has_hyponym`, `has_instance`, `has_mero_location`,
`has_mero_madeof`, `has_mero_member`, `has_mero_part`, `has_mero_portion`, `has_meronym`, `has_subevent`, `has_xpos_hyperonym`,
`has_xpos_hyponym`, `involved`, `involved_agent`, `involved_instrument`, `involved_location`, `involved_patient`,
`involved_target_direction`, `is_caused_by`, `is_subevent_of`, `near_antonym`, `near_synonym`, `role`, `role_agent`, `role_instrument`,
`role_location`, `role_patient`, `role_target_direction`, `state_of`, `xpos_fuzzynym`, `xpos_near_antonym`, `xpos_near_synonym`.
Annotates each synset with related synsets' indices with respect to queried relations.
Returns
-------
estnltk.text.Text
In-place annotated `text`. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet_tagger.py#L22-L67 |
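A hedged usage sketch of the tagger described above, using the keyword arguments listed in the docstring. The no-argument constructor and the assumption that accessing text.analysis triggers morphological analysis are guesses about the surrounding estnltk API and may differ between versions.

from estnltk import Text
from estnltk.wordnet_tagger import WordnetTagger

tagger = WordnetTagger()                       # assumed no-argument constructor
text = Text('Koer magas toas.')                # "The dog slept in the room."
tagger.tag_text(text,
                pos=True,                      # annotate each synset with its POS
                variants=True, var_sense=True, # include lemma variants and sense numbers
                relations=['has_hyperonym'])   # also record hypernym links
# Each analysis of a noun/verb/adjective/adverb now carries a 'wordnet' entry
# listing the synsets found for its lemma.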
estnltk/estnltk | estnltk/tools/cnllconverter.py | get_texts_and_labels | def get_texts_and_labels(sentence_chunk):
"""Given a sentence chunk, extract original texts and labels."""
words = sentence_chunk.split('\n')
texts = []
labels = []
for word in words:
word = word.strip()
if len(word) > 0:
toks = word.split('\t')
texts.append(toks[0].strip())
labels.append(toks[-1].strip())
return texts, labels | python | def get_texts_and_labels(sentence_chunk):
"""Given a sentence chunk, extract original texts and labels."""
words = sentence_chunk.split('\n')
texts = []
labels = []
for word in words:
word = word.strip()
if len(word) > 0:
toks = word.split('\t')
texts.append(toks[0].strip())
labels.append(toks[-1].strip())
return texts, labels | Given a sentence chunk, extract original texts and labels. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/tools/cnllconverter.py#L12-L23 |
estnltk/estnltk | estnltk/tools/cnllconverter.py | parse_doc | def parse_doc(doc):
"""Exract list of sentences containing (text, label) pairs."""
word_spans = []
sentence_spans = []
sentence_chunks = doc.split('\n\n')
sentences = []
for chunk in sentence_chunks:
sent_texts, sent_labels = get_texts_and_labels(chunk.strip())
sentences.append(list(zip(sent_texts, sent_labels)))
return sentences | python | def parse_doc(doc):
"""Exract list of sentences containing (text, label) pairs."""
word_spans = []
sentence_spans = []
sentence_chunks = doc.split('\n\n')
sentences = []
for chunk in sentence_chunks:
sent_texts, sent_labels = get_texts_and_labels(chunk.strip())
sentences.append(list(zip(sent_texts, sent_labels)))
    return sentences | Extract list of sentences containing (text, label) pairs. | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/tools/cnllconverter.py#L25-L34 |
estnltk/estnltk | estnltk/tools/cnllconverter.py | convert | def convert(document):
"""Convert a document to a Text object"""
raw_tokens = []
curpos = 0
text_spans = []
all_labels = []
sent_spans = []
word_texts = []
for sentence in document:
startpos = curpos
for idx, (text, label) in enumerate(sentence):
raw_tokens.append(text)
word_texts.append(text)
all_labels.append(label)
text_spans.append((curpos, curpos+len(text)))
curpos += len(text)
if idx < len(sentence) - 1:
raw_tokens.append(' ')
curpos += 1
sent_spans.append((startpos, curpos))
raw_tokens.append('\n')
curpos += 1
return {
TEXT: ''.join(raw_tokens),
WORDS: [{TEXT: text, START: start, END: end, LABEL: label} for text, (start, end), label in zip(word_texts, text_spans, all_labels)],
SENTENCES: [{START: start, END:end} for start, end in sent_spans]
} | python | def convert(document):
"""Convert a document to a Text object"""
raw_tokens = []
curpos = 0
text_spans = []
all_labels = []
sent_spans = []
word_texts = []
for sentence in document:
startpos = curpos
for idx, (text, label) in enumerate(sentence):
raw_tokens.append(text)
word_texts.append(text)
all_labels.append(label)
text_spans.append((curpos, curpos+len(text)))
curpos += len(text)
if idx < len(sentence) - 1:
raw_tokens.append(' ')
curpos += 1
sent_spans.append((startpos, curpos))
raw_tokens.append('\n')
curpos += 1
return {
TEXT: ''.join(raw_tokens),
WORDS: [{TEXT: text, START: start, END: end, LABEL: label} for text, (start, end), label in zip(word_texts, text_spans, all_labels)],
SENTENCES: [{START: start, END:end} for start, end in sent_spans]
} | Convert a document to a Text object | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/tools/cnllconverter.py#L36-L62 |
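The three helpers above form a small pipeline: split a CoNLL-style document into sentence chunks, pair tokens with their labels, then rebuild a Text-like dictionary with character spans. A self-contained walk-through is shown below; the import path is taken from the file location above, and TEXT/WORDS/SENTENCES/START/END/LABEL in the comments refer to the module's imported constants (presumably from estnltk.names), so treat those details as assumptions.

from estnltk.tools.cnllconverter import parse_doc, convert

raw = 'Sada\tNUM\tO\nkaks\tNUM\tO\n\nTere\tINTJ\tO'

doc = parse_doc(raw)
# doc == [[('Sada', 'O'), ('kaks', 'O')], [('Tere', 'O')]]

result = convert(doc)
# result[TEXT]      == 'Sada kaks\nTere\n'
# result[WORDS][0]  == {TEXT: 'Sada', START: 0, END: 4, LABEL: 'O'}
# result[SENTENCES] == [{START: 0, END: 9}, {START: 10, END: 14}]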
agoragames/haigha | haigha/classes/transaction_class.py | TransactionClass._cleanup | def _cleanup(self):
'''
Cleanup all the local data.
'''
self._select_cb = None
self._commit_cb = None
self._rollback_cb = None
super(TransactionClass, self)._cleanup() | python | def _cleanup(self):
'''
Cleanup all the local data.
'''
self._select_cb = None
self._commit_cb = None
self._rollback_cb = None
super(TransactionClass, self)._cleanup() | Cleanup all the local data. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/transaction_class.py#L45-L52 |
agoragames/haigha | haigha/classes/transaction_class.py | TransactionClass.select | def select(self, cb=None):
'''
Set this channel to use transactions.
'''
if not self._enabled:
self._enabled = True
self.send_frame(MethodFrame(self.channel_id, 90, 10))
self._select_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_select_ok) | python | def select(self, cb=None):
'''
Set this channel to use transactions.
'''
if not self._enabled:
self._enabled = True
self.send_frame(MethodFrame(self.channel_id, 90, 10))
self._select_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_select_ok) | Set this channel to use transactions. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/transaction_class.py#L54-L62 |
agoragames/haigha | haigha/classes/transaction_class.py | TransactionClass.commit | def commit(self, cb=None):
'''
Commit the current transaction. Caller can specify a callback to use
when the transaction is committed.
'''
# Could call select() but spec 1.9.2.3 says to raise an exception
if not self.enabled:
raise self.TransactionsNotEnabled()
self.send_frame(MethodFrame(self.channel_id, 90, 20))
self._commit_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_commit_ok) | python | def commit(self, cb=None):
'''
Commit the current transaction. Caller can specify a callback to use
when the transaction is committed.
'''
# Could call select() but spec 1.9.2.3 says to raise an exception
if not self.enabled:
raise self.TransactionsNotEnabled()
self.send_frame(MethodFrame(self.channel_id, 90, 20))
self._commit_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_commit_ok) | Commit the current transaction. Caller can specify a callback to use
when the transaction is committed. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/transaction_class.py#L69-L80 |
agoragames/haigha | haigha/classes/transaction_class.py | TransactionClass.rollback | def rollback(self, cb=None):
'''
Abandon all message publications and acks in the current transaction.
Caller can specify a callback to use when the transaction has been
aborted.
'''
# Could call select() but spec 1.9.2.5 says to raise an exception
if not self.enabled:
raise self.TransactionsNotEnabled()
self.send_frame(MethodFrame(self.channel_id, 90, 30))
self._rollback_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_rollback_ok) | python | def rollback(self, cb=None):
'''
Abandon all message publications and acks in the current transaction.
Caller can specify a callback to use when the transaction has been
aborted.
'''
# Could call select() but spec 1.9.2.5 says to raise an exception
if not self.enabled:
raise self.TransactionsNotEnabled()
self.send_frame(MethodFrame(self.channel_id, 90, 30))
self._rollback_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_rollback_ok) | Abandon all message publications and acks in the current transaction.
Caller can specify a callback to use when the transaction has been
aborted. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/transaction_class.py#L87-L99 |
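A hedged sketch of how select/commit/rollback are typically combined on a haigha channel, given an already open connection. The channel.tx and channel.basic accessor names follow common haigha usage but are assumptions here; adjust them to your version.

from haigha.message import Message

def on_commit():
    print('publishes in this transaction are now confirmed')

ch = connection.channel()
ch.tx.select()                                   # enable transactions, once per channel
ch.basic.publish(Message('order #42 accepted'),
                 exchange='orders', routing_key='accepted')
ch.tx.commit(cb=on_commit)                       # or ch.tx.rollback(cb=...) to abandon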
agoragames/haigha | haigha/connection.py | Connection.synchronous | def synchronous(self):
'''
True if transport is synchronous or the connection has been forced
into synchronous mode, False otherwise.
'''
if self._transport is None:
if self._close_info and len(self._close_info['reply_text']) > 0:
raise ConnectionClosed("connection is closed: %s : %s" %
(self._close_info['reply_code'],
self._close_info['reply_text']))
raise ConnectionClosed("connection is closed")
return self.transport.synchronous or self._synchronous | python | def synchronous(self):
'''
True if transport is synchronous or the connection has been forced
into synchronous mode, False otherwise.
'''
if self._transport is None:
if self._close_info and len(self._close_info['reply_text']) > 0:
raise ConnectionClosed("connection is closed: %s : %s" %
(self._close_info['reply_code'],
self._close_info['reply_text']))
raise ConnectionClosed("connection is closed")
return self.transport.synchronous or self._synchronous | True if transport is synchronous or the connection has been forced
into synchronous mode, False otherwise. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connection.py#L191-L202 |
agoragames/haigha | haigha/connection.py | Connection.connect | def connect(self, host, port):
'''
Connect to a host and port.
'''
# Clear the connect state immediately since we're no longer connected
# at this point.
self._connected = False
# Only after the socket has connected do we clear this state; closed
# must be False so that writes can be buffered in writePacket(). The
# closed state might have been set to True due to a socket error or a
# redirect.
self._host = "%s:%d" % (host, port)
self._closed = False
self._close_info = {
'reply_code': 0,
'reply_text': 'failed to connect to %s' % (self._host),
'class_id': 0,
'method_id': 0
}
self._transport.connect((host, port))
self._transport.write(PROTOCOL_HEADER)
self._last_octet_time = time.time()
if self._synchronous_connect:
# Have to queue this callback just after connect, it can't go
# into the constructor because the channel needs to be
# "always there" for frame processing, but the synchronous
# callback can't be added until after the protocol header has
# been written. This SHOULD be registered before the protocol
# header is written, in the case where the header bytes are
# written, but this thread/greenlet/context does not return until
# after another thread/greenlet/context has read and processed the
# recv_start frame. Without more re-write to add_sync_cb though,
# it will block on reading responses that will never arrive
# because the protocol header isn't written yet. TBD if needs
# refactoring. Could encapsulate entirely here, wherein
# read_frames exits if protocol header not yet written. Like other
# synchronous behaviors, adding this callback will result in a
# blocking frame read and process loop until _recv_start and any
# subsequent synchronous callbacks have been processed. In the
# event that this is /not/ a synchronous transport, but the
# caller wants the connect to be synchronous so as to ensure that
# the connection is ready, then do a read frame loop here.
self._channels[0].add_synchronous_cb(self._channels[0]._recv_start)
while not self._connected:
self.read_frames() | python | def connect(self, host, port):
'''
Connect to a host and port.
'''
# Clear the connect state immediately since we're no longer connected
# at this point.
self._connected = False
# Only after the socket has connected do we clear this state; closed
# must be False so that writes can be buffered in writePacket(). The
# closed state might have been set to True due to a socket error or a
# redirect.
self._host = "%s:%d" % (host, port)
self._closed = False
self._close_info = {
'reply_code': 0,
'reply_text': 'failed to connect to %s' % (self._host),
'class_id': 0,
'method_id': 0
}
self._transport.connect((host, port))
self._transport.write(PROTOCOL_HEADER)
self._last_octet_time = time.time()
if self._synchronous_connect:
# Have to queue this callback just after connect, it can't go
# into the constructor because the channel needs to be
# "always there" for frame processing, but the synchronous
# callback can't be added until after the protocol header has
# been written. This SHOULD be registered before the protocol
# header is written, in the case where the header bytes are
# written, but this thread/greenlet/context does not return until
# after another thread/greenlet/context has read and processed the
# recv_start frame. Without more re-write to add_sync_cb though,
# it will block on reading responses that will never arrive
# because the protocol header isn't written yet. TBD if needs
# refactoring. Could encapsulate entirely here, wherein
# read_frames exits if protocol header not yet written. Like other
# synchronous behaviors, adding this callback will result in a
# blocking frame read and process loop until _recv_start and any
# subsequent synchronous callbacks have been processed. In the
# event that this is /not/ a synchronous transport, but the
# caller wants the connect to be synchronous so as to ensure that
# the connection is ready, then do a read frame loop here.
self._channels[0].add_synchronous_cb(self._channels[0]._recv_start)
while not self._connected:
self.read_frames() | Connect to a host and port. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connection.py#L204-L252 |
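For context, a hedged sketch of how a connection is normally established; connect() above is usually driven by the constructor rather than called directly. The keyword-argument names mirror haigha's customary usage and the 'socket' transport name is an assumption, so verify both against your installed version.

from haigha.connection import Connection

connection = Connection(host='localhost', port=5672,
                        user='guest', password='guest', vhost='/',
                        heartbeat=None, transport='socket', debug=True)
# With a synchronous transport such as 'socket', construction returns only
# after the AMQP handshake on channel 0 has completed.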
agoragames/haigha | haigha/connection.py | Connection.disconnect | def disconnect(self):
'''
Disconnect from the current host, but do not update the closed state.
After the transport is disconnected, the closed state will be True if
this is called after a protocol shutdown, or False if the disconnect
was in error.
TODO: do we really need closed vs. connected states? this only adds
complication and the whole reconnect process has been scrapped anyway.
'''
self._connected = False
if self._transport is not None:
try:
self._transport.disconnect()
except Exception:
self.logger.error(
"Failed to disconnect from %s", self._host, exc_info=True)
raise
finally:
self._transport = None | python | def disconnect(self):
'''
Disconnect from the current host, but do not update the closed state.
After the transport is disconnected, the closed state will be True if
this is called after a protocol shutdown, or False if the disconnect
was in error.
TODO: do we really need closed vs. connected states? this only adds
complication and the whole reconnect process has been scrapped anyway.
'''
self._connected = False
if self._transport is not None:
try:
self._transport.disconnect()
except Exception:
self.logger.error(
"Failed to disconnect from %s", self._host, exc_info=True)
raise
finally:
self._transport = None | Disconnect from the current host, but do not update the closed state.
After the transport is disconnected, the closed state will be True if
this is called after a protocol shutdown, or False if the disconnect
was in error.
TODO: do we really need closed vs. connected states? this only adds
complication and the whole reconnect process has been scrapped anyway. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connection.py#L254-L274 |
agoragames/haigha | haigha/connection.py | Connection.transport_closed | def transport_closed(self, **kwargs):
"""
Called by Transports when they close unexpectedly, not as a result of
Connection.disconnect().
TODO: document args
"""
msg = 'unknown cause'
self.logger.warning('transport to %s closed : %s' %
(self._host, kwargs.get('msg', msg)))
self._close_info = {
'reply_code': kwargs.get('reply_code', 0),
'reply_text': kwargs.get('msg', msg),
'class_id': kwargs.get('class_id', 0),
'method_id': kwargs.get('method_id', 0)
}
# We're not connected any more, but we're not closed without an
# explicit close call.
self._connected = False
self._transport = None
# Call back to a user-provided close function
self._callback_close() | python | def transport_closed(self, **kwargs):
"""
Called by Transports when they close unexpectedly, not as a result of
Connection.disconnect().
TODO: document args
"""
msg = 'unknown cause'
self.logger.warning('transport to %s closed : %s' %
(self._host, kwargs.get('msg', msg)))
self._close_info = {
'reply_code': kwargs.get('reply_code', 0),
'reply_text': kwargs.get('msg', msg),
'class_id': kwargs.get('class_id', 0),
'method_id': kwargs.get('method_id', 0)
}
# We're not connected any more, but we're not closed without an
# explicit close call.
self._connected = False
self._transport = None
# Call back to a user-provided close function
self._callback_close() | Called by Transports when they close unexpectedly, not as a result of
Connection.disconnect().
TODO: document args | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connection.py#L279-L302 |
agoragames/haigha | haigha/connection.py | Connection._next_channel_id | def _next_channel_id(self):
'''Return the next possible channel id. Is a circular enumeration.'''
self._channel_counter += 1
if self._channel_counter >= self._channel_max:
self._channel_counter = 1
return self._channel_counter | python | def _next_channel_id(self):
'''Return the next possible channel id. Is a circular enumeration.'''
self._channel_counter += 1
if self._channel_counter >= self._channel_max:
self._channel_counter = 1
return self._channel_counter | Return the next possible channel id. Is a circular enumeration. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connection.py#L307-L312 |
agoragames/haigha | haigha/connection.py | Connection.channel | def channel(self, channel_id=None, synchronous=False):
"""
Fetch a Channel object identified by the numeric channel_id, or
create that object if it doesn't already exist. If channel_id is not
None but no channel exists for that id, will raise InvalidChannel. If
there are already too many channels open, will raise TooManyChannels.
If synchronous=True, then the channel will act synchronous in all cases
where a protocol method supports `nowait=False`, or where there is an
implied callback in the protocol.
"""
if channel_id is None:
# adjust for channel 0
if len(self._channels) - 1 >= self._channel_max:
raise Connection.TooManyChannels(
"%d channels already open, max %d",
len(self._channels) - 1,
self._channel_max)
channel_id = self._next_channel_id()
while channel_id in self._channels:
channel_id = self._next_channel_id()
elif channel_id in self._channels:
return self._channels[channel_id]
else:
raise Connection.InvalidChannel(
"%s is not a valid channel id", channel_id)
# Call open() here so that ConnectionChannel doesn't have it called.
# Could also solve this other ways, but it's a HACK regardless.
rval = Channel(
self, channel_id, self._class_map, synchronous=synchronous)
self._channels[channel_id] = rval
rval.add_close_listener(self._channel_closed)
rval.open()
return rval | python | def channel(self, channel_id=None, synchronous=False):
"""
Fetch a Channel object identified by the numeric channel_id, or
create that object if it doesn't already exist. If channel_id is not
None but no channel exists for that id, will raise InvalidChannel. If
there are already too many channels open, will raise TooManyChannels.
If synchronous=True, then the channel will act synchronous in all cases
where a protocol method supports `nowait=False`, or where there is an
implied callback in the protocol.
"""
if channel_id is None:
# adjust for channel 0
if len(self._channels) - 1 >= self._channel_max:
raise Connection.TooManyChannels(
"%d channels already open, max %d",
len(self._channels) - 1,
self._channel_max)
channel_id = self._next_channel_id()
while channel_id in self._channels:
channel_id = self._next_channel_id()
elif channel_id in self._channels:
return self._channels[channel_id]
else:
raise Connection.InvalidChannel(
"%s is not a valid channel id", channel_id)
# Call open() here so that ConnectionChannel doesn't have it called.
# Could also solve this other ways, but it's a HACK regardless.
rval = Channel(
self, channel_id, self._class_map, synchronous=synchronous)
self._channels[channel_id] = rval
rval.add_close_listener(self._channel_closed)
rval.open()
return rval | Fetch a Channel object identified by the numeric channel_id, or
create that object if it doesn't already exist. If channel_id is not
None but no channel exists for that id, will raise InvalidChannel. If
there are already too many channels open, will raise TooManyChannels.
If synchronous=True, then the channel will act synchronous in all cases
where a protocol method supports `nowait=False`, or where there is an
implied callback in the protocol. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connection.py#L314-L348 |
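Usage of the factory above, given an open connection:

ch = connection.channel()                        # new channel, id assigned automatically
same = connection.channel(ch.channel_id)         # fetches the existing Channel object
sync_ch = connection.channel(synchronous=True)   # blocks wherever nowait=False applies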
agoragames/haigha | haigha/connection.py | Connection.close | def close(self, reply_code=0, reply_text='', class_id=0, method_id=0,
disconnect=False):
'''
Close this connection.
'''
self._close_info = {
'reply_code': reply_code,
'reply_text': reply_text,
'class_id': class_id,
'method_id': method_id
}
if disconnect:
self._closed = True
self.disconnect()
self._callback_close()
else:
self._channels[0].close() | python | def close(self, reply_code=0, reply_text='', class_id=0, method_id=0,
disconnect=False):
'''
Close this connection.
'''
self._close_info = {
'reply_code': reply_code,
'reply_text': reply_text,
'class_id': class_id,
'method_id': method_id
}
if disconnect:
self._closed = True
self.disconnect()
self._callback_close()
else:
self._channels[0].close() | Close this connection. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connection.py#L359-L375 |
agoragames/haigha | haigha/connection.py | Connection.read_frames | def read_frames(self):
'''
Read frames from the transport and process them. Some transports may
choose to do this in the background, in several threads, and so on.
'''
# It's possible in a concurrent environment that our transport handle
# has gone away, so handle that cleanly.
# TODO: Consider moving this block into Translator base class. In many
# ways it belongs there. One of the problems though is that this is
# essentially the read loop. Each Transport has different rules for
# how to kick this off, and in the case of gevent, this is how a
# blocking call to read from the socket is kicked off.
if self._transport is None:
return
# Send a heartbeat (if needed)
self._channels[0].send_heartbeat()
data = self._transport.read(self._heartbeat)
current_time = time.time()
if data is None:
# Wait for 2 heartbeat intervals before giving up. See AMQP 4.2.7:
# "If a peer detects no incoming traffic (i.e. received octets) for two heartbeat intervals or longer,
# it should close the connection"
if self._heartbeat and (current_time-self._last_octet_time > 2*self._heartbeat):
msg = 'Heartbeats not received from %s for %d seconds' % (self._host, 2*self._heartbeat)
self.transport_closed(msg=msg)
raise ConnectionClosed('Connection is closed: ' + msg)
return
self._last_octet_time = current_time
reader = Reader(data)
p_channels = set()
try:
for frame in Frame.read_frames(reader):
if self._debug > 1:
self.logger.debug("READ: %s", frame)
self._frames_read += 1
ch = self.channel(frame.channel_id)
ch.buffer_frame(frame)
p_channels.add(ch)
except Frame.FrameError as e:
# Frame error in the peer, disconnect
self.close(reply_code=501,
reply_text='frame error from %s : %s' % (
self._host, str(e)),
class_id=0, method_id=0, disconnect=True)
raise ConnectionClosed("connection is closed: %s : %s" %
(self._close_info['reply_code'],
self._close_info['reply_text']))
# NOTE: we process channels after buffering unused data in order to
# preserve the integrity of the input stream in case a channel needs to
# read input, such as when a channel framing error necessitates the use
# of the synchronous channel.close method. See `Channel.process_frames`.
#
# HACK: read the buffer contents and re-buffer. Would prefer to pass
# buffer back, but there's no good way of asking the total size of the
# buffer, comparing to tell(), and then re-buffering. There's also no
# ability to clear the buffer up to the current position. It would be
# awesome if we could free that memory without a new allocation.
if reader.tell() < len(data):
self._transport.buffer(data[reader.tell():])
self._transport.process_channels(p_channels) | python | def read_frames(self):
'''
Read frames from the transport and process them. Some transports may
choose to do this in the background, in several threads, and so on.
'''
# It's possible in a concurrent environment that our transport handle
# has gone away, so handle that cleanly.
# TODO: Consider moving this block into Translator base class. In many
# ways it belongs there. One of the problems though is that this is
# essentially the read loop. Each Transport has different rules for
# how to kick this off, and in the case of gevent, this is how a
# blocking call to read from the socket is kicked off.
if self._transport is None:
return
# Send a heartbeat (if needed)
self._channels[0].send_heartbeat()
data = self._transport.read(self._heartbeat)
current_time = time.time()
if data is None:
# Wait for 2 heartbeat intervals before giving up. See AMQP 4.2.7:
# "If a peer detects no incoming traffic (i.e. received octets) for two heartbeat intervals or longer,
# it should close the connection"
if self._heartbeat and (current_time-self._last_octet_time > 2*self._heartbeat):
msg = 'Heartbeats not received from %s for %d seconds' % (self._host, 2*self._heartbeat)
self.transport_closed(msg=msg)
raise ConnectionClosed('Connection is closed: ' + msg)
return
self._last_octet_time = current_time
reader = Reader(data)
p_channels = set()
try:
for frame in Frame.read_frames(reader):
if self._debug > 1:
self.logger.debug("READ: %s", frame)
self._frames_read += 1
ch = self.channel(frame.channel_id)
ch.buffer_frame(frame)
p_channels.add(ch)
except Frame.FrameError as e:
# Frame error in the peer, disconnect
self.close(reply_code=501,
reply_text='frame error from %s : %s' % (
self._host, str(e)),
class_id=0, method_id=0, disconnect=True)
raise ConnectionClosed("connection is closed: %s : %s" %
(self._close_info['reply_code'],
self._close_info['reply_text']))
# NOTE: we process channels after buffering unused data in order to
# preserve the integrity of the input stream in case a channel needs to
# read input, such as when a channel framing error necessitates the use
# of the synchronous channel.close method. See `Channel.process_frames`.
#
# HACK: read the buffer contents and re-buffer. Would prefer to pass
# buffer back, but there's no good way of asking the total size of the
# buffer, comparing to tell(), and then re-buffering. There's also no
# ability to clear the buffer up to the current position. It would be
# awesome if we could free that memory without a new allocation.
if reader.tell() < len(data):
self._transport.buffer(data[reader.tell():])
self._transport.process_channels(p_channels) | Read frames from the transport and process them. Some transports may
choose to do this in the background, in several threads, and so on. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connection.py#L393-L458 |
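With a blocking transport, the method above is simply driven in a loop. A hedged sketch follows; it assumes the public closed property mirrors the internal closed flag and that ConnectionClosed is importable from haigha.connection, both of which may differ by version.

from haigha.connection import ConnectionClosed

try:
    while not connection.closed:
        connection.read_frames()   # services heartbeats and dispatches frames to channels
except ConnectionClosed:
    pass                           # the broker or peer closed the connection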
agoragames/haigha | haigha/connection.py | Connection._flush_buffered_frames | def _flush_buffered_frames(self):
'''
Callback when protocol has been initialized on channel 0 and we're
ready to send out frames to set up any channels that have been
created.
'''
# In the rare case (a bug) where this is called but send_frame thinks
# they should be buffered, don't clobber.
frames = self._output_frame_buffer
self._output_frame_buffer = []
for frame in frames:
self.send_frame(frame) | python | def _flush_buffered_frames(self):
'''
Callback when protocol has been initialized on channel 0 and we're
ready to send out frames to set up any channels that have been
created.
'''
# In the rare case (a bug) where this is called but send_frame thinks
# they should be buffered, don't clobber.
frames = self._output_frame_buffer
self._output_frame_buffer = []
for frame in frames:
self.send_frame(frame) | Callback when protocol has been initialized on channel 0 and we're
ready to send out frames to set up any channels that have been
created. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connection.py#L460-L471 |
agoragames/haigha | haigha/connection.py | Connection.send_frame | def send_frame(self, frame):
'''
Send a single frame. If there is no transport or we're not connected
yet, append to the output buffer, else send immediately to the socket.
This is called from within the MethodFrames.
'''
if self._closed:
if self._close_info and len(self._close_info['reply_text']) > 0:
raise ConnectionClosed("connection is closed: %s : %s" %
(self._close_info['reply_code'],
self._close_info['reply_text']))
raise ConnectionClosed("connection is closed")
if self._transport is None or \
(not self._connected and frame.channel_id != 0):
self._output_frame_buffer.append(frame)
return
if self._debug > 1:
self.logger.debug("WRITE: %s", frame)
buf = bytearray()
frame.write_frame(buf)
if len(buf) > self._frame_max:
self.close(
reply_code=501,
reply_text='attempted to send frame of %d bytes, frame max %d' % (
len(buf), self._frame_max),
class_id=0, method_id=0, disconnect=True)
raise ConnectionClosed(
"connection is closed: %s : %s" %
(self._close_info['reply_code'],
self._close_info['reply_text']))
self._transport.write(buf)
self._frames_written += 1 | python | def send_frame(self, frame):
'''
Send a single frame. If there is no transport or we're not connected
yet, append to the output buffer, else send immediately to the socket.
This is called from within the MethodFrames.
'''
if self._closed:
if self._close_info and len(self._close_info['reply_text']) > 0:
raise ConnectionClosed("connection is closed: %s : %s" %
(self._close_info['reply_code'],
self._close_info['reply_text']))
raise ConnectionClosed("connection is closed")
if self._transport is None or \
(not self._connected and frame.channel_id != 0):
self._output_frame_buffer.append(frame)
return
if self._debug > 1:
self.logger.debug("WRITE: %s", frame)
buf = bytearray()
frame.write_frame(buf)
if len(buf) > self._frame_max:
self.close(
reply_code=501,
reply_text='attempted to send frame of %d bytes, frame max %d' % (
len(buf), self._frame_max),
class_id=0, method_id=0, disconnect=True)
raise ConnectionClosed(
"connection is closed: %s : %s" %
(self._close_info['reply_code'],
self._close_info['reply_text']))
self._transport.write(buf)
self._frames_written += 1 | Send a single frame. If there is no transport or we're not connected
yet, append to the output buffer, else send immediately to the socket.
This is called from within the MethodFrames. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connection.py#L473-L508 |
agoragames/haigha | haigha/connection.py | ConnectionChannel.dispatch | def dispatch(self, frame):
'''
Override the default dispatch since we don't need the rest of
the stack.
'''
if frame.type() == HeartbeatFrame.type():
self.send_heartbeat()
elif frame.type() == MethodFrame.type():
if frame.class_id == 10:
cb = self._method_map.get(frame.method_id)
if cb:
method = self.clear_synchronous_cb(cb)
method(frame)
else:
raise Channel.InvalidMethod(
"unsupported method %d on channel %d",
frame.method_id, self.channel_id)
else:
raise Channel.InvalidClass(
"class %d is not supported on channel %d",
frame.class_id, self.channel_id)
else:
raise Frame.InvalidFrameType(
"frame type %d is not supported on channel %d",
frame.type(), self.channel_id) | python | def dispatch(self, frame):
'''
Override the default dispatch since we don't need the rest of
the stack.
'''
if frame.type() == HeartbeatFrame.type():
self.send_heartbeat()
elif frame.type() == MethodFrame.type():
if frame.class_id == 10:
cb = self._method_map.get(frame.method_id)
if cb:
method = self.clear_synchronous_cb(cb)
method(frame)
else:
raise Channel.InvalidMethod(
"unsupported method %d on channel %d",
frame.method_id, self.channel_id)
else:
raise Channel.InvalidClass(
"class %d is not supported on channel %d",
frame.class_id, self.channel_id)
else:
raise Frame.InvalidFrameType(
"frame type %d is not supported on channel %d",
frame.type(), self.channel_id) | Override the default dispatch since we don't need the rest of
the stack. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connection.py#L534-L560 |
agoragames/haigha | haigha/connection.py | ConnectionChannel.send_heartbeat | def send_heartbeat(self):
'''
Send a heartbeat if needed. Tracks last heartbeat send time.
'''
# Note that this does not take into account the time that we last
        # sent a frame. Heartbeats are so small the effect should be quite
# limited. Also note that we're looking for something near to our
# scheduled interval, because if this is exact, then we'll likely
# actually send a heartbeat at twice the period, which could cause
# a broker to kill the connection if the period is large enough. The
# 90% bound is arbitrary but seems a sensible enough default.
if self.connection._heartbeat:
if time.time() >= (self._last_heartbeat_send + 0.9 *
self.connection._heartbeat):
self.send_frame(HeartbeatFrame(self.channel_id))
self._last_heartbeat_send = time.time() | python | def send_heartbeat(self):
'''
Send a heartbeat if needed. Tracks last heartbeat send time.
'''
# Note that this does not take into account the time that we last
# sent a frame. Hearbeats are so small the effect should be quite
# limited. Also note that we're looking for something near to our
# scheduled interval, because if this is exact, then we'll likely
# actually send a heartbeat at twice the period, which could cause
# a broker to kill the connection if the period is large enough. The
# 90% bound is arbitrary but seems a sensible enough default.
if self.connection._heartbeat:
if time.time() >= (self._last_heartbeat_send + 0.9 *
self.connection._heartbeat):
self.send_frame(HeartbeatFrame(self.channel_id))
self._last_heartbeat_send = time.time() | Send a heartbeat if needed. Tracks last heartbeat send time. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connection.py#L568-L583 |
agoragames/haigha | haigha/connection.py | ConnectionChannel._send_start_ok | def _send_start_ok(self):
'''Send the start_ok message.'''
args = Writer()
args.write_table(self.connection._properties)
args.write_shortstr(self.connection._login_method)
args.write_longstr(self.connection._login_response)
args.write_shortstr(self.connection._locale)
self.send_frame(MethodFrame(self.channel_id, 10, 11, args))
self.add_synchronous_cb(self._recv_tune) | python | def _send_start_ok(self):
'''Send the start_ok message.'''
args = Writer()
args.write_table(self.connection._properties)
args.write_shortstr(self.connection._login_method)
args.write_longstr(self.connection._login_response)
args.write_shortstr(self.connection._locale)
self.send_frame(MethodFrame(self.channel_id, 10, 11, args))
self.add_synchronous_cb(self._recv_tune) | Send the start_ok message. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connection.py#L589-L598 |
agoragames/haigha | haigha/classes/exchange_class.py | ExchangeClass._cleanup | def _cleanup(self):
'''
Cleanup local data.
'''
self._declare_cb = None
self._delete_cb = None
super(ExchangeClass, self)._cleanup() | python | def _cleanup(self):
'''
Cleanup local data.
'''
self._declare_cb = None
self._delete_cb = None
super(ExchangeClass, self)._cleanup() | Cleanup local data. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/exchange_class.py#L34-L40 |
agoragames/haigha | haigha/classes/exchange_class.py | ExchangeClass.declare | def declare(self, exchange, type, passive=False, durable=False,
nowait=True, arguments=None, ticket=None, cb=None):
"""
Declare the exchange.
exchange - The name of the exchange to declare
type - One of
"""
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(exchange).\
write_shortstr(type).\
write_bits(passive, durable, False, False, nowait).\
write_table(arguments or {})
self.send_frame(MethodFrame(self.channel_id, 40, 10, args))
if not nowait:
self._declare_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_declare_ok) | python | def declare(self, exchange, type, passive=False, durable=False,
nowait=True, arguments=None, ticket=None, cb=None):
"""
Declare the exchange.
exchange - The name of the exchange to declare
type - One of
"""
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(exchange).\
write_shortstr(type).\
write_bits(passive, durable, False, False, nowait).\
write_table(arguments or {})
self.send_frame(MethodFrame(self.channel_id, 40, 10, args))
if not nowait:
self._declare_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_declare_ok) | Declare the exchange.
exchange - The name of the exchange to declare
type - One of | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/exchange_class.py#L42-L62 |
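A hedged sketch of declaring an exchange through this class, assuming the channel exposes it as channel.exchange (the usual haigha accessor name):

def on_declared():
    print('exchange is ready')

ch = connection.channel()
ch.exchange.declare('events', 'topic',
                    durable=True,
                    nowait=False,    # wait for declare-ok so the callback fires
                    cb=on_declared)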
agoragames/haigha | haigha/classes/exchange_class.py | ExchangeClass.delete | def delete(self, exchange, if_unused=False, nowait=True, ticket=None,
cb=None):
'''
Delete an exchange.
'''
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(exchange).\
write_bits(if_unused, nowait)
self.send_frame(MethodFrame(self.channel_id, 40, 20, args))
if not nowait:
self._delete_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_delete_ok) | python | def delete(self, exchange, if_unused=False, nowait=True, ticket=None,
cb=None):
'''
Delete an exchange.
'''
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(exchange).\
write_bits(if_unused, nowait)
self.send_frame(MethodFrame(self.channel_id, 40, 20, args))
if not nowait:
self._delete_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_delete_ok) | Delete an exchange. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/exchange_class.py#L72-L87 |
agoragames/haigha | haigha/transports/socket_transport.py | SocketTransport.connect | def connect(self, (host, port), klass=socket.socket):
'''Connect assuming a host and port tuple.
:param tuple: A tuple containing host and port for a connection.
:param klass: An implementation of socket.socket.
:raises socket.gaierror: If no address can be resolved.
:raises socket.error: If no connection can be made.
'''
self._host = "%s:%s" % (host, port)
for info in socket.getaddrinfo(host, port, 0, 0, socket.IPPROTO_TCP):
family, socktype, proto, _, sockaddr = info
self._sock = klass(family, socktype, proto)
self._sock.settimeout(self.connection._connect_timeout)
if self.connection._sock_opts:
_sock_opts = self.connection._sock_opts
for (level, optname), value in _sock_opts.iteritems():
self._sock.setsockopt(level, optname, value)
try:
self._sock.connect(sockaddr)
except socket.error:
self.connection.logger.exception(
"Failed to connect to %s:",
sockaddr,
)
continue
# After connecting, switch to full-blocking mode.
self._sock.settimeout(None)
break
else:
raise | python | def connect(self, (host, port), klass=socket.socket):
'''Connect assuming a host and port tuple.
:param tuple: A tuple containing host and port for a connection.
:param klass: An implementation of socket.socket.
:raises socket.gaierror: If no address can be resolved.
:raises socket.error: If no connection can be made.
'''
self._host = "%s:%s" % (host, port)
for info in socket.getaddrinfo(host, port, 0, 0, socket.IPPROTO_TCP):
family, socktype, proto, _, sockaddr = info
self._sock = klass(family, socktype, proto)
self._sock.settimeout(self.connection._connect_timeout)
if self.connection._sock_opts:
_sock_opts = self.connection._sock_opts
for (level, optname), value in _sock_opts.iteritems():
self._sock.setsockopt(level, optname, value)
try:
self._sock.connect(sockaddr)
except socket.error:
self.connection.logger.exception(
"Failed to connect to %s:",
sockaddr,
)
continue
# After connecting, switch to full-blocking mode.
self._sock.settimeout(None)
break
else:
raise | Connect assuming a host and port tuple.
:param tuple: A tuple containing host and port for a connection.
:param klass: An implementation of socket.socket.
:raises socket.gaierror: If no address can be resolved.
:raises socket.error: If no connection can be made. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/transports/socket_transport.py#L27-L64 |
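Applications normally do not call SocketTransport.connect() directly; the transport is selected when the Connection is built. The sketch below assumes the Connection constructor accepts a `transport` keyword (with 'socket' selecting this blocking transport) and a `sock_opts` mapping keyed by (level, optname) pairs as iterated in the loop above; treat both keyword names as assumptions to verify against your haigha version.

# Hedged sketch -- the 'transport' and 'sock_opts' keywords are assumptions to verify.
import socket
from haigha.connection import Connection

connection = Connection(
    host='localhost', port=5672,
    user='guest', password='guest', vhost='/',
    transport='socket',                                       # blocking SocketTransport
    sock_opts={(socket.IPPROTO_TCP, socket.TCP_NODELAY): 1})  # (level, optname): value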
agoragames/haigha | haigha/transports/socket_transport.py | SocketTransport.read | def read(self, timeout=None):
'''
Read from the transport. If timeout>0, will only block for `timeout`
seconds.
'''
e = None
if not hasattr(self, '_sock'):
return None
try:
# Note that we ignore both None and 0, i.e. we either block with a
# timeout or block completely and let gevent sort it out.
if timeout:
self._sock.settimeout(timeout)
else:
self._sock.settimeout(None)
data = self._sock.recv(
self._sock.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF))
if len(data):
if self.connection.debug > 1:
self.connection.logger.debug(
'read %d bytes from %s' % (len(data), self._host))
if len(self._buffer):
self._buffer.extend(data)
data = self._buffer
self._buffer = bytearray()
return data
# Note that no data means the socket is closed and we'll mark that
# below
except socket.timeout as e:
# Note that this is implemented differently and though it would be
# caught as an EnvironmentError, it has no errno. Not sure whose
# fault that is.
return None
except EnvironmentError as e:
# thrown if we have a timeout and no data
if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR):
return None
self.connection.logger.exception(
'error reading from %s' % (self._host))
self.connection.transport_closed(
msg='error reading from %s' % (self._host))
if e:
raise | python | def read(self, timeout=None):
'''
Read from the transport. If timeout>0, will only block for `timeout`
seconds.
'''
e = None
if not hasattr(self, '_sock'):
return None
try:
# Note that we ignore both None and 0, i.e. we either block with a
# timeout or block completely and let gevent sort it out.
if timeout:
self._sock.settimeout(timeout)
else:
self._sock.settimeout(None)
data = self._sock.recv(
self._sock.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF))
if len(data):
if self.connection.debug > 1:
self.connection.logger.debug(
'read %d bytes from %s' % (len(data), self._host))
if len(self._buffer):
self._buffer.extend(data)
data = self._buffer
self._buffer = bytearray()
return data
# Note that no data means the socket is closed and we'll mark that
# below
except socket.timeout as e:
# Note that this is implemented differently and though it would be
# caught as an EnvironmentError, it has no errno. Not sure whose
# fault that is.
return None
except EnvironmentError as e:
# thrown if we have a timeout and no data
if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR):
return None
self.connection.logger.exception(
'error reading from %s' % (self._host))
self.connection.transport_closed(
msg='error reading from %s' % (self._host))
if e:
raise | Read from the transport. If timeout>0, will only block for `timeout`
seconds. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/transports/socket_transport.py#L66-L115 |
agoragames/haigha | haigha/transports/socket_transport.py | SocketTransport.buffer | def buffer(self, data):
'''
Buffer unused bytes from the input stream.
'''
if not hasattr(self, '_sock'):
return None
# data will always be a byte array
if len(self._buffer):
self._buffer.extend(data)
else:
self._buffer = bytearray(data) | python | def buffer(self, data):
'''
Buffer unused bytes from the input stream.
'''
if not hasattr(self, '_sock'):
return None
# data will always be a byte array
if len(self._buffer):
self._buffer.extend(data)
else:
self._buffer = bytearray(data) | Buffer unused bytes from the input stream. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/transports/socket_transport.py#L117-L128 |
agoragames/haigha | haigha/transports/socket_transport.py | SocketTransport.write | def write(self, data):
'''
Write some bytes to the transport.
'''
if not hasattr(self, '_sock'):
return None
try:
self._sock.sendall(data)
if self.connection.debug > 1:
self.connection.logger.debug(
'sent %d bytes to %s' % (len(data), self._host))
return
except EnvironmentError:
# sockets raise this type of error, and since if sendall() fails
# we're left in an indeterminate state, assume that any error we
# catch means that the connection is dead. Note that this
# assumption requires this to be a blocking socket; if we ever
# support non-blocking in this class then this whole method has
# to change a lot.
self.connection.logger.exception(
'error writing to %s' % (self._host))
self.connection.transport_closed(
msg='error writing to %s' % (self._host)) | python | def write(self, data):
'''
Write some bytes to the transport.
'''
if not hasattr(self, '_sock'):
return None
try:
self._sock.sendall(data)
if self.connection.debug > 1:
self.connection.logger.debug(
'sent %d bytes to %s' % (len(data), self._host))
return
except EnvironmentError:
# sockets raise this type of error, and since if sendall() fails
# we're left in an indeterminate state, assume that any error we
# catch means that the connection is dead. Note that this
# assumption requires this to be a blocking socket; if we ever
# support non-blocking in this class then this whole method has
# to change a lot.
self.connection.logger.exception(
'error writing to %s' % (self._host))
self.connection.transport_closed(
msg='error writing to %s' % (self._host)) | Write some bytes to the transport. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/transports/socket_transport.py#L130-L156 |
agoragames/haigha | haigha/classes/basic_class.py | BasicClass._cleanup | def _cleanup(self):
'''
Cleanup all the local data.
'''
self._pending_consumers = None
self._consumer_cb = None
self._get_cb = None
self._recover_cb = None
self._cancel_cb = None
self._return_listener = None
super(BasicClass, self)._cleanup() | python | def _cleanup(self):
'''
Cleanup all the local data.
'''
self._pending_consumers = None
self._consumer_cb = None
self._get_cb = None
self._recover_cb = None
self._cancel_cb = None
self._return_listener = None
super(BasicClass, self)._cleanup() | Cleanup all the local data. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L48-L58 |
agoragames/haigha | haigha/classes/basic_class.py | BasicClass.set_return_listener | def set_return_listener(self, cb):
'''
Set a callback for basic.return listening. Will be called with a single
Message argument.
The return_info attribute of the Message will have the following
properties:
'channel': Channel instance
'reply_code': reply code (int)
'reply_text': reply text
'exchange': exchange name
'routing_key': routing key
RabbitMQ NOTE: if the channel was in confirmation mode when the message
was published, then basic.return will still be followed by basic.ack
later.
:param cb: callable cb(Message); pass None to reset
'''
if cb is not None and not callable(cb):
raise ValueError('return_listener callback must either be None or '
'a callable, but got: %r' % (cb,))
self._return_listener = cb | python | def set_return_listener(self, cb):
'''
Set a callback for basic.return listening. Will be called with a single
Message argument.
The return_info attribute of the Message will have the following
properties:
'channel': Channel instance
'reply_code': reply code (int)
'reply_text': reply text
'exchange': exchange name
'routing_key': routing key
RabbitMQ NOTE: if the channel was in confirmation mode when the message
was published, then basic.return will still be followed by basic.ack
later.
:param cb: callable cb(Message); pass None to reset
'''
if cb is not None and not callable(cb):
raise ValueError('return_listener callback must either be None or '
'a callable, but got: %r' % (cb,))
self._return_listener = cb | Set a callback for basic.return listening. Will be called with a single
Message argument.
The return_info attribute of the Message will have the following
properties:
'channel': Channel instance
'reply_code': reply code (int)
'reply_text': reply text
'exchange': exchange name
'routing_key': routing key
RabbitMQ NOTE: if the channel was in confirmation mode when the message
was published, then basic.return will still be followed by basic.ack
later.
:param cb: callable cb(Message); pass None to reset | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L60-L82 |
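A minimal sketch of wiring a return listener, assuming an open channel `ch` and that the message is published with mandatory=True so an unroutable body is returned by the broker; the exchange and routing key names are illustrative.

# Hedged sketch -- ch is an open haigha Channel.
from haigha.message import Message

def on_return(msg):
    info = msg.return_info
    print 'returned: code=%s text=%s key=%s' % (
        info['reply_code'], info['reply_text'], info['routing_key'])

ch.basic.set_return_listener(on_return)
# mandatory=True makes the broker hand back a message it cannot route.
ch.basic.publish(Message('unroutable body'), 'test.exchange', 'no.such.key', mandatory=True)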
agoragames/haigha | haigha/classes/basic_class.py | BasicClass.qos | def qos(self, prefetch_size=0, prefetch_count=0, is_global=False):
'''
Set QoS on this channel.
'''
args = Writer()
args.write_long(prefetch_size).\
write_short(prefetch_count).\
write_bit(is_global)
self.send_frame(MethodFrame(self.channel_id, 60, 10, args))
self.channel.add_synchronous_cb(self._recv_qos_ok) | python | def qos(self, prefetch_size=0, prefetch_count=0, is_global=False):
'''
Set QoS on this channel.
'''
args = Writer()
args.write_long(prefetch_size).\
write_short(prefetch_count).\
write_bit(is_global)
self.send_frame(MethodFrame(self.channel_id, 60, 10, args))
self.channel.add_synchronous_cb(self._recv_qos_ok) | Set QoS on this channel. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L96-L106 |
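A one-line sketch of qos(), assuming an open channel `ch`; capping prefetch_count is the common use, and it only affects consumers started with no_ack=False.

# Hedged sketch -- limit unacknowledged deliveries on this channel to 10 per consumer.
ch.basic.qos(prefetch_count=10)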
agoragames/haigha | haigha/classes/basic_class.py | BasicClass.consume | def consume(self, queue, consumer, consumer_tag='', no_local=False,
no_ack=True, exclusive=False, nowait=True, ticket=None,
cb=None):
'''
Start a queue consumer. If `cb` is supplied, will be called when
broker confirms that consumer is registered.
'''
nowait = nowait and self.allow_nowait() and not cb
if nowait and consumer_tag == '':
consumer_tag = self._generate_consumer_tag()
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_shortstr(consumer_tag).\
write_bits(no_local, no_ack, exclusive, nowait).\
write_table({}) # unused according to spec
self.send_frame(MethodFrame(self.channel_id, 60, 20, args))
if not nowait:
self._pending_consumers.append((consumer, cb))
self.channel.add_synchronous_cb(self._recv_consume_ok)
else:
self._consumer_cb[consumer_tag] = consumer | python | def consume(self, queue, consumer, consumer_tag='', no_local=False,
no_ack=True, exclusive=False, nowait=True, ticket=None,
cb=None):
'''
Start a queue consumer. If `cb` is supplied, will be called when
broker confirms that consumer is registered.
'''
nowait = nowait and self.allow_nowait() and not cb
if nowait and consumer_tag == '':
consumer_tag = self._generate_consumer_tag()
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_shortstr(consumer_tag).\
write_bits(no_local, no_ack, exclusive, nowait).\
write_table({}) # unused according to spec
self.send_frame(MethodFrame(self.channel_id, 60, 20, args))
if not nowait:
self._pending_consumers.append((consumer, cb))
self.channel.add_synchronous_cb(self._recv_consume_ok)
else:
self._consumer_cb[consumer_tag] = consumer | Start a queue consumer. If `cb` is supplied, will be called when
broker confirms that consumer is registered. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L112-L136 |
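A consuming sketch built on the signature above: the consumer receives a Message, and with the blocking socket transport the caller pumps frames via connection.read_frames(). `ch` and `connection` are the Channel and Connection from the declare() sketch earlier; the queue name is illustrative.

# Hedged sketch -- ch is an open haigha Channel bound to `connection`.
def on_message(msg):
    print 'received: %s' % (msg.body,)

def on_consume_ok():
    print 'consumer registered'

ch.basic.consume('test.queue', on_message, no_ack=True, nowait=False, cb=on_consume_ok)

# With the blocking socket transport, frames are pumped explicitly.
while True:
    connection.read_frames()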
agoragames/haigha | haigha/classes/basic_class.py | BasicClass.cancel | def cancel(self, consumer_tag='', nowait=True, consumer=None, cb=None):
'''
Cancel a consumer. Can choose to delete based on a consumer tag or
the function which is consuming. If deleting by function, take care
to only use a consumer once per channel.
'''
if consumer:
tag = self._lookup_consumer_tag_by_consumer(consumer)
if tag:
consumer_tag = tag
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_shortstr(consumer_tag).\
write_bit(nowait)
self.send_frame(MethodFrame(self.channel_id, 60, 30, args))
if not nowait:
self._cancel_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_cancel_ok)
else:
self._purge_consumer_by_tag(consumer_tag) | python | def cancel(self, consumer_tag='', nowait=True, consumer=None, cb=None):
'''
Cancel a consumer. Can choose to delete based on a consumer tag or
the function which is consuming. If deleting by function, take care
to only use a consumer once per channel.
'''
if consumer:
tag = self._lookup_consumer_tag_by_consumer(consumer)
if tag:
consumer_tag = tag
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_shortstr(consumer_tag).\
write_bit(nowait)
self.send_frame(MethodFrame(self.channel_id, 60, 30, args))
if not nowait:
self._cancel_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_cancel_ok)
else:
self._purge_consumer_by_tag(consumer_tag) | Cancel a consumer. Can choose to delete based on a consumer tag or
the function which is consuming. If deleting by function, take care
to only use a consumer once per channel. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L146-L168 |
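A cancel sketch under the same assumptions (open channel `ch`, consumer function `on_message` registered exactly once on this channel), following the note above about cancelling by function rather than by tag:

# Hedged sketch -- cancel by the consuming function; haigha resolves the tag internally.
def on_cancel_ok():
    print 'consumer cancelled'

ch.basic.cancel(consumer=on_message, nowait=False, cb=on_cancel_ok)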
agoragames/haigha | haigha/classes/basic_class.py | BasicClass._lookup_consumer_tag_by_consumer | def _lookup_consumer_tag_by_consumer(self, consumer):
'''Look up consumer tag given its consumer function
NOTE: this protected method may be called by derived classes
:param callable consumer: consumer function
:returns: matching consumer tag or None
:rtype: str or None
'''
for (tag, func) in self._consumer_cb.iteritems():
if func == consumer:
return tag | python | def _lookup_consumer_tag_by_consumer(self, consumer):
'''Look up consumer tag given its consumer function
NOTE: this protected method may be called by derived classes
:param callable consumer: consumer function
:returns: matching consumer tag or None
:rtype: str or None
'''
for (tag, func) in self._consumer_cb.iteritems():
if func == consumer:
return tag | Look up consumer tag given its consumer function
NOTE: this protected method may be called by derived classes
:param callable consumer: consumer function
:returns: matching consumer tag or None
:rtype: str or None | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L179-L191 |
agoragames/haigha | haigha/classes/basic_class.py | BasicClass._purge_consumer_by_tag | def _purge_consumer_by_tag(self, consumer_tag):
'''Purge consumer entry from this basic instance
NOTE: this protected method may be called by derived classes
:param str consumer_tag:
'''
try:
del self._consumer_cb[consumer_tag]
except KeyError:
self.logger.warning(
'no callback registered for consumer tag " %s "', consumer_tag)
else:
self.logger.info('purged consumer with tag " %s "', consumer_tag) | python | def _purge_consumer_by_tag(self, consumer_tag):
'''Purge consumer entry from this basic instance
NOTE: this protected method may be called by derived classes
:param str consumer_tag:
'''
try:
del self._consumer_cb[consumer_tag]
except KeyError:
self.logger.warning(
'no callback registered for consumer tag " %s "', consumer_tag)
else:
self.logger.info('purged consumer with tag " %s "', consumer_tag) | Purge consumer entry from this basic instance
NOTE: this protected method may be called by derived classes
:param str consumer_tag: | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L193-L206 |
agoragames/haigha | haigha/classes/basic_class.py | BasicClass.publish | def publish(self, msg, exchange, routing_key, mandatory=False,
immediate=False, ticket=None):
'''
publish a message.
'''
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(exchange).\
write_shortstr(routing_key).\
write_bits(mandatory, immediate)
self.send_frame(MethodFrame(self.channel_id, 60, 40, args))
self.send_frame(
HeaderFrame(self.channel_id, 60, 0, len(msg), msg.properties))
f_max = self.channel.connection.frame_max
for f in ContentFrame.create_frames(self.channel_id, msg.body, f_max):
self.send_frame(f) | python | def publish(self, msg, exchange, routing_key, mandatory=False,
immediate=False, ticket=None):
'''
publish a message.
'''
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(exchange).\
write_shortstr(routing_key).\
write_bits(mandatory, immediate)
self.send_frame(MethodFrame(self.channel_id, 60, 40, args))
self.send_frame(
HeaderFrame(self.channel_id, 60, 0, len(msg), msg.properties))
f_max = self.channel.connection.frame_max
for f in ContentFrame.create_frames(self.channel_id, msg.body, f_max):
self.send_frame(f) | publish a message. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L208-L225 |
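A publish sketch: the frame sequence above (method, header, then content split at frame_max) is handled internally, so callers only build a Message. Assumes an open channel `ch`; the exchange, routing key, and the delivery_mode property (a standard AMQP header property) are illustrative.

# Hedged sketch -- ch is an open haigha Channel.
from haigha.message import Message

msg = Message('hello world', delivery_mode=2)   # delivery_mode=2 marks the message persistent
ch.basic.publish(msg, 'test.exchange', 'test.key')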
agoragames/haigha | haigha/classes/basic_class.py | BasicClass.return_msg | def return_msg(self, reply_code, reply_text, exchange, routing_key):
'''
Return a failed message. Not named "return" because python interpreter
can't deal with that.
'''
args = Writer()
args.write_short(reply_code).\
write_shortstr(reply_text).\
write_shortstr(exchange).\
write_shortstr(routing_key)
self.send_frame(MethodFrame(self.channel_id, 60, 50, args)) | python | def return_msg(self, reply_code, reply_text, exchange, routing_key):
'''
Return a failed message. Not named "return" because python interpreter
can't deal with that.
'''
args = Writer()
args.write_short(reply_code).\
write_shortstr(reply_text).\
write_shortstr(exchange).\
write_shortstr(routing_key)
self.send_frame(MethodFrame(self.channel_id, 60, 50, args)) | Return a failed message. Not named "return" because python interpreter
can't deal with that. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L227-L238 |
agoragames/haigha | haigha/classes/basic_class.py | BasicClass._recv_return | def _recv_return(self, method_frame):
'''
Handle basic.return method. If we have a complete message, will call the
user's return listener callback (if any). If there are not enough
frames, will re-queue current frames and raise a FrameUnderflow
NOTE: if the channel was in confirmation mode when the message was
published, then this will still be followed by basic.ack later
'''
msg = self._read_returned_msg(method_frame)
if callable(self._return_listener):
self._return_listener(msg)
else:
self.logger.error(
"Published message returned by broker: info=%s, properties=%s",
msg.return_info, msg.properties) | python | def _recv_return(self, method_frame):
'''
Handle basic.return method. If we have a complete message, will call the
user's return listener callback (if any). If there are not enough
frames, will re-queue current frames and raise a FrameUnderflow
NOTE: if the channel was in confirmation mode when the message was
published, then this will still be followed by basic.ack later
'''
msg = self._read_returned_msg(method_frame)
if callable(self._return_listener):
self._return_listener(msg)
else:
self.logger.error(
"Published message returned by broker: info=%s, properties=%s",
msg.return_info, msg.properties) | Handle basic.return method. If we have a complete message, will call the
user's return listener callback (if any). If there are not enough
frames, will re-queue current frames and raise a FrameUnderflow
NOTE: if the channel was in confirmation mode when the message was
published, then this will still be followed by basic.ack later | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L240-L256 |
agoragames/haigha | haigha/classes/basic_class.py | BasicClass.get | def get(self, queue, consumer=None, no_ack=True, ticket=None):
'''
Ask to fetch a single message from a queue. If a consumer is supplied,
the consumer will be called with either a Message argument, or None if
there is no message in queue. If a synchronous transport, Message or
None is returned.
'''
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_bit(no_ack)
self._get_cb.append(consumer)
self.send_frame(MethodFrame(self.channel_id, 60, 70, args))
return self.channel.add_synchronous_cb(self._recv_get_response) | python | def get(self, queue, consumer=None, no_ack=True, ticket=None):
'''
Ask to fetch a single message from a queue. If a consumer is supplied,
the consumer will be called with either a Message argument, or None if
there is no message in queue. If a synchronous transport, Message or
None is returned.
'''
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_bit(no_ack)
self._get_cb.append(consumer)
self.send_frame(MethodFrame(self.channel_id, 60, 70, args))
return self.channel.add_synchronous_cb(self._recv_get_response) | Ask to fetch a single message from a queue. If a consumer is supplied,
the consumer will be called with either a Message argument, or None if
there is no message in queue. If a synchronous transport, Message or
None is returned. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L266-L280 |
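A get() sketch for the synchronous case described above: on a blocking transport the call returns the Message directly, or None when the queue is empty. Assumes an open channel `ch`; the queue name is illustrative.

# Hedged sketch -- ch is an open haigha Channel on a synchronous transport.
msg = ch.basic.get('test.queue', no_ack=True)
if msg is None:
    print 'queue is empty'
else:
    print 'fetched: %s' % (msg.body,)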
agoragames/haigha | haigha/classes/basic_class.py | BasicClass._recv_get_response | def _recv_get_response(self, method_frame):
'''
Handle either get_ok or get_empty. This is a hack because the
synchronous callback stack is expecting one method to satisfy the
expectation. To keep that loop as tight as possible, work within
those constraints. Use of get is not recommended anyway.
'''
if method_frame.method_id == 71:
return self._recv_get_ok(method_frame)
elif method_frame.method_id == 72:
return self._recv_get_empty(method_frame) | python | def _recv_get_response(self, method_frame):
'''
Handle either get_ok or get_empty. This is a hack because the
synchronous callback stack is expecting one method to satisfy the
expectation. To keep that loop as tight as possible, work within
those constraints. Use of get is not recommended anyway.
'''
if method_frame.method_id == 71:
return self._recv_get_ok(method_frame)
elif method_frame.method_id == 72:
return self._recv_get_empty(method_frame) | Handle either get_ok or get_empty. This is a hack because the
synchronous callback stack is expecting one method to satisfy the
expectation. To keep that loop as tight as possible, work within
those constraints. Use of get is not recommended anyway. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L282-L292 |
agoragames/haigha | haigha/classes/basic_class.py | BasicClass.ack | def ack(self, delivery_tag, multiple=False):
'''
Acknowledge delivery of a message. If multiple=True, acknowledge up to
and including delivery_tag.
'''
args = Writer()
args.write_longlong(delivery_tag).\
write_bit(multiple)
self.send_frame(MethodFrame(self.channel_id, 60, 80, args)) | python | def ack(self, delivery_tag, multiple=False):
'''
Acknowledge delivery of a message. If multiple=True, acknowledge up to
and including delivery_tag.
'''
args = Writer()
args.write_longlong(delivery_tag).\
write_bit(multiple)
self.send_frame(MethodFrame(self.channel_id, 60, 80, args)) | Acknowledge delivery of a message. If multiple=True, acknowledge up to
and including delivery_tag. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L309-L318 |
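An explicit-ack sketch combining consume(), ack() and reject(): the delivery tag comes from msg.delivery_info, as populated by _read_msg further down. `ch` is an open channel, and process() is a hypothetical application function; the queue name is illustrative.

# Hedged sketch -- process() is a hypothetical application function.
def on_work(msg):
    tag = msg.delivery_info['delivery_tag']
    try:
        process(msg.body)
        ch.basic.ack(tag)
    except Exception:
        # Hand the message back to the broker for redelivery.
        ch.basic.reject(tag, requeue=True)

ch.basic.consume('work.queue', on_work, no_ack=False)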
agoragames/haigha | haigha/classes/basic_class.py | BasicClass.reject | def reject(self, delivery_tag, requeue=False):
'''
Reject a message.
'''
args = Writer()
args.write_longlong(delivery_tag).\
write_bit(requeue)
self.send_frame(MethodFrame(self.channel_id, 60, 90, args)) | python | def reject(self, delivery_tag, requeue=False):
'''
Reject a message.
'''
args = Writer()
args.write_longlong(delivery_tag).\
write_bit(requeue)
self.send_frame(MethodFrame(self.channel_id, 60, 90, args)) | Reject a message. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L320-L328 |
agoragames/haigha | haigha/classes/basic_class.py | BasicClass.recover_async | def recover_async(self, requeue=False):
'''
Redeliver all unacknowledged messages on this channel.
This method is deprecated in favour of the synchronous
recover/recover-ok
'''
args = Writer()
args.write_bit(requeue)
self.send_frame(MethodFrame(self.channel_id, 60, 100, args)) | python | def recover_async(self, requeue=False):
'''
Redeliver all unacknowledged messages on this channel.
This method is deprecated in favour of the synchronous
recover/recover-ok
'''
args = Writer()
args.write_bit(requeue)
self.send_frame(MethodFrame(self.channel_id, 60, 100, args)) | Redeliver all unacknowledged messages on this channel.
This method is deprecated in favour of the synchronous
recover/recover-ok | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L330-L340 |
agoragames/haigha | haigha/classes/basic_class.py | BasicClass.recover | def recover(self, requeue=False, cb=None):
'''
Ask server to redeliver all unacknowledged messages.
'''
args = Writer()
args.write_bit(requeue)
# The XML spec is incorrect; this method is always synchronous
# http://lists.rabbitmq.com/pipermail/rabbitmq-discuss/2011-January/010738.html
self._recover_cb.append(cb)
self.send_frame(MethodFrame(self.channel_id, 60, 110, args))
self.channel.add_synchronous_cb(self._recv_recover_ok) | python | def recover(self, requeue=False, cb=None):
'''
Ask server to redeliver all unacknowledged messages.
'''
args = Writer()
args.write_bit(requeue)
# The XML spec is incorrect; this method is always synchronous
# http://lists.rabbitmq.com/pipermail/rabbitmq-discuss/2011-January/010738.html
self._recover_cb.append(cb)
self.send_frame(MethodFrame(self.channel_id, 60, 110, args))
self.channel.add_synchronous_cb(self._recv_recover_ok) | Ask server to redeliver all unacknowledged messages. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L342-L353 |
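A recover() sketch, assuming an open channel `ch`; as the comment above notes, the call is always synchronous, so the callback fires when recover-ok arrives.

# Hedged sketch -- redeliver everything that was delivered but never acknowledged.
def on_recover_ok():
    print 'unacked messages will be redelivered'

ch.basic.recover(requeue=True, cb=on_recover_ok)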
agoragames/haigha | haigha/classes/basic_class.py | BasicClass._read_msg | def _read_msg(self, method_frame, with_consumer_tag=False,
with_message_count=False):
'''
Support method to read a Message from the current frame buffer.
Will return a Message, or re-queue current frames and raise a
FrameUnderflow. Takes an optional argument on whether to read the
consumer tag so it can be used for both deliver and get-ok.
'''
header_frame, body = self._reap_msg_frames(method_frame)
if with_consumer_tag:
consumer_tag = method_frame.args.read_shortstr()
delivery_tag = method_frame.args.read_longlong()
redelivered = method_frame.args.read_bit()
exchange = method_frame.args.read_shortstr()
routing_key = method_frame.args.read_shortstr()
if with_message_count:
message_count = method_frame.args.read_long()
delivery_info = {
'channel': self.channel,
'delivery_tag': delivery_tag,
'redelivered': redelivered,
'exchange': exchange,
'routing_key': routing_key,
}
if with_consumer_tag:
delivery_info['consumer_tag'] = consumer_tag
if with_message_count:
delivery_info['message_count'] = message_count
return Message(body=body, delivery_info=delivery_info,
**header_frame.properties) | python | def _read_msg(self, method_frame, with_consumer_tag=False,
with_message_count=False):
'''
Support method to read a Message from the current frame buffer.
Will return a Message, or re-queue current frames and raise a
FrameUnderflow. Takes an optional argument on whether to read the
consumer tag so it can be used for both deliver and get-ok.
'''
header_frame, body = self._reap_msg_frames(method_frame)
if with_consumer_tag:
consumer_tag = method_frame.args.read_shortstr()
delivery_tag = method_frame.args.read_longlong()
redelivered = method_frame.args.read_bit()
exchange = method_frame.args.read_shortstr()
routing_key = method_frame.args.read_shortstr()
if with_message_count:
message_count = method_frame.args.read_long()
delivery_info = {
'channel': self.channel,
'delivery_tag': delivery_tag,
'redelivered': redelivered,
'exchange': exchange,
'routing_key': routing_key,
}
if with_consumer_tag:
delivery_info['consumer_tag'] = consumer_tag
if with_message_count:
delivery_info['message_count'] = message_count
return Message(body=body, delivery_info=delivery_info,
**header_frame.properties) | Support method to read a Message from the current frame buffer.
Will return a Message, or re-queue current frames and raise a
FrameUnderflow. Takes an optional argument on whether to read the
consumer tag so it can be used for both deliver and get-ok. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L360-L392 |
agoragames/haigha | haigha/classes/basic_class.py | BasicClass._read_returned_msg | def _read_returned_msg(self, method_frame):
'''
Support method to read a returned (basic.return) Message from the
current frame buffer. Will return a Message with return_info, or
re-queue current frames and raise a FrameUnderflow.
:returns: Message with the return_info attribute set, where return_info
is a dict with the following properties:
'channel': Channel instance
'reply_code': reply code (int)
'reply_text': reply text
'exchange': exchange name
'routing_key': routing key
'''
header_frame, body = self._reap_msg_frames(method_frame)
return_info = {
'channel': self.channel,
'reply_code': method_frame.args.read_short(),
'reply_text': method_frame.args.read_shortstr(),
'exchange': method_frame.args.read_shortstr(),
'routing_key': method_frame.args.read_shortstr()
}
return Message(body=body, return_info=return_info,
**header_frame.properties) | python | def _read_returned_msg(self, method_frame):
'''
Support method to read a returned (basic.return) Message from the
current frame buffer. Will return a Message with return_info, or
re-queue current frames and raise a FrameUnderflow.
:returns: Message with the return_info attribute set, where return_info
is a dict with the following properties:
'channel': Channel instance
'reply_code': reply code (int)
'reply_text': reply text
'exchange': exchange name
'routing_key': routing key
'''
header_frame, body = self._reap_msg_frames(method_frame)
return_info = {
'channel': self.channel,
'reply_code': method_frame.args.read_short(),
'reply_text': method_frame.args.read_shortstr(),
'exchange': method_frame.args.read_shortstr(),
'routing_key': method_frame.args.read_shortstr()
}
return Message(body=body, return_info=return_info,
**header_frame.properties) | Support method to read a returned (basic.return) Message from the
current frame buffer. Will return a Message with return_info, or
re-queue current frames and raise a FrameUnderflow.
:returns: Message with the return_info attribute set, where return_info
is a dict with the following properties:
'channel': Channel instance
'reply_code': reply code (int)
'reply_text': reply text
'exchange': exchange name
'routing_key': routing key | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L394-L419 |
agoragames/haigha | haigha/classes/basic_class.py | BasicClass._reap_msg_frames | def _reap_msg_frames(self, method_frame):
'''
Support method to reap header frame and body from current frame buffer.
Used in processing of basic.return, basic.deliver, and basic.get_ok.
Will return a pair (<header frame>, <body>), or re-queue current frames
and raise a FrameUnderflow.
:returns: pair (<header frame>, <body>)
:rtype: tuple of (HeaderFrame, bytearray)
'''
# No need to assert that it is an instance of Header or Content frames,
# because failure to access it as such will result in an exception that
# the channel will pick up and handle accordingly.
header_frame = self.channel.next_frame()
if header_frame:
size = header_frame.size
body = bytearray()
rbuf_frames = deque([header_frame, method_frame])
while len(body) < size:
content_frame = self.channel.next_frame()
if content_frame:
rbuf_frames.appendleft(content_frame)
body.extend(content_frame.payload.buffer())
else:
self.channel.requeue_frames(rbuf_frames)
raise self.FrameUnderflow()
else:
self.channel.requeue_frames([method_frame])
raise self.FrameUnderflow()
return (header_frame, body) | python | def _reap_msg_frames(self, method_frame):
'''
Support method to reap header frame and body from current frame buffer.
Used in processing of basic.return, basic.deliver, and basic.get_ok.
Will return a pair (<header frame>, <body>), or re-queue current frames
and raise a FrameUnderflow.
:returns: pair (<header frame>, <body>)
:rtype: tuple of (HeaderFrame, bytearray)
'''
# No need to assert that it is an instance of Header or Content frames,
# because failure to access it as such will result in an exception that
# the channel will pick up and handle accordingly.
header_frame = self.channel.next_frame()
if header_frame:
size = header_frame.size
body = bytearray()
rbuf_frames = deque([header_frame, method_frame])
while len(body) < size:
content_frame = self.channel.next_frame()
if content_frame:
rbuf_frames.appendleft(content_frame)
body.extend(content_frame.payload.buffer())
else:
self.channel.requeue_frames(rbuf_frames)
raise self.FrameUnderflow()
else:
self.channel.requeue_frames([method_frame])
raise self.FrameUnderflow()
return (header_frame, body) | Support method to reap header frame and body from current frame buffer.
Used in processing of basic.return, basic.deliver, and basic.get_ok.
Will return a pair (<header frame>, <body>), or re-queue current frames
and raise a FrameUnderflow.
:returns: pair (<header frame>, <body>)
:rtype: tuple of (HeaderFrame, bytearray) | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L421-L452 |
agoragames/haigha | haigha/channel_pool.py | ChannelPool.publish | def publish(self, *args, **kwargs):
'''
Publish a message. Caller can supply an optional callback which will
be fired when the transaction is committed. Tries very hard to avoid
closed and inactive channels, but a ChannelError or ConnectionError
may still be raised.
'''
user_cb = kwargs.pop('cb', None)
# If the first channel we grab is inactive, continue fetching until
# we get an active channel, then put the inactive channels back in
# the pool. Try to keep the overhead to a minimum.
channel = self._get_channel()
if channel and not channel.active:
inactive_channels = set()
while channel and not channel.active:
inactive_channels.add(channel)
channel = self._get_channel()
self._free_channels.update(inactive_channels)
# When the transaction is committed, add the channel back to the pool
# and call any user-defined callbacks. If there is anything in queue,
# pop it and call back to publish(). Only do so if the channel is
# still active though, because otherwise the message will end up at
# the back of the queue, breaking the original order.
def committed():
self._free_channels.add(channel)
if channel.active and not channel.closed:
self._process_queue()
if user_cb is not None:
user_cb()
if channel:
channel.publish_synchronous(*args, cb=committed, **kwargs)
else:
kwargs['cb'] = user_cb
self._queue.append((args, kwargs)) | python | def publish(self, *args, **kwargs):
'''
Publish a message. Caller can supply an optional callback which will
be fired when the transaction is committed. Tries very hard to avoid
closed and inactive channels, but a ChannelError or ConnectionError
may still be raised.
'''
user_cb = kwargs.pop('cb', None)
# If the first channel we grab is inactive, continue fetching until
# we get an active channel, then put the inactive channels back in
# the pool. Try to keep the overhead to a minimum.
channel = self._get_channel()
if channel and not channel.active:
inactive_channels = set()
while channel and not channel.active:
inactive_channels.add(channel)
channel = self._get_channel()
self._free_channels.update(inactive_channels)
# When the transaction is committed, add the channel back to the pool
# and call any user-defined callbacks. If there is anything in queue,
# pop it and call back to publish(). Only do so if the channel is
# still active though, because otherwise the message will end up at
# the back of the queue, breaking the original order.
def committed():
self._free_channels.add(channel)
if channel.active and not channel.closed:
self._process_queue()
if user_cb is not None:
user_cb()
if channel:
channel.publish_synchronous(*args, cb=committed, **kwargs)
else:
kwargs['cb'] = user_cb
self._queue.append((args, kwargs)) | Publish a message. Caller can supply an optional callback which will
be fired when the transaction is committed. Tries very hard to avoid
closed and inactive channels, but a ChannelError or ConnectionError
may still be raised. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/channel_pool.py#L35-L72 |
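A ChannelPool sketch: publishes go through transactional channels drawn from the pool, and the callback fires once the commit is confirmed, matching the committed() closure above. The import path and the optional `size` argument are assumptions to verify; `connection` is the Connection from the earlier sketches.

# Hedged sketch -- import path and ChannelPool(connection, size=...) signature are assumed.
from haigha.channel_pool import ChannelPool
from haigha.message import Message

pool = ChannelPool(connection, size=5)    # cap of 5 concurrent transactional channels

def on_committed():
    print 'publish committed'

# Same (msg, exchange, routing_key) arguments as basic.publish above.
pool.publish(Message('pooled body'), 'test.exchange', 'test.key', cb=on_committed)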
agoragames/haigha | haigha/channel_pool.py | ChannelPool._process_queue | def _process_queue(self):
'''
If there are any messages in the queue, process one of them.
'''
if len(self._queue):
args, kwargs = self._queue.popleft()
self.publish(*args, **kwargs) | python | def _process_queue(self):
'''
If there are any messages in the queue, process one of them.
'''
if len(self._queue):
args, kwargs = self._queue.popleft()
self.publish(*args, **kwargs) | If there are any messages in the queue, process one of them. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/channel_pool.py#L74-L80
agoragames/haigha | haigha/channel_pool.py | ChannelPool._get_channel | def _get_channel(self):
'''
Fetch a channel from the pool. Will return a new one if necessary. If
a channel in the free pool is closed, will remove it. Will return None
if we hit the cap. Will clean up any channels that were published to
but closed due to error.
'''
while len(self._free_channels):
rval = self._free_channels.pop()
if not rval.closed:
return rval
# don't adjust _channels value because the callback will do that
# and we don't want to double count it.
if not self._size or self._channels < self._size:
rval = self._connection.channel()
self._channels += 1
rval.add_close_listener(self._channel_closed_cb)
return rval | python | def _get_channel(self):
'''
Fetch a channel from the pool. Will return a new one if necessary. If
a channel in the free pool is closed, will remove it. Will return None
if we hit the cap. Will clean up any channels that were published to
but closed due to error.
'''
while len(self._free_channels):
rval = self._free_channels.pop()
if not rval.closed:
return rval
# don't adjust _channels value because the callback will do that
# and we don't want to double count it.
if not self._size or self._channels < self._size:
rval = self._connection.channel()
self._channels += 1
rval.add_close_listener(self._channel_closed_cb)
return rval | Fetch a channel from the pool. Will return a new one if necessary. If
a channel in the free pool is closed, will remove it. Will return None
if we hit the cap. Will clean up any channels that were published to
but closed due to error. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/channel_pool.py#L82-L100 |
agoragames/haigha | haigha/frames/content_frame.py | ContentFrame.create_frames | def create_frames(self, channel_id, buf, frame_max):
'''
A generator which will create frames from a buffer given a max
frame size.
'''
size = frame_max - 8 # 8 bytes overhead for frame header and footer
offset = 0
while True:
payload = buf[offset:(offset + size)]
if len(payload) == 0:
break
offset += size
yield ContentFrame(channel_id, payload)
if offset >= len(buf):
break | python | def create_frames(self, channel_id, buf, frame_max):
'''
A generator which will create frames from a buffer given a max
frame size.
'''
size = frame_max - 8 # 8 bytes overhead for frame header and footer
offset = 0
while True:
payload = buf[offset:(offset + size)]
if len(payload) == 0:
break
offset += size
yield ContentFrame(channel_id, payload)
if offset >= len(buf):
break | A generator which will create frames from a buffer given a max
frame size. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/frames/content_frame.py#L30-L45 |
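A small sketch of create_frames(): with frame_max=40 the 8-byte framing overhead leaves 32 payload bytes per frame, so a 100-byte body becomes four content frames. The import path follows the file path in this record; the channel id and body are illustrative.

# Hedged sketch -- split a 100-byte body for channel 1 at a deliberately tiny frame_max.
from haigha.frames.content_frame import ContentFrame

body = bytearray('x' * 100)
frames = list(ContentFrame.create_frames(1, body, 40))
print len(frames)     # 4 frames: payloads of 32 + 32 + 32 + 4 bytes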
agoragames/haigha | haigha/frames/content_frame.py | ContentFrame.write_frame | def write_frame(self, buf):
'''
Write the frame into an existing buffer.
'''
writer = Writer(buf)
writer.write_octet(self.type()).\
write_short(self.channel_id).\
write_long(len(self._payload)).\
write(self._payload).\
write_octet(0xce) | python | def write_frame(self, buf):
'''
Write the frame into an existing buffer.
'''
writer = Writer(buf)
writer.write_octet(self.type()).\
write_short(self.channel_id).\
write_long(len(self._payload)).\
write(self._payload).\
write_octet(0xce) | Write the frame into an existing buffer. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/frames/content_frame.py#L61-L71 |
agoragames/haigha | haigha/transports/event_transport.py | EventTransport.connect | def connect(self, (host, port)):
'''
Connect assuming a host and port tuple. Implemented as non-blocking,
and will close the transport if there's an error
'''
self._host = "%s:%s" % (host, port)
self._sock = EventSocket(
read_cb=self._sock_read_cb,
close_cb=self._sock_close_cb,
error_cb=self._sock_error_cb,
debug=self.connection.debug,
logger=self.connection.logger)
if self.connection._sock_opts:
for k, v in self.connection._sock_opts.iteritems():
family, type = k
self._sock.setsockopt(family, type, v)
self._sock.setblocking(False)
self._sock.connect(
(host, port), timeout=self.connection._connect_timeout)
self._heartbeat_timeout = None | python | def connect(self, (host, port)):
'''
Connect assuming a host and port tuple. Implemented as non-blocking,
and will close the transport if there's an error
'''
self._host = "%s:%s" % (host, port)
self._sock = EventSocket(
read_cb=self._sock_read_cb,
close_cb=self._sock_close_cb,
error_cb=self._sock_error_cb,
debug=self.connection.debug,
logger=self.connection.logger)
if self.connection._sock_opts:
for k, v in self.connection._sock_opts.iteritems():
family, type = k
self._sock.setsockopt(family, type, v)
self._sock.setblocking(False)
self._sock.connect(
(host, port), timeout=self.connection._connect_timeout)
self._heartbeat_timeout = None | Connect assuming a host and port tuple. Implemented as non-blocking,
and will close the transport if there's an error | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/transports/event_transport.py#L49-L68 |
agoragames/haigha | haigha/transports/event_transport.py | EventTransport.read | def read(self, timeout=None):
'''
Read from the transport. If no data is available, should return None.
The timeout is ignored as this returns only data that has already
been buffered locally.
'''
# NOTE: copying over this comment from Connection, because there is
# knowledge captured here, even if the details are stale
# Because of the timer callback to dataRead when we re-buffered,
# there's a chance that in between we've lost the socket. If that's
# the case, just silently return as some code elsewhere would have
# already notified us. That bug could be fixed by improving the
# message reading so that we consume all possible messages and ensure
# that only a partial message was rebuffered, so that we can rely on
# the next read event to read the subsequent message.
if not hasattr(self, '_sock'):
return None
# This is sort of a hack because we're faking that data is ready, but
# it works for purposes of supporting timeouts
if timeout:
if self._heartbeat_timeout:
self._heartbeat_timeout.delete()
self._heartbeat_timeout = \
event.timeout(timeout, self._sock_read_cb, self._sock)
elif self._heartbeat_timeout:
self._heartbeat_timeout.delete()
self._heartbeat_timeout = None
return self._sock.read() | python | def read(self, timeout=None):
'''
Read from the transport. If no data is available, should return None.
The timeout is ignored as this returns only data that has already
been buffered locally.
'''
# NOTE: copying over this comment from Connection, because there is
# knowledge captured here, even if the details are stale
# Because of the timer callback to dataRead when we re-buffered,
# there's a chance that in between we've lost the socket. If that's
# the case, just silently return as some code elsewhere would have
# already notified us. That bug could be fixed by improving the
# message reading so that we consume all possible messages and ensure
# that only a partial message was rebuffered, so that we can rely on
# the next read event to read the subsequent message.
if not hasattr(self, '_sock'):
return None
# This is sort of a hack because we're faking that data is ready, but
# it works for purposes of supporting timeouts
if timeout:
if self._heartbeat_timeout:
self._heartbeat_timeout.delete()
self._heartbeat_timeout = \
event.timeout(timeout, self._sock_read_cb, self._sock)
elif self._heartbeat_timeout:
self._heartbeat_timeout.delete()
self._heartbeat_timeout = None
return self._sock.read() | Read from the transport. If no data is available, should return None.
The timeout is ignored as this returns only data that has already
been buffered locally. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/transports/event_transport.py#L70-L99 |
agoragames/haigha | haigha/transports/event_transport.py | EventTransport.disconnect | def disconnect(self):
'''
Disconnect from the transport. Typically socket.close(). This call is
welcome to raise exceptions, which the Connection will catch.
The transport is encouraged to allow for any pending writes to complete
before closing the socket.
'''
if not hasattr(self, '_sock'):
return
# TODO: If there are bytes left on the output, queue the close for
# later.
self._sock.close_cb = None
self._sock.close() | python | def disconnect(self):
'''
Disconnect from the transport. Typically socket.close(). This call is
welcome to raise exceptions, which the Connection will catch.
The transport is encouraged to allow for any pending writes to complete
before closing the socket.
'''
if not hasattr(self, '_sock'):
return
# TODO: If there are bytes left on the output, queue the close for
# later.
self._sock.close_cb = None
self._sock.close() | Disconnect from the transport. Typically socket.close(). This call is
welcome to raise exceptions, which the Connection will catch.
The transport is encouraged to allow for any pending writes to complete
before closing the socket. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/transports/event_transport.py#L117-L131 |
agoragames/haigha | haigha/frames/header_frame.py | HeaderFrame.parse | def parse(self, channel_id, payload):
'''
Parse a header frame for a channel given a Reader payload.
'''
class_id = payload.read_short()
weight = payload.read_short()
size = payload.read_longlong()
properties = {}
# The AMQP spec is overly complex when it comes to handling header
# frames. The spec says that in addition to the first 16-bit field,
# additional ones can follow which /may/ then be in the property list
# (because bit flags aren't in the list). Properly implementing custom
# values requires the ability to change the properties and their types,
# which someone is welcome to do, but seriously, what's the point?
# Because the complexity of parsing and writing this frame directly
# impacts the speed at which messages can be processed, there are two
# branches for both a fast parse which assumes no changes to the
# properties and a slow parse. For now it's up to someone using custom
# headers to flip the flag.
if self.DEFAULT_PROPERTIES:
flag_bits = payload.read_short()
for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
if flag_bits & mask:
properties[key] = rfunc(payload)
else:
flags = []
while True:
flag_bits = payload.read_short()
flags.append(flag_bits)
if flag_bits & 1 == 0:
break
shift = 0
for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
if shift == 0:
if not flags:
break
flag_bits, flags = flags[0], flags[1:]
shift = 15
if flag_bits & (1 << shift):
properties[key] = rfunc(payload)
shift -= 1
return HeaderFrame(channel_id, class_id, weight, size, properties) | python | def parse(self, channel_id, payload):
'''
Parse a header frame for a channel given a Reader payload.
'''
class_id = payload.read_short()
weight = payload.read_short()
size = payload.read_longlong()
properties = {}
# The AMQP spec is overly complex when it comes to handling header
# frames. The spec says that in addition to the first 16-bit field,
# additional ones can follow which /may/ then be in the property list
# (because bit flags aren't in the list). Properly implementing custom
# values requires the ability to change the properties and their types,
# which someone is welcome to do, but seriously, what's the point?
# Because the complexity of parsing and writing this frame directly
# impacts the speed at which messages can be processed, there are two
# branches for both a fast parse which assumes no changes to the
# properties and a slow parse. For now it's up to someone using custom
# headers to flip the flag.
if self.DEFAULT_PROPERTIES:
flag_bits = payload.read_short()
for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
if flag_bits & mask:
properties[key] = rfunc(payload)
else:
flags = []
while True:
flag_bits = payload.read_short()
flags.append(flag_bits)
if flag_bits & 1 == 0:
break
shift = 0
for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
if shift == 0:
if not flags:
break
flag_bits, flags = flags[0], flags[1:]
shift = 15
if flag_bits & (1 << shift):
properties[key] = rfunc(payload)
shift -= 1
return HeaderFrame(channel_id, class_id, weight, size, properties) | Parse a header frame for a channel given a Reader payload. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/frames/header_frame.py#L71-L115 |
agoragames/haigha | haigha/frames/header_frame.py | HeaderFrame.write_frame | def write_frame(self, buf):
'''
Write the frame into an existing buffer.
'''
writer = Writer(buf)
writer.write_octet(self.type())
writer.write_short(self.channel_id)
# Track the position where we're going to write the total length
# of the frame arguments.
stream_args_len_pos = len(buf)
writer.write_long(0)
stream_method_pos = len(buf)
writer.write_short(self._class_id)
writer.write_short(self._weight)
writer.write_longlong(self._size)
# Like frame parsing, branch to faster code for default properties
if self.DEFAULT_PROPERTIES:
# Track the position where we're going to write the flags.
flags_pos = len(buf)
writer.write_short(0)
flag_bits = 0
for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
val = self._properties.get(key, None)
if val is not None:
flag_bits |= mask
wfunc(writer, val)
writer.write_short_at(flag_bits, flags_pos)
else:
shift = 15
flag_bits = 0
flags = []
stack = deque()
for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
val = self._properties.get(key, None)
if val is not None:
if shift == 0:
flags.append(flag_bits)
flag_bits = 0
shift = 15
flag_bits |= (1 << shift)
stack.append((wfunc, val))
shift -= 1
flags.append(flag_bits)
for flag_bits in flags:
writer.write_short(flag_bits)
for method, val in stack:
method(writer, val)
# Write the total length back at the beginning of the frame
stream_len = len(buf) - stream_method_pos
writer.write_long_at(stream_len, stream_args_len_pos)
writer.write_octet(0xce) | python | def write_frame(self, buf):
'''
Write the frame into an existing buffer.
'''
writer = Writer(buf)
writer.write_octet(self.type())
writer.write_short(self.channel_id)
# Track the position where we're going to write the total length
# of the frame arguments.
stream_args_len_pos = len(buf)
writer.write_long(0)
stream_method_pos = len(buf)
writer.write_short(self._class_id)
writer.write_short(self._weight)
writer.write_longlong(self._size)
# Like frame parsing, branch to faster code for default properties
if self.DEFAULT_PROPERTIES:
# Track the position where we're going to write the flags.
flags_pos = len(buf)
writer.write_short(0)
flag_bits = 0
for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
val = self._properties.get(key, None)
if val is not None:
flag_bits |= mask
wfunc(writer, val)
writer.write_short_at(flag_bits, flags_pos)
else:
shift = 15
flag_bits = 0
flags = []
stack = deque()
for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
val = self._properties.get(key, None)
if val is not None:
if shift == 0:
flags.append(flag_bits)
flag_bits = 0
shift = 15
flag_bits |= (1 << shift)
stack.append((wfunc, val))
shift -= 1
flags.append(flag_bits)
for flag_bits in flags:
writer.write_short(flag_bits)
for method, val in stack:
method(writer, val)
# Write the total length back at the beginning of the frame
stream_len = len(buf) - stream_method_pos
writer.write_long_at(stream_len, stream_args_len_pos)
writer.write_octet(0xce) | Write the frame into an existing buffer. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/frames/header_frame.py#L129-L188 |
agoragames/haigha | haigha/frames/frame.py | Frame.read_frames | def read_frames(cls, reader):
'''
Read one or more frames from an IO stream. Buffer must support file
object interface.
After reading, caller will need to check if there are bytes remaining
in the stream. If there are, then that implies that there is one or
more incomplete frames and more data needs to be read. The position
of the cursor in the frame stream will mark the point at which the
last good frame was read. If the caller is expecting a sequence of
frames and only received a part of that sequence, they are responsible
for buffering those frames until the rest of the frames in the sequence
have arrived.
'''
rval = deque()
while True:
frame_start_pos = reader.tell()
try:
frame = Frame._read_frame(reader)
except Reader.BufferUnderflow:
# No more data in the stream
frame = None
except Reader.ReaderError as e:
# Some other format error
raise Frame.FormatError, str(e), sys.exc_info()[-1]
except struct.error as e:
raise Frame.FormatError, str(e), sys.exc_info()[-1]
if frame is None:
reader.seek(frame_start_pos)
break
rval.append(frame)
return rval | python | def read_frames(cls, reader):
    '''
    Read one or more frames from an IO stream. Buffer must support file
    object interface.
    After reading, caller will need to check if there are bytes remaining
    in the stream. If there are, then that implies that there is one or
    more incomplete frames and more data needs to be read. The position
    of the cursor in the frame stream will mark the point at which the
    last good frame was read. If the caller is expecting a sequence of
    frames and only received a part of that sequence, they are responsible
    for buffering those frames until the rest of the frames in the sequence
    have arrived.
    '''
    rval = deque()
    while True:
        frame_start_pos = reader.tell()
        try:
            frame = Frame._read_frame(reader)
        except Reader.BufferUnderflow:
            # No more data in the stream
            frame = None
        except Reader.ReaderError as e:
            # Some other format error
            raise Frame.FormatError, str(e), sys.exc_info()[-1]
        except struct.error as e:
            raise Frame.FormatError, str(e), sys.exc_info()[-1]
        if frame is None:
            reader.seek(frame_start_pos)
            break
        rval.append(frame)
    return rval | Read one or more frames from an IO stream. Buffer must support file
object interface.
After reading, caller will need to check if there are bytes remaining
in the stream. If there are, then that implies that there is one or
more incomplete frames and more data needs to be read. The position
of the cursor in the frame stream will mark the point at which the
last good frame was read. If the caller is expecting a sequence of
frames and only received a part of that sequence, they are responsible
for buffering those frames until the rest of the frames in the sequence
have arrived. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/frames/frame.py#L49-L84 |
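
The read_frames docstring above describes a buffering contract: parse the frames that are complete, then keep the partial tail for the next read. A hedged caller-side sketch follows; the Reader import path and its acceptance of a bytearray are assumptions about the wider haigha API, not shown in this record.

# Hedged sketch of the caller-side buffering described above.
from haigha.reader import Reader        # assumed import path
from haigha.frames.frame import Frame

pending = bytearray()                    # bytes received from the socket so far

def on_socket_data(chunk):
    pending.extend(chunk)
    reader = Reader(pending)
    frames = Frame.read_frames(reader)   # deque of complete frames
    # read_frames leaves the cursor after the last complete frame, so
    # everything past reader.tell() is an incomplete frame to keep.
    del pending[:reader.tell()]
    return frames
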
agoragames/haigha | haigha/frames/frame.py | Frame._read_frame | def _read_frame(cls, reader):
    '''
    Read a single frame from a Reader. Will return None if there is an
    incomplete frame in the stream.
    Raise MissingFooter if there's a problem reading the footer byte.
    '''
    frame_type = reader.read_octet()
    channel_id = reader.read_short()
    size = reader.read_long()
    payload = Reader(reader, reader.tell(), size)
    # Seek to end of payload
    reader.seek(size, 1)
    ch = reader.read_octet() # footer
    if ch != 0xce:
        raise Frame.FormatError(
            'Framing error, unexpected byte: %x. frame type %x. channel %d, payload size %d',
            ch, frame_type, channel_id, size)
    frame_class = cls._frame_type_map.get(frame_type)
    if not frame_class:
        raise Frame.InvalidFrameType("Unknown frame type %x", frame_type)
    return frame_class.parse(channel_id, payload) | python | def _read_frame(cls, reader):
    '''
    Read a single frame from a Reader. Will return None if there is an
    incomplete frame in the stream.
    Raise MissingFooter if there's a problem reading the footer byte.
    '''
    frame_type = reader.read_octet()
    channel_id = reader.read_short()
    size = reader.read_long()
    payload = Reader(reader, reader.tell(), size)
    # Seek to end of payload
    reader.seek(size, 1)
    ch = reader.read_octet() # footer
    if ch != 0xce:
        raise Frame.FormatError(
            'Framing error, unexpected byte: %x. frame type %x. channel %d, payload size %d',
            ch, frame_type, channel_id, size)
    frame_class = cls._frame_type_map.get(frame_type)
    if not frame_class:
        raise Frame.InvalidFrameType("Unknown frame type %x", frame_type)
    return frame_class.parse(channel_id, payload) | Read a single frame from a Reader. Will return None if there is an
incomplete frame in the stream.
Raise MissingFooter if there's a problem reading the footer byte. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/frames/frame.py#L87-L112 |
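
Since _read_frame is internal, a call example adds little; instead, here is a standalone illustration of the wire layout it consumes: a 7-byte header (type octet, channel short, size long), the payload, and a mandatory 0xce footer octet. This sketch uses only the standard struct module and is not haigha code.

import struct

def split_one_frame(data):
    # Returns (frame, remainder); frame is None while the data is incomplete.
    if len(data) < 8:
        return None, data                 # header + footer not yet complete
    frame_type, channel_id, size = struct.unpack_from('>BHI', data, 0)
    end = 7 + size
    if len(data) < end + 1:
        return None, data                 # payload or footer still missing
    if data[end:end + 1] != b'\xce':
        raise ValueError('framing error: bad frame-end octet')
    return (frame_type, channel_id, data[7:end]), data[end + 1:]
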
agoragames/haigha | haigha/connections/rabbit_connection.py | RabbitExchangeClass.unbind | def unbind(self, exchange, source, routing_key='', nowait=True,
           arguments={}, ticket=None, cb=None):
    '''
    Unbind an exchange from another.
    '''
    nowait = nowait and self.allow_nowait() and not cb
    args = Writer()
    args.write_short(ticket or self.default_ticket).\
        write_shortstr(exchange).\
        write_shortstr(source).\
        write_shortstr(routing_key).\
        write_bit(nowait).\
        write_table(arguments or {})
    self.send_frame(MethodFrame(self.channel_id, 40, 40, args))
    if not nowait:
        self._unbind_cb.append(cb)
        self.channel.add_synchronous_cb(self._recv_unbind_ok) | python | def unbind(self, exchange, source, routing_key='', nowait=True,
           arguments={}, ticket=None, cb=None):
    '''
    Unbind an exchange from another.
    '''
    nowait = nowait and self.allow_nowait() and not cb
    args = Writer()
    args.write_short(ticket or self.default_ticket).\
        write_shortstr(exchange).\
        write_shortstr(source).\
        write_shortstr(routing_key).\
        write_bit(nowait).\
        write_table(arguments or {})
    self.send_frame(MethodFrame(self.channel_id, 40, 40, args))
    if not nowait:
        self._unbind_cb.append(cb)
        self.channel.add_synchronous_cb(self._recv_unbind_ok) | Unbind an exchange from another. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connections/rabbit_connection.py#L117-L135 |
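
The unbind method above issues RabbitMQ's exchange-to-exchange unbind. A hedged usage sketch follows; the RabbitConnection constructor arguments and the channel.exchange accessor are assumptions about the wider haigha API, and only the unbind() signature itself comes from this record.

from haigha.connections.rabbit_connection import RabbitConnection

connection = RabbitConnection(host='localhost', vhost='/',
                              user='guest', password='guest')  # assumed kwargs
channel = connection.channel()

def on_unbind_ok():
    print('binding removed')

# Passing a callback (or nowait=False) makes the client wait for the
# broker's unbind-ok reply before invoking the callback.
channel.exchange.unbind('downstream.ex', 'upstream.ex',
                        routing_key='orders.*', cb=on_unbind_ok)
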
agoragames/haigha | haigha/connections/rabbit_connection.py | RabbitBasicClass._cleanup | def _cleanup(self):
    '''
    Cleanup all the local data.
    '''
    self._ack_listener = None
    self._nack_listener = None
    self._broker_cancel_cb_map = None
    super(RabbitBasicClass, self)._cleanup() | python | def _cleanup(self):
    '''
    Cleanup all the local data.
    '''
    self._ack_listener = None
    self._nack_listener = None
    self._broker_cancel_cb_map = None
    super(RabbitBasicClass, self)._cleanup() | Cleanup all the local data. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connections/rabbit_connection.py#L165-L172 |