repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_documentation_string
stringlengths
1
47.2k
func_code_url
stringlengths
85
339
Ezhil-Language-Foundation/open-tamil
tamil/utf8.py
joinMeiUyir
def joinMeiUyir(mei_char, uyir_char): """ This function join mei character and uyir character, and retuns as compound uyirmei unicode character. Inputs: mei_char : It must be unicode tamil mei char. uyir_char : It must be unicode tamil uyir char. Written By : Arulalan.T Date : 22.09.2014 """ if not mei_char: return uyir_char if not uyir_char: return mei_char if not isinstance(mei_char, PYTHON3 and str or unicode): raise ValueError(u"Passed input mei character '%s' must be unicode, not just string" % mei_char) if not isinstance(uyir_char, PYTHON3 and str or unicode) and uyir_char != None: raise ValueError(u"Passed input uyir character '%s' must be unicode, not just string" % uyir_char) if mei_char not in grantha_mei_letters: raise ValueError(u"Passed input character '%s' is not a tamil mei character" % mei_char) if uyir_char not in uyir_letters: raise ValueError(u"Passed input character '%s' is not a tamil uyir character" % uyir_char) if uyir_char: uyiridx = uyir_letters.index(uyir_char) else: return mei_char meiidx = grantha_mei_letters.index(mei_char) # calculate uyirmei index uyirmeiidx = meiidx*12 + uyiridx return grantha_uyirmei_letters[uyirmeiidx]
python
def joinMeiUyir(mei_char, uyir_char): """ This function join mei character and uyir character, and retuns as compound uyirmei unicode character. Inputs: mei_char : It must be unicode tamil mei char. uyir_char : It must be unicode tamil uyir char. Written By : Arulalan.T Date : 22.09.2014 """ if not mei_char: return uyir_char if not uyir_char: return mei_char if not isinstance(mei_char, PYTHON3 and str or unicode): raise ValueError(u"Passed input mei character '%s' must be unicode, not just string" % mei_char) if not isinstance(uyir_char, PYTHON3 and str or unicode) and uyir_char != None: raise ValueError(u"Passed input uyir character '%s' must be unicode, not just string" % uyir_char) if mei_char not in grantha_mei_letters: raise ValueError(u"Passed input character '%s' is not a tamil mei character" % mei_char) if uyir_char not in uyir_letters: raise ValueError(u"Passed input character '%s' is not a tamil uyir character" % uyir_char) if uyir_char: uyiridx = uyir_letters.index(uyir_char) else: return mei_char meiidx = grantha_mei_letters.index(mei_char) # calculate uyirmei index uyirmeiidx = meiidx*12 + uyiridx return grantha_uyirmei_letters[uyirmeiidx]
This function join mei character and uyir character, and retuns as compound uyirmei unicode character. Inputs: mei_char : It must be unicode tamil mei char. uyir_char : It must be unicode tamil uyir char. Written By : Arulalan.T Date : 22.09.2014
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/utf8.py#L622-L652
Ezhil-Language-Foundation/open-tamil
tamil/regexp.py
expand_tamil
def expand_tamil(start,end): """ expand uyir or mei-letter range etc. i.e. அ-ஔ gets converted to அ,ஆ,இ,ஈ,உ,ஊ,எ,ஏ,ஐ,ஒ,ஓ,ஔ etc. """ # few sequences for seq in [utf8.uyir_letters, utf8.grantha_mei_letters, \ utf8.grantha_agaram_letters]: if is_containing_seq(start,end,seq): return expand_sequence(start,end,seq) # all Tamil letters seq = utf8.grantha_uyirmei_letters if is_containing_seq(start,end,seq): return expand_sequence(start,end,seq) raise Exception("Cannot understand sequence [%s-%s]"%(start,end))
python
def expand_tamil(start,end): """ expand uyir or mei-letter range etc. i.e. அ-ஔ gets converted to அ,ஆ,இ,ஈ,உ,ஊ,எ,ஏ,ஐ,ஒ,ஓ,ஔ etc. """ # few sequences for seq in [utf8.uyir_letters, utf8.grantha_mei_letters, \ utf8.grantha_agaram_letters]: if is_containing_seq(start,end,seq): return expand_sequence(start,end,seq) # all Tamil letters seq = utf8.grantha_uyirmei_letters if is_containing_seq(start,end,seq): return expand_sequence(start,end,seq) raise Exception("Cannot understand sequence [%s-%s]"%(start,end))
expand uyir or mei-letter range etc. i.e. அ-ஔ gets converted to அ,ஆ,இ,ஈ,உ,ஊ,எ,ஏ,ஐ,ஒ,ஓ,ஔ etc.
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/regexp.py#L20-L34
Ezhil-Language-Foundation/open-tamil
tamil/regexp.py
make_pattern
def make_pattern( patt, flags=0 ): """ returns a compile regular expression object """ # print('input',len(patt)) patt_letters = utf8.get_letters( patt ) patt_out = list() idx = 0 # print('output',len(patt_letters)) patt = [None,None] prev = None LEN_PATT = len(patt_letters) while( idx < LEN_PATT ): if utf8.istamil( patt_letters[idx] ) and ( prev == '-' or ((idx+1) < LEN_PATT and patt_letters[idx+1] == u'-') ): if (idx+1) < LEN_PATT and patt_letters[idx+1] == u'-': patt[0] = patt_letters[idx] idx = idx + 2 prev = "-" elif prev == '-': patt[1] = patt_letters[idx] patt_out.extend( expand_tamil( patt[0], patt[1]) ) idx = idx + 1 prev = patt_letters[idx] continue patt_out.extend( patt_letters[idx] ) prev = patt_letters[idx] idx = idx + 1 opattern = u"".join( patt_out ) compile_regexp = re.compile( opattern, flags ) return (compile_regexp,opattern)
python
def make_pattern( patt, flags=0 ): """ returns a compile regular expression object """ # print('input',len(patt)) patt_letters = utf8.get_letters( patt ) patt_out = list() idx = 0 # print('output',len(patt_letters)) patt = [None,None] prev = None LEN_PATT = len(patt_letters) while( idx < LEN_PATT ): if utf8.istamil( patt_letters[idx] ) and ( prev == '-' or ((idx+1) < LEN_PATT and patt_letters[idx+1] == u'-') ): if (idx+1) < LEN_PATT and patt_letters[idx+1] == u'-': patt[0] = patt_letters[idx] idx = idx + 2 prev = "-" elif prev == '-': patt[1] = patt_letters[idx] patt_out.extend( expand_tamil( patt[0], patt[1]) ) idx = idx + 1 prev = patt_letters[idx] continue patt_out.extend( patt_letters[idx] ) prev = patt_letters[idx] idx = idx + 1 opattern = u"".join( patt_out ) compile_regexp = re.compile( opattern, flags ) return (compile_regexp,opattern)
returns a compile regular expression object
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/regexp.py#L36-L65
Ezhil-Language-Foundation/open-tamil
examples/speller/ta_data.py
BigramHash.frequency_model
def frequency_model( self ): """ build a letter frequency model for Tamil letters from a corpus """ prev_letter = None # use a generator in corpus prev_letter = list(self.corpus.next_tamil_letter())[0] for next_letter in self.corpus.next_tamil_letter(): # update frequency from corpus key = prev_letter+next_letter val = self.bigram.get(key,None) prev_letter = next_letter if not val: self.bigram[key] = 0 self.bigram[key] += 1 return
python
def frequency_model( self ): """ build a letter frequency model for Tamil letters from a corpus """ prev_letter = None # use a generator in corpus prev_letter = list(self.corpus.next_tamil_letter())[0] for next_letter in self.corpus.next_tamil_letter(): # update frequency from corpus key = prev_letter+next_letter val = self.bigram.get(key,None) prev_letter = next_letter if not val: self.bigram[key] = 0 self.bigram[key] += 1 return
build a letter frequency model for Tamil letters from a corpus
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/examples/speller/ta_data.py#L81-L95
Ezhil-Language-Foundation/open-tamil
tamil/iscii.py
convert_to_unicode
def convert_to_unicode( tscii_input ): """ convert a byte-ASCII encoded string into equivalent Unicode string in the UTF-8 notation.""" output = list() prev = None prev2x = None # need a look ahead of 2 tokens atleast for char in tscii_input: ## print "%2x"%ord(char) # debugging if ord(char) < 128 : # base-ASCII copy to output output.append( char ) prev = None prev2x = None elif ord(char) in ISCII_DIRECT_LOOKUP: if ( prev in ISCII_PRE_MODIFIER ): curr_char = [ISCII[ord(char)],ISCII[prev]] else: # we are direct lookup char curr_char = [ISCII[ord(char)]] char = None output.extend( curr_char ) elif ( (ord(char) in ISCII_POST_MODIFIER) ): if ( (prev in ISCII_DIRECT_LOOKUP) and (prev2x in ISCII_PRE_MODIFIER) ): if len(output) >= 2: del output[-1] #we are reducing this token to something new del output[-2] elif len(output)==1: del output[-1] else: # nothing to delete here.. pass output.extend( [ISCII[prev], ISCII[prev2x]] ) else: print("Warning: malformed ISCII encoded file; skipping characters") prev = None char = None else: # pass - must be one of the pre/post modifiers pass prev2x = prev if char: prev = ord(char) return u"".join(output)
python
def convert_to_unicode( tscii_input ): """ convert a byte-ASCII encoded string into equivalent Unicode string in the UTF-8 notation.""" output = list() prev = None prev2x = None # need a look ahead of 2 tokens atleast for char in tscii_input: ## print "%2x"%ord(char) # debugging if ord(char) < 128 : # base-ASCII copy to output output.append( char ) prev = None prev2x = None elif ord(char) in ISCII_DIRECT_LOOKUP: if ( prev in ISCII_PRE_MODIFIER ): curr_char = [ISCII[ord(char)],ISCII[prev]] else: # we are direct lookup char curr_char = [ISCII[ord(char)]] char = None output.extend( curr_char ) elif ( (ord(char) in ISCII_POST_MODIFIER) ): if ( (prev in ISCII_DIRECT_LOOKUP) and (prev2x in ISCII_PRE_MODIFIER) ): if len(output) >= 2: del output[-1] #we are reducing this token to something new del output[-2] elif len(output)==1: del output[-1] else: # nothing to delete here.. pass output.extend( [ISCII[prev], ISCII[prev2x]] ) else: print("Warning: malformed ISCII encoded file; skipping characters") prev = None char = None else: # pass - must be one of the pre/post modifiers pass prev2x = prev if char: prev = ord(char) return u"".join(output)
convert a byte-ASCII encoded string into equivalent Unicode string in the UTF-8 notation.
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/iscii.py#L151-L201
Ezhil-Language-Foundation/open-tamil
examples/wordxsec.py
WordXSec.compute
def compute( self ): # compute the intersection graph into @xsections dictionary wordlist = self.wordlist """ build a dictionary of words, and their intersections """ xsections = {} for i in range(len(wordlist)): word_i = wordlist[i] for j in range(len(wordlist)): word_j = wordlist[j] if i == j: # force self-intersection to be 0 if not xsections.get(word_i,None): xsections[word_i] = [''] else: xsections[word_i].extend(['']) continue # optimize for, i > j, info is calculated already if i > j: xsec_counts = xsections[word_j][i] else: xsec_counts = tamil.utf8.word_intersection( word_i, word_j ) if not xsections.get(word_i,None): xsections[word_i] = [xsec_counts] else: xsections[word_i].extend( [ xsec_counts ] ) self.xsections = xsections
python
def compute( self ): # compute the intersection graph into @xsections dictionary wordlist = self.wordlist """ build a dictionary of words, and their intersections """ xsections = {} for i in range(len(wordlist)): word_i = wordlist[i] for j in range(len(wordlist)): word_j = wordlist[j] if i == j: # force self-intersection to be 0 if not xsections.get(word_i,None): xsections[word_i] = [''] else: xsections[word_i].extend(['']) continue # optimize for, i > j, info is calculated already if i > j: xsec_counts = xsections[word_j][i] else: xsec_counts = tamil.utf8.word_intersection( word_i, word_j ) if not xsections.get(word_i,None): xsections[word_i] = [xsec_counts] else: xsections[word_i].extend( [ xsec_counts ] ) self.xsections = xsections
build a dictionary of words, and their intersections
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/examples/wordxsec.py#L26-L51
Ezhil-Language-Foundation/open-tamil
solthiruthi/heuristics.py
Sequential.in_sequence
def in_sequence( word, ref_set, ref_reason, freq_threshold = 2 ): """ ignore ctx information right now. If repetition/match length >= @freq_threshold then we flag-it """ chars = get_letters(word) flag = True #no error assumed reason = None #no reason freq_count = 0 for char in chars: if char in ref_set: freq_count += 1 if freq_count >= freq_threshold: flag = False break continue freq_count = 0 # continue loop if not flag: reason = ref_reason return flag,reason
python
def in_sequence( word, ref_set, ref_reason, freq_threshold = 2 ): """ ignore ctx information right now. If repetition/match length >= @freq_threshold then we flag-it """ chars = get_letters(word) flag = True #no error assumed reason = None #no reason freq_count = 0 for char in chars: if char in ref_set: freq_count += 1 if freq_count >= freq_threshold: flag = False break continue freq_count = 0 # continue loop if not flag: reason = ref_reason return flag,reason
ignore ctx information right now. If repetition/match length >= @freq_threshold then we flag-it
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/solthiruthi/heuristics.py#L34-L50
Ezhil-Language-Foundation/open-tamil
solthiruthi/heuristics.py
AdjacentVowels.apply
def apply(self, word, ctx=None): """ ignore ctx information right now """ return Sequential.in_sequence(word,AdjacentVowels.uyir_letters,AdjacentVowels.reason)
python
def apply(self, word, ctx=None): """ ignore ctx information right now """ return Sequential.in_sequence(word,AdjacentVowels.uyir_letters,AdjacentVowels.reason)
ignore ctx information right now
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/solthiruthi/heuristics.py#L59-L61
Ezhil-Language-Foundation/open-tamil
solthiruthi/heuristics.py
AdjacentConsonants.apply
def apply(self, word, ctx=None): """ ignore ctx information right now """ flag,reason = Sequential.in_sequence(word,AdjacentConsonants.mei_letters,AdjacentConsonants.reason,self.freq_threshold) if flag: flag,reason = Sequential.in_sequence(word,AdjacentConsonants.agaram_letters,AdjacentConsonants.reason,self.freq_threshold) return flag,reason
python
def apply(self, word, ctx=None): """ ignore ctx information right now """ flag,reason = Sequential.in_sequence(word,AdjacentConsonants.mei_letters,AdjacentConsonants.reason,self.freq_threshold) if flag: flag,reason = Sequential.in_sequence(word,AdjacentConsonants.agaram_letters,AdjacentConsonants.reason,self.freq_threshold) return flag,reason
ignore ctx information right now
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/solthiruthi/heuristics.py#L74-L79
Ezhil-Language-Foundation/open-tamil
solthiruthi/heuristics.py
RepeatedLetters.apply
def apply(self,word,ctx=None): """ ignore ctx information right now """ chars = get_letters(word) flag = True #no error assumed reason = None #no reason prev_letter = None for char in chars: if prev_letter == char: flag = False break prev_letter = char # continue loop if not flag: reason = RepeatedLetters.reason return flag,reason
python
def apply(self,word,ctx=None): """ ignore ctx information right now """ chars = get_letters(word) flag = True #no error assumed reason = None #no reason prev_letter = None for char in chars: if prev_letter == char: flag = False break prev_letter = char # continue loop if not flag: reason = RepeatedLetters.reason return flag,reason
ignore ctx information right now
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/solthiruthi/heuristics.py#L85-L98
Ezhil-Language-Foundation/open-tamil
solthiruthi/heuristics.py
BadIME.apply
def apply(self, word, ctx=None): """ ignore ctx information right now """ chars = get_letters(word) flag = True #no error assumed reason = None #no reason prev_char = None for char in chars: rule1,rule2,rule3 = False,False,False # rule 1 : uyir followed by kombugal rule1 = (char[-1] in utf8.accent_symbols) and (char[0] in utf8.uyir_letters) if not rule1: # rule 2 : two pullis adjacent to each other rule2 = len(char) >= 2 and (char[-1] == utf8.pulli_symbols[0]) and (char[-2] == char[-1] ) if not rule2: # rule 3 : none of the accent symbols repeat # exclusions to rule 3 : non-standard Unicode encoding of periya kombu / siriya kombu with thunai kaal rule3 = len(char) >= 2 and (char[-1] in utf8.accent_symbols) and (char[-2] in utf8.accent_symbols) \ and not( char[-1] == u"ா" and char[-2] in [u"ெ",u"ே"]) if rule1 or rule2 or rule3: flag = False reason = BadIME.reason break prev_char = char # continue loop #print([flag,reason]) return flag,reason
python
def apply(self, word, ctx=None): """ ignore ctx information right now """ chars = get_letters(word) flag = True #no error assumed reason = None #no reason prev_char = None for char in chars: rule1,rule2,rule3 = False,False,False # rule 1 : uyir followed by kombugal rule1 = (char[-1] in utf8.accent_symbols) and (char[0] in utf8.uyir_letters) if not rule1: # rule 2 : two pullis adjacent to each other rule2 = len(char) >= 2 and (char[-1] == utf8.pulli_symbols[0]) and (char[-2] == char[-1] ) if not rule2: # rule 3 : none of the accent symbols repeat # exclusions to rule 3 : non-standard Unicode encoding of periya kombu / siriya kombu with thunai kaal rule3 = len(char) >= 2 and (char[-1] in utf8.accent_symbols) and (char[-2] in utf8.accent_symbols) \ and not( char[-1] == u"ா" and char[-2] in [u"ெ",u"ே"]) if rule1 or rule2 or rule3: flag = False reason = BadIME.reason break prev_char = char # continue loop #print([flag,reason]) return flag,reason
ignore ctx information right now
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/solthiruthi/heuristics.py#L107-L133
Ezhil-Language-Foundation/open-tamil
tamilstemmer/basestemmer.py
BaseStemmer.set_current
def set_current(self, value): ''' Set the self.current string. ''' self.current = value self.cursor = 0 self.limit = len(self.current) self.limit_backward = 0 self.bra = self.cursor self.ket = self.limit
python
def set_current(self, value): ''' Set the self.current string. ''' self.current = value self.cursor = 0 self.limit = len(self.current) self.limit_backward = 0 self.bra = self.cursor self.ket = self.limit
Set the self.current string.
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamilstemmer/basestemmer.py#L12-L21
Ezhil-Language-Foundation/open-tamil
tamilstemmer/basestemmer.py
BaseStemmer.replace_s
def replace_s(self, c_bra, c_ket, s): ''' to replace chars between c_bra and c_ket in self.current by the chars in s. @type c_bra int @type c_ket int @type s: string ''' adjustment = len(s) - (c_ket - c_bra) self.current = self.current[0:c_bra] + s + self.current[c_ket:] self.limit += adjustment if self.cursor >= c_ket: self.cursor += adjustment elif self.cursor > c_bra: self.cursor = c_bra return adjustment
python
def replace_s(self, c_bra, c_ket, s): ''' to replace chars between c_bra and c_ket in self.current by the chars in s. @type c_bra int @type c_ket int @type s: string ''' adjustment = len(s) - (c_ket - c_bra) self.current = self.current[0:c_bra] + s + self.current[c_ket:] self.limit += adjustment if self.cursor >= c_ket: self.cursor += adjustment elif self.cursor > c_bra: self.cursor = c_bra return adjustment
to replace chars between c_bra and c_ket in self.current by the chars in s. @type c_bra int @type c_ket int @type s: string
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamilstemmer/basestemmer.py#L261-L277
Ezhil-Language-Foundation/open-tamil
tamilstemmer/basestemmer.py
BaseStemmer.slice_to
def slice_to(self, s): ''' Copy the slice into the supplied StringBuffer @type s: string ''' result = '' if self.slice_check(): result = self.current[self.bra:self.ket] return result
python
def slice_to(self, s): ''' Copy the slice into the supplied StringBuffer @type s: string ''' result = '' if self.slice_check(): result = self.current[self.bra:self.ket] return result
Copy the slice into the supplied StringBuffer @type s: string
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamilstemmer/basestemmer.py#L309-L318
Ezhil-Language-Foundation/open-tamil
tamil/tweetparser.py
TamilTweetParser.isTamilPredicate
def isTamilPredicate(word): """ is Tamil word : boolean True/False""" for c in word: if unicodedata.name(c).split()[0] != u'TAMIL' : return False return True
python
def isTamilPredicate(word): """ is Tamil word : boolean True/False""" for c in word: if unicodedata.name(c).split()[0] != u'TAMIL' : return False return True
is Tamil word : boolean True/False
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/tweetparser.py#L71-L76
Ezhil-Language-Foundation/open-tamil
tamil/tweetparser.py
TamilTweetParser.cleanupPunct
def cleanupPunct( tweet ): """ NonEnglishOrTamilOr """ tweet = ''.join( map( lambda c: (unicodedata.name(c).split()[0] in [u'TAMIL',u'LATIN']) and c or u' ', tweet) ) return tweet
python
def cleanupPunct( tweet ): """ NonEnglishOrTamilOr """ tweet = ''.join( map( lambda c: (unicodedata.name(c).split()[0] in [u'TAMIL',u'LATIN']) and c or u' ', tweet) ) return tweet
NonEnglishOrTamilOr
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/tweetparser.py#L79-L82
Ezhil-Language-Foundation/open-tamil
tamil/tweetparser.py
TamilTweetParser.getTamilWords
def getTamilWords( tweet ): """" word needs to all be in the same tamil language """ tweet = TamilTweetParser.cleanupPunct( tweet ); nonETwords = filter( lambda x: len(x) > 0 , re.split(r'\s+',tweet) );#|"+|\'+|#+ tamilWords = filter( TamilTweetParser.isTamilPredicate, nonETwords ); return tamilWords
python
def getTamilWords( tweet ): """" word needs to all be in the same tamil language """ tweet = TamilTweetParser.cleanupPunct( tweet ); nonETwords = filter( lambda x: len(x) > 0 , re.split(r'\s+',tweet) );#|"+|\'+|#+ tamilWords = filter( TamilTweetParser.isTamilPredicate, nonETwords ); return tamilWords
word needs to all be in the same tamil language
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/tweetparser.py#L85-L90
Ezhil-Language-Foundation/open-tamil
tamil/tscii.py
convert_to_unicode
def convert_to_unicode( tscii_input ): """ convert a byte-ASCII encoded string into equivalent Unicode string in the UTF-8 notation.""" output = list() prev = None prev2x = None # need a look ahead of 2 tokens atleast for char in tscii_input: ## print "%2x"%ord(char) # debugging if ord(char) < 128 : # base-ASCII copy to output output.append( char ) prev = None prev2x = None elif ord(char) in TSCII_DIRECT_LOOKUP: if ( prev in TSCII_PRE_MODIFIER ): curr_char = [TSCII[ord(char)],TSCII[prev]] else: # we are direct lookup char curr_char = [TSCII[ord(char)]] char = None output.extend( curr_char ) elif ( (ord(char) in TSCII_POST_MODIFIER) ): if ( (prev in TSCII_DIRECT_LOOKUP) and (prev2x in TSCII_PRE_MODIFIER) ): if len(output) >= 2: del output[-1] #we are reducing this token to something new del output[-2] elif len(output)==1: del output[-1] else: # nothing to delete here.. pass output.extend( [TSCII[prev], TSCII[prev2x]] ) else: print("Warning: malformed TSCII encoded file; skipping characters") prev = None char = None else: # pass - must be one of the pre/post modifiers pass prev2x = prev if char: prev = ord(char) return u"".join(output)
python
def convert_to_unicode( tscii_input ): """ convert a byte-ASCII encoded string into equivalent Unicode string in the UTF-8 notation.""" output = list() prev = None prev2x = None # need a look ahead of 2 tokens atleast for char in tscii_input: ## print "%2x"%ord(char) # debugging if ord(char) < 128 : # base-ASCII copy to output output.append( char ) prev = None prev2x = None elif ord(char) in TSCII_DIRECT_LOOKUP: if ( prev in TSCII_PRE_MODIFIER ): curr_char = [TSCII[ord(char)],TSCII[prev]] else: # we are direct lookup char curr_char = [TSCII[ord(char)]] char = None output.extend( curr_char ) elif ( (ord(char) in TSCII_POST_MODIFIER) ): if ( (prev in TSCII_DIRECT_LOOKUP) and (prev2x in TSCII_PRE_MODIFIER) ): if len(output) >= 2: del output[-1] #we are reducing this token to something new del output[-2] elif len(output)==1: del output[-1] else: # nothing to delete here.. pass output.extend( [TSCII[prev], TSCII[prev2x]] ) else: print("Warning: malformed TSCII encoded file; skipping characters") prev = None char = None else: # pass - must be one of the pre/post modifiers pass prev2x = prev if char: prev = ord(char) return u"".join(output)
convert a byte-ASCII encoded string into equivalent Unicode string in the UTF-8 notation.
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/tscii.py#L151-L201
Ezhil-Language-Foundation/open-tamil
tamil/txt2unicode/encode2unicode.py
_get_unique_ch
def _get_unique_ch(text, all_common_encodes): """ text : encode sample strings returns unique word / characters from input text encode strings. """ unique_chars = '' if isinstance(text, str): text = text.split("\n") elif isinstance(text, (list, tuple)): pass special_chars = ['.', ',', ';', ':','', ' ', '\r', '\t', '=', '\n'] for line in text: for word in line.split(' '): if ( not PYTHON3 ): word = word.decode( 'utf-8') for ch in all_common_encodes: if ch in word: word = word.replace(ch, '') # end of for ch in _all_common_encodes_: # if len of word is zero, then go for another word if not word: continue for ch in word: if ch.isdigit() or ch in special_chars: # remove special common chars word = word.replace(ch, '') continue # end of if ch.isdigit() or ...: # Whola, got unique chars from user passed text return word # end of for ch in word: # end of for word in line.split(' '): # end of for line in text: return ''
python
def _get_unique_ch(text, all_common_encodes): """ text : encode sample strings returns unique word / characters from input text encode strings. """ unique_chars = '' if isinstance(text, str): text = text.split("\n") elif isinstance(text, (list, tuple)): pass special_chars = ['.', ',', ';', ':','', ' ', '\r', '\t', '=', '\n'] for line in text: for word in line.split(' '): if ( not PYTHON3 ): word = word.decode( 'utf-8') for ch in all_common_encodes: if ch in word: word = word.replace(ch, '') # end of for ch in _all_common_encodes_: # if len of word is zero, then go for another word if not word: continue for ch in word: if ch.isdigit() or ch in special_chars: # remove special common chars word = word.replace(ch, '') continue # end of if ch.isdigit() or ...: # Whola, got unique chars from user passed text return word # end of for ch in word: # end of for word in line.split(' '): # end of for line in text: return ''
text : encode sample strings returns unique word / characters from input text encode strings.
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/txt2unicode/encode2unicode.py#L179-L213
Ezhil-Language-Foundation/open-tamil
tamil/txt2unicode/encode2unicode.py
_get_unique_common_encodes
def _get_unique_common_encodes(): """ This function will return both unique_encodes and common_encodes as tuple. unique_encodes : In dictionary with encodes name as key and its corresponding encode's unique characters among other available encodes. common_encodes : In set type which has all common encode compound characters from all available encodes. i.e. removed common encode single characters Author : Arulalan.T 04.08.2014 """ _all_unique_encodes_ = [] _all_unicode_encodes_ = {} _all_common_encodes_ = set([]) _all_common_encodes_single_char_ = set([]) for name, encode in _all_encodes_.items(): encode_utf8 = set([PYTHON3 and ch or ch.decode( 'utf-8') for ch in encode.keys()]) _all_unicode_encodes_[name] = encode_utf8 _all_unique_encodes_full_ =_all_unicode_encodes_.copy() for supname, super_encode in _all_unicode_encodes_.items(): for subname, sub_encode in _all_unicode_encodes_.items(): if supname == subname: continue # get unique of super_encode among other encodings super_encode = super_encode - sub_encode # get common for all over encodings common = _all_unique_encodes_full_[supname] - super_encode # merge common to all encodings common _all_common_encodes_ = _all_common_encodes_.union(common) # store super_encode's unique keys with its name _all_unique_encodes_.append((supname, super_encode)) for ch in _all_common_encodes_: # collect single common chars if len(ch) == 1: _all_common_encodes_single_char_.add(ch) # end of for ch in _all_common_encodes_: # remove single common char from compound common chars _all_common_encodes_ -= _all_common_encodes_single_char_ if __WRITE_CHARS_TXT: # write common compound characters of all encodes f = open('all.encodes.common.chars.txt', 'w') for ch in _all_common_encodes_: ch = ch.encode('utf-8') for encode_keys in _all_encodes_.values(): if ch in encode_keys: uni = encode_keys[ch] break # end of if ch in encode_keys: # end of for encode_keys in _all_encodes_.values(): f.write(ch + ' => ' + uni + '\n') # end of for ch in 
_all_common_encodes_: f.close() # write unique compound characters of all encodes for encode_name, encode_keys in _all_unique_encodes_: f = open(encode_name + '.unique.chars.txt', 'w') for ch in encode_keys: ch = ch.encode('utf-8') uni = _all_encodes_[encode_name][ch] f.write(ch + ' => ' + uni + '\n') # end of for ch in encode_keys: f.close() # end of for encode_name, encode_keys in _all_unique_encodes_: # end of if __WRITE_CHARS_TXT: return (_all_unique_encodes_, _all_common_encodes_)
python
def _get_unique_common_encodes(): """ This function will return both unique_encodes and common_encodes as tuple. unique_encodes : In dictionary with encodes name as key and its corresponding encode's unique characters among other available encodes. common_encodes : In set type which has all common encode compound characters from all available encodes. i.e. removed common encode single characters Author : Arulalan.T 04.08.2014 """ _all_unique_encodes_ = [] _all_unicode_encodes_ = {} _all_common_encodes_ = set([]) _all_common_encodes_single_char_ = set([]) for name, encode in _all_encodes_.items(): encode_utf8 = set([PYTHON3 and ch or ch.decode( 'utf-8') for ch in encode.keys()]) _all_unicode_encodes_[name] = encode_utf8 _all_unique_encodes_full_ =_all_unicode_encodes_.copy() for supname, super_encode in _all_unicode_encodes_.items(): for subname, sub_encode in _all_unicode_encodes_.items(): if supname == subname: continue # get unique of super_encode among other encodings super_encode = super_encode - sub_encode # get common for all over encodings common = _all_unique_encodes_full_[supname] - super_encode # merge common to all encodings common _all_common_encodes_ = _all_common_encodes_.union(common) # store super_encode's unique keys with its name _all_unique_encodes_.append((supname, super_encode)) for ch in _all_common_encodes_: # collect single common chars if len(ch) == 1: _all_common_encodes_single_char_.add(ch) # end of for ch in _all_common_encodes_: # remove single common char from compound common chars _all_common_encodes_ -= _all_common_encodes_single_char_ if __WRITE_CHARS_TXT: # write common compound characters of all encodes f = open('all.encodes.common.chars.txt', 'w') for ch in _all_common_encodes_: ch = ch.encode('utf-8') for encode_keys in _all_encodes_.values(): if ch in encode_keys: uni = encode_keys[ch] break # end of if ch in encode_keys: # end of for encode_keys in _all_encodes_.values(): f.write(ch + ' => ' + uni + '\n') # end of for ch in 
_all_common_encodes_: f.close() # write unique compound characters of all encodes for encode_name, encode_keys in _all_unique_encodes_: f = open(encode_name + '.unique.chars.txt', 'w') for ch in encode_keys: ch = ch.encode('utf-8') uni = _all_encodes_[encode_name][ch] f.write(ch + ' => ' + uni + '\n') # end of for ch in encode_keys: f.close() # end of for encode_name, encode_keys in _all_unique_encodes_: # end of if __WRITE_CHARS_TXT: return (_all_unique_encodes_, _all_common_encodes_)
This function will return both unique_encodes and common_encodes as tuple. unique_encodes : In dictionary with encodes name as key and its corresponding encode's unique characters among other available encodes. common_encodes : In set type which has all common encode compound characters from all available encodes. i.e. removed common encode single characters Author : Arulalan.T 04.08.2014
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/txt2unicode/encode2unicode.py#L216-L288
Ezhil-Language-Foundation/open-tamil
tamil/txt2unicode/encode2unicode.py
auto2unicode
def auto2unicode(text): """ This function tries to identify encode in available encodings. If it finds, then it will convert text into unicode string. Author : Arulalan.T 04.08.2014 """ _all_unique_encodes_, _all_common_encodes_ = _get_unique_common_encodes() # get unique word which falls under any one of available encodes from # user passed text lines unique_chars = _get_unique_ch(text, _all_common_encodes_) # count common encode chars clen = len(_all_common_encodes_) msg = "Sorry, couldn't find encode :-(\n" msg += 'Need more words to find unique encode out side of %d ' % clen msg += 'common compound characters' if not unique_chars: print(msg) return '' # end of if not unique_chars: for encode_name, encode_keys in _all_unique_encodes_: if not len(encode_keys): continue for ch in encode_keys: # check either encode char is presnent in word if ch in unique_chars: # found encode print(("Found encode : ", encode_name)) encode = _all_encodes_[encode_name] return encode2unicode(text, encode) # end of if ch in unique_chars: # end of ifor ch in encode_keys: else: print(msg) return ''
python
def auto2unicode(text): """ This function tries to identify encode in available encodings. If it finds, then it will convert text into unicode string. Author : Arulalan.T 04.08.2014 """ _all_unique_encodes_, _all_common_encodes_ = _get_unique_common_encodes() # get unique word which falls under any one of available encodes from # user passed text lines unique_chars = _get_unique_ch(text, _all_common_encodes_) # count common encode chars clen = len(_all_common_encodes_) msg = "Sorry, couldn't find encode :-(\n" msg += 'Need more words to find unique encode out side of %d ' % clen msg += 'common compound characters' if not unique_chars: print(msg) return '' # end of if not unique_chars: for encode_name, encode_keys in _all_unique_encodes_: if not len(encode_keys): continue for ch in encode_keys: # check either encode char is presnent in word if ch in unique_chars: # found encode print(("Found encode : ", encode_name)) encode = _all_encodes_[encode_name] return encode2unicode(text, encode) # end of if ch in unique_chars: # end of ifor ch in encode_keys: else: print(msg) return ''
This function tries to identify encode in available encodings. If it finds, then it will convert text into unicode string. Author : Arulalan.T 04.08.2014
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/txt2unicode/encode2unicode.py#L292-L330
Ezhil-Language-Foundation/open-tamil
ngram/LetterModels.py
Unigram.frequency_model
def frequency_model( self ): """ build a letter frequency model for Tamil letters from a corpus """ # use a generator in corpus for next_letter in self.corpus.next_tamil_letter(): # update frequency from corpus self.letter[next_letter] = self.letter[next_letter] + 1
python
def frequency_model( self ): """ build a letter frequency model for Tamil letters from a corpus """ # use a generator in corpus for next_letter in self.corpus.next_tamil_letter(): # update frequency from corpus self.letter[next_letter] = self.letter[next_letter] + 1
build a letter frequency model for Tamil letters from a corpus
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/ngram/LetterModels.py#L43-L48
Ezhil-Language-Foundation/open-tamil
ngram/LetterModels.py
Bigram.language_model
def language_model(self,verbose=True): """ builds a Tamil bigram letter model """ # use a generator in corpus prev = None for next_letter in self.corpus.next_tamil_letter(): # update frequency from corpus if prev: self.letter2[prev][next_letter] += 1 if ( verbose ) : print(prev) print(next_letter) print( self.letter2[prev][next_letter] ) prev = next_letter #update always return
python
def language_model(self,verbose=True): """ builds a Tamil bigram letter model """ # use a generator in corpus prev = None for next_letter in self.corpus.next_tamil_letter(): # update frequency from corpus if prev: self.letter2[prev][next_letter] += 1 if ( verbose ) : print(prev) print(next_letter) print( self.letter2[prev][next_letter] ) prev = next_letter #update always return
builds a Tamil bigram letter model
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/ngram/LetterModels.py#L65-L78
Ezhil-Language-Foundation/open-tamil
ngram/LetterModels.py
Trigram.language_model
def language_model(self,verbose=True): """ builds a Tamil bigram letter model """ # use a generator in corpus p2 = None p1 = None for next_letter in self.corpus.next_tamil_letter(): # update frequency from corpus if p2: trig = p2+p1+next_letter self.letter3[trig] = 1 + self.letter3.get(trig,0) p2 = p1 p1 = next_letter #update always return
python
def language_model(self,verbose=True): """ builds a Tamil bigram letter model """ # use a generator in corpus p2 = None p1 = None for next_letter in self.corpus.next_tamil_letter(): # update frequency from corpus if p2: trig = p2+p1+next_letter self.letter3[trig] = 1 + self.letter3.get(trig,0) p2 = p1 p1 = next_letter #update always return
builds a Tamil bigram letter model
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/ngram/LetterModels.py#L97-L109
Ezhil-Language-Foundation/open-tamil
tamil/numeral.py
num2tamilstr
def num2tamilstr( *args ): """ work till one lakh crore - i.e 1e5*1e7 = 1e12. turn number into a numeral, Indian style. Fractions upto 1e-30""" number = args[0] if len(args) < 2: filenames = [] else: filenames = args[1] if len(args) ==3: tensSpecial = args[2] else: tensSpecial='BASIC' if not any( filter( lambda T: isinstance( number, T), [str,unicode,int, long, float]) ) or isinstance(number,complex): raise Exception('num2tamilstr input has to be a long or integer or float') if float(number) > long(1e12): raise Exception('num2tamilstr input is too large') if float(number) < 0: return u"- "+num2tamilstr( -number ) units = (u'பூஜ்ஜியம்', u'ஒன்று', u'இரண்டு', u'மூன்று', u'நான்கு', u'ஐந்து', u'ஆறு', u'ஏழு', u'எட்டு', u'ஒன்பது', u'பத்து') # 0-10 units_suffix = (u'பூஜ்ஜியம்', u'தொன்று', u'திரண்டு', u'மூன்று', u'நான்கு', u'தைந்து', u'தாறு', u'தேழு', u'தெட்டு', u'தொன்பது', u'பத்து') # 0-10 units_suffix_nine = (u'பூஜ்ஜியம்', u'றொன்று', u'றிரண்டு', u'மூன்று', u'நான்கு', u'றைந்து', u'றாறு', u'றேழு', u'றெட்டு', u'றொன்பது', u'பத்து') # 0-10 tween = [1.0,2.0,5.0,6.0,7.0,8.0,9.0] teens = (u'பதினொன்று', u'பனிரண்டு', u'பதிமூன்று', u'பதினான்கு', u'பதினைந்து',u'பதினாறு', u'பதினேழு', u'பதினெட்டு', u'பத்தொன்பது') # 11-19 tens = (u'பத்து', u'இருபது', u'முப்பது', u'நாற்பது', u'ஐம்பது',u'அறுபது', u'எழுபது', u'எண்பது', u'தொன்னூறு') # 10-90 tens_full_prefix = (u'இருபத்து', u'முப்பத்து', u'நாற்பத்து', u'ஐம்பத்து', u'அறுபத்து', u'எழுபத்து', u'எண்பத்து', u'தொன்னூற்று') # 10+-90+ tens_prefix = (u'இருபத்', u'முப்பத்', u'நாற்பத்', u'ஐம்பத்', u'அறுபத்', u'எழுபத்', u'எண்பத்', u'தொன்னூற்') # 10+-90+ hundreds = ( u'நூறு', u'இருநூறு', u'முன்னூறு', u'நாநூறு',u'ஐநூறு', u'அறுநூறு', u'எழுநூறு', u'எண்ணூறு', u'தொள்ளாயிரம்') #100 - 900 hundreds_suffix = (u'நூற்றி', u'இருநூற்று', u'முன்னூற்று', u'நாநூற்று', u'ஐநூற்று', u'அறுநூற்று', u'எழுநூற்று', u'எண்ணூற்று',u'தொள்ளாயிரத்து') #100+ - 900+ one_thousand_prefix = u'ஓர்' thousands = (u'ஆயிரம்',u'ஆயிரத்து') one_prefix = u'ஒரு' lakh = (u'இலட்சம்',u'இலட்சத்து') 
crore = (u'கோடி',u'கோடியே') pulli = u'புள்ளி' n_one = 1.0 n_ten = 10.0 n_hundred = 100.0 n_thousand = 1000.0 n_lakh = 100.0*n_thousand n_crore = (100.0*n_lakh) # handle fractional parts if float(number) > 0.0 and float(number) < 1.0: rval = [] rval.append(pulli) filenames.append( 'pulli' ) number_str = str(number).replace('0.','') for digit in number_str: filenames.append( "units_%d"%int(digit)) rval.append( units[int(digit)] ) return u' '.join(rval) if isinstance(number,str) or isinstance(number,unicode): result = u"" number = number.strip() assert(len(args) == 1) assert(len(number) > 0) is_negative = number[0] == "-" if is_negative: number = number[1:] frac_part = u"" if number.find(".") >= 0: rat_part,frac_part = number.split(".") frac_part = num2tamilstr(u"0."+frac_part) else: rat_part = number if len(rat_part) > 0: result = num2tamilstr(float(rat_part)) result = result +u" "+ frac_part return is_negative and "-" + result.strip() or result.strip() suffix_base = { n_crore: crore, n_lakh : lakh, n_thousand : thousands} suffix_file_map = {n_crore: "crore", n_lakh : "lakh", n_thousand : "thousands"} file_map = {n_crore :["one_prefix","crore_0"], n_lakh : ["one_prefix","lakh_0"], n_thousand : ["one_thousand_prefix", "thousands_0"], n_hundred : ["hundreds_0"], #special n_ten : ["units_10"], n_one : ["units_1"]} num_map = {n_crore : [one_prefix,crore[0]], n_lakh : [one_prefix,lakh[0]], n_thousand : [one_thousand_prefix, thousands[0]], n_hundred : [hundreds[0]], #special n_ten : [units[10]], n_one : [units[1]]} all_bases = [n_crore, n_lakh, n_thousand, n_hundred, n_ten,n_one] allowed_bases = list(filter( lambda base: number >= base, all_bases )) if len(allowed_bases) >= 1: n_base = allowed_bases[0] if number == n_base: if tensSpecial=='BASIC': filenames.extend(file_map[n_base]) return u" ".join(num_map[n_base]) elif tensSpecial=='NINES': filenames.extend(file_map[n_base]) return units_suffix_nine[long(number%10)] else: filenames.extend(file_map[n_base]) return 
units_suffix[long(number%10)] quotient_number = long( number/n_base ) residue_number = number - n_base*quotient_number #print number, n_base, quotient_number, residue_number, tensSpecial if n_base == n_one: if isinstance(number,float): int_part = long(number%10) frac = number - float(int_part) filenames.append("units_%d"%int_part) if abs(frac) > 1e-30: if tensSpecial=='BASIC': return units[int_part]+u' ' + num2tamilstr(frac,filenames) elif tensSpecial=='NINES': return units_suffix_nine[int_part]+u' ' + num2tamilstr(frac,filenames) else: return units_suffix[int_part]+u' ' + num2tamilstr(frac,filenames) else: if tensSpecial=='BASIC': return units[int_part] elif tensSpecial=='NINES': return units_suffix_nine[int_part] else: return units_suffix[int_part] else: if tensSpecial=='BASIC': filenames.append("units_%d"%number) return units[number] elif tensSpecial=='NINES': filenames.append("units_%d"%number) return units_suffix_nine[number] else: filenames.append("units_%d"%number) return units_suffix[number] elif n_base == n_ten: if residue_number < 1.0: filenames.append("tens_%d"%(quotient_number-1)) if residue_number == 0.0: return tens[quotient_number-1] #else: //seems not reachable. 
# numeral = tens[quotient_number-1] elif number < 20: filenames.append("teens_%d"%(number-10)) residue_number = math.fmod(number,1) teen_number = int(math.floor(number - 10)) if residue_number > 1e-30: return teens[teen_number-1] +u' ' + num2tamilstr(residue_number,filenames) else: return teens[teen_number-1]+u' ' if residue_number < 1.0: filenames.append( "tens_%d"%(quotient_number-1) ) numeral = tens[quotient_number-1]+u' ' else: if residue_number in tween: filenames.append( "tens_prefix_%d"%(quotient_number-2) ) numeral = tens_prefix[quotient_number-2] tensSpecial='SPECIAL' if (quotient_number==9): tensSpecial = 'NINES' else: filenames.append( "tens_prefix_%d"%(quotient_number-2) ) numeral = tens_full_prefix[quotient_number-2]+u' ' elif n_base == n_hundred: if residue_number == 0: filenames.append("hundreds_%d"%(quotient_number-1)) return hundreds[quotient_number-1]+u' ' if residue_number < 1.0: filenames.append( "hundreds_%d"%(quotient_number-1) ) numeral = hundreds[quotient_number-1]+u' ' else: filenames.append("hundreds_suffix_%d"%(quotient_number-1)) numeral = hundreds_suffix[quotient_number-1]+u' ' else: if ( quotient_number == 1 ): if n_base == n_thousand: filenames.append("one_thousand_prefix") numeral = one_thousand_prefix else: filenames.append("one_prefix") numeral = one_prefix else: numeral = num2tamilstr( quotient_number, filenames ) if n_base >= n_thousand: suffix = suffix_base[n_base][long(residue_number >= 1)] suffix_filename = "%s_%d"%(suffix_file_map[n_base],long(residue_number >= 1)) if residue_number == 0: filenames.append(suffix_filename) return numeral + u' ' + suffix+u' ' filenames.append(suffix_filename) numeral = numeral + u' ' + suffix+u' ' residue_numeral = num2tamilstr( residue_number, filenames, tensSpecial) #return numeral+u' '+residue_numeral return numeral+residue_numeral # number has to be zero filenames.append("units_0") return units[0]
python
def num2tamilstr( *args ): """ work till one lakh crore - i.e 1e5*1e7 = 1e12. turn number into a numeral, Indian style. Fractions upto 1e-30""" number = args[0] if len(args) < 2: filenames = [] else: filenames = args[1] if len(args) ==3: tensSpecial = args[2] else: tensSpecial='BASIC' if not any( filter( lambda T: isinstance( number, T), [str,unicode,int, long, float]) ) or isinstance(number,complex): raise Exception('num2tamilstr input has to be a long or integer or float') if float(number) > long(1e12): raise Exception('num2tamilstr input is too large') if float(number) < 0: return u"- "+num2tamilstr( -number ) units = (u'பூஜ்ஜியம்', u'ஒன்று', u'இரண்டு', u'மூன்று', u'நான்கு', u'ஐந்து', u'ஆறு', u'ஏழு', u'எட்டு', u'ஒன்பது', u'பத்து') # 0-10 units_suffix = (u'பூஜ்ஜியம்', u'தொன்று', u'திரண்டு', u'மூன்று', u'நான்கு', u'தைந்து', u'தாறு', u'தேழு', u'தெட்டு', u'தொன்பது', u'பத்து') # 0-10 units_suffix_nine = (u'பூஜ்ஜியம்', u'றொன்று', u'றிரண்டு', u'மூன்று', u'நான்கு', u'றைந்து', u'றாறு', u'றேழு', u'றெட்டு', u'றொன்பது', u'பத்து') # 0-10 tween = [1.0,2.0,5.0,6.0,7.0,8.0,9.0] teens = (u'பதினொன்று', u'பனிரண்டு', u'பதிமூன்று', u'பதினான்கு', u'பதினைந்து',u'பதினாறு', u'பதினேழு', u'பதினெட்டு', u'பத்தொன்பது') # 11-19 tens = (u'பத்து', u'இருபது', u'முப்பது', u'நாற்பது', u'ஐம்பது',u'அறுபது', u'எழுபது', u'எண்பது', u'தொன்னூறு') # 10-90 tens_full_prefix = (u'இருபத்து', u'முப்பத்து', u'நாற்பத்து', u'ஐம்பத்து', u'அறுபத்து', u'எழுபத்து', u'எண்பத்து', u'தொன்னூற்று') # 10+-90+ tens_prefix = (u'இருபத்', u'முப்பத்', u'நாற்பத்', u'ஐம்பத்', u'அறுபத்', u'எழுபத்', u'எண்பத்', u'தொன்னூற்') # 10+-90+ hundreds = ( u'நூறு', u'இருநூறு', u'முன்னூறு', u'நாநூறு',u'ஐநூறு', u'அறுநூறு', u'எழுநூறு', u'எண்ணூறு', u'தொள்ளாயிரம்') #100 - 900 hundreds_suffix = (u'நூற்றி', u'இருநூற்று', u'முன்னூற்று', u'நாநூற்று', u'ஐநூற்று', u'அறுநூற்று', u'எழுநூற்று', u'எண்ணூற்று',u'தொள்ளாயிரத்து') #100+ - 900+ one_thousand_prefix = u'ஓர்' thousands = (u'ஆயிரம்',u'ஆயிரத்து') one_prefix = u'ஒரு' lakh = (u'இலட்சம்',u'இலட்சத்து') 
crore = (u'கோடி',u'கோடியே') pulli = u'புள்ளி' n_one = 1.0 n_ten = 10.0 n_hundred = 100.0 n_thousand = 1000.0 n_lakh = 100.0*n_thousand n_crore = (100.0*n_lakh) # handle fractional parts if float(number) > 0.0 and float(number) < 1.0: rval = [] rval.append(pulli) filenames.append( 'pulli' ) number_str = str(number).replace('0.','') for digit in number_str: filenames.append( "units_%d"%int(digit)) rval.append( units[int(digit)] ) return u' '.join(rval) if isinstance(number,str) or isinstance(number,unicode): result = u"" number = number.strip() assert(len(args) == 1) assert(len(number) > 0) is_negative = number[0] == "-" if is_negative: number = number[1:] frac_part = u"" if number.find(".") >= 0: rat_part,frac_part = number.split(".") frac_part = num2tamilstr(u"0."+frac_part) else: rat_part = number if len(rat_part) > 0: result = num2tamilstr(float(rat_part)) result = result +u" "+ frac_part return is_negative and "-" + result.strip() or result.strip() suffix_base = { n_crore: crore, n_lakh : lakh, n_thousand : thousands} suffix_file_map = {n_crore: "crore", n_lakh : "lakh", n_thousand : "thousands"} file_map = {n_crore :["one_prefix","crore_0"], n_lakh : ["one_prefix","lakh_0"], n_thousand : ["one_thousand_prefix", "thousands_0"], n_hundred : ["hundreds_0"], #special n_ten : ["units_10"], n_one : ["units_1"]} num_map = {n_crore : [one_prefix,crore[0]], n_lakh : [one_prefix,lakh[0]], n_thousand : [one_thousand_prefix, thousands[0]], n_hundred : [hundreds[0]], #special n_ten : [units[10]], n_one : [units[1]]} all_bases = [n_crore, n_lakh, n_thousand, n_hundred, n_ten,n_one] allowed_bases = list(filter( lambda base: number >= base, all_bases )) if len(allowed_bases) >= 1: n_base = allowed_bases[0] if number == n_base: if tensSpecial=='BASIC': filenames.extend(file_map[n_base]) return u" ".join(num_map[n_base]) elif tensSpecial=='NINES': filenames.extend(file_map[n_base]) return units_suffix_nine[long(number%10)] else: filenames.extend(file_map[n_base]) return 
units_suffix[long(number%10)] quotient_number = long( number/n_base ) residue_number = number - n_base*quotient_number #print number, n_base, quotient_number, residue_number, tensSpecial if n_base == n_one: if isinstance(number,float): int_part = long(number%10) frac = number - float(int_part) filenames.append("units_%d"%int_part) if abs(frac) > 1e-30: if tensSpecial=='BASIC': return units[int_part]+u' ' + num2tamilstr(frac,filenames) elif tensSpecial=='NINES': return units_suffix_nine[int_part]+u' ' + num2tamilstr(frac,filenames) else: return units_suffix[int_part]+u' ' + num2tamilstr(frac,filenames) else: if tensSpecial=='BASIC': return units[int_part] elif tensSpecial=='NINES': return units_suffix_nine[int_part] else: return units_suffix[int_part] else: if tensSpecial=='BASIC': filenames.append("units_%d"%number) return units[number] elif tensSpecial=='NINES': filenames.append("units_%d"%number) return units_suffix_nine[number] else: filenames.append("units_%d"%number) return units_suffix[number] elif n_base == n_ten: if residue_number < 1.0: filenames.append("tens_%d"%(quotient_number-1)) if residue_number == 0.0: return tens[quotient_number-1] #else: //seems not reachable. 
# numeral = tens[quotient_number-1] elif number < 20: filenames.append("teens_%d"%(number-10)) residue_number = math.fmod(number,1) teen_number = int(math.floor(number - 10)) if residue_number > 1e-30: return teens[teen_number-1] +u' ' + num2tamilstr(residue_number,filenames) else: return teens[teen_number-1]+u' ' if residue_number < 1.0: filenames.append( "tens_%d"%(quotient_number-1) ) numeral = tens[quotient_number-1]+u' ' else: if residue_number in tween: filenames.append( "tens_prefix_%d"%(quotient_number-2) ) numeral = tens_prefix[quotient_number-2] tensSpecial='SPECIAL' if (quotient_number==9): tensSpecial = 'NINES' else: filenames.append( "tens_prefix_%d"%(quotient_number-2) ) numeral = tens_full_prefix[quotient_number-2]+u' ' elif n_base == n_hundred: if residue_number == 0: filenames.append("hundreds_%d"%(quotient_number-1)) return hundreds[quotient_number-1]+u' ' if residue_number < 1.0: filenames.append( "hundreds_%d"%(quotient_number-1) ) numeral = hundreds[quotient_number-1]+u' ' else: filenames.append("hundreds_suffix_%d"%(quotient_number-1)) numeral = hundreds_suffix[quotient_number-1]+u' ' else: if ( quotient_number == 1 ): if n_base == n_thousand: filenames.append("one_thousand_prefix") numeral = one_thousand_prefix else: filenames.append("one_prefix") numeral = one_prefix else: numeral = num2tamilstr( quotient_number, filenames ) if n_base >= n_thousand: suffix = suffix_base[n_base][long(residue_number >= 1)] suffix_filename = "%s_%d"%(suffix_file_map[n_base],long(residue_number >= 1)) if residue_number == 0: filenames.append(suffix_filename) return numeral + u' ' + suffix+u' ' filenames.append(suffix_filename) numeral = numeral + u' ' + suffix+u' ' residue_numeral = num2tamilstr( residue_number, filenames, tensSpecial) #return numeral+u' '+residue_numeral return numeral+residue_numeral # number has to be zero filenames.append("units_0") return units[0]
work till one lakh crore - i.e 1e5*1e7 = 1e12. turn number into a numeral, Indian style. Fractions upto 1e-30
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/numeral.py#L14-L214
Ezhil-Language-Foundation/open-tamil
tamil/numeral.py
num2tamilstr_american
def num2tamilstr_american( *args ): number = args[0] """ work till 1000 trillion - 1 - i.e = 1e12*1e3 - 1. turn number into a numeral, American style. Fractions upto 1e-30. """ if not any( filter( lambda T: isinstance( number, T), [int, str, unicode, long, float]) ) or isinstance(number,complex): raise Exception('num2tamilstr_american input has to be long or integer') if float(number) >= long(1e15): raise Exception('num2tamilstr input is too large') if float(number) < 0: return u"- "+num2tamilstr_american( -float(number) ) units = (u'பூஜ்ஜியம்', u'ஒன்று', u'இரண்டு', u'மூன்று', u'நான்கு', u'ஐந்து', u'ஆறு', u'ஏழு', u'எட்டு', u'ஒன்பது', u'பத்து') # 0-10 hundreds = ( u'நூறு', u'இருநூறு', u'முன்னூறு', u'நாநூறு',u'ஐநூறு', u'அறுநூறு', u'எழுநூறு', u'எண்ணூறு', u'தொள்ளாயிரம்') #100 - 900 one_thousand_prefix = u'ஓர்' thousands = (u'ஆயிரம்',u'ஆயிரத்து') one_prefix = u'ஒரு' mil = u'மில்லியன்' million = (mil,mil) bil = u'பில்லியன்' billion = (bil,bil) tril = u'டிரில்லியன்' trillion = (tril,tril) n_one = 1 n_ten = 10 n_hundred = 100 n_thousand = 1000 n_million = 1000*n_thousand n_billion = long(1000*n_million) n_trillion = long(1000*n_billion) suffix_base = { n_trillion: trillion, n_billion : billion, n_million : million, n_thousand : thousands} num_map = {n_trillion : [one_prefix,trillion[0]], n_billion : [one_prefix,billion[0]], n_million : [one_prefix,million[0]], n_thousand : [one_thousand_prefix, thousands[0]], n_hundred : [hundreds[0]], #special n_ten : [units[10]], n_one : [units[1]]} all_bases = [n_trillion,n_billion, n_million, n_thousand, n_hundred, n_ten,n_one] allowed_bases = list(filter( lambda base: float(number) >= base, all_bases )) # handle fractional parts if float(number) > 0.0 and float(number) <= 1000.0: return num2tamilstr(number) if isinstance(number,str) or isinstance(number,unicode): result = u"" number = number.strip() assert(len(args) == 1) assert(len(number) > 0) is_negative = number[0] == "-" if is_negative: number = number[1:] frac_part = u"" if 
number.find(".") >= 0: rat_part,frac_part = number.split(".") frac_part = num2tamilstr_american(u"0."+frac_part) else: rat_part = number if len(rat_part) > 0: result = num2tamilstr_american(float(rat_part)) result = result +u" "+ frac_part return result.strip() if len(allowed_bases) >= 1: n_base = allowed_bases[0] if number == n_base: return u" ".join(num_map[n_base]) quotient_number = long( number/n_base ) residue_number = number - n_base*quotient_number if n_base < n_thousand: raise Exception("This can never happen") else: if ( quotient_number == 1 ): if n_base == n_thousand: numeral = one_thousand_prefix+u' ' else: numeral = one_prefix+u' ' else: numeral = num2tamilstr( quotient_number ) if n_base >= n_thousand: suffix = suffix_base[n_base][long(residue_number >= 1)] if residue_number == 0: return numeral + u' ' + suffix numeral = numeral + u' ' + suffix residue_numeral = num2tamilstr_american( residue_number ) return numeral+u' '+residue_numeral # number has to be zero return units[0]
python
def num2tamilstr_american( *args ): number = args[0] """ work till 1000 trillion - 1 - i.e = 1e12*1e3 - 1. turn number into a numeral, American style. Fractions upto 1e-30. """ if not any( filter( lambda T: isinstance( number, T), [int, str, unicode, long, float]) ) or isinstance(number,complex): raise Exception('num2tamilstr_american input has to be long or integer') if float(number) >= long(1e15): raise Exception('num2tamilstr input is too large') if float(number) < 0: return u"- "+num2tamilstr_american( -float(number) ) units = (u'பூஜ்ஜியம்', u'ஒன்று', u'இரண்டு', u'மூன்று', u'நான்கு', u'ஐந்து', u'ஆறு', u'ஏழு', u'எட்டு', u'ஒன்பது', u'பத்து') # 0-10 hundreds = ( u'நூறு', u'இருநூறு', u'முன்னூறு', u'நாநூறு',u'ஐநூறு', u'அறுநூறு', u'எழுநூறு', u'எண்ணூறு', u'தொள்ளாயிரம்') #100 - 900 one_thousand_prefix = u'ஓர்' thousands = (u'ஆயிரம்',u'ஆயிரத்து') one_prefix = u'ஒரு' mil = u'மில்லியன்' million = (mil,mil) bil = u'பில்லியன்' billion = (bil,bil) tril = u'டிரில்லியன்' trillion = (tril,tril) n_one = 1 n_ten = 10 n_hundred = 100 n_thousand = 1000 n_million = 1000*n_thousand n_billion = long(1000*n_million) n_trillion = long(1000*n_billion) suffix_base = { n_trillion: trillion, n_billion : billion, n_million : million, n_thousand : thousands} num_map = {n_trillion : [one_prefix,trillion[0]], n_billion : [one_prefix,billion[0]], n_million : [one_prefix,million[0]], n_thousand : [one_thousand_prefix, thousands[0]], n_hundred : [hundreds[0]], #special n_ten : [units[10]], n_one : [units[1]]} all_bases = [n_trillion,n_billion, n_million, n_thousand, n_hundred, n_ten,n_one] allowed_bases = list(filter( lambda base: float(number) >= base, all_bases )) # handle fractional parts if float(number) > 0.0 and float(number) <= 1000.0: return num2tamilstr(number) if isinstance(number,str) or isinstance(number,unicode): result = u"" number = number.strip() assert(len(args) == 1) assert(len(number) > 0) is_negative = number[0] == "-" if is_negative: number = number[1:] frac_part = u"" if 
number.find(".") >= 0: rat_part,frac_part = number.split(".") frac_part = num2tamilstr_american(u"0."+frac_part) else: rat_part = number if len(rat_part) > 0: result = num2tamilstr_american(float(rat_part)) result = result +u" "+ frac_part return result.strip() if len(allowed_bases) >= 1: n_base = allowed_bases[0] if number == n_base: return u" ".join(num_map[n_base]) quotient_number = long( number/n_base ) residue_number = number - n_base*quotient_number if n_base < n_thousand: raise Exception("This can never happen") else: if ( quotient_number == 1 ): if n_base == n_thousand: numeral = one_thousand_prefix+u' ' else: numeral = one_prefix+u' ' else: numeral = num2tamilstr( quotient_number ) if n_base >= n_thousand: suffix = suffix_base[n_base][long(residue_number >= 1)] if residue_number == 0: return numeral + u' ' + suffix numeral = numeral + u' ' + suffix residue_numeral = num2tamilstr_american( residue_number ) return numeral+u' '+residue_numeral # number has to be zero return units[0]
work till 1000 trillion - 1 - i.e = 1e12*1e3 - 1. turn number into a numeral, American style. Fractions upto 1e-30.
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/numeral.py#L216-L317
Ezhil-Language-Foundation/open-tamil
examples/solpattiyal.py
WordFrequency.get_tamil_words_iterable
def get_tamil_words_iterable( letters ): """ given a list of UTF-8 letters section them into words, grouping them at spaces """ #punctuations = u'-,+,/,*,>,<,_,],[,{,},(,)'.split(',')+[','] #isspace_or_tamil = lambda x: not x in punctuations and tamil.utf8.istamil(x) # correct algorithm for get-tamil-words buf = [] for idx,let in enumerate(letters): if tamil.utf8.istamil( let ): buf.append( let ) else: if len(buf) > 0: yield u"".join( buf ) buf = [] if len(buf) > 0: yield u"".join(buf)
python
def get_tamil_words_iterable( letters ): """ given a list of UTF-8 letters section them into words, grouping them at spaces """ #punctuations = u'-,+,/,*,>,<,_,],[,{,},(,)'.split(',')+[','] #isspace_or_tamil = lambda x: not x in punctuations and tamil.utf8.istamil(x) # correct algorithm for get-tamil-words buf = [] for idx,let in enumerate(letters): if tamil.utf8.istamil( let ): buf.append( let ) else: if len(buf) > 0: yield u"".join( buf ) buf = [] if len(buf) > 0: yield u"".join(buf)
given a list of UTF-8 letters section them into words, grouping them at spaces
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/examples/solpattiyal.py#L30-L45
wdecoster/nanocomp
nanocomp/NanoComp.py
main
def main(): ''' Organization function -setups logging -gets inputdata -calls plotting function ''' args = get_args() try: utils.make_output_dir(args.outdir) utils.init_logs(args, tool="NanoComp") args.format = nanoplotter.check_valid_format(args.format) settings = vars(args) settings["path"] = path.join(args.outdir, args.prefix) sources = [args.fastq, args.bam, args.summary, args.fasta] sourcename = ["fastq", "bam", "summary", "fasta"] if args.split_runs: split_dict = validate_split_runs_file(args.split_runs) datadf = nanoget.get_input( source=[n for n, s in zip(sourcename, sources) if s][0], files=[f for f in sources if f][0], threads=args.threads, readtype=args.readtype, names=args.names, barcoded=args.barcoded, combine="track") datadf, settings = filter_and_transform_data(datadf, vars(args)) if args.raw: datadf.to_csv("NanoComp-data.tsv.gz", sep="\t", index=False, compression="gzip") if args.store: pickle.dump( obj=datadf, file=open(settings["path"] + "NanoComp-data.pickle", 'wb')) if args.split_runs: change_identifiers(datadf, split_dict) if args.barcoded: datadf["dataset"] = datadf["barcode"] identifiers = list(datadf["dataset"].unique()) write_stats( datadfs=[datadf[datadf["dataset"] == i] for i in identifiers], outputfile=settings["path"] + "NanoStats.txt", names=identifiers) if args.plot != 'false': plots = make_plots(datadf, settings) make_report(plots, path.join(args.outdir, args.prefix)) logging.info("Succesfully processed all input.") except Exception as e: logging.error(e, exc_info=True) raise
python
def main(): ''' Organization function -setups logging -gets inputdata -calls plotting function ''' args = get_args() try: utils.make_output_dir(args.outdir) utils.init_logs(args, tool="NanoComp") args.format = nanoplotter.check_valid_format(args.format) settings = vars(args) settings["path"] = path.join(args.outdir, args.prefix) sources = [args.fastq, args.bam, args.summary, args.fasta] sourcename = ["fastq", "bam", "summary", "fasta"] if args.split_runs: split_dict = validate_split_runs_file(args.split_runs) datadf = nanoget.get_input( source=[n for n, s in zip(sourcename, sources) if s][0], files=[f for f in sources if f][0], threads=args.threads, readtype=args.readtype, names=args.names, barcoded=args.barcoded, combine="track") datadf, settings = filter_and_transform_data(datadf, vars(args)) if args.raw: datadf.to_csv("NanoComp-data.tsv.gz", sep="\t", index=False, compression="gzip") if args.store: pickle.dump( obj=datadf, file=open(settings["path"] + "NanoComp-data.pickle", 'wb')) if args.split_runs: change_identifiers(datadf, split_dict) if args.barcoded: datadf["dataset"] = datadf["barcode"] identifiers = list(datadf["dataset"].unique()) write_stats( datadfs=[datadf[datadf["dataset"] == i] for i in identifiers], outputfile=settings["path"] + "NanoStats.txt", names=identifiers) if args.plot != 'false': plots = make_plots(datadf, settings) make_report(plots, path.join(args.outdir, args.prefix)) logging.info("Succesfully processed all input.") except Exception as e: logging.error(e, exc_info=True) raise
Organization function -setups logging -gets inputdata -calls plotting function
https://github.com/wdecoster/nanocomp/blob/0533f258201263858ac0467da37c855880560d2d/nanocomp/NanoComp.py#L16-L64
wdecoster/nanocomp
nanocomp/NanoComp.py
validate_split_runs_file
def validate_split_runs_file(split_runs_file): """Check if structure of file is as expected and return dictionary linking names to run_IDs.""" try: content = [l.strip() for l in split_runs_file.readlines()] if content[0].upper().split('\t') == ['NAME', 'RUN_ID']: return {c.split('\t')[1]: c.split('\t')[0] for c in content[1:] if c} else: sys.exit("ERROR: Mandatory header of --split_runs tsv file not found: 'NAME', 'RUN_ID'") logging.error("Mandatory header of --split_runs tsv file not found: 'NAME', 'RUN_ID'") except IndexError: sys.exit("ERROR: Format of --split_runs tab separated file not as expected") logging.error("ERROR: Format of --split_runs tab separated file not as expected")
python
def validate_split_runs_file(split_runs_file): """Check if structure of file is as expected and return dictionary linking names to run_IDs.""" try: content = [l.strip() for l in split_runs_file.readlines()] if content[0].upper().split('\t') == ['NAME', 'RUN_ID']: return {c.split('\t')[1]: c.split('\t')[0] for c in content[1:] if c} else: sys.exit("ERROR: Mandatory header of --split_runs tsv file not found: 'NAME', 'RUN_ID'") logging.error("Mandatory header of --split_runs tsv file not found: 'NAME', 'RUN_ID'") except IndexError: sys.exit("ERROR: Format of --split_runs tab separated file not as expected") logging.error("ERROR: Format of --split_runs tab separated file not as expected")
Check if structure of file is as expected and return dictionary linking names to run_IDs.
https://github.com/wdecoster/nanocomp/blob/0533f258201263858ac0467da37c855880560d2d/nanocomp/NanoComp.py#L195-L206
wdecoster/nanocomp
nanocomp/NanoComp.py
change_identifiers
def change_identifiers(datadf, split_dict): """Change the dataset identifiers based on the names in the dictionary.""" for rid, name in split_dict.items(): datadf.loc[datadf["runIDs"] == rid, "dataset"] = name
python
def change_identifiers(datadf, split_dict): """Change the dataset identifiers based on the names in the dictionary.""" for rid, name in split_dict.items(): datadf.loc[datadf["runIDs"] == rid, "dataset"] = name
Change the dataset identifiers based on the names in the dictionary.
https://github.com/wdecoster/nanocomp/blob/0533f258201263858ac0467da37c855880560d2d/nanocomp/NanoComp.py#L209-L212
wdecoster/nanocomp
nanocomp/NanoComp.py
make_report
def make_report(plots, path): ''' Creates a fat html report based on the previously created files plots is a list of Plot objects defined by a path and title statsfile is the file to which the stats have been saved, which is parsed to a table (rather dodgy) ''' logging.info("Writing html report.") html_head = """<!DOCTYPE html> <html> <head> <meta charset="UTF-8"> <style> table, th, td { text-align: left; padding: 2px; /* border: 1px solid black; border-collapse: collapse; */ } h2 { line-height: 0pt; } </style> <title>NanoComp Report</title> </head>""" html_content = ["\n<body>\n<h1>NanoComp report</h1>"] html_content.append("<h2>Summary statistics</h2>") html_content.append(utils.stats2html(path + "NanoStats.txt")) html_content.append('\n<br>\n<br>\n<br>\n<br>') html_content.append("<h2>Plots</h2>") for plot in plots: html_content.append("\n<h3>" + plot.title + "</h3>\n" + plot.encode()) html_content.append('\n<br>\n<br>\n<br>\n<br>') html_body = '\n'.join(html_content) + "</body></html>" html_str = html_head + html_body with open(path + "NanoComp-report.html", "w") as html_file: html_file.write(html_str) return path + "NanoComp-report.html"
python
def make_report(plots, path): ''' Creates a fat html report based on the previously created files plots is a list of Plot objects defined by a path and title statsfile is the file to which the stats have been saved, which is parsed to a table (rather dodgy) ''' logging.info("Writing html report.") html_head = """<!DOCTYPE html> <html> <head> <meta charset="UTF-8"> <style> table, th, td { text-align: left; padding: 2px; /* border: 1px solid black; border-collapse: collapse; */ } h2 { line-height: 0pt; } </style> <title>NanoComp Report</title> </head>""" html_content = ["\n<body>\n<h1>NanoComp report</h1>"] html_content.append("<h2>Summary statistics</h2>") html_content.append(utils.stats2html(path + "NanoStats.txt")) html_content.append('\n<br>\n<br>\n<br>\n<br>') html_content.append("<h2>Plots</h2>") for plot in plots: html_content.append("\n<h3>" + plot.title + "</h3>\n" + plot.encode()) html_content.append('\n<br>\n<br>\n<br>\n<br>') html_body = '\n'.join(html_content) + "</body></html>" html_str = html_head + html_body with open(path + "NanoComp-report.html", "w") as html_file: html_file.write(html_str) return path + "NanoComp-report.html"
Creates a fat html report based on the previously created files plots is a list of Plot objects defined by a path and title statsfile is the file to which the stats have been saved, which is parsed to a table (rather dodgy)
https://github.com/wdecoster/nanocomp/blob/0533f258201263858ac0467da37c855880560d2d/nanocomp/NanoComp.py#L309-L346
six8/pytailer
src/tailer/__init__.py
Tailer.follow
def follow(self, delay=1.0): """\ Iterator generator that returns lines as data is added to the file. Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035 """ trailing = True while 1: where = self.file.tell() line = self.file.readline() if line: if trailing and line in self.line_terminators: # This is just the line terminator added to the end of the file # before a new line, ignore. trailing = False continue if line[-1] in self.line_terminators: line = line[:-1] if line[-1:] == '\r\n' and '\r\n' in self.line_terminators: # found crlf line = line[:-1] trailing = False yield line else: trailing = True self.seek(where) time.sleep(delay)
python
def follow(self, delay=1.0): """\ Iterator generator that returns lines as data is added to the file. Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035 """ trailing = True while 1: where = self.file.tell() line = self.file.readline() if line: if trailing and line in self.line_terminators: # This is just the line terminator added to the end of the file # before a new line, ignore. trailing = False continue if line[-1] in self.line_terminators: line = line[:-1] if line[-1:] == '\r\n' and '\r\n' in self.line_terminators: # found crlf line = line[:-1] trailing = False yield line else: trailing = True self.seek(where) time.sleep(delay)
\ Iterator generator that returns lines as data is added to the file. Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035
https://github.com/six8/pytailer/blob/8f78431b9d2e63077d7f7150264869506c890024/src/tailer/__init__.py#L153-L182
yodle/docker-registry-client
docker_registry_client/_BaseClient.py
CommonBaseClient._http_response
def _http_response(self, url, method, data=None, **kwargs): """url -> full target url method -> method from requests data -> request body kwargs -> url formatting args """ header = {'content-type': 'application/json'} if data: data = json.dumps(data) path = url.format(**kwargs) logger.debug("%s %s", method.__name__.upper(), path) response = method(self.host + path, data=data, headers=header, **self.method_kwargs) logger.debug("%s %s", response.status_code, response.reason) response.raise_for_status() return response
python
def _http_response(self, url, method, data=None, **kwargs): """url -> full target url method -> method from requests data -> request body kwargs -> url formatting args """ header = {'content-type': 'application/json'} if data: data = json.dumps(data) path = url.format(**kwargs) logger.debug("%s %s", method.__name__.upper(), path) response = method(self.host + path, data=data, headers=header, **self.method_kwargs) logger.debug("%s %s", response.status_code, response.reason) response.raise_for_status() return response
url -> full target url method -> method from requests data -> request body kwargs -> url formatting args
https://github.com/yodle/docker-registry-client/blob/8abf6b0200a68bed986f698dcbf02d444257b75c/docker_registry_client/_BaseClient.py#L30-L47
yodle/docker-registry-client
docker_registry_client/_BaseClient.py
CommonBaseClient._http_call
def _http_call(self, url, method, data=None, **kwargs): """url -> full target url method -> method from requests data -> request body kwargs -> url formatting args """ response = self._http_response(url, method, data=data, **kwargs) if not response.content: return {} return response.json()
python
def _http_call(self, url, method, data=None, **kwargs): """url -> full target url method -> method from requests data -> request body kwargs -> url formatting args """ response = self._http_response(url, method, data=data, **kwargs) if not response.content: return {} return response.json()
url -> full target url method -> method from requests data -> request body kwargs -> url formatting args
https://github.com/yodle/docker-registry-client/blob/8abf6b0200a68bed986f698dcbf02d444257b75c/docker_registry_client/_BaseClient.py#L49-L59
yodle/docker-registry-client
docker_registry_client/_BaseClient.py
BaseClientV1.search
def search(self, q=''): """GET /v1/search""" if q: q = '?q=' + q return self._http_call('/v1/search' + q, get)
python
def search(self, q=''): """GET /v1/search""" if q: q = '?q=' + q return self._http_call('/v1/search' + q, get)
GET /v1/search
https://github.com/yodle/docker-registry-client/blob/8abf6b0200a68bed986f698dcbf02d444257b75c/docker_registry_client/_BaseClient.py#L73-L77
yodle/docker-registry-client
docker_registry_client/_BaseClient.py
BaseClientV1.get_images_layer
def get_images_layer(self, image_id): """GET /v1/images/{image_id}/layer""" return self._http_call(self.IMAGE_LAYER, get, image_id=image_id)
python
def get_images_layer(self, image_id): """GET /v1/images/{image_id}/layer""" return self._http_call(self.IMAGE_LAYER, get, image_id=image_id)
GET /v1/images/{image_id}/layer
https://github.com/yodle/docker-registry-client/blob/8abf6b0200a68bed986f698dcbf02d444257b75c/docker_registry_client/_BaseClient.py#L83-L85
yodle/docker-registry-client
docker_registry_client/_BaseClient.py
BaseClientV1.put_images_layer
def put_images_layer(self, image_id, data): """PUT /v1/images/(image_id)/layer""" return self._http_call(self.IMAGE_LAYER, put, image_id=image_id, data=data)
python
def put_images_layer(self, image_id, data): """PUT /v1/images/(image_id)/layer""" return self._http_call(self.IMAGE_LAYER, put, image_id=image_id, data=data)
PUT /v1/images/(image_id)/layer
https://github.com/yodle/docker-registry-client/blob/8abf6b0200a68bed986f698dcbf02d444257b75c/docker_registry_client/_BaseClient.py#L87-L90
yodle/docker-registry-client
docker_registry_client/_BaseClient.py
BaseClientV1.put_image_layer
def put_image_layer(self, image_id, data): """PUT /v1/images/(image_id)/json""" return self._http_call(self.IMAGE_JSON, put, data=data, image_id=image_id)
python
def put_image_layer(self, image_id, data): """PUT /v1/images/(image_id)/json""" return self._http_call(self.IMAGE_JSON, put, data=data, image_id=image_id)
PUT /v1/images/(image_id)/json
https://github.com/yodle/docker-registry-client/blob/8abf6b0200a68bed986f698dcbf02d444257b75c/docker_registry_client/_BaseClient.py#L92-L95
yodle/docker-registry-client
docker_registry_client/_BaseClient.py
BaseClientV1.get_image_layer
def get_image_layer(self, image_id): """GET /v1/images/(image_id)/json""" return self._http_call(self.IMAGE_JSON, get, image_id=image_id)
python
def get_image_layer(self, image_id): """GET /v1/images/(image_id)/json""" return self._http_call(self.IMAGE_JSON, get, image_id=image_id)
GET /v1/images/(image_id)/json
https://github.com/yodle/docker-registry-client/blob/8abf6b0200a68bed986f698dcbf02d444257b75c/docker_registry_client/_BaseClient.py#L97-L99
yodle/docker-registry-client
docker_registry_client/_BaseClient.py
BaseClientV1.get_image_ancestry
def get_image_ancestry(self, image_id): """GET /v1/images/(image_id)/ancestry""" return self._http_call(self.IMAGE_ANCESTRY, get, image_id=image_id)
python
def get_image_ancestry(self, image_id): """GET /v1/images/(image_id)/ancestry""" return self._http_call(self.IMAGE_ANCESTRY, get, image_id=image_id)
GET /v1/images/(image_id)/ancestry
https://github.com/yodle/docker-registry-client/blob/8abf6b0200a68bed986f698dcbf02d444257b75c/docker_registry_client/_BaseClient.py#L101-L103
yodle/docker-registry-client
docker_registry_client/_BaseClient.py
BaseClientV1.get_repository_tags
def get_repository_tags(self, namespace, repository): """GET /v1/repositories/(namespace)/(repository)/tags""" return self._http_call(self.TAGS, get, namespace=namespace, repository=repository)
python
def get_repository_tags(self, namespace, repository): """GET /v1/repositories/(namespace)/(repository)/tags""" return self._http_call(self.TAGS, get, namespace=namespace, repository=repository)
GET /v1/repositories/(namespace)/(repository)/tags
https://github.com/yodle/docker-registry-client/blob/8abf6b0200a68bed986f698dcbf02d444257b75c/docker_registry_client/_BaseClient.py#L105-L108
yodle/docker-registry-client
docker_registry_client/_BaseClient.py
BaseClientV1.get_image_id
def get_image_id(self, namespace, respository, tag): """GET /v1/repositories/(namespace)/(repository)/tags/(tag*)""" return self._http_call(self.TAGS + '/' + tag, get, namespace=namespace, repository=respository)
python
def get_image_id(self, namespace, respository, tag): """GET /v1/repositories/(namespace)/(repository)/tags/(tag*)""" return self._http_call(self.TAGS + '/' + tag, get, namespace=namespace, repository=respository)
GET /v1/repositories/(namespace)/(repository)/tags/(tag*)
https://github.com/yodle/docker-registry-client/blob/8abf6b0200a68bed986f698dcbf02d444257b75c/docker_registry_client/_BaseClient.py#L110-L113
yodle/docker-registry-client
docker_registry_client/_BaseClient.py
BaseClientV1.get_tag_json
def get_tag_json(self, namespace, repository, tag): """GET /v1/repositories(namespace)/(repository)tags(tag*)/json""" return self._http_call(self.TAGS + '/' + tag + '/json', get, namespace=namespace, repository=repository)
python
def get_tag_json(self, namespace, repository, tag): """GET /v1/repositories(namespace)/(repository)tags(tag*)/json""" return self._http_call(self.TAGS + '/' + tag + '/json', get, namespace=namespace, repository=repository)
GET /v1/repositories(namespace)/(repository)tags(tag*)/json
https://github.com/yodle/docker-registry-client/blob/8abf6b0200a68bed986f698dcbf02d444257b75c/docker_registry_client/_BaseClient.py#L115-L118
yodle/docker-registry-client
docker_registry_client/_BaseClient.py
BaseClientV1.delete_repository_tag
def delete_repository_tag(self, namespace, repository, tag): """DELETE /v1/repositories/(namespace)/(repository)/tags/(tag*)""" return self._http_call(self.TAGS + '/' + tag, delete, namespace=namespace, repository=repository)
python
def delete_repository_tag(self, namespace, repository, tag): """DELETE /v1/repositories/(namespace)/(repository)/tags/(tag*)""" return self._http_call(self.TAGS + '/' + tag, delete, namespace=namespace, repository=repository)
DELETE /v1/repositories/(namespace)/(repository)/tags/(tag*)
https://github.com/yodle/docker-registry-client/blob/8abf6b0200a68bed986f698dcbf02d444257b75c/docker_registry_client/_BaseClient.py#L120-L123
yodle/docker-registry-client
docker_registry_client/_BaseClient.py
BaseClientV1.set_tag
def set_tag(self, namespace, repository, tag, image_id): """PUT /v1/repositories/(namespace)/(repository)/tags/(tag*)""" return self._http_call(self.TAGS + '/' + tag, put, data=image_id, namespace=namespace, repository=repository)
python
def set_tag(self, namespace, repository, tag, image_id): """PUT /v1/repositories/(namespace)/(repository)/tags/(tag*)""" return self._http_call(self.TAGS + '/' + tag, put, data=image_id, namespace=namespace, repository=repository)
PUT /v1/repositories/(namespace)/(repository)/tags/(tag*)
https://github.com/yodle/docker-registry-client/blob/8abf6b0200a68bed986f698dcbf02d444257b75c/docker_registry_client/_BaseClient.py#L125-L128
yodle/docker-registry-client
docker_registry_client/_BaseClient.py
BaseClientV1.delete_repository
def delete_repository(self, namespace, repository): """DELETE /v1/repositories/(namespace)/(repository)/""" return self._http_call(self.REPO, delete, namespace=namespace, repository=repository)
python
def delete_repository(self, namespace, repository): """DELETE /v1/repositories/(namespace)/(repository)/""" return self._http_call(self.REPO, delete, namespace=namespace, repository=repository)
DELETE /v1/repositories/(namespace)/(repository)/
https://github.com/yodle/docker-registry-client/blob/8abf6b0200a68bed986f698dcbf02d444257b75c/docker_registry_client/_BaseClient.py#L130-L133
yodle/docker-registry-client
docker_registry_client/_BaseClient.py
BaseClientV2._http_response
def _http_response(self, url, method, data=None, content_type=None, schema=None, **kwargs): """url -> full target url method -> method from requests data -> request body kwargs -> url formatting args """ if schema is None: schema = self.schema_2 header = { 'content-type': content_type or 'application/json', 'Accept': schema, } # Token specific part. We add the token in the header if necessary auth = self.auth token_required = auth.token_required token = auth.token desired_scope = auth.desired_scope scope = auth.scope if token_required: if not token or desired_scope != scope: logger.debug("Getting new token for scope: %s", desired_scope) auth.get_new_token() header['Authorization'] = 'Bearer %s' % self.auth.token if data and not content_type: data = json.dumps(data) path = url.format(**kwargs) logger.debug("%s %s", method.__name__.upper(), path) response = method(self.host + path, data=data, headers=header, **self.method_kwargs) logger.debug("%s %s", response.status_code, response.reason) response.raise_for_status() return response
python
def _http_response(self, url, method, data=None, content_type=None, schema=None, **kwargs): """url -> full target url method -> method from requests data -> request body kwargs -> url formatting args """ if schema is None: schema = self.schema_2 header = { 'content-type': content_type or 'application/json', 'Accept': schema, } # Token specific part. We add the token in the header if necessary auth = self.auth token_required = auth.token_required token = auth.token desired_scope = auth.desired_scope scope = auth.scope if token_required: if not token or desired_scope != scope: logger.debug("Getting new token for scope: %s", desired_scope) auth.get_new_token() header['Authorization'] = 'Bearer %s' % self.auth.token if data and not content_type: data = json.dumps(data) path = url.format(**kwargs) logger.debug("%s %s", method.__name__.upper(), path) response = method(self.host + path, data=data, headers=header, **self.method_kwargs) logger.debug("%s %s", response.status_code, response.reason) response.raise_for_status() return response
url -> full target url method -> method from requests data -> request body kwargs -> url formatting args
https://github.com/yodle/docker-registry-client/blob/8abf6b0200a68bed986f698dcbf02d444257b75c/docker_registry_client/_BaseClient.py#L229-L269
adafruit/Adafruit_Python_MCP9808
Adafruit_MCP9808/MCP9808.py
MCP9808.begin
def begin(self): """Start taking temperature measurements. Returns True if the device is intialized, False otherwise. """ # Check manufacturer and device ID match expected values. mid = self._device.readU16BE(MCP9808_REG_MANUF_ID) did = self._device.readU16BE(MCP9808_REG_DEVICE_ID) self._logger.debug('Read manufacturer ID: {0:04X}'.format(mid)) self._logger.debug('Read device ID: {0:04X}'.format(did)) return mid == 0x0054 and did == 0x0400
python
def begin(self): """Start taking temperature measurements. Returns True if the device is intialized, False otherwise. """ # Check manufacturer and device ID match expected values. mid = self._device.readU16BE(MCP9808_REG_MANUF_ID) did = self._device.readU16BE(MCP9808_REG_DEVICE_ID) self._logger.debug('Read manufacturer ID: {0:04X}'.format(mid)) self._logger.debug('Read device ID: {0:04X}'.format(did)) return mid == 0x0054 and did == 0x0400
Start taking temperature measurements. Returns True if the device is intialized, False otherwise.
https://github.com/adafruit/Adafruit_Python_MCP9808/blob/5524605a15cfce5668f259de72c88d5be74565f4/Adafruit_MCP9808/MCP9808.py#L67-L76
adafruit/Adafruit_Python_MCP9808
Adafruit_MCP9808/MCP9808.py
MCP9808.readTempC
def readTempC(self): """Read sensor and return its value in degrees celsius.""" # Read temperature register value. t = self._device.readU16BE(MCP9808_REG_AMBIENT_TEMP) self._logger.debug('Raw ambient temp register value: 0x{0:04X}'.format(t & 0xFFFF)) # Scale and convert to signed value. temp = (t & 0x0FFF) / 16.0 if t & 0x1000: temp -= 256.0 return temp
python
def readTempC(self): """Read sensor and return its value in degrees celsius.""" # Read temperature register value. t = self._device.readU16BE(MCP9808_REG_AMBIENT_TEMP) self._logger.debug('Raw ambient temp register value: 0x{0:04X}'.format(t & 0xFFFF)) # Scale and convert to signed value. temp = (t & 0x0FFF) / 16.0 if t & 0x1000: temp -= 256.0 return temp
Read sensor and return its value in degrees celsius.
https://github.com/adafruit/Adafruit_Python_MCP9808/blob/5524605a15cfce5668f259de72c88d5be74565f4/Adafruit_MCP9808/MCP9808.py#L78-L87
bmihelac/django-cruds
cruds/utils.py
crud_url_name
def crud_url_name(model, action, prefix=None): """ Returns url name for given model and action. """ if prefix is None: prefix = "" app_label = model._meta.app_label model_lower = model.__name__.lower() return '%s%s_%s_%s' % (prefix, app_label, model_lower, action)
python
def crud_url_name(model, action, prefix=None): """ Returns url name for given model and action. """ if prefix is None: prefix = "" app_label = model._meta.app_label model_lower = model.__name__.lower() return '%s%s_%s_%s' % (prefix, app_label, model_lower, action)
Returns url name for given model and action.
https://github.com/bmihelac/django-cruds/blob/7828aac3eb2b4c02e5f3843c4cbff654d57cf1e7/cruds/utils.py#L34-L42
bmihelac/django-cruds
cruds/utils.py
get_fields
def get_fields(model, include=None): """ Returns ordered dict in format 'field': 'verbose_name' """ fields = OrderedDict() info = model._meta if include: selected = [info.get_field(name) for name in include] else: selected = [field for field in info.fields if field.editable] for field in selected: fields[field.name] = field.verbose_name return fields
python
def get_fields(model, include=None): """ Returns ordered dict in format 'field': 'verbose_name' """ fields = OrderedDict() info = model._meta if include: selected = [info.get_field(name) for name in include] else: selected = [field for field in info.fields if field.editable] for field in selected: fields[field.name] = field.verbose_name return fields
Returns ordered dict in format 'field': 'verbose_name'
https://github.com/bmihelac/django-cruds/blob/7828aac3eb2b4c02e5f3843c4cbff654d57cf1e7/cruds/utils.py#L45-L57
bmihelac/django-cruds
cruds/utils.py
crud_url
def crud_url(instance_or_model, action, prefix=None, additional_kwargs=None): """Shortcut function returns URL for instance or model and action. Example:: crud_url(author, 'update') Is same as: reverse('testapp_author_update', kwargs={'pk': author.pk}) Example:: crud_url(Author, 'update') Is same as: reverse('testapp_author_list') """ if additional_kwargs is None: additional_kwargs = {} if isinstance(instance_or_model, Model): additional_kwargs['pk'] = instance_or_model.pk model_name = instance_or_model._meta.model else: model_name = instance_or_model return reverse( crud_url_name(model_name, action, prefix), kwargs=additional_kwargs )
python
def crud_url(instance_or_model, action, prefix=None, additional_kwargs=None): """Shortcut function returns URL for instance or model and action. Example:: crud_url(author, 'update') Is same as: reverse('testapp_author_update', kwargs={'pk': author.pk}) Example:: crud_url(Author, 'update') Is same as: reverse('testapp_author_list') """ if additional_kwargs is None: additional_kwargs = {} if isinstance(instance_or_model, Model): additional_kwargs['pk'] = instance_or_model.pk model_name = instance_or_model._meta.model else: model_name = instance_or_model return reverse( crud_url_name(model_name, action, prefix), kwargs=additional_kwargs )
Shortcut function returns URL for instance or model and action. Example:: crud_url(author, 'update') Is same as: reverse('testapp_author_update', kwargs={'pk': author.pk}) Example:: crud_url(Author, 'update') Is same as: reverse('testapp_author_list')
https://github.com/bmihelac/django-cruds/blob/7828aac3eb2b4c02e5f3843c4cbff654d57cf1e7/cruds/utils.py#L60-L89
bmihelac/django-cruds
cruds/utils.py
crud_permission_name
def crud_permission_name(model, action, convert=True): """Returns permission name using Django naming convention: app_label.action_object. If `convert` is True, `create` and `update` actions would be renamed to `add` and `change`. """ app_label = model._meta.app_label model_lower = model.__name__.lower() if convert: action = MAP_PERMISSION_ACTIONS.get(action, action) return '%s.%s_%s' % ( app_label, action, model_lower )
python
def crud_permission_name(model, action, convert=True): """Returns permission name using Django naming convention: app_label.action_object. If `convert` is True, `create` and `update` actions would be renamed to `add` and `change`. """ app_label = model._meta.app_label model_lower = model.__name__.lower() if convert: action = MAP_PERMISSION_ACTIONS.get(action, action) return '%s.%s_%s' % ( app_label, action, model_lower )
Returns permission name using Django naming convention: app_label.action_object. If `convert` is True, `create` and `update` actions would be renamed to `add` and `change`.
https://github.com/bmihelac/django-cruds/blob/7828aac3eb2b4c02e5f3843c4cbff654d57cf1e7/cruds/utils.py#L112-L126
bmihelac/django-cruds
cruds/templatetags/crud_tags.py
format_value
def format_value(obj, field_name): """ Simple value formatting. If value is model instance returns link to detail view if exists. """ display_func = getattr(obj, 'get_%s_display' % field_name, None) if display_func: return display_func() value = getattr(obj, field_name) if isinstance(value, models.fields.files.FieldFile): if value: return mark_safe('<a href="%s">%s</a>' % ( value.url, os.path.basename(value.name), )) else: return '' if isinstance(value, models.Model): return format_value_instance(value) if isinstance(value, models.Manager): return mark_safe(', '.join( [format_value_instance(instance) for instance in value.all()] )) if value is None: value = "" return value
python
def format_value(obj, field_name): """ Simple value formatting. If value is model instance returns link to detail view if exists. """ display_func = getattr(obj, 'get_%s_display' % field_name, None) if display_func: return display_func() value = getattr(obj, field_name) if isinstance(value, models.fields.files.FieldFile): if value: return mark_safe('<a href="%s">%s</a>' % ( value.url, os.path.basename(value.name), )) else: return '' if isinstance(value, models.Model): return format_value_instance(value) if isinstance(value, models.Manager): return mark_safe(', '.join( [format_value_instance(instance) for instance in value.all()] )) if value is None: value = "" return value
Simple value formatting. If value is model instance returns link to detail view if exists.
https://github.com/bmihelac/django-cruds/blob/7828aac3eb2b4c02e5f3843c4cbff654d57cf1e7/cruds/templatetags/crud_tags.py#L52-L81
bmihelac/django-cruds
cruds/templatetags/crud_tags.py
crud_fields
def crud_fields(obj, fields=None): """ Display object fields in table rows:: <table> {% crud_fields object 'id, %} </table> * ``fields`` fields to include If fields is ``None`` all fields will be displayed. If fields is ``string`` comma separated field names will be displayed. if field is dictionary, key should be field name and value field verbose name. """ if fields is None: fields = utils.get_fields(type(obj)) elif isinstance(fields, six.string_types): field_names = [f.strip() for f in fields.split(',')] fields = utils.get_fields(type(obj), include=field_names) return { 'object': obj, 'fields': fields, }
python
def crud_fields(obj, fields=None): """ Display object fields in table rows:: <table> {% crud_fields object 'id, %} </table> * ``fields`` fields to include If fields is ``None`` all fields will be displayed. If fields is ``string`` comma separated field names will be displayed. if field is dictionary, key should be field name and value field verbose name. """ if fields is None: fields = utils.get_fields(type(obj)) elif isinstance(fields, six.string_types): field_names = [f.strip() for f in fields.split(',')] fields = utils.get_fields(type(obj), include=field_names) return { 'object': obj, 'fields': fields, }
Display object fields in table rows:: <table> {% crud_fields object 'id, %} </table> * ``fields`` fields to include If fields is ``None`` all fields will be displayed. If fields is ``string`` comma separated field names will be displayed. if field is dictionary, key should be field name and value field verbose name.
https://github.com/bmihelac/django-cruds/blob/7828aac3eb2b4c02e5f3843c4cbff654d57cf1e7/cruds/templatetags/crud_tags.py#L85-L109
bmihelac/django-cruds
cruds/templatetags/crud_tags.py
get_fields
def get_fields(model, fields=None): """ Assigns fields for model. """ include = [f.strip() for f in fields.split(',')] if fields else None return utils.get_fields( model, include )
python
def get_fields(model, fields=None): """ Assigns fields for model. """ include = [f.strip() for f in fields.split(',')] if fields else None return utils.get_fields( model, include )
Assigns fields for model.
https://github.com/bmihelac/django-cruds/blob/7828aac3eb2b4c02e5f3843c4cbff654d57cf1e7/cruds/templatetags/crud_tags.py#L113-L121
bmihelac/django-cruds
cruds/urls.py
crud_urls
def crud_urls(model, list_view=None, create_view=None, update_view=None, detail_view=None, delete_view=None, url_prefix=None, name_prefix=None, list_views=None, **kwargs): """Returns a list of url patterns for model. :param list_view: :param create_view: :param update_view: :param detail_view: :param delete_view: :param url_prefix: prefix to prepend, default is `'$'` :param name_prefix: prefix to prepend to name, default is empty string :param list_views(dict): additional list views :param **kwargs: additional detail views :returns: urls """ if url_prefix is None: url_prefix = r'^' urls = [] if list_view: urls.append(url( url_prefix + '$', list_view, name=utils.crud_url_name(model, utils.ACTION_LIST, name_prefix) )) if create_view: urls.append(url( url_prefix + r'new/$', create_view, name=utils.crud_url_name(model, utils.ACTION_CREATE, name_prefix) )) if detail_view: urls.append(url( url_prefix + r'(?P<pk>\d+)/$', detail_view, name=utils.crud_url_name(model, utils.ACTION_DETAIL, name_prefix) )) if update_view: urls.append(url( url_prefix + r'(?P<pk>\d+)/edit/$', update_view, name=utils.crud_url_name(model, utils.ACTION_UPDATE, name_prefix) )) if delete_view: urls.append(url( url_prefix + r'(?P<pk>\d+)/remove/$', delete_view, name=utils.crud_url_name(model, utils.ACTION_DELETE, name_prefix) )) if list_views is not None: for name, view in list_views.items(): urls.append(url( url_prefix + r'%s/$' % name, view, name=utils.crud_url_name(model, name, name_prefix) )) for name, view in kwargs.items(): urls.append(url( url_prefix + r'(?P<pk>\d+)/%s/$' % name, view, name=utils.crud_url_name(model, name, name_prefix) )) return urls
python
def crud_urls(model, list_view=None, create_view=None, update_view=None, detail_view=None, delete_view=None, url_prefix=None, name_prefix=None, list_views=None, **kwargs): """Returns a list of url patterns for model. :param list_view: :param create_view: :param update_view: :param detail_view: :param delete_view: :param url_prefix: prefix to prepend, default is `'$'` :param name_prefix: prefix to prepend to name, default is empty string :param list_views(dict): additional list views :param **kwargs: additional detail views :returns: urls """ if url_prefix is None: url_prefix = r'^' urls = [] if list_view: urls.append(url( url_prefix + '$', list_view, name=utils.crud_url_name(model, utils.ACTION_LIST, name_prefix) )) if create_view: urls.append(url( url_prefix + r'new/$', create_view, name=utils.crud_url_name(model, utils.ACTION_CREATE, name_prefix) )) if detail_view: urls.append(url( url_prefix + r'(?P<pk>\d+)/$', detail_view, name=utils.crud_url_name(model, utils.ACTION_DETAIL, name_prefix) )) if update_view: urls.append(url( url_prefix + r'(?P<pk>\d+)/edit/$', update_view, name=utils.crud_url_name(model, utils.ACTION_UPDATE, name_prefix) )) if delete_view: urls.append(url( url_prefix + r'(?P<pk>\d+)/remove/$', delete_view, name=utils.crud_url_name(model, utils.ACTION_DELETE, name_prefix) )) if list_views is not None: for name, view in list_views.items(): urls.append(url( url_prefix + r'%s/$' % name, view, name=utils.crud_url_name(model, name, name_prefix) )) for name, view in kwargs.items(): urls.append(url( url_prefix + r'(?P<pk>\d+)/%s/$' % name, view, name=utils.crud_url_name(model, name, name_prefix) )) return urls
Returns a list of url patterns for model. :param list_view: :param create_view: :param update_view: :param detail_view: :param delete_view: :param url_prefix: prefix to prepend, default is `'$'` :param name_prefix: prefix to prepend to name, default is empty string :param list_views(dict): additional list views :param **kwargs: additional detail views :returns: urls
https://github.com/bmihelac/django-cruds/blob/7828aac3eb2b4c02e5f3843c4cbff654d57cf1e7/cruds/urls.py#L17-L88
bmihelac/django-cruds
cruds/urls.py
crud_for_model
def crud_for_model(model, urlprefix=None): """Returns list of ``url`` items to CRUD a model. """ model_lower = model.__name__.lower() if urlprefix is None: urlprefix = '' urlprefix += model_lower + '/' urls = crud_urls( model, list_view=CRUDListView.as_view(model=model), create_view=CRUDCreateView.as_view(model=model), detail_view=CRUDDetailView.as_view(model=model), update_view=CRUDUpdateView.as_view(model=model), delete_view=CRUDDeleteView.as_view(model=model), url_prefix=urlprefix, ) return urls
python
def crud_for_model(model, urlprefix=None): """Returns list of ``url`` items to CRUD a model. """ model_lower = model.__name__.lower() if urlprefix is None: urlprefix = '' urlprefix += model_lower + '/' urls = crud_urls( model, list_view=CRUDListView.as_view(model=model), create_view=CRUDCreateView.as_view(model=model), detail_view=CRUDDetailView.as_view(model=model), update_view=CRUDUpdateView.as_view(model=model), delete_view=CRUDDeleteView.as_view(model=model), url_prefix=urlprefix, ) return urls
Returns list of ``url`` items to CRUD a model.
https://github.com/bmihelac/django-cruds/blob/7828aac3eb2b4c02e5f3843c4cbff654d57cf1e7/cruds/urls.py#L91-L109
bmihelac/django-cruds
cruds/urls.py
crud_for_app
def crud_for_app(app_label, urlprefix=None): """ Returns list of ``url`` items to CRUD an app. """ if urlprefix is None: urlprefix = app_label + '/' app = apps.get_app_config(app_label) urls = [] for model in app.get_models(): urls += crud_for_model(model, urlprefix) return urls
python
def crud_for_app(app_label, urlprefix=None): """ Returns list of ``url`` items to CRUD an app. """ if urlprefix is None: urlprefix = app_label + '/' app = apps.get_app_config(app_label) urls = [] for model in app.get_models(): urls += crud_for_model(model, urlprefix) return urls
Returns list of ``url`` items to CRUD an app.
https://github.com/bmihelac/django-cruds/blob/7828aac3eb2b4c02e5f3843c4cbff654d57cf1e7/cruds/urls.py#L112-L122
bmihelac/django-cruds
cruds/views.py
CRUDMixin.get_context_data
def get_context_data(self, **kwargs): """ Adds available urls and names. """ context = super(CRUDMixin, self).get_context_data(**kwargs) context.update({ 'model_verbose_name': self.model._meta.verbose_name, 'model_verbose_name_plural': self.model._meta.verbose_name_plural, }) context['fields'] = utils.get_fields(self.model) if hasattr(self, 'object') and self.object: for action in utils.INSTANCE_ACTIONS: try: url = reverse( utils.crud_url_name(self.model, action), kwargs={'pk': self.object.pk}) except NoReverseMatch: # pragma: no cover url = None context['url_%s' % action] = url for action in utils.LIST_ACTIONS: try: url = reverse(utils.crud_url_name(self.model, action)) except NoReverseMatch: # pragma: no cover url = None context['url_%s' % action] = url return context
python
def get_context_data(self, **kwargs): """ Adds available urls and names. """ context = super(CRUDMixin, self).get_context_data(**kwargs) context.update({ 'model_verbose_name': self.model._meta.verbose_name, 'model_verbose_name_plural': self.model._meta.verbose_name_plural, }) context['fields'] = utils.get_fields(self.model) if hasattr(self, 'object') and self.object: for action in utils.INSTANCE_ACTIONS: try: url = reverse( utils.crud_url_name(self.model, action), kwargs={'pk': self.object.pk}) except NoReverseMatch: # pragma: no cover url = None context['url_%s' % action] = url for action in utils.LIST_ACTIONS: try: url = reverse(utils.crud_url_name(self.model, action)) except NoReverseMatch: # pragma: no cover url = None context['url_%s' % action] = url return context
Adds available urls and names.
https://github.com/bmihelac/django-cruds/blob/7828aac3eb2b4c02e5f3843c4cbff654d57cf1e7/cruds/views.py#L23-L52
bmihelac/django-cruds
cruds/views.py
CRUDMixin.get_template_names
def get_template_names(self): """ Adds crud_template_name to default template names. """ names = super(CRUDMixin, self).get_template_names() if self.crud_template_name: names.append(self.crud_template_name) return names
python
def get_template_names(self): """ Adds crud_template_name to default template names. """ names = super(CRUDMixin, self).get_template_names() if self.crud_template_name: names.append(self.crud_template_name) return names
Adds crud_template_name to default template names.
https://github.com/bmihelac/django-cruds/blob/7828aac3eb2b4c02e5f3843c4cbff654d57cf1e7/cruds/views.py#L54-L61
croscon/fleaker
setup.py
install
def install(): """Install Fleaker. In a function so we can protect this file so it's only run when we explicitly invoke it and not, say, when py.test collects all Python modules. """ _version_re = re.compile(r"__version__\s+=\s+(.*)") # pylint: disable=invalid-name with open('./fleaker/__init__.py', 'rb') as file_: version = ast.literal_eval(_version_re.search( # pylint: disable=invalid-name file_.read().decode('utf-8')).group(1)) download_url = ('https://github.com/croscon/fleaker/archive/' 'v{}.tar.gz'.format(version)) setup( name='fleaker', version=version, download_url=download_url, description='Tools and extensions to make Flask development easier.', url='https://github.com/croscon/fleaker', author='Croscon Consulting', author_email='[email protected]', license='BSD', packages=[ 'fleaker', 'fleaker.marshmallow', 'fleaker.marshmallow.fields', 'fleaker.peewee', 'fleaker.peewee.fields', 'fleaker.peewee.mixins', 'fleaker.peewee.mixins.time', ], zip_safe=False, long_description=__doc__, include_package_data=True, platforms='any', install_requires=[ 'Flask', 'Flask-Classful', 'Flask-Login', 'Flask-Marshmallow', 'arrow', 'bcrypt', 'blinker', 'marshmallow', 'marshmallow-jsonschema', 'peewee', 'pendulum', 'phonenumbers', 'simplejson', ], classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Environment :: Web Environment', 'Framework :: Flask', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', # @TODO: Pick specific Python versions; out of the gate flask does 2.6, # 2.7, 3.3, 3.4, and 3.5 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', 'Topic :: Software Development :: Libraries :: Application Frameworks', 'Topic :: Software Development :: Libraries :: Python Modules', ], keywords=['flask', 'web development', 'flask extension'] )
python
def install(): """Install Fleaker. In a function so we can protect this file so it's only run when we explicitly invoke it and not, say, when py.test collects all Python modules. """ _version_re = re.compile(r"__version__\s+=\s+(.*)") # pylint: disable=invalid-name with open('./fleaker/__init__.py', 'rb') as file_: version = ast.literal_eval(_version_re.search( # pylint: disable=invalid-name file_.read().decode('utf-8')).group(1)) download_url = ('https://github.com/croscon/fleaker/archive/' 'v{}.tar.gz'.format(version)) setup( name='fleaker', version=version, download_url=download_url, description='Tools and extensions to make Flask development easier.', url='https://github.com/croscon/fleaker', author='Croscon Consulting', author_email='[email protected]', license='BSD', packages=[ 'fleaker', 'fleaker.marshmallow', 'fleaker.marshmallow.fields', 'fleaker.peewee', 'fleaker.peewee.fields', 'fleaker.peewee.mixins', 'fleaker.peewee.mixins.time', ], zip_safe=False, long_description=__doc__, include_package_data=True, platforms='any', install_requires=[ 'Flask', 'Flask-Classful', 'Flask-Login', 'Flask-Marshmallow', 'arrow', 'bcrypt', 'blinker', 'marshmallow', 'marshmallow-jsonschema', 'peewee', 'pendulum', 'phonenumbers', 'simplejson', ], classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Environment :: Web Environment', 'Framework :: Flask', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', # @TODO: Pick specific Python versions; out of the gate flask does 2.6, # 2.7, 3.3, 3.4, and 3.5 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', 'Topic :: Software Development :: Libraries :: Application Frameworks', 'Topic :: Software Development :: Libraries :: Python Modules', ], keywords=['flask', 'web development', 'flask extension'] )
Install Fleaker. In a function so we can protect this file so it's only run when we explicitly invoke it and not, say, when py.test collects all Python modules.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/setup.py#L50-L120
croscon/fleaker
fleaker/peewee/fields/json.py
JSONField.python_value
def python_value(self, value): """Return the JSON in the database as a ``dict``. Returns: dict: The field run through json.loads """ value = super(JSONField, self).python_value(value) if value is not None: return flask.json.loads(value, **self._load_kwargs)
python
def python_value(self, value): """Return the JSON in the database as a ``dict``. Returns: dict: The field run through json.loads """ value = super(JSONField, self).python_value(value) if value is not None: return flask.json.loads(value, **self._load_kwargs)
Return the JSON in the database as a ``dict``. Returns: dict: The field run through json.loads
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/peewee/fields/json.py#L101-L110
croscon/fleaker
fleaker/peewee/fields/json.py
JSONField.db_value
def db_value(self, value): """Store the value in the database. If the value is a dict like object, it is converted to a string before storing. """ # Everything is encoded being before being surfaced value = flask.json.dumps(value) return super(JSONField, self).db_value(value)
python
def db_value(self, value): """Store the value in the database. If the value is a dict like object, it is converted to a string before storing. """ # Everything is encoded being before being surfaced value = flask.json.dumps(value) return super(JSONField, self).db_value(value)
Store the value in the database. If the value is a dict like object, it is converted to a string before storing.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/peewee/fields/json.py#L112-L121
croscon/fleaker
fleaker/peewee/mixins/search.py
SearchMixin.search
def search(cls, term, fields=()): """Generic SQL search function that uses SQL ``LIKE`` to search the database for matching records. The records are sorted by their relavancey to the search term. The query searches and sorts on the folling criteria, in order, where the target string is ``exactly``: 1. Straight equality (``x = 'exactly'``) 2. Right hand ``LIKE`` (``x LIKE 'exact%'``) 3. Substring ``LIKE`` (``x LIKE %act%``) Args: term (str): The search term to apply to the query. Keyword Args: fields (list|tuple|None): An optional list of fields to apply the search to. If not provided, the class variable ``Meta.search_fields`` will be used by default. Returns: peewee.SelectQuery: An unexecuted query for the records. Raises: AttributeError: Raised if `search_fields` isn't defined in the class and `fields` aren't provided for the function. """ if not any((cls._meta.search_fields, fields)): raise AttributeError( "A list of searchable fields must be provided in the class's " "search_fields or provided to this function in the `fields` " "kwarg." ) # If fields are provided, override the ones in the class if not fields: fields = cls._meta.search_fields query = cls.select() # Cache the LIKE terms like_term = ''.join((term, '%')) full_like_term = ''.join(('%', term, '%')) # Cache the order by terms # @TODO Peewee's order_by supports an `extend` kwarg will will allow # for updating of the order by part of the query, but it's only # supported in Peewee 2.8.5 and newer. Determine if we can support this # before switching. # http://docs.peewee-orm.com/en/stable/peewee/api.html#SelectQuery.order_by order_by = [] # Store the clauses seperately because it is needed to perform an OR on # them and that's somehow impossible with their query builder in # a loop. clauses = [] for field_name in fields: # Cache the field, raising an exception if the field doesn't # exist. 
field = getattr(cls, field_name) # Apply the search term case insensitively clauses.append( (field == term) | (field ** like_term) | (field ** full_like_term) ) order_by.append(case(None, ( # Straight matches should show up first (field == term, 0), # Similar terms should show up second (field ** like_term, 1), # Substring matches should show up third (field ** full_like_term, 2), ), default=3).asc()) # Apply the clauses to the query query = query.where(reduce(operator.or_, clauses)) # Apply the sort order so it's influenced by the search term relevance. query = query.order_by(*order_by) return query
python
def search(cls, term, fields=()): """Generic SQL search function that uses SQL ``LIKE`` to search the database for matching records. The records are sorted by their relavancey to the search term. The query searches and sorts on the folling criteria, in order, where the target string is ``exactly``: 1. Straight equality (``x = 'exactly'``) 2. Right hand ``LIKE`` (``x LIKE 'exact%'``) 3. Substring ``LIKE`` (``x LIKE %act%``) Args: term (str): The search term to apply to the query. Keyword Args: fields (list|tuple|None): An optional list of fields to apply the search to. If not provided, the class variable ``Meta.search_fields`` will be used by default. Returns: peewee.SelectQuery: An unexecuted query for the records. Raises: AttributeError: Raised if `search_fields` isn't defined in the class and `fields` aren't provided for the function. """ if not any((cls._meta.search_fields, fields)): raise AttributeError( "A list of searchable fields must be provided in the class's " "search_fields or provided to this function in the `fields` " "kwarg." ) # If fields are provided, override the ones in the class if not fields: fields = cls._meta.search_fields query = cls.select() # Cache the LIKE terms like_term = ''.join((term, '%')) full_like_term = ''.join(('%', term, '%')) # Cache the order by terms # @TODO Peewee's order_by supports an `extend` kwarg will will allow # for updating of the order by part of the query, but it's only # supported in Peewee 2.8.5 and newer. Determine if we can support this # before switching. # http://docs.peewee-orm.com/en/stable/peewee/api.html#SelectQuery.order_by order_by = [] # Store the clauses seperately because it is needed to perform an OR on # them and that's somehow impossible with their query builder in # a loop. clauses = [] for field_name in fields: # Cache the field, raising an exception if the field doesn't # exist. 
field = getattr(cls, field_name) # Apply the search term case insensitively clauses.append( (field == term) | (field ** like_term) | (field ** full_like_term) ) order_by.append(case(None, ( # Straight matches should show up first (field == term, 0), # Similar terms should show up second (field ** like_term, 1), # Substring matches should show up third (field ** full_like_term, 2), ), default=3).asc()) # Apply the clauses to the query query = query.where(reduce(operator.or_, clauses)) # Apply the sort order so it's influenced by the search term relevance. query = query.order_by(*order_by) return query
Generic SQL search function that uses SQL ``LIKE`` to search the database for matching records. The records are sorted by their relavancey to the search term. The query searches and sorts on the folling criteria, in order, where the target string is ``exactly``: 1. Straight equality (``x = 'exactly'``) 2. Right hand ``LIKE`` (``x LIKE 'exact%'``) 3. Substring ``LIKE`` (``x LIKE %act%``) Args: term (str): The search term to apply to the query. Keyword Args: fields (list|tuple|None): An optional list of fields to apply the search to. If not provided, the class variable ``Meta.search_fields`` will be used by default. Returns: peewee.SelectQuery: An unexecuted query for the records. Raises: AttributeError: Raised if `search_fields` isn't defined in the class and `fields` aren't provided for the function.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/peewee/mixins/search.py#L97-L180
croscon/fleaker
fleaker/marshmallow/fields/foreign_key.py
ForeignKeyField._add_to_schema
def _add_to_schema(self, field_name, schema): """Set the ``attribute`` attr to the field in question so this always gets deserialzed into the field name without ``_id``. Args: field_name (str): The name of the field (the attribute name being set in the schema). schema (marshmallow.Schema): The actual parent schema this field belongs to. """ super(ForeignKeyField, self)._add_to_schema(field_name, schema) if self.get_field_value('convert_fks', default=True): self.attribute = field_name.replace('_id', '')
python
def _add_to_schema(self, field_name, schema): """Set the ``attribute`` attr to the field in question so this always gets deserialzed into the field name without ``_id``. Args: field_name (str): The name of the field (the attribute name being set in the schema). schema (marshmallow.Schema): The actual parent schema this field belongs to. """ super(ForeignKeyField, self)._add_to_schema(field_name, schema) if self.get_field_value('convert_fks', default=True): self.attribute = field_name.replace('_id', '')
Set the ``attribute`` attr to the field in question so this always gets deserialzed into the field name without ``_id``. Args: field_name (str): The name of the field (the attribute name being set in the schema). schema (marshmallow.Schema): The actual parent schema this field belongs to.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/marshmallow/fields/foreign_key.py#L36-L49
croscon/fleaker
fleaker/marshmallow/fields/foreign_key.py
ForeignKeyField._serialize
def _serialize(self, value, attr, obj): """Grab the ID value off the Peewee model so we serialize an ID back. """ # this might be an optional field if value: value = value.id return super(ForeignKeyField, self)._serialize(value, attr, obj)
python
def _serialize(self, value, attr, obj): """Grab the ID value off the Peewee model so we serialize an ID back. """ # this might be an optional field if value: value = value.id return super(ForeignKeyField, self)._serialize(value, attr, obj)
Grab the ID value off the Peewee model so we serialize an ID back.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/marshmallow/fields/foreign_key.py#L51-L58
croscon/fleaker
fleaker/orm.py
_discover_ideal_backend
def _discover_ideal_backend(orm_backend): """Auto-discover the ideal backend based on what is installed. Right now, handles discovery of: * PeeWee * SQLAlchemy Args: orm_backend (str): The ``orm_backend`` value that was passed to the ``create_app`` function. That is, the ORM Backend the User indicated they wanted to use. Returns: str|fleaker.missing.MissingSentinel: Returns a string for the ideal backend if it found one, or :obj:`fleaker.MISSING` if we couldn't find one. Raises: RuntimeError: Raised if no user provided ORM Backend is given and BOTH PeeWee and SQLAlchemy are installed. """ if orm_backend: return orm_backend if peewee is not MISSING and sqlalchemy is not MISSING: raise RuntimeError('Both PeeWee and SQLAlchemy detected as installed, ' 'but no explicit backend provided! Please specify ' 'one!') if peewee is not MISSING: return _PEEWEE_BACKEND elif sqlalchemy is not MISSING: return _SQLALCHEMY_BACKEND else: return MISSING
python
def _discover_ideal_backend(orm_backend): """Auto-discover the ideal backend based on what is installed. Right now, handles discovery of: * PeeWee * SQLAlchemy Args: orm_backend (str): The ``orm_backend`` value that was passed to the ``create_app`` function. That is, the ORM Backend the User indicated they wanted to use. Returns: str|fleaker.missing.MissingSentinel: Returns a string for the ideal backend if it found one, or :obj:`fleaker.MISSING` if we couldn't find one. Raises: RuntimeError: Raised if no user provided ORM Backend is given and BOTH PeeWee and SQLAlchemy are installed. """ if orm_backend: return orm_backend if peewee is not MISSING and sqlalchemy is not MISSING: raise RuntimeError('Both PeeWee and SQLAlchemy detected as installed, ' 'but no explicit backend provided! Please specify ' 'one!') if peewee is not MISSING: return _PEEWEE_BACKEND elif sqlalchemy is not MISSING: return _SQLALCHEMY_BACKEND else: return MISSING
Auto-discover the ideal backend based on what is installed. Right now, handles discovery of: * PeeWee * SQLAlchemy Args: orm_backend (str): The ``orm_backend`` value that was passed to the ``create_app`` function. That is, the ORM Backend the User indicated they wanted to use. Returns: str|fleaker.missing.MissingSentinel: Returns a string for the ideal backend if it found one, or :obj:`fleaker.MISSING` if we couldn't find one. Raises: RuntimeError: Raised if no user provided ORM Backend is given and BOTH PeeWee and SQLAlchemy are installed.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/orm.py#L83-L116
croscon/fleaker
fleaker/orm.py
ORMAwareApp.post_create_app
def post_create_app(cls, app, **settings): """Init the extension for our chosen ORM Backend, if possible. This method will ensure that the ``db`` proxy is set to the right extension and that that extension is properly created and configured. Since it needs to call ``init_app`` it MUST be a Post Create Hook. If the chosen backend is PeeWee and no ``DATABASE`` config value is provided, we will delay initializing the extension until one is. Args: app (flask.Flask): The Flask application that was just made through the :meth:`create_app` factory that we should bind extensions to. Kwargs: orm_backend (str): If you want to explicitly specify an ORM Backend to use, you should send it in this kwarg. Valid values are either: ``'peewee'`` or ``'sqlalchemy'``. peewee_database (str): An explicit database connection URI we should immeditately add to the configuration that should be used to configure the PeeWee ORM Backend. This will result in the ``DATABASE`` key being set to this value in the config and will result in the PeeWee Flask extension being initialized IMMEDIATELY and not delayed until the next call to :meth:`configure`. Returns: flask.Flask: Returns the app it was given once this is done. Raises: RuntimeError: This is raised if we are asked to create the PeeWee ORM, but are not given a database URI in either the ``DATABASE`` config value, or the explicit ``peewee_database`` setting. """ global _SELECTED_BACKEND backend = settings.pop('orm_backend', None) backend = _discover_ideal_backend(backend) # did not specify a backend, bail early if backend is MISSING: return app _swap_backends_error = ('Cannot swap ORM backends after one is ' 'declared!') if backend == _PEEWEE_BACKEND: if (_SELECTED_BACKEND is not MISSING and _SELECTED_BACKEND != _PEEWEE_EXT): raise RuntimeError(_swap_backends_error) # @TODO (orm): Does this really need to be ``peewee_database``? can # it be ``orm_database``? 
database_uri = settings.pop('peewee_database', None) if database_uri: app.config['DATABASE'] = database_uri if 'DATABASE' not in app.config: # since there is no DATABASE in the config, we need to wait # until we init this; so we'll just do it after configure is # called. try: app.add_post_configure_callback( partial(cls._init_peewee_ext, app), run_once=True ) except NotImplementedError: # this composed app doesn't implement multi-stage # configuration, so there's no way we can proceed without # an explicit DB =/; yes it's possible this could swallow # another error, but if it does... the easiest fix is to do # the same # @TODO (docs): Multi Stage Configuration should be in # the docs err_msg = """\ The app you are trying to construct does not support Multi Stage Configuration and no connection info for the database was given at creation! Please call `create_app` again and provide your database connection string as the `peewee_database` kwarg!\ """ raise RuntimeError(err_msg) else: # the DATABASE is already present, go ahead and just init now cls._init_peewee_ext(app) _SELECTED_BACKEND = _PEEWEE_EXT elif backend == _SQLALCHEMY_BACKEND: # @TODO (orm): Finish SQLA implementation # do sqla bootstrap code if (_SELECTED_BACKEND is not MISSING and _SELECTED_BACKEND != _SQLA_EXT): raise RuntimeError(_swap_backends_error) _SELECTED_BACKEND = _SQLA_EXT _SQLA_EXT.init_app(app) else: err_msg = ("Explicit ORM backend provided, but could not recognize" " the value! Valid values are: '{}' and '{}';" " received: '{}' instead!") err_msg = err_msg.format(_PEEWEE_BACKEND, _SQLALCHEMY_BACKEND, backend) raise RuntimeError(err_msg) return app
python
def post_create_app(cls, app, **settings):
    """Init the extension for our chosen ORM Backend, if possible.

    This method will ensure that the ``db`` proxy is set to the right
    extension and that that extension is properly created and configured.
    Since it needs to call ``init_app`` it MUST be a Post Create Hook.

    If the chosen backend is PeeWee and no ``DATABASE`` config value is
    provided, we will delay initializing the extension until one is.

    Args:
        app (flask.Flask): The Flask application that was just made through
            the :meth:`create_app` factory that we should bind extensions
            to.

    Kwargs:
        orm_backend (str): If you want to explicitly specify an ORM Backend
            to use, you should send it in this kwarg. Valid values are
            either: ``'peewee'`` or ``'sqlalchemy'``.
        peewee_database (str): An explicit database connection URI we should
            immediately add to the configuration that should be used to
            configure the PeeWee ORM Backend. This will result in the
            ``DATABASE`` key being set to this value in the config and will
            result in the PeeWee Flask extension being initialized
            IMMEDIATELY and not delayed until the next call to
            :meth:`configure`.

    Returns:
        flask.Flask: Returns the app it was given once this is done.

    Raises:
        RuntimeError: This is raised if we are asked to create the PeeWee
            ORM, but are not given a database URI in either the
            ``DATABASE`` config value, or the explicit ``peewee_database``
            setting.
    """
    # module-level record of which extension was selected; used to reject
    # any later attempt to swap backends
    global _SELECTED_BACKEND

    backend = settings.pop('orm_backend', None)
    backend = _discover_ideal_backend(backend)

    # did not specify a backend, bail early
    if backend is MISSING:
        return app

    _swap_backends_error = ('Cannot swap ORM backends after one is '
                            'declared!')

    if backend == _PEEWEE_BACKEND:
        # a different backend was already chosen earlier; swapping is not
        # supported
        if (_SELECTED_BACKEND is not MISSING
                and _SELECTED_BACKEND != _PEEWEE_EXT):
            raise RuntimeError(_swap_backends_error)

        # @TODO (orm): Does this really need to be ``peewee_database``? can
        # it be ``orm_database``?
        database_uri = settings.pop('peewee_database', None)
        if database_uri:
            app.config['DATABASE'] = database_uri

        if 'DATABASE' not in app.config:
            # since there is no DATABASE in the config, we need to wait
            # until we init this; so we'll just do it after configure is
            # called.
            try:
                app.add_post_configure_callback(
                    partial(cls._init_peewee_ext, app),
                    run_once=True
                )
            except NotImplementedError:
                # this composed app doesn't implement multi-stage
                # configuration, so there's no way we can proceed without
                # an explicit DB =/; yes it's possible this could swallow
                # another error, but if it does... the easiest fix is to do
                # the same
                # @TODO (docs): Multi Stage Configuration should be in
                # the docs
                # NOTE(review): exact line breaks of this message could not
                # be recovered from the flattened source -- confirm against
                # upstream before relying on the literal text
                err_msg = """\
The app you are trying to construct does not support Multi Stage
Configuration and no connection info for the database was given at
creation! Please call `create_app` again and provide your database
connection string as the `peewee_database` kwarg!\
"""
                raise RuntimeError(err_msg)
        else:
            # the DATABASE is already present, go ahead and just init now
            cls._init_peewee_ext(app)

        _SELECTED_BACKEND = _PEEWEE_EXT
    elif backend == _SQLALCHEMY_BACKEND:
        # @TODO (orm): Finish SQLA implementation
        # do sqla bootstrap code
        if (_SELECTED_BACKEND is not MISSING
                and _SELECTED_BACKEND != _SQLA_EXT):
            raise RuntimeError(_swap_backends_error)

        _SELECTED_BACKEND = _SQLA_EXT
        _SQLA_EXT.init_app(app)
    else:
        err_msg = ("Explicit ORM backend provided, but could not recognize"
                   " the value! Valid values are: '{}' and '{}';"
                   " received: '{}' instead!")
        err_msg = err_msg.format(_PEEWEE_BACKEND, _SQLALCHEMY_BACKEND,
                                 backend)
        raise RuntimeError(err_msg)

    return app
Init the extension for our chosen ORM Backend, if possible. This method will ensure that the ``db`` proxy is set to the right extension and that that extension is properly created and configured. Since it needs to call ``init_app`` it MUST be a Post Create Hook. If the chosen backend is PeeWee and no ``DATABASE`` config value is provided, we will delay initializing the extension until one is. Args: app (flask.Flask): The Flask application that was just made through the :meth:`create_app` factory that we should bind extensions to. Kwargs: orm_backend (str): If you want to explicitly specify an ORM Backend to use, you should send it in this kwarg. Valid values are either: ``'peewee'`` or ``'sqlalchemy'``. peewee_database (str): An explicit database connection URI we should immeditately add to the configuration that should be used to configure the PeeWee ORM Backend. This will result in the ``DATABASE`` key being set to this value in the config and will result in the PeeWee Flask extension being initialized IMMEDIATELY and not delayed until the next call to :meth:`configure`. Returns: flask.Flask: Returns the app it was given once this is done. Raises: RuntimeError: This is raised if we are asked to create the PeeWee ORM, but are not given a database URI in either the ``DATABASE`` config value, or the explicit ``peewee_database`` setting.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/orm.py#L193-L300
croscon/fleaker
fleaker/orm.py
ORMAwareApp._init_peewee_ext
def _init_peewee_ext(cls, app, dummy_configuration=None, dummy_configure_args=None): """Init the actual PeeWee extension with the app that was created. Since PeeWee requires the ``DATABASE`` config parameter to be present IMMEDIATELY upon initializing the application, we need to delay this construction. This is because, in standard use, we will create the app and attempt to init this extension BEFORE we configure the app, which is totally fine. To fix this, we just need to set this up to try and run after every call to configure. If there is not ``DATABASE`` config parameter present when run, this method does nothing other than reschedule itself to run in the future. In all cases, this is a Post Configure Hook that should RUN ONCE! Args: app (flask.Flask): The application you want to init the PeeWee Flask extension for. Hint: if you need to use this as a callback, use a partial to provide this. dummy_configuration (dict): The resulting application configuration that the post_configure hook provides to all of it's callbacks. We will NEVER use this, but since we utilize the post_configure system to register this for complicated apps, we gotta accept it. dummy_configure_args (list[object]): The args passed to the :meth:`configure` function that triggered this callback. Just like the above arg, we'll never use it, but we must accept it. """ # the database still isn't present, go ahead and register the callback # again, so we can try later. if 'DATABASE' not in app.config: app.add_post_configure_callback(partial(cls._init_peewee_ext, app), run_once=True) return _PEEWEE_EXT.init_app(app)
python
def _init_peewee_ext(cls, app, dummy_configuration=None, dummy_configure_args=None): """Init the actual PeeWee extension with the app that was created. Since PeeWee requires the ``DATABASE`` config parameter to be present IMMEDIATELY upon initializing the application, we need to delay this construction. This is because, in standard use, we will create the app and attempt to init this extension BEFORE we configure the app, which is totally fine. To fix this, we just need to set this up to try and run after every call to configure. If there is not ``DATABASE`` config parameter present when run, this method does nothing other than reschedule itself to run in the future. In all cases, this is a Post Configure Hook that should RUN ONCE! Args: app (flask.Flask): The application you want to init the PeeWee Flask extension for. Hint: if you need to use this as a callback, use a partial to provide this. dummy_configuration (dict): The resulting application configuration that the post_configure hook provides to all of it's callbacks. We will NEVER use this, but since we utilize the post_configure system to register this for complicated apps, we gotta accept it. dummy_configure_args (list[object]): The args passed to the :meth:`configure` function that triggered this callback. Just like the above arg, we'll never use it, but we must accept it. """ # the database still isn't present, go ahead and register the callback # again, so we can try later. if 'DATABASE' not in app.config: app.add_post_configure_callback(partial(cls._init_peewee_ext, app), run_once=True) return _PEEWEE_EXT.init_app(app)
Init the actual PeeWee extension with the app that was created. Since PeeWee requires the ``DATABASE`` config parameter to be present IMMEDIATELY upon initializing the application, we need to delay this construction. This is because, in standard use, we will create the app and attempt to init this extension BEFORE we configure the app, which is totally fine. To fix this, we just need to set this up to try and run after every call to configure. If there is not ``DATABASE`` config parameter present when run, this method does nothing other than reschedule itself to run in the future. In all cases, this is a Post Configure Hook that should RUN ONCE! Args: app (flask.Flask): The application you want to init the PeeWee Flask extension for. Hint: if you need to use this as a callback, use a partial to provide this. dummy_configuration (dict): The resulting application configuration that the post_configure hook provides to all of it's callbacks. We will NEVER use this, but since we utilize the post_configure system to register this for complicated apps, we gotta accept it. dummy_configure_args (list[object]): The args passed to the :meth:`configure` function that triggered this callback. Just like the above arg, we'll never use it, but we must accept it.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/orm.py#L303-L338
croscon/fleaker
fleaker/config.py
MultiStageConfigurableApp.configure
def configure(self, *args, **kwargs): """Configure the Application through a varied number of sources of different types. This function chains multiple possible configuration methods together in order to just "make it work". You can pass multiple configuration sources in to the method and each one will be tried in a sane fashion. Later sources will override earlier sources if keys collide. For example: .. code:: python from application import default_config app.configure(default_config, os.environ, '.secrets') In the above example, values stored in ``default_config`` will be loaded first, then overwritten by those in ``os.environ``, and so on. An endless number of configuration sources may be passed. Configuration sources are type checked and processed according to the following rules: * ``string`` - if the source is a ``str``, we will assume it is a file or module that should be loaded. If the file ends in ``.json``, then :meth:`flask.Config.from_json` is used; if the file ends in ``.py`` or ``.cfg``, then :meth:`flask.Config.from_pyfile` is used; if the module has any other extension we assume it is an import path, import the module and pass that to :meth:`flask.Config.from_object`. See below for a few more semantics on module loading. * ``dict-like`` - if the source is ``dict-like``, then :meth:`flask.Config.from_mapping` will be used. ``dict-like`` is defined as anything implementing an ``items`` method that returns a tuple of ``key``, ``val``. * ``class`` or ``module`` - if the source is an uninstantiated ``class`` or ``module``, then :meth:`flask.Config.from_object` will be used. Just like Flask's standard configuration, only uppercased keys will be loaded into the config. If the item we are passed is a ``string`` and it is determined to be a possible Python module, then a leading ``.`` is relevant. 
If a leading ``.`` is provided, we assume that the module to import is located in the current package and operate as such; if it begins with anything else we assume the import path provided is absolute. This allows you to source configuration stored in a module in your package, or in another package. Args: *args (object): Any object you want us to try to configure from. Keyword Args: whitelist_keys_from_mappings (bool): Should we whitelist the keys we pull from mappings? Very useful if you're passing in an entire OS ``environ`` and you want to omit things like ``LESSPIPE``. If no whitelist is provided, we use the pre-existing config keys as a whitelist. whitelist (list[str]): An explicit list of keys that should be allowed. If provided and ``whitelist_keys`` is ``True``, we will use that as our whitelist instead of pre-existing app config keys. """ whitelist_keys_from_mappings = kwargs.get( 'whitelist_keys_from_mappings', False ) whitelist = kwargs.get('whitelist') for item in args: if isinstance(item, string_types): _, ext = splitext(item) if ext == '.json': self._configure_from_json(item) elif ext in ('.cfg', '.py'): self._configure_from_pyfile(item) else: self._configure_from_module(item) elif isinstance(item, (types.ModuleType, type)): self._configure_from_object(item) elif hasattr(item, 'items'): # assume everything else is a mapping like object; ``.items()`` # is what Flask uses under the hood for this method # @TODO: This doesn't handle the edge case of using a tuple of # two element tuples to config; but Flask does that. IMO, if # you do that, you're a monster. self._configure_from_mapping( item, whitelist_keys=whitelist_keys_from_mappings, whitelist=whitelist ) else: raise TypeError("Could not determine a valid type for this" " configuration object: `{}`!".format(item)) # we just finished here, run the post configure callbacks self._run_post_configure_callbacks(args)
python
def configure(self, *args, **kwargs):
    """Load configuration into the app from heterogeneous sources.

    Every positional argument is inspected and routed to the matching
    loader; later sources override earlier ones when keys collide, e.g.::

        from application import default_config
        app.configure(default_config, os.environ, '.secrets')

    Dispatch rules:

    * ``str`` -- treated as a file or import path. ``.json`` files are
      loaded with :meth:`flask.Config.from_json`; ``.py``/``.cfg`` files
      with :meth:`flask.Config.from_pyfile`; any other string is imported
      as a module and handed to :meth:`flask.Config.from_object`. A
      leading ``.`` makes the import path relative to this package;
      anything else is treated as an absolute import path.
    * dict-like (anything implementing ``items()``) -- loaded via
      :meth:`flask.Config.from_mapping`.
    * uninstantiated class or module object -- loaded via
      :meth:`flask.Config.from_object`.

    As with stock Flask configuration, only uppercased keys are kept.

    Args:
        *args (object): Configuration sources, processed in order.

    Keyword Args:
        whitelist_keys_from_mappings (bool): Filter the keys pulled from
            mapping sources? Very useful when feeding in a whole OS
            ``environ`` so junk like ``LESSPIPE`` stays out. Without an
            explicit whitelist, the pre-existing config keys are used.
        whitelist (list[str]): Explicit list of allowed keys, consulted
            instead of the existing config keys when the flag above is
            set.
    """
    whitelist = kwargs.get('whitelist')
    whitelist_mappings = kwargs.get('whitelist_keys_from_mappings', False)

    for source in args:
        if isinstance(source, string_types):
            ext = splitext(source)[1]

            if ext == '.json':
                self._configure_from_json(source)
            elif ext in ('.cfg', '.py'):
                self._configure_from_pyfile(source)
            else:
                self._configure_from_module(source)
        elif isinstance(source, (types.ModuleType, type)):
            self._configure_from_object(source)
        elif hasattr(source, 'items'):
            # treat anything with ``items()`` as a mapping; that is the
            # same duck-type check Flask uses for this method
            # @TODO: This doesn't handle the edge case of using a tuple of
            # two element tuples to config; but Flask does that. IMO, if
            # you do that, you're a monster.
            self._configure_from_mapping(
                source,
                whitelist_keys=whitelist_mappings,
                whitelist=whitelist,
            )
        else:
            raise TypeError("Could not determine a valid type for this"
                            " configuration object: `{}`!".format(source))

    # everything loaded -- fire the post-configure hooks
    self._run_post_configure_callbacks(args)
Configure the Application through a varied number of sources of different types. This function chains multiple possible configuration methods together in order to just "make it work". You can pass multiple configuration sources in to the method and each one will be tried in a sane fashion. Later sources will override earlier sources if keys collide. For example: .. code:: python from application import default_config app.configure(default_config, os.environ, '.secrets') In the above example, values stored in ``default_config`` will be loaded first, then overwritten by those in ``os.environ``, and so on. An endless number of configuration sources may be passed. Configuration sources are type checked and processed according to the following rules: * ``string`` - if the source is a ``str``, we will assume it is a file or module that should be loaded. If the file ends in ``.json``, then :meth:`flask.Config.from_json` is used; if the file ends in ``.py`` or ``.cfg``, then :meth:`flask.Config.from_pyfile` is used; if the module has any other extension we assume it is an import path, import the module and pass that to :meth:`flask.Config.from_object`. See below for a few more semantics on module loading. * ``dict-like`` - if the source is ``dict-like``, then :meth:`flask.Config.from_mapping` will be used. ``dict-like`` is defined as anything implementing an ``items`` method that returns a tuple of ``key``, ``val``. * ``class`` or ``module`` - if the source is an uninstantiated ``class`` or ``module``, then :meth:`flask.Config.from_object` will be used. Just like Flask's standard configuration, only uppercased keys will be loaded into the config. If the item we are passed is a ``string`` and it is determined to be a possible Python module, then a leading ``.`` is relevant. If a leading ``.`` is provided, we assume that the module to import is located in the current package and operate as such; if it begins with anything else we assume the import path provided is absolute. 
This allows you to source configuration stored in a module in your package, or in another package. Args: *args (object): Any object you want us to try to configure from. Keyword Args: whitelist_keys_from_mappings (bool): Should we whitelist the keys we pull from mappings? Very useful if you're passing in an entire OS ``environ`` and you want to omit things like ``LESSPIPE``. If no whitelist is provided, we use the pre-existing config keys as a whitelist. whitelist (list[str]): An explicit list of keys that should be allowed. If provided and ``whitelist_keys`` is ``True``, we will use that as our whitelist instead of pre-existing app config keys.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/config.py#L56-L156
croscon/fleaker
fleaker/config.py
MultiStageConfigurableApp._configure_from_module
def _configure_from_module(self, item): """Configure from a module by import path. Effectively, you give this an absolute or relative import path, it will import it, and then pass the resulting object to ``_configure_from_object``. Args: item (str): A string pointing to a valid import path. Returns: fleaker.App: Returns itself. """ package = None if item[0] == '.': package = self.import_name obj = importlib.import_module(item, package=package) self.config.from_object(obj) return self
python
def _configure_from_module(self, item): """Configure from a module by import path. Effectively, you give this an absolute or relative import path, it will import it, and then pass the resulting object to ``_configure_from_object``. Args: item (str): A string pointing to a valid import path. Returns: fleaker.App: Returns itself. """ package = None if item[0] == '.': package = self.import_name obj = importlib.import_module(item, package=package) self.config.from_object(obj) return self
Configure from a module by import path. Effectively, you give this an absolute or relative import path, it will import it, and then pass the resulting object to ``_configure_from_object``. Args: item (str): A string pointing to a valid import path. Returns: fleaker.App: Returns itself.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/config.py#L195-L218
croscon/fleaker
fleaker/config.py
MultiStageConfigurableApp._configure_from_mapping
def _configure_from_mapping(self, item, whitelist_keys=False, whitelist=None): """Configure from a mapping, or dict, like object. Args: item (dict): A dict-like object that we can pluck values from. Keyword Args: whitelist_keys (bool): Should we whitelist the keys before adding them to the configuration? If no whitelist is provided, we use the pre-existing config keys as a whitelist. whitelist (list[str]): An explicit list of keys that should be allowed. If provided and ``whitelist_keys`` is true, we will use that as our whitelist instead of pre-existing app config keys. Returns: fleaker.App: Returns itself. """ if whitelist is None: whitelist = self.config.keys() if whitelist_keys: item = {k: v for k, v in item.items() if k in whitelist} self.config.from_mapping(item) return self
python
def _configure_from_mapping(self, item, whitelist_keys=False, whitelist=None): """Configure from a mapping, or dict, like object. Args: item (dict): A dict-like object that we can pluck values from. Keyword Args: whitelist_keys (bool): Should we whitelist the keys before adding them to the configuration? If no whitelist is provided, we use the pre-existing config keys as a whitelist. whitelist (list[str]): An explicit list of keys that should be allowed. If provided and ``whitelist_keys`` is true, we will use that as our whitelist instead of pre-existing app config keys. Returns: fleaker.App: Returns itself. """ if whitelist is None: whitelist = self.config.keys() if whitelist_keys: item = {k: v for k, v in item.items() if k in whitelist} self.config.from_mapping(item) return self
Configure from a mapping, or dict, like object. Args: item (dict): A dict-like object that we can pluck values from. Keyword Args: whitelist_keys (bool): Should we whitelist the keys before adding them to the configuration? If no whitelist is provided, we use the pre-existing config keys as a whitelist. whitelist (list[str]): An explicit list of keys that should be allowed. If provided and ``whitelist_keys`` is true, we will use that as our whitelist instead of pre-existing app config keys. Returns: fleaker.App: Returns itself.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/config.py#L220-L250
croscon/fleaker
fleaker/config.py
MultiStageConfigurableApp.configure_from_environment
def configure_from_environment(self, whitelist_keys=False, whitelist=None): """Configure from the entire set of available environment variables. This is really a shorthand for grabbing ``os.environ`` and passing to :meth:`_configure_from_mapping`. As always, only uppercase keys are loaded. Keyword Args: whitelist_keys (bool): Should we whitelist the keys by only pulling those that are already present in the config? Useful for avoiding adding things like ``LESSPIPE`` to your app config. If no whitelist is provided, we use the current config keys as our whitelist. whitelist (list[str]): An explicit list of keys that should be allowed. If provided and ``whitelist_keys`` is true, we will use that as our whitelist instead of pre-existing app config keys. Returns: fleaker.base.BaseApplication: Returns itself. """ self._configure_from_mapping(os.environ, whitelist_keys=whitelist_keys, whitelist=whitelist) return self
python
def configure_from_environment(self, whitelist_keys=False, whitelist=None): """Configure from the entire set of available environment variables. This is really a shorthand for grabbing ``os.environ`` and passing to :meth:`_configure_from_mapping`. As always, only uppercase keys are loaded. Keyword Args: whitelist_keys (bool): Should we whitelist the keys by only pulling those that are already present in the config? Useful for avoiding adding things like ``LESSPIPE`` to your app config. If no whitelist is provided, we use the current config keys as our whitelist. whitelist (list[str]): An explicit list of keys that should be allowed. If provided and ``whitelist_keys`` is true, we will use that as our whitelist instead of pre-existing app config keys. Returns: fleaker.base.BaseApplication: Returns itself. """ self._configure_from_mapping(os.environ, whitelist_keys=whitelist_keys, whitelist=whitelist) return self
Configure from the entire set of available environment variables. This is really a shorthand for grabbing ``os.environ`` and passing to :meth:`_configure_from_mapping`. As always, only uppercase keys are loaded. Keyword Args: whitelist_keys (bool): Should we whitelist the keys by only pulling those that are already present in the config? Useful for avoiding adding things like ``LESSPIPE`` to your app config. If no whitelist is provided, we use the current config keys as our whitelist. whitelist (list[str]): An explicit list of keys that should be allowed. If provided and ``whitelist_keys`` is true, we will use that as our whitelist instead of pre-existing app config keys. Returns: fleaker.base.BaseApplication: Returns itself.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/config.py#L267-L293
croscon/fleaker
fleaker/config.py
MultiStageConfigurableApp.add_post_configure_callback
def add_post_configure_callback(self, callback, run_once=False): """Add a new callback to be run after every call to :meth:`configure`. Functions run at the end of :meth:`configure` are given the application's resulting configuration and the arguments passed to :meth:`configure`, in that order. As a note, this first argument will be an immutable dictionary. The return value of all registered callbacks is entirely ignored. Callbacks are run in the order they are registered, but you should never depend on another callback. .. admonition:: The "Resulting" Configuration The first argument to the callback is always the "resulting" configuration from the call to :meth:`configure`. What this means is you will get the Application's FROZEN configuration after the call to :meth:`configure` finished. Moreover, this resulting configuration will be an :class:`~werkzeug.datastructures.ImmutableDict`. The purpose of a Post Configure callback is not to futher alter the configuration, but rather to do lazy initialization for anything that absolutely requires the configuration, so any attempt to alter the configuration of the app has been made intentionally difficult! Args: callback (function): The function you wish to run after :meth:`configure`. Will receive the application's current configuration as the first arugment, and the same arguments passed to :meth:`configure` as the second. Keyword Args: run_once (bool): Should this callback run every time configure is called? Or just once and be deregistered? Pass ``True`` to only run it once. Returns: fleaker.base.BaseApplication: Returns itself for a fluent interface. """ if run_once: self._post_configure_callbacks['single'].append(callback) else: self._post_configure_callbacks['multiple'].append(callback) return self
python
def add_post_configure_callback(self, callback, run_once=False): """Add a new callback to be run after every call to :meth:`configure`. Functions run at the end of :meth:`configure` are given the application's resulting configuration and the arguments passed to :meth:`configure`, in that order. As a note, this first argument will be an immutable dictionary. The return value of all registered callbacks is entirely ignored. Callbacks are run in the order they are registered, but you should never depend on another callback. .. admonition:: The "Resulting" Configuration The first argument to the callback is always the "resulting" configuration from the call to :meth:`configure`. What this means is you will get the Application's FROZEN configuration after the call to :meth:`configure` finished. Moreover, this resulting configuration will be an :class:`~werkzeug.datastructures.ImmutableDict`. The purpose of a Post Configure callback is not to futher alter the configuration, but rather to do lazy initialization for anything that absolutely requires the configuration, so any attempt to alter the configuration of the app has been made intentionally difficult! Args: callback (function): The function you wish to run after :meth:`configure`. Will receive the application's current configuration as the first arugment, and the same arguments passed to :meth:`configure` as the second. Keyword Args: run_once (bool): Should this callback run every time configure is called? Or just once and be deregistered? Pass ``True`` to only run it once. Returns: fleaker.base.BaseApplication: Returns itself for a fluent interface. """ if run_once: self._post_configure_callbacks['single'].append(callback) else: self._post_configure_callbacks['multiple'].append(callback) return self
Add a new callback to be run after every call to :meth:`configure`. Functions run at the end of :meth:`configure` are given the application's resulting configuration and the arguments passed to :meth:`configure`, in that order. As a note, this first argument will be an immutable dictionary. The return value of all registered callbacks is entirely ignored. Callbacks are run in the order they are registered, but you should never depend on another callback. .. admonition:: The "Resulting" Configuration The first argument to the callback is always the "resulting" configuration from the call to :meth:`configure`. What this means is you will get the Application's FROZEN configuration after the call to :meth:`configure` finished. Moreover, this resulting configuration will be an :class:`~werkzeug.datastructures.ImmutableDict`. The purpose of a Post Configure callback is not to futher alter the configuration, but rather to do lazy initialization for anything that absolutely requires the configuration, so any attempt to alter the configuration of the app has been made intentionally difficult! Args: callback (function): The function you wish to run after :meth:`configure`. Will receive the application's current configuration as the first arugment, and the same arguments passed to :meth:`configure` as the second. Keyword Args: run_once (bool): Should this callback run every time configure is called? Or just once and be deregistered? Pass ``True`` to only run it once. Returns: fleaker.base.BaseApplication: Returns itself for a fluent interface.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/config.py#L295-L344
croscon/fleaker
fleaker/config.py
MultiStageConfigurableApp._run_post_configure_callbacks
def _run_post_configure_callbacks(self, configure_args):
    """Run every stored post-configure callback.

    Each callback receives the configuration that resulted from the call
    to :meth:`configure` (as an immutable mapping) followed by the
    arguments that were passed to :meth:`configure`. Return values are
    ignored in all fashion.

    Args:
        configure_args (list[object]): The full list of arguments passed
            to :meth:`configure`.

    Returns:
        None: Does not return anything.
    """
    frozen_config = ImmutableDict(self.config)

    # snapshot both buckets so callbacks that register new callbacks
    # while running cannot mutate the sequences we iterate
    repeating = list(self._post_configure_callbacks['multiple'])
    one_shot = list(self._post_configure_callbacks['single'])

    # one-shot callbacks are consumed by this run; deregister them now
    self._post_configure_callbacks['single'] = []

    for cb in repeating:
        cb(frozen_config, configure_args)

    # single-run callbacks always go after the repeating ones
    for cb in one_shot:
        cb(frozen_config, configure_args)
python
def _run_post_configure_callbacks(self, configure_args): """Run all post configure callbacks we have stored. Functions are passed the configuration that resulted from the call to :meth:`configure` as the first argument, in an immutable form; and are given the arguments passed to :meth:`configure` for the second argument. Returns from callbacks are ignored in all fashion. Args: configure_args (list[object]): The full list of arguments passed to :meth:`configure`. Returns: None: Does not return anything. """ resulting_configuration = ImmutableDict(self.config) # copy callbacks in case people edit them while running multiple_callbacks = copy.copy( self._post_configure_callbacks['multiple'] ) single_callbacks = copy.copy(self._post_configure_callbacks['single']) # clear out the singles self._post_configure_callbacks['single'] = [] for callback in multiple_callbacks: callback(resulting_configuration, configure_args) # now do the single run callbacks for callback in single_callbacks: callback(resulting_configuration, configure_args)
Run all post configure callbacks we have stored. Functions are passed the configuration that resulted from the call to :meth:`configure` as the first argument, in an immutable form; and are given the arguments passed to :meth:`configure` for the second argument. Returns from callbacks are ignored in all fashion. Args: configure_args (list[object]): The full list of arguments passed to :meth:`configure`. Returns: None: Does not return anything.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/config.py#L346-L379
croscon/fleaker
fleaker/marshmallow/schema.py
Schema.make_instance
def make_instance(cls, data):
    """Validate ``data`` and build a model instance from it.

    Args:
        data (dict): The unserialized data to insert into the new model
            instance through its constructor.

    Returns:
        peewee.Model|sqlalchemy.Model: The model instance with its data
            inserted into it.

    Raises:
        AttributeError: This is raised if ``Meta.model`` isn't set on the
            schema's definition.
    """
    schema = cls()

    # a target model is mandatory; fail loudly before attempting a load
    if not hasattr(schema.Meta, 'model'):
        raise AttributeError("In order to make an instance, a model for "
                             "the schema must be defined in the Meta "
                             "class.")

    # ``load`` deserializes and validates; ``.data`` holds the result
    loaded = schema.load(data).data

    return cls.Meta.model(**loaded)
python
def make_instance(cls, data): """Validate the data and create a model instance from the data. Args: data (dict): The unserialized data to insert into the new model instance through it's constructor. Returns: peewee.Model|sqlalchemy.Model: The model instance with it's data inserted into it. Raises: AttributeError: This is raised if ``Meta.model`` isn't set on the schema's definition. """ schema = cls() if not hasattr(schema.Meta, 'model'): raise AttributeError("In order to make an instance, a model for " "the schema must be defined in the Meta " "class.") serialized_data = schema.load(data).data return cls.Meta.model(**serialized_data)
Validate the data and create a model instance from the data. Args: data (dict): The unserialized data to insert into the new model instance through its constructor. Returns: peewee.Model|sqlalchemy.Model: The model instance with its data inserted into it. Raises: AttributeError: This is raised if ``Meta.model`` isn't set on the schema's definition.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/marshmallow/schema.py#L41-L65
croscon/fleaker
fleaker/marshmallow/schema.py
Schema.invalid_fields
def invalid_fields(self, data, original_data):
    """Validator that rejects keys not declared on the schema.

    If the schema declares keys ``a`` and ``b`` and the incoming payload
    also carries ``c``, loading raises a
    :class:`marshmallow.ValidationError` naming the excess keys.

    Raises:
        marshmallow.ValidationError: Raised if extra keys exist in the
            passed in data.
    """
    known = self.fields.keys()

    # container-typed keys come from nested fields and would loop
    # infinitely, so they are skipped outright
    unexpected = [
        field for field in original_data
        if not isinstance(field, (set, list, tuple, dict))
        and field not in known
    ]

    if unexpected:
        raise ValidationError("Invalid field", field_names=unexpected)
python
def invalid_fields(self, data, original_data): """Validator that checks if any keys provided aren't in the schema. Say your schema has support for keys ``a`` and ``b`` and the data provided has keys ``a``, ``b``, and ``c``. When the data is loaded into the schema, a :class:`marshmallow.ValidationError` will be raised informing the developer that excess keys have been provided. Raises: marshmallow.ValidationError: Raised if extra keys exist in the passed in data. """ errors = [] for field in original_data: # Skip nested fields because they will loop infinitely if isinstance(field, (set, list, tuple, dict)): continue if field not in self.fields.keys(): errors.append(field) if errors: raise ValidationError("Invalid field", field_names=errors)
Validator that checks if any keys provided aren't in the schema. Say your schema has support for keys ``a`` and ``b`` and the data provided has keys ``a``, ``b``, and ``c``. When the data is loaded into the schema, a :class:`marshmallow.ValidationError` will be raised informing the developer that excess keys have been provided. Raises: marshmallow.ValidationError: Raised if extra keys exist in the passed in data.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/marshmallow/schema.py#L68-L91
croscon/fleaker
fleaker/peewee/fields/arrow.py
ArrowDateTimeField.python_value
def python_value(self, value):
    """Return the value stored in the database as an Arrow object.

    Returns:
        arrow.Arrow: An instance of arrow with the field filled in.
    """
    value = super(ArrowDateTimeField, self).python_value(value)

    # dates, datetimes, and strings can all be parsed by arrow; anything
    # else (e.g. ``None``) is passed through untouched
    if not isinstance(value, (datetime.datetime, datetime.date,
                              string_types)):
        return value

    return arrow.get(value)
python
def python_value(self, value): """Return the value in the data base as an arrow object. Returns: arrow.Arrow: An instance of arrow with the field filled in. """ value = super(ArrowDateTimeField, self).python_value(value) if (isinstance(value, (datetime.datetime, datetime.date, string_types))): return arrow.get(value) return value
Return the value in the data base as an arrow object. Returns: arrow.Arrow: An instance of arrow with the field filled in.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/peewee/fields/arrow.py#L68-L80
croscon/fleaker
fleaker/peewee/fields/arrow.py
ArrowDateTimeField.db_value
def db_value(self, value):
    """Convert the Arrow instance to a datetime for saving in the db."""
    # strings are parsed into Arrow first so the branch below collapses
    # both cases into a plain datetime
    coerced = arrow.get(value) if isinstance(value, string_types) else value

    if isinstance(coerced, arrow.Arrow):
        coerced = coerced.datetime

    return super(ArrowDateTimeField, self).db_value(coerced)
python
def db_value(self, value): """Convert the Arrow instance to a datetime for saving in the db.""" if isinstance(value, string_types): value = arrow.get(value) if isinstance(value, arrow.Arrow): value = value.datetime return super(ArrowDateTimeField, self).db_value(value)
Convert the Arrow instance to a datetime for saving in the db.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/peewee/fields/arrow.py#L82-L90
croscon/fleaker
fleaker/component.py
Component.init_app
def init_app(self, app, context=DEFAULT_DICT):
    """Lazy constructor for the :class:`Component` class.

    This method will allow the component to be used like a Flask
    extension/singleton.

    Args:
        app (flask.Flask): The Application to base this Component upon.
            Useful for app wide singletons.

    Keyword Args:
        context (dict, optional): The contextual information to supply to
            this component.
    """
    # NOTE(review): the default is ``DEFAULT_DICT``, not
    # ``_CONTEXT_MISSING``, so a plain ``init_app(app)`` call still takes
    # this branch and resets the stored context to the default mapping —
    # confirm that is intentional.
    if context is not _CONTEXT_MISSING:
        self.update_context(context, app=app)

    # do not readd callbacks if already present; and if there's no context
    # present, there's no real need to add callbacks
    if (app not in _CONTEXT_CALLBACK_MAP
            and context is not _CONTEXT_MISSING):
        key = self._get_context_name(app=app)
        self._context_callbacks(app, key, original_context=context)
python
def init_app(self, app, context=DEFAULT_DICT): """Lazy constructor for the :class:`Component` class. This method will allow the component to be used like a Flask extension/singleton. Args: app (flask.Flask): The Application to base this Component upon. Useful for app wide singletons. Keyword Args: context (dict, optional): The contextual information to supply to this component. """ if context is not _CONTEXT_MISSING: self.update_context(context, app=app) # do not readd callbacks if already present; and if there's no context # present, there's no real need to add callbacks if (app not in _CONTEXT_CALLBACK_MAP and context is not _CONTEXT_MISSING): key = self._get_context_name(app=app) self._context_callbacks(app, key, original_context=context)
Lazy constructor for the :class:`Component` class. This method will allow the component to be used like a Flask extension/singleton. Args: app (flask.Flask): The Application to base this Component upon. Useful for app wide singletons. Keyword Args: context (dict, optional): The contextual information to supply to this component.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/component.py#L102-L124
croscon/fleaker
fleaker/component.py
Component._context_callbacks
def _context_callbacks(app, key, original_context=_CONTEXT_MISSING):
    """Register the callbacks we need to properly pop and push the
    app-local context for a component.

    Args:
        app (flask.Flask): The app who this context belongs to. This is
            the only sender our Blinker signal will listen to.
        key (str): The key on ``_CONTEXT_LOCALS`` that this app's context
            listens to.

    Kwargs:
        original_context (dict): The original context present whenever
            these callbacks were registered. We will restore the context
            to this value whenever the app context gets popped.

    Returns:
        (function, function): A two-element tuple of the dynamic functions
            we generated as appcontext callbacks. The first element is the
            callback for ``appcontext_pushed`` (i.e., get and store the
            current context) and the second element is the callback for
            ``appcontext_popped`` (i.e., restore the current context to
            its original value).
    """
    def _get_context(dummy_app):
        """Set the context proxy so that it points to a specific context.
        """
        # presumably ``_CONTEXT_LOCALS(key)`` returns a werkzeug
        # LocalProxy bound to ``key`` — confirm against the Local's
        # definition
        _CONTEXT_LOCALS.context = _CONTEXT_LOCALS(key)  # pylint: disable=assigning-non-slot

    def _clear_context(dummy_app):
        """Remove the context proxy that points to a specific context and
        restore the original context, if there was one.
        """
        # the proxy may never have been set for this app; that's fine
        try:
            del _CONTEXT_LOCALS.context
        except AttributeError:
            pass

        if original_context is not _CONTEXT_MISSING:
            setattr(_CONTEXT_LOCALS, key, original_context)

    # store for later so Blinker doesn't remove these listeners and so we
    # don't add them twice
    _CONTEXT_CALLBACK_MAP[app] = (_get_context, _clear_context)

    # and listen for any app context changes
    appcontext_pushed.connect(_get_context, app)
    appcontext_popped.connect(_clear_context, app)

    return (_get_context, _clear_context)
python
def _context_callbacks(app, key, original_context=_CONTEXT_MISSING): """Register the callbacks we need to properly pop and push the app-local context for a component. Args: app (flask.Flask): The app who this context belongs to. This is the only sender our Blinker signal will listen to. key (str): The key on ``_CONTEXT_LOCALS`` that this app's context listens to. Kwargs: original_context (dict): The original context present whenever these callbacks were registered. We will restore the context to this value whenever the app context gets popped. Returns: (function, function): A two-element tuple of the dynamic functions we generated as appcontext callbacks. The first element is the callback for ``appcontext_pushed`` (i.e., get and store the current context) and the second element is the callback for ``appcontext_popped`` (i.e., restore the current context to to it's original value). """ def _get_context(dummy_app): """Set the context proxy so that it points to a specific context. """ _CONTEXT_LOCALS.context = _CONTEXT_LOCALS(key) # pylint: disable=assigning-non-slot def _clear_context(dummy_app): """Remove the context proxy that points to a specific context and restore the original context, if there was one. """ try: del _CONTEXT_LOCALS.context except AttributeError: pass if original_context is not _CONTEXT_MISSING: setattr(_CONTEXT_LOCALS, key, original_context) # store for later so Blinker doesn't remove these listeners and so we # don't add them twice _CONTEXT_CALLBACK_MAP[app] = (_get_context, _clear_context) # and listen for any app context changes appcontext_pushed.connect(_get_context, app) appcontext_popped.connect(_clear_context, app) return (_get_context, _clear_context)
Register the callbacks we need to properly pop and push the app-local context for a component. Args: app (flask.Flask): The app who this context belongs to. This is the only sender our Blinker signal will listen to. key (str): The key on ``_CONTEXT_LOCALS`` that this app's context listens to. Kwargs: original_context (dict): The original context present whenever these callbacks were registered. We will restore the context to this value whenever the app context gets popped. Returns: (function, function): A two-element tuple of the dynamic functions we generated as appcontext callbacks. The first element is the callback for ``appcontext_pushed`` (i.e., get and store the current context) and the second element is the callback for ``appcontext_popped`` (i.e., restore the current context to its original value).
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/component.py#L127-L175
croscon/fleaker
fleaker/component.py
Component.update_context
def update_context(self, context, app=None):
    """Replace the component's context with a new one.

    Args:
        context (dict): The new context to set this component's context
            to.

    Keyword Args:
        app (flask.Flask, optional): The app to update this context for.
            If not provided, the result of ``Component.app`` will be
            used.
    """
    # an eagerly-bound context lives on the instance itself; otherwise we
    # need an explicit app or an active app context to locate the
    # app-local slot
    eager = self._context is not _CONTEXT_MISSING

    if app is None and not eager and not in_app_context():
        raise RuntimeError("Attempted to update component context without"
                           " a bound app context or eager app set! Please"
                           " pass the related app you want to update the"
                           " context for!")

    frozen = ImmutableDict(context)

    if eager:
        self._context = frozen
    else:
        setattr(_CONTEXT_LOCALS, self._get_context_name(app=app), frozen)
python
def update_context(self, context, app=None): """Replace the component's context with a new one. Args: context (dict): The new context to set this component's context to. Keyword Args: app (flask.Flask, optional): The app to update this context for. If not provided, the result of ``Component.app`` will be used. """ if (app is None and self._context is _CONTEXT_MISSING and not in_app_context()): raise RuntimeError("Attempted to update component context without" " a bound app context or eager app set! Please" " pass the related app you want to update the" " context for!") if self._context is not _CONTEXT_MISSING: self._context = ImmutableDict(context) else: key = self._get_context_name(app=app) setattr(_CONTEXT_LOCALS, key, ImmutableDict(context))
Replace the component's context with a new one. Args: context (dict): The new context to set this component's context to. Keyword Args: app (flask.Flask, optional): The app to update this context for. If not provided, the result of ``Component.app`` will be used.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/component.py#L199-L220
croscon/fleaker
fleaker/component.py
Component.clear_context
def clear_context(self, app=None):
    """Clear the component's context.

    Keyword Args:
        app (flask.Flask, optional): The app to clear this component's
            context for. If omitted, the value from ``Component.app`` is
            used.
    """
    eager = self._context is not _CONTEXT_MISSING

    if app is None and not eager and not in_app_context():
        raise RuntimeError("Attempted to clear component context without"
                           " a bound app context or eager app set! Please"
                           " pass the related app you want to update the"
                           " context for!")

    # reset either the eager instance slot or the app-local slot back to
    # the shared default mapping
    if eager:
        self._context = DEFAULT_DICT
    else:
        setattr(_CONTEXT_LOCALS, self._get_context_name(app=app),
                DEFAULT_DICT)
python
def clear_context(self, app=None): """Clear the component's context. Keyword Args: app (flask.Flask, optional): The app to clear this component's context for. If omitted, the value from ``Component.app`` is used. """ if (app is None and self._context is _CONTEXT_MISSING and not in_app_context()): raise RuntimeError("Attempted to clear component context without" " a bound app context or eager app set! Please" " pass the related app you want to update the" " context for!") if self._context is not _CONTEXT_MISSING: self._context = DEFAULT_DICT else: key = self._get_context_name(app=app) setattr(_CONTEXT_LOCALS, key, DEFAULT_DICT)
Clear the component's context. Keyword Args: app (flask.Flask, optional): The app to clear this component's context for. If omitted, the value from ``Component.app`` is used.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/component.py#L222-L241
croscon/fleaker
fleaker/component.py
Component.app
def app(self):
    """Supply the app this component should operate on.

    Returns:
        flask.Flask: The app to use within the component.

    Raises:
        RuntimeError: This is raised if no app was provided to the
            component and the method is being called outside of an
            application context.
    """
    candidate = self._app or current_app

    if not in_app_context(candidate):
        raise RuntimeError("This component hasn't been initialized yet "
                           "and an app context doesn't exist.")

    # ``current_app`` is a werkzeug proxy; unwrap it so identity checks
    # (``id(app)``) agree with the real application object
    if hasattr(candidate, '_get_current_object'):
        candidate = candidate._get_current_object()

    return candidate
python
def app(self): """Internal method that will supply the app to use internally. Returns: flask.Flask: The app to use within the component. Raises: RuntimeError: This is raised if no app was provided to the component and the method is being called outside of an application context. """ app = self._app or current_app if not in_app_context(app): raise RuntimeError("This component hasn't been initialized yet " "and an app context doesn't exist.") # If current_app is the app, this must be used in order for their IDs # to be the same, as current_app will wrap the app in a proxy. if hasattr(app, '_get_current_object'): app = app._get_current_object() return app
Internal method that will supply the app to use internally. Returns: flask.Flask: The app to use within the component. Raises: RuntimeError: This is raised if no app was provided to the component and the method is being called outside of an application context.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/component.py#L244-L266
croscon/fleaker
fleaker/component.py
Component._get_context_name
def _get_context_name(self, app=None):
    """Generate the name of the context variable for this component & app.

    Because we store the ``context`` in a Local so the component can be
    used across multiple apps, we cannot store the context on the
    instance itself. This function will generate a unique and predictable
    key in which to store the context.

    Returns:
        str: The name of the context variable to set and get the context
            from.
    """
    parts = [self.__class__.__name__, 'context', text_type(id(self))]

    if app:
        parts.append(text_type(id(app)))
    else:
        # fall back to the bound/current app; with neither available the
        # key is scoped to the component instance alone
        try:
            parts.append(text_type(id(self.app)))
        except RuntimeError:
            pass

    return '_'.join(parts)
python
def _get_context_name(self, app=None): """Generate the name of the context variable for this component & app. Because we store the ``context`` in a Local so the component can be used across multiple apps, we cannot store the context on the instance itself. This function will generate a unique and predictable key in which to store the context. Returns: str: The name of the context variable to set and get the context from. """ elements = [ self.__class__.__name__, 'context', text_type(id(self)), ] if app: elements.append(text_type(id(app))) else: try: elements.append(text_type(id(self.app))) except RuntimeError: pass return '_'.join(elements)
Generate the name of the context variable for this component & app. Because we store the ``context`` in a Local so the component can be used across multiple apps, we cannot store the context on the instance itself. This function will generate a unique and predictable key in which to store the context. Returns: str: The name of the context variable to set and get the context from.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/component.py#L277-L303
croscon/fleaker
fleaker/base.py
BaseApplication.create_app
def create_app(cls, import_name, **settings):
    """Create a standard Fleaker web application.

    This is the main entrypoint for creating your Fleaker application.
    Instead of defining your own app factory function, it's preferred
    that you use :meth:`create_app`, which runs the mixin setup hooks
    (:meth:`pre_create_app` / :meth:`post_create_app`) around the actual
    Flask construction.

    Usage is easy:

    .. code:: python

        from fleaker import App

        def my_create_app():
            app = App.create_app(__name__)
            return app

    And the rest works like a normal Flask app with application factories
    setup!

    .. versionadded:: 0.1.0
       This has always been the preferred way to create Fleaker
       Applications.
    """
    # give mixins a chance to massage the settings first
    settings = cls.pre_create_app(**settings)

    # Flask's __init__ rejects unknown kwargs, so drop anything that
    # belongs to a mixin rather than to Flask itself
    app = cls(import_name, **cls._whitelist_standard_flask_kwargs(settings))

    return cls.post_create_app(app, **settings)
python
def create_app(cls, import_name, **settings): """Create a standard Fleaker web application. This is the main entrypoint for creating your Fleaker application. Instead of defining your own app factory function, it's preferred that you use :meth:`create_app`, which is responsible for automatically configuring extensions (such as your ORM), parsing setup code for mixins, and calling relevant hooks (such as to setup logging). Usage is easy: .. code:: python from fleaker import App def my_create_app(): app = App.create_app(__name__) return app And the rest works like a normal Flask app with application factories setup! .. versionadded:: 0.1.0 This has always been the preferred way to create Fleaker Applications. """ settings = cls.pre_create_app(**settings) # now whitelist the settings flask_kwargs = cls._whitelist_standard_flask_kwargs(settings) app = cls(import_name, **flask_kwargs) return cls.post_create_app(app, **settings)
Create a standard Fleaker web application. This is the main entrypoint for creating your Fleaker application. Instead of defining your own app factory function, it's preferred that you use :meth:`create_app`, which is responsible for automatically configuring extensions (such as your ORM), parsing setup code for mixins, and calling relevant hooks (such as to setup logging). Usage is easy: .. code:: python from fleaker import App def my_create_app(): app = App.create_app(__name__) return app And the rest works like a normal Flask app with application factories setup! .. versionadded:: 0.1.0 This has always been the preferred way to create Fleaker Applications.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/base.py#L72-L104
croscon/fleaker
fleaker/base.py
BaseApplication._whitelist_standard_flask_kwargs
def _whitelist_standard_flask_kwargs(cls, kwargs): """Whitelist a dictionary of kwargs to remove any that are not valid for Flask's ``__init__`` constructor. Since many Fleaker app mixins define their own kwargs for use in construction and Flask itself does not accept ``**kwargs``, we need to whitelist anything unknown. Uses the proper argspec from the :meth:`flask.Flask.__init__` so it should handle all args. Args: kwargs (dict): The dictionary of kwargs you want to whitelist. Returns: dict: The whitelisted dictionary of kwargs. """ # prevent any copy shenanigans from happening kwargs = deepcopy(kwargs) if not cls._flask_init_argspec_cache: cls._flask_init_argspec_cache = inspect.getargspec(Flask.__init__) return {key: val for key, val in iteritems(kwargs) if key in cls._flask_init_argspec_cache.args}
python
def _whitelist_standard_flask_kwargs(cls, kwargs): """Whitelist a dictionary of kwargs to remove any that are not valid for Flask's ``__init__`` constructor. Since many Fleaker app mixins define their own kwargs for use in construction and Flask itself does not accept ``**kwargs``, we need to whitelist anything unknown. Uses the proper argspec from the :meth:`flask.Flask.__init__` so it should handle all args. Args: kwargs (dict): The dictionary of kwargs you want to whitelist. Returns: dict: The whitelisted dictionary of kwargs. """ # prevent any copy shenanigans from happening kwargs = deepcopy(kwargs) if not cls._flask_init_argspec_cache: cls._flask_init_argspec_cache = inspect.getargspec(Flask.__init__) return {key: val for key, val in iteritems(kwargs) if key in cls._flask_init_argspec_cache.args}
Whitelist a dictionary of kwargs to remove any that are not valid for Flask's ``__init__`` constructor. Since many Fleaker app mixins define their own kwargs for use in construction and Flask itself does not accept ``**kwargs``, we need to whitelist anything unknown. Uses the proper argspec from the :meth:`flask.Flask.__init__` so it should handle all args. Args: kwargs (dict): The dictionary of kwargs you want to whitelist. Returns: dict: The whitelisted dictionary of kwargs.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/base.py#L107-L131
croscon/fleaker
fleaker/logging.py
FleakerLogFormatter.format
def format(self, record):
    """Format the log record, injecting terminal color attributes."""
    level = getattr(record, 'levelname', None)

    # look up the color for this level; unknown levels get no color and
    # therefore no reset sequence either
    color = getattr(self.TermColors, level, '') if level else ''

    record.levelcolor = color
    record.endlevelcolor = self.TermColors.ENDC if color else ''

    return super(FleakerLogFormatter, self).format(record)
python
def format(self, record): """Format the log record.""" levelname = getattr(record, 'levelname', None) record.levelcolor = '' record.endlevelcolor = '' if levelname: level_color = getattr(self.TermColors, levelname, '') record.levelcolor = level_color record.endlevelcolor = self.TermColors.ENDC if level_color else '' return super(FleakerLogFormatter, self).format(record)
Format the log record.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/logging.py#L59-L70
croscon/fleaker
fleaker/exceptions.py
FleakerBaseException.errorhandler_callback
def errorhandler_callback(cls, exc): """This function should be called in the global error handlers. This will allow for consolidating of cleanup tasks if the exception bubbles all the way to the top of the stack. For example, this method will automatically rollback the database session if the exception bubbles to the top. This is the method that :meth:`register_errorhandler` adds as an errorhandler. See the documentation there for more info. Args: exc (FleakerBaseException): The exception that was thrown that we are to handle. """ # @TODO (orm, exc): Implement this when the ORM/DB stuff is done # if not exc.prevent_rollback: # db.session.rollback() if exc.flash_message: flash(exc.flash_message, exc.flash_level) if exc.redirect is not MISSING: return redirect(url_for(exc.redirect, **exc.redirect_args)) error_result = exc.error_page() if error_result is not None: return error_result, exc.status_code or 500
python
def errorhandler_callback(cls, exc): """This function should be called in the global error handlers. This will allow for consolidating of cleanup tasks if the exception bubbles all the way to the top of the stack. For example, this method will automatically rollback the database session if the exception bubbles to the top. This is the method that :meth:`register_errorhandler` adds as an errorhandler. See the documentation there for more info. Args: exc (FleakerBaseException): The exception that was thrown that we are to handle. """ # @TODO (orm, exc): Implement this when the ORM/DB stuff is done # if not exc.prevent_rollback: # db.session.rollback() if exc.flash_message: flash(exc.flash_message, exc.flash_level) if exc.redirect is not MISSING: return redirect(url_for(exc.redirect, **exc.redirect_args)) error_result = exc.error_page() if error_result is not None: return error_result, exc.status_code or 500
This function should be called in the global error handlers. This will allow for consolidating of cleanup tasks if the exception bubbles all the way to the top of the stack. For example, this method will automatically rollback the database session if the exception bubbles to the top. This is the method that :meth:`register_errorhandler` adds as an errorhandler. See the documentation there for more info. Args: exc (FleakerBaseException): The exception that was thrown that we are to handle.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/exceptions.py#L116-L144
croscon/fleaker
fleaker/exceptions.py
ErrorAwareApp.post_create_app
def post_create_app(cls, app, **settings):
    """Register the errorhandler for the AppException to the passed in
    App.

    Args:
        app (fleaker.base.BaseApplication): A Flask application that
            extends the Fleaker Base Application, such that the hooks are
            implemented.

    Kwargs:
        register_errorhandler (bool): A boolean indicating if we want to
            automatically register an errorhandler for the
            :class:`AppException` exception class after we create this
            App. Pass ``False`` to prevent registration. Default is
            ``True``.

    Returns:
        fleaker.base.BaseApplication: Returns the app it was given.
    """
    # the flag is consumed here so it never leaks to downstream hooks
    if settings.pop('register_errorhandler', True):
        AppException.register_errorhandler(app)

    return app
python
def post_create_app(cls, app, **settings): """Register the errorhandler for the AppException to the passed in App. Args: app (fleaker.base.BaseApplication): A Flask application that extends the Fleaker Base Application, such that the hooks are implemented. Kwargs: register_errorhandler (bool): A boolean indicating if we want to automatically register an errorhandler for the :class:`AppException` exception class after we create this App. Pass ``False`` to prevent registration. Default is ``True``. Returns: fleaker.base.BaseApplication: Returns the app it was given. """ register_errorhandler = settings.pop('register_errorhandler', True) if register_errorhandler: AppException.register_errorhandler(app) return app
Register the errorhandler for the AppException to the passed in App. Args: app (fleaker.base.BaseApplication): A Flask application that extends the Fleaker Base Application, such that the hooks are implemented. Kwargs: register_errorhandler (bool): A boolean indicating if we want to automatically register an errorhandler for the :class:`AppException` exception class after we create this App. Pass ``False`` to prevent registration. Default is ``True``. Returns: fleaker.base.BaseApplication: Returns the app it was given.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/exceptions.py#L349-L373
croscon/fleaker
examples/fleaker_config/fleaker_config/fleaker_config.py
create_app
def create_app(): """Create the standard app for ``fleaker_config`` and register the two routes required. """ app = App.create_app(__name__) app.configure('.configs.settings') # yes, I should use blueprints; but I don't really care for such a small # toy app @app.route('/config') def get_config(): """Get the current configuration of the app.""" return jsonify(app.config) @app.route('/put_config', methods=['PUT']) def put_config(): """Add to the current configuration of the app. Takes any JSON body and adds all keys to the configs with the provided values. """ data = request.json() for key, val in data.items(): app.config[key] = val return jsonify({'message': 'Config updated!'}) return app
python
def create_app(): """Create the standard app for ``fleaker_config`` and register the two routes required. """ app = App.create_app(__name__) app.configure('.configs.settings') # yes, I should use blueprints; but I don't really care for such a small # toy app @app.route('/config') def get_config(): """Get the current configuration of the app.""" return jsonify(app.config) @app.route('/put_config', methods=['PUT']) def put_config(): """Add to the current configuration of the app. Takes any JSON body and adds all keys to the configs with the provided values. """ data = request.json() for key, val in data.items(): app.config[key] = val return jsonify({'message': 'Config updated!'}) return app
Create the standard app for ``fleaker_config`` and register the two routes required.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/examples/fleaker_config/fleaker_config/fleaker_config.py#L15-L43
croscon/fleaker
fleaker/peewee/mixins/field_signature.py
FieldSignatureMixin.update_signature
def update_signature(self): """Update the signature field by hashing the ``signature_fields``. Raises: AttributeError: This is raised if ``Meta.signature_fields`` has no values in it or if a field in there is not a field on the model. """ if not self._meta.signature_fields: raise AttributeError( "No fields defined in {}.Meta.signature_fields. Please define " "at least one.".format(type(self).__name__) ) # If the field is archived, unset the signature so records in the # future can have this value. if getattr(self, 'archived', False): self.signature = None return # Otherwise, combine the values of the fields together and SHA1 them computed = [getattr(self, value) or ' ' for value in self._meta.signature_fields] computed = ''.join([text_type(value) for value in computed]) # If computed is a falsey value, that means all the fields were # None or blank and that will lead to some pain. if computed: self.signature = sha1(computed.encode('utf-8')).hexdigest()
python
def update_signature(self): """Update the signature field by hashing the ``signature_fields``. Raises: AttributeError: This is raised if ``Meta.signature_fields`` has no values in it or if a field in there is not a field on the model. """ if not self._meta.signature_fields: raise AttributeError( "No fields defined in {}.Meta.signature_fields. Please define " "at least one.".format(type(self).__name__) ) # If the field is archived, unset the signature so records in the # future can have this value. if getattr(self, 'archived', False): self.signature = None return # Otherwise, combine the values of the fields together and SHA1 them computed = [getattr(self, value) or ' ' for value in self._meta.signature_fields] computed = ''.join([text_type(value) for value in computed]) # If computed is a falsey value, that means all the fields were # None or blank and that will lead to some pain. if computed: self.signature = sha1(computed.encode('utf-8')).hexdigest()
Update the signature field by hashing the ``signature_fields``. Raises: AttributeError: This is raised if ``Meta.signature_fields`` has no values in it or if a field in there is not a field on the model.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/peewee/mixins/field_signature.py#L102-L130
mozilla-releng/mozapkpublisher
mozapkpublisher/common/googleplay.py
connect
def connect(service_account, credentials_file_path, api_version='v2'): """ Connect to the google play interface """ # Create an httplib2.Http object to handle our HTTP requests an # authorize it with the Credentials. Note that the first parameter, # service_account_name, is the Email address created for the Service # account. It must be the email address associated with the key that # was created. scope = 'https://www.googleapis.com/auth/androidpublisher' credentials = ServiceAccountCredentials.from_p12_keyfile(service_account, credentials_file_path, scopes=scope) http = httplib2.Http() http = credentials.authorize(http) service = build('androidpublisher', api_version, http=http, cache_discovery=False) return service
python
def connect(service_account, credentials_file_path, api_version='v2'): """ Connect to the google play interface """ # Create an httplib2.Http object to handle our HTTP requests an # authorize it with the Credentials. Note that the first parameter, # service_account_name, is the Email address created for the Service # account. It must be the email address associated with the key that # was created. scope = 'https://www.googleapis.com/auth/androidpublisher' credentials = ServiceAccountCredentials.from_p12_keyfile(service_account, credentials_file_path, scopes=scope) http = httplib2.Http() http = credentials.authorize(http) service = build('androidpublisher', api_version, http=http, cache_discovery=False) return service
Connect to the google play interface
https://github.com/mozilla-releng/mozapkpublisher/blob/df61034220153cbb98da74c8ef6de637f9185e12/mozapkpublisher/common/googleplay.py#L159-L175
croscon/fleaker
fleaker/marshmallow/fields/pendulum.py
PendulumField._deserialize
def _deserialize(self, value, attr, obj): """Deserializes a string into a Pendulum object.""" if not self.context.get('convert_dates', True) or not value: return value value = super(PendulumField, self)._deserialize(value, attr, value) timezone = self.get_field_value('timezone') target = pendulum.instance(value) if (timezone and (text_type(target) != text_type(target.in_timezone(timezone)))): raise ValidationError( "The provided datetime is not in the " "{} timezone.".format(timezone) ) return target
python
def _deserialize(self, value, attr, obj): """Deserializes a string into a Pendulum object.""" if not self.context.get('convert_dates', True) or not value: return value value = super(PendulumField, self)._deserialize(value, attr, value) timezone = self.get_field_value('timezone') target = pendulum.instance(value) if (timezone and (text_type(target) != text_type(target.in_timezone(timezone)))): raise ValidationError( "The provided datetime is not in the " "{} timezone.".format(timezone) ) return target
Deserializes a string into a Pendulum object.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/marshmallow/fields/pendulum.py#L42-L58
croscon/fleaker
fleaker/marshmallow/fields/phone_number.py
PhoneNumberField._format_phone_number
def _format_phone_number(self, value, attr): """Format and validate a phone number.""" strict_validation = self.get_field_value( 'strict_phone_validation', default=False ) strict_region = self.get_field_value( 'strict_phone_region', default=strict_validation ) region = self.get_field_value('region', 'US') phone_number_format = self.get_field_value( 'phone_number_format', default=phonenumbers.PhoneNumberFormat.INTERNATIONAL ) # Remove excess special chars, except for the plus sign stripped_value = re.sub(r'[^\w+]', '', value) try: if not stripped_value.startswith('+') and not strict_region: phone = phonenumbers.parse(stripped_value, region) else: phone = phonenumbers.parse(stripped_value) if (not phonenumbers.is_possible_number(phone) or not phonenumbers.is_valid_number(phone) and strict_validation): raise ValidationError( "The value for {} ({}) is not a valid phone " "number.".format(attr, value) ) return phonenumbers.format_number(phone, phone_number_format) except phonenumbers.phonenumberutil.NumberParseException as exc: if strict_validation or strict_region: raise ValidationError(exc)
python
def _format_phone_number(self, value, attr): """Format and validate a phone number.""" strict_validation = self.get_field_value( 'strict_phone_validation', default=False ) strict_region = self.get_field_value( 'strict_phone_region', default=strict_validation ) region = self.get_field_value('region', 'US') phone_number_format = self.get_field_value( 'phone_number_format', default=phonenumbers.PhoneNumberFormat.INTERNATIONAL ) # Remove excess special chars, except for the plus sign stripped_value = re.sub(r'[^\w+]', '', value) try: if not stripped_value.startswith('+') and not strict_region: phone = phonenumbers.parse(stripped_value, region) else: phone = phonenumbers.parse(stripped_value) if (not phonenumbers.is_possible_number(phone) or not phonenumbers.is_valid_number(phone) and strict_validation): raise ValidationError( "The value for {} ({}) is not a valid phone " "number.".format(attr, value) ) return phonenumbers.format_number(phone, phone_number_format) except phonenumbers.phonenumberutil.NumberParseException as exc: if strict_validation or strict_region: raise ValidationError(exc)
Format and validate a phone number.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/marshmallow/fields/phone_number.py#L50-L87
croscon/fleaker
fleaker/marshmallow/fields/phone_number.py
PhoneNumberField._deserialize
def _deserialize(self, value, attr, data): """Format and validate the phone number using libphonenumber.""" if value: value = self._format_phone_number(value, attr) return super(PhoneNumberField, self)._deserialize(value, attr, data)
python
def _deserialize(self, value, attr, data): """Format and validate the phone number using libphonenumber.""" if value: value = self._format_phone_number(value, attr) return super(PhoneNumberField, self)._deserialize(value, attr, data)
Format and validate the phone number using libphonenumber.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/marshmallow/fields/phone_number.py#L89-L94
croscon/fleaker
fleaker/marshmallow/fields/phone_number.py
PhoneNumberField._serialize
def _serialize(self, value, attr, obj): """Format and validate the phone number user libphonenumber.""" value = super(PhoneNumberField, self)._serialize(value, attr, obj) if value: value = self._format_phone_number(value, attr) return value
python
def _serialize(self, value, attr, obj): """Format and validate the phone number user libphonenumber.""" value = super(PhoneNumberField, self)._serialize(value, attr, obj) if value: value = self._format_phone_number(value, attr) return value
Format and validate the phone number user libphonenumber.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/marshmallow/fields/phone_number.py#L96-L103
croscon/fleaker
fleaker/json.py
FleakerJSONEncoder.default
def default(self, obj): """Encode individual objects into their JSON representation. This method is used by :class:`flask.json.JSONEncoder` to encode individual items in the JSON object. Args: obj (object): Any Python object we wish to convert to JSON. Returns: str: The stringified, valid JSON representation of our provided object. """ if isinstance(obj, decimal.Decimal): obj = format(obj, 'f') str_digit = text_type(obj) return (str_digit.rstrip('0').rstrip('.') if '.' in str_digit else str_digit) elif isinstance(obj, phonenumbers.PhoneNumber): return phonenumbers.format_number( obj, phonenumbers.PhoneNumberFormat.E164 ) elif isinstance(obj, pendulum.Pendulum): return text_type(obj) elif isinstance(obj, arrow.Arrow): return text_type(obj) elif isinstance(obj, (datetime.datetime, datetime.date)): return obj.isoformat() try: return list(iter(obj)) except TypeError: pass return super(FleakerJSONEncoder, self).default(obj)
python
def default(self, obj): """Encode individual objects into their JSON representation. This method is used by :class:`flask.json.JSONEncoder` to encode individual items in the JSON object. Args: obj (object): Any Python object we wish to convert to JSON. Returns: str: The stringified, valid JSON representation of our provided object. """ if isinstance(obj, decimal.Decimal): obj = format(obj, 'f') str_digit = text_type(obj) return (str_digit.rstrip('0').rstrip('.') if '.' in str_digit else str_digit) elif isinstance(obj, phonenumbers.PhoneNumber): return phonenumbers.format_number( obj, phonenumbers.PhoneNumberFormat.E164 ) elif isinstance(obj, pendulum.Pendulum): return text_type(obj) elif isinstance(obj, arrow.Arrow): return text_type(obj) elif isinstance(obj, (datetime.datetime, datetime.date)): return obj.isoformat() try: return list(iter(obj)) except TypeError: pass return super(FleakerJSONEncoder, self).default(obj)
Encode individual objects into their JSON representation. This method is used by :class:`flask.json.JSONEncoder` to encode individual items in the JSON object. Args: obj (object): Any Python object we wish to convert to JSON. Returns: str: The stringified, valid JSON representation of our provided object.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/json.py#L63-L104
croscon/fleaker
fleaker/marshmallow/fields/arrow.py
ArrowField._serialize
def _serialize(self, value, attr, obj): """Convert the Arrow object into a string.""" if isinstance(value, arrow.arrow.Arrow): value = value.datetime return super(ArrowField, self)._serialize(value, attr, obj)
python
def _serialize(self, value, attr, obj): """Convert the Arrow object into a string.""" if isinstance(value, arrow.arrow.Arrow): value = value.datetime return super(ArrowField, self)._serialize(value, attr, obj)
Convert the Arrow object into a string.
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/marshmallow/fields/arrow.py#L42-L47