Dataset schema (one record per function):

    field             type      lengths / values
    repo              string    7 to 55 characters
    path              string    4 to 127 characters
    func_name         string    1 to 88 characters
    original_string   string    75 to 19.8k characters
    language          string    1 distinct value
    code              string    75 to 19.8k characters
    code_tokens       sequence
    docstring         string    3 to 17.3k characters
    docstring_tokens  sequence
    sha               string    40 characters
    url               string    87 to 242 characters
    partition         string    1 distinct value

repo: MacHu-GWU/single_file_module-project
path: sfm/iterable.py
func_name: size_of_generator
language: python
code:

    def size_of_generator(generator, memory_efficient=True):
        """Get the number of items in a generator.

        - memory_efficient=True: roughly 3 times slower, but memory efficient.
        - memory_efficient=False: faster, but uses more memory.

        **Chinese documentation**: counts the number of items in a generator.
        memory_efficient=True avoids loading all of the generator's items
        into memory, but is slightly slower than memory_efficient=False.
        """
        if memory_efficient:
            counter = 0
            for _ in generator:
                counter += 1
            return counter
        else:
            return len(list(generator))

sha: 01f7a6b250853bebfd73de275895bf274325cfc1
url: https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/iterable.py#L443-L460
partition: train

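A minimal usage sketch (assuming sfm.iterable is importable); note that both modes consume the generator, so it cannot be reused afterwards:

    from sfm.iterable import size_of_generator

    gen = (i * i for i in range(1000))
    assert size_of_generator(gen, memory_efficient=True) == 1000
    # gen is now exhausted; counting it again would return 0.
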
repo: JanHendrikDolling/configvalidator
path: configvalidator/validators/__init__.py
func_name: AndValidator.validate
language: python
code:

    def validate(self, value):
        """validate function from AndValidator

        Returns:
         the value, if every inner validator's validate function succeeds;
         otherwise a ValidatorException aggregating all collected errors
         is raised
        """
        errors = []
        self._used_validator = []
        for val in self._validators:
            try:
                val.validate(value)
                self._used_validator.append(val)
            except ValidatorException as e:
                errors.append(e)
            except Exception as e:
                errors.append(ValidatorException("Unknown Error", e))
        if len(errors) > 0:
            raise ValidatorException.from_list(errors)
        return value

sha: efde23a9352ae1fd6702b04ad964783ce11cbca5
url: https://github.com/JanHendrikDolling/configvalidator/blob/efde23a9352ae1fd6702b04ad964783ce11cbca5/configvalidator/validators/__init__.py#L598-L617
partition: train

repo: Godley/MuseParse
path: MuseParse/classes/ObjectHierarchy/TreeClasses/MeasureNode.py
func_name: MeasureNode.GetTotalValue
language: python
code:

    def GetTotalValue(self):
        """Gets the total value of the bar according to its time signature"""
        value = ""
        if hasattr(self, "meter"):
            top_value = self.meter.beats
            bottom = self.meter.type
            fraction = top_value / bottom
            if fraction == 1:
                value = "1"
            else:
                if fraction > 1:
                    value = "1."
                if fraction < 1:
                    if fraction >= 0.5:
                        fraction -= 0.5
                        value = "2"
                    if fraction == 0.25:
                        value += "."
        return value

sha: 23cecafa1fdc0f2d6a87760553572b459f3c9904
url: https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/ObjectHierarchy/TreeClasses/MeasureNode.py#L133-L151
partition: train

repo: Godley/MuseParse
path: MuseParse/classes/ObjectHierarchy/TreeClasses/MeasureNode.py
func_name: MeasureNode.GetLastKey
language: python
code:

    def GetLastKey(self, voice=1):
        """key as in musical key, not index"""
        voice_obj = self.GetChild(voice)
        if voice_obj is not None:
            key = BackwardSearch(KeyNode, voice_obj, 1)
            if key is not None:
                return key
            else:
                if hasattr(self, "key"):
                    return self.key
        else:
            if hasattr(self, "key"):
                return self.key

sha: 23cecafa1fdc0f2d6a87760553572b459f3c9904
url: https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/ObjectHierarchy/TreeClasses/MeasureNode.py#L165-L178
partition: train

repo: Godley/MuseParse
path: MuseParse/helpers.py
func_name: SplitString
language: python
code:

    def SplitString(value):
        """Simple method that breaks a string into LilyPond \\markup lines
        every 10 characters (or at existing line breaks)."""
        string_length = len(value)
        chunks = int(string_length / 10)
        string_list = list(value)
        lstring = ""
        if chunks > 1:
            lstring = "\\markup { \n\r \\column { "
            for i in range(int(chunks)):
                lstring += "\n\r\r \\line { \""
                # emit the i-th 10-character chunk; characters past the
                # last full chunk are dropped
                for char in string_list[i * 10:(i + 1) * 10]:
                    lstring += char
                lstring += "\" \r\r}"
            lstring += "\n\r } \n }"
        if lstring == "":
            indexes = [
                i for i in range(len(string_list))
                if string_list[i] == "\r" or string_list[i] == "\n"]
            lstring = "\\markup { \n\r \\column { "
            if len(indexes) == 0:
                lstring += "\n\r\r \\line { \"" + \
                    "".join(string_list) + "\" \n\r\r } \n\r } \n }"
            else:
                rows = []
                row_1 = string_list[:indexes[0]]
                rows.append(row_1)
                for i in range(len(indexes)):
                    start = indexes[i]
                    if i != len(indexes) - 1:
                        end = indexes[i + 1]
                    else:
                        end = len(string_list)
                    row = string_list[start:end]
                    rows.append(row)
                for row in rows:
                    lstring += "\n\r\r \\line { \""
                    lstring += "".join(row)
                    lstring += "\" \r\r}"
                lstring += "\n\r } \n }"
        return lstring

sha: 23cecafa1fdc0f2d6a87760553572b459f3c9904
url: https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/helpers.py#L4-L46
partition: train

repo: Godley/MuseParse
path: MuseParse/helpers.py
func_name: NumbersToWords
language: python
code:

    def NumbersToWords(number):
        """
        Little function that converts numbers to words. This could be more
        efficient, and won't work if the number is bigger than 999, but it's
        for stave names, and I doubt any part would have more than 10 staves,
        let alone 999.
        """
        units = ['one', 'two', 'three', 'four', 'five',
                 'six', 'seven', 'eight', 'nine']
        tens = ['ten', 'twenty', 'thirty', 'forty', 'fifty',
                'sixty', 'seventy', 'eighty', 'ninety']
        output = ""
        if number != 0:
            str_val = str(number)
            if 4 > len(str_val) > 2:
                output += units[int(str_val[0]) - 1]
                output += "hundred"
                # compare against the character "0", not the integer 0
                if str_val[1] != "0":
                    output += "and" + tens[int(str_val[1]) - 1]
                if str_val[2] != "0":
                    output += units[int(str_val[2]) - 1]
            if 3 > len(str_val) > 1:
                output += tens[int(str_val[0]) - 1]
                if str_val[1] != "0":
                    output += units[int(str_val[1]) - 1]
            if 2 > len(str_val) == 1:
                output += units[int(str_val[0]) - 1]
        else:
            output = "zero"
        return output

sha: 23cecafa1fdc0f2d6a87760553572b459f3c9904
url: https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/helpers.py#L63-L108
partition: train

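With the string comparisons above, a few example results (note the function does not insert spaces between words):

    NumbersToWords(5)    # -> "five"
    NumbersToWords(42)   # -> "fortytwo"
    NumbersToWords(300)  # -> "threehundred"
    NumbersToWords(0)    # -> "zero"
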
repo: Godley/MuseParse
path: MuseParse/classes/ObjectHierarchy/TreeClasses/PartNode.py
func_name: PartNode.CheckTotals
language: python
code:

    def CheckTotals(self):
        """method to calculate the maximum total lilypond value for a
        measure without a time signature"""
        staves = self.GetChildrenIndexes()
        for staff in staves:
            child = self.getStaff(staff)
            child.CheckTotals()

sha: 23cecafa1fdc0f2d6a87760553572b459f3c9904
url: https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/ObjectHierarchy/TreeClasses/PartNode.py#L60-L65
partition: train

repo: Godley/MuseParse
path: MuseParse/classes/ObjectHierarchy/TreeClasses/PartNode.py
func_name: PartNode.CheckPreviousBarline
language: python
code:

    def CheckPreviousBarline(self, staff):
        """method which checks the bar before the current one for changes
        we need to make to its barlines"""
        measure_before_last = self.getMeasureAtPosition(-2, staff)
        last_measure = self.getMeasureAtPosition(-1, staff)
        if last_measure is not None and measure_before_last is not None:
            bline1 = measure_before_last.GetBarline("right")
            bline2 = last_measure.GetBarline("left")
            if bline1 is not None:
                if hasattr(bline1, "ending"):
                    if bline2 is not None:
                        if not hasattr(bline2, "ending"):
                            bline1.ending.type = "discontinue"
                    else:
                        bline1.ending.type = "discontinue"

sha: 23cecafa1fdc0f2d6a87760553572b459f3c9904
url: https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/ObjectHierarchy/TreeClasses/PartNode.py#L78-L91
partition: train

repo: eweast/BencodePy
path: build/lib/bencodepy/decoder.py
func_name: Decoder.__parse
language: python
code:

    def __parse(self) -> object:
        """Selects the appropriate method to decode the next bencode
        element and returns the result."""
        char = self.data[self.idx: self.idx + 1]
        if char in [b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b'0']:
            str_len = int(self.__read_to(b':'))
            return self.__read(str_len)
        elif char == b'i':
            self.idx += 1
            return int(self.__read_to(b'e'))
        elif char == b'd':
            return self.__parse_dict()
        elif char == b'l':
            return self.__parse_list()
        elif char == b'':
            raise bencodepy.DecodingError(
                'Unexpected End of File at index position of {0}.'.format(str(self.idx)))
        else:
            raise bencodepy.DecodingError(
                'Invalid token character ({0}) at position {1}.'.format(str(char), str(self.idx)))

sha: a9c145bd087c61dd8fb28a9dfad46d085c8b8290
url: https://github.com/eweast/BencodePy/blob/a9c145bd087c61dd8fb28a9dfad46d085c8b8290/build/lib/bencodepy/decoder.py#L33-L50
partition: train

repo: eweast/BencodePy
path: build/lib/bencodepy/decoder.py
func_name: Decoder.decode
language: python
code:

    def decode(self) -> Iterable:
        """Start of decode process. Returns final results."""
        if self.data[0:1] not in (b'd', b'l'):
            return self.__wrap_with_tuple()
        return self.__parse()

sha: a9c145bd087c61dd8fb28a9dfad46d085c8b8290
url: https://github.com/eweast/BencodePy/blob/a9c145bd087c61dd8fb28a9dfad46d085c8b8290/build/lib/bencodepy/decoder.py#L52-L56
partition: train

repo: eweast/BencodePy
path: build/lib/bencodepy/decoder.py
func_name: Decoder.__wrap_with_tuple
language: python
code:

    def __wrap_with_tuple(self) -> tuple:
        """Returns a tuple of all nested bencode elements."""
        l = list()
        length = len(self.data)
        while self.idx < length:
            l.append(self.__parse())
        return tuple(l)

sha: a9c145bd087c61dd8fb28a9dfad46d085c8b8290
url: https://github.com/eweast/BencodePy/blob/a9c145bd087c61dd8fb28a9dfad46d085c8b8290/build/lib/bencodepy/decoder.py#L58-L64
partition: train

repo: eweast/BencodePy
path: build/lib/bencodepy/decoder.py
func_name: Decoder.__parse_dict
language: python
code:

    def __parse_dict(self) -> OrderedDict:
        """Returns an Ordered Dictionary of nested bencode elements."""
        self.idx += 1
        d = OrderedDict()
        key_name = None
        while self.data[self.idx: self.idx + 1] != b'e':
            if key_name is None:
                key_name = self.__parse()
            else:
                d[key_name] = self.__parse()
                key_name = None
        self.idx += 1
        return d

sha: a9c145bd087c61dd8fb28a9dfad46d085c8b8290
url: https://github.com/eweast/BencodePy/blob/a9c145bd087c61dd8fb28a9dfad46d085c8b8290/build/lib/bencodepy/decoder.py#L66-L78
partition: train

repo: eweast/BencodePy
path: build/lib/bencodepy/decoder.py
func_name: Decoder.__parse_list
language: python
code:

    def __parse_list(self) -> list:
        """Returns a list of nested bencode elements."""
        self.idx += 1
        l = []
        while self.data[self.idx: self.idx + 1] != b'e':
            l.append(self.__parse())
        self.idx += 1
        return l

sha: a9c145bd087c61dd8fb28a9dfad46d085c8b8290
url: https://github.com/eweast/BencodePy/blob/a9c145bd087c61dd8fb28a9dfad46d085c8b8290/build/lib/bencodepy/decoder.py#L80-L87
partition: train

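A worked example tying the five Decoder methods above together (hypothetical driver code; it assumes the class constructor takes the raw bytes and starts with idx = 0):

    # b'd3:bar4:spam3:fooi42ee' flows __parse -> __parse_dict:
    # '3:bar' and '4:spam' are length-prefixed strings, 'i42e' an integer.
    Decoder(b'd3:bar4:spam3:fooi42ee').decode()
    # -> OrderedDict([(b'bar', b'spam'), (b'foo', 42)])

    # Top-level data that is not a dict or list is collected by
    # __wrap_with_tuple:
    Decoder(b'i1ei2e').decode()  # -> (1, 2)
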
repo: Godley/MuseParse
path: MuseParse/classes/ObjectHierarchy/TreeClasses/BaseTree.py
func_name: Node.PopAllChildren
language: python
code:

    def PopAllChildren(self):
        '''
        Method to remove and return all children of the current node

        :return: list of children
        '''
        indexes = self.GetChildrenIndexes()
        children = []
        for c in indexes:
            child = self.PopChild(c)
            children.append(child)
        return children

sha: 23cecafa1fdc0f2d6a87760553572b459f3c9904
url: https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/ObjectHierarchy/TreeClasses/BaseTree.py#L215-L226
partition: train

repo: vasilcovsky/pytinypng
path: pytinypng/pytinypng.py
func_name: _process_file
language: python
code:

    def _process_file(input_file, output_file, apikey):
        """Shrinks input_file to output_file.

        This function should be used only inside process_directory.
        It takes input_file, tries to shrink it and, if the shrink was
        successful, saves the compressed image to output_file.
        Otherwise an exception is raised.

        @return compressed: PNGResponse
        """
        bytes_ = read_binary(input_file)
        compressed = shrink(bytes_, apikey)

        if compressed.success and compressed.bytes:
            write_binary(output_file, compressed.bytes)
        else:
            if compressed.errno in FATAL_ERRORS:
                raise StopProcessing(compressed)
            elif compressed.errno == TinyPNGError.InternalServerError:
                raise RetryProcessing(compressed)

        return compressed

sha: ac633e4aa41122c49a806f411e43a76d8f73058e
url: https://github.com/vasilcovsky/pytinypng/blob/ac633e4aa41122c49a806f411e43a76d8f73058e/pytinypng/pytinypng.py#L29-L49
partition: train

repo: vasilcovsky/pytinypng
path: pytinypng/pytinypng.py
func_name: process_directory
language: python
code:

    def process_directory(source, target, apikey, handler, overwrite=False):
        """Optimize and save png files from source to target directory.

        @param source: path to input directory
        @param target: path to output directory
        @param handler: callback holder, instance of handlers.BaseHandler
        @param overwrite: boolean flag allowing files in the output
                          directory to be overwritten
        """
        handler.on_start()

        attempts = defaultdict(lambda: 0)
        input_files = files_with_exts(source, suffix='.png')
        next_ = lambda: next(input_files, None)

        current_file = next_()
        response = None
        last_processed = None

        while current_file:
            output_file = target_path(source, target, current_file)

            if os.path.exists(output_file) and not overwrite:
                handler.on_skip(current_file, source=source)
                current_file = next_()
                continue

            try:
                handler.on_pre_item(current_file)
                last_processed = current_file
                response = _process_file(current_file, output_file, apikey)
                current_file = next_()
            except StopProcessing as e:
                # Unauthorized or exceeded the number of allowed monthly calls
                response = e.response
                handler.on_stop(response.errmsg)
                break
            except RetryProcessing as e:
                # handle InternalServerError on tinypng side
                response = e.response
                if attempts[current_file] < 9:
                    handler.on_retry(current_file)
                    time.sleep(TINYPNG_SLEEP_SEC)
                    attempts[current_file] += 1
                else:
                    current_file = next_()
            finally:
                handler.on_post_item(response, input_file=last_processed, source=source)

        handler.on_finish(output_dir=target)

sha: ac633e4aa41122c49a806f411e43a76d8f73058e
url: https://github.com/vasilcovsky/pytinypng/blob/ac633e4aa41122c49a806f411e43a76d8f73058e/pytinypng/pytinypng.py#L52-L104
partition: train

repo: vasilcovsky/pytinypng
path: pytinypng/pytinypng.py
func_name: _main
language: python
code:

    def _main(args):
        """Batch compression.

        args contains:
         * input - path to input directory
         * output - path to output directory or None
         * apikey - TinyPNG API key
         * overwrite - boolean flag
        """
        if not args.apikey:
            print("\nPlease provide TinyPNG API key")
            print("To obtain key visit https://api.tinypng.com/developers\n")
            sys.exit(1)

        input_dir = realpath(args.input)

        if not args.output:
            output_dir = input_dir + "-output"
        else:
            output_dir = realpath(args.output)

        if input_dir == output_dir:
            print("\nPlease specify different output directory\n")
            sys.exit(1)

        handler = ScreenHandler()

        try:
            process_directory(input_dir, output_dir, args.apikey, handler)
        except KeyboardInterrupt:
            handler.on_finish(output_dir=output_dir)

sha: ac633e4aa41122c49a806f411e43a76d8f73058e
url: https://github.com/vasilcovsky/pytinypng/blob/ac633e4aa41122c49a806f411e43a76d8f73058e/pytinypng/pytinypng.py#L107-L138
partition: train

repo: envi-idl/envipyengine
path: envipyengine/taskengine/engine.py
func_name: Engine.task
language: python
code:

    def task(self, task_name):
        """
        Returns an ENVI Py Engine Task object. See ENVI Py Engine Task
        for examples.

        :param task_name: The name of the task to retrieve.
        :return: An ENVI Py Engine Task object.
        """
        return Task(uri=':'.join((self._engine_name, task_name)),
                    cwd=self._cwd)

sha: 567b639d6592deec3289f6122a9e3d18f2f98432
url: https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/taskengine/engine.py#L27-L34
partition: train

repo: envi-idl/envipyengine
path: envipyengine/taskengine/engine.py
func_name: Engine.tasks
language: python
code:

    def tasks(self):
        """
        Returns a list of all tasks known to the engine.

        :return: A list of task names.
        """
        task_input = {'taskName': 'QueryTaskCatalog'}
        output = taskengine.execute(task_input, self._engine_name, cwd=self._cwd)
        return output['outputParameters']['TASKS']

sha: 567b639d6592deec3289f6122a9e3d18f2f98432
url: https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/taskengine/engine.py#L37-L45
partition: train

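A brief usage sketch combining the two Engine methods above (the engine name 'ENVI' and the task name are illustrative):

    from envipyengine import Engine

    envi = Engine('ENVI')
    print(envi.tasks())                # task names via QueryTaskCatalog
    task = envi.task('SpectralIndex')  # -> ENVI Py Engine Task object
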
repo: ariebovenberg/snug
path: snug/query.py
func_name: execute
language: python
code:

    def execute(query, auth=None, client=urllib_request.build_opener()):
        """Execute a query, returning its result

        Parameters
        ----------
        query: Query[T]
            The query to resolve
        auth: ~typing.Tuple[str, str] \
            or ~typing.Callable[[Request], Request] or None
            This may be:

            * A (username, password)-tuple for basic authentication
            * A callable to authenticate requests.
            * ``None`` (no authentication)
        client
            The HTTP client to use.
            Its type must have been registered
            with :func:`~snug.clients.send`.
            If not given, the built-in :mod:`urllib` module is used.

        Returns
        -------
        T
            the query result
        """
        exec_fn = getattr(type(query), '__execute__', _default_execute_method)
        return exec_fn(query, client, _make_auth(auth))

sha: 4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef
url: https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/snug/query.py#L192-L218
partition: train

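A usage sketch in the generator style snug queries are written in (the GitHub URL, JSON handling, and credentials are illustrative):

    import json
    import snug

    def repo(name, owner):
        """a hypothetical repository lookup query"""
        request = snug.GET('https://api.github.com/repos/%s/%s' % (owner, name))
        response = yield request
        return json.loads(response.content)

    snug.execute(repo('Hello-World', 'octocat'), auth=('me', 'password'))
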
repo: ariebovenberg/snug
path: snug/query.py
func_name: execute_async
language: python
code:

    def execute_async(query, auth=None, client=event_loop):
        """Execute a query asynchronously, returning its result

        Parameters
        ----------
        query: Query[T]
            The query to resolve
        auth: ~typing.Tuple[str, str] \
            or ~typing.Callable[[Request], Request] or None
            This may be:

            * A (username, password)-tuple for basic authentication
            * A callable to authenticate requests.
            * ``None`` (no authentication)
        client
            The HTTP client to use.
            Its type must have been registered
            with :func:`~snug.clients.send_async`.
            If not given, the built-in :mod:`asyncio` module is used.

        Returns
        -------
        T
            the query result

        Note
        ----
        The default client is very rudimentary.
        Consider using a :class:`aiohttp.ClientSession` instance as
        ``client``.
        """
        exc_fn = getattr(type(query), '__execute_async__', Query.__execute_async__)
        return exc_fn(query, client, _make_auth(auth))

sha: 4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef
url: https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/snug/query.py#L221-L252
partition: train

repo: blockstack/python-utilitybelt
path: utilitybelt/entropy.py
func_name: secure_randint
language: python
code:

    def secure_randint(min_value, max_value, system_random=None):
        """Return a random integer N such that min_value <= N <= max_value.

        Uses SystemRandom for generating random numbers
        (which uses os.urandom(), which pulls from /dev/urandom).
        """
        if not system_random:
            system_random = random.SystemRandom()
        return system_random.randint(min_value, max_value)

sha: 13d3502aa1a486c9d775ad2c551fb8e7e48b0d96
url: https://github.com/blockstack/python-utilitybelt/blob/13d3502aa1a486c9d775ad2c551fb8e7e48b0d96/utilitybelt/entropy.py#L41-L49
partition: train

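For example, drawing dice rolls from the OS entropy pool, optionally reusing one SystemRandom instance across calls:

    import random

    secure_randint(1, 6)  # a single die roll
    rng = random.SystemRandom()
    rolls = [secure_randint(1, 6, system_random=rng) for _ in range(10)]
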
repo: ariebovenberg/snug
path: snug/http.py
func_name: _merge_maps
language: python
code:

    def _merge_maps(m1, m2):
        """merge two Mapping objects, keeping the type of the first mapping"""
        return type(m1)(chain(m1.items(), m2.items()))

sha: 4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef
url: https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/snug/http.py#L64-L66
partition: train

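Because the chained pairs are fed to the first mapping's constructor, m2's values win on duplicate keys:

    from collections import OrderedDict

    _merge_maps(OrderedDict([('a', 1)]), {'b': 2})
    # -> OrderedDict([('a', 1), ('b', 2)])
    _merge_maps({'a': 1}, {'a': 9})  # -> {'a': 9}
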
repo: ariebovenberg/snug
path: snug/http.py
func_name: basic_auth
language: python
code:

    def basic_auth(credentials):
        """Create an HTTP basic authentication callable

        Parameters
        ----------
        credentials: ~typing.Tuple[str, str]
            The (username, password)-tuple

        Returns
        -------
        ~typing.Callable[[Request], Request]
            A callable which adds basic authentication
            to a :class:`Request`.
        """
        encoded = b64encode(':'.join(credentials).encode('ascii')).decode()
        return header_adder({'Authorization': 'Basic ' + encoded})

sha: 4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef
url: https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/snug/http.py#L156-L170
partition: train

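For instance (the credential pair and URL are illustrative, and this assumes basic_auth is exported at package level alongside snug.GET):

    import snug

    add_auth = snug.basic_auth(('user', 'hunter2'))
    authed = add_auth(snug.GET('https://example.com'))
    authed.headers['Authorization']  # -> 'Basic dXNlcjpodW50ZXIy'
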
repo: ariebovenberg/snug
path: snug/http.py
func_name: Request.with_headers
language: python
code:

    def with_headers(self, headers):
        """Create a new request with added headers

        Parameters
        ----------
        headers: Mapping
            the headers to add
        """
        return self.replace(headers=_merge_maps(self.headers, headers))

sha: 4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef
url: https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/snug/http.py#L96-L104
partition: train

repo: ariebovenberg/snug
path: snug/http.py
func_name: Request.with_params
language: python
code:

    def with_params(self, params):
        """Create a new request with added query parameters

        Parameters
        ----------
        params: Mapping
            the query parameters to add
        """
        return self.replace(params=_merge_maps(self.params, params))

sha: 4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef
url: https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/snug/http.py#L116-L124
partition: train

repo: azaghal/pydenticon
path: pydenticon/__init__.py
func_name: Generator._get_bit
language: python
code:

    def _get_bit(self, n, hash_bytes):
        """
        Determines if the n-th bit of passed bytes is 1 or 0.

        Arguments:
          hash_bytes - List of hash byte values for which the n-th bit value
          should be checked. Each element of the list should be an integer
          from 0 to 255.

        Returns:
          True if the bit is 1. False if the bit is 0.
        """
        if hash_bytes[n // 8] >> int(8 - ((n % 8) + 1)) & 1 == 1:
            return True

        return False

sha: 002ad10fd58adedfb465b5ef96eacbe6a595c2ac
url: https://github.com/azaghal/pydenticon/blob/002ad10fd58adedfb465b5ef96eacbe6a595c2ac/pydenticon/__init__.py#L88-L106
partition: train

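A comment trace of the bit arithmetic, most significant bit first:

    # hash_bytes = [0b10110000]  ->  bits, MSB first: 1, 0, 1, 1, 0, 0, 0, 0
    # _get_bit(0, hash_bytes) -> True   (most significant bit of byte 0)
    # _get_bit(1, hash_bytes) -> False
    # _get_bit(3, hash_bytes) -> True
    # n // 8 selects the byte; the shift isolates bit n % 8 within it.
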
repo: azaghal/pydenticon
path: pydenticon/__init__.py
func_name: Generator._generate_matrix
language: python
code:

    def _generate_matrix(self, hash_bytes):
        """
        Generates matrix that describes which blocks should be coloured.

        Arguments:
          hash_bytes - List of hash byte values for which the identicon is
          being generated. Each element of the list should be an integer
          from 0 to 255.

        Returns:
          List of rows, where each element in a row is boolean. True means
          the foreground colour should be used, False means a background
          colour should be used.
        """
        # Since the identicon needs to be symmetric, we'll need to work on
        # half the columns (rounded-up), and reflect where necessary.
        half_columns = self.columns // 2 + self.columns % 2
        cells = self.rows * half_columns

        # Initialise the matrix (list of rows) that will be returned.
        matrix = [[False] * self.columns for _ in range(self.rows)]

        # Process the cells one by one.
        for cell in range(cells):
            # If the bit from the hash corresponding to this cell is 1, mark
            # the cell as a foreground one. Do not use the first byte (since
            # that one is used for determining the foreground colour).
            if self._get_bit(cell, hash_bytes[1:]):
                # Determine the cell coordinates in the matrix.
                column = cell // self.columns
                row = cell % self.rows

                # Mark the cell and its reflection. The central column may
                # get marked twice, but we don't care.
                matrix[row][column] = True
                matrix[row][self.columns - column - 1] = True

        return matrix

002ad10fd58adedfb465b5ef96eacbe6a595c2ac
https://github.com/azaghal/pydenticon/blob/002ad10fd58adedfb465b5ef96eacbe6a595c2ac/pydenticon/__init__.py#L108-L148
train
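A self-contained sketch of the same symmetric-matrix construction outside the class; the MD5 digest, the 5x5 grid, and the input string are arbitrary choices for this demo, not pydenticon defaults:

import hashlib

rows, columns = 5, 5
hash_bytes = list(hashlib.md5(b"john.doe@example.com").digest())  # ints 0-255

def get_bit(n, data):
    return data[n // 8] >> (8 - ((n % 8) + 1)) & 1 == 1

half_columns = columns // 2 + columns % 2
matrix = [[False] * columns for _ in range(rows)]
for cell in range(rows * half_columns):
    if get_bit(cell, hash_bytes[1:]):  # byte 0 is reserved for colour choice
        column, row = cell // columns, cell % rows
        matrix[row][column] = True
        matrix[row][columns - column - 1] = True  # mirror for symmetry

print("\n".join("".join("#" if c else "-" for c in r) for r in matrix))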
azaghal/pydenticon
pydenticon/__init__.py
Generator._generate_image
def _generate_image(self, matrix, width, height, padding, foreground, background, image_format): """ Generates an identicon image in requested image format out of the passed block matrix, with the requested width, height, padding, foreground colour, background colour, and image format. Arguments: matrix - Matrix describing which blocks in the identicon should be painted with foreground (background if inverted) colour. width - Width of resulting identicon image in pixels. height - Height of resulting identicon image in pixels. padding - Tuple describing padding around the generated identicon. The tuple should consist out of four values, where each value is the number of pixels to use for padding. The order in tuple is: top, bottom, left, right. foreground - Colour which should be used for foreground (filled blocks), represented as a string of format supported by the PIL.ImageColor module. background - Colour which should be used for background and padding, represented as a string of format supported by the PIL.ImageColor module. image_format - Format to use for the image. Format needs to be supported by the Pillow library. Returns: Identicon image in requested format, returned as a byte list. """ # Set-up a new image object, setting the background to provided value. image = Image.new("RGBA", (width + padding[2] + padding[3], height + padding[0] + padding[1]), background) # Set-up a draw image (for drawing the blocks). draw = ImageDraw.Draw(image) # Calculate the block width and height. block_width = width // self.columns block_height = height // self.rows # Go through all the elements of a matrix, and draw the rectangles. for row, row_columns in enumerate(matrix): for column, cell in enumerate(row_columns): if cell: # Set-up the coordinates for a block. x1 = padding[2] + column * block_width y1 = padding[0] + row * block_height x2 = padding[2] + (column + 1) * block_width - 1 y2 = padding[0] + (row + 1) * block_height - 1 # Draw the rectangle. draw.rectangle((x1, y1, x2, y2), fill=foreground) # Set-up a stream where image will be saved. stream = BytesIO() if image_format.upper() == "JPEG": image = image.convert(mode="RGB") # Save the image to stream. try: image.save(stream, format=image_format, optimize=True) except KeyError: raise ValueError("Pillow does not support requested image format: %s" % image_format) image_raw = stream.getvalue() stream.close() # Return the resulting image. return image_raw
python
def _generate_image(self, matrix, width, height, padding, foreground, background, image_format): """ Generates an identicon image in requested image format out of the passed block matrix, with the requested width, height, padding, foreground colour, background colour, and image format. Arguments: matrix - Matrix describing which blocks in the identicon should be painted with foreground (background if inverted) colour. width - Width of resulting identicon image in pixels. height - Height of resulting identicon image in pixels. padding - Tuple describing padding around the generated identicon. The tuple should consist out of four values, where each value is the number of pixels to use for padding. The order in tuple is: top, bottom, left, right. foreground - Colour which should be used for foreground (filled blocks), represented as a string of format supported by the PIL.ImageColor module. background - Colour which should be used for background and padding, represented as a string of format supported by the PIL.ImageColor module. image_format - Format to use for the image. Format needs to be supported by the Pillow library. Returns: Identicon image in requested format, returned as a byte list. """ # Set-up a new image object, setting the background to provided value. image = Image.new("RGBA", (width + padding[2] + padding[3], height + padding[0] + padding[1]), background) # Set-up a draw image (for drawing the blocks). draw = ImageDraw.Draw(image) # Calculate the block width and height. block_width = width // self.columns block_height = height // self.rows # Go through all the elements of a matrix, and draw the rectangles. for row, row_columns in enumerate(matrix): for column, cell in enumerate(row_columns): if cell: # Set-up the coordinates for a block. x1 = padding[2] + column * block_width y1 = padding[0] + row * block_height x2 = padding[2] + (column + 1) * block_width - 1 y2 = padding[0] + (row + 1) * block_height - 1 # Draw the rectangle. draw.rectangle((x1, y1, x2, y2), fill=foreground) # Set-up a stream where image will be saved. stream = BytesIO() if image_format.upper() == "JPEG": image = image.convert(mode="RGB") # Save the image to stream. try: image.save(stream, format=image_format, optimize=True) except KeyError: raise ValueError("Pillow does not support requested image format: %s" % image_format) image_raw = stream.getvalue() stream.close() # Return the resulting image. return image_raw
[ "def", "_generate_image", "(", "self", ",", "matrix", ",", "width", ",", "height", ",", "padding", ",", "foreground", ",", "background", ",", "image_format", ")", ":", "# Set-up a new image object, setting the background to provided value.", "image", "=", "Image", ".", "new", "(", "\"RGBA\"", ",", "(", "width", "+", "padding", "[", "2", "]", "+", "padding", "[", "3", "]", ",", "height", "+", "padding", "[", "0", "]", "+", "padding", "[", "1", "]", ")", ",", "background", ")", "# Set-up a draw image (for drawing the blocks).", "draw", "=", "ImageDraw", ".", "Draw", "(", "image", ")", "# Calculate the block widht and height.", "block_width", "=", "width", "//", "self", ".", "columns", "block_height", "=", "height", "//", "self", ".", "rows", "# Go through all the elements of a matrix, and draw the rectangles.", "for", "row", ",", "row_columns", "in", "enumerate", "(", "matrix", ")", ":", "for", "column", ",", "cell", "in", "enumerate", "(", "row_columns", ")", ":", "if", "cell", ":", "# Set-up the coordinates for a block.", "x1", "=", "padding", "[", "2", "]", "+", "column", "*", "block_width", "y1", "=", "padding", "[", "0", "]", "+", "row", "*", "block_height", "x2", "=", "padding", "[", "2", "]", "+", "(", "column", "+", "1", ")", "*", "block_width", "-", "1", "y2", "=", "padding", "[", "0", "]", "+", "(", "row", "+", "1", ")", "*", "block_height", "-", "1", "# Draw the rectangle.", "draw", ".", "rectangle", "(", "(", "x1", ",", "y1", ",", "x2", ",", "y2", ")", ",", "fill", "=", "foreground", ")", "# Set-up a stream where image will be saved.", "stream", "=", "BytesIO", "(", ")", "if", "image_format", ".", "upper", "(", ")", "==", "\"JPEG\"", ":", "image", "=", "image", ".", "convert", "(", "mode", "=", "\"RGB\"", ")", "# Save the image to stream.", "try", ":", "image", ".", "save", "(", "stream", ",", "format", "=", "image_format", ",", "optimize", "=", "True", ")", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Pillow does not support requested image format: %s\"", "%", "image_format", ")", "image_raw", "=", "stream", ".", "getvalue", "(", ")", "stream", ".", "close", "(", ")", "# Return the resulting image.", "return", "image_raw" ]
Generates an identicon image in requested image format out of the passed block matrix, with the requested width, height, padding, foreground colour, background colour, and image format. Arguments: matrix - Matrix describing which blocks in the identicon should be painted with foreground (background if inverted) colour. width - Width of resulting identicon image in pixels. height - Height of resulting identicon image in pixels. padding - Tuple describing padding around the generated identicon. The tuple should consist out of four values, where each value is the number of pixels to use for padding. The order in tuple is: top, bottom, left, right. foreground - Colour which should be used for foreground (filled blocks), represented as a string of format supported by the PIL.ImageColor module. background - Colour which should be used for background and padding, represented as a string of format supported by the PIL.ImageColor module. image_format - Format to use for the image. Format needs to be supported by the Pillow library. Returns: Identicon image in requested format, returned as a byte list.
[ "Generates", "an", "identicon", "image", "in", "requested", "image", "format", "out", "of", "the", "passed", "block", "matrix", "with", "the", "requested", "width", "height", "padding", "foreground", "colour", "background", "colour", "and", "image", "format", "." ]
002ad10fd58adedfb465b5ef96eacbe6a595c2ac
https://github.com/azaghal/pydenticon/blob/002ad10fd58adedfb465b5ef96eacbe6a595c2ac/pydenticon/__init__.py#L187-L261
train
azaghal/pydenticon
pydenticon/__init__.py
Generator._generate_ascii
def _generate_ascii(self, matrix, foreground, background): """ Generates an identicon "image" in the ASCII format. The image will just output the matrix used to generate the identicon. Arguments: matrix - Matrix describing which blocks in the identicon should be painted with foreground (background if inverted) colour. foreground - Character which should be used for representing foreground. background - Character which should be used for representing background. Returns: ASCII representation of an identicon image, where one block is one character. """ return "\n".join(["".join([foreground if cell else background for cell in row]) for row in matrix])
python
def _generate_ascii(self, matrix, foreground, background): """ Generates an identicon "image" in the ASCII format. The image will just output the matrix used to generate the identicon. Arguments: matrix - Matrix describing which blocks in the identicon should be painted with foreground (background if inverted) colour. foreground - Character which should be used for representing foreground. background - Character which should be used for representing background. Returns: ASCII representation of an identicon image, where one block is one character. """ return "\n".join(["".join([foreground if cell else background for cell in row]) for row in matrix])
[ "def", "_generate_ascii", "(", "self", ",", "matrix", ",", "foreground", ",", "background", ")", ":", "return", "\"\\n\"", ".", "join", "(", "[", "\"\"", ".", "join", "(", "[", "foreground", "if", "cell", "else", "background", "for", "cell", "in", "row", "]", ")", "for", "row", "in", "matrix", "]", ")" ]
Generates an identicon "image" in the ASCII format. The image will just output the matrix used to generate the identicon. Arguments: matrix - Matrix describing which blocks in the identicon should be painted with foreground (background if inverted) colour. foreground - Character which should be used for representing foreground. background - Character which should be used for representing background. Returns: ASCII representation of an identicon image, where one block is one character.
[ "Generates", "an", "identicon", "image", "in", "the", "ASCII", "format", ".", "The", "image", "will", "just", "output", "the", "matrix", "used", "to", "generate", "the", "identicon", "." ]
002ad10fd58adedfb465b5ef96eacbe6a595c2ac
https://github.com/azaghal/pydenticon/blob/002ad10fd58adedfb465b5ef96eacbe6a595c2ac/pydenticon/__init__.py#L263-L285
train
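The ASCII rendering is just a nested join over the matrix; a quick runnable demo with a hand-written matrix:

matrix = [
    [True, False, True],
    [False, True, False],
    [True, False, True],
]
print("\n".join("".join("#" if cell else "-" for cell in row) for row in matrix))
# #-#
# -#-
# #-#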
eclipse/unide.python
src/unide/util.py
local_timezone
def local_timezone(value): """Add the local timezone to `value` to make it aware.""" if hasattr(value, "tzinfo") and value.tzinfo is None: return value.replace(tzinfo=dateutil.tz.tzlocal()) return value
python
def local_timezone(value): """Add the local timezone to `value` to make it aware.""" if hasattr(value, "tzinfo") and value.tzinfo is None: return value.replace(tzinfo=dateutil.tz.tzlocal()) return value
[ "def", "local_timezone", "(", "value", ")", ":", "if", "hasattr", "(", "value", ",", "\"tzinfo\"", ")", "and", "value", ".", "tzinfo", "is", "None", ":", "return", "value", ".", "replace", "(", "tzinfo", "=", "dateutil", ".", "tz", ".", "tzlocal", "(", ")", ")", "return", "value" ]
Add the local timezone to `value` to make it aware.
[ "Add", "the", "local", "timezone", "to", "value", "to", "make", "it", "aware", "." ]
b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/util.py#L37-L41
train
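A quick demonstration of what "making a datetime aware" means here (python-dateutil must be installed):

import datetime
import dateutil.tz

naive = datetime.datetime(2024, 1, 1, 12, 0)
aware = naive.replace(tzinfo=dateutil.tz.tzlocal())
print(naive.tzinfo)               # None -> the value is "naive"
print(aware.tzinfo is not None)   # True -> the value is now "aware"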
eclipse/unide.python
src/unide/util.py
dumps
def dumps(data, **kwargs): """Convert a PPMP entity to JSON. Additional arguments are the same as accepted by `json.dumps`.""" def _encoder(value): if isinstance(value, datetime.datetime): return value.isoformat() if hasattr(value, "_data"): return value._data raise TypeError('Could not encode %r' % value) return json.dumps(data, default=_encoder, **kwargs)
python
def dumps(data, **kwargs): """Convert a PPMP entity to JSON. Additional arguments are the same as accepted by `json.dumps`.""" def _encoder(value): if isinstance(value, datetime.datetime): return value.isoformat() if hasattr(value, "_data"): return value._data raise TypeError('Could not encode %r' % value) return json.dumps(data, default=_encoder, **kwargs)
[ "def", "dumps", "(", "data", ",", "*", "*", "kwargs", ")", ":", "def", "_encoder", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "datetime", ".", "datetime", ")", ":", "return", "value", ".", "isoformat", "(", ")", "if", "hasattr", "(", "value", ",", "\"_data\"", ")", ":", "return", "value", ".", "_data", "raise", "TypeError", "(", "'Could not encode %r'", "%", "value", ")", "return", "json", ".", "dumps", "(", "data", ",", "default", "=", "_encoder", ",", "*", "*", "kwargs", ")" ]
Convert a PPMP entity to JSON. Additional arguments are the same as accepted by `json.dumps`.
[ "Convert", "a", "PPMP", "entity", "to", "JSON", ".", "Additional", "arguments", "are", "the", "same", "as", "accepted", "by", "json", ".", "dumps", "." ]
b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/util.py#L68-L81
train
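The key mechanism is json.dumps's default= hook, which the encoder calls only for values it cannot serialise natively; a stripped-down sketch without the PPMP _data handling:

import datetime
import json

def _encoder(value):
    # json calls this only for values it cannot encode on its own
    if isinstance(value, datetime.datetime):
        return value.isoformat()
    raise TypeError('Could not encode %r' % value)

payload = {"ts": datetime.datetime(2024, 1, 1, 12, 0)}
print(json.dumps(payload, default=_encoder))
# {"ts": "2024-01-01T12:00:00"}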
Godley/MuseParse
MuseParse/classes/Output/helpers.py
setup_lilypond
def setup_lilypond(path_to_lilypond_folder="default"): ''' Optional helper method which works out the platform and calls the relevant setup method * param path_to_lilypond_folder: the path where lilypond.exe or the lilypond runner tool in mac is located. Not needed if setup is default, or if using linux * :return: None ''' options = {"win32": setup_lilypond_windows, "darwin": setup_lilypond_osx} if platform.startswith("linux"): setup_lilypond_linux() else: options[platform](path_to_lilypond_folder)
python
def setup_lilypond(path_to_lilypond_folder="default"): ''' Optional helper method which works out the platform and calls the relevant setup method * param path_to_lilypond_folder: the path where lilypond.exe or the lilypond runner tool in mac is located. Not needed if setup is default, or if using linux * :return: None ''' options = {"win32": setup_lilypond_windows, "darwin": setup_lilypond_osx} if platform.startswith("linux"): setup_lilypond_linux() else: options[platform](path_to_lilypond_folder)
[ "def", "setup_lilypond", "(", "path_to_lilypond_folder", "=", "\"default\"", ")", ":", "options", "=", "{", "\"win32\"", ":", "setup_lilypond_windows", ",", "\"darwin\"", ":", "setup_lilypond_osx", "}", "if", "platform", ".", "startswith", "(", "\"linux\"", ")", ":", "setup_lilypond_linux", "(", ")", "else", ":", "options", "[", "platform", "]", "(", "path_to_lilypond_folder", ")" ]
Optional helper method which works out the platform and calls the relevant setup method * param path_to_lilypond_folder: the path where lilypond.exe or the lilypond runner tool in mac is located. Not needed if setup is default, or if using linux * :return: None
[ "Optional", "helper", "method", "which", "works", "out", "the", "platform", "and", "calls", "the", "relevant", "setup", "method" ]
23cecafa1fdc0f2d6a87760553572b459f3c9904
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/Output/helpers.py#L13-L26
train
Godley/MuseParse
MuseParse/classes/Output/helpers.py
setup_lilypond_windows
def setup_lilypond_windows(path="default"): ''' Optional helper method which does the environment setup for lilypond in windows. If you've ran this method, you do not need and should not provide a lyscript when you instantiate this class. As this method is static, you can run this method before you set up the LilypondRenderer instance. * parameter: path_to_lilypond is the path to the folder which contains the file "lilypond.exe". Usually ProgramFiles/Lilypond/usr/bin. Leave at default to set to this path. * returns: None ''' default = "C:/Program Files (x86)/LilyPond/usr/bin" path_variable = os.environ['PATH'].split(";") if path == "default": path_variable.append(default) else: path_variable.append(path) os.environ['PATH'] = ";".join(path_variable)
python
def setup_lilypond_windows(path="default"): ''' Optional helper method which does the environment setup for lilypond in windows. If you've ran this method, you do not need and should not provide a lyscript when you instantiate this class. As this method is static, you can run this method before you set up the LilypondRenderer instance. * parameter: path_to_lilypond is the path to the folder which contains the file "lilypond.exe". Usually ProgramFiles/Lilypond/usr/bin. Leave at default to set to this path. * returns: None ''' default = "C:/Program Files (x86)/LilyPond/usr/bin" path_variable = os.environ['PATH'].split(";") if path == "default": path_variable.append(default) else: path_variable.append(path) os.environ['PATH'] = ";".join(path_variable)
[ "def", "setup_lilypond_windows", "(", "path", "=", "\"default\"", ")", ":", "default", "=", "\"C:/Program Files (x86)/LilyPond/usr/bin\"", "path_variable", "=", "os", ".", "environ", "[", "'PATH'", "]", ".", "split", "(", "\";\"", ")", "if", "path", "==", "\"default\"", ":", "path_variable", ".", "append", "(", "default", ")", "else", ":", "path_variable", ".", "append", "(", "path", ")", "os", ".", "environ", "[", "'PATH'", "]", "=", "\";\"", ".", "join", "(", "path_variable", ")" ]
Optional helper method which does the environment setup for lilypond in windows. If you've run this method, you do not need and should not provide a lyscript when you instantiate this class. As this method is static, you can run this method before you set up the LilypondRenderer instance. * parameter: path_to_lilypond is the path to the folder which contains the file "lilypond.exe". Usually ProgramFiles/Lilypond/usr/bin. Leave at default to set to this path. * returns: None
[ "Optional", "helper", "method", "which", "does", "the", "environment", "setup", "for", "lilypond", "in", "windows", ".", "If", "you", "ve", "ran", "this", "method", "you", "do", "not", "need", "and", "should", "not", "provide", "a", "lyscript", "when", "you", "instantiate", "this", "class", ".", "As", "this", "method", "is", "static", "you", "can", "run", "this", "method", "before", "you", "set", "up", "the", "LilypondRenderer", "instance", "." ]
23cecafa1fdc0f2d6a87760553572b459f3c9904
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/Output/helpers.py#L29-L46
train
blockstack/python-utilitybelt
utilitybelt/dicts.py
recursive_dict_to_dict
def recursive_dict_to_dict(rdict): """ Convert a recursive dict to a plain ol' dict. """ d = {} for (k, v) in rdict.items(): if isinstance(v, defaultdict): d[k] = recursive_dict_to_dict(v) else: d[k] = v return d
python
def recursive_dict_to_dict(rdict): """ Convert a recursive dict to a plain ol' dict. """ d = {} for (k, v) in rdict.items(): if isinstance(v, defaultdict): d[k] = recursive_dict_to_dict(v) else: d[k] = v return d
[ "def", "recursive_dict_to_dict", "(", "rdict", ")", ":", "d", "=", "{", "}", "for", "(", "k", ",", "v", ")", "in", "rdict", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "defaultdict", ")", ":", "d", "[", "k", "]", "=", "recursive_dict_to_dict", "(", "v", ")", "else", ":", "d", "[", "k", "]", "=", "v", "return", "d" ]
Convert a recursive dict to a plain ol' dict.
[ "Convert", "a", "recursive", "dict", "to", "a", "plain", "ol", "dict", "." ]
13d3502aa1a486c9d775ad2c551fb8e7e48b0d96
https://github.com/blockstack/python-utilitybelt/blob/13d3502aa1a486c9d775ad2c551fb8e7e48b0d96/utilitybelt/dicts.py#L17-L26
train
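Usage sketch with an auto-vivifying defaultdict; the tree() helper is illustrative, not part of the library:

from collections import defaultdict

def recursive_dict_to_dict(rdict):
    d = {}
    for k, v in rdict.items():
        d[k] = recursive_dict_to_dict(v) if isinstance(v, defaultdict) else v
    return d

def tree():
    # auto-vivifying nested dict: missing keys create new subtrees
    return defaultdict(tree)

rdict = tree()
rdict["a"]["b"]["c"] = 1
print(recursive_dict_to_dict(rdict))  # {'a': {'b': {'c': 1}}}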
blockstack/python-utilitybelt
utilitybelt/dicts.py
scrub_dict
def scrub_dict(d): """ Recursively inspect a dictionary and remove all empty values, including empty strings, lists, and dictionaries. """ if type(d) is dict: return dict( (k, scrub_dict(v)) for k, v in d.iteritems() if v and scrub_dict(v) ) elif type(d) is list: return [ scrub_dict(v) for v in d if v and scrub_dict(v) ] else: return d
python
def scrub_dict(d): """ Recursively inspect a dictionary and remove all empty values, including empty strings, lists, and dictionaries. """ if type(d) is dict: return dict( (k, scrub_dict(v)) for k, v in d.iteritems() if v and scrub_dict(v) ) elif type(d) is list: return [ scrub_dict(v) for v in d if v and scrub_dict(v) ] else: return d
[ "def", "scrub_dict", "(", "d", ")", ":", "if", "type", "(", "d", ")", "is", "dict", ":", "return", "dict", "(", "(", "k", ",", "scrub_dict", "(", "v", ")", ")", "for", "k", ",", "v", "in", "d", ".", "iteritems", "(", ")", "if", "v", "and", "scrub_dict", "(", "v", ")", ")", "elif", "type", "(", "d", ")", "is", "list", ":", "return", "[", "scrub_dict", "(", "v", ")", "for", "v", "in", "d", "if", "v", "and", "scrub_dict", "(", "v", ")", "]", "else", ":", "return", "d" ]
Recursively inspect a dictionary and remove all empty values, including empty strings, lists, and dictionaries.
[ "Recursively", "inspect", "a", "dictionary", "and", "remove", "all", "empty", "values", "including", "empty", "strings", "lists", "and", "dictionaries", "." ]
13d3502aa1a486c9d775ad2c551fb8e7e48b0d96
https://github.com/blockstack/python-utilitybelt/blob/13d3502aa1a486c9d775ad2c551fb8e7e48b0d96/utilitybelt/dicts.py#L29-L42
train
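The original relies on Python 2's iteritems(); a Python 3 port of the same idea, shown here only as a sketch:

def scrub_dict(d):
    # drop falsy values and values that become empty after scrubbing
    if isinstance(d, dict):
        return {k: scrub_dict(v) for k, v in d.items() if v and scrub_dict(v)}
    if isinstance(d, list):
        return [scrub_dict(v) for v in d if v and scrub_dict(v)]
    return d

print(scrub_dict({"a": "", "b": {"c": [], "d": 1}, "e": [0, 2]}))
# {'b': {'d': 1}, 'e': [2]}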
blockstack/python-utilitybelt
utilitybelt/dicts.py
_to_json_type
def _to_json_type(obj, classkey=None): """ Recursively convert the object instance into a valid JSON type. """ if isinstance(obj, dict): data = {} for (k, v) in obj.items(): data[k] = _to_json_type(v, classkey) return data elif hasattr(obj, "_ast"): return _to_json_type(obj._ast()) elif hasattr(obj, "__iter__"): return [_to_json_type(v, classkey) for v in obj] elif hasattr(obj, "__dict__"): data = dict([ (key, _to_json_type(value, classkey)) for key, value in obj.__dict__.iteritems() if not callable(value) and not key.startswith('_') ]) if classkey is not None and hasattr(obj, "__class__"): data[classkey] = obj.__class__.__name__ return data else: return obj
python
def _to_json_type(obj, classkey=None): """ Recursively convert the object instance into a valid JSON type. """ if isinstance(obj, dict): data = {} for (k, v) in obj.items(): data[k] = _to_json_type(v, classkey) return data elif hasattr(obj, "_ast"): return _to_json_type(obj._ast()) elif hasattr(obj, "__iter__"): return [_to_json_type(v, classkey) for v in obj] elif hasattr(obj, "__dict__"): data = dict([ (key, _to_json_type(value, classkey)) for key, value in obj.__dict__.iteritems() if not callable(value) and not key.startswith('_') ]) if classkey is not None and hasattr(obj, "__class__"): data[classkey] = obj.__class__.__name__ return data else: return obj
[ "def", "_to_json_type", "(", "obj", ",", "classkey", "=", "None", ")", ":", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "data", "=", "{", "}", "for", "(", "k", ",", "v", ")", "in", "obj", ".", "items", "(", ")", ":", "data", "[", "k", "]", "=", "_to_json_type", "(", "v", ",", "classkey", ")", "return", "data", "elif", "hasattr", "(", "obj", ",", "\"_ast\"", ")", ":", "return", "_to_json_type", "(", "obj", ".", "_ast", "(", ")", ")", "elif", "hasattr", "(", "obj", ",", "\"__iter__\"", ")", ":", "return", "[", "_to_json_type", "(", "v", ",", "classkey", ")", "for", "v", "in", "obj", "]", "elif", "hasattr", "(", "obj", ",", "\"__dict__\"", ")", ":", "data", "=", "dict", "(", "[", "(", "key", ",", "_to_json_type", "(", "value", ",", "classkey", ")", ")", "for", "key", ",", "value", "in", "obj", ".", "__dict__", ".", "iteritems", "(", ")", "if", "not", "callable", "(", "value", ")", "and", "not", "key", ".", "startswith", "(", "'_'", ")", "]", ")", "if", "classkey", "is", "not", "None", "and", "hasattr", "(", "obj", ",", "\"__class__\"", ")", ":", "data", "[", "classkey", "]", "=", "obj", ".", "__class__", ".", "__name__", "return", "data", "else", ":", "return", "obj" ]
Recursively convert the object instance into a valid JSON type.
[ "Recursively", "convert", "the", "object", "instance", "into", "a", "valid", "JSON", "type", "." ]
13d3502aa1a486c9d775ad2c551fb8e7e48b0d96
https://github.com/blockstack/python-utilitybelt/blob/13d3502aa1a486c9d775ad2c551fb8e7e48b0d96/utilitybelt/dicts.py#L45-L67
train
blockstack/python-utilitybelt
utilitybelt/dicts.py
to_dict
def to_dict(obj): """ Convert an instance of an object into a dict. """ d = _to_json_type(obj) if isinstance(d, dict): return scrub_dict(d) else: raise ValueError("The value provided must be an object.")
python
def to_dict(obj): """ Convert an instance of an object into a dict. """ d = _to_json_type(obj) if isinstance(d, dict): return scrub_dict(d) else: raise ValueError("The value provided must be an object.")
[ "def", "to_dict", "(", "obj", ")", ":", "d", "=", "_to_json_type", "(", "obj", ")", "if", "isinstance", "(", "d", ",", "dict", ")", ":", "return", "scrub_dict", "(", "d", ")", "else", ":", "raise", "ValueError", "(", "\"The value provided must be an object.\"", ")" ]
Convert an instance of an object into a dict.
[ "Convert", "an", "instance", "of", "an", "object", "into", "a", "dict", "." ]
13d3502aa1a486c9d775ad2c551fb8e7e48b0d96
https://github.com/blockstack/python-utilitybelt/blob/13d3502aa1a486c9d775ad2c551fb8e7e48b0d96/utilitybelt/dicts.py#L70-L77
train
TylerTemp/docpie
docpie/tracemore.py
print_exc_plus
def print_exc_plus(stream=sys.stdout): '''print normal traceback information with some local arg values''' # code of this method is mainly from <Python Cookbook> write = stream.write # assert the method exists flush = stream.flush tp, value, tb = sys.exc_info() while tb.tb_next: tb = tb.tb_next stack = list() f = tb.tb_frame while f: stack.append(f) f = f.f_back stack.reverse() try: traceback.print_exc(None, stream) except BaseException as e: write(u("FAILED PRINTING TRACE\n\n")) write(u(str(value))) write(u('\n\n')) finally: flush() write(u('Locals by frame, innermost last\n')) for frame in stack: write(u('\nFrame %s in %s at line %s\n' % (frame.f_code.co_name, frame.f_code.co_filename, frame.f_lineno))) for key, value, in frame.f_locals.items(): write(u('\t%20s = ' % key)) try: write(u('%s\n' % value)) except BaseException: write(u('<ERROR WHILE PRINTING VALUE>\n')) flush()
python
def print_exc_plus(stream=sys.stdout): '''print normal traceback information with some local arg values''' # code of this method is mainly from <Python Cookbook> write = stream.write # assert the method exists flush = stream.flush tp, value, tb = sys.exc_info() while tb.tb_next: tb = tb.tb_next stack = list() f = tb.tb_frame while f: stack.append(f) f = f.f_back stack.reverse() try: traceback.print_exc(None, stream) except BaseException as e: write(u("FAILED PRINTING TRACE\n\n")) write(u(str(value))) write(u('\n\n')) finally: flush() write(u('Locals by frame, innermost last\n')) for frame in stack: write(u('\nFrame %s in %s at line %s\n' % (frame.f_code.co_name, frame.f_code.co_filename, frame.f_lineno))) for key, value, in frame.f_locals.items(): write(u('\t%20s = ' % key)) try: write(u('%s\n' % value)) except BaseException: write(u('<ERROR WHILE PRINTING VALUE>\n')) flush()
[ "def", "print_exc_plus", "(", "stream", "=", "sys", ".", "stdout", ")", ":", "# code of this mothod is mainly from <Python Cookbook>", "write", "=", "stream", ".", "write", "# assert the mothod exists", "flush", "=", "stream", ".", "flush", "tp", ",", "value", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "while", "tb", ".", "tb_next", ":", "tb", "=", "tb", ".", "tb_next", "stack", "=", "list", "(", ")", "f", "=", "tb", ".", "tb_frame", "while", "f", ":", "stack", ".", "append", "(", "f", ")", "f", "=", "f", ".", "f_back", "stack", ".", "reverse", "(", ")", "try", ":", "traceback", ".", "print_exc", "(", "None", ",", "stream", ")", "except", "BaseException", "as", "e", ":", "write", "(", "u", "(", "\"FAILED PRINTING TRACE\\n\\n\"", ")", ")", "write", "(", "u", "(", "str", "(", "value", ")", ")", ")", "write", "(", "u", "(", "'\\n\\n'", ")", ")", "finally", ":", "flush", "(", ")", "write", "(", "u", "(", "'Locals by frame, innermost last\\n'", ")", ")", "for", "frame", "in", "stack", ":", "write", "(", "u", "(", "'\\nFrame %s in %s at line %s\\n'", "%", "(", "frame", ".", "f_code", ".", "co_name", ",", "frame", ".", "f_code", ".", "co_filename", ",", "frame", ".", "f_lineno", ")", ")", ")", "for", "key", ",", "value", ",", "in", "frame", ".", "f_locals", ".", "items", "(", ")", ":", "write", "(", "u", "(", "'\\t%20s = '", "%", "key", ")", ")", "try", ":", "write", "(", "u", "(", "'%s\\n'", "%", "value", ")", ")", "except", "BaseException", ":", "write", "(", "u", "(", "'<ERROR WHILE PRINTING VALUE>\\n'", ")", ")", "flush", "(", ")" ]
print normal traceback information with some local arg values
[ "print", "normal", "traceback", "information", "with", "some", "local", "arg", "values" ]
e658454b81b6c79a020d499f12ad73496392c09a
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/tracemore.py#L22-L56
train
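Usage sketch; it assumes print_exc_plus is importable from docpie.tracemore, as the module path above suggests:

import sys
from docpie.tracemore import print_exc_plus

def fail():
    secret = 42  # a local that a plain traceback would not show
    raise ValueError("boom")

try:
    fail()
except ValueError:
    print_exc_plus(stream=sys.stderr)  # traceback plus "secret = 42"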
MacHu-GWU/single_file_module-project
sfm/textformatter.py
format_single_space_only
def format_single_space_only(text): """Revise consecutive empty space to single space. Example:: " I feel so GOOD!" => "I feel so GOOD!" **Chinese documentation** Ensures that the text never contains more than one consecutive space. """ return " ".join([word for word in text.strip().split(" ") if len(word) >= 1])
python
def format_single_space_only(text): """Revise consecutive empty space to single space. Example:: " I feel so GOOD!" => "I feel so GOOD!" **Chinese documentation** Ensures that the text never contains more than one consecutive space. """ return " ".join([word for word in text.strip().split(" ") if len(word) >= 1])
[ "def", "format_single_space_only", "(", "text", ")", ":", "return", "\" \"", ".", "join", "(", "[", "word", "for", "word", "in", "text", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ")", "if", "len", "(", "word", ")", ">=", "1", "]", ")" ]
Revise consecutive empty space to single space. Example:: " I feel so GOOD!" => "I feel so GOOD!" **Chinese documentation** Ensures that the text never contains more than one consecutive space.
[ "Revise", "consecutive", "empty", "space", "to", "single", "space", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/textformatter.py#L26-L37
train
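The whole function is a filtered split/join; a one-line demo:

text = "  I   feel so   GOOD!"
print(" ".join(w for w in text.strip().split(" ") if w))  # 'I feel so GOOD!'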
MacHu-GWU/single_file_module-project
sfm/textformatter.py
format_title
def format_title(text): """Capitalize the first letter of each word except function words. Example:: title = "Beautiful is Better than Ugly" **Chinese documentation** Converts the text to title case: the first letter of every English word is capitalized, except for function words. """ text = text.strip() # if empty string, return "" if len(text) == 0: return text else: text = text.lower() # lower all char # Normalize to single-space format words = [word for word in text.strip().split(" ") if len(word) >= 1] # Capitalize all words except function words words_new = list() for word in words: if word not in FUNCTION_WORD: word = word[0].upper() + word[1:] words_new.append(word) # Make sure the first word is always capitalized words_new[0] = words_new[0][0].upper() + words_new[0][1:] return " ".join(words_new)
python
def format_title(text): """Capitalize the first letter of each word except function words. Example:: title = "Beautiful is Better than Ugly" **Chinese documentation** Converts the text to title case: the first letter of every English word is capitalized, except for function words. """ text = text.strip() # if empty string, return "" if len(text) == 0: return text else: text = text.lower() # lower all char # Normalize to single-space format words = [word for word in text.strip().split(" ") if len(word) >= 1] # Capitalize all words except function words words_new = list() for word in words: if word not in FUNCTION_WORD: word = word[0].upper() + word[1:] words_new.append(word) # Make sure the first word is always capitalized words_new[0] = words_new[0][0].upper() + words_new[0][1:] return " ".join(words_new)
[ "def", "format_title", "(", "text", ")", ":", "text", "=", "text", ".", "strip", "(", ")", "# if empty string, return \"\"", "if", "len", "(", "text", ")", "==", "0", ":", "return", "text", "else", ":", "text", "=", "text", ".", "lower", "(", ")", "# lower all char", "# Change to in single space format", "words", "=", "[", "word", "for", "word", "in", "text", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ")", "if", "len", "(", "word", ")", ">=", "1", "]", "# Capitalize all words except function word", "words_new", "=", "list", "(", ")", "for", "word", "in", "words", ":", "if", "word", "not", "in", "FUNCTION_WORD", ":", "word", "=", "word", "[", "0", "]", ".", "upper", "(", ")", "+", "word", "[", "1", ":", "]", "words_new", ".", "append", "(", "word", ")", "# Make sure first word always be capitalized", "words_new", "[", "0", "]", "=", "words_new", "[", "0", "]", "[", "0", "]", ".", "upper", "(", ")", "+", "words_new", "[", "0", "]", "[", "1", ":", "]", "return", "\" \"", ".", "join", "(", "words_new", ")" ]
Capitalize the first letter of each word except function words. Example:: title = "Beautiful is Better than Ugly" **Chinese documentation** Converts the text to title case: the first letter of every English word is capitalized, except for function words.
[ "Capitalize", "first", "letter", "for", "each", "words", "except", "function", "words", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/textformatter.py#L40-L71
train
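A runnable sketch of the same logic; FUNCTION_WORD is defined elsewhere in the module, so a small assumed subset is used here:

FUNCTION_WORD = {"is", "than", "the", "a", "an", "of", "and", "or"}  # assumed subset

words = [w for w in "beautiful is better than ugly".lower().split(" ") if w]
words = [w if w in FUNCTION_WORD else w[0].upper() + w[1:] for w in words]
words[0] = words[0][0].upper() + words[0][1:]  # first word always capitalized
print(" ".join(words))  # Beautiful is Better than Ugly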
MacHu-GWU/single_file_module-project
sfm/textformatter.py
format_person_name
def format_person_name(text): """Capitalize the first letter of each part of the name. Example:: person_name = "James Bond" **Chinese documentation** Formats the text as a person's name: the first letter of each word is capitalized. """ text = text.strip() if len(text) == 0: # if empty string, return it return text else: text = text.lower() # lower all char # delete redundant empty space words = [word for word in text.strip().split(" ") if len(word) >= 1] words = [word[0].upper() + word[1:] for word in words] return " ".join(words)
python
def format_person_name(text): """Capitalize the first letter of each part of the name. Example:: person_name = "James Bond" **Chinese documentation** Formats the text as a person's name: the first letter of each word is capitalized. """ text = text.strip() if len(text) == 0: # if empty string, return it return text else: text = text.lower() # lower all char # delete redundant empty space words = [word for word in text.strip().split(" ") if len(word) >= 1] words = [word[0].upper() + word[1:] for word in words] return " ".join(words)
[ "def", "format_person_name", "(", "text", ")", ":", "text", "=", "text", ".", "strip", "(", ")", "if", "len", "(", "text", ")", "==", "0", ":", "# if empty string, return it", "return", "text", "else", ":", "text", "=", "text", ".", "lower", "(", ")", "# lower all char", "# delete redundant empty space", "words", "=", "[", "word", "for", "word", "in", "text", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ")", "if", "len", "(", "word", ")", ">=", "1", "]", "words", "=", "[", "word", "[", "0", "]", ".", "upper", "(", ")", "+", "word", "[", "1", ":", "]", "for", "word", "in", "words", "]", "return", "\" \"", ".", "join", "(", "words", ")" ]
Capitalize the first letter of each part of the name. Example:: person_name = "James Bond" **Chinese documentation** Formats the text as a person's name: the first letter of each word is capitalized.
[ "Capitalize", "first", "letter", "for", "each", "part", "of", "the", "name", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/textformatter.py#L74-L93
train
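A quick demo of the normalisation:

name = "  jAMES   bOND "
words = [w for w in name.strip().lower().split(" ") if w]
print(" ".join(w[0].upper() + w[1:] for w in words))  # James Bond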
MacHu-GWU/single_file_module-project
sfm/dtree.py
DictTree.dump
def dump(self, path): """ dump DictTree data to json files. """ try: with open(path, "wb") as f: f.write(self.__str__().encode("utf-8")) except: pass with open(path, "wb") as f: pickle.dump(self.__data__, f)
python
def dump(self, path): """ dump DictTree data to json files. """ try: with open(path, "wb") as f: f.write(self.__str__().encode("utf-8")) except: pass with open(path, "wb") as f: pickle.dump(self.__data__, f)
[ "def", "dump", "(", "self", ",", "path", ")", ":", "try", ":", "with", "open", "(", "path", ",", "\"wb\"", ")", "as", "f", ":", "f", ".", "write", "(", "self", ".", "__str__", "(", ")", ".", "encode", "(", "\"utf-8\"", ")", ")", "except", ":", "pass", "with", "open", "(", "path", ",", "\"wb\"", ")", "as", "f", ":", "pickle", ".", "dump", "(", "self", ".", "__data__", ",", "f", ")" ]
dump DictTree data to json files.
[ "dump", "DictTree", "data", "to", "json", "files", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/dtree.py#L117-L128
train
MacHu-GWU/single_file_module-project
sfm/dtree.py
DictTree.load
def load(cls, path): """ load DictTree from json files. """ try: with open(path, "rb") as f: return cls(__data__=json.loads(f.read().decode("utf-8"))) except: pass with open(path, "rb") as f: return cls(__data__=pickle.load(f))
python
def load(cls, path): """ load DictTree from json files. """ try: with open(path, "rb") as f: return cls(__data__=json.loads(f.read().decode("utf-8"))) except: pass with open(path, "rb") as f: return cls(__data__=pickle.load(f))
[ "def", "load", "(", "cls", ",", "path", ")", ":", "try", ":", "with", "open", "(", "path", ",", "\"rb\"", ")", "as", "f", ":", "return", "cls", "(", "__data__", "=", "json", ".", "loads", "(", "f", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", ")", ")", "except", ":", "pass", "with", "open", "(", "path", ",", "\"rb\"", ")", "as", "f", ":", "return", "cls", "(", "__data__", "=", "pickle", ".", "load", "(", "f", ")", ")" ]
load DictTree from json files.
[ "load", "DictTree", "from", "json", "files", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/dtree.py#L131-L142
train
MacHu-GWU/single_file_module-project
sfm/dtree.py
DictTree.values
def values(self): """ Iterate values. """ for key, value in self.__data__.items(): if key not in (META, KEY): yield DictTree(__data__=value)
python
def values(self): """ Iterate values. """ for key, value in self.__data__.items(): if key not in (META, KEY): yield DictTree(__data__=value)
[ "def", "values", "(", "self", ")", ":", "for", "key", ",", "value", "in", "self", ".", "__data__", ".", "items", "(", ")", ":", "if", "key", "not", "in", "(", "META", ",", "KEY", ")", ":", "yield", "DictTree", "(", "__data__", "=", "value", ")" ]
Iterate values.
[ "Iterate", "values", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/dtree.py#L202-L208
train
MacHu-GWU/single_file_module-project
sfm/dtree.py
DictTree.keys_at
def keys_at(self, depth, counter=1): """ Iterate keys at specified depth. """ if depth < 1: yield ROOT else: if counter == depth: for key in self.keys(): yield key else: counter += 1 for dict_tree in self.values(): for key in dict_tree.keys_at(depth, counter): yield key
python
def keys_at(self, depth, counter=1): """ Iterate keys at specified depth. """ if depth < 1: yield ROOT else: if counter == depth: for key in self.keys(): yield key else: counter += 1 for dict_tree in self.values(): for key in dict_tree.keys_at(depth, counter): yield key
[ "def", "keys_at", "(", "self", ",", "depth", ",", "counter", "=", "1", ")", ":", "if", "depth", "<", "1", ":", "yield", "ROOT", "else", ":", "if", "counter", "==", "depth", ":", "for", "key", "in", "self", ".", "keys", "(", ")", ":", "yield", "key", "else", ":", "counter", "+=", "1", "for", "dict_tree", "in", "self", ".", "values", "(", ")", ":", "for", "key", "in", "dict_tree", ".", "keys_at", "(", "depth", ",", "counter", ")", ":", "yield", "key" ]
Iterate keys at specified depth.
[ "Iterate", "keys", "at", "specified", "depth", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/dtree.py#L219-L233
train
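A simplified analogue of keys_at over plain nested dicts, without the META/KEY bookkeeping or the ROOT sentinel, to show the depth-based recursion:

def keys_at(d, depth):
    # yield the keys found exactly `depth` levels below the root
    if depth <= 1:
        yield from d.keys()
    else:
        for v in d.values():
            if isinstance(v, dict):
                yield from keys_at(v, depth - 1)

tree = {"a": {"x": 1, "y": {"z": 2}}, "b": {"x": 3}}
print(list(keys_at(tree, 1)))  # ['a', 'b']
print(list(keys_at(tree, 2)))  # ['x', 'y', 'x']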
MacHu-GWU/single_file_module-project
sfm/dtree.py
DictTree.values_at
def values_at(self, depth): """ Iterate values at specified depth. """ if depth < 1: yield self else: for dict_tree in self.values(): for value in dict_tree.values_at(depth - 1): yield value
python
def values_at(self, depth): """ Iterate values at specified depth. """ if depth < 1: yield self else: for dict_tree in self.values(): for value in dict_tree.values_at(depth - 1): yield value
[ "def", "values_at", "(", "self", ",", "depth", ")", ":", "if", "depth", "<", "1", ":", "yield", "self", "else", ":", "for", "dict_tree", "in", "self", ".", "values", "(", ")", ":", "for", "value", "in", "dict_tree", ".", "values_at", "(", "depth", "-", "1", ")", ":", "yield", "value" ]
Iterate values at specified depth.
[ "Iterate", "values", "at", "specified", "depth", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/dtree.py#L235-L244
train
MacHu-GWU/single_file_module-project
sfm/dtree.py
DictTree.items_at
def items_at(self, depth): """ Iterate items at specified depth. """ if depth < 1: yield ROOT, self elif depth == 1: for key, value in self.items(): yield key, value else: for dict_tree in self.values(): for key, value in dict_tree.items_at(depth - 1): yield key, value
python
def items_at(self, depth): """ Iterate items at specified depth. """ if depth < 1: yield ROOT, self elif depth == 1: for key, value in self.items(): yield key, value else: for dict_tree in self.values(): for key, value in dict_tree.items_at(depth - 1): yield key, value
[ "def", "items_at", "(", "self", ",", "depth", ")", ":", "if", "depth", "<", "1", ":", "yield", "ROOT", ",", "self", "elif", "depth", "==", "1", ":", "for", "key", ",", "value", "in", "self", ".", "items", "(", ")", ":", "yield", "key", ",", "value", "else", ":", "for", "dict_tree", "in", "self", ".", "values", "(", ")", ":", "for", "key", ",", "value", "in", "dict_tree", ".", "items_at", "(", "depth", "-", "1", ")", ":", "yield", "key", ",", "value" ]
Iterate items at specified depth.
[ "Iterate", "items", "at", "specified", "depth", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/dtree.py#L246-L258
train
MacHu-GWU/single_file_module-project
sfm/dtree.py
DictTree.stats
def stats(self, result=None, counter=0): """ Display the node stats info on specific depth in this dict. :: [ {"depth": 0, "leaf": M0, "root": N0}, {"depth": 1, "leaf": M1, "root": N1}, ... {"depth": k, "leaf": Mk, "root": Nk}, ] """ if result is None: result = dict() if counter == 0: if len(self): result[0] = {"depth": 0, "leaf": 0, "root": 1} else: result[0] = {"depth": 0, "leaf": 1, "root": 0} counter += 1 if len(self): result.setdefault( counter, {"depth": counter, "leaf": 0, "root": 0}) for dict_tree in self.values(): if len(dict_tree): # root result[counter]["root"] += 1 else: # leaf result[counter]["leaf"] += 1 dict_tree.stats(result, counter) return [ collections.OrderedDict([ ("depth", info["depth"]), ("leaf", info["leaf"]), ("root", info["root"]), ]) for info in sorted(result.values(), key=lambda x: x["depth"]) ]
python
def stats(self, result=None, counter=0): """ Display the node stats info on specific depth in this dict. :: [ {"depth": 0, "leaf": M0, "root": N0}, {"depth": 1, "leaf": M1, "root": N1}, ... {"depth": k, "leaf": Mk, "root": Nk}, ] """ if result is None: result = dict() if counter == 0: if len(self): result[0] = {"depth": 0, "leaf": 0, "root": 1} else: result[0] = {"depth": 0, "leaf": 1, "root": 0} counter += 1 if len(self): result.setdefault( counter, {"depth": counter, "leaf": 0, "root": 0}) for dict_tree in self.values(): if len(dict_tree): # root result[counter]["root"] += 1 else: # leaf result[counter]["leaf"] += 1 dict_tree.stats(result, counter) return [ collections.OrderedDict([ ("depth", info["depth"]), ("leaf", info["leaf"]), ("root", info["root"]), ]) for info in sorted(result.values(), key=lambda x: x["depth"]) ]
[ "def", "stats", "(", "self", ",", "result", "=", "None", ",", "counter", "=", "0", ")", ":", "if", "result", "is", "None", ":", "result", "=", "dict", "(", ")", "if", "counter", "==", "0", ":", "if", "len", "(", "self", ")", ":", "result", "[", "0", "]", "=", "{", "\"depth\"", ":", "0", ",", "\"leaf\"", ":", "0", ",", "\"root\"", ":", "1", "}", "else", ":", "result", "[", "0", "]", "=", "{", "\"depth\"", ":", "0", ",", "\"leaf\"", ":", "1", ",", "\"root\"", ":", "0", "}", "counter", "+=", "1", "if", "len", "(", "self", ")", ":", "result", ".", "setdefault", "(", "counter", ",", "{", "\"depth\"", ":", "counter", ",", "\"leaf\"", ":", "0", ",", "\"root\"", ":", "0", "}", ")", "for", "dict_tree", "in", "self", ".", "values", "(", ")", ":", "if", "len", "(", "dict_tree", ")", ":", "# root", "result", "[", "counter", "]", "[", "\"root\"", "]", "+=", "1", "else", ":", "# leaf", "result", "[", "counter", "]", "[", "\"leaf\"", "]", "+=", "1", "dict_tree", ".", "stats", "(", "result", ",", "counter", ")", "return", "[", "collections", ".", "OrderedDict", "(", "[", "(", "\"depth\"", ",", "info", "[", "\"depth\"", "]", ")", ",", "(", "\"leaf\"", ",", "info", "[", "\"leaf\"", "]", ")", ",", "(", "\"root\"", ",", "info", "[", "\"root\"", "]", ")", ",", "]", ")", "for", "info", "in", "sorted", "(", "result", ".", "values", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "\"depth\"", "]", ")", "]" ]
Display the node stats info on specific depth in this dict. :: [ {"depth": 0, "leaf": M0, "root": N0}, {"depth": 1, "leaf": M1, "root": N1}, ... {"depth": k, "leaf": Mk, "root": Nk}, ]
[ "Display", "the", "node", "stats", "info", "on", "specific", "depth", "in", "this", "dict", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/dtree.py#L271-L311
train
igorcoding/asynctnt-queue
asynctnt_queue/tube.py
Tube.put
async def put(self, data, *, pri=None, ttl=None, ttr=None, delay=None): """ Puts data to the queue and returns a newly created Task :param data: Arbitrary task payload :param pri: Task priority (0 by default) :param ttl: Task time-to-live :param ttr: Task time-to-run :param delay: Task delay :return: Task instance """ opts = {} if pri is not None: opts['pri'] = pri if ttl is not None: opts['ttl'] = ttl if ttr is not None: opts['ttr'] = ttr if delay is not None: opts['delay'] = delay args = (data, opts) res = await self.conn.call(self.__funcs['put'], args) return self._create_task(res.body)
python
async def put(self, data, *, pri=None, ttl=None, ttr=None, delay=None): """ Puts data to the queue and returns a newly created Task :param data: Arbitrary task payload :param pri: Task priority (0 by default) :param ttl: Task time-to-live :param ttr: Task time-to-run :param delay: Task delay :return: Task instance """ opts = {} if pri is not None: opts['pri'] = pri if ttl is not None: opts['ttl'] = ttl if ttr is not None: opts['ttr'] = ttr if delay is not None: opts['delay'] = delay args = (data, opts) res = await self.conn.call(self.__funcs['put'], args) return self._create_task(res.body)
[ "async", "def", "put", "(", "self", ",", "data", ",", "*", ",", "pri", "=", "None", ",", "ttl", "=", "None", ",", "ttr", "=", "None", ",", "delay", "=", "None", ")", ":", "opts", "=", "{", "}", "if", "pri", "is", "not", "None", ":", "opts", "[", "'pri'", "]", "=", "pri", "if", "ttl", "is", "not", "None", ":", "opts", "[", "'ttl'", "]", "=", "ttl", "if", "ttr", "is", "not", "None", ":", "opts", "[", "'ttr'", "]", "=", "ttr", "if", "delay", "is", "not", "None", ":", "opts", "[", "'delay'", "]", "=", "delay", "args", "=", "(", "data", ",", "opts", ")", "res", "=", "await", "self", ".", "conn", ".", "call", "(", "self", ".", "__funcs", "[", "'put'", "]", ",", "args", ")", "return", "self", ".", "_create_task", "(", "res", ".", "body", ")" ]
Puts data to the queue and returns a newly created Task :param data: Arbitrary task payload :param pri: Task priority (0 by default) :param ttl: Task time-to-live :param ttr: Task time-to-run :param delay: Task delay :return: Task instance
[ "Puts", "data", "to", "the", "queue", "and", "returns", "a", "newly", "created", "Task" ]
75719b2dd27e8314ae924aea6a7a85be8f48ecc5
https://github.com/igorcoding/asynctnt-queue/blob/75719b2dd27e8314ae924aea6a7a85be8f48ecc5/asynctnt_queue/tube.py#L74-L100
train
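Wiring sketch for producing a task; the connection setup and the Queue/tube accessors are assumptions based on typical asynctnt usage, not confirmed by the snippet above:

import asyncio
import asynctnt
import asynctnt_queue

async def main():
    conn = asynctnt.Connection(host='127.0.0.1', port=3301)
    await conn.connect()
    queue = asynctnt_queue.Queue(conn)   # assumed wrapper class
    tube = queue.tube('tasks')           # assumed accessor name
    # put() signature as documented above: data plus optional pri/ttl/ttr/delay
    task = await tube.put({'job': 'send_email'}, ttl=60, delay=5)
    print(task)
    await conn.disconnect()

asyncio.run(main())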
igorcoding/asynctnt-queue
asynctnt_queue/tube.py
Tube.take
async def take(self, timeout=None): """ Takes task from the queue, waiting the timeout if specified :param timeout: Seconds to wait for ready tasks :return: Task instance """ args = None if timeout is not None: args = (timeout,) res = await self.conn.call(self.__funcs['take'], args) if len(res.body) > 0: return self._create_task(res.body) return None
python
async def take(self, timeout=None): """ Takes task from the queue, waiting the timeout if specified :param timeout: Seconds to wait for ready tasks :return: Task instance """ args = None if timeout is not None: args = (timeout,) res = await self.conn.call(self.__funcs['take'], args) if len(res.body) > 0: return self._create_task(res.body) return None
[ "async", "def", "take", "(", "self", ",", "timeout", "=", "None", ")", ":", "args", "=", "None", "if", "timeout", "is", "not", "None", ":", "args", "=", "(", "timeout", ",", ")", "res", "=", "await", "self", ".", "conn", ".", "call", "(", "self", ".", "__funcs", "[", "'take'", "]", ",", "args", ")", "if", "len", "(", "res", ".", "body", ")", ">", "0", ":", "return", "self", ".", "_create_task", "(", "res", ".", "body", ")", "return", "None" ]
Takes task from the queue, waiting the timeout if specified :param timeout: Seconds to wait for ready tasks :return: Task instance
[ "Takes", "task", "from", "the", "queue", "waiting", "the", "timeout", "if", "specified" ]
75719b2dd27e8314ae924aea6a7a85be8f48ecc5
https://github.com/igorcoding/asynctnt-queue/blob/75719b2dd27e8314ae924aea6a7a85be8f48ecc5/asynctnt_queue/tube.py#L102-L116
train
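Consumer-loop sketch built on take()'s None-on-timeout contract; process() is a placeholder, and task.ack()/task.release() are assumed Task methods mirroring Tarantool queue semantics:

async def worker(tube):
    while True:
        task = await tube.take(timeout=1.0)
        if task is None:
            continue  # no ready task within the timeout
        try:
            process(task.data)      # placeholder for real work
            await task.ack()        # assumed: mark the task done
        except Exception:
            await task.release()    # assumed: put the task back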
igorcoding/asynctnt-queue
asynctnt_queue/tube.py
Tube.peek
async def peek(self, task_id): """ Get task without changing its state :param task_id: Task id :return: Task instance """ args = (task_id,) res = await self.conn.call(self.__funcs['peek'], args) return self._create_task(res.body)
python
async def peek(self, task_id): """ Get task without changing its state :param task_id: Task id :return: Task instance """ args = (task_id,) res = await self.conn.call(self.__funcs['peek'], args) return self._create_task(res.body)
[ "async", "def", "peek", "(", "self", ",", "task_id", ")", ":", "args", "=", "(", "task_id", ",", ")", "res", "=", "await", "self", ".", "conn", ".", "call", "(", "self", ".", "__funcs", "[", "'peek'", "]", ",", "args", ")", "return", "self", ".", "_create_task", "(", "res", ".", "body", ")" ]
Get task without changing its state :param task_id: Task id :return: Task instance
[ "Get", "task", "without", "changing", "its", "state" ]
75719b2dd27e8314ae924aea6a7a85be8f48ecc5
https://github.com/igorcoding/asynctnt-queue/blob/75719b2dd27e8314ae924aea6a7a85be8f48ecc5/asynctnt_queue/tube.py#L156-L166
train
igorcoding/asynctnt-queue
asynctnt_queue/tube.py
Tube.kick
async def kick(self, count): """ Kick `count` tasks from queue :param count: Tasks count to kick :return: Number of tasks actually kicked """ args = (count,) res = await self.conn.call(self.__funcs['kick'], args) if self.conn.version < (1, 7): return res.body[0][0] return res.body[0]
python
async def kick(self, count): """ Kick `count` tasks from queue :param count: Tasks count to kick :return: Number of tasks actually kicked """ args = (count,) res = await self.conn.call(self.__funcs['kick'], args) if self.conn.version < (1, 7): return res.body[0][0] return res.body[0]
[ "async", "def", "kick", "(", "self", ",", "count", ")", ":", "args", "=", "(", "count", ",", ")", "res", "=", "await", "self", ".", "conn", ".", "call", "(", "self", ".", "__funcs", "[", "'kick'", "]", ",", "args", ")", "if", "self", ".", "conn", ".", "version", "<", "(", "1", ",", "7", ")", ":", "return", "res", ".", "body", "[", "0", "]", "[", "0", "]", "return", "res", ".", "body", "[", "0", "]" ]
Kick `count` tasks from queue :param count: Tasks count to kick :return: Number of tasks actually kicked
[ "Kick", "count", "tasks", "from", "queue" ]
75719b2dd27e8314ae924aea6a7a85be8f48ecc5
https://github.com/igorcoding/asynctnt-queue/blob/75719b2dd27e8314ae924aea6a7a85be8f48ecc5/asynctnt_queue/tube.py#L190-L201
train
ariebovenberg/snug
examples/slack/query.py
_parse_content
def _parse_content(response): """parse the response body as JSON, raise on errors""" if response.status_code != 200: raise ApiError(f'unknown error: {response.content.decode()}') result = json.loads(response.content) if not result['ok']: raise ApiError(f'{result["error"]}: {result.get("detail")}') return result
python
def _parse_content(response): """parse the response body as JSON, raise on errors""" if response.status_code != 200: raise ApiError(f'unknown error: {response.content.decode()}') result = json.loads(response.content) if not result['ok']: raise ApiError(f'{result["error"]}: {result.get("detail")}') return result
[ "def", "_parse_content", "(", "response", ")", ":", "if", "response", ".", "status_code", "!=", "200", ":", "raise", "ApiError", "(", "f'unknown error: {response.content.decode()}'", ")", "result", "=", "json", ".", "loads", "(", "response", ".", "content", ")", "if", "not", "result", "[", "'ok'", "]", ":", "raise", "ApiError", "(", "f'{result[\"error\"]}: {result.get(\"detail\")}'", ")", "return", "result" ]
parse the response body as JSON, raise on errors
[ "parse", "the", "response", "body", "as", "JSON", "raise", "on", "errors" ]
4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/slack/query.py#L19-L26
train
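Behaviour sketch with a stubbed response object, assuming _parse_content and ApiError are in scope; FakeResponse stands in for whatever HTTP response type snug passes in:

import json

class FakeResponse:
    status_code = 200
    content = json.dumps({"ok": False, "error": "invalid_auth"}).encode()

try:
    _parse_content(FakeResponse())
except ApiError as exc:
    print(exc)  # invalid_auth: None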
ariebovenberg/snug
examples/slack/query.py
paginated_retrieval
def paginated_retrieval(methodname, itemtype): """decorator factory for retrieval queries from query params""" return compose( reusable, basic_interaction, map_yield(partial(_params_as_get, methodname)), )
python
def paginated_retrieval(methodname, itemtype): """decorator factory for retrieval queries from query params""" return compose( reusable, basic_interaction, map_yield(partial(_params_as_get, methodname)), )
[ "def", "paginated_retrieval", "(", "methodname", ",", "itemtype", ")", ":", "return", "compose", "(", "reusable", ",", "basic_interaction", ",", "map_yield", "(", "partial", "(", "_params_as_get", ",", "methodname", ")", ")", ",", ")" ]
decorator factory for retrieval queries from query params
[ "decorator", "factory", "for", "retrieval", "queries", "from", "query", "params" ]
4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/slack/query.py#L49-L55
train
ariebovenberg/snug
examples/slack/query.py
json_post
def json_post(methodname, rtype, key): """decorator factory for json POST queries""" return compose( reusable, map_return(registry(rtype), itemgetter(key)), basic_interaction, map_yield(partial(_json_as_post, methodname)), oneyield, )
python
def json_post(methodname, rtype, key): """decorator factory for json POST queries""" return compose( reusable, map_return(registry(rtype), itemgetter(key)), basic_interaction, map_yield(partial(_json_as_post, methodname)), oneyield, )
[ "def", "json_post", "(", "methodname", ",", "rtype", ",", "key", ")", ":", "return", "compose", "(", "reusable", ",", "map_return", "(", "registry", "(", "rtype", ")", ",", "itemgetter", "(", "key", ")", ")", ",", "basic_interaction", ",", "map_yield", "(", "partial", "(", "_json_as_post", ",", "methodname", ")", ")", ",", "oneyield", ",", ")" ]
decorator factory for json POST queries
[ "decorator", "factory", "for", "json", "POST", "queries" ]
4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/slack/query.py#L62-L70
train
envi-idl/envipyengine
envipyengine/config.py
_read_config
def _read_config(cfg_file): """ Return a ConfigParser object populated from the settings.cfg file. :return: A Config Parser object. """ config = ConfigParser() # maintain case of options config.optionxform = lambda option: option if not os.path.exists(cfg_file): # Create an empty config config.add_section(_MAIN_SECTION_NAME) config.add_section(_ENVIRONMENT_SECTION_NAME) else: config.read(cfg_file) return config
python
def _read_config(cfg_file): """ Return a ConfigParser object populated from the settings.cfg file. :return: A Config Parser object. """ config = ConfigParser() # maintain case of options config.optionxform = lambda option: option if not os.path.exists(cfg_file): # Create an empty config config.add_section(_MAIN_SECTION_NAME) config.add_section(_ENVIRONMENT_SECTION_NAME) else: config.read(cfg_file) return config
[ "def", "_read_config", "(", "cfg_file", ")", ":", "config", "=", "ConfigParser", "(", ")", "# maintain case of options", "config", ".", "optionxform", "=", "lambda", "option", ":", "option", "if", "not", "os", ".", "path", ".", "exists", "(", "cfg_file", ")", ":", "# Create an empty config", "config", ".", "add_section", "(", "_MAIN_SECTION_NAME", ")", "config", ".", "add_section", "(", "_ENVIRONMENT_SECTION_NAME", ")", "else", ":", "config", ".", "read", "(", "cfg_file", ")", "return", "config" ]
Return a ConfigParser object populated from the settings.cfg file. :return: A Config Parser object.
[ "Return", "a", "ConfigParser", "object", "populated", "from", "the", "settings", ".", "cfg", "file", "." ]
567b639d6592deec3289f6122a9e3d18f2f98432
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/config.py#L145-L160
train
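Two details of _read_config are easy to miss: the identity optionxform keeps option names case-sensitive, and a missing file still yields a parser with both sections pre-created. A runnable reduction, with section names standing in for the module's private constants:

import os
from configparser import ConfigParser

def read_config(cfg_file, main_section='engine', env_section='environment'):
    config = ConfigParser()
    config.optionxform = lambda option: option  # keep option case as written
    if not os.path.exists(cfg_file):
        config.add_section(main_section)        # empty but immediately usable
        config.add_section(env_section)
    else:
        config.read(cfg_file)
    return config

cfg = read_config('does-not-exist.cfg')
cfg.set('environment', 'IDL_Path', '/usr/local/idl')
print(cfg.get('environment', 'IDL_Path'))       # case survives the round trip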
envi-idl/envipyengine
envipyengine/config.py
_write_config
def _write_config(config, cfg_file): """ Write a config object to the settings.cfg file. :param config: A ConfigParser object to write to the settings.cfg file. """ directory = os.path.dirname(cfg_file) if not os.path.exists(directory): os.makedirs(directory) with open(cfg_file, "w+") as output_file: config.write(output_file)
python
def _write_config(config, cfg_file): """ Write a config object to the settings.cfg file. :param config: A ConfigParser object to write to the settings.cfg file. """ directory = os.path.dirname(cfg_file) if not os.path.exists(directory): os.makedirs(directory) with open(cfg_file, "w+") as output_file: config.write(output_file)
[ "def", "_write_config", "(", "config", ",", "cfg_file", ")", ":", "directory", "=", "os", ".", "path", ".", "dirname", "(", "cfg_file", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "os", ".", "makedirs", "(", "directory", ")", "with", "open", "(", "cfg_file", ",", "\"w+\"", ")", "as", "output_file", ":", "config", ".", "write", "(", "output_file", ")" ]
Write a config object to the settings.cfg file. :param config: A ConfigParser object to write to the settings.cfg file.
[ "Write", "a", "config", "object", "to", "the", "settings", ".", "cfg", "file", "." ]
567b639d6592deec3289f6122a9e3d18f2f98432
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/config.py#L163-L173
train
envi-idl/envipyengine
envipyengine/config.py
get_environment
def get_environment(): """ Return all environment values from the config files. Values stored in the user configuration file will take precedence over values stored in the system configuration file. :return: A dictionary containing the name/value pairs of all environment settings in the config file. """ section = _ENVIRONMENT_SECTION_NAME # Read system sys_cfg = _read_config(_SYSTEM_CONFIG_FILE) sys_env = \ dict(sys_cfg.items(section)) if sys_cfg.has_section(section) else {} # Read user usr_cfg = _read_config(_USER_CONFIG_FILE) usr_env = \ dict(usr_cfg.items(section)) if usr_cfg.has_section(section) else {} # Merge user into system for k in usr_env.keys(): sys_env[k] = usr_env[k] return sys_env
python
def get_environment(): """ Return all environment values from the config files. Values stored in the user configuration file will take precedence over values stored in the system configuration file. :return: A dictionary containing the name/value pairs of all environment settings in the config file. """ section = _ENVIRONMENT_SECTION_NAME # Read system sys_cfg = _read_config(_SYSTEM_CONFIG_FILE) sys_env = \ dict(sys_cfg.items(section)) if sys_cfg.has_section(section) else {} # Read user usr_cfg = _read_config(_USER_CONFIG_FILE) usr_env = \ dict(usr_cfg.items(section)) if usr_cfg.has_section(section) else {} # Merge user into system for k in usr_env.keys(): sys_env[k] = usr_env[k] return sys_env
[ "def", "get_environment", "(", ")", ":", "section", "=", "_ENVIRONMENT_SECTION_NAME", "# Read system", "sys_cfg", "=", "_read_config", "(", "_SYSTEM_CONFIG_FILE", ")", "sys_env", "=", "dict", "(", "sys_cfg", ".", "items", "(", "section", ")", ")", "if", "sys_cfg", ".", "has_section", "(", "section", ")", "else", "{", "}", "# Read user", "usr_cfg", "=", "_read_config", "(", "_USER_CONFIG_FILE", ")", "usr_env", "=", "dict", "(", "usr_cfg", ".", "items", "(", "section", ")", ")", "if", "usr_cfg", ".", "has_section", "(", "section", ")", "else", "{", "}", "# Merge user into system", "for", "k", "in", "usr_env", ".", "keys", "(", ")", ":", "sys_env", "[", "k", "]", "=", "usr_env", "[", "k", "]", "return", "sys_env" ]
Return all environment values from the config files. Values stored in the user configuration file will take precedence over values stored in the system configuration file. :return: A dictionary containing the name/value pairs of all environment settings in the config file.
[ "Return", "all", "environment", "values", "from", "the", "config", "files", ".", "Values", "stored", "in", "the", "user", "configuration", "file", "will", "take", "precedence", "over", "values", "stored", "in", "the", "system", "configuration", "file", "." ]
567b639d6592deec3289f6122a9e3d18f2f98432
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/config.py#L176-L199
train
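The precedence rule in get_environment reduces to a plain dict overlay: user values replace system values key by key. The setting names below are illustrative only.

sys_env = {'IDL_DIR': '/usr/local/idl', 'TMPDIR': '/tmp'}
usr_env = {'TMPDIR': '/home/me/tmp'}

merged = dict(sys_env)
merged.update(usr_env)    # same effect as the key-by-key loop above
print(merged['TMPDIR'])   # '/home/me/tmp' -- the user value wins
print(merged['IDL_DIR'])  # '/usr/local/idl' -- the system fallback survives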
envi-idl/envipyengine
envipyengine/config.py
set_environment
def set_environment(environment, system=False): """ Set engine environment values in the config file. :param environment: A dictionary containing the environment variable settings as key/value pairs. :keyword system: Set to True to modify the system configuration file. If not set, the user config file will be modified. """ config_filename = \ _SYSTEM_CONFIG_FILE if system is True else _USER_CONFIG_FILE config = _read_config(config_filename) section = _ENVIRONMENT_SECTION_NAME for key in environment.keys(): config.set(section, key, environment[key]) _write_config(config, config_filename)
python
def set_environment(environment, system=False): """ Set engine environment values in the config file. :param environment: A dictionary containing the environment variable settings as key/value pairs. :keyword system: Set to True to modify the system configuration file. If not set, the user config file will be modified. """ config_filename = \ _SYSTEM_CONFIG_FILE if system is True else _USER_CONFIG_FILE config = _read_config(config_filename) section = _ENVIRONMENT_SECTION_NAME for key in environment.keys(): config.set(section, key, environment[key]) _write_config(config, config_filename)
[ "def", "set_environment", "(", "environment", ",", "system", "=", "False", ")", ":", "config_filename", "=", "_SYSTEM_CONFIG_FILE", "if", "system", "is", "True", "else", "_USER_CONFIG_FILE", "config", "=", "_read_config", "(", "config_filename", ")", "section", "=", "_ENVIRONMENT_SECTION_NAME", "for", "key", "in", "environment", ".", "keys", "(", ")", ":", "config", ".", "set", "(", "section", ",", "key", ",", "environment", "[", "key", "]", ")", "_write_config", "(", "config", ",", "config_filename", ")" ]
Set engine environment values in the config file. :param environment: A dictionary containing the environment variable settings as key/value pairs. :keyword system: Set to True to modify the system configuration file. If not set, the user config file will be modified.
[ "Set", "engine", "environment", "values", "in", "the", "config", "file", "." ]
567b639d6592deec3289f6122a9e3d18f2f98432
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/config.py#L202-L218
train
envi-idl/envipyengine
envipyengine/config.py
remove_environment
def remove_environment(environment_var_name, system=False): """ Remove the specified environment setting from the appropriate config file. :param environment_var_name: The name of the environment setting to remove. :keyword system: Set to True to modify the system configuration file. If not set, the user config file will be modified. """ config_filename = \ _SYSTEM_CONFIG_FILE if system is True else _USER_CONFIG_FILE config = _read_config(config_filename) section = _ENVIRONMENT_SECTION_NAME config.remove_option(section, environment_var_name) _write_config(config, config_filename)
python
def remove_environment(environment_var_name, system=False): """ Remove the specified environment setting from the appropriate config file. :param environment_var_name: The name of the environment setting to remove. :keyword system: Set to True to modify the system configuration file. If not set, the user config file will be modified. """ config_filename = \ _SYSTEM_CONFIG_FILE if system is True else _USER_CONFIG_FILE config = _read_config(config_filename) section = _ENVIRONMENT_SECTION_NAME config.remove_option(section, environment_var_name) _write_config(config, config_filename)
[ "def", "remove_environment", "(", "environment_var_name", ",", "system", "=", "False", ")", ":", "config_filename", "=", "_SYSTEM_CONFIG_FILE", "if", "system", "is", "True", "else", "_USER_CONFIG_FILE", "config", "=", "_read_config", "(", "config_filename", ")", "section", "=", "_ENVIRONMENT_SECTION_NAME", "config", ".", "remove_option", "(", "section", ",", "environment_var_name", ")", "_write_config", "(", "config", ",", "config_filename", ")" ]
Remove the specified environment setting from the appropriate config file. :param environment_var_name: The name of the environment setting to remove. :keyword system: Set to True to modify the system configuration file. If not set, the user config file will be modified.
[ "Remove", "the", "specified", "environment", "setting", "from", "the", "appropriate", "config", "file", "." ]
567b639d6592deec3289f6122a9e3d18f2f98432
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/config.py#L221-L235
train
envi-idl/envipyengine
envipyengine/config.py
get
def get(property_name): """ Returns the value of the specified configuration property. Property values stored in the user configuration file take precedence over values stored in the system configuration file. :param property_name: The name of the property to retrieve. :return: The value of the property. """ config = _read_config(_USER_CONFIG_FILE) section = _MAIN_SECTION_NAME try: property_value = config.get(section, property_name) except (NoOptionError, NoSectionError) as error: # Try the system config file try: config = _read_config(_SYSTEM_CONFIG_FILE) property_value = config.get(section, property_name) except (NoOptionError, NoSectionError) as error: raise NoConfigOptionError(error) return property_value
python
def get(property_name): """ Returns the value of the specified configuration property. Property values stored in the user configuration file take precedence over values stored in the system configuration file. :param property_name: The name of the property to retrieve. :return: The value of the property. """ config = _read_config(_USER_CONFIG_FILE) section = _MAIN_SECTION_NAME try: property_value = config.get(section, property_name) except (NoOptionError, NoSectionError) as error: # Try the system config file try: config = _read_config(_SYSTEM_CONFIG_FILE) property_value = config.get(section, property_name) except (NoOptionError, NoSectionError) as error: raise NoConfigOptionError(error) return property_value
[ "def", "get", "(", "property_name", ")", ":", "config", "=", "_read_config", "(", "_USER_CONFIG_FILE", ")", "section", "=", "_MAIN_SECTION_NAME", "try", ":", "property_value", "=", "config", ".", "get", "(", "section", ",", "property_name", ")", "except", "(", "NoOptionError", ",", "NoSectionError", ")", "as", "error", ":", "# Try the system config file", "try", ":", "config", "=", "_read_config", "(", "_SYSTEM_CONFIG_FILE", ")", "property_value", "=", "config", ".", "get", "(", "section", ",", "property_name", ")", "except", "(", "NoOptionError", ",", "NoSectionError", ")", "as", "error", ":", "raise", "NoConfigOptionError", "(", "error", ")", "return", "property_value" ]
Returns the value of the specified configuration property. Property values stored in the user configuration file take precedence over values stored in the system configuration file. :param property_name: The name of the property to retrieve. :return: The value of the property.
[ "Returns", "the", "value", "of", "the", "specified", "configuration", "property", ".", "Property", "values", "stored", "in", "the", "user", "configuration", "file", "take", "precedence", "over", "values", "stored", "in", "the", "system", "configuration", "file", "." ]
567b639d6592deec3289f6122a9e3d18f2f98432
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/config.py#L238-L261
train
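The user-then-system fallback in get() can be reproduced with two in-memory parsers; only NoOptionError and NoSectionError trigger the fallback, and in the real module a miss in both files is re-raised as NoConfigOptionError (plain lookup shown here).

from configparser import ConfigParser, NoOptionError, NoSectionError

user_cfg, system_cfg = ConfigParser(), ConfigParser()
system_cfg.read_string('[main]\nengine = /opt/envi/engine\n')

def lookup(name, section='main'):
    try:
        return user_cfg.get(section, name)    # user file first
    except (NoOptionError, NoSectionError):
        return system_cfg.get(section, name)  # then the system file

print(lookup('engine'))  # falls through to the system config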
envi-idl/envipyengine
envipyengine/config.py
set
def set(property_name, value, system=False): """ Sets the configuration property to the specified value. :param property_name: The name of the property to set. :param value: The value for the property. :keyword system: Set to True to modify the system configuration file. If not set, the user config file will be modified. """ config_filename = \ _SYSTEM_CONFIG_FILE if system is True else _USER_CONFIG_FILE config = _read_config(config_filename) section = _MAIN_SECTION_NAME config.set(section, property_name, value) _write_config(config, config_filename)
python
def set(property_name, value, system=False): """ Sets the configuration property to the specified value. :param property_name: The name of the property to set. :param value: The value for the property. :keyword system: Set to True to modify the system configuration file. If not set, the user config file will be modified. """ config_filename = \ _SYSTEM_CONFIG_FILE if system is True else _USER_CONFIG_FILE config = _read_config(config_filename) section = _MAIN_SECTION_NAME config.set(section, property_name, value) _write_config(config, config_filename)
[ "def", "set", "(", "property_name", ",", "value", ",", "system", "=", "False", ")", ":", "config_filename", "=", "_SYSTEM_CONFIG_FILE", "if", "system", "is", "True", "else", "_USER_CONFIG_FILE", "config", "=", "_read_config", "(", "config_filename", ")", "section", "=", "_MAIN_SECTION_NAME", "config", ".", "set", "(", "section", ",", "property_name", ",", "value", ")", "_write_config", "(", "config", ",", "config_filename", ")" ]
Sets the configuration property to the specified value. :param property_name: The name of the property to set. :param value: The value for the property. :keyword system: Set to True to modify the system configuration file. If not set, the user config file will be modified.
[ "Sets", "the", "configuration", "property", "to", "the", "specified", "value", "." ]
567b639d6592deec3289f6122a9e3d18f2f98432
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/config.py#L264-L279
train
bigdatacesga/service-discovery
consul.py
Client.register
def register(self, id, name, address, port=None, tags=None, check=None): """Register a new service with the local consul agent""" service = {} service['ID'] = id service['Name'] = name service['Address'] = address if port: service['Port'] = int(port) if tags: service['Tags'] = tags if check: service['Check'] = check r = requests.put(self.url_register, json=service) if r.status_code != 200: raise consulRegistrationError( 'PUT returned {}'.format(r.status_code)) return r
python
def register(self, id, name, address, port=None, tags=None, check=None): """Register a new service with the local consul agent""" service = {} service['ID'] = id service['Name'] = name service['Address'] = address if port: service['Port'] = int(port) if tags: service['Tags'] = tags if check: service['Check'] = check r = requests.put(self.url_register, json=service) if r.status_code != 200: raise consulRegistrationError( 'PUT returned {}'.format(r.status_code)) return r
[ "def", "register", "(", "self", ",", "id", ",", "name", ",", "address", ",", "port", "=", "None", ",", "tags", "=", "None", ",", "check", "=", "None", ")", ":", "service", "=", "{", "}", "service", "[", "'ID'", "]", "=", "id", "service", "[", "'Name'", "]", "=", "name", "service", "[", "'Address'", "]", "=", "address", "if", "port", ":", "service", "[", "'Port'", "]", "=", "int", "(", "port", ")", "if", "tags", ":", "service", "[", "'Tags'", "]", "=", "tags", "if", "check", ":", "service", "[", "'Check'", "]", "=", "check", "r", "=", "requests", ".", "put", "(", "self", ".", "url_register", ",", "json", "=", "service", ")", "if", "r", ".", "status_code", "!=", "200", ":", "raise", "consulRegistrationError", "(", "'PUT returned {}'", ".", "format", "(", "r", ".", "status_code", ")", ")", "return", "r" ]
Register a new service with the local consul agent
[ "Register", "a", "new", "service", "with", "the", "local", "consul", "agent" ]
5298d68e4dbe7b23848c95a6f75b9d469fb29e4a
https://github.com/bigdatacesga/service-discovery/blob/5298d68e4dbe7b23848c95a6f75b9d469fb29e4a/consul.py#L88-L104
train
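register() is a thin wrapper over Consul's agent API; the equivalent raw request looks like this, assuming a local agent on the default port (all service values are placeholders).

import requests

service = {
    'ID': 'web-1',
    'Name': 'web',
    'Address': '10.0.0.5',
    'Port': 8080,
    'Tags': ['primary'],
}
r = requests.put('http://127.0.0.1:8500/v1/agent/service/register',
                 json=service)
if r.status_code != 200:
    raise RuntimeError('PUT returned {}'.format(r.status_code))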
bigdatacesga/service-discovery
consul.py
Client.deregister
def deregister(self, id): """Deregister a service with the local consul agent""" r = requests.put('{}/{}'.format(self.url_deregister, id)) if r.status_code != 200: raise consulDeregistrationError( 'PUT returned {}'.format(r.status_code)) return r
python
def deregister(self, id): """Deregister a service with the local consul agent""" r = requests.put('{}/{}'.format(self.url_deregister, id)) if r.status_code != 200: raise consulDeregistrationError( 'PUT returned {}'.format(r.status_code)) return r
[ "def", "deregister", "(", "self", ",", "id", ")", ":", "r", "=", "requests", ".", "put", "(", "'{}/{}'", ".", "format", "(", "self", ".", "url_deregister", ",", "id", ")", ")", "if", "r", ".", "status_code", "!=", "200", ":", "raise", "consulDeregistrationError", "(", "'PUT returned {}'", ".", "format", "(", "r", ".", "status_code", ")", ")", "return", "r" ]
Deregister a service with the local consul agent
[ "Deregister", "a", "service", "with", "the", "local", "consul", "agent" ]
5298d68e4dbe7b23848c95a6f75b9d469fb29e4a
https://github.com/bigdatacesga/service-discovery/blob/5298d68e4dbe7b23848c95a6f75b9d469fb29e4a/consul.py#L106-L112
train
bigdatacesga/service-discovery
consul.py
Client.info
def info(self, name): """Info about a given service""" r = requests.get('{}/{}'.format(self.url_service, name)) return r.json()
python
def info(self, name): """Info about a given service""" r = requests.get('{}/{}'.format(self.url_service, name)) return r.json()
[ "def", "info", "(", "self", ",", "name", ")", ":", "r", "=", "requests", ".", "get", "(", "'{}/{}'", ".", "format", "(", "self", ".", "url_service", ",", "name", ")", ")", "return", "r", ".", "json", "(", ")" ]
Info about a given service
[ "Info", "about", "a", "given", "service" ]
5298d68e4dbe7b23848c95a6f75b9d469fb29e4a
https://github.com/bigdatacesga/service-discovery/blob/5298d68e4dbe7b23848c95a6f75b9d469fb29e4a/consul.py#L119-L122
train
ariebovenberg/snug
tutorial/relations.py
repo.star
def star(self) -> snug.Query[bool]: """star this repo""" req = snug.PUT(BASE + f'/user/starred/{self.owner}/{self.name}') return (yield req).status_code == 204
python
def star(self) -> snug.Query[bool]: """star this repo""" req = snug.PUT(BASE + f'/user/starred/{self.owner}/{self.name}') return (yield req).status_code == 204
[ "def", "star", "(", "self", ")", "->", "snug", ".", "Query", "[", "bool", "]", ":", "req", "=", "snug", ".", "PUT", "(", "BASE", "+", "f'/user/starred/{self.owner}/{self.name}'", ")", "return", "(", "yield", "req", ")", ".", "status_code", "==", "204" ]
star this repo
[ "star", "this", "repo" ]
4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/tutorial/relations.py#L15-L18
train
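Queries like star() are generators and only hit the network when handed to an executor. A sketch assuming snug is installed, that the tutorial's relations module is importable, and that its repo class takes owner/name keyword arguments; the constructor and the token are assumptions.

import snug
from relations import repo  # the tutorial module shown above

my_repo = repo(owner='octocat', name='hello-world')
starred = snug.execute(my_repo.star(),
                       auth=('octocat', '<personal-access-token>'))
print('starred:', starred)  # True on a 204 response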
eclipse/unide.python
src/unide/message.py
device_message
def device_message(device, code, ts=None, origin=None, type=None, severity=None, title=None, description=None, hint=None, **metaData): # pylint: disable=redefined-builtin, too-many-arguments """This quickly builds a time-stamped message. If `ts` is None, the current time is used. """ if ts is None: ts = local_now() payload = MessagePayload(device=device) payload.messages.append( Message( code=code, ts=ts, origin=origin, type=type, severity=severity, title=title, description=description, hint=hint, **metaData)) return dumps(payload)
python
def device_message(device, code, ts=None, origin=None, type=None, severity=None, title=None, description=None, hint=None, **metaData): # pylint: disable=redefined-builtin, too-many-arguments """This quickly builds a time-stamped message. If `ts` is None, the current time is used. """ if ts is None: ts = local_now() payload = MessagePayload(device=device) payload.messages.append( Message( code=code, ts=ts, origin=origin, type=type, severity=severity, title=title, description=description, hint=hint, **metaData)) return dumps(payload)
[ "def", "device_message", "(", "device", ",", "code", ",", "ts", "=", "None", ",", "origin", "=", "None", ",", "type", "=", "None", ",", "severity", "=", "None", ",", "title", "=", "None", ",", "description", "=", "None", ",", "hint", "=", "None", ",", "*", "*", "metaData", ")", ":", "# pylint: disable=redefined-builtin, too-many-arguments", "if", "ts", "is", "None", ":", "ts", "=", "local_now", "(", ")", "payload", "=", "MessagePayload", "(", "device", "=", "device", ")", "payload", ".", "messages", ".", "append", "(", "Message", "(", "code", "=", "code", ",", "ts", "=", "ts", ",", "origin", "=", "origin", ",", "type", "=", "type", ",", "severity", "=", "severity", ",", "title", "=", "title", ",", "description", "=", "description", ",", "hint", "=", "hint", ",", "*", "*", "metaData", ")", ")", "return", "dumps", "(", "payload", ")" ]
This quickly builds a time-stamped message. If `ts` is None, the current time is used.
[ "This", "quickly", "builds", "a", "time", "-", "stamped", "message", ".", "If", "ts", "is", "None", "the", "current", "time", "is", "used", "." ]
b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/message.py#L120-L148
train
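A usage sketch, assuming the unide package is installed; the Device import path, its constructor, and the code/severity values are illustrative assumptions, not taken from the source above.

from unide.common import Device          # assumed location of Device
from unide.message import device_message

device = Device(deviceID='a4927dad-58d4-4580-b460-79cefd56775b')  # made-up id
payload_json = device_message(device, code='190ABT',
                              severity='MEDIUM',
                              title='Oil level warning')
print(payload_json)  # PPMP message payload, time-stamped via local_now()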
MacHu-GWU/single_file_module-project
sfm/obj_file_io.py
_dump
def _dump(obj, abspath, serializer_type, dumper_func=None, compress=True, overwrite=False, verbose=False, **kwargs): """Dump object to file. :param abspath: The file path you want to dump to. :type abspath: str :param serializer_type: 'binary' or 'str'. :type serializer_type: str :param dumper_func: A dumper function that takes an object as input and returns binary or string. :type dumper_func: callable function :param compress: default ``True``. If True, then compress binary. :type compress: bool :param overwrite: default ``False``. If ``True``, dumping to an existing file silently overwrites it. If ``False``, an alert message is shown. The default ``False`` is to prevent overwriting a file by mistake. :type overwrite: boolean :param verbose: default ``False``, help-message-display trigger. :type verbose: boolean """ _check_serializer_type(serializer_type) if not inspect.isfunction(dumper_func): raise TypeError("dumper_func has to be a function that takes an object as input " "and returns binary!") prt_console("\nDump to '%s' ..." % abspath, verbose) if os.path.exists(abspath): if not overwrite: prt_console( " Stop! File exists and overwrite is not allowed", verbose, ) return st = time.perf_counter() b_or_str = dumper_func(obj, **kwargs) if serializer_type == "str": b = b_or_str.encode("utf-8") else: b = b_or_str if compress: b = zlib.compress(b) with atomic_write(abspath, overwrite=overwrite, mode="wb") as f: f.write(b) elapsed = time.perf_counter() - st prt_console(" Complete! Elapsed %.6f sec." % elapsed, verbose) if serializer_type == "str": return b_or_str else: return b
python
def _dump(obj, abspath, serializer_type, dumper_func=None, compress=True, overwrite=False, verbose=False, **kwargs): """Dump object to file. :param abspath: The file path you want to dump to. :type abspath: str :param serializer_type: 'binary' or 'str'. :type serializer_type: str :param dumper_func: A dumper function that takes an object as input and returns binary or string. :type dumper_func: callable function :param compress: default ``True``. If True, then compress binary. :type compress: bool :param overwrite: default ``False``. If ``True``, dumping to an existing file silently overwrites it. If ``False``, an alert message is shown. The default ``False`` is to prevent overwriting a file by mistake. :type overwrite: boolean :param verbose: default ``False``, help-message-display trigger. :type verbose: boolean """ _check_serializer_type(serializer_type) if not inspect.isfunction(dumper_func): raise TypeError("dumper_func has to be a function that takes an object as input " "and returns binary!") prt_console("\nDump to '%s' ..." % abspath, verbose) if os.path.exists(abspath): if not overwrite: prt_console( " Stop! File exists and overwrite is not allowed", verbose, ) return st = time.perf_counter() b_or_str = dumper_func(obj, **kwargs) if serializer_type == "str": b = b_or_str.encode("utf-8") else: b = b_or_str if compress: b = zlib.compress(b) with atomic_write(abspath, overwrite=overwrite, mode="wb") as f: f.write(b) elapsed = time.perf_counter() - st prt_console(" Complete! Elapsed %.6f sec." % elapsed, verbose) if serializer_type == "str": return b_or_str else: return b
[ "def", "_dump", "(", "obj", ",", "abspath", ",", "serializer_type", ",", "dumper_func", "=", "None", ",", "compress", "=", "True", ",", "overwrite", "=", "False", ",", "verbose", "=", "False", ",", "*", "*", "kwargs", ")", ":", "_check_serializer_type", "(", "serializer_type", ")", "if", "not", "inspect", ".", "isfunction", "(", "dumper_func", ")", ":", "raise", "TypeError", "(", "\"dumper_func has to be a function take object as input \"", "\"and return binary!\"", ")", "prt_console", "(", "\"\\nDump to '%s' ...\"", "%", "abspath", ",", "verbose", ")", "if", "os", ".", "path", ".", "exists", "(", "abspath", ")", ":", "if", "not", "overwrite", ":", "prt_console", "(", "\" Stop! File exists and overwrite is not allowed\"", ",", "verbose", ",", ")", "return", "st", "=", "time", ".", "clock", "(", ")", "b_or_str", "=", "dumper_func", "(", "obj", ",", "*", "*", "kwargs", ")", "if", "serializer_type", "is", "\"str\"", ":", "b", "=", "b_or_str", ".", "encode", "(", "\"utf-8\"", ")", "else", ":", "b", "=", "b_or_str", "if", "compress", ":", "b", "=", "zlib", ".", "compress", "(", "b", ")", "with", "atomic_write", "(", "abspath", ",", "overwrite", "=", "overwrite", ",", "mode", "=", "\"wb\"", ")", "as", "f", ":", "f", ".", "write", "(", "b", ")", "elapsed", "=", "time", ".", "clock", "(", ")", "-", "st", "prt_console", "(", "\" Complete! Elapse %.6f sec.\"", "%", "elapsed", ",", "verbose", ")", "if", "serializer_type", "is", "\"str\"", ":", "return", "b_or_str", "else", ":", "return", "b" ]
Dump object to file. :param abspath: The file path you want to dump to. :type abspath: str :param serializer_type: 'binary' or 'str'. :type serializer_type: str :param dumper_func: A dumper function that takes an object as input and returns binary or string. :type dumper_func: callable function :param compress: default ``True``. If True, then compress binary. :type compress: bool :param overwrite: default ``False``. If ``True``, dumping to an existing file silently overwrites it. If ``False``, an alert message is shown. The default ``False`` is to prevent overwriting a file by mistake. :type overwrite: boolean :param verbose: default ``False``, help-message-display trigger. :type verbose: boolean
[ "Dump", "object", "to", "file", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/obj_file_io.py#L80-L145
train
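With serializer_type='str' the dumper returns text, which _dump utf-8-encodes, zlib-compresses by default, and writes atomically. A sketch against a temp file, assuming the sfm package is importable; json.dumps plays the dumper.

import json
import os
import tempfile
from sfm.obj_file_io import _dump

def json_dumper(obj, **kwargs):
    return json.dumps(obj, **kwargs)  # a plain function, so inspect.isfunction passes

path = os.path.join(tempfile.mkdtemp(), 'data.json.gz')
_dump({'a': 1}, path, 'str', dumper_func=json_dumper,
      compress=True, overwrite=True, verbose=True)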
MacHu-GWU/single_file_module-project
sfm/obj_file_io.py
_load
def _load(abspath, serializer_type, loader_func=None, decompress=True, verbose=False, **kwargs): """load object from file. :param abspath: The file path you want to load from. :type abspath: str :param serializer_type: 'binary' or 'str'. :type serializer_type: str :param loader_func: A loader function that takes binary as input and returns an object. :type loader_func: callable function :param decompress: default ``True``. If True, then decompress binary. :type decompress: bool :param verbose: default ``False``, help-message-display trigger. :type verbose: boolean """ _check_serializer_type(serializer_type) if not inspect.isfunction(loader_func): raise TypeError("loader_func has to be a function that takes binary as input " "and returns an object!") prt_console("\nLoad from '%s' ..." % abspath, verbose) if not os.path.exists(abspath): raise ValueError("'%s' doesn't exist." % abspath) st = time.perf_counter() with open(abspath, "rb") as f: b = f.read() if decompress: b = zlib.decompress(b) if serializer_type == "str": obj = loader_func(b.decode("utf-8"), **kwargs) else: obj = loader_func(b, **kwargs) elapsed = time.perf_counter() - st prt_console(" Complete! Elapsed %.6f sec." % elapsed, verbose) return obj
python
def _load(abspath, serializer_type, loader_func=None, decompress=True, verbose=False, **kwargs): """load object from file. :param abspath: The file path you want to load from. :type abspath: str :param serializer_type: 'binary' or 'str'. :type serializer_type: str :param loader_func: A loader function that takes binary as input and returns an object. :type loader_func: callable function :param decompress: default ``True``. If True, then decompress binary. :type decompress: bool :param verbose: default ``False``, help-message-display trigger. :type verbose: boolean """ _check_serializer_type(serializer_type) if not inspect.isfunction(loader_func): raise TypeError("loader_func has to be a function that takes binary as input " "and returns an object!") prt_console("\nLoad from '%s' ..." % abspath, verbose) if not os.path.exists(abspath): raise ValueError("'%s' doesn't exist." % abspath) st = time.perf_counter() with open(abspath, "rb") as f: b = f.read() if decompress: b = zlib.decompress(b) if serializer_type == "str": obj = loader_func(b.decode("utf-8"), **kwargs) else: obj = loader_func(b, **kwargs) elapsed = time.perf_counter() - st prt_console(" Complete! Elapsed %.6f sec." % elapsed, verbose) return obj
[ "def", "_load", "(", "abspath", ",", "serializer_type", ",", "loader_func", "=", "None", ",", "decompress", "=", "True", ",", "verbose", "=", "False", ",", "*", "*", "kwargs", ")", ":", "_check_serializer_type", "(", "serializer_type", ")", "if", "not", "inspect", ".", "isfunction", "(", "loader_func", ")", ":", "raise", "TypeError", "(", "\"loader_func has to be a function take binary as input \"", "\"and return an object!\"", ")", "prt_console", "(", "\"\\nLoad from '%s' ...\"", "%", "abspath", ",", "verbose", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "abspath", ")", ":", "raise", "ValueError", "(", "\"'%s' doesn't exist.\"", "%", "abspath", ")", "st", "=", "time", ".", "clock", "(", ")", "with", "open", "(", "abspath", ",", "\"rb\"", ")", "as", "f", ":", "b", "=", "f", ".", "read", "(", ")", "if", "decompress", ":", "b", "=", "zlib", ".", "decompress", "(", "b", ")", "if", "serializer_type", "is", "\"str\"", ":", "obj", "=", "loader_func", "(", "b", ".", "decode", "(", "\"utf-8\"", ")", ",", "*", "*", "kwargs", ")", "else", ":", "obj", "=", "loader_func", "(", "b", ",", "*", "*", "kwargs", ")", "elapsed", "=", "time", ".", "clock", "(", ")", "-", "st", "prt_console", "(", "\" Complete! Elapse %.6f sec.\"", "%", "elapsed", ",", "verbose", ")", "return", "obj" ]
load object from file. :param abspath: The file path you want to load from. :type abspath: str :param serializer_type: 'binary' or 'str'. :type serializer_type: str :param loader_func: A loader function that takes binary as input and returns an object. :type loader_func: callable function :param decompress: default ``True``. If True, then decompress binary. :type decompress: bool :param verbose: default ``False``, help-message-display trigger. :type verbose: boolean
[ "load", "object", "from", "file", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/obj_file_io.py#L148-L196
train
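The byte-level pipeline the two helpers share, reduced to the stdlib: text to utf-8 to zlib on the way out, reversed on the way in. This runs with no third-party dependencies.

import json
import zlib

obj = {'a': 1}
blob = zlib.compress(json.dumps(obj).encode('utf-8'))         # what _dump writes
restored = json.loads(zlib.decompress(blob).decode('utf-8'))  # what _load returns
assert restored == obj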
Pythonity/python-ivona-api
ivona_api/ivona_api.py
IvonaAPI._get_response
def _get_response(self, method, endpoint, data=None): """ Helper method for wrapping API requests, mainly for catching errors in one place. :param method: valid HTTP method :type method: str :param endpoint: API endpoint :type endpoint: str :param data: extra parameters passed with the request :type data: dict :returns: API response :rtype: Response """ url = urljoin(IVONA_REGION_ENDPOINTS[self.region], endpoint) response = getattr(self.session, method)( url, json=data, ) if 'x-amzn-ErrorType' in response.headers: raise IvonaAPIException(response.headers['x-amzn-ErrorType']) if response.status_code != requests.codes.ok: raise IvonaAPIException( "Something wrong happened: {}".format(response.json()) ) return response
python
def _get_response(self, method, endpoint, data=None): """ Helper method for wrapping API requests, mainly for catching errors in one place. :param method: valid HTTP method :type method: str :param endpoint: API endpoint :type endpoint: str :param data: extra parameters passed with the request :type data: dict :returns: API response :rtype: Response """ url = urljoin(IVONA_REGION_ENDPOINTS[self.region], endpoint) response = getattr(self.session, method)( url, json=data, ) if 'x-amzn-ErrorType' in response.headers: raise IvonaAPIException(response.headers['x-amzn-ErrorType']) if response.status_code != requests.codes.ok: raise IvonaAPIException( "Something wrong happened: {}".format(response.json()) ) return response
[ "def", "_get_response", "(", "self", ",", "method", ",", "endpoint", ",", "data", "=", "None", ")", ":", "url", "=", "urljoin", "(", "IVONA_REGION_ENDPOINTS", "[", "self", ".", "region", "]", ",", "endpoint", ")", "response", "=", "getattr", "(", "self", ".", "session", ",", "method", ")", "(", "url", ",", "json", "=", "data", ",", ")", "if", "'x-amzn-ErrorType'", "in", "response", ".", "headers", ":", "raise", "IvonaAPIException", "(", "response", ".", "headers", "[", "'x-amzn-ErrorType'", "]", ")", "if", "response", ".", "status_code", "!=", "requests", ".", "codes", ".", "ok", ":", "raise", "IvonaAPIException", "(", "\"Something wrong happened: {}\"", ".", "format", "(", "response", ".", "json", "(", ")", ")", ")", "return", "response" ]
Helper method for wrapping API requests, mainly for catching errors in one place. :param method: valid HTTP method :type method: str :param endpoint: API endpoint :type endpoint: str :param data: extra parameters passed with the request :type data: dict :returns: API response :rtype: Response
[ "Helper", "method", "for", "wrapping", "API", "requests", "mainly", "for", "catching", "errors", "in", "one", "place", "." ]
490a2e502d4aa769b9f41603eb5d5e5ebf1ea912
https://github.com/Pythonity/python-ivona-api/blob/490a2e502d4aa769b9f41603eb5d5e5ebf1ea912/ivona_api/ivona_api.py#L88-L116
train
Pythonity/python-ivona-api
ivona_api/ivona_api.py
IvonaAPI.get_available_voices
def get_available_voices(self, language=None, gender=None): """ Returns a list of available voices, via 'ListVoices' endpoint Docs: http://developer.ivona.com/en/speechcloud/actions.html#ListVoices :param language: returned voices language :type language: str :param gender: returned voices gender :type gender: str """ endpoint = 'ListVoices' data = dict() if language: data.setdefault('Voice', {})['Language'] = language if gender: data.setdefault('Voice', {})['Gender'] = gender response = self._get_response('get', endpoint, data) return response.json()['Voices']
python
def get_available_voices(self, language=None, gender=None): """ Returns a list of available voices, via 'ListVoices' endpoint Docs: http://developer.ivona.com/en/speechcloud/actions.html#ListVoices :param language: returned voices language :type language: str :param gender: returned voices gender :type gender: str """ endpoint = 'ListVoices' data = dict() if language: data.setdefault('Voice', {})['Language'] = language if gender: data.setdefault('Voice', {})['Gender'] = gender response = self._get_response('get', endpoint, data) return response.json()['Voices']
[ "def", "get_available_voices", "(", "self", ",", "language", "=", "None", ",", "gender", "=", "None", ")", ":", "endpoint", "=", "'ListVoices'", "data", "=", "dict", "(", ")", "if", "language", ":", "data", ".", "update", "(", "{", "'Voice'", ":", "{", "'Language'", ":", "language", "}", "}", ")", "if", "gender", ":", "data", ".", "update", "(", "{", "'Voice'", ":", "{", "'Gender'", ":", "gender", "}", "}", ")", "print", "(", "data", ")", "response", "=", "self", ".", "_get_response", "(", "'get'", ",", "endpoint", ",", "data", ")", "return", "response", ".", "json", "(", ")", "[", "'Voices'", "]" ]
Returns a list of available voices, via 'ListVoices' endpoint Docs: http://developer.ivona.com/en/speechcloud/actions.html#ListVoices :param language: returned voices language :type language: str :param gender: returned voices gender :type gender: str
[ "Returns", "a", "list", "of", "available", "voices", "via", "ListVoices", "endpoint" ]
490a2e502d4aa769b9f41603eb5d5e5ebf1ea912
https://github.com/Pythonity/python-ivona-api/blob/490a2e502d4aa769b9f41603eb5d5e5ebf1ea912/ivona_api/ivona_api.py#L118-L143
train
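Why the body above merges both filters into one sub-dict with setdefault: two plain dict.update calls on the 'Voice' key would let the second filter replace the first instead of combining with it.

data = {}
data.setdefault('Voice', {})['Language'] = 'en-US'
data.setdefault('Voice', {})['Gender'] = 'Female'
print(data)    # {'Voice': {'Language': 'en-US', 'Gender': 'Female'}}

# The replaced pattern, for contrast -- the gender update clobbers language:
broken = {}
broken.update({'Voice': {'Language': 'en-US'}})
broken.update({'Voice': {'Gender': 'Female'}})
print(broken)  # {'Voice': {'Gender': 'Female'}}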
Pythonity/python-ivona-api
ivona_api/ivona_api.py
IvonaAPI.text_to_speech
def text_to_speech(self, text, file, voice_name=None, language=None): """ Saves given text synthesized audio file, via 'CreateSpeech' endpoint Docs: http://developer.ivona.com/en/speechcloud/actions.html#CreateSpeech :param text: text to synthesize :type text: str :param file: file that will be used to save the audio :type file: file :param voice_name: voice name :type voice_name: str :param language: voice language :type language: str """ endpoint = 'CreateSpeech' data = { 'Input': { 'Data': text, }, 'OutputFormat': { 'Codec': self.codec.upper(), }, 'Parameters': { 'Rate': self.rate, 'Volume': self.volume, 'SentenceBreak': self.sentence_break, 'ParagraphBreak': self.paragraph_break, }, 'Voice': { 'Name': voice_name or self.voice_name, 'Language': language or self.language, }, } response = self._get_response('post', endpoint, data) file.write(response.content)
python
def text_to_speech(self, text, file, voice_name=None, language=None): """ Saves given text synthesized audio file, via 'CreateSpeech' endpoint Docs: http://developer.ivona.com/en/speechcloud/actions.html#CreateSpeech :param text: text to synthesize :type text: str :param file: file that will be used to save the audio :type file: file :param voice_name: voice name :type voice_name: str :param language: voice language :type language: str """ endpoint = 'CreateSpeech' data = { 'Input': { 'Data': text, }, 'OutputFormat': { 'Codec': self.codec.upper(), }, 'Parameters': { 'Rate': self.rate, 'Volume': self.volume, 'SentenceBreak': self.sentence_break, 'ParagraphBreak': self.paragraph_break, }, 'Voice': { 'Name': voice_name or self.voice_name, 'Language': language or self.language, }, } response = self._get_response('post', endpoint, data) file.write(response.content)
[ "def", "text_to_speech", "(", "self", ",", "text", ",", "file", ",", "voice_name", "=", "None", ",", "language", "=", "None", ")", ":", "endpoint", "=", "'CreateSpeech'", "data", "=", "{", "'Input'", ":", "{", "'Data'", ":", "text", ",", "}", ",", "'OutputFormat'", ":", "{", "'Codec'", ":", "self", ".", "codec", ".", "upper", "(", ")", ",", "}", ",", "'Parameters'", ":", "{", "'Rate'", ":", "self", ".", "rate", ",", "'Volume'", ":", "self", ".", "volume", ",", "'SentenceBreak'", ":", "self", ".", "sentence_break", ",", "'ParagraphBreak'", ":", "self", ".", "paragraph_break", ",", "}", ",", "'Voice'", ":", "{", "'Name'", ":", "voice_name", "or", "self", ".", "voice_name", ",", "'Language'", ":", "language", "or", "self", ".", "language", ",", "}", ",", "}", "response", "=", "self", ".", "_get_response", "(", "'post'", ",", "endpoint", ",", "data", ")", "file", ".", "write", "(", "response", ".", "content", ")" ]
Saves given text synthesized audio file, via 'CreateSpeech' endpoint Docs: http://developer.ivona.com/en/speechcloud/actions.html#CreateSpeech :param text: text to synthesize :type text: str :param file: file that will be used to save the audio :type file: file :param voice_name: voice name :type voice_name: str :param language: voice language :type language: str
[ "Saves", "given", "text", "synthesized", "audio", "file", "via", "CreateSpeech", "endpoint" ]
490a2e502d4aa769b9f41603eb5d5e5ebf1ea912
https://github.com/Pythonity/python-ivona-api/blob/490a2e502d4aa769b9f41603eb5d5e5ebf1ea912/ivona_api/ivona_api.py#L145-L184
train
configcat/python-sdk
configcatclient/__init__.py
create_client_with_auto_poll
def create_client_with_auto_poll(api_key, poll_interval_seconds=60, max_init_wait_time_seconds=5, on_configuration_changed_callback=None, config_cache_class=None, base_url=None): """ Create an instance of ConfigCatClient and setup Auto Poll mode with custom options :param api_key: ConfigCat ApiKey to access your configuration. :param poll_interval_seconds: The client's poll interval in seconds. Default: 60 seconds. :param on_configuration_changed_callback: You can subscribe to configuration changes with this callback :param max_init_wait_time_seconds: maximum waiting time for first configuration fetch in polling mode. :param config_cache_class: If you want to use custom caching instead of the client's default InMemoryConfigCache, You can provide an implementation of ConfigCache. :param base_url: You can set a base_url if you want to use a proxy server between your application and ConfigCat """ if api_key is None: raise ConfigCatClientException('API Key is required.') if poll_interval_seconds < 1: poll_interval_seconds = 1 if max_init_wait_time_seconds < 0: max_init_wait_time_seconds = 0 return ConfigCatClient(api_key, poll_interval_seconds, max_init_wait_time_seconds, on_configuration_changed_callback, 0, config_cache_class, base_url)
python
def create_client_with_auto_poll(api_key, poll_interval_seconds=60, max_init_wait_time_seconds=5, on_configuration_changed_callback=None, config_cache_class=None, base_url=None): """ Create an instance of ConfigCatClient and setup Auto Poll mode with custom options :param api_key: ConfigCat ApiKey to access your configuration. :param poll_interval_seconds: The client's poll interval in seconds. Default: 60 seconds. :param on_configuration_changed_callback: You can subscribe to configuration changes with this callback :param max_init_wait_time_seconds: maximum waiting time for first configuration fetch in polling mode. :param config_cache_class: If you want to use custom caching instead of the client's default InMemoryConfigCache, You can provide an implementation of ConfigCache. :param base_url: You can set a base_url if you want to use a proxy server between your application and ConfigCat """ if api_key is None: raise ConfigCatClientException('API Key is required.') if poll_interval_seconds < 1: poll_interval_seconds = 1 if max_init_wait_time_seconds < 0: max_init_wait_time_seconds = 0 return ConfigCatClient(api_key, poll_interval_seconds, max_init_wait_time_seconds, on_configuration_changed_callback, 0, config_cache_class, base_url)
[ "def", "create_client_with_auto_poll", "(", "api_key", ",", "poll_interval_seconds", "=", "60", ",", "max_init_wait_time_seconds", "=", "5", ",", "on_configuration_changed_callback", "=", "None", ",", "config_cache_class", "=", "None", ",", "base_url", "=", "None", ")", ":", "if", "api_key", "is", "None", ":", "raise", "ConfigCatClientException", "(", "'API Key is required.'", ")", "if", "poll_interval_seconds", "<", "1", ":", "poll_interval_seconds", "=", "1", "if", "max_init_wait_time_seconds", "<", "0", ":", "max_init_wait_time_seconds", "=", "0", "return", "ConfigCatClient", "(", "api_key", ",", "poll_interval_seconds", ",", "max_init_wait_time_seconds", ",", "on_configuration_changed_callback", ",", "0", ",", "config_cache_class", ",", "base_url", ")" ]
Create an instance of ConfigCatClient and setup Auto Poll mode with custom options :param api_key: ConfigCat ApiKey to access your configuration. :param poll_interval_seconds: The client's poll interval in seconds. Default: 60 seconds. :param on_configuration_changed_callback: You can subscribe to configuration changes with this callback :param max_init_wait_time_seconds: maximum waiting time for first configuration fetch in polling mode. :param config_cache_class: If you want to use custom caching instead of the client's default InMemoryConfigCache, You can provide an implementation of ConfigCache. :param base_url: You can set a base_url if you want to use a proxy server between your application and ConfigCat
[ "Create", "an", "instance", "of", "ConfigCatClient", "and", "setup", "Auto", "Poll", "mode", "with", "custom", "options" ]
7a893c7958d928276ca02c00d5239987a1acb8d6
https://github.com/configcat/python-sdk/blob/7a893c7958d928276ca02c00d5239987a1acb8d6/configcatclient/__init__.py#L14-L39
train
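Typical use, assuming this generation of the configcatclient package is installed; the key is a placeholder. Note that out-of-range intervals are clamped rather than rejected, so poll_interval_seconds=0 silently becomes 1.

import configcatclient

client = configcatclient.create_client_with_auto_poll(
    '<your-api-key>',
    poll_interval_seconds=30,
    max_init_wait_time_seconds=5,
)
# value = client.get_value('isMyFeatureEnabled', False)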
configcat/python-sdk
configcatclient/__init__.py
create_client_with_lazy_load
def create_client_with_lazy_load(api_key, cache_time_to_live_seconds=60, config_cache_class=None, base_url=None): """ Create an instance of ConfigCatClient and setup Lazy Load mode with custom options :param api_key: ConfigCat ApiKey to access your configuration. :param cache_time_to_live_seconds: The cache TTL. :param config_cache_class: If you want to use custom caching instead of the client's default InMemoryConfigCache, You can provide an implementation of ConfigCache. :param base_url: You can set a base_url if you want to use a proxy server between your application and ConfigCat """ if api_key is None: raise ConfigCatClientException('API Key is required.') if cache_time_to_live_seconds < 1: cache_time_to_live_seconds = 1 return ConfigCatClient(api_key, 0, 0, None, cache_time_to_live_seconds, config_cache_class, base_url)
python
def create_client_with_lazy_load(api_key, cache_time_to_live_seconds=60, config_cache_class=None, base_url=None): """ Create an instance of ConfigCatClient and setup Lazy Load mode with custom options :param api_key: ConfigCat ApiKey to access your configuration. :param cache_time_to_live_seconds: The cache TTL. :param config_cache_class: If you want to use custom caching instead of the client's default InMemoryConfigCache, You can provide an implementation of ConfigCache. :param base_url: You can set a base_url if you want to use a proxy server between your application and ConfigCat """ if api_key is None: raise ConfigCatClientException('API Key is required.') if cache_time_to_live_seconds < 1: cache_time_to_live_seconds = 1 return ConfigCatClient(api_key, 0, 0, None, cache_time_to_live_seconds, config_cache_class, base_url)
[ "def", "create_client_with_lazy_load", "(", "api_key", ",", "cache_time_to_live_seconds", "=", "60", ",", "config_cache_class", "=", "None", ",", "base_url", "=", "None", ")", ":", "if", "api_key", "is", "None", ":", "raise", "ConfigCatClientException", "(", "'API Key is required.'", ")", "if", "cache_time_to_live_seconds", "<", "1", ":", "cache_time_to_live_seconds", "=", "1", "return", "ConfigCatClient", "(", "api_key", ",", "0", ",", "0", ",", "None", ",", "cache_time_to_live_seconds", ",", "config_cache_class", ",", "base_url", ")" ]
Create an instance of ConfigCatClient and setup Lazy Load mode with custom options :param api_key: ConfigCat ApiKey to access your configuration. :param cache_time_to_live_seconds: The cache TTL. :param config_cache_class: If you want to use custom caching instead of the client's default InMemoryConfigCache, You can provide an implementation of ConfigCache. :param base_url: You can set a base_url if you want to use a proxy server between your application and ConfigCat
[ "Create", "an", "instance", "of", "ConfigCatClient", "and", "setup", "Lazy", "Load", "mode", "with", "custom", "options" ]
7a893c7958d928276ca02c00d5239987a1acb8d6
https://github.com/configcat/python-sdk/blob/7a893c7958d928276ca02c00d5239987a1acb8d6/configcatclient/__init__.py#L42-L60
train
configcat/python-sdk
configcatclient/__init__.py
create_client_with_manual_poll
def create_client_with_manual_poll(api_key, config_cache_class=None, base_url=None): """ Create an instance of ConfigCatClient and setup Manual Poll mode with custom options :param api_key: ConfigCat ApiKey to access your configuration. :param config_cache_class: If you want to use custom caching instead of the client's default InMemoryConfigCache, You can provide an implementation of ConfigCache. :param base_url: You can set a base_url if you want to use a proxy server between your application and ConfigCat """ if api_key is None: raise ConfigCatClientException('API Key is required.') return ConfigCatClient(api_key, 0, 0, None, 0, config_cache_class, base_url)
python
def create_client_with_manual_poll(api_key, config_cache_class=None, base_url=None): """ Create an instance of ConfigCatClient and setup Manual Poll mode with custom options :param api_key: ConfigCat ApiKey to access your configuration. :param config_cache_class: If you want to use custom caching instead of the client's default InMemoryConfigCache, You can provide an implementation of ConfigCache. :param base_url: You can set a base_url if you want to use a proxy server between your application and ConfigCat """ if api_key is None: raise ConfigCatClientException('API Key is required.') return ConfigCatClient(api_key, 0, 0, None, 0, config_cache_class, base_url)
[ "def", "create_client_with_manual_poll", "(", "api_key", ",", "config_cache_class", "=", "None", ",", "base_url", "=", "None", ")", ":", "if", "api_key", "is", "None", ":", "raise", "ConfigCatClientException", "(", "'API Key is required.'", ")", "return", "ConfigCatClient", "(", "api_key", ",", "0", ",", "0", ",", "None", ",", "0", ",", "config_cache_class", ",", "base_url", ")" ]
Create an instance of ConfigCatClient and setup Manual Poll mode with custom options :param api_key: ConfigCat ApiKey to access your configuration. :param config_cache_class: If you want to use custom caching instead of the client's default InMemoryConfigCache, You can provide an implementation of ConfigCache. :param base_url: You can set a base_url if you want to use a proxy server between your application and ConfigCat
[ "Create", "an", "instance", "of", "ConfigCatClient", "and", "setup", "Manual", "Poll", "mode", "with", "custom", "options" ]
7a893c7958d928276ca02c00d5239987a1acb8d6
https://github.com/configcat/python-sdk/blob/7a893c7958d928276ca02c00d5239987a1acb8d6/configcatclient/__init__.py#L63-L77
train
ariebovenberg/snug
examples/ns/query.py
basic_query
def basic_query(returns): """decorator factory for NS queries""" return compose( reusable, map_send(parse_request), map_yield(prepare_params, snug.prefix_adder(API_PREFIX)), map_return(loads(returns)), oneyield, )
python
def basic_query(returns): """decorator factory for NS queries""" return compose( reusable, map_send(parse_request), map_yield(prepare_params, snug.prefix_adder(API_PREFIX)), map_return(loads(returns)), oneyield, )
[ "def", "basic_query", "(", "returns", ")", ":", "return", "compose", "(", "reusable", ",", "map_send", "(", "parse_request", ")", ",", "map_yield", "(", "prepare_params", ",", "snug", ".", "prefix_adder", "(", "API_PREFIX", ")", ")", ",", "map_return", "(", "loads", "(", "returns", ")", ")", ",", "oneyield", ",", ")" ]
decorator factory for NS queries
[ "decorator", "factory", "for", "NS", "queries" ]
4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/ns/query.py#L43-L51
train
ariebovenberg/snug
examples/ns/query.py
departures
def departures(station: str) -> snug.Query[t.List[Departure]]: """departures for a station""" return snug.GET('avt', params={'station': station})
python
def departures(station: str) -> snug.Query[t.List[Departure]]: """departures for a station""" return snug.GET('avt', params={'station': station})
[ "def", "departures", "(", "station", ":", "str", ")", "->", "snug", ".", "Query", "[", "t", ".", "List", "[", "Departure", "]", "]", ":", "return", "snug", ".", "GET", "(", "'avt'", ",", "params", "=", "{", "'station'", ":", "station", "}", ")" ]
departures for a station
[ "departures", "for", "a", "station" ]
4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/ns/query.py#L61-L63
train
ariebovenberg/snug
examples/ns/query.py
journey_options
def journey_options(origin: str, destination: str, via: t.Optional[str]=None, before: t.Optional[int]=None, after: t.Optional[int]=None, time: t.Optional[datetime]=None, hsl: t.Optional[bool]=None, year_card: t.Optional[bool]=None) -> ( snug.Query[t.List[Journey]]): """journey recommendations from an origin to a destination station""" return snug.GET('treinplanner', params={ 'fromStation': origin, 'toStation': destination, 'viaStation': via, 'previousAdvices': before, 'nextAdvices': after, 'dateTime': time, 'hslAllowed': hsl, 'yearCard': year_card, })
python
def journey_options(origin: str, destination: str, via: t.Optional[str]=None, before: t.Optional[int]=None, after: t.Optional[int]=None, time: t.Optional[datetime]=None, hsl: t.Optional[bool]=None, year_card: t.Optional[bool]=None) -> ( snug.Query[t.List[Journey]]): """journey recommendations from an origin to a destination station""" return snug.GET('treinplanner', params={ 'fromStation': origin, 'toStation': destination, 'viaStation': via, 'previousAdvices': before, 'nextAdvices': after, 'dateTime': time, 'hslAllowed': hsl, 'yearCard': year_card, })
[ "def", "journey_options", "(", "origin", ":", "str", ",", "destination", ":", "str", ",", "via", ":", "t", ".", "Optional", "[", "str", "]", "=", "None", ",", "before", ":", "t", ".", "Optional", "[", "int", "]", "=", "None", ",", "after", ":", "t", ".", "Optional", "[", "int", "]", "=", "None", ",", "time", ":", "t", ".", "Optional", "[", "datetime", "]", "=", "None", ",", "hsl", ":", "t", ".", "Optional", "[", "bool", "]", "=", "None", ",", "year_card", ":", "t", ".", "Optional", "[", "bool", "]", "=", "None", ")", "->", "(", "snug", ".", "Query", "[", "t", ".", "List", "[", "Journey", "]", "]", ")", ":", "return", "snug", ".", "GET", "(", "'treinplanner'", ",", "params", "=", "{", "'fromStation'", ":", "origin", ",", "'toStation'", ":", "destination", ",", "'viaStation'", ":", "via", ",", "'previousAdvices'", ":", "before", ",", "'nextAdvices'", ":", "after", ",", "'dateTime'", ":", "time", ",", "'hslAllowed'", ":", "hsl", ",", "'yearCard'", ":", "year_card", ",", "}", ")" ]
journey recommendations from an origin to a destination station
[ "journey", "recommendations", "from", "an", "origin", "to", "a", "destination", "station" ]
4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/ns/query.py#L67-L86
train
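A minimal usage sketch for the two NS queries above. It assumes these functions are wrapped by the basic_query decorator shown earlier in this file, that snug.execute accepts an auth tuple, and that the credentials below are placeholders; none of this is taken from the record itself:

    import snug

    # Hypothetical NS API credentials; snug resolves a query against the live API.
    auth = ('user@example.com', 'ns-api-key')
    trains = snug.execute(departures('Amsterdam'), auth=auth)
    options = snug.execute(journey_options('Amsterdam', 'Utrecht'), auth=auth)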
MacHu-GWU/single_file_module-project
sfm/rnd.py
rand_str
def rand_str(length, allowed=CHARSET_ALPHA_DIGITS): """Generate fixed-length random string from your allowed character pool. :param length: total length of this string. :param allowed: allowed charset. Example:: >>> import string >>> rand_str(32) H6ExQPNLzb4Vp3YZtfpyzLNPFwdfnwz6 """ res = list() for _ in range(length): res.append(random.choice(allowed)) return "".join(res)
python
def rand_str(length, allowed=CHARSET_ALPHA_DIGITS): """Generate fixed-length random string from your allowed character pool. :param length: total length of this string. :param allowed: allowed charset. Example:: >>> import string >>> rand_str(32) H6ExQPNLzb4Vp3YZtfpyzLNPFwdfnwz6 """ res = list() for _ in range(length): res.append(random.choice(allowed)) return "".join(res)
[ "def", "rand_str", "(", "length", ",", "allowed", "=", "CHARSET_ALPHA_DIGITS", ")", ":", "res", "=", "list", "(", ")", "for", "_", "in", "range", "(", "length", ")", ":", "res", ".", "append", "(", "random", ".", "choice", "(", "allowed", ")", ")", "return", "\"\"", ".", "join", "(", "res", ")" ]
Generate fixed-length random string from your allowed character pool. :param length: total length of this string. :param allowed: allowed charset. Example:: >>> import string >>> rand_str(32) H6ExQPNLzb4Vp3YZtfpyzLNPFwdfnwz6
[ "Generate", "fixed", "-", "length", "random", "string", "from", "your", "allowed", "character", "pool", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/rnd.py#L25-L40
train
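Two usage notes on rand_str, as a hedged sketch: the module-level random generator can be seeded for reproducible output, and it is not cryptographically secure, so security tokens should come from the standard-library secrets module instead (only rand_str below comes from this repo):

    import random
    import secrets
    import string

    random.seed(42)                      # reproducible test fixtures
    token = rand_str(8, allowed=string.ascii_lowercase)

    # For anything security-sensitive, prefer secrets over random:
    secure = "".join(secrets.choice(string.ascii_letters) for _ in range(8))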
MacHu-GWU/single_file_module-project
sfm/rnd.py
rand_hexstr
def rand_hexstr(length, lower=True): """Generate fixed-length random hexstring, usually for md5. :param length: total length of this string. :param lower: use lower case or upper case. """ if lower: return rand_str(length, allowed=CHARSET_HEXSTR_LOWER) else: return rand_str(length, allowed=CHARSET_HEXSTR_UPPER)
python
def rand_hexstr(length, lower=True): """Generate fixed-length random hexstring, usually for md5. :param length: total length of this string. :param lower: use lower case or upper case. """ if lower: return rand_str(length, allowed=CHARSET_HEXSTR_LOWER) else: return rand_str(length, allowed=CHARSET_HEXSTR_UPPER)
[ "def", "rand_hexstr", "(", "length", ",", "lower", "=", "True", ")", ":", "if", "lower", ":", "return", "rand_str", "(", "length", ",", "allowed", "=", "CHARSET_HEXSTR_LOWER", ")", "else", ":", "return", "rand_str", "(", "length", ",", "allowed", "=", "CHARSET_HEXSTR_UPPER", ")" ]
Generate fixed-length random hexstring, usually for md5. :param length: total length of this string. :param lower: use lower case or upper case.
[ "Gererate", "fixed", "-", "length", "random", "hexstring", "usually", "for", "md5", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/rnd.py#L43-L52
train
MacHu-GWU/single_file_module-project
sfm/rnd.py
rand_alphastr
def rand_alphastr(length, lower=True, upper=True): """Generate fixed-length random alpha only string. """ if lower is True and upper is True: return rand_str(length, allowed=string.ascii_letters) if lower is True and upper is False: return rand_str(length, allowed=string.ascii_lowercase) if lower is False and upper is True: return rand_str(length, allowed=string.ascii_uppercase) else: raise Exception
python
def rand_alphastr(length, lower=True, upper=True): """Generate fixed-length random alpha only string. """ if lower is True and upper is True: return rand_str(length, allowed=string.ascii_letters) if lower is True and upper is False: return rand_str(length, allowed=string.ascii_lowercase) if lower is False and upper is True: return rand_str(length, allowed=string.ascii_uppercase) else: raise Exception
[ "def", "rand_alphastr", "(", "length", ",", "lower", "=", "True", ",", "upper", "=", "True", ")", ":", "if", "lower", "is", "True", "and", "upper", "is", "True", ":", "return", "rand_str", "(", "length", ",", "allowed", "=", "string", ".", "ascii_letters", ")", "if", "lower", "is", "True", "and", "upper", "is", "False", ":", "return", "rand_str", "(", "length", ",", "allowed", "=", "string", ".", "ascii_lowercase", ")", "if", "lower", "is", "False", "and", "upper", "is", "True", ":", "return", "rand_str", "(", "length", ",", "allowed", "=", "string", ".", "ascii_uppercase", ")", "else", ":", "raise", "Exception" ]
Generate fixed-length random alpha only string.
[ "Generate", "fixed", "-", "length", "random", "alpha", "only", "string", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/rnd.py#L55-L65
train
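The trailing else in rand_alphastr pairs with the third if, so the bare Exception fires only when both flags are False. A sketch of an equivalent, arguably clearer structure, reusing rand_str and the string module from this repo (the error message is mine):

    def rand_alphastr(length, lower=True, upper=True):
        """Generate fixed-length random alpha-only string."""
        if not (lower or upper):
            raise ValueError("at least one of 'lower' or 'upper' must be True")
        if lower and upper:
            allowed = string.ascii_letters
        elif lower:
            allowed = string.ascii_lowercase
        else:
            allowed = string.ascii_uppercase
        return rand_str(length, allowed=allowed)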
MacHu-GWU/single_file_module-project
sfm/rnd.py
rand_article
def rand_article(num_p=(4, 10), num_s=(2, 15), num_w=(5, 40)): """Random article text. Example:: >>> rand_article() ... """ article = list() for _ in range(random.randint(*num_p)): p = list() for _ in range(random.randint(*num_s)): s = list() for _ in range(random.randint(*num_w)): s.append( rand_str(random.randint(1, 15), string.ascii_lowercase)) p.append(" ".join(s)) article.append(". ".join(p)) return "\n\n".join(article)
python
def rand_article(num_p=(4, 10), num_s=(2, 15), num_w=(5, 40)): """Random article text. Example:: >>> rand_article() ... """ article = list() for _ in range(random.randint(*num_p)): p = list() for _ in range(random.randint(*num_s)): s = list() for _ in range(random.randint(*num_w)): s.append( rand_str(random.randint(1, 15), string.ascii_lowercase)) p.append(" ".join(s)) article.append(". ".join(p)) return "\n\n".join(article)
[ "def", "rand_article", "(", "num_p", "=", "(", "4", ",", "10", ")", ",", "num_s", "=", "(", "2", ",", "15", ")", ",", "num_w", "=", "(", "5", ",", "40", ")", ")", ":", "article", "=", "list", "(", ")", "for", "_", "in", "range", "(", "random", ".", "randint", "(", "*", "num_p", ")", ")", ":", "p", "=", "list", "(", ")", "for", "_", "in", "range", "(", "random", ".", "randint", "(", "*", "num_s", ")", ")", ":", "s", "=", "list", "(", ")", "for", "_", "in", "range", "(", "random", ".", "randint", "(", "*", "num_w", ")", ")", ":", "s", ".", "append", "(", "rand_str", "(", "random", ".", "randint", "(", "1", ",", "15", ")", ",", "string", ".", "ascii_lowercase", ")", ")", "p", ".", "append", "(", "\" \"", ".", "join", "(", "s", ")", ")", "article", ".", "append", "(", "\". \"", ".", "join", "(", "p", ")", ")", "return", "\"\\n\\n\"", ".", "join", "(", "article", ")" ]
Random article text. Example:: >>> rand_article() ...
[ "Random", "article", "text", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/rnd.py#L120-L138
train
JanHendrikDolling/configvalidator
configvalidator/tools/parser.py
ParseObj._resolve_dep
def _resolve_dep(self, key): """ this method resolves dependencies for the given key. call the method after the item "key" was added to the list of available items """ if key in self.future_values_key_dep: # there are some dependencies that can be resolved dep_list = self.future_values_key_dep[key] del self.future_values_key_dep[key] # remove dependencies also_finish = [] # iterate over the dependencies that can now be resolved for dep in dep_list: if self.__resolve_dep_helper(dep, key) is True: also_finish.append(dep) # maybe the resolving process leads to new deps that can be resolved for dep in also_finish: self._resolve_dep(dep)
python
def _resolve_dep(self, key): """ this method resolves dependencies for the given key. call the method after the item "key" was added to the list of available items """ if key in self.future_values_key_dep: # there are some dependencies that can be resolved dep_list = self.future_values_key_dep[key] del self.future_values_key_dep[key] # remove dependencies also_finish = [] # iterate over the dependencies that can now be resolved for dep in dep_list: if self.__resolve_dep_helper(dep, key) is True: also_finish.append(dep) # maybe the resolving process leads to new deps that can be resolved for dep in also_finish: self._resolve_dep(dep)
[ "def", "_resolve_dep", "(", "self", ",", "key", ")", ":", "if", "key", "in", "self", ".", "future_values_key_dep", ":", "# there are some dependencies that can be resolved", "dep_list", "=", "self", ".", "future_values_key_dep", "[", "key", "]", "del", "self", ".", "future_values_key_dep", "[", "key", "]", "# remove dependencies", "also_finish", "=", "[", "]", "# iterate over the dependencies that can now be resolved", "for", "dep", "in", "dep_list", ":", "if", "self", ".", "__resolve_dep_helper", "(", "dep", ",", "key", ")", "is", "True", ":", "also_finish", ".", "append", "(", "dep", ")", "# maybe the resolving process leads to new deps that can be resolved", "for", "dep", "in", "also_finish", ":", "self", ".", "_resolve_dep", "(", "dep", ")" ]
this method resolves dependencies for the given key. call the method after the item "key" was added to the list of available items
[ "this", "method", "resolves", "dependencies", "for", "the", "given", "key", ".", "call", "the", "method", "after", "the", "item", "key", "was", "added", "to", "the", "list", "of", "available", "items" ]
efde23a9352ae1fd6702b04ad964783ce11cbca5
https://github.com/JanHendrikDolling/configvalidator/blob/efde23a9352ae1fd6702b04ad964783ce11cbca5/configvalidator/tools/parser.py#L132-L148
train
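A self-contained sketch of the resolve-then-cascade pattern _resolve_dep implements, using a plain dict in place of the ParseObj bookkeeping (all names here are illustrative):

    # waiting maps a not-yet-available key to the items blocked on it.
    waiting = {"a": ["b"], "b": ["c"]}
    resolved = []

    def resolve(key):
        for dep in waiting.pop(key, []):   # consume and drop the entry
            resolved.append(dep)
            resolve(dep)                   # a resolved item may unblock more

    resolve("a")
    print(resolved)                        # ['b', 'c']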
JanHendrikDolling/configvalidator
configvalidator/tools/parser.py
ParseObj._get_all_refs
def _get_all_refs(self, dep, handled_refs=None): """ get a list of all dependencies for the given item "dep" """ if handled_refs is None: handled_refs = [dep] else: if dep in handled_refs: return [] res = [] if dep in self.future_values_key_item: res.extend( self.future_values_key_item[dep]["dependencies"].values()) add = [] for h_d in res: add.extend(self._get_all_refs(h_d, handled_refs)) res.extend(add) return list(set(res))
python
def _get_all_refs(self, dep, handled_refs=None): """ get a list of all dependencies for the given item "dep" """ if handled_refs is None: handled_refs = [dep] else: if dep in handled_refs: return [] res = [] if dep in self.future_values_key_item: res.extend( self.future_values_key_item[dep]["dependencies"].values()) add = [] for h_d in res: add.extend(self._get_all_refs(h_d, handled_refs)) res.extend(add) return list(set(res))
[ "def", "_get_all_refs", "(", "self", ",", "dep", ",", "handled_refs", "=", "None", ")", ":", "if", "handled_refs", "is", "None", ":", "handled_refs", "=", "[", "dep", "]", "else", ":", "if", "dep", "in", "handled_refs", ":", "return", "[", "]", "res", "=", "[", "]", "if", "dep", "in", "self", ".", "future_values_key_item", ":", "res", ".", "extend", "(", "self", ".", "future_values_key_item", "[", "dep", "]", "[", "\"dependencies\"", "]", ".", "values", "(", ")", ")", "add", "=", "[", "]", "for", "h_d", "in", "res", ":", "add", ".", "extend", "(", "self", ".", "_get_all_refs", "(", "h_d", ",", "handled_refs", ")", ")", "res", ".", "extend", "(", "add", ")", "return", "list", "(", "set", "(", "res", ")", ")" ]
get a list of all dependencies for the given item "dep"
[ "get", "al", "list", "of", "all", "dependencies", "for", "the", "given", "item", "dep" ]
efde23a9352ae1fd6702b04ad964783ce11cbca5
https://github.com/JanHendrikDolling/configvalidator/blob/efde23a9352ae1fd6702b04ad964783ce11cbca5/configvalidator/tools/parser.py#L189-L206
train
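Note that _get_all_refs never adds dep to handled_refs, so a dependency cycle (a -> b -> a) can recurse without bound. A standalone corrected sketch over a plain adjacency dict, not a drop-in patch for ParseObj:

    def get_all_refs(graph, dep, handled=None):
        """Transitive dependencies of dep in graph (dict of item -> deps)."""
        if handled is None:
            handled = set()
        if dep in handled:
            return []
        handled.add(dep)                   # the step the original omits
        res = list(graph.get(dep, []))
        for child in list(res):
            res.extend(get_all_refs(graph, child, handled))
        return list(set(res))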
ariebovenberg/snug
examples/github/query.py
BaseQuery.parse
def parse(response): """check for errors""" if response.status_code == 400: try: msg = json.loads(response.content)['message'] except (KeyError, ValueError): msg = '' raise ApiError(msg) return response
python
def parse(response): """check for errors""" if response.status_code == 400: try: msg = json.loads(response.content)['message'] except (KeyError, ValueError): msg = '' raise ApiError(msg) return response
[ "def", "parse", "(", "response", ")", ":", "if", "response", ".", "status_code", "==", "400", ":", "try", ":", "msg", "=", "json", ".", "loads", "(", "response", ".", "content", ")", "[", "'message'", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "msg", "=", "''", "raise", "ApiError", "(", "msg", ")", "return", "response" ]
check for errors
[ "check", "for", "errors" ]
4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/github/query.py#L68-L76
train
nugget/python-anthemav
anthemav/connection.py
Connection.create
def create(cls, host='localhost', port=14999, auto_reconnect=True, loop=None, protocol_class=AVR, update_callback=None): """Initiate a connection to a specific device. Here is where we supply the host and port and callback callables we expect for this AVR class object. :param host: Hostname or IP address of the device :param port: TCP port number of the device :param auto_reconnect: Should the Connection try to automatically reconnect if needed? :param loop: asyncio.loop for async operation :param update_callback: This function is called whenever AVR state data changes :type host: str :type port: int :type auto_reconnect: boolean :type loop: asyncio.loop :type update_callback: callable """ assert port >= 0, 'Invalid port value: %r' % (port) conn = cls() conn.host = host conn.port = port conn._loop = loop or asyncio.get_event_loop() conn._retry_interval = 1 conn._closed = False conn._closing = False conn._halted = False conn._auto_reconnect = auto_reconnect def connection_lost(): """Function callback for Protocoal class when connection is lost.""" if conn._auto_reconnect and not conn._closing: ensure_future(conn._reconnect(), loop=conn._loop) conn.protocol = protocol_class( connection_lost_callback=connection_lost, loop=conn._loop, update_callback=update_callback) yield from conn._reconnect() return conn
python
def create(cls, host='localhost', port=14999, auto_reconnect=True, loop=None, protocol_class=AVR, update_callback=None): """Initiate a connection to a specific device. Here is where we supply the host and port and callback callables we expect for this AVR class object. :param host: Hostname or IP address of the device :param port: TCP port number of the device :param auto_reconnect: Should the Connection try to automatically reconnect if needed? :param loop: asyncio.loop for async operation :param update_callback: This function is called whenever AVR state data changes :type host: str :type port: int :type auto_reconnect: boolean :type loop: asyncio.loop :type update_callback: callable """ assert port >= 0, 'Invalid port value: %r' % (port) conn = cls() conn.host = host conn.port = port conn._loop = loop or asyncio.get_event_loop() conn._retry_interval = 1 conn._closed = False conn._closing = False conn._halted = False conn._auto_reconnect = auto_reconnect def connection_lost(): """Function callback for Protocoal class when connection is lost.""" if conn._auto_reconnect and not conn._closing: ensure_future(conn._reconnect(), loop=conn._loop) conn.protocol = protocol_class( connection_lost_callback=connection_lost, loop=conn._loop, update_callback=update_callback) yield from conn._reconnect() return conn
[ "def", "create", "(", "cls", ",", "host", "=", "'localhost'", ",", "port", "=", "14999", ",", "auto_reconnect", "=", "True", ",", "loop", "=", "None", ",", "protocol_class", "=", "AVR", ",", "update_callback", "=", "None", ")", ":", "assert", "port", ">=", "0", ",", "'Invalid port value: %r'", "%", "(", "port", ")", "conn", "=", "cls", "(", ")", "conn", ".", "host", "=", "host", "conn", ".", "port", "=", "port", "conn", ".", "_loop", "=", "loop", "or", "asyncio", ".", "get_event_loop", "(", ")", "conn", ".", "_retry_interval", "=", "1", "conn", ".", "_closed", "=", "False", "conn", ".", "_closing", "=", "False", "conn", ".", "_halted", "=", "False", "conn", ".", "_auto_reconnect", "=", "auto_reconnect", "def", "connection_lost", "(", ")", ":", "\"\"\"Function callback for Protocoal class when connection is lost.\"\"\"", "if", "conn", ".", "_auto_reconnect", "and", "not", "conn", ".", "_closing", ":", "ensure_future", "(", "conn", ".", "_reconnect", "(", ")", ",", "loop", "=", "conn", ".", "_loop", ")", "conn", ".", "protocol", "=", "protocol_class", "(", "connection_lost_callback", "=", "connection_lost", ",", "loop", "=", "conn", ".", "_loop", ",", "update_callback", "=", "update_callback", ")", "yield", "from", "conn", ".", "_reconnect", "(", ")", "return", "conn" ]
Initiate a connection to a specific device. Here is where we supply the host and port and callback callables we expect for this AVR class object. :param host: Hostname or IP address of the device :param port: TCP port number of the device :param auto_reconnect: Should the Connection try to automatically reconnect if needed? :param loop: asyncio.loop for async operation :param update_callback: This function is called whenever AVR state data changes :type host: str :type port: int :type auto_reconnect: boolean :type loop: asyncio.loop :type update_callback: callable
[ "Initiate", "a", "connection", "to", "a", "specific", "device", "." ]
c3cee38f2d452c1ab1335d9885e0769ec24d5f90
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/connection.py#L23-L76
train
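A hedged usage sketch for Connection.create: the yield from usage suggests a generator-based asyncio coroutine, so it must be awaited; the host, the callback signature, and the assumption that Connection is importable from the package root are mine, not from this record:

    import asyncio
    from anthemav import Connection

    async def main():
        # create() is awaitable; host/port here are placeholders.
        conn = await Connection.create(
            host='192.168.1.50', port=14999,
            update_callback=lambda message: print('AVR update:', message))
        await asyncio.sleep(10)    # let some state updates arrive
        conn.close()

    asyncio.get_event_loop().run_until_complete(main())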
nugget/python-anthemav
anthemav/connection.py
Connection.close
def close(self): """Close the AVR device connection and don't try to reconnect.""" self.log.warning('Closing connection to AVR') self._closing = True if self.protocol.transport: self.protocol.transport.close()
python
def close(self): """Close the AVR device connection and don't try to reconnect.""" self.log.warning('Closing connection to AVR') self._closing = True if self.protocol.transport: self.protocol.transport.close()
[ "def", "close", "(", "self", ")", ":", "self", ".", "log", ".", "warning", "(", "'Closing connection to AVR'", ")", "self", ".", "_closing", "=", "True", "if", "self", ".", "protocol", ".", "transport", ":", "self", ".", "protocol", ".", "transport", ".", "close", "(", ")" ]
Close the AVR device connection and don't try to reconnect.
[ "Close", "the", "AVR", "device", "connection", "and", "don", "t", "try", "to", "reconnect", "." ]
c3cee38f2d452c1ab1335d9885e0769ec24d5f90
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/connection.py#L117-L122
train
MacHu-GWU/single_file_module-project
sfm/ziplib.py
_compress_obj
def _compress_obj(obj, level): """Compress object to bytes. """ return zlib.compress(pickle.dumps(obj, protocol=2), level)
python
def _compress_obj(obj, level): """Compress object to bytes. """ return zlib.compress(pickle.dumps(obj, protocol=2), level)
[ "def", "_compress_obj", "(", "obj", ",", "level", ")", ":", "return", "zlib", ".", "compress", "(", "pickle", ".", "dumps", "(", "obj", ",", "protocol", "=", "2", ")", ",", "level", ")" ]
Compress object to bytes.
[ "Compress", "object", "to", "bytes", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/ziplib.py#L34-L37
train
MacHu-GWU/single_file_module-project
sfm/ziplib.py
compress
def compress(obj, level=6, return_type="bytes"): """Compress anything to bytes or string. :param obj: could be any object, usually binary, a string, or a regular (picklable) Python object. :param level: zlib compression level, from 0 (no compression) to 9 (best). :param return_type: if bytes, then return bytes; if str, then return base64.b64encode bytes in utf-8 string. """ if isinstance(obj, binary_type): b = _compress_bytes(obj, level) elif isinstance(obj, string_types): b = _compress_str(obj, level) else: b = _compress_obj(obj, level) if return_type == "bytes": return b elif return_type == "str": return base64.b64encode(b).decode("utf-8") else: raise ValueError("'return_type' has to be one of 'bytes', 'str'!")
python
def compress(obj, level=6, return_type="bytes"): """Compress anything to bytes or string. :param obj: could be any object, usually binary, a string, or a regular (picklable) Python object. :param level: zlib compression level, from 0 (no compression) to 9 (best). :param return_type: if bytes, then return bytes; if str, then return base64.b64encode bytes in utf-8 string. """ if isinstance(obj, binary_type): b = _compress_bytes(obj, level) elif isinstance(obj, string_types): b = _compress_str(obj, level) else: b = _compress_obj(obj, level) if return_type == "bytes": return b elif return_type == "str": return base64.b64encode(b).decode("utf-8") else: raise ValueError("'return_type' has to be one of 'bytes', 'str'!")
[ "def", "compress", "(", "obj", ",", "level", "=", "6", ",", "return_type", "=", "\"bytes\"", ")", ":", "if", "isinstance", "(", "obj", ",", "binary_type", ")", ":", "b", "=", "_compress_bytes", "(", "obj", ",", "level", ")", "elif", "isinstance", "(", "obj", ",", "string_types", ")", ":", "b", "=", "_compress_str", "(", "obj", ",", "level", ")", "else", ":", "b", "=", "_compress_obj", "(", "obj", ",", "level", ")", "if", "return_type", "==", "\"bytes\"", ":", "return", "b", "elif", "return_type", "==", "\"str\"", ":", "return", "base64", ".", "b64encode", "(", "b", ")", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "raise", "ValueError", "(", "\"'return_type' has to be one of 'bytes', 'str'!\"", ")" ]
Compress anything to bytes or string. :param obj: could be any object, usually binary, a string, or a regular (picklable) Python object. :param level: zlib compression level, from 0 (no compression) to 9 (best). :param return_type: if bytes, then return bytes; if str, then return base64.b64encode bytes in utf-8 string.
[ "Compress", "anything", "to", "bytes", "or", "string", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/ziplib.py#L52-L73
train
MacHu-GWU/single_file_module-project
sfm/ziplib.py
decompress
def decompress(obj, return_type="bytes"): """ Decompress it to its original form. :param obj: Compressed object, could be bytes or str. :param return_type: if bytes, then return bytes; if str, then use base64.b64decode; if obj, then use pickle.loads to return an object. """ if isinstance(obj, binary_type): b = zlib.decompress(obj) elif isinstance(obj, string_types): b = zlib.decompress(base64.b64decode(obj.encode("utf-8"))) else: raise TypeError("input cannot be anything other than str and bytes!") if return_type == "bytes": return b elif return_type == "str": return b.decode("utf-8") elif return_type == "obj": return pickle.loads(b) else: raise ValueError( "'return_type' has to be one of 'bytes', 'str' or 'obj'!")
python
def decompress(obj, return_type="bytes"): """ Decompress it to its original form. :param obj: Compressed object, could be bytes or str. :param return_type: if bytes, then return bytes; if str, then use base64.b64decode; if obj, then use pickle.loads to return an object. """ if isinstance(obj, binary_type): b = zlib.decompress(obj) elif isinstance(obj, string_types): b = zlib.decompress(base64.b64decode(obj.encode("utf-8"))) else: raise TypeError("input cannot be anything other than str and bytes!") if return_type == "bytes": return b elif return_type == "str": return b.decode("utf-8") elif return_type == "obj": return pickle.loads(b) else: raise ValueError( "'return_type' has to be one of 'bytes', 'str' or 'obj'!")
[ "def", "decompress", "(", "obj", ",", "return_type", "=", "\"bytes\"", ")", ":", "if", "isinstance", "(", "obj", ",", "binary_type", ")", ":", "b", "=", "zlib", ".", "decompress", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "string_types", ")", ":", "b", "=", "zlib", ".", "decompress", "(", "base64", ".", "b64decode", "(", "obj", ".", "encode", "(", "\"utf-8\"", ")", ")", ")", "else", ":", "raise", "TypeError", "(", "\"input cannot be anything other than str and bytes!\"", ")", "if", "return_type", "==", "\"bytes\"", ":", "return", "b", "elif", "return_type", "==", "\"str\"", ":", "return", "b", ".", "decode", "(", "\"utf-8\"", ")", "elif", "return_type", "==", "\"obj\"", ":", "return", "pickle", ".", "loads", "(", "b", ")", "else", ":", "raise", "ValueError", "(", "\"'return_type' has to be one of 'bytes', 'str' or 'obj'!\"", ")" ]
Decompress it to its original form. :param obj: Compressed object, could be bytes or str. :param return_type: if bytes, then return bytes; if str, then use base64.b64decode; if obj, then use pickle.loads to return an object.
[ "De", "-", "compress", "it", "to", "it", "s", "original", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/ziplib.py#L76-L99
train
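A round-trip sketch using only the documented compress/decompress API above; the sample payloads are illustrative:

    data = {"name": "alice", "scores": [1, 2, 3]}

    # Arbitrary picklable object -> base64 text -> object again.
    s = compress(data, level=9, return_type="str")
    assert decompress(s, return_type="obj") == data

    # Bytes in, bytes out (the default return_type).
    blob = compress(b"hello" * 1000)
    assert decompress(blob) == b"hello" * 1000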
PSPC-SPAC-buyandsell/didauth
didauth/utils.py
build_signature_template
def build_signature_template(key_id, algorithm, headers): """ Build the Signature template for use with the Authorization header. key_id is the mandatory label indicating to the server which secret to use algorithm is one of the supported algorithms headers is a list of http headers to be included in the signing string. The signature must be interpolated into the template to get the final Authorization header value. """ param_map = {'keyId': key_id, 'algorithm': algorithm, 'signature': '%s'} if headers: headers = [h.lower() for h in headers] param_map['headers'] = ' '.join(headers) kv = map('{0[0]}="{0[1]}"'.format, param_map.items()) kv_string = ','.join(kv) sig_string = 'Signature {0}'.format(kv_string) return sig_string
python
def build_signature_template(key_id, algorithm, headers): """ Build the Signature template for use with the Authorization header. key_id is the mandatory label indicating to the server which secret to use algorithm is one of the supported algorithms headers is a list of http headers to be included in the signing string. The signature must be interpolated into the template to get the final Authorization header value. """ param_map = {'keyId': key_id, 'algorithm': algorithm, 'signature': '%s'} if headers: headers = [h.lower() for h in headers] param_map['headers'] = ' '.join(headers) kv = map('{0[0]}="{0[1]}"'.format, param_map.items()) kv_string = ','.join(kv) sig_string = 'Signature {0}'.format(kv_string) return sig_string
[ "def", "build_signature_template", "(", "key_id", ",", "algorithm", ",", "headers", ")", ":", "param_map", "=", "{", "'keyId'", ":", "key_id", ",", "'algorithm'", ":", "algorithm", ",", "'signature'", ":", "'%s'", "}", "if", "headers", ":", "headers", "=", "[", "h", ".", "lower", "(", ")", "for", "h", "in", "headers", "]", "param_map", "[", "'headers'", "]", "=", "' '", ".", "join", "(", "headers", ")", "kv", "=", "map", "(", "'{0[0]}=\"{0[1]}\"'", ".", "format", ",", "param_map", ".", "items", "(", ")", ")", "kv_string", "=", "','", ".", "join", "(", "kv", ")", "sig_string", "=", "'Signature {0}'", ".", "format", "(", "kv_string", ")", "return", "sig_string" ]
Build the Signature template for use with the Authorization header. key_id is the mandatory label indicating to the server which secret to use algorithm is one of the supported algorithms headers is a list of http headers to be included in the signing string. The signature must be interpolated into the template to get the final Authorization header value.
[ "Build", "the", "Signature", "template", "for", "use", "with", "the", "Authorization", "header", "." ]
e242fff8eddebf6ed52a65b161a229cdfbf5226e
https://github.com/PSPC-SPAC-buyandsell/didauth/blob/e242fff8eddebf6ed52a65b161a229cdfbf5226e/didauth/utils.py#L115-L135
train
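What build_signature_template produces, as a sketch; the key id and header list below are made up, and since parameter order follows dict iteration order it is only stable on Python 3.7+:

    template = build_signature_template(
        key_id='did:sov:abc123', algorithm='rsa-sha256',
        headers=['(request-target)', 'date', 'digest'])
    # Signature keyId="did:sov:abc123",algorithm="rsa-sha256",signature="%s",
    #           headers="(request-target) date digest"
    header_value = template % 'base64-signature-here'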
MacHu-GWU/single_file_module-project
sfm/geo_search.py
GeoSearchEngine.train
def train(self, data, key_id, key_lat, key_lng, clear_old=True): """ Feed data into database. :type data: list :param data: list of point object, can have other metadata, for example: [{"id": 10001, "lat": xxx, "lng": xxx}, ...] :type key_id: callable :param key_id: callable function, take point object as input, return object id, for example: lambda x: x["id"] :type key_lat: callable :param key_lat: callable function, take point object as input, return object latitude, for example: lambda x: x["lat"] :type key_lng: callable :param key_lng: callable function, take point object as input, return object longitude, for example: lambda x: x["lng"] """ engine, t_point = self.engine, self.t_point if clear_old: try: t_point.drop(engine) except: pass t_point.create(engine) table_data = list() for record in data: id = key_id(record) lat = key_lat(record) lng = key_lng(record) row = {"id": id, "lat": lat, "lng": lng, "data": record} table_data.append(row) ins = t_point.insert() engine.execute(ins, table_data) index = Index('idx_lat_lng', t_point.c.lat, t_point.c.lng) index.create(engine)
python
def train(self, data, key_id, key_lat, key_lng, clear_old=True): """ Feed data into database. :type data: list :param data: list of point object, can have other metadata, for example: [{"id": 10001, "lat": xxx, "lng": xxx}, ...] :type key_id: callable :param key_id: callable function, take point object as input, return object id, for example: lambda x: x["id"] :type key_lat: callable :param key_lat: callable function, take point object as input, return object latitude, for example: lambda x: x["lat"] :type key_lng: callable :param key_lng: callable function, take point object as input, return object longitude, for example: lambda x: x["lng"] """ engine, t_point = self.engine, self.t_point if clear_old: try: t_point.drop(engine) except: pass t_point.create(engine) table_data = list() for record in data: id = key_id(record) lat = key_lat(record) lng = key_lng(record) row = {"id": id, "lat": lat, "lng": lng, "data": record} table_data.append(row) ins = t_point.insert() engine.execute(ins, table_data) index = Index('idx_lat_lng', t_point.c.lat, t_point.c.lng) index.create(engine)
[ "def", "train", "(", "self", ",", "data", ",", "key_id", ",", "key_lat", ",", "key_lng", ",", "clear_old", "=", "True", ")", ":", "engine", ",", "t_point", "=", "self", ".", "engine", ",", "self", ".", "t_point", "if", "clear_old", ":", "try", ":", "t_point", ".", "drop", "(", "engine", ")", "except", ":", "pass", "t_point", ".", "create", "(", "engine", ")", "table_data", "=", "list", "(", ")", "for", "record", "in", "data", ":", "id", "=", "key_id", "(", "record", ")", "lat", "=", "key_lat", "(", "record", ")", "lng", "=", "key_lng", "(", "record", ")", "row", "=", "{", "\"id\"", ":", "id", ",", "\"lat\"", ":", "lat", ",", "\"lng\"", ":", "lng", ",", "\"data\"", ":", "record", "}", "table_data", ".", "append", "(", "row", ")", "ins", "=", "t_point", ".", "insert", "(", ")", "engine", ".", "execute", "(", "ins", ",", "table_data", ")", "index", "=", "Index", "(", "'idx_lat_lng'", ",", "t_point", ".", "c", ".", "lat", ",", "t_point", ".", "c", ".", "lng", ")", "index", ".", "create", "(", "engine", ")" ]
Feed data into database. :type data: list :param data: list of point object, can have other metadata, for example: [{"id": 10001, "lat": xxx, "lng": xxx}, ...] :type key_id: callable :param key_id: callable function, take point object as input, return object id, for example: lambda x: x["id"] :type key_lat: callable :param key_lat: callable function, take point object as input, return object latitude, for example: lambda x: x["lat"] :type key_lng: callable :param key_lng: callable function, take point object as input, return object longitude, for example: lambda x: x["lng"]
[ "Feed", "data", "into", "database", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/geo_search.py#L55-L95
train
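A usage sketch for train(); the GeoSearchEngine constructor arguments are not shown in this record, so the zero-argument call below is an assumption:

    engine = GeoSearchEngine()          # constructor signature assumed
    engine.train(
        data=[
            {"id": 1, "lat": 40.7128, "lng": -74.0060},
            {"id": 2, "lat": 34.0522, "lng": -118.2437},
        ],
        key_id=lambda row: row["id"],
        key_lat=lambda row: row["lat"],
        key_lng=lambda row: row["lng"],
    )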
MacHu-GWU/single_file_module-project
sfm/geo_search.py
GeoSearchEngine.find_n_nearest
def find_n_nearest(self, lat, lng, n=5, radius=None): """Find the n nearest points within a certain distance from a point. :param lat: latitude of center point. :param lng: longitude of center point. :param n: max number of records to return. :param radius: only search points within ``radius`` distance. **中文文档** """ engine, t_point = self.engine, self.t_point if radius: # Use a simple box filter to minimize candidates # Define latitude longitude boundary dist_btwn_lat_deg = 69.172 dist_btwn_lon_deg = cos(lat) * 69.172 lat_degr_rad = abs(radius * 1.05 / dist_btwn_lat_deg) lon_degr_rad = abs(radius * 1.05 / dist_btwn_lon_deg) lat_lower = lat - lat_degr_rad lat_upper = lat + lat_degr_rad lng_lower = lng - lon_degr_rad lng_upper = lng + lon_degr_rad filters = [ t_point.c.lat >= lat_lower, t_point.c.lat <= lat_upper, t_point.c.lat >= lng_lower, t_point.c.lat >= lng_upper, ] else: radius = 999999.9 filters = [] s = select([t_point]).where(and_(*filters)) heap = list() for row in engine.execute(s): dist = great_circle((lat, lng), (row.lat, row.lng)) if dist <= radius: heap.append((dist, row.data)) # Use heap sort to find top-K nearest n_nearest = heapq.nsmallest(n, heap, key=lambda x: x[0]) return n_nearest
python
def find_n_nearest(self, lat, lng, n=5, radius=None): """Find the n nearest points within a certain distance from a point. :param lat: latitude of center point. :param lng: longitude of center point. :param n: max number of records to return. :param radius: only search points within ``radius`` distance. **中文文档** """ engine, t_point = self.engine, self.t_point if radius: # Use a simple box filter to minimize candidates # Define latitude longitude boundary dist_btwn_lat_deg = 69.172 dist_btwn_lon_deg = cos(lat) * 69.172 lat_degr_rad = abs(radius * 1.05 / dist_btwn_lat_deg) lon_degr_rad = abs(radius * 1.05 / dist_btwn_lon_deg) lat_lower = lat - lat_degr_rad lat_upper = lat + lat_degr_rad lng_lower = lng - lon_degr_rad lng_upper = lng + lon_degr_rad filters = [ t_point.c.lat >= lat_lower, t_point.c.lat <= lat_upper, t_point.c.lat >= lng_lower, t_point.c.lat >= lng_upper, ] else: radius = 999999.9 filters = [] s = select([t_point]).where(and_(*filters)) heap = list() for row in engine.execute(s): dist = great_circle((lat, lng), (row.lat, row.lng)) if dist <= radius: heap.append((dist, row.data)) # Use heap sort to find top-K nearest n_nearest = heapq.nsmallest(n, heap, key=lambda x: x[0]) return n_nearest
[ "def", "find_n_nearest", "(", "self", ",", "lat", ",", "lng", ",", "n", "=", "5", ",", "radius", "=", "None", ")", ":", "engine", ",", "t_point", "=", "self", ".", "engine", ",", "self", ".", "t_point", "if", "radius", ":", "# Use a simple box filter to minimize candidates", "# Define latitude longitude boundary", "dist_btwn_lat_deg", "=", "69.172", "dist_btwn_lon_deg", "=", "cos", "(", "lat", ")", "*", "69.172", "lat_degr_rad", "=", "abs", "(", "radius", "*", "1.05", "/", "dist_btwn_lat_deg", ")", "lon_degr_rad", "=", "abs", "(", "radius", "*", "1.05", "/", "dist_btwn_lon_deg", ")", "lat_lower", "=", "lat", "-", "lat_degr_rad", "lat_upper", "=", "lat", "+", "lat_degr_rad", "lng_lower", "=", "lng", "-", "lon_degr_rad", "lng_upper", "=", "lng", "+", "lon_degr_rad", "filters", "=", "[", "t_point", ".", "c", ".", "lat", ">=", "lat_lower", ",", "t_point", ".", "c", ".", "lat", "<=", "lat_upper", ",", "t_point", ".", "c", ".", "lat", ">=", "lng_lower", ",", "t_point", ".", "c", ".", "lat", ">=", "lng_upper", ",", "]", "else", ":", "radius", "=", "999999.9", "filters", "=", "[", "]", "s", "=", "select", "(", "[", "t_point", "]", ")", ".", "where", "(", "and_", "(", "*", "filters", ")", ")", "heap", "=", "list", "(", ")", "for", "row", "in", "engine", ".", "execute", "(", "s", ")", ":", "dist", "=", "great_circle", "(", "(", "lat", ",", "lng", ")", ",", "(", "row", ".", "lat", ",", "row", ".", "lng", ")", ")", "if", "dist", "<=", "radius", ":", "heap", ".", "append", "(", "(", "dist", ",", "row", ".", "data", ")", ")", "# Use heap sort to find top-K nearest", "n_nearest", "=", "heapq", ".", "nsmallest", "(", "n", ",", "heap", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", "return", "n_nearest" ]
Find the n nearest points within a certain distance from a point. :param lat: latitude of center point. :param lng: longitude of center point. :param n: max number of records to return. :param radius: only search points within ``radius`` distance. **中文文档**
[ "Find", "n", "nearest", "point", "within", "certain", "distance", "from", "a", "point", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/geo_search.py#L97-L141
train
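Two apparent slips in the box filter above: the longitude bounds are compared against t_point.c.lat (and twice with >=), and cos(lat) receives degrees while math.cos expects radians. A corrected sketch of just the prefilter, under those assumptions (lat, lng, radius, t_point from the enclosing method):

    from math import cos, radians

    dist_btwn_lat_deg = 69.172                      # miles per degree latitude
    dist_btwn_lon_deg = cos(radians(lat)) * 69.172  # shrinks toward the poles

    lat_pad = abs(radius * 1.05 / dist_btwn_lat_deg)
    lon_pad = abs(radius * 1.05 / dist_btwn_lon_deg)

    filters = [
        t_point.c.lat >= lat - lat_pad, t_point.c.lat <= lat + lat_pad,
        t_point.c.lng >= lng - lon_pad, t_point.c.lng <= lng + lon_pad,
    ]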
SMAPPNYU/pysmap
pysmap/twitterutil/smapp_dataset.py
SmappDataset.sample
def sample(self, k): ''' this method is especially troublesome i do not recommend making any changes to it you may notice it duplicates code from smappdragon there is no way around this as far as i can tell it really might screw up a lot of stuff, strip tweets has been purposely omitted as it isn't supported in pysmap ''' def new_get_iterators(): tweet_parser = smappdragon.TweetParser() it = iter(self.get_collection_iterators()) sample = list(itertools.islice(it, k)) random.shuffle(sample) for i, item in enumerate(it, start=k+1): j = random.randrange(i) if j < k: sample[j] = item for tweet in sample: if all([collection.limit != 0 and collection.limit <= count for collection in self.collections]): return elif all([tweet_parser.tweet_passes_filter(collection.filter, tweet) \ and tweet_parser.tweet_passes_custom_filter_list(collection.custom_filters, tweet) for collection in self.collections]): yield tweet cp = copy.deepcopy(self) cp.get_collection_iterators = new_get_iterators return cp
python
def sample(self, k): ''' this method is especially troublesome i do not recommend making any changes to it you may notice it duplicates code from smappdragon there is no way around this as far as i can tell it really might screw up a lot of stuff, strip tweets has been purposely omitted as it isn't supported in pysmap ''' def new_get_iterators(): tweet_parser = smappdragon.TweetParser() it = iter(self.get_collection_iterators()) sample = list(itertools.islice(it, k)) random.shuffle(sample) for i, item in enumerate(it, start=k+1): j = random.randrange(i) if j < k: sample[j] = item for tweet in sample: if all([collection.limit != 0 and collection.limit <= count for collection in self.collections]): return elif all([tweet_parser.tweet_passes_filter(collection.filter, tweet) \ and tweet_parser.tweet_passes_custom_filter_list(collection.custom_filters, tweet) for collection in self.collections]): yield tweet cp = copy.deepcopy(self) cp.get_collection_iterators = new_get_iterators return cp
[ "def", "sample", "(", "self", ",", "k", ")", ":", "def", "new_get_iterators", "(", ")", ":", "tweet_parser", "=", "smappdragon", ".", "TweetParser", "(", ")", "it", "=", "iter", "(", "self", ".", "get_collection_iterators", "(", ")", ")", "sample", "=", "list", "(", "itertools", ".", "islice", "(", "it", ",", "k", ")", ")", "random", ".", "shuffle", "(", "sample", ")", "for", "i", ",", "item", "in", "enumerate", "(", "it", ",", "start", "=", "k", "+", "1", ")", ":", "j", "=", "random", ".", "randrange", "(", "i", ")", "if", "j", "<", "k", ":", "sample", "[", "j", "]", "=", "item", "for", "tweet", "in", "sample", ":", "if", "all", "(", "[", "collection", ".", "limit", "!=", "0", "and", "collection", ".", "limit", "<=", "count", "for", "collection", "in", "self", ".", "collections", "]", ")", ":", "return", "elif", "all", "(", "[", "tweet_parser", ".", "tweet_passes_filter", "(", "collection", ".", "filter", ",", "tweet", ")", "and", "tweet_parser", ".", "tweet_passes_custom_filter_list", "(", "collection", ".", "custom_filters", ",", "tweet", ")", "for", "collection", "in", "self", ".", "collections", "]", ")", ":", "yield", "tweet", "cp", "=", "copy", ".", "deepcopy", "(", "self", ")", "cp", ".", "get_collection_iterators", "=", "new_get_iterators", "return", "cp" ]
this method is especially troublesome i do not recommend making any changes to it you may notice it duplicates code from smappdragon there is no way around this as far as i can tell it really might screw up a lot of stuff, strip tweets has been purposely omitted as it isn't supported in pysmap
[ "this", "method", "is", "especially", "troublesome", "i", "do", "not", "reccommend", "making", "any", "changes", "to", "it", "you", "may", "notice", "it", "uplicates", "code", "fro", "smappdragon", "there", "is", "no", "way", "around", "this", "as", "far", "as", "i", "can", "tell", "it", "really", "might", "screw", "up", "a", "lot", "of", "stuff", "stip", "tweets", "has", "been", "purposely", "omitted", "as", "it", "isnt", "supported", "in", "pysmap" ]
eb871992f40c53125129535e871525d5623c8c2d
https://github.com/SMAPPNYU/pysmap/blob/eb871992f40c53125129535e871525d5623c8c2d/pysmap/twitterutil/smapp_dataset.py#L451-L478
train
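new_get_iterators is reservoir sampling (Algorithm R): fill a k-slot reservoir, then replace a random slot with item i at probability k/i. A standalone version of just that core follows; note also that the count variable in the limit check above is never defined in this copy, so collections with a nonzero limit would raise NameError:

    import itertools
    import random

    def reservoir_sample(iterable, k):
        """Uniform k-sample from a stream of unknown length (Algorithm R)."""
        it = iter(iterable)
        sample = list(itertools.islice(it, k))    # fill the reservoir
        for i, item in enumerate(it, start=k + 1):
            j = random.randrange(i)               # 0 <= j < i
            if j < k:
                sample[j] = item                  # keep with probability k/i
        return sample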
koszullab/metaTOR
metator/scripts/network.py
merge_networks
def merge_networks(output_file="merged_network.txt", *files): """Merge networks into a larger network. A naive implementation for merging two or more networks in edgelist format. Parameters --------- output_file : file, str, or pathlib.Path, optional The output file to write the merged network into. Default is merged_network.txt `*files` : file, str or pathlib.Path The network files to merge. Note ---- The partitioning step doesn't mind redundant edges and handles them pretty well, so if you are not using the merged edgelist for anything else you can just concatenate the edgelists without using this function. """ contacts = dict() for network_file in files: with open(network_file) as network_file_handle: for line in network_file_handle: id_a, id_b, n_contacts = line.split("\t") pair = sorted((id_a, id_b)) try: contacts[pair] += n_contacts except KeyError: contacts[pair] = n_contacts sorted_contacts = sorted(contacts) with open(output_file, "w") as output_handle: for index_pair in sorted_contacts: id_a, id_b = index_pair n_contacts = contacts[index_pair] output_handle.write("{}\t{}\t{}\n".format(id_a, id_b, n_contacts))
python
def merge_networks(output_file="merged_network.txt", *files): """Merge networks into a larger network. A naive implementation for merging two or more networks in edgelist format. Parameters --------- output_file : file, str, or pathlib.Path, optional The output file to write the merged network into. Default is merged_network.txt `*files` : file, str or pathlib.Path The network files to merge. Note ---- The partitioning step doesn't mind redundant edges and handles them pretty well, so if you are not using the merged edgelist for anything else you can just concatenate the edgelists without using this function. """ contacts = dict() for network_file in files: with open(network_file) as network_file_handle: for line in network_file_handle: id_a, id_b, n_contacts = line.split("\t") pair = sorted((id_a, id_b)) try: contacts[pair] += n_contacts except KeyError: contacts[pair] = n_contacts sorted_contacts = sorted(contacts) with open(output_file, "w") as output_handle: for index_pair in sorted_contacts: id_a, id_b = index_pair n_contacts = contacts[index_pair] output_handle.write("{}\t{}\t{}\n".format(id_a, id_b, n_contacts))
[ "def", "merge_networks", "(", "output_file", "=", "\"merged_network.txt\"", ",", "*", "files", ")", ":", "contacts", "=", "dict", "(", ")", "for", "network_file", "in", "files", ":", "with", "open", "(", "network_file", ")", "as", "network_file_handle", ":", "for", "line", "in", "network_file_handle", ":", "id_a", ",", "id_b", ",", "n_contacts", "=", "line", ".", "split", "(", "\"\\t\"", ")", "pair", "=", "sorted", "(", "(", "id_a", ",", "id_b", ")", ")", "try", ":", "contacts", "[", "pair", "]", "+=", "n_contacts", "except", "KeyError", ":", "contacts", "[", "pair", "]", "=", "n_contacts", "sorted_contacts", "=", "sorted", "(", "contacts", ")", "with", "open", "(", "output_file", ",", "\"w\"", ")", "as", "output_handle", ":", "for", "index_pair", "in", "sorted_contacts", ":", "id_a", ",", "id_b", "=", "index_pair", "n_contacts", "=", "contacts", "[", "index_pair", "]", "output_handle", ".", "write", "(", "\"{}\\t{}\\t{}\\n\"", ".", "format", "(", "id_a", ",", "id_b", ",", "n_contacts", ")", ")" ]
Merge networks into a larger network. A naive implementation for merging two or more networks in edgelist format. Parameters --------- output_file : file, str, or pathlib.Path, optional The output file to write the merged network into. Default is merged_network.txt `*files` : file, str or pathlib.Path The network files to merge. Note ---- The partitioning step doesn't mind redundant edges and handles them pretty well, so if you are not using the merged edgelist for anything else you can just concatenate the edgelists without using this function.
[ "Merge", "networks", "into", "a", "larger", "network", "." ]
0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/network.py#L351-L388
train
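Two issues worth flagging in merge_networks: sorted() returns a list, which cannot be a dict key (the first insert raises TypeError), and n_contacts is the raw tab-split string (newline included), so += would concatenate text. A corrected core loop, as a sketch over the same files argument:

    contacts = {}
    for network_file in files:
        with open(network_file) as handle:
            for line in handle:
                id_a, id_b, n_contacts = line.rstrip("\n").split("\t")
                pair = tuple(sorted((id_a, id_b)))          # hashable key
                contacts[pair] = contacts.get(pair, 0) + int(n_contacts)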
koszullab/metaTOR
metator/scripts/network.py
merge_chunk_data
def merge_chunk_data(output_file="merged_idx_contig_hit_size_cov.txt", *files): """Merge chunk data from different networks Similarly to merge_network, this merges any number of chunk data files. Parameters --------- output_file : file, str, or pathlib.Path, optional The output file to write the merged chunk data files into. Default is merged_idx_contig_hit_size_cov.txt `*files` : file, str or pathlib.Path The chunk data files to merge. """ chunks = dict() for chunk_file in files: with open(chunk_file) as chunk_file_handle: for line in chunk_file_handle: chunk_id, chunk_name, hit, size, cov = line.split("\t") try: chunks[chunk_id]["hit"] += hit chunks[chunk_id]["cov"] += cov except KeyError: chunks[chunk_id] = { "name": chunk_name, "hit": hit, "size": size, "cov": cov, } sorted_chunks = sorted(chunks) with open(output_file, "w") as output_handle: for chunk_id in sorted_chunks: my_chunk = chunks[chunk_id] name, hit, size, cov = ( my_chunk["name"], my_chunk["hit"], my_chunk["size"], my_chunk["cov"], ) my_line = "{}\t{}\t{}\t{}\t{}".format( chunk_id, name, hit, size, cov ) output_handle.write(my_line)
python
def merge_chunk_data(output_file="merged_idx_contig_hit_size_cov.txt", *files): """Merge chunk data from different networks Similarly to merge_network, this merges any number of chunk data files. Parameters --------- output_file : file, str, or pathlib.Path, optional The output file to write the merged chunk data files into. Default is merged_idx_contig_hit_size_cov.txt `*files` : file, str or pathlib.Path The chunk data files to merge. """ chunks = dict() for chunk_file in files: with open(chunk_file) as chunk_file_handle: for line in chunk_file_handle: chunk_id, chunk_name, hit, size, cov = line.split("\t") try: chunks[chunk_id]["hit"] += hit chunks[chunk_id]["cov"] += cov except KeyError: chunks[chunk_id] = { "name": chunk_name, "hit": hit, "size": size, "cov": cov, } sorted_chunks = sorted(chunks) with open(output_file, "w") as output_handle: for chunk_id in sorted_chunks: my_chunk = chunks[chunk_id] name, hit, size, cov = ( my_chunk["name"], my_chunk["hit"], my_chunk["size"], my_chunk["cov"], ) my_line = "{}\t{}\t{}\t{}\t{}".format( chunk_id, name, hit, size, cov ) output_handle.write(my_line)
[ "def", "merge_chunk_data", "(", "output_file", "=", "\"merged_idx_contig_hit_size_cov.txt\"", ",", "*", "files", ")", ":", "chunks", "=", "dict", "(", ")", "for", "chunk_file", "in", "files", ":", "with", "open", "(", "chunk_file", ")", "as", "chunk_file_handle", ":", "for", "line", "in", "chunk_file_handle", ":", "chunk_id", ",", "chunk_name", ",", "hit", ",", "size", ",", "cov", "=", "line", ".", "split", "(", "\"\\t\"", ")", "try", ":", "chunks", "[", "chunk_id", "]", "[", "\"hit\"", "]", "+=", "hit", "chunks", "[", "chunk_id", "]", "[", "\"cov\"", "]", "+=", "cov", "except", "KeyError", ":", "chunks", "[", "chunk_id", "]", "=", "{", "\"name\"", ":", "chunk_name", ",", "\"hit\"", ":", "hit", ",", "\"size\"", ":", "size", ",", "\"cov\"", ":", "cov", ",", "}", "sorted_chunks", "=", "sorted", "(", "chunks", ")", "with", "open", "(", "output_file", ",", "\"w\"", ")", "as", "output_handle", ":", "for", "chunk_id", "in", "sorted_chunks", ":", "my_chunk", "=", "chunks", "[", "chunk_id", "]", "name", ",", "hit", ",", "size", ",", "cov", "=", "(", "my_chunk", "[", "\"name\"", "]", ",", "my_chunk", "[", "\"hit\"", "]", ",", "my_chunk", "[", "\"size\"", "]", ",", "my_chunk", "[", "\"cov\"", "]", ",", ")", "my_line", "=", "\"{}\\t{}\\t{}\\t{}\\t{}\"", ".", "format", "(", "chunk_id", ",", "name", ",", "hit", ",", "size", ",", "cov", ")", "output_handle", ".", "write", "(", "my_line", ")" ]
Merge chunk data from different networks Similarly to merge_network, this merges any number of chunk data files. Parameters --------- output_file : file, str, or pathlib.Path, optional The output file to write the merged chunk data files into. Default is merged_idx_contig_hit_size_cov.txt `*files` : file, str or pathlib.Path The chunk data files to merge.
[ "Merge", "chunk", "data", "from", "different", "networks" ]
0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/network.py#L391-L435
train
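merge_chunk_data has the same string-accumulation issue (hit and cov are concatenated rather than summed), and its output rows are written without a trailing newline. A sketch of the accumulation step with numeric parsing; the field types are assumed from the column names:

    def accumulate(chunks, chunk_id, name, hit, size, cov):
        # Parse the text fields before summing; the original concatenates strings.
        entry = chunks.setdefault(
            chunk_id, {"name": name, "hit": 0, "size": size, "cov": 0.0})
        entry["hit"] += int(hit)
        entry["cov"] += float(cov)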
koszullab/metaTOR
metator/scripts/network.py
alignment_to_reads
def alignment_to_reads( sam_merged, output_dir, parameters=DEFAULT_PARAMETERS, save_memory=True, *bin_fasta ): """Generate reads from ambiguous alignment file Extract reads found to be mapping an input FASTA bin. If one read maps, the whole pair is extracted and written to the output paired-end FASTQ files. Reads that mapped and weren't part of a pair are kept in a third 'single' file for people who need it (e.g. to get extra paired reads by fetching the opposite one from the original FASTQ library). Parameters ---------- sam_merged : file, str or pathlib.Path The input alignment file in SAM/BAM format to be processed. output_dir : str or pathlib.Path The output directory to write the network and chunk data into. parameters : dict, optional Parameters for the network to read conversion, similar to alignment_to_network. save_memory : bool, optional Whether to keep the read names into memory or write them in different files, which takes longer but may prevent out-of-memory crashes. Default is True. `*bin_fasta` : file, str or pathlib.Path The bin FASTA files with appropriately named records. Returns ------- A dictionary of files with read names for each bin if save_memory is True, and a dictionary of the read names lists themselves otherwise. Note ---- This will throw an IOError ('close failed in file object destructor') on exit with older versions of pysam for some reason. It's harmless but you may consider upgrading to a later version of pysam if it comes up in a pipeline. """ # Just in case file objects are sent as input def get_file_string(file_thing): try: file_string = file_thing.name except AttributeError: file_string = str(file_thing) return file_string # Global set of chunks against which reads are required to # map - we store them in a tuple that keeps track of the # original bin each chunk came from so we can reattribute the reads later bin_chunks = set() for bin_file in bin_fasta: for record in SeqIO.parse(bin_file, "fasta"): bin_chunks.add((get_file_string(bin_file), record.id)) chunk_size = int(parameters["chunk_size"]) mapq_threshold = int(parameters["mapq_threshold"]) def read_name(read): return read.query_name.split()[0] # Since reading a huge BAM file can take up a # lot of time and resources, we only do it once # but that requires opening fastq files for writing # as matching reads get detected along the # bam and keeping track of which ones are # currently open. def get_base_name(bin_file): base_name = ".".join(os.path.basename(bin_file).split(".")[:-1]) output_path = os.path.join( output_dir, "{}.readnames".format(base_name) ) return output_path if save_memory: opened_files = dict() else: read_names = dict() with pysam.AlignmentFile(sam_merged, "rb") as alignment_merged_handle: for (my_read_name, alignment_pool) in itertools.groupby( alignment_merged_handle, read_name ): for my_alignment in alignment_pool: relative_position = my_alignment.reference_start contig_name = my_alignment.reference_name chunk_position = relative_position // chunk_size # The 'chunk name' is used to detect matching positions chunk_name = "{}_{}".format(contig_name, chunk_position) # But such matching positions have to map acceptably quality_test = my_alignment.mapping_quality > mapq_threshold for bin_file in bin_fasta: chunk_tuple = (bin_file, chunk_name) if chunk_tuple in bin_chunks and quality_test: if save_memory: output_path = get_base_name(bin_file) try: output_handle = opened_files[bin_file] except KeyError: output_handle = open(output_path, "w") opened_files[bin_file] = output_handle output_handle.write("@{}\n".format(my_read_name)) else: try: read_names[my_read_name].append(bin_file) except KeyError: read_names[my_read_name] = [bin_file] for file_handle in opened_files.values(): file_handle.close() # Return unpaired file names for pair_unpaired_reads() to process if save_memory: return opened_files.keys() else: return read_names
python
def alignment_to_reads( sam_merged, output_dir, parameters=DEFAULT_PARAMETERS, save_memory=True, *bin_fasta ): """Generate reads from ambiguous alignment file Extract reads found to be mapping an input FASTA bin. If one read maps, the whole pair is extracted and written to the output paired-end FASTQ files. Reads that mapped and weren't part of a pair are kept in a third 'single' file for people who need it (e.g. to get extra paired reads by fetching the opposite one from the original FASTQ library). Parameters ---------- sam_merged : file, str or pathlib.Path The input alignment file in SAM/BAM format to be processed. output_dir : str or pathlib.Path The output directory to write the network and chunk data into. parameters : dict, optional Parameters for the network to read conversion, similar to alignment_to_network. save_memory : bool, optional Whether to keep the read names into memory or write them in different files, which takes longer but may prevent out-of-memory crashes. Default is True. `*bin_fasta` : file, str or pathlib.Path The bin FASTA files with appropriately named records. Returns ------- A dictionary of files with read names for each bin if save_memory is True, and a dictionary of the read names lists themselves otherwise. Note ---- This will throw an IOError ('close failed in file object destructor') on exit with older versions of pysam for some reason. It's harmless but you may consider upgrading to a later version of pysam if it comes up in a pipeline. """ # Just in case file objects are sent as input def get_file_string(file_thing): try: file_string = file_thing.name except AttributeError: file_string = str(file_thing) return file_string # Global set of chunks against which reads are required to # map - we store them in a tuple that keeps track of the # original bin each chunk came from so we can reattribute the reads later bin_chunks = set() for bin_file in bin_fasta: for record in SeqIO.parse(bin_file, "fasta"): bin_chunks.add((get_file_string(bin_file), record.id)) chunk_size = int(parameters["chunk_size"]) mapq_threshold = int(parameters["mapq_threshold"]) def read_name(read): return read.query_name.split()[0] # Since reading a huge BAM file can take up a # lot of time and resources, we only do it once # but that requires opening fastq files for writing # as matching reads get detected along the # bam and keeping track of which ones are # currently open. def get_base_name(bin_file): base_name = ".".join(os.path.basename(bin_file).split(".")[:-1]) output_path = os.path.join( output_dir, "{}.readnames".format(base_name) ) return output_path if save_memory: opened_files = dict() else: read_names = dict() with pysam.AlignmentFile(sam_merged, "rb") as alignment_merged_handle: for (my_read_name, alignment_pool) in itertools.groupby( alignment_merged_handle, read_name ): for my_alignment in alignment_pool: relative_position = my_alignment.reference_start contig_name = my_alignment.reference_name chunk_position = relative_position // chunk_size # The 'chunk name' is used to detect matching positions chunk_name = "{}_{}".format(contig_name, chunk_position) # But such matching positions have to map acceptably quality_test = my_alignment.mapping_quality > mapq_threshold for bin_file in bin_fasta: chunk_tuple = (bin_file, chunk_name) if chunk_tuple in bin_chunks and quality_test: if save_memory: output_path = get_base_name(bin_file) try: output_handle = opened_files[bin_file] except KeyError: output_handle = open(output_path, "w") opened_files[bin_file] = output_handle output_handle.write("@{}\n".format(my_read_name)) else: try: read_names[my_read_name].append(bin_file) except KeyError: read_names[my_read_name] = [bin_file] for file_handle in opened_files.values(): file_handle.close() # Return unpaired file names for pair_unpaired_reads() to process if save_memory: return opened_files.keys() else: return read_names
[ "def", "alignment_to_reads", "(", "sam_merged", ",", "output_dir", ",", "parameters", "=", "DEFAULT_PARAMETERS", ",", "save_memory", "=", "True", ",", "*", "bin_fasta", ")", ":", "# Just in case file objects are sent as input", "def", "get_file_string", "(", "file_thing", ")", ":", "try", ":", "file_string", "=", "file_thing", ".", "name", "except", "AttributeError", ":", "file_string", "=", "str", "(", "file_thing", ")", "return", "file_string", "# Global set of chunks against which reads are required to", "# map - we store them in a tuple that keeps track of the", "# original bin each chunk came from so we can reattribute the reads later", "bin_chunks", "=", "set", "(", ")", "for", "bin_file", "in", "bin_fasta", ":", "for", "record", "in", "SeqIO", ".", "parse", "(", "bin_file", ",", "\"fasta\"", ")", ":", "bin_chunks", ".", "add", "(", "(", "get_file_string", "(", "bin_file", ")", ",", "record", ".", "id", ")", ")", "chunk_size", "=", "int", "(", "parameters", "[", "\"chunk_size\"", "]", ")", "mapq_threshold", "=", "int", "(", "parameters", "[", "\"mapq_threshold\"", "]", ")", "def", "read_name", "(", "read", ")", ":", "return", "read", ".", "query_name", ".", "split", "(", ")", "[", "0", "]", "# Since reading a huge BAM file can take up a", "# lot of time and resources, we only do it once", "# but that requires opening fastq files for writing", "# as matching reads get detected along the", "# bam and keeping track of which ones are", "# currently open.", "def", "get_base_name", "(", "bin_file", ")", ":", "base_name", "=", "\".\"", ".", "join", "(", "os", ".", "path", ".", "basename", "(", "bin_file", ")", ".", "split", "(", "\".\"", ")", "[", ":", "-", "1", "]", ")", "output_path", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"{}.readnames\"", ".", "format", "(", "base_name", ")", ")", "return", "output_path", "if", "save_memory", ":", "opened_files", "=", "dict", "(", ")", "else", ":", "read_names", "=", "dict", "(", ")", "with", "pysam", ".", "AlignmentFile", "(", "sam_merged", ",", "\"rb\"", ")", "as", "alignment_merged_handle", ":", "for", "(", "my_read_name", ",", "alignment_pool", ")", "in", "itertools", ".", "groupby", "(", "alignment_merged_handle", ",", "read_name", ")", ":", "for", "my_alignment", "in", "alignment_pool", ":", "relative_position", "=", "my_alignment", ".", "reference_start", "contig_name", "=", "my_alignment", ".", "reference_name", "chunk_position", "=", "relative_position", "//", "chunk_size", "# The 'chunk name' is used to detect macthing positions", "chunk_name", "=", "\"{}_{}\"", ".", "format", "(", "contig_name", ",", "chunk_position", ")", "# But such matching positions have to map acceptably", "quality_test", "=", "my_alignment", ".", "mapping_quality", ">", "mapq_threshold", "for", "bin_file", "in", "bin_fasta", ":", "chunk_tuple", "=", "(", "bin_file", ",", "chunk_name", ")", "if", "chunk_tuple", "in", "bin_chunks", "and", "quality_test", ":", "if", "save_memory", ":", "output_path", "=", "get_base_name", "(", "bin_file", ")", "try", ":", "output_handle", "=", "opened_files", "[", "bin_file", "]", "except", "KeyError", ":", "output_handle", "=", "open", "(", "output_path", ",", "\"w\"", ")", "opened_files", "[", "bin_file", "]", "=", "output_handle", "output_handle", ".", "write", "(", "\"@{}\\n\"", ".", "format", "(", "my_read_name", ")", ")", "else", ":", "try", ":", "read_names", "[", "my_read_name", "]", ".", "append", "(", "bin_file", ")", "except", "KeyError", ":", "read_names", "[", "my_read_name", "]", "=", "[", 
"bin_file", "]", "for", "file_handle", "in", "opened_files", ".", "values", "(", ")", ":", "file_handle", ".", "close", "(", ")", "# Return unpaired file names for pair_unpaired_reads() to process", "if", "save_memory", ":", "return", "opened_files", ".", "keys", "(", ")", "else", ":", "return", "read_names" ]
Generate reads from ambiguous alignment file

Extract reads found to be mapping an input FASTA bin. If one read maps, the whole pair is extracted and written to the output paired-end FASTQ files. Reads that mapped and weren't part of a pair are kept in a third 'single' file for people who need it (e.g. to get extra paired reads by fetching the opposite one from the original FASTQ library).

Parameters
----------
sam_merged : file, str or pathlib.Path
    The input alignment file in SAM/BAM format to be processed.
output_dir : str or pathlib.Path
    The output directory to write the network and chunk data into.
parameters : dict, optional
    Parameters for the network-to-read conversion, similar to
    alignment_to_network.
save_memory : bool, optional
    Whether to write the read names to separate files instead of keeping
    them in memory, which takes longer but may prevent out-of-memory
    crashes. Default is True.
`*bin_fasta` : file, str or pathlib.Path
    The bin FASTA files with appropriately named records.

Returns
-------
The names of the bins for which read-name files were written if
save_memory is True, and a dictionary mapping each read name to the
list of bins it matched otherwise.

Note
----
This will throw an IOError ('close failed in file object destructor')
on exit with older versions of pysam. It's harmless, but you may
consider upgrading to a later version of pysam if it comes up in a
pipeline.
[ "Generate", "reads", "from", "ambiguous", "alignment", "file" ]
0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/network.py#L438-L574
train
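A hypothetical call for the record above might look like the sketch below. The import path is inferred from the repo layout, and the input file names are placeholders that do not ship with metaTOR:

# Hypothetical invocation of alignment_to_reads(); all file names below
# are placeholders.
from metator.scripts.network import DEFAULT_PARAMETERS, alignment_to_reads

bin_names = alignment_to_reads(
    "merged_alignment.bam",  # merged SAM/BAM alignment (placeholder name)
    "output_dir",            # must already exist
    DEFAULT_PARAMETERS,      # supplies chunk_size and mapq_threshold
    True,                    # save_memory: stream names to .readnames files
    "bin1.fa",               # one or more bin FASTA files
    "bin2.fa",
)
# With save_memory=True the call returns the bin files that received
# reads (their .readnames files land in output_dir); with False it
# instead returns {read_name: [bin files it matched]}.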
Godley/MuseParse
MuseParse/classes/Input/MxmlParser.py
MxmlParser.ResetHandler
def ResetHandler(self, name):
    '''
    Method which assigns the handler belonging to the tag encountered before the current one,
    or else sets it to None

    :param name: name of the latest tag

    :return:
    '''
    if name in self.tags:
        if len(self.tags) > 1:
            key = len(self.tags) - 2
            self.handler = None
            while key >= 0:
                if self.tags[key] in self.structure:
                    self.handler = self.structure[self.tags[key]]
                    break
                key -= 1
        else:
            self.handler = None
python
def ResetHandler(self, name):
    '''
    Method which assigns the handler belonging to the tag encountered before the current one,
    or else sets it to None

    :param name: name of the latest tag

    :return:
    '''
    if name in self.tags:
        if len(self.tags) > 1:
            key = len(self.tags) - 2
            self.handler = None
            while key >= 0:
                if self.tags[key] in self.structure:
                    self.handler = self.structure[self.tags[key]]
                    break
                key -= 1
        else:
            self.handler = None
[ "def", "ResetHandler", "(", "self", ",", "name", ")", ":", "if", "name", "in", "self", ".", "tags", ":", "if", "len", "(", "self", ".", "tags", ")", ">", "1", ":", "key", "=", "len", "(", "self", ".", "tags", ")", "-", "2", "self", ".", "handler", "=", "None", "while", "key", ">=", "0", ":", "if", "self", ".", "tags", "[", "key", "]", "in", "self", ".", "structure", ":", "self", ".", "handler", "=", "self", ".", "structure", "[", "self", ".", "tags", "[", "key", "]", "]", "break", "key", "-=", "1", "else", ":", "self", ".", "handler", "=", "None" ]
Method which assigns the handler belonging to the tag encountered before the current one, or else sets it to None :param name: name of the latest tag :return:
[ "Method", "which", "assigns", "handler", "to", "the", "tag", "encountered", "before", "the", "current", "or", "else", "sets", "it", "to", "None" ]
23cecafa1fdc0f2d6a87760553572b459f3c9904
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/Input/MxmlParser.py#L205-L225
train
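To make the stack walk concrete, here is a standalone toy with invented tags and handlers. It mirrors the loop in ResetHandler without touching MuseParse's actual parser state:

# Toy illustration of the ResetHandler stack walk: starting one slot
# below the top of the tag stack, find the nearest enclosing tag that
# has a registered handler. Tags and handler names are invented.
structure = {"note": "handle_note", "pitch": "handle_pitch"}
tags = ["score", "note", "pitch", "octave"]  # 'octave' just closed

handler = None
key = len(tags) - 2          # skip the tag that just closed
while key >= 0:
    if tags[key] in structure:
        handler = structure[tags[key]]
        break
    key -= 1
print(handler)               # -> 'handle_pitch'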
uberVU/mongo-pool
mongo_pool/mongo_pool.py
MongoPool.get_cluster
def get_cluster(self, label):
    """Returns a connection to a mongo cluster.

    Args:
        label (string): the label of a cluster.

    Returns:
        A connection to the cluster labeled with label.

    Raises:
        AttributeError: if there is no cluster with the given label in
            the config
    """
    for cluster in self._clusters:
        if label == cluster['label']:
            return self._get_connection(cluster)
    raise AttributeError('No such cluster %s.' % label)
python
def get_cluster(self, label):
    """Returns a connection to a mongo cluster.

    Args:
        label (string): the label of a cluster.

    Returns:
        A connection to the cluster labeled with label.

    Raises:
        AttributeError: if there is no cluster with the given label in
            the config
    """
    for cluster in self._clusters:
        if label == cluster['label']:
            return self._get_connection(cluster)
    raise AttributeError('No such cluster %s.' % label)
[ "def", "get_cluster", "(", "self", ",", "label", ")", ":", "for", "cluster", "in", "self", ".", "_clusters", ":", "if", "label", "==", "cluster", "[", "'label'", "]", ":", "return", "self", ".", "_get_connection", "(", "cluster", ")", "raise", "AttributeError", "(", "'No such cluster %s.'", "%", "label", ")" ]
Returns a connection to a mongo cluster.

Args:
    label (string): the label of a cluster.

Returns:
    A connection to the cluster labeled with label.

Raises:
    AttributeError: if there is no cluster with the given label in the config
[ "Returns", "a", "connection", "to", "a", "mongo", "-", "clusters", "." ]
286d1d8e0b3c17d5d7d4860487fe69358941067d
https://github.com/uberVU/mongo-pool/blob/286d1d8e0b3c17d5d7d4860487fe69358941067d/mongo_pool/mongo_pool.py#L26-L42
train
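A minimal usage sketch for this record. The config shape is a guess based on get_cluster() reading cluster['label']; check the mongo-pool README for the real schema before relying on it:

# Hypothetical usage; config entries and the import path are assumptions.
from mongo_pool import MongoPool  # import path assumed from the repo layout

config = [
    {"label": "events", "host": "127.0.0.1", "port": 27017, "dbpath": "events"},
]
pool = MongoPool(config)

events = pool.get_cluster("events")   # connection for the matching label
try:
    pool.get_cluster("missing")
except AttributeError as exc:
    print(exc)                        # -> No such cluster missing.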