Dataset schema: repo (string, 7–55 chars), path (string, 4–127), func_name (string, 1–88), original_string (string, 75–19.8k), language (string, 1 class), code (string, 75–19.8k), code_tokens (list), docstring (string, 3–17.3k), docstring_tokens (list), sha (string, 40), url (string, 87–242), partition (string, 1 class).
greenelab/PathCORE-T
pathcore/network.py
CoNetwork._edges_from_permutation
def _edges_from_permutation(self, feature_pathway_dict):
    """Given a dictionary mapping each feature to the pathways
    overrepresented in the feature, build a CoNetwork by creating edges
    for every pairwise combination of pathways in a feature.
    """
    network_edges = {}
    for feature, pathway_list in feature_pathway_dict.items():
        for i in range(len(pathway_list)):
            for j in range(i + 1, len(pathway_list)):
                vertex_i = pathway_list[i]
                vertex_j = pathway_list[j]
                new_edge = self.edge_tuple(vertex_i, vertex_j)
                if new_edge not in network_edges:
                    network_edges[new_edge] = []
                network_edges[new_edge].append(feature)
    self._augment_network(network_edges)
python
Given a dictionary mapping each feature to the pathways overrepresented in the feature, build a CoNetwork by creating edges for every pairwise combination of pathways in a feature.
[ "Given", "a", "dictionary", "mapping", "each", "feature", "to", "the", "pathways", "overrepresented", "in", "the", "feature", "build", "a", "CoNetwork", "by", "creating", "edges", "for", "every", "pairwise", "combination", "of", "pathways", "in", "a", "feature", "." ]
9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c
https://github.com/greenelab/PathCORE-T/blob/9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c/pathcore/network.py#L522-L537
train
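The nested index loops above enumerate every unordered pair of pathways within a feature. As a standalone sketch, the same bookkeeping is commonly written with itertools.combinations and a defaultdict; edge_tuple is replaced here by a plain sorted tuple, which is an assumption about how CoNetwork canonicalizes edge endpoints:

from collections import defaultdict
from itertools import combinations

def edges_from_features(feature_pathway_dict):
    # edge -> list of features in which both pathways co-occur
    network_edges = defaultdict(list)
    for feature, pathways in feature_pathway_dict.items():
        # sorted tuple stands in for CoNetwork.edge_tuple's canonical form
        for a, b in combinations(pathways, 2):
            network_edges[tuple(sorted((a, b)))].append(feature)
    return dict(network_edges)

print(edges_from_features({0: ['p1', 'p2', 'p3'], 1: ['p2', 'p1']}))
# {('p1', 'p2'): [0, 1], ('p1', 'p3'): [0], ('p2', 'p3'): [0]}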
pyQode/pyqode.cobol
pyqode/cobol/api/folding.py
CobolFoldDetector.normalize_text
def normalize_text(self, text):
    """
    Normalize text: when fixed format is on, replace the first
    6 chars (the sequence number area) with spaces.
    """
    if not self.editor.free_format:
        text = ' ' * 6 + text[6:]
    return text.upper()
python
Normalize text: when fixed format is on, replace the first 6 chars (the sequence number area) with spaces.
[ "Normalize", "text", "when", "fixed", "format", "is", "ON", "replace", "the", "first", "6", "chars", "by", "a", "space", "." ]
eedae4e320a4b2d0c44abb2c3061091321648fb7
https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/api/folding.py#L43-L49
train
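A standalone restatement of the fixed-format branch, just to show its effect on a numbered source line (the sample line is invented):

def blank_sequence_area(line, free_format=False):
    # Fixed-format COBOL reserves columns 1-6 for sequence numbers;
    # blank them so they cannot interfere with later parsing.
    return line if free_format else ' ' * 6 + line[6:]

print(blank_sequence_area('000100 IDENTIFICATION DIVISION.'))
# -> columns 1-6 are now blank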
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/model/neighborhood_network.py
NeighborhoodNetwork.get_neighborhood_network
def get_neighborhood_network(self, node_name: str, order: int = 1) -> Graph:
    """Get the neighborhood graph of a node.

    :param str node_name: Node whose neighborhood graph is requested.
    :param int order: Order (number of hops) of the neighborhood.
    :return Graph: Neighborhood graph
    """
    logger.info("In get_neighborhood_network()")
    neighbors = list(self.get_neighbor_names(node_name, order))
    neighbor_network = self.graph.copy()
    neighbor_network.delete_vertices(self.graph.vs.select(name_notin=neighbors))
    return neighbor_network
python
Get the neighborhood graph of a node. :param str node_name: Node whose neighborhood graph is requested. :param int order: Order (number of hops) of the neighborhood. :return Graph: Neighborhood graph
[ "Get", "the", "neighborhood", "graph", "of", "a", "node", "." ]
4d7b6713485f2d0a0957e6457edc1b1b5a237460
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/model/neighborhood_network.py#L26-L36
train
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/model/neighborhood_network.py
NeighborhoodNetwork.get_neighbor_names
def get_neighbor_names(self, node_name: str, order: int = 1) -> list:
    """Get the names of all neighbors of a node, and the node itself.

    :param node_name: Node whose neighbor names are requested.
    :param order: Order (number of hops) of the neighborhood.
    :return: A list of names of all neighbors of a node, and the node itself.
    """
    logger.info("In get_neighbor_names()")
    node = self.graph.vs.find(name=node_name)
    # igraph's neighborhood() already includes the seed vertex itself,
    # so node_name can appear twice in the returned list.
    neighbors = self.graph.neighborhood(node, order=order)
    names = self.graph.vs[neighbors]["name"]
    names.append(node_name)
    return list(names)
python
Get the names of all neighbors of a node, and the node itself. :param node_name: Node whose neighbor names are requested. :param order: Order (number of hops) of the neighborhood. :return: A list of names of all neighbors of a node, and the node itself.
[ "Get", "the", "names", "of", "all", "neighbors", "of", "a", "node", "and", "the", "node", "itself", "." ]
4d7b6713485f2d0a0957e6457edc1b1b5a237460
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/model/neighborhood_network.py#L38-L49
train
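Both helpers lean on python-igraph's neighborhood machinery. A minimal self-contained sketch of the same calls, assuming the python-igraph package is installed (the toy graph is a 4-vertex path):

import igraph

g = igraph.Graph(edges=[(0, 1), (1, 2), (2, 3)])
g.vs['name'] = ['a', 'b', 'c', 'd']

node = g.vs.find(name='b')
idx = g.neighborhood(node, order=1)   # includes the seed vertex itself
print(g.vs[idx]['name'])              # e.g. ['b', 'a', 'c']

# Induced neighborhood subgraph, as in get_neighborhood_network()
sub = g.copy()
sub.delete_vertices(sub.vs.select(name_notin=g.vs[idx]['name']))
print(sub.vs['name'])                 # ['a', 'b', 'c']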
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/model/neighborhood_network.py
NeighborhoodNetwork.get_neighborhood_overlap
def get_neighborhood_overlap(self, node1, node2, connection_type=None):
    """Get the intersection of two nodes' neighborhoods.

    Neighborhood is defined by parameter connection_type.

    :param Vertex node1: First node.
    :param Vertex node2: Second node.
    :param Optional[str] connection_type: One of direct or second-degree.
     Defaults to direct.
    :return: Overlap of the nodes' neighborhoods.
    """
    if connection_type is None or connection_type == "direct":
        order = 1
    elif connection_type == "second-degree":
        order = 2
    else:
        raise Exception(
            "Invalid option: {}. Valid options are direct and second-degree".format(
                connection_type)
        )
    neighbors1 = self.graph.neighborhood(node1, order=order)
    neighbors2 = self.graph.neighborhood(node2, order=order)
    return set(neighbors1).intersection(neighbors2)
python
Get the intersection of two nodes' neighborhoods. Neighborhood is defined by parameter connection_type. :param Vertex node1: First node. :param Vertex node2: Second node. :param Optional[str] connection_type: One of direct or second-degree. Defaults to direct. :return: Overlap of the nodes' neighborhoods.
[ "Get", "the", "intersection", "of", "two", "nodes", "s", "neighborhoods", "." ]
4d7b6713485f2d0a0957e6457edc1b1b5a237460
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/model/neighborhood_network.py#L51-L72
train
inveniosoftware/invenio-query-parser
invenio_query_parser/contrib/spires/parser.py
SpiresSmartValue.parse
def parse(cls, parser, text, pos):  # pylint: disable=W0613
    """Match simple values, excluding some keywords like 'and' and 'or'."""
    if not text.strip():
        return text, SyntaxError("Invalid value")

    class Rule(object):
        grammar = attr('value', SpiresSimpleValue), omit(re.compile(".*"))

    try:
        tree = pypeg2.parse(text, Rule, whitespace="")
    except SyntaxError:
        return text, SyntaxError("Expected %r" % cls)
    else:
        r = tree.value
        if r.value.lower() in ('and', 'or', 'not'):
            return text, SyntaxError("Invalid value %s" % r.value)
        return text[len(r.value):], r
python
Match simple values, excluding some keywords like 'and' and 'or'.
[ "Match", "simple", "values", "excluding", "some", "Keywords", "like", "and", "and", "or" ]
21a2c36318003ff52d2e18e7196bb420db8ecb4b
https://github.com/inveniosoftware/invenio-query-parser/blob/21a2c36318003ff52d2e18e7196bb420db8ecb4b/invenio_query_parser/contrib/spires/parser.py#L64-L82
train
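The method follows the pyPEG2 convention for custom parse hooks visible above: return (remaining_text, result) on success, or (original_text, SyntaxError(...)) to report failure without raising. A toy, library-free sketch of that contract (the names here are illustrative):

import string

def parse_word(text, keywords=('and', 'or', 'not')):
    # Consume a leading alphabetic word unless it is a reserved keyword.
    word = ''
    for ch in text:
        if ch not in string.ascii_letters:
            break
        word += ch
    if not word:
        return text, SyntaxError('Invalid value')
    if word.lower() in keywords:
        return text, SyntaxError('Invalid value %s' % word)
    return text[len(word):], word

print(parse_word('title boson'))   # (' boson', 'title')
print(parse_word('and boson'))     # ('and boson', SyntaxError('Invalid value and'))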
pyQode/pyqode.cobol
pyqode/cobol/api/pic.py
get_field_infos
def get_field_infos(code, free_format):
    """
    Gets the list of PIC field information found in the given code.

    :param code: code to parse
    :returns: the list of pic fields info found in the specified text.
    """
    offset = 0
    field_infos = []
    lines = _clean_code(code)
    previous_offset = 0
    for row in process_cobol(lines, free_format):
        fi = PicFieldInfo()
        fi.name = row["name"]
        fi.level = row["level"]
        fi.pic = row["pic"]
        fi.occurs = row["occurs"]
        fi.redefines = row["redefines"]
        fi.indexed_by = row["indexed_by"]

        # find item that was redefined and use its offset
        if fi.redefines:
            for fib in field_infos:
                if fib.name == fi.redefines:
                    offset = fib.offset

        # level 1 should have their offset set to 1
        if fi.level == 1:
            offset = 1

        # level 78 have no offset
        if fi.level == 78:
            offset = 0

        # level 77 have offset always to 1
        if fi.level == 77:
            offset = 1

        # set item offset
        fi.offset = offset

        # special case: level 88 items share the offset of their parent
        if fi.level == 88:
            fi.offset = previous_offset
        else:
            previous_offset = offset

        field_infos.append(fi)

        # compute offset of next PIC field.
        if row['pic']:
            offset += row['pic_info']['length']

    return field_infos
python
Gets the list of PIC field information found in the given code. :param code: code to parse :returns: the list of pic fields info found in the specified text.
[ "Gets", "the", "list", "of", "pic", "fields", "information", "from", "line", "|start|", "to", "line", "|end|", "." ]
eedae4e320a4b2d0c44abb2c3061091321648fb7
https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/api/pic.py#L47-L103
train
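Stripped of the level-number special cases, the offset bookkeeping reduces to two rules: fields are laid out sequentially by PIC length, and REDEFINES rewinds the cursor to the redefined field's offset. A toy model of just that core (the (name, length, redefines) tuples are made up, not pyqode.cobol's row format):

def compute_offsets(fields):
    # fields: iterable of (name, length, redefines-or-None) tuples
    offsets = {}
    offset = 1
    for name, length, redefines in fields:
        if redefines is not None:
            # REDEFINES overlays storage: rewind to the redefined field
            offset = offsets[redefines]
        offsets[name] = offset
        offset += length
    return offsets

print(compute_offsets([('A', 4, None), ('B', 2, None), ('B2', 2, 'B')]))
# {'A': 1, 'B': 5, 'B2': 5}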
walter426/Python_GoogleMapsApi
GoogleMapsApi/GoogleMapsServiceParser.py
ServiceParser.get_signed_url
def get_signed_url(self, params):
    '''Returns a Premier account signed url.'''
    params['client'] = self.client_id
    url_params = {'protocol': self.protocol, 'domain': self.domain,
                  'service': self.service, 'params': urlencode(params)}
    secret = base64.urlsafe_b64decode(self.secret_key)
    url_params['url_part'] = (
        '/maps/api/%(service)s/json?%(params)s' % url_params)
    # Note: Python 2 era code; under Python 3, hmac.new() requires bytes.
    signature = hmac.new(secret, url_params['url_part'], hashlib.sha1)
    url_params['signature'] = base64.urlsafe_b64encode(signature.digest())
    return ('%(protocol)s://%(domain)s%(url_part)s'
            '&signature=%(signature)s' % url_params)
python
Returns a Premier account signed url.
[ "Returns", "a", "Premier", "account", "signed", "url", "." ]
4832b293a0027446941a5f00ecc66256f92ddbce
https://github.com/walter426/Python_GoogleMapsApi/blob/4832b293a0027446941a5f00ecc66256f92ddbce/GoogleMapsApi/GoogleMapsServiceParser.py#L85-L98
train
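The signing scheme itself, HMAC-SHA1 over the path-plus-query using a URL-safe base64 key, ports directly to Python 3 with explicit bytes handling. A standalone sketch (the key and query values are dummies):

import base64
import hashlib
import hmac
from urllib.parse import urlencode

def sign_url(path_and_query: str, b64_key: str) -> str:
    key = base64.urlsafe_b64decode(b64_key)
    digest = hmac.new(key, path_and_query.encode('utf-8'), hashlib.sha1).digest()
    return base64.urlsafe_b64encode(digest).decode('ascii')

query = urlencode({'address': 'Berlin', 'client': 'demo-client'})
part = '/maps/api/geocode/json?' + query
print('https://maps.googleapis.com' + part
      + '&signature=' + sign_url(part, 'c2VjcmV0LWtleQ=='))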
walter426/Python_GoogleMapsApi
GoogleMapsApi/GoogleMapsServiceParser.py
ServiceParser.parse_json
def parse_json(self, page):
    '''Returns json feed.'''
    if not isinstance(page, basestring):  # Python 2: str or unicode
        page = util.decode_page(page)
    self.doc = json.loads(page)
    results = self.doc.get(self.result_name, [])

    if not results:
        self.check_status(self.doc.get('status'))
        return None

    return results
python
Returns json feed.
[ "Returns", "json", "feed", "." ]
4832b293a0027446941a5f00ecc66256f92ddbce
https://github.com/walter426/Python_GoogleMapsApi/blob/4832b293a0027446941a5f00ecc66256f92ddbce/GoogleMapsApi/GoogleMapsServiceParser.py#L112-L125
train
AlejandroFrias/case-conversion
case_conversion/case_parse.py
_determine_case
def _determine_case(was_upper, words, string):
    """
    Determine case type of string.

    Arguments:
        was_upper -- whether the original string was all upper-case
        words -- the list of words the string was split into
        string -- the original string

    Returns:
        - upper: All words are upper-case.
        - lower: All words are lower-case.
        - pascal: All words are title-case or upper-case. Note that the
          string may still have separators.
        - camel: First word is lower-case, the rest are title-case or
          upper-case. The string may still have separators.
        - mixed: Any other mixing of word casing. Never occurs if there
          are no separators.
        - unknown: String contains no words.
    """
    case_type = 'unknown'
    if was_upper:
        case_type = 'upper'
    elif string.islower():
        case_type = 'lower'
    elif len(words) > 0:
        camel_case = words[0].islower()
        pascal_case = words[0].istitle() or words[0].isupper()
        if camel_case or pascal_case:
            for word in words[1:]:
                c = word.istitle() or word.isupper()
                camel_case &= c
                pascal_case &= c
                if not c:
                    break
        if camel_case:
            case_type = 'camel'
        elif pascal_case:
            case_type = 'pascal'
        else:
            case_type = 'mixed'
    return case_type
python
Determine case type of string. Arguments: was_upper -- whether the original string was all upper-case. words -- the list of words the string was split into. string -- the original string. Returns: - upper: All words are upper-case. - lower: All words are lower-case. - pascal: All words are title-case or upper-case. Note that the string may still have separators. - camel: First word is lower-case, the rest are title-case or upper-case. The string may still have separators. - mixed: Any other mixing of word casing. Never occurs if there are no separators. - unknown: String contains no words.
[ "Determine", "case", "type", "of", "string", "." ]
79ebce1403fbdac949b2da21b8f6fbe3234ddb31
https://github.com/AlejandroFrias/case-conversion/blob/79ebce1403fbdac949b2da21b8f6fbe3234ddb31/case_conversion/case_parse.py#L15-L60
train
AlejandroFrias/case-conversion
case_conversion/case_parse.py
_advanced_acronym_detection
def _advanced_acronym_detection(s, i, words, acronyms):
    """
    Detect acronyms by checking against a list of acronyms.

    Check a run of words represented by the range [s, i].
    Return last index of new word groups.
    """
    # Combine each letter into single string.
    acstr = ''.join(words[s:i])

    # List of ranges representing found acronyms.
    range_list = []
    # Set of remaining letters.
    not_range = set(range(len(acstr)))

    # Search for each acronym in acstr.
    for acronym in acronyms:
        # TODO: Sanitize acronyms to include only letters.
        rac = regex.compile(unicode(acronym))

        # Loop until all instances of the acronym are found,
        # instead of just the first.
        n = 0
        while True:
            m = rac.search(acstr, n)
            if not m:
                break

            a, b = m.start(), m.end()
            n = b

            # Make sure found acronym doesn't overlap with others.
            ok = True
            for r in range_list:
                if a < r[1] and b > r[0]:
                    ok = False
                    break

            if ok:
                range_list.append((a, b))
                for j in xrange(a, b):
                    not_range.remove(j)

    # Add remaining letters as ranges.
    for nr in not_range:
        range_list.append((nr, nr + 1))

    # No ranges will overlap, so it's safe to sort by lower bound,
    # which sort() will do by default.
    range_list.sort()

    # Remove original letters in word list.
    for _ in xrange(s, i):
        del words[s]

    # Replace them with new word grouping.
    for j in xrange(len(range_list)):
        r = range_list[j]
        words.insert(s + j, acstr[r[0]:r[1]])

    return s + len(range_list) - 1
python
Detect acronyms by checking against a list of acronyms. Check a run of words represented by the range [s, i]. Return last index of new word groups.
[ "Detect", "acronyms", "by", "checking", "against", "a", "list", "of", "acronyms", "." ]
79ebce1403fbdac949b2da21b8f6fbe3234ddb31
https://github.com/AlejandroFrias/case-conversion/blob/79ebce1403fbdac949b2da21b8f6fbe3234ddb31/case_conversion/case_parse.py#L63-L123
train
AlejandroFrias/case-conversion
case_conversion/case_parse.py
_simple_acronym_detection
def _simple_acronym_detection(s, i, words, *args):
    """Detect acronyms based on runs of upper-case letters."""
    # Combine each letter into a single string.
    acronym = ''.join(words[s:i])

    # Remove original letters in word list.
    for _ in xrange(s, i):
        del words[s]

    # Replace them with new word grouping.
    words.insert(s, ''.join(acronym))

    return s
python
Detect acronyms based on runs of upper-case letters.
[ "Detect", "acronyms", "based", "on", "runs", "of", "upper", "-", "case", "letters", "." ]
79ebce1403fbdac949b2da21b8f6fbe3234ddb31
https://github.com/AlejandroFrias/case-conversion/blob/79ebce1403fbdac949b2da21b8f6fbe3234ddb31/case_conversion/case_parse.py#L126-L138
train
AlejandroFrias/case-conversion
case_conversion/case_parse.py
_sanitize_acronyms
def _sanitize_acronyms(unsafe_acronyms):
    """
    Check acronyms against regex.

    Normalize valid acronyms to upper-case.
    If an invalid acronym is encountered raise InvalidAcronymError.
    """
    valid_acronym = regex.compile(u'^[\p{Ll}\p{Lu}\p{Nd}]+$')
    acronyms = []
    for a in unsafe_acronyms:
        if valid_acronym.match(a):
            acronyms.append(a.upper())
        else:
            raise InvalidAcronymError(a)
    return acronyms
python
Check acronyms against regex. Normalize valid acronyms to upper-case. If an invalid acronym is encountered raise InvalidAcronymError.
[ "Check", "acronyms", "against", "regex", "." ]
79ebce1403fbdac949b2da21b8f6fbe3234ddb31
https://github.com/AlejandroFrias/case-conversion/blob/79ebce1403fbdac949b2da21b8f6fbe3234ddb31/case_conversion/case_parse.py#L150-L164
train
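The pattern relies on the third-party regex package, whose \p{...} Unicode property classes the standard-library re module does not support. A quick standalone check (assumes regex is installed):

import regex

# lower-case letters, upper-case letters, and decimal digits only
valid_acronym = regex.compile(r'^[\p{Ll}\p{Lu}\p{Nd}]+$')

for candidate in ('HTTP', 'Éé42', 'not-ok'):
    print(candidate, bool(valid_acronym.match(candidate)))
# HTTP True, Éé42 True, not-ok False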
AlejandroFrias/case-conversion
case_conversion/case_parse.py
_normalize_words
def _normalize_words(words, acronyms):
    """Normalize case of each word to PascalCase."""
    for i, _ in enumerate(words):
        # if detect_acronyms:
        if words[i].upper() in acronyms:
            # Convert known acronyms to upper-case.
            words[i] = words[i].upper()
        else:
            # Fallback behavior: Preserve case on upper-case words.
            if not words[i].isupper():
                words[i] = words[i].capitalize()
    return words
python
Normalize case of each word to PascalCase.
[ "Normalize", "case", "of", "each", "word", "to", "PascalCase", "." ]
79ebce1403fbdac949b2da21b8f6fbe3234ddb31
https://github.com/AlejandroFrias/case-conversion/blob/79ebce1403fbdac949b2da21b8f6fbe3234ddb31/case_conversion/case_parse.py#L167-L178
train
AlejandroFrias/case-conversion
case_conversion/case_parse.py
_separate_words
def _separate_words(string):
    """
    Segment string on separator into list of words.

    Arguments:
        string -- the string we want to process

    Returns:
        words -- list of words the string got minced to
        separator -- the separator char intersecting words
        was_upper -- whether string happened to be upper-case
    """
    words = []
    separator = ""

    # Index of current character. Initially 1 because we don't want to check
    # if the 0th character is a boundary.
    i = 1
    # Index of first character in a sequence
    s = 0
    # Previous character.
    p = string[0:1]

    # Treat an all-caps string as lower-case, so that every letter isn't
    # counted as a boundary.
    was_upper = False
    if string.isupper():
        string = string.lower()
        was_upper = True

    # Iterate over each character, checking for boundaries, or places where
    # the string should be divided.
    while i <= len(string):
        c = string[i:i + 1]
        split = False
        if i < len(string):
            # Detect upper-case letter as boundary.
            if UPPER.match(c):
                split = True
            # Detect transition from separator to not separator.
            elif NOTSEP.match(c) and SEP.match(p):
                split = True
            # Detect transition not separator to separator.
            elif SEP.match(c) and NOTSEP.match(p):
                split = True
        else:
            # The loop goes one extra iteration so that it can handle the
            # remaining text after the last boundary.
            split = True

        if split:
            if NOTSEP.match(p):
                words.append(string[s:i])
            else:
                # The string contains at least one separator.
                # Use the first one as the string's primary separator.
                if not separator:
                    separator = string[s:s + 1]

                # Use None to indicate a separator in the word list.
                words.append(None)
                # If separators weren't included in the list, then breaks
                # between upper-case sequences ("AAA_BBB") would be
                # disregarded; the letter-run detector would count them as one
                # sequence ("AAABBB").
            s = i

        i += 1
        p = c

    return words, separator, was_upper
python
Segment string on separator into list of words. Arguments: string -- the string we want to process Returns: words -- list of words the string got minced to separator -- the separator char intersecting words was_upper -- whether string happened to be upper-case
[ "Segment", "string", "on", "separator", "into", "list", "of", "words", "." ]
79ebce1403fbdac949b2da21b8f6fbe3234ddb31
https://github.com/AlejandroFrias/case-conversion/blob/79ebce1403fbdac949b2da21b8f6fbe3234ddb31/case_conversion/case_parse.py#L181-L252
train
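For comparison, a much smaller boundary splitter can be written as a single regular expression: runs of upper-case letters, capitalized words, lower-case runs, or digits. This is a rough standalone approximation of the character walk above, not the library's algorithm (notably, it drops separators instead of tracking them):

import re

WORD_RE = re.compile(r'[A-Z]+(?![a-z])|[A-Z][a-z]*|[a-z]+|\d+')

def rough_split(s):
    return WORD_RE.findall(s)

print(rough_split('fooBar_baz'))    # ['foo', 'Bar', 'baz']
print(rough_split('HTTPResponse'))  # ['HTTP', 'Response']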
AlejandroFrias/case-conversion
case_conversion/case_parse.py
parse_case
def parse_case(string, acronyms=None, preserve_case=False):
    """
    Parse a string into a list of words.

    Also returns the case type, which can be one of the following:

        - upper: All words are upper-case.
        - lower: All words are lower-case.
        - pascal: All words are title-case or upper-case. Note that the
          string may still have separators.
        - camel: First word is lower-case, the rest are title-case or
          upper-case. The string may still have separators.
        - mixed: Any other mixing of word casing. Never occurs if there
          are no separators.
        - unknown: String contains no words.

    Also returns the first separator character, or an empty string if
    there isn't one.
    """
    words, separator, was_upper = _separate_words(string)

    if acronyms:
        # Use advanced acronym detection with list
        acronyms = _sanitize_acronyms(acronyms)
        check_acronym = _advanced_acronym_detection
    else:
        acronyms = []
        # Fallback to simple acronym detection.
        check_acronym = _simple_acronym_detection

    # Letter-run detector

    # Index of current word.
    i = 0
    # Index of first letter in run.
    s = None

    # Find runs of single upper-case letters.
    while i < len(words):
        word = words[i]
        if word is not None and UPPER.match(word):
            if s is None:
                s = i
        elif s is not None:
            i = check_acronym(s, i, words, acronyms) + 1
            s = None
        i += 1

    if s is not None:
        check_acronym(s, i, words, acronyms)

    # Separators are no longer needed, so they can be removed. They *should*
    # be removed, since it's supposed to be a *word* list.
    words = [w for w in words if w is not None]

    # Determine case type.
    case_type = _determine_case(was_upper, words, string)

    if preserve_case:
        if was_upper:
            words = [w.upper() for w in words]
    else:
        words = _normalize_words(words, acronyms)

    return words, case_type, separator
python
Parse a string into a list of words. Also returns the case type, which can be one of the following: - upper: All words are upper-case. - lower: All words are lower-case. - pascal: All words are title-case or upper-case. Note that the string may still have separators. - camel: First word is lower-case, the rest are title-case or upper-case. The string may still have separators. - mixed: Any other mixing of word casing. Never occurs if there are no separators. - unknown: String contains no words. Also returns the first separator character, or an empty string if there isn't one.
[ "Parse", "a", "stringiable", "into", "a", "list", "of", "words", "." ]
79ebce1403fbdac949b2da21b8f6fbe3234ddb31
https://github.com/AlejandroFrias/case-conversion/blob/79ebce1403fbdac949b2da21b8f6fbe3234ddb31/case_conversion/case_parse.py#L255-L319
train
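Reading parse_case end to end: a camelCase input splits at upper-case boundaries, the words are normalized to PascalCase, and the case type comes back as 'camel' with an empty separator. A usage sketch, assuming the module is importable under the repository layout shown above:

from case_conversion.case_parse import parse_case

print(parse_case('fooBarString'))
# expected per the code above: (['Foo', 'Bar', 'String'], 'camel', '')

print(parse_case('HTTP_error', acronyms=['HTTP']))
# expected: (['HTTP', 'Error'], 'mixed', '_')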
jpscaletti/authcode
authcode/auth_views_mixin.py
ViewsMixin.send_email
def send_email(self, user, subject, msg):
    """Should be overridden during setup"""
    print('To:', user)
    print('Subject:', subject)
    print(msg)
python
Should be overridden during setup
[ "Should", "be", "overwritten", "in", "the", "setup" ]
91529b6d0caec07d1452758d937e1e0745826139
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/auth_views_mixin.py#L67-L71
train
BernardFW/bernard
src/bernard/i18n/intents.py
IntentsDb.update
def update(self, new_data: IntentDict):
    """
    Receive an update from the loaders.
    """
    for locale, data in new_data.items():
        if locale not in self.dict:
            self.dict[locale] = {}
        self.dict[locale].update(data)
python
Receive an update from the loaders.
[ "Receive", "an", "update", "from", "the", "loaders", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/i18n/intents.py#L52-L61
train
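The membership check plus empty-dict assignment collapses into dict.setdefault, a common idiom for this merge pattern. A standalone equivalent:

def merge_locales(dst: dict, new_data: dict) -> None:
    for locale, data in new_data.items():
        # setdefault returns the existing per-locale dict, creating it if absent
        dst.setdefault(locale, {}).update(data)

intents = {}
merge_locales(intents, {'en': {'greet': ['hello']}})
merge_locales(intents, {'en': {'bye': ['bye']}, 'fr': {'greet': ['salut']}})
print(intents)
# {'en': {'greet': ['hello'], 'bye': ['bye']}, 'fr': {'greet': ['salut']}}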
BernardFW/bernard
src/bernard/i18n/intents.py
IntentsDb.get
def get(self, key: Text, locale: Optional[Text]) -> List[Tuple[Text, ...]]:
    """
    Get a single set of intents.
    """
    locale = self.choose_locale(locale)
    return self.dict[locale][key]
python
Get a single set of intents.
[ "Get", "a", "single", "set", "of", "intents", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/i18n/intents.py#L63-L70
train
BernardFW/bernard
src/bernard/i18n/intents.py
Intent.strings
async def strings(self, request: Optional['Request']=None) \
        -> List[Tuple[Text, ...]]:
    """
    For the given request, find the list of strings of that intent.
    If the intent does not exist, it will raise a KeyError.
    """
    if request:
        locale = await request.get_locale()
    else:
        locale = None

    return self.db.get(self.key, locale)
python
For the given request, find the list of strings of that intent. If the intent does not exist, it will raise a KeyError.
[ "For", "the", "given", "request", "find", "the", "list", "of", "strings", "of", "that", "intent", ".", "If", "the", "intent", "does", "not", "exist", "it", "will", "raise", "a", "KeyError", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/i18n/intents.py#L90-L102
train
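Tying the three snippets together: strings() awaits the request's locale (or passes None) and delegates the lookup to the database. A runnable toy version of that flow, with choose_locale simplified to a hard-coded fallback (an assumption; the real resolution logic lives elsewhere in bernard):

import asyncio

class TinyIntentsDb:
    def __init__(self, data, default='en'):
        self.dict = data
        self.default = default

    def choose_locale(self, locale):
        # assumed fallback: use the default when the locale is unknown
        return locale if locale in self.dict else self.default

    def get(self, key, locale):
        return self.dict[self.choose_locale(locale)][key]

class FakeRequest:
    async def get_locale(self):
        return 'fr'

async def main():
    db = TinyIntentsDb({'en': {'yes': [('yes',)]}, 'fr': {'yes': [('oui',)]}})
    print(db.get('yes', await FakeRequest().get_locale()))  # [('oui',)]
    print(db.get('yes', None))                              # [('yes',)]

asyncio.run(main())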
ioos/cc-plugin-ncei
cc_plugin_ncei/util.py
get_unitless_standard_names
def get_unitless_standard_names():
    '''
    Returns a list of valid standard_names that are allowed to be unitless
    '''
    global _UNITLESS_DB
    if _UNITLESS_DB is None:
        with open(resource_filename('cc_plugin_ncei', 'data/unitless.json'), 'r') as f:
            _UNITLESS_DB = json.load(f)
    return _UNITLESS_DB
python
Returns a list of valid standard_names that are allowed to be unitless
[ "Returns", "a", "list", "of", "valid", "standard_names", "that", "are", "allowed", "to", "be", "unitless" ]
963fefd7fa43afd32657ac4c36aad4ddb4c25acf
https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/util.py#L16-L24
train
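The global-plus-None check is a hand-rolled lazy cache; functools.lru_cache expresses the same load-once behavior without module-level state. A sketch (the JSON path here is illustrative):

import json
from functools import lru_cache

@lru_cache(maxsize=1)
def load_unitless_db(path='data/unitless.json'):
    # Loaded on first call; later calls return the cached object.
    with open(path) as f:
        return json.load(f)

One trade-off either way: the cached list is shared and mutable, so callers must treat it as read-only.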
ioos/cc-plugin-ncei
cc_plugin_ncei/util.py
get_lat_variable
def get_lat_variable(nc):
    '''
    Returns the variable for latitude

    :param netCDF4.Dataset nc: an open netcdf dataset object
    '''
    if 'latitude' in nc.variables:
        return 'latitude'
    latitudes = nc.get_variables_by_attributes(standard_name="latitude")
    if latitudes:
        return latitudes[0].name
    return None
python
Returns the variable for latitude :param netCDF4.Dataset nc: an open netcdf dataset object
[ "Returns", "the", "variable", "for", "latitude" ]
963fefd7fa43afd32657ac4c36aad4ddb4c25acf
https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/util.py#L111-L122
train
ioos/cc-plugin-ncei
cc_plugin_ncei/util.py
get_lon_variable
def get_lon_variable(nc):
    '''
    Returns the variable for longitude

    :param netCDF4.Dataset nc: netCDF dataset
    '''
    if 'longitude' in nc.variables:
        return 'longitude'
    longitudes = nc.get_variables_by_attributes(standard_name="longitude")
    if longitudes:
        return longitudes[0].name
    return None
python
def get_lon_variable(nc): ''' Returns the variable for longitude :param netCDF4.Dataset nc: netCDF dataset ''' if 'longitude' in nc.variables: return 'longitude' longitudes = nc.get_variables_by_attributes(standard_name="longitude") if longitudes: return longitudes[0].name return None
[ "def", "get_lon_variable", "(", "nc", ")", ":", "if", "'longitude'", "in", "nc", ".", "variables", ":", "return", "'longitude'", "longitudes", "=", "nc", ".", "get_variables_by_attributes", "(", "standard_name", "=", "\"longitude\"", ")", "if", "longitudes", ":", "return", "longitudes", "[", "0", "]", ".", "name", "return", "None" ]
Returns the variable for longitude :param netCDF4.Dataset nc: netCDF dataset
[ "Returns", "the", "variable", "for", "longitude" ]
963fefd7fa43afd32657ac4c36aad4ddb4c25acf
https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/util.py#L125-L136
train
ioos/cc-plugin-ncei
cc_plugin_ncei/util.py
get_crs_variable
def get_crs_variable(ds): ''' Returns the name of the variable identified by a grid_mapping attribute :param netCDF4.Dataset ds: An open netCDF4 Dataset ''' for var in ds.variables: grid_mapping = getattr(ds.variables[var], 'grid_mapping', '') if grid_mapping and grid_mapping in ds.variables: return grid_mapping return None
python
def get_crs_variable(ds): ''' Returns the name of the variable identified by a grid_mapping attribute :param netCDF4.Dataset ds: An open netCDF4 Dataset ''' for var in ds.variables: grid_mapping = getattr(ds.variables[var], 'grid_mapping', '') if grid_mapping and grid_mapping in ds.variables: return grid_mapping return None
[ "def", "get_crs_variable", "(", "ds", ")", ":", "for", "var", "in", "ds", ".", "variables", ":", "grid_mapping", "=", "getattr", "(", "ds", ".", "variables", "[", "var", "]", ",", "'grid_mapping'", ",", "''", ")", "if", "grid_mapping", "and", "grid_mapping", "in", "ds", ".", "variables", ":", "return", "grid_mapping", "return", "None" ]
Returns the name of the variable identified by a grid_mapping attribute :param netCDF4.Dataset ds: An open netCDF4 Dataset
[ "Returns", "the", "name", "of", "the", "variable", "identified", "by", "a", "grid_mapping", "attribute" ]
963fefd7fa43afd32657ac4c36aad4ddb4c25acf
https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/util.py#L200-L210
train
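A sketch of the grid_mapping lookup, again assuming netCDF4's diskless mode (all names are illustrative):

    from netCDF4 import Dataset
    from cc_plugin_ncei.util import get_crs_variable

    nc = Dataset('crs_demo.nc', 'w', diskless=True)
    nc.createDimension('y', 2)
    nc.createDimension('x', 2)
    nc.createVariable('crs', 'i4')                    # scalar CRS container variable
    temp = nc.createVariable('temp', 'f4', ('y', 'x'))
    temp.grid_mapping = 'crs'                         # attribute names the CRS variable
    print(get_crs_variable(nc))                       # 'crs'
    nc.close()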
ioos/cc-plugin-ncei
cc_plugin_ncei/util.py
is_2d_regular_grid
def is_2d_regular_grid(nc, variable): ''' Returns True if the variable is a 2D Regular grid. :param netCDF4.Dataset nc: An open netCDF dataset :param str variable: name of the variable to check ''' # x(x), y(y), t(t) # X(t, y, x) dims = nc.variables[variable].dimensions cmatrix = coordinate_dimension_matrix(nc) for req in ('x', 'y', 't'): if req not in cmatrix: return False x = get_lon_variable(nc) y = get_lat_variable(nc) t = get_time_variable(nc) if cmatrix['x'] != (x,): return False if cmatrix['y'] != (y,): return False if cmatrix['t'] != (t,): return False # Relaxed dimension ordering if len(dims) == 3 and x in dims and y in dims and t in dims: return True return False
python
def is_2d_regular_grid(nc, variable): ''' Returns True if the variable is a 2D Regular grid. :param netCDF4.Dataset nc: An open netCDF dataset :param str variable: name of the variable to check ''' # x(x), y(y), t(t) # X(t, y, x) dims = nc.variables[variable].dimensions cmatrix = coordinate_dimension_matrix(nc) for req in ('x', 'y', 't'): if req not in cmatrix: return False x = get_lon_variable(nc) y = get_lat_variable(nc) t = get_time_variable(nc) if cmatrix['x'] != (x,): return False if cmatrix['y'] != (y,): return False if cmatrix['t'] != (t,): return False # Relaxed dimension ordering if len(dims) == 3 and x in dims and y in dims and t in dims: return True return False
[ "def", "is_2d_regular_grid", "(", "nc", ",", "variable", ")", ":", "# x(x), y(y), t(t)", "# X(t, y, x)", "dims", "=", "nc", ".", "variables", "[", "variable", "]", ".", "dimensions", "cmatrix", "=", "coordinate_dimension_matrix", "(", "nc", ")", "for", "req", "in", "(", "'x'", ",", "'y'", ",", "'t'", ")", ":", "if", "req", "not", "in", "cmatrix", ":", "return", "False", "x", "=", "get_lon_variable", "(", "nc", ")", "y", "=", "get_lat_variable", "(", "nc", ")", "t", "=", "get_time_variable", "(", "nc", ")", "if", "cmatrix", "[", "'x'", "]", "!=", "(", "x", ",", ")", ":", "return", "False", "if", "cmatrix", "[", "'y'", "]", "!=", "(", "y", ",", ")", ":", "return", "False", "if", "cmatrix", "[", "'t'", "]", "!=", "(", "t", ",", ")", ":", "return", "False", "# Relaxed dimension ordering", "if", "len", "(", "dims", ")", "==", "3", "and", "x", "in", "dims", "and", "y", "in", "dims", "and", "t", "in", "dims", ":", "return", "True", "return", "False" ]
Returns True if the variable is a 2D Regular grid. :param netCDF4.Dataset nc: An open netCDF dataset :param str variable: name of the variable to check
[ "Returns", "True", "if", "the", "variable", "is", "a", "2D", "Regular", "grid", "." ]
963fefd7fa43afd32657ac4c36aad4ddb4c25acf
https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/util.py#L804-L836
train
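A hedged sketch of a dataset that should satisfy the checks above, provided the helpers not shown in this excerpt (coordinate_dimension_matrix, get_time_variable) resolve 'time'/'latitude'/'longitude' as t/y/x:

    from netCDF4 import Dataset
    from cc_plugin_ncei.util import is_2d_regular_grid

    nc = Dataset('grid_demo.nc', 'w', diskless=True)
    for name, size in (('time', 4), ('latitude', 3), ('longitude', 5)):
        nc.createDimension(name, size)
    t = nc.createVariable('time', 'f8', ('time',))
    t.standard_name = 'time'
    t.units = 'seconds since 1970-01-01'
    nc.createVariable('latitude', 'f4', ('latitude',)).standard_name = 'latitude'
    nc.createVariable('longitude', 'f4', ('longitude',)).standard_name = 'longitude'
    nc.createVariable('temp', 'f4', ('time', 'latitude', 'longitude'))
    print(is_2d_regular_grid(nc, 'temp'))   # expected True under those assumptions
    nc.close()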
rsgalloway/grit
grit/server/handler.py
handle_read
def handle_read(repo, **kwargs): """handles reading repo information""" log.info('read: %s %s' %(repo, kwargs)) if type(repo) in [unicode, str]: return {'name': 'Repo', 'desc': 'Welcome to Grit', 'comment': ''} else: return repo.serialize()
python
def handle_read(repo, **kwargs): """handles reading repo information""" log.info('read: %s %s' %(repo, kwargs)) if type(repo) in [unicode, str]: return {'name': 'Repo', 'desc': 'Welcome to Grit', 'comment': ''} else: return repo.serialize()
[ "def", "handle_read", "(", "repo", ",", "*", "*", "kwargs", ")", ":", "log", ".", "info", "(", "'read: %s %s'", "%", "(", "repo", ",", "kwargs", ")", ")", "if", "type", "(", "repo", ")", "in", "[", "unicode", ",", "str", "]", ":", "return", "{", "'name'", ":", "'Repo'", ",", "'desc'", ":", "'Welcome to Grit'", ",", "'comment'", ":", "''", "}", "else", ":", "return", "repo", ".", "serialize", "(", ")" ]
handles reading repo information
[ "handles", "reading", "repo", "information" ]
e6434ad8a1f4ac5d0903ebad630c81f8a5164d78
https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/server/handler.py#L24-L30
train
garenchan/policy
policy/_utils.py
dict_from_object
def dict_from_object(obj: object): """Convert an object into a dictionary with all of its readable attributes.""" # If object is a dict instance, no need to convert. return (obj if isinstance(obj, dict) else {attr: getattr(obj, attr) for attr in dir(obj) if not attr.startswith('_')})
python
def dict_from_object(obj: object): """Convert an object into a dictionary with all of its readable attributes.""" # If object is a dict instance, no need to convert. return (obj if isinstance(obj, dict) else {attr: getattr(obj, attr) for attr in dir(obj) if not attr.startswith('_')})
[ "def", "dict_from_object", "(", "obj", ":", "object", ")", ":", "# If object is a dict instance, no need to convert.", "return", "(", "obj", "if", "isinstance", "(", "obj", ",", "dict", ")", "else", "{", "attr", ":", "getattr", "(", "obj", ",", "attr", ")", "for", "attr", "in", "dir", "(", "obj", ")", "if", "not", "attr", ".", "startswith", "(", "'_'", ")", "}", ")" ]
Convert an object into a dictionary with all of its readable attributes.
[ "Convert", "an", "object", "into", "a", "dictionary", "with", "all", "of", "its", "readable", "attributes", "." ]
7709ae5f371146f8c90380d0877a5e59d731f644
https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_utils.py#L13-L19
train
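Behavior check for the record above (module path per the record's url field):

    from policy._utils import dict_from_object

    class Point:
        kind = 'cartesian'              # class attributes are picked up too
        def __init__(self, x, y):
            self.x = x
            self.y = y

    print(dict_from_object(Point(1, 2)))   # {'kind': 'cartesian', 'x': 1, 'y': 2}
    print(dict_from_object({'a': 1}))      # {'a': 1} -- dicts pass through unchanged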
garenchan/policy
policy/_utils.py
xgetattr
def xgetattr(obj: object, name: str, default=_sentinel, getitem=False): """Get attribute value from object. :param obj: object :param name: attribute or key name :param default: when attribute or key missing, return default; if obj is a dict and use getitem, default will not be used. :param getitem: when object is a dict, use getitem or get :return: attribute or key value, or raise KeyError/AttributeError """ if isinstance(obj, dict): if getitem: # In tune with `dict.__getitem__` method. return obj[name] else: # In tune with `dict.get` method. val = obj.get(name, default) return None if val is _sentinel else val else: # If object is not a dict, in tune with `getattr` method. val = getattr(obj, name, default) if val is _sentinel: msg = '%r object has no attribute %r' % (obj.__class__, name) raise AttributeError(msg) else: return val
python
def xgetattr(obj: object, name: str, default=_sentinel, getitem=False): """Get attribute value from object. :param obj: object :param name: attribute or key name :param default: when attribute or key missing, return default; if obj is a dict and use getitem, default will not be used. :param getitem: when object is a dict, use getitem or get :return: attribute or key value, or raise KeyError/AttributeError """ if isinstance(obj, dict): if getitem: # In tune with `dict.__getitem__` method. return obj[name] else: # In tune with `dict.get` method. val = obj.get(name, default) return None if val is _sentinel else val else: # If object is not a dict, in tune with `getattr` method. val = getattr(obj, name, default) if val is _sentinel: msg = '%r object has no attribute %r' % (obj.__class__, name) raise AttributeError(msg) else: return val
[ "def", "xgetattr", "(", "obj", ":", "object", ",", "name", ":", "str", ",", "default", "=", "_sentinel", ",", "getitem", "=", "False", ")", ":", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "if", "getitem", ":", "# In tune with `dict.__getitem__` method.", "return", "obj", "[", "name", "]", "else", ":", "# In tune with `dict.get` method.", "val", "=", "obj", ".", "get", "(", "name", ",", "default", ")", "return", "None", "if", "val", "is", "_sentinel", "else", "val", "else", ":", "# If object is not a dict, in tune with `getattr` method.", "val", "=", "getattr", "(", "obj", ",", "name", ",", "default", ")", "if", "val", "is", "_sentinel", ":", "msg", "=", "'%r object has no attribute %r'", "%", "(", "obj", ".", "__class__", ",", "name", ")", "raise", "AttributeError", "(", "msg", ")", "else", ":", "return", "val" ]
Get attribute value from object. :param obj: object :param name: attribute or key name :param default: when attribute or key missing, return default; if obj is a dict and use getitem, default will not be used. :param getitem: when object is a dict, use getitem or get :return: attribute or key value, or raise KeyError/AttributeError
[ "Get", "attribute", "value", "from", "object", "." ]
7709ae5f371146f8c90380d0877a5e59d731f644
https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_utils.py#L22-L48
train
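Behavior check covering the three lookup paths of xgetattr:

    from policy._utils import xgetattr

    ns = {'color': 'red'}
    print(xgetattr(ns, 'color'))        # 'red' (dict.get semantics)
    print(xgetattr(ns, 'missing'))      # None (the sentinel default collapses to None)
    # xgetattr(ns, 'missing', getitem=True) would raise KeyError instead

    class Cfg:
        debug = True

    print(xgetattr(Cfg(), 'debug'))     # True (plain getattr path for non-dicts)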
BernardFW/bernard
src/bernard/conf/__init__.py
list_config_files
def list_config_files() -> List[Text]: """ This function returns the list of configuration files to load. This is a callable so the configuration can be reloaded with files that changed in between. """ return [ os.path.join(os.path.dirname(__file__), 'default_settings.py'), os.getenv(ENVIRONMENT_VARIABLE, ''), ]
python
def list_config_files() -> List[Text]: """ This function returns the list of configuration files to load. This is a callable so the configuration can be reloaded with files that changed in between. """ return [ os.path.join(os.path.dirname(__file__), 'default_settings.py'), os.getenv(ENVIRONMENT_VARIABLE, ''), ]
[ "def", "list_config_files", "(", ")", "->", "List", "[", "Text", "]", ":", "return", "[", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'default_settings.py'", ")", ",", "os", ".", "getenv", "(", "ENVIRONMENT_VARIABLE", ",", "''", ")", ",", "]" ]
This function returns the list of configuration files to load. This is a callable so the configuration can be reloaded with files that changed in between.
[ "This", "function", "returns", "the", "list", "of", "configuration", "files", "to", "load", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/conf/__init__.py#L15-L26
train
frostming/marko
marko/helpers.py
camel_to_snake_case
def camel_to_snake_case(name): """Takes a camelCased string and converts to snake_case.""" pattern = r'[A-Z][a-z]+|[A-Z]+(?![a-z])' return '_'.join(map(str.lower, re.findall(pattern, name)))
python
def camel_to_snake_case(name): """Takes a camelCased string and converts to snake_case.""" pattern = r'[A-Z][a-z]+|[A-Z]+(?![a-z])' return '_'.join(map(str.lower, re.findall(pattern, name)))
[ "def", "camel_to_snake_case", "(", "name", ")", ":", "pattern", "=", "r'[A-Z][a-z]+|[A-Z]+(?![a-z])'", "return", "'_'", ".", "join", "(", "map", "(", "str", ".", "lower", ",", "re", ".", "findall", "(", "pattern", ",", "name", ")", ")", ")" ]
Takes a camelCased string and converts to snake_case.
[ "Takes", "a", "camelCased", "string", "and", "converts", "to", "snake_case", "." ]
1cd030b665fa37bad1f8b3a25a89ce1a7c491dde
https://github.com/frostming/marko/blob/1cd030b665fa37bad1f8b3a25a89ce1a7c491dde/marko/helpers.py#L10-L13
train
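Behavior check; the pattern is tuned for PascalCase class names with acronym runs, so a leading lowercase word is silently dropped:

    from marko.helpers import camel_to_snake_case

    print(camel_to_snake_case('FencedCode'))   # 'fenced_code'
    print(camel_to_snake_case('HTMLBlock'))    # 'html_block'
    print(camel_to_snake_case('camelCase'))    # 'case' -- 'camel' never matches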
frostming/marko
marko/helpers.py
Source.match_prefix
def match_prefix(prefix, line): """Check if the line starts with the given prefix and return the position of the end of the prefix. If the prefix is not matched, return -1. """ m = re.match(prefix, line.expandtabs(4)) if not m: if re.match(prefix, line.expandtabs(4).replace('\n', ' ' * 99 + '\n')): return len(line) - 1 return -1 pos = m.end() if pos == 0: return 0 for i in range(1, len(line) + 1): if len(line[:i].expandtabs(4)) >= pos: return i
python
def match_prefix(prefix, line): """Check if the line starts with the given prefix and return the position of the end of the prefix. If the prefix is not matched, return -1. """ m = re.match(prefix, line.expandtabs(4)) if not m: if re.match(prefix, line.expandtabs(4).replace('\n', ' ' * 99 + '\n')): return len(line) - 1 return -1 pos = m.end() if pos == 0: return 0 for i in range(1, len(line) + 1): if len(line[:i].expandtabs(4)) >= pos: return i
[ "def", "match_prefix", "(", "prefix", ",", "line", ")", ":", "m", "=", "re", ".", "match", "(", "prefix", ",", "line", ".", "expandtabs", "(", "4", ")", ")", "if", "not", "m", ":", "if", "re", ".", "match", "(", "prefix", ",", "line", ".", "expandtabs", "(", "4", ")", ".", "replace", "(", "'\\n'", ",", "' '", "*", "99", "+", "'\\n'", ")", ")", ":", "return", "len", "(", "line", ")", "-", "1", "return", "-", "1", "pos", "=", "m", ".", "end", "(", ")", "if", "pos", "==", "0", ":", "return", "0", "for", "i", "in", "range", "(", "1", ",", "len", "(", "line", ")", "+", "1", ")", ":", "if", "len", "(", "line", "[", ":", "i", "]", ".", "expandtabs", "(", "4", ")", ")", ">=", "pos", ":", "return", "i" ]
Check if the line starts with the given prefix and return the position of the end of the prefix. If the prefix is not matched, return -1.
[ "Check", "if", "the", "line", "starts", "with", "the", "given", "prefix", "and", "return", "the", "position", "of", "the", "end", "of", "the", "prefix", ".", "If", "the", "prefix", "is", "not", "matched", "return", "-", "1", "." ]
1cd030b665fa37bad1f8b3a25a89ce1a7c491dde
https://github.com/frostming/marko/blob/1cd030b665fa37bad1f8b3a25a89ce1a7c491dde/marko/helpers.py#L101-L116
train
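A small illustration of the tab-aware return value, assuming match_prefix is exposed as a static method on Source (its self-less signature suggests so); the result counts characters of line, not expanded columns:

    from marko.helpers import Source

    print(Source.match_prefix(' {4}', '    code\n'))   # 4 -- four literal spaces
    print(Source.match_prefix(' {4}', '\tcode\n'))     # 1 -- one tab covers the same prefix
    print(Source.match_prefix(' {4}', 'code\n'))       # -1 -- prefix not matched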
frostming/marko
marko/helpers.py
Source.expect_re
def expect_re(self, regexp): """Test against the given regular expression and return the match object. :param regexp: the expression to be tested. :returns: the match object. """ prefix_len = self.match_prefix( self.prefix, self.next_line(require_prefix=False) ) if prefix_len >= 0: match = self._expect_re(regexp, self.pos + prefix_len) self.match = match return match else: return None
python
def expect_re(self, regexp): """Test against the given regular expression and return the match object. :param regexp: the expression to be tested. :returns: the match object. """ prefix_len = self.match_prefix( self.prefix, self.next_line(require_prefix=False) ) if prefix_len >= 0: match = self._expect_re(regexp, self.pos + prefix_len) self.match = match return match else: return None
[ "def", "expect_re", "(", "self", ",", "regexp", ")", ":", "prefix_len", "=", "self", ".", "match_prefix", "(", "self", ".", "prefix", ",", "self", ".", "next_line", "(", "require_prefix", "=", "False", ")", ")", "if", "prefix_len", ">=", "0", ":", "match", "=", "self", ".", "_expect_re", "(", "regexp", ",", "self", ".", "pos", "+", "prefix_len", ")", "self", ".", "match", "=", "match", "return", "match", "else", ":", "return", "None" ]
Test against the given regular expression and return the match object. :param regexp: the expression to be tested. :returns: the match object.
[ "Test", "against", "the", "given", "regular", "expression", "and", "return", "the", "match", "object", "." ]
1cd030b665fa37bad1f8b3a25a89ce1a7c491dde
https://github.com/frostming/marko/blob/1cd030b665fa37bad1f8b3a25a89ce1a7c491dde/marko/helpers.py#L118-L132
train
frostming/marko
marko/helpers.py
Source.next_line
def next_line(self, require_prefix=True): """Return the next line in the source. :param require_prefix: if False, the whole line will be returned. Otherwise, return the line with the prefix stripped, or None if the prefix is not matched. """ if require_prefix: m = self.expect_re(r'(?m)[^\n]*?$\n?') else: m = self._expect_re(r'(?m)[^\n]*$\n?', self.pos) self.match = m if m: return m.group()
python
def next_line(self, require_prefix=True): """Return the next line in the source. :param require_prefix: if False, the whole line will be returned. Otherwise, return the line with the prefix stripped, or None if the prefix is not matched. """ if require_prefix: m = self.expect_re(r'(?m)[^\n]*?$\n?') else: m = self._expect_re(r'(?m)[^\n]*$\n?', self.pos) self.match = m if m: return m.group()
[ "def", "next_line", "(", "self", ",", "require_prefix", "=", "True", ")", ":", "if", "require_prefix", ":", "m", "=", "self", ".", "expect_re", "(", "r'(?m)[^\\n]*?$\\n?'", ")", "else", ":", "m", "=", "self", ".", "_expect_re", "(", "r'(?m)[^\\n]*$\\n?'", ",", "self", ".", "pos", ")", "self", ".", "match", "=", "m", "if", "m", ":", "return", "m", ".", "group", "(", ")" ]
Return the next line in the source. :param require_prefix: if False, the whole line will be returned. Otherwise, return the line with the prefix stripped, or None if the prefix is not matched.
[ "Return", "the", "next", "line", "in", "the", "source", "." ]
1cd030b665fa37bad1f8b3a25a89ce1a7c491dde
https://github.com/frostming/marko/blob/1cd030b665fa37bad1f8b3a25a89ce1a7c491dde/marko/helpers.py#L134-L147
train
frostming/marko
marko/helpers.py
Source.consume
def consume(self): """Consume the body of source. ``pos`` will move forward.""" if self.match: self.pos = self.match.end() if self.match.group()[-1] == '\n': self._update_prefix() self.match = None
python
def consume(self): """Consume the body of source. ``pos`` will move forward.""" if self.match: self.pos = self.match.end() if self.match.group()[-1] == '\n': self._update_prefix() self.match = None
[ "def", "consume", "(", "self", ")", ":", "if", "self", ".", "match", ":", "self", ".", "pos", "=", "self", ".", "match", ".", "end", "(", ")", "if", "self", ".", "match", ".", "group", "(", ")", "[", "-", "1", "]", "==", "'\\n'", ":", "self", ".", "_update_prefix", "(", ")", "self", ".", "match", "=", "None" ]
Consume the body of source. ``pos`` will move forward.
[ "Consume", "the", "body", "of", "source", ".", "pos", "will", "move", "forward", "." ]
1cd030b665fa37bad1f8b3a25a89ce1a7c491dde
https://github.com/frostming/marko/blob/1cd030b665fa37bad1f8b3a25a89ce1a7c491dde/marko/helpers.py#L149-L155
train
kata198/python-subprocess2
subprocess2/BackgroundTask.py
BackgroundTaskInfo.asDict
def asDict(self): ''' asDict - Returns a copy of the current state as a dictionary. This copy will not be updated automatically. @return <dict> - Dictionary with all fields in BackgroundTaskInfo.FIELDS ''' ret = {} for field in BackgroundTaskInfo.FIELDS: ret[field] = getattr(self, field) return ret
python
def asDict(self): ''' asDict - Returns a copy of the current state as a dictionary. This copy will not be updated automatically. @return <dict> - Dictionary with all fields in BackgroundTaskInfo.FIELDS ''' ret = {} for field in BackgroundTaskInfo.FIELDS: ret[field] = getattr(self, field) return ret
[ "def", "asDict", "(", "self", ")", ":", "ret", "=", "{", "}", "for", "field", "in", "BackgroundTaskInfo", ".", "FIELDS", ":", "ret", "[", "field", "]", "=", "getattr", "(", "self", ",", "field", ")", "return", "ret" ]
asDict - Returns a copy of the current state as a dictionary. This copy will not be updated automatically. @return <dict> - Dictionary with all fields in BackgroundTaskInfo.FIELDS
[ "asDict", "-", "Returns", "a", "copy", "of", "the", "current", "state", "as", "a", "dictionary", ".", "This", "copy", "will", "not", "be", "updated", "automatically", "." ]
8544b0b651d8e14de9fdd597baa704182e248b01
https://github.com/kata198/python-subprocess2/blob/8544b0b651d8e14de9fdd597baa704182e248b01/subprocess2/BackgroundTask.py#L84-L93
train
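The same FIELDS-driven snapshot pattern in a tiny self-contained class (the field names here are hypothetical, not the real BackgroundTaskInfo.FIELDS):

    class TaskSnapshot:
        FIELDS = ('isFinished', 'returnCode')    # hypothetical field list

        def __init__(self):
            self.isFinished = False
            self.returnCode = None

        def asDict(self):
            return {field: getattr(self, field) for field in self.FIELDS}

    print(TaskSnapshot().asDict())   # {'isFinished': False, 'returnCode': None}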
garenchan/policy
policy/checks.py
register
def register(name, _callable=None): """A decorator used to register a custom check. :param name: name of the check :type: str :param _callable: check class or a function which returns a check instance :return: _callable or a decorator """ def wrapper(_callable): registered_checks[name] = _callable return _callable # If function or class is given, do the registration if _callable: return wrapper(_callable) return wrapper
python
def register(name, _callable=None): """A decorator used to register a custom check. :param name: name of the check :type: str :param _callable: check class or a function which returns a check instance :return: _callable or a decorator """ def wrapper(_callable): registered_checks[name] = _callable return _callable # If function or class is given, do the registration if _callable: return wrapper(_callable) return wrapper
[ "def", "register", "(", "name", ",", "_callable", "=", "None", ")", ":", "def", "wrapper", "(", "_callable", ")", ":", "registered_checks", "[", "name", "]", "=", "_callable", "return", "_callable", "# If function or class is given, do the registeration", "if", "_callable", ":", "return", "wrapper", "(", "_callable", ")", "return", "wrapper" ]
A decorator used to register a custom check. :param name: name of the check :type: str :param _callable: check class or a function which returns a check instance :return: _callable or a decorator
[ "A", "decorator", "used", "to", "register", "a", "custom", "check", "." ]
7709ae5f371146f8c90380d0877a5e59d731f644
https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/checks.py#L177-L193
train
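Both call styles the docstring promises, with a hypothetical check factory; registered_checks is the module-level dict the code above writes into:

    from policy.checks import register, registered_checks

    @register('always_true')
    def always_true_factory():
        """Hypothetical factory returning a check that always passes."""
        return lambda target, creds, enforcer: True

    def always_false_factory():
        return lambda target, creds, enforcer: False

    register('always_false', always_false_factory)    # direct registration

    print(registered_checks['always_true'] is always_true_factory)   # True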
BernardFW/bernard
src/bernard/platforms/facebook/layers.py
MessagingType.serialize
def serialize(self): """ Generates the messaging-type-related part of the message dictionary. """ if self.response is not None: return {'messaging_type': 'RESPONSE'} if self.update is not None: return {'messaging_type': 'UPDATE'} if self.tag is not None: return { 'messaging_type': 'MESSAGE_TAG', 'tag': self.tag.value, } if self.subscription is not None: return {'messaging_type': 'NON_PROMOTIONAL_SUBSCRIPTION'}
python
def serialize(self): """ Generates the messaging-type-related part of the message dictionary. """ if self.response is not None: return {'messaging_type': 'RESPONSE'} if self.update is not None: return {'messaging_type': 'UPDATE'} if self.tag is not None: return { 'messaging_type': 'MESSAGE_TAG', 'tag': self.tag.value, } if self.subscription is not None: return {'messaging_type': 'NON_PROMOTIONAL_SUBSCRIPTION'}
[ "def", "serialize", "(", "self", ")", ":", "if", "self", ".", "response", "is", "not", "None", ":", "return", "{", "'messaging_type'", ":", "'RESPONSE'", "}", "if", "self", ".", "update", "is", "not", "None", ":", "return", "{", "'messaging_type'", ":", "'UPDATE'", "}", "if", "self", ".", "tag", "is", "not", "None", ":", "return", "{", "'messaging_type'", ":", "'MESSAGE_TAG'", ",", "'tag'", ":", "self", ".", "tag", ".", "value", ",", "}", "if", "self", ".", "subscription", "is", "not", "None", ":", "return", "{", "'messaging_type'", ":", "'NON_PROMOTIONAL_SUBSCRIPTION'", "}" ]
Generates the messaging-type-related part of the message dictionary.
[ "Generates", "the", "messaging", "-", "type", "-", "related", "part", "of", "the", "message", "dictionary", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/layers.py#L102-L120
train
BernardFW/bernard
src/bernard/platforms/facebook/layers.py
QuickRepliesList.patch_register
async def patch_register(self, register: Dict, request: 'Request'): """ Store all options in the "choices" sub-register. We store both the text and the potential intent, in order to match both regular quick reply clicks but also the user typing stuff on his keyboard that matches more or less the content of quick replies. """ register['choices'] = { o.slug: { 'intent': o.intent.key if o.intent else None, 'text': await render(o.text, request), } for o in self.options if isinstance(o, QuickRepliesList.TextOption) } return register
python
async def patch_register(self, register: Dict, request: 'Request'): """ Store all options in the "choices" sub-register. We store both the text and the potential intent, in order to match both regular quick reply clicks but also the user typing stuff on his keyboard that matches more or less the content of quick replies. """ register['choices'] = { o.slug: { 'intent': o.intent.key if o.intent else None, 'text': await render(o.text, request), } for o in self.options if isinstance(o, QuickRepliesList.TextOption) } return register
[ "async", "def", "patch_register", "(", "self", ",", "register", ":", "Dict", ",", "request", ":", "'Request'", ")", ":", "register", "[", "'choices'", "]", "=", "{", "o", ".", "slug", ":", "{", "'intent'", ":", "o", ".", "intent", ".", "key", "if", "o", ".", "intent", "else", "None", ",", "'text'", ":", "await", "render", "(", "o", ".", "text", ",", "request", ")", ",", "}", "for", "o", "in", "self", ".", "options", "if", "isinstance", "(", "o", ",", "QuickRepliesList", ".", "TextOption", ")", "}", "return", "register" ]
Store all options in the "choices" sub-register. We store both the text and the potential intent, in order to match both regular quick reply clicks but also the user typing stuff on his keyboard that matches more or less the content of quick replies.
[ "Store", "all", "options", "in", "the", "choices", "sub", "-", "register", ".", "We", "store", "both", "the", "text", "and", "the", "potential", "intent", "in", "order", "to", "match", "both", "regular", "quick", "reply", "clicks", "but", "also", "the", "user", "typing", "stuff", "on", "his", "keyboard", "that", "matches", "more", "or", "less", "the", "content", "of", "quick", "replies", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/layers.py#L198-L214
train
BernardFW/bernard
src/bernard/platforms/facebook/layers.py
GenericTemplate.is_sharable
def is_sharable(self): """ Can only be sharable if marked as such and no child element is blocking sharing due to security reasons. """ return bool( self.sharable and all(x.is_sharable() for x in self.elements) )
python
def is_sharable(self): """ Can only be sharable if marked as such and no child element is blocking sharing due to security reasons. """ return bool( self.sharable and all(x.is_sharable() for x in self.elements) )
[ "def", "is_sharable", "(", "self", ")", ":", "return", "bool", "(", "self", ".", "sharable", "and", "all", "(", "x", ".", "is_sharable", "(", ")", "for", "x", "in", "self", ".", "elements", ")", ")" ]
Can only be sharable if marked as such and no child element is blocking sharing due to security reasons.
[ "Can", "only", "be", "sharable", "if", "marked", "as", "such", "and", "no", "child", "element", "is", "blocking", "sharing", "due", "to", "security", "reasons", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/layers.py#L289-L297
train
mbourqui/django-echoices
echoices/enums/enums.py
EChoice.from_value
def from_value(cls, value): """ Return the EChoice object associated with this value, if any. Parameters ---------- value In the type of the `value` field, as set when instantiating this EChoice. Returns ------- EChoice Raises ------ KeyError if `value` does not exist in any element """ warnings.warn("{0}.{1} will be deprecated in a future release. " "Please use {0}.{2} instead".format(cls.__name__, cls.from_value.__name__, cls.get.__name__), PendingDeprecationWarning) return cls[value]
python
def from_value(cls, value): """ Return the EChoice object associated with this value, if any. Parameters ---------- value In the type of the `value` field, as set when instantiating this EChoice. Returns ------- EChoice Raises ------ KeyError if `value` does not exist in any element """ warnings.warn("{0}.{1} will be deprecated in a future release. " "Please use {0}.{2} instead".format(cls.__name__, cls.from_value.__name__, cls.get.__name__), PendingDeprecationWarning) return cls[value]
[ "def", "from_value", "(", "cls", ",", "value", ")", ":", "warnings", ".", "warn", "(", "\"{0}.{1} will be deprecated in a future release. \"", "\"Please use {0}.{2} instead\"", ".", "format", "(", "cls", ".", "__name__", ",", "cls", ".", "from_value", ".", "__name__", ",", "cls", ".", "get", ".", "__name__", ")", ",", "PendingDeprecationWarning", ")", "return", "cls", "[", "value", "]" ]
Return the EChoice object associated with this value, if any. Parameters ---------- value In the type of the `value` field, as set when instantiating this EChoice. Returns ------- EChoice Raises ------ KeyError if `value` does not exist in any element
[ "Return", "the", "EChoice", "object", "associated", "with", "this", "value", "if", "any", "." ]
c57405005ec368ac602bb38a71091a1e03c723bb
https://github.com/mbourqui/django-echoices/blob/c57405005ec368ac602bb38a71091a1e03c723bb/echoices/enums/enums.py#L165-L187
train
BernardFW/bernard
src/bernard/server/views.py
bernard_auth
def bernard_auth(func): """ Authenticates the users based on the query-string-provided token """ @wraps(func) async def wrapper(request: Request): def get_query_token(): token_key = settings.WEBVIEW_TOKEN_KEY return request.query.get(token_key, '') def get_header_token(): header_key = settings.WEBVIEW_HEADER_NAME return request.headers.get(header_key, '') try: token = next(filter(None, [ get_header_token(), get_query_token(), ])) except StopIteration: token = '' try: body = await request.json() except ValueError: body = None msg, platform = await manager.message_from_token(token, body) if not msg: return json_response({ 'status': 'unauthorized', 'message': 'No valid token found', }, status=401) return await func(msg, platform) return wrapper
python
def bernard_auth(func): """ Authenticates the users based on the query-string-provided token """ @wraps(func) async def wrapper(request: Request): def get_query_token(): token_key = settings.WEBVIEW_TOKEN_KEY return request.query.get(token_key, '') def get_header_token(): header_key = settings.WEBVIEW_HEADER_NAME return request.headers.get(header_key, '') try: token = next(filter(None, [ get_header_token(), get_query_token(), ])) except StopIteration: token = '' try: body = await request.json() except ValueError: body = None msg, platform = await manager.message_from_token(token, body) if not msg: return json_response({ 'status': 'unauthorized', 'message': 'No valid token found', }, status=401) return await func(msg, platform) return wrapper
[ "def", "bernard_auth", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "async", "def", "wrapper", "(", "request", ":", "Request", ")", ":", "def", "get_query_token", "(", ")", ":", "token_key", "=", "settings", ".", "WEBVIEW_TOKEN_KEY", "return", "request", ".", "query", ".", "get", "(", "token_key", ",", "''", ")", "def", "get_header_token", "(", ")", ":", "header_key", "=", "settings", ".", "WEBVIEW_HEADER_NAME", "return", "request", ".", "headers", ".", "get", "(", "header_key", ",", "''", ")", "try", ":", "token", "=", "next", "(", "filter", "(", "None", ",", "[", "get_header_token", "(", ")", ",", "get_query_token", "(", ")", ",", "]", ")", ")", "except", "StopIteration", ":", "token", "=", "''", "try", ":", "body", "=", "await", "request", ".", "json", "(", ")", "except", "ValueError", ":", "body", "=", "None", "msg", ",", "platform", "=", "await", "manager", ".", "message_from_token", "(", "token", ",", "body", ")", "if", "not", "msg", ":", "return", "json_response", "(", "{", "'status'", ":", "'unauthorized'", ",", "'message'", ":", "'No valid token found'", ",", "}", ",", "status", "=", "401", ")", "return", "await", "func", "(", "msg", ",", "platform", ")", "return", "wrapper" ]
Authenticates the user based on a token provided in the request headers or query string
[ "Authenticates", "the", "user", "based", "on", "a", "token", "provided", "in", "the", "request", "headers", "or", "query", "string" ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/server/views.py#L37-L74
train
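The token-precedence idiom in isolation: the header token wins over the query token, and an empty string results when neither is set:

    def first_truthy(*candidates):
        try:
            return next(filter(None, candidates))
        except StopIteration:
            return ''

    print(first_truthy('', 'query-token'))           # 'query-token'
    print(first_truthy('hdr-token', 'query-token'))  # 'hdr-token'
    print(repr(first_truthy('', '')))                # ''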
BernardFW/bernard
src/bernard/server/views.py
postback_me
async def postback_me(msg: BaseMessage, platform: Platform) -> Response: """ Provides the front-end with details about the user. This output can be completed using the `api_postback_me` middleware hook. """ async def get_basic_info(_msg: BaseMessage, _platform: Platform): user = _msg.get_user() return { 'friendly_name': await user.get_friendly_name(), 'locale': await user.get_locale(), 'platform': _platform.NAME, } func = MiddlewareManager.instance().get('api_postback_me', get_basic_info) return json_response(await func(msg, platform))
python
async def postback_me(msg: BaseMessage, platform: Platform) -> Response: """ Provides the front-end with details about the user. This output can be completed using the `api_postback_me` middleware hook. """ async def get_basic_info(_msg: BaseMessage, _platform: Platform): user = _msg.get_user() return { 'friendly_name': await user.get_friendly_name(), 'locale': await user.get_locale(), 'platform': _platform.NAME, } func = MiddlewareManager.instance().get('api_postback_me', get_basic_info) return json_response(await func(msg, platform))
[ "async", "def", "postback_me", "(", "msg", ":", "BaseMessage", ",", "platform", ":", "Platform", ")", "->", "Response", ":", "async", "def", "get_basic_info", "(", "_msg", ":", "BaseMessage", ",", "_platform", ":", "Platform", ")", ":", "user", "=", "_msg", ".", "get_user", "(", ")", "return", "{", "'friendly_name'", ":", "await", "user", ".", "get_friendly_name", "(", ")", ",", "'locale'", ":", "await", "user", ".", "get_locale", "(", ")", ",", "'platform'", ":", "_platform", ".", "NAME", ",", "}", "func", "=", "MiddlewareManager", ".", "instance", "(", ")", ".", "get", "(", "'api_postback_me'", ",", "get_basic_info", ")", "return", "json_response", "(", "await", "func", "(", "msg", ",", "platform", ")", ")" ]
Provides the front-end with details about the user. This output can be completed using the `api_postback_me` middleware hook.
[ "Provides", "the", "front", "-", "end", "with", "details", "about", "the", "user", ".", "This", "output", "can", "be", "completed", "using", "the", "api_postback_me", "middleware", "hook", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/server/views.py#L78-L95
train
BernardFW/bernard
src/bernard/server/views.py
postback_send
async def postback_send(msg: BaseMessage, platform: Platform) -> Response: """ Injects the POST body into the FSM as a Postback message. """ await platform.inject_message(msg) return json_response({ 'status': 'ok', })
python
async def postback_send(msg: BaseMessage, platform: Platform) -> Response: """ Injects the POST body into the FSM as a Postback message. """ await platform.inject_message(msg) return json_response({ 'status': 'ok', })
[ "async", "def", "postback_send", "(", "msg", ":", "BaseMessage", ",", "platform", ":", "Platform", ")", "->", "Response", ":", "await", "platform", ".", "inject_message", "(", "msg", ")", "return", "json_response", "(", "{", "'status'", ":", "'ok'", ",", "}", ")" ]
Injects the POST body into the FSM as a Postback message.
[ "Injects", "the", "POST", "body", "into", "the", "FSM", "as", "a", "Postback", "message", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/server/views.py#L99-L108
train
BernardFW/bernard
src/bernard/server/views.py
postback_analytics
async def postback_analytics(msg: BaseMessage, platform: Platform) -> Response: """ Makes a call to an analytics function. """ try: pb = msg.get_layers()[0] assert isinstance(pb, Postback) user = msg.get_user() user_lang = await user.get_locale() user_id = user.id if pb.payload['event'] == 'page_view': func = 'page_view' path = pb.payload['path'] title = pb.payload.get('title', '') args = [path, title, user_id, user_lang] else: return json_response({ 'status': 'unknown event', 'message': f'"{pb.payload["event"]}" is not a recognized ' f'analytics event', }) async for p in providers(): await getattr(p, func)(*args) except (KeyError, IndexError, AssertionError, TypeError): return json_response({ 'status': 'missing data' }, status=400) else: return json_response({ 'status': 'ok', })
python
async def postback_analytics(msg: BaseMessage, platform: Platform) -> Response: """ Makes a call to an analytics function. """ try: pb = msg.get_layers()[0] assert isinstance(pb, Postback) user = msg.get_user() user_lang = await user.get_locale() user_id = user.id if pb.payload['event'] == 'page_view': func = 'page_view' path = pb.payload['path'] title = pb.payload.get('title', '') args = [path, title, user_id, user_lang] else: return json_response({ 'status': 'unknown event', 'message': f'"{pb.payload["event"]}" is not a recognized ' f'analytics event', }) async for p in providers(): await getattr(p, func)(*args) except (KeyError, IndexError, AssertionError, TypeError): return json_response({ 'status': 'missing data' }, status=400) else: return json_response({ 'status': 'ok', })
[ "async", "def", "postback_analytics", "(", "msg", ":", "BaseMessage", ",", "platform", ":", "Platform", ")", "->", "Response", ":", "try", ":", "pb", "=", "msg", ".", "get_layers", "(", ")", "[", "0", "]", "assert", "isinstance", "(", "pb", ",", "Postback", ")", "user", "=", "msg", ".", "get_user", "(", ")", "user_lang", "=", "await", "user", ".", "get_locale", "(", ")", "user_id", "=", "user", ".", "id", "if", "pb", ".", "payload", "[", "'event'", "]", "==", "'page_view'", ":", "func", "=", "'page_view'", "path", "=", "pb", ".", "payload", "[", "'path'", "]", "title", "=", "pb", ".", "payload", ".", "get", "(", "'title'", ",", "''", ")", "args", "=", "[", "path", ",", "title", ",", "user_id", ",", "user_lang", "]", "else", ":", "return", "json_response", "(", "{", "'status'", ":", "'unknown event'", ",", "'message'", ":", "f'\"{pb.payload[\"event\"]}\" is not a recognized '", "f'analytics event'", ",", "}", ")", "async", "for", "p", "in", "providers", "(", ")", ":", "await", "getattr", "(", "p", ",", "func", ")", "(", "*", "args", ")", "except", "(", "KeyError", ",", "IndexError", ",", "AssertionError", ",", "TypeError", ")", ":", "return", "json_response", "(", "{", "'status'", ":", "'missing data'", "}", ",", "status", "=", "400", ")", "else", ":", "return", "json_response", "(", "{", "'status'", ":", "'ok'", ",", "}", ")" ]
Makes a call to an analytics function.
[ "Makes", "a", "call", "to", "an", "analytics", "function", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/server/views.py#L112-L148
train
klmitch/turnstile
turnstile/control.py
register
def register(name, func=None): """ Function or decorator which registers a given function as a recognized control command. """ def decorator(func): # Perform the registration ControlDaemon._register(name, func) return func # If func was given, call the decorator, otherwise, return the # decorator if func: return decorator(func) else: return decorator
python
def register(name, func=None): """ Function or decorator which registers a given function as a recognized control command. """ def decorator(func): # Perform the registration ControlDaemon._register(name, func) return func # If func was given, call the decorator, otherwise, return the # decorator if func: return decorator(func) else: return decorator
[ "def", "register", "(", "name", ",", "func", "=", "None", ")", ":", "def", "decorator", "(", "func", ")", ":", "# Perform the registration", "ControlDaemon", ".", "_register", "(", "name", ",", "func", ")", "return", "func", "# If func was given, call the decorator, otherwise, return the", "# decorator", "if", "func", ":", "return", "decorator", "(", "func", ")", "else", ":", "return", "decorator" ]
Function or decorator which registers a given function as a recognized control command.
[ "Function", "or", "decorator", "which", "registers", "a", "given", "function", "as", "a", "recognized", "control", "command", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/control.py#L288-L304
train
klmitch/turnstile
turnstile/control.py
ping
def ping(daemon, channel, data=None): """ Process the 'ping' control message. :param daemon: The control daemon; used to get at the configuration and the database. :param channel: The publish channel to which to send the response. :param data: Optional extra data. Will be returned as the second argument of the response. Responds to the named channel with a command of 'pong' and with the node_name (if configured) and provided data as arguments. """ if not channel: # No place to reply to return # Get our configured node name node_name = daemon.config['control'].get('node_name') # Format the response reply = ['pong'] if node_name or data: reply.append(node_name or '') if data: reply.append(data) # And send it with utils.ignore_except(): daemon.db.publish(channel, ':'.join(reply))
python
def ping(daemon, channel, data=None): """ Process the 'ping' control message. :param daemon: The control daemon; used to get at the configuration and the database. :param channel: The publish channel to which to send the response. :param data: Optional extra data. Will be returned as the second argument of the response. Responds to the named channel with a command of 'pong' and with the node_name (if configured) and provided data as arguments. """ if not channel: # No place to reply to return # Get our configured node name node_name = daemon.config['control'].get('node_name') # Format the response reply = ['pong'] if node_name or data: reply.append(node_name or '') if data: reply.append(data) # And send it with utils.ignore_except(): daemon.db.publish(channel, ':'.join(reply))
[ "def", "ping", "(", "daemon", ",", "channel", ",", "data", "=", "None", ")", ":", "if", "not", "channel", ":", "# No place to reply to", "return", "# Get our configured node name", "node_name", "=", "daemon", ".", "config", "[", "'control'", "]", ".", "get", "(", "'node_name'", ")", "# Format the response", "reply", "=", "[", "'pong'", "]", "if", "node_name", "or", "data", ":", "reply", ".", "append", "(", "node_name", "or", "''", ")", "if", "data", ":", "reply", ".", "append", "(", "data", ")", "# And send it", "with", "utils", ".", "ignore_except", "(", ")", ":", "daemon", ".", "db", ".", "publish", "(", "channel", ",", "':'", ".", "join", "(", "reply", ")", ")" ]
Process the 'ping' control message. :param daemon: The control daemon; used to get at the configuration and the database. :param channel: The publish channel to which to send the response. :param data: Optional extra data. Will be returned as the second argument of the response. Responds to the named channel with a command of 'pong' and with the node_name (if configured) and provided data as arguments.
[ "Process", "the", "ping", "control", "message", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/control.py#L308-L340
train
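The reply wire format, reconstructed from the branches above:

    def build_pong(node_name=None, data=None):
        reply = ['pong']
        if node_name or data:
            reply.append(node_name or '')
        if data:
            reply.append(data)
        return ':'.join(reply)

    print(build_pong())                   # 'pong'
    print(build_pong('node1'))            # 'pong:node1'
    print(build_pong(None, 'abc123'))     # 'pong::abc123' -- empty node slot kept
    print(build_pong('node1', 'abc123'))  # 'pong:node1:abc123'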
klmitch/turnstile
turnstile/control.py
reload
def reload(daemon, load_type=None, spread=None): """ Process the 'reload' control message. :param daemon: The control daemon; used to get at the configuration and call the actual reload. :param load_type: Optional type of reload. If given as 'immediate', reload is triggered immediately. If given as 'spread', reload is triggered after a random period of time in the interval (0.0, spread). Otherwise, reload will be as configured. :param spread: Optional argument for 'spread' load_type. Must be a float giving the maximum length of the interval, in seconds, over which the reload should be scheduled. If not provided, falls back to configuration. If a recognized load_type is not given, or is given as 'spread' but the spread parameter is not a valid float, the configuration will be checked for the 'redis.reload_spread' value. If that is a valid value, the reload will be randomly scheduled for some time within the interval (0.0, redis.reload_spread). """ # Figure out what type of reload this needs to be if load_type == 'immediate': spread = None elif load_type == 'spread': try: spread = float(spread) except (TypeError, ValueError): # Not a valid float; use the configured spread value load_type = None else: load_type = None if load_type is None: # Use configured set-up; see if we have a spread # configured try: spread = float(daemon.config['control']['reload_spread']) except (TypeError, ValueError, KeyError): # No valid configuration spread = None if spread: # Apply a randomization to spread the load around eventlet.spawn_after(random.random() * spread, daemon.reload) else: # Spawn in immediate mode eventlet.spawn_n(daemon.reload)
python
def reload(daemon, load_type=None, spread=None): """ Process the 'reload' control message. :param daemon: The control daemon; used to get at the configuration and call the actual reload. :param load_type: Optional type of reload. If given as 'immediate', reload is triggered immediately. If given as 'spread', reload is triggered after a random period of time in the interval (0.0, spread). Otherwise, reload will be as configured. :param spread: Optional argument for 'spread' load_type. Must be a float giving the maximum length of the interval, in seconds, over which the reload should be scheduled. If not provided, falls back to configuration. If a recognized load_type is not given, or is given as 'spread' but the spread parameter is not a valid float, the configuration will be checked for the 'redis.reload_spread' value. If that is a valid value, the reload will be randomly scheduled for some time within the interval (0.0, redis.reload_spread). """ # Figure out what type of reload this needs to be if load_type == 'immediate': spread = None elif load_type == 'spread': try: spread = float(spread) except (TypeError, ValueError): # Not a valid float; use the configured spread value load_type = None else: load_type = None if load_type is None: # Use configured set-up; see if we have a spread # configured try: spread = float(daemon.config['control']['reload_spread']) except (TypeError, ValueError, KeyError): # No valid configuration spread = None if spread: # Apply a randomization to spread the load around eventlet.spawn_after(random.random() * spread, daemon.reload) else: # Spawn in immediate mode eventlet.spawn_n(daemon.reload)
[ "def", "reload", "(", "daemon", ",", "load_type", "=", "None", ",", "spread", "=", "None", ")", ":", "# Figure out what type of reload this needs to be", "if", "load_type", "==", "'immediate'", ":", "spread", "=", "None", "elif", "load_type", "==", "'spread'", ":", "try", ":", "spread", "=", "float", "(", "spread", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "# Not a valid float; use the configured spread value", "load_type", "=", "None", "else", ":", "load_type", "=", "None", "if", "load_type", "is", "None", ":", "# Use configured set-up; see if we have a spread", "# configured", "try", ":", "spread", "=", "float", "(", "daemon", ".", "config", "[", "'control'", "]", "[", "'reload_spread'", "]", ")", "except", "(", "TypeError", ",", "ValueError", ",", "KeyError", ")", ":", "# No valid configuration", "spread", "=", "None", "if", "spread", ":", "# Apply a randomization to spread the load around", "eventlet", ".", "spawn_after", "(", "random", ".", "random", "(", ")", "*", "spread", ",", "daemon", ".", "reload", ")", "else", ":", "# Spawn in immediate mode", "eventlet", ".", "spawn_n", "(", "daemon", ".", "reload", ")" ]
Process the 'reload' control message. :param daemon: The control daemon; used to get at the configuration and call the actual reload. :param load_type: Optional type of reload. If given as 'immediate', reload is triggered immediately. If given as 'spread', reload is triggered after a random period of time in the interval (0.0, spread). Otherwise, reload will be as configured. :param spread: Optional argument for 'spread' load_type. Must be a float giving the maximum length of the interval, in seconds, over which the reload should be scheduled. If not provided, falls back to configuration. If a recognized load_type is not given, or is given as 'spread' but the spread parameter is not a valid float, the configuration will be checked for the 'redis.reload_spread' value. If that is a valid value, the reload will be randomly scheduled for some time within the interval (0.0, redis.reload_spread).
[ "Process", "the", "reload", "control", "message", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/control.py#L344-L396
train
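The scheduling arithmetic in isolation: with a 30-second spread, each node picks a delay uniformly in [0, 30), so a fleet of workers does not reload simultaneously:

    import random

    spread = 30.0
    delay = random.random() * spread      # uniform in [0.0, spread)
    print(0.0 <= delay < spread)          # True
    # eventlet.spawn_after(delay, daemon.reload) then fires the reload later.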
klmitch/turnstile
turnstile/control.py
LimitData.set_limits
def set_limits(self, limits): """ Set the limit data to the given list of limits. Limits are specified as the raw msgpack string representing the limit. Computes the checksum of the limits; if the checksum is identical to the current one, no action is taken. """ # First task, build the checksum of the new limits chksum = hashlib.md5() # sufficient for our purposes for lim in limits: chksum.update(lim) new_sum = chksum.hexdigest() # Now install it with self.limit_lock: if self.limit_sum == new_sum: # No changes return self.limit_data = [msgpack.loads(lim) for lim in limits] self.limit_sum = new_sum
python
def set_limits(self, limits): """ Set the limit data to the given list of limits. Limits are specified as the raw msgpack string representing the limit. Computes the checksum of the limits; if the checksum is identical to the current one, no action is taken. """ # First task, build the checksum of the new limits chksum = hashlib.md5() # sufficient for our purposes for lim in limits: chksum.update(lim) new_sum = chksum.hexdigest() # Now install it with self.limit_lock: if self.limit_sum == new_sum: # No changes return self.limit_data = [msgpack.loads(lim) for lim in limits] self.limit_sum = new_sum
[ "def", "set_limits", "(", "self", ",", "limits", ")", ":", "# First task, build the checksum of the new limits", "chksum", "=", "hashlib", ".", "md5", "(", ")", "# sufficient for our purposes", "for", "lim", "in", "limits", ":", "chksum", ".", "update", "(", "lim", ")", "new_sum", "=", "chksum", ".", "hexdigest", "(", ")", "# Now install it", "with", "self", ".", "limit_lock", ":", "if", "self", ".", "limit_sum", "==", "new_sum", ":", "# No changes", "return", "self", ".", "limit_data", "=", "[", "msgpack", ".", "loads", "(", "lim", ")", "for", "lim", "in", "limits", "]", "self", ".", "limit_sum", "=", "new_sum" ]
Set the limit data to the given list of limits. Limits are specified as the raw msgpack string representing the limit. Computes the checksum of the limits; if the checksum is identical to the current one, no action is taken.
[ "Set", "the", "limit", "data", "to", "the", "given", "list", "of", "limits", ".", "Limits", "are", "specified", "as", "the", "raw", "msgpack", "string", "representing", "the", "limit", ".", "Computes", "the", "checksum", "of", "the", "limits", ";", "if", "the", "checksum", "is", "identical", "to", "the", "current", "one", "no", "action", "is", "taken", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/control.py#L60-L80
train
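The change-detection idea in isolation; byte strings stand in for the raw msgpack limits, and note that order matters to the digest:

    import hashlib

    def digest(limits):
        chksum = hashlib.md5()            # change detection, not security
        for lim in limits:
            chksum.update(lim)
        return chksum.hexdigest()

    a = digest([b'limit-one', b'limit-two'])
    print(a == digest([b'limit-one', b'limit-two']))   # True  -> reload skipped
    print(a == digest([b'limit-two', b'limit-one']))   # False -> limits reinstalled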
klmitch/turnstile
turnstile/control.py
ControlDaemon.start
def start(self): """ Starts the ControlDaemon by launching the listening thread and triggering the initial limits load. """ # Spawn the listening thread self.listen_thread = eventlet.spawn_n(self.listen) # Now do the initial load self.reload()
python
def start(self): """ Starts the ControlDaemon by launching the listening thread and triggering the initial limits load. """ # Spawn the listening thread self.listen_thread = eventlet.spawn_n(self.listen) # Now do the initial load self.reload()
[ "def", "start", "(", "self", ")", ":", "# Spawn the listening thread", "self", ".", "listen_thread", "=", "eventlet", ".", "spawn_n", "(", "self", ".", "listen", ")", "# Now do the initial load", "self", ".", "reload", "(", ")" ]
Starts the ControlDaemon by launching the listening thread and triggering the initial limits load.
[ "Starts", "the", "ControlDaemon", "by", "launching", "the", "listening", "thread", "and", "triggering", "the", "initial", "limits", "load", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/control.py#L136-L146
train
klmitch/turnstile
turnstile/control.py
ControlDaemon.listen
def listen(self): """ Listen for incoming control messages. If the 'redis.shard_hint' configuration is set, its value will be passed to the pubsub() method when setting up the subscription. The control channel to subscribe to is specified by the 'redis.control_channel' configuration ('control' by default). """ # Use a specific database handle, with override. This allows # the long-lived listen thread to be configured to use a # different database or different database options. db = self.config.get_database('control') # Need a pub-sub object kwargs = {} if 'shard_hint' in self.config['control']: kwargs['shard_hint'] = self.config['control']['shard_hint'] pubsub = db.pubsub(**kwargs) # Subscribe to the right channel(s)... channel = self.config['control'].get('channel', 'control') pubsub.subscribe(channel) # Now we listen... for msg in pubsub.listen(): # Only interested in messages to our reload channel if (msg['type'] in ('pmessage', 'message') and msg['channel'] == channel): # Figure out what kind of message this is command, _sep, args = msg['data'].partition(':') # We must have some command... if not command: continue # Don't do anything with internal commands if command[0] == '_': LOG.error("Cannot call internal command %r" % command) continue # Look up the command if command in self._commands: func = self._commands[command] else: # Try an entrypoint func = utils.find_entrypoint('turnstile.command', command, compat=False) self._commands[command] = func # Don't do anything with missing commands if not func: LOG.error("No such command %r" % command) continue # Execute the desired command arglist = args.split(':') if args else [] try: func(self, *arglist) except Exception: LOG.exception("Failed to execute command %r arguments %r" % (command, arglist)) continue
python
def listen(self): """ Listen for incoming control messages. If the 'redis.shard_hint' configuration is set, its value will be passed to the pubsub() method when setting up the subscription. The control channel to subscribe to is specified by the 'redis.control_channel' configuration ('control' by default). """ # Use a specific database handle, with override. This allows # the long-lived listen thread to be configured to use a # different database or different database options. db = self.config.get_database('control') # Need a pub-sub object kwargs = {} if 'shard_hint' in self.config['control']: kwargs['shard_hint'] = self.config['control']['shard_hint'] pubsub = db.pubsub(**kwargs) # Subscribe to the right channel(s)... channel = self.config['control'].get('channel', 'control') pubsub.subscribe(channel) # Now we listen... for msg in pubsub.listen(): # Only interested in messages to our reload channel if (msg['type'] in ('pmessage', 'message') and msg['channel'] == channel): # Figure out what kind of message this is command, _sep, args = msg['data'].partition(':') # We must have some command... if not command: continue # Don't do anything with internal commands if command[0] == '_': LOG.error("Cannot call internal command %r" % command) continue # Look up the command if command in self._commands: func = self._commands[command] else: # Try an entrypoint func = utils.find_entrypoint('turnstile.command', command, compat=False) self._commands[command] = func # Don't do anything with missing commands if not func: LOG.error("No such command %r" % command) continue # Execute the desired command arglist = args.split(':') if args else [] try: func(self, *arglist) except Exception: LOG.exception("Failed to execute command %r arguments %r" % (command, arglist)) continue
[ "def", "listen", "(", "self", ")", ":", "# Use a specific database handle, with override. This allows", "# the long-lived listen thread to be configured to use a", "# different database or different database options.", "db", "=", "self", ".", "config", ".", "get_database", "(", "'control'", ")", "# Need a pub-sub object", "kwargs", "=", "{", "}", "if", "'shard_hint'", "in", "self", ".", "config", "[", "'control'", "]", ":", "kwargs", "[", "'shard_hint'", "]", "=", "self", ".", "config", "[", "'control'", "]", "[", "'shard_hint'", "]", "pubsub", "=", "db", ".", "pubsub", "(", "*", "*", "kwargs", ")", "# Subscribe to the right channel(s)...", "channel", "=", "self", ".", "config", "[", "'control'", "]", ".", "get", "(", "'channel'", ",", "'control'", ")", "pubsub", ".", "subscribe", "(", "channel", ")", "# Now we listen...", "for", "msg", "in", "pubsub", ".", "listen", "(", ")", ":", "# Only interested in messages to our reload channel", "if", "(", "msg", "[", "'type'", "]", "in", "(", "'pmessage'", ",", "'message'", ")", "and", "msg", "[", "'channel'", "]", "==", "channel", ")", ":", "# Figure out what kind of message this is", "command", ",", "_sep", ",", "args", "=", "msg", "[", "'data'", "]", ".", "partition", "(", "':'", ")", "# We must have some command...", "if", "not", "command", ":", "continue", "# Don't do anything with internal commands", "if", "command", "[", "0", "]", "==", "'_'", ":", "LOG", ".", "error", "(", "\"Cannot call internal command %r\"", "%", "command", ")", "continue", "# Look up the command", "if", "command", "in", "self", ".", "_commands", ":", "func", "=", "self", ".", "_commands", "[", "command", "]", "else", ":", "# Try an entrypoint", "func", "=", "utils", ".", "find_entrypoint", "(", "'turnstile.command'", ",", "command", ",", "compat", "=", "False", ")", "self", ".", "_commands", "[", "command", "]", "=", "func", "# Don't do anything with missing commands", "if", "not", "func", ":", "LOG", ".", "error", "(", "\"No such command %r\"", "%", "command", ")", "continue", "# Execute the desired command", "arglist", "=", "args", ".", "split", "(", "':'", ")", "if", "args", "else", "[", "]", "try", ":", "func", "(", "self", ",", "*", "arglist", ")", "except", "Exception", ":", "LOG", ".", "exception", "(", "\"Failed to execute command %r arguments %r\"", "%", "(", "command", ",", "arglist", ")", ")", "continue" ]
Listen for incoming control messages. If the 'redis.shard_hint' configuration is set, its value will be passed to the pubsub() method when setting up the subscription. The control channel to subscribe to is specified by the 'redis.control_channel' configuration ('control' by default).
[ "Listen", "for", "incoming", "control", "messages", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/control.py#L148-L212
train
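A minimal, self-contained sketch of the subscribe-and-dispatch loop that listen() implements, assuming a Redis server on localhost:6379 and a hypothetical in-process command registry (the real implementation resolves unknown commands through entrypoints):

import redis

# hypothetical registry standing in for self._commands / entrypoint lookup
COMMANDS = {'ping': lambda *args: print('pong', *args)}

db = redis.StrictRedis(host='localhost', port=6379)
pubsub = db.pubsub()
pubsub.subscribe('control')      # only one channel, so no channel check below

for msg in pubsub.listen():
    if msg['type'] != 'message':
        continue                 # skip the initial subscribe confirmation
    command, _sep, args = msg['data'].decode().partition(':')
    func = COMMANDS.get(command)
    if func is None:
        print('No such command %r' % command)
        continue
    arglist = args.split(':') if args else []
    func(*arglist)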
klmitch/turnstile
turnstile/control.py
ControlDaemon.reload
def reload(self): """ Reloads the limits configuration from the database. If an error occurs loading the configuration, an error-level log message will be emitted. Additionally, the error message will be added to the set specified by the 'redis.errors_key' configuration ('errors' by default) and sent to the publishing channel specified by the 'redis.errors_channel' configuration ('errors' by default). """ # Acquire the pending semaphore. If we fail, exit--someone # else is already doing the reload if not self.pending.acquire(False): return # Do the remaining steps in a try/finally block so we make # sure to release the semaphore control_args = self.config['control'] try: # Load all the limits key = control_args.get('limits_key', 'limits') self.limits.set_limits(self.db.zrange(key, 0, -1)) except Exception: # Log an error LOG.exception("Could not load limits") # Get our error set and publish channel error_key = control_args.get('errors_key', 'errors') error_channel = control_args.get('errors_channel', 'errors') # Get an informative message msg = "Failed to load limits: " + traceback.format_exc() # Store the message into the error set. We use a set here # because it's likely that more than one node will # generate the same message if there is an error, and this # avoids an explosion in the size of the set. with utils.ignore_except(): self.db.sadd(error_key, msg) # Publish the message to a channel with utils.ignore_except(): self.db.publish(error_channel, msg) finally: self.pending.release()
python
def reload(self): """ Reloads the limits configuration from the database. If an error occurs loading the configuration, an error-level log message will be emitted. Additionally, the error message will be added to the set specified by the 'redis.errors_key' configuration ('errors' by default) and sent to the publishing channel specified by the 'redis.errors_channel' configuration ('errors' by default). """ # Acquire the pending semaphore. If we fail, exit--someone # else is already doing the reload if not self.pending.acquire(False): return # Do the remaining steps in a try/finally block so we make # sure to release the semaphore control_args = self.config['control'] try: # Load all the limits key = control_args.get('limits_key', 'limits') self.limits.set_limits(self.db.zrange(key, 0, -1)) except Exception: # Log an error LOG.exception("Could not load limits") # Get our error set and publish channel error_key = control_args.get('errors_key', 'errors') error_channel = control_args.get('errors_channel', 'errors') # Get an informative message msg = "Failed to load limits: " + traceback.format_exc() # Store the message into the error set. We use a set here # because it's likely that more than one node will # generate the same message if there is an error, and this # avoids an explosion in the size of the set. with utils.ignore_except(): self.db.sadd(error_key, msg) # Publish the message to a channel with utils.ignore_except(): self.db.publish(error_channel, msg) finally: self.pending.release()
[ "def", "reload", "(", "self", ")", ":", "# Acquire the pending semaphore. If we fail, exit--someone", "# else is already doing the reload", "if", "not", "self", ".", "pending", ".", "acquire", "(", "False", ")", ":", "return", "# Do the remaining steps in a try/finally block so we make", "# sure to release the semaphore", "control_args", "=", "self", ".", "config", "[", "'control'", "]", "try", ":", "# Load all the limits", "key", "=", "control_args", ".", "get", "(", "'limits_key'", ",", "'limits'", ")", "self", ".", "limits", ".", "set_limits", "(", "self", ".", "db", ".", "zrange", "(", "key", ",", "0", ",", "-", "1", ")", ")", "except", "Exception", ":", "# Log an error", "LOG", ".", "exception", "(", "\"Could not load limits\"", ")", "# Get our error set and publish channel", "error_key", "=", "control_args", ".", "get", "(", "'errors_key'", ",", "'errors'", ")", "error_channel", "=", "control_args", ".", "get", "(", "'errors_channel'", ",", "'errors'", ")", "# Get an informative message", "msg", "=", "\"Failed to load limits: \"", "+", "traceback", ".", "format_exc", "(", ")", "# Store the message into the error set. We use a set here", "# because it's likely that more than one node will", "# generate the same message if there is an error, and this", "# avoids an explosion in the size of the set.", "with", "utils", ".", "ignore_except", "(", ")", ":", "self", ".", "db", ".", "sadd", "(", "error_key", ",", "msg", ")", "# Publish the message to a channel", "with", "utils", ".", "ignore_except", "(", ")", ":", "self", ".", "db", ".", "publish", "(", "error_channel", ",", "msg", ")", "finally", ":", "self", ".", "pending", ".", "release", "(", ")" ]
Reloads the limits configuration from the database. If an error occurs loading the configuration, an error-level log message will be emitted. Additionally, the error message will be added to the set specified by the 'redis.errors_key' configuration ('errors' by default) and sent to the publishing channel specified by the 'redis.errors_channel' configuration ('errors' by default).
[ "Reloads", "the", "limits", "configuration", "from", "the", "database", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/control.py#L225-L271
train
garenchan/policy
demos/flask/server.py
enforce_policy
def enforce_policy(rule): """Enforce a policy on an API.""" def wrapper(func): """Decorator used to wrap an API.""" @functools.wraps(func) def wrapped(*args, **kwargs): if enforcer.enforce(rule, {}, g.cred): return func(*args, **kwargs) return wrapped return wrapper
python
def enforce_policy(rule): """Enforce a policy on an API.""" def wrapper(func): """Decorator used to wrap an API.""" @functools.wraps(func) def wrapped(*args, **kwargs): if enforcer.enforce(rule, {}, g.cred): return func(*args, **kwargs) return wrapped return wrapper
[ "def", "enforce_policy", "(", "rule", ")", ":", "def", "wrapper", "(", "func", ")", ":", "\"\"\"Decorator used to wrap an API.\"\"\"", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "enforcer", ".", "enforce", "(", "rule", ",", "{", "}", ",", "g", ".", "cred", ")", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapped", "return", "wrapper" ]
Enforce a policy on an API.
[ "Enforce", "a", "policy", "on", "an", "API", "." ]
7709ae5f371146f8c90380d0877a5e59d731f644
https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/demos/flask/server.py#L56-L67
train
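The parameterized-decorator pattern above can be exercised standalone; this toy version substitutes a plain credentials dict for Flask's g and the policy enforcer (both substitutions are assumptions of this sketch, not the demo's actual objects):

import functools

def require(permission):
    def wrapper(func):
        @functools.wraps(func)
        def wrapped(cred, *args, **kwargs):
            if permission in cred.get('permissions', ()):
                return func(cred, *args, **kwargs)
            raise PermissionError('missing %r' % permission)
        return wrapped
    return wrapper

@require('servers:list')
def list_servers(cred):
    return ['vm-1', 'vm-2']

print(list_servers({'permissions': {'servers:list'}}))  # ['vm-1', 'vm-2']

Note that the original wrapped() silently returns None when enforcement fails; callers of the Flask demo should account for that.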
klmitch/turnstile
turnstile/database.py
initialize
def initialize(config): """ Initialize a connection to the Redis database. """ # Determine the client class to use if 'redis_client' in config: client = utils.find_entrypoint('turnstile.redis_client', config['redis_client'], required=True) else: client = redis.StrictRedis # Extract relevant connection information from the configuration kwargs = {} for cfg_var, type_ in REDIS_CONFIGS.items(): if cfg_var in config: kwargs[cfg_var] = type_(config[cfg_var]) # Make sure we have at a minimum the hostname if 'host' not in kwargs and 'unix_socket_path' not in kwargs: raise redis.ConnectionError("No host specified for redis database") # Look up the connection pool configuration cpool_class = None cpool = {} extra_kwargs = {} for key, value in config.items(): if key.startswith('connection_pool.'): _dummy, _sep, varname = key.partition('.') if varname == 'connection_class': cpool[varname] = utils.find_entrypoint( 'turnstile.connection_class', value, required=True) elif varname == 'max_connections': cpool[varname] = int(value) elif varname == 'parser_class': cpool[varname] = utils.find_entrypoint( 'turnstile.parser_class', value, required=True) else: cpool[varname] = value elif key not in REDIS_CONFIGS and key not in REDIS_EXCLUDES: extra_kwargs[key] = value if cpool: cpool_class = redis.ConnectionPool # Use custom connection pool class if requested... if 'connection_pool' in config: cpool_class = utils.find_entrypoint('turnstile.connection_pool', config['connection_pool'], required=True) # If we're using a connection pool, we'll need to pass the keyword # arguments to that instead of to redis if cpool_class: cpool.update(kwargs) # Use a custom connection class? if 'connection_class' not in cpool: if 'unix_socket_path' in cpool: if 'host' in cpool: del cpool['host'] if 'port' in cpool: del cpool['port'] cpool['path'] = cpool['unix_socket_path'] del cpool['unix_socket_path'] cpool['connection_class'] = redis.UnixDomainSocketConnection else: cpool['connection_class'] = redis.Connection # Build the connection pool to use and set up to pass it into # the redis constructor... kwargs = dict(connection_pool=cpool_class(**cpool)) # Build and return the database kwargs.update(extra_kwargs) return client(**kwargs)
python
def initialize(config): """ Initialize a connection to the Redis database. """ # Determine the client class to use if 'redis_client' in config: client = utils.find_entrypoint('turnstile.redis_client', config['redis_client'], required=True) else: client = redis.StrictRedis # Extract relevant connection information from the configuration kwargs = {} for cfg_var, type_ in REDIS_CONFIGS.items(): if cfg_var in config: kwargs[cfg_var] = type_(config[cfg_var]) # Make sure we have at a minimum the hostname if 'host' not in kwargs and 'unix_socket_path' not in kwargs: raise redis.ConnectionError("No host specified for redis database") # Look up the connection pool configuration cpool_class = None cpool = {} extra_kwargs = {} for key, value in config.items(): if key.startswith('connection_pool.'): _dummy, _sep, varname = key.partition('.') if varname == 'connection_class': cpool[varname] = utils.find_entrypoint( 'turnstile.connection_class', value, required=True) elif varname == 'max_connections': cpool[varname] = int(value) elif varname == 'parser_class': cpool[varname] = utils.find_entrypoint( 'turnstile.parser_class', value, required=True) else: cpool[varname] = value elif key not in REDIS_CONFIGS and key not in REDIS_EXCLUDES: extra_kwargs[key] = value if cpool: cpool_class = redis.ConnectionPool # Use custom connection pool class if requested... if 'connection_pool' in config: cpool_class = utils.find_entrypoint('turnstile.connection_pool', config['connection_pool'], required=True) # If we're using a connection pool, we'll need to pass the keyword # arguments to that instead of to redis if cpool_class: cpool.update(kwargs) # Use a custom connection class? if 'connection_class' not in cpool: if 'unix_socket_path' in cpool: if 'host' in cpool: del cpool['host'] if 'port' in cpool: del cpool['port'] cpool['path'] = cpool['unix_socket_path'] del cpool['unix_socket_path'] cpool['connection_class'] = redis.UnixDomainSocketConnection else: cpool['connection_class'] = redis.Connection # Build the connection pool to use and set up to pass it into # the redis constructor... kwargs = dict(connection_pool=cpool_class(**cpool)) # Build and return the database kwargs.update(extra_kwargs) return client(**kwargs)
[ "def", "initialize", "(", "config", ")", ":", "# Determine the client class to use", "if", "'redis_client'", "in", "config", ":", "client", "=", "utils", ".", "find_entrypoint", "(", "'turnstile.redis_client'", ",", "config", "[", "'redis_client'", "]", ",", "required", "=", "True", ")", "else", ":", "client", "=", "redis", ".", "StrictRedis", "# Extract relevant connection information from the configuration", "kwargs", "=", "{", "}", "for", "cfg_var", ",", "type_", "in", "REDIS_CONFIGS", ".", "items", "(", ")", ":", "if", "cfg_var", "in", "config", ":", "kwargs", "[", "cfg_var", "]", "=", "type_", "(", "config", "[", "cfg_var", "]", ")", "# Make sure we have at a minimum the hostname", "if", "'host'", "not", "in", "kwargs", "and", "'unix_socket_path'", "not", "in", "kwargs", ":", "raise", "redis", ".", "ConnectionError", "(", "\"No host specified for redis database\"", ")", "# Look up the connection pool configuration", "cpool_class", "=", "None", "cpool", "=", "{", "}", "extra_kwargs", "=", "{", "}", "for", "key", ",", "value", "in", "config", ".", "items", "(", ")", ":", "if", "key", ".", "startswith", "(", "'connection_pool.'", ")", ":", "_dummy", ",", "_sep", ",", "varname", "=", "key", ".", "partition", "(", "'.'", ")", "if", "varname", "==", "'connection_class'", ":", "cpool", "[", "varname", "]", "=", "utils", ".", "find_entrypoint", "(", "'turnstile.connection_class'", ",", "value", ",", "required", "=", "True", ")", "elif", "varname", "==", "'max_connections'", ":", "cpool", "[", "varname", "]", "=", "int", "(", "value", ")", "elif", "varname", "==", "'parser_class'", ":", "cpool", "[", "varname", "]", "=", "utils", ".", "find_entrypoint", "(", "'turnstile.parser_class'", ",", "value", ",", "required", "=", "True", ")", "else", ":", "cpool", "[", "varname", "]", "=", "value", "elif", "key", "not", "in", "REDIS_CONFIGS", "and", "key", "not", "in", "REDIS_EXCLUDES", ":", "extra_kwargs", "[", "key", "]", "=", "value", "if", "cpool", ":", "cpool_class", "=", "redis", ".", "ConnectionPool", "# Use custom connection pool class if requested...", "if", "'connection_pool'", "in", "config", ":", "cpool_class", "=", "utils", ".", "find_entrypoint", "(", "'turnstile.connection_pool'", ",", "config", "[", "'connection_pool'", "]", ",", "required", "=", "True", ")", "# If we're using a connection pool, we'll need to pass the keyword", "# arguments to that instead of to redis", "if", "cpool_class", ":", "cpool", ".", "update", "(", "kwargs", ")", "# Use a custom connection class?", "if", "'connection_class'", "not", "in", "cpool", ":", "if", "'unix_socket_path'", "in", "cpool", ":", "if", "'host'", "in", "cpool", ":", "del", "cpool", "[", "'host'", "]", "if", "'port'", "in", "cpool", ":", "del", "cpool", "[", "'port'", "]", "cpool", "[", "'path'", "]", "=", "cpool", "[", "'unix_socket_path'", "]", "del", "cpool", "[", "'unix_socket_path'", "]", "cpool", "[", "'connection_class'", "]", "=", "redis", ".", "UnixDomainSocketConnection", "else", ":", "cpool", "[", "'connection_class'", "]", "=", "redis", ".", "Connection", "# Build the connection pool to use and set up to pass it into", "# the redis constructor...", "kwargs", "=", "dict", "(", "connection_pool", "=", "cpool_class", "(", "*", "*", "cpool", ")", ")", "# Build and return the database", "kwargs", ".", "update", "(", "extra_kwargs", ")", "return", "client", "(", "*", "*", "kwargs", ")" ]
Initialize a connection to the Redis database.
[ "Initialize", "a", "connection", "to", "the", "Redis", "database", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/database.py#L35-L111
train
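For reference, the two connection-pool shapes this function can end up constructing look roughly like the sketch below (host, port, and socket path are placeholder values):

import redis

# TCP pool: the default connection class
tcp_pool = redis.ConnectionPool(
    connection_class=redis.Connection,
    host='localhost', port=6379, max_connections=10)
client = redis.StrictRedis(connection_pool=tcp_pool)

# Unix-socket pool: note that 'path' replaces host/port,
# mirroring the key rewriting done in initialize()
sock_pool = redis.ConnectionPool(
    connection_class=redis.UnixDomainSocketConnection,
    path='/tmp/redis.sock')
sock_client = redis.StrictRedis(connection_pool=sock_pool)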
klmitch/turnstile
turnstile/database.py
limits_hydrate
def limits_hydrate(db, lims): """ Helper function to hydrate a list of limits. :param db: A database handle. :param lims: A list of limit strings, as retrieved from the database. """ return [limits.Limit.hydrate(db, lim) for lim in lims]
python
def limits_hydrate(db, lims): """ Helper function to hydrate a list of limits. :param db: A database handle. :param lims: A list of limit strings, as retrieved from the database. """ return [limits.Limit.hydrate(db, lim) for lim in lims]
[ "def", "limits_hydrate", "(", "db", ",", "lims", ")", ":", "return", "[", "limits", ".", "Limit", ".", "hydrate", "(", "db", ",", "lim", ")", "for", "lim", "in", "lims", "]" ]
Helper function to hydrate a list of limits. :param db: A database handle. :param lims: A list of limit strings, as retrieved from the database.
[ "Helper", "function", "to", "hydrate", "a", "list", "of", "limits", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/database.py#L114-L123
train
klmitch/turnstile
turnstile/database.py
limit_update
def limit_update(db, key, limits): """ Safely updates the list of limits in the database. :param db: The database handle. :param key: The key the limits are stored under. :param limits: A list or sequence of limit objects, each understanding the dehydrate() method. The limits list currently in the database will be atomically changed to match the new list. This is done using the pipeline() method. """ # Start by dehydrating all the limits desired = [msgpack.dumps(l.dehydrate()) for l in limits] desired_set = set(desired) # Now, let's update the limits with db.pipeline() as pipe: while True: try: # Watch for changes to the key pipe.watch(key) # Look up the existing limits existing = set(pipe.zrange(key, 0, -1)) # Start the transaction... pipe.multi() # Remove limits we no longer have for lim in existing - desired_set: pipe.zrem(key, lim) # Update or add all our desired limits for idx, lim in enumerate(desired): pipe.zadd(key, (idx + 1) * 10, lim) # Execute the transaction pipe.execute() except redis.WatchError: # Try again... continue else: # We're all done! break
python
def limit_update(db, key, limits): """ Safely updates the list of limits in the database. :param db: The database handle. :param key: The key the limits are stored under. :param limits: A list or sequence of limit objects, each understanding the dehydrate() method. The limits list currently in the database will be atomically changed to match the new list. This is done using the pipeline() method. """ # Start by dehydrating all the limits desired = [msgpack.dumps(l.dehydrate()) for l in limits] desired_set = set(desired) # Now, let's update the limits with db.pipeline() as pipe: while True: try: # Watch for changes to the key pipe.watch(key) # Look up the existing limits existing = set(pipe.zrange(key, 0, -1)) # Start the transaction... pipe.multi() # Remove limits we no longer have for lim in existing - desired_set: pipe.zrem(key, lim) # Update or add all our desired limits for idx, lim in enumerate(desired): pipe.zadd(key, (idx + 1) * 10, lim) # Execute the transaction pipe.execute() except redis.WatchError: # Try again... continue else: # We're all done! break
[ "def", "limit_update", "(", "db", ",", "key", ",", "limits", ")", ":", "# Start by dehydrating all the limits", "desired", "=", "[", "msgpack", ".", "dumps", "(", "l", ".", "dehydrate", "(", ")", ")", "for", "l", "in", "limits", "]", "desired_set", "=", "set", "(", "desired", ")", "# Now, let's update the limits", "with", "db", ".", "pipeline", "(", ")", "as", "pipe", ":", "while", "True", ":", "try", ":", "# Watch for changes to the key", "pipe", ".", "watch", "(", "key", ")", "# Look up the existing limits", "existing", "=", "set", "(", "pipe", ".", "zrange", "(", "key", ",", "0", ",", "-", "1", ")", ")", "# Start the transaction...", "pipe", ".", "multi", "(", ")", "# Remove limits we no longer have", "for", "lim", "in", "existing", "-", "desired_set", ":", "pipe", ".", "zrem", "(", "key", ",", "lim", ")", "# Update or add all our desired limits", "for", "idx", ",", "lim", "in", "enumerate", "(", "desired", ")", ":", "pipe", ".", "zadd", "(", "key", ",", "(", "idx", "+", "1", ")", "*", "10", ",", "lim", ")", "# Execute the transaction", "pipe", ".", "execute", "(", ")", "except", "redis", ".", "WatchError", ":", "# Try again...", "continue", "else", ":", "# We're all done!", "break" ]
Safely updates the list of limits in the database. :param db: The database handle. :param key: The key the limits are stored under. :param limits: A list or sequence of limit objects, each understanding the dehydrate() method. The limits list currently in the database will be atomically changed to match the new list. This is done using the pipeline() method.
[ "Safely", "updates", "the", "list", "of", "limits", "in", "the", "database", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/database.py#L126-L172
train
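The WATCH/MULTI/EXEC retry loop used here is redis-py's standard optimistic-locking idiom; a minimal standalone version (atomically incrementing a counter, key name arbitrary) looks like this:

import redis

db = redis.StrictRedis()
with db.pipeline() as pipe:
    while True:
        try:
            pipe.watch('counter')            # transaction aborts if this key changes
            current = int(pipe.get('counter') or 0)  # immediate-mode read after watch()
            pipe.multi()                     # buffer commands from here on
            pipe.set('counter', current + 1)
            pipe.execute()
            break                            # committed
        except redis.WatchError:
            continue                         # someone raced us; retry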
klmitch/turnstile
turnstile/database.py
command
def command(db, channel, command, *args): """ Utility function to issue a command to all Turnstile instances. :param db: The database handle. :param channel: The control channel all Turnstile instances are listening on. :param command: The command, as plain text. Currently, only 'reload' and 'ping' are recognized. All remaining arguments are treated as arguments for the command; they will be stringified and sent along with the command to the control channel. Note that ':' is an illegal character in arguments, but no warnings will be issued if it is used. """ # Build the command we're sending cmd = [command] cmd.extend(str(a) for a in args) # Send it out db.publish(channel, ':'.join(cmd))
python
def command(db, channel, command, *args): """ Utility function to issue a command to all Turnstile instances. :param db: The database handle. :param channel: The control channel all Turnstile instances are listening on. :param command: The command, as plain text. Currently, only 'reload' and 'ping' are recognized. All remaining arguments are treated as arguments for the command; they will be stringified and sent along with the command to the control channel. Note that ':' is an illegal character in arguments, but no warnings will be issued if it is used. """ # Build the command we're sending cmd = [command] cmd.extend(str(a) for a in args) # Send it out db.publish(channel, ':'.join(cmd))
[ "def", "command", "(", "db", ",", "channel", ",", "command", ",", "*", "args", ")", ":", "# Build the command we're sending", "cmd", "=", "[", "command", "]", "cmd", ".", "extend", "(", "str", "(", "a", ")", "for", "a", "in", "args", ")", "# Send it out", "db", ".", "publish", "(", "channel", ",", "':'", ".", "join", "(", "cmd", ")", ")" ]
Utility function to issue a command to all Turnstile instances. :param db: The database handle. :param channel: The control channel all Turnstile instances are listening on. :param command: The command, as plain text. Currently, only 'reload' and 'ping' are recognized. All remaining arguments are treated as arguments for the command; they will be stringified and sent along with the command to the control channel. Note that ':' is an illegal character in arguments, but no warnings will be issued if it is used.
[ "Utility", "function", "to", "issue", "a", "command", "to", "all", "Turnstile", "instances", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/database.py#L175-L196
train
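As a usage example, broadcasting a reload to all instances reduces to a single publish; the channel name here is the default 'control':

import redis

db = redis.StrictRedis()
# command(db, 'control', 'reload', 'immediate') is equivalent to:
db.publish('control', ':'.join(['reload', 'immediate']))  # sends 'reload:immediate'

A ControlDaemon.listen() loop on the other end splits the payload back apart on ':'.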
samghelms/mathviz
mathviz_hopper/src/indices.py
GensimMathIndex._tokenize_latex
def _tokenize_latex(self, exp): """ Internal method to tokenize LaTeX """ tokens = [] prevexp = "" while exp: t, exp = self._get_next_token(exp) if t.strip() != "": tokens.append(t) if prevexp == exp: break prevexp = exp return tokens
python
def _tokenize_latex(self, exp): """ Internal method to tokenize LaTeX """ tokens = [] prevexp = "" while exp: t, exp = self._get_next_token(exp) if t.strip() != "": tokens.append(t) if prevexp == exp: break prevexp = exp return tokens
[ "def", "_tokenize_latex", "(", "self", ",", "exp", ")", ":", "tokens", "=", "[", "]", "prevexp", "=", "\"\"", "while", "exp", ":", "t", ",", "exp", "=", "self", ".", "_get_next_token", "(", "exp", ")", "if", "t", ".", "strip", "(", ")", "!=", "\"\"", ":", "tokens", ".", "append", "(", "t", ")", "if", "prevexp", "==", "exp", ":", "break", "prevexp", "=", "exp", "return", "tokens" ]
Internal method to tokenize LaTeX
[ "Internal", "method", "to", "tokenize", "LaTeX" ]
30fe89537379faea4de8c8b568ac6e52e4d15353
https://github.com/samghelms/mathviz/blob/30fe89537379faea4de8c8b568ac6e52e4d15353/mathviz_hopper/src/indices.py#L95-L108
train
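_get_next_token() is defined elsewhere in the class and not shown; purely as an illustration of the kind of token stream this loop produces, a single-regex approximation (my own sketch, not the package's actual rules) could be:

import re

# one control sequence, one structural character, or one run of other characters
TOKEN_RE = re.compile(r'\\[A-Za-z]+|[{}^_+\-=()]|[^\s\\{}^_+\-=()]+')

def tokenize_latex(exp):
    return TOKEN_RE.findall(exp)

print(tokenize_latex(r'\frac{a}{b} + x^2'))
# ['\\frac', '{', 'a', '}', '{', 'b', '}', '+', 'x', '^', '2']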
samghelms/mathviz
mathviz_hopper/src/indices.py
GensimMathIndex._convert_query
def _convert_query(self, query): """ Convert the query into a bag-of-words vector and return its nearest neighbors from the index. """ query = self.dictionary.doc2bow(self._tokenize_latex(query)) sims = self.index[query] neighbors = sorted(sims, key=lambda item: -item[1]) neighbors = {"neighbors":[{self.columns[0]: {"data": self.docs[n[0]], "fmt": "math"}, self.columns[1]: {"data": float(n[1])}} for n in neighbors]} if neighbors else {"neighbors": []} return neighbors
python
def _convert_query(self, query): """ Convert the query into a bag-of-words vector and return its nearest neighbors from the index. """ query = self.dictionary.doc2bow(self._tokenize_latex(query)) sims = self.index[query] neighbors = sorted(sims, key=lambda item: -item[1]) neighbors = {"neighbors":[{self.columns[0]: {"data": self.docs[n[0]], "fmt": "math"}, self.columns[1]: {"data": float(n[1])}} for n in neighbors]} if neighbors else {"neighbors": []} return neighbors
[ "def", "_convert_query", "(", "self", ",", "query", ")", ":", "query", "=", "self", ".", "dictionary", ".", "doc2bow", "(", "self", ".", "_tokenize_latex", "(", "query", ")", ")", "sims", "=", "self", ".", "index", "[", "query", "]", "neighbors", "=", "sorted", "(", "sims", ",", "key", "=", "lambda", "item", ":", "-", "item", "[", "1", "]", ")", "neighbors", "=", "{", "\"neighbors\"", ":", "[", "{", "self", ".", "columns", "[", "0", "]", ":", "{", "\"data\"", ":", "self", ".", "docs", "[", "n", "[", "0", "]", "]", ",", "\"fmt\"", ":", "\"math\"", "}", ",", "self", ".", "columns", "[", "1", "]", ":", "{", "\"data\"", ":", "float", "(", "n", "[", "1", "]", ")", "}", "}", "for", "n", "in", "neighbors", "]", "}", "if", "neighbors", "else", "{", "\"neighbors\"", ":", "[", "]", "}", "return", "neighbors" ]
Convert the query into a bag-of-words vector and return its nearest neighbors from the index.
[ "Convert", "the", "query", "into", "a", "bag", "-", "of", "-", "words", "vector", "and", "return", "its", "nearest", "neighbors", "from", "the", "index", "." ]
30fe89537379faea4de8c8b568ac6e52e4d15353
https://github.com/samghelms/mathviz/blob/30fe89537379faea4de8c8b568ac6e52e4d15353/mathviz_hopper/src/indices.py#L110-L118
train
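The doc2bow lookup and similarity ranking follow the standard gensim workflow; a self-contained sketch with toy documents (requires gensim; MatrixSimilarity yields one cosine score per indexed document, ranked here with enumerate):

from gensim import corpora, similarities

docs = [['x', '^', '2'], ['\\frac', 'a', 'b'], ['x', '+', '1']]
dictionary = corpora.Dictionary(docs)
corpus = [dictionary.doc2bow(d) for d in docs]
index = similarities.MatrixSimilarity(corpus, num_features=len(dictionary))

query = dictionary.doc2bow(['x', '^', '3'])
sims = index[query]                               # array of cosine similarities
ranked = sorted(enumerate(sims), key=lambda item: -item[1])
print(ranked[0])                                  # nearest neighbour first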
jasonrbriggs/proton
python/proton/utils.py
join
def join(path1, path2): ''' nicely join two path elements together ''' if path1.endswith('/') and path2.startswith('/'): return ''.join([path1, path2[1:]]) elif path1.endswith('/') or path2.startswith('/'): return ''.join([path1, path2]) else: return ''.join([path1, '/', path2])
python
def join(path1, path2): ''' nicely join two path elements together ''' if path1.endswith('/') and path2.startswith('/'): return ''.join([path1, path2[1:]]) elif path1.endswith('/') or path2.startswith('/'): return ''.join([path1, path2]) else: return ''.join([path1, '/', path2])
[ "def", "join", "(", "path1", ",", "path2", ")", ":", "if", "path1", ".", "endswith", "(", "'/'", ")", "and", "path2", ".", "startswith", "(", "'/'", ")", ":", "return", "''", ".", "join", "(", "[", "path1", ",", "path2", "[", "1", ":", "]", "]", ")", "elif", "path1", ".", "endswith", "(", "'/'", ")", "or", "path2", ".", "startswith", "(", "'/'", ")", ":", "return", "''", ".", "join", "(", "[", "path1", ",", "path2", "]", ")", "else", ":", "return", "''", ".", "join", "(", "[", "path1", ",", "'/'", ",", "path2", "]", ")" ]
nicely join two path elements together
[ "nicely", "join", "two", "path", "elements", "together" ]
e734734750797ef0caaa1680379e07b86d7a53e3
https://github.com/jasonrbriggs/proton/blob/e734734750797ef0caaa1680379e07b86d7a53e3/python/proton/utils.py#L24-L33
train
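With the function above in scope, the three branches cover every slash combination:

assert join('a/', '/b') == 'a/b'   # both sides have a slash: drop one
assert join('a/', 'b') == 'a/b'    # exactly one slash: keep it
assert join('a', '/b') == 'a/b'
assert join('a', 'b') == 'a/b'     # no slash: insert one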
samghelms/mathviz
mathviz_hopper/src/print_math.py
print_math
def print_math(math_expression_lst, name = "math.html", out='html', formatter = lambda x: x): """ Converts LaTeX math expressions into an HTML layout. Creates an HTML file in the directory where print_math is called by default. Displays math in a Jupyter notebook if the "notebook" argument is specified. Args: math_expression_lst (list): A list of LaTeX math (string) to be rendered by KaTeX out (string): {"html"|"notebook"}: HTML by default. Specifies output medium. formatter (function): function that cleans up the string for KaTeX. Returns: An HTML file in the directory where this function is called, or displays HTML output in a notebook. """ try: shutil.rmtree('viz') except: pass pth = get_cur_path()+print_math_template_path shutil.copytree(pth, 'viz') # clean_str = formatter(math_expression_lst) html_loc = None if out == "html": html_loc = pth+"standalone_index.html" if out == "notebook": from IPython.display import display, HTML html_loc = pth+"notebook_index.html" html = open(html_loc).read() html = html.replace("__MATH_LIST__", json.dumps(math_expression_lst)) if out == "notebook": display(HTML(html)) elif out == "html": with open(name, "w+") as out_f: out_f.write(html)
python
def print_math(math_expression_lst, name = "math.html", out='html', formatter = lambda x: x): """ Converts LaTeX math expressions into an HTML layout. Creates an HTML file in the directory where print_math is called by default. Displays math in a Jupyter notebook if the "notebook" argument is specified. Args: math_expression_lst (list): A list of LaTeX math (string) to be rendered by KaTeX out (string): {"html"|"notebook"}: HTML by default. Specifies output medium. formatter (function): function that cleans up the string for KaTeX. Returns: An HTML file in the directory where this function is called, or displays HTML output in a notebook. """ try: shutil.rmtree('viz') except: pass pth = get_cur_path()+print_math_template_path shutil.copytree(pth, 'viz') # clean_str = formatter(math_expression_lst) html_loc = None if out == "html": html_loc = pth+"standalone_index.html" if out == "notebook": from IPython.display import display, HTML html_loc = pth+"notebook_index.html" html = open(html_loc).read() html = html.replace("__MATH_LIST__", json.dumps(math_expression_lst)) if out == "notebook": display(HTML(html)) elif out == "html": with open(name, "w+") as out_f: out_f.write(html)
[ "def", "print_math", "(", "math_expression_lst", ",", "name", "=", "\"math.html\"", ",", "out", "=", "'html'", ",", "formatter", "=", "lambda", "x", ":", "x", ")", ":", "try", ":", "shutil", ".", "rmtree", "(", "'viz'", ")", "except", ":", "pass", "pth", "=", "get_cur_path", "(", ")", "+", "print_math_template_path", "shutil", ".", "copytree", "(", "pth", ",", "'viz'", ")", "# clean_str = formatter(math_expression_lst)", "html_loc", "=", "None", "if", "out", "==", "\"html\"", ":", "html_loc", "=", "pth", "+", "\"standalone_index.html\"", "if", "out", "==", "\"notebook\"", ":", "from", "IPython", ".", "display", "import", "display", ",", "HTML", "html_loc", "=", "pth", "+", "\"notebook_index.html\"", "html", "=", "open", "(", "html_loc", ")", ".", "read", "(", ")", "html", "=", "html", ".", "replace", "(", "\"__MATH_LIST__\"", ",", "json", ".", "dumps", "(", "math_expression_lst", ")", ")", "if", "out", "==", "\"notebook\"", ":", "display", "(", "HTML", "(", "html", ")", ")", "elif", "out", "==", "\"html\"", ":", "with", "open", "(", "name", ",", "\"w+\"", ")", "as", "out_f", ":", "out_f", ".", "write", "(", "html", ")" ]
Converts LaTeX math expressions into an HTML layout. Creates an HTML file in the directory where print_math is called by default. Displays math in a Jupyter notebook if the "notebook" argument is specified. Args: math_expression_lst (list): A list of LaTeX math (string) to be rendered by KaTeX out (string): {"html"|"notebook"}: HTML by default. Specifies output medium. formatter (function): function that cleans up the string for KaTeX. Returns: An HTML file in the directory where this function is called, or displays HTML output in a notebook.
[ "Converts", "LaTeX", "math", "expressions", "into", "an", "HTML", "layout", ".", "Creates", "an", "HTML", "file", "in", "the", "directory", "where", "print_math", "is", "called", "by", "default", ".", "Displays", "math", "in", "a", "Jupyter", "notebook", "if", "the", "notebook", "argument", "is", "specified", "." ]
30fe89537379faea4de8c8b568ac6e52e4d15353
https://github.com/samghelms/mathviz/blob/30fe89537379faea4de8c8b568ac6e52e4d15353/mathviz_hopper/src/print_math.py#L17-L53
train
azogue/i2csense
i2csense/__init__.py
I2cBaseClass.log_error
def log_error(self, msg, *args): """Log an error or print in stdout if no logger.""" if self._logger is not None: self._logger.error(msg, *args) else: print(msg % args)
python
def log_error(self, msg, *args): """Log an error or print in stdout if no logger.""" if self._logger is not None: self._logger.error(msg, *args) else: print(msg % args)
[ "def", "log_error", "(", "self", ",", "msg", ",", "*", "args", ")", ":", "if", "self", ".", "_logger", "is", "not", "None", ":", "self", ".", "_logger", ".", "error", "(", "msg", ",", "*", "args", ")", "else", ":", "print", "(", "msg", "%", "args", ")" ]
Log an error or print in stdout if no logger.
[ "Log", "an", "error", "or", "print", "in", "stdout", "if", "no", "logger", "." ]
ecc6806dcee9de827a5414a9e836d271fedca9b9
https://github.com/azogue/i2csense/blob/ecc6806dcee9de827a5414a9e836d271fedca9b9/i2csense/__init__.py#L55-L60
train
azogue/i2csense
i2csense/__init__.py
I2cBaseClass._get_value_opc_attr
def _get_value_opc_attr(self, attr_name, prec_decimals=2): """Return sensor attribute with precision, or None if not present.""" try: value = getattr(self, attr_name) if value is not None: return round(value, prec_decimals) except I2cVariableNotImplemented: pass return None
python
def _get_value_opc_attr(self, attr_name, prec_decimals=2): """Return sensor attribute with precision, or None if not present.""" try: value = getattr(self, attr_name) if value is not None: return round(value, prec_decimals) except I2cVariableNotImplemented: pass return None
[ "def", "_get_value_opc_attr", "(", "self", ",", "attr_name", ",", "prec_decimals", "=", "2", ")", ":", "try", ":", "value", "=", "getattr", "(", "self", ",", "attr_name", ")", "if", "value", "is", "not", "None", ":", "return", "round", "(", "value", ",", "prec_decimals", ")", "except", "I2cVariableNotImplemented", ":", "pass", "return", "None" ]
Return sensor attribute with precision, or None if not present.
[ "Return", "sensor", "attribute", "with", "precision", "or", "None", "if", "not", "present", "." ]
ecc6806dcee9de827a5414a9e836d271fedca9b9
https://github.com/azogue/i2csense/blob/ecc6806dcee9de827a5414a9e836d271fedca9b9/i2csense/__init__.py#L91-L99
train
azogue/i2csense
i2csense/__init__.py
I2cBaseClass.current_state_str
def current_state_str(self): """Return string representation of the current state of the sensor.""" if self.sample_ok: msg = '' temperature = self._get_value_opc_attr('temperature') if temperature is not None: msg += 'Temp: %s ºC, ' % temperature humidity = self._get_value_opc_attr('humidity') if humidity is not None: msg += 'Humid: %s %%, ' % humidity pressure = self._get_value_opc_attr('pressure') if pressure is not None: msg += 'Press: %s mb, ' % pressure light_level = self._get_value_opc_attr('light_level') if light_level is not None: msg += 'Light: %s lux, ' % light_level return msg[:-2] else: return "Bad sample"
python
def current_state_str(self): """Return string representation of the current state of the sensor.""" if self.sample_ok: msg = '' temperature = self._get_value_opc_attr('temperature') if temperature is not None: msg += 'Temp: %s ºC, ' % temperature humidity = self._get_value_opc_attr('humidity') if humidity is not None: msg += 'Humid: %s %%, ' % humidity pressure = self._get_value_opc_attr('pressure') if pressure is not None: msg += 'Press: %s mb, ' % pressure light_level = self._get_value_opc_attr('light_level') if light_level is not None: msg += 'Light: %s lux, ' % light_level return msg[:-2] else: return "Bad sample"
[ "def", "current_state_str", "(", "self", ")", ":", "if", "self", ".", "sample_ok", ":", "msg", "=", "''", "temperature", "=", "self", ".", "_get_value_opc_attr", "(", "'temperature'", ")", "if", "temperature", "is", "not", "None", ":", "msg", "+=", "'Temp: %s ºC, ' ", " ", "emperature", "humidity", "=", "self", ".", "_get_value_opc_attr", "(", "'humidity'", ")", "if", "humidity", "is", "not", "None", ":", "msg", "+=", "'Humid: %s %%, '", "%", "humidity", "pressure", "=", "self", ".", "_get_value_opc_attr", "(", "'pressure'", ")", "if", "pressure", "is", "not", "None", ":", "msg", "+=", "'Press: %s mb, '", "%", "pressure", "light_level", "=", "self", ".", "_get_value_opc_attr", "(", "'light_level'", ")", "if", "light_level", "is", "not", "None", ":", "msg", "+=", "'Light: %s lux, '", "%", "light_level", "return", "msg", "[", ":", "-", "2", "]", "else", ":", "return", "\"Bad sample\"" ]
Return string representation of the current state of the sensor.
[ "Return", "string", "representation", "of", "the", "current", "state", "of", "the", "sensor", "." ]
ecc6806dcee9de827a5414a9e836d271fedca9b9
https://github.com/azogue/i2csense/blob/ecc6806dcee9de827a5414a9e836d271fedca9b9/i2csense/__init__.py#L102-L120
train
gesellkammer/sndfileio
sndfileio/resampling.py
_applyMultichan
def _applyMultichan(samples, func): # type: (np.ndarray, Callable[[np.ndarray], np.ndarray]) -> np.ndarray """ Apply func to each channel of audio data in samples """ if len(samples.shape) == 1 or samples.shape[1] == 1: newsamples = func(samples) else: y = np.array([]) for i in range(samples.shape[1]): y = np.concatenate((y, func(samples[:,i]))) newsamples = y.reshape(samples.shape[1], -1).T return newsamples
python
def _applyMultichan(samples, func): # type: (np.ndarray, Callable[[np.ndarray], np.ndarray]) -> np.ndarray """ Apply func to each channel of audio data in samples """ if len(samples.shape) == 1 or samples.shape[1] == 1: newsamples = func(samples) else: y = np.array([]) for i in range(samples.shape[1]): y = np.concatenate((y, func(samples[:,i]))) newsamples = y.reshape(samples.shape[1], -1).T return newsamples
[ "def", "_applyMultichan", "(", "samples", ",", "func", ")", ":", "# type: (np.ndarray, Callable[[np.ndarray], np.ndarray]) -> np.ndarray", "if", "len", "(", "samples", ".", "shape", ")", "==", "1", "or", "samples", ".", "shape", "[", "1", "]", "==", "1", ":", "newsamples", "=", "func", "(", "samples", ")", "else", ":", "y", "=", "np", ".", "array", "(", "[", "]", ")", "for", "i", "in", "range", "(", "samples", ".", "shape", "[", "1", "]", ")", ":", "y", "=", "np", ".", "concatenate", "(", "(", "y", ",", "func", "(", "samples", "[", ":", ",", "i", "]", ")", ")", ")", "newsamples", "=", "y", ".", "reshape", "(", "samples", ".", "shape", "[", "1", "]", ",", "-", "1", ")", ".", "T", "return", "newsamples" ]
Apply func to each channel of audio data in samples
[ "Apply", "func", "to", "each", "channel", "of", "audio", "data", "in", "samples" ]
8e2b264cadb652f09d2e775f54090c0a3cb2ced2
https://github.com/gesellkammer/sndfileio/blob/8e2b264cadb652f09d2e775f54090c0a3cb2ced2/sndfileio/resampling.py#L17-L29
train
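The concatenate-then-reshape trick rebuilds a (frames, channels) array from per-channel results; a small demonstration with the helper above and a toy per-channel function (decimation by 2):

import numpy as np

stereo = np.arange(12.0).reshape(6, 2)   # 6 frames, 2 channels
halve = lambda chan: chan[::2]           # any 1-D -> 1-D function

out = _applyMultichan(stereo, halve)
print(out.shape)                         # (3, 2)
print(out[:, 0])                         # [0. 4. 8.] -- left channel, decimated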
gesellkammer/sndfileio
sndfileio/resampling.py
_resample_obspy
def _resample_obspy(samples, sr, newsr, window='hanning', lowpass=True): # type: (np.ndarray, int, int, str, bool) -> np.ndarray """ Resample using Fourier method. The same as resample_scipy but with low-pass filtering when downsampling """ from scipy.signal import resample from math import ceil factor = sr/float(newsr) if newsr < sr and lowpass: # be sure filter still behaves good if factor > 16: logger.info("Automatic filter design is unstable for resampling " "factors (current sampling rate/new sampling rate) " "above 16. Manual resampling is necessary.") freq = min(sr, newsr) * 0.5 / float(factor) logger.debug(f"resample_obspy: lowpass {freq}") samples = lowpass_cheby2(samples, freq=freq, sr=sr, maxorder=12) num = int(ceil(len(samples) / factor)) return _applyMultichan(samples, lambda S: resample(S, num, window=window))
python
def _resample_obspy(samples, sr, newsr, window='hanning', lowpass=True): # type: (np.ndarray, int, int, str, bool) -> np.ndarray """ Resample using Fourier method. The same as resample_scipy but with low-pass filtering when downsampling """ from scipy.signal import resample from math import ceil factor = sr/float(newsr) if newsr < sr and lowpass: # be sure filter still behaves good if factor > 16: logger.info("Automatic filter design is unstable for resampling " "factors (current sampling rate/new sampling rate) " "above 16. Manual resampling is necessary.") freq = min(sr, newsr) * 0.5 / float(factor) logger.debug(f"resample_obspy: lowpass {freq}") samples = lowpass_cheby2(samples, freq=freq, sr=sr, maxorder=12) num = int(ceil(len(samples) / factor)) return _applyMultichan(samples, lambda S: resample(S, num, window=window))
[ "def", "_resample_obspy", "(", "samples", ",", "sr", ",", "newsr", ",", "window", "=", "'hanning'", ",", "lowpass", "=", "True", ")", ":", "# type: (np.ndarray, int, int, str, bool) -> np.ndarray", "from", "scipy", ".", "signal", "import", "resample", "from", "math", "import", "ceil", "factor", "=", "sr", "/", "float", "(", "newsr", ")", "if", "newsr", "<", "sr", "and", "lowpass", ":", "# be sure filter still behaves good", "if", "factor", ">", "16", ":", "logger", ".", "info", "(", "\"Automatic filter design is unstable for resampling \"", "\"factors (current sampling rate/new sampling rate) \"", "\"above 16. Manual resampling is necessary.\"", ")", "freq", "=", "min", "(", "sr", ",", "newsr", ")", "*", "0.5", "/", "float", "(", "factor", ")", "logger", ".", "debug", "(", "f\"resample_obspy: lowpass {freq}\"", ")", "samples", "=", "lowpass_cheby2", "(", "samples", ",", "freq", "=", "freq", ",", "sr", "=", "sr", ",", "maxorder", "=", "12", ")", "num", "=", "int", "(", "ceil", "(", "len", "(", "samples", ")", "/", "factor", ")", ")", "return", "_applyMultichan", "(", "samples", ",", "lambda", "S", ":", "resample", "(", "S", ",", "num", ",", "window", "=", "window", ")", ")" ]
Resample using Fourier method. The same as resample_scipy but with low-pass filtering when downsampling
[ "Resample", "using", "Fourier", "method", ".", "The", "same", "as", "resample_scipy", "but", "with", "low", "-", "pass", "filtering", "when", "downsampling" ]
8e2b264cadb652f09d2e775f54090c0a3cb2ced2
https://github.com/gesellkammer/sndfileio/blob/8e2b264cadb652f09d2e775f54090c0a3cb2ced2/sndfileio/resampling.py#L191-L213
train
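The core call is scipy.signal.resample with num = ceil(len / factor); a standalone sketch halving the rate of a test tone ('hann' is the window name current SciPy accepts; older code spells it 'hanning'):

import numpy as np
from math import ceil
from scipy.signal import resample

sr, newsr = 8000, 4000
t = np.arange(sr) / sr
x = np.sin(2 * np.pi * 440 * t)          # one second of a 440 Hz tone

factor = sr / newsr
y = resample(x, int(ceil(len(x) / factor)), window='hann')
print(len(x), '->', len(y))              # 8000 -> 4000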
gesellkammer/sndfileio
sndfileio/resampling.py
resample
def resample(samples, oldsr, newsr): # type: (np.ndarray, int, int) -> np.ndarray """ Resample `samples` with given samplerate `sr` to new samplerate `newsr` samples: mono or multichannel frames oldsr : original samplerate newsr : new sample rate Returns: the new samples """ backends = [ _resample_samplerate, # turns the samples into float32, which is ok for audio _resample_scikits, _resample_nnresample, # very good results, follows libsamplerate closely _resample_obspy, # these last two introduce some error at the first samples _resample_scipy ] # type: List[Callable[[np.ndarray, int, int], Opt[np.ndarray]]] for backend in backends: newsamples = backend(samples, oldsr, newsr) if newsamples is not None: return newsamples
python
def resample(samples, oldsr, newsr): # type: (np.ndarray, int, int) -> np.ndarray """ Resample `samples` with given samplerate `sr` to new samplerate `newsr` samples: mono or multichannel frames oldsr : original samplerate newsr : new sample rate Returns: the new samples """ backends = [ _resample_samplerate, # turns the samples into float32, which is ok for audio _resample_scikits, _resample_nnresample, # very good results, follows libsamplerate closely _resample_obspy, # these last two introduce some error at the first samples _resample_scipy ] # type: List[Callable[[np.ndarray, int, int], Opt[np.ndarray]]] for backend in backends: newsamples = backend(samples, oldsr, newsr) if newsamples is not None: return newsamples
[ "def", "resample", "(", "samples", ",", "oldsr", ",", "newsr", ")", ":", "# type: (np.ndarray, int, int) -> np.ndarray", "backends", "=", "[", "_resample_samplerate", ",", "# turns the samples into float32, which is ok for audio ", "_resample_scikits", ",", "_resample_nnresample", ",", "# very good results, follows libsamplerate closely", "_resample_obspy", ",", "# these last two introduce some error at the first samples", "_resample_scipy", "]", "# type: List[Callable[[np.ndarray, int, int], Opt[np.ndarray]]]", "for", "backend", "in", "backends", ":", "newsamples", "=", "backend", "(", "samples", ",", "oldsr", ",", "newsr", ")", "if", "newsamples", "is", "not", "None", ":", "return", "newsamples" ]
Resample `samples` with given samplerate `sr` to new samplerate `newsr` samples: mono or multichannel frames oldsr : original samplerate newsr : new sample rate Returns: the new samples
[ "Resample", "samples", "with", "given", "samplerate", "sr", "to", "new", "samplerate", "newsr" ]
8e2b264cadb652f09d2e775f54090c0a3cb2ced2
https://github.com/gesellkammer/sndfileio/blob/8e2b264cadb652f09d2e775f54090c0a3cb2ced2/sndfileio/resampling.py#L216-L238
train
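The try-each-backend-until-one-answers pattern generalizes; a tiny standalone version, with stand-in backends that return None when their optional dependency is unavailable:

def _via_lib_a(x):
    try:
        import json                      # stand-in for an optional dependency
    except ImportError:
        return None
    return ('lib_a', x)

def _via_fallback(x):
    return ('fallback', x)

def convert(x):
    for backend in (_via_lib_a, _via_fallback):
        result = backend(x)
        if result is not None:
            return result

print(convert(42))                       # ('lib_a', 42)

Note that resample() above likewise returns None implicitly if every backend declines, which callers may want to guard against.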
garenchan/policy
setup.py
get_package_version
def get_package_version(): """return package version without importing it""" base = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(base, 'policy', '__init__.py'), mode='rt', encoding='utf-8') as initf: for line in initf: m = version.match(line.strip()) if not m: continue return m.groups()[0]
python
def get_package_version(): """return package version without importing it""" base = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(base, 'policy', '__init__.py'), mode='rt', encoding='utf-8') as initf: for line in initf: m = version.match(line.strip()) if not m: continue return m.groups()[0]
[ "def", "get_package_version", "(", ")", ":", "base", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "base", ",", "'policy'", ",", "'__init__.py'", ")", ",", "mode", "=", "'rt'", ",", "encoding", "=", "'utf-8'", ")", "as", "initf", ":", "for", "line", "in", "initf", ":", "m", "=", "version", ".", "match", "(", "line", ".", "strip", "(", ")", ")", "if", "not", "m", ":", "continue", "return", "m", ".", "groups", "(", ")", "[", "0", "]" ]
return package version without importing it
[ "return", "package", "version", "without", "importing", "it" ]
7709ae5f371146f8c90380d0877a5e59d731f644
https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/setup.py#L24-L34
train
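The module-level `version` regex is defined elsewhere in setup.py and not shown; a typical definition compatible with this loop would be:

import re

version = re.compile(r"""__version__\s*=\s*['"]([^'"]+)['"]""")

m = version.match("__version__ = '1.2.3'")
print(m.groups()[0])   # 1.2.3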
garenchan/policy
setup.py
get_long_description
def get_long_description(): """return package's long description""" base = os.path.abspath(os.path.dirname(__file__)) readme_file = os.path.join(base, 'README.md') with open(readme_file, mode='rt', encoding='utf-8') as readme: return readme.read()
python
def get_long_description(): """return package's long description""" base = os.path.abspath(os.path.dirname(__file__)) readme_file = os.path.join(base, 'README.md') with open(readme_file, mode='rt', encoding='utf-8') as readme: return readme.read()
[ "def", "get_long_description", "(", ")", ":", "base", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "readme_file", "=", "os", ".", "path", ".", "join", "(", "base", ",", "'README.md'", ")", "with", "open", "(", "readme_file", ",", "mode", "=", "'rt'", ",", "encoding", "=", "'utf-8'", ")", "as", "readme", ":", "return", "readme", ".", "read", "(", ")" ]
return package's long description
[ "return", "package", "s", "long", "description" ]
7709ae5f371146f8c90380d0877a5e59d731f644
https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/setup.py#L37-L42
train
garenchan/policy
setup.py
get_install_requires
def get_install_requires(): """return package's install requires""" base = os.path.abspath(os.path.dirname(__file__)) requirements_file = os.path.join(base, 'requirements.txt') if not os.path.exists(requirements_file): return [] with open(requirements_file, mode='rt', encoding='utf-8') as f: return f.read().splitlines()
python
def get_install_requires(): """return package's install requires""" base = os.path.abspath(os.path.dirname(__file__)) requirements_file = os.path.join(base, 'requirements.txt') if not os.path.exists(requirements_file): return [] with open(requirements_file, mode='rt', encoding='utf-8') as f: return f.read().splitlines()
[ "def", "get_install_requires", "(", ")", ":", "base", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "requirements_file", "=", "os", ".", "path", ".", "join", "(", "base", ",", "'requirements.txt'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "requirements_file", ")", ":", "return", "[", "]", "with", "open", "(", "requirements_file", ",", "mode", "=", "'rt'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", ".", "splitlines", "(", ")" ]
return package's install requires
[ "return", "package", "s", "install", "requires" ]
7709ae5f371146f8c90380d0877a5e59d731f644
https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/setup.py#L61-L68
train
BernardFW/bernard
src/bernard/misc/sheet_sync/_base.py
main
def main(flags): """ Download all sheets as configured. """ dl = SheetDownloader(flags) dl.init() for file_info in settings.GOOGLE_SHEET_SYNC['files']: print('Downloading {}'.format(file_info['path'])) dl.download_sheet( file_info['path'], file_info['sheet'], file_info['range'], )
python
def main(flags): """ Download all sheets as configured. """ dl = SheetDownloader(flags) dl.init() for file_info in settings.GOOGLE_SHEET_SYNC['files']: print('Downloading {}'.format(file_info['path'])) dl.download_sheet( file_info['path'], file_info['sheet'], file_info['range'], )
[ "def", "main", "(", "flags", ")", ":", "dl", "=", "SheetDownloader", "(", "flags", ")", "dl", ".", "init", "(", ")", "for", "file_info", "in", "settings", ".", "GOOGLE_SHEET_SYNC", "[", "'files'", "]", ":", "print", "(", "'Downloading {}'", ".", "format", "(", "file_info", "[", "'path'", "]", ")", ")", "dl", ".", "download_sheet", "(", "file_info", "[", "'path'", "]", ",", "file_info", "[", "'sheet'", "]", ",", "file_info", "[", "'range'", "]", ",", ")" ]
Download all sheets as configured.
[ "Download", "all", "sheets", "as", "configured", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/misc/sheet_sync/_base.py#L122-L136
train
BernardFW/bernard
src/bernard/misc/sheet_sync/_base.py
SheetDownloader.download_sheet
def download_sheet(self, file_path, sheet_id, cell_range): """ Download the cell range from the sheet and store it as CSV in the `file_path` file. """ result = self.service.spreadsheets().values().get( spreadsheetId=sheet_id, range=cell_range, ).execute() values = result.get('values', []) with open(file_path, newline='', encoding='utf-8', mode='w') as f: writer = csv.writer(f, lineterminator='\n') for row in values: writer.writerow(row)
python
def download_sheet(self, file_path, sheet_id, cell_range): """ Download the cell range from the sheet and store it as CSV in the `file_path` file. """ result = self.service.spreadsheets().values().get( spreadsheetId=sheet_id, range=cell_range, ).execute() values = result.get('values', []) with open(file_path, newline='', encoding='utf-8', mode='w') as f: writer = csv.writer(f, lineterminator='\n') for row in values: writer.writerow(row)
[ "def", "download_sheet", "(", "self", ",", "file_path", ",", "sheet_id", ",", "cell_range", ")", ":", "result", "=", "self", ".", "service", ".", "spreadsheets", "(", ")", ".", "values", "(", ")", ".", "get", "(", "spreadsheetId", "=", "sheet_id", ",", "range", "=", "cell_range", ",", ")", ".", "execute", "(", ")", "values", "=", "result", ".", "get", "(", "'values'", ",", "[", "]", ")", "with", "open", "(", "file_path", ",", "newline", "=", "''", ",", "encoding", "=", "'utf-8'", ",", "mode", "=", "'w'", ")", "as", "f", ":", "writer", "=", "csv", ".", "writer", "(", "f", ",", "lineterminator", "=", "'\\n'", ")", "for", "row", "in", "values", ":", "writer", ".", "writerow", "(", "row", ")" ]
Download the cell range from the sheet and store it as CSV in the `file_path` file.
[ "Download", "the", "cell", "range", "from", "the", "sheet", "and", "store", "it", "as", "CSV", "in", "the", "file_path", "file", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/misc/sheet_sync/_base.py#L102-L119
train
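The CSV-writing half can be exercised without Google credentials; `values` below stands in for the API response's 'values' list:

import csv

values = [['key', 'en', 'fr'],
          ['HELLO', 'Hello', 'Bonjour']]

with open('strings.csv', newline='', encoding='utf-8', mode='w') as f:
    writer = csv.writer(f, lineterminator='\n')
    for row in values:
        writer.writerow(row)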
giancosta86/Iris
info/gianlucacosta/iris/versioning.py
Version.getFriendlyString
def getFriendlyString(self): """ Returns the version, printed in a friendly way. More precisely, it trims trailing zero components. """ if self._friendlyString is not None: return self._friendlyString resultComponents = [ self.getIntMajor(), self.getIntMinor(), self.getIntBuild(), self.getIntRevision() ] for i in range(len(resultComponents) - 1, -1, -1): if resultComponents[i] == 0: del resultComponents[i] else: break result = ".".join(map(str, resultComponents)) self._friendlyString = result return result
python
def getFriendlyString(self): """ Returns the version, printed in a friendly way. More precisely, it trims trailing zero components. """ if self._friendlyString is not None: return self._friendlyString resultComponents = [ self.getIntMajor(), self.getIntMinor(), self.getIntBuild(), self.getIntRevision() ] for i in range(len(resultComponents) - 1, -1, -1): if resultComponents[i] == 0: del resultComponents[i] else: break result = ".".join(map(str, resultComponents)) self._friendlyString = result return result
[ "def", "getFriendlyString", "(", "self", ")", ":", "if", "self", ".", "_friendlyString", "is", "not", "None", ":", "return", "self", ".", "_friendlyString", "resultComponents", "=", "[", "self", ".", "getIntMajor", "(", ")", ",", "self", ".", "getIntMinor", "(", ")", ",", "self", ".", "getIntBuild", "(", ")", ",", "self", ".", "getIntRevision", "(", ")", "]", "for", "i", "in", "range", "(", "len", "(", "resultComponents", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "if", "resultComponents", "[", "i", "]", "==", "0", ":", "del", "resultComponents", "[", "i", "]", "else", ":", "break", "result", "=", "\".\"", ".", "join", "(", "map", "(", "str", ",", "resultComponents", ")", ")", "self", ".", "_friendlyString", "=", "result", "return", "result" ]
Returns the version, printed in a friendly way. More precisely, it trims trailing zero components.
[ "Returns", "the", "version", "printed", "in", "a", "friendly", "way", "." ]
b3d92cca5cce3653519bd032346b211c46a57d05
https://github.com/giancosta86/Iris/blob/b3d92cca5cce3653519bd032346b211c46a57d05/info/gianlucacosta/iris/versioning.py#L117-L143
train
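Since the Version constructor is not shown in this record, here is a minimal standalone sketch of the trailing-zero trimming that getFriendlyString performs, with its behavior inferred from the method body (including the all-zero edge case):

def friendly(components):
    # Trim trailing zero components, as getFriendlyString does.
    parts = list(components)
    while parts and parts[-1] == 0:
        parts.pop()
    return ".".join(map(str, parts))

assert friendly([1, 2, 0, 0]) == "1.2"
assert friendly([1, 0, 3, 0]) == "1.0.3"   # inner zeros are kept
assert friendly([0, 0, 0, 0]) == ""        # an all-zero version collapses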
giancosta86/Iris
info/gianlucacosta/iris/versioning.py
VersionDirectory.getVersions
def getVersions(self): """ Returns the versions of the suitable entries available in the directory - an empty list if no such entry is available """ if not os.path.exists(self._path): return [] result = [] for entryName in os.listdir(self._path): try: entryVersion = Version(entryName) result.append(entryVersion) except InvalidVersionException: continue return result
python
def getVersions(self): """ Returns the versions of the suitable entries available in the directory - an empty list if no such entry is available """ if not os.path.exists(self._path): return [] result = [] for entryName in os.listdir(self._path): try: entryVersion = Version(entryName) result.append(entryVersion) except InvalidVersionException: continue return result
[ "def", "getVersions", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "_path", ")", ":", "return", "[", "]", "result", "=", "[", "]", "for", "entryName", "in", "os", ".", "listdir", "(", "self", ".", "_path", ")", ":", "try", ":", "entryVersion", "=", "Version", "(", "entryName", ")", "result", ".", "append", "(", "entryVersion", ")", "except", "InvalidVersionException", ":", "continue", "return", "result" ]
Returns the versions of the suitable entries available in the directory - an empty list if no such entry is available
[ "Returns", "the", "versions", "of", "the", "suitable", "entries", "available", "in", "the", "directory", "-", "an", "empty", "list", "if", "no", "such", "entry", "is", "available" ]
b3d92cca5cce3653519bd032346b211c46a57d05
https://github.com/giancosta86/Iris/blob/b3d92cca5cce3653519bd032346b211c46a57d05/info/gianlucacosta/iris/versioning.py#L167-L185
train
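The scan-and-skip pattern in getVersions can be shown without the iris package. In this sketch, parse_version stands in for the Version constructor and ValueError for InvalidVersionException; both substitutions are assumptions for illustration only.

import os

def parse_version(name):
    # Accept only dotted integer names, roughly what Version() parses.
    return tuple(int(part) for part in name.split('.'))

def versions_in(path):
    if not os.path.exists(path):
        return []
    result = []
    for entry in os.listdir(path):
        try:
            result.append(parse_version(entry))
        except ValueError:
            continue  # skip non-version entries, as getVersions does
    return result

print(sorted(versions_in('releases')))  # e.g. [(1, 0), (1, 2, 3)]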
jstitch/MambuPy
MambuPy/rest/mambubranch.py
MambuBranch.setUsers
def setUsers(self, *args, **kwargs): """Adds the active users for this branch to a 'users' field. Returns the number of requests done to Mambu. .. todo:: since pagination logic was added, it is not always true that just one request was done. It may be more! But since the request counter singleton holds true information about how many requests were done to Mambu, this return value may be obsolete """ try: usrs = [ us for us in self.mambuusersclass(branchId=self['id'], *args, **kwargs) if us['userState'] == "ACTIVE" ] except AttributeError as ae: from .mambuuser import MambuUsers self.mambuusersclass = MambuUsers usrs = [ us for us in self.mambuusersclass(branchId=self['id'], *args, **kwargs) if us['userState'] == "ACTIVE" ] self['users'] = usrs return 1
python
def setUsers(self, *args, **kwargs): """Adds the active users for this branch to a 'users' field. Returns the number of requests done to Mambu. .. todo:: since pagination logic was added, it is not always true that just one request was done. It may be more! But since the request counter singleton holds true information about how many requests were done to Mambu, this return value may be obsolete """ try: usrs = [ us for us in self.mambuusersclass(branchId=self['id'], *args, **kwargs) if us['userState'] == "ACTIVE" ] except AttributeError as ae: from .mambuuser import MambuUsers self.mambuusersclass = MambuUsers usrs = [ us for us in self.mambuusersclass(branchId=self['id'], *args, **kwargs) if us['userState'] == "ACTIVE" ] self['users'] = usrs return 1
[ "def", "setUsers", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "usrs", "=", "[", "us", "for", "us", "in", "self", ".", "mambuusersclass", "(", "branchId", "=", "self", "[", "'id'", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "us", "[", "'userState'", "]", "==", "\"ACTIVE\"", "]", "except", "AttributeError", "as", "ae", ":", "from", ".", "mambuuser", "import", "MambuUsers", "self", ".", "mambuusersclass", "=", "MambuUsers", "usrs", "=", "[", "us", "for", "us", "in", "self", ".", "mambuusersclass", "(", "branchId", "=", "self", "[", "'id'", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "us", "[", "'userState'", "]", "==", "\"ACTIVE\"", "]", "self", "[", "'users'", "]", "=", "usrs", "return", "1" ]
Adds the active users for this branch to a 'users' field. Returns the number of requests done to Mambu. .. todo:: since pagination logic was added, it is not always true that just one request was done. It may be more! But since the request counter singleton holds true information about how many requests were done to Mambu, this return value may be obsolete
[ "Adds", "the", "active", "users", "for", "this", "branch", "to", "a", "users", "field", "." ]
2af98cc12e7ed5ec183b3e97644e880e70b79ee8
https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/rest/mambubranch.py#L39-L57
train
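A hedged usage sketch for the record above: it assumes a configured MambuPy API connection and that MambuBranch accepts an entid keyword, so treat the identifiers as placeholders rather than a verified call.

from MambuPy.rest.mambubranch import MambuBranch

# Placeholder id; a real call needs valid Mambu API credentials.
branch = MambuBranch(entid='BRANCH_ID_PLACEHOLDER')
branch.setUsers()  # populates branch['users'] with ACTIVE users
for user in branch['users']:
    print(user['userState'])  # every entry should be "ACTIVE"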
pyQode/pyqode.cobol
pyqode/cobol/modes/indenter.py
IndenterMode.unindent
def unindent(self): """ Un-indents text at cursor position. """ _logger().debug('unindent') cursor = self.editor.textCursor() _logger().debug('cursor has selection %r', cursor.hasSelection()) if cursor.hasSelection(): cursor.beginEditBlock() self.unindent_selection(cursor) cursor.endEditBlock() self.editor.setTextCursor(cursor) else: tab_len = self.editor.tab_length indentation = cursor.positionInBlock() indentation -= self.min_column if indentation == 0: return max_spaces = indentation % tab_len if max_spaces == 0: max_spaces = tab_len spaces = self.count_deletable_spaces(cursor, max_spaces) _logger().info('deleting %d space before cursor' % spaces) cursor.beginEditBlock() for _ in range(spaces): cursor.deletePreviousChar() cursor.endEditBlock() self.editor.setTextCursor(cursor) _logger().debug(cursor.block().text())
python
def unindent(self): """ Un-indents text at cursor position. """ _logger().debug('unindent') cursor = self.editor.textCursor() _logger().debug('cursor has selection %r', cursor.hasSelection()) if cursor.hasSelection(): cursor.beginEditBlock() self.unindent_selection(cursor) cursor.endEditBlock() self.editor.setTextCursor(cursor) else: tab_len = self.editor.tab_length indentation = cursor.positionInBlock() indentation -= self.min_column if indentation == 0: return max_spaces = indentation % tab_len if max_spaces == 0: max_spaces = tab_len spaces = self.count_deletable_spaces(cursor, max_spaces) _logger().info('deleting %d space before cursor' % spaces) cursor.beginEditBlock() for _ in range(spaces): cursor.deletePreviousChar() cursor.endEditBlock() self.editor.setTextCursor(cursor) _logger().debug(cursor.block().text())
[ "def", "unindent", "(", "self", ")", ":", "_logger", "(", ")", ".", "debug", "(", "'unindent'", ")", "cursor", "=", "self", ".", "editor", ".", "textCursor", "(", ")", "_logger", "(", ")", ".", "debug", "(", "'cursor has selection %r'", ",", "cursor", ".", "hasSelection", "(", ")", ")", "if", "cursor", ".", "hasSelection", "(", ")", ":", "cursor", ".", "beginEditBlock", "(", ")", "self", ".", "unindent_selection", "(", "cursor", ")", "cursor", ".", "endEditBlock", "(", ")", "self", ".", "editor", ".", "setTextCursor", "(", "cursor", ")", "else", ":", "tab_len", "=", "self", ".", "editor", ".", "tab_length", "indentation", "=", "cursor", ".", "positionInBlock", "(", ")", "indentation", "-=", "self", ".", "min_column", "if", "indentation", "==", "0", ":", "return", "max_spaces", "=", "indentation", "%", "tab_len", "if", "max_spaces", "==", "0", ":", "max_spaces", "=", "tab_len", "spaces", "=", "self", ".", "count_deletable_spaces", "(", "cursor", ",", "max_spaces", ")", "_logger", "(", ")", ".", "info", "(", "'deleting %d space before cursor'", "%", "spaces", ")", "cursor", ".", "beginEditBlock", "(", ")", "for", "_", "in", "range", "(", "spaces", ")", ":", "cursor", ".", "deletePreviousChar", "(", ")", "cursor", ".", "endEditBlock", "(", ")", "self", ".", "editor", ".", "setTextCursor", "(", "cursor", ")", "_logger", "(", ")", ".", "debug", "(", "cursor", ".", "block", "(", ")", ".", "text", "(", ")", ")" ]
Un-indents text at cursor position.
[ "Un", "-", "indents", "text", "at", "cursor", "position", "." ]
eedae4e320a4b2d0c44abb2c3061091321648fb7
https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/modes/indenter.py#L149-L178
train
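The interesting part of the no-selection branch above is the tab-stop arithmetic. The sketch below isolates it from the editor, with min_column modeling the COBOL code margin; the <= 0 guard for columns left of the margin is a small assumption beyond the original == 0 check.

def spaces_to_delete(column, tab_len=4, min_column=7):
    # Measure indentation relative to the code margin, then delete
    # back to the previous tab stop (a full tab_len when already
    # aligned on one), mirroring unindent() above.
    indentation = column - min_column
    if indentation <= 0:
        return 0
    max_spaces = indentation % tab_len
    if max_spaces == 0:
        max_spaces = tab_len
    return max_spaces  # count_deletable_spaces may then return fewer

assert spaces_to_delete(13) == 2  # column 13 -> back to tab stop 11
assert spaces_to_delete(11) == 4  # on a tab stop -> a full tab back
assert spaces_to_delete(7) == 0   # already at the margin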
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
WorkflowEngine.with_name
def with_name(cls, name, id_user=0, **extra_data): """Instantiate a WorkflowEngine given a name or UUID. :param name: name of workflow to run. :type name: str :param id_user: id of user to associate with workflow :type id_user: int :param module_name: label used to query groups of workflows. :type module_name: str """ return cls(name=name, id_user=id_user, **extra_data)
python
def with_name(cls, name, id_user=0, **extra_data): """Instantiate a WorkflowEngine given a name or UUID. :param name: name of workflow to run. :type name: str :param id_user: id of user to associate with workflow :type id_user: int :param module_name: label used to query groups of workflows. :type module_name: str """ return cls(name=name, id_user=id_user, **extra_data)
[ "def", "with_name", "(", "cls", ",", "name", ",", "id_user", "=", "0", ",", "*", "*", "extra_data", ")", ":", "return", "cls", "(", "name", "=", "name", ",", "id_user", "=", "0", ",", "*", "*", "extra_data", ")" ]
Instantiate a WorkflowEngine given a name or UUID. :param name: name of workflow to run. :type name: str :param id_user: id of user to associate with workflow :type id_user: int :param module_name: label used to query groups of workflows. :type module_name: str
[ "Instantiate", "a", "WorkflowEngine", "given", "a", "name", "or", "UUID", "." ]
9c09fd29509a3db975ac2aba337e6760d8cfd3c2
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L65-L77
train
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
WorkflowEngine.from_uuid
def from_uuid(cls, uuid, **extra_data): """Load an existing workflow from the database given a UUID. :param uuid: pass a uuid to an existing workflow. :type uuid: str """ model = Workflow.query.get(uuid) if model is None: raise LookupError( "No workflow with UUID {} was found".format(uuid) ) instance = cls(model=model, **extra_data) instance.objects = WorkflowObjectModel.query.filter( WorkflowObjectModel.id_workflow == uuid, WorkflowObjectModel.id_parent == None, # noqa ).all() return instance
python
def from_uuid(cls, uuid, **extra_data): """Load an existing workflow from the database given a UUID. :param uuid: pass a uuid to an existing workflow. :type uuid: str """ model = Workflow.query.get(uuid) if model is None: raise LookupError( "No workflow with UUID {} was found".format(uuid) ) instance = cls(model=model, **extra_data) instance.objects = WorkflowObjectModel.query.filter( WorkflowObjectModel.id_workflow == uuid, WorkflowObjectModel.id_parent == None, # noqa ).all() return instance
[ "def", "from_uuid", "(", "cls", ",", "uuid", ",", "*", "*", "extra_data", ")", ":", "model", "=", "Workflow", ".", "query", ".", "get", "(", "uuid", ")", "if", "model", "is", "None", ":", "raise", "LookupError", "(", "\"No workflow with UUID {} was found\"", ".", "format", "(", "uuid", ")", ")", "instance", "=", "cls", "(", "model", "=", "model", ",", "*", "*", "extra_data", ")", "instance", ".", "objects", "=", "WorkflowObjectModel", ".", "query", ".", "filter", "(", "WorkflowObjectModel", ".", "id_workflow", "==", "uuid", ",", "WorkflowObjectModel", ".", "id_parent", "==", "None", ",", "# noqa", ")", ".", "all", "(", ")", "return", "instance" ]
Load an existing workflow from the database given a UUID. :param uuid: pass a uuid to an existing workflow. :type uuid: str
[ "Load", "an", "existing", "workflow", "from", "the", "database", "given", "a", "UUID", "." ]
9c09fd29509a3db975ac2aba337e6760d8cfd3c2
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L80-L96
train
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
WorkflowEngine.continue_object
def continue_object(self, workflow_object, restart_point='restart_task', task_offset=1, stop_on_halt=False): """Continue workflow for one given object from "restart_point". :param object: :param stop_on_halt: :param restart_point: can be one of: * restart_prev: will restart from the previous task * continue_next: will continue to the next task * restart_task: will restart the current task You can use stop_on_error to raise exceptions and stop the processing. Use stop_on_halt to stop processing the workflow if HaltProcessing is raised. """ translate = { 'restart_task': 'current', 'continue_next': 'next', 'restart_prev': 'prev', } self.state.callback_pos = workflow_object.callback_pos or [0] self.restart(task=translate[restart_point], obj='first', objects=[workflow_object], stop_on_halt=stop_on_halt)
python
def continue_object(self, workflow_object, restart_point='restart_task', task_offset=1, stop_on_halt=False): """Continue workflow for one given object from "restart_point". :param object: :param stop_on_halt: :param restart_point: can be one of: * restart_prev: will restart from the previous task * continue_next: will continue to the next task * restart_task: will restart the current task You can use stop_on_error to raise exceptions and stop the processing. Use stop_on_halt to stop processing the workflow if HaltProcessing is raised. """ translate = { 'restart_task': 'current', 'continue_next': 'next', 'restart_prev': 'prev', } self.state.callback_pos = workflow_object.callback_pos or [0] self.restart(task=translate[restart_point], obj='first', objects=[workflow_object], stop_on_halt=stop_on_halt)
[ "def", "continue_object", "(", "self", ",", "workflow_object", ",", "restart_point", "=", "'restart_task'", ",", "task_offset", "=", "1", ",", "stop_on_halt", "=", "False", ")", ":", "translate", "=", "{", "'restart_task'", ":", "'current'", ",", "'continue_next'", ":", "'next'", ",", "'restart_prev'", ":", "'prev'", ",", "}", "self", ".", "state", ".", "callback_pos", "=", "workflow_object", ".", "callback_pos", "or", "[", "0", "]", "self", ".", "restart", "(", "task", "=", "translate", "[", "restart_point", "]", ",", "obj", "=", "'first'", ",", "objects", "=", "[", "workflow_object", "]", ",", "stop_on_halt", "=", "stop_on_halt", ")" ]
Continue workflow for one given object from "restart_point". :param object: :param stop_on_halt: :param restart_point: can be one of: * restart_prev: will restart from the previous task * continue_next: will continue to the next task * restart_task: will restart the current task You can use stop_on_error to raise exceptions and stop the processing. Use stop_on_halt to stop processing the workflow if HaltProcessing is raised.
[ "Continue", "workflow", "for", "one", "given", "object", "from", "restart_point", "." ]
9c09fd29509a3db975ac2aba337e6760d8cfd3c2
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L193-L215
train
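A sketch of the intended call pattern, assuming a persisted workflow and a previously halted object; the UUID is a placeholder and the import path is inferred from this file's location, so treat both as assumptions.

from invenio_workflows.engine import WorkflowEngine

engine = WorkflowEngine.from_uuid('WORKFLOW_UUID_PLACEHOLDER')
obj = engine.objects[0]  # a previously halted object

# 'restart_task'  -> rerun the task the object halted on
# 'continue_next' -> skip it and move to the following task
# 'restart_prev'  -> step back one task and rerun from there
engine.continue_object(obj, restart_point='continue_next',
                       stop_on_halt=True)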
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
WorkflowEngine.has_completed
def has_completed(self): """Return True if workflow is fully completed.""" objects_in_db = WorkflowObjectModel.query.filter( WorkflowObjectModel.id_workflow == self.uuid, WorkflowObjectModel.id_parent == None, # noqa ).filter(WorkflowObjectModel.status.in_([ workflow_object_class.known_statuses.COMPLETED ])).count() return objects_in_db == len(list(self.objects))
python
def has_completed(self): """Return True if workflow is fully completed.""" objects_in_db = WorkflowObjectModel.query.filter( WorkflowObjectModel.id_workflow == self.uuid, WorkflowObjectModel.id_parent == None, # noqa ).filter(WorkflowObjectModel.status.in_([ workflow_object_class.known_statuses.COMPLETED ])).count() return objects_in_db == len(list(self.objects))
[ "def", "has_completed", "(", "self", ")", ":", "objects_in_db", "=", "WorkflowObjectModel", ".", "query", ".", "filter", "(", "WorkflowObjectModel", ".", "id_workflow", "==", "self", ".", "uuid", ",", "WorkflowObjectModel", ".", "id_parent", "==", "None", ",", "# noqa", ")", ".", "filter", "(", "WorkflowObjectModel", ".", "status", ".", "in_", "(", "[", "workflow_object_class", ".", "known_statuses", ".", "COMPLETED", "]", ")", ")", ".", "count", "(", ")", "return", "objects_in_db", "==", "len", "(", "list", "(", "self", ".", "objects", ")", ")" ]
Return True if workflow is fully completed.
[ "Return", "True", "if", "workflow", "is", "fully", "completed", "." ]
9c09fd29509a3db975ac2aba337e6760d8cfd3c2
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L222-L230
train
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
WorkflowEngine.set_workflow_by_name
def set_workflow_by_name(self, workflow_name): """Configure the workflow to run by name. Allows changing the workflow that the engine will run by looking up the given name in the workflow registry. :param workflow_name: name of the workflow. :type workflow_name: str """ from .proxies import workflows if workflow_name not in workflows: # No workflow with that name exists raise WorkflowDefinitionError("Workflow '%s' does not exist" % (workflow_name,), workflow_name=workflow_name) self.workflow_definition = workflows[workflow_name] self.callbacks.replace(self.workflow_definition.workflow)
python
def set_workflow_by_name(self, workflow_name): """Configure the workflow to run by name. Allows changing the workflow that the engine will run by looking up the given name in the workflow registry. :param workflow_name: name of the workflow. :type workflow_name: str """ from .proxies import workflows if workflow_name not in workflows: # No workflow with that name exists raise WorkflowDefinitionError("Workflow '%s' does not exist" % (workflow_name,), workflow_name=workflow_name) self.workflow_definition = workflows[workflow_name] self.callbacks.replace(self.workflow_definition.workflow)
[ "def", "set_workflow_by_name", "(", "self", ",", "workflow_name", ")", ":", "from", ".", "proxies", "import", "workflows", "if", "workflow_name", "not", "in", "workflows", ":", "# No workflow with that name exists", "raise", "WorkflowDefinitionError", "(", "\"Workflow '%s' does not exist\"", "%", "(", "workflow_name", ",", ")", ",", "workflow_name", "=", "workflow_name", ")", "self", ".", "workflow_definition", "=", "workflows", "[", "workflow_name", "]", "self", ".", "callbacks", ".", "replace", "(", "self", ".", "workflow_definition", ".", "workflow", ")" ]
Configure the workflow to run by name. Allows changing the workflow that the engine will run by looking up the given name in the workflow registry. :param workflow_name: name of the workflow. :type workflow_name: str
[ "Configure", "the", "workflow", "to", "run", "by", "the", "name", "of", "this", "one", "." ]
9c09fd29509a3db975ac2aba337e6760d8cfd3c2
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L232-L249
train
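Continuing the engine sketch above (still hypothetical names), swapping the task list is a one-liner, and an unregistered name raises WorkflowDefinitionError:

engine = WorkflowEngine.with_name('my_workflow')  # hypothetical name
engine.set_workflow_by_name('another_workflow')   # must be registered
# set_workflow_by_name('no_such_workflow') would raise
# WorkflowDefinitionError, per the record above.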
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
InvenioActionMapper.after_each_callback
def after_each_callback(eng, callback_func, obj): """Take action after every WF callback.""" obj.callback_pos = eng.state.callback_pos obj.extra_data["_last_task_name"] = callback_func.__name__ task_history = get_task_history(callback_func) if "_task_history" not in obj.extra_data: obj.extra_data["_task_history"] = [task_history] else: obj.extra_data["_task_history"].append(task_history)
python
def after_each_callback(eng, callback_func, obj): """Take action after every WF callback.""" obj.callback_pos = eng.state.callback_pos obj.extra_data["_last_task_name"] = callback_func.__name__ task_history = get_task_history(callback_func) if "_task_history" not in obj.extra_data: obj.extra_data["_task_history"] = [task_history] else: obj.extra_data["_task_history"].append(task_history)
[ "def", "after_each_callback", "(", "eng", ",", "callback_func", ",", "obj", ")", ":", "obj", ".", "callback_pos", "=", "eng", ".", "state", ".", "callback_pos", "obj", ".", "extra_data", "[", "\"_last_task_name\"", "]", "=", "callback_func", ".", "__name__", "task_history", "=", "get_task_history", "(", "callback_func", ")", "if", "\"_task_history\"", "not", "in", "obj", ".", "extra_data", ":", "obj", ".", "extra_data", "[", "\"_task_history\"", "]", "=", "[", "task_history", "]", "else", ":", "obj", ".", "extra_data", "[", "\"_task_history\"", "]", ".", "append", "(", "task_history", ")" ]
Take action after every WF callback.
[ "Take", "action", "after", "every", "WF", "callback", "." ]
9c09fd29509a3db975ac2aba337e6760d8cfd3c2
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L275-L283
train
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
InvenioProcessingFactory.before_object
def before_object(eng, objects, obj): """Take action before the processing of an object begins.""" super(InvenioProcessingFactory, InvenioProcessingFactory)\ .before_object( eng, objects, obj ) if "_error_msg" in obj.extra_data: del obj.extra_data["_error_msg"] db.session.commit()
python
def before_object(eng, objects, obj): """Take action before the processing of an object begins.""" super(InvenioProcessingFactory, InvenioProcessingFactory)\ .before_object( eng, objects, obj ) if "_error_msg" in obj.extra_data: del obj.extra_data["_error_msg"] db.session.commit()
[ "def", "before_object", "(", "eng", ",", "objects", ",", "obj", ")", ":", "super", "(", "InvenioProcessingFactory", ",", "InvenioProcessingFactory", ")", ".", "before_object", "(", "eng", ",", "objects", ",", "obj", ")", "if", "\"_error_msg\"", "in", "obj", ".", "extra_data", ":", "del", "obj", ".", "extra_data", "[", "\"_error_msg\"", "]", "db", ".", "session", ".", "commit", "(", ")" ]
Take action before the processing of an object begins.
[ "Take", "action", "before", "the", "processing", "of", "an", "object", "begins", "." ]
9c09fd29509a3db975ac2aba337e6760d8cfd3c2
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L300-L308
train
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
InvenioProcessingFactory.after_object
def after_object(eng, objects, obj): """Take action once the processing of an object completes.""" # We save each object once it is fully run through super(InvenioProcessingFactory, InvenioProcessingFactory)\ .after_object(eng, objects, obj) obj.save( status=obj.known_statuses.COMPLETED, id_workflow=eng.model.uuid ) db.session.commit()
python
def after_object(eng, objects, obj): """Take action once the processing of an object completes.""" # We save each object once it is fully run through super(InvenioProcessingFactory, InvenioProcessingFactory)\ .after_object(eng, objects, obj) obj.save( status=obj.known_statuses.COMPLETED, id_workflow=eng.model.uuid ) db.session.commit()
[ "def", "after_object", "(", "eng", ",", "objects", ",", "obj", ")", ":", "# We save each object once it is fully run through", "super", "(", "InvenioProcessingFactory", ",", "InvenioProcessingFactory", ")", ".", "after_object", "(", "eng", ",", "objects", ",", "obj", ")", "obj", ".", "save", "(", "status", "=", "obj", ".", "known_statuses", ".", "COMPLETED", ",", "id_workflow", "=", "eng", ".", "model", ".", "uuid", ")", "db", ".", "session", ".", "commit", "(", ")" ]
Take action once the processing of an object completes.
[ "Take", "action", "once", "the", "proccessing", "of", "an", "object", "completes", "." ]
9c09fd29509a3db975ac2aba337e6760d8cfd3c2
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L311-L320
train
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
InvenioProcessingFactory.before_processing
def before_processing(eng, objects): """Execute before processing the workflow.""" super(InvenioProcessingFactory, InvenioProcessingFactory)\ .before_processing(eng, objects) eng.save(WorkflowStatus.RUNNING) db.session.commit()
python
def before_processing(eng, objects): """Execute before processing the workflow.""" super(InvenioProcessingFactory, InvenioProcessingFactory)\ .before_processing(eng, objects) eng.save(WorkflowStatus.RUNNING) db.session.commit()
[ "def", "before_processing", "(", "eng", ",", "objects", ")", ":", "super", "(", "InvenioProcessingFactory", ",", "InvenioProcessingFactory", ")", ".", "before_processing", "(", "eng", ",", "objects", ")", "eng", ".", "save", "(", "WorkflowStatus", ".", "RUNNING", ")", "db", ".", "session", ".", "commit", "(", ")" ]
Execute before processing the workflow.
[ "Execute", "before", "processing", "the", "workflow", "." ]
9c09fd29509a3db975ac2aba337e6760d8cfd3c2
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L323-L328
train
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
InvenioProcessingFactory.after_processing
def after_processing(eng, objects): """Update the workflow status once processing ends.""" super(InvenioProcessingFactory, InvenioProcessingFactory)\ .after_processing(eng, objects) if eng.has_completed: eng.save(WorkflowStatus.COMPLETED) else: eng.save(WorkflowStatus.HALTED) db.session.commit()
python
def after_processing(eng, objects): """Update the workflow status once processing ends.""" super(InvenioProcessingFactory, InvenioProcessingFactory)\ .after_processing(eng, objects) if eng.has_completed: eng.save(WorkflowStatus.COMPLETED) else: eng.save(WorkflowStatus.HALTED) db.session.commit()
[ "def", "after_processing", "(", "eng", ",", "objects", ")", ":", "super", "(", "InvenioProcessingFactory", ",", "InvenioProcessingFactory", ")", ".", "after_processing", "(", "eng", ",", "objects", ")", "if", "eng", ".", "has_completed", ":", "eng", ".", "save", "(", "WorkflowStatus", ".", "COMPLETED", ")", "else", ":", "eng", ".", "save", "(", "WorkflowStatus", ".", "HALTED", ")", "db", ".", "session", ".", "commit", "(", ")" ]
Update the workflow status once processing ends.
[ "Process", "to", "update", "status", "." ]
9c09fd29509a3db975ac2aba337e6760d8cfd3c2
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L331-L339
train
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
InvenioTransitionAction.Exception
def Exception(obj, eng, callbacks, exc_info): """Handle general exceptions in workflow, saving states.""" exception_repr = ''.join(traceback.format_exception(*exc_info)) msg = "Error:\n%s" % (exception_repr) eng.log.error(msg) if obj: # Save the formatted traceback as the error message obj.extra_data['_error_msg'] = exception_repr obj.save( status=obj.known_statuses.ERROR, callback_pos=eng.state.callback_pos, id_workflow=eng.uuid ) eng.save(WorkflowStatus.ERROR) db.session.commit() # Call super which will reraise super(InvenioTransitionAction, InvenioTransitionAction).Exception( obj, eng, callbacks, exc_info )
python
def Exception(obj, eng, callbacks, exc_info): """Handle general exceptions in workflow, saving states.""" exception_repr = ''.join(traceback.format_exception(*exc_info)) msg = "Error:\n%s" % (exception_repr) eng.log.error(msg) if obj: # Save the formatted traceback as the error message obj.extra_data['_error_msg'] = exception_repr obj.save( status=obj.known_statuses.ERROR, callback_pos=eng.state.callback_pos, id_workflow=eng.uuid ) eng.save(WorkflowStatus.ERROR) db.session.commit() # Call super which will reraise super(InvenioTransitionAction, InvenioTransitionAction).Exception( obj, eng, callbacks, exc_info )
[ "def", "Exception", "(", "obj", ",", "eng", ",", "callbacks", ",", "exc_info", ")", ":", "exception_repr", "=", "''", ".", "join", "(", "traceback", ".", "format_exception", "(", "*", "exc_info", ")", ")", "msg", "=", "\"Error:\\n%s\"", "%", "(", "exception_repr", ")", "eng", ".", "log", ".", "error", "(", "msg", ")", "if", "obj", ":", "# Sets an error message as a tuple (title, details)", "obj", ".", "extra_data", "[", "'_error_msg'", "]", "=", "exception_repr", "obj", ".", "save", "(", "status", "=", "obj", ".", "known_statuses", ".", "ERROR", ",", "callback_pos", "=", "eng", ".", "state", ".", "callback_pos", ",", "id_workflow", "=", "eng", ".", "uuid", ")", "eng", ".", "save", "(", "WorkflowStatus", ".", "ERROR", ")", "db", ".", "session", ".", "commit", "(", ")", "# Call super which will reraise", "super", "(", "InvenioTransitionAction", ",", "InvenioTransitionAction", ")", ".", "Exception", "(", "obj", ",", "eng", ",", "callbacks", ",", "exc_info", ")" ]
Handle general exceptions in workflow, saving states.
[ "Handle", "general", "exceptions", "in", "workflow", "saving", "states", "." ]
9c09fd29509a3db975ac2aba337e6760d8cfd3c2
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L346-L365
train
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
InvenioTransitionAction.WaitProcessing
def WaitProcessing(obj, eng, callbacks, exc_info): """Take actions when WaitProcessing is raised. .. note:: We're essentially doing HaltProcessing, plus `obj.set_action` and object status `WAITING` instead of `HALTED`. This is not present in TransitionActions, which is why this method does not call super here. """ e = exc_info[1] obj.set_action(e.action, e.message) obj.save(status=eng.object_status.WAITING, callback_pos=eng.state.callback_pos, id_workflow=eng.uuid) eng.save(WorkflowStatus.HALTED) eng.log.warning("Workflow '%s' waiting at task %s with message: %s", eng.name, eng.current_taskname or "Unknown", e.message) db.session.commit() # Call super which will reraise TransitionActions.HaltProcessing( obj, eng, callbacks, exc_info )
python
def WaitProcessing(obj, eng, callbacks, exc_info): """Take actions when WaitProcessing is raised. .. note:: We're essentially doing HaltProcessing, plus `obj.set_action` and object status `WAITING` instead of `HALTED`. This is not present in TransitionActions, which is why this method does not call super here. """ e = exc_info[1] obj.set_action(e.action, e.message) obj.save(status=eng.object_status.WAITING, callback_pos=eng.state.callback_pos, id_workflow=eng.uuid) eng.save(WorkflowStatus.HALTED) eng.log.warning("Workflow '%s' waiting at task %s with message: %s", eng.name, eng.current_taskname or "Unknown", e.message) db.session.commit() # Call super which will reraise TransitionActions.HaltProcessing( obj, eng, callbacks, exc_info )
[ "def", "WaitProcessing", "(", "obj", ",", "eng", ",", "callbacks", ",", "exc_info", ")", ":", "e", "=", "exc_info", "[", "1", "]", "obj", ".", "set_action", "(", "e", ".", "action", ",", "e", ".", "message", ")", "obj", ".", "save", "(", "status", "=", "eng", ".", "object_status", ".", "WAITING", ",", "callback_pos", "=", "eng", ".", "state", ".", "callback_pos", ",", "id_workflow", "=", "eng", ".", "uuid", ")", "eng", ".", "save", "(", "WorkflowStatus", ".", "HALTED", ")", "eng", ".", "log", ".", "warning", "(", "\"Workflow '%s' waiting at task %s with message: %s\"", ",", "eng", ".", "name", ",", "eng", ".", "current_taskname", "or", "\"Unknown\"", ",", "e", ".", "message", ")", "db", ".", "session", ".", "commit", "(", ")", "# Call super which will reraise", "TransitionActions", ".", "HaltProcessing", "(", "obj", ",", "eng", ",", "callbacks", ",", "exc_info", ")" ]
Take actions when WaitProcessing is raised. .. note:: We're essentially doing HaltProcessing, plus `obj.set_action` and object status `WAITING` instead of `HALTED`. This is not present in TransitionActions, which is why this method does not call super here.
[ "Take", "actions", "when", "WaitProcessing", "is", "raised", "." ]
9c09fd29509a3db975ac2aba337e6760d8cfd3c2
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L368-L391
train
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
InvenioTransitionAction.StopProcessing
def StopProcessing(obj, eng, callbacks, exc_info): """Stop the engine and mark the workflow as completed.""" e = exc_info[1] obj.save(status=eng.object_status.COMPLETED, id_workflow=eng.uuid) eng.save(WorkflowStatus.COMPLETED) obj.log.warning( "Workflow '%s' stopped at task %s with message: %s", eng.name, eng.current_taskname or "Unknown", e.message ) db.session.commit() super(InvenioTransitionAction, InvenioTransitionAction).StopProcessing( obj, eng, callbacks, exc_info )
python
def StopProcessing(obj, eng, callbacks, exc_info): """Stop the engine and mark the workflow as completed.""" e = exc_info[1] obj.save(status=eng.object_status.COMPLETED, id_workflow=eng.uuid) eng.save(WorkflowStatus.COMPLETED) obj.log.warning( "Workflow '%s' stopped at task %s with message: %s", eng.name, eng.current_taskname or "Unknown", e.message ) db.session.commit() super(InvenioTransitionAction, InvenioTransitionAction).StopProcessing( obj, eng, callbacks, exc_info )
[ "def", "StopProcessing", "(", "obj", ",", "eng", ",", "callbacks", ",", "exc_info", ")", ":", "e", "=", "exc_info", "[", "1", "]", "obj", ".", "save", "(", "status", "=", "eng", ".", "object_status", ".", "COMPLETED", ",", "id_workflow", "=", "eng", ".", "uuid", ")", "eng", ".", "save", "(", "WorkflowStatus", ".", "COMPLETED", ")", "obj", ".", "log", ".", "warning", "(", "\"Workflow '%s' stopped at task %s with message: %s\"", ",", "eng", ".", "name", ",", "eng", ".", "current_taskname", "or", "\"Unknown\"", ",", "e", ".", "message", ")", "db", ".", "session", ".", "commit", "(", ")", "super", "(", "InvenioTransitionAction", ",", "InvenioTransitionAction", ")", ".", "StopProcessing", "(", "obj", ",", "eng", ",", "callbacks", ",", "exc_info", ")" ]
Stop the engine and mark the workflow as completed.
[ "Stop", "the", "engne", "and", "mark", "the", "workflow", "as", "completed", "." ]
9c09fd29509a3db975ac2aba337e6760d8cfd3c2
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L419-L433
train
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
InvenioTransitionAction.SkipToken
def SkipToken(obj, eng, callbacks, exc_info): """Take action when SkipToken is raised.""" msg = "Skipped running this object: {0}".format(obj.id) eng.log.debug(msg) raise Continue
python
def SkipToken(obj, eng, callbacks, exc_info): """Take action when SkipToken is raised.""" msg = "Skipped running this object: {0}".format(obj.id) eng.log.debug(msg) raise Continue
[ "def", "SkipToken", "(", "obj", ",", "eng", ",", "callbacks", ",", "exc_info", ")", ":", "msg", "=", "\"Skipped running this object: {0}\"", ".", "format", "(", "obj", ".", "id", ")", "eng", ".", "log", ".", "debug", "(", "msg", ")", "raise", "Continue" ]
Take action when SkipToken is raised.
[ "Take", "action", "when", "SkipToken", "is", "raised", "." ]
9c09fd29509a3db975ac2aba337e6760d8cfd3c2
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L436-L440
train
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
InvenioTransitionAction.AbortProcessing
def AbortProcessing(obj, eng, callbacks, exc_info): """Take action when AbortProcessing is raised.""" msg = "Processing was aborted for object: {0}".format(obj.id) eng.log.debug(msg) raise Break
python
def AbortProcessing(obj, eng, callbacks, exc_info): """Take action when AbortProcessing is raised.""" msg = "Processing was aborted for object: {0}".format(obj.id) eng.log.debug(msg) raise Break
[ "def", "AbortProcessing", "(", "obj", ",", "eng", ",", "callbacks", ",", "exc_info", ")", ":", "msg", "=", "\"Processing was aborted for object: {0}\"", ".", "format", "(", "obj", ".", "id", ")", "eng", ".", "log", ".", "debug", "(", "msg", ")", "raise", "Break" ]
Take action when AbortProcessing is raised.
[ "Take", "action", "when", "AbortProcessing", "is", "raised", "." ]
9c09fd29509a3db975ac2aba337e6760d8cfd3c2
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L443-L447
train
xypnox/email_purifier
epurifier/tezt.py
edits1
def edits1(word): "All edits that are one edit away from `word`." letters = 'qwertyuiopasdfghjklzxcvbnm' splits = [(word[:i], word[i:]) for i in range(len(word) + 1)] print('splits = ', splits) deletes = [L + R[1:] for L, R in splits if R] print('deletes = ', deletes) transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1] print('transposes = ', transposes) replaces = [L + c + R[1:] for L, R in splits if R for c in letters] print('replaces = ', replaces) inserts = [L + c + R for L, R in splits for c in letters] print('inserts = ', inserts) print(deletes + transposes + replaces + inserts) print(len(set(deletes + transposes + replaces + inserts))) return deletes + transposes + replaces + inserts
python
def edits1(word): "All edits that are one edit away from `word`." letters = 'qwertyuiopasdfghjklzxcvbnm' splits = [(word[:i], word[i:]) for i in range(len(word) + 1)] print('splits = ', splits) deletes = [L + R[1:] for L, R in splits if R] print('deletes = ', deletes) transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1] print('transposes = ', transposes) replaces = [L + c + R[1:] for L, R in splits if R for c in letters] print('replaces = ', replaces) inserts = [L + c + R for L, R in splits for c in letters] print('inserts = ', inserts) print(deletes + transposes + replaces + inserts) print(len(set(deletes + transposes + replaces + inserts))) return deletes + transposes + replaces + inserts
[ "def", "edits1", "(", "word", ")", ":", "letters", "=", "'qwertyuiopasdfghjklzxcvbnm'", "splits", "=", "[", "(", "word", "[", ":", "i", "]", ",", "word", "[", "i", ":", "]", ")", "for", "i", "in", "range", "(", "len", "(", "word", ")", "+", "1", ")", "]", "print", "(", "'splits = '", ",", "splits", ")", "deletes", "=", "[", "L", "+", "R", "[", "1", ":", "]", "for", "L", ",", "R", "in", "splits", "if", "R", "]", "print", "(", "'deletes = '", ",", "deletes", ")", "transposes", "=", "[", "L", "+", "R", "[", "1", "]", "+", "R", "[", "0", "]", "+", "R", "[", "2", ":", "]", "for", "L", ",", "R", "in", "splits", "if", "len", "(", "R", ")", ">", "1", "]", "print", "(", "'transposes = '", ",", "transposes", ")", "replaces", "=", "[", "L", "+", "c", "+", "R", "[", "1", ":", "]", "for", "L", ",", "R", "in", "splits", "if", "R", "for", "c", "in", "letters", "]", "print", "(", "'replaces = '", ",", "replaces", ")", "inserts", "=", "[", "L", "+", "c", "+", "R", "for", "L", ",", "R", "in", "splits", "for", "c", "in", "letters", "]", "print", "(", "'inserts = '", ",", "inserts", ")", "print", "(", "deletes", "+", "transposes", "+", "replaces", "+", "inserts", ")", "print", "(", "len", "(", "set", "(", "deletes", "+", "transposes", "+", "replaces", "+", "inserts", ")", ")", ")", "return", "deletes", "+", "transposes", "+", "replaces", "+", "inserts" ]
All edits that are one edit away from `word`.
[ "All", "edits", "that", "are", "one", "edit", "away", "from", "word", "." ]
a9ecde9c5293b5c283e0c5b4cf8744c76418fb6f
https://github.com/xypnox/email_purifier/blob/a9ecde9c5293b5c283e0c5b4cf8744c76418fb6f/epurifier/tezt.py#L1-L16
train
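The candidate counts in edits1 follow a closed form: for a word of length n there are n deletes, n-1 transposes, 26n replaces and 26(n+1) inserts, i.e. 54n + 25 candidates before deduplication. The check below re-implements the construction without the debug prints to verify that.

def edits1_quiet(word, letters='abcdefghijklmnopqrstuvwxyz'):
    # Same construction as edits1 above, minus the print statements.
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = [L + R[1:] for L, R in splits if R]
    transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
    replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
    inserts = [L + c + R for L, R in splits for c in letters]
    return deletes + transposes + replaces + inserts

n = len('word')
cands = edits1_quiet('word')
assert len(cands) == 54 * n + 25 == 241
print(len(set(cands)))  # fewer once duplicates are removed, e.g. a
                        # replace that re-inserts the same letter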
openvax/varlens
varlens/read_evidence/pileup_collection.py
to_locus
def to_locus(variant_or_locus): """ Return a Locus object for a Variant instance. This is necessary since the read evidence module expects Variant instances to have a locus attribute set to a varcode.Locus instance of interbase genomic coordinates. The rest of varcode uses a different Variant class, but will eventually be transitioned to interbase coordinates. See test/test_read_evidence.py for a definition of the Variant class that the read_evidence module is meant to work with. This function can be passed a regular varcode.Variant instance (with fields start, end, contig, etc.), a different kind of variant object that has a 'locus' field, or a varcode.Locus. It will return a varcode.Locus instance. This should all get cleaned up once varcode switches to interbase coordinates and we standardize on a Variant class. """ if isinstance(variant_or_locus, Locus): return variant_or_locus try: return variant_or_locus.locus except AttributeError: # IMPORTANT: if varcode someday changes from inclusive to interbase # coordinates, this will need to be updated. return Locus.from_inclusive_coordinates( variant_or_locus.contig, variant_or_locus.start, variant_or_locus.end)
python
def to_locus(variant_or_locus): """ Return a Locus object for a Variant instance. This is necessary since the read evidence module expects Variant instances to have a locus attribute set to a varcode.Locus instance of interbase genomic coordinates. The rest of varcode uses a different Variant class, but will eventually be transitioned to interbase coordinates. See test/test_read_evidence.py for a definition of the Variant class that the read_evidence module is meant to work with. This function can be passed a regular varcode.Variant instance (with fields start, end, contig, etc.), a different kind of variant object that has a 'locus' field, or a varcode.Locus. It will return a varcode.Locus instance. This should all get cleaned up once varcode switches to interbase coordinates and we standardize on a Variant class. """ if isinstance(variant_or_locus, Locus): return variant_or_locus try: return variant_or_locus.locus except AttributeError: # IMPORTANT: if varcode someday changes from inclusive to interbase # coordinates, this will need to be updated. return Locus.from_inclusive_coordinates( variant_or_locus.contig, variant_or_locus.start, variant_or_locus.end)
[ "def", "to_locus", "(", "variant_or_locus", ")", ":", "if", "isinstance", "(", "variant_or_locus", ",", "Locus", ")", ":", "return", "variant_or_locus", "try", ":", "return", "variant_or_locus", ".", "locus", "except", "AttributeError", ":", "# IMPORTANT: if varcode someday changes from inclusive to interbase", "# coordinates, this will need to be updated.", "return", "Locus", ".", "from_inclusive_coordinates", "(", "variant_or_locus", ".", "contig", ",", "variant_or_locus", ".", "start", ",", "variant_or_locus", ".", "end", ")" ]
Return a Locus object for a Variant instance. This is necessary since the read evidence module expects Variant instances to have a locus attribute set to a varcode.Locus instance of interbase genomic coordinates. The rest of varcode uses a different Variant class, but will eventually be transitioned to interbase coordinates. See test/test_read_evidence.py for a definition of the Variant class that the read_evidence module is meant to work with. This function can be passed a regular varcode.Variant instance (with fields start, end, contig, etc.), a different kind of variant object that has a 'locus' field, or a varcode.Locus. It will return a varcode.Locus instance. This should all get cleaned up once varcode switches to interbase coordinates and we standardize on a Variant class.
[ "Return", "a", "Locus", "object", "for", "a", "Variant", "instance", "." ]
715d3ede5893757b2fcba4117515621bca7b1e5d
https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/read_evidence/pileup_collection.py#L605-L634
train
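The inclusive-to-interbase conversion delegated to Locus.from_inclusive_coordinates is worth spelling out. The two-liner below states the assumed mapping (1-based inclusive [start, end] to half-open interbase [start - 1, end)), inferred from the docstring above, without requiring varcode.

# Interbase coordinates address the gaps between bases, so an
# inclusive 1-based range [start, end] maps to the half-open
# interbase interval [start - 1, end).
def inclusive_to_interbase(start, end):
    return (start - 1, end)

assert inclusive_to_interbase(5, 5) == (4, 5)     # a single base
assert inclusive_to_interbase(10, 12) == (9, 12)  # three bases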
openvax/varlens
varlens/read_evidence/pileup_collection.py
PileupCollection.pileup
def pileup(self, locus): ''' Given a 1-base locus, return the Pileup at that locus. Raises a KeyError if this PileupCollection does not have a Pileup at the specified locus. ''' locus = to_locus(locus) if len(locus.positions) != 1: raise ValueError("Not a single-base locus: %s" % locus) return self.pileups[locus]
python
def pileup(self, locus): ''' Given a 1-base locus, return the Pileup at that locus. Raises a KeyError if this PileupCollection does not have a Pileup at the specified locus. ''' locus = to_locus(locus) if len(locus.positions) != 1: raise ValueError("Not a single-base locus: %s" % locus) return self.pileups[locus]
[ "def", "pileup", "(", "self", ",", "locus", ")", ":", "locus", "=", "to_locus", "(", "locus", ")", "if", "len", "(", "locus", ".", "positions", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"Not a single-base locus: %s\"", "%", "locus", ")", "return", "self", ".", "pileups", "[", "locus", "]" ]
Given a 1-base locus, return the Pileup at that locus. Raises a KeyError if this PileupCollection does not have a Pileup at the specified locus.
[ "Given", "a", "1", "-", "base", "locus", "return", "the", "Pileup", "at", "that", "locus", "." ]
715d3ede5893757b2fcba4117515621bca7b1e5d
https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/read_evidence/pileup_collection.py#L54-L64
train
openvax/varlens
varlens/read_evidence/pileup_collection.py
PileupCollection.at
def at(self, *loci): ''' Return a new PileupCollection instance including only pileups for the specified loci. ''' loci = [to_locus(obj) for obj in loci] single_position_loci = [] for locus in loci: for position in locus.positions: single_position_loci.append( Locus.from_interbase_coordinates(locus.contig, position)) pileups = dict( (locus, self.pileups[locus]) for locus in single_position_loci) return PileupCollection(pileups, self)
python
def at(self, *loci): ''' Return a new PileupCollection instance including only pileups for the specified loci. ''' loci = [to_locus(obj) for obj in loci] single_position_loci = [] for locus in loci: for position in locus.positions: single_position_loci.append( Locus.from_interbase_coordinates(locus.contig, position)) pileups = dict( (locus, self.pileups[locus]) for locus in single_position_loci) return PileupCollection(pileups, self)
[ "def", "at", "(", "self", ",", "*", "loci", ")", ":", "loci", "=", "[", "to_locus", "(", "obj", ")", "for", "obj", "in", "loci", "]", "single_position_loci", "=", "[", "]", "for", "locus", "in", "loci", ":", "for", "position", "in", "locus", ".", "positions", ":", "single_position_loci", ".", "append", "(", "Locus", ".", "from_interbase_coordinates", "(", "locus", ".", "contig", ",", "position", ")", ")", "pileups", "=", "dict", "(", "(", "locus", ",", "self", ".", "pileups", "[", "locus", "]", ")", "for", "locus", "in", "single_position_loci", ")", "return", "PileupCollection", "(", "pileups", ",", "self", ")" ]
Return a new PileupCollection instance including only pileups for the specified loci.
[ "Return", "a", "new", "PileupCollection", "instance", "including", "only", "pileups", "for", "the", "specified", "loci", "." ]
715d3ede5893757b2fcba4117515621bca7b1e5d
https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/read_evidence/pileup_collection.py#L66-L79
train
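A hedged end-to-end sketch for the PileupCollection API in these records: the import paths, the from_bam-style constructor and the file name are all assumptions, since construction is not shown in this section.

from varcode import Locus  # assumption: Locus lives in varcode
from varlens.read_evidence import PileupCollection  # assumed path

loci = [Locus.from_inclusive_coordinates('chr1', 100, 102)]
collection = PileupCollection.from_bam('sample.bam', loci)  # assumed ctor

subset = collection.at(*loci)  # split into single-base pileups
base = Locus.from_interbase_coordinates('chr1', 99)  # inclusive base 100
print(len(subset.pileup(base).elements))  # read depth at that base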
openvax/varlens
varlens/read_evidence/pileup_collection.py
PileupCollection.reads
def reads(self): ''' The reads in this PileupCollection. All reads will have an alignment that overlaps at least one of the included loci. Since SAM (and pysam) have no real notion of a "read", the returned instances are actually pysam.AlignedSegment instances, (i.e. alignments). However, only one alignment will be returned by this method per read. Returns ---------- List of pysam.AlignedSegment instances. If a particular read has more than one alignment in this PileupCollection (e.g. one primary and one secondary), then the alignment returned is the one with the highest mapping quality. ''' # TODO: Optimize this. def alignment_precedence(pysam_alignment_record): return pysam_alignment_record.mapping_quality result = {} for pileup in self.pileups.values(): for e in pileup.elements: key = read_key(e.alignment) if key not in result or ( alignment_precedence(e.alignment) > alignment_precedence(result[key])): result[key] = e.alignment return list(result.values())
python
def reads(self): ''' The reads in this PileupCollection. All reads will have an alignment that overlaps at least one of the included loci. Since SAM (and pysam) have no real notion of a "read", the returned instances are actually pysam.AlignedSegment instances, (i.e. alignments). However, only one alignment will be returned by this method per read. Returns ---------- List of pysam.AlignedSegment instances. If a particular read has more than one alignment in this PileupCollection (e.g. one primary and one secondary), then the alignment returned is the one with the highest mapping quality. ''' # TODO: Optimize this. def alignment_precedence(pysam_alignment_record): return pysam_alignment_record.mapping_quality result = {} for pileup in self.pileups.values(): for e in pileup.elements: key = read_key(e.alignment) if key not in result or ( alignment_precedence(e.alignment) > alignment_precedence(result[key])): result[key] = e.alignment return list(result.values())
[ "def", "reads", "(", "self", ")", ":", "# TODO: Optimize this.", "def", "alignment_precedence", "(", "pysam_alignment_record", ")", ":", "return", "pysam_alignment_record", ".", "mapping_quality", "result", "=", "{", "}", "for", "pileup", "in", "self", ".", "pileups", ".", "values", "(", ")", ":", "for", "e", "in", "pileup", ".", "elements", ":", "key", "=", "read_key", "(", "e", ".", "alignment", ")", "if", "key", "not", "in", "result", "or", "(", "alignment_precedence", "(", "e", ".", "alignment", ")", ">", "alignment_precedence", "(", "result", "[", "key", "]", ")", ")", ":", "result", "[", "key", "]", "=", "e", ".", "alignment", "return", "list", "(", "result", ".", "values", "(", ")", ")" ]
The reads in this PileupCollection. All reads will have an alignment that overlaps at least one of the included loci. Since SAM (and pysam) have no real notion of a "read", the returned instances are actually pysam.AlignedSegment instances, (i.e. alignments). However, only one alignment will be returned by this method per read. Returns ---------- List of pysam.AlignedSegment instances. If a particular read has more than one alignment in this PileupCollection (e.g. one primary and one secondary), then the alignment returned is the one with the highest mapping quality.
[ "The", "reads", "in", "this", "PileupCollection", ".", "All", "reads", "will", "have", "an", "alignment", "that", "overlaps", "at", "least", "one", "of", "the", "included", "loci", "." ]
715d3ede5893757b2fcba4117515621bca7b1e5d
https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/read_evidence/pileup_collection.py#L87-L117
train
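The deduplication in reads() is a generic keep-best-per-key pattern; here it is in isolation, with (query_name, mapping_quality) pairs standing in for alignments.

# Index items by a key and overwrite only when the new item ranks
# higher, exactly as reads() does with alignment_precedence.
alignments = [
    ('read1', 30),  # primary alignment
    ('read1', 60),  # another alignment of the same read, better quality
    ('read2', 20),
]

best = {}
for name, quality in alignments:
    if name not in best or quality > best[name]:
        best[name] = quality

assert best == {'read1': 60, 'read2': 20}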
openvax/varlens
varlens/read_evidence/pileup_collection.py
PileupCollection.read_attributes
def read_attributes(self, attributes=None): ''' Collect read attributes across reads in this PileupCollection into a pandas.DataFrame. Valid attributes are the following properties of a pysam.AlignedSegment instance. See: http://pysam.readthedocs.org/en/latest/api.html#pysam.AlignedSegment for the meaning of these attributes. * cigarstring * flag * inferred_length * is_duplicate * is_paired * is_proper_pair * is_qcfail * is_read1 * is_read2 * is_reverse * is_secondary * is_unmapped * mapping_quality * mate_is_reverse * mate_is_unmapped * next_reference_id * next_reference_start * query_alignment_end * query_alignment_length * query_alignment_qualities * query_alignment_sequence * query_alignment_start * query_length * query_name * reference_end * reference_id * reference_length * reference_start * template_length (Note: the above list is parsed into the _READ_ATTRIBUTE_NAMES class variable, so be careful when modifying it.) Additionally, for alignment "tags" (arbitrary key values associated with an alignment), a column of the form "TAG_{tag name}" is included. Finally, the column "pysam_alignment_record" gives the underlying `pysam.AlignedSegment` instances. Parameters ---------- attributes (optional): list of strings List of columns to include. If unspecified, all columns are included in the result. Returns ---------- pandas.DataFrame of read attributes. ''' def include(attribute): return attributes is None or attribute in attributes reads = self.reads() possible_column_names = list(PileupCollection._READ_ATTRIBUTE_NAMES) result = OrderedDict( (name, [getattr(read, name) for read in reads]) for name in PileupCollection._READ_ATTRIBUTE_NAMES if include(name)) # Add tag columns. if reads: tag_dicts = [dict(x.get_tags()) for x in reads] tag_keys = set.union( *[set(item.keys()) for item in tag_dicts]) for tag_key in sorted(tag_keys): column_name = "TAG_%s" % tag_key possible_column_names.append(column_name) if include(column_name): result[column_name] = [d.get(tag_key) for d in tag_dicts] # Lastly, we include the underlying pysam alignment record. possible_column_names.append("pysam_alignment_record") if include("pysam_alignment_record"): result["pysam_alignment_record"] = reads # If particular attributes were requested, check that they're here. if attributes is not None: for attribute in attributes: if attribute not in result: raise ValueError( "No such attribute: %s. Valid attributes are: %s" % (attribute, " ".join(possible_column_names))) assert set(attributes) == set(result) return pandas.DataFrame(result)
python
def read_attributes(self, attributes=None): ''' Collect read attributes across reads in this PileupCollection into a pandas.DataFrame. Valid attributes are the following properties of a pysam.AlignedSegment instance. See: http://pysam.readthedocs.org/en/latest/api.html#pysam.AlignedSegment for the meaning of these attributes. * cigarstring * flag * inferred_length * is_duplicate * is_paired * is_proper_pair * is_qcfail * is_read1 * is_read2 * is_reverse * is_secondary * is_unmapped * mapping_quality * mate_is_reverse * mate_is_unmapped * next_reference_id * next_reference_start * query_alignment_end * query_alignment_length * query_alignment_qualities * query_alignment_sequence * query_alignment_start * query_length * query_name * reference_end * reference_id * reference_length * reference_start * template_length (Note: the above list is parsed into the _READ_ATTRIBUTE_NAMES class variable, so be careful when modifying it.) Additionally, for alignment "tags" (arbitrary key values associated with an alignment), a column of the form "TAG_{tag name}" is included. Finally, the column "pysam_alignment_record" gives the underlying `pysam.AlignedSegment` instances. Parameters ---------- attributes (optional): list of strings List of columns to include. If unspecified, all columns are included in the result. Returns ---------- pandas.DataFrame of read attributes. ''' def include(attribute): return attributes is None or attribute in attributes reads = self.reads() possible_column_names = list(PileupCollection._READ_ATTRIBUTE_NAMES) result = OrderedDict( (name, [getattr(read, name) for read in reads]) for name in PileupCollection._READ_ATTRIBUTE_NAMES if include(name)) # Add tag columns. if reads: tag_dicts = [dict(x.get_tags()) for x in reads] tag_keys = set.union( *[set(item.keys()) for item in tag_dicts]) for tag_key in sorted(tag_keys): column_name = "TAG_%s" % tag_key possible_column_names.append(column_name) if include(column_name): result[column_name] = [d.get(tag_key) for d in tag_dicts] # Lastly, we include the underlying pysam alignment record. possible_column_names.append("pysam_alignment_record") if include("pysam_alignment_record"): result["pysam_alignment_record"] = reads # If particular attributes were requested, check that they're here. if attributes is not None: for attribute in attributes: if attribute not in result: raise ValueError( "No such attribute: %s. Valid attributes are: %s" % (attribute, " ".join(possible_column_names))) assert set(attributes) == set(result) return pandas.DataFrame(result)
[ "def", "read_attributes", "(", "self", ",", "attributes", "=", "None", ")", ":", "def", "include", "(", "attribute", ")", ":", "return", "attributes", "is", "None", "or", "attribute", "in", "attributes", "reads", "=", "self", ".", "reads", "(", ")", "possible_column_names", "=", "list", "(", "PileupCollection", ".", "_READ_ATTRIBUTE_NAMES", ")", "result", "=", "OrderedDict", "(", "(", "name", ",", "[", "getattr", "(", "read", ",", "name", ")", "for", "read", "in", "reads", "]", ")", "for", "name", "in", "PileupCollection", ".", "_READ_ATTRIBUTE_NAMES", "if", "include", "(", "name", ")", ")", "# Add tag columns.", "if", "reads", ":", "tag_dicts", "=", "[", "dict", "(", "x", ".", "get_tags", "(", ")", ")", "for", "x", "in", "reads", "]", "tag_keys", "=", "set", ".", "union", "(", "*", "[", "set", "(", "item", ".", "keys", "(", ")", ")", "for", "item", "in", "tag_dicts", "]", ")", "for", "tag_key", "in", "sorted", "(", "tag_keys", ")", ":", "column_name", "=", "\"TAG_%s\"", "%", "tag_key", "possible_column_names", ".", "append", "(", "column_name", ")", "if", "include", "(", "column_name", ")", ":", "result", "[", "column_name", "]", "=", "[", "d", ".", "get", "(", "tag_key", ")", "for", "d", "in", "tag_dicts", "]", "# Lastly, we include the underlying pysam alignment record.", "possible_column_names", ".", "append", "(", "\"pysam_alignment_record\"", ")", "if", "include", "(", "\"pysam_alignment_record\"", ")", ":", "result", "[", "\"pysam_alignment_record\"", "]", "=", "reads", "# If particular attributes were requested, check that they're here.", "if", "attributes", "is", "not", "None", ":", "for", "attribute", "in", "attributes", ":", "if", "attribute", "not", "in", "result", ":", "raise", "ValueError", "(", "\"No such attribute: %s. Valid attributes are: %s\"", "%", "(", "attribute", ",", "\" \"", ".", "join", "(", "possible_column_names", ")", ")", ")", "assert", "set", "(", "attributes", ")", "==", "set", "(", "result", ")", "return", "pandas", ".", "DataFrame", "(", "result", ")" ]
Collect read attributes across reads in this PileupCollection into a pandas.DataFrame. Valid attributes are the following properties of a pysam.AlignedSegment instance. See: http://pysam.readthedocs.org/en/latest/api.html#pysam.AlignedSegment for the meaning of these attributes. * cigarstring * flag * inferred_length * is_duplicate * is_paired * is_proper_pair * is_qcfail * is_read1 * is_read2 * is_reverse * is_secondary * is_unmapped * mapping_quality * mate_is_reverse * mate_is_unmapped * next_reference_id * next_reference_start * query_alignment_end * query_alignment_length * query_alignment_qualities * query_alignment_sequence * query_alignment_start * query_length * query_name * reference_end * reference_id * reference_length * reference_start * template_length (Note: the above list is parsed into the _READ_ATTRIBUTE_NAMES class variable, so be careful when modifying it.) Additionally, for alignment "tags" (arbitrary key values associated with an alignment), a column of the form "TAG_{tag name}" is included. Finally, the column "pysam_alignment_record" gives the underlying `pysam.AlignedSegment` instances. Parameters ---------- attributes (optional): list of strings List of columns to include. If unspecified, all columns are included in the result. Returns ---------- pandas.DataFrame of read attributes.
[ "Collect", "read", "attributes", "across", "reads", "in", "this", "PileupCollection", "into", "a", "pandas", ".", "DataFrame", "." ]
715d3ede5893757b2fcba4117515621bca7b1e5d
https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/read_evidence/pileup_collection.py#L143-L240
train
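A minimal usage sketch for the read_attributes record above. The BAM path, contig name, import paths, and the from_bam constructor are illustrative assumptions not shown in this excerpt; only the read_attributes call itself is documented in the record.

import pysam
from varlens.read_evidence.pileup_collection import PileupCollection  # assumed import path
from varlens.locus import Locus  # assumed import path

# Hypothetical inputs: a BAM file and a single-base locus (interbase coordinates).
samfile = pysam.AlignmentFile("sample.bam")            # hypothetical path
locus = Locus.from_interbase_coordinates("chr1", 999)  # base at 0-based position 999

# from_bam is assumed to exist elsewhere in this module.
collection = PileupCollection.from_bam(samfile, [locus])

# All columns, including the TAG_* columns and pysam_alignment_record:
all_attrs = collection.read_attributes()

# A restricted set of columns; a misspelled column name raises ValueError.
df = collection.read_attributes(
    attributes=["query_name", "mapping_quality", "is_duplicate"])
print(df.head())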
openvax/varlens
varlens/read_evidence/pileup_collection.py
PileupCollection.group_by_allele
def group_by_allele(self, locus): ''' Split the PileupCollection by the alleles suggested by the reads at the specified locus. If a read has an insertion immediately following the locus, then the insertion is included in the allele. For example, if locus is the 1-base range [5,6), one allele might be "AGA", indicating that at locus 5 some read has an "A" followed by a 2-base insertion ("GA"). If a read has a deletion at the specified locus, the allele is the empty string. The given locus may include any number of bases. If the locus includes multiple bases, then the alleles consist of all bases aligning to that range in any read. Note that only sequences actually sequenced in a particular read are included. For example, if one read has "ATT" at a locus and another read has "GCC", then the alleles are "ATT" and "GCC", but not "GTT". That is, the bases in each allele are phased. For this reason, only reads that overlap the entire locus are included. If the locus is an empty interval (e.g. [5,5) ), then the alleles consist only of inserted bases. In this example, only bases inserted immediately after locus 5 would be included (but *not* the base actually at locus 5). In the previous insertion example, the allele would be "GA", indicating a 2-base insertion. Reads that have no insertion at that position (matches or deletions) would have the empty string as their allele. Parameters ---------- locus : Locus The reference locus, encompassing 0 or more bases. Returns ---------- A dict of string -> PileupCollection. The keys are nucleotide strings giving the bases sequenced at the locus, and the values are PileupCollection instances of the alignments that support that allele. ''' locus = to_locus(locus) read_to_allele = None loci = [] if locus.positions: # Our locus includes at least one reference base. for position in locus.positions: base_position = Locus.from_interbase_coordinates( locus.contig, position) loci.append(base_position) new_read_to_allele = {} for element in self.pileups[base_position]: allele_prefix = "" key = alignment_key(element.alignment) if read_to_allele is not None: try: allele_prefix = read_to_allele[key] except KeyError: continue allele = allele_prefix + element.bases new_read_to_allele[key] = allele read_to_allele = new_read_to_allele else: # Our locus is between reference bases. position_before = Locus.from_interbase_coordinates( locus.contig, locus.start) loci.append(position_before) read_to_allele = {} for element in self.pileups[position_before]: allele = element.bases[1:] read_to_allele[alignment_key(element.alignment)] = allele split = defaultdict(lambda: PileupCollection(pileups={}, parent=self)) for locus in loci: pileup = self.pileups[locus] for e in pileup.elements: key = read_to_allele.get(alignment_key(e.alignment)) if key is not None: if locus in split[key].pileups: split[key].pileups[locus].append(e) else: split[key].pileups[locus] = Pileup(locus, [e]) # Sort by number of reads (descending). Break ties with the # lexicographic ordering of the allele string. def sorter(pair): (allele, pileup_collection) = pair return (-1 * pileup_collection.num_reads(), allele) return OrderedDict(sorted(split.items(), key=sorter))
python
def group_by_allele(self, locus): ''' Split the PileupCollection by the alleles suggested by the reads at the specified locus. If a read has an insertion immediately following the locus, then the insertion is included in the allele. For example, if locus is the 1-base range [5,6), one allele might be "AGA", indicating that at locus 5 some read has an "A" followed by a 2-base insertion ("GA"). If a read has a deletion at the specified locus, the allele is the empty string. The given locus may include any number of bases. If the locus includes multiple bases, then the alleles consist of all bases aligning to that range in any read. Note that only sequences actually sequenced in a particular read are included. For example, if one read has "ATT" at a locus and another read has "GCC", then the alleles are "ATT" and "GCC", but not "GTT". That is, the bases in each allele are phased. For this reason, only reads that overlap the entire locus are included. If the locus is an empty interval (e.g. [5,5) ), then the alleles consist only of inserted bases. In this example, only bases inserted immediately after locus 5 would be included (but *not* the base actually at locus 5). In the previous insertion example, the allele would be "GA", indicating a 2-base insertion. Reads that have no insertion at that position (matches or deletions) would have the empty string as their allele. Parameters ---------- locus : Locus The reference locus, encompassing 0 or more bases. Returns ---------- A dict of string -> PileupCollection. The keys are nucleotide strings giving the bases sequenced at the locus, and the values are PileupCollection instances of the alignments that support that allele. ''' locus = to_locus(locus) read_to_allele = None loci = [] if locus.positions: # Our locus includes at least one reference base. for position in locus.positions: base_position = Locus.from_interbase_coordinates( locus.contig, position) loci.append(base_position) new_read_to_allele = {} for element in self.pileups[base_position]: allele_prefix = "" key = alignment_key(element.alignment) if read_to_allele is not None: try: allele_prefix = read_to_allele[key] except KeyError: continue allele = allele_prefix + element.bases new_read_to_allele[key] = allele read_to_allele = new_read_to_allele else: # Our locus is between reference bases. position_before = Locus.from_interbase_coordinates( locus.contig, locus.start) loci.append(position_before) read_to_allele = {} for element in self.pileups[position_before]: allele = element.bases[1:] read_to_allele[alignment_key(element.alignment)] = allele split = defaultdict(lambda: PileupCollection(pileups={}, parent=self)) for locus in loci: pileup = self.pileups[locus] for e in pileup.elements: key = read_to_allele.get(alignment_key(e.alignment)) if key is not None: if locus in split[key].pileups: split[key].pileups[locus].append(e) else: split[key].pileups[locus] = Pileup(locus, [e]) # Sort by number of reads (descending). Break ties with the # lexicographic ordering of the allele string. def sorter(pair): (allele, pileup_collection) = pair return (-1 * pileup_collection.num_reads(), allele) return OrderedDict(sorted(split.items(), key=sorter))
[ "def", "group_by_allele", "(", "self", ",", "locus", ")", ":", "locus", "=", "to_locus", "(", "locus", ")", "read_to_allele", "=", "None", "loci", "=", "[", "]", "if", "locus", ".", "positions", ":", "# Our locus includes at least one reference base.", "for", "position", "in", "locus", ".", "positions", ":", "base_position", "=", "Locus", ".", "from_interbase_coordinates", "(", "locus", ".", "contig", ",", "position", ")", "loci", ".", "append", "(", "base_position", ")", "new_read_to_allele", "=", "{", "}", "for", "element", "in", "self", ".", "pileups", "[", "base_position", "]", ":", "allele_prefix", "=", "\"\"", "key", "=", "alignment_key", "(", "element", ".", "alignment", ")", "if", "read_to_allele", "is", "not", "None", ":", "try", ":", "allele_prefix", "=", "read_to_allele", "[", "key", "]", "except", "KeyError", ":", "continue", "allele", "=", "allele_prefix", "+", "element", ".", "bases", "new_read_to_allele", "[", "key", "]", "=", "allele", "read_to_allele", "=", "new_read_to_allele", "else", ":", "# Our locus is between reference bases.", "position_before", "=", "Locus", ".", "from_interbase_coordinates", "(", "locus", ".", "contig", ",", "locus", ".", "start", ")", "loci", ".", "append", "(", "position_before", ")", "read_to_allele", "=", "{", "}", "for", "element", "in", "self", ".", "pileups", "[", "position_before", "]", ":", "allele", "=", "element", ".", "bases", "[", "1", ":", "]", "read_to_allele", "[", "alignment_key", "(", "element", ".", "alignment", ")", "]", "=", "allele", "split", "=", "defaultdict", "(", "lambda", ":", "PileupCollection", "(", "pileups", "=", "{", "}", ",", "parent", "=", "self", ")", ")", "for", "locus", "in", "loci", ":", "pileup", "=", "self", ".", "pileups", "[", "locus", "]", "for", "e", "in", "pileup", ".", "elements", ":", "key", "=", "read_to_allele", ".", "get", "(", "alignment_key", "(", "e", ".", "alignment", ")", ")", "if", "key", "is", "not", "None", ":", "if", "locus", "in", "split", "[", "key", "]", ".", "pileups", ":", "split", "[", "key", "]", ".", "pileups", "[", "locus", "]", ".", "append", "(", "e", ")", "else", ":", "split", "[", "key", "]", ".", "pileups", "[", "locus", "]", "=", "Pileup", "(", "locus", ",", "[", "e", "]", ")", "# Sort by number of reads (descending). Break ties with the", "# lexicographic ordering of the allele string.", "def", "sorter", "(", "pair", ")", ":", "(", "allele", ",", "pileup_collection", ")", "=", "pair", "return", "(", "-", "1", "*", "pileup_collection", ".", "num_reads", "(", ")", ",", "allele", ")", "return", "OrderedDict", "(", "sorted", "(", "split", ".", "items", "(", ")", ",", "key", "=", "sorter", ")", ")" ]
Split the PileupCollection by the alleles suggested by the reads at the specified locus. If a read has an insertion immediately following the locus, then the insertion is included in the allele. For example, if locus is the 1-base range [5,6), one allele might be "AGA", indicating that at locus 5 some read has an "A" followed by a 2-base insertion ("GA"). If a read has a deletion at the specified locus, the allele is the empty string. The given locus may include any number of bases. If the locus includes multiple bases, then the alleles consist of all bases aligning to that range in any read. Note that only sequences actually sequenced in a particular read are included. For example, if one read has "ATT" at a locus and another read has "GCC", then the alleles are "ATT" and "GCC", but not "GTT". That is, the bases in each allele are phased. For this reason, only reads that overlap the entire locus are included. If the locus is an empty interval (e.g. [5,5) ), then the alleles consist only of inserted bases. In this example, only bases inserted immediately after locus 5 would be included (but *not* the base actually at locus 5). In the previous insertion example, the allele would be "GA", indicating a 2-base insertion. Reads that have no insertion at that position (matches or deletions) would have the empty string as their allele. Parameters ---------- locus : Locus The reference locus, encompassing 0 or more bases. Returns ---------- A dict of string -> PileupCollection. The keys are nucleotide strings giving the bases sequenced at the locus, and the values are PileupCollection instances of the alignments that support that allele.
[ "Split", "the", "PileupCollection", "by", "the", "alleles", "suggested", "by", "the", "reads", "at", "the", "specified", "locus", "." ]
715d3ede5893757b2fcba4117515621bca7b1e5d
https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/read_evidence/pileup_collection.py#L249-L336
train
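A sketch of group_by_allele in use, reusing the hypothetical collection and locus from the previous sketch. The three-argument form of Locus.from_interbase_coordinates used for the empty interval is an assumption; only the two-argument form appears in this excerpt.

# Keys are phased allele strings ("" for a deletion, longer strings for
# insertions); values are PileupCollection instances, ordered by read support.
alleles = collection.group_by_allele(locus)
for allele, evidence in alleles.items():
    print(repr(allele), evidence.num_reads())

# Empty interval [1000, 1000): alleles consist only of bases inserted
# immediately after position 1000; reads with no insertion there map to "".
insertion_locus = Locus.from_interbase_coordinates("chr1", 1000, 1000)  # assumed signature
insertions = collection.group_by_allele(insertion_locus)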
openvax/varlens
varlens/read_evidence/pileup_collection.py
PileupCollection.allele_summary
def allele_summary(self, locus, score=lambda x: x.num_reads()): ''' Convenience method to summarize the evidence for each of the alleles present at a locus. Applies a score function to the PileupCollection associated with each allele. See also `PileupCollection.group_by_allele`. Parameters ---------- locus : Locus The reference locus, encompassing 0 or more bases. score (optional) : PileupCollection -> object Function to apply to summarize the evidence for each allele. Default: count number of reads. Returns ---------- List of (allele, score) pairs. ''' locus = to_locus(locus) return [ (allele, score(x)) for (allele, x) in self.group_by_allele(locus).items() ]
python
def allele_summary(self, locus, score=lambda x: x.num_reads()): ''' Convenience method to summarize the evidence for each of the alleles present at a locus. Applies a score function to the PileupCollection associated with each allele. See also `PileupCollection.group_by_allele`. Parameters ---------- locus : Locus The reference locus, encompassing 0 or more bases. score (optional) : PileupCollection -> object Function to apply to summarize the evidence for each allele. Default: count number of reads. Returns ---------- List of (allele, score) pairs. ''' locus = to_locus(locus) return [ (allele, score(x)) for (allele, x) in self.group_by_allele(locus).items() ]
[ "def", "allele_summary", "(", "self", ",", "locus", ",", "score", "=", "lambda", "x", ":", "x", ".", "num_reads", "(", ")", ")", ":", "locus", "=", "to_locus", "(", "locus", ")", "return", "[", "(", "allele", ",", "score", "(", "x", ")", ")", "for", "(", "allele", ",", "x", ")", "in", "self", ".", "group_by_allele", "(", "locus", ")", ".", "items", "(", ")", "]" ]
Convenience method to summarize the evidence for each of the alleles present at a locus. Applies a score function to the PileupCollection associated with each allele. See also `PileupCollection.group_by_allele`. Parameters ---------- locus : Locus The reference locus, encompassing 0 or more bases. score (optional) : PileupCollection -> object Function to apply to summarize the evidence for each allele. Default: count number of reads. Returns ---------- List of (allele, score) pairs.
[ "Convenience", "method", "to", "summarize", "the", "evidence", "for", "each", "of", "the", "alleles", "present", "at", "a", "locus", ".", "Applies", "a", "score", "function", "to", "the", "PileupCollection", "associated", "with", "each", "allele", "." ]
715d3ede5893757b2fcba4117515621bca7b1e5d
https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/read_evidence/pileup_collection.py#L338-L363
train
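allele_summary is a thin wrapper over group_by_allele, so any PileupCollection -> value function can serve as the score. A sketch with the default score and a custom one (mean mapping quality, our illustrative choice) built from read_attributes:

# Default score: read count per allele.
print(collection.allele_summary(locus))  # e.g. [("A", 40), ("T", 12)] (illustrative output)

# Custom score: mean mapping quality of the supporting reads.
# Note: returns NaN for an allele with no reads.
def mean_mapping_quality(pileup_collection):
    frame = pileup_collection.read_attributes(attributes=["mapping_quality"])
    return frame["mapping_quality"].mean()

print(collection.allele_summary(locus, score=mean_mapping_quality))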
openvax/varlens
varlens/read_evidence/pileup_collection.py
PileupCollection.group_by_match
def group_by_match(self, variant):
        '''
        Given a variant, split the PileupCollection based on whether the
        data supports the reference allele, the alternate allele, or neither.

        Parameters
        ----------
        variant : Variant
            The variant. Must have fields 'locus', 'ref', and 'alt'.

        Returns
        ----------
        A MatchingEvidence named tuple with fields (ref, alt, other),
        each of which is a string -> PileupCollection dict mapping alleles
        to the PileupCollection of evidence supporting them.
        '''
        locus = to_locus(variant)
        if len(variant.ref) != len(locus.positions):
            logging.warning(
                "Ref is length %d but locus has %d bases in variant: %s" % (
                    len(variant.ref), len(locus.positions), str(variant)))

        alleles_dict = self.group_by_allele(locus)
        single_base_loci = [
            Locus.from_interbase_coordinates(locus.contig, position)
            for position in locus.positions
        ]
        empty_pileups = dict(
            (locus, Pileup(locus=locus, elements=[]))
            for locus in single_base_loci)
        empty_collection = PileupCollection(pileups=empty_pileups, parent=self)

        ref = {variant.ref: alleles_dict.pop(variant.ref, empty_collection)}
        alt = {variant.alt: alleles_dict.pop(variant.alt, empty_collection)}
        other = alleles_dict

        # TODO: consider end of read issues for insertions
        return MatchingEvidence(ref, alt, other)
python
def group_by_match(self, variant):
        '''
        Given a variant, split the PileupCollection based on whether the
        data supports the reference allele, the alternate allele, or neither.

        Parameters
        ----------
        variant : Variant
            The variant. Must have fields 'locus', 'ref', and 'alt'.

        Returns
        ----------
        A MatchingEvidence named tuple with fields (ref, alt, other),
        each of which is a string -> PileupCollection dict mapping alleles
        to the PileupCollection of evidence supporting them.
        '''
        locus = to_locus(variant)
        if len(variant.ref) != len(locus.positions):
            logging.warning(
                "Ref is length %d but locus has %d bases in variant: %s" % (
                    len(variant.ref), len(locus.positions), str(variant)))

        alleles_dict = self.group_by_allele(locus)
        single_base_loci = [
            Locus.from_interbase_coordinates(locus.contig, position)
            for position in locus.positions
        ]
        empty_pileups = dict(
            (locus, Pileup(locus=locus, elements=[]))
            for locus in single_base_loci)
        empty_collection = PileupCollection(pileups=empty_pileups, parent=self)

        ref = {variant.ref: alleles_dict.pop(variant.ref, empty_collection)}
        alt = {variant.alt: alleles_dict.pop(variant.alt, empty_collection)}
        other = alleles_dict

        # TODO: consider end of read issues for insertions
        return MatchingEvidence(ref, alt, other)
[ "def", "group_by_match", "(", "self", ",", "variant", ")", ":", "locus", "=", "to_locus", "(", "variant", ")", "if", "len", "(", "variant", ".", "ref", ")", "!=", "len", "(", "locus", ".", "positions", ")", ":", "logging", ".", "warning", "(", "\"Ref is length %d but locus has %d bases in variant: %s\"", "%", "(", "len", "(", "variant", ".", "ref", ")", ",", "len", "(", "locus", ".", "positions", ")", ",", "str", "(", "variant", ")", ")", ")", "alleles_dict", "=", "self", ".", "group_by_allele", "(", "locus", ")", "single_base_loci", "=", "[", "Locus", ".", "from_interbase_coordinates", "(", "locus", ".", "contig", ",", "position", ")", "for", "position", "in", "locus", ".", "positions", "]", "empty_pileups", "=", "dict", "(", "(", "locus", ",", "Pileup", "(", "locus", "=", "locus", ",", "elements", "=", "[", "]", ")", ")", "for", "locus", "in", "single_base_loci", ")", "empty_collection", "=", "PileupCollection", "(", "pileups", "=", "empty_pileups", ",", "parent", "=", "self", ")", "ref", "=", "{", "variant", ".", "ref", ":", "alleles_dict", ".", "pop", "(", "variant", ".", "ref", ",", "empty_collection", ")", "}", "alt", "=", "{", "variant", ".", "alt", ":", "alleles_dict", ".", "pop", "(", "variant", ".", "alt", ",", "empty_collection", ")", "}", "other", "=", "alleles_dict", "# TODO: consider end of read issues for insertions", "return", "MatchingEvidence", "(", "ref", ",", "alt", ",", "other", ")" ]
Given a variant, split the PileupCollection based on whether the data supports the reference allele, the alternate allele, or neither. Parameters ---------- variant : Variant The variant. Must have fields 'locus', 'ref', and 'alt'. Returns ---------- A MatchingEvidence named tuple with fields (ref, alt, other), each of which is a string -> PileupCollection dict mapping alleles to the PileupCollection of evidence supporting them.
[ "Given", "a", "variant", "split", "the", "PileupCollection", "based", "on", "whether", "it", "the", "data", "supports", "the", "reference", "allele", "the", "alternate", "allele", "or", "neither", "." ]
715d3ede5893757b2fcba4117515621bca7b1e5d
https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/read_evidence/pileup_collection.py#L365-L402
train
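group_by_match only requires an object with locus, ref, and alt fields, so in a sketch a namedtuple can stand in for a real variant class. The stand-in is our assumption; whether to_locus accepts it depends on that helper's implementation, which is outside this excerpt.

from collections import namedtuple

# Stand-in satisfying the documented contract: fields locus, ref, alt.
Variant = namedtuple("Variant", ["locus", "ref", "alt"])
variant = Variant(locus=locus, ref="A", alt="T")  # hypothetical SNV

evidence = collection.group_by_match(variant)
ref_reads = sum(pc.num_reads() for pc in evidence.ref.values())
alt_reads = sum(pc.num_reads() for pc in evidence.alt.values())
third_alleles = sorted(evidence.other)  # alleles that are neither ref nor alt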
openvax/varlens
varlens/read_evidence/pileup_collection.py
PileupCollection.match_summary
def match_summary(self, variant, score=lambda x: x.num_reads()): ''' Convenience method to summarize the evidence for and against a variant using a user-specified score function. See also `PileupCollection.group_by_match`. Parameters ---------- variant : Variant The variant. Must have fields 'locus', 'ref', and 'alt'. score (optional) : PileupCollection -> object Function to apply to summarize the evidence for each allele. Default: count number of reads. Returns ---------- List of (allele, score) pairs. This list will always have at least two elements. The first pair in the list is the reference allele. The second pair is the alternate. The subsequent items give the "third" alleles (neither ref nor alt), if any. ''' split = self.group_by_match(variant) def name(allele_to_pileup_collection): return ",".join(allele_to_pileup_collection) def aggregate_and_score(pileup_collections): merged = PileupCollection.merge(*pileup_collections) return score(merged) result = [ (name(split.ref), aggregate_and_score(split.ref.values())), (name(split.alt), aggregate_and_score(split.alt.values())), ] result.extend( (allele, score(collection)) for (allele, collection) in split.other.items()) return result
python
def match_summary(self, variant, score=lambda x: x.num_reads()): ''' Convenience method to summarize the evidence for and against a variant using a user-specified score function. See also `PileupCollection.group_by_match`. Parameters ---------- variant : Variant The variant. Must have fields 'locus', 'ref', and 'alt'. score (optional) : PileupCollection -> object Function to apply to summarize the evidence for each allele. Default: count number of reads. Returns ---------- List of (allele, score) pairs. This list will always have at least two elements. The first pair in the list is the reference allele. The second pair is the alternate. The subsequent items give the "third" alleles (neither ref nor alt), if any. ''' split = self.group_by_match(variant) def name(allele_to_pileup_collection): return ",".join(allele_to_pileup_collection) def aggregate_and_score(pileup_collections): merged = PileupCollection.merge(*pileup_collections) return score(merged) result = [ (name(split.ref), aggregate_and_score(split.ref.values())), (name(split.alt), aggregate_and_score(split.alt.values())), ] result.extend( (allele, score(collection)) for (allele, collection) in split.other.items()) return result
[ "def", "match_summary", "(", "self", ",", "variant", ",", "score", "=", "lambda", "x", ":", "x", ".", "num_reads", "(", ")", ")", ":", "split", "=", "self", ".", "group_by_match", "(", "variant", ")", "def", "name", "(", "allele_to_pileup_collection", ")", ":", "return", "\",\"", ".", "join", "(", "allele_to_pileup_collection", ")", "def", "aggregate_and_score", "(", "pileup_collections", ")", ":", "merged", "=", "PileupCollection", ".", "merge", "(", "*", "pileup_collections", ")", "return", "score", "(", "merged", ")", "result", "=", "[", "(", "name", "(", "split", ".", "ref", ")", ",", "aggregate_and_score", "(", "split", ".", "ref", ".", "values", "(", ")", ")", ")", ",", "(", "name", "(", "split", ".", "alt", ")", ",", "aggregate_and_score", "(", "split", ".", "alt", ".", "values", "(", ")", ")", ")", ",", "]", "result", ".", "extend", "(", "(", "allele", ",", "score", "(", "collection", ")", ")", "for", "(", "allele", ",", "collection", ")", "in", "split", ".", "other", ".", "items", "(", ")", ")", "return", "result" ]
Convenience method to summarize the evidence for and against a variant using a user-specified score function. See also `PileupCollection.group_by_match`. Parameters ---------- variant : Variant The variant. Must have fields 'locus', 'ref', and 'alt'. score (optional) : PileupCollection -> object Function to apply to summarize the evidence for each allele. Default: count number of reads. Returns ---------- List of (allele, score) pairs. This list will always have at least two elements. The first pair in the list is the reference allele. The second pair is the alternate. The subsequent items give the "third" alleles (neither ref nor alt), if any.
[ "Convenience", "method", "to", "summarize", "the", "evidence", "for", "and", "against", "a", "variant", "using", "a", "user", "-", "specified", "score", "function", "." ]
715d3ede5893757b2fcba4117515621bca7b1e5d
https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/read_evidence/pileup_collection.py#L404-L443
train
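A closing sketch tying the records together: match_summary guarantees the reference pair first and the alternate pair second, so a naive variant allele fraction can be read off directly. The VAF computation is our illustration, not part of the API.

summary = collection.match_summary(variant)
(ref_allele, ref_count) = summary[0]  # always the reference allele
(alt_allele, alt_count) = summary[1]  # always the alternate allele

total = ref_count + alt_count
vaf = alt_count / total if total else 0.0  # naive VAF, ignoring "third" alleles
print(ref_allele, ref_count, alt_allele, alt_count, vaf)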