Dataset fields (with reported length ranges):

    repository_name            string,   7-55 chars
    func_path_in_repository    string,   4-223 chars
    func_name                  string,   1-134 chars
    whole_func_string          string,   75-104k chars
    language                   string,   1 class
    func_code_string           string,   75-104k chars
    func_code_tokens           sequence, 19-28.4k tokens
    func_documentation_string  string,   1-46.9k chars
    func_documentation_tokens  sequence, 1-1.97k tokens
    split_name                 string,   1 class
    func_code_url              string,   87-315 chars
hufman/flask_rdf
flask_rdf/common_decorators.py
ViewDecorator.output
def output(self, response, accepts):
    """ Formats a response from a view to handle any RDF graphs

    If a view function returns an RDF graph, serialize it based on Accept header
    If it's not an RDF graph, return it without any special handling
    """
    graph = self.get_graph(response)
    if graph is not None:
        # decide the format
        mimetype, format = self.format_selector.decide(accepts, graph.context_aware)

        # requested content couldn't find anything
        if mimetype is None:
            return self.make_406_response()

        # explicitly mark text mimetypes as utf-8
        if 'text' in mimetype:
            mimetype = mimetype + '; charset=utf-8'

        # format the new response
        serialized = graph.serialize(format=format)
        response = self.make_new_response(response, mimetype, serialized)
        return response
    else:
        return response
python
train
https://github.com/hufman/flask_rdf/blob/9bf86023288171eb0665c15fb28070250f80310c/flask_rdf/common_decorators.py#L45-L68
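The charset-tagging step above is easy to check in isolation. A minimal sketch with the format selector and graph stubbed out (the helper name is illustrative, not part of flask_rdf):

def tag_charset(mimetype):
    # mirror output(): explicitly mark text mimetypes as utf-8
    if 'text' in mimetype:
        return mimetype + '; charset=utf-8'
    return mimetype

assert tag_charset('text/turtle') == 'text/turtle; charset=utf-8'
assert tag_charset('application/rdf+xml') == 'application/rdf+xml'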
hufman/flask_rdf
flask_rdf/common_decorators.py
ViewDecorator.decorate
def decorate(self, view):
    """ Wraps a view function to return formatted RDF graphs

    Uses content negotiation to serialize the graph to the client-preferred format
    Passes other content through unmodified
    """
    from functools import wraps

    @wraps(view)
    def decorated(*args, **kwargs):
        response = view(*args, **kwargs)
        accept = self.get_accept()
        return self.output(response, accept)
    return decorated
python
train
https://github.com/hufman/flask_rdf/blob/9bf86023288171eb0665c15fb28070250f80310c/flask_rdf/common_decorators.py#L70-L82
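A rough usage sketch of a decorator built this way, assuming rdflib is installed; the import path is an assumption about flask_rdf's public API and may differ:

from flask import Flask
from rdflib import Graph, Literal, URIRef
from flask_rdf.flask import returns_rdf  # import path is an assumption

app = Flask(__name__)

@app.route('/resource')
@returns_rdf
def resource():
    g = Graph()
    g.add((URIRef('http://example.org/s'),
           URIRef('http://example.org/p'),
           Literal('o')))
    return g  # serialized according to the request's Accept header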
Duke-QCD/hic
hic/initial.py
IC.ecc
def ecc(self, n):
    r"""
    Calculate eccentricity harmonic `\varepsilon_n`.

    :param int n: Eccentricity order.

    """
    ny, nx = self._profile.shape
    xmax, ymax = self._xymax
    xcm, ycm = self._cm

    # create (X, Y) grids relative to CM
    Y, X = np.mgrid[ymax:-ymax:1j*ny, -xmax:xmax:1j*nx]
    X -= xcm
    Y -= ycm

    # create grid of weights = profile * R^n
    Rsq = X*X + Y*Y
    if n == 1:
        W = np.sqrt(Rsq, out=Rsq)
    elif n == 2:
        W = Rsq
    else:
        if n & 1:  # odd n
            W = np.sqrt(Rsq)
        else:  # even n
            W = np.copy(Rsq)
        # multiply by R^2 until W = R^n
        for _ in range(int((n-1)/2)):
            W *= Rsq
    W *= self._profile

    # create grid of e^{i*n*phi} * W
    i_n_phi = np.zeros_like(X, dtype=complex)
    np.arctan2(Y, X, out=i_n_phi.imag)
    i_n_phi.imag *= n
    exp_phi = np.exp(i_n_phi, out=i_n_phi)
    exp_phi *= W

    return abs(exp_phi.sum()) / W.sum()
python
train
https://github.com/Duke-QCD/hic/blob/9afb141735b1ac228d296a2349225d2bdcdb68f0/hic/initial.py#L63-L102
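To see the weighting scheme at work, here is a self-contained check of the n = 2 case on a made-up Gaussian profile (grid and profile are illustrative; for a Gaussian centered on its CM the expected value is (sigma_x^2 - sigma_y^2)/(sigma_x^2 + sigma_y^2) = 0.6):

import numpy as np

# made-up elliptic Gaussian profile, already centered on its CM
ny = nx = 201
Y, X = np.mgrid[-5:5:1j*ny, -5:5:1j*nx]
profile = np.exp(-(X**2 / 4.0 + Y**2))   # sigma_x^2 = 2, sigma_y^2 = 0.5

n = 2
W = profile * (X*X + Y*Y)                # weights = profile * R^n for n = 2
phase = np.exp(1j * n * np.arctan2(Y, X))
ecc2 = abs((W * phase).sum()) / W.sum()
print(round(ecc2, 2))                    # -> 0.6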
alefnula/tea
tea/dsa/config.py
Config.get
def get(self, var, default=None):
    """Return a value from configuration.

    Safe version which always returns a default value if the value is
    not found.
    """
    try:
        return self.__get(var)
    except (KeyError, IndexError):
        return default
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/dsa/config.py#L211-L220
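The same try/except fallback pattern in miniature, with Config's private dotted-path lookup stubbed by a plain dict (names illustrative):

def safe_get(data, key, default=None):
    # mirror Config.get: swallow lookup errors, return the default
    try:
        return data[key]
    except (KeyError, IndexError):
        return default

conf = {"server": {"port": 8080}}
print(safe_get(conf, "server"))        # {'port': 8080}
print(safe_get(conf, "missing", 42))   # 42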
alefnula/tea
tea/dsa/config.py
Config.insert
def insert(self, var, value, index=None):
    """Insert at the index.

    If the index is not provided appends to the end of the list.
    """
    current = self.__get(var)
    if not isinstance(current, list):
        raise KeyError("%s: is not a list" % var)
    if index is None:
        current.append(value)
    else:
        current.insert(index, value)
    if self.auto_save:
        self.save()
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/dsa/config.py#L238-L251
alefnula/tea
tea/dsa/config.py
MultiConfig.keys
def keys(self):
    """Return a merged set of top level keys from all configurations."""
    s = set()
    for config in self.__configs:
        s |= config.keys()
    return s
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/dsa/config.py#L328-L333
alefnula/tea
tea/logger/win_handlers.py
FileHandler.close
def close(self):
    """Close the stream."""
    self.flush()
    self.stream.close()
    logging.StreamHandler.close(self)
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/logger/win_handlers.py#L22-L26
alefnula/tea
tea/logger/win_handlers.py
BaseRotatingHandler.emit
def emit(self, record):
    """Emit a record.

    Output the record to the file, catering for rollover as described
    in doRollover().
    """
    try:
        if self.shouldRollover(record):
            self.doRollover()
        FileHandler.emit(self, record)
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        self.handleError(record)
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/logger/win_handlers.py#L42-L55
alefnula/tea
tea/logger/win_handlers.py
RotatingFileHandler.doRollover
def doRollover(self):
    """Do a rollover, as described in __init__()."""
    self.stream.close()
    try:
        if self.backupCount > 0:
            tmp_location = "%s.0" % self.baseFilename
            os.rename(self.baseFilename, tmp_location)
            for i in range(self.backupCount - 1, 0, -1):
                sfn = "%s.%d" % (self.baseFilename, i)
                dfn = "%s.%d" % (self.baseFilename, i + 1)
                if os.path.exists(sfn):
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.baseFilename + ".1"
            if os.path.exists(dfn):
                os.remove(dfn)
            os.rename(tmp_location, dfn)
    except Exception:
        pass
    finally:
        self.stream = WindowsFile(self.baseFilename, "a", self.encoding)
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/logger/win_handlers.py#L93-L114
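The backup shuffle is the classic fixed-window rotation. A dry run of the rename order for backupCount = 3 (filenames illustrative): the oldest backup is overwritten, each survivor shifts up one slot, and the temporary .0 copy takes slot .1:

base, backup_count = "app.log", 3

renames = [("%s.%d" % (base, i), "%s.%d" % (base, i + 1))
           for i in range(backup_count - 1, 0, -1)]
renames.append((base + ".0", base + ".1"))  # temp copy takes slot .1
print(renames)
# [('app.log.2', 'app.log.3'), ('app.log.1', 'app.log.2'),
#  ('app.log.0', 'app.log.1')]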
alefnula/tea
tea/logger/win_handlers.py
RotatingFileHandler.shouldRollover
def shouldRollover(self, record):
    """Determine if rollover should occur.

    Basically, see if the supplied record would cause the file to
    exceed the size limit we have.
    """
    if self.maxBytes > 0:  # are we rolling over?
        msg = "%s\n" % self.format(record)
        self.stream.seek(0, 2)  # due to non-posix-compliant win feature
        if self.stream.tell() + len(msg) >= self.maxBytes:
            return 1
    return 0
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/logger/win_handlers.py#L116-L127
alefnula/tea
tea/shell/__init__.py
split
def split(s, posix=True):
    """Split the string s using shell-like syntax.

    Args:
        s (str): String to split
        posix (bool): Use posix split

    Returns:
        list of str: List of string parts
    """
    if isinstance(s, six.binary_type):
        s = s.decode("utf-8")
    return shlex.split(s, posix=posix)
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L25-L37
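Expected behaviour, assuming the tea package is installed (the results follow directly from shlex.split):

from tea.shell import split  # assumes the tea package is installed

print(split('program -c "a string argument"'))
# ['program', '-c', 'a string argument']
print(split('program -c "a string argument"', posix=False))
# ['program', '-c', '"a string argument"']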
alefnula/tea
tea/shell/__init__.py
search
def search(path, matcher="*", dirs=False, files=True):
    """Recursive search function.

    Args:
        path (str): Path to search recursively
        matcher (str or callable): String pattern to search for or function
            that returns True/False for a file argument
        dirs (bool): if True returns directories that match the pattern
        files (bool): if True returns files that match the pattern

    Yields:
        str: Found files and directories
    """
    if callable(matcher):
        def fnmatcher(items):
            return list(filter(matcher, items))
    else:
        def fnmatcher(items):
            return fnmatch.filter(items, matcher)

    for root, directories, filenames in os.walk(os.path.abspath(path)):
        to_match = []
        if dirs:
            to_match.extend(directories)
        if files:
            to_match.extend(filenames)
        for item in fnmatcher(to_match):
            yield os.path.join(root, item)
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L40-L70
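A short usage sketch (assumes the tea package is installed; paths are illustrative):

from tea.shell import search  # assumes the tea package is installed

# every *.py file under the current tree
for path in search(".", matcher="*.py"):
    print(path)

# directories only, selected by a callable instead of a glob pattern
hidden = list(search(".", matcher=lambda name: name.startswith("."),
                     dirs=True, files=False))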
alefnula/tea
tea/shell/__init__.py
chdir
def chdir(directory):
    """Change the current working directory.

    Args:
        directory (str): Directory to go to.
    """
    directory = os.path.abspath(directory)
    logger.info("chdir -> %s" % directory)
    try:
        if not os.path.isdir(directory):
            logger.error(
                "chdir -> %s failed! Directory does not exist!", directory
            )
            return False
        os.chdir(directory)
        return True
    except Exception as e:
        logger.error("chdir -> %s failed! %s" % (directory, e))
        return False
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L73-L91
alefnula/tea
tea/shell/__init__.py
goto
def goto(directory, create=False):
    """Context object for changing directory.

    Args:
        directory (str): Directory to go to.
        create (bool): Create directory if it doesn't exist.

    Usage::

        >>> with goto(directory) as ok:
        ...     if not ok:
        ...         print 'Error'
        ...     else:
        ...         print 'All OK'
    """
    current = os.getcwd()
    directory = os.path.abspath(directory)

    if os.path.isdir(directory) or (create and mkdir(directory)):
        logger.info("goto -> %s", directory)
        os.chdir(directory)
        try:
            yield True
        finally:
            logger.info("goto <- %s", directory)
            os.chdir(current)
    else:
        logger.info(
            "goto(%s) - directory does not exist, or cannot be created.",
            directory,
        )
        yield False
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L95-L127
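Note that goto() is written as a generator; in the source it is presumably wrapped with contextlib.contextmanager (the decorator is not part of the extracted function string). A usage sketch, assuming tea is installed:

from tea.shell import goto  # assumes the tea package is installed

with goto("/tmp/build", create=True) as ok:  # path is illustrative
    if not ok:
        print("could not enter directory")
    else:
        print("working in /tmp/build")
# the previous working directory is restored on exit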
alefnula/tea
tea/shell/__init__.py
mkdir
def mkdir(path, mode=0o755, delete=False):
    """Make a directory.

    Create a leaf directory and all intermediate ones.
    Works like ``mkdir``, except that any intermediate path segment
    (not just the rightmost) will be created if it does not exist.
    This is recursive.

    Args:
        path (str): Directory to create
        mode (int): Directory mode
        delete (bool): Delete directory/file if exists

    Returns:
        bool: True if succeeded else False
    """
    logger.info("mkdir: %s" % path)
    if os.path.isdir(path):
        if not delete:
            return True
        if not remove(path):
            return False
    try:
        os.makedirs(path, mode)
        return True
    except Exception:
        logger.exception("Failed to mkdir: %s" % path)
        return False
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L130-L156
alefnula/tea
tea/shell/__init__.py
__copyfile
def __copyfile(source, destination):
    """Copy data and mode bits ("cp source destination").

    The destination may be a directory.

    Args:
        source (str): Source file (file to copy).
        destination (str): Destination file or directory (where to copy).

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    logger.info("copyfile: %s -> %s" % (source, destination))
    try:
        __create_destdir(destination)
        shutil.copy(source, destination)
        return True
    except Exception as e:
        logger.error(
            "copyfile: %s -> %s failed! Error: %s", source, destination, e
        )
        return False
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L166-L187
alefnula/tea
tea/shell/__init__.py
__copyfile2
def __copyfile2(source, destination):
    """Copy data and all stat info ("cp -p source destination").

    The destination may be a directory.

    Args:
        source (str): Source file (file to copy).
        destination (str): Destination file or directory (where to copy).

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    logger.info("copyfile2: %s -> %s" % (source, destination))
    try:
        __create_destdir(destination)
        shutil.copy2(source, destination)
        return True
    except Exception as e:
        logger.error(
            "copyfile2: %s -> %s failed! Error: %s", source, destination, e
        )
        return False
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L190-L211
alefnula/tea
tea/shell/__init__.py
__copytree
def __copytree(source, destination, symlinks=False):
    """Copy a directory tree recursively using copy2().

    The destination directory must not already exist.
    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied.

    Args:
        source (str): Source directory (directory to copy).
        destination (str): Destination directory (where to copy).
        symlinks (bool): Follow symbolic links.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    logger.info("copytree: %s -> %s" % (source, destination))
    try:
        __create_destdir(destination)
        shutil.copytree(source, destination, symlinks)
        return True
    except Exception as e:
        logger.exception(
            "copytree: %s -> %s failed! Error: %s", source, destination, e
        )
        return False
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L214-L241
alefnula/tea
tea/shell/__init__.py
copy
def copy(source, destination):
    """Copy file or directory.

    Args:
        source (str): Source file or directory
        destination (str): Destination file or directory (where to copy).

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    if os.path.isdir(source):
        return __copytree(source, destination)
    else:
        return __copyfile2(source, destination)
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L244-L257
alefnula/tea
tea/shell/__init__.py
gcopy
def gcopy(pattern, destination):
    """Copy all files found by glob.glob(pattern) to destination directory.

    Args:
        pattern (str): Glob pattern
        destination (str): Path to the destination directory.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    for item in glob.glob(pattern):
        if not copy(item, destination):
            return False
    return True
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L260-L273
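gcopy (like gmove and gremove below) applies the single-item operation to every glob match and stops at the first failure. A usage sketch with illustrative paths, assuming tea is installed:

from tea.shell import gcopy  # assumes the tea package is installed

ok = gcopy("reports/*.csv", "/tmp/archive")  # paths are illustrative
print("all copied" if ok else "a copy failed; details are in the log")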
alefnula/tea
tea/shell/__init__.py
move
def move(source, destination):
    """Move a file or directory (recursively) to another location.

    If the destination is on our current file system, then simply use
    rename. Otherwise, copy source to the destination and then remove
    source.

    Args:
        source (str): Source file or directory (file or directory to move).
        destination (str): Destination file or directory (where to move).

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    logger.info("Move: %s -> %s" % (source, destination))
    try:
        __create_destdir(destination)
        shutil.move(source, destination)
        return True
    except Exception:
        logger.exception("Failed to Move: %s -> %s" % (source, destination))
        return False
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L276-L297
alefnula/tea
tea/shell/__init__.py
gmove
def gmove(pattern, destination):
    """Move all files found by glob.glob(pattern) to destination directory.

    Args:
        pattern (str): Glob pattern
        destination (str): Path to the destination directory.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    for item in glob.glob(pattern):
        if not move(item, destination):
            return False
    return True
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L300-L313
alefnula/tea
tea/shell/__init__.py
__rmfile
def __rmfile(path):
    """Delete a file.

    Args:
        path (str): Path to the file that needs to be deleted.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    logger.info("rmfile: %s" % path)
    try:
        os.remove(path)
        return True
    except Exception as e:
        logger.error("rmfile: %s failed! Error: %s" % (path, e))
        return False
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L316-L331
alefnula/tea
tea/shell/__init__.py
__rmtree
def __rmtree(path):
    """Recursively delete a directory tree.

    Args:
        path (str): Path to the directory that needs to be deleted.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    logger.info("rmtree: %s" % path)
    try:
        shutil.rmtree(path)
        return True
    except Exception as e:
        logger.error("rmtree: %s failed! Error: %s" % (path, e))
        return False
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L334-L349
alefnula/tea
tea/shell/__init__.py
remove
def remove(path):
    """Delete a file or directory.

    Args:
        path (str): Path to the file or directory that needs to be deleted.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    if os.path.isdir(path):
        return __rmtree(path)
    else:
        return __rmfile(path)
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L352-L364
alefnula/tea
tea/shell/__init__.py
gremove
def gremove(pattern):
    """Remove all files found by glob.glob(pattern).

    Args:
        pattern (str): Pattern of files to remove

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    for item in glob.glob(pattern):
        if not remove(item):
            return False
    return True
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L367-L378
alefnula/tea
tea/shell/__init__.py
read
def read(path, encoding="utf-8"):
    """Read the content of the file.

    Args:
        path (str): Path to the file
        encoding (str): File encoding. Default: utf-8

    Returns:
        str: File content or empty string if there was an error
    """
    try:
        with io.open(path, encoding=encoding) as f:
            return f.read()
    except Exception as e:
        logger.error("read: %s failed. Error: %s", path, e)
        return ""
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L381-L396
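Because read() maps every failure to an empty string, callers cannot distinguish a missing file from an empty one without checking the log. A usage sketch (path illustrative, assumes tea is installed):

from tea.shell import read  # assumes the tea package is installed

content = read("notes.txt")  # path is illustrative
if not content:
    # empty file *or* read error; the error itself only shows up
    # in the log output
    print("nothing read")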
alefnula/tea
tea/shell/__init__.py
touch
def touch(path, content="", encoding="utf-8", overwrite=False):
    """Create a file at the given path if it does not already exist.

    Args:
        path (str): Path to the file.
        content (str): Optional content that will be written in the file.
        encoding (str): Encoding in which to write the content.
            Default: ``utf-8``
        overwrite (bool): Overwrite the file if exists.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    path = os.path.abspath(path)
    if not overwrite and os.path.exists(path):
        logger.warning('touch: "%s" already exists', path)
        return False
    try:
        logger.info("touch: %s", path)
        with io.open(path, "wb") as f:
            if not isinstance(content, six.binary_type):
                content = content.encode(encoding)
            f.write(content)
        return True
    except Exception as e:
        logger.error("touch: %s failed. Error: %s", path, e)
        return False
python
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L399-L425
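The overwrite flag gates the three possible outcomes; a usage sketch (paths illustrative, assumes tea is installed):

from tea.shell import touch  # assumes the tea package is installed

touch("marker.txt", content="ready\n")                   # True: created
touch("marker.txt", content="again\n")                   # False: exists
touch("marker.txt", content="again\n", overwrite=True)   # True: rewritten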
alefnula/tea
tea/utils/__init__.py
get_object
def get_object(path="", obj=None):
    """Return an object from a dot path.

    Path can either be a full path, in which case the `get_object`
    function will try to import the modules in the path and follow it
    to the final object. Or it can be a path relative to the object
    passed in as the second argument.

    Args:
        path (str): Full or relative dot path to the desired object
        obj (object): Starting object. Dot path is calculated
            relatively to this object.

    Returns:
        Object at the end of the path, or list of non hidden objects
        if we use the star query.

    Example for full paths::

        >>> get_object('os.path.join')
        <function join at 0x1002d9ed8>
        >>> get_object('tea.process')
        <module 'tea.process' from 'tea/process/__init__.pyc'>

    Example for relative paths when an object is passed in::

        >>> import os
        >>> get_object('path.join', os)
        <function join at 0x1002d9ed8>

    Example for a star query (a star query can be used only as the
    last element of the path)::

        >>> get_object('tea.dsa.*')
        []
        >>> get_object('tea.dsa.singleton.*')
        [<class 'tea.dsa.singleton.Singleton'>,
         <class 'tea.dsa.singleton.SingletonMetaclass'>,
         <module 'six' from '...'>]
        >>> get_object('tea.dsa.*')
        [<module 'tea.dsa.singleton' from '...'>]  # Since we imported it
    """
    if not path:
        return obj
    path = path.split(".")
    if obj is None:
        obj = importlib.import_module(path[0])
        path = path[1:]
    for item in path:
        if item == "*":
            # This is the star query, returns non hidden objects
            return [
                getattr(obj, name)
                for name in dir(obj)
                if not name.startswith("__")
            ]
        if isinstance(obj, types.ModuleType):
            submodule = "{}.{}".format(_package(obj), item)
            try:
                obj = importlib.import_module(submodule)
            except Exception as import_error:
                try:
                    obj = getattr(obj, item)
                except Exception:
                    # FIXME: I know I should probably merge the errors,
                    # but it's easier just to throw the import error
                    # since it's most probably the one user wants to
                    # see. Create a new LoadingError and throw a
                    # combination of the import error and attribute
                    # error.
                    raise import_error
        else:
            obj = getattr(obj, item)
    return obj
python
def get_object(path="", obj=None): """Return an object from a dot path. Path can either be a full path, in which case the `get_object` function will try to import the modules in the path and follow it to the final object. Or it can be a path relative to the object passed in as the second argument. Args: path (str): Full or relative dot path to the desired object obj (object): Starting object. Dot path is calculated relatively to this object. Returns: Object at the end of the path, or list of non hidden objects if we use the star query. Example for full paths:: >>> get_object('os.path.join') <function join at 0x1002d9ed8> >>> get_object('tea.process') <module 'tea.process' from 'tea/process/__init__.pyc'> Example for relative paths when an object is passed in:: >>> import os >>> get_object('path.join', os) <function join at 0x1002d9ed8> Example for a star query. (Star query can be used only as the last element of the path:: >>> get_object('tea.dsa.*') [] >>> get_object('tea.dsa.singleton.*') [<class 'tea.dsa.singleton.Singleton'>, <class 'tea.dsa.singleton.SingletonMetaclass'> <module 'six' from '...'>] >>> get_object('tea.dsa.*') [<module 'tea.dsa.singleton' from '...'>] # Since we imported it """ if not path: return obj path = path.split(".") if obj is None: obj = importlib.import_module(path[0]) path = path[1:] for item in path: if item == "*": # This is the star query, returns non hidden objects return [ getattr(obj, name) for name in dir(obj) if not name.startswith("__") ] if isinstance(obj, types.ModuleType): submodule = "{}.{}".format(_package(obj), item) try: obj = importlib.import_module(submodule) except Exception as import_error: try: obj = getattr(obj, item) except Exception: # FIXME: I know I should probably merge the errors, but # it's easier just to throw the import error since # it's most probably the one user wants to see. # Create a new LoadingError and throw a combination # of the import error and attribute error. raise import_error else: obj = getattr(obj, item) return obj
[ "def", "get_object", "(", "path", "=", "\"\"", ",", "obj", "=", "None", ")", ":", "if", "not", "path", ":", "return", "obj", "path", "=", "path", ".", "split", "(", "\".\"", ")", "if", "obj", "is", "None", ":", "obj", "=", "importlib", ".", "import_module", "(", "path", "[", "0", "]", ")", "path", "=", "path", "[", "1", ":", "]", "for", "item", "in", "path", ":", "if", "item", "==", "\"*\"", ":", "# This is the star query, returns non hidden objects", "return", "[", "getattr", "(", "obj", ",", "name", ")", "for", "name", "in", "dir", "(", "obj", ")", "if", "not", "name", ".", "startswith", "(", "\"__\"", ")", "]", "if", "isinstance", "(", "obj", ",", "types", ".", "ModuleType", ")", ":", "submodule", "=", "\"{}.{}\"", ".", "format", "(", "_package", "(", "obj", ")", ",", "item", ")", "try", ":", "obj", "=", "importlib", ".", "import_module", "(", "submodule", ")", "except", "Exception", "as", "import_error", ":", "try", ":", "obj", "=", "getattr", "(", "obj", ",", "item", ")", "except", "Exception", ":", "# FIXME: I know I should probably merge the errors, but", "# it's easier just to throw the import error since", "# it's most probably the one user wants to see.", "# Create a new LoadingError and throw a combination", "# of the import error and attribute error.", "raise", "import_error", "else", ":", "obj", "=", "getattr", "(", "obj", ",", "item", ")", "return", "obj" ]
Return an object from a dot path.

Path can either be a full path, in which case the `get_object`
function will try to import the modules in the path and follow it to
the final object. Or it can be a path relative to the object passed in
as the second argument.

Args:
    path (str): Full or relative dot path to the desired object
    obj (object): Starting object. Dot path is calculated relative to
        this object.

Returns:
    Object at the end of the path, or list of non hidden objects if we
    use the star query.

Example for full paths::

    >>> get_object('os.path.join')
    <function join at 0x1002d9ed8>
    >>> get_object('tea.process')
    <module 'tea.process' from 'tea/process/__init__.pyc'>

Example for relative paths when an object is passed in::

    >>> import os
    >>> get_object('path.join', os)
    <function join at 0x1002d9ed8>

Example for a star query (star query can be used only as the last
element of the path)::

    >>> get_object('tea.dsa.*')
    []
    >>> get_object('tea.dsa.singleton.*')
    [<class 'tea.dsa.singleton.Singleton'>,
     <class 'tea.dsa.singleton.SingletonMetaclass'>,
     <module 'six' from '...'>]
    >>> get_object('tea.dsa.*')
    [<module 'tea.dsa.singleton' from '...'>]  # Since we imported it
[ "Return", "an", "object", "from", "a", "dot", "path", "." ]
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/utils/__init__.py#L30-L102
alefnula/tea
tea/utils/__init__.py
load_subclasses
def load_subclasses(klass, modules=None):
    """Load recursively all subclasses from a module.

    Args:
        klass (type): Class whose subclasses we want to load.
        modules: List of additional modules or module names that should be
            recursively imported in order to find all the subclasses of
            the desired class. Default: None

    FIXME: This function is kept only for backward compatibility reasons,
        it should not be used. A deprecation warning should be raised and
        it should be replaced by the ``Loader`` class.
    """
    if modules:
        if isinstance(modules, six.string_types):
            modules = [modules]
        loader = Loader()
        loader.load(*modules)
    return klass.__subclasses__()
python
def load_subclasses(klass, modules=None):
    """Load recursively all subclasses from a module.

    Args:
        klass (type): Class whose subclasses we want to load.
        modules: List of additional modules or module names that should be
            recursively imported in order to find all the subclasses of
            the desired class. Default: None

    FIXME: This function is kept only for backward compatibility reasons,
        it should not be used. A deprecation warning should be raised and
        it should be replaced by the ``Loader`` class.
    """
    if modules:
        if isinstance(modules, six.string_types):
            modules = [modules]
        loader = Loader()
        loader.load(*modules)
    return klass.__subclasses__()
[ "def", "load_subclasses", "(", "klass", ",", "modules", "=", "None", ")", ":", "if", "modules", ":", "if", "isinstance", "(", "modules", ",", "six", ".", "string_types", ")", ":", "modules", "=", "[", "modules", "]", "loader", "=", "Loader", "(", ")", "loader", ".", "load", "(", "*", "modules", ")", "return", "klass", ".", "__subclasses__", "(", ")" ]
Load recursively all subclasses from a module.

Args:
    klass (type): Class whose subclasses we want to load.
    modules: List of additional modules or module names that should be
        recursively imported in order to find all the subclasses of
        the desired class. Default: None

FIXME: This function is kept only for backward compatibility reasons,
    it should not be used. A deprecation warning should be raised and
    it should be replaced by the ``Loader`` class.
[ "Load", "recursively", "all", "all", "subclasses", "from", "a", "module", "." ]
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/utils/__init__.py#L156-L174
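A brief usage sketch for the function above; the `Plugin` base class and the `myapp.plugins` package are hypothetical names, not part of tea:

from tea.utils import load_subclasses

class Plugin:
    pass

# Recursively imports everything under the (hypothetical) myapp.plugins
# package so its Plugin subclasses get defined, then returns them.
for plugin_class in load_subclasses(Plugin, modules=["myapp.plugins"]):
    print(plugin_class.__name__)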
alefnula/tea
tea/utils/__init__.py
get_exception
def get_exception(): """Return full formatted traceback as a string.""" trace = "" exception = "" exc_list = traceback.format_exception_only( sys.exc_info()[0], sys.exc_info()[1] ) for entry in exc_list: exception += entry tb_list = traceback.format_tb(sys.exc_info()[2]) for entry in tb_list: trace += entry return "%s\n%s" % (exception, trace)
python
def get_exception(): """Return full formatted traceback as a string.""" trace = "" exception = "" exc_list = traceback.format_exception_only( sys.exc_info()[0], sys.exc_info()[1] ) for entry in exc_list: exception += entry tb_list = traceback.format_tb(sys.exc_info()[2]) for entry in tb_list: trace += entry return "%s\n%s" % (exception, trace)
[ "def", "get_exception", "(", ")", ":", "trace", "=", "\"\"", "exception", "=", "\"\"", "exc_list", "=", "traceback", ".", "format_exception_only", "(", "sys", ".", "exc_info", "(", ")", "[", "0", "]", ",", "sys", ".", "exc_info", "(", ")", "[", "1", "]", ")", "for", "entry", "in", "exc_list", ":", "exception", "+=", "entry", "tb_list", "=", "traceback", ".", "format_tb", "(", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ")", "for", "entry", "in", "tb_list", ":", "trace", "+=", "entry", "return", "\"%s\\n%s\"", "%", "(", "exception", ",", "trace", ")" ]
Return full formatted traceback as a string.
[ "Return", "full", "formatted", "traceback", "as", "a", "string", "." ]
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/utils/__init__.py#L177-L189
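A minimal usage sketch: the function reads sys.exc_info(), so it only returns something useful while an exception is being handled:

from tea.utils import get_exception

try:
    1 / 0
except ZeroDivisionError:
    # Prints the formatted exception line followed by the traceback.
    print(get_exception())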
alefnula/tea
tea/utils/__init__.py
Loader.load
def load(self, *modules): """Load one or more modules. Args: modules: Either a string full path to a module or an actual module object. """ for module in modules: if isinstance(module, six.string_types): try: module = get_object(module) except Exception as e: self.errors[module] = e continue self.modules[module.__package__] = module for (loader, module_name, is_pkg) in pkgutil.walk_packages( module.__path__ ): full_name = "{}.{}".format(_package(module), module_name) try: self.modules[full_name] = get_object(full_name) if is_pkg: self.load(self.modules[full_name]) except Exception as e: self.errors[full_name] = e
python
def load(self, *modules): """Load one or more modules. Args: modules: Either a string full path to a module or an actual module object. """ for module in modules: if isinstance(module, six.string_types): try: module = get_object(module) except Exception as e: self.errors[module] = e continue self.modules[module.__package__] = module for (loader, module_name, is_pkg) in pkgutil.walk_packages( module.__path__ ): full_name = "{}.{}".format(_package(module), module_name) try: self.modules[full_name] = get_object(full_name) if is_pkg: self.load(self.modules[full_name]) except Exception as e: self.errors[full_name] = e
[ "def", "load", "(", "self", ",", "*", "modules", ")", ":", "for", "module", "in", "modules", ":", "if", "isinstance", "(", "module", ",", "six", ".", "string_types", ")", ":", "try", ":", "module", "=", "get_object", "(", "module", ")", "except", "Exception", "as", "e", ":", "self", ".", "errors", "[", "module", "]", "=", "e", "continue", "self", ".", "modules", "[", "module", ".", "__package__", "]", "=", "module", "for", "(", "loader", ",", "module_name", ",", "is_pkg", ")", "in", "pkgutil", ".", "walk_packages", "(", "module", ".", "__path__", ")", ":", "full_name", "=", "\"{}.{}\"", ".", "format", "(", "_package", "(", "module", ")", ",", "module_name", ")", "try", ":", "self", ".", "modules", "[", "full_name", "]", "=", "get_object", "(", "full_name", ")", "if", "is_pkg", ":", "self", ".", "load", "(", "self", ".", "modules", "[", "full_name", "]", ")", "except", "Exception", "as", "e", ":", "self", ".", "errors", "[", "full_name", "]", "=", "e" ]
Load one or more modules. Args: modules: Either a string full path to a module or an actual module object.
[ "Load", "one", "or", "more", "modules", "." ]
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/utils/__init__.py#L129-L153
cgtobi/PyRMVtransport
RMVtransport/rmvtransport.py
_product_filter
def _product_filter(products) -> str: """Calculate the product filter.""" _filter = 0 for product in {PRODUCTS[p] for p in products}: _filter += product return format(_filter, "b")[::-1]
python
def _product_filter(products) -> str: """Calculate the product filter.""" _filter = 0 for product in {PRODUCTS[p] for p in products}: _filter += product return format(_filter, "b")[::-1]
[ "def", "_product_filter", "(", "products", ")", "->", "str", ":", "_filter", "=", "0", "for", "product", "in", "{", "PRODUCTS", "[", "p", "]", "for", "p", "in", "products", "}", ":", "_filter", "+=", "product", "return", "format", "(", "_filter", ",", "\"b\"", ")", "[", ":", ":", "-", "1", "]" ]
Calculate the product filter.
[ "Calculate", "the", "product", "filter", "." ]
train
https://github.com/cgtobi/PyRMVtransport/blob/20a0d68ecfdedceb32e8ca96c381fdec7e2069c7/RMVtransport/rmvtransport.py#L172-L177
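A self-contained sketch of the bitmask logic in `_product_filter`; the PRODUCTS values below are made-up power-of-two flags, the real mapping lives in the package's constants:

# Hypothetical flags; RMVtransport defines the real PRODUCTS mapping.
PRODUCTS = {"U-Bahn": 1, "Tram": 2, "Bus": 4, "S-Bahn": 8}

def product_filter(products):
    # Sum each distinct flag once (the set comprehension deduplicates),
    # then emit the binary mask least-significant bit first, which
    # appears to be the ordering the productsFilter parameter expects.
    mask = sum({PRODUCTS[p] for p in products})
    return format(mask, "b")[::-1]

print(product_filter(["Tram", "Bus", "Tram"]))  # mask 6 -> '110' -> '011'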
cgtobi/PyRMVtransport
RMVtransport/rmvtransport.py
_base_url
def _base_url() -> str: """Build base url.""" _lang: str = "d" _type: str = "n" _with_suggestions: str = "?" return BASE_URI + STBOARD_PATH + _lang + _type + _with_suggestions
python
def _base_url() -> str: """Build base url.""" _lang: str = "d" _type: str = "n" _with_suggestions: str = "?" return BASE_URI + STBOARD_PATH + _lang + _type + _with_suggestions
[ "def", "_base_url", "(", ")", "->", "str", ":", "_lang", ":", "str", "=", "\"d\"", "_type", ":", "str", "=", "\"n\"", "_with_suggestions", ":", "str", "=", "\"?\"", "return", "BASE_URI", "+", "STBOARD_PATH", "+", "_lang", "+", "_type", "+", "_with_suggestions" ]
Build base url.
[ "Build", "base", "url", "." ]
train
https://github.com/cgtobi/PyRMVtransport/blob/20a0d68ecfdedceb32e8ca96c381fdec7e2069c7/RMVtransport/rmvtransport.py#L180-L185
cgtobi/PyRMVtransport
RMVtransport/rmvtransport.py
RMVtransport.get_departures
async def get_departures( self, station_id: str, direction_id: Optional[str] = None, max_journeys: int = 20, products: Optional[List[str]] = None, ) -> Dict[str, Any]: """Fetch data from rmv.de.""" self.station_id: str = station_id self.direction_id: str = direction_id self.max_journeys: int = max_journeys self.products_filter: str = _product_filter(products or ALL_PRODUCTS) base_url: str = _base_url() params: Dict[str, Union[str, int]] = { "selectDate": "today", "time": "now", "input": self.station_id, "maxJourneys": self.max_journeys, "boardType": "dep", "productsFilter": self.products_filter, "disableEquivs": "discard_nearby", "output": "xml", "start": "yes", } if self.direction_id: params["dirInput"] = self.direction_id url = base_url + urllib.parse.urlencode(params) try: with async_timeout.timeout(self._timeout): async with self._session.get(url) as response: _LOGGER.debug(f"Response from RMV API: {response.status}") xml = await response.read() _LOGGER.debug(xml) except (asyncio.TimeoutError, aiohttp.ClientError): _LOGGER.error("Can not load data from RMV API") raise RMVtransportApiConnectionError() # pylint: disable=I1101 try: self.obj = objectify.fromstring(xml) except (TypeError, etree.XMLSyntaxError): _LOGGER.debug(f"Get from string: {xml[:100]}") print(f"Get from string: {xml}") raise RMVtransportError() try: self.now = self.current_time() self.station = self._station() except (TypeError, AttributeError): _LOGGER.debug( f"Time/Station TypeError or AttributeError {objectify.dump(self.obj)}" ) raise RMVtransportError() self.journeys.clear() try: for journey in self.obj.SBRes.JourneyList.Journey: self.journeys.append(RMVJourney(journey, self.now)) except AttributeError: _LOGGER.debug(f"Extract journeys: {objectify.dump(self.obj.SBRes)}") raise RMVtransportError() return self.data()
python
async def get_departures( self, station_id: str, direction_id: Optional[str] = None, max_journeys: int = 20, products: Optional[List[str]] = None, ) -> Dict[str, Any]: """Fetch data from rmv.de.""" self.station_id: str = station_id self.direction_id: str = direction_id self.max_journeys: int = max_journeys self.products_filter: str = _product_filter(products or ALL_PRODUCTS) base_url: str = _base_url() params: Dict[str, Union[str, int]] = { "selectDate": "today", "time": "now", "input": self.station_id, "maxJourneys": self.max_journeys, "boardType": "dep", "productsFilter": self.products_filter, "disableEquivs": "discard_nearby", "output": "xml", "start": "yes", } if self.direction_id: params["dirInput"] = self.direction_id url = base_url + urllib.parse.urlencode(params) try: with async_timeout.timeout(self._timeout): async with self._session.get(url) as response: _LOGGER.debug(f"Response from RMV API: {response.status}") xml = await response.read() _LOGGER.debug(xml) except (asyncio.TimeoutError, aiohttp.ClientError): _LOGGER.error("Can not load data from RMV API") raise RMVtransportApiConnectionError() # pylint: disable=I1101 try: self.obj = objectify.fromstring(xml) except (TypeError, etree.XMLSyntaxError): _LOGGER.debug(f"Get from string: {xml[:100]}") print(f"Get from string: {xml}") raise RMVtransportError() try: self.now = self.current_time() self.station = self._station() except (TypeError, AttributeError): _LOGGER.debug( f"Time/Station TypeError or AttributeError {objectify.dump(self.obj)}" ) raise RMVtransportError() self.journeys.clear() try: for journey in self.obj.SBRes.JourneyList.Journey: self.journeys.append(RMVJourney(journey, self.now)) except AttributeError: _LOGGER.debug(f"Extract journeys: {objectify.dump(self.obj.SBRes)}") raise RMVtransportError() return self.data()
[ "async", "def", "get_departures", "(", "self", ",", "station_id", ":", "str", ",", "direction_id", ":", "Optional", "[", "str", "]", "=", "None", ",", "max_journeys", ":", "int", "=", "20", ",", "products", ":", "Optional", "[", "List", "[", "str", "]", "]", "=", "None", ",", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "self", ".", "station_id", ":", "str", "=", "station_id", "self", ".", "direction_id", ":", "str", "=", "direction_id", "self", ".", "max_journeys", ":", "int", "=", "max_journeys", "self", ".", "products_filter", ":", "str", "=", "_product_filter", "(", "products", "or", "ALL_PRODUCTS", ")", "base_url", ":", "str", "=", "_base_url", "(", ")", "params", ":", "Dict", "[", "str", ",", "Union", "[", "str", ",", "int", "]", "]", "=", "{", "\"selectDate\"", ":", "\"today\"", ",", "\"time\"", ":", "\"now\"", ",", "\"input\"", ":", "self", ".", "station_id", ",", "\"maxJourneys\"", ":", "self", ".", "max_journeys", ",", "\"boardType\"", ":", "\"dep\"", ",", "\"productsFilter\"", ":", "self", ".", "products_filter", ",", "\"disableEquivs\"", ":", "\"discard_nearby\"", ",", "\"output\"", ":", "\"xml\"", ",", "\"start\"", ":", "\"yes\"", ",", "}", "if", "self", ".", "direction_id", ":", "params", "[", "\"dirInput\"", "]", "=", "self", ".", "direction_id", "url", "=", "base_url", "+", "urllib", ".", "parse", ".", "urlencode", "(", "params", ")", "try", ":", "with", "async_timeout", ".", "timeout", "(", "self", ".", "_timeout", ")", ":", "async", "with", "self", ".", "_session", ".", "get", "(", "url", ")", "as", "response", ":", "_LOGGER", ".", "debug", "(", "f\"Response from RMV API: {response.status}\"", ")", "xml", "=", "await", "response", ".", "read", "(", ")", "_LOGGER", ".", "debug", "(", "xml", ")", "except", "(", "asyncio", ".", "TimeoutError", ",", "aiohttp", ".", "ClientError", ")", ":", "_LOGGER", ".", "error", "(", "\"Can not load data from RMV API\"", ")", "raise", "RMVtransportApiConnectionError", "(", ")", "# pylint: disable=I1101", "try", ":", "self", ".", "obj", "=", "objectify", ".", "fromstring", "(", "xml", ")", "except", "(", "TypeError", ",", "etree", ".", "XMLSyntaxError", ")", ":", "_LOGGER", ".", "debug", "(", "f\"Get from string: {xml[:100]}\"", ")", "print", "(", "f\"Get from string: {xml}\"", ")", "raise", "RMVtransportError", "(", ")", "try", ":", "self", ".", "now", "=", "self", ".", "current_time", "(", ")", "self", ".", "station", "=", "self", ".", "_station", "(", ")", "except", "(", "TypeError", ",", "AttributeError", ")", ":", "_LOGGER", ".", "debug", "(", "f\"Time/Station TypeError or AttributeError {objectify.dump(self.obj)}\"", ")", "raise", "RMVtransportError", "(", ")", "self", ".", "journeys", ".", "clear", "(", ")", "try", ":", "for", "journey", "in", "self", ".", "obj", ".", "SBRes", ".", "JourneyList", ".", "Journey", ":", "self", ".", "journeys", ".", "append", "(", "RMVJourney", "(", "journey", ",", "self", ".", "now", ")", ")", "except", "AttributeError", ":", "_LOGGER", ".", "debug", "(", "f\"Extract journeys: {objectify.dump(self.obj.SBRes)}\"", ")", "raise", "RMVtransportError", "(", ")", "return", "self", ".", "data", "(", ")" ]
Fetch data from rmv.de.
[ "Fetch", "data", "from", "rmv", ".", "de", "." ]
train
https://github.com/cgtobi/PyRMVtransport/blob/20a0d68ecfdedceb32e8ca96c381fdec7e2069c7/RMVtransport/rmvtransport.py#L44-L111
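A hedged usage sketch for the coroutine above; the constructor signature and the station id are assumptions for illustration, while the "journeys" keys match the data() method shown below:

import asyncio
import aiohttp
from RMVtransport.rmvtransport import RMVtransport

async def main():
    async with aiohttp.ClientSession() as session:
        rmv = RMVtransport(session)  # constructor args are an assumption
        data = await rmv.get_departures("3006904", max_journeys=5)  # placeholder id
        for journey in data["journeys"]:
            print(journey["minutes"], journey["direction"])

asyncio.run(main())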
cgtobi/PyRMVtransport
RMVtransport/rmvtransport.py
RMVtransport.data
def data(self) -> Dict[str, Any]: """Return travel data.""" data: Dict[str, Any] = {} data["station"] = self.station data["stationId"] = self.station_id data["filter"] = self.products_filter journeys = [] for j in sorted(self.journeys, key=lambda k: k.real_departure)[ : self.max_journeys ]: journeys.append( { "product": j.product, "number": j.number, "trainId": j.train_id, "direction": j.direction, "departure_time": j.real_departure_time, "minutes": j.real_departure, "delay": j.delay, "stops": [s["station"] for s in j.stops], "info": j.info, "info_long": j.info_long, "icon": j.icon, } ) data["journeys"] = journeys return data
python
def data(self) -> Dict[str, Any]: """Return travel data.""" data: Dict[str, Any] = {} data["station"] = self.station data["stationId"] = self.station_id data["filter"] = self.products_filter journeys = [] for j in sorted(self.journeys, key=lambda k: k.real_departure)[ : self.max_journeys ]: journeys.append( { "product": j.product, "number": j.number, "trainId": j.train_id, "direction": j.direction, "departure_time": j.real_departure_time, "minutes": j.real_departure, "delay": j.delay, "stops": [s["station"] for s in j.stops], "info": j.info, "info_long": j.info_long, "icon": j.icon, } ) data["journeys"] = journeys return data
[ "def", "data", "(", "self", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "data", ":", "Dict", "[", "str", ",", "Any", "]", "=", "{", "}", "data", "[", "\"station\"", "]", "=", "self", ".", "station", "data", "[", "\"stationId\"", "]", "=", "self", ".", "station_id", "data", "[", "\"filter\"", "]", "=", "self", ".", "products_filter", "journeys", "=", "[", "]", "for", "j", "in", "sorted", "(", "self", ".", "journeys", ",", "key", "=", "lambda", "k", ":", "k", ".", "real_departure", ")", "[", ":", "self", ".", "max_journeys", "]", ":", "journeys", ".", "append", "(", "{", "\"product\"", ":", "j", ".", "product", ",", "\"number\"", ":", "j", ".", "number", ",", "\"trainId\"", ":", "j", ".", "train_id", ",", "\"direction\"", ":", "j", ".", "direction", ",", "\"departure_time\"", ":", "j", ".", "real_departure_time", ",", "\"minutes\"", ":", "j", ".", "real_departure", ",", "\"delay\"", ":", "j", ".", "delay", ",", "\"stops\"", ":", "[", "s", "[", "\"station\"", "]", "for", "s", "in", "j", ".", "stops", "]", ",", "\"info\"", ":", "j", ".", "info", ",", "\"info_long\"", ":", "j", ".", "info_long", ",", "\"icon\"", ":", "j", ".", "icon", ",", "}", ")", "data", "[", "\"journeys\"", "]", "=", "journeys", "return", "data" ]
Return travel data.
[ "Return", "travel", "data", "." ]
train
https://github.com/cgtobi/PyRMVtransport/blob/20a0d68ecfdedceb32e8ca96c381fdec7e2069c7/RMVtransport/rmvtransport.py#L113-L140
cgtobi/PyRMVtransport
RMVtransport/rmvtransport.py
RMVtransport._station
def _station(self) -> str: """Extract station name.""" return str(self.obj.SBRes.SBReq.Start.Station.HafasName.Text.pyval)
python
def _station(self) -> str: """Extract station name.""" return str(self.obj.SBRes.SBReq.Start.Station.HafasName.Text.pyval)
[ "def", "_station", "(", "self", ")", "->", "str", ":", "return", "str", "(", "self", ".", "obj", ".", "SBRes", ".", "SBReq", ".", "Start", ".", "Station", ".", "HafasName", ".", "Text", ".", "pyval", ")" ]
Extract station name.
[ "Extract", "station", "name", "." ]
train
https://github.com/cgtobi/PyRMVtransport/blob/20a0d68ecfdedceb32e8ca96c381fdec7e2069c7/RMVtransport/rmvtransport.py#L142-L144
cgtobi/PyRMVtransport
RMVtransport/rmvtransport.py
RMVtransport.current_time
def current_time(self) -> datetime: """Extract current time.""" _date = datetime.strptime(self.obj.SBRes.SBReq.StartT.get("date"), "%Y%m%d") _time = datetime.strptime(self.obj.SBRes.SBReq.StartT.get("time"), "%H:%M") return datetime.combine(_date.date(), _time.time())
python
def current_time(self) -> datetime: """Extract current time.""" _date = datetime.strptime(self.obj.SBRes.SBReq.StartT.get("date"), "%Y%m%d") _time = datetime.strptime(self.obj.SBRes.SBReq.StartT.get("time"), "%H:%M") return datetime.combine(_date.date(), _time.time())
[ "def", "current_time", "(", "self", ")", "->", "datetime", ":", "_date", "=", "datetime", ".", "strptime", "(", "self", ".", "obj", ".", "SBRes", ".", "SBReq", ".", "StartT", ".", "get", "(", "\"date\"", ")", ",", "\"%Y%m%d\"", ")", "_time", "=", "datetime", ".", "strptime", "(", "self", ".", "obj", ".", "SBRes", ".", "SBReq", ".", "StartT", ".", "get", "(", "\"time\"", ")", ",", "\"%H:%M\"", ")", "return", "datetime", ".", "combine", "(", "_date", ".", "date", "(", ")", ",", "_time", ".", "time", "(", ")", ")" ]
Extract current time.
[ "Extract", "current", "time", "." ]
train
https://github.com/cgtobi/PyRMVtransport/blob/20a0d68ecfdedceb32e8ca96c381fdec7e2069c7/RMVtransport/rmvtransport.py#L146-L150
cgtobi/PyRMVtransport
RMVtransport/rmvtransport.py
RMVtransport.output
def output(self) -> None: """Pretty print travel times.""" print("%s - %s" % (self.station, self.now)) print(self.products_filter) for j in sorted(self.journeys, key=lambda k: k.real_departure)[ : self.max_journeys ]: print("-------------") print(f"{j.product}: {j.number} ({j.train_id})") print(f"Richtung: {j.direction}") print(f"Abfahrt in {j.real_departure} min.") print(f"Abfahrt {j.departure.time()} (+{j.delay})") print(f"Nächste Haltestellen: {([s['station'] for s in j.stops])}") if j.info: print(f"Hinweis: {j.info}") print(f"Hinweis (lang): {j.info_long}") print(f"Icon: {j.icon}")
python
def output(self) -> None: """Pretty print travel times.""" print("%s - %s" % (self.station, self.now)) print(self.products_filter) for j in sorted(self.journeys, key=lambda k: k.real_departure)[ : self.max_journeys ]: print("-------------") print(f"{j.product}: {j.number} ({j.train_id})") print(f"Richtung: {j.direction}") print(f"Abfahrt in {j.real_departure} min.") print(f"Abfahrt {j.departure.time()} (+{j.delay})") print(f"Nächste Haltestellen: {([s['station'] for s in j.stops])}") if j.info: print(f"Hinweis: {j.info}") print(f"Hinweis (lang): {j.info_long}") print(f"Icon: {j.icon}")
[ "def", "output", "(", "self", ")", "->", "None", ":", "print", "(", "\"%s - %s\"", "%", "(", "self", ".", "station", ",", "self", ".", "now", ")", ")", "print", "(", "self", ".", "products_filter", ")", "for", "j", "in", "sorted", "(", "self", ".", "journeys", ",", "key", "=", "lambda", "k", ":", "k", ".", "real_departure", ")", "[", ":", "self", ".", "max_journeys", "]", ":", "print", "(", "\"-------------\"", ")", "print", "(", "f\"{j.product}: {j.number} ({j.train_id})\"", ")", "print", "(", "f\"Richtung: {j.direction}\"", ")", "print", "(", "f\"Abfahrt in {j.real_departure} min.\"", ")", "print", "(", "f\"Abfahrt {j.departure.time()} (+{j.delay})\"", ")", "print", "(", "f\"Nächste Haltestellen: {([s['station'] for s in j.stops])}\")", "", "if", "j", ".", "info", ":", "print", "(", "f\"Hinweis: {j.info}\"", ")", "print", "(", "f\"Hinweis (lang): {j.info_long}\"", ")", "print", "(", "f\"Icon: {j.icon}\"", ")" ]
Pretty print travel times.
[ "Pretty", "print", "travel", "times", "." ]
train
https://github.com/cgtobi/PyRMVtransport/blob/20a0d68ecfdedceb32e8ca96c381fdec7e2069c7/RMVtransport/rmvtransport.py#L152-L169
night-crawler/django-docker-helpers
django_docker_helpers/config/backends/mpt_redis_parser.py
MPTRedisParser.get
def get(self, variable_path: str, default: t.Optional[t.Any] = None, coerce_type: t.Optional[t.Type] = None, coercer: t.Optional[t.Callable] = None, **kwargs): """ :param variable_path: a delimiter-separated path to a nested value :param default: default value if there's no object by specified path :param coerce_type: cast a type of a value to a specified one :param coercer: perform a type casting with specified callback :param kwargs: additional arguments inherited parser may need :return: value or default """ if self.scope: variable_path = '{0.scope}{0.path_separator}{1}'.format(self, variable_path) if self.key_prefix: variable_path = '{0.key_prefix}:{1}'.format(self, variable_path) val = self.client.get(variable_path) if val is None: return default if val.startswith(self.object_serialize_prefix): # since complex data types are yaml-serialized there's no need to coerce anything _val = val[len(self.object_serialize_prefix):] bundle = self.object_deserialize(_val) if bundle == '': # check for reinforced empty flag return self.coerce(bundle, coerce_type=coerce_type, coercer=coercer) return bundle if isinstance(val, bytes): val = val.decode() return self.coerce(val, coerce_type=coerce_type, coercer=coercer)
python
def get(self, variable_path: str, default: t.Optional[t.Any] = None, coerce_type: t.Optional[t.Type] = None, coercer: t.Optional[t.Callable] = None, **kwargs): """ :param variable_path: a delimiter-separated path to a nested value :param default: default value if there's no object by specified path :param coerce_type: cast a type of a value to a specified one :param coercer: perform a type casting with specified callback :param kwargs: additional arguments inherited parser may need :return: value or default """ if self.scope: variable_path = '{0.scope}{0.path_separator}{1}'.format(self, variable_path) if self.key_prefix: variable_path = '{0.key_prefix}:{1}'.format(self, variable_path) val = self.client.get(variable_path) if val is None: return default if val.startswith(self.object_serialize_prefix): # since complex data types are yaml-serialized there's no need to coerce anything _val = val[len(self.object_serialize_prefix):] bundle = self.object_deserialize(_val) if bundle == '': # check for reinforced empty flag return self.coerce(bundle, coerce_type=coerce_type, coercer=coercer) return bundle if isinstance(val, bytes): val = val.decode() return self.coerce(val, coerce_type=coerce_type, coercer=coercer)
[ "def", "get", "(", "self", ",", "variable_path", ":", "str", ",", "default", ":", "t", ".", "Optional", "[", "t", ".", "Any", "]", "=", "None", ",", "coerce_type", ":", "t", ".", "Optional", "[", "t", ".", "Type", "]", "=", "None", ",", "coercer", ":", "t", ".", "Optional", "[", "t", ".", "Callable", "]", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "scope", ":", "variable_path", "=", "'{0.scope}{0.path_separator}{1}'", ".", "format", "(", "self", ",", "variable_path", ")", "if", "self", ".", "key_prefix", ":", "variable_path", "=", "'{0.key_prefix}:{1}'", ".", "format", "(", "self", ",", "variable_path", ")", "val", "=", "self", ".", "client", ".", "get", "(", "variable_path", ")", "if", "val", "is", "None", ":", "return", "default", "if", "val", ".", "startswith", "(", "self", ".", "object_serialize_prefix", ")", ":", "# since complex data types are yaml-serialized there's no need to coerce anything", "_val", "=", "val", "[", "len", "(", "self", ".", "object_serialize_prefix", ")", ":", "]", "bundle", "=", "self", ".", "object_deserialize", "(", "_val", ")", "if", "bundle", "==", "''", ":", "# check for reinforced empty flag", "return", "self", ".", "coerce", "(", "bundle", ",", "coerce_type", "=", "coerce_type", ",", "coercer", "=", "coercer", ")", "return", "bundle", "if", "isinstance", "(", "val", ",", "bytes", ")", ":", "val", "=", "val", ".", "decode", "(", ")", "return", "self", ".", "coerce", "(", "val", ",", "coerce_type", "=", "coerce_type", ",", "coercer", "=", "coercer", ")" ]
:param variable_path: a delimiter-separated path to a nested value :param default: default value if there's no object by specified path :param coerce_type: cast a type of a value to a specified one :param coercer: perform a type casting with specified callback :param kwargs: additional arguments inherited parser may need :return: value or default
[ ":", "param", "variable_path", ":", "a", "delimiter", "-", "separated", "path", "to", "a", "nested", "value", ":", "param", "default", ":", "default", "value", "if", "there", "s", "no", "object", "by", "specified", "path", ":", "param", "coerce_type", ":", "cast", "a", "type", "of", "a", "value", "to", "a", "specified", "one", ":", "param", "coercer", ":", "perform", "a", "type", "casting", "with", "specified", "callback", ":", "param", "kwargs", ":", "additional", "arguments", "inherited", "parser", "may", "need", ":", "return", ":", "value", "or", "default" ]
train
https://github.com/night-crawler/django-docker-helpers/blob/b64f8009101a8eb61d3841124ba19e3ab881aa2f/django_docker_helpers/config/backends/mpt_redis_parser.py#L75-L112
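A usage sketch for the getter above; the constructor arguments and the '.' path separator are assumptions inferred from the attributes the method uses (scope, key_prefix, path_separator):

from django_docker_helpers.config.backends.mpt_redis_parser import MPTRedisParser

# Constructor arguments are assumptions based on the attributes used above.
parser = MPTRedisParser(scope="myapp", key_prefix="config")

# Looks up "config:myapp.db.port" (key_prefix and scope are prepended,
# assuming '.' as the default path separator) and casts the raw
# Redis string to int.
port = parser.get("db.port", default=5432, coerce_type=int)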
letuananh/chirptext
chirptext/chio.py
process_file
def process_file(path, processor, encoding='utf-8', mode='rt', *args, **kwargs):
    ''' Process a text file's content. If the file name ends with .gz, read it as a gzip file '''
    if mode not in ('rU', 'rt', 'rb', 'r'):
        raise Exception("Invalid file reading mode")
    with open(path, encoding=encoding, mode=mode, *args, **kwargs) as infile:
        return processor(infile)
python
def process_file(path, processor, encoding='utf-8', mode='rt', *args, **kwargs):
    ''' Process a text file's content. If the file name ends with .gz, read it as a gzip file '''
    if mode not in ('rU', 'rt', 'rb', 'r'):
        raise Exception("Invalid file reading mode")
    with open(path, encoding=encoding, mode=mode, *args, **kwargs) as infile:
        return processor(infile)
[ "def", "process_file", "(", "path", ",", "processor", ",", "encoding", "=", "'utf-8'", ",", "mode", "=", "'rt'", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "mode", "not", "in", "(", "'rU'", ",", "'rt'", ",", "'rb'", ",", "'r'", ")", ":", "raise", "Exception", "(", "\"Invalid file reading mode\"", ")", "with", "open", "(", "path", ",", "encoding", "=", "encoding", ",", "mode", "=", "mode", ",", "*", "args", ",", "*", "*", "kwargs", ")", "as", "infile", ":", "return", "processor", "(", "infile", ")" ]
Process a text file's content. If the file name ends with .gz, read it as a gzip file
[ "Process", "a", "text", "file", "s", "content", ".", "If", "the", "file", "name", "ends", "with", ".", "gz", "read", "it", "as", "gzip", "file" ]
train
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/chio.py#L75-L80
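A short usage sketch; data.txt is a placeholder path, and the processor receives the open file object:

from chirptext.chio import process_file

# Count lines without loading the whole file into memory.
line_count = process_file("data.txt", processor=lambda f: sum(1 for _ in f))
print(line_count)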
letuananh/chirptext
chirptext/chio.py
read_file
def read_file(path, encoding='utf-8', *args, **kwargs):
    ''' Read text file content. If the file name ends with .gz, read it as a gzip file.
    If the mode argument is provided as 'rb', content will be read as a byte stream.
    By default, content is read as a string.
    '''
    if 'mode' in kwargs and kwargs['mode'] == 'rb':
        return process_file(path, processor=lambda x: x.read(), encoding=encoding, *args, **kwargs)
    else:
        return process_file(path, processor=lambda x: to_string(x.read(), encoding), encoding=encoding, *args, **kwargs)
python
def read_file(path, encoding='utf-8', *args, **kwargs):
    ''' Read text file content. If the file name ends with .gz, read it as a gzip file.
    If the mode argument is provided as 'rb', content will be read as a byte stream.
    By default, content is read as a string.
    '''
    if 'mode' in kwargs and kwargs['mode'] == 'rb':
        return process_file(path, processor=lambda x: x.read(), encoding=encoding, *args, **kwargs)
    else:
        return process_file(path, processor=lambda x: to_string(x.read(), encoding), encoding=encoding, *args, **kwargs)
[ "def", "read_file", "(", "path", ",", "encoding", "=", "'utf-8'", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "'mode'", "in", "kwargs", "and", "kwargs", "[", "'mode'", "]", "==", "'rb'", ":", "return", "process_file", "(", "path", ",", "processor", "=", "lambda", "x", ":", "x", ".", "read", "(", ")", ",", "encoding", "=", "encoding", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "return", "process_file", "(", "path", ",", "processor", "=", "lambda", "x", ":", "to_string", "(", "x", ".", "read", "(", ")", ",", "encoding", ")", ",", "encoding", "=", "encoding", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Read text file content. If the file name ends with .gz, read it as a gzip file.
If the mode argument is provided as 'rb', content will be read as a byte stream.
By default, content is read as a string.
[ "Read", "text", "file", "content", ".", "If", "the", "file", "name", "ends", "with", ".", "gz", "read", "it", "as", "gzip", "file", ".", "If", "mode", "argument", "is", "provided", "as", "rb", "content", "will", "be", "read", "as", "byte", "stream", ".", "By", "default", "content", "is", "read", "as", "string", "." ]
train
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/chio.py#L83-L93
letuananh/chirptext
chirptext/chio.py
write_file
def write_file(path, content, mode=None, encoding='utf-8'): ''' Write content to a file. If the path ends with .gz, gzip will be used. ''' if not mode: if isinstance(content, bytes): mode = 'wb' else: mode = 'wt' if not path: raise ValueError("Output path is invalid") else: getLogger().debug("Writing content to {}".format(path)) # convert content to string when writing text data if mode in ('w', 'wt') and not isinstance(content, str): content = to_string(content) elif mode == 'wb': # content needs to be encoded as bytes if not isinstance(content, str): content = to_string(content).encode(encoding) else: content = content.encode(encoding) if str(path).endswith('.gz'): with gzip.open(path, mode) as outfile: outfile.write(content) else: with open(path, mode=mode) as outfile: outfile.write(content)
python
def write_file(path, content, mode=None, encoding='utf-8'): ''' Write content to a file. If the path ends with .gz, gzip will be used. ''' if not mode: if isinstance(content, bytes): mode = 'wb' else: mode = 'wt' if not path: raise ValueError("Output path is invalid") else: getLogger().debug("Writing content to {}".format(path)) # convert content to string when writing text data if mode in ('w', 'wt') and not isinstance(content, str): content = to_string(content) elif mode == 'wb': # content needs to be encoded as bytes if not isinstance(content, str): content = to_string(content).encode(encoding) else: content = content.encode(encoding) if str(path).endswith('.gz'): with gzip.open(path, mode) as outfile: outfile.write(content) else: with open(path, mode=mode) as outfile: outfile.write(content)
[ "def", "write_file", "(", "path", ",", "content", ",", "mode", "=", "None", ",", "encoding", "=", "'utf-8'", ")", ":", "if", "not", "mode", ":", "if", "isinstance", "(", "content", ",", "bytes", ")", ":", "mode", "=", "'wb'", "else", ":", "mode", "=", "'wt'", "if", "not", "path", ":", "raise", "ValueError", "(", "\"Output path is invalid\"", ")", "else", ":", "getLogger", "(", ")", ".", "debug", "(", "\"Writing content to {}\"", ".", "format", "(", "path", ")", ")", "# convert content to string when writing text data", "if", "mode", "in", "(", "'w'", ",", "'wt'", ")", "and", "not", "isinstance", "(", "content", ",", "str", ")", ":", "content", "=", "to_string", "(", "content", ")", "elif", "mode", "==", "'wb'", ":", "# content needs to be encoded as bytes", "if", "not", "isinstance", "(", "content", ",", "str", ")", ":", "content", "=", "to_string", "(", "content", ")", ".", "encode", "(", "encoding", ")", "else", ":", "content", "=", "content", ".", "encode", "(", "encoding", ")", "if", "str", "(", "path", ")", ".", "endswith", "(", "'.gz'", ")", ":", "with", "gzip", ".", "open", "(", "path", ",", "mode", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "content", ")", "else", ":", "with", "open", "(", "path", ",", "mode", "=", "mode", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "content", ")" ]
Write content to a file. If the path ends with .gz, gzip will be used.
[ "Write", "content", "to", "a", "file", ".", "If", "the", "path", "ends", "with", ".", "gz", "gzip", "will", "be", "used", "." ]
train
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/chio.py#L96-L121
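A usage sketch showing how the mode is inferred from the content type and the path suffix (file names are placeholders):

from chirptext.chio import write_file

write_file("notes.txt", "plain text")     # str content -> text mode
write_file("blob.bin", b"\x00\x01\x02")   # bytes content -> 'wb'
write_file("notes.txt.gz", "compressed")  # .gz suffix routes through gzip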
letuananh/chirptext
chirptext/chio.py
iter_csv_stream
def iter_csv_stream(input_stream, fieldnames=None, sniff=False, *args, **kwargs): ''' Read CSV content as a table (list of lists) from an input stream ''' if 'dialect' not in kwargs and sniff: kwargs['dialect'] = csv.Sniffer().sniff(input_stream.read(1024)) input_stream.seek(0) if 'quoting' in kwargs and kwargs['quoting'] is None: kwargs['quoting'] = csv.QUOTE_MINIMAL if fieldnames: # read csv using dictreader if isinstance(fieldnames, bool): reader = csv.DictReader(input_stream, *args, **kwargs) else: reader = csv.DictReader(input_stream, *args, fieldnames=fieldnames, **kwargs) for row in reader: yield row else: csvreader = csv.reader(input_stream, *args, **kwargs) for row in csvreader: yield row
python
def iter_csv_stream(input_stream, fieldnames=None, sniff=False, *args, **kwargs): ''' Read CSV content as a table (list of lists) from an input stream ''' if 'dialect' not in kwargs and sniff: kwargs['dialect'] = csv.Sniffer().sniff(input_stream.read(1024)) input_stream.seek(0) if 'quoting' in kwargs and kwargs['quoting'] is None: kwargs['quoting'] = csv.QUOTE_MINIMAL if fieldnames: # read csv using dictreader if isinstance(fieldnames, bool): reader = csv.DictReader(input_stream, *args, **kwargs) else: reader = csv.DictReader(input_stream, *args, fieldnames=fieldnames, **kwargs) for row in reader: yield row else: csvreader = csv.reader(input_stream, *args, **kwargs) for row in csvreader: yield row
[ "def", "iter_csv_stream", "(", "input_stream", ",", "fieldnames", "=", "None", ",", "sniff", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "'dialect'", "not", "in", "kwargs", "and", "sniff", ":", "kwargs", "[", "'dialect'", "]", "=", "csv", ".", "Sniffer", "(", ")", ".", "sniff", "(", "input_stream", ".", "read", "(", "1024", ")", ")", "input_stream", ".", "seek", "(", "0", ")", "if", "'quoting'", "in", "kwargs", "and", "kwargs", "[", "'quoting'", "]", "is", "None", ":", "kwargs", "[", "'quoting'", "]", "=", "csv", ".", "QUOTE_MINIMAL", "if", "fieldnames", ":", "# read csv using dictreader", "if", "isinstance", "(", "fieldnames", ",", "bool", ")", ":", "reader", "=", "csv", ".", "DictReader", "(", "input_stream", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "reader", "=", "csv", ".", "DictReader", "(", "input_stream", ",", "*", "args", ",", "fieldnames", "=", "fieldnames", ",", "*", "*", "kwargs", ")", "for", "row", "in", "reader", ":", "yield", "row", "else", ":", "csvreader", "=", "csv", ".", "reader", "(", "input_stream", ",", "*", "args", ",", "*", "*", "kwargs", ")", "for", "row", "in", "csvreader", ":", "yield", "row" ]
Read CSV content as a table (list of lists) from an input stream
[ "Read", "CSV", "content", "as", "a", "table", "(", "list", "of", "lists", ")", "from", "an", "input", "stream" ]
train
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/chio.py#L124-L142
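A usage sketch with an in-memory stream; passing fieldnames=True switches to DictReader and treats the first row as the header:

import io
from chirptext.chio import iter_csv_stream

stream = io.StringIO("name,age\nAlice,30\nBob,25\n")
for row in iter_csv_stream(stream, fieldnames=True):
    print(row["name"], row["age"])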
letuananh/chirptext
chirptext/chio.py
read_csv_iter
def read_csv_iter(path, fieldnames=None, sniff=True, mode='rt', encoding='utf-8', *args, **kwargs):
    ''' Iterate through CSV rows in a file.
    By default, csv.reader() will be used and output will be a list of lists.
    If fieldnames is provided, DictReader will be used and output will be a list of OrderedDicts instead.
    CSV sniffing (dialect detection) is enabled by default, set sniff=False to switch it off.
    '''
    with open(path, mode=mode, encoding=encoding) as infile:
        for row in iter_csv_stream(infile, fieldnames=fieldnames, sniff=sniff, *args, **kwargs):
            yield row
python
def read_csv_iter(path, fieldnames=None, sniff=True, mode='rt', encoding='utf-8', *args, **kwargs):
    ''' Iterate through CSV rows in a file.
    By default, csv.reader() will be used and output will be a list of lists.
    If fieldnames is provided, DictReader will be used and output will be a list of OrderedDicts instead.
    CSV sniffing (dialect detection) is enabled by default, set sniff=False to switch it off.
    '''
    with open(path, mode=mode, encoding=encoding) as infile:
        for row in iter_csv_stream(infile, fieldnames=fieldnames, sniff=sniff, *args, **kwargs):
            yield row
[ "def", "read_csv_iter", "(", "path", ",", "fieldnames", "=", "None", ",", "sniff", "=", "True", ",", "mode", "=", "'rt'", ",", "encoding", "=", "'utf-8'", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "open", "(", "path", ",", "mode", "=", "mode", ",", "encoding", "=", "encoding", ")", "as", "infile", ":", "for", "row", "in", "iter_csv_stream", "(", "infile", ",", "fieldnames", "=", "fieldnames", ",", "sniff", "=", "sniff", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "yield", "row" ]
Iterate through CSV rows in a file.
By default, csv.reader() will be used and output will be a list of lists.
If fieldnames is provided, DictReader will be used and output will be a list of OrderedDicts instead.
CSV sniffing (dialect detection) is enabled by default, set sniff=False to switch it off.
[ "Iterate", "through", "CSV", "rows", "in", "a", "file", ".", "By", "default", "csv", ".", "reader", "()", "will", "be", "used", "any", "output", "will", "be", "a", "list", "of", "lists", ".", "If", "fieldnames", "is", "provided", "DictReader", "will", "be", "used", "and", "output", "will", "be", "list", "of", "OrderedDict", "instead", ".", "CSV", "sniffing", "(", "dialect", "detection", ")", "is", "enabled", "by", "default", "set", "sniff", "=", "False", "to", "switch", "it", "off", "." ]
train
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/chio.py#L149-L157
letuananh/chirptext
chirptext/chio.py
read_csv
def read_csv(path, fieldnames=None, sniff=True, encoding='utf-8', *args, **kwargs):
    ''' Read CSV rows as a table from a file.
    By default, csv.reader() will be used and output will be a list of lists.
    If fieldnames is provided, DictReader will be used and output will be a list of OrderedDicts instead.
    CSV sniffing (dialect detection) is enabled by default, set sniff=False to switch it off.
    '''
    return list(r for r in read_csv_iter(path, fieldnames=fieldnames, sniff=sniff, encoding=encoding, *args, **kwargs))
python
def read_csv(path, fieldnames=None, sniff=True, encoding='utf-8', *args, **kwargs):
    ''' Read CSV rows as a table from a file.
    By default, csv.reader() will be used and output will be a list of lists.
    If fieldnames is provided, DictReader will be used and output will be a list of OrderedDicts instead.
    CSV sniffing (dialect detection) is enabled by default, set sniff=False to switch it off.
    '''
    return list(r for r in read_csv_iter(path, fieldnames=fieldnames, sniff=sniff, encoding=encoding, *args, **kwargs))
[ "def", "read_csv", "(", "path", ",", "fieldnames", "=", "None", ",", "sniff", "=", "True", ",", "encoding", "=", "'utf-8'", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "list", "(", "r", "for", "r", "in", "read_csv_iter", "(", "path", ",", "fieldnames", "=", "fieldnames", ",", "sniff", "=", "sniff", ",", "encoding", "=", "encoding", ",", "*", "args", ",", "*", "*", "kwargs", ")", ")" ]
Read CSV rows as a table from a file.
By default, csv.reader() will be used and output will be a list of lists.
If fieldnames is provided, DictReader will be used and output will be a list of OrderedDicts instead.
CSV sniffing (dialect detection) is enabled by default, set sniff=False to switch it off.
[ "Read", "CSV", "rows", "as", "table", "from", "a", "file", ".", "By", "default", "csv", ".", "reader", "()", "will", "be", "used", "any", "output", "will", "be", "a", "list", "of", "lists", ".", "If", "fieldnames", "is", "provided", "DictReader", "will", "be", "used", "and", "output", "will", "be", "list", "of", "OrderedDict", "instead", ".", "CSV", "sniffing", "(", "dialect", "detection", ")", "is", "enabled", "by", "default", "set", "sniff", "=", "False", "to", "switch", "it", "off", "." ]
train
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/chio.py#L164-L170
letuananh/chirptext
chirptext/chio.py
write_csv
def write_csv(path, rows, dialect='excel', fieldnames=None, quoting=csv.QUOTE_ALL, extrasaction='ignore', *args, **kwargs): ''' Write rows data to a CSV file (with or without fieldnames) ''' if not quoting: quoting = csv.QUOTE_MINIMAL if 'lineterminator' not in kwargs: kwargs['lineterminator'] = '\n' # use \n to fix double-line in Windows with open(path, mode='wt', newline='') as csvfile: if fieldnames: writer = csv.DictWriter(csvfile, fieldnames=fieldnames, dialect=dialect, quoting=quoting, extrasaction=extrasaction, *args, **kwargs) writer.writeheader() for row in rows: writer.writerow(row) else: writer = csv.writer(csvfile, dialect=dialect, quoting=quoting, *args, **kwargs) for row in rows: writer.writerow(row)
python
def write_csv(path, rows, dialect='excel', fieldnames=None, quoting=csv.QUOTE_ALL, extrasaction='ignore', *args, **kwargs): ''' Write rows data to a CSV file (with or without fieldnames) ''' if not quoting: quoting = csv.QUOTE_MINIMAL if 'lineterminator' not in kwargs: kwargs['lineterminator'] = '\n' # use \n to fix double-line in Windows with open(path, mode='wt', newline='') as csvfile: if fieldnames: writer = csv.DictWriter(csvfile, fieldnames=fieldnames, dialect=dialect, quoting=quoting, extrasaction=extrasaction, *args, **kwargs) writer.writeheader() for row in rows: writer.writerow(row) else: writer = csv.writer(csvfile, dialect=dialect, quoting=quoting, *args, **kwargs) for row in rows: writer.writerow(row)
[ "def", "write_csv", "(", "path", ",", "rows", ",", "dialect", "=", "'excel'", ",", "fieldnames", "=", "None", ",", "quoting", "=", "csv", ".", "QUOTE_ALL", ",", "extrasaction", "=", "'ignore'", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "quoting", ":", "quoting", "=", "csv", ".", "QUOTE_MINIMAL", "if", "'lineterminator'", "not", "in", "kwargs", ":", "kwargs", "[", "'lineterminator'", "]", "=", "'\\n'", "# use \\n to fix double-line in Windows", "with", "open", "(", "path", ",", "mode", "=", "'wt'", ",", "newline", "=", "''", ")", "as", "csvfile", ":", "if", "fieldnames", ":", "writer", "=", "csv", ".", "DictWriter", "(", "csvfile", ",", "fieldnames", "=", "fieldnames", ",", "dialect", "=", "dialect", ",", "quoting", "=", "quoting", ",", "extrasaction", "=", "extrasaction", ",", "*", "args", ",", "*", "*", "kwargs", ")", "writer", ".", "writeheader", "(", ")", "for", "row", "in", "rows", ":", "writer", ".", "writerow", "(", "row", ")", "else", ":", "writer", "=", "csv", ".", "writer", "(", "csvfile", ",", "dialect", "=", "dialect", ",", "quoting", "=", "quoting", ",", "*", "args", ",", "*", "*", "kwargs", ")", "for", "row", "in", "rows", ":", "writer", ".", "writerow", "(", "row", ")" ]
Write rows data to a CSV file (with or without fieldnames)
[ "Write", "rows", "data", "to", "a", "CSV", "file", "(", "with", "or", "without", "fieldnames", ")" ]
train
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/chio.py#L177-L192
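A usage sketch for both modes; the file names are placeholders:

from chirptext.chio import write_csv

# With fieldnames: rows are dicts, and a header row is written first.
write_csv("people.csv", [{"name": "Alice", "age": 30}], fieldnames=["name", "age"])

# Without fieldnames: rows are plain sequences.
write_csv("points.csv", [(1, 2), (3, 4)])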
letuananh/chirptext
chirptext/chio.py
CSV.write
def write(file_name, rows, header=None, *args, **kwargs): ''' Write rows data to a CSV file (with or without header) ''' warnings.warn("chirptext.io.CSV is deprecated and will be removed in near future.", DeprecationWarning) write_csv(file_name, rows, fieldnames=header, *args, **kwargs)
python
def write(file_name, rows, header=None, *args, **kwargs): ''' Write rows data to a CSV file (with or without header) ''' warnings.warn("chirptext.io.CSV is deprecated and will be removed in near future.", DeprecationWarning) write_csv(file_name, rows, fieldnames=header, *args, **kwargs)
[ "def", "write", "(", "file_name", ",", "rows", ",", "header", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"chirptext.io.CSV is deprecated and will be removed in near future.\"", ",", "DeprecationWarning", ")", "write_csv", "(", "file_name", ",", "rows", ",", "fieldnames", "=", "header", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Write rows data to a CSV file (with or without header)
[ "Write", "rows", "data", "to", "a", "CSV", "file", "(", "with", "or", "without", "header", ")" ]
train
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/chio.py#L216-L219
alefnula/tea
tea/msg/mail.py
make_msgid
def make_msgid(idstring=None, utc=False):
    """Return a string suitable for RFC 2822 compliant Message-ID.

    E.g: <[email protected]>

    Optional idstring, if given, is a string used to strengthen the
    uniqueness of the message id.
    """
    if utc:
        timestamp = time.gmtime()
    else:
        timestamp = time.localtime()
    utcdate = time.strftime("%Y%m%d%H%M%S", timestamp)
    try:
        pid = os.getpid()
    except AttributeError:
        # No getpid() in Jython, for example.
        pid = 1
    randint = random.randrange(100000)
    if idstring is None:
        idstring = ""
    else:
        idstring = "." + idstring
    idhost = DNS_NAME
    msgid = "<%s.%s.%s%s@%s>" % (utcdate, pid, randint, idstring, idhost)
    return msgid
python
def make_msgid(idstring=None, utc=False):
    """Return a string suitable for RFC 2822 compliant Message-ID.

    E.g: <[email protected]>

    Optional idstring, if given, is a string used to strengthen the
    uniqueness of the message id.
    """
    if utc:
        timestamp = time.gmtime()
    else:
        timestamp = time.localtime()
    utcdate = time.strftime("%Y%m%d%H%M%S", timestamp)
    try:
        pid = os.getpid()
    except AttributeError:
        # No getpid() in Jython, for example.
        pid = 1
    randint = random.randrange(100000)
    if idstring is None:
        idstring = ""
    else:
        idstring = "." + idstring
    idhost = DNS_NAME
    msgid = "<%s.%s.%s%s@%s>" % (utcdate, pid, randint, idstring, idhost)
    return msgid
[ "def", "make_msgid", "(", "idstring", "=", "None", ",", "utc", "=", "False", ")", ":", "if", "utc", ":", "timestamp", "=", "time", ".", "gmtime", "(", ")", "else", ":", "timestamp", "=", "time", ".", "localtime", "(", ")", "utcdate", "=", "time", ".", "strftime", "(", "\"%Y%m%d%H%M%S\"", ",", "timestamp", ")", "try", ":", "pid", "=", "os", ".", "getpid", "(", ")", "except", "AttributeError", ":", "# No getpid() in Jython, for example.\r", "pid", "=", "1", "randint", "=", "random", ".", "randrange", "(", "100000", ")", "if", "idstring", "is", "None", ":", "idstring", "=", "\"\"", "else", ":", "idstring", "=", "\".\"", "+", "idstring", "idhost", "=", "DNS_NAME", "msgid", "=", "\"<%s.%s.%s%s@%s>\"", "%", "(", "utcdate", ",", "pid", ",", "randint", ",", "idstring", ",", "idhost", ")", "return", "msgid" ]
Return a string suitable for RFC 2822 compliant Message-ID.
E.g: <[email protected]>

Optional idstring, if given, is a string used to strengthen the
uniqueness of the message id.
[ "Return", "a", "string", "suitable", "for", "RFC", "2822", "compliant", "Message", "-", "ID", ".", "E", ".", "g", ":", "<20020201195627", ".", "33539", ".", "96671" ]
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/msg/mail.py#L75-L100
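A short usage sketch; the printed ids are illustrative, since the timestamp, pid, random part, and host vary per call:

from tea.msg.mail import make_msgid

print(make_msgid())          # e.g. <20240101120000.4242.137@yourhost>
print(make_msgid("signup"))  # idstring is appended: <20240101120000.4242.137.signup@yourhost>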
alefnula/tea
tea/msg/mail.py
forbid_multi_line_headers
def forbid_multi_line_headers(name, val): """Forbid multi-line headers, to prevent header injection.""" val = smart_text(val) if "\n" in val or "\r" in val: raise BadHeaderError( "Header values can't contain newlines " "(got %r for header %r)" % (val, name) ) try: val = val.encode("ascii") except UnicodeEncodeError: if name.lower() in ("to", "from", "cc"): result = [] for item in val.split(", "): nm, addr = parseaddr(item) nm = str(Header(nm, DEFAULT_CHARSET)) result.append(formataddr((nm, str(addr)))) val = ", ".join(result) else: val = Header(val, DEFAULT_CHARSET) else: if name.lower() == "subject": val = Header(val) return name, val
python
def forbid_multi_line_headers(name, val): """Forbid multi-line headers, to prevent header injection.""" val = smart_text(val) if "\n" in val or "\r" in val: raise BadHeaderError( "Header values can't contain newlines " "(got %r for header %r)" % (val, name) ) try: val = val.encode("ascii") except UnicodeEncodeError: if name.lower() in ("to", "from", "cc"): result = [] for item in val.split(", "): nm, addr = parseaddr(item) nm = str(Header(nm, DEFAULT_CHARSET)) result.append(formataddr((nm, str(addr)))) val = ", ".join(result) else: val = Header(val, DEFAULT_CHARSET) else: if name.lower() == "subject": val = Header(val) return name, val
[ "def", "forbid_multi_line_headers", "(", "name", ",", "val", ")", ":", "val", "=", "smart_text", "(", "val", ")", "if", "\"\\n\"", "in", "val", "or", "\"\\r\"", "in", "val", ":", "raise", "BadHeaderError", "(", "\"Header values can't contain newlines \"", "\"(got %r for header %r)\"", "%", "(", "val", ",", "name", ")", ")", "try", ":", "val", "=", "val", ".", "encode", "(", "\"ascii\"", ")", "except", "UnicodeEncodeError", ":", "if", "name", ".", "lower", "(", ")", "in", "(", "\"to\"", ",", "\"from\"", ",", "\"cc\"", ")", ":", "result", "=", "[", "]", "for", "item", "in", "val", ".", "split", "(", "\", \"", ")", ":", "nm", ",", "addr", "=", "parseaddr", "(", "item", ")", "nm", "=", "str", "(", "Header", "(", "nm", ",", "DEFAULT_CHARSET", ")", ")", "result", ".", "append", "(", "formataddr", "(", "(", "nm", ",", "str", "(", "addr", ")", ")", ")", ")", "val", "=", "\", \"", ".", "join", "(", "result", ")", "else", ":", "val", "=", "Header", "(", "val", ",", "DEFAULT_CHARSET", ")", "else", ":", "if", "name", ".", "lower", "(", ")", "==", "\"subject\"", ":", "val", "=", "Header", "(", "val", ")", "return", "name", ",", "val" ]
Forbid multi-line headers, to prevent header injection.
[ "Forbid", "multi", "-", "line", "headers", "to", "prevent", "header", "injection", "." ]
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/msg/mail.py#L107-L130
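A sketch of the two interesting paths: a newline triggers BadHeaderError, and a non-ASCII value comes back wrapped in an encoded email.header.Header. BadHeaderError is assumed to be importable from the same module, since the function raises it there:

from tea.msg.mail import forbid_multi_line_headers, BadHeaderError

try:
    forbid_multi_line_headers("Subject", "Hi\r\nBcc: [email protected]")
except BadHeaderError:
    print("header injection attempt rejected")

# Non-ASCII subject values are returned as a Header instance.
name, val = forbid_multi_line_headers("Subject", "Grüße aus Wien")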
alefnula/tea
tea/msg/mail.py
send_mail
def send_mail( subject, sender, to, message, html_message=None, cc=None, bcc=None, attachments=None, host=None, port=None, auth_user=None, auth_password=None, use_tls=False, fail_silently=False, ): """Send a single email to a recipient list. All members of the recipient list will see the other recipients in the 'To' field. Note: The API for this method is frozen. New code wanting to extend the functionality should use the EmailMessage class directly. """ if message is None and html_message is None: raise ValueError("Either message or html_message must be provided") if message is None: message = strip_tags(html_message) connection = SMTPConnection( host=host, port=port, username=auth_user, password=auth_password, use_tls=use_tls, fail_silently=fail_silently, ) # Convert the to field just for easier usage if isinstance(to, six.string_types): to = [to] if html_message is None: email = EmailMessage( subject=subject, body=message, sender=sender, to=to, cc=cc, bcc=bcc, attachments=attachments, connection=connection, ) else: email = EmailMultiAlternatives( subject=subject, body=message, sender=sender, to=to, cc=cc, bcc=bcc, attachments=attachments, connection=connection, ) email.attach_alternative(html_message, "text/html") return email.send()
python
def send_mail( subject, sender, to, message, html_message=None, cc=None, bcc=None, attachments=None, host=None, port=None, auth_user=None, auth_password=None, use_tls=False, fail_silently=False, ): """Send a single email to a recipient list. All members of the recipient list will see the other recipients in the 'To' field. Note: The API for this method is frozen. New code wanting to extend the functionality should use the EmailMessage class directly. """ if message is None and html_message is None: raise ValueError("Either message or html_message must be provided") if message is None: message = strip_tags(html_message) connection = SMTPConnection( host=host, port=port, username=auth_user, password=auth_password, use_tls=use_tls, fail_silently=fail_silently, ) # Convert the to field just for easier usage if isinstance(to, six.string_types): to = [to] if html_message is None: email = EmailMessage( subject=subject, body=message, sender=sender, to=to, cc=cc, bcc=bcc, attachments=attachments, connection=connection, ) else: email = EmailMultiAlternatives( subject=subject, body=message, sender=sender, to=to, cc=cc, bcc=bcc, attachments=attachments, connection=connection, ) email.attach_alternative(html_message, "text/html") return email.send()
[ "def", "send_mail", "(", "subject", ",", "sender", ",", "to", ",", "message", ",", "html_message", "=", "None", ",", "cc", "=", "None", ",", "bcc", "=", "None", ",", "attachments", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ",", "auth_user", "=", "None", ",", "auth_password", "=", "None", ",", "use_tls", "=", "False", ",", "fail_silently", "=", "False", ",", ")", ":", "if", "message", "is", "None", "and", "html_message", "is", "None", ":", "raise", "ValueError", "(", "\"Either message or html_message must be provided\"", ")", "if", "message", "is", "None", ":", "message", "=", "strip_tags", "(", "html_message", ")", "connection", "=", "SMTPConnection", "(", "host", "=", "host", ",", "port", "=", "port", ",", "username", "=", "auth_user", ",", "password", "=", "auth_password", ",", "use_tls", "=", "use_tls", ",", "fail_silently", "=", "fail_silently", ",", ")", "# Convert the to field just for easier usage\r", "if", "isinstance", "(", "to", ",", "six", ".", "string_types", ")", ":", "to", "=", "[", "to", "]", "if", "html_message", "is", "None", ":", "email", "=", "EmailMessage", "(", "subject", "=", "subject", ",", "body", "=", "message", ",", "sender", "=", "sender", ",", "to", "=", "to", ",", "cc", "=", "cc", ",", "bcc", "=", "bcc", ",", "attachments", "=", "attachments", ",", "connection", "=", "connection", ",", ")", "else", ":", "email", "=", "EmailMultiAlternatives", "(", "subject", "=", "subject", ",", "body", "=", "message", ",", "sender", "=", "sender", ",", "to", "=", "to", ",", "cc", "=", "cc", ",", "bcc", "=", "bcc", ",", "attachments", "=", "attachments", ",", "connection", "=", "connection", ",", ")", "email", ".", "attach_alternative", "(", "html_message", ",", "\"text/html\"", ")", "return", "email", ".", "send", "(", ")" ]
Send a single email to a recipient list. All members of the recipient list will see the other recipients in the 'To' field. Note: The API for this method is frozen. New code wanting to extend the functionality should use the EmailMessage class directly.
[ "Send", "a", "single", "email", "to", "a", "recipient", "list", ".", "All", "members", "of", "the", "recipient", "list", "will", "see", "the", "other", "recipients", "in", "the", "To", "field", ".", "Note", ":", "The", "API", "for", "this", "method", "is", "frozen", ".", "New", "code", "wanting", "to", "extend", "the", "functionality", "should", "use", "the", "EmailMessage", "class", "directly", "." ]
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/msg/mail.py#L432-L494
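A hedged usage sketch for send_mail, assuming the module is importable as tea.msg.mail and that an SMTP server may or may not be reachable (fail_silently=True keeps the sketch harmless when none is):

from tea.msg.mail import send_mail

sent = send_mail(
    subject="Build finished",
    sender="ci@example.com",
    to="dev@example.com",          # a bare string is wrapped into a list
    message="The build finished successfully.",
    html_message="<p>The build finished <b>successfully</b>.</p>",
    host="localhost",
    fail_silently=True,
)
print("messages sent:", sent)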
alefnula/tea
tea/msg/mail.py
send_mass_mail
def send_mass_mail(
    datatuple, fail_silently=False, auth_user=None, auth_password=None
):
    """Send multiple emails to multiple recipients.

    Given a datatuple of (subject, message, sender, recipient_list), sends
    each message to each recipient list. Returns the number of e-mails sent.

    If auth_user and auth_password are set, they're used to log in.

    Note: The API for this method is frozen. New code wanting to extend the
    functionality should use the EmailMessage class directly.
    """
    connection = SMTPConnection(
        username=auth_user, password=auth_password, fail_silently=fail_silently
    )
    messages = [
        EmailMessage(subject, message, sender, recipient)
        for subject, message, sender, recipient in datatuple
    ]
    return connection.send_messages(messages)
python
def send_mass_mail(
    datatuple, fail_silently=False, auth_user=None, auth_password=None
):
    """Send multiple emails to multiple recipients.

    Given a datatuple of (subject, message, sender, recipient_list), sends
    each message to each recipient list. Returns the number of e-mails sent.

    If auth_user and auth_password are set, they're used to log in.

    Note: The API for this method is frozen. New code wanting to extend the
    functionality should use the EmailMessage class directly.
    """
    connection = SMTPConnection(
        username=auth_user, password=auth_password, fail_silently=fail_silently
    )
    messages = [
        EmailMessage(subject, message, sender, recipient)
        for subject, message, sender, recipient in datatuple
    ]
    return connection.send_messages(messages)
[ "def", "send_mass_mail", "(", "datatuple", ",", "fail_silently", "=", "False", ",", "auth_user", "=", "None", ",", "auth_password", "=", "None", ")", ":", "connection", "=", "SMTPConnection", "(", "username", "=", "auth_user", ",", "password", "=", "auth_password", ",", "fail_silently", "=", "fail_silently", ")", "messages", "=", "[", "EmailMessage", "(", "subject", ",", "message", ",", "sender", ",", "recipient", ")", "for", "subject", ",", "message", ",", "sender", ",", "recipient", "in", "datatuple", "]", "return", "connection", ".", "send_messages", "(", "messages", ")" ]
Send multiple emails to multiple recipients. Given a datatuple of (subject, message, sender, recipient_list), sends each message to each recipient list. Returns the number of e-mails sent. If auth_user and auth_password are set, they're used to log in. Note: The API for this method is frozen. New code wanting to extend the functionality should use the EmailMessage class directly.
[ "Send", "multiple", "emails", "to", "multiple", "recipients", ".", "Given", "a", "datatuple", "of", "(", "subject", "message", "sender", "recipient_list", ")", "sends", "each", "message", "to", "each", "recipient", "list", ".", "Returns", "the", "number", "of", "e", "-", "mails", "sent", ".", "If", "auth_user", "and", "auth_password", "are", "set", "they", "re", "used", "to", "log", "in", ".", "Note", ":", "The", "API", "for", "this", "method", "is", "frozen", ".", "New", "code", "wanting", "to", "extend", "the", "functionality", "should", "use", "the", "EmailMessage", "class", "directly", "." ]
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/msg/mail.py#L497-L517
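send_mass_mail shares one SMTPConnection across the whole batch. A sketch under the same import and server assumptions as above; the datatuple contents are illustrative:

from tea.msg.mail import send_mass_mail

datatuple = (
    ("Hi Alice", "Body for Alice", "me@example.com", ["alice@example.com"]),
    ("Hi Bob", "Body for Bob", "me@example.com", ["bob@example.com"]),
)
print(send_mass_mail(datatuple, fail_silently=True))  # number of e-mails sent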
alefnula/tea
tea/msg/mail.py
SMTPConnection.open
def open(self):
    """Ensure we have a connection to the email server.

    Returns whether or not a new connection was required (True or False).
    """
    if self.connection:
        # Nothing to do if the connection is already open.
        return False
    try:
        # If local_hostname is not specified, socket.getfqdn() gets used.
        # For performance, we use the cached FQDN for local_hostname.
        self.connection = smtplib.SMTP(
            self.host, self.port, local_hostname=DNS_NAME.get_fqdn()
        )
        if self.use_tls:
            self.connection.ehlo()
            self.connection.starttls()
            self.connection.ehlo()
        if self.username and self.password:
            self.connection.login(self.username, self.password)
        return True
    except Exception as e:
        logger.error(
            "Error trying to connect to server %s:%s: %s",
            self.host,
            self.port,
            e,
        )
        if not self.fail_silently:
            raise
python
def open(self):
    """Ensure we have a connection to the email server.

    Returns whether or not a new connection was required (True or False).
    """
    if self.connection:
        # Nothing to do if the connection is already open.
        return False
    try:
        # If local_hostname is not specified, socket.getfqdn() gets used.
        # For performance, we use the cached FQDN for local_hostname.
        self.connection = smtplib.SMTP(
            self.host, self.port, local_hostname=DNS_NAME.get_fqdn()
        )
        if self.use_tls:
            self.connection.ehlo()
            self.connection.starttls()
            self.connection.ehlo()
        if self.username and self.password:
            self.connection.login(self.username, self.password)
        return True
    except Exception as e:
        logger.error(
            "Error trying to connect to server %s:%s: %s",
            self.host,
            self.port,
            e,
        )
        if not self.fail_silently:
            raise
[ "def", "open", "(", "self", ")", ":", "if", "self", ".", "connection", ":", "# Nothing to do if the connection is already open.\r", "return", "False", "try", ":", "# If local_hostname is not specified, socket.getfqdn() gets used.\r", "# For performance, we use the cached FQDN for local_hostname.\r", "self", ".", "connection", "=", "smtplib", ".", "SMTP", "(", "self", ".", "host", ",", "self", ".", "port", ",", "local_hostname", "=", "DNS_NAME", ".", "get_fqdn", "(", ")", ")", "if", "self", ".", "use_tls", ":", "self", ".", "connection", ".", "ehlo", "(", ")", "self", ".", "connection", ".", "starttls", "(", ")", "self", ".", "connection", ".", "ehlo", "(", ")", "if", "self", ".", "username", "and", "self", ".", "password", ":", "self", ".", "connection", ".", "login", "(", "self", ".", "username", ",", "self", ".", "password", ")", "return", "True", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "\"Error trying to connect to server %s:%s: %s\"", ",", "self", ".", "host", ",", "self", ".", "port", ",", "e", ",", ")", "if", "not", "self", ".", "fail_silently", ":", "raise" ]
Ensure we have a connection to the email server. Returns whether or not a new connection was required (True or False).
[ "Ensure", "we", "have", "a", "connection", "to", "the", "email", "server", ".", "Returns", "whether", "or", "not", "a", "new", "connection", "was", "required", "(", "True", "or", "False", ")", "." ]
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/msg/mail.py#L165-L194
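A sketch of the open()/close() lifecycle, assuming SMTPConnection's constructor keywords match those used by send_mail above; host and credentials are placeholders:

from tea.msg.mail import SMTPConnection

conn = SMTPConnection(host="smtp.example.com", port=587, username="user",
                      password="secret", use_tls=True, fail_silently=True)
if conn.open():      # True only when a fresh smtplib.SMTP session was created
    conn.open()      # already connected: returns False and does nothing
    conn.close()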
alefnula/tea
tea/msg/mail.py
SMTPConnection.close
def close(self):
    """Close the connection to the email server."""
    try:
        try:
            self.connection.quit()
        except socket.sslerror:
            # This happens when calling quit() on a TLS connection
            # sometimes.
            self.connection.close()
        except Exception as e:
            logger.error(
                "Error trying to close connection to server "
                "%s:%s: %s",
                self.host,
                self.port,
                e,
            )
            if self.fail_silently:
                return
            raise
    finally:
        self.connection = None
python
def close(self):
    """Close the connection to the email server."""
    try:
        try:
            self.connection.quit()
        except socket.sslerror:
            # This happens when calling quit() on a TLS connection
            # sometimes.
            self.connection.close()
        except Exception as e:
            logger.error(
                "Error trying to close connection to server "
                "%s:%s: %s",
                self.host,
                self.port,
                e,
            )
            if self.fail_silently:
                return
            raise
    finally:
        self.connection = None
[ "def", "close", "(", "self", ")", ":", "try", ":", "try", ":", "self", ".", "connection", ".", "quit", "(", ")", "except", "socket", ".", "sslerror", ":", "# This happens when calling quit() on a TLS connection\r", "# sometimes.\r", "self", ".", "connection", ".", "close", "(", ")", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "\"Error trying to close connection to server \"", "\"%s:%s: %s\"", ",", "self", ".", "host", ",", "self", ".", "port", ",", "e", ",", ")", "if", "self", ".", "fail_silently", ":", "return", "raise", "finally", ":", "self", ".", "connection", "=", "None" ]
Close the connection to the email server.
[ "Close", "the", "connection", "to", "the", "email", "server", "." ]
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/msg/mail.py#L196-L216
alefnula/tea
tea/msg/mail.py
SMTPConnection.send_messages
def send_messages(self, messages):
    """Send one or more EmailMessage objects.

    Returns:
        int: Number of email messages sent.
    """
    if not messages:
        return
    new_conn_created = self.open()
    if not self.connection:
        # We failed silently on open(). Trying to send would be pointless.
        return
    num_sent = 0
    for message in messages:
        sent = self._send(message)
        if sent:
            num_sent += 1
    if new_conn_created:
        self.close()
    return num_sent
python
def send_messages(self, messages):
    """Send one or more EmailMessage objects.

    Returns:
        int: Number of email messages sent.
    """
    if not messages:
        return
    new_conn_created = self.open()
    if not self.connection:
        # We failed silently on open(). Trying to send would be pointless.
        return
    num_sent = 0
    for message in messages:
        sent = self._send(message)
        if sent:
            num_sent += 1
    if new_conn_created:
        self.close()
    return num_sent
[ "def", "send_messages", "(", "self", ",", "messages", ")", ":", "if", "not", "messages", ":", "return", "new_conn_created", "=", "self", ".", "open", "(", ")", "if", "not", "self", ".", "connection", ":", "# We failed silently on open(). Trying to send would be pointless.\r", "return", "num_sent", "=", "0", "for", "message", "in", "messages", ":", "sent", "=", "self", ".", "_send", "(", "message", ")", "if", "sent", ":", "num_sent", "+=", "1", "if", "new_conn_created", ":", "self", ".", "close", "(", ")", "return", "num_sent" ]
Send one or more EmailMessage objects. Returns: int: Number of email messages sent.
[ "Send", "one", "or", "more", "EmailMessage", "objects", ".", "Returns", ":", "int", ":", "Number", "of", "email", "messages", "sent", "." ]
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/msg/mail.py#L218-L237
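A batch-sending sketch. The EmailMessage positional signature (subject, body, sender, to) is taken from the send_mass_mail record above; the addresses are placeholders:

from tea.msg.mail import EmailMessage, SMTPConnection

conn = SMTPConnection(host="localhost", fail_silently=True)
messages = [
    EmailMessage("Subject A", "Body A", "me@example.com", ["a@example.com"]),
    EmailMessage("Subject B", "Body B", "me@example.com", ["b@example.com"]),
]
# One connection serves the whole batch and is closed afterwards
# only if send_messages() itself opened it.
print(conn.send_messages(messages))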
alefnula/tea
tea/msg/mail.py
SMTPConnection._send
def _send(self, message):
    """Send an email.

    Helper method that does the actual sending.
    """
    if not message.recipients():
        return False
    try:
        self.connection.sendmail(
            message.sender,
            message.recipients(),
            message.message().as_string(),
        )
    except Exception as e:
        logger.error(
            "Error sending a message to server %s:%s: %s",
            self.host,
            self.port,
            e,
        )
        if not self.fail_silently:
            raise
        return False
    return True
python
def _send(self, message):
    """Send an email.

    Helper method that does the actual sending.
    """
    if not message.recipients():
        return False
    try:
        self.connection.sendmail(
            message.sender,
            message.recipients(),
            message.message().as_string(),
        )
    except Exception as e:
        logger.error(
            "Error sending a message to server %s:%s: %s",
            self.host,
            self.port,
            e,
        )
        if not self.fail_silently:
            raise
        return False
    return True
[ "def", "_send", "(", "self", ",", "message", ")", ":", "if", "not", "message", ".", "recipients", "(", ")", ":", "return", "False", "try", ":", "self", ".", "connection", ".", "sendmail", "(", "message", ".", "sender", ",", "message", ".", "recipients", "(", ")", ",", "message", ".", "message", "(", ")", ".", "as_string", "(", ")", ",", ")", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "\"Error sending a message to server %s:%s: %s\"", ",", "self", ".", "host", ",", "self", ".", "port", ",", "e", ",", ")", "if", "not", "self", ".", "fail_silently", ":", "raise", "return", "False", "return", "True" ]
Send an email. Helper method that does the actual sending.
[ "Send", "an", "email", ".", "Helper", "method", "that", "does", "the", "actual", "sending", "." ]
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/msg/mail.py#L239-L262
alefnula/tea
tea/msg/mail.py
EmailMessage.attach
def attach(self, filename=None, content=None, mimetype=None):
    """Attach a file with the given filename and content.

    The filename can be omitted (useful for multipart/alternative messages)
    and the mimetype is guessed, if not provided.

    If the first parameter is a MIMEBase subclass it is inserted directly
    into the resulting message attachments.
    """
    if isinstance(filename, MIMEBase):
        assert content is None and mimetype is None
        self.attachments.append(filename)
    elif content is None and os.path.isfile(filename):
        self.attach_file(filename, mimetype)
    else:
        assert content is not None
        self.attachments.append((filename, content, mimetype))
python
def attach(self, filename=None, content=None, mimetype=None):
    """Attach a file with the given filename and content.

    The filename can be omitted (useful for multipart/alternative messages)
    and the mimetype is guessed, if not provided.

    If the first parameter is a MIMEBase subclass it is inserted directly
    into the resulting message attachments.
    """
    if isinstance(filename, MIMEBase):
        assert content is None and mimetype is None
        self.attachments.append(filename)
    elif content is None and os.path.isfile(filename):
        self.attach_file(filename, mimetype)
    else:
        assert content is not None
        self.attachments.append((filename, content, mimetype))
[ "def", "attach", "(", "self", ",", "filename", "=", "None", ",", "content", "=", "None", ",", "mimetype", "=", "None", ")", ":", "if", "isinstance", "(", "filename", ",", "MIMEBase", ")", ":", "assert", "content", "is", "None", "and", "mimetype", "is", "None", "self", ".", "attachments", ".", "append", "(", "filename", ")", "elif", "content", "is", "None", "and", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "self", ".", "attach_file", "(", "filename", ",", "mimetype", ")", "else", ":", "assert", "content", "is", "not", "None", "self", ".", "attachments", ".", "append", "(", "(", "filename", ",", "content", ",", "mimetype", ")", ")" ]
Attach a file with the given filename and content. The filename can be omitted (useful for multipart/alternative messages) and the mimetype is guessed, if not provided. If the first parameter is a MIMEBase subclass it is inserted directly into the resulting message attachments.
[ "Attache", "a", "file", "with", "the", "given", "filename", "and", "content", ".", "The", "filename", "can", "be", "omitted", "(", "useful", "for", "multipart", "/", "alternative", "messages", ")", "and", "the", "mimetype", "is", "guessed", "if", "not", "provided", ".", "If", "the", "first", "parameter", "is", "a", "MIMEBase", "subclass", "it", "is", "inserted", "directly", "into", "the", "resulting", "message", "attachments", "." ]
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/msg/mail.py#L370-L386
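attach() dispatches on its arguments in three ways, sketched below. Assumptions: the import path is tea.msg.mail, and the referenced files (report.pdf, logo.png) exist next to the script; all names are illustrative.

from email.mime.image import MIMEImage
from tea.msg.mail import EmailMessage

email = EmailMessage("Report", "See attachments.", "me@example.com",
                     ["you@example.com"])

# 1. In-memory content with an explicit mimetype.
email.attach("notes.txt", content="plain text body", mimetype="text/plain")

# 2. A path to an existing file: content is read, mimetype guessed.
email.attach("report.pdf")

# 3. A prebuilt MIMEBase subclass is appended unchanged.
email.attach(MIMEImage(open("logo.png", "rb").read()))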
alefnula/tea
tea/msg/mail.py
EmailMessage.attach_file
def attach_file(self, path, mimetype=None):
    """Attach a file from the filesystem."""
    filename = os.path.basename(path)
    content = open(path, "rb").read()
    self.attach(filename, content, mimetype)
python
def attach_file(self, path, mimetype=None):
    """Attach a file from the filesystem."""
    filename = os.path.basename(path)
    content = open(path, "rb").read()
    self.attach(filename, content, mimetype)
[ "def", "attach_file", "(", "self", ",", "path", ",", "mimetype", "=", "None", ")", ":", "filename", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "content", "=", "open", "(", "path", ",", "\"rb\"", ")", ".", "read", "(", ")", "self", ".", "attach", "(", "filename", ",", "content", ",", "mimetype", ")" ]
Attach a file from the filesystem.
[ "Attache", "a", "file", "from", "the", "filesystem", "." ]
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/msg/mail.py#L388-L392
alefnula/tea
tea/msg/mail.py
EmailMessage._create_attachment
def _create_attachment(self, filename, content, mimetype=None):
    """Convert the filename, content, mimetype triple to attachment."""
    if mimetype is None:
        mimetype, _ = mimetypes.guess_type(filename)
        if mimetype is None:
            mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
    basetype, subtype = mimetype.split("/", 1)
    if basetype == "text":
        attachment = SafeMIMEText(
            smart_bytes(content, DEFAULT_CHARSET), subtype, DEFAULT_CHARSET
        )
    else:
        # Encode non-text attachments with base64.
        attachment = MIMEBase(basetype, subtype)
        attachment.set_payload(content)
        encode_base64(attachment)
    if filename:
        attachment.add_header(
            "Content-Disposition", "attachment", filename=filename
        )
    return attachment
python
def _create_attachment(self, filename, content, mimetype=None):
    """Convert the filename, content, mimetype triple to attachment."""
    if mimetype is None:
        mimetype, _ = mimetypes.guess_type(filename)
        if mimetype is None:
            mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
    basetype, subtype = mimetype.split("/", 1)
    if basetype == "text":
        attachment = SafeMIMEText(
            smart_bytes(content, DEFAULT_CHARSET), subtype, DEFAULT_CHARSET
        )
    else:
        # Encode non-text attachments with base64.
        attachment = MIMEBase(basetype, subtype)
        attachment.set_payload(content)
        encode_base64(attachment)
    if filename:
        attachment.add_header(
            "Content-Disposition", "attachment", filename=filename
        )
    return attachment
[ "def", "_create_attachment", "(", "self", ",", "filename", ",", "content", ",", "mimetype", "=", "None", ")", ":", "if", "mimetype", "is", "None", ":", "mimetype", ",", "_", "=", "mimetypes", ".", "guess_type", "(", "filename", ")", "if", "mimetype", "is", "None", ":", "mimetype", "=", "DEFAULT_ATTACHMENT_MIME_TYPE", "basetype", ",", "subtype", "=", "mimetype", ".", "split", "(", "\"/\"", ",", "1", ")", "if", "basetype", "==", "\"text\"", ":", "attachment", "=", "SafeMIMEText", "(", "smart_bytes", "(", "content", ",", "DEFAULT_CHARSET", ")", ",", "subtype", ",", "DEFAULT_CHARSET", ")", "else", ":", "# Encode non-text attachments with base64.\r", "attachment", "=", "MIMEBase", "(", "basetype", ",", "subtype", ")", "attachment", ".", "set_payload", "(", "content", ")", "encode_base64", "(", "attachment", ")", "if", "filename", ":", "attachment", ".", "add_header", "(", "\"Content-Disposition\"", ",", "\"attachment\"", ",", "filename", "=", "filename", ")", "return", "attachment" ]
Convert the filename, content, mimetype triple to attachment.
[ "Convert", "the", "filename", "content", "mimetype", "triple", "to", "attachment", "." ]
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/msg/mail.py#L394-L414
alefnula/tea
tea/msg/mail.py
EmailMultiAlternatives.attach_alternative
def attach_alternative(self, content, mimetype=None):
    """Attach an alternative content representation."""
    self.attach(content=content, mimetype=mimetype)
python
def attach_alternative(self, content, mimetype=None):
    """Attach an alternative content representation."""
    self.attach(content=content, mimetype=mimetype)
[ "def", "attach_alternative", "(", "self", ",", "content", ",", "mimetype", "=", "None", ")", ":", "self", ".", "attach", "(", "content", "=", "content", ",", "mimetype", "=", "mimetype", ")" ]
Attach an alternative content representation.
[ "Attach", "an", "alternative", "content", "representation", "." ]
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/msg/mail.py#L427-L429
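A multipart/alternative sketch, assuming EmailMultiAlternatives accepts the same positional arguments as EmailMessage and exposes the message() builder used by _send() above:

from tea.msg.mail import EmailMultiAlternatives

email = EmailMultiAlternatives("Hello", "Plain-text fallback",
                               "me@example.com", ["you@example.com"])
email.attach_alternative("<h1>Hello</h1>", "text/html")
print(email.message().as_string())  # renders the multipart/alternative MIME tree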
Robpol86/appveyor-artifacts
appveyor_artifacts.py
setup_logging
def setup_logging(verbose=False, logger=None):
    """Setup console logging. Info and below go to stdout, others go to stderr.

    :param bool verbose: Print debug statements.
    :param str logger: Which logger to set handlers to. Used for testing.
    """
    if not verbose:
        logging.getLogger('requests').setLevel(logging.WARNING)

    format_ = '%(asctime)s %(levelname)-8s %(name)-40s %(message)s' if verbose else '%(message)s'
    level = logging.DEBUG if verbose else logging.INFO

    handler_stdout = logging.StreamHandler(sys.stdout)
    handler_stdout.setFormatter(logging.Formatter(format_))
    handler_stdout.setLevel(logging.DEBUG)
    handler_stdout.addFilter(InfoFilter())

    handler_stderr = logging.StreamHandler(sys.stderr)
    handler_stderr.setFormatter(logging.Formatter(format_))
    handler_stderr.setLevel(logging.WARNING)

    root_logger = logging.getLogger(logger)
    root_logger.setLevel(level)
    root_logger.addHandler(handler_stdout)
    root_logger.addHandler(handler_stderr)
python
def setup_logging(verbose=False, logger=None):
    """Setup console logging. Info and below go to stdout, others go to stderr.

    :param bool verbose: Print debug statements.
    :param str logger: Which logger to set handlers to. Used for testing.
    """
    if not verbose:
        logging.getLogger('requests').setLevel(logging.WARNING)

    format_ = '%(asctime)s %(levelname)-8s %(name)-40s %(message)s' if verbose else '%(message)s'
    level = logging.DEBUG if verbose else logging.INFO

    handler_stdout = logging.StreamHandler(sys.stdout)
    handler_stdout.setFormatter(logging.Formatter(format_))
    handler_stdout.setLevel(logging.DEBUG)
    handler_stdout.addFilter(InfoFilter())

    handler_stderr = logging.StreamHandler(sys.stderr)
    handler_stderr.setFormatter(logging.Formatter(format_))
    handler_stderr.setLevel(logging.WARNING)

    root_logger = logging.getLogger(logger)
    root_logger.setLevel(level)
    root_logger.addHandler(handler_stdout)
    root_logger.addHandler(handler_stderr)
[ "def", "setup_logging", "(", "verbose", "=", "False", ",", "logger", "=", "None", ")", ":", "if", "not", "verbose", ":", "logging", ".", "getLogger", "(", "'requests'", ")", ".", "setLevel", "(", "logging", ".", "WARNING", ")", "format_", "=", "'%(asctime)s %(levelname)-8s %(name)-40s %(message)s'", "if", "verbose", "else", "'%(message)s'", "level", "=", "logging", ".", "DEBUG", "if", "verbose", "else", "logging", ".", "INFO", "handler_stdout", "=", "logging", ".", "StreamHandler", "(", "sys", ".", "stdout", ")", "handler_stdout", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "format_", ")", ")", "handler_stdout", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "handler_stdout", ".", "addFilter", "(", "InfoFilter", "(", ")", ")", "handler_stderr", "=", "logging", ".", "StreamHandler", "(", "sys", ".", "stderr", ")", "handler_stderr", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "format_", ")", ")", "handler_stderr", ".", "setLevel", "(", "logging", ".", "WARNING", ")", "root_logger", "=", "logging", ".", "getLogger", "(", "logger", ")", "root_logger", ".", "setLevel", "(", "level", ")", "root_logger", ".", "addHandler", "(", "handler_stdout", ")", "root_logger", ".", "addHandler", "(", "handler_stderr", ")" ]
Setup console logging. Info and below go to stdout, others go to stderr. :param bool verbose: Print debug statements. :param str logger: Which logger to set handlers to. Used for testing.
[ "Setup", "console", "logging", ".", "Info", "and", "below", "go", "to", "stdout", "others", "go", "to", "stderr", "." ]
train
https://github.com/Robpol86/appveyor-artifacts/blob/20bc2963b09f4142fd4c0b1f5da04f1105379e36/appveyor_artifacts.py#L92-L116
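A sketch of the stdout/stderr split, assuming the single-module script is importable as appveyor_artifacts (the path shown in the record):

import logging
from appveyor_artifacts import setup_logging

setup_logging(verbose=True)
log = logging.getLogger("demo")
log.info("goes to stdout (InfoFilter passes INFO and below)")
log.warning("goes to stderr (that handler starts at WARNING)")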
Robpol86/appveyor-artifacts
appveyor_artifacts.py
with_log
def with_log(func):
    """Automatically adds a named logger to a function upon function call.

    :param func: Function to decorate.

    :return: Decorated function.
    :rtype: function
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        """Inject `log` argument into wrapped function.

        :param list args: Pass through all positional arguments.
        :param dict kwargs: Pass through all keyword arguments.
        """
        decorator_logger = logging.getLogger('@with_log')
        decorator_logger.debug('Entering %s() function call.', func.__name__)
        log = kwargs.get('log', logging.getLogger(func.__name__))
        try:
            ret = func(log=log, *args, **kwargs)
        finally:
            decorator_logger.debug('Leaving %s() function call.', func.__name__)
        return ret
    return wrapper
python
def with_log(func):
    """Automatically adds a named logger to a function upon function call.

    :param func: Function to decorate.

    :return: Decorated function.
    :rtype: function
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        """Inject `log` argument into wrapped function.

        :param list args: Pass through all positional arguments.
        :param dict kwargs: Pass through all keyword arguments.
        """
        decorator_logger = logging.getLogger('@with_log')
        decorator_logger.debug('Entering %s() function call.', func.__name__)
        log = kwargs.get('log', logging.getLogger(func.__name__))
        try:
            ret = func(log=log, *args, **kwargs)
        finally:
            decorator_logger.debug('Leaving %s() function call.', func.__name__)
        return ret
    return wrapper
[ "def", "with_log", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Inject `log` argument into wrapped function.\n\n :param list args: Pass through all positional arguments.\n :param dict kwargs: Pass through all keyword arguments.\n \"\"\"", "decorator_logger", "=", "logging", ".", "getLogger", "(", "'@with_log'", ")", "decorator_logger", ".", "debug", "(", "'Entering %s() function call.'", ",", "func", ".", "__name__", ")", "log", "=", "kwargs", ".", "get", "(", "'log'", ",", "logging", ".", "getLogger", "(", "func", ".", "__name__", ")", ")", "try", ":", "ret", "=", "func", "(", "log", "=", "log", ",", "*", "args", ",", "*", "*", "kwargs", ")", "finally", ":", "decorator_logger", ".", "debug", "(", "'Leaving %s() function call.'", ",", "func", ".", "__name__", ")", "return", "ret", "return", "wrapper" ]
Automatically adds a named logger to a function upon function call. :param func: Function to decorate. :return: Decorated function. :rtype: function
[ "Automatically", "adds", "a", "named", "logger", "to", "a", "function", "upon", "function", "call", "." ]
train
https://github.com/Robpol86/appveyor-artifacts/blob/20bc2963b09f4142fd4c0b1f5da04f1105379e36/appveyor_artifacts.py#L119-L142
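The decorator injects a logger named after the wrapped function, so callers never pass one. A minimal sketch (greet is a hypothetical function, not from the source):

from appveyor_artifacts import with_log

@with_log
def greet(name, log):
    log.info("Hello %s!", name)  # logger is named 'greet'

greet("world")  # `log` is injected; entry/exit are logged at DEBUG level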
Robpol86/appveyor-artifacts
appveyor_artifacts.py
get_arguments
def get_arguments(argv=None, environ=None):
    """Get command line arguments or values from environment variables.

    :param list argv: Command line argument list to process. For testing.
    :param dict environ: Environment variables. For testing.

    :return: Parsed options.
    :rtype: dict
    """
    name = 'appveyor-artifacts'
    environ = environ or os.environ
    require = getattr(pkg_resources, 'require')  # Stupid linting error.
    commit, owner, pull_request, repo, tag = '', '', '', '', ''

    # Run docopt.
    project = [p for p in require(name) if p.project_name == name][0]
    version = project.version
    args = docopt(__doc__, argv=argv or sys.argv[1:], version=version)

    # Handle Travis environment variables.
    if environ.get('TRAVIS') == 'true':
        commit = environ.get('TRAVIS_COMMIT', '')
        owner = environ.get('TRAVIS_REPO_SLUG', '/').split('/')[0]
        pull_request = environ.get('TRAVIS_PULL_REQUEST', '')
        if pull_request == 'false':
            pull_request = ''
        repo = environ.get('TRAVIS_REPO_SLUG', '/').split('/')[1].replace('_', '-')
        tag = environ.get('TRAVIS_TAG', '')

    # Command line arguments override.
    commit = args['--commit'] or commit
    owner = args['--owner-name'] or owner
    pull_request = args['--pull-request'] or pull_request
    repo = args['--repo-name'] or repo
    tag = args['--tag-name'] or tag

    # Merge env variables and have command line args override.
    config = {
        'always_job_dirs': args['--always-job-dirs'],
        'commit': commit,
        'dir': args['--dir'] or '',
        'ignore_errors': args['--ignore-errors'],
        'job_name': args['--job-name'] or '',
        'mangle_coverage': args['--mangle-coverage'],
        'no_job_dirs': args['--no-job-dirs'] or '',
        'owner': owner,
        'pull_request': pull_request,
        'raise': args['--raise'],
        'repo': repo,
        'tag': tag,
        'verbose': args['--verbose'],
    }

    return config
python
def get_arguments(argv=None, environ=None):
    """Get command line arguments or values from environment variables.

    :param list argv: Command line argument list to process. For testing.
    :param dict environ: Environment variables. For testing.

    :return: Parsed options.
    :rtype: dict
    """
    name = 'appveyor-artifacts'
    environ = environ or os.environ
    require = getattr(pkg_resources, 'require')  # Stupid linting error.
    commit, owner, pull_request, repo, tag = '', '', '', '', ''

    # Run docopt.
    project = [p for p in require(name) if p.project_name == name][0]
    version = project.version
    args = docopt(__doc__, argv=argv or sys.argv[1:], version=version)

    # Handle Travis environment variables.
    if environ.get('TRAVIS') == 'true':
        commit = environ.get('TRAVIS_COMMIT', '')
        owner = environ.get('TRAVIS_REPO_SLUG', '/').split('/')[0]
        pull_request = environ.get('TRAVIS_PULL_REQUEST', '')
        if pull_request == 'false':
            pull_request = ''
        repo = environ.get('TRAVIS_REPO_SLUG', '/').split('/')[1].replace('_', '-')
        tag = environ.get('TRAVIS_TAG', '')

    # Command line arguments override.
    commit = args['--commit'] or commit
    owner = args['--owner-name'] or owner
    pull_request = args['--pull-request'] or pull_request
    repo = args['--repo-name'] or repo
    tag = args['--tag-name'] or tag

    # Merge env variables and have command line args override.
    config = {
        'always_job_dirs': args['--always-job-dirs'],
        'commit': commit,
        'dir': args['--dir'] or '',
        'ignore_errors': args['--ignore-errors'],
        'job_name': args['--job-name'] or '',
        'mangle_coverage': args['--mangle-coverage'],
        'no_job_dirs': args['--no-job-dirs'] or '',
        'owner': owner,
        'pull_request': pull_request,
        'raise': args['--raise'],
        'repo': repo,
        'tag': tag,
        'verbose': args['--verbose'],
    }

    return config
[ "def", "get_arguments", "(", "argv", "=", "None", ",", "environ", "=", "None", ")", ":", "name", "=", "'appveyor-artifacts'", "environ", "=", "environ", "or", "os", ".", "environ", "require", "=", "getattr", "(", "pkg_resources", ",", "'require'", ")", "# Stupid linting error.", "commit", ",", "owner", ",", "pull_request", ",", "repo", ",", "tag", "=", "''", ",", "''", ",", "''", ",", "''", ",", "''", "# Run docopt.", "project", "=", "[", "p", "for", "p", "in", "require", "(", "name", ")", "if", "p", ".", "project_name", "==", "name", "]", "[", "0", "]", "version", "=", "project", ".", "version", "args", "=", "docopt", "(", "__doc__", ",", "argv", "=", "argv", "or", "sys", ".", "argv", "[", "1", ":", "]", ",", "version", "=", "version", ")", "# Handle Travis environment variables.", "if", "environ", ".", "get", "(", "'TRAVIS'", ")", "==", "'true'", ":", "commit", "=", "environ", ".", "get", "(", "'TRAVIS_COMMIT'", ",", "''", ")", "owner", "=", "environ", ".", "get", "(", "'TRAVIS_REPO_SLUG'", ",", "'/'", ")", ".", "split", "(", "'/'", ")", "[", "0", "]", "pull_request", "=", "environ", ".", "get", "(", "'TRAVIS_PULL_REQUEST'", ",", "''", ")", "if", "pull_request", "==", "'false'", ":", "pull_request", "=", "''", "repo", "=", "environ", ".", "get", "(", "'TRAVIS_REPO_SLUG'", ",", "'/'", ")", ".", "split", "(", "'/'", ")", "[", "1", "]", ".", "replace", "(", "'_'", ",", "'-'", ")", "tag", "=", "environ", ".", "get", "(", "'TRAVIS_TAG'", ",", "''", ")", "# Command line arguments override.", "commit", "=", "args", "[", "'--commit'", "]", "or", "commit", "owner", "=", "args", "[", "'--owner-name'", "]", "or", "owner", "pull_request", "=", "args", "[", "'--pull-request'", "]", "or", "pull_request", "repo", "=", "args", "[", "'--repo-name'", "]", "or", "repo", "tag", "=", "args", "[", "'--tag-name'", "]", "or", "tag", "# Merge env variables and have command line args override.", "config", "=", "{", "'always_job_dirs'", ":", "args", "[", "'--always-job-dirs'", "]", ",", "'commit'", ":", "commit", ",", "'dir'", ":", "args", "[", "'--dir'", "]", "or", "''", ",", "'ignore_errors'", ":", "args", "[", "'--ignore-errors'", "]", ",", "'job_name'", ":", "args", "[", "'--job-name'", "]", "or", "''", ",", "'mangle_coverage'", ":", "args", "[", "'--mangle-coverage'", "]", ",", "'no_job_dirs'", ":", "args", "[", "'--no-job-dirs'", "]", "or", "''", ",", "'owner'", ":", "owner", ",", "'pull_request'", ":", "pull_request", ",", "'raise'", ":", "args", "[", "'--raise'", "]", ",", "'repo'", ":", "repo", ",", "'tag'", ":", "tag", ",", "'verbose'", ":", "args", "[", "'--verbose'", "]", ",", "}", "return", "config" ]
Get command line arguments or values from environment variables. :param list argv: Command line argument list to process. For testing. :param dict environ: Environment variables. For testing. :return: Parsed options. :rtype: dict
[ "Get", "command", "line", "arguments", "or", "values", "from", "environment", "variables", "." ]
train
https://github.com/Robpol86/appveyor-artifacts/blob/20bc2963b09f4142fd4c0b1f5da04f1105379e36/appveyor_artifacts.py#L145-L198
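A sketch of the precedence rules (Travis variables supply defaults, explicit flags win). Assumptions: the appveyor-artifacts package is installed, since the function resolves its own version via pkg_resources; the SHA is a placeholder.

from appveyor_artifacts import get_arguments

config = get_arguments(
    argv=["--verbose"],
    environ={
        "TRAVIS": "true",
        "TRAVIS_REPO_SLUG": "Robpol86/appveyor-artifacts",
        "TRAVIS_COMMIT": "0" * 40,        # placeholder SHA
        "TRAVIS_PULL_REQUEST": "false",   # treated as "not a PR"
    },
)
print(config["owner"], config["repo"], config["verbose"])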
Robpol86/appveyor-artifacts
appveyor_artifacts.py
query_api
def query_api(endpoint, log):
    """Query the AppVeyor API.

    :raise HandledError: On non HTTP200 responses or invalid JSON response.

    :param str endpoint: API endpoint to query (e.g. '/projects/Robpol86/appveyor-artifacts').
    :param logging.Logger log: Logger for this function. Populated by with_log() decorator.

    :return: Parsed JSON response.
    :rtype: dict
    """
    url = API_PREFIX + endpoint
    headers = {'content-type': 'application/json'}
    response = None
    log.debug('Querying %s with headers %s.', url, headers)
    for i in range(QUERY_ATTEMPTS):
        try:
            try:
                response = requests.get(url, headers=headers, timeout=10)
            except (requests.exceptions.ConnectTimeout, requests.exceptions.ReadTimeout, requests.Timeout):
                log.error('Timed out waiting for reply from server.')
                raise HandledError
            except requests.ConnectionError:
                log.error('Unable to connect to server.')
                raise HandledError
        except HandledError:
            if i == QUERY_ATTEMPTS - 1:
                raise
            log.warning('Network error, retrying in 1 second...')
            time.sleep(1)
        else:
            break
    log.debug('Response status: %d', response.status_code)
    log.debug('Response headers: %s', str(response.headers))
    log.debug('Response text: %s', response.text)
    if not response.ok:
        message = response.json().get('message')
        if message:
            log.error('HTTP %d: %s', response.status_code, message)
        else:
            log.error('HTTP %d: Unknown error: %s', response.status_code, response.text)
        raise HandledError
    try:
        return response.json()
    except ValueError:
        log.error('Failed to parse JSON: %s', response.text)
        raise HandledError
python
def query_api(endpoint, log):
    """Query the AppVeyor API.

    :raise HandledError: On non HTTP200 responses or invalid JSON response.

    :param str endpoint: API endpoint to query (e.g. '/projects/Robpol86/appveyor-artifacts').
    :param logging.Logger log: Logger for this function. Populated by with_log() decorator.

    :return: Parsed JSON response.
    :rtype: dict
    """
    url = API_PREFIX + endpoint
    headers = {'content-type': 'application/json'}
    response = None
    log.debug('Querying %s with headers %s.', url, headers)
    for i in range(QUERY_ATTEMPTS):
        try:
            try:
                response = requests.get(url, headers=headers, timeout=10)
            except (requests.exceptions.ConnectTimeout, requests.exceptions.ReadTimeout, requests.Timeout):
                log.error('Timed out waiting for reply from server.')
                raise HandledError
            except requests.ConnectionError:
                log.error('Unable to connect to server.')
                raise HandledError
        except HandledError:
            if i == QUERY_ATTEMPTS - 1:
                raise
            log.warning('Network error, retrying in 1 second...')
            time.sleep(1)
        else:
            break
    log.debug('Response status: %d', response.status_code)
    log.debug('Response headers: %s', str(response.headers))
    log.debug('Response text: %s', response.text)
    if not response.ok:
        message = response.json().get('message')
        if message:
            log.error('HTTP %d: %s', response.status_code, message)
        else:
            log.error('HTTP %d: Unknown error: %s', response.status_code, response.text)
        raise HandledError
    try:
        return response.json()
    except ValueError:
        log.error('Failed to parse JSON: %s', response.text)
        raise HandledError
[ "def", "query_api", "(", "endpoint", ",", "log", ")", ":", "url", "=", "API_PREFIX", "+", "endpoint", "headers", "=", "{", "'content-type'", ":", "'application/json'", "}", "response", "=", "None", "log", ".", "debug", "(", "'Querying %s with headers %s.'", ",", "url", ",", "headers", ")", "for", "i", "in", "range", "(", "QUERY_ATTEMPTS", ")", ":", "try", ":", "try", ":", "response", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ",", "timeout", "=", "10", ")", "except", "(", "requests", ".", "exceptions", ".", "ConnectTimeout", ",", "requests", ".", "exceptions", ".", "ReadTimeout", ",", "requests", ".", "Timeout", ")", ":", "log", ".", "error", "(", "'Timed out waiting for reply from server.'", ")", "raise", "HandledError", "except", "requests", ".", "ConnectionError", ":", "log", ".", "error", "(", "'Unable to connect to server.'", ")", "raise", "HandledError", "except", "HandledError", ":", "if", "i", "==", "QUERY_ATTEMPTS", "-", "1", ":", "raise", "log", ".", "warning", "(", "'Network error, retrying in 1 second...'", ")", "time", ".", "sleep", "(", "1", ")", "else", ":", "break", "log", ".", "debug", "(", "'Response status: %d'", ",", "response", ".", "status_code", ")", "log", ".", "debug", "(", "'Response headers: %s'", ",", "str", "(", "response", ".", "headers", ")", ")", "log", ".", "debug", "(", "'Response text: %s'", ",", "response", ".", "text", ")", "if", "not", "response", ".", "ok", ":", "message", "=", "response", ".", "json", "(", ")", ".", "get", "(", "'message'", ")", "if", "message", ":", "log", ".", "error", "(", "'HTTP %d: %s'", ",", "response", ".", "status_code", ",", "message", ")", "else", ":", "log", ".", "error", "(", "'HTTP %d: Unknown error: %s'", ",", "response", ".", "status_code", ",", "response", ".", "text", ")", "raise", "HandledError", "try", ":", "return", "response", ".", "json", "(", ")", "except", "ValueError", ":", "log", ".", "error", "(", "'Failed to parse JSON: %s'", ",", "response", ".", "text", ")", "raise", "HandledError" ]
Query the AppVeyor API. :raise HandledError: On non HTTP200 responses or invalid JSON response. :param str endpoint: API endpoint to query (e.g. '/projects/Robpol86/appveyor-artifacts'). :param logging.Logger log: Logger for this function. Populated by with_log() decorator. :return: Parsed JSON response. :rtype: dict
[ "Query", "the", "AppVeyor", "API", "." ]
train
https://github.com/Robpol86/appveyor-artifacts/blob/20bc2963b09f4142fd4c0b1f5da04f1105379e36/appveyor_artifacts.py#L202-L250
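Callers elsewhere in this module invoke query_api(url) without a log argument, implying it is wrapped by @with_log. A sketch of one GET with the built-in retry loop; it needs network access to api.appveyor.com:

from appveyor_artifacts import HandledError, query_api

try:
    data = query_api("/projects/Robpol86/appveyor-artifacts/history?recordsNumber=10")
    for build in data.get("builds", []):
        print(build.get("version"), build.get("status"))
except HandledError:
    print("request failed after retries; details were already logged")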
Robpol86/appveyor-artifacts
appveyor_artifacts.py
validate
def validate(config, log):
    """Validate config values.

    :raise HandledError: On invalid config values.

    :param dict config: Dictionary from get_arguments().
    :param logging.Logger log: Logger for this function. Populated by with_log() decorator.
    """
    if config['always_job_dirs'] and config['no_job_dirs']:
        log.error('Contradiction: --always-job-dirs and --no-job-dirs used.')
        raise HandledError
    if config['commit'] and not REGEX_COMMIT.match(config['commit']):
        log.error('No or invalid git commit obtained.')
        raise HandledError
    if config['dir'] and not os.path.isdir(config['dir']):
        log.error("Not a directory or doesn't exist: %s", config['dir'])
        raise HandledError
    if config['no_job_dirs'] not in ('', 'rename', 'overwrite', 'skip'):
        log.error('--no-job-dirs has invalid value. Check --help for valid values.')
        raise HandledError
    if not config['owner'] or not REGEX_GENERAL.match(config['owner']):
        log.error('No or invalid repo owner name obtained.')
        raise HandledError
    if config['pull_request'] and not config['pull_request'].isdigit():
        log.error('--pull-request is not a digit.')
        raise HandledError
    if not config['repo'] or not REGEX_GENERAL.match(config['repo']):
        log.error('No or invalid repo name obtained.')
        raise HandledError
    if config['tag'] and not REGEX_GENERAL.match(config['tag']):
        log.error('Invalid git tag obtained.')
        raise HandledError
python
def validate(config, log):
    """Validate config values.

    :raise HandledError: On invalid config values.

    :param dict config: Dictionary from get_arguments().
    :param logging.Logger log: Logger for this function. Populated by with_log() decorator.
    """
    if config['always_job_dirs'] and config['no_job_dirs']:
        log.error('Contradiction: --always-job-dirs and --no-job-dirs used.')
        raise HandledError
    if config['commit'] and not REGEX_COMMIT.match(config['commit']):
        log.error('No or invalid git commit obtained.')
        raise HandledError
    if config['dir'] and not os.path.isdir(config['dir']):
        log.error("Not a directory or doesn't exist: %s", config['dir'])
        raise HandledError
    if config['no_job_dirs'] not in ('', 'rename', 'overwrite', 'skip'):
        log.error('--no-job-dirs has invalid value. Check --help for valid values.')
        raise HandledError
    if not config['owner'] or not REGEX_GENERAL.match(config['owner']):
        log.error('No or invalid repo owner name obtained.')
        raise HandledError
    if config['pull_request'] and not config['pull_request'].isdigit():
        log.error('--pull-request is not a digit.')
        raise HandledError
    if not config['repo'] or not REGEX_GENERAL.match(config['repo']):
        log.error('No or invalid repo name obtained.')
        raise HandledError
    if config['tag'] and not REGEX_GENERAL.match(config['tag']):
        log.error('Invalid git tag obtained.')
        raise HandledError
[ "def", "validate", "(", "config", ",", "log", ")", ":", "if", "config", "[", "'always_job_dirs'", "]", "and", "config", "[", "'no_job_dirs'", "]", ":", "log", ".", "error", "(", "'Contradiction: --always-job-dirs and --no-job-dirs used.'", ")", "raise", "HandledError", "if", "config", "[", "'commit'", "]", "and", "not", "REGEX_COMMIT", ".", "match", "(", "config", "[", "'commit'", "]", ")", ":", "log", ".", "error", "(", "'No or invalid git commit obtained.'", ")", "raise", "HandledError", "if", "config", "[", "'dir'", "]", "and", "not", "os", ".", "path", ".", "isdir", "(", "config", "[", "'dir'", "]", ")", ":", "log", ".", "error", "(", "\"Not a directory or doesn't exist: %s\"", ",", "config", "[", "'dir'", "]", ")", "raise", "HandledError", "if", "config", "[", "'no_job_dirs'", "]", "not", "in", "(", "''", ",", "'rename'", ",", "'overwrite'", ",", "'skip'", ")", ":", "log", ".", "error", "(", "'--no-job-dirs has invalid value. Check --help for valid values.'", ")", "raise", "HandledError", "if", "not", "config", "[", "'owner'", "]", "or", "not", "REGEX_GENERAL", ".", "match", "(", "config", "[", "'owner'", "]", ")", ":", "log", ".", "error", "(", "'No or invalid repo owner name obtained.'", ")", "raise", "HandledError", "if", "config", "[", "'pull_request'", "]", "and", "not", "config", "[", "'pull_request'", "]", ".", "isdigit", "(", ")", ":", "log", ".", "error", "(", "'--pull-request is not a digit.'", ")", "raise", "HandledError", "if", "not", "config", "[", "'repo'", "]", "or", "not", "REGEX_GENERAL", ".", "match", "(", "config", "[", "'repo'", "]", ")", ":", "log", ".", "error", "(", "'No or invalid repo name obtained.'", ")", "raise", "HandledError", "if", "config", "[", "'tag'", "]", "and", "not", "REGEX_GENERAL", ".", "match", "(", "config", "[", "'tag'", "]", ")", ":", "log", ".", "error", "(", "'Invalid git tag obtained.'", ")", "raise", "HandledError" ]
Validate config values. :raise HandledError: On invalid config values. :param dict config: Dictionary from get_arguments(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator.
[ "Validate", "config", "values", "." ]
train
https://github.com/Robpol86/appveyor-artifacts/blob/20bc2963b09f4142fd4c0b1f5da04f1105379e36/appveyor_artifacts.py#L254-L285
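A sketch of the fail-fast pattern around validate(), assuming it too is wrapped by @with_log like its siblings (so `log` need not be passed) and that the package is installed for get_arguments():

from appveyor_artifacts import HandledError, get_arguments, validate

config = get_arguments(argv=["--owner-name", "Robpol86",
                             "--repo-name", "appveyor-artifacts",
                             "--commit", "0" * 40])
try:
    validate(config)
except HandledError:
    raise SystemExit(1)  # the offending value was already logged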
Robpol86/appveyor-artifacts
appveyor_artifacts.py
query_build_version
def query_build_version(config, log):
    """Find the build version we're looking for.

    AppVeyor calls build IDs "versions" which is confusing but whatever. Job IDs aren't available in the history query,
    only on latest, specific version, and deployment queries. Hence we need two queries to get a one-time status update.

    Returns None if the job isn't queued yet.

    :raise HandledError: On invalid JSON data.

    :param dict config: Dictionary from get_arguments().
    :param logging.Logger log: Logger for this function. Populated by with_log() decorator.

    :return: Build version.
    :rtype: str
    """
    url = '/projects/{0}/{1}/history?recordsNumber=10'.format(config['owner'], config['repo'])

    # Query history.
    log.debug('Querying AppVeyor history API for %s/%s...', config['owner'], config['repo'])
    json_data = query_api(url)
    if 'builds' not in json_data:
        log.error('Bad JSON reply: "builds" key missing.')
        raise HandledError

    # Find AppVeyor build "version".
    for build in json_data['builds']:
        if config['tag'] and config['tag'] == build.get('tag'):
            log.debug('This is a tag build.')
        elif config['pull_request'] and config['pull_request'] == build.get('pullRequestId'):
            log.debug('This is a pull request build.')
        elif config['commit'] == build['commitId']:
            log.debug('This is a branch build.')
        else:
            continue
        log.debug('Build JSON dict: %s', str(build))
        return build['version']

    return None
python
def query_build_version(config, log):
    """Find the build version we're looking for.

    AppVeyor calls build IDs "versions" which is confusing but whatever. Job IDs aren't available in the history query,
    only on latest, specific version, and deployment queries. Hence we need two queries to get a one-time status update.

    Returns None if the job isn't queued yet.

    :raise HandledError: On invalid JSON data.

    :param dict config: Dictionary from get_arguments().
    :param logging.Logger log: Logger for this function. Populated by with_log() decorator.

    :return: Build version.
    :rtype: str
    """
    url = '/projects/{0}/{1}/history?recordsNumber=10'.format(config['owner'], config['repo'])

    # Query history.
    log.debug('Querying AppVeyor history API for %s/%s...', config['owner'], config['repo'])
    json_data = query_api(url)
    if 'builds' not in json_data:
        log.error('Bad JSON reply: "builds" key missing.')
        raise HandledError

    # Find AppVeyor build "version".
    for build in json_data['builds']:
        if config['tag'] and config['tag'] == build.get('tag'):
            log.debug('This is a tag build.')
        elif config['pull_request'] and config['pull_request'] == build.get('pullRequestId'):
            log.debug('This is a pull request build.')
        elif config['commit'] == build['commitId']:
            log.debug('This is a branch build.')
        else:
            continue
        log.debug('Build JSON dict: %s', str(build))
        return build['version']

    return None
[ "def", "query_build_version", "(", "config", ",", "log", ")", ":", "url", "=", "'/projects/{0}/{1}/history?recordsNumber=10'", ".", "format", "(", "config", "[", "'owner'", "]", ",", "config", "[", "'repo'", "]", ")", "# Query history.", "log", ".", "debug", "(", "'Querying AppVeyor history API for %s/%s...'", ",", "config", "[", "'owner'", "]", ",", "config", "[", "'repo'", "]", ")", "json_data", "=", "query_api", "(", "url", ")", "if", "'builds'", "not", "in", "json_data", ":", "log", ".", "error", "(", "'Bad JSON reply: \"builds\" key missing.'", ")", "raise", "HandledError", "# Find AppVeyor build \"version\".", "for", "build", "in", "json_data", "[", "'builds'", "]", ":", "if", "config", "[", "'tag'", "]", "and", "config", "[", "'tag'", "]", "==", "build", ".", "get", "(", "'tag'", ")", ":", "log", ".", "debug", "(", "'This is a tag build.'", ")", "elif", "config", "[", "'pull_request'", "]", "and", "config", "[", "'pull_request'", "]", "==", "build", ".", "get", "(", "'pullRequestId'", ")", ":", "log", ".", "debug", "(", "'This is a pull request build.'", ")", "elif", "config", "[", "'commit'", "]", "==", "build", "[", "'commitId'", "]", ":", "log", ".", "debug", "(", "'This is a branch build.'", ")", "else", ":", "continue", "log", ".", "debug", "(", "'Build JSON dict: %s'", ",", "str", "(", "build", ")", ")", "return", "build", "[", "'version'", "]", "return", "None" ]
Find the build version we're looking for. AppVeyor calls build IDs "versions" which is confusing but whatever. Job IDs aren't available in the history query, only on latest, specific version, and deployment queries. Hence we need two queries to get a one-time status update. Returns None if the job isn't queued yet. :raise HandledError: On invalid JSON data. :param dict config: Dictionary from get_arguments(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator. :return: Build version. :rtype: str
[ "Find", "the", "build", "version", "we", "re", "looking", "for", "." ]
train
https://github.com/Robpol86/appveyor-artifacts/blob/20bc2963b09f4142fd4c0b1f5da04f1105379e36/appveyor_artifacts.py#L289-L326
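A sketch of the first of the two queries, with a hand-built config (only the keys this function and query_job_ids read) and the @with_log assumption from above; the SHA is a placeholder:

from appveyor_artifacts import query_build_version

config = {"owner": "Robpol86", "repo": "appveyor-artifacts",
          "commit": "0" * 40,  # placeholder; use the real commit SHA
          "tag": "", "pull_request": "", "job_name": ""}
version = query_build_version(config)
print(version or "build not queued yet")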
Robpol86/appveyor-artifacts
appveyor_artifacts.py
query_job_ids
def query_job_ids(build_version, config, log):
    """Get one or more job IDs and their status associated with a build version.

    Filters jobs by name if --job-name is specified.

    :raise HandledError: On invalid JSON data or bad job name.

    :param str build_version: AppVeyor build version from query_build_version().
    :param dict config: Dictionary from get_arguments().
    :param logging.Logger log: Logger for this function. Populated by with_log() decorator.

    :return: List of two-item tuples. Job ID (first) and its status (second).
    :rtype: list
    """
    url = '/projects/{0}/{1}/build/{2}'.format(config['owner'], config['repo'], build_version)

    # Query version.
    log.debug('Querying AppVeyor version API for %s/%s at %s...', config['owner'], config['repo'], build_version)
    json_data = query_api(url)
    if 'build' not in json_data:
        log.error('Bad JSON reply: "build" key missing.')
        raise HandledError
    if 'jobs' not in json_data['build']:
        log.error('Bad JSON reply: "jobs" key missing.')
        raise HandledError

    # Find AppVeyor job.
    all_jobs = list()
    for job in json_data['build']['jobs']:
        if config['job_name'] and config['job_name'] == job['name']:
            log.debug('Filtering by job name: found match!')
            return [(job['jobId'], job['status'])]
        all_jobs.append((job['jobId'], job['status']))
    if config['job_name']:
        log.error('Job name "%s" not found.', config['job_name'])
        raise HandledError
    return all_jobs
python
def query_job_ids(build_version, config, log):
    """Get one or more job IDs and their status associated with a build version.

    Filters jobs by name if --job-name is specified.

    :raise HandledError: On invalid JSON data or bad job name.

    :param str build_version: AppVeyor build version from query_build_version().
    :param dict config: Dictionary from get_arguments().
    :param logging.Logger log: Logger for this function. Populated by with_log() decorator.

    :return: List of two-item tuples. Job ID (first) and its status (second).
    :rtype: list
    """
    url = '/projects/{0}/{1}/build/{2}'.format(config['owner'], config['repo'], build_version)

    # Query version.
    log.debug('Querying AppVeyor version API for %s/%s at %s...', config['owner'], config['repo'], build_version)
    json_data = query_api(url)
    if 'build' not in json_data:
        log.error('Bad JSON reply: "build" key missing.')
        raise HandledError
    if 'jobs' not in json_data['build']:
        log.error('Bad JSON reply: "jobs" key missing.')
        raise HandledError

    # Find AppVeyor job.
    all_jobs = list()
    for job in json_data['build']['jobs']:
        if config['job_name'] and config['job_name'] == job['name']:
            log.debug('Filtering by job name: found match!')
            return [(job['jobId'], job['status'])]
        all_jobs.append((job['jobId'], job['status']))
    if config['job_name']:
        log.error('Job name "%s" not found.', config['job_name'])
        raise HandledError
    return all_jobs
[ "def", "query_job_ids", "(", "build_version", ",", "config", ",", "log", ")", ":", "url", "=", "'/projects/{0}/{1}/build/{2}'", ".", "format", "(", "config", "[", "'owner'", "]", ",", "config", "[", "'repo'", "]", ",", "build_version", ")", "# Query version.", "log", ".", "debug", "(", "'Querying AppVeyor version API for %s/%s at %s...'", ",", "config", "[", "'owner'", "]", ",", "config", "[", "'repo'", "]", ",", "build_version", ")", "json_data", "=", "query_api", "(", "url", ")", "if", "'build'", "not", "in", "json_data", ":", "log", ".", "error", "(", "'Bad JSON reply: \"build\" key missing.'", ")", "raise", "HandledError", "if", "'jobs'", "not", "in", "json_data", "[", "'build'", "]", ":", "log", ".", "error", "(", "'Bad JSON reply: \"jobs\" key missing.'", ")", "raise", "HandledError", "# Find AppVeyor job.", "all_jobs", "=", "list", "(", ")", "for", "job", "in", "json_data", "[", "'build'", "]", "[", "'jobs'", "]", ":", "if", "config", "[", "'job_name'", "]", "and", "config", "[", "'job_name'", "]", "==", "job", "[", "'name'", "]", ":", "log", ".", "debug", "(", "'Filtering by job name: found match!'", ")", "return", "[", "(", "job", "[", "'jobId'", "]", ",", "job", "[", "'status'", "]", ")", "]", "all_jobs", ".", "append", "(", "(", "job", "[", "'jobId'", "]", ",", "job", "[", "'status'", "]", ")", ")", "if", "config", "[", "'job_name'", "]", ":", "log", ".", "error", "(", "'Job name \"%s\" not found.'", ",", "config", "[", "'job_name'", "]", ")", "raise", "HandledError", "return", "all_jobs" ]
Get one or more job IDs and their status associated with a build version. Filters jobs by name if --job-name is specified. :raise HandledError: On invalid JSON data or bad job name. :param str build_version: AppVeyor build version from query_build_version(). :param dict config: Dictionary from get_arguments(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator. :return: List of two-item tuples. Job ID (first) and its status (second). :rtype: list
[ "Get", "one", "or", "more", "job", "IDs", "and", "their", "status", "associated", "with", "a", "build", "version", "." ]
train
https://github.com/Robpol86/appveyor-artifacts/blob/20bc2963b09f4142fd4c0b1f5da04f1105379e36/appveyor_artifacts.py#L330-L366
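The second query resolves the build version into job IDs. Continuing the previous sketch (`config` and `version` as defined there):

from appveyor_artifacts import query_job_ids

for job_id, status in query_job_ids(version, config):
    print(job_id, status)  # empty config["job_name"] means: return all jobs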
Robpol86/appveyor-artifacts
appveyor_artifacts.py
query_artifacts
def query_artifacts(job_ids, log):
    """Query API again for artifacts.

    :param iter job_ids: List of AppVeyor jobIDs.
    :param logging.Logger log: Logger for this function. Populated by with_log() decorator.

    :return: List of tuples: (job ID, artifact file name, artifact file size).
    :rtype: list
    """
    jobs_artifacts = list()
    for job in job_ids:
        url = '/buildjobs/{0}/artifacts'.format(job)
        log.debug('Querying AppVeyor artifact API for %s...', job)
        json_data = query_api(url)
        for artifact in json_data:
            jobs_artifacts.append((job, artifact['fileName'], artifact['size']))
    return jobs_artifacts
python
def query_artifacts(job_ids, log):
    """Query API again for artifacts.

    :param iter job_ids: List of AppVeyor jobIDs.
    :param logging.Logger log: Logger for this function. Populated by with_log() decorator.

    :return: List of tuples: (job ID, artifact file name, artifact file size).
    :rtype: list
    """
    jobs_artifacts = list()
    for job in job_ids:
        url = '/buildjobs/{0}/artifacts'.format(job)
        log.debug('Querying AppVeyor artifact API for %s...', job)
        json_data = query_api(url)
        for artifact in json_data:
            jobs_artifacts.append((job, artifact['fileName'], artifact['size']))
    return jobs_artifacts
[ "def", "query_artifacts", "(", "job_ids", ",", "log", ")", ":", "jobs_artifacts", "=", "list", "(", ")", "for", "job", "in", "job_ids", ":", "url", "=", "'/buildjobs/{0}/artifacts'", ".", "format", "(", "job", ")", "log", ".", "debug", "(", "'Querying AppVeyor artifact API for %s...'", ",", "job", ")", "json_data", "=", "query_api", "(", "url", ")", "for", "artifact", "in", "json_data", ":", "jobs_artifacts", ".", "append", "(", "(", "job", ",", "artifact", "[", "'fileName'", "]", ",", "artifact", "[", "'size'", "]", ")", ")", "return", "jobs_artifacts" ]
Query API again for artifacts. :param iter job_ids: List of AppVeyor jobIDs. :param logging.Logger log: Logger for this function. Populated by with_log() decorator. :return: List of tuples: (job ID, artifact file name, artifact file size). :rtype: list
[ "Query", "API", "again", "for", "artifacts", "." ]
train
https://github.com/Robpol86/appveyor-artifacts/blob/20bc2963b09f4142fd4c0b1f5da04f1105379e36/appveyor_artifacts.py#L370-L386
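To see the flattening step in query_artifacts without hitting the AppVeyor API, here is a sketch with a stubbed query_api; the payloads and job ID are invented.

```python
# Stubbed query_api returning a hypothetical artifact listing, so the
# flattening loop from query_artifacts can run without the network.
def fake_query_api(url):
    return [{"fileName": "dist/pkg.whl", "size": 1024},
            {"fileName": ".coverage", "size": 2048}]

def collect_artifacts(job_ids, query_api):
    jobs_artifacts = []
    for job in job_ids:
        url = '/buildjobs/{0}/artifacts'.format(job)
        for artifact in query_api(url):
            jobs_artifacts.append((job, artifact["fileName"], artifact["size"]))
    return jobs_artifacts

print(collect_artifacts(["abc123"], fake_query_api))
# [('abc123', 'dist/pkg.whl', 1024), ('abc123', '.coverage', 2048)]
```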
Robpol86/appveyor-artifacts
appveyor_artifacts.py
artifacts_urls
def artifacts_urls(config, jobs_artifacts, log): """Determine destination file paths for job artifacts. :param dict config: Dictionary from get_arguments(). :param iter jobs_artifacts: List of job artifacts from query_artifacts(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator. :return: Destination file paths (keys), download URLs (value[0]), and expected file size (value[1]). :rtype: dict """ artifacts = dict() # Determine if we should create job ID directories. if config['always_job_dirs']: job_dirs = True elif config['no_job_dirs']: job_dirs = False elif len(set(i[0] for i in jobs_artifacts)) == 1: log.debug('Only one job ID, automatically setting job_dirs = False.') job_dirs = False elif len(set(i[1] for i in jobs_artifacts)) == len(jobs_artifacts): log.debug('No local file conflicts, automatically setting job_dirs = False') job_dirs = False else: log.debug('Multiple job IDs with file conflicts, automatically setting job_dirs = True') job_dirs = True # Get final URLs and destination file paths. root_dir = config['dir'] or os.getcwd() for job, file_name, size in jobs_artifacts: artifact_url = '{0}/buildjobs/{1}/artifacts/{2}'.format(API_PREFIX, job, file_name) artifact_local = os.path.join(root_dir, job if job_dirs else '', file_name) if artifact_local in artifacts: if config['no_job_dirs'] == 'skip': log.debug('Skipping %s from %s', artifact_local, artifact_url) continue if config['no_job_dirs'] == 'rename': new_name = artifact_local while new_name in artifacts: path, ext = os.path.splitext(new_name) new_name = (path + '_' + ext) if ext else (new_name + '_') log.debug('Renaming %s to %s from %s', artifact_local, new_name, artifact_url) artifact_local = new_name elif config['no_job_dirs'] == 'overwrite': log.debug('Overwriting %s from %s with %s', artifact_local, artifacts[artifact_local][0], artifact_url) else: log.error('Collision: %s from %s and %s', artifact_local, artifacts[artifact_local][0], artifact_url) raise HandledError artifacts[artifact_local] = (artifact_url, size) return artifacts
python
def artifacts_urls(config, jobs_artifacts, log): """Determine destination file paths for job artifacts. :param dict config: Dictionary from get_arguments(). :param iter jobs_artifacts: List of job artifacts from query_artifacts(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator. :return: Destination file paths (keys), download URLs (value[0]), and expected file size (value[1]). :rtype: dict """ artifacts = dict() # Determine if we should create job ID directories. if config['always_job_dirs']: job_dirs = True elif config['no_job_dirs']: job_dirs = False elif len(set(i[0] for i in jobs_artifacts)) == 1: log.debug('Only one job ID, automatically setting job_dirs = False.') job_dirs = False elif len(set(i[1] for i in jobs_artifacts)) == len(jobs_artifacts): log.debug('No local file conflicts, automatically setting job_dirs = False') job_dirs = False else: log.debug('Multiple job IDs with file conflicts, automatically setting job_dirs = True') job_dirs = True # Get final URLs and destination file paths. root_dir = config['dir'] or os.getcwd() for job, file_name, size in jobs_artifacts: artifact_url = '{0}/buildjobs/{1}/artifacts/{2}'.format(API_PREFIX, job, file_name) artifact_local = os.path.join(root_dir, job if job_dirs else '', file_name) if artifact_local in artifacts: if config['no_job_dirs'] == 'skip': log.debug('Skipping %s from %s', artifact_local, artifact_url) continue if config['no_job_dirs'] == 'rename': new_name = artifact_local while new_name in artifacts: path, ext = os.path.splitext(new_name) new_name = (path + '_' + ext) if ext else (new_name + '_') log.debug('Renaming %s to %s from %s', artifact_local, new_name, artifact_url) artifact_local = new_name elif config['no_job_dirs'] == 'overwrite': log.debug('Overwriting %s from %s with %s', artifact_local, artifacts[artifact_local][0], artifact_url) else: log.error('Collision: %s from %s and %s', artifact_local, artifacts[artifact_local][0], artifact_url) raise HandledError artifacts[artifact_local] = (artifact_url, size) return artifacts
[ "def", "artifacts_urls", "(", "config", ",", "jobs_artifacts", ",", "log", ")", ":", "artifacts", "=", "dict", "(", ")", "# Determine if we should create job ID directories.", "if", "config", "[", "'always_job_dirs'", "]", ":", "job_dirs", "=", "True", "elif", "config", "[", "'no_job_dirs'", "]", ":", "job_dirs", "=", "False", "elif", "len", "(", "set", "(", "i", "[", "0", "]", "for", "i", "in", "jobs_artifacts", ")", ")", "==", "1", ":", "log", ".", "debug", "(", "'Only one job ID, automatically setting job_dirs = False.'", ")", "job_dirs", "=", "False", "elif", "len", "(", "set", "(", "i", "[", "1", "]", "for", "i", "in", "jobs_artifacts", ")", ")", "==", "len", "(", "jobs_artifacts", ")", ":", "log", ".", "debug", "(", "'No local file conflicts, automatically setting job_dirs = False'", ")", "job_dirs", "=", "False", "else", ":", "log", ".", "debug", "(", "'Multiple job IDs with file conflicts, automatically setting job_dirs = True'", ")", "job_dirs", "=", "True", "# Get final URLs and destination file paths.", "root_dir", "=", "config", "[", "'dir'", "]", "or", "os", ".", "getcwd", "(", ")", "for", "job", ",", "file_name", ",", "size", "in", "jobs_artifacts", ":", "artifact_url", "=", "'{0}/buildjobs/{1}/artifacts/{2}'", ".", "format", "(", "API_PREFIX", ",", "job", ",", "file_name", ")", "artifact_local", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "job", "if", "job_dirs", "else", "''", ",", "file_name", ")", "if", "artifact_local", "in", "artifacts", ":", "if", "config", "[", "'no_job_dirs'", "]", "==", "'skip'", ":", "log", ".", "debug", "(", "'Skipping %s from %s'", ",", "artifact_local", ",", "artifact_url", ")", "continue", "if", "config", "[", "'no_job_dirs'", "]", "==", "'rename'", ":", "new_name", "=", "artifact_local", "while", "new_name", "in", "artifacts", ":", "path", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "new_name", ")", "new_name", "=", "(", "path", "+", "'_'", "+", "ext", ")", "if", "ext", "else", "(", "new_name", "+", "'_'", ")", "log", ".", "debug", "(", "'Renaming %s to %s from %s'", ",", "artifact_local", ",", "new_name", ",", "artifact_url", ")", "artifact_local", "=", "new_name", "elif", "config", "[", "'no_job_dirs'", "]", "==", "'overwrite'", ":", "log", ".", "debug", "(", "'Overwriting %s from %s with %s'", ",", "artifact_local", ",", "artifacts", "[", "artifact_local", "]", "[", "0", "]", ",", "artifact_url", ")", "else", ":", "log", ".", "error", "(", "'Collision: %s from %s and %s'", ",", "artifact_local", ",", "artifacts", "[", "artifact_local", "]", "[", "0", "]", ",", "artifact_url", ")", "raise", "HandledError", "artifacts", "[", "artifact_local", "]", "=", "(", "artifact_url", ",", "size", ")", "return", "artifacts" ]
Determine destination file paths for job artifacts. :param dict config: Dictionary from get_arguments(). :param iter jobs_artifacts: List of job artifacts from query_artifacts(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator. :return: Destination file paths (keys), download URLs (value[0]), and expected file size (value[1]). :rtype: dict
[ "Determine", "destination", "file", "paths", "for", "job", "artifacts", "." ]
train
https://github.com/Robpol86/appveyor-artifacts/blob/20bc2963b09f4142fd4c0b1f5da04f1105379e36/appveyor_artifacts.py#L390-L440
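The "rename" collision strategy in artifacts_urls keeps inserting an underscore before the file extension (or appending one when there is no extension) until the destination is unique. A standalone sketch of just that loop, with hypothetical file names:

```python
import os

# The "rename" strategy in isolation: mutate the name until no
# collision remains with already-claimed destination paths.
def rename_until_unique(name, taken):
    while name in taken:
        path, ext = os.path.splitext(name)
        name = (path + '_' + ext) if ext else (name + '_')
    return name

taken = {'report.xml', 'report_.xml'}
print(rename_until_unique('report.xml', taken))  # report__.xml
```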
Robpol86/appveyor-artifacts
appveyor_artifacts.py
get_urls
def get_urls(config, log): """Wait for AppVeyor job to finish and get all artifacts' URLs. :param dict config: Dictionary from get_arguments(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator. :return: Paths and URLs from artifacts_urls. :rtype: dict """ # Wait for job to be queued. Once it is we'll have the "version". build_version = None for _ in range(3): build_version = query_build_version(config) if build_version: break log.info('Waiting for job to be queued...') time.sleep(SLEEP_FOR) if not build_version: log.error('Timed out waiting for job to be queued or build not found.') raise HandledError # Get job IDs. Wait for AppVeyor job to finish. job_ids = list() valid_statuses = ['success', 'failed', 'running', 'queued'] while True: job_ids = query_job_ids(build_version, config) statuses = set([i[1] for i in job_ids]) if 'failed' in statuses: job = [i[0] for i in job_ids if i[1] == 'failed'][0] url = 'https://ci.appveyor.com/project/{0}/{1}/build/job/{2}'.format(config['owner'], config['repo'], job) log.error('AppVeyor job failed: %s', url) raise HandledError if statuses == set(valid_statuses[:1]): log.info('Build successful. Found %d job%s.', len(job_ids), '' if len(job_ids) == 1 else 's') break if 'running' in statuses: log.info('Waiting for job%s to finish...', '' if len(job_ids) == 1 else 's') elif 'queued' in statuses: log.info('Waiting for all jobs to start...') else: log.error('Got unknown status from AppVeyor API: %s', ' '.join(statuses - set(valid_statuses))) raise HandledError time.sleep(SLEEP_FOR) # Get artifacts. artifacts = query_artifacts([i[0] for i in job_ids]) log.info('Found %d artifact%s.', len(artifacts), '' if len(artifacts) == 1 else 's') return artifacts_urls(config, artifacts) if artifacts else dict()
python
def get_urls(config, log): """Wait for AppVeyor job to finish and get all artifacts' URLs. :param dict config: Dictionary from get_arguments(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator. :return: Paths and URLs from artifacts_urls. :rtype: dict """ # Wait for job to be queued. Once it is we'll have the "version". build_version = None for _ in range(3): build_version = query_build_version(config) if build_version: break log.info('Waiting for job to be queued...') time.sleep(SLEEP_FOR) if not build_version: log.error('Timed out waiting for job to be queued or build not found.') raise HandledError # Get job IDs. Wait for AppVeyor job to finish. job_ids = list() valid_statuses = ['success', 'failed', 'running', 'queued'] while True: job_ids = query_job_ids(build_version, config) statuses = set([i[1] for i in job_ids]) if 'failed' in statuses: job = [i[0] for i in job_ids if i[1] == 'failed'][0] url = 'https://ci.appveyor.com/project/{0}/{1}/build/job/{2}'.format(config['owner'], config['repo'], job) log.error('AppVeyor job failed: %s', url) raise HandledError if statuses == set(valid_statuses[:1]): log.info('Build successful. Found %d job%s.', len(job_ids), '' if len(job_ids) == 1 else 's') break if 'running' in statuses: log.info('Waiting for job%s to finish...', '' if len(job_ids) == 1 else 's') elif 'queued' in statuses: log.info('Waiting for all jobs to start...') else: log.error('Got unknown status from AppVeyor API: %s', ' '.join(statuses - set(valid_statuses))) raise HandledError time.sleep(SLEEP_FOR) # Get artifacts. artifacts = query_artifacts([i[0] for i in job_ids]) log.info('Found %d artifact%s.', len(artifacts), '' if len(artifacts) == 1 else 's') return artifacts_urls(config, artifacts) if artifacts else dict()
[ "def", "get_urls", "(", "config", ",", "log", ")", ":", "# Wait for job to be queued. Once it is we'll have the \"version\".", "build_version", "=", "None", "for", "_", "in", "range", "(", "3", ")", ":", "build_version", "=", "query_build_version", "(", "config", ")", "if", "build_version", ":", "break", "log", ".", "info", "(", "'Waiting for job to be queued...'", ")", "time", ".", "sleep", "(", "SLEEP_FOR", ")", "if", "not", "build_version", ":", "log", ".", "error", "(", "'Timed out waiting for job to be queued or build not found.'", ")", "raise", "HandledError", "# Get job IDs. Wait for AppVeyor job to finish.", "job_ids", "=", "list", "(", ")", "valid_statuses", "=", "[", "'success'", ",", "'failed'", ",", "'running'", ",", "'queued'", "]", "while", "True", ":", "job_ids", "=", "query_job_ids", "(", "build_version", ",", "config", ")", "statuses", "=", "set", "(", "[", "i", "[", "1", "]", "for", "i", "in", "job_ids", "]", ")", "if", "'failed'", "in", "statuses", ":", "job", "=", "[", "i", "[", "0", "]", "for", "i", "in", "job_ids", "if", "i", "[", "1", "]", "==", "'failed'", "]", "[", "0", "]", "url", "=", "'https://ci.appveyor.com/project/{0}/{1}/build/job/{2}'", ".", "format", "(", "config", "[", "'owner'", "]", ",", "config", "[", "'repo'", "]", ",", "job", ")", "log", ".", "error", "(", "'AppVeyor job failed: %s'", ",", "url", ")", "raise", "HandledError", "if", "statuses", "==", "set", "(", "valid_statuses", "[", ":", "1", "]", ")", ":", "log", ".", "info", "(", "'Build successful. Found %d job%s.'", ",", "len", "(", "job_ids", ")", ",", "''", "if", "len", "(", "job_ids", ")", "==", "1", "else", "'s'", ")", "break", "if", "'running'", "in", "statuses", ":", "log", ".", "info", "(", "'Waiting for job%s to finish...'", ",", "''", "if", "len", "(", "job_ids", ")", "==", "1", "else", "'s'", ")", "elif", "'queued'", "in", "statuses", ":", "log", ".", "info", "(", "'Waiting for all jobs to start...'", ")", "else", ":", "log", ".", "error", "(", "'Got unknown status from AppVeyor API: %s'", ",", "' '", ".", "join", "(", "statuses", "-", "set", "(", "valid_statuses", ")", ")", ")", "raise", "HandledError", "time", ".", "sleep", "(", "SLEEP_FOR", ")", "# Get artifacts.", "artifacts", "=", "query_artifacts", "(", "[", "i", "[", "0", "]", "for", "i", "in", "job_ids", "]", ")", "log", ".", "info", "(", "'Found %d artifact%s.'", ",", "len", "(", "artifacts", ")", ",", "''", "if", "len", "(", "artifacts", ")", "==", "1", "else", "'s'", ")", "return", "artifacts_urls", "(", "config", ",", "artifacts", ")", "if", "artifacts", "else", "dict", "(", ")" ]
Wait for AppVeyor job to finish and get all artifacts' URLs. :param dict config: Dictionary from get_arguments(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator. :return: Paths and URLs from artifacts_urls. :rtype: dict
[ "Wait", "for", "AppVeyor", "job", "to", "finish", "and", "get", "all", "artifacts", "URLs", "." ]
train
https://github.com/Robpol86/appveyor-artifacts/blob/20bc2963b09f4142fd4c0b1f5da04f1105379e36/appveyor_artifacts.py#L444-L491
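The polling loop in get_urls acts on the set of job statuses rather than on individual jobs. The sketch below isolates that decision logic, fed with made-up (job ID, status) tuples.

```python
# Decision logic of the get_urls polling loop on its own.
VALID = ['success', 'failed', 'running', 'queued']

def classify(job_ids):
    statuses = set(status for _, status in job_ids)
    if 'failed' in statuses:
        return 'abort'                    # raise HandledError upstream
    if statuses == set(VALID[:1]):        # i.e. {'success'}
        return 'done'
    if 'running' in statuses or 'queued' in statuses:
        return 'wait'
    return 'unknown: ' + ' '.join(statuses - set(VALID))

print(classify([('a', 'success'), ('b', 'running')]))  # wait
print(classify([('a', 'success'), ('b', 'success')]))  # done
print(classify([('a', 'failed'), ('b', 'queued')]))    # abort
```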
Robpol86/appveyor-artifacts
appveyor_artifacts.py
download_file
def download_file(config, local_path, url, expected_size, chunk_size, log): """Download a file. :param dict config: Dictionary from get_arguments(). :param str local_path: Destination path to save file to. :param str url: URL of the file to download. :param int expected_size: Expected file size in bytes. :param int chunk_size: Number of bytes to read in memory before writing to disk and printing a dot. :param logging.Logger log: Logger for this function. Populated by with_log() decorator. """ if not os.path.exists(os.path.dirname(local_path)): log.debug('Creating directory: %s', os.path.dirname(local_path)) os.makedirs(os.path.dirname(local_path)) if os.path.exists(local_path): log.error('File already exists: %s', local_path) raise HandledError relative_path = os.path.relpath(local_path, config['dir'] or os.getcwd()) print(' => {0}'.format(relative_path), end=' ', file=sys.stderr) # Download file. log.debug('Writing to: %s', local_path) with open(local_path, 'wb') as handle: response = requests.get(url, stream=True) for chunk in response.iter_content(chunk_size): handle.write(chunk) print('.', end='', file=sys.stderr) file_size = os.path.getsize(local_path) print(' {0} bytes'.format(file_size), file=sys.stderr) if file_size != expected_size: log.error('Expected %d bytes but got %d bytes instead.', expected_size, file_size) raise HandledError
python
def download_file(config, local_path, url, expected_size, chunk_size, log): """Download a file. :param dict config: Dictionary from get_arguments(). :param str local_path: Destination path to save file to. :param str url: URL of the file to download. :param int expected_size: Expected file size in bytes. :param int chunk_size: Number of bytes to read in memory before writing to disk and printing a dot. :param logging.Logger log: Logger for this function. Populated by with_log() decorator. """ if not os.path.exists(os.path.dirname(local_path)): log.debug('Creating directory: %s', os.path.dirname(local_path)) os.makedirs(os.path.dirname(local_path)) if os.path.exists(local_path): log.error('File already exists: %s', local_path) raise HandledError relative_path = os.path.relpath(local_path, config['dir'] or os.getcwd()) print(' => {0}'.format(relative_path), end=' ', file=sys.stderr) # Download file. log.debug('Writing to: %s', local_path) with open(local_path, 'wb') as handle: response = requests.get(url, stream=True) for chunk in response.iter_content(chunk_size): handle.write(chunk) print('.', end='', file=sys.stderr) file_size = os.path.getsize(local_path) print(' {0} bytes'.format(file_size), file=sys.stderr) if file_size != expected_size: log.error('Expected %d bytes but got %d bytes instead.', expected_size, file_size) raise HandledError
[ "def", "download_file", "(", "config", ",", "local_path", ",", "url", ",", "expected_size", ",", "chunk_size", ",", "log", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "local_path", ")", ")", ":", "log", ".", "debug", "(", "'Creating directory: %s'", ",", "os", ".", "path", ".", "dirname", "(", "local_path", ")", ")", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "local_path", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "local_path", ")", ":", "log", ".", "error", "(", "'File already exists: %s'", ",", "local_path", ")", "raise", "HandledError", "relative_path", "=", "os", ".", "path", ".", "relpath", "(", "local_path", ",", "config", "[", "'dir'", "]", "or", "os", ".", "getcwd", "(", ")", ")", "print", "(", "' => {0}'", ".", "format", "(", "relative_path", ")", ",", "end", "=", "' '", ",", "file", "=", "sys", ".", "stderr", ")", "# Download file.", "log", ".", "debug", "(", "'Writing to: %s'", ",", "local_path", ")", "with", "open", "(", "local_path", ",", "'wb'", ")", "as", "handle", ":", "response", "=", "requests", ".", "get", "(", "url", ",", "stream", "=", "True", ")", "for", "chunk", "in", "response", ".", "iter_content", "(", "chunk_size", ")", ":", "handle", ".", "write", "(", "chunk", ")", "print", "(", "'.'", ",", "end", "=", "''", ",", "file", "=", "sys", ".", "stderr", ")", "file_size", "=", "os", ".", "path", ".", "getsize", "(", "local_path", ")", "print", "(", "' {0} bytes'", ".", "format", "(", "file_size", ")", ",", "file", "=", "sys", ".", "stderr", ")", "if", "file_size", "!=", "expected_size", ":", "log", ".", "error", "(", "'Expected %d bytes but got %d bytes instead.'", ",", "expected_size", ",", "file_size", ")", "raise", "HandledError" ]
Download a file. :param dict config: Dictionary from get_arguments(). :param str local_path: Destination path to save file to. :param str url: URL of the file to download. :param int expected_size: Expected file size in bytes. :param int chunk_size: Number of bytes to read in memory before writing to disk and printing a dot. :param logging.Logger log: Logger for this function. Populated by with_log() decorator.
[ "Download", "a", "file", "." ]
train
https://github.com/Robpol86/appveyor-artifacts/blob/20bc2963b09f4142fd4c0b1f5da04f1105379e36/appveyor_artifacts.py#L495-L526
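download_file streams the response in fixed-size chunks and verifies the byte count afterwards. A reduced, self-contained version of that pattern might look like this; the URL in the commented call is a placeholder and the progress dots are omitted.

```python
import os
import requests

# Reduced version of the streaming pattern in download_file.
def fetch(url, local_path, expected_size, chunk_size=65536):
    with open(local_path, 'wb') as handle:
        response = requests.get(url, stream=True)
        for chunk in response.iter_content(chunk_size):
            handle.write(chunk)
    actual = os.path.getsize(local_path)
    if actual != expected_size:
        raise RuntimeError('expected %d bytes, got %d' % (expected_size, actual))

# fetch('https://example.com/artifact.whl', '/tmp/artifact.whl', 1024)
```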
Robpol86/appveyor-artifacts
appveyor_artifacts.py
mangle_coverage
def mangle_coverage(local_path, log):
    """Edit .coverage file, replacing Windows file paths with Linux paths.

    :param str local_path: Path to the local .coverage file to edit.
    :param logging.Logger log: Logger for this function. Populated by with_log() decorator.
    """
    # Read the file, or return if not a .coverage file.
    with open(local_path, mode='rb') as handle:
        if handle.read(13) != b'!coverage.py:':
            log.debug('File %s not a coverage file.', local_path)
            return
        handle.seek(0)
        # I'm lazy, reading all of this into memory. What could possibly go wrong?
        file_contents = handle.read(52428800).decode('utf-8')  # 50 MiB limit, surely this is enough?

    # Substitute paths.
    for windows_path in set(REGEX_MANGLE.findall(file_contents)):
        unix_relative_path = windows_path.replace(r'\\', '/').split('/', 3)[-1]
        unix_absolute_path = os.path.abspath(unix_relative_path)
        if not os.path.isfile(unix_absolute_path):
            log.debug('Windows path: %s', windows_path)
            log.debug('Unix relative path: %s', unix_relative_path)
            log.error('No such file: %s', unix_absolute_path)
            raise HandledError
        file_contents = file_contents.replace(windows_path, unix_absolute_path)

    # Write.
    with open(local_path, 'w') as handle:
        handle.write(file_contents)
python
def mangle_coverage(local_path, log):
    """Edit .coverage file, replacing Windows file paths with Linux paths.

    :param str local_path: Path to the local .coverage file to edit.
    :param logging.Logger log: Logger for this function. Populated by with_log() decorator.
    """
    # Read the file, or return if not a .coverage file.
    with open(local_path, mode='rb') as handle:
        if handle.read(13) != b'!coverage.py:':
            log.debug('File %s not a coverage file.', local_path)
            return
        handle.seek(0)
        # I'm lazy, reading all of this into memory. What could possibly go wrong?
        file_contents = handle.read(52428800).decode('utf-8')  # 50 MiB limit, surely this is enough?

    # Substitute paths.
    for windows_path in set(REGEX_MANGLE.findall(file_contents)):
        unix_relative_path = windows_path.replace(r'\\', '/').split('/', 3)[-1]
        unix_absolute_path = os.path.abspath(unix_relative_path)
        if not os.path.isfile(unix_absolute_path):
            log.debug('Windows path: %s', windows_path)
            log.debug('Unix relative path: %s', unix_relative_path)
            log.error('No such file: %s', unix_absolute_path)
            raise HandledError
        file_contents = file_contents.replace(windows_path, unix_absolute_path)

    # Write.
    with open(local_path, 'w') as handle:
        handle.write(file_contents)
[ "def", "mangle_coverage", "(", "local_path", ",", "log", ")", ":", "# Read the file, or return if not a .coverage file.", "with", "open", "(", "local_path", ",", "mode", "=", "'rb'", ")", "as", "handle", ":", "if", "handle", ".", "read", "(", "13", ")", "!=", "b'!coverage.py:'", ":", "log", ".", "debug", "(", "'File %s not a coverage file.'", ",", "local_path", ")", "return", "handle", ".", "seek", "(", "0", ")", "# I'm lazy, reading all of this into memory. What could possibly go wrong?", "file_contents", "=", "handle", ".", "read", "(", "52428800", ")", ".", "decode", "(", "'utf-8'", ")", "# 50 MiB limit, surely this is enough?", "# Substitute paths.", "for", "windows_path", "in", "set", "(", "REGEX_MANGLE", ".", "findall", "(", "file_contents", ")", ")", ":", "unix_relative_path", "=", "windows_path", ".", "replace", "(", "r'\\\\'", ",", "'/'", ")", ".", "split", "(", "'/'", ",", "3", ")", "[", "-", "1", "]", "unix_absolute_path", "=", "os", ".", "path", ".", "abspath", "(", "unix_relative_path", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "unix_absolute_path", ")", ":", "log", ".", "debug", "(", "'Windows path: %s'", ",", "windows_path", ")", "log", ".", "debug", "(", "'Unix relative path: %s'", ",", "unix_relative_path", ")", "log", ".", "error", "(", "'No such file: %s'", ",", "unix_absolute_path", ")", "raise", "HandledError", "file_contents", "=", "file_contents", ".", "replace", "(", "windows_path", ",", "unix_absolute_path", ")", "# Write.", "with", "open", "(", "local_path", ",", "'w'", ")", "as", "handle", ":", "handle", ".", "write", "(", "file_contents", ")" ]
Edit .coverage file, replacing Windows file paths with Linux paths.

:param str local_path: Path to the local .coverage file to edit.
:param logging.Logger log: Logger for this function. Populated by with_log() decorator.
[ "Edit", ".", "coverage", "file", ",", "replacing", "Windows", "file", "paths", "with", "Linux", "paths", "." ]
train
https://github.com/Robpol86/appveyor-artifacts/blob/20bc2963b09f4142fd4c0b1f5da04f1105379e36/appveyor_artifacts.py#L530-L559
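mangle_coverage relies on the module-level REGEX_MANGLE, which is defined elsewhere in appveyor_artifacts.py. The sketch below uses a hypothetical stand-in pattern to show the backslash-to-slash rewrite and the split('/', 3) that drops the drive letter, the 'projects' directory, and the repository name.

```python
import re

# Hypothetical stand-in for REGEX_MANGLE: matches doubly-escaped
# Windows paths as they appear inside a .coverage file.
REGEX = re.compile(r'C:\\\\projects\\\\[^"\s]+')

file_contents = '"C:\\\\projects\\\\demo-repo\\\\pkg\\\\mod.py"'
for windows_path in set(REGEX.findall(file_contents)):
    # replace(r'\\', '/') yields 'C:/projects/demo-repo/pkg/mod.py';
    # split('/', 3)[-1] then keeps only the repo-relative remainder.
    unix_relative_path = windows_path.replace(r'\\', '/').split('/', 3)[-1]
    print(windows_path, '->', unix_relative_path)  # ... -> pkg/mod.py
```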
Robpol86/appveyor-artifacts
appveyor_artifacts.py
main
def main(config, log): """Main function. Runs the program. :param dict config: Dictionary from get_arguments(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator. """ validate(config) paths_and_urls = get_urls(config) if not paths_and_urls: log.warning('No artifacts; nothing to download.') return # Download files. total_size = 0 chunk_size = max(min(max(v[1] for v in paths_and_urls.values()) // 50, 1048576), 1024) log.info('Downloading file%s (1 dot ~ %d KiB):', '' if len(paths_and_urls) == 1 else 's', chunk_size // 1024) for size, local_path, url in sorted((v[1], k, v[0]) for k, v in paths_and_urls.items()): download_file(config, local_path, url, size, chunk_size) total_size += size if config['mangle_coverage']: mangle_coverage(local_path) log.info('Downloaded %d file(s), %d bytes total.', len(paths_and_urls), total_size)
python
def main(config, log): """Main function. Runs the program. :param dict config: Dictionary from get_arguments(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator. """ validate(config) paths_and_urls = get_urls(config) if not paths_and_urls: log.warning('No artifacts; nothing to download.') return # Download files. total_size = 0 chunk_size = max(min(max(v[1] for v in paths_and_urls.values()) // 50, 1048576), 1024) log.info('Downloading file%s (1 dot ~ %d KiB):', '' if len(paths_and_urls) == 1 else 's', chunk_size // 1024) for size, local_path, url in sorted((v[1], k, v[0]) for k, v in paths_and_urls.items()): download_file(config, local_path, url, size, chunk_size) total_size += size if config['mangle_coverage']: mangle_coverage(local_path) log.info('Downloaded %d file(s), %d bytes total.', len(paths_and_urls), total_size)
[ "def", "main", "(", "config", ",", "log", ")", ":", "validate", "(", "config", ")", "paths_and_urls", "=", "get_urls", "(", "config", ")", "if", "not", "paths_and_urls", ":", "log", ".", "warning", "(", "'No artifacts; nothing to download.'", ")", "return", "# Download files.", "total_size", "=", "0", "chunk_size", "=", "max", "(", "min", "(", "max", "(", "v", "[", "1", "]", "for", "v", "in", "paths_and_urls", ".", "values", "(", ")", ")", "//", "50", ",", "1048576", ")", ",", "1024", ")", "log", ".", "info", "(", "'Downloading file%s (1 dot ~ %d KiB):'", ",", "''", "if", "len", "(", "paths_and_urls", ")", "==", "1", "else", "'s'", ",", "chunk_size", "//", "1024", ")", "for", "size", ",", "local_path", ",", "url", "in", "sorted", "(", "(", "v", "[", "1", "]", ",", "k", ",", "v", "[", "0", "]", ")", "for", "k", ",", "v", "in", "paths_and_urls", ".", "items", "(", ")", ")", ":", "download_file", "(", "config", ",", "local_path", ",", "url", ",", "size", ",", "chunk_size", ")", "total_size", "+=", "size", "if", "config", "[", "'mangle_coverage'", "]", ":", "mangle_coverage", "(", "local_path", ")", "log", ".", "info", "(", "'Downloaded %d file(s), %d bytes total.'", ",", "len", "(", "paths_and_urls", ")", ",", "total_size", ")" ]
Main function. Runs the program. :param dict config: Dictionary from get_arguments(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator.
[ "Main", "function", ".", "Runs", "the", "program", "." ]
train
https://github.com/Robpol86/appveyor-artifacts/blob/20bc2963b09f4142fd4c0b1f5da04f1105379e36/appveyor_artifacts.py#L563-L585
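The chunk_size heuristic in main aims for roughly 50 progress dots on the largest file while staying between 1 KiB and 1 MiB. A worked example with made-up artifact sizes:

```python
# chunk_size: ~1/50 of the largest artifact, clamped to [1 KiB, 1 MiB].
sizes = [512, 204800, 5242880]                       # bytes (hypothetical)
chunk_size = max(min(max(sizes) // 50, 1048576), 1024)
print(chunk_size)                                    # 104857, ~102 KiB per dot
```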
Robpol86/appveyor-artifacts
appveyor_artifacts.py
entry_point
def entry_point(): """Entry-point from setuptools.""" signal.signal(signal.SIGINT, lambda *_: getattr(os, '_exit')(0)) # Properly handle Control+C config = get_arguments() setup_logging(config['verbose']) try: main(config) except HandledError: if config['raise']: raise logging.critical('Failure.') sys.exit(0 if config['ignore_errors'] else 1)
python
def entry_point(): """Entry-point from setuptools.""" signal.signal(signal.SIGINT, lambda *_: getattr(os, '_exit')(0)) # Properly handle Control+C config = get_arguments() setup_logging(config['verbose']) try: main(config) except HandledError: if config['raise']: raise logging.critical('Failure.') sys.exit(0 if config['ignore_errors'] else 1)
[ "def", "entry_point", "(", ")", ":", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "lambda", "*", "_", ":", "getattr", "(", "os", ",", "'_exit'", ")", "(", "0", ")", ")", "# Properly handle Control+C", "config", "=", "get_arguments", "(", ")", "setup_logging", "(", "config", "[", "'verbose'", "]", ")", "try", ":", "main", "(", "config", ")", "except", "HandledError", ":", "if", "config", "[", "'raise'", "]", ":", "raise", "logging", ".", "critical", "(", "'Failure.'", ")", "sys", ".", "exit", "(", "0", "if", "config", "[", "'ignore_errors'", "]", "else", "1", ")" ]
Entry-point from setuptools.
[ "Entry", "-", "point", "from", "setuptools", "." ]
train
https://github.com/Robpol86/appveyor-artifacts/blob/20bc2963b09f4142fd4c0b1f5da04f1105379e36/appveyor_artifacts.py#L588-L599
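One design choice worth noting in entry_point: the SIGINT handler calls os._exit(0) (looked up via getattr) rather than sys.exit(), so Control+C terminates immediately without unwinding the stack or running cleanup handlers, which keeps an interrupted polling loop from printing a traceback.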
mmEissen/airpixel
airpixel/client.py
ConnectionSupervisor.incoming_messages
def incoming_messages(self) -> t.List[t.Tuple[float, bytes]]:
    """Consume the receive buffer and return the messages.

    If there are new messages added to the queue while this function is
    being processed, they will not be returned. This ensures that this
    terminates in a timely manner.
    """
    approximate_messages = self._receive_buffer.qsize()
    messages = []
    for _ in range(approximate_messages):
        try:
            messages.append(self._receive_buffer.get_nowait())
        except queue.Empty:
            break
    return messages
python
def incoming_messages(self) -> t.List[t.Tuple[float, bytes]]:
    """Consume the receive buffer and return the messages.

    If there are new messages added to the queue while this function is
    being processed, they will not be returned. This ensures that this
    terminates in a timely manner.
    """
    approximate_messages = self._receive_buffer.qsize()
    messages = []
    for _ in range(approximate_messages):
        try:
            messages.append(self._receive_buffer.get_nowait())
        except queue.Empty:
            break
    return messages
[ "def", "incoming_messages", "(", "self", ")", "->", "t", ".", "List", "[", "t", ".", "Tuple", "[", "float", ",", "bytes", "]", "]", ":", "approximate_messages", "=", "self", ".", "_receive_buffer", ".", "qsize", "(", ")", "messages", "=", "[", "]", "for", "_", "in", "range", "(", "approximate_messages", ")", ":", "try", ":", "messages", ".", "append", "(", "self", ".", "_receive_buffer", ".", "get_nowait", "(", ")", ")", "except", "queue", ".", "Empty", ":", "break", "return", "messages" ]
Consume the receive buffer and return the messages.

If there are new messages added to the queue while this function is
being processed, they will not be returned. This ensures that this
terminates in a timely manner.
[ "Consume", "the", "receive", "buffer", "and", "return", "the", "messages", "." ]
train
https://github.com/mmEissen/airpixel/blob/23385776eaa1a8a573fa94200eea051fa7c6c120/airpixel/client.py#L278-L292
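The key to incoming_messages is snapshotting qsize() before draining, so producers adding items concurrently cannot prolong the loop. A self-contained demonstration of that bounded-drain pattern:

```python
import queue

# qsize() is sampled once, so items enqueued afterwards are left
# for the next call; get_nowait() guards against racing consumers.
buf = queue.Queue()
for i in range(3):
    buf.put((float(i), b'payload'))

approximate_messages = buf.qsize()
messages = []
for _ in range(approximate_messages):
    try:
        messages.append(buf.get_nowait())
    except queue.Empty:   # another consumer may have emptied the queue
        break
print(messages)  # [(0.0, b'payload'), (1.0, b'payload'), (2.0, b'payload')]
```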
pyout/pyout
pyout/common.py
_safe_get
def _safe_get(mapping, key, default=None): """Helper for accessing style values. It exists to avoid checking whether `mapping` is indeed a mapping before trying to get a key. In the context of style dicts, this eliminates "is this a mapping" checks in two common situations: 1) a style argument is None, and 2) a style key's value (e.g., width) can be either a mapping or a plain value. """ try: return mapping.get(key, default) except AttributeError: return default
python
def _safe_get(mapping, key, default=None): """Helper for accessing style values. It exists to avoid checking whether `mapping` is indeed a mapping before trying to get a key. In the context of style dicts, this eliminates "is this a mapping" checks in two common situations: 1) a style argument is None, and 2) a style key's value (e.g., width) can be either a mapping or a plain value. """ try: return mapping.get(key, default) except AttributeError: return default
[ "def", "_safe_get", "(", "mapping", ",", "key", ",", "default", "=", "None", ")", ":", "try", ":", "return", "mapping", ".", "get", "(", "key", ",", "default", ")", "except", "AttributeError", ":", "return", "default" ]
Helper for accessing style values. It exists to avoid checking whether `mapping` is indeed a mapping before trying to get a key. In the context of style dicts, this eliminates "is this a mapping" checks in two common situations: 1) a style argument is None, and 2) a style key's value (e.g., width) can be either a mapping or a plain value.
[ "Helper", "for", "accessing", "style", "values", "." ]
train
https://github.com/pyout/pyout/blob/d9ff954bdedb6fc70f21f4fe77ad4bf926b201b0/pyout/common.py#L214-L226
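The two motivating situations from the _safe_get docstring can be checked directly; the function body below is copied from the record above.

```python
def _safe_get(mapping, key, default=None):
    try:
        return mapping.get(key, default)
    except AttributeError:
        return default

print(_safe_get({'width': 10}, 'width'))  # 10  (ordinary mapping)
print(_safe_get(None, 'width', 5))        # 5   (style argument is None)
print(_safe_get(42, 'width', 5))          # 5   (plain value, no .get)
```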
pyout/pyout
pyout/common.py
RowNormalizer.strip_callables
def strip_callables(row): """Extract callable values from `row`. Replace the callable values with the initial value (if specified) or an empty string. Parameters ---------- row : mapping A data row. The keys are either a single column name or a tuple of column names. The values take one of three forms: 1) a non-callable value, 2) a tuple (initial_value, callable), 3) or a single callable (in which case the initial value is set to an empty string). Returns ------- list of (column, callable) """ callables = [] to_delete = [] to_add = [] for columns, value in row.items(): if isinstance(value, tuple): initial, fn = value else: initial = NOTHING # Value could be a normal (non-callable) value or a # callable with no initial value. fn = value if callable(fn) or inspect.isgenerator(fn): lgr.debug("Using %r as the initial value " "for columns %r in row %r", initial, columns, row) if not isinstance(columns, tuple): columns = columns, else: to_delete.append(columns) for column in columns: to_add.append((column, initial)) callables.append((columns, fn)) for column, value in to_add: row[column] = value for multi_columns in to_delete: del row[multi_columns] return callables
python
def strip_callables(row): """Extract callable values from `row`. Replace the callable values with the initial value (if specified) or an empty string. Parameters ---------- row : mapping A data row. The keys are either a single column name or a tuple of column names. The values take one of three forms: 1) a non-callable value, 2) a tuple (initial_value, callable), 3) or a single callable (in which case the initial value is set to an empty string). Returns ------- list of (column, callable) """ callables = [] to_delete = [] to_add = [] for columns, value in row.items(): if isinstance(value, tuple): initial, fn = value else: initial = NOTHING # Value could be a normal (non-callable) value or a # callable with no initial value. fn = value if callable(fn) or inspect.isgenerator(fn): lgr.debug("Using %r as the initial value " "for columns %r in row %r", initial, columns, row) if not isinstance(columns, tuple): columns = columns, else: to_delete.append(columns) for column in columns: to_add.append((column, initial)) callables.append((columns, fn)) for column, value in to_add: row[column] = value for multi_columns in to_delete: del row[multi_columns] return callables
[ "def", "strip_callables", "(", "row", ")", ":", "callables", "=", "[", "]", "to_delete", "=", "[", "]", "to_add", "=", "[", "]", "for", "columns", ",", "value", "in", "row", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "tuple", ")", ":", "initial", ",", "fn", "=", "value", "else", ":", "initial", "=", "NOTHING", "# Value could be a normal (non-callable) value or a", "# callable with no initial value.", "fn", "=", "value", "if", "callable", "(", "fn", ")", "or", "inspect", ".", "isgenerator", "(", "fn", ")", ":", "lgr", ".", "debug", "(", "\"Using %r as the initial value \"", "\"for columns %r in row %r\"", ",", "initial", ",", "columns", ",", "row", ")", "if", "not", "isinstance", "(", "columns", ",", "tuple", ")", ":", "columns", "=", "columns", ",", "else", ":", "to_delete", ".", "append", "(", "columns", ")", "for", "column", "in", "columns", ":", "to_add", ".", "append", "(", "(", "column", ",", "initial", ")", ")", "callables", ".", "append", "(", "(", "columns", ",", "fn", ")", ")", "for", "column", ",", "value", "in", "to_add", ":", "row", "[", "column", "]", "=", "value", "for", "multi_columns", "in", "to_delete", ":", "del", "row", "[", "multi_columns", "]", "return", "callables" ]
Extract callable values from `row`. Replace the callable values with the initial value (if specified) or an empty string. Parameters ---------- row : mapping A data row. The keys are either a single column name or a tuple of column names. The values take one of three forms: 1) a non-callable value, 2) a tuple (initial_value, callable), 3) or a single callable (in which case the initial value is set to an empty string). Returns ------- list of (column, callable)
[ "Extract", "callable", "values", "from", "row", "." ]
train
https://github.com/pyout/pyout/blob/d9ff954bdedb6fc70f21f4fe77ad4bf926b201b0/pyout/common.py#L150-L198
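To make the three accepted value forms concrete, here is a condensed re-implementation of strip_callables that uses '' in place of the module's NOTHING sentinel and omits the generator check and logging; only the collection logic is kept.

```python
def strip_callables(row):
    callables, to_delete, to_add = [], [], []
    for columns, value in row.items():
        initial, fn = value if isinstance(value, tuple) else ('', value)
        if callable(fn):
            if isinstance(columns, tuple):
                to_delete.append(columns)
            else:
                columns = (columns,)
            for column in columns:
                to_add.append((column, initial))
            callables.append((columns, fn))
    for column, value in to_add:
        row[column] = value
    for multi_columns in to_delete:
        del row[multi_columns]
    return callables

row = {'name': 'foo',                        # 1) plain value
       'status': ('pending', lambda: 'ok'),  # 2) (initial, callable)
       'size': lambda: 42}                   # 3) bare callable
strip_callables(row)
print(row)  # {'name': 'foo', 'status': 'pending', 'size': ''}
```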
pyout/pyout
pyout/common.py
StyleFields.build
def build(self, columns): """Build the style and fields. Parameters ---------- columns : list of str Column names. """ self.columns = columns default = dict(elements.default("default_"), **_safe_get(self.init_style, "default_", {})) self.style = elements.adopt({c: default for c in columns}, self.init_style) # Store special keys in _style so that they can be validated. self.style["default_"] = default self.style["header_"] = self._compose("header_", {"align", "width"}) self.style["aggregate_"] = self._compose("aggregate_", {"align", "width"}) self.style["separator_"] = _safe_get(self.init_style, "separator_", elements.default("separator_")) lgr.debug("Validating style %r", self.style) self.style["width_"] = _safe_get(self.init_style, "width_", elements.default("width_")) elements.validate(self.style) self._setup_fields() ngaps = len(self.columns) - 1 self.width_separtor = len(self.style["separator_"]) * ngaps lgr.debug("Calculated separator width as %d", self.width_separtor)
python
def build(self, columns): """Build the style and fields. Parameters ---------- columns : list of str Column names. """ self.columns = columns default = dict(elements.default("default_"), **_safe_get(self.init_style, "default_", {})) self.style = elements.adopt({c: default for c in columns}, self.init_style) # Store special keys in _style so that they can be validated. self.style["default_"] = default self.style["header_"] = self._compose("header_", {"align", "width"}) self.style["aggregate_"] = self._compose("aggregate_", {"align", "width"}) self.style["separator_"] = _safe_get(self.init_style, "separator_", elements.default("separator_")) lgr.debug("Validating style %r", self.style) self.style["width_"] = _safe_get(self.init_style, "width_", elements.default("width_")) elements.validate(self.style) self._setup_fields() ngaps = len(self.columns) - 1 self.width_separtor = len(self.style["separator_"]) * ngaps lgr.debug("Calculated separator width as %d", self.width_separtor)
[ "def", "build", "(", "self", ",", "columns", ")", ":", "self", ".", "columns", "=", "columns", "default", "=", "dict", "(", "elements", ".", "default", "(", "\"default_\"", ")", ",", "*", "*", "_safe_get", "(", "self", ".", "init_style", ",", "\"default_\"", ",", "{", "}", ")", ")", "self", ".", "style", "=", "elements", ".", "adopt", "(", "{", "c", ":", "default", "for", "c", "in", "columns", "}", ",", "self", ".", "init_style", ")", "# Store special keys in _style so that they can be validated.", "self", ".", "style", "[", "\"default_\"", "]", "=", "default", "self", ".", "style", "[", "\"header_\"", "]", "=", "self", ".", "_compose", "(", "\"header_\"", ",", "{", "\"align\"", ",", "\"width\"", "}", ")", "self", ".", "style", "[", "\"aggregate_\"", "]", "=", "self", ".", "_compose", "(", "\"aggregate_\"", ",", "{", "\"align\"", ",", "\"width\"", "}", ")", "self", ".", "style", "[", "\"separator_\"", "]", "=", "_safe_get", "(", "self", ".", "init_style", ",", "\"separator_\"", ",", "elements", ".", "default", "(", "\"separator_\"", ")", ")", "lgr", ".", "debug", "(", "\"Validating style %r\"", ",", "self", ".", "style", ")", "self", ".", "style", "[", "\"width_\"", "]", "=", "_safe_get", "(", "self", ".", "init_style", ",", "\"width_\"", ",", "elements", ".", "default", "(", "\"width_\"", ")", ")", "elements", ".", "validate", "(", "self", ".", "style", ")", "self", ".", "_setup_fields", "(", ")", "ngaps", "=", "len", "(", "self", ".", "columns", ")", "-", "1", "self", ".", "width_separtor", "=", "len", "(", "self", ".", "style", "[", "\"separator_\"", "]", ")", "*", "ngaps", "lgr", ".", "debug", "(", "\"Calculated separator width as %d\"", ",", "self", ".", "width_separtor", ")" ]
Build the style and fields. Parameters ---------- columns : list of str Column names.
[ "Build", "the", "style", "and", "fields", "." ]
train
https://github.com/pyout/pyout/blob/d9ff954bdedb6fc70f21f4fe77ad4bf926b201b0/pyout/common.py#L252-L281
pyout/pyout
pyout/common.py
StyleFields._compose
def _compose(self, name, attributes): """Construct a style taking `attributes` from the column styles. Parameters ---------- name : str Name of main style (e.g., "header_"). attributes : set of str Adopt these elements from the column styles. Returns ------- The composite style for `name`. """ name_style = _safe_get(self.init_style, name, elements.default(name)) if self.init_style is not None and name_style is not None: result = {} for col in self.columns: cstyle = {k: v for k, v in self.style[col].items() if k in attributes} result[col] = dict(cstyle, **name_style) return result
python
def _compose(self, name, attributes): """Construct a style taking `attributes` from the column styles. Parameters ---------- name : str Name of main style (e.g., "header_"). attributes : set of str Adopt these elements from the column styles. Returns ------- The composite style for `name`. """ name_style = _safe_get(self.init_style, name, elements.default(name)) if self.init_style is not None and name_style is not None: result = {} for col in self.columns: cstyle = {k: v for k, v in self.style[col].items() if k in attributes} result[col] = dict(cstyle, **name_style) return result
[ "def", "_compose", "(", "self", ",", "name", ",", "attributes", ")", ":", "name_style", "=", "_safe_get", "(", "self", ".", "init_style", ",", "name", ",", "elements", ".", "default", "(", "name", ")", ")", "if", "self", ".", "init_style", "is", "not", "None", "and", "name_style", "is", "not", "None", ":", "result", "=", "{", "}", "for", "col", "in", "self", ".", "columns", ":", "cstyle", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "self", ".", "style", "[", "col", "]", ".", "items", "(", ")", "if", "k", "in", "attributes", "}", "result", "[", "col", "]", "=", "dict", "(", "cstyle", ",", "*", "*", "name_style", ")", "return", "result" ]
Construct a style taking `attributes` from the column styles. Parameters ---------- name : str Name of main style (e.g., "header_"). attributes : set of str Adopt these elements from the column styles. Returns ------- The composite style for `name`.
[ "Construct", "a", "style", "taking", "attributes", "from", "the", "column", "styles", "." ]
train
https://github.com/pyout/pyout/blob/d9ff954bdedb6fc70f21f4fe77ad4bf926b201b0/pyout/common.py#L283-L304
pyout/pyout
pyout/common.py
StyleFields._set_widths
def _set_widths(self, row, proc_group): """Update auto-width Fields based on `row`. Parameters ---------- row : dict proc_group : {'default', 'override'} Whether to consider 'default' or 'override' key for pre- and post-format processors. Returns ------- True if any widths required adjustment. """ width_free = self.style["width_"] - sum( [sum(self.fields[c].width for c in self.columns), self.width_separtor]) if width_free < 0: width_fixed = sum( [sum(self.fields[c].width for c in self.columns if c not in self.autowidth_columns), self.width_separtor]) assert width_fixed > self.style["width_"], "bug in width logic" raise elements.StyleError( "Fixed widths specified in style exceed total width") elif width_free == 0: lgr.debug("Not checking widths; no free width left") return False lgr.debug("Checking width for row %r", row) adjusted = False for column in sorted(self.columns, key=lambda c: self.fields[c].width): # ^ Sorting the columns by increasing widths isn't necessary; we do # it so that columns that already take up more of the screen don't # continue to grow and use up free width before smaller columns # have a chance to claim some. if width_free < 1: lgr.debug("Giving up on checking widths; no free width left") break if column in self.autowidth_columns: field = self.fields[column] lgr.debug("Checking width of column %r " "(field width: %d, free width: %d)", column, field.width, width_free) # If we've added any style transform functions as # pre-format processors, we want to measure the width # of their result rather than the raw value. if field.pre[proc_group]: value = field(row[column], keys=[proc_group], exclude_post=True) else: value = row[column] value = six.text_type(value) value_width = len(value) wmax = self.autowidth_columns[column]["max"] if value_width > field.width: width_old = field.width width_available = width_free + field.width width_new = min(value_width, wmax or width_available, width_available) if width_new > width_old: adjusted = True field.width = width_new lgr.debug("Adjusting width of %r column from %d to %d " "to accommodate value %r", column, width_old, field.width, value) self._truncaters[column].length = field.width width_free -= field.width - width_old lgr.debug("Free width is %d after processing column %r", width_free, column) return adjusted
python
def _set_widths(self, row, proc_group): """Update auto-width Fields based on `row`. Parameters ---------- row : dict proc_group : {'default', 'override'} Whether to consider 'default' or 'override' key for pre- and post-format processors. Returns ------- True if any widths required adjustment. """ width_free = self.style["width_"] - sum( [sum(self.fields[c].width for c in self.columns), self.width_separtor]) if width_free < 0: width_fixed = sum( [sum(self.fields[c].width for c in self.columns if c not in self.autowidth_columns), self.width_separtor]) assert width_fixed > self.style["width_"], "bug in width logic" raise elements.StyleError( "Fixed widths specified in style exceed total width") elif width_free == 0: lgr.debug("Not checking widths; no free width left") return False lgr.debug("Checking width for row %r", row) adjusted = False for column in sorted(self.columns, key=lambda c: self.fields[c].width): # ^ Sorting the columns by increasing widths isn't necessary; we do # it so that columns that already take up more of the screen don't # continue to grow and use up free width before smaller columns # have a chance to claim some. if width_free < 1: lgr.debug("Giving up on checking widths; no free width left") break if column in self.autowidth_columns: field = self.fields[column] lgr.debug("Checking width of column %r " "(field width: %d, free width: %d)", column, field.width, width_free) # If we've added any style transform functions as # pre-format processors, we want to measure the width # of their result rather than the raw value. if field.pre[proc_group]: value = field(row[column], keys=[proc_group], exclude_post=True) else: value = row[column] value = six.text_type(value) value_width = len(value) wmax = self.autowidth_columns[column]["max"] if value_width > field.width: width_old = field.width width_available = width_free + field.width width_new = min(value_width, wmax or width_available, width_available) if width_new > width_old: adjusted = True field.width = width_new lgr.debug("Adjusting width of %r column from %d to %d " "to accommodate value %r", column, width_old, field.width, value) self._truncaters[column].length = field.width width_free -= field.width - width_old lgr.debug("Free width is %d after processing column %r", width_free, column) return adjusted
[ "def", "_set_widths", "(", "self", ",", "row", ",", "proc_group", ")", ":", "width_free", "=", "self", ".", "style", "[", "\"width_\"", "]", "-", "sum", "(", "[", "sum", "(", "self", ".", "fields", "[", "c", "]", ".", "width", "for", "c", "in", "self", ".", "columns", ")", ",", "self", ".", "width_separtor", "]", ")", "if", "width_free", "<", "0", ":", "width_fixed", "=", "sum", "(", "[", "sum", "(", "self", ".", "fields", "[", "c", "]", ".", "width", "for", "c", "in", "self", ".", "columns", "if", "c", "not", "in", "self", ".", "autowidth_columns", ")", ",", "self", ".", "width_separtor", "]", ")", "assert", "width_fixed", ">", "self", ".", "style", "[", "\"width_\"", "]", ",", "\"bug in width logic\"", "raise", "elements", ".", "StyleError", "(", "\"Fixed widths specified in style exceed total width\"", ")", "elif", "width_free", "==", "0", ":", "lgr", ".", "debug", "(", "\"Not checking widths; no free width left\"", ")", "return", "False", "lgr", ".", "debug", "(", "\"Checking width for row %r\"", ",", "row", ")", "adjusted", "=", "False", "for", "column", "in", "sorted", "(", "self", ".", "columns", ",", "key", "=", "lambda", "c", ":", "self", ".", "fields", "[", "c", "]", ".", "width", ")", ":", "# ^ Sorting the columns by increasing widths isn't necessary; we do", "# it so that columns that already take up more of the screen don't", "# continue to grow and use up free width before smaller columns", "# have a chance to claim some.", "if", "width_free", "<", "1", ":", "lgr", ".", "debug", "(", "\"Giving up on checking widths; no free width left\"", ")", "break", "if", "column", "in", "self", ".", "autowidth_columns", ":", "field", "=", "self", ".", "fields", "[", "column", "]", "lgr", ".", "debug", "(", "\"Checking width of column %r \"", "\"(field width: %d, free width: %d)\"", ",", "column", ",", "field", ".", "width", ",", "width_free", ")", "# If we've added any style transform functions as", "# pre-format processors, we want to measure the width", "# of their result rather than the raw value.", "if", "field", ".", "pre", "[", "proc_group", "]", ":", "value", "=", "field", "(", "row", "[", "column", "]", ",", "keys", "=", "[", "proc_group", "]", ",", "exclude_post", "=", "True", ")", "else", ":", "value", "=", "row", "[", "column", "]", "value", "=", "six", ".", "text_type", "(", "value", ")", "value_width", "=", "len", "(", "value", ")", "wmax", "=", "self", ".", "autowidth_columns", "[", "column", "]", "[", "\"max\"", "]", "if", "value_width", ">", "field", ".", "width", ":", "width_old", "=", "field", ".", "width", "width_available", "=", "width_free", "+", "field", ".", "width", "width_new", "=", "min", "(", "value_width", ",", "wmax", "or", "width_available", ",", "width_available", ")", "if", "width_new", ">", "width_old", ":", "adjusted", "=", "True", "field", ".", "width", "=", "width_new", "lgr", ".", "debug", "(", "\"Adjusting width of %r column from %d to %d \"", "\"to accommodate value %r\"", ",", "column", ",", "width_old", ",", "field", ".", "width", ",", "value", ")", "self", ".", "_truncaters", "[", "column", "]", ".", "length", "=", "field", ".", "width", "width_free", "-=", "field", ".", "width", "-", "width_old", "lgr", ".", "debug", "(", "\"Free width is %d after processing column %r\"", ",", "width_free", ",", "column", ")", "return", "adjusted" ]
Update auto-width Fields based on `row`. Parameters ---------- row : dict proc_group : {'default', 'override'} Whether to consider 'default' or 'override' key for pre- and post-format processors. Returns ------- True if any widths required adjustment.
[ "Update", "auto", "-", "width", "Fields", "based", "on", "row", "." ]
train
https://github.com/pyout/pyout/blob/d9ff954bdedb6fc70f21f4fe77ad4bf926b201b0/pyout/common.py#L361-L434
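The smallest-first allocation in _set_widths can be illustrated with plain numbers, detached from the Field objects. In the real method the "wanted" width comes from the length of the rendered value; all numbers below are hypothetical.

```python
# Columns claim free width in order of their current width, so
# already-wide columns cannot starve narrow ones.
total_width, separator_width = 30, 2
widths = {'id': 4, 'path': 12}   # current auto-width field widths
needed = {'id': 9, 'path': 40}   # widths the incoming row would like
free = total_width - sum(widths.values()) - separator_width  # 12

for col in sorted(widths, key=widths.get):
    if free < 1:
        break
    old, want = widths[col], needed[col]
    if want > old:
        new = min(want, free + old)
        free -= new - old
        widths[col] = new
print(widths, 'free:', free)     # {'id': 9, 'path': 19} free: 0
```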
pyout/pyout
pyout/common.py
StyleFields._proc_group
def _proc_group(self, style, adopt=True): """Return whether group is "default" or "override". In the case of "override", the self.fields pre-format and post-format processors will be set under the "override" key. Parameters ---------- style : dict A style that follows the schema defined in pyout.elements. adopt : bool, optional Merge `self.style` and `style`, giving priority to the latter's keys when there are conflicts. If False, treat `style` as a standalone style. """ fields = self.fields if style is not None: if adopt: style = elements.adopt(self.style, style) elements.validate(style) for column in self.columns: fields[column].add( "pre", "override", *(self.procgen.pre_from_style(style[column]))) fields[column].add( "post", "override", *(self.procgen.post_from_style(style[column]))) return "override" else: return "default"
python
def _proc_group(self, style, adopt=True): """Return whether group is "default" or "override". In the case of "override", the self.fields pre-format and post-format processors will be set under the "override" key. Parameters ---------- style : dict A style that follows the schema defined in pyout.elements. adopt : bool, optional Merge `self.style` and `style`, giving priority to the latter's keys when there are conflicts. If False, treat `style` as a standalone style. """ fields = self.fields if style is not None: if adopt: style = elements.adopt(self.style, style) elements.validate(style) for column in self.columns: fields[column].add( "pre", "override", *(self.procgen.pre_from_style(style[column]))) fields[column].add( "post", "override", *(self.procgen.post_from_style(style[column]))) return "override" else: return "default"
[ "def", "_proc_group", "(", "self", ",", "style", ",", "adopt", "=", "True", ")", ":", "fields", "=", "self", ".", "fields", "if", "style", "is", "not", "None", ":", "if", "adopt", ":", "style", "=", "elements", ".", "adopt", "(", "self", ".", "style", ",", "style", ")", "elements", ".", "validate", "(", "style", ")", "for", "column", "in", "self", ".", "columns", ":", "fields", "[", "column", "]", ".", "add", "(", "\"pre\"", ",", "\"override\"", ",", "*", "(", "self", ".", "procgen", ".", "pre_from_style", "(", "style", "[", "column", "]", ")", ")", ")", "fields", "[", "column", "]", ".", "add", "(", "\"post\"", ",", "\"override\"", ",", "*", "(", "self", ".", "procgen", ".", "post_from_style", "(", "style", "[", "column", "]", ")", ")", ")", "return", "\"override\"", "else", ":", "return", "\"default\"" ]
Return whether group is "default" or "override". In the case of "override", the self.fields pre-format and post-format processors will be set under the "override" key. Parameters ---------- style : dict A style that follows the schema defined in pyout.elements. adopt : bool, optional Merge `self.style` and `style`, giving priority to the latter's keys when there are conflicts. If False, treat `style` as a standalone style.
[ "Return", "whether", "group", "is", "default", "or", "override", "." ]
train
https://github.com/pyout/pyout/blob/d9ff954bdedb6fc70f21f4fe77ad4bf926b201b0/pyout/common.py#L436-L466
pyout/pyout
pyout/common.py
StyleFields.render
def render(self, row, style=None, adopt=True): """Render fields with values from `row`. Parameters ---------- row : dict A normalized row. style : dict, optional A style that follows the schema defined in pyout.elements. If None, `self.style` is used. adopt : bool, optional Merge `self.style` and `style`, using the latter's keys when there are conflicts. If False, treat `style` as a standalone style. Returns ------- A tuple with the rendered value (str) and a flag that indicates whether the field widths required adjustment (bool). """ group = self._proc_group(style, adopt=adopt) if group == "override": # Override the "default" processor key. proc_keys = ["width", "override"] else: # Use the set of processors defined by _setup_fields. proc_keys = None adjusted = self._set_widths(row, group) proc_fields = [self.fields[c](row[c], keys=proc_keys) for c in self.columns] return self.style["separator_"].join(proc_fields) + "\n", adjusted
python
def render(self, row, style=None, adopt=True): """Render fields with values from `row`. Parameters ---------- row : dict A normalized row. style : dict, optional A style that follows the schema defined in pyout.elements. If None, `self.style` is used. adopt : bool, optional Merge `self.style` and `style`, using the latter's keys when there are conflicts. If False, treat `style` as a standalone style. Returns ------- A tuple with the rendered value (str) and a flag that indicates whether the field widths required adjustment (bool). """ group = self._proc_group(style, adopt=adopt) if group == "override": # Override the "default" processor key. proc_keys = ["width", "override"] else: # Use the set of processors defined by _setup_fields. proc_keys = None adjusted = self._set_widths(row, group) proc_fields = [self.fields[c](row[c], keys=proc_keys) for c in self.columns] return self.style["separator_"].join(proc_fields) + "\n", adjusted
[ "def", "render", "(", "self", ",", "row", ",", "style", "=", "None", ",", "adopt", "=", "True", ")", ":", "group", "=", "self", ".", "_proc_group", "(", "style", ",", "adopt", "=", "adopt", ")", "if", "group", "==", "\"override\"", ":", "# Override the \"default\" processor key.", "proc_keys", "=", "[", "\"width\"", ",", "\"override\"", "]", "else", ":", "# Use the set of processors defined by _setup_fields.", "proc_keys", "=", "None", "adjusted", "=", "self", ".", "_set_widths", "(", "row", ",", "group", ")", "proc_fields", "=", "[", "self", ".", "fields", "[", "c", "]", "(", "row", "[", "c", "]", ",", "keys", "=", "proc_keys", ")", "for", "c", "in", "self", ".", "columns", "]", "return", "self", ".", "style", "[", "\"separator_\"", "]", ".", "join", "(", "proc_fields", ")", "+", "\"\\n\"", ",", "adjusted" ]
Render fields with values from `row`. Parameters ---------- row : dict A normalized row. style : dict, optional A style that follows the schema defined in pyout.elements. If None, `self.style` is used. adopt : bool, optional Merge `self.style` and `style`, using the latter's keys when there are conflicts. If False, treat `style` as a standalone style. Returns ------- A tuple with the rendered value (str) and a flag that indicates whether the field widths required adjustment (bool).
[ "Render", "fields", "with", "values", "from", "row", "." ]
train
https://github.com/pyout/pyout/blob/d9ff954bdedb6fc70f21f4fe77ad4bf926b201b0/pyout/common.py#L468-L499
HubbleHQ/heroku-kafka
heroku_kafka.py
HerokuKafka.get_config
def get_config(self): """ Sets up the basic config from the variables passed in; all of these are from what Heroku gives you. """ self.create_ssl_certs() config = { "bootstrap_servers": self.get_brokers(), "security_protocol": 'SSL', "ssl_cafile": self.ssl["ca"]["file"].name, "ssl_certfile": self.ssl["cert"]["file"].name, "ssl_keyfile": self.ssl["key"]["file"].name, "ssl_check_hostname": False, "ssl_password": None } self.config.update(config)
python
def get_config(self): """ Sets up the basic config from the variables passed in; all of these are from what Heroku gives you. """ self.create_ssl_certs() config = { "bootstrap_servers": self.get_brokers(), "security_protocol": 'SSL', "ssl_cafile": self.ssl["ca"]["file"].name, "ssl_certfile": self.ssl["cert"]["file"].name, "ssl_keyfile": self.ssl["key"]["file"].name, "ssl_check_hostname": False, "ssl_password": None } self.config.update(config)
[ "def", "get_config", "(", "self", ")", ":", "self", ".", "create_ssl_certs", "(", ")", "config", "=", "{", "\"bootstrap_servers\"", ":", "self", ".", "get_brokers", "(", ")", ",", "\"security_protocol\"", ":", "'SSL'", ",", "\"ssl_cafile\"", ":", "self", ".", "ssl", "[", "\"ca\"", "]", "[", "\"file\"", "]", ".", "name", ",", "\"ssl_certfile\"", ":", "self", ".", "ssl", "[", "\"cert\"", "]", "[", "\"file\"", "]", ".", "name", ",", "\"ssl_keyfile\"", ":", "self", ".", "ssl", "[", "\"key\"", "]", "[", "\"file\"", "]", ".", "name", ",", "\"ssl_check_hostname\"", ":", "False", ",", "\"ssl_password\"", ":", "None", "}", "self", ".", "config", ".", "update", "(", "config", ")" ]
Sets up the basic config from the variables passed in; all of these are from what Heroku gives you.
[ "Sets", "up", "the", "basic", "config", "from", "the", "variables", "passed", "in", "all", "of", "these", "are", "from", "what", "Heroku", "gives", "you", "." ]
train
https://github.com/HubbleHQ/heroku-kafka/blob/2c28b79e0ba130e13e91d9458826d4930eee2c52/heroku_kafka.py#L27-L43
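For context, the keys written into `self.config` line up with kafka-python's constructor keywords, so the dict can be unpacked straight into a producer. A minimal sketch, assuming kafka-python is installed and using placeholder broker and cert values in place of what Heroku actually provides:

from kafka import KafkaProducer

config = {
    "bootstrap_servers": ["host1:9096", "host2:9096"],  # placeholder brokers
    "security_protocol": "SSL",
    "ssl_cafile": "/tmp/ca.crt",        # placeholder cert paths; the real ones
    "ssl_certfile": "/tmp/client.crt",  # are NamedTemporaryFile names
    "ssl_keyfile": "/tmp/client.key",
    "ssl_check_hostname": False,
    "ssl_password": None,
}
producer = KafkaProducer(**config)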
HubbleHQ/heroku-kafka
heroku_kafka.py
HerokuKafka.get_brokers
def get_brokers(self): """ Parses the KAFKA_URL and returns a list of hostname:port pairs in the format that kafka-python expects. """ return ['{}:{}'.format(parsedUrl.hostname, parsedUrl.port) for parsedUrl in [urlparse(url) for url in self.kafka_url.split(',')]]
python
def get_brokers(self): """ Parses the KAFKA_URL and returns a list of hostname:port pairs in the format that kafka-python expects. """ return ['{}:{}'.format(parsedUrl.hostname, parsedUrl.port) for parsedUrl in [urlparse(url) for url in self.kafka_url.split(',')]]
[ "def", "get_brokers", "(", "self", ")", ":", "return", "[", "'{}:{}'", ".", "format", "(", "parsedUrl", ".", "hostname", ",", "parsedUrl", ".", "port", ")", "for", "parsedUrl", "in", "[", "urlparse", "(", "url", ")", "for", "url", "in", "self", ".", "kafka_url", ".", "split", "(", "','", ")", "]", "]" ]
Parses the KAFKA_URL and returns a list of hostname:port pairs in the format that kafka-python expects.
[ "Parses", "the", "KAKFA_URL", "and", "returns", "a", "list", "of", "hostname", ":", "port", "pairs", "in", "the", "format", "that", "kafka", "-", "python", "expects", "." ]
train
https://github.com/HubbleHQ/heroku-kafka/blob/2c28b79e0ba130e13e91d9458826d4930eee2c52/heroku_kafka.py#L45-L51
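The parsing is plain stdlib and easy to check in isolation; Heroku's KAFKA_URL is a comma-separated list of URLs:

from urllib.parse import urlparse

kafka_url = "kafka+ssl://host1:9096,kafka+ssl://host2:9096"  # example value
brokers = ['{}:{}'.format(u.hostname, u.port)
           for u in (urlparse(url) for url in kafka_url.split(','))]
assert brokers == ['host1:9096', 'host2:9096']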
HubbleHQ/heroku-kafka
heroku_kafka.py
HerokuKafka.create_ssl_certs
def create_ssl_certs(self): """ Creates SSL cert files """ for key, file in self.ssl.items(): file["file"] = self.create_temp_file(file["suffix"], file["content"])
python
def create_ssl_certs(self): """ Creates SSL cert files """ for key, file in self.ssl.items(): file["file"] = self.create_temp_file(file["suffix"], file["content"])
[ "def", "create_ssl_certs", "(", "self", ")", ":", "for", "key", ",", "file", "in", "self", ".", "ssl", ".", "items", "(", ")", ":", "file", "[", "\"file\"", "]", "=", "self", ".", "create_temp_file", "(", "file", "[", "\"suffix\"", "]", ",", "file", "[", "\"content\"", "]", ")" ]
Creates SSL cert files
[ "Creates", "SSL", "cert", "files" ]
train
https://github.com/HubbleHQ/heroku-kafka/blob/2c28b79e0ba130e13e91d9458826d4930eee2c52/heroku_kafka.py#L53-L58
HubbleHQ/heroku-kafka
heroku_kafka.py
HerokuKafka.create_temp_file
def create_temp_file(self, suffix, content): """ Creates a file. Because environment variables are escaped by default, it encodes and then decodes them before writing so \n etc. work correctly. """ temp = tempfile.NamedTemporaryFile(suffix=suffix) temp.write(content.encode('latin1').decode('unicode_escape').encode('utf-8')) temp.seek(0) # Resets the temp file position to 0 return temp
python
def create_temp_file(self, suffix, content): """ Creates a file. Because environment variables are escaped by default, it encodes and then decodes them before writing so \n etc. work correctly. """ temp = tempfile.NamedTemporaryFile(suffix=suffix) temp.write(content.encode('latin1').decode('unicode_escape').encode('utf-8')) temp.seek(0) # Resets the temp file position to 0 return temp
[ "def", "create_temp_file", "(", "self", ",", "suffix", ",", "content", ")", ":", "temp", "=", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "suffix", ")", "temp", ".", "write", "(", "content", ".", "encode", "(", "'latin1'", ")", ".", "decode", "(", "'unicode_escape'", ")", ".", "encode", "(", "'utf-8'", ")", ")", "temp", ".", "seek", "(", "0", ")", "# Resets the temp file line to 0", "return", "temp" ]
Creates a file. Because environment variables are escaped by default, it encodes and then decodes them before writing so \n etc. work correctly.
[ "Creates", "file", "because", "environment", "variables", "are", "by", "default", "escaped", "it", "encodes", "and", "then", "decodes", "them", "before", "write", "so", "\\", "n", "etc", ".", "work", "correctly", "." ]
train
https://github.com/HubbleHQ/heroku-kafka/blob/2c28b79e0ba130e13e91d9458826d4930eee2c52/heroku_kafka.py#L61-L69
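The encode/decode round-trip is the subtle part: Heroku delivers cert contents with escaped newlines (a literal backslash followed by "n"), and `unicode_escape` turns those back into real newlines before the bytes hit disk. A stdlib-only check:

raw = "-----BEGIN-----\\nMIIB...\\n-----END-----"  # escaped, as an env var would hold it
fixed = raw.encode('latin1').decode('unicode_escape').encode('utf-8')
assert fixed == b"-----BEGIN-----\nMIIB...\n-----END-----"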
HubbleHQ/heroku-kafka
heroku_kafka.py
HerokuKafka.prefix_topic
def prefix_topic(self, topics): """ Adds the topic_prefix to topic(s) supplied """ if not self.topic_prefix or not topics: return topics if not isinstance(topics, str) and isinstance(topics, collections.Iterable): return [self.topic_prefix + topic for topic in topics] return self.topic_prefix + topics
python
def prefix_topic(self, topics): """ Adds the topic_prefix to topic(s) supplied """ if not self.topic_prefix or not topics: return topics if not isinstance(topics, str) and isinstance(topics, collections.Iterable): return [self.topic_prefix + topic for topic in topics] return self.topic_prefix + topics
[ "def", "prefix_topic", "(", "self", ",", "topics", ")", ":", "if", "not", "self", ".", "topic_prefix", "or", "not", "topics", ":", "return", "topics", "if", "not", "isinstance", "(", "topics", ",", "str", ")", "and", "isinstance", "(", "topics", ",", "collections", ".", "Iterable", ")", ":", "return", "[", "self", ".", "topic_prefix", "+", "topic", "for", "topic", "in", "topics", "]", "return", "self", ".", "topic_prefix", "+", "topics" ]
Adds the topic_prefix to topic(s) supplied
[ "Adds", "the", "topic_prefix", "to", "topic", "(", "s", ")", "supplied" ]
train
https://github.com/HubbleHQ/heroku-kafka/blob/2c28b79e0ba130e13e91d9458826d4930eee2c52/heroku_kafka.py#L71-L81
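Behavior for a single topic versus an iterable, sketched as a standalone function. One caveat worth noting: the original tests against `collections.Iterable`, which was removed in Python 3.10; a modern equivalent imports from `collections.abc`:

from collections.abc import Iterable

def prefix_topic(topic_prefix, topics):
    # Same logic as above, restated against collections.abc for Python 3.10+.
    if not topic_prefix or not topics:
        return topics
    if not isinstance(topics, str) and isinstance(topics, Iterable):
        return [topic_prefix + topic for topic in topics]
    return topic_prefix + topics

assert prefix_topic("app1.", "events") == "app1.events"
assert prefix_topic("app1.", ["a", "b"]) == ["app1.a", "app1.b"]
assert prefix_topic(None, "events") == "events"  # no prefix configured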
HubbleHQ/heroku-kafka
heroku_kafka.py
HerokuKafkaProducer.send
def send(self, topic, *args, **kwargs): """ Prepends the prefix to the topic before sending """ prefix_topic = self.heroku_kafka.prefix_topic(topic) return super(HerokuKafkaProducer, self).send(prefix_topic, *args, **kwargs)
python
def send(self, topic, *args, **kwargs): """ Prepends the prefix to the topic before sending """ prefix_topic = self.heroku_kafka.prefix_topic(topic) return super(HerokuKafkaProducer, self).send(prefix_topic, *args, **kwargs)
[ "def", "send", "(", "self", ",", "topic", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "prefix_topic", "=", "self", ".", "heroku_kafka", ".", "prefix_topic", "(", "topic", ")", "return", "super", "(", "HerokuKafkaProducer", ",", "self", ")", ".", "send", "(", "prefix_topic", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Prepends the prefix to the topic before sending
[ "Appends", "the", "prefix", "to", "the", "topic", "before", "sendingf" ]
train
https://github.com/HubbleHQ/heroku-kafka/blob/2c28b79e0ba130e13e91d9458826d4930eee2c52/heroku_kafka.py#L98-L103
night-crawler/django-docker-helpers
django_docker_helpers/config/backends/base.py
BaseParser.get
def get(self, variable_path: str, default: t.Optional[t.Any] = None, coerce_type: t.Optional[t.Type] = None, coercer: t.Optional[t.Callable] = None, **kwargs): """ Inherited method should take all specified arguments. :param variable_path: a delimiter-separated path to a nested value :param default: default value if there's no object by specified path :param coerce_type: cast a type of a value to a specified one :param coercer: perform a type casting with specified callback :param kwargs: additional arguments inherited parser may need :return: value or default """ raise NotImplementedError
python
def get(self, variable_path: str, default: t.Optional[t.Any] = None, coerce_type: t.Optional[t.Type] = None, coercer: t.Optional[t.Callable] = None, **kwargs): """ Inherited method should take all specified arguments. :param variable_path: a delimiter-separated path to a nested value :param default: default value if there's no object by specified path :param coerce_type: cast a type of a value to a specified one :param coercer: perform a type casting with specified callback :param kwargs: additional arguments inherited parser may need :return: value or default """ raise NotImplementedError
[ "def", "get", "(", "self", ",", "variable_path", ":", "str", ",", "default", ":", "t", ".", "Optional", "[", "t", ".", "Any", "]", "=", "None", ",", "coerce_type", ":", "t", ".", "Optional", "[", "t", ".", "Type", "]", "=", "None", ",", "coercer", ":", "t", ".", "Optional", "[", "t", ".", "Callable", "]", "=", "None", ",", "*", "*", "kwargs", ")", ":", "raise", "NotImplementedError" ]
Inherited method should take all specified arguments. :param variable_path: a delimiter-separated path to a nested value :param default: default value if there's no object by specified path :param coerce_type: cast a type of a value to a specified one :param coercer: perform a type casting with specified callback :param kwargs: additional arguments inherited parser may need :return: value or default
[ "Inherited", "method", "should", "take", "all", "specified", "arguments", "." ]
train
https://github.com/night-crawler/django-docker-helpers/blob/b64f8009101a8eb61d3841124ba19e3ab881aa2f/django_docker_helpers/config/backends/base.py#L47-L63
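Since `get` is abstract here, a dict-backed toy subclass shows the intended shape; this class is purely illustrative and assumes nothing about the real backends' constructors:

class DictParser(BaseParser):
    def __init__(self, data, path_separator='.'):
        self.data = data
        self.path_separator = path_separator

    def get(self, variable_path, default=None,
            coerce_type=None, coercer=None, **kwargs):
        node = self.data
        for part in variable_path.split(self.path_separator):
            if not isinstance(node, dict) or part not in node:
                return default
            node = node[part]
        return self.coerce(node, coerce_type=coerce_type, coercer=coercer)

parser = DictParser({'db': {'port': '5432'}})
assert parser.get('db.port', coerce_type=int) == 5432
assert parser.get('db.missing', default='n/a') == 'n/a'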
night-crawler/django-docker-helpers
django_docker_helpers/config/backends/base.py
BaseParser.coerce
def coerce(val: t.Any, coerce_type: t.Optional[t.Type] = None, coercer: t.Optional[t.Callable] = None) -> t.Any: """ Casts a type of ``val`` to ``coerce_type`` with ``coercer``. If ``coerce_type`` is bool and no ``coercer`` specified it uses :func:`~django_docker_helpers.utils.coerce_str_to_bool` by default. :param val: a value of any type :param coerce_type: any type :param coercer: provide a callback that takes ``val`` and returns a value with desired type :return: type casted value """ if not coerce_type and not coercer: return val if coerce_type and type(val) is coerce_type: return val if coerce_type and coerce_type is bool and not coercer: coercer = coerce_str_to_bool if coercer is None: coercer = coerce_type return coercer(val)
python
def coerce(val: t.Any, coerce_type: t.Optional[t.Type] = None, coercer: t.Optional[t.Callable] = None) -> t.Any: """ Casts a type of ``val`` to ``coerce_type`` with ``coercer``. If ``coerce_type`` is bool and no ``coercer`` specified it uses :func:`~django_docker_helpers.utils.coerce_str_to_bool` by default. :param val: a value of any type :param coerce_type: any type :param coercer: provide a callback that takes ``val`` and returns a value with desired type :return: type casted value """ if not coerce_type and not coercer: return val if coerce_type and type(val) is coerce_type: return val if coerce_type and coerce_type is bool and not coercer: coercer = coerce_str_to_bool if coercer is None: coercer = coerce_type return coercer(val)
[ "def", "coerce", "(", "val", ":", "t", ".", "Any", ",", "coerce_type", ":", "t", ".", "Optional", "[", "t", ".", "Type", "]", "=", "None", ",", "coercer", ":", "t", ".", "Optional", "[", "t", ".", "Callable", "]", "=", "None", ")", "->", "t", ".", "Any", ":", "if", "not", "coerce_type", "and", "not", "coercer", ":", "return", "val", "if", "coerce_type", "and", "type", "(", "val", ")", "is", "coerce_type", ":", "return", "val", "if", "coerce_type", "and", "coerce_type", "is", "bool", "and", "not", "coercer", ":", "coercer", "=", "coerce_str_to_bool", "if", "coercer", "is", "None", ":", "coercer", "=", "coerce_type", "return", "coercer", "(", "val", ")" ]
Casts a type of ``val`` to ``coerce_type`` with ``coercer``. If ``coerce_type`` is bool and no ``coercer`` specified it uses :func:`~django_docker_helpers.utils.coerce_str_to_bool` by default. :param val: a value of any type :param coerce_type: any type :param coercer: provide a callback that takes ``val`` and returns a value with desired type :return: type casted value
[ "Casts", "a", "type", "of", "val", "to", "coerce_type", "with", "coercer", "." ]
train
https://github.com/night-crawler/django-docker-helpers/blob/b64f8009101a8eb61d3841124ba19e3ab881aa2f/django_docker_helpers/config/backends/base.py#L66-L92
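The branches can be read straight off the code: no-op without a target, early return on an exact type match, and `coerce_str_to_bool` as the implicit coercer for bool:

assert BaseParser.coerce("42") == "42"                  # nothing requested: unchanged
assert BaseParser.coerce("42", coerce_type=int) == 42
assert BaseParser.coerce(42, coerce_type=int) == 42     # already the right type
assert BaseParser.coerce("8080", coercer=int) == 8080   # explicit coercer wins
assert BaseParser.coerce(True, coerce_type=bool) is True
# For string input with coerce_type=bool, coerce_str_to_bool decides which
# spellings count as true/false (see django_docker_helpers.utils).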
night-crawler/django-docker-helpers
django_docker_helpers/config/backends/base.py
BaseParser.client
def client(self): """ Helper property to lazy initialize and cache client. Runs :meth:`~django_docker_helpers.config.backends.base.BaseParser.get_client`. :return: an instance of backend-specific client """ if self._client is not None: return self._client self._client = self.get_client() return self._client
python
def client(self): """ Helper property to lazy initialize and cache client. Runs :meth:`~django_docker_helpers.config.backends.base.BaseParser.get_client`. :return: an instance of backend-specific client """ if self._client is not None: return self._client self._client = self.get_client() return self._client
[ "def", "client", "(", "self", ")", ":", "if", "self", ".", "_client", "is", "not", "None", ":", "return", "self", ".", "_client", "self", ".", "_client", "=", "self", ".", "get_client", "(", ")", "return", "self", ".", "_client" ]
Helper property to lazy initialize and cache client. Runs :meth:`~django_docker_helpers.config.backends.base.BaseParser.get_client`. :return: an instance of backend-specific client
[ "Helper", "property", "to", "lazy", "initialize", "and", "cache", "client", ".", "Runs", ":", "meth", ":", "~django_docker_helpers", ".", "config", ".", "backends", ".", "base", ".", "BaseParser", ".", "get_client", "." ]
train
https://github.com/night-crawler/django-docker-helpers/blob/b64f8009101a8eb61d3841124ba19e3ab881aa2f/django_docker_helpers/config/backends/base.py#L95-L106
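The lazy-initialize-and-cache pattern in isolation, with `get_client` standing in for a backend-specific factory:

class LazyClient:
    _client = None

    def get_client(self):
        # Stand-in for a backend-specific constructor (Consul, Redis, ...).
        return object()

    @property
    def client(self):
        if self._client is not None:
            return self._client
        self._client = self.get_client()
        return self._client

obj = LazyClient()
assert obj.client is obj.client  # constructed once, cached afterwards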
pyout/pyout
pyout/truncate.py
_splice
def _splice(value, n): """Splice `value` at its center, retaining a total of `n` characters. Parameters ---------- value : str n : int The total length of the returned ends will not be greater than this value. Characters will be dropped from the center to reach this limit. Returns ------- A tuple of str: (head, tail). """ if n <= 0: raise ValueError("n must be positive") value_len = len(value) center = value_len // 2 left, right = value[:center], value[center:] if n >= value_len: return left, right n_todrop = value_len - n right_idx = n_todrop // 2 left_idx = right_idx + n_todrop % 2 return left[:-left_idx], right[right_idx:]
python
def _splice(value, n): """Splice `value` at its center, retaining a total of `n` characters. Parameters ---------- value : str n : int The total length of the returned ends will not be greater than this value. Characters will be dropped from the center to reach this limit. Returns ------- A tuple of str: (head, tail). """ if n <= 0: raise ValueError("n must be positive") value_len = len(value) center = value_len // 2 left, right = value[:center], value[center:] if n >= value_len: return left, right n_todrop = value_len - n right_idx = n_todrop // 2 left_idx = right_idx + n_todrop % 2 return left[:-left_idx], right[right_idx:]
[ "def", "_splice", "(", "value", ",", "n", ")", ":", "if", "n", "<=", "0", ":", "raise", "ValueError", "(", "\"n must be positive\"", ")", "value_len", "=", "len", "(", "value", ")", "center", "=", "value_len", "//", "2", "left", ",", "right", "=", "value", "[", ":", "center", "]", ",", "value", "[", "center", ":", "]", "if", "n", ">=", "value_len", ":", "return", "left", ",", "right", "n_todrop", "=", "value_len", "-", "n", "right_idx", "=", "n_todrop", "//", "2", "left_idx", "=", "right_idx", "+", "n_todrop", "%", "2", "return", "left", "[", ":", "-", "left_idx", "]", ",", "right", "[", "right_idx", ":", "]" ]
Splice `value` at its center, retaining a total of `n` characters. Parameters ---------- value : str n : int The total length of the returned ends will not be greater than this value. Characters will be dropped from the center to reach this limit. Returns ------- A tuple of str: (head, tail).
[ "Splice", "value", "at", "its", "center", "retaining", "a", "total", "of", "n", "characters", "." ]
train
https://github.com/pyout/pyout/blob/d9ff954bdedb6fc70f21f4fe77ad4bf926b201b0/pyout/truncate.py#L27-L54
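Spot-checking the arithmetic (these values follow directly from the implementation above):

assert _splice("abcdefghij", 6) == ("abc", "hij")  # 4 chars dropped from the center
assert _splice("abcdefgh", 4) == ("ab", "gh")
assert _splice("abc", 10) == ("a", "bc")           # n >= len(value): plain center split
try:
    _splice("abc", 0)
except ValueError:
    pass  # n must be positive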
night-crawler/django-docker-helpers
django_docker_helpers/cli/django/management/commands/run_configured_uwsgi.py
write_uwsgi_ini_cfg
def write_uwsgi_ini_cfg(fp: t.IO, cfg: dict): """ Writes the uwsgi.ini file content into an IO stream (see the example below). uWSGI configs are likely to break the INI (YAML, etc.) specification with duplicate key definitions, so it writes the `cfg` object (dict) in "uWSGI Style". >>> import sys >>> cfg = { ... 'static-map': [ ... '/static/=/application/static/', ... '/media/=/application/media/', ... '/usermedia/=/application/usermedia/'] ... } >>> write_uwsgi_ini_cfg(sys.stdout, cfg) [uwsgi] static-map = /static/=/application/static/ static-map = /media/=/application/media/ static-map = /usermedia/=/application/usermedia/ """ fp.write(f'[uwsgi]\n') for key, val in cfg.items(): if isinstance(val, bool): val = str(val).lower() if isinstance(val, list): for v in val: fp.write(f'{key} = {v}\n') continue fp.write(f'{key} = {val}\n')
python
def write_uwsgi_ini_cfg(fp: t.IO, cfg: dict): """ Writes the uwsgi.ini file content into an IO stream (see the example below). uWSGI configs are likely to break the INI (YAML, etc.) specification with duplicate key definitions, so it writes the `cfg` object (dict) in "uWSGI Style". >>> import sys >>> cfg = { ... 'static-map': [ ... '/static/=/application/static/', ... '/media/=/application/media/', ... '/usermedia/=/application/usermedia/'] ... } >>> write_uwsgi_ini_cfg(sys.stdout, cfg) [uwsgi] static-map = /static/=/application/static/ static-map = /media/=/application/media/ static-map = /usermedia/=/application/usermedia/ """ fp.write(f'[uwsgi]\n') for key, val in cfg.items(): if isinstance(val, bool): val = str(val).lower() if isinstance(val, list): for v in val: fp.write(f'{key} = {v}\n') continue fp.write(f'{key} = {val}\n')
[ "def", "write_uwsgi_ini_cfg", "(", "fp", ":", "t", ".", "IO", ",", "cfg", ":", "dict", ")", ":", "fp", ".", "write", "(", "f'[uwsgi]\\n'", ")", "for", "key", ",", "val", "in", "cfg", ".", "items", "(", ")", ":", "if", "isinstance", "(", "val", ",", "bool", ")", ":", "val", "=", "str", "(", "val", ")", ".", "lower", "(", ")", "if", "isinstance", "(", "val", ",", "list", ")", ":", "for", "v", "in", "val", ":", "fp", ".", "write", "(", "f'{key} = {v}\\n'", ")", "continue", "fp", ".", "write", "(", "f'{key} = {val}\\n'", ")" ]
Writes the uwsgi.ini file content into an IO stream (see the example below). uWSGI configs are likely to break the INI (YAML, etc.) specification with duplicate key definitions, so it writes the `cfg` object (dict) in "uWSGI Style". >>> import sys >>> cfg = { ... 'static-map': [ ... '/static/=/application/static/', ... '/media/=/application/media/', ... '/usermedia/=/application/usermedia/'] ... } >>> write_uwsgi_ini_cfg(sys.stdout, cfg) [uwsgi] static-map = /static/=/application/static/ static-map = /media/=/application/media/ static-map = /usermedia/=/application/usermedia/
[ "Writes", "into", "IO", "stream", "the", "uwsgi", ".", "ini", "file", "content", "(", "actually", "it", "does", "smth", "strange", "just", "look", "below", ")", "." ]
train
https://github.com/night-crawler/django-docker-helpers/blob/b64f8009101a8eb61d3841124ba19e3ab881aa2f/django_docker_helpers/cli/django/management/commands/run_configured_uwsgi.py#L9-L40
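The doctest writes to stdout, but any text stream works; capturing into a buffer makes the bool lowering visible too:

import io

buf = io.StringIO()
write_uwsgi_ini_cfg(buf, {"master": True, "processes": 4})
assert buf.getvalue() == "[uwsgi]\nmaster = true\nprocesses = 4\n"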
night-crawler/django-docker-helpers
django_docker_helpers/config/backends/mpt_consul_parser.py
MPTConsulParser.get
def get(self, variable_path: str, default: t.Optional[t.Any] = None, coerce_type: t.Optional[t.Type] = None, coercer: t.Optional[t.Callable] = None, **kwargs): """ :param variable_path: a delimiter-separated path to a nested value :param default: default value if there's no object by specified path :param coerce_type: cast a type of a value to a specified one :param coercer: perform a type casting with specified callback :param kwargs: additional arguments inherited parser may need :return: value or default """ if self.path_separator != self.consul_path_separator: variable_path = variable_path.replace(self.path_separator, self.consul_path_separator) if self.scope: _scope = self.consul_path_separator.join(self.scope.split(self.path_separator)) variable_path = '{0}/{1}'.format(_scope, variable_path) index, data = self.client.kv.get(variable_path, **kwargs) if data is None: return default val = data['Value'] if val is None: # None is present and it is a valid value return val if val.startswith(self.object_serialize_prefix): # since complex data types are yaml-serialized there's no need to coerce anything _val = val[len(self.object_serialize_prefix):] bundle = self.object_deserialize(_val) if bundle == '': # check for reinforced empty flag return self.coerce(bundle, coerce_type=coerce_type, coercer=coercer) return bundle if isinstance(val, bytes): val = val.decode() return self.coerce(val, coerce_type=coerce_type, coercer=coercer)
python
def get(self, variable_path: str, default: t.Optional[t.Any] = None, coerce_type: t.Optional[t.Type] = None, coercer: t.Optional[t.Callable] = None, **kwargs): """ :param variable_path: a delimiter-separated path to a nested value :param default: default value if there's no object by specified path :param coerce_type: cast a type of a value to a specified one :param coercer: perform a type casting with specified callback :param kwargs: additional arguments inherited parser may need :return: value or default """ if self.path_separator != self.consul_path_separator: variable_path = variable_path.replace(self.path_separator, self.consul_path_separator) if self.scope: _scope = self.consul_path_separator.join(self.scope.split(self.path_separator)) variable_path = '{0}/{1}'.format(_scope, variable_path) index, data = self.client.kv.get(variable_path, **kwargs) if data is None: return default val = data['Value'] if val is None: # None is present and it is a valid value return val if val.startswith(self.object_serialize_prefix): # since complex data types are yaml-serialized there's no need to coerce anything _val = val[len(self.object_serialize_prefix):] bundle = self.object_deserialize(_val) if bundle == '': # check for reinforced empty flag return self.coerce(bundle, coerce_type=coerce_type, coercer=coercer) return bundle if isinstance(val, bytes): val = val.decode() return self.coerce(val, coerce_type=coerce_type, coercer=coercer)
[ "def", "get", "(", "self", ",", "variable_path", ":", "str", ",", "default", ":", "t", ".", "Optional", "[", "t", ".", "Any", "]", "=", "None", ",", "coerce_type", ":", "t", ".", "Optional", "[", "t", ".", "Type", "]", "=", "None", ",", "coercer", ":", "t", ".", "Optional", "[", "t", ".", "Callable", "]", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "path_separator", "!=", "self", ".", "consul_path_separator", ":", "variable_path", "=", "variable_path", ".", "replace", "(", "self", ".", "path_separator", ",", "self", ".", "consul_path_separator", ")", "if", "self", ".", "scope", ":", "_scope", "=", "self", ".", "consul_path_separator", ".", "join", "(", "self", ".", "scope", ".", "split", "(", "self", ".", "path_separator", ")", ")", "variable_path", "=", "'{0}/{1}'", ".", "format", "(", "_scope", ",", "variable_path", ")", "index", ",", "data", "=", "self", ".", "client", ".", "kv", ".", "get", "(", "variable_path", ",", "*", "*", "kwargs", ")", "if", "data", "is", "None", ":", "return", "default", "val", "=", "data", "[", "'Value'", "]", "if", "val", "is", "None", ":", "# None is present and it is a valid value", "return", "val", "if", "val", ".", "startswith", "(", "self", ".", "object_serialize_prefix", ")", ":", "# since complex data types are yaml-serialized there's no need to coerce anything", "_val", "=", "val", "[", "len", "(", "self", ".", "object_serialize_prefix", ")", ":", "]", "bundle", "=", "self", ".", "object_deserialize", "(", "_val", ")", "if", "bundle", "==", "''", ":", "# check for reinforced empty flag", "return", "self", ".", "coerce", "(", "bundle", ",", "coerce_type", "=", "coerce_type", ",", "coercer", "=", "coercer", ")", "return", "bundle", "if", "isinstance", "(", "val", ",", "bytes", ")", ":", "val", "=", "val", ".", "decode", "(", ")", "return", "self", ".", "coerce", "(", "val", ",", "coerce_type", "=", "coerce_type", ",", "coercer", "=", "coercer", ")" ]
:param variable_path: a delimiter-separated path to a nested value :param default: default value if there's no object by specified path :param coerce_type: cast a type of a value to a specified one :param coercer: perform a type casting with specified callback :param kwargs: additional arguments inherited parser may need :return: value or default
[ ":", "param", "variable_path", ":", "a", "delimiter", "-", "separated", "path", "to", "a", "nested", "value", ":", "param", "default", ":", "default", "value", "if", "there", "s", "no", "object", "by", "specified", "path", ":", "param", "coerce_type", ":", "cast", "a", "type", "of", "a", "value", "to", "a", "specified", "one", ":", "param", "coercer", ":", "perform", "a", "type", "casting", "with", "specified", "callback", ":", "param", "kwargs", ":", "additional", "arguments", "inherited", "parser", "may", "need", ":", "return", ":", "value", "or", "default" ]
train
https://github.com/night-crawler/django-docker-helpers/blob/b64f8009101a8eb61d3841124ba19e3ab881aa2f/django_docker_helpers/config/backends/mpt_consul_parser.py#L76-L119
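A usage sketch; the constructor arguments below are assumptions (the real signature isn't shown in this record), but the lookup semantics follow the code above: the scope is prepended, the path separator is translated, bytes are decoded, and the result is coerced.

parser = MPTConsulParser(host='127.0.0.1', port=8500, scope='myapp')  # hypothetical args
port = parser.get('db.port', default=5432, coerce_type=int)   # reads myapp/db/port
debug = parser.get('debug', default=False, coerce_type=bool)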
meng89/ipodshuffle
ipodshuffle/db/itunessd.py
itunessd_to_dics
def itunessd_to_dics(itunessd): """ :param itunessd: the whole iTunesSD bytes data :return: translate to tree object, see doc of dics_to_itunessd """ # header header_size = get_table_size(header_table) header_chunk = itunessd[0:header_size] header_dic = chunk_to_dic(header_chunk, header_table) # tracks tracks_header_dic, tracks_offsets = get_dic_sub_numbers(itunessd, header_dic['tracks_header_offset'], tracks_header_table) tracks_dics = [] for track_offset in tracks_offsets: _track_dic = chunk_to_dic(itunessd[track_offset:], track_table) track_dic = get_custom_fields_dic(_track_dic, track_table) tracks_dics.append(track_dic) # playlists playlists_header_dic, playlists_offsets = get_dic_sub_numbers(itunessd, header_dic['playlists_header_offset'], playlists_header_table) playlists_dics_and_indexes = [] for playlist_offset in playlists_offsets: _playlist_header_dic, indexes_of_tracks = get_dic_sub_numbers(itunessd, playlist_offset, playlist_header_table) playlist_header_dic = get_custom_fields_dic(_playlist_header_dic, playlist_header_table) playlists_dics_and_indexes.append((playlist_header_dic, indexes_of_tracks)) return get_custom_fields_dic(header_dic, header_table), tracks_dics, playlists_dics_and_indexes
python
def itunessd_to_dics(itunessd): """ :param itunessd: the whole iTunesSD bytes data :return: translate to tree object, see doc of dics_to_itunessd """ # header header_size = get_table_size(header_table) header_chunk = itunessd[0:header_size] header_dic = chunk_to_dic(header_chunk, header_table) # tracks tracks_header_dic, tracks_offsets = get_dic_sub_numbers(itunessd, header_dic['tracks_header_offset'], tracks_header_table) tracks_dics = [] for track_offset in tracks_offsets: _track_dic = chunk_to_dic(itunessd[track_offset:], track_table) track_dic = get_custom_fields_dic(_track_dic, track_table) tracks_dics.append(track_dic) # playlists playlists_header_dic, playlists_offsets = get_dic_sub_numbers(itunessd, header_dic['playlists_header_offset'], playlists_header_table) playlists_dics_and_indexes = [] for playlist_offset in playlists_offsets: _playlist_header_dic, indexes_of_tracks = get_dic_sub_numbers(itunessd, playlist_offset, playlist_header_table) playlist_header_dic = get_custom_fields_dic(_playlist_header_dic, playlist_header_table) playlists_dics_and_indexes.append((playlist_header_dic, indexes_of_tracks)) return get_custom_fields_dic(header_dic, header_table), tracks_dics, playlists_dics_and_indexes
[ "def", "itunessd_to_dics", "(", "itunessd", ")", ":", "# header", "header_size", "=", "get_table_size", "(", "header_table", ")", "header_chunk", "=", "itunessd", "[", "0", ":", "header_size", "]", "header_dic", "=", "chunk_to_dic", "(", "header_chunk", ",", "header_table", ")", "# tracks", "tracks_header_dic", ",", "tracks_offsets", "=", "get_dic_sub_numbers", "(", "itunessd", ",", "header_dic", "[", "'tracks_header_offset'", "]", ",", "tracks_header_table", ")", "tracks_dics", "=", "[", "]", "for", "track_offset", "in", "tracks_offsets", ":", "_track_dic", "=", "chunk_to_dic", "(", "itunessd", "[", "track_offset", ":", "]", ",", "track_table", ")", "track_dic", "=", "get_custom_fields_dic", "(", "_track_dic", ",", "track_table", ")", "tracks_dics", ".", "append", "(", "track_dic", ")", "# playlists", "playlists_header_dic", ",", "playlists_offsets", "=", "get_dic_sub_numbers", "(", "itunessd", ",", "header_dic", "[", "'playlists_header_offset'", "]", ",", "playlists_header_table", ")", "playlists_dics_and_indexes", "=", "[", "]", "for", "playlist_offset", "in", "playlists_offsets", ":", "_playlist_header_dic", ",", "indexes_of_tracks", "=", "get_dic_sub_numbers", "(", "itunessd", ",", "playlist_offset", ",", "playlist_header_table", ")", "playlist_header_dic", "=", "get_custom_fields_dic", "(", "_playlist_header_dic", ",", "playlist_header_table", ")", "playlists_dics_and_indexes", ".", "append", "(", "(", "playlist_header_dic", ",", "indexes_of_tracks", ")", ")", "return", "get_custom_fields_dic", "(", "header_dic", ",", "header_table", ")", ",", "tracks_dics", ",", "playlists_dics_and_indexes" ]
:param itunessd: the whole iTunesSD bytes data :return: translate to tree object, see doc of dics_to_itunessd
[ ":", "param", "itunessd", ":", "the", "whole", "iTunesSD", "bytes", "data", ":", "return", ":", "translate", "to", "tree", "object", "see", "doc", "of", "dics_to_itunessd" ]
train
https://github.com/meng89/ipodshuffle/blob/c9093dbb5cdac609376ebd3b4ef1b0fc58107d96/ipodshuffle/db/itunessd.py#L372-L402
meng89/ipodshuffle
ipodshuffle/db/itunessd.py
dics_to_itunessd
def dics_to_itunessd(header_dic, tracks_dics, playlists_dics_and_indexes): """ :param header_dic: dic of header_table :param tracks_dics: list of all track_table's dics :param playlists_dics_and_indexes: list of all playlists and all their track's indexes :return: the whole iTunesSD bytes data """ ############################################ # header ###### header_dic['length'] = get_table_size(header_table) header_dic['number_of_tracks'] = len(tracks_dics) header_dic['number_of_playlists'] = len(playlists_dics_and_indexes) header_dic['number_of_tracks2'] = 0 header_part_size = get_table_size(header_table) #################################################################################################################### # tracks ########## # Chunk of header tracks_header_dic = { 'length': get_table_size(tracks_header_table) + 4 * len(tracks_dics), 'number_of_tracks': len(tracks_dics) } tracks_header_chunk = dic_to_chunk(tracks_header_dic, tracks_header_table) # Chunk of all tracks [track_dic.update({'length': get_table_size(track_table)}) for track_dic in tracks_dics] _tracks_chunks = [dic_to_chunk(dic, track_table) for dic in tracks_dics] all_tracks_chunck = b''.join(_tracks_chunks) # Chunk of offsets _length_before_tracks_offsets = header_part_size + len(tracks_header_chunk) tracks_offsets_chunck = get_offsets_chunk(_length_before_tracks_offsets, _tracks_chunks) # Put chunks together track_part_chunk = tracks_header_chunk + tracks_offsets_chunck + all_tracks_chunck #################################################################################################################### # playlists ############# # Chunk of header _playlists_dics = [playlist_indexes[0] for playlist_indexes in playlists_dics_and_indexes] _types = [playlist_dic['type'] for playlist_dic in _playlists_dics] playlists_header_dic = { 'length': get_table_size(playlists_header_table) + 4 * len(playlists_dics_and_indexes), 'number_of_all_playlists': len(_types), 'flag1': 0xffffffff if _types.count(NORMAL) == 0 else 1, 'number_of_normal_playlists': _types.count(NORMAL), 'flag2': 0xffffffff if _types.count(AUDIOBOOK) == 0 else (_types.count(MASTER) + _types.count(NORMAL) + _types.count(PODCAST)), 'number_of_audiobook_playlists': _types.count(AUDIOBOOK), 'flag3': 0xffffffff if _types.count(PODCAST) == 0 else _types.count(1) + _types.count(NORMAL), 'number_of_podcast_playlists': _types.count(PODCAST) } playlists_header_chunk = dic_to_chunk(playlists_header_dic, playlists_header_table) # Chunk of all playlists _playlists_chunks = [] for playlist_header_dic, indexes in playlists_dics_and_indexes: dic = playlist_header_dic.copy() dic['length'] = get_table_size(playlist_header_table) + 4 * len(indexes) dic['number_of_all_track'] = len(indexes) dic['number_of_normal_track'] = len(indexes) if dic['type'] in (1, 2) else 0 if dic['type'] == MASTER: header_dic['number_of_tracks2'] = len(indexes) _playlist_header_chunk = dic_to_chunk(dic, playlist_header_table) _indexes_chunk = b''.join([i.to_bytes(4, 'little') for i in indexes]) playlist_chunk = _playlist_header_chunk + _indexes_chunk _playlists_chunks.append(playlist_chunk) all_playlists_chunk = b''.join(_playlists_chunks) # Chunk of offsets _length_before_playlists_offsets = header_part_size + len(track_part_chunk) + len(playlists_header_chunk) playlists_offsets_chunk = get_offsets_chunk(_length_before_playlists_offsets, _playlists_chunks) # Put chunks together playlists_part_chunk = playlists_header_chunk + playlists_offsets_chunk + all_playlists_chunk 
######################################################################## header_dic['tracks_header_offset'] = header_part_size header_dic['playlists_header_offset'] = header_part_size + len(track_part_chunk) header_part_chunk = dic_to_chunk(header_dic, header_table) ######################################################################## itunessd = header_part_chunk + track_part_chunk + playlists_part_chunk return itunessd
python
def dics_to_itunessd(header_dic, tracks_dics, playlists_dics_and_indexes): """ :param header_dic: dic of header_table :param tracks_dics: list of all track_table's dics :param playlists_dics_and_indexes: list of all playlists and all their track's indexes :return: the whole iTunesSD bytes data """ ############################################ # header ###### header_dic['length'] = get_table_size(header_table) header_dic['number_of_tracks'] = len(tracks_dics) header_dic['number_of_playlists'] = len(playlists_dics_and_indexes) header_dic['number_of_tracks2'] = 0 header_part_size = get_table_size(header_table) #################################################################################################################### # tracks ########## # Chunk of header tracks_header_dic = { 'length': get_table_size(tracks_header_table) + 4 * len(tracks_dics), 'number_of_tracks': len(tracks_dics) } tracks_header_chunk = dic_to_chunk(tracks_header_dic, tracks_header_table) # Chunk of all tracks [track_dic.update({'length': get_table_size(track_table)}) for track_dic in tracks_dics] _tracks_chunks = [dic_to_chunk(dic, track_table) for dic in tracks_dics] all_tracks_chunck = b''.join(_tracks_chunks) # Chunk of offsets _length_before_tracks_offsets = header_part_size + len(tracks_header_chunk) tracks_offsets_chunck = get_offsets_chunk(_length_before_tracks_offsets, _tracks_chunks) # Put chunks together track_part_chunk = tracks_header_chunk + tracks_offsets_chunck + all_tracks_chunck #################################################################################################################### # playlists ############# # Chunk of header _playlists_dics = [playlist_indexes[0] for playlist_indexes in playlists_dics_and_indexes] _types = [playlist_dic['type'] for playlist_dic in _playlists_dics] playlists_header_dic = { 'length': get_table_size(playlists_header_table) + 4 * len(playlists_dics_and_indexes), 'number_of_all_playlists': len(_types), 'flag1': 0xffffffff if _types.count(NORMAL) == 0 else 1, 'number_of_normal_playlists': _types.count(NORMAL), 'flag2': 0xffffffff if _types.count(AUDIOBOOK) == 0 else (_types.count(MASTER) + _types.count(NORMAL) + _types.count(PODCAST)), 'number_of_audiobook_playlists': _types.count(AUDIOBOOK), 'flag3': 0xffffffff if _types.count(PODCAST) == 0 else _types.count(1) + _types.count(NORMAL), 'number_of_podcast_playlists': _types.count(PODCAST) } playlists_header_chunk = dic_to_chunk(playlists_header_dic, playlists_header_table) # Chunk of all playlists _playlists_chunks = [] for playlist_header_dic, indexes in playlists_dics_and_indexes: dic = playlist_header_dic.copy() dic['length'] = get_table_size(playlist_header_table) + 4 * len(indexes) dic['number_of_all_track'] = len(indexes) dic['number_of_normal_track'] = len(indexes) if dic['type'] in (1, 2) else 0 if dic['type'] == MASTER: header_dic['number_of_tracks2'] = len(indexes) _playlist_header_chunk = dic_to_chunk(dic, playlist_header_table) _indexes_chunk = b''.join([i.to_bytes(4, 'little') for i in indexes]) playlist_chunk = _playlist_header_chunk + _indexes_chunk _playlists_chunks.append(playlist_chunk) all_playlists_chunk = b''.join(_playlists_chunks) # Chunk of offsets _length_before_playlists_offsets = header_part_size + len(track_part_chunk) + len(playlists_header_chunk) playlists_offsets_chunk = get_offsets_chunk(_length_before_playlists_offsets, _playlists_chunks) # Put chunks together playlists_part_chunk = playlists_header_chunk + playlists_offsets_chunk + all_playlists_chunk 
######################################################################## header_dic['tracks_header_offset'] = header_part_size header_dic['playlists_header_offset'] = header_part_size + len(track_part_chunk) header_part_chunk = dic_to_chunk(header_dic, header_table) ######################################################################## itunessd = header_part_chunk + track_part_chunk + playlists_part_chunk return itunessd
[ "def", "dics_to_itunessd", "(", "header_dic", ",", "tracks_dics", ",", "playlists_dics_and_indexes", ")", ":", "############################################", "# header", "######", "header_dic", "[", "'length'", "]", "=", "get_table_size", "(", "header_table", ")", "header_dic", "[", "'number_of_tracks'", "]", "=", "len", "(", "tracks_dics", ")", "header_dic", "[", "'number_of_playlists'", "]", "=", "len", "(", "playlists_dics_and_indexes", ")", "header_dic", "[", "'number_of_tracks2'", "]", "=", "0", "header_part_size", "=", "get_table_size", "(", "header_table", ")", "####################################################################################################################", "# tracks", "##########", "# Chunk of header", "tracks_header_dic", "=", "{", "'length'", ":", "get_table_size", "(", "tracks_header_table", ")", "+", "4", "*", "len", "(", "tracks_dics", ")", ",", "'number_of_tracks'", ":", "len", "(", "tracks_dics", ")", "}", "tracks_header_chunk", "=", "dic_to_chunk", "(", "tracks_header_dic", ",", "tracks_header_table", ")", "# Chunk of all tracks", "[", "track_dic", ".", "update", "(", "{", "'length'", ":", "get_table_size", "(", "track_table", ")", "}", ")", "for", "track_dic", "in", "tracks_dics", "]", "_tracks_chunks", "=", "[", "dic_to_chunk", "(", "dic", ",", "track_table", ")", "for", "dic", "in", "tracks_dics", "]", "all_tracks_chunck", "=", "b''", ".", "join", "(", "_tracks_chunks", ")", "# Chunk of offsets", "_length_before_tracks_offsets", "=", "header_part_size", "+", "len", "(", "tracks_header_chunk", ")", "tracks_offsets_chunck", "=", "get_offsets_chunk", "(", "_length_before_tracks_offsets", ",", "_tracks_chunks", ")", "# Put chunks together", "track_part_chunk", "=", "tracks_header_chunk", "+", "tracks_offsets_chunck", "+", "all_tracks_chunck", "####################################################################################################################", "# playlists", "#############", "# Chunk of header", "_playlists_dics", "=", "[", "playlist_indexes", "[", "0", "]", "for", "playlist_indexes", "in", "playlists_dics_and_indexes", "]", "_types", "=", "[", "playlist_dic", "[", "'type'", "]", "for", "playlist_dic", "in", "_playlists_dics", "]", "playlists_header_dic", "=", "{", "'length'", ":", "get_table_size", "(", "playlists_header_table", ")", "+", "4", "*", "len", "(", "playlists_dics_and_indexes", ")", ",", "'number_of_all_playlists'", ":", "len", "(", "_types", ")", ",", "'flag1'", ":", "0xffffffff", "if", "_types", ".", "count", "(", "NORMAL", ")", "==", "0", "else", "1", ",", "'number_of_normal_playlists'", ":", "_types", ".", "count", "(", "NORMAL", ")", ",", "'flag2'", ":", "0xffffffff", "if", "_types", ".", "count", "(", "AUDIOBOOK", ")", "==", "0", "else", "(", "_types", ".", "count", "(", "MASTER", ")", "+", "_types", ".", "count", "(", "NORMAL", ")", "+", "_types", ".", "count", "(", "PODCAST", ")", ")", ",", "'number_of_audiobook_playlists'", ":", "_types", ".", "count", "(", "AUDIOBOOK", ")", ",", "'flag3'", ":", "0xffffffff", "if", "_types", ".", "count", "(", "PODCAST", ")", "==", "0", "else", "_types", ".", "count", "(", "1", ")", "+", "_types", ".", "count", "(", "NORMAL", ")", ",", "'number_of_podcast_playlists'", ":", "_types", ".", "count", "(", "PODCAST", ")", "}", "playlists_header_chunk", "=", "dic_to_chunk", "(", "playlists_header_dic", ",", "playlists_header_table", ")", "# Chunk of all playlists", "_playlists_chunks", "=", "[", "]", "for", "playlist_header_dic", ",", "indexes", "in", "playlists_dics_and_indexes", 
":", "dic", "=", "playlist_header_dic", ".", "copy", "(", ")", "dic", "[", "'length'", "]", "=", "get_table_size", "(", "playlist_header_table", ")", "+", "4", "*", "len", "(", "indexes", ")", "dic", "[", "'number_of_all_track'", "]", "=", "len", "(", "indexes", ")", "dic", "[", "'number_of_normal_track'", "]", "=", "len", "(", "indexes", ")", "if", "dic", "[", "'type'", "]", "in", "(", "1", ",", "2", ")", "else", "0", "if", "dic", "[", "'type'", "]", "==", "MASTER", ":", "header_dic", "[", "'number_of_tracks2'", "]", "=", "len", "(", "indexes", ")", "_playlist_header_chunk", "=", "dic_to_chunk", "(", "dic", ",", "playlist_header_table", ")", "_indexes_chunk", "=", "b''", ".", "join", "(", "[", "i", ".", "to_bytes", "(", "4", ",", "'little'", ")", "for", "i", "in", "indexes", "]", ")", "playlist_chunk", "=", "_playlist_header_chunk", "+", "_indexes_chunk", "_playlists_chunks", ".", "append", "(", "playlist_chunk", ")", "all_playlists_chunk", "=", "b''", ".", "join", "(", "_playlists_chunks", ")", "# Chunk of offsets", "_length_before_playlists_offsets", "=", "header_part_size", "+", "len", "(", "track_part_chunk", ")", "+", "len", "(", "playlists_header_chunk", ")", "playlists_offsets_chunk", "=", "get_offsets_chunk", "(", "_length_before_playlists_offsets", ",", "_playlists_chunks", ")", "# Put chunks together", "playlists_part_chunk", "=", "playlists_header_chunk", "+", "playlists_offsets_chunk", "+", "all_playlists_chunk", "########################################################################", "header_dic", "[", "'tracks_header_offset'", "]", "=", "header_part_size", "header_dic", "[", "'playlists_header_offset'", "]", "=", "header_part_size", "+", "len", "(", "track_part_chunk", ")", "header_part_chunk", "=", "dic_to_chunk", "(", "header_dic", ",", "header_table", ")", "########################################################################", "itunessd", "=", "header_part_chunk", "+", "track_part_chunk", "+", "playlists_part_chunk", "return", "itunessd" ]
:param header_dic: dic of header_table :param tracks_dics: list of all track_table's dics :param playlists_dics_and_indexes: list of all playlists and all their track's indexes :return: the whole iTunesSD bytes data
[ ":", "param", "header_dic", ":", "dic", "of", "header_table", ":", "param", "tracks_dics", ":", "list", "of", "all", "track_table", "s", "dics", ":", "param", "playlists_dics_and_indexes", ":", "list", "of", "all", "playlists", "and", "all", "their", "track", "s", "indexes", ":", "return", ":", "the", "whole", "iTunesSD", "bytes", "data" ]
train
https://github.com/meng89/ipodshuffle/blob/c9093dbb5cdac609376ebd3b4ef1b0fc58107d96/ipodshuffle/db/itunessd.py#L418-L516
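The offset bookkeeping is the part worth internalizing: every chunk's offset is absolute, so it must account for everything serialized before it, including the offset table itself (4 bytes per entry). `get_offsets_chunk` isn't shown in this record, so the sketch below is an assumption about its behavior, not its actual source:

def offsets_chunk(length_before, chunks):
    # Assumed behavior: absolute little-endian 4-byte offsets, starting
    # after the preceding data plus the offset table itself.
    base = length_before + 4 * len(chunks)
    out, pos = b'', base
    for chunk in chunks:
        out += pos.to_bytes(4, 'little')
        pos += len(chunk)
    return out

assert offsets_chunk(10, [b'abc', b'de']) == (18).to_bytes(4, 'little') + (21).to_bytes(4, 'little')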
pyout/pyout
pyout/field.py
Field.add
def add(self, kind, key, *values): """Add processor functions. Any previous list of processors for `kind` and `key` will be overwritten. Parameters ---------- kind : {"pre", "post"} key : str A registered key. Add the functions (in order) to this key's list of processors. *values : callables Processors to add. """ if kind == "pre": procs = self.pre elif kind == "post": procs = self.post else: raise ValueError("kind is not 'pre' or 'post'") self._check_if_registered(key) procs[key] = values
python
def add(self, kind, key, *values): """Add processor functions. Any previous list of processors for `kind` and `key` will be overwritten. Parameters ---------- kind : {"pre", "post"} key : str A registered key. Add the functions (in order) to this key's list of processors. *values : callables Processors to add. """ if kind == "pre": procs = self.pre elif kind == "post": procs = self.post else: raise ValueError("kind is not 'pre' or 'post'") self._check_if_registered(key) procs[key] = values
[ "def", "add", "(", "self", ",", "kind", ",", "key", ",", "*", "values", ")", ":", "if", "kind", "==", "\"pre\"", ":", "procs", "=", "self", ".", "pre", "elif", "kind", "==", "\"post\"", ":", "procs", "=", "self", ".", "post", "else", ":", "raise", "ValueError", "(", "\"kind is not 'pre' or 'post'\"", ")", "self", ".", "_check_if_registered", "(", "key", ")", "procs", "[", "key", "]", "=", "values" ]
Add processor functions. Any previous list of processors for `kind` and `key` will be overwritten. Parameters ---------- kind : {"pre", "post"} key : str A registered key. Add the functions (in order) to this key's list of processors. *values : callables Processors to add.
[ "Add", "processor", "functions", "." ]
train
https://github.com/pyout/pyout/blob/d9ff954bdedb6fc70f21f4fe77ad4bf926b201b0/pyout/field.py#L84-L106
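The registry amounts to keyed tuples of processors, replaced wholesale on each `add`. A stripped-down model of the same idea (not pyout's actual Field class):

class MiniField:
    def __init__(self, keys):
        self.pre = {k: [] for k in keys}   # run before formatting
        self.post = {k: [] for k in keys}  # run after formatting

    def add(self, kind, key, *values):
        if kind not in ("pre", "post"):
            raise ValueError("kind is not 'pre' or 'post'")
        procs = self.pre if kind == "pre" else self.post
        if key not in procs:
            raise ValueError("{!r} is not a registered key".format(key))
        procs[key] = values  # overwrite, exactly as Field.add does

f = MiniField(["default", "override"])
f.add("post", "override", lambda value, result: result.upper())
assert len(f.post["override"]) == 1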
pyout/pyout
pyout/field.py
Field._format
def _format(self, _, result): """Wrap format call as a two-argument processor function. """ return self._fmt.format(six.text_type(result))
python
def _format(self, _, result): """Wrap format call as a two-argument processor function. """ return self._fmt.format(six.text_type(result))
[ "def", "_format", "(", "self", ",", "_", ",", "result", ")", ":", "return", "self", ".", "_fmt", ".", "format", "(", "six", ".", "text_type", "(", "result", ")", ")" ]
Wrap format call as a two-argument processor function.
[ "Wrap", "format", "call", "as", "a", "two", "-", "argument", "processor", "function", "." ]
train
https://github.com/pyout/pyout/blob/d9ff954bdedb6fc70f21f4fe77ad4bf926b201b0/pyout/field.py#L121-L124
pyout/pyout
pyout/field.py
StyleProcessors.transform
def transform(function): """Return a processor for a style's "transform" function. """ def transform_fn(_, result): if isinstance(result, Nothing): return result lgr.debug("Transforming %r with %r", result, function) try: return function(result) except: exctype, value, tb = sys.exc_info() try: new_exc = StyleFunctionError(function, exctype, value) # Remove the "During handling ..." since we're # reraising with the traceback. new_exc.__cause__ = None six.reraise(StyleFunctionError, new_exc, tb) finally: # Remove circular reference. # https://docs.python.org/2/library/sys.html#sys.exc_info del tb return transform_fn
python
def transform(function): """Return a processor for a style's "transform" function. """ def transform_fn(_, result): if isinstance(result, Nothing): return result lgr.debug("Transforming %r with %r", result, function) try: return function(result) except: exctype, value, tb = sys.exc_info() try: new_exc = StyleFunctionError(function, exctype, value) # Remove the "During handling ..." since we're # reraising with the traceback. new_exc.__cause__ = None six.reraise(StyleFunctionError, new_exc, tb) finally: # Remove circular reference. # https://docs.python.org/2/library/sys.html#sys.exc_info del tb return transform_fn
[ "def", "transform", "(", "function", ")", ":", "def", "transform_fn", "(", "_", ",", "result", ")", ":", "if", "isinstance", "(", "result", ",", "Nothing", ")", ":", "return", "result", "lgr", ".", "debug", "(", "\"Transforming %r with %r\"", ",", "result", ",", "function", ")", "try", ":", "return", "function", "(", "result", ")", "except", ":", "exctype", ",", "value", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "try", ":", "new_exc", "=", "StyleFunctionError", "(", "function", ",", "exctype", ",", "value", ")", "# Remove the \"During handling ...\" since we're", "# reraising with the traceback.", "new_exc", ".", "__cause__", "=", "None", "six", ".", "reraise", "(", "StyleFunctionError", ",", "new_exc", ",", "tb", ")", "finally", ":", "# Remove circular reference.", "# https://docs.python.org/2/library/sys.html#sys.exc_info", "del", "tb", "return", "transform_fn" ]
Return a processor for a style's "transform" function.
[ "Return", "a", "processor", "for", "a", "style", "s", "transform", "function", "." ]
train
https://github.com/pyout/pyout/blob/d9ff954bdedb6fc70f21f4fe77ad4bf926b201b0/pyout/field.py#L240-L262
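The reraise dance preserves the user's traceback while swapping in a friendlier exception type; setting `__cause__ = None` suppresses the "During handling ..." chaining (PEP 415). A Python 3-only equivalent of the six.reraise portion, with a simplified stand-in exception:

import sys

class StyleFunctionError(Exception):
    # Simplified stand-in; pyout's real class takes (function, exctype, value).
    pass

def call_transform(function, value):
    try:
        return function(value)
    except Exception:
        exctype, val, tb = sys.exc_info()
        new_exc = StyleFunctionError("{!r} raised {}: {}".format(
            function, exctype.__name__, val))
        new_exc.__cause__ = None  # suppress implicit exception chaining
        try:
            raise new_exc.with_traceback(tb)  # Python 3 spelling of six.reraise
        finally:
            del tb  # break the traceback's circular reference

assert call_transform(len, "abc") == 3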
pyout/pyout
pyout/field.py
StyleProcessors.by_key
def by_key(self, style_key, style_value): """Return a processor for a "simple" style value. Parameters ---------- style_key : str A style key. style_value : bool or str A "simple" style value that is either a style attribute (str) or a boolean flag indicating to use the style attribute named by `style_key`. Returns ------- A function. """ if self.style_types[style_key] is bool: style_attr = style_key else: style_attr = style_value def proc(_, result): return self.render(style_attr, result) return proc
python
def by_key(self, style_key, style_value): """Return a processor for a "simple" style value. Parameters ---------- style_key : str A style key. style_value : bool or str A "simple" style value that is either a style attribute (str) or a boolean flag indicating to use the style attribute named by `style_key`. Returns ------- A function. """ if self.style_types[style_key] is bool: style_attr = style_key else: style_attr = style_value def proc(_, result): return self.render(style_attr, result) return proc
[ "def", "by_key", "(", "self", ",", "style_key", ",", "style_value", ")", ":", "if", "self", ".", "style_types", "[", "style_key", "]", "is", "bool", ":", "style_attr", "=", "style_key", "else", ":", "style_attr", "=", "style_value", "def", "proc", "(", "_", ",", "result", ")", ":", "return", "self", ".", "render", "(", "style_attr", ",", "result", ")", "return", "proc" ]
Return a processor for a "simple" style value. Parameters ---------- style_key : str A style key. style_value : bool or str A "simple" style value that is either a style attribute (str) or a boolean flag indicating to use the style attribute named by `style_key`. Returns ------- A function.
[ "Return", "a", "processor", "for", "a", "simple", "style", "value", "." ]
train
https://github.com/pyout/pyout/blob/d9ff954bdedb6fc70f21f4fe77ad4bf926b201b0/pyout/field.py#L264-L287
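Closing out the simple-value dispatch: for a bool-typed key like "bold" the attribute is the key itself, while for a str-typed key like "color" the attribute is the value. A toy model with a trivial renderer:

style_types = {"bold": bool, "color": str}

def by_key(style_key, style_value, render):
    # Mirrors the dispatch above: bool-typed keys name themselves,
    # otherwise the style value names the attribute to render with.
    style_attr = style_key if style_types[style_key] is bool else style_value
    return lambda _, result: render(style_attr, result)

render = lambda attr, text: "<{0}>{1}</{0}>".format(attr, text)
assert by_key("bold", True, render)(None, "hi") == "<bold>hi</bold>"
assert by_key("color", "red", render)(None, "hi") == "<red>hi</red>"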