repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
sequence
docstring
stringlengths
3
17.3k
docstring_tokens
sequence
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
thunder-project/thunder
thunder/series/series.py
Series.tobinary
def tobinary(self, path, prefix='series', overwrite=False, credentials=None):
    """
    Write data to binary files.

    Parameters
    ----------
    path : string path or URI to directory to be created
        Output files will be written underneath path.
        Directory will be created as a result of this call.

    prefix : str, optional, default = 'series'
        String prefix for files.

    overwrite : bool
        If true, path and all its contents will be deleted and
        recreated as part of this call.

    credentials : dict, optional, default = None
        Credentials passed through to the writer, presumably for
        remote storage (e.g. S3) -- see thunder.series.writers.tobinary.
    """
    # local import avoids a circular dependency between series and writers
    from thunder.series.writers import tobinary
    tobinary(self, path, prefix=prefix, overwrite=overwrite, credentials=credentials)
python
def tobinary(self, path, prefix='series', overwrite=False, credentials=None): """ Write data to binary files. Parameters ---------- path : string path or URI to directory to be created Output files will be written underneath path. Directory will be created as a result of this call. prefix : str, optional, default = 'series' String prefix for files. overwrite : bool If true, path and all its contents will be deleted and recreated as partof this call. """ from thunder.series.writers import tobinary tobinary(self, path, prefix=prefix, overwrite=overwrite, credentials=credentials)
[ "def", "tobinary", "(", "self", ",", "path", ",", "prefix", "=", "'series'", ",", "overwrite", "=", "False", ",", "credentials", "=", "None", ")", ":", "from", "thunder", ".", "series", ".", "writers", "import", "tobinary", "tobinary", "(", "self", ",", "path", ",", "prefix", "=", "prefix", ",", "overwrite", "=", "overwrite", ",", "credentials", "=", "credentials", ")" ]
Write data to binary files. Parameters ---------- path : string path or URI to directory to be created Output files will be written underneath path. Directory will be created as a result of this call. prefix : str, optional, default = 'series' String prefix for files. overwrite : bool If true, path and all its contents will be deleted and recreated as part of this call.
[ "Write", "data", "to", "binary", "files", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L1110-L1128
train
thunder-project/thunder
thunder/readers.py
addextension
def addextension(path, ext=None):
    """
    Append a file-extension wildcard to a path when needed.

    Returns the path unchanged when no extension is given, when the path
    already contains a wildcard, or when it already carries an extension;
    otherwise treats the path as a directory and returns 'path/*.ext'.
    """
    # nothing to do without an extension
    if not ext:
        return path
    # an explicit wildcard or an existing extension wins
    if '*' in path or os.path.splitext(path)[1]:
        return path
    dotted = ext if ext.startswith('.') else '.' + ext
    if path.endswith(dotted):
        return path
    # glob for the extension inside the directory
    sep = os.path.sep
    base = path if path.endswith(sep) else path + sep
    return base + '*' + dotted
python
def addextension(path, ext=None): """ Helper function for handling of paths given separately passed file extensions. """ if ext: if '*' in path: return path elif os.path.splitext(path)[1]: return path else: if not ext.startswith('.'): ext = '.'+ext if not path.endswith(ext): if not path.endswith(os.path.sep): path += os.path.sep return path + '*' + ext else: return path else: return path
[ "def", "addextension", "(", "path", ",", "ext", "=", "None", ")", ":", "if", "ext", ":", "if", "'*'", "in", "path", ":", "return", "path", "elif", "os", ".", "path", ".", "splitext", "(", "path", ")", "[", "1", "]", ":", "return", "path", "else", ":", "if", "not", "ext", ".", "startswith", "(", "'.'", ")", ":", "ext", "=", "'.'", "+", "ext", "if", "not", "path", ".", "endswith", "(", "ext", ")", ":", "if", "not", "path", ".", "endswith", "(", "os", ".", "path", ".", "sep", ")", ":", "path", "+=", "os", ".", "path", ".", "sep", "return", "path", "+", "'*'", "+", "ext", "else", ":", "return", "path", "else", ":", "return", "path" ]
Helper function for handling of paths given separately passed file extensions.
[ "Helper", "function", "for", "handling", "of", "paths", "given", "separately", "passed", "file", "extensions", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L21-L40
train
thunder-project/thunder
thunder/readers.py
select
def select(files, start, stop):
    """
    Helper function for handling start and stop indices.

    Returns files[start:stop]; a start or stop of None defaults to the
    beginning or end of the list, respectively.
    """
    if start is None and stop is None:
        return files
    # Slicing handles None bounds natively; this also treats an explicit
    # stop of 0 correctly (the old truthiness check `if start or stop`
    # skipped slicing entirely and returned everything).
    return files[start:stop]
python
def select(files, start, stop): """ Helper function for handling start and stop indices """ if start or stop: if start is None: start = 0 if stop is None: stop = len(files) files = files[start:stop] return files
[ "def", "select", "(", "files", ",", "start", ",", "stop", ")", ":", "if", "start", "or", "stop", ":", "if", "start", "is", "None", ":", "start", "=", "0", "if", "stop", "is", "None", ":", "stop", "=", "len", "(", "files", ")", "files", "=", "files", "[", "start", ":", "stop", "]", "return", "files" ]
Helper function for handling start and stop indices
[ "Helper", "function", "for", "handling", "start", "and", "stop", "indices" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L42-L52
train
thunder-project/thunder
thunder/readers.py
listrecursive
def listrecursive(path, ext=None):
    """
    List files recursively under path, optionally filtered by extension.

    A 'tif' or 'tiff' extension matches both spellings. Returns a sorted
    list of full paths.
    """
    filenames = set()
    for root, dirs, files in os.walk(path):
        if ext:
            if ext in ('tif', 'tiff'):
                # accept both spellings of the TIFF extension
                files = fnmatch.filter(files, '*.tiff') + fnmatch.filter(files, '*.tif')
            else:
                files = fnmatch.filter(files, '*.' + ext)
        for filename in files:
            filenames.add(os.path.join(root, filename))
    # single sort; the old in-place sort() followed by sorted() was redundant
    return sorted(filenames)
python
def listrecursive(path, ext=None): """ List files recurisvely """ filenames = set() for root, dirs, files in os.walk(path): if ext: if ext == 'tif' or ext == 'tiff': tmp = fnmatch.filter(files, '*.' + 'tiff') files = tmp + fnmatch.filter(files, '*.' + 'tif') else: files = fnmatch.filter(files, '*.' + ext) for filename in files: filenames.add(os.path.join(root, filename)) filenames = list(filenames) filenames.sort() return sorted(filenames)
[ "def", "listrecursive", "(", "path", ",", "ext", "=", "None", ")", ":", "filenames", "=", "set", "(", ")", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "if", "ext", ":", "if", "ext", "==", "'tif'", "or", "ext", "==", "'tiff'", ":", "tmp", "=", "fnmatch", ".", "filter", "(", "files", ",", "'*.'", "+", "'tiff'", ")", "files", "=", "tmp", "+", "fnmatch", ".", "filter", "(", "files", ",", "'*.'", "+", "'tif'", ")", "else", ":", "files", "=", "fnmatch", ".", "filter", "(", "files", ",", "'*.'", "+", "ext", ")", "for", "filename", "in", "files", ":", "filenames", ".", "add", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", "filenames", "=", "list", "(", "filenames", ")", "filenames", ".", "sort", "(", ")", "return", "sorted", "(", "filenames", ")" ]
List files recursively
[ "List", "files", "recurisvely" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L72-L88
train
thunder-project/thunder
thunder/readers.py
listflat
def listflat(path, ext=None):
    """
    List files in a directory without recursion, optionally filtered by
    extension.

    If path is not a directory it is treated as a glob pattern. A 'tif'
    or 'tiff' extension matches both spellings. Directories are excluded
    from the result. Returns a sorted list of paths.
    """
    if os.path.isdir(path):
        if ext:
            if ext in ('tif', 'tiff'):
                # accept both spellings of the TIFF extension
                files = glob.glob(os.path.join(path, '*.tif'))
                files += glob.glob(os.path.join(path, '*.tiff'))
            else:
                files = glob.glob(os.path.join(path, '*.' + ext))
        else:
            files = [os.path.join(path, fname) for fname in os.listdir(path)]
    else:
        files = glob.glob(path)
    # filter out directories; glob/listdir only yield strings, so the old
    # isinstance(fpath, list) guard was dead code and has been removed
    files = [fpath for fpath in files if not os.path.isdir(fpath)]
    return sorted(files)
python
def listflat(path, ext=None): """ List files without recursion """ if os.path.isdir(path): if ext: if ext == 'tif' or ext == 'tiff': files = glob.glob(os.path.join(path, '*.tif')) files = files + glob.glob(os.path.join(path, '*.tiff')) else: files = glob.glob(os.path.join(path, '*.' + ext)) else: files = [os.path.join(path, fname) for fname in os.listdir(path)] else: files = glob.glob(path) # filter out directories files = [fpath for fpath in files if not isinstance(fpath, list) and not os.path.isdir(fpath)] return sorted(files)
[ "def", "listflat", "(", "path", ",", "ext", "=", "None", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "if", "ext", ":", "if", "ext", "==", "'tif'", "or", "ext", "==", "'tiff'", ":", "files", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'*.tif'", ")", ")", "files", "=", "files", "+", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'*.tiff'", ")", ")", "else", ":", "files", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'*.'", "+", "ext", ")", ")", "else", ":", "files", "=", "[", "os", ".", "path", ".", "join", "(", "path", ",", "fname", ")", "for", "fname", "in", "os", ".", "listdir", "(", "path", ")", "]", "else", ":", "files", "=", "glob", ".", "glob", "(", "path", ")", "# filter out directories", "files", "=", "[", "fpath", "for", "fpath", "in", "files", "if", "not", "isinstance", "(", "fpath", ",", "list", ")", "and", "not", "os", ".", "path", ".", "isdir", "(", "fpath", ")", "]", "return", "sorted", "(", "files", ")" ]
List files without recursion
[ "List", "files", "without", "recursion" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L90-L107
train
thunder-project/thunder
thunder/readers.py
normalize_scheme
def normalize_scheme(path, ext):
    """
    Normalize scheme for paths related to hdfs.

    Ensures the returned path is a fully-qualified URI: a path that
    already carries a scheme is returned unchanged, while a local
    (possibly relative) path is made absolute and prefixed with 'file://'.
    """
    path = addextension(path, ext)
    if urlparse(path).scheme:
        # already a fully-qualified URI
        return path
    # local path spec: absolutize the directory part, keep the filename
    import os
    dirname, filename = os.path.split(path)
    if not os.path.isabs(dirname):
        dirname = os.path.abspath(dirname)
    return "file://" + os.path.join(dirname, filename)
python
def normalize_scheme(path, ext): """ Normalize scheme for paths related to hdfs """ path = addextension(path, ext) parsed = urlparse(path) if parsed.scheme: # this appears to already be a fully-qualified URI return path else: # this looks like a local path spec import os dirname, filename = os.path.split(path) if not os.path.isabs(dirname): # need to make relative local paths absolute dirname = os.path.abspath(dirname) path = os.path.join(dirname, filename) return "file://" + path
[ "def", "normalize_scheme", "(", "path", ",", "ext", ")", ":", "path", "=", "addextension", "(", "path", ",", "ext", ")", "parsed", "=", "urlparse", "(", "path", ")", "if", "parsed", ".", "scheme", ":", "# this appears to already be a fully-qualified URI", "return", "path", "else", ":", "# this looks like a local path spec", "import", "os", "dirname", ",", "filename", "=", "os", ".", "path", ".", "split", "(", "path", ")", "if", "not", "os", ".", "path", ".", "isabs", "(", "dirname", ")", ":", "# need to make relative local paths absolute", "dirname", "=", "os", ".", "path", ".", "abspath", "(", "dirname", ")", "path", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "filename", ")", "return", "\"file://\"", "+", "path" ]
Normalize scheme for paths related to hdfs
[ "Normalize", "scheme", "for", "paths", "related", "to", "hdfs" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L620-L638
train
thunder-project/thunder
thunder/readers.py
LocalParallelReader.list
def list(path, ext=None, start=None, stop=None, recursive=False):
    """
    Get sorted list of file paths matching path and extension.

    Raises FileNotFoundError when no matching files exist.
    """
    if recursive:
        files = listrecursive(path, ext)
    else:
        files = listflat(path, ext)
    if not files:
        raise FileNotFoundError('Cannot find files of type "%s" in %s'
                                % (ext if ext else '*', path))
    return select(files, start, stop)
python
def list(path, ext=None, start=None, stop=None, recursive=False): """ Get sorted list of file paths matching path and extension """ files = listflat(path, ext) if not recursive else listrecursive(path, ext) if len(files) < 1: raise FileNotFoundError('Cannot find files of type "%s" in %s' % (ext if ext else '*', path)) files = select(files, start, stop) return files
[ "def", "list", "(", "path", ",", "ext", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "recursive", "=", "False", ")", ":", "files", "=", "listflat", "(", "path", ",", "ext", ")", "if", "not", "recursive", "else", "listrecursive", "(", "path", ",", "ext", ")", "if", "len", "(", "files", ")", "<", "1", ":", "raise", "FileNotFoundError", "(", "'Cannot find files of type \"%s\" in %s'", "%", "(", "ext", "if", "ext", "else", "'*'", ",", "path", ")", ")", "files", "=", "select", "(", "files", ",", "start", ",", "stop", ")", "return", "files" ]
Get sorted list of file paths matching path and extension
[ "Get", "sorted", "list", "of", "file", "paths", "matching", "path", "and", "extension" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L133-L143
train
thunder-project/thunder
thunder/readers.py
LocalParallelReader.read
def read(self, path, ext=None, start=None, stop=None, recursive=False, npartitions=None):
    """
    Sets up Spark RDD across files specified by dataPath on local filesystem.

    Returns RDD of <integer file index, string buffer> k/v pairs.

    NOTE(review): each record is actually a 3-tuple
    (index, buffer, filename) -- the summary above understates this;
    confirm downstream consumers expect the trailing filename. With no
    Spark engine a plain list of the same 3-tuples is returned. Also
    records the number of files found on self.nfiles as a side effect.
    """
    path = uri_to_path(path)
    files = self.list(path, ext=ext, start=start, stop=stop, recursive=recursive)
    nfiles = len(files)
    self.nfiles = nfiles
    if spark and isinstance(self.engine, spark):
        # cap partitions at the number of files
        npartitions = min(npartitions, nfiles) if npartitions else nfiles
        rdd = self.engine.parallelize(enumerate(files), npartitions)
        return rdd.map(lambda kv: (kv[0], readlocal(kv[1]), kv[1]))
    else:
        return [(k, readlocal(v), v) for k, v in enumerate(files)]
python
def read(self, path, ext=None, start=None, stop=None, recursive=False, npartitions=None): """ Sets up Spark RDD across files specified by dataPath on local filesystem. Returns RDD of <integer file index, string buffer> k/v pairs. """ path = uri_to_path(path) files = self.list(path, ext=ext, start=start, stop=stop, recursive=recursive) nfiles = len(files) self.nfiles = nfiles if spark and isinstance(self.engine, spark): npartitions = min(npartitions, nfiles) if npartitions else nfiles rdd = self.engine.parallelize(enumerate(files), npartitions) return rdd.map(lambda kv: (kv[0], readlocal(kv[1]), kv[1])) else: return [(k, readlocal(v), v) for k, v in enumerate(files)]
[ "def", "read", "(", "self", ",", "path", ",", "ext", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "recursive", "=", "False", ",", "npartitions", "=", "None", ")", ":", "path", "=", "uri_to_path", "(", "path", ")", "files", "=", "self", ".", "list", "(", "path", ",", "ext", "=", "ext", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "recursive", "=", "recursive", ")", "nfiles", "=", "len", "(", "files", ")", "self", ".", "nfiles", "=", "nfiles", "if", "spark", "and", "isinstance", "(", "self", ".", "engine", ",", "spark", ")", ":", "npartitions", "=", "min", "(", "npartitions", ",", "nfiles", ")", "if", "npartitions", "else", "nfiles", "rdd", "=", "self", ".", "engine", ".", "parallelize", "(", "enumerate", "(", "files", ")", ",", "npartitions", ")", "return", "rdd", ".", "map", "(", "lambda", "kv", ":", "(", "kv", "[", "0", "]", ",", "readlocal", "(", "kv", "[", "1", "]", ")", ",", "kv", "[", "1", "]", ")", ")", "else", ":", "return", "[", "(", "k", ",", "readlocal", "(", "v", ")", ",", "v", ")", "for", "k", ",", "v", "in", "enumerate", "(", "files", ")", "]" ]
Sets up Spark RDD across files specified by dataPath on local filesystem. Returns RDD of <integer file index, string buffer> k/v pairs.
[ "Sets", "up", "Spark", "RDD", "across", "files", "specified", "by", "dataPath", "on", "local", "filesystem", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L145-L162
train
thunder-project/thunder
thunder/readers.py
LocalFileReader.list
def list(path, filename=None, start=None, stop=None, recursive=False, directories=False):
    """
    List files specified by dataPath.

    Datapath may include a single wildcard ('*') in the filename specifier.

    Returns sorted list of absolute path strings.
    """
    path = uri_to_path(path)

    if not filename and recursive:
        return listrecursive(path)

    if filename:
        # join the filename onto the directory portion of path
        base = path if os.path.isdir(path) else os.path.dirname(path)
        pattern = os.path.join(base, filename)
    elif os.path.isdir(path) and not directories:
        pattern = os.path.join(path, "*")
    else:
        pattern = path

    matches = glob.glob(pattern)
    if not directories:
        matches = [m for m in matches if not os.path.isdir(m)]
    matches.sort()
    return select(matches, start, stop)
python
def list(path, filename=None, start=None, stop=None, recursive=False, directories=False): """ List files specified by dataPath. Datapath may include a single wildcard ('*') in the filename specifier. Returns sorted list of absolute path strings. """ path = uri_to_path(path) if not filename and recursive: return listrecursive(path) if filename: if os.path.isdir(path): path = os.path.join(path, filename) else: path = os.path.join(os.path.dirname(path), filename) else: if os.path.isdir(path) and not directories: path = os.path.join(path, "*") files = glob.glob(path) if not directories: files = [fpath for fpath in files if not os.path.isdir(fpath)] files.sort() files = select(files, start, stop) return files
[ "def", "list", "(", "path", ",", "filename", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "recursive", "=", "False", ",", "directories", "=", "False", ")", ":", "path", "=", "uri_to_path", "(", "path", ")", "if", "not", "filename", "and", "recursive", ":", "return", "listrecursive", "(", "path", ")", "if", "filename", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", "else", ":", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "path", ")", ",", "filename", ")", "else", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", "and", "not", "directories", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "\"*\"", ")", "files", "=", "glob", ".", "glob", "(", "path", ")", "if", "not", "directories", ":", "files", "=", "[", "fpath", "for", "fpath", "in", "files", "if", "not", "os", ".", "path", ".", "isdir", "(", "fpath", ")", "]", "files", ".", "sort", "(", ")", "files", "=", "select", "(", "files", ",", "start", ",", "stop", ")", "return", "files" ]
List files specified by dataPath. Datapath may include a single wildcard ('*') in the filename specifier. Returns sorted list of absolute path strings.
[ "List", "files", "specified", "by", "dataPath", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L173-L202
train
thunder-project/thunder
thunder/readers.py
BotoClient.parse_query
def parse_query(query, delim='/'):
    """
    Parse a boto query of the form [scheme://]bucket/key, where the key
    may contain a single wildcard ('*').

    Returns a tuple (storage, bucket, key, prefix, postfix): storage is
    the lower-cased scheme ('', 'gs', 's3', or 's3n'), key is the portion
    before any wildcard, and prefix/postfix are the fragments around the
    wildcard within the final path component.

    Raises ValueError for an unsupported scheme, a missing bucket name,
    or more than one wildcard.
    """
    key = ''
    prefix = ''
    postfix = ''

    parsed = urlparse(query)
    query = parsed.path.lstrip(delim)
    bucket = parsed.netloc

    if not parsed.scheme.lower() in ('', "gs", "s3", "s3n"):
        raise ValueError("Query scheme must be one of '', 'gs', 's3', or 's3n'; "
                         "got: '%s'" % parsed.scheme)
    storage = parsed.scheme.lower()

    if not bucket.strip() and query:
        # no netloc: the bucket is the first path component
        toks = query.split(delim, 1)
        bucket = toks[0]
        if len(toks) == 2:
            key = toks[1]
        else:
            key = ''
        # BUGFIX: continue parsing against the key remainder only; the
        # wildcard split below previously re-used the full query, so the
        # bucket name leaked into the returned key for schemeless queries.
        query = key

    if not bucket.strip():
        raise ValueError("Could not parse bucket name from query string '%s'" % query)

    tokens = query.split("*")
    n = len(tokens)
    if n == 0:
        pass
    elif n == 1:
        key = tokens[0]
    elif n == 2:
        index = tokens[0].rfind(delim)
        if index >= 0:
            key = tokens[0][:(index + 1)]
            prefix = tokens[0][(index + 1):] if len(tokens[0]) > (index + 1) else ''
        else:
            prefix = tokens[0]
        postfix = tokens[1]
    else:
        raise ValueError("Only one wildcard ('*') allowed in query string, got: '%s'" % query)

    return storage, bucket, key, prefix, postfix
python
def parse_query(query, delim='/'): """ Parse a boto query """ key = '' prefix = '' postfix = '' parsed = urlparse(query) query = parsed.path.lstrip(delim) bucket = parsed.netloc if not parsed.scheme.lower() in ('', "gs", "s3", "s3n"): raise ValueError("Query scheme must be one of '', 'gs', 's3', or 's3n'; " "got: '%s'" % parsed.scheme) storage = parsed.scheme.lower() if not bucket.strip() and query: toks = query.split(delim, 1) bucket = toks[0] if len(toks) == 2: key = toks[1] else: key = '' if not bucket.strip(): raise ValueError("Could not parse bucket name from query string '%s'" % query) tokens = query.split("*") n = len(tokens) if n == 0: pass elif n == 1: key = tokens[0] elif n == 2: index = tokens[0].rfind(delim) if index >= 0: key = tokens[0][:(index + 1)] prefix = tokens[0][(index + 1):] if len(tokens[0]) > (index + 1) else '' else: prefix = tokens[0] postfix = tokens[1] else: raise ValueError("Only one wildcard ('*') allowed in query string, got: '%s'" % query) return storage, bucket, key, prefix, postfix
[ "def", "parse_query", "(", "query", ",", "delim", "=", "'/'", ")", ":", "key", "=", "''", "prefix", "=", "''", "postfix", "=", "''", "parsed", "=", "urlparse", "(", "query", ")", "query", "=", "parsed", ".", "path", ".", "lstrip", "(", "delim", ")", "bucket", "=", "parsed", ".", "netloc", "if", "not", "parsed", ".", "scheme", ".", "lower", "(", ")", "in", "(", "''", ",", "\"gs\"", ",", "\"s3\"", ",", "\"s3n\"", ")", ":", "raise", "ValueError", "(", "\"Query scheme must be one of '', 'gs', 's3', or 's3n'; \"", "\"got: '%s'\"", "%", "parsed", ".", "scheme", ")", "storage", "=", "parsed", ".", "scheme", ".", "lower", "(", ")", "if", "not", "bucket", ".", "strip", "(", ")", "and", "query", ":", "toks", "=", "query", ".", "split", "(", "delim", ",", "1", ")", "bucket", "=", "toks", "[", "0", "]", "if", "len", "(", "toks", ")", "==", "2", ":", "key", "=", "toks", "[", "1", "]", "else", ":", "key", "=", "''", "if", "not", "bucket", ".", "strip", "(", ")", ":", "raise", "ValueError", "(", "\"Could not parse bucket name from query string '%s'\"", "%", "query", ")", "tokens", "=", "query", ".", "split", "(", "\"*\"", ")", "n", "=", "len", "(", "tokens", ")", "if", "n", "==", "0", ":", "pass", "elif", "n", "==", "1", ":", "key", "=", "tokens", "[", "0", "]", "elif", "n", "==", "2", ":", "index", "=", "tokens", "[", "0", "]", ".", "rfind", "(", "delim", ")", "if", "index", ">=", "0", ":", "key", "=", "tokens", "[", "0", "]", "[", ":", "(", "index", "+", "1", ")", "]", "prefix", "=", "tokens", "[", "0", "]", "[", "(", "index", "+", "1", ")", ":", "]", "if", "len", "(", "tokens", "[", "0", "]", ")", ">", "(", "index", "+", "1", ")", "else", "''", "else", ":", "prefix", "=", "tokens", "[", "0", "]", "postfix", "=", "tokens", "[", "1", "]", "else", ":", "raise", "ValueError", "(", "\"Only one wildcard ('*') allowed in query string, got: '%s'\"", "%", "query", ")", "return", "storage", ",", "bucket", ",", "key", ",", "prefix", ",", "postfix" ]
Parse a boto query
[ "Parse", "a", "boto", "query" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L233-L278
train
thunder-project/thunder
thunder/readers.py
BotoClient.retrieve_keys
def retrieve_keys(bucket, key, prefix='', postfix='', delim='/', directories=False, recursive=False):
    """
    Retrieve keys from a bucket.
    """
    if key and prefix:
        assert key.endswith(delim)
        key += prefix

    # key may still name a "directory" even without a trailing delimiter;
    # probe for a matching prefix and append the delimiter if found
    if key and not key.endswith(delim):
        if BotoClient.check_prefix(bucket, key + delim, delim=delim):
            key += delim

    # supplying a delimiter makes the listing non-recursive
    listdelim = None if recursive else delim
    results = bucket.list(prefix=key, delimiter=listdelim)

    if postfix:
        return filter(lambda k_: BotoClient.filter_predicate(k_, postfix, inclusive=True), results)
    if not directories:
        return filter(lambda k_: BotoClient.filter_predicate(k_, delim, inclusive=False), results)
    return results
python
def retrieve_keys(bucket, key, prefix='', postfix='', delim='/', directories=False, recursive=False): """ Retrieve keys from a bucket """ if key and prefix: assert key.endswith(delim) key += prefix # check whether key is a directory if not key.endswith(delim) and key: # check for matching prefix if BotoClient.check_prefix(bucket, key + delim, delim=delim): # found a directory key += delim listdelim = delim if not recursive else None results = bucket.list(prefix=key, delimiter=listdelim) if postfix: func = lambda k_: BotoClient.filter_predicate(k_, postfix, inclusive=True) return filter(func, results) elif not directories: func = lambda k_: BotoClient.filter_predicate(k_, delim, inclusive=False) return filter(func, results) else: return results
[ "def", "retrieve_keys", "(", "bucket", ",", "key", ",", "prefix", "=", "''", ",", "postfix", "=", "''", ",", "delim", "=", "'/'", ",", "directories", "=", "False", ",", "recursive", "=", "False", ")", ":", "if", "key", "and", "prefix", ":", "assert", "key", ".", "endswith", "(", "delim", ")", "key", "+=", "prefix", "# check whether key is a directory", "if", "not", "key", ".", "endswith", "(", "delim", ")", "and", "key", ":", "# check for matching prefix", "if", "BotoClient", ".", "check_prefix", "(", "bucket", ",", "key", "+", "delim", ",", "delim", "=", "delim", ")", ":", "# found a directory", "key", "+=", "delim", "listdelim", "=", "delim", "if", "not", "recursive", "else", "None", "results", "=", "bucket", ".", "list", "(", "prefix", "=", "key", ",", "delimiter", "=", "listdelim", ")", "if", "postfix", ":", "func", "=", "lambda", "k_", ":", "BotoClient", ".", "filter_predicate", "(", "k_", ",", "postfix", ",", "inclusive", "=", "True", ")", "return", "filter", "(", "func", ",", "results", ")", "elif", "not", "directories", ":", "func", "=", "lambda", "k_", ":", "BotoClient", ".", "filter_predicate", "(", "k_", ",", "delim", ",", "inclusive", "=", "False", ")", "return", "filter", "(", "func", ",", "results", ")", "else", ":", "return", "results" ]
Retrieve keys from a bucket
[ "Retrieve", "keys", "from", "a", "bucket" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L291-L316
train
thunder-project/thunder
thunder/readers.py
BotoParallelReader.getfiles
def getfiles(self, path, ext=None, start=None, stop=None, recursive=False):
    """
    Get scheme, bucket, and keys for a set of files.

    Returns a tuple (scheme, bucket name, sorted key list), optionally
    filtered by extension and sliced by start/stop.

    Raises NotImplementedError for URL schemes other than s3/s3n/gs.
    """
    from .utils import connection_with_anon, connection_with_gs

    parse = BotoClient.parse_query(path)

    scheme = parse[0]
    bucket_name = parse[1]

    if scheme == 's3' or scheme == 's3n':
        conn = connection_with_anon(self.credentials)
        bucket = conn.get_bucket(parse[1])
    elif scheme == 'gs':
        conn = connection_with_gs(bucket_name)
        bucket = conn.get_bucket()
    else:
        raise NotImplementedError("No file reader implementation for URL scheme " + scheme)

    keys = BotoClient.retrieve_keys(
        bucket, parse[2], prefix=parse[3], postfix=parse[4], recursive=recursive)
    keylist = [key.name for key in keys]

    if ext:
        if ext == 'tif' or ext == 'tiff':
            # BUGFIX: keep both .tif and .tiff keys in one pass. The old
            # code first narrowed keylist to .tif only, then appended a
            # (necessarily empty) *list object* for .tiff, which made the
            # subsequent sort() fail on mixed list/str elements.
            keylist = [keyname for keyname in keylist
                       if keyname.endswith(('tif', 'tiff'))]
        else:
            keylist = [keyname for keyname in keylist if keyname.endswith(ext)]

    keylist.sort()
    keylist = select(keylist, start, stop)

    return scheme, bucket.name, keylist
python
def getfiles(self, path, ext=None, start=None, stop=None, recursive=False): """ Get scheme, bucket, and keys for a set of files """ from .utils import connection_with_anon, connection_with_gs parse = BotoClient.parse_query(path) scheme = parse[0] bucket_name = parse[1] if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(self.credentials) bucket = conn.get_bucket(parse[1]) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) keys = BotoClient.retrieve_keys( bucket, parse[2], prefix=parse[3], postfix=parse[4], recursive=recursive) keylist = [key.name for key in keys] if ext: if ext == 'tif' or ext == 'tiff': keylist = [keyname for keyname in keylist if keyname.endswith('tif')] keylist.append([keyname for keyname in keylist if keyname.endswith('tiff')]) else: keylist = [keyname for keyname in keylist if keyname.endswith(ext)] keylist.sort() keylist = select(keylist, start, stop) return scheme, bucket.name, keylist
[ "def", "getfiles", "(", "self", ",", "path", ",", "ext", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "recursive", "=", "False", ")", ":", "from", ".", "utils", "import", "connection_with_anon", ",", "connection_with_gs", "parse", "=", "BotoClient", ".", "parse_query", "(", "path", ")", "scheme", "=", "parse", "[", "0", "]", "bucket_name", "=", "parse", "[", "1", "]", "if", "scheme", "==", "'s3'", "or", "scheme", "==", "'s3n'", ":", "conn", "=", "connection_with_anon", "(", "self", ".", "credentials", ")", "bucket", "=", "conn", ".", "get_bucket", "(", "parse", "[", "1", "]", ")", "elif", "scheme", "==", "'gs'", ":", "conn", "=", "connection_with_gs", "(", "bucket_name", ")", "bucket", "=", "conn", ".", "get_bucket", "(", ")", "else", ":", "raise", "NotImplementedError", "(", "\"No file reader implementation for URL scheme \"", "+", "scheme", ")", "keys", "=", "BotoClient", ".", "retrieve_keys", "(", "bucket", ",", "parse", "[", "2", "]", ",", "prefix", "=", "parse", "[", "3", "]", ",", "postfix", "=", "parse", "[", "4", "]", ",", "recursive", "=", "recursive", ")", "keylist", "=", "[", "key", ".", "name", "for", "key", "in", "keys", "]", "if", "ext", ":", "if", "ext", "==", "'tif'", "or", "ext", "==", "'tiff'", ":", "keylist", "=", "[", "keyname", "for", "keyname", "in", "keylist", "if", "keyname", ".", "endswith", "(", "'tif'", ")", "]", "keylist", ".", "append", "(", "[", "keyname", "for", "keyname", "in", "keylist", "if", "keyname", ".", "endswith", "(", "'tiff'", ")", "]", ")", "else", ":", "keylist", "=", "[", "keyname", "for", "keyname", "in", "keylist", "if", "keyname", ".", "endswith", "(", "ext", ")", "]", "keylist", ".", "sort", "(", ")", "keylist", "=", "select", "(", "keylist", ",", "start", ",", "stop", ")", "return", "scheme", ",", "bucket", ".", "name", ",", "keylist" ]
Get scheme, bucket, and keys for a set of files
[ "Get", "scheme", "bucket", "and", "keys", "for", "a", "set", "of", "files" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L328-L361
train
thunder-project/thunder
thunder/readers.py
BotoParallelReader.list
def list(self, dataPath, ext=None, start=None, stop=None, recursive=False):
    """
    List files from remote storage.

    Returns fully-qualified URIs of the form scheme:///bucket/key.
    """
    listing = self.getfiles(dataPath, ext=ext, start=start, stop=stop,
                            recursive=recursive)
    scheme, bucket_name, keylist = listing
    uris = []
    for key in keylist:
        uris.append("%s:///%s/%s" % (scheme, bucket_name, key))
    return uris
python
def list(self, dataPath, ext=None, start=None, stop=None, recursive=False): """ List files from remote storage """ scheme, bucket_name, keylist = self.getfiles( dataPath, ext=ext, start=start, stop=stop, recursive=recursive) return ["%s:///%s/%s" % (scheme, bucket_name, key) for key in keylist]
[ "def", "list", "(", "self", ",", "dataPath", ",", "ext", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "recursive", "=", "False", ")", ":", "scheme", ",", "bucket_name", ",", "keylist", "=", "self", ".", "getfiles", "(", "dataPath", ",", "ext", "=", "ext", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "recursive", "=", "recursive", ")", "return", "[", "\"%s:///%s/%s\"", "%", "(", "scheme", ",", "bucket_name", ",", "key", ")", "for", "key", "in", "keylist", "]" ]
List files from remote storage
[ "List", "files", "from", "remote", "storage" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L363-L370
train
thunder-project/thunder
thunder/readers.py
BotoParallelReader.read
def read(self, path, ext=None, start=None, stop=None, recursive=False, npartitions=None): """ Sets up Spark RDD across S3 or GS objects specified by dataPath. Returns RDD of <string bucket keyname, string buffer> k/v pairs. """ from .utils import connection_with_anon, connection_with_gs path = addextension(path, ext) scheme, bucket_name, keylist = self.getfiles( path, start=start, stop=stop, recursive=recursive) if not keylist: raise FileNotFoundError("No objects found for '%s'" % path) credentials = self.credentials self.nfiles = len(keylist) if spark and isinstance(self.engine, spark): def getsplit(kvIter): if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(credentials) bucket = conn.get_bucket(bucket_name) elif scheme == 'gs': conn = boto.storage_uri(bucket_name, 'gs') bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) for kv in kvIter: idx, keyname = kv key = bucket.get_key(keyname) buf = key.get_contents_as_string() yield idx, buf, keyname npartitions = min(npartitions, self.nfiles) if npartitions else self.nfiles rdd = self.engine.parallelize(enumerate(keylist), npartitions) return rdd.mapPartitions(getsplit) else: if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(credentials) bucket = conn.get_bucket(bucket_name) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) def getsplit(kv): idx, keyName = kv key = bucket.get_key(keyName) buf = key.get_contents_as_string() return idx, buf, keyName return [getsplit(kv) for kv in enumerate(keylist)]
python
def read(self, path, ext=None, start=None, stop=None, recursive=False, npartitions=None): """ Sets up Spark RDD across S3 or GS objects specified by dataPath. Returns RDD of <string bucket keyname, string buffer> k/v pairs. """ from .utils import connection_with_anon, connection_with_gs path = addextension(path, ext) scheme, bucket_name, keylist = self.getfiles( path, start=start, stop=stop, recursive=recursive) if not keylist: raise FileNotFoundError("No objects found for '%s'" % path) credentials = self.credentials self.nfiles = len(keylist) if spark and isinstance(self.engine, spark): def getsplit(kvIter): if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(credentials) bucket = conn.get_bucket(bucket_name) elif scheme == 'gs': conn = boto.storage_uri(bucket_name, 'gs') bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) for kv in kvIter: idx, keyname = kv key = bucket.get_key(keyname) buf = key.get_contents_as_string() yield idx, buf, keyname npartitions = min(npartitions, self.nfiles) if npartitions else self.nfiles rdd = self.engine.parallelize(enumerate(keylist), npartitions) return rdd.mapPartitions(getsplit) else: if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(credentials) bucket = conn.get_bucket(bucket_name) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) def getsplit(kv): idx, keyName = kv key = bucket.get_key(keyName) buf = key.get_contents_as_string() return idx, buf, keyName return [getsplit(kv) for kv in enumerate(keylist)]
[ "def", "read", "(", "self", ",", "path", ",", "ext", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "recursive", "=", "False", ",", "npartitions", "=", "None", ")", ":", "from", ".", "utils", "import", "connection_with_anon", ",", "connection_with_gs", "path", "=", "addextension", "(", "path", ",", "ext", ")", "scheme", ",", "bucket_name", ",", "keylist", "=", "self", ".", "getfiles", "(", "path", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "recursive", "=", "recursive", ")", "if", "not", "keylist", ":", "raise", "FileNotFoundError", "(", "\"No objects found for '%s'\"", "%", "path", ")", "credentials", "=", "self", ".", "credentials", "self", ".", "nfiles", "=", "len", "(", "keylist", ")", "if", "spark", "and", "isinstance", "(", "self", ".", "engine", ",", "spark", ")", ":", "def", "getsplit", "(", "kvIter", ")", ":", "if", "scheme", "==", "'s3'", "or", "scheme", "==", "'s3n'", ":", "conn", "=", "connection_with_anon", "(", "credentials", ")", "bucket", "=", "conn", ".", "get_bucket", "(", "bucket_name", ")", "elif", "scheme", "==", "'gs'", ":", "conn", "=", "boto", ".", "storage_uri", "(", "bucket_name", ",", "'gs'", ")", "bucket", "=", "conn", ".", "get_bucket", "(", ")", "else", ":", "raise", "NotImplementedError", "(", "\"No file reader implementation for URL scheme \"", "+", "scheme", ")", "for", "kv", "in", "kvIter", ":", "idx", ",", "keyname", "=", "kv", "key", "=", "bucket", ".", "get_key", "(", "keyname", ")", "buf", "=", "key", ".", "get_contents_as_string", "(", ")", "yield", "idx", ",", "buf", ",", "keyname", "npartitions", "=", "min", "(", "npartitions", ",", "self", ".", "nfiles", ")", "if", "npartitions", "else", "self", ".", "nfiles", "rdd", "=", "self", ".", "engine", ".", "parallelize", "(", "enumerate", "(", "keylist", ")", ",", "npartitions", ")", "return", "rdd", ".", "mapPartitions", "(", "getsplit", ")", "else", ":", "if", "scheme", "==", "'s3'", "or", "scheme", "==", "'s3n'", ":", "conn", "=", 
"connection_with_anon", "(", "credentials", ")", "bucket", "=", "conn", ".", "get_bucket", "(", "bucket_name", ")", "elif", "scheme", "==", "'gs'", ":", "conn", "=", "connection_with_gs", "(", "bucket_name", ")", "bucket", "=", "conn", ".", "get_bucket", "(", ")", "else", ":", "raise", "NotImplementedError", "(", "\"No file reader implementation for URL scheme \"", "+", "scheme", ")", "def", "getsplit", "(", "kv", ")", ":", "idx", ",", "keyName", "=", "kv", "key", "=", "bucket", ".", "get_key", "(", "keyName", ")", "buf", "=", "key", ".", "get_contents_as_string", "(", ")", "return", "idx", ",", "buf", ",", "keyName", "return", "[", "getsplit", "(", "kv", ")", "for", "kv", "in", "enumerate", "(", "keylist", ")", "]" ]
Sets up Spark RDD across S3 or GS objects specified by dataPath. Returns RDD of <string bucket keyname, string buffer> k/v pairs.
[ "Sets", "up", "Spark", "RDD", "across", "S3", "or", "GS", "objects", "specified", "by", "dataPath", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L372-L430
train
thunder-project/thunder
thunder/readers.py
BotoFileReader.getkeys
def getkeys(self, path, filename=None, directories=False, recursive=False): """ Get matching keys for a path """ from .utils import connection_with_anon, connection_with_gs parse = BotoClient.parse_query(path) scheme = parse[0] bucket_name = parse[1] key = parse[2] if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(self.credentials) bucket = conn.get_bucket(bucket_name) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) if filename: if not key.endswith("/"): if self.check_prefix(bucket, key + "/"): key += "/" else: index = key.rfind("/") if index >= 0: key = key[:(index+1)] else: key = "" key += filename keylist = BotoClient.retrieve_keys(bucket, key, prefix=parse[3], postfix=parse[4], directories=directories, recursive=recursive) return scheme, keylist
python
def getkeys(self, path, filename=None, directories=False, recursive=False): """ Get matching keys for a path """ from .utils import connection_with_anon, connection_with_gs parse = BotoClient.parse_query(path) scheme = parse[0] bucket_name = parse[1] key = parse[2] if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(self.credentials) bucket = conn.get_bucket(bucket_name) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) if filename: if not key.endswith("/"): if self.check_prefix(bucket, key + "/"): key += "/" else: index = key.rfind("/") if index >= 0: key = key[:(index+1)] else: key = "" key += filename keylist = BotoClient.retrieve_keys(bucket, key, prefix=parse[3], postfix=parse[4], directories=directories, recursive=recursive) return scheme, keylist
[ "def", "getkeys", "(", "self", ",", "path", ",", "filename", "=", "None", ",", "directories", "=", "False", ",", "recursive", "=", "False", ")", ":", "from", ".", "utils", "import", "connection_with_anon", ",", "connection_with_gs", "parse", "=", "BotoClient", ".", "parse_query", "(", "path", ")", "scheme", "=", "parse", "[", "0", "]", "bucket_name", "=", "parse", "[", "1", "]", "key", "=", "parse", "[", "2", "]", "if", "scheme", "==", "'s3'", "or", "scheme", "==", "'s3n'", ":", "conn", "=", "connection_with_anon", "(", "self", ".", "credentials", ")", "bucket", "=", "conn", ".", "get_bucket", "(", "bucket_name", ")", "elif", "scheme", "==", "'gs'", ":", "conn", "=", "connection_with_gs", "(", "bucket_name", ")", "bucket", "=", "conn", ".", "get_bucket", "(", ")", "else", ":", "raise", "NotImplementedError", "(", "\"No file reader implementation for URL scheme \"", "+", "scheme", ")", "if", "filename", ":", "if", "not", "key", ".", "endswith", "(", "\"/\"", ")", ":", "if", "self", ".", "check_prefix", "(", "bucket", ",", "key", "+", "\"/\"", ")", ":", "key", "+=", "\"/\"", "else", ":", "index", "=", "key", ".", "rfind", "(", "\"/\"", ")", "if", "index", ">=", "0", ":", "key", "=", "key", "[", ":", "(", "index", "+", "1", ")", "]", "else", ":", "key", "=", "\"\"", "key", "+=", "filename", "keylist", "=", "BotoClient", ".", "retrieve_keys", "(", "bucket", ",", "key", ",", "prefix", "=", "parse", "[", "3", "]", ",", "postfix", "=", "parse", "[", "4", "]", ",", "directories", "=", "directories", ",", "recursive", "=", "recursive", ")", "return", "scheme", ",", "keylist" ]
Get matching keys for a path
[ "Get", "matching", "keys", "for", "a", "path" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L437-L472
train
thunder-project/thunder
thunder/readers.py
BotoFileReader.getkey
def getkey(self, path, filename=None): """ Get single matching key for a path """ scheme, keys = self.getkeys(path, filename=filename) try: key = next(keys) except StopIteration: raise FileNotFoundError("Could not find object for: '%s'" % path) # we expect to only have a single key returned nextKey = None try: nextKey = next(keys) except StopIteration: pass if nextKey: raise ValueError("Found multiple keys for: '%s'" % path) return scheme, key
python
def getkey(self, path, filename=None): """ Get single matching key for a path """ scheme, keys = self.getkeys(path, filename=filename) try: key = next(keys) except StopIteration: raise FileNotFoundError("Could not find object for: '%s'" % path) # we expect to only have a single key returned nextKey = None try: nextKey = next(keys) except StopIteration: pass if nextKey: raise ValueError("Found multiple keys for: '%s'" % path) return scheme, key
[ "def", "getkey", "(", "self", ",", "path", ",", "filename", "=", "None", ")", ":", "scheme", ",", "keys", "=", "self", ".", "getkeys", "(", "path", ",", "filename", "=", "filename", ")", "try", ":", "key", "=", "next", "(", "keys", ")", "except", "StopIteration", ":", "raise", "FileNotFoundError", "(", "\"Could not find object for: '%s'\"", "%", "path", ")", "# we expect to only have a single key returned", "nextKey", "=", "None", "try", ":", "nextKey", "=", "next", "(", "keys", ")", "except", "StopIteration", ":", "pass", "if", "nextKey", ":", "raise", "ValueError", "(", "\"Found multiple keys for: '%s'\"", "%", "path", ")", "return", "scheme", ",", "key" ]
Get single matching key for a path
[ "Get", "single", "matching", "key", "for", "a", "path" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L474-L492
train
thunder-project/thunder
thunder/readers.py
BotoFileReader.list
def list(self, path, filename=None, start=None, stop=None, recursive=False, directories=False): """ List objects specified by path. Returns sorted list of 'gs://' or 's3n://' URIs. """ storageScheme, keys = self.getkeys( path, filename=filename, directories=directories, recursive=recursive) keys = [storageScheme + ":///" + key.bucket.name + "/" + key.name for key in keys] keys.sort() keys = select(keys, start, stop) return keys
python
def list(self, path, filename=None, start=None, stop=None, recursive=False, directories=False): """ List objects specified by path. Returns sorted list of 'gs://' or 's3n://' URIs. """ storageScheme, keys = self.getkeys( path, filename=filename, directories=directories, recursive=recursive) keys = [storageScheme + ":///" + key.bucket.name + "/" + key.name for key in keys] keys.sort() keys = select(keys, start, stop) return keys
[ "def", "list", "(", "self", ",", "path", ",", "filename", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "recursive", "=", "False", ",", "directories", "=", "False", ")", ":", "storageScheme", ",", "keys", "=", "self", ".", "getkeys", "(", "path", ",", "filename", "=", "filename", ",", "directories", "=", "directories", ",", "recursive", "=", "recursive", ")", "keys", "=", "[", "storageScheme", "+", "\":///\"", "+", "key", ".", "bucket", ".", "name", "+", "\"/\"", "+", "key", ".", "name", "for", "key", "in", "keys", "]", "keys", ".", "sort", "(", ")", "keys", "=", "select", "(", "keys", ",", "start", ",", "stop", ")", "return", "keys" ]
List objects specified by path. Returns sorted list of 'gs://' or 's3n://' URIs.
[ "List", "objects", "specified", "by", "path", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L494-L505
train
thunder-project/thunder
thunder/readers.py
BotoFileReader.read
def read(self, path, filename=None, offset=None, size=-1): """ Read a file specified by path. """ storageScheme, key = self.getkey(path, filename=filename) if offset or (size > -1): if not offset: offset = 0 if size > -1: sizeStr = offset + size - 1 # range header is inclusive else: sizeStr = "" headers = {"Range": "bytes=%d-%s" % (offset, sizeStr)} return key.get_contents_as_string(headers=headers) else: return key.get_contents_as_string()
python
def read(self, path, filename=None, offset=None, size=-1): """ Read a file specified by path. """ storageScheme, key = self.getkey(path, filename=filename) if offset or (size > -1): if not offset: offset = 0 if size > -1: sizeStr = offset + size - 1 # range header is inclusive else: sizeStr = "" headers = {"Range": "bytes=%d-%s" % (offset, sizeStr)} return key.get_contents_as_string(headers=headers) else: return key.get_contents_as_string()
[ "def", "read", "(", "self", ",", "path", ",", "filename", "=", "None", ",", "offset", "=", "None", ",", "size", "=", "-", "1", ")", ":", "storageScheme", ",", "key", "=", "self", ".", "getkey", "(", "path", ",", "filename", "=", "filename", ")", "if", "offset", "or", "(", "size", ">", "-", "1", ")", ":", "if", "not", "offset", ":", "offset", "=", "0", "if", "size", ">", "-", "1", ":", "sizeStr", "=", "offset", "+", "size", "-", "1", "# range header is inclusive", "else", ":", "sizeStr", "=", "\"\"", "headers", "=", "{", "\"Range\"", ":", "\"bytes=%d-%s\"", "%", "(", "offset", ",", "sizeStr", ")", "}", "return", "key", ".", "get_contents_as_string", "(", "headers", "=", "headers", ")", "else", ":", "return", "key", ".", "get_contents_as_string", "(", ")" ]
Read a file specified by path.
[ "Read", "a", "file", "specified", "by", "path", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L507-L523
train
thunder-project/thunder
thunder/readers.py
BotoFileReader.open
def open(self, path, filename=None): """ Open a file specified by path. """ scheme, key = self.getkey(path, filename=filename) return BotoReadFileHandle(scheme, key)
python
def open(self, path, filename=None): """ Open a file specified by path. """ scheme, key = self.getkey(path, filename=filename) return BotoReadFileHandle(scheme, key)
[ "def", "open", "(", "self", ",", "path", ",", "filename", "=", "None", ")", ":", "scheme", ",", "key", "=", "self", ".", "getkey", "(", "path", ",", "filename", "=", "filename", ")", "return", "BotoReadFileHandle", "(", "scheme", ",", "key", ")" ]
Open a file specified by path.
[ "Open", "a", "file", "specified", "by", "path", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L525-L530
train
thunder-project/thunder
thunder/utils.py
check_path
def check_path(path, credentials=None): """ Check that specified output path does not already exist The ValueError message will suggest calling with overwrite=True; this function is expected to be called from the various output methods that accept an 'overwrite' keyword argument. """ from thunder.readers import get_file_reader reader = get_file_reader(path)(credentials=credentials) existing = reader.list(path, directories=True) if existing: raise ValueError('Path %s appears to already exist. Specify a new directory, ' 'or call with overwrite=True to overwrite.' % path)
python
def check_path(path, credentials=None): """ Check that specified output path does not already exist The ValueError message will suggest calling with overwrite=True; this function is expected to be called from the various output methods that accept an 'overwrite' keyword argument. """ from thunder.readers import get_file_reader reader = get_file_reader(path)(credentials=credentials) existing = reader.list(path, directories=True) if existing: raise ValueError('Path %s appears to already exist. Specify a new directory, ' 'or call with overwrite=True to overwrite.' % path)
[ "def", "check_path", "(", "path", ",", "credentials", "=", "None", ")", ":", "from", "thunder", ".", "readers", "import", "get_file_reader", "reader", "=", "get_file_reader", "(", "path", ")", "(", "credentials", "=", "credentials", ")", "existing", "=", "reader", ".", "list", "(", "path", ",", "directories", "=", "True", ")", "if", "existing", ":", "raise", "ValueError", "(", "'Path %s appears to already exist. Specify a new directory, '", "'or call with overwrite=True to overwrite.'", "%", "path", ")" ]
Check that specified output path does not already exist The ValueError message will suggest calling with overwrite=True; this function is expected to be called from the various output methods that accept an 'overwrite' keyword argument.
[ "Check", "that", "specified", "output", "path", "does", "not", "already", "exist" ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/utils.py#L18-L31
train
thunder-project/thunder
thunder/utils.py
connection_with_anon
def connection_with_anon(credentials, anon=True): """ Connect to S3 with automatic handling for anonymous access. Parameters ---------- credentials : dict AWS access key ('access') and secret access key ('secret') anon : boolean, optional, default = True Whether to make an anonymous connection if credentials fail to authenticate """ from boto.s3.connection import S3Connection from boto.exception import NoAuthHandlerFound try: conn = S3Connection(aws_access_key_id=credentials['access'], aws_secret_access_key=credentials['secret']) return conn except NoAuthHandlerFound: if anon: conn = S3Connection(anon=True) return conn else: raise
python
def connection_with_anon(credentials, anon=True): """ Connect to S3 with automatic handling for anonymous access. Parameters ---------- credentials : dict AWS access key ('access') and secret access key ('secret') anon : boolean, optional, default = True Whether to make an anonymous connection if credentials fail to authenticate """ from boto.s3.connection import S3Connection from boto.exception import NoAuthHandlerFound try: conn = S3Connection(aws_access_key_id=credentials['access'], aws_secret_access_key=credentials['secret']) return conn except NoAuthHandlerFound: if anon: conn = S3Connection(anon=True) return conn else: raise
[ "def", "connection_with_anon", "(", "credentials", ",", "anon", "=", "True", ")", ":", "from", "boto", ".", "s3", ".", "connection", "import", "S3Connection", "from", "boto", ".", "exception", "import", "NoAuthHandlerFound", "try", ":", "conn", "=", "S3Connection", "(", "aws_access_key_id", "=", "credentials", "[", "'access'", "]", ",", "aws_secret_access_key", "=", "credentials", "[", "'secret'", "]", ")", "return", "conn", "except", "NoAuthHandlerFound", ":", "if", "anon", ":", "conn", "=", "S3Connection", "(", "anon", "=", "True", ")", "return", "conn", "else", ":", "raise" ]
Connect to S3 with automatic handling for anonymous access. Parameters ---------- credentials : dict AWS access key ('access') and secret access key ('secret') anon : boolean, optional, default = True Whether to make an anonymous connection if credentials fail to authenticate
[ "Connect", "to", "S3", "with", "automatic", "handling", "for", "anonymous", "access", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/utils.py#L33-L58
train
thunder-project/thunder
thunder/writers.py
BotoWriter.activate
def activate(self, path, isdirectory): """ Set up a boto connection. """ from .utils import connection_with_anon, connection_with_gs parsed = BotoClient.parse_query(path) scheme = parsed[0] bucket_name = parsed[1] key = parsed[2] if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(self.credentials) bucket = conn.get_bucket(bucket_name) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) if isdirectory and (not key.endswith("/")): key += "/" self._scheme = scheme self._conn = conn self._key = key self._bucket = bucket self._active = True
python
def activate(self, path, isdirectory): """ Set up a boto connection. """ from .utils import connection_with_anon, connection_with_gs parsed = BotoClient.parse_query(path) scheme = parsed[0] bucket_name = parsed[1] key = parsed[2] if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(self.credentials) bucket = conn.get_bucket(bucket_name) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) if isdirectory and (not key.endswith("/")): key += "/" self._scheme = scheme self._conn = conn self._key = key self._bucket = bucket self._active = True
[ "def", "activate", "(", "self", ",", "path", ",", "isdirectory", ")", ":", "from", ".", "utils", "import", "connection_with_anon", ",", "connection_with_gs", "parsed", "=", "BotoClient", ".", "parse_query", "(", "path", ")", "scheme", "=", "parsed", "[", "0", "]", "bucket_name", "=", "parsed", "[", "1", "]", "key", "=", "parsed", "[", "2", "]", "if", "scheme", "==", "'s3'", "or", "scheme", "==", "'s3n'", ":", "conn", "=", "connection_with_anon", "(", "self", ".", "credentials", ")", "bucket", "=", "conn", ".", "get_bucket", "(", "bucket_name", ")", "elif", "scheme", "==", "'gs'", ":", "conn", "=", "connection_with_gs", "(", "bucket_name", ")", "bucket", "=", "conn", ".", "get_bucket", "(", ")", "else", ":", "raise", "NotImplementedError", "(", "\"No file reader implementation for URL scheme \"", "+", "scheme", ")", "if", "isdirectory", "and", "(", "not", "key", ".", "endswith", "(", "\"/\"", ")", ")", ":", "key", "+=", "\"/\"", "self", ".", "_scheme", "=", "scheme", "self", ".", "_conn", "=", "conn", "self", ".", "_key", "=", "key", "self", ".", "_bucket", "=", "bucket", "self", ".", "_active", "=", "True" ]
Set up a boto connection.
[ "Set", "up", "a", "boto", "connection", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/writers.py#L50-L78
train
thunder-project/thunder
thunder/images/writers.py
topng
def topng(images, path, prefix="image", overwrite=False, credentials=None): """ Write out PNG files for 2d image data. See also -------- thunder.data.images.topng """ value_shape = images.value_shape if not len(value_shape) in [2, 3]: raise ValueError("Only 2D or 3D images can be exported to png, " "images are %d-dimensional." % len(value_shape)) from scipy.misc import imsave from io import BytesIO from thunder.writers import get_parallel_writer def tobuffer(kv): key, img = kv fname = prefix+"-"+"%05d.png" % int(key) bytebuf = BytesIO() imsave(bytebuf, img, format='PNG') return fname, bytebuf.getvalue() writer = get_parallel_writer(path)(path, overwrite=overwrite, credentials=credentials) images.foreach(lambda x: writer.write(tobuffer(x)))
python
def topng(images, path, prefix="image", overwrite=False, credentials=None): """ Write out PNG files for 2d image data. See also -------- thunder.data.images.topng """ value_shape = images.value_shape if not len(value_shape) in [2, 3]: raise ValueError("Only 2D or 3D images can be exported to png, " "images are %d-dimensional." % len(value_shape)) from scipy.misc import imsave from io import BytesIO from thunder.writers import get_parallel_writer def tobuffer(kv): key, img = kv fname = prefix+"-"+"%05d.png" % int(key) bytebuf = BytesIO() imsave(bytebuf, img, format='PNG') return fname, bytebuf.getvalue() writer = get_parallel_writer(path)(path, overwrite=overwrite, credentials=credentials) images.foreach(lambda x: writer.write(tobuffer(x)))
[ "def", "topng", "(", "images", ",", "path", ",", "prefix", "=", "\"image\"", ",", "overwrite", "=", "False", ",", "credentials", "=", "None", ")", ":", "value_shape", "=", "images", ".", "value_shape", "if", "not", "len", "(", "value_shape", ")", "in", "[", "2", ",", "3", "]", ":", "raise", "ValueError", "(", "\"Only 2D or 3D images can be exported to png, \"", "\"images are %d-dimensional.\"", "%", "len", "(", "value_shape", ")", ")", "from", "scipy", ".", "misc", "import", "imsave", "from", "io", "import", "BytesIO", "from", "thunder", ".", "writers", "import", "get_parallel_writer", "def", "tobuffer", "(", "kv", ")", ":", "key", ",", "img", "=", "kv", "fname", "=", "prefix", "+", "\"-\"", "+", "\"%05d.png\"", "%", "int", "(", "key", ")", "bytebuf", "=", "BytesIO", "(", ")", "imsave", "(", "bytebuf", ",", "img", ",", "format", "=", "'PNG'", ")", "return", "fname", ",", "bytebuf", ".", "getvalue", "(", ")", "writer", "=", "get_parallel_writer", "(", "path", ")", "(", "path", ",", "overwrite", "=", "overwrite", ",", "credentials", "=", "credentials", ")", "images", ".", "foreach", "(", "lambda", "x", ":", "writer", ".", "write", "(", "tobuffer", "(", "x", ")", ")", ")" ]
Write out PNG files for 2d image data. See also -------- thunder.data.images.topng
[ "Write", "out", "PNG", "files", "for", "2d", "image", "data", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/writers.py#L4-L29
train
thunder-project/thunder
thunder/images/writers.py
tobinary
def tobinary(images, path, prefix="image", overwrite=False, credentials=None): """ Write out images as binary files. See also -------- thunder.data.images.tobinary """ from thunder.writers import get_parallel_writer def tobuffer(kv): key, img = kv fname = prefix + "-" + "%05d.bin" % int(key) return fname, img.copy() writer = get_parallel_writer(path)(path, overwrite=overwrite, credentials=credentials) images.foreach(lambda x: writer.write(tobuffer(x))) config(path, list(images.value_shape), images.dtype, overwrite=overwrite)
python
def tobinary(images, path, prefix="image", overwrite=False, credentials=None): """ Write out images as binary files. See also -------- thunder.data.images.tobinary """ from thunder.writers import get_parallel_writer def tobuffer(kv): key, img = kv fname = prefix + "-" + "%05d.bin" % int(key) return fname, img.copy() writer = get_parallel_writer(path)(path, overwrite=overwrite, credentials=credentials) images.foreach(lambda x: writer.write(tobuffer(x))) config(path, list(images.value_shape), images.dtype, overwrite=overwrite)
[ "def", "tobinary", "(", "images", ",", "path", ",", "prefix", "=", "\"image\"", ",", "overwrite", "=", "False", ",", "credentials", "=", "None", ")", ":", "from", "thunder", ".", "writers", "import", "get_parallel_writer", "def", "tobuffer", "(", "kv", ")", ":", "key", ",", "img", "=", "kv", "fname", "=", "prefix", "+", "\"-\"", "+", "\"%05d.bin\"", "%", "int", "(", "key", ")", "return", "fname", ",", "img", ".", "copy", "(", ")", "writer", "=", "get_parallel_writer", "(", "path", ")", "(", "path", ",", "overwrite", "=", "overwrite", ",", "credentials", "=", "credentials", ")", "images", ".", "foreach", "(", "lambda", "x", ":", "writer", ".", "write", "(", "tobuffer", "(", "x", ")", ")", ")", "config", "(", "path", ",", "list", "(", "images", ".", "value_shape", ")", ",", "images", ".", "dtype", ",", "overwrite", "=", "overwrite", ")" ]
Write out images as binary files. See also -------- thunder.data.images.tobinary
[ "Write", "out", "images", "as", "binary", "files", "." ]
967ff8f3e7c2fabe1705743d95eb2746d4329786
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/writers.py#L58-L75
train
lidaobing/python-lunardate
lunardate.py
yearInfo2yearDay
def yearInfo2yearDay(yearInfo): '''calculate the days in a lunar year from the lunar year's info >>> yearInfo2yearDay(0) # no leap month, and every month has 29 days. 348 >>> yearInfo2yearDay(1) # 1 leap month, and every month has 29 days. 377 >>> yearInfo2yearDay((2**12-1)*16) # no leap month, and every month has 30 days. 360 >>> yearInfo2yearDay((2**13-1)*16+1) # 1 leap month, and every month has 30 days. 390 >>> # 1 leap month, and every normal month has 30 days, and leap month has 29 days. >>> yearInfo2yearDay((2**12-1)*16+1) 389 ''' yearInfo = int(yearInfo) res = 29 * 12 leap = False if yearInfo % 16 != 0: leap = True res += 29 yearInfo //= 16 for i in range(12 + leap): if yearInfo % 2 == 1: res += 1 yearInfo //= 2 return res
python
def yearInfo2yearDay(yearInfo): '''calculate the days in a lunar year from the lunar year's info >>> yearInfo2yearDay(0) # no leap month, and every month has 29 days. 348 >>> yearInfo2yearDay(1) # 1 leap month, and every month has 29 days. 377 >>> yearInfo2yearDay((2**12-1)*16) # no leap month, and every month has 30 days. 360 >>> yearInfo2yearDay((2**13-1)*16+1) # 1 leap month, and every month has 30 days. 390 >>> # 1 leap month, and every normal month has 30 days, and leap month has 29 days. >>> yearInfo2yearDay((2**12-1)*16+1) 389 ''' yearInfo = int(yearInfo) res = 29 * 12 leap = False if yearInfo % 16 != 0: leap = True res += 29 yearInfo //= 16 for i in range(12 + leap): if yearInfo % 2 == 1: res += 1 yearInfo //= 2 return res
[ "def", "yearInfo2yearDay", "(", "yearInfo", ")", ":", "yearInfo", "=", "int", "(", "yearInfo", ")", "res", "=", "29", "*", "12", "leap", "=", "False", "if", "yearInfo", "%", "16", "!=", "0", ":", "leap", "=", "True", "res", "+=", "29", "yearInfo", "//=", "16", "for", "i", "in", "range", "(", "12", "+", "leap", ")", ":", "if", "yearInfo", "%", "2", "==", "1", ":", "res", "+=", "1", "yearInfo", "//=", "2", "return", "res" ]
calculate the days in a lunar year from the lunar year's info >>> yearInfo2yearDay(0) # no leap month, and every month has 29 days. 348 >>> yearInfo2yearDay(1) # 1 leap month, and every month has 29 days. 377 >>> yearInfo2yearDay((2**12-1)*16) # no leap month, and every month has 30 days. 360 >>> yearInfo2yearDay((2**13-1)*16+1) # 1 leap month, and every month has 30 days. 390 >>> # 1 leap month, and every normal month has 30 days, and leap month has 29 days. >>> yearInfo2yearDay((2**12-1)*16+1) 389
[ "calculate", "the", "days", "in", "a", "lunar", "year", "from", "the", "lunar", "year", "s", "info" ]
261334a27d772489c9fc70b8ecef129ba3c13118
https://github.com/lidaobing/python-lunardate/blob/261334a27d772489c9fc70b8ecef129ba3c13118/lunardate.py#L367-L397
train
plone/plone.app.mosaic
src/plone/app/mosaic/browser/upload.py
MosaicUploadView.cleanupFilename
def cleanupFilename(self, name): """Generate a unique id which doesn't match the system generated ids""" context = self.context id = '' name = name.replace('\\', '/') # Fixup Windows filenames name = name.split('/')[-1] # Throw away any path part. for c in name: if c.isalnum() or c in '._': id += c # Raise condition here, but not a lot we can do about that if context.check_id(id) is None and getattr(context, id, None) is None: return id # Now make the id unique count = 1 while 1: if count == 1: sc = '' else: sc = str(count) newid = "copy{0:s}_of_{1:s}".format(sc, id) if context.check_id(newid) is None \ and getattr(context, newid, None) is None: return newid count += 1
python
def cleanupFilename(self, name): """Generate a unique id which doesn't match the system generated ids""" context = self.context id = '' name = name.replace('\\', '/') # Fixup Windows filenames name = name.split('/')[-1] # Throw away any path part. for c in name: if c.isalnum() or c in '._': id += c # Raise condition here, but not a lot we can do about that if context.check_id(id) is None and getattr(context, id, None) is None: return id # Now make the id unique count = 1 while 1: if count == 1: sc = '' else: sc = str(count) newid = "copy{0:s}_of_{1:s}".format(sc, id) if context.check_id(newid) is None \ and getattr(context, newid, None) is None: return newid count += 1
[ "def", "cleanupFilename", "(", "self", ",", "name", ")", ":", "context", "=", "self", ".", "context", "id", "=", "''", "name", "=", "name", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "# Fixup Windows filenames", "name", "=", "name", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "# Throw away any path part.", "for", "c", "in", "name", ":", "if", "c", ".", "isalnum", "(", ")", "or", "c", "in", "'._'", ":", "id", "+=", "c", "# Raise condition here, but not a lot we can do about that", "if", "context", ".", "check_id", "(", "id", ")", "is", "None", "and", "getattr", "(", "context", ",", "id", ",", "None", ")", "is", "None", ":", "return", "id", "# Now make the id unique", "count", "=", "1", "while", "1", ":", "if", "count", "==", "1", ":", "sc", "=", "''", "else", ":", "sc", "=", "str", "(", "count", ")", "newid", "=", "\"copy{0:s}_of_{1:s}\"", ".", "format", "(", "sc", ",", "id", ")", "if", "context", ".", "check_id", "(", "newid", ")", "is", "None", "and", "getattr", "(", "context", ",", "newid", ",", "None", ")", "is", "None", ":", "return", "newid", "count", "+=", "1" ]
Generate a unique id which doesn't match the system generated ids
[ "Generate", "a", "unique", "id", "which", "doesn", "t", "match", "the", "system", "generated", "ids" ]
73b6acb18905025a76b239c86de9543ed9350991
https://github.com/plone/plone.app.mosaic/blob/73b6acb18905025a76b239c86de9543ed9350991/src/plone/app/mosaic/browser/upload.py#L80-L106
train
plone/plone.app.mosaic
src/plone/app/mosaic/browser/main_template.py
parse_data_slots
def parse_data_slots(value): """Parse data-slots value into slots used to wrap node, prepend to node or append to node. >>> parse_data_slots('') ([], [], []) >>> parse_data_slots('foo bar') (['foo', 'bar'], [], []) >>> parse_data_slots('foo bar > foobar') (['foo', 'bar'], ['foobar'], []) >>> parse_data_slots('> foobar') ([], ['foobar'], []) >>> parse_data_slots('> foo * bar') ([], ['foo'], ['bar']) >>> parse_data_slots('foobar > foo * bar') (['foobar'], ['foo'], ['bar']) >>> parse_data_slots('foo > * bar') (['foo'], [], ['bar']) """ value = unquote(value) if '>' in value: wrappers, children = value.split('>', 1) else: wrappers = value children = '' if '*' in children: prepends, appends = children.split('*', 1) else: prepends = children appends = '' wrappers = list(filter(bool, list(map(str.strip, wrappers.split())))) prepends = list(filter(bool, list(map(str.strip, prepends.split())))) appends = list(filter(bool, list(map(str.strip, appends.split())))) return wrappers, prepends, appends
python
def parse_data_slots(value): """Parse data-slots value into slots used to wrap node, prepend to node or append to node. >>> parse_data_slots('') ([], [], []) >>> parse_data_slots('foo bar') (['foo', 'bar'], [], []) >>> parse_data_slots('foo bar > foobar') (['foo', 'bar'], ['foobar'], []) >>> parse_data_slots('> foobar') ([], ['foobar'], []) >>> parse_data_slots('> foo * bar') ([], ['foo'], ['bar']) >>> parse_data_slots('foobar > foo * bar') (['foobar'], ['foo'], ['bar']) >>> parse_data_slots('foo > * bar') (['foo'], [], ['bar']) """ value = unquote(value) if '>' in value: wrappers, children = value.split('>', 1) else: wrappers = value children = '' if '*' in children: prepends, appends = children.split('*', 1) else: prepends = children appends = '' wrappers = list(filter(bool, list(map(str.strip, wrappers.split())))) prepends = list(filter(bool, list(map(str.strip, prepends.split())))) appends = list(filter(bool, list(map(str.strip, appends.split())))) return wrappers, prepends, appends
[ "def", "parse_data_slots", "(", "value", ")", ":", "value", "=", "unquote", "(", "value", ")", "if", "'>'", "in", "value", ":", "wrappers", ",", "children", "=", "value", ".", "split", "(", "'>'", ",", "1", ")", "else", ":", "wrappers", "=", "value", "children", "=", "''", "if", "'*'", "in", "children", ":", "prepends", ",", "appends", "=", "children", ".", "split", "(", "'*'", ",", "1", ")", "else", ":", "prepends", "=", "children", "appends", "=", "''", "wrappers", "=", "list", "(", "filter", "(", "bool", ",", "list", "(", "map", "(", "str", ".", "strip", ",", "wrappers", ".", "split", "(", ")", ")", ")", ")", ")", "prepends", "=", "list", "(", "filter", "(", "bool", ",", "list", "(", "map", "(", "str", ".", "strip", ",", "prepends", ".", "split", "(", ")", ")", ")", ")", ")", "appends", "=", "list", "(", "filter", "(", "bool", ",", "list", "(", "map", "(", "str", ".", "strip", ",", "appends", ".", "split", "(", ")", ")", ")", ")", ")", "return", "wrappers", ",", "prepends", ",", "appends" ]
Parse data-slots value into slots used to wrap node, prepend to node or append to node. >>> parse_data_slots('') ([], [], []) >>> parse_data_slots('foo bar') (['foo', 'bar'], [], []) >>> parse_data_slots('foo bar > foobar') (['foo', 'bar'], ['foobar'], []) >>> parse_data_slots('> foobar') ([], ['foobar'], []) >>> parse_data_slots('> foo * bar') ([], ['foo'], ['bar']) >>> parse_data_slots('foobar > foo * bar') (['foobar'], ['foo'], ['bar']) >>> parse_data_slots('foo > * bar') (['foo'], [], ['bar'])
[ "Parse", "data", "-", "slots", "value", "into", "slots", "used", "to", "wrap", "node", "prepend", "to", "node", "or", "append", "to", "node", "." ]
73b6acb18905025a76b239c86de9543ed9350991
https://github.com/plone/plone.app.mosaic/blob/73b6acb18905025a76b239c86de9543ed9350991/src/plone/app/mosaic/browser/main_template.py#L65-L107
train
plone/plone.app.mosaic
src/plone/app/mosaic/browser/main_template.py
cook_layout
def cook_layout(layout, ajax): """Return main_template compatible layout""" # Fix XHTML layouts with CR[+LF] line endings layout = re.sub('\r', '\n', re.sub('\r\n', '\n', layout)) # Parse layout if isinstance(layout, six.text_type): result = getHTMLSerializer([layout.encode('utf-8')], encoding='utf-8') else: result = getHTMLSerializer([layout], encoding='utf-8') # Fix XHTML layouts with inline js (etree.tostring breaks all <![CDATA[) if '<![CDATA[' in layout: result.serializer = html.tostring # Wrap all panels with a metal:fill-slot -tag: all_slots = [] for layoutPanelNode in slotsXPath(result.tree): data_slots = layoutPanelNode.attrib['data-slots'] all_slots += wrap_append_prepend_slots(layoutPanelNode, data_slots) del layoutPanelNode.attrib['data-slots'] # When no slots are explicitly defined, try to inject the very default # slots if len(all_slots) == 0: for node in result.tree.xpath('//*[@data-panel="content"]'): wrap_append_prepend_slots( node, 'content > body header main * content-core') # Append implicit slots head = result.tree.getroot().find('head') if not ajax and head is not None: for name in ['top_slot', 'head_slot', 'style_slot', 'javascript_head_slot']: slot = etree.Element('{{{0:s}}}{1:s}'.format(NSMAP['metal'], name), nsmap=NSMAP) slot.attrib['define-slot'] = name head.append(slot) template = TEMPLATE metal = 'xmlns:metal="http://namespaces.zope.org/metal"' return (template % ''.join(result)).replace(metal, '')
python
def cook_layout(layout, ajax): """Return main_template compatible layout""" # Fix XHTML layouts with CR[+LF] line endings layout = re.sub('\r', '\n', re.sub('\r\n', '\n', layout)) # Parse layout if isinstance(layout, six.text_type): result = getHTMLSerializer([layout.encode('utf-8')], encoding='utf-8') else: result = getHTMLSerializer([layout], encoding='utf-8') # Fix XHTML layouts with inline js (etree.tostring breaks all <![CDATA[) if '<![CDATA[' in layout: result.serializer = html.tostring # Wrap all panels with a metal:fill-slot -tag: all_slots = [] for layoutPanelNode in slotsXPath(result.tree): data_slots = layoutPanelNode.attrib['data-slots'] all_slots += wrap_append_prepend_slots(layoutPanelNode, data_slots) del layoutPanelNode.attrib['data-slots'] # When no slots are explicitly defined, try to inject the very default # slots if len(all_slots) == 0: for node in result.tree.xpath('//*[@data-panel="content"]'): wrap_append_prepend_slots( node, 'content > body header main * content-core') # Append implicit slots head = result.tree.getroot().find('head') if not ajax and head is not None: for name in ['top_slot', 'head_slot', 'style_slot', 'javascript_head_slot']: slot = etree.Element('{{{0:s}}}{1:s}'.format(NSMAP['metal'], name), nsmap=NSMAP) slot.attrib['define-slot'] = name head.append(slot) template = TEMPLATE metal = 'xmlns:metal="http://namespaces.zope.org/metal"' return (template % ''.join(result)).replace(metal, '')
[ "def", "cook_layout", "(", "layout", ",", "ajax", ")", ":", "# Fix XHTML layouts with CR[+LF] line endings", "layout", "=", "re", ".", "sub", "(", "'\\r'", ",", "'\\n'", ",", "re", ".", "sub", "(", "'\\r\\n'", ",", "'\\n'", ",", "layout", ")", ")", "# Parse layout", "if", "isinstance", "(", "layout", ",", "six", ".", "text_type", ")", ":", "result", "=", "getHTMLSerializer", "(", "[", "layout", ".", "encode", "(", "'utf-8'", ")", "]", ",", "encoding", "=", "'utf-8'", ")", "else", ":", "result", "=", "getHTMLSerializer", "(", "[", "layout", "]", ",", "encoding", "=", "'utf-8'", ")", "# Fix XHTML layouts with inline js (etree.tostring breaks all <![CDATA[)", "if", "'<![CDATA['", "in", "layout", ":", "result", ".", "serializer", "=", "html", ".", "tostring", "# Wrap all panels with a metal:fill-slot -tag:", "all_slots", "=", "[", "]", "for", "layoutPanelNode", "in", "slotsXPath", "(", "result", ".", "tree", ")", ":", "data_slots", "=", "layoutPanelNode", ".", "attrib", "[", "'data-slots'", "]", "all_slots", "+=", "wrap_append_prepend_slots", "(", "layoutPanelNode", ",", "data_slots", ")", "del", "layoutPanelNode", ".", "attrib", "[", "'data-slots'", "]", "# When no slots are explicitly defined, try to inject the very default", "# slots", "if", "len", "(", "all_slots", ")", "==", "0", ":", "for", "node", "in", "result", ".", "tree", ".", "xpath", "(", "'//*[@data-panel=\"content\"]'", ")", ":", "wrap_append_prepend_slots", "(", "node", ",", "'content > body header main * content-core'", ")", "# Append implicit slots", "head", "=", "result", ".", "tree", ".", "getroot", "(", ")", ".", "find", "(", "'head'", ")", "if", "not", "ajax", "and", "head", "is", "not", "None", ":", "for", "name", "in", "[", "'top_slot'", ",", "'head_slot'", ",", "'style_slot'", ",", "'javascript_head_slot'", "]", ":", "slot", "=", "etree", ".", "Element", "(", "'{{{0:s}}}{1:s}'", ".", "format", "(", "NSMAP", "[", "'metal'", "]", ",", "name", ")", ",", "nsmap", "=", "NSMAP", ")", "slot", ".", 
"attrib", "[", "'define-slot'", "]", "=", "name", "head", ".", "append", "(", "slot", ")", "template", "=", "TEMPLATE", "metal", "=", "'xmlns:metal=\"http://namespaces.zope.org/metal\"'", "return", "(", "template", "%", "''", ".", "join", "(", "result", ")", ")", ".", "replace", "(", "metal", ",", "''", ")" ]
Return main_template compatible layout
[ "Return", "main_template", "compatible", "layout" ]
73b6acb18905025a76b239c86de9543ed9350991
https://github.com/plone/plone.app.mosaic/blob/73b6acb18905025a76b239c86de9543ed9350991/src/plone/app/mosaic/browser/main_template.py#L138-L179
train
plone/plone.app.mosaic
src/plone/app/mosaic/browser/editor.py
ManageLayoutView.existing
def existing(self): """ find existing content assigned to this layout""" catalog = api.portal.get_tool('portal_catalog') results = [] layout_path = self._get_layout_path( self.request.form.get('layout', '') ) for brain in catalog(layout=layout_path): results.append({ 'title': brain.Title, 'url': brain.getURL() }) return json.dumps({ 'total': len(results), 'data': results })
python
def existing(self): """ find existing content assigned to this layout""" catalog = api.portal.get_tool('portal_catalog') results = [] layout_path = self._get_layout_path( self.request.form.get('layout', '') ) for brain in catalog(layout=layout_path): results.append({ 'title': brain.Title, 'url': brain.getURL() }) return json.dumps({ 'total': len(results), 'data': results })
[ "def", "existing", "(", "self", ")", ":", "catalog", "=", "api", ".", "portal", ".", "get_tool", "(", "'portal_catalog'", ")", "results", "=", "[", "]", "layout_path", "=", "self", ".", "_get_layout_path", "(", "self", ".", "request", ".", "form", ".", "get", "(", "'layout'", ",", "''", ")", ")", "for", "brain", "in", "catalog", "(", "layout", "=", "layout_path", ")", ":", "results", ".", "append", "(", "{", "'title'", ":", "brain", ".", "Title", ",", "'url'", ":", "brain", ".", "getURL", "(", ")", "}", ")", "return", "json", ".", "dumps", "(", "{", "'total'", ":", "len", "(", "results", ")", ",", "'data'", ":", "results", "}", ")" ]
find existing content assigned to this layout
[ "find", "existing", "content", "assigned", "to", "this", "layout" ]
73b6acb18905025a76b239c86de9543ed9350991
https://github.com/plone/plone.app.mosaic/blob/73b6acb18905025a76b239c86de9543ed9350991/src/plone/app/mosaic/browser/editor.py#L127-L142
train
sergiocorreia/panflute
panflute/io.py
load_reader_options
def load_reader_options(): """ Retrieve Pandoc Reader options from the environment """ options = os.environ['PANDOC_READER_OPTIONS'] options = json.loads(options, object_pairs_hook=OrderedDict) return options
python
def load_reader_options(): """ Retrieve Pandoc Reader options from the environment """ options = os.environ['PANDOC_READER_OPTIONS'] options = json.loads(options, object_pairs_hook=OrderedDict) return options
[ "def", "load_reader_options", "(", ")", ":", "options", "=", "os", ".", "environ", "[", "'PANDOC_READER_OPTIONS'", "]", "options", "=", "json", ".", "loads", "(", "options", ",", "object_pairs_hook", "=", "OrderedDict", ")", "return", "options" ]
Retrieve Pandoc Reader options from the environment
[ "Retrieve", "Pandoc", "Reader", "options", "from", "the", "environment" ]
65c2d570c26a190deb600cab5e2ad8a828a3302e
https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/io.py#L263-L269
train
sergiocorreia/panflute
panflute/tools.py
yaml_filter
def yaml_filter(element, doc, tag=None, function=None, tags=None, strict_yaml=False): ''' Convenience function for parsing code blocks with YAML options This function is useful to create a filter that applies to code blocks that have specific classes. It is used as an argument of ``run_filter``, with two additional options: ``tag`` and ``function``. Using this is equivalent to having filter functions that: 1. Check if the element is a code block 2. Check if the element belongs to a specific class 3. Split the YAML options (at the beginning of the block, by looking for ``...`` or ``---`` strings in a separate line 4. Parse the YAML 5. Use the YAML options and (optionally) the data that follows the YAML to return a new or modified element Instead, you just need to: 1. Call ``run_filter`` with ``yaml_filter`` as the action function, and with the additional arguments ``tag`` and ``function`` 2. Construct a ``fenced_action`` function that takes four arguments: (options, data, element, doc). Note that options is a dict and data is a raw string. Notice that this is similar to the ``action`` functions of standard filters, but with *options* and *data* as the new ones. Note: if you want to apply multiple functions to separate classes, you can use the ``tags`` argument, which receives a dict of ``tag: function`` pairs. Note: use the ``strict_yaml=True`` option in order to allow for more verbose but flexible YAML metadata: more than one YAML blocks are allowed, but they all must start with ``---`` (even at the beginning) and end with ``---`` or ``...``. Also, YAML is not the default content when no delimiters are set. 
Example:: """ Replace code blocks of class 'foo' with # horizontal rules """ import panflute as pf def fenced_action(options, data, element, doc): count = options.get('count', 1) div = pf.Div(attributes={'count': str(count)}) div.content.extend([pf.HorizontalRule] * count) return div if __name__ == '__main__': pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action) ''' # Allow for either tag+function or a dict {tag: function} assert (tag is None) + (tags is None) == 1 # XOR if tags is None: tags = {tag: function} if type(element) == CodeBlock: for tag in tags: if tag in element.classes: function = tags[tag] if not strict_yaml: # Split YAML and data parts (separated by ... or ---) raw = re.split("^([.]{3,}|[-]{3,})$", element.text, 1, re.MULTILINE) data = raw[2] if len(raw) > 2 else '' data = data.lstrip('\n') raw = raw[0] try: options = yaml.safe_load(raw) except yaml.scanner.ScannerError: debug("panflute: malformed YAML block") return if options is None: options = {} else: options = {} data = [] raw = re.split("^([.]{3,}|[-]{3,})$", element.text, 0, re.MULTILINE) rawmode = True for chunk in raw: chunk = chunk.strip('\n') if not chunk: continue if rawmode: if chunk.startswith('---'): rawmode = False else: data.append(chunk) else: if chunk.startswith('---') or chunk.startswith('...'): rawmode = True else: try: options.update(yaml.safe_load(chunk)) except yaml.scanner.ScannerError: debug("panflute: malformed YAML block") return data = '\n'.join(data) return function(options=options, data=data, element=element, doc=doc)
python
def yaml_filter(element, doc, tag=None, function=None, tags=None, strict_yaml=False): ''' Convenience function for parsing code blocks with YAML options This function is useful to create a filter that applies to code blocks that have specific classes. It is used as an argument of ``run_filter``, with two additional options: ``tag`` and ``function``. Using this is equivalent to having filter functions that: 1. Check if the element is a code block 2. Check if the element belongs to a specific class 3. Split the YAML options (at the beginning of the block, by looking for ``...`` or ``---`` strings in a separate line 4. Parse the YAML 5. Use the YAML options and (optionally) the data that follows the YAML to return a new or modified element Instead, you just need to: 1. Call ``run_filter`` with ``yaml_filter`` as the action function, and with the additional arguments ``tag`` and ``function`` 2. Construct a ``fenced_action`` function that takes four arguments: (options, data, element, doc). Note that options is a dict and data is a raw string. Notice that this is similar to the ``action`` functions of standard filters, but with *options* and *data* as the new ones. Note: if you want to apply multiple functions to separate classes, you can use the ``tags`` argument, which receives a dict of ``tag: function`` pairs. Note: use the ``strict_yaml=True`` option in order to allow for more verbose but flexible YAML metadata: more than one YAML blocks are allowed, but they all must start with ``---`` (even at the beginning) and end with ``---`` or ``...``. Also, YAML is not the default content when no delimiters are set. 
Example:: """ Replace code blocks of class 'foo' with # horizontal rules """ import panflute as pf def fenced_action(options, data, element, doc): count = options.get('count', 1) div = pf.Div(attributes={'count': str(count)}) div.content.extend([pf.HorizontalRule] * count) return div if __name__ == '__main__': pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action) ''' # Allow for either tag+function or a dict {tag: function} assert (tag is None) + (tags is None) == 1 # XOR if tags is None: tags = {tag: function} if type(element) == CodeBlock: for tag in tags: if tag in element.classes: function = tags[tag] if not strict_yaml: # Split YAML and data parts (separated by ... or ---) raw = re.split("^([.]{3,}|[-]{3,})$", element.text, 1, re.MULTILINE) data = raw[2] if len(raw) > 2 else '' data = data.lstrip('\n') raw = raw[0] try: options = yaml.safe_load(raw) except yaml.scanner.ScannerError: debug("panflute: malformed YAML block") return if options is None: options = {} else: options = {} data = [] raw = re.split("^([.]{3,}|[-]{3,})$", element.text, 0, re.MULTILINE) rawmode = True for chunk in raw: chunk = chunk.strip('\n') if not chunk: continue if rawmode: if chunk.startswith('---'): rawmode = False else: data.append(chunk) else: if chunk.startswith('---') or chunk.startswith('...'): rawmode = True else: try: options.update(yaml.safe_load(chunk)) except yaml.scanner.ScannerError: debug("panflute: malformed YAML block") return data = '\n'.join(data) return function(options=options, data=data, element=element, doc=doc)
[ "def", "yaml_filter", "(", "element", ",", "doc", ",", "tag", "=", "None", ",", "function", "=", "None", ",", "tags", "=", "None", ",", "strict_yaml", "=", "False", ")", ":", "# Allow for either tag+function or a dict {tag: function}", "assert", "(", "tag", "is", "None", ")", "+", "(", "tags", "is", "None", ")", "==", "1", "# XOR", "if", "tags", "is", "None", ":", "tags", "=", "{", "tag", ":", "function", "}", "if", "type", "(", "element", ")", "==", "CodeBlock", ":", "for", "tag", "in", "tags", ":", "if", "tag", "in", "element", ".", "classes", ":", "function", "=", "tags", "[", "tag", "]", "if", "not", "strict_yaml", ":", "# Split YAML and data parts (separated by ... or ---)", "raw", "=", "re", ".", "split", "(", "\"^([.]{3,}|[-]{3,})$\"", ",", "element", ".", "text", ",", "1", ",", "re", ".", "MULTILINE", ")", "data", "=", "raw", "[", "2", "]", "if", "len", "(", "raw", ")", ">", "2", "else", "''", "data", "=", "data", ".", "lstrip", "(", "'\\n'", ")", "raw", "=", "raw", "[", "0", "]", "try", ":", "options", "=", "yaml", ".", "safe_load", "(", "raw", ")", "except", "yaml", ".", "scanner", ".", "ScannerError", ":", "debug", "(", "\"panflute: malformed YAML block\"", ")", "return", "if", "options", "is", "None", ":", "options", "=", "{", "}", "else", ":", "options", "=", "{", "}", "data", "=", "[", "]", "raw", "=", "re", ".", "split", "(", "\"^([.]{3,}|[-]{3,})$\"", ",", "element", ".", "text", ",", "0", ",", "re", ".", "MULTILINE", ")", "rawmode", "=", "True", "for", "chunk", "in", "raw", ":", "chunk", "=", "chunk", ".", "strip", "(", "'\\n'", ")", "if", "not", "chunk", ":", "continue", "if", "rawmode", ":", "if", "chunk", ".", "startswith", "(", "'---'", ")", ":", "rawmode", "=", "False", "else", ":", "data", ".", "append", "(", "chunk", ")", "else", ":", "if", "chunk", ".", "startswith", "(", "'---'", ")", "or", "chunk", ".", "startswith", "(", "'...'", ")", ":", "rawmode", "=", "True", "else", ":", "try", ":", "options", ".", "update", "(", "yaml", ".", 
"safe_load", "(", "chunk", ")", ")", "except", "yaml", ".", "scanner", ".", "ScannerError", ":", "debug", "(", "\"panflute: malformed YAML block\"", ")", "return", "data", "=", "'\\n'", ".", "join", "(", "data", ")", "return", "function", "(", "options", "=", "options", ",", "data", "=", "data", ",", "element", "=", "element", ",", "doc", "=", "doc", ")" ]
Convenience function for parsing code blocks with YAML options This function is useful to create a filter that applies to code blocks that have specific classes. It is used as an argument of ``run_filter``, with two additional options: ``tag`` and ``function``. Using this is equivalent to having filter functions that: 1. Check if the element is a code block 2. Check if the element belongs to a specific class 3. Split the YAML options (at the beginning of the block, by looking for ``...`` or ``---`` strings in a separate line 4. Parse the YAML 5. Use the YAML options and (optionally) the data that follows the YAML to return a new or modified element Instead, you just need to: 1. Call ``run_filter`` with ``yaml_filter`` as the action function, and with the additional arguments ``tag`` and ``function`` 2. Construct a ``fenced_action`` function that takes four arguments: (options, data, element, doc). Note that options is a dict and data is a raw string. Notice that this is similar to the ``action`` functions of standard filters, but with *options* and *data* as the new ones. Note: if you want to apply multiple functions to separate classes, you can use the ``tags`` argument, which receives a dict of ``tag: function`` pairs. Note: use the ``strict_yaml=True`` option in order to allow for more verbose but flexible YAML metadata: more than one YAML blocks are allowed, but they all must start with ``---`` (even at the beginning) and end with ``---`` or ``...``. Also, YAML is not the default content when no delimiters are set. Example:: """ Replace code blocks of class 'foo' with # horizontal rules """ import panflute as pf def fenced_action(options, data, element, doc): count = options.get('count', 1) div = pf.Div(attributes={'count': str(count)}) div.content.extend([pf.HorizontalRule] * count) return div if __name__ == '__main__': pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action)
[ "Convenience", "function", "for", "parsing", "code", "blocks", "with", "YAML", "options" ]
65c2d570c26a190deb600cab5e2ad8a828a3302e
https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/tools.py#L44-L158
train
sergiocorreia/panflute
panflute/base.py
Element._set_content
def _set_content(self, value, oktypes): """ Similar to content.setter but when there are no existing oktypes """ if value is None: value = [] self._content = ListContainer(*value, oktypes=oktypes, parent=self)
python
def _set_content(self, value, oktypes): """ Similar to content.setter but when there are no existing oktypes """ if value is None: value = [] self._content = ListContainer(*value, oktypes=oktypes, parent=self)
[ "def", "_set_content", "(", "self", ",", "value", ",", "oktypes", ")", ":", "if", "value", "is", "None", ":", "value", "=", "[", "]", "self", ".", "_content", "=", "ListContainer", "(", "*", "value", ",", "oktypes", "=", "oktypes", ",", "parent", "=", "self", ")" ]
Similar to content.setter but when there are no existing oktypes
[ "Similar", "to", "content", ".", "setter", "but", "when", "there", "are", "no", "existing", "oktypes" ]
65c2d570c26a190deb600cab5e2ad8a828a3302e
https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/base.py#L123-L129
train
sergiocorreia/panflute
panflute/base.py
Element.offset
def offset(self, n): """ Return a sibling element offset by n :rtype: :class:`Element` | ``None`` """ idx = self.index if idx is not None: sibling = idx + n container = self.container if 0 <= sibling < len(container): return container[sibling]
python
def offset(self, n): """ Return a sibling element offset by n :rtype: :class:`Element` | ``None`` """ idx = self.index if idx is not None: sibling = idx + n container = self.container if 0 <= sibling < len(container): return container[sibling]
[ "def", "offset", "(", "self", ",", "n", ")", ":", "idx", "=", "self", ".", "index", "if", "idx", "is", "not", "None", ":", "sibling", "=", "idx", "+", "n", "container", "=", "self", ".", "container", "if", "0", "<=", "sibling", "<", "len", "(", "container", ")", ":", "return", "container", "[", "sibling", "]" ]
Return a sibling element offset by n :rtype: :class:`Element` | ``None``
[ "Return", "a", "sibling", "element", "offset", "by", "n" ]
65c2d570c26a190deb600cab5e2ad8a828a3302e
https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/base.py#L166-L178
train
laike9m/pdir2
pdir/api.py
PrettyDir.search
def search(self, term: str, case_sensitive: bool = False) -> 'PrettyDir': """Searches for names that match some pattern. Args: term: String used to match names. A name is returned if it matches the whole search term. case_sensitive: Boolean to match case or not, default is False (case insensitive). Return: A PrettyDir object with matched names. """ if case_sensitive: return PrettyDir( self.obj, [pattr for pattr in self.pattrs if term in pattr.name] ) else: term = term.lower() return PrettyDir( self.obj, [pattr for pattr in self.pattrs if term in pattr.name.lower()] )
python
def search(self, term: str, case_sensitive: bool = False) -> 'PrettyDir': """Searches for names that match some pattern. Args: term: String used to match names. A name is returned if it matches the whole search term. case_sensitive: Boolean to match case or not, default is False (case insensitive). Return: A PrettyDir object with matched names. """ if case_sensitive: return PrettyDir( self.obj, [pattr for pattr in self.pattrs if term in pattr.name] ) else: term = term.lower() return PrettyDir( self.obj, [pattr for pattr in self.pattrs if term in pattr.name.lower()] )
[ "def", "search", "(", "self", ",", "term", ":", "str", ",", "case_sensitive", ":", "bool", "=", "False", ")", "->", "'PrettyDir'", ":", "if", "case_sensitive", ":", "return", "PrettyDir", "(", "self", ".", "obj", ",", "[", "pattr", "for", "pattr", "in", "self", ".", "pattrs", "if", "term", "in", "pattr", ".", "name", "]", ")", "else", ":", "term", "=", "term", ".", "lower", "(", ")", "return", "PrettyDir", "(", "self", ".", "obj", ",", "[", "pattr", "for", "pattr", "in", "self", ".", "pattrs", "if", "term", "in", "pattr", ".", "name", ".", "lower", "(", ")", "]", ")" ]
Searches for names that match some pattern. Args: term: String used to match names. A name is returned if it matches the whole search term. case_sensitive: Boolean to match case or not, default is False (case insensitive). Return: A PrettyDir object with matched names.
[ "Searches", "for", "names", "that", "match", "some", "pattern", "." ]
c4550523fe9b54bf9b755ffa28900a5e9f493d02
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/api.py#L74-L94
train
laike9m/pdir2
pdir/api.py
PrettyDir.properties
def properties(self) -> 'PrettyDir': """Returns all properties of the inspected object. Note that "properties" can mean "variables". """ return PrettyDir( self.obj, [ pattr for pattr in self.pattrs if category_match(pattr.category, AttrCategory.PROPERTY) ], )
python
def properties(self) -> 'PrettyDir': """Returns all properties of the inspected object. Note that "properties" can mean "variables". """ return PrettyDir( self.obj, [ pattr for pattr in self.pattrs if category_match(pattr.category, AttrCategory.PROPERTY) ], )
[ "def", "properties", "(", "self", ")", "->", "'PrettyDir'", ":", "return", "PrettyDir", "(", "self", ".", "obj", ",", "[", "pattr", "for", "pattr", "in", "self", ".", "pattrs", "if", "category_match", "(", "pattr", ".", "category", ",", "AttrCategory", ".", "PROPERTY", ")", "]", ",", ")" ]
Returns all properties of the inspected object. Note that "properties" can mean "variables".
[ "Returns", "all", "properties", "of", "the", "inspected", "object", "." ]
c4550523fe9b54bf9b755ffa28900a5e9f493d02
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/api.py#L104-L116
train
laike9m/pdir2
pdir/api.py
PrettyDir.methods
def methods(self) -> 'PrettyDir': """Returns all methods of the inspected object. Note that "methods" can mean "functions" when inspecting a module. """ return PrettyDir( self.obj, [ pattr for pattr in self.pattrs if category_match(pattr.category, AttrCategory.FUNCTION) ], )
python
def methods(self) -> 'PrettyDir': """Returns all methods of the inspected object. Note that "methods" can mean "functions" when inspecting a module. """ return PrettyDir( self.obj, [ pattr for pattr in self.pattrs if category_match(pattr.category, AttrCategory.FUNCTION) ], )
[ "def", "methods", "(", "self", ")", "->", "'PrettyDir'", ":", "return", "PrettyDir", "(", "self", ".", "obj", ",", "[", "pattr", "for", "pattr", "in", "self", ".", "pattrs", "if", "category_match", "(", "pattr", ".", "category", ",", "AttrCategory", ".", "FUNCTION", ")", "]", ",", ")" ]
Returns all methods of the inspected object. Note that "methods" can mean "functions" when inspecting a module.
[ "Returns", "all", "methods", "of", "the", "inspected", "object", "." ]
c4550523fe9b54bf9b755ffa28900a5e9f493d02
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/api.py#L119-L131
train
laike9m/pdir2
pdir/api.py
PrettyDir.public
def public(self) -> 'PrettyDir': """Returns public attributes of the inspected object.""" return PrettyDir( self.obj, [pattr for pattr in self.pattrs if not pattr.name.startswith('_')] )
python
def public(self) -> 'PrettyDir': """Returns public attributes of the inspected object.""" return PrettyDir( self.obj, [pattr for pattr in self.pattrs if not pattr.name.startswith('_')] )
[ "def", "public", "(", "self", ")", "->", "'PrettyDir'", ":", "return", "PrettyDir", "(", "self", ".", "obj", ",", "[", "pattr", "for", "pattr", "in", "self", ".", "pattrs", "if", "not", "pattr", ".", "name", ".", "startswith", "(", "'_'", ")", "]", ")" ]
Returns public attributes of the inspected object.
[ "Returns", "public", "attributes", "of", "the", "inspected", "object", "." ]
c4550523fe9b54bf9b755ffa28900a5e9f493d02
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/api.py#L134-L138
train
laike9m/pdir2
pdir/api.py
PrettyDir.own
def own(self) -> 'PrettyDir': """Returns attributes that are not inhterited from parent classes. Now we only use a simple judgement, it is expected that many attributes not get returned, especially invoked on a module. For instance, there's no way to distinguish between properties that are initialized in instance class's __init__ and parent class's __init__(assuming super() is called). So we'll just leave it. """ return PrettyDir( self.obj, [ pattr for pattr in self.pattrs if pattr.name in type(self.obj).__dict__ or pattr.name in self.obj.__dict__ ], )
python
def own(self) -> 'PrettyDir': """Returns attributes that are not inhterited from parent classes. Now we only use a simple judgement, it is expected that many attributes not get returned, especially invoked on a module. For instance, there's no way to distinguish between properties that are initialized in instance class's __init__ and parent class's __init__(assuming super() is called). So we'll just leave it. """ return PrettyDir( self.obj, [ pattr for pattr in self.pattrs if pattr.name in type(self.obj).__dict__ or pattr.name in self.obj.__dict__ ], )
[ "def", "own", "(", "self", ")", "->", "'PrettyDir'", ":", "return", "PrettyDir", "(", "self", ".", "obj", ",", "[", "pattr", "for", "pattr", "in", "self", ".", "pattrs", "if", "pattr", ".", "name", "in", "type", "(", "self", ".", "obj", ")", ".", "__dict__", "or", "pattr", ".", "name", "in", "self", ".", "obj", ".", "__dict__", "]", ",", ")" ]
Returns attributes that are not inhterited from parent classes. Now we only use a simple judgement, it is expected that many attributes not get returned, especially invoked on a module. For instance, there's no way to distinguish between properties that are initialized in instance class's __init__ and parent class's __init__(assuming super() is called). So we'll just leave it.
[ "Returns", "attributes", "that", "are", "not", "inhterited", "from", "parent", "classes", "." ]
c4550523fe9b54bf9b755ffa28900a5e9f493d02
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/api.py#L141-L159
train
laike9m/pdir2
pdir/api.py
PrettyAttribute.get_oneline_doc
def get_oneline_doc(self) -> str: """ Doc doesn't necessarily mean doctring. It could be anything that should be put after the attr's name as an explanation. """ attr = self.attr_obj if self.display_group == AttrCategory.DESCRIPTOR: if isinstance(attr, property): doc_list = ['@property with getter'] if attr.fset: doc_list.append(SETTER) if attr.fdel: doc_list.append(DELETER) else: doc_list = ['class %s' % attr.__class__.__name__] if hasattr(attr, '__get__'): doc_list.append(GETTER) if hasattr(attr, '__set__'): doc_list.append(SETTER) if hasattr(attr, '__delete__'): doc_list.append(DELETER) doc_list[0] = ' '.join([doc_list[0], 'with', doc_list.pop(1)]) if attr.__doc__ is not None: doc_list.append(inspect.getdoc(attr).split('\n', 1)[0]) return ', '.join(doc_list) if hasattr(attr, '__doc__'): doc = inspect.getdoc(attr) return doc.split('\n', 1)[0] if doc else '' # default doc is None return ''
python
def get_oneline_doc(self) -> str: """ Doc doesn't necessarily mean doctring. It could be anything that should be put after the attr's name as an explanation. """ attr = self.attr_obj if self.display_group == AttrCategory.DESCRIPTOR: if isinstance(attr, property): doc_list = ['@property with getter'] if attr.fset: doc_list.append(SETTER) if attr.fdel: doc_list.append(DELETER) else: doc_list = ['class %s' % attr.__class__.__name__] if hasattr(attr, '__get__'): doc_list.append(GETTER) if hasattr(attr, '__set__'): doc_list.append(SETTER) if hasattr(attr, '__delete__'): doc_list.append(DELETER) doc_list[0] = ' '.join([doc_list[0], 'with', doc_list.pop(1)]) if attr.__doc__ is not None: doc_list.append(inspect.getdoc(attr).split('\n', 1)[0]) return ', '.join(doc_list) if hasattr(attr, '__doc__'): doc = inspect.getdoc(attr) return doc.split('\n', 1)[0] if doc else '' # default doc is None return ''
[ "def", "get_oneline_doc", "(", "self", ")", "->", "str", ":", "attr", "=", "self", ".", "attr_obj", "if", "self", ".", "display_group", "==", "AttrCategory", ".", "DESCRIPTOR", ":", "if", "isinstance", "(", "attr", ",", "property", ")", ":", "doc_list", "=", "[", "'@property with getter'", "]", "if", "attr", ".", "fset", ":", "doc_list", ".", "append", "(", "SETTER", ")", "if", "attr", ".", "fdel", ":", "doc_list", ".", "append", "(", "DELETER", ")", "else", ":", "doc_list", "=", "[", "'class %s'", "%", "attr", ".", "__class__", ".", "__name__", "]", "if", "hasattr", "(", "attr", ",", "'__get__'", ")", ":", "doc_list", ".", "append", "(", "GETTER", ")", "if", "hasattr", "(", "attr", ",", "'__set__'", ")", ":", "doc_list", ".", "append", "(", "SETTER", ")", "if", "hasattr", "(", "attr", ",", "'__delete__'", ")", ":", "doc_list", ".", "append", "(", "DELETER", ")", "doc_list", "[", "0", "]", "=", "' '", ".", "join", "(", "[", "doc_list", "[", "0", "]", ",", "'with'", ",", "doc_list", ".", "pop", "(", "1", ")", "]", ")", "if", "attr", ".", "__doc__", "is", "not", "None", ":", "doc_list", ".", "append", "(", "inspect", ".", "getdoc", "(", "attr", ")", ".", "split", "(", "'\\n'", ",", "1", ")", "[", "0", "]", ")", "return", "', '", ".", "join", "(", "doc_list", ")", "if", "hasattr", "(", "attr", ",", "'__doc__'", ")", ":", "doc", "=", "inspect", ".", "getdoc", "(", "attr", ")", "return", "doc", ".", "split", "(", "'\\n'", ",", "1", ")", "[", "0", "]", "if", "doc", "else", "''", "# default doc is None", "return", "''" ]
Doc doesn't necessarily mean doctring. It could be anything that should be put after the attr's name as an explanation.
[ "Doc", "doesn", "t", "necessarily", "mean", "doctring", ".", "It", "could", "be", "anything", "that", "should", "be", "put", "after", "the", "attr", "s", "name", "as", "an", "explanation", "." ]
c4550523fe9b54bf9b755ffa28900a5e9f493d02
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/api.py#L183-L212
train
laike9m/pdir2
pdir/format.py
format_pattrs
def format_pattrs(pattrs: List['api.PrettyAttribute']) -> str: """Generates repr string given a list of pattrs.""" output = [] pattrs.sort( key=lambda x: ( _FORMATTER[x.display_group].display_index, x.display_group, x.name, ) ) for display_group, grouped_pattrs in groupby(pattrs, lambda x: x.display_group): output.append( _FORMATTER[display_group].formatter(display_group, grouped_pattrs) ) return '\n'.join(output)
python
def format_pattrs(pattrs: List['api.PrettyAttribute']) -> str: """Generates repr string given a list of pattrs.""" output = [] pattrs.sort( key=lambda x: ( _FORMATTER[x.display_group].display_index, x.display_group, x.name, ) ) for display_group, grouped_pattrs in groupby(pattrs, lambda x: x.display_group): output.append( _FORMATTER[display_group].formatter(display_group, grouped_pattrs) ) return '\n'.join(output)
[ "def", "format_pattrs", "(", "pattrs", ":", "List", "[", "'api.PrettyAttribute'", "]", ")", "->", "str", ":", "output", "=", "[", "]", "pattrs", ".", "sort", "(", "key", "=", "lambda", "x", ":", "(", "_FORMATTER", "[", "x", ".", "display_group", "]", ".", "display_index", ",", "x", ".", "display_group", ",", "x", ".", "name", ",", ")", ")", "for", "display_group", ",", "grouped_pattrs", "in", "groupby", "(", "pattrs", ",", "lambda", "x", ":", "x", ".", "display_group", ")", ":", "output", ".", "append", "(", "_FORMATTER", "[", "display_group", "]", ".", "formatter", "(", "display_group", ",", "grouped_pattrs", ")", ")", "return", "'\\n'", ".", "join", "(", "output", ")" ]
Generates repr string given a list of pattrs.
[ "Generates", "repr", "string", "given", "a", "list", "of", "pattrs", "." ]
c4550523fe9b54bf9b755ffa28900a5e9f493d02
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/format.py#L14-L29
train
laike9m/pdir2
pdir/_internal_utils.py
get_attr_from_dict
def get_attr_from_dict(inspected_obj: Any, attr_name: str) -> Any: """Ensures we get descriptor object instead of its return value. """ if inspect.isclass(inspected_obj): obj_list = [inspected_obj] + list(inspected_obj.__mro__) else: obj_list = [inspected_obj] + list(inspected_obj.__class__.__mro__) for obj in obj_list: if hasattr(obj, '__dict__') and attr_name in obj.__dict__: return obj.__dict__[attr_name] # This happens when user-defined __dir__ returns something that's not # in any __dict__. See test_override_dir. # Returns attr_name so that it's treated as a normal property. return attr_name
python
def get_attr_from_dict(inspected_obj: Any, attr_name: str) -> Any: """Ensures we get descriptor object instead of its return value. """ if inspect.isclass(inspected_obj): obj_list = [inspected_obj] + list(inspected_obj.__mro__) else: obj_list = [inspected_obj] + list(inspected_obj.__class__.__mro__) for obj in obj_list: if hasattr(obj, '__dict__') and attr_name in obj.__dict__: return obj.__dict__[attr_name] # This happens when user-defined __dir__ returns something that's not # in any __dict__. See test_override_dir. # Returns attr_name so that it's treated as a normal property. return attr_name
[ "def", "get_attr_from_dict", "(", "inspected_obj", ":", "Any", ",", "attr_name", ":", "str", ")", "->", "Any", ":", "if", "inspect", ".", "isclass", "(", "inspected_obj", ")", ":", "obj_list", "=", "[", "inspected_obj", "]", "+", "list", "(", "inspected_obj", ".", "__mro__", ")", "else", ":", "obj_list", "=", "[", "inspected_obj", "]", "+", "list", "(", "inspected_obj", ".", "__class__", ".", "__mro__", ")", "for", "obj", "in", "obj_list", ":", "if", "hasattr", "(", "obj", ",", "'__dict__'", ")", "and", "attr_name", "in", "obj", ".", "__dict__", ":", "return", "obj", ".", "__dict__", "[", "attr_name", "]", "# This happens when user-defined __dir__ returns something that's not", "# in any __dict__. See test_override_dir.", "# Returns attr_name so that it's treated as a normal property.", "return", "attr_name" ]
Ensures we get descriptor object instead of its return value.
[ "Ensures", "we", "get", "descriptor", "object", "instead", "of", "its", "return", "value", "." ]
c4550523fe9b54bf9b755ffa28900a5e9f493d02
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/_internal_utils.py#L9-L22
train
laike9m/pdir2
pdir/attr_category.py
attr_category_postprocess
def attr_category_postprocess(get_attr_category_func): """Unifies attr_category to a tuple, add AttrCategory.SLOT if needed.""" @functools.wraps(get_attr_category_func) def wrapped( name: str, attr: Any, obj: Any ) -> Tuple[AttrCategory, ...]: category = get_attr_category_func(name, attr, obj) category = list(category) if isinstance(category, tuple) else [category] if is_slotted_attr(obj, name): # Refactoring all tuples to lists is not easy # and pleasant. Maybe do this in future if necessary category.append(AttrCategory.SLOT) return tuple(category) return wrapped
python
def attr_category_postprocess(get_attr_category_func): """Unifies attr_category to a tuple, add AttrCategory.SLOT if needed.""" @functools.wraps(get_attr_category_func) def wrapped( name: str, attr: Any, obj: Any ) -> Tuple[AttrCategory, ...]: category = get_attr_category_func(name, attr, obj) category = list(category) if isinstance(category, tuple) else [category] if is_slotted_attr(obj, name): # Refactoring all tuples to lists is not easy # and pleasant. Maybe do this in future if necessary category.append(AttrCategory.SLOT) return tuple(category) return wrapped
[ "def", "attr_category_postprocess", "(", "get_attr_category_func", ")", ":", "@", "functools", ".", "wraps", "(", "get_attr_category_func", ")", "def", "wrapped", "(", "name", ":", "str", ",", "attr", ":", "Any", ",", "obj", ":", "Any", ")", "->", "Tuple", "[", "AttrCategory", ",", "...", "]", ":", "category", "=", "get_attr_category_func", "(", "name", ",", "attr", ",", "obj", ")", "category", "=", "list", "(", "category", ")", "if", "isinstance", "(", "category", ",", "tuple", ")", "else", "[", "category", "]", "if", "is_slotted_attr", "(", "obj", ",", "name", ")", ":", "# Refactoring all tuples to lists is not easy", "# and pleasant. Maybe do this in future if necessary", "category", ".", "append", "(", "AttrCategory", ".", "SLOT", ")", "return", "tuple", "(", "category", ")", "return", "wrapped" ]
Unifies attr_category to a tuple, add AttrCategory.SLOT if needed.
[ "Unifies", "attr_category", "to", "a", "tuple", "add", "AttrCategory", ".", "SLOT", "if", "needed", "." ]
c4550523fe9b54bf9b755ffa28900a5e9f493d02
https://github.com/laike9m/pdir2/blob/c4550523fe9b54bf9b755ffa28900a5e9f493d02/pdir/attr_category.py#L216-L230
train
mattloper/chumpy
chumpy/monitor.py
get_peak_mem
def get_peak_mem(): ''' this returns peak memory use since process starts till the moment its called ''' import resource rusage_denom = 1024. if sys.platform == 'darwin': # ... it seems that in OSX the output is different units ... rusage_denom = rusage_denom * rusage_denom mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom return mem
python
def get_peak_mem(): ''' this returns peak memory use since process starts till the moment its called ''' import resource rusage_denom = 1024. if sys.platform == 'darwin': # ... it seems that in OSX the output is different units ... rusage_denom = rusage_denom * rusage_denom mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom return mem
[ "def", "get_peak_mem", "(", ")", ":", "import", "resource", "rusage_denom", "=", "1024.", "if", "sys", ".", "platform", "==", "'darwin'", ":", "# ... it seems that in OSX the output is different units ...", "rusage_denom", "=", "rusage_denom", "*", "rusage_denom", "mem", "=", "resource", ".", "getrusage", "(", "resource", ".", "RUSAGE_SELF", ")", ".", "ru_maxrss", "/", "rusage_denom", "return", "mem" ]
this returns peak memory use since process starts till the moment its called
[ "this", "returns", "peak", "memory", "use", "since", "process", "starts", "till", "the", "moment", "its", "called" ]
a3cfdb1be3c8265c369c507b22f6f3f89414c772
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/monitor.py#L26-L36
train
mattloper/chumpy
chumpy/utils.py
dfs_do_func_on_graph
def dfs_do_func_on_graph(node, func, *args, **kwargs): ''' invoke func on each node of the dr graph ''' for _node in node.tree_iterator(): func(_node, *args, **kwargs)
python
def dfs_do_func_on_graph(node, func, *args, **kwargs): ''' invoke func on each node of the dr graph ''' for _node in node.tree_iterator(): func(_node, *args, **kwargs)
[ "def", "dfs_do_func_on_graph", "(", "node", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "_node", "in", "node", ".", "tree_iterator", "(", ")", ":", "func", "(", "_node", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
invoke func on each node of the dr graph
[ "invoke", "func", "on", "each", "node", "of", "the", "dr", "graph" ]
a3cfdb1be3c8265c369c507b22f6f3f89414c772
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/utils.py#L36-L41
train
mattloper/chumpy
chumpy/utils.py
sparse_is_desireable
def sparse_is_desireable(lhs, rhs): ''' Examines a pair of matrices and determines if the result of their multiplication should be sparse or not. ''' return False if len(lhs.shape) == 1: return False else: lhs_rows, lhs_cols = lhs.shape if len(rhs.shape) == 1: rhs_rows = 1 rhs_cols = rhs.size else: rhs_rows, rhs_cols = rhs.shape result_size = lhs_rows * rhs_cols if sp.issparse(lhs) and sp.issparse(rhs): return True elif sp.issparse(lhs): lhs_zero_rows = lhs_rows - np.unique(lhs.nonzero()[0]).size rhs_zero_cols = np.all(rhs==0, axis=0).sum() elif sp.issparse(rhs): lhs_zero_rows = np.all(lhs==0, axis=1).sum() rhs_zero_cols = rhs_cols- np.unique(rhs.nonzero()[1]).size else: lhs_zero_rows = np.all(lhs==0, axis=1).sum() rhs_zero_cols = np.all(rhs==0, axis=0).sum() num_zeros = lhs_zero_rows * rhs_cols + rhs_zero_cols * lhs_rows - lhs_zero_rows * rhs_zero_cols # A sparse matrix uses roughly 16 bytes per nonzero element (8 + 2 4-byte inds), while a dense matrix uses 8 bytes per element. So the break even point for sparsity is 50% nonzero. But in practice, it seems to be that the compression in a csc or csr matrix gets us break even at ~65% nonzero, which lets us say 50% is a conservative, worst cases cutoff. return (float(num_zeros) / float(size)) >= 0.5
python
def sparse_is_desireable(lhs, rhs): ''' Examines a pair of matrices and determines if the result of their multiplication should be sparse or not. ''' return False if len(lhs.shape) == 1: return False else: lhs_rows, lhs_cols = lhs.shape if len(rhs.shape) == 1: rhs_rows = 1 rhs_cols = rhs.size else: rhs_rows, rhs_cols = rhs.shape result_size = lhs_rows * rhs_cols if sp.issparse(lhs) and sp.issparse(rhs): return True elif sp.issparse(lhs): lhs_zero_rows = lhs_rows - np.unique(lhs.nonzero()[0]).size rhs_zero_cols = np.all(rhs==0, axis=0).sum() elif sp.issparse(rhs): lhs_zero_rows = np.all(lhs==0, axis=1).sum() rhs_zero_cols = rhs_cols- np.unique(rhs.nonzero()[1]).size else: lhs_zero_rows = np.all(lhs==0, axis=1).sum() rhs_zero_cols = np.all(rhs==0, axis=0).sum() num_zeros = lhs_zero_rows * rhs_cols + rhs_zero_cols * lhs_rows - lhs_zero_rows * rhs_zero_cols # A sparse matrix uses roughly 16 bytes per nonzero element (8 + 2 4-byte inds), while a dense matrix uses 8 bytes per element. So the break even point for sparsity is 50% nonzero. But in practice, it seems to be that the compression in a csc or csr matrix gets us break even at ~65% nonzero, which lets us say 50% is a conservative, worst cases cutoff. return (float(num_zeros) / float(size)) >= 0.5
[ "def", "sparse_is_desireable", "(", "lhs", ",", "rhs", ")", ":", "return", "False", "if", "len", "(", "lhs", ".", "shape", ")", "==", "1", ":", "return", "False", "else", ":", "lhs_rows", ",", "lhs_cols", "=", "lhs", ".", "shape", "if", "len", "(", "rhs", ".", "shape", ")", "==", "1", ":", "rhs_rows", "=", "1", "rhs_cols", "=", "rhs", ".", "size", "else", ":", "rhs_rows", ",", "rhs_cols", "=", "rhs", ".", "shape", "result_size", "=", "lhs_rows", "*", "rhs_cols", "if", "sp", ".", "issparse", "(", "lhs", ")", "and", "sp", ".", "issparse", "(", "rhs", ")", ":", "return", "True", "elif", "sp", ".", "issparse", "(", "lhs", ")", ":", "lhs_zero_rows", "=", "lhs_rows", "-", "np", ".", "unique", "(", "lhs", ".", "nonzero", "(", ")", "[", "0", "]", ")", ".", "size", "rhs_zero_cols", "=", "np", ".", "all", "(", "rhs", "==", "0", ",", "axis", "=", "0", ")", ".", "sum", "(", ")", "elif", "sp", ".", "issparse", "(", "rhs", ")", ":", "lhs_zero_rows", "=", "np", ".", "all", "(", "lhs", "==", "0", ",", "axis", "=", "1", ")", ".", "sum", "(", ")", "rhs_zero_cols", "=", "rhs_cols", "-", "np", ".", "unique", "(", "rhs", ".", "nonzero", "(", ")", "[", "1", "]", ")", ".", "size", "else", ":", "lhs_zero_rows", "=", "np", ".", "all", "(", "lhs", "==", "0", ",", "axis", "=", "1", ")", ".", "sum", "(", ")", "rhs_zero_cols", "=", "np", ".", "all", "(", "rhs", "==", "0", ",", "axis", "=", "0", ")", ".", "sum", "(", ")", "num_zeros", "=", "lhs_zero_rows", "*", "rhs_cols", "+", "rhs_zero_cols", "*", "lhs_rows", "-", "lhs_zero_rows", "*", "rhs_zero_cols", "# A sparse matrix uses roughly 16 bytes per nonzero element (8 + 2 4-byte inds), while a dense matrix uses 8 bytes per element. So the break even point for sparsity is 50% nonzero. 
But in practice, it seems to be that the compression in a csc or csr matrix gets us break even at ~65% nonzero, which lets us say 50% is a conservative, worst cases cutoff.", "return", "(", "float", "(", "num_zeros", ")", "/", "float", "(", "size", ")", ")", ">=", "0.5" ]
Examines a pair of matrices and determines if the result of their multiplication should be sparse or not.
[ "Examines", "a", "pair", "of", "matrices", "and", "determines", "if", "the", "result", "of", "their", "multiplication", "should", "be", "sparse", "or", "not", "." ]
a3cfdb1be3c8265c369c507b22f6f3f89414c772
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/utils.py#L44-L78
train
mattloper/chumpy
chumpy/utils.py
convert_inputs_to_sparse_if_necessary
def convert_inputs_to_sparse_if_necessary(lhs, rhs): ''' This function checks to see if a sparse output is desireable given the inputs and if so, casts the inputs to sparse in order to make it so. ''' if not sp.issparse(lhs) or not sp.issparse(rhs): if sparse_is_desireable(lhs, rhs): if not sp.issparse(lhs): lhs = sp.csc_matrix(lhs) #print "converting lhs into sparse matrix" if not sp.issparse(rhs): rhs = sp.csc_matrix(rhs) #print "converting rhs into sparse matrix" return lhs, rhs
python
def convert_inputs_to_sparse_if_necessary(lhs, rhs): ''' This function checks to see if a sparse output is desireable given the inputs and if so, casts the inputs to sparse in order to make it so. ''' if not sp.issparse(lhs) or not sp.issparse(rhs): if sparse_is_desireable(lhs, rhs): if not sp.issparse(lhs): lhs = sp.csc_matrix(lhs) #print "converting lhs into sparse matrix" if not sp.issparse(rhs): rhs = sp.csc_matrix(rhs) #print "converting rhs into sparse matrix" return lhs, rhs
[ "def", "convert_inputs_to_sparse_if_necessary", "(", "lhs", ",", "rhs", ")", ":", "if", "not", "sp", ".", "issparse", "(", "lhs", ")", "or", "not", "sp", ".", "issparse", "(", "rhs", ")", ":", "if", "sparse_is_desireable", "(", "lhs", ",", "rhs", ")", ":", "if", "not", "sp", ".", "issparse", "(", "lhs", ")", ":", "lhs", "=", "sp", ".", "csc_matrix", "(", "lhs", ")", "#print \"converting lhs into sparse matrix\"", "if", "not", "sp", ".", "issparse", "(", "rhs", ")", ":", "rhs", "=", "sp", ".", "csc_matrix", "(", "rhs", ")", "#print \"converting rhs into sparse matrix\"", "return", "lhs", ",", "rhs" ]
This function checks to see if a sparse output is desireable given the inputs and if so, casts the inputs to sparse in order to make it so.
[ "This", "function", "checks", "to", "see", "if", "a", "sparse", "output", "is", "desireable", "given", "the", "inputs", "and", "if", "so", "casts", "the", "inputs", "to", "sparse", "in", "order", "to", "make", "it", "so", "." ]
a3cfdb1be3c8265c369c507b22f6f3f89414c772
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/utils.py#L81-L93
train
mattloper/chumpy
chumpy/optimization_internal.py
ChInputsStacked.dr_wrt
def dr_wrt(self, wrt, profiler=None): ''' Loop over free variables and delete cache for the whole tree after finished each one ''' if wrt is self.x: jacs = [] for fvi, freevar in enumerate(self.free_variables): tm = timer() if isinstance(freevar, ch.Select): new_jac = self.obj.dr_wrt(freevar.a, profiler=profiler) try: new_jac = new_jac[:, freevar.idxs] except: # non-csc sparse matrices may not support column-wise indexing new_jac = new_jac.tocsc()[:, freevar.idxs] else: new_jac = self.obj.dr_wrt(freevar, profiler=profiler) pif('dx wrt {} in {}sec, sparse: {}'.format(freevar.short_name, tm(), sp.issparse(new_jac))) if self._make_dense and sp.issparse(new_jac): new_jac = new_jac.todense() if self._make_sparse and not sp.issparse(new_jac): new_jac = sp.csc_matrix(new_jac) if new_jac is None: raise Exception( 'Objective has no derivative wrt free variable {}. ' 'You should likely remove it.'.format(fvi)) jacs.append(new_jac) tm = timer() utils.dfs_do_func_on_graph(self.obj, clear_cache_single) pif('dfs_do_func_on_graph in {}sec'.format(tm())) tm = timer() J = hstack(jacs) pif('hstack in {}sec'.format(tm())) return J
python
def dr_wrt(self, wrt, profiler=None): ''' Loop over free variables and delete cache for the whole tree after finished each one ''' if wrt is self.x: jacs = [] for fvi, freevar in enumerate(self.free_variables): tm = timer() if isinstance(freevar, ch.Select): new_jac = self.obj.dr_wrt(freevar.a, profiler=profiler) try: new_jac = new_jac[:, freevar.idxs] except: # non-csc sparse matrices may not support column-wise indexing new_jac = new_jac.tocsc()[:, freevar.idxs] else: new_jac = self.obj.dr_wrt(freevar, profiler=profiler) pif('dx wrt {} in {}sec, sparse: {}'.format(freevar.short_name, tm(), sp.issparse(new_jac))) if self._make_dense and sp.issparse(new_jac): new_jac = new_jac.todense() if self._make_sparse and not sp.issparse(new_jac): new_jac = sp.csc_matrix(new_jac) if new_jac is None: raise Exception( 'Objective has no derivative wrt free variable {}. ' 'You should likely remove it.'.format(fvi)) jacs.append(new_jac) tm = timer() utils.dfs_do_func_on_graph(self.obj, clear_cache_single) pif('dfs_do_func_on_graph in {}sec'.format(tm())) tm = timer() J = hstack(jacs) pif('hstack in {}sec'.format(tm())) return J
[ "def", "dr_wrt", "(", "self", ",", "wrt", ",", "profiler", "=", "None", ")", ":", "if", "wrt", "is", "self", ".", "x", ":", "jacs", "=", "[", "]", "for", "fvi", ",", "freevar", "in", "enumerate", "(", "self", ".", "free_variables", ")", ":", "tm", "=", "timer", "(", ")", "if", "isinstance", "(", "freevar", ",", "ch", ".", "Select", ")", ":", "new_jac", "=", "self", ".", "obj", ".", "dr_wrt", "(", "freevar", ".", "a", ",", "profiler", "=", "profiler", ")", "try", ":", "new_jac", "=", "new_jac", "[", ":", ",", "freevar", ".", "idxs", "]", "except", ":", "# non-csc sparse matrices may not support column-wise indexing", "new_jac", "=", "new_jac", ".", "tocsc", "(", ")", "[", ":", ",", "freevar", ".", "idxs", "]", "else", ":", "new_jac", "=", "self", ".", "obj", ".", "dr_wrt", "(", "freevar", ",", "profiler", "=", "profiler", ")", "pif", "(", "'dx wrt {} in {}sec, sparse: {}'", ".", "format", "(", "freevar", ".", "short_name", ",", "tm", "(", ")", ",", "sp", ".", "issparse", "(", "new_jac", ")", ")", ")", "if", "self", ".", "_make_dense", "and", "sp", ".", "issparse", "(", "new_jac", ")", ":", "new_jac", "=", "new_jac", ".", "todense", "(", ")", "if", "self", ".", "_make_sparse", "and", "not", "sp", ".", "issparse", "(", "new_jac", ")", ":", "new_jac", "=", "sp", ".", "csc_matrix", "(", "new_jac", ")", "if", "new_jac", "is", "None", ":", "raise", "Exception", "(", "'Objective has no derivative wrt free variable {}. '", "'You should likely remove it.'", ".", "format", "(", "fvi", ")", ")", "jacs", ".", "append", "(", "new_jac", ")", "tm", "=", "timer", "(", ")", "utils", ".", "dfs_do_func_on_graph", "(", "self", ".", "obj", ",", "clear_cache_single", ")", "pif", "(", "'dfs_do_func_on_graph in {}sec'", ".", "format", "(", "tm", "(", ")", ")", ")", "tm", "=", "timer", "(", ")", "J", "=", "hstack", "(", "jacs", ")", "pif", "(", "'hstack in {}sec'", ".", "format", "(", "tm", "(", ")", ")", ")", "return", "J" ]
Loop over free variables and delete cache for the whole tree after finished each one
[ "Loop", "over", "free", "variables", "and", "delete", "cache", "for", "the", "whole", "tree", "after", "finished", "each", "one" ]
a3cfdb1be3c8265c369c507b22f6f3f89414c772
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/optimization_internal.py#L34-L71
train
mattloper/chumpy
chumpy/optimization_internal.py
ChInputsStacked.J
def J(self): ''' Compute Jacobian. Analyze dr graph first to disable unnecessary caching ''' result = self.dr_wrt(self.x, profiler=self.profiler).copy() if self.profiler: self.profiler.harvest() return np.atleast_2d(result) if not sp.issparse(result) else result
python
def J(self): ''' Compute Jacobian. Analyze dr graph first to disable unnecessary caching ''' result = self.dr_wrt(self.x, profiler=self.profiler).copy() if self.profiler: self.profiler.harvest() return np.atleast_2d(result) if not sp.issparse(result) else result
[ "def", "J", "(", "self", ")", ":", "result", "=", "self", ".", "dr_wrt", "(", "self", ".", "x", ",", "profiler", "=", "self", ".", "profiler", ")", ".", "copy", "(", ")", "if", "self", ".", "profiler", ":", "self", ".", "profiler", ".", "harvest", "(", ")", "return", "np", ".", "atleast_2d", "(", "result", ")", "if", "not", "sp", ".", "issparse", "(", "result", ")", "else", "result" ]
Compute Jacobian. Analyze dr graph first to disable unnecessary caching
[ "Compute", "Jacobian", ".", "Analyze", "dr", "graph", "first", "to", "disable", "unnecessary", "caching" ]
a3cfdb1be3c8265c369c507b22f6f3f89414c772
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/optimization_internal.py#L101-L108
train
mattloper/chumpy
chumpy/ch.py
Ch.sid
def sid(self): """Semantic id.""" pnames = list(self.terms)+list(self.dterms) pnames.sort() return (self.__class__, tuple([(k, id(self.__dict__[k])) for k in pnames if k in self.__dict__]))
python
def sid(self): """Semantic id.""" pnames = list(self.terms)+list(self.dterms) pnames.sort() return (self.__class__, tuple([(k, id(self.__dict__[k])) for k in pnames if k in self.__dict__]))
[ "def", "sid", "(", "self", ")", ":", "pnames", "=", "list", "(", "self", ".", "terms", ")", "+", "list", "(", "self", ".", "dterms", ")", "pnames", ".", "sort", "(", ")", "return", "(", "self", ".", "__class__", ",", "tuple", "(", "[", "(", "k", ",", "id", "(", "self", ".", "__dict__", "[", "k", "]", ")", ")", "for", "k", "in", "pnames", "if", "k", "in", "self", ".", "__dict__", "]", ")", ")" ]
Semantic id.
[ "Semantic", "id", "." ]
a3cfdb1be3c8265c369c507b22f6f3f89414c772
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/ch.py#L185-L189
train
mattloper/chumpy
chumpy/ch.py
Ch.compute_dr_wrt
def compute_dr_wrt(self,wrt): """Default method for objects that just contain a number or ndarray""" if wrt is self: # special base case return sp.eye(self.x.size, self.x.size) #return np.array([[1]]) return None
python
def compute_dr_wrt(self,wrt): """Default method for objects that just contain a number or ndarray""" if wrt is self: # special base case return sp.eye(self.x.size, self.x.size) #return np.array([[1]]) return None
[ "def", "compute_dr_wrt", "(", "self", ",", "wrt", ")", ":", "if", "wrt", "is", "self", ":", "# special base case ", "return", "sp", ".", "eye", "(", "self", ".", "x", ".", "size", ",", "self", ".", "x", ".", "size", ")", "#return np.array([[1]])", "return", "None" ]
Default method for objects that just contain a number or ndarray
[ "Default", "method", "for", "objects", "that", "just", "contain", "a", "number", "or", "ndarray" ]
a3cfdb1be3c8265c369c507b22f6f3f89414c772
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/ch.py#L275-L280
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.get_ubuntu_release_from_sentry
def get_ubuntu_release_from_sentry(self, sentry_unit): """Get Ubuntu release codename from sentry unit. :param sentry_unit: amulet sentry/service unit pointer :returns: list of strings - release codename, failure message """ msg = None cmd = 'lsb_release -cs' release, code = sentry_unit.run(cmd) if code == 0: self.log.debug('{} lsb_release: {}'.format( sentry_unit.info['unit_name'], release)) else: msg = ('{} `{}` returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, release, code)) if release not in self.ubuntu_releases: msg = ("Release ({}) not found in Ubuntu releases " "({})".format(release, self.ubuntu_releases)) return release, msg
python
def get_ubuntu_release_from_sentry(self, sentry_unit): """Get Ubuntu release codename from sentry unit. :param sentry_unit: amulet sentry/service unit pointer :returns: list of strings - release codename, failure message """ msg = None cmd = 'lsb_release -cs' release, code = sentry_unit.run(cmd) if code == 0: self.log.debug('{} lsb_release: {}'.format( sentry_unit.info['unit_name'], release)) else: msg = ('{} `{}` returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, release, code)) if release not in self.ubuntu_releases: msg = ("Release ({}) not found in Ubuntu releases " "({})".format(release, self.ubuntu_releases)) return release, msg
[ "def", "get_ubuntu_release_from_sentry", "(", "self", ",", "sentry_unit", ")", ":", "msg", "=", "None", "cmd", "=", "'lsb_release -cs'", "release", ",", "code", "=", "sentry_unit", ".", "run", "(", "cmd", ")", "if", "code", "==", "0", ":", "self", ".", "log", ".", "debug", "(", "'{} lsb_release: {}'", ".", "format", "(", "sentry_unit", ".", "info", "[", "'unit_name'", "]", ",", "release", ")", ")", "else", ":", "msg", "=", "(", "'{} `{}` returned {} '", "'{}'", ".", "format", "(", "sentry_unit", ".", "info", "[", "'unit_name'", "]", ",", "cmd", ",", "release", ",", "code", ")", ")", "if", "release", "not", "in", "self", ".", "ubuntu_releases", ":", "msg", "=", "(", "\"Release ({}) not found in Ubuntu releases \"", "\"({})\"", ".", "format", "(", "release", ",", "self", ".", "ubuntu_releases", ")", ")", "return", "release", ",", "msg" ]
Get Ubuntu release codename from sentry unit. :param sentry_unit: amulet sentry/service unit pointer :returns: list of strings - release codename, failure message
[ "Get", "Ubuntu", "release", "codename", "from", "sentry", "unit", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L83-L102
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.validate_services
def validate_services(self, commands): """Validate that lists of commands succeed on service units. Can be used to verify system services are running on the corresponding service units. :param commands: dict with sentry keys and arbitrary command list vals :returns: None if successful, Failure string message otherwise """ self.log.debug('Checking status of system services...') # /!\ DEPRECATION WARNING (beisner): # New and existing tests should be rewritten to use # validate_services_by_name() as it is aware of init systems. self.log.warn('DEPRECATION WARNING: use ' 'validate_services_by_name instead of validate_services ' 'due to init system differences.') for k, v in six.iteritems(commands): for cmd in v: output, code = k.run(cmd) self.log.debug('{} `{}` returned ' '{}'.format(k.info['unit_name'], cmd, code)) if code != 0: return "command `{}` returned {}".format(cmd, str(code)) return None
python
def validate_services(self, commands): """Validate that lists of commands succeed on service units. Can be used to verify system services are running on the corresponding service units. :param commands: dict with sentry keys and arbitrary command list vals :returns: None if successful, Failure string message otherwise """ self.log.debug('Checking status of system services...') # /!\ DEPRECATION WARNING (beisner): # New and existing tests should be rewritten to use # validate_services_by_name() as it is aware of init systems. self.log.warn('DEPRECATION WARNING: use ' 'validate_services_by_name instead of validate_services ' 'due to init system differences.') for k, v in six.iteritems(commands): for cmd in v: output, code = k.run(cmd) self.log.debug('{} `{}` returned ' '{}'.format(k.info['unit_name'], cmd, code)) if code != 0: return "command `{}` returned {}".format(cmd, str(code)) return None
[ "def", "validate_services", "(", "self", ",", "commands", ")", ":", "self", ".", "log", ".", "debug", "(", "'Checking status of system services...'", ")", "# /!\\ DEPRECATION WARNING (beisner):", "# New and existing tests should be rewritten to use", "# validate_services_by_name() as it is aware of init systems.", "self", ".", "log", ".", "warn", "(", "'DEPRECATION WARNING: use '", "'validate_services_by_name instead of validate_services '", "'due to init system differences.'", ")", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "commands", ")", ":", "for", "cmd", "in", "v", ":", "output", ",", "code", "=", "k", ".", "run", "(", "cmd", ")", "self", ".", "log", ".", "debug", "(", "'{} `{}` returned '", "'{}'", ".", "format", "(", "k", ".", "info", "[", "'unit_name'", "]", ",", "cmd", ",", "code", ")", ")", "if", "code", "!=", "0", ":", "return", "\"command `{}` returned {}\"", ".", "format", "(", "cmd", ",", "str", "(", "code", ")", ")", "return", "None" ]
Validate that lists of commands succeed on service units. Can be used to verify system services are running on the corresponding service units. :param commands: dict with sentry keys and arbitrary command list vals :returns: None if successful, Failure string message otherwise
[ "Validate", "that", "lists", "of", "commands", "succeed", "on", "service", "units", ".", "Can", "be", "used", "to", "verify", "system", "services", "are", "running", "on", "the", "corresponding", "service", "units", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L104-L129
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.validate_services_by_name
def validate_services_by_name(self, sentry_services): """Validate system service status by service name, automatically detecting init system based on Ubuntu release codename. :param sentry_services: dict with sentry keys and svc list values :returns: None if successful, Failure string message otherwise """ self.log.debug('Checking status of system services...') # Point at which systemd became a thing systemd_switch = self.ubuntu_releases.index('vivid') for sentry_unit, services_list in six.iteritems(sentry_services): # Get lsb_release codename from unit release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) if ret: return ret for service_name in services_list: if (self.ubuntu_releases.index(release) >= systemd_switch or service_name in ['rabbitmq-server', 'apache2', 'memcached']): # init is systemd (or regular sysv) cmd = 'sudo service {} status'.format(service_name) output, code = sentry_unit.run(cmd) service_running = code == 0 elif self.ubuntu_releases.index(release) < systemd_switch: # init is upstart cmd = 'sudo status {}'.format(service_name) output, code = sentry_unit.run(cmd) service_running = code == 0 and "start/running" in output self.log.debug('{} `{}` returned ' '{}'.format(sentry_unit.info['unit_name'], cmd, code)) if not service_running: return u"command `{}` returned {} {}".format( cmd, output, str(code)) return None
python
def validate_services_by_name(self, sentry_services): """Validate system service status by service name, automatically detecting init system based on Ubuntu release codename. :param sentry_services: dict with sentry keys and svc list values :returns: None if successful, Failure string message otherwise """ self.log.debug('Checking status of system services...') # Point at which systemd became a thing systemd_switch = self.ubuntu_releases.index('vivid') for sentry_unit, services_list in six.iteritems(sentry_services): # Get lsb_release codename from unit release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) if ret: return ret for service_name in services_list: if (self.ubuntu_releases.index(release) >= systemd_switch or service_name in ['rabbitmq-server', 'apache2', 'memcached']): # init is systemd (or regular sysv) cmd = 'sudo service {} status'.format(service_name) output, code = sentry_unit.run(cmd) service_running = code == 0 elif self.ubuntu_releases.index(release) < systemd_switch: # init is upstart cmd = 'sudo status {}'.format(service_name) output, code = sentry_unit.run(cmd) service_running = code == 0 and "start/running" in output self.log.debug('{} `{}` returned ' '{}'.format(sentry_unit.info['unit_name'], cmd, code)) if not service_running: return u"command `{}` returned {} {}".format( cmd, output, str(code)) return None
[ "def", "validate_services_by_name", "(", "self", ",", "sentry_services", ")", ":", "self", ".", "log", ".", "debug", "(", "'Checking status of system services...'", ")", "# Point at which systemd became a thing", "systemd_switch", "=", "self", ".", "ubuntu_releases", ".", "index", "(", "'vivid'", ")", "for", "sentry_unit", ",", "services_list", "in", "six", ".", "iteritems", "(", "sentry_services", ")", ":", "# Get lsb_release codename from unit", "release", ",", "ret", "=", "self", ".", "get_ubuntu_release_from_sentry", "(", "sentry_unit", ")", "if", "ret", ":", "return", "ret", "for", "service_name", "in", "services_list", ":", "if", "(", "self", ".", "ubuntu_releases", ".", "index", "(", "release", ")", ">=", "systemd_switch", "or", "service_name", "in", "[", "'rabbitmq-server'", ",", "'apache2'", ",", "'memcached'", "]", ")", ":", "# init is systemd (or regular sysv)", "cmd", "=", "'sudo service {} status'", ".", "format", "(", "service_name", ")", "output", ",", "code", "=", "sentry_unit", ".", "run", "(", "cmd", ")", "service_running", "=", "code", "==", "0", "elif", "self", ".", "ubuntu_releases", ".", "index", "(", "release", ")", "<", "systemd_switch", ":", "# init is upstart", "cmd", "=", "'sudo status {}'", ".", "format", "(", "service_name", ")", "output", ",", "code", "=", "sentry_unit", ".", "run", "(", "cmd", ")", "service_running", "=", "code", "==", "0", "and", "\"start/running\"", "in", "output", "self", ".", "log", ".", "debug", "(", "'{} `{}` returned '", "'{}'", ".", "format", "(", "sentry_unit", ".", "info", "[", "'unit_name'", "]", ",", "cmd", ",", "code", ")", ")", "if", "not", "service_running", ":", "return", "u\"command `{}` returned {} {}\"", ".", "format", "(", "cmd", ",", "output", ",", "str", "(", "code", ")", ")", "return", "None" ]
Validate system service status by service name, automatically detecting init system based on Ubuntu release codename. :param sentry_services: dict with sentry keys and svc list values :returns: None if successful, Failure string message otherwise
[ "Validate", "system", "service", "status", "by", "service", "name", "automatically", "detecting", "init", "system", "based", "on", "Ubuntu", "release", "codename", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L131-L169
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils._get_config
def _get_config(self, unit, filename): """Get a ConfigParser object for parsing a unit's config file.""" file_contents = unit.file_contents(filename) # NOTE(beisner): by default, ConfigParser does not handle options # with no value, such as the flags used in the mysql my.cnf file. # https://bugs.python.org/issue7005 config = configparser.ConfigParser(allow_no_value=True) config.readfp(io.StringIO(file_contents)) return config
python
def _get_config(self, unit, filename): """Get a ConfigParser object for parsing a unit's config file.""" file_contents = unit.file_contents(filename) # NOTE(beisner): by default, ConfigParser does not handle options # with no value, such as the flags used in the mysql my.cnf file. # https://bugs.python.org/issue7005 config = configparser.ConfigParser(allow_no_value=True) config.readfp(io.StringIO(file_contents)) return config
[ "def", "_get_config", "(", "self", ",", "unit", ",", "filename", ")", ":", "file_contents", "=", "unit", ".", "file_contents", "(", "filename", ")", "# NOTE(beisner): by default, ConfigParser does not handle options", "# with no value, such as the flags used in the mysql my.cnf file.", "# https://bugs.python.org/issue7005", "config", "=", "configparser", ".", "ConfigParser", "(", "allow_no_value", "=", "True", ")", "config", ".", "readfp", "(", "io", ".", "StringIO", "(", "file_contents", ")", ")", "return", "config" ]
Get a ConfigParser object for parsing a unit's config file.
[ "Get", "a", "ConfigParser", "object", "for", "parsing", "a", "unit", "s", "config", "file", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L171-L180
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.validate_config_data
def validate_config_data(self, sentry_unit, config_file, section, expected): """Validate config file data. Verify that the specified section of the config file contains the expected option key:value pairs. Compare expected dictionary data vs actual dictionary data. The values in the 'expected' dictionary can be strings, bools, ints, longs, or can be a function that evaluates a variable and returns a bool. """ self.log.debug('Validating config file data ({} in {} on {})' '...'.format(section, config_file, sentry_unit.info['unit_name'])) config = self._get_config(sentry_unit, config_file) if section != 'DEFAULT' and not config.has_section(section): return "section [{}] does not exist".format(section) for k in expected.keys(): if not config.has_option(section, k): return "section [{}] is missing option {}".format(section, k) actual = config.get(section, k) v = expected[k] if (isinstance(v, six.string_types) or isinstance(v, bool) or isinstance(v, six.integer_types)): # handle explicit values if actual != v: return "section [{}] {}:{} != expected {}:{}".format( section, k, actual, k, expected[k]) # handle function pointers, such as not_null or valid_ip elif not v(actual): return "section [{}] {}:{} != expected {}:{}".format( section, k, actual, k, expected[k]) return None
python
def validate_config_data(self, sentry_unit, config_file, section, expected): """Validate config file data. Verify that the specified section of the config file contains the expected option key:value pairs. Compare expected dictionary data vs actual dictionary data. The values in the 'expected' dictionary can be strings, bools, ints, longs, or can be a function that evaluates a variable and returns a bool. """ self.log.debug('Validating config file data ({} in {} on {})' '...'.format(section, config_file, sentry_unit.info['unit_name'])) config = self._get_config(sentry_unit, config_file) if section != 'DEFAULT' and not config.has_section(section): return "section [{}] does not exist".format(section) for k in expected.keys(): if not config.has_option(section, k): return "section [{}] is missing option {}".format(section, k) actual = config.get(section, k) v = expected[k] if (isinstance(v, six.string_types) or isinstance(v, bool) or isinstance(v, six.integer_types)): # handle explicit values if actual != v: return "section [{}] {}:{} != expected {}:{}".format( section, k, actual, k, expected[k]) # handle function pointers, such as not_null or valid_ip elif not v(actual): return "section [{}] {}:{} != expected {}:{}".format( section, k, actual, k, expected[k]) return None
[ "def", "validate_config_data", "(", "self", ",", "sentry_unit", ",", "config_file", ",", "section", ",", "expected", ")", ":", "self", ".", "log", ".", "debug", "(", "'Validating config file data ({} in {} on {})'", "'...'", ".", "format", "(", "section", ",", "config_file", ",", "sentry_unit", ".", "info", "[", "'unit_name'", "]", ")", ")", "config", "=", "self", ".", "_get_config", "(", "sentry_unit", ",", "config_file", ")", "if", "section", "!=", "'DEFAULT'", "and", "not", "config", ".", "has_section", "(", "section", ")", ":", "return", "\"section [{}] does not exist\"", ".", "format", "(", "section", ")", "for", "k", "in", "expected", ".", "keys", "(", ")", ":", "if", "not", "config", ".", "has_option", "(", "section", ",", "k", ")", ":", "return", "\"section [{}] is missing option {}\"", ".", "format", "(", "section", ",", "k", ")", "actual", "=", "config", ".", "get", "(", "section", ",", "k", ")", "v", "=", "expected", "[", "k", "]", "if", "(", "isinstance", "(", "v", ",", "six", ".", "string_types", ")", "or", "isinstance", "(", "v", ",", "bool", ")", "or", "isinstance", "(", "v", ",", "six", ".", "integer_types", ")", ")", ":", "# handle explicit values", "if", "actual", "!=", "v", ":", "return", "\"section [{}] {}:{} != expected {}:{}\"", ".", "format", "(", "section", ",", "k", ",", "actual", ",", "k", ",", "expected", "[", "k", "]", ")", "# handle function pointers, such as not_null or valid_ip", "elif", "not", "v", "(", "actual", ")", ":", "return", "\"section [{}] {}:{} != expected {}:{}\"", ".", "format", "(", "section", ",", "k", ",", "actual", ",", "k", ",", "expected", "[", "k", "]", ")", "return", "None" ]
Validate config file data. Verify that the specified section of the config file contains the expected option key:value pairs. Compare expected dictionary data vs actual dictionary data. The values in the 'expected' dictionary can be strings, bools, ints, longs, or can be a function that evaluates a variable and returns a bool.
[ "Validate", "config", "file", "data", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L182-L219
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils._validate_dict_data
def _validate_dict_data(self, expected, actual): """Validate dictionary data. Compare expected dictionary data vs actual dictionary data. The values in the 'expected' dictionary can be strings, bools, ints, longs, or can be a function that evaluates a variable and returns a bool. """ self.log.debug('actual: {}'.format(repr(actual))) self.log.debug('expected: {}'.format(repr(expected))) for k, v in six.iteritems(expected): if k in actual: if (isinstance(v, six.string_types) or isinstance(v, bool) or isinstance(v, six.integer_types)): # handle explicit values if v != actual[k]: return "{}:{}".format(k, actual[k]) # handle function pointers, such as not_null or valid_ip elif not v(actual[k]): return "{}:{}".format(k, actual[k]) else: return "key '{}' does not exist".format(k) return None
python
def _validate_dict_data(self, expected, actual): """Validate dictionary data. Compare expected dictionary data vs actual dictionary data. The values in the 'expected' dictionary can be strings, bools, ints, longs, or can be a function that evaluates a variable and returns a bool. """ self.log.debug('actual: {}'.format(repr(actual))) self.log.debug('expected: {}'.format(repr(expected))) for k, v in six.iteritems(expected): if k in actual: if (isinstance(v, six.string_types) or isinstance(v, bool) or isinstance(v, six.integer_types)): # handle explicit values if v != actual[k]: return "{}:{}".format(k, actual[k]) # handle function pointers, such as not_null or valid_ip elif not v(actual[k]): return "{}:{}".format(k, actual[k]) else: return "key '{}' does not exist".format(k) return None
[ "def", "_validate_dict_data", "(", "self", ",", "expected", ",", "actual", ")", ":", "self", ".", "log", ".", "debug", "(", "'actual: {}'", ".", "format", "(", "repr", "(", "actual", ")", ")", ")", "self", ".", "log", ".", "debug", "(", "'expected: {}'", ".", "format", "(", "repr", "(", "expected", ")", ")", ")", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "expected", ")", ":", "if", "k", "in", "actual", ":", "if", "(", "isinstance", "(", "v", ",", "six", ".", "string_types", ")", "or", "isinstance", "(", "v", ",", "bool", ")", "or", "isinstance", "(", "v", ",", "six", ".", "integer_types", ")", ")", ":", "# handle explicit values", "if", "v", "!=", "actual", "[", "k", "]", ":", "return", "\"{}:{}\"", ".", "format", "(", "k", ",", "actual", "[", "k", "]", ")", "# handle function pointers, such as not_null or valid_ip", "elif", "not", "v", "(", "actual", "[", "k", "]", ")", ":", "return", "\"{}:{}\"", ".", "format", "(", "k", ",", "actual", "[", "k", "]", ")", "else", ":", "return", "\"key '{}' does not exist\"", ".", "format", "(", "k", ")", "return", "None" ]
Validate dictionary data. Compare expected dictionary data vs actual dictionary data. The values in the 'expected' dictionary can be strings, bools, ints, longs, or can be a function that evaluates a variable and returns a bool.
[ "Validate", "dictionary", "data", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L221-L245
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.validate_relation_data
def validate_relation_data(self, sentry_unit, relation, expected): """Validate actual relation data based on expected relation data.""" actual = sentry_unit.relation(relation[0], relation[1]) return self._validate_dict_data(expected, actual)
python
def validate_relation_data(self, sentry_unit, relation, expected): """Validate actual relation data based on expected relation data.""" actual = sentry_unit.relation(relation[0], relation[1]) return self._validate_dict_data(expected, actual)
[ "def", "validate_relation_data", "(", "self", ",", "sentry_unit", ",", "relation", ",", "expected", ")", ":", "actual", "=", "sentry_unit", ".", "relation", "(", "relation", "[", "0", "]", ",", "relation", "[", "1", "]", ")", "return", "self", ".", "_validate_dict_data", "(", "expected", ",", "actual", ")" ]
Validate actual relation data based on expected relation data.
[ "Validate", "actual", "relation", "data", "based", "on", "expected", "relation", "data", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L247-L250
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils._validate_list_data
def _validate_list_data(self, expected, actual): """Compare expected list vs actual list data.""" for e in expected: if e not in actual: return "expected item {} not found in actual list".format(e) return None
python
def _validate_list_data(self, expected, actual): """Compare expected list vs actual list data.""" for e in expected: if e not in actual: return "expected item {} not found in actual list".format(e) return None
[ "def", "_validate_list_data", "(", "self", ",", "expected", ",", "actual", ")", ":", "for", "e", "in", "expected", ":", "if", "e", "not", "in", "actual", ":", "return", "\"expected item {} not found in actual list\"", ".", "format", "(", "e", ")", "return", "None" ]
Compare expected list vs actual list data.
[ "Compare", "expected", "list", "vs", "actual", "list", "data", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L252-L257
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.service_restarted
def service_restarted(self, sentry_unit, service, filename, pgrep_full=None, sleep_time=20): """Check if service was restarted. Compare a service's start time vs a file's last modification time (such as a config file for that service) to determine if the service has been restarted. """ # /!\ DEPRECATION WARNING (beisner): # This method is prone to races in that no before-time is known. # Use validate_service_config_changed instead. # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now # used instead of pgrep. pgrep_full is still passed through to ensure # deprecation WARNS. lp1474030 self.log.warn('DEPRECATION WARNING: use ' 'validate_service_config_changed instead of ' 'service_restarted due to known races.') time.sleep(sleep_time) if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= self._get_file_mtime(sentry_unit, filename)): return True else: return False
python
def service_restarted(self, sentry_unit, service, filename, pgrep_full=None, sleep_time=20): """Check if service was restarted. Compare a service's start time vs a file's last modification time (such as a config file for that service) to determine if the service has been restarted. """ # /!\ DEPRECATION WARNING (beisner): # This method is prone to races in that no before-time is known. # Use validate_service_config_changed instead. # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now # used instead of pgrep. pgrep_full is still passed through to ensure # deprecation WARNS. lp1474030 self.log.warn('DEPRECATION WARNING: use ' 'validate_service_config_changed instead of ' 'service_restarted due to known races.') time.sleep(sleep_time) if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= self._get_file_mtime(sentry_unit, filename)): return True else: return False
[ "def", "service_restarted", "(", "self", ",", "sentry_unit", ",", "service", ",", "filename", ",", "pgrep_full", "=", "None", ",", "sleep_time", "=", "20", ")", ":", "# /!\\ DEPRECATION WARNING (beisner):", "# This method is prone to races in that no before-time is known.", "# Use validate_service_config_changed instead.", "# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now", "# used instead of pgrep. pgrep_full is still passed through to ensure", "# deprecation WARNS. lp1474030", "self", ".", "log", ".", "warn", "(", "'DEPRECATION WARNING: use '", "'validate_service_config_changed instead of '", "'service_restarted due to known races.'", ")", "time", ".", "sleep", "(", "sleep_time", ")", "if", "(", "self", ".", "_get_proc_start_time", "(", "sentry_unit", ",", "service", ",", "pgrep_full", ")", ">=", "self", ".", "_get_file_mtime", "(", "sentry_unit", ",", "filename", ")", ")", ":", "return", "True", "else", ":", "return", "False" ]
Check if service was restarted. Compare a service's start time vs a file's last modification time (such as a config file for that service) to determine if the service has been restarted.
[ "Check", "if", "service", "was", "restarted", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L294-L318
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.service_restarted_since
def service_restarted_since(self, sentry_unit, mtime, service, pgrep_full=None, sleep_time=20, retry_count=30, retry_sleep_time=10): """Check if service was been started after a given time. Args: sentry_unit (sentry): The sentry unit to check for the service on mtime (float): The epoch time to check against service (string): service name to look for in process table pgrep_full: [Deprecated] Use full command line search mode with pgrep sleep_time (int): Initial sleep time (s) before looking for file retry_sleep_time (int): Time (s) to sleep between retries retry_count (int): If file is not found, how many times to retry Returns: bool: True if service found and its start time it newer than mtime, False if service is older than mtime or if service was not found. """ # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now # used instead of pgrep. pgrep_full is still passed through to ensure # deprecation WARNS. lp1474030 unit_name = sentry_unit.info['unit_name'] self.log.debug('Checking that %s service restarted since %s on ' '%s' % (service, mtime, unit_name)) time.sleep(sleep_time) proc_start_time = None tries = 0 while tries <= retry_count and not proc_start_time: try: proc_start_time = self._get_proc_start_time(sentry_unit, service, pgrep_full) self.log.debug('Attempt {} to get {} proc start time on {} ' 'OK'.format(tries, service, unit_name)) except IOError as e: # NOTE(beisner) - race avoidance, proc may not exist yet. 
# https://bugs.launchpad.net/charm-helpers/+bug/1474030 self.log.debug('Attempt {} to get {} proc start time on {} ' 'failed\n{}'.format(tries, service, unit_name, e)) time.sleep(retry_sleep_time) tries += 1 if not proc_start_time: self.log.warn('No proc start time found, assuming service did ' 'not start') return False if proc_start_time >= mtime: self.log.debug('Proc start time is newer than provided mtime' '(%s >= %s) on %s (OK)' % (proc_start_time, mtime, unit_name)) return True else: self.log.warn('Proc start time (%s) is older than provided mtime ' '(%s) on %s, service did not ' 'restart' % (proc_start_time, mtime, unit_name)) return False
python
def service_restarted_since(self, sentry_unit, mtime, service, pgrep_full=None, sleep_time=20, retry_count=30, retry_sleep_time=10): """Check if service was been started after a given time. Args: sentry_unit (sentry): The sentry unit to check for the service on mtime (float): The epoch time to check against service (string): service name to look for in process table pgrep_full: [Deprecated] Use full command line search mode with pgrep sleep_time (int): Initial sleep time (s) before looking for file retry_sleep_time (int): Time (s) to sleep between retries retry_count (int): If file is not found, how many times to retry Returns: bool: True if service found and its start time it newer than mtime, False if service is older than mtime or if service was not found. """ # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now # used instead of pgrep. pgrep_full is still passed through to ensure # deprecation WARNS. lp1474030 unit_name = sentry_unit.info['unit_name'] self.log.debug('Checking that %s service restarted since %s on ' '%s' % (service, mtime, unit_name)) time.sleep(sleep_time) proc_start_time = None tries = 0 while tries <= retry_count and not proc_start_time: try: proc_start_time = self._get_proc_start_time(sentry_unit, service, pgrep_full) self.log.debug('Attempt {} to get {} proc start time on {} ' 'OK'.format(tries, service, unit_name)) except IOError as e: # NOTE(beisner) - race avoidance, proc may not exist yet. 
# https://bugs.launchpad.net/charm-helpers/+bug/1474030 self.log.debug('Attempt {} to get {} proc start time on {} ' 'failed\n{}'.format(tries, service, unit_name, e)) time.sleep(retry_sleep_time) tries += 1 if not proc_start_time: self.log.warn('No proc start time found, assuming service did ' 'not start') return False if proc_start_time >= mtime: self.log.debug('Proc start time is newer than provided mtime' '(%s >= %s) on %s (OK)' % (proc_start_time, mtime, unit_name)) return True else: self.log.warn('Proc start time (%s) is older than provided mtime ' '(%s) on %s, service did not ' 'restart' % (proc_start_time, mtime, unit_name)) return False
[ "def", "service_restarted_since", "(", "self", ",", "sentry_unit", ",", "mtime", ",", "service", ",", "pgrep_full", "=", "None", ",", "sleep_time", "=", "20", ",", "retry_count", "=", "30", ",", "retry_sleep_time", "=", "10", ")", ":", "# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now", "# used instead of pgrep. pgrep_full is still passed through to ensure", "# deprecation WARNS. lp1474030", "unit_name", "=", "sentry_unit", ".", "info", "[", "'unit_name'", "]", "self", ".", "log", ".", "debug", "(", "'Checking that %s service restarted since %s on '", "'%s'", "%", "(", "service", ",", "mtime", ",", "unit_name", ")", ")", "time", ".", "sleep", "(", "sleep_time", ")", "proc_start_time", "=", "None", "tries", "=", "0", "while", "tries", "<=", "retry_count", "and", "not", "proc_start_time", ":", "try", ":", "proc_start_time", "=", "self", ".", "_get_proc_start_time", "(", "sentry_unit", ",", "service", ",", "pgrep_full", ")", "self", ".", "log", ".", "debug", "(", "'Attempt {} to get {} proc start time on {} '", "'OK'", ".", "format", "(", "tries", ",", "service", ",", "unit_name", ")", ")", "except", "IOError", "as", "e", ":", "# NOTE(beisner) - race avoidance, proc may not exist yet.", "# https://bugs.launchpad.net/charm-helpers/+bug/1474030", "self", ".", "log", ".", "debug", "(", "'Attempt {} to get {} proc start time on {} '", "'failed\\n{}'", ".", "format", "(", "tries", ",", "service", ",", "unit_name", ",", "e", ")", ")", "time", ".", "sleep", "(", "retry_sleep_time", ")", "tries", "+=", "1", "if", "not", "proc_start_time", ":", "self", ".", "log", ".", "warn", "(", "'No proc start time found, assuming service did '", "'not start'", ")", "return", "False", "if", "proc_start_time", ">=", "mtime", ":", "self", ".", "log", ".", "debug", "(", "'Proc start time is newer than provided mtime'", "'(%s >= %s) on %s (OK)'", "%", "(", "proc_start_time", ",", "mtime", ",", "unit_name", ")", ")", "return", "True", "else", ":", "self", ".", "log", ".", 
"warn", "(", "'Proc start time (%s) is older than provided mtime '", "'(%s) on %s, service did not '", "'restart'", "%", "(", "proc_start_time", ",", "mtime", ",", "unit_name", ")", ")", "return", "False" ]
Check if service was been started after a given time. Args: sentry_unit (sentry): The sentry unit to check for the service on mtime (float): The epoch time to check against service (string): service name to look for in process table pgrep_full: [Deprecated] Use full command line search mode with pgrep sleep_time (int): Initial sleep time (s) before looking for file retry_sleep_time (int): Time (s) to sleep between retries retry_count (int): If file is not found, how many times to retry Returns: bool: True if service found and its start time it newer than mtime, False if service is older than mtime or if service was not found.
[ "Check", "if", "service", "was", "been", "started", "after", "a", "given", "time", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L320-L378
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.config_updated_since
def config_updated_since(self, sentry_unit, filename, mtime, sleep_time=20, retry_count=30, retry_sleep_time=10): """Check if file was modified after a given time. Args: sentry_unit (sentry): The sentry unit to check the file mtime on filename (string): The file to check mtime of mtime (float): The epoch time to check against sleep_time (int): Initial sleep time (s) before looking for file retry_sleep_time (int): Time (s) to sleep between retries retry_count (int): If file is not found, how many times to retry Returns: bool: True if file was modified more recently than mtime, False if file was modified before mtime, or if file not found. """ unit_name = sentry_unit.info['unit_name'] self.log.debug('Checking that %s updated since %s on ' '%s' % (filename, mtime, unit_name)) time.sleep(sleep_time) file_mtime = None tries = 0 while tries <= retry_count and not file_mtime: try: file_mtime = self._get_file_mtime(sentry_unit, filename) self.log.debug('Attempt {} to get {} file mtime on {} ' 'OK'.format(tries, filename, unit_name)) except IOError as e: # NOTE(beisner) - race avoidance, file may not exist yet. # https://bugs.launchpad.net/charm-helpers/+bug/1474030 self.log.debug('Attempt {} to get {} file mtime on {} ' 'failed\n{}'.format(tries, filename, unit_name, e)) time.sleep(retry_sleep_time) tries += 1 if not file_mtime: self.log.warn('Could not determine file mtime, assuming ' 'file does not exist') return False if file_mtime >= mtime: self.log.debug('File mtime is newer than provided mtime ' '(%s >= %s) on %s (OK)' % (file_mtime, mtime, unit_name)) return True else: self.log.warn('File mtime is older than provided mtime' '(%s < on %s) on %s' % (file_mtime, mtime, unit_name)) return False
python
def config_updated_since(self, sentry_unit, filename, mtime, sleep_time=20, retry_count=30, retry_sleep_time=10): """Check if file was modified after a given time. Args: sentry_unit (sentry): The sentry unit to check the file mtime on filename (string): The file to check mtime of mtime (float): The epoch time to check against sleep_time (int): Initial sleep time (s) before looking for file retry_sleep_time (int): Time (s) to sleep between retries retry_count (int): If file is not found, how many times to retry Returns: bool: True if file was modified more recently than mtime, False if file was modified before mtime, or if file not found. """ unit_name = sentry_unit.info['unit_name'] self.log.debug('Checking that %s updated since %s on ' '%s' % (filename, mtime, unit_name)) time.sleep(sleep_time) file_mtime = None tries = 0 while tries <= retry_count and not file_mtime: try: file_mtime = self._get_file_mtime(sentry_unit, filename) self.log.debug('Attempt {} to get {} file mtime on {} ' 'OK'.format(tries, filename, unit_name)) except IOError as e: # NOTE(beisner) - race avoidance, file may not exist yet. # https://bugs.launchpad.net/charm-helpers/+bug/1474030 self.log.debug('Attempt {} to get {} file mtime on {} ' 'failed\n{}'.format(tries, filename, unit_name, e)) time.sleep(retry_sleep_time) tries += 1 if not file_mtime: self.log.warn('Could not determine file mtime, assuming ' 'file does not exist') return False if file_mtime >= mtime: self.log.debug('File mtime is newer than provided mtime ' '(%s >= %s) on %s (OK)' % (file_mtime, mtime, unit_name)) return True else: self.log.warn('File mtime is older than provided mtime' '(%s < on %s) on %s' % (file_mtime, mtime, unit_name)) return False
[ "def", "config_updated_since", "(", "self", ",", "sentry_unit", ",", "filename", ",", "mtime", ",", "sleep_time", "=", "20", ",", "retry_count", "=", "30", ",", "retry_sleep_time", "=", "10", ")", ":", "unit_name", "=", "sentry_unit", ".", "info", "[", "'unit_name'", "]", "self", ".", "log", ".", "debug", "(", "'Checking that %s updated since %s on '", "'%s'", "%", "(", "filename", ",", "mtime", ",", "unit_name", ")", ")", "time", ".", "sleep", "(", "sleep_time", ")", "file_mtime", "=", "None", "tries", "=", "0", "while", "tries", "<=", "retry_count", "and", "not", "file_mtime", ":", "try", ":", "file_mtime", "=", "self", ".", "_get_file_mtime", "(", "sentry_unit", ",", "filename", ")", "self", ".", "log", ".", "debug", "(", "'Attempt {} to get {} file mtime on {} '", "'OK'", ".", "format", "(", "tries", ",", "filename", ",", "unit_name", ")", ")", "except", "IOError", "as", "e", ":", "# NOTE(beisner) - race avoidance, file may not exist yet.", "# https://bugs.launchpad.net/charm-helpers/+bug/1474030", "self", ".", "log", ".", "debug", "(", "'Attempt {} to get {} file mtime on {} '", "'failed\\n{}'", ".", "format", "(", "tries", ",", "filename", ",", "unit_name", ",", "e", ")", ")", "time", ".", "sleep", "(", "retry_sleep_time", ")", "tries", "+=", "1", "if", "not", "file_mtime", ":", "self", ".", "log", ".", "warn", "(", "'Could not determine file mtime, assuming '", "'file does not exist'", ")", "return", "False", "if", "file_mtime", ">=", "mtime", ":", "self", ".", "log", ".", "debug", "(", "'File mtime is newer than provided mtime '", "'(%s >= %s) on %s (OK)'", "%", "(", "file_mtime", ",", "mtime", ",", "unit_name", ")", ")", "return", "True", "else", ":", "self", ".", "log", ".", "warn", "(", "'File mtime is older than provided mtime'", "'(%s < on %s) on %s'", "%", "(", "file_mtime", ",", "mtime", ",", "unit_name", ")", ")", "return", "False" ]
Check if file was modified after a given time. Args: sentry_unit (sentry): The sentry unit to check the file mtime on filename (string): The file to check mtime of mtime (float): The epoch time to check against sleep_time (int): Initial sleep time (s) before looking for file retry_sleep_time (int): Time (s) to sleep between retries retry_count (int): If file is not found, how many times to retry Returns: bool: True if file was modified more recently than mtime, False if file was modified before mtime, or if file not found.
[ "Check", "if", "file", "was", "modified", "after", "a", "given", "time", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L380-L431
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.validate_service_config_changed
def validate_service_config_changed(self, sentry_unit, mtime, service, filename, pgrep_full=None, sleep_time=20, retry_count=30, retry_sleep_time=10): """Check service and file were updated after mtime Args: sentry_unit (sentry): The sentry unit to check for the service on mtime (float): The epoch time to check against service (string): service name to look for in process table filename (string): The file to check mtime of pgrep_full: [Deprecated] Use full command line search mode with pgrep sleep_time (int): Initial sleep in seconds to pass to test helpers retry_count (int): If service is not found, how many times to retry retry_sleep_time (int): Time in seconds to wait between retries Typical Usage: u = OpenStackAmuletUtils(ERROR) ... mtime = u.get_sentry_time(self.cinder_sentry) self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) if not u.validate_service_config_changed(self.cinder_sentry, mtime, 'cinder-api', '/etc/cinder/cinder.conf') amulet.raise_status(amulet.FAIL, msg='update failed') Returns: bool: True if both service and file where updated/restarted after mtime, False if service is older than mtime or if service was not found or if filename was modified before mtime. """ # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now # used instead of pgrep. pgrep_full is still passed through to ensure # deprecation WARNS. lp1474030 service_restart = self.service_restarted_since( sentry_unit, mtime, service, pgrep_full=pgrep_full, sleep_time=sleep_time, retry_count=retry_count, retry_sleep_time=retry_sleep_time) config_update = self.config_updated_since( sentry_unit, filename, mtime, sleep_time=sleep_time, retry_count=retry_count, retry_sleep_time=retry_sleep_time) return service_restart and config_update
python
def validate_service_config_changed(self, sentry_unit, mtime, service, filename, pgrep_full=None, sleep_time=20, retry_count=30, retry_sleep_time=10): """Check service and file were updated after mtime Args: sentry_unit (sentry): The sentry unit to check for the service on mtime (float): The epoch time to check against service (string): service name to look for in process table filename (string): The file to check mtime of pgrep_full: [Deprecated] Use full command line search mode with pgrep sleep_time (int): Initial sleep in seconds to pass to test helpers retry_count (int): If service is not found, how many times to retry retry_sleep_time (int): Time in seconds to wait between retries Typical Usage: u = OpenStackAmuletUtils(ERROR) ... mtime = u.get_sentry_time(self.cinder_sentry) self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) if not u.validate_service_config_changed(self.cinder_sentry, mtime, 'cinder-api', '/etc/cinder/cinder.conf') amulet.raise_status(amulet.FAIL, msg='update failed') Returns: bool: True if both service and file where updated/restarted after mtime, False if service is older than mtime or if service was not found or if filename was modified before mtime. """ # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now # used instead of pgrep. pgrep_full is still passed through to ensure # deprecation WARNS. lp1474030 service_restart = self.service_restarted_since( sentry_unit, mtime, service, pgrep_full=pgrep_full, sleep_time=sleep_time, retry_count=retry_count, retry_sleep_time=retry_sleep_time) config_update = self.config_updated_since( sentry_unit, filename, mtime, sleep_time=sleep_time, retry_count=retry_count, retry_sleep_time=retry_sleep_time) return service_restart and config_update
[ "def", "validate_service_config_changed", "(", "self", ",", "sentry_unit", ",", "mtime", ",", "service", ",", "filename", ",", "pgrep_full", "=", "None", ",", "sleep_time", "=", "20", ",", "retry_count", "=", "30", ",", "retry_sleep_time", "=", "10", ")", ":", "# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now", "# used instead of pgrep. pgrep_full is still passed through to ensure", "# deprecation WARNS. lp1474030", "service_restart", "=", "self", ".", "service_restarted_since", "(", "sentry_unit", ",", "mtime", ",", "service", ",", "pgrep_full", "=", "pgrep_full", ",", "sleep_time", "=", "sleep_time", ",", "retry_count", "=", "retry_count", ",", "retry_sleep_time", "=", "retry_sleep_time", ")", "config_update", "=", "self", ".", "config_updated_since", "(", "sentry_unit", ",", "filename", ",", "mtime", ",", "sleep_time", "=", "sleep_time", ",", "retry_count", "=", "retry_count", ",", "retry_sleep_time", "=", "retry_sleep_time", ")", "return", "service_restart", "and", "config_update" ]
Check service and file were updated after mtime Args: sentry_unit (sentry): The sentry unit to check for the service on mtime (float): The epoch time to check against service (string): service name to look for in process table filename (string): The file to check mtime of pgrep_full: [Deprecated] Use full command line search mode with pgrep sleep_time (int): Initial sleep in seconds to pass to test helpers retry_count (int): If service is not found, how many times to retry retry_sleep_time (int): Time in seconds to wait between retries Typical Usage: u = OpenStackAmuletUtils(ERROR) ... mtime = u.get_sentry_time(self.cinder_sentry) self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) if not u.validate_service_config_changed(self.cinder_sentry, mtime, 'cinder-api', '/etc/cinder/cinder.conf') amulet.raise_status(amulet.FAIL, msg='update failed') Returns: bool: True if both service and file where updated/restarted after mtime, False if service is older than mtime or if service was not found or if filename was modified before mtime.
[ "Check", "service", "and", "file", "were", "updated", "after", "mtime" ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L433-L485
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.file_to_url
def file_to_url(self, file_rel_path): """Convert a relative file path to a file URL.""" _abs_path = os.path.abspath(file_rel_path) return urlparse.urlparse(_abs_path, scheme='file').geturl()
python
def file_to_url(self, file_rel_path): """Convert a relative file path to a file URL.""" _abs_path = os.path.abspath(file_rel_path) return urlparse.urlparse(_abs_path, scheme='file').geturl()
[ "def", "file_to_url", "(", "self", ",", "file_rel_path", ")", ":", "_abs_path", "=", "os", ".", "path", ".", "abspath", "(", "file_rel_path", ")", "return", "urlparse", ".", "urlparse", "(", "_abs_path", ",", "scheme", "=", "'file'", ")", ".", "geturl", "(", ")" ]
Convert a relative file path to a file URL.
[ "Convert", "a", "relative", "file", "path", "to", "a", "file", "URL", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L504-L507
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.check_commands_on_units
def check_commands_on_units(self, commands, sentry_units): """Check that all commands in a list exit zero on all sentry units in a list. :param commands: list of bash commands :param sentry_units: list of sentry unit pointers :returns: None if successful; Failure message otherwise """ self.log.debug('Checking exit codes for {} commands on {} ' 'sentry units...'.format(len(commands), len(sentry_units))) for sentry_unit in sentry_units: for cmd in commands: output, code = sentry_unit.run(cmd) if code == 0: self.log.debug('{} `{}` returned {} ' '(OK)'.format(sentry_unit.info['unit_name'], cmd, code)) else: return ('{} `{}` returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, code, output)) return None
python
def check_commands_on_units(self, commands, sentry_units): """Check that all commands in a list exit zero on all sentry units in a list. :param commands: list of bash commands :param sentry_units: list of sentry unit pointers :returns: None if successful; Failure message otherwise """ self.log.debug('Checking exit codes for {} commands on {} ' 'sentry units...'.format(len(commands), len(sentry_units))) for sentry_unit in sentry_units: for cmd in commands: output, code = sentry_unit.run(cmd) if code == 0: self.log.debug('{} `{}` returned {} ' '(OK)'.format(sentry_unit.info['unit_name'], cmd, code)) else: return ('{} `{}` returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, code, output)) return None
[ "def", "check_commands_on_units", "(", "self", ",", "commands", ",", "sentry_units", ")", ":", "self", ".", "log", ".", "debug", "(", "'Checking exit codes for {} commands on {} '", "'sentry units...'", ".", "format", "(", "len", "(", "commands", ")", ",", "len", "(", "sentry_units", ")", ")", ")", "for", "sentry_unit", "in", "sentry_units", ":", "for", "cmd", "in", "commands", ":", "output", ",", "code", "=", "sentry_unit", ".", "run", "(", "cmd", ")", "if", "code", "==", "0", ":", "self", ".", "log", ".", "debug", "(", "'{} `{}` returned {} '", "'(OK)'", ".", "format", "(", "sentry_unit", ".", "info", "[", "'unit_name'", "]", ",", "cmd", ",", "code", ")", ")", "else", ":", "return", "(", "'{} `{}` returned {} '", "'{}'", ".", "format", "(", "sentry_unit", ".", "info", "[", "'unit_name'", "]", ",", "cmd", ",", "code", ",", "output", ")", ")", "return", "None" ]
Check that all commands in a list exit zero on all sentry units in a list. :param commands: list of bash commands :param sentry_units: list of sentry unit pointers :returns: None if successful; Failure message otherwise
[ "Check", "that", "all", "commands", "in", "a", "list", "exit", "zero", "on", "all", "sentry", "units", "in", "a", "list", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L509-L531
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.get_unit_process_ids
def get_unit_process_ids( self, unit_processes, expect_success=True, pgrep_full=False): """Construct a dict containing unit sentries, process names, and process IDs. :param unit_processes: A dictionary of Amulet sentry instance to list of process names. :param expect_success: if False expect the processes to not be running, raise if they are. :returns: Dictionary of Amulet sentry instance to dictionary of process names to PIDs. """ pid_dict = {} for sentry_unit, process_list in six.iteritems(unit_processes): pid_dict[sentry_unit] = {} for process in process_list: pids = self.get_process_id_list( sentry_unit, process, expect_success=expect_success, pgrep_full=pgrep_full) pid_dict[sentry_unit].update({process: pids}) return pid_dict
python
def get_unit_process_ids( self, unit_processes, expect_success=True, pgrep_full=False): """Construct a dict containing unit sentries, process names, and process IDs. :param unit_processes: A dictionary of Amulet sentry instance to list of process names. :param expect_success: if False expect the processes to not be running, raise if they are. :returns: Dictionary of Amulet sentry instance to dictionary of process names to PIDs. """ pid_dict = {} for sentry_unit, process_list in six.iteritems(unit_processes): pid_dict[sentry_unit] = {} for process in process_list: pids = self.get_process_id_list( sentry_unit, process, expect_success=expect_success, pgrep_full=pgrep_full) pid_dict[sentry_unit].update({process: pids}) return pid_dict
[ "def", "get_unit_process_ids", "(", "self", ",", "unit_processes", ",", "expect_success", "=", "True", ",", "pgrep_full", "=", "False", ")", ":", "pid_dict", "=", "{", "}", "for", "sentry_unit", ",", "process_list", "in", "six", ".", "iteritems", "(", "unit_processes", ")", ":", "pid_dict", "[", "sentry_unit", "]", "=", "{", "}", "for", "process", "in", "process_list", ":", "pids", "=", "self", ".", "get_process_id_list", "(", "sentry_unit", ",", "process", ",", "expect_success", "=", "expect_success", ",", "pgrep_full", "=", "pgrep_full", ")", "pid_dict", "[", "sentry_unit", "]", ".", "update", "(", "{", "process", ":", "pids", "}", ")", "return", "pid_dict" ]
Construct a dict containing unit sentries, process names, and process IDs. :param unit_processes: A dictionary of Amulet sentry instance to list of process names. :param expect_success: if False expect the processes to not be running, raise if they are. :returns: Dictionary of Amulet sentry instance to dictionary of process names to PIDs.
[ "Construct", "a", "dict", "containing", "unit", "sentries", "process", "names", "and", "process", "IDs", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L558-L578
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.validate_unit_process_ids
def validate_unit_process_ids(self, expected, actual): """Validate process id quantities for services on units.""" self.log.debug('Checking units for running processes...') self.log.debug('Expected PIDs: {}'.format(expected)) self.log.debug('Actual PIDs: {}'.format(actual)) if len(actual) != len(expected): return ('Unit count mismatch. expected, actual: {}, ' '{} '.format(len(expected), len(actual))) for (e_sentry, e_proc_names) in six.iteritems(expected): e_sentry_name = e_sentry.info['unit_name'] if e_sentry in actual.keys(): a_proc_names = actual[e_sentry] else: return ('Expected sentry ({}) not found in actual dict data.' '{}'.format(e_sentry_name, e_sentry)) if len(e_proc_names.keys()) != len(a_proc_names.keys()): return ('Process name count mismatch. expected, actual: {}, ' '{}'.format(len(expected), len(actual))) for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ zip(e_proc_names.items(), a_proc_names.items()): if e_proc_name != a_proc_name: return ('Process name mismatch. expected, actual: {}, ' '{}'.format(e_proc_name, a_proc_name)) a_pids_length = len(a_pids) fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' '{}, {} ({})'.format(e_sentry_name, e_proc_name, e_pids, a_pids_length, a_pids)) # If expected is a list, ensure at least one PID quantity match if isinstance(e_pids, list) and \ a_pids_length not in e_pids: return fail_msg # If expected is not bool and not list, # ensure PID quantities match elif not isinstance(e_pids, bool) and \ not isinstance(e_pids, list) and \ a_pids_length != e_pids: return fail_msg # If expected is bool True, ensure 1 or more PIDs exist elif isinstance(e_pids, bool) and \ e_pids is True and a_pids_length < 1: return fail_msg # If expected is bool False, ensure 0 PIDs exist elif isinstance(e_pids, bool) and \ e_pids is False and a_pids_length != 0: return fail_msg else: self.log.debug('PID check OK: {} {} {}: ' '{}'.format(e_sentry_name, e_proc_name, e_pids, a_pids)) return None
python
def validate_unit_process_ids(self, expected, actual): """Validate process id quantities for services on units.""" self.log.debug('Checking units for running processes...') self.log.debug('Expected PIDs: {}'.format(expected)) self.log.debug('Actual PIDs: {}'.format(actual)) if len(actual) != len(expected): return ('Unit count mismatch. expected, actual: {}, ' '{} '.format(len(expected), len(actual))) for (e_sentry, e_proc_names) in six.iteritems(expected): e_sentry_name = e_sentry.info['unit_name'] if e_sentry in actual.keys(): a_proc_names = actual[e_sentry] else: return ('Expected sentry ({}) not found in actual dict data.' '{}'.format(e_sentry_name, e_sentry)) if len(e_proc_names.keys()) != len(a_proc_names.keys()): return ('Process name count mismatch. expected, actual: {}, ' '{}'.format(len(expected), len(actual))) for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ zip(e_proc_names.items(), a_proc_names.items()): if e_proc_name != a_proc_name: return ('Process name mismatch. expected, actual: {}, ' '{}'.format(e_proc_name, a_proc_name)) a_pids_length = len(a_pids) fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' '{}, {} ({})'.format(e_sentry_name, e_proc_name, e_pids, a_pids_length, a_pids)) # If expected is a list, ensure at least one PID quantity match if isinstance(e_pids, list) and \ a_pids_length not in e_pids: return fail_msg # If expected is not bool and not list, # ensure PID quantities match elif not isinstance(e_pids, bool) and \ not isinstance(e_pids, list) and \ a_pids_length != e_pids: return fail_msg # If expected is bool True, ensure 1 or more PIDs exist elif isinstance(e_pids, bool) and \ e_pids is True and a_pids_length < 1: return fail_msg # If expected is bool False, ensure 0 PIDs exist elif isinstance(e_pids, bool) and \ e_pids is False and a_pids_length != 0: return fail_msg else: self.log.debug('PID check OK: {} {} {}: ' '{}'.format(e_sentry_name, e_proc_name, e_pids, a_pids)) return None
[ "def", "validate_unit_process_ids", "(", "self", ",", "expected", ",", "actual", ")", ":", "self", ".", "log", ".", "debug", "(", "'Checking units for running processes...'", ")", "self", ".", "log", ".", "debug", "(", "'Expected PIDs: {}'", ".", "format", "(", "expected", ")", ")", "self", ".", "log", ".", "debug", "(", "'Actual PIDs: {}'", ".", "format", "(", "actual", ")", ")", "if", "len", "(", "actual", ")", "!=", "len", "(", "expected", ")", ":", "return", "(", "'Unit count mismatch. expected, actual: {}, '", "'{} '", ".", "format", "(", "len", "(", "expected", ")", ",", "len", "(", "actual", ")", ")", ")", "for", "(", "e_sentry", ",", "e_proc_names", ")", "in", "six", ".", "iteritems", "(", "expected", ")", ":", "e_sentry_name", "=", "e_sentry", ".", "info", "[", "'unit_name'", "]", "if", "e_sentry", "in", "actual", ".", "keys", "(", ")", ":", "a_proc_names", "=", "actual", "[", "e_sentry", "]", "else", ":", "return", "(", "'Expected sentry ({}) not found in actual dict data.'", "'{}'", ".", "format", "(", "e_sentry_name", ",", "e_sentry", ")", ")", "if", "len", "(", "e_proc_names", ".", "keys", "(", ")", ")", "!=", "len", "(", "a_proc_names", ".", "keys", "(", ")", ")", ":", "return", "(", "'Process name count mismatch. expected, actual: {}, '", "'{}'", ".", "format", "(", "len", "(", "expected", ")", ",", "len", "(", "actual", ")", ")", ")", "for", "(", "e_proc_name", ",", "e_pids", ")", ",", "(", "a_proc_name", ",", "a_pids", ")", "in", "zip", "(", "e_proc_names", ".", "items", "(", ")", ",", "a_proc_names", ".", "items", "(", ")", ")", ":", "if", "e_proc_name", "!=", "a_proc_name", ":", "return", "(", "'Process name mismatch. expected, actual: {}, '", "'{}'", ".", "format", "(", "e_proc_name", ",", "a_proc_name", ")", ")", "a_pids_length", "=", "len", "(", "a_pids", ")", "fail_msg", "=", "(", "'PID count mismatch. 
{} ({}) expected, actual: '", "'{}, {} ({})'", ".", "format", "(", "e_sentry_name", ",", "e_proc_name", ",", "e_pids", ",", "a_pids_length", ",", "a_pids", ")", ")", "# If expected is a list, ensure at least one PID quantity match", "if", "isinstance", "(", "e_pids", ",", "list", ")", "and", "a_pids_length", "not", "in", "e_pids", ":", "return", "fail_msg", "# If expected is not bool and not list,", "# ensure PID quantities match", "elif", "not", "isinstance", "(", "e_pids", ",", "bool", ")", "and", "not", "isinstance", "(", "e_pids", ",", "list", ")", "and", "a_pids_length", "!=", "e_pids", ":", "return", "fail_msg", "# If expected is bool True, ensure 1 or more PIDs exist", "elif", "isinstance", "(", "e_pids", ",", "bool", ")", "and", "e_pids", "is", "True", "and", "a_pids_length", "<", "1", ":", "return", "fail_msg", "# If expected is bool False, ensure 0 PIDs exist", "elif", "isinstance", "(", "e_pids", ",", "bool", ")", "and", "e_pids", "is", "False", "and", "a_pids_length", "!=", "0", ":", "return", "fail_msg", "else", ":", "self", ".", "log", ".", "debug", "(", "'PID check OK: {} {} {}: '", "'{}'", ".", "format", "(", "e_sentry_name", ",", "e_proc_name", ",", "e_pids", ",", "a_pids", ")", ")", "return", "None" ]
Validate process id quantities for services on units.
[ "Validate", "process", "id", "quantities", "for", "services", "on", "units", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L580-L636
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.validate_list_of_identical_dicts
def validate_list_of_identical_dicts(self, list_of_dicts): """Check that all dicts within a list are identical.""" hashes = [] for _dict in list_of_dicts: hashes.append(hash(frozenset(_dict.items()))) self.log.debug('Hashes: {}'.format(hashes)) if len(set(hashes)) == 1: self.log.debug('Dicts within list are identical') else: return 'Dicts within list are not identical' return None
python
def validate_list_of_identical_dicts(self, list_of_dicts): """Check that all dicts within a list are identical.""" hashes = [] for _dict in list_of_dicts: hashes.append(hash(frozenset(_dict.items()))) self.log.debug('Hashes: {}'.format(hashes)) if len(set(hashes)) == 1: self.log.debug('Dicts within list are identical') else: return 'Dicts within list are not identical' return None
[ "def", "validate_list_of_identical_dicts", "(", "self", ",", "list_of_dicts", ")", ":", "hashes", "=", "[", "]", "for", "_dict", "in", "list_of_dicts", ":", "hashes", ".", "append", "(", "hash", "(", "frozenset", "(", "_dict", ".", "items", "(", ")", ")", ")", ")", "self", ".", "log", ".", "debug", "(", "'Hashes: {}'", ".", "format", "(", "hashes", ")", ")", "if", "len", "(", "set", "(", "hashes", ")", ")", "==", "1", ":", "self", ".", "log", ".", "debug", "(", "'Dicts within list are identical'", ")", "else", ":", "return", "'Dicts within list are not identical'", "return", "None" ]
Check that all dicts within a list are identical.
[ "Check", "that", "all", "dicts", "within", "a", "list", "are", "identical", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L638-L650
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.get_unit_hostnames
def get_unit_hostnames(self, units): """Return a dict of juju unit names to hostnames.""" host_names = {} for unit in units: host_names[unit.info['unit_name']] = \ str(unit.file_contents('/etc/hostname').strip()) self.log.debug('Unit host names: {}'.format(host_names)) return host_names
python
def get_unit_hostnames(self, units): """Return a dict of juju unit names to hostnames.""" host_names = {} for unit in units: host_names[unit.info['unit_name']] = \ str(unit.file_contents('/etc/hostname').strip()) self.log.debug('Unit host names: {}'.format(host_names)) return host_names
[ "def", "get_unit_hostnames", "(", "self", ",", "units", ")", ":", "host_names", "=", "{", "}", "for", "unit", "in", "units", ":", "host_names", "[", "unit", ".", "info", "[", "'unit_name'", "]", "]", "=", "str", "(", "unit", ".", "file_contents", "(", "'/etc/hostname'", ")", ".", "strip", "(", ")", ")", "self", ".", "log", ".", "debug", "(", "'Unit host names: {}'", ".", "format", "(", "host_names", ")", ")", "return", "host_names" ]
Return a dict of juju unit names to hostnames.
[ "Return", "a", "dict", "of", "juju", "unit", "names", "to", "hostnames", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L669-L676
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.run_cmd_unit
def run_cmd_unit(self, sentry_unit, cmd): """Run a command on a unit, return the output and exit code.""" output, code = sentry_unit.run(cmd) if code == 0: self.log.debug('{} `{}` command returned {} ' '(OK)'.format(sentry_unit.info['unit_name'], cmd, code)) else: msg = ('{} `{}` command returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, code, output)) amulet.raise_status(amulet.FAIL, msg=msg) return str(output), code
python
def run_cmd_unit(self, sentry_unit, cmd): """Run a command on a unit, return the output and exit code.""" output, code = sentry_unit.run(cmd) if code == 0: self.log.debug('{} `{}` command returned {} ' '(OK)'.format(sentry_unit.info['unit_name'], cmd, code)) else: msg = ('{} `{}` command returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, code, output)) amulet.raise_status(amulet.FAIL, msg=msg) return str(output), code
[ "def", "run_cmd_unit", "(", "self", ",", "sentry_unit", ",", "cmd", ")", ":", "output", ",", "code", "=", "sentry_unit", ".", "run", "(", "cmd", ")", "if", "code", "==", "0", ":", "self", ".", "log", ".", "debug", "(", "'{} `{}` command returned {} '", "'(OK)'", ".", "format", "(", "sentry_unit", ".", "info", "[", "'unit_name'", "]", ",", "cmd", ",", "code", ")", ")", "else", ":", "msg", "=", "(", "'{} `{}` command returned {} '", "'{}'", ".", "format", "(", "sentry_unit", ".", "info", "[", "'unit_name'", "]", ",", "cmd", ",", "code", ",", "output", ")", ")", "amulet", ".", "raise_status", "(", "amulet", ".", "FAIL", ",", "msg", "=", "msg", ")", "return", "str", "(", "output", ")", ",", "code" ]
Run a command on a unit, return the output and exit code.
[ "Run", "a", "command", "on", "a", "unit", "return", "the", "output", "and", "exit", "code", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L678-L690
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.file_exists_on_unit
def file_exists_on_unit(self, sentry_unit, file_name): """Check if a file exists on a unit.""" try: sentry_unit.file_stat(file_name) return True except IOError: return False except Exception as e: msg = 'Error checking file {}: {}'.format(file_name, e) amulet.raise_status(amulet.FAIL, msg=msg)
python
def file_exists_on_unit(self, sentry_unit, file_name): """Check if a file exists on a unit.""" try: sentry_unit.file_stat(file_name) return True except IOError: return False except Exception as e: msg = 'Error checking file {}: {}'.format(file_name, e) amulet.raise_status(amulet.FAIL, msg=msg)
[ "def", "file_exists_on_unit", "(", "self", ",", "sentry_unit", ",", "file_name", ")", ":", "try", ":", "sentry_unit", ".", "file_stat", "(", "file_name", ")", "return", "True", "except", "IOError", ":", "return", "False", "except", "Exception", "as", "e", ":", "msg", "=", "'Error checking file {}: {}'", ".", "format", "(", "file_name", ",", "e", ")", "amulet", ".", "raise_status", "(", "amulet", ".", "FAIL", ",", "msg", "=", "msg", ")" ]
Check if a file exists on a unit.
[ "Check", "if", "a", "file", "exists", "on", "a", "unit", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L692-L701
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.file_contents_safe
def file_contents_safe(self, sentry_unit, file_name, max_wait=60, fatal=False): """Get file contents from a sentry unit. Wrap amulet file_contents with retry logic to address races where a file checks as existing, but no longer exists by the time file_contents is called. Return None if file not found. Optionally raise if fatal is True.""" unit_name = sentry_unit.info['unit_name'] file_contents = False tries = 0 while not file_contents and tries < (max_wait / 4): try: file_contents = sentry_unit.file_contents(file_name) except IOError: self.log.debug('Attempt {} to open file {} from {} ' 'failed'.format(tries, file_name, unit_name)) time.sleep(4) tries += 1 if file_contents: return file_contents elif not fatal: return None elif fatal: msg = 'Failed to get file contents from unit.' amulet.raise_status(amulet.FAIL, msg)
python
def file_contents_safe(self, sentry_unit, file_name, max_wait=60, fatal=False): """Get file contents from a sentry unit. Wrap amulet file_contents with retry logic to address races where a file checks as existing, but no longer exists by the time file_contents is called. Return None if file not found. Optionally raise if fatal is True.""" unit_name = sentry_unit.info['unit_name'] file_contents = False tries = 0 while not file_contents and tries < (max_wait / 4): try: file_contents = sentry_unit.file_contents(file_name) except IOError: self.log.debug('Attempt {} to open file {} from {} ' 'failed'.format(tries, file_name, unit_name)) time.sleep(4) tries += 1 if file_contents: return file_contents elif not fatal: return None elif fatal: msg = 'Failed to get file contents from unit.' amulet.raise_status(amulet.FAIL, msg)
[ "def", "file_contents_safe", "(", "self", ",", "sentry_unit", ",", "file_name", ",", "max_wait", "=", "60", ",", "fatal", "=", "False", ")", ":", "unit_name", "=", "sentry_unit", ".", "info", "[", "'unit_name'", "]", "file_contents", "=", "False", "tries", "=", "0", "while", "not", "file_contents", "and", "tries", "<", "(", "max_wait", "/", "4", ")", ":", "try", ":", "file_contents", "=", "sentry_unit", ".", "file_contents", "(", "file_name", ")", "except", "IOError", ":", "self", ".", "log", ".", "debug", "(", "'Attempt {} to open file {} from {} '", "'failed'", ".", "format", "(", "tries", ",", "file_name", ",", "unit_name", ")", ")", "time", ".", "sleep", "(", "4", ")", "tries", "+=", "1", "if", "file_contents", ":", "return", "file_contents", "elif", "not", "fatal", ":", "return", "None", "elif", "fatal", ":", "msg", "=", "'Failed to get file contents from unit.'", "amulet", ".", "raise_status", "(", "amulet", ".", "FAIL", ",", "msg", ")" ]
Get file contents from a sentry unit. Wrap amulet file_contents with retry logic to address races where a file checks as existing, but no longer exists by the time file_contents is called. Return None if file not found. Optionally raise if fatal is True.
[ "Get", "file", "contents", "from", "a", "sentry", "unit", ".", "Wrap", "amulet", "file_contents", "with", "retry", "logic", "to", "address", "races", "where", "a", "file", "checks", "as", "existing", "but", "no", "longer", "exists", "by", "the", "time", "file_contents", "is", "called", ".", "Return", "None", "if", "file", "not", "found", ".", "Optionally", "raise", "if", "fatal", "is", "True", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L703-L728
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.port_knock_tcp
def port_knock_tcp(self, host="localhost", port=22, timeout=15): """Open a TCP socket to check for a listening sevice on a host. :param host: host name or IP address, default to localhost :param port: TCP port number, default to 22 :param timeout: Connect timeout, default to 15 seconds :returns: True if successful, False if connect failed """ # Resolve host name if possible try: connect_host = socket.gethostbyname(host) host_human = "{} ({})".format(connect_host, host) except socket.error as e: self.log.warn('Unable to resolve address: ' '{} ({}) Trying anyway!'.format(host, e)) connect_host = host host_human = connect_host # Attempt socket connection try: knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) knock.settimeout(timeout) knock.connect((connect_host, port)) knock.close() self.log.debug('Socket connect OK for host ' '{} on port {}.'.format(host_human, port)) return True except socket.error as e: self.log.debug('Socket connect FAIL for' ' {} port {} ({})'.format(host_human, port, e)) return False
python
def port_knock_tcp(self, host="localhost", port=22, timeout=15): """Open a TCP socket to check for a listening sevice on a host. :param host: host name or IP address, default to localhost :param port: TCP port number, default to 22 :param timeout: Connect timeout, default to 15 seconds :returns: True if successful, False if connect failed """ # Resolve host name if possible try: connect_host = socket.gethostbyname(host) host_human = "{} ({})".format(connect_host, host) except socket.error as e: self.log.warn('Unable to resolve address: ' '{} ({}) Trying anyway!'.format(host, e)) connect_host = host host_human = connect_host # Attempt socket connection try: knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) knock.settimeout(timeout) knock.connect((connect_host, port)) knock.close() self.log.debug('Socket connect OK for host ' '{} on port {}.'.format(host_human, port)) return True except socket.error as e: self.log.debug('Socket connect FAIL for' ' {} port {} ({})'.format(host_human, port, e)) return False
[ "def", "port_knock_tcp", "(", "self", ",", "host", "=", "\"localhost\"", ",", "port", "=", "22", ",", "timeout", "=", "15", ")", ":", "# Resolve host name if possible", "try", ":", "connect_host", "=", "socket", ".", "gethostbyname", "(", "host", ")", "host_human", "=", "\"{} ({})\"", ".", "format", "(", "connect_host", ",", "host", ")", "except", "socket", ".", "error", "as", "e", ":", "self", ".", "log", ".", "warn", "(", "'Unable to resolve address: '", "'{} ({}) Trying anyway!'", ".", "format", "(", "host", ",", "e", ")", ")", "connect_host", "=", "host", "host_human", "=", "connect_host", "# Attempt socket connection", "try", ":", "knock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "knock", ".", "settimeout", "(", "timeout", ")", "knock", ".", "connect", "(", "(", "connect_host", ",", "port", ")", ")", "knock", ".", "close", "(", ")", "self", ".", "log", ".", "debug", "(", "'Socket connect OK for host '", "'{} on port {}.'", ".", "format", "(", "host_human", ",", "port", ")", ")", "return", "True", "except", "socket", ".", "error", "as", "e", ":", "self", ".", "log", ".", "debug", "(", "'Socket connect FAIL for'", "' {} port {} ({})'", ".", "format", "(", "host_human", ",", "port", ",", "e", ")", ")", "return", "False" ]
Open a TCP socket to check for a listening sevice on a host. :param host: host name or IP address, default to localhost :param port: TCP port number, default to 22 :param timeout: Connect timeout, default to 15 seconds :returns: True if successful, False if connect failed
[ "Open", "a", "TCP", "socket", "to", "check", "for", "a", "listening", "sevice", "on", "a", "host", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L730-L761
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.port_knock_units
def port_knock_units(self, sentry_units, port=22, timeout=15, expect_success=True): """Open a TCP socket to check for a listening sevice on each listed juju unit. :param sentry_units: list of sentry unit pointers :param port: TCP port number, default to 22 :param timeout: Connect timeout, default to 15 seconds :expect_success: True by default, set False to invert logic :returns: None if successful, Failure message otherwise """ for unit in sentry_units: host = unit.info['public-address'] connected = self.port_knock_tcp(host, port, timeout) if not connected and expect_success: return 'Socket connect failed.' elif connected and not expect_success: return 'Socket connected unexpectedly.'
python
def port_knock_units(self, sentry_units, port=22, timeout=15, expect_success=True): """Open a TCP socket to check for a listening sevice on each listed juju unit. :param sentry_units: list of sentry unit pointers :param port: TCP port number, default to 22 :param timeout: Connect timeout, default to 15 seconds :expect_success: True by default, set False to invert logic :returns: None if successful, Failure message otherwise """ for unit in sentry_units: host = unit.info['public-address'] connected = self.port_knock_tcp(host, port, timeout) if not connected and expect_success: return 'Socket connect failed.' elif connected and not expect_success: return 'Socket connected unexpectedly.'
[ "def", "port_knock_units", "(", "self", ",", "sentry_units", ",", "port", "=", "22", ",", "timeout", "=", "15", ",", "expect_success", "=", "True", ")", ":", "for", "unit", "in", "sentry_units", ":", "host", "=", "unit", ".", "info", "[", "'public-address'", "]", "connected", "=", "self", ".", "port_knock_tcp", "(", "host", ",", "port", ",", "timeout", ")", "if", "not", "connected", "and", "expect_success", ":", "return", "'Socket connect failed.'", "elif", "connected", "and", "not", "expect_success", ":", "return", "'Socket connected unexpectedly.'" ]
Open a TCP socket to check for a listening sevice on each listed juju unit. :param sentry_units: list of sentry unit pointers :param port: TCP port number, default to 22 :param timeout: Connect timeout, default to 15 seconds :expect_success: True by default, set False to invert logic :returns: None if successful, Failure message otherwise
[ "Open", "a", "TCP", "socket", "to", "check", "for", "a", "listening", "sevice", "on", "each", "listed", "juju", "unit", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L763-L780
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.wait_on_action
def wait_on_action(self, action_id, _check_output=subprocess.check_output): """Wait for a given action, returning if it completed or not. action_id a string action uuid _check_output parameter is no longer used """ data = amulet.actions.get_action_output(action_id, full_output=True) return data.get(u"status") == "completed"
python
def wait_on_action(self, action_id, _check_output=subprocess.check_output): """Wait for a given action, returning if it completed or not. action_id a string action uuid _check_output parameter is no longer used """ data = amulet.actions.get_action_output(action_id, full_output=True) return data.get(u"status") == "completed"
[ "def", "wait_on_action", "(", "self", ",", "action_id", ",", "_check_output", "=", "subprocess", ".", "check_output", ")", ":", "data", "=", "amulet", ".", "actions", ".", "get_action_output", "(", "action_id", ",", "full_output", "=", "True", ")", "return", "data", ".", "get", "(", "u\"status\"", ")", "==", "\"completed\"" ]
Wait for a given action, returning if it completed or not. action_id a string action uuid _check_output parameter is no longer used
[ "Wait", "for", "a", "given", "action", "returning", "if", "it", "completed", "or", "not", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L804-L811
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
AmuletUtils.status_get
def status_get(self, unit): """Return the current service status of this unit.""" raw_status, return_code = unit.run( "status-get --format=json --include-data") if return_code != 0: return ("unknown", "") status = json.loads(raw_status) return (status["status"], status["message"])
python
def status_get(self, unit): """Return the current service status of this unit.""" raw_status, return_code = unit.run( "status-get --format=json --include-data") if return_code != 0: return ("unknown", "") status = json.loads(raw_status) return (status["status"], status["message"])
[ "def", "status_get", "(", "self", ",", "unit", ")", ":", "raw_status", ",", "return_code", "=", "unit", ".", "run", "(", "\"status-get --format=json --include-data\"", ")", "if", "return_code", "!=", "0", ":", "return", "(", "\"unknown\"", ",", "\"\"", ")", "status", "=", "json", ".", "loads", "(", "raw_status", ")", "return", "(", "status", "[", "\"status\"", "]", ",", "status", "[", "\"message\"", "]", ")" ]
Return the current service status of this unit.
[ "Return", "the", "current", "service", "status", "of", "this", "unit", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L813-L820
train
juju/charm-helpers
charmhelpers/contrib/database/mysql.py
MySQLHelper.execute
def execute(self, sql): """Execute arbitary SQL against the database.""" cursor = self.connection.cursor() try: cursor.execute(sql) finally: cursor.close()
python
def execute(self, sql): """Execute arbitary SQL against the database.""" cursor = self.connection.cursor() try: cursor.execute(sql) finally: cursor.close()
[ "def", "execute", "(", "self", ",", "sql", ")", ":", "cursor", "=", "self", ".", "connection", ".", "cursor", "(", ")", "try", ":", "cursor", ".", "execute", "(", "sql", ")", "finally", ":", "cursor", ".", "close", "(", ")" ]
Execute arbitary SQL against the database.
[ "Execute", "arbitary", "SQL", "against", "the", "database", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/database/mysql.py#L160-L166
train
juju/charm-helpers
charmhelpers/contrib/database/mysql.py
MySQLHelper.select
def select(self, sql): """ Execute arbitrary SQL select query against the database and return the results. :param sql: SQL select query to execute :type sql: string :returns: SQL select query result :rtype: list of lists :raises: MySQLdb.Error """ cursor = self.connection.cursor() try: cursor.execute(sql) results = [list(i) for i in cursor.fetchall()] finally: cursor.close() return results
python
def select(self, sql): """ Execute arbitrary SQL select query against the database and return the results. :param sql: SQL select query to execute :type sql: string :returns: SQL select query result :rtype: list of lists :raises: MySQLdb.Error """ cursor = self.connection.cursor() try: cursor.execute(sql) results = [list(i) for i in cursor.fetchall()] finally: cursor.close() return results
[ "def", "select", "(", "self", ",", "sql", ")", ":", "cursor", "=", "self", ".", "connection", ".", "cursor", "(", ")", "try", ":", "cursor", ".", "execute", "(", "sql", ")", "results", "=", "[", "list", "(", "i", ")", "for", "i", "in", "cursor", ".", "fetchall", "(", ")", "]", "finally", ":", "cursor", ".", "close", "(", ")", "return", "results" ]
Execute arbitrary SQL select query against the database and return the results. :param sql: SQL select query to execute :type sql: string :returns: SQL select query result :rtype: list of lists :raises: MySQLdb.Error
[ "Execute", "arbitrary", "SQL", "select", "query", "against", "the", "database", "and", "return", "the", "results", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/database/mysql.py#L168-L185
train
juju/charm-helpers
charmhelpers/contrib/database/mysql.py
MySQLHelper.migrate_passwords_to_leader_storage
def migrate_passwords_to_leader_storage(self, excludes=None): """Migrate any passwords storage on disk to leader storage.""" if not is_leader(): log("Skipping password migration as not the lead unit", level=DEBUG) return dirname = os.path.dirname(self.root_passwd_file_template) path = os.path.join(dirname, '*.passwd') for f in glob.glob(path): if excludes and f in excludes: log("Excluding %s from leader storage migration" % (f), level=DEBUG) continue key = os.path.basename(f) with open(f, 'r') as passwd: _value = passwd.read().strip() try: leader_set(settings={key: _value}) if self.delete_ondisk_passwd_file: os.unlink(f) except ValueError: # NOTE cluster relation not yet ready - skip for now pass
python
def migrate_passwords_to_leader_storage(self, excludes=None): """Migrate any passwords storage on disk to leader storage.""" if not is_leader(): log("Skipping password migration as not the lead unit", level=DEBUG) return dirname = os.path.dirname(self.root_passwd_file_template) path = os.path.join(dirname, '*.passwd') for f in glob.glob(path): if excludes and f in excludes: log("Excluding %s from leader storage migration" % (f), level=DEBUG) continue key = os.path.basename(f) with open(f, 'r') as passwd: _value = passwd.read().strip() try: leader_set(settings={key: _value}) if self.delete_ondisk_passwd_file: os.unlink(f) except ValueError: # NOTE cluster relation not yet ready - skip for now pass
[ "def", "migrate_passwords_to_leader_storage", "(", "self", ",", "excludes", "=", "None", ")", ":", "if", "not", "is_leader", "(", ")", ":", "log", "(", "\"Skipping password migration as not the lead unit\"", ",", "level", "=", "DEBUG", ")", "return", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "root_passwd_file_template", ")", "path", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "'*.passwd'", ")", "for", "f", "in", "glob", ".", "glob", "(", "path", ")", ":", "if", "excludes", "and", "f", "in", "excludes", ":", "log", "(", "\"Excluding %s from leader storage migration\"", "%", "(", "f", ")", ",", "level", "=", "DEBUG", ")", "continue", "key", "=", "os", ".", "path", ".", "basename", "(", "f", ")", "with", "open", "(", "f", ",", "'r'", ")", "as", "passwd", ":", "_value", "=", "passwd", ".", "read", "(", ")", ".", "strip", "(", ")", "try", ":", "leader_set", "(", "settings", "=", "{", "key", ":", "_value", "}", ")", "if", "self", ".", "delete_ondisk_passwd_file", ":", "os", ".", "unlink", "(", "f", ")", "except", "ValueError", ":", "# NOTE cluster relation not yet ready - skip for now", "pass" ]
Migrate any passwords storage on disk to leader storage.
[ "Migrate", "any", "passwords", "storage", "on", "disk", "to", "leader", "storage", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/database/mysql.py#L187-L212
train
juju/charm-helpers
charmhelpers/contrib/database/mysql.py
MySQLHelper.get_mysql_password_on_disk
def get_mysql_password_on_disk(self, username=None, password=None): """Retrieve, generate or store a mysql password for the provided username on disk.""" if username: template = self.user_passwd_file_template passwd_file = template.format(username) else: passwd_file = self.root_passwd_file_template _password = None if os.path.exists(passwd_file): log("Using existing password file '%s'" % passwd_file, level=DEBUG) with open(passwd_file, 'r') as passwd: _password = passwd.read().strip() else: log("Generating new password file '%s'" % passwd_file, level=DEBUG) if not os.path.isdir(os.path.dirname(passwd_file)): # NOTE: need to ensure this is not mysql root dir (which needs # to be mysql readable) mkdir(os.path.dirname(passwd_file), owner='root', group='root', perms=0o770) # Force permissions - for some reason the chmod in makedirs # fails os.chmod(os.path.dirname(passwd_file), 0o770) _password = password or pwgen(length=32) write_file(passwd_file, _password, owner='root', group='root', perms=0o660) return _password
python
def get_mysql_password_on_disk(self, username=None, password=None): """Retrieve, generate or store a mysql password for the provided username on disk.""" if username: template = self.user_passwd_file_template passwd_file = template.format(username) else: passwd_file = self.root_passwd_file_template _password = None if os.path.exists(passwd_file): log("Using existing password file '%s'" % passwd_file, level=DEBUG) with open(passwd_file, 'r') as passwd: _password = passwd.read().strip() else: log("Generating new password file '%s'" % passwd_file, level=DEBUG) if not os.path.isdir(os.path.dirname(passwd_file)): # NOTE: need to ensure this is not mysql root dir (which needs # to be mysql readable) mkdir(os.path.dirname(passwd_file), owner='root', group='root', perms=0o770) # Force permissions - for some reason the chmod in makedirs # fails os.chmod(os.path.dirname(passwd_file), 0o770) _password = password or pwgen(length=32) write_file(passwd_file, _password, owner='root', group='root', perms=0o660) return _password
[ "def", "get_mysql_password_on_disk", "(", "self", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "if", "username", ":", "template", "=", "self", ".", "user_passwd_file_template", "passwd_file", "=", "template", ".", "format", "(", "username", ")", "else", ":", "passwd_file", "=", "self", ".", "root_passwd_file_template", "_password", "=", "None", "if", "os", ".", "path", ".", "exists", "(", "passwd_file", ")", ":", "log", "(", "\"Using existing password file '%s'\"", "%", "passwd_file", ",", "level", "=", "DEBUG", ")", "with", "open", "(", "passwd_file", ",", "'r'", ")", "as", "passwd", ":", "_password", "=", "passwd", ".", "read", "(", ")", ".", "strip", "(", ")", "else", ":", "log", "(", "\"Generating new password file '%s'\"", "%", "passwd_file", ",", "level", "=", "DEBUG", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "dirname", "(", "passwd_file", ")", ")", ":", "# NOTE: need to ensure this is not mysql root dir (which needs", "# to be mysql readable)", "mkdir", "(", "os", ".", "path", ".", "dirname", "(", "passwd_file", ")", ",", "owner", "=", "'root'", ",", "group", "=", "'root'", ",", "perms", "=", "0o770", ")", "# Force permissions - for some reason the chmod in makedirs", "# fails", "os", ".", "chmod", "(", "os", ".", "path", ".", "dirname", "(", "passwd_file", ")", ",", "0o770", ")", "_password", "=", "password", "or", "pwgen", "(", "length", "=", "32", ")", "write_file", "(", "passwd_file", ",", "_password", ",", "owner", "=", "'root'", ",", "group", "=", "'root'", ",", "perms", "=", "0o660", ")", "return", "_password" ]
Retrieve, generate or store a mysql password for the provided username on disk.
[ "Retrieve", "generate", "or", "store", "a", "mysql", "password", "for", "the", "provided", "username", "on", "disk", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/database/mysql.py#L214-L243
train
juju/charm-helpers
charmhelpers/contrib/database/mysql.py
MySQLHelper.passwd_keys
def passwd_keys(self, username): """Generator to return keys used to store passwords in peer store. NOTE: we support both legacy and new format to support mysql charm prior to refactor. This is necessary to avoid LP 1451890. """ keys = [] if username == 'mysql': log("Bad username '%s'" % (username), level=WARNING) if username: # IMPORTANT: *newer* format must be returned first keys.append('mysql-%s.passwd' % (username)) keys.append('%s.passwd' % (username)) else: keys.append('mysql.passwd') for key in keys: yield key
python
def passwd_keys(self, username): """Generator to return keys used to store passwords in peer store. NOTE: we support both legacy and new format to support mysql charm prior to refactor. This is necessary to avoid LP 1451890. """ keys = [] if username == 'mysql': log("Bad username '%s'" % (username), level=WARNING) if username: # IMPORTANT: *newer* format must be returned first keys.append('mysql-%s.passwd' % (username)) keys.append('%s.passwd' % (username)) else: keys.append('mysql.passwd') for key in keys: yield key
[ "def", "passwd_keys", "(", "self", ",", "username", ")", ":", "keys", "=", "[", "]", "if", "username", "==", "'mysql'", ":", "log", "(", "\"Bad username '%s'\"", "%", "(", "username", ")", ",", "level", "=", "WARNING", ")", "if", "username", ":", "# IMPORTANT: *newer* format must be returned first", "keys", ".", "append", "(", "'mysql-%s.passwd'", "%", "(", "username", ")", ")", "keys", ".", "append", "(", "'%s.passwd'", "%", "(", "username", ")", ")", "else", ":", "keys", ".", "append", "(", "'mysql.passwd'", ")", "for", "key", "in", "keys", ":", "yield", "key" ]
Generator to return keys used to store passwords in peer store. NOTE: we support both legacy and new format to support mysql charm prior to refactor. This is necessary to avoid LP 1451890.
[ "Generator", "to", "return", "keys", "used", "to", "store", "passwords", "in", "peer", "store", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/database/mysql.py#L245-L263
train
juju/charm-helpers
charmhelpers/contrib/database/mysql.py
MySQLHelper.get_mysql_password
def get_mysql_password(self, username=None, password=None): """Retrieve, generate or store a mysql password for the provided username using peer relation cluster.""" excludes = [] # First check peer relation. try: for key in self.passwd_keys(username): _password = leader_get(key) if _password: break # If root password available don't update peer relation from local if _password and not username: excludes.append(self.root_passwd_file_template) except ValueError: # cluster relation is not yet started; use on-disk _password = None # If none available, generate new one if not _password: _password = self.get_mysql_password_on_disk(username, password) # Put on wire if required if self.migrate_passwd_to_leader_storage: self.migrate_passwords_to_leader_storage(excludes=excludes) return _password
python
def get_mysql_password(self, username=None, password=None): """Retrieve, generate or store a mysql password for the provided username using peer relation cluster.""" excludes = [] # First check peer relation. try: for key in self.passwd_keys(username): _password = leader_get(key) if _password: break # If root password available don't update peer relation from local if _password and not username: excludes.append(self.root_passwd_file_template) except ValueError: # cluster relation is not yet started; use on-disk _password = None # If none available, generate new one if not _password: _password = self.get_mysql_password_on_disk(username, password) # Put on wire if required if self.migrate_passwd_to_leader_storage: self.migrate_passwords_to_leader_storage(excludes=excludes) return _password
[ "def", "get_mysql_password", "(", "self", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "excludes", "=", "[", "]", "# First check peer relation.", "try", ":", "for", "key", "in", "self", ".", "passwd_keys", "(", "username", ")", ":", "_password", "=", "leader_get", "(", "key", ")", "if", "_password", ":", "break", "# If root password available don't update peer relation from local", "if", "_password", "and", "not", "username", ":", "excludes", ".", "append", "(", "self", ".", "root_passwd_file_template", ")", "except", "ValueError", ":", "# cluster relation is not yet started; use on-disk", "_password", "=", "None", "# If none available, generate new one", "if", "not", "_password", ":", "_password", "=", "self", ".", "get_mysql_password_on_disk", "(", "username", ",", "password", ")", "# Put on wire if required", "if", "self", ".", "migrate_passwd_to_leader_storage", ":", "self", ".", "migrate_passwords_to_leader_storage", "(", "excludes", "=", "excludes", ")", "return", "_password" ]
Retrieve, generate or store a mysql password for the provided username using peer relation cluster.
[ "Retrieve", "generate", "or", "store", "a", "mysql", "password", "for", "the", "provided", "username", "using", "peer", "relation", "cluster", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/database/mysql.py#L265-L293
train
juju/charm-helpers
charmhelpers/contrib/database/mysql.py
MySQLHelper.set_mysql_password
def set_mysql_password(self, username, password): """Update a mysql password for the provided username changing the leader settings To update root's password pass `None` in the username """ if username is None: username = 'root' # get root password via leader-get, it may be that in the past (when # changes to root-password were not supported) the user changed the # password, so leader-get is more reliable source than # config.previous('root-password'). rel_username = None if username == 'root' else username cur_passwd = self.get_mysql_password(rel_username) # password that needs to be set new_passwd = password # update password for all users (e.g. root@localhost, root@::1, etc) try: self.connect(user=username, password=cur_passwd) cursor = self.connection.cursor() except MySQLdb.OperationalError as ex: raise MySQLSetPasswordError(('Cannot connect using password in ' 'leader settings (%s)') % ex, ex) try: # NOTE(freyes): Due to skip-name-resolve root@$HOSTNAME account # fails when using SET PASSWORD so using UPDATE against the # mysql.user table is needed, but changes to this table are not # replicated across the cluster, so this update needs to run in # all the nodes. 
More info at # http://galeracluster.com/documentation-webpages/userchanges.html release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) if release < 'bionic': SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET password = " "PASSWORD( %s ) WHERE user = %s;") else: # PXC 5.7 (introduced in Bionic) uses authentication_string SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET " "authentication_string = " "PASSWORD( %s ) WHERE user = %s;") cursor.execute(SQL_UPDATE_PASSWD, (new_passwd, username)) cursor.execute('FLUSH PRIVILEGES;') self.connection.commit() except MySQLdb.OperationalError as ex: raise MySQLSetPasswordError('Cannot update password: %s' % str(ex), ex) finally: cursor.close() # check the password was changed try: self.connect(user=username, password=new_passwd) self.execute('select 1;') except MySQLdb.OperationalError as ex: raise MySQLSetPasswordError(('Cannot connect using new password: ' '%s') % str(ex), ex) if not is_leader(): log('Only the leader can set a new password in the relation', level=DEBUG) return for key in self.passwd_keys(rel_username): _password = leader_get(key) if _password: log('Updating password for %s (%s)' % (key, rel_username), level=DEBUG) leader_set(settings={key: new_passwd})
python
def set_mysql_password(self, username, password): """Update a mysql password for the provided username changing the leader settings To update root's password pass `None` in the username """ if username is None: username = 'root' # get root password via leader-get, it may be that in the past (when # changes to root-password were not supported) the user changed the # password, so leader-get is more reliable source than # config.previous('root-password'). rel_username = None if username == 'root' else username cur_passwd = self.get_mysql_password(rel_username) # password that needs to be set new_passwd = password # update password for all users (e.g. root@localhost, root@::1, etc) try: self.connect(user=username, password=cur_passwd) cursor = self.connection.cursor() except MySQLdb.OperationalError as ex: raise MySQLSetPasswordError(('Cannot connect using password in ' 'leader settings (%s)') % ex, ex) try: # NOTE(freyes): Due to skip-name-resolve root@$HOSTNAME account # fails when using SET PASSWORD so using UPDATE against the # mysql.user table is needed, but changes to this table are not # replicated across the cluster, so this update needs to run in # all the nodes. 
More info at # http://galeracluster.com/documentation-webpages/userchanges.html release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) if release < 'bionic': SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET password = " "PASSWORD( %s ) WHERE user = %s;") else: # PXC 5.7 (introduced in Bionic) uses authentication_string SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET " "authentication_string = " "PASSWORD( %s ) WHERE user = %s;") cursor.execute(SQL_UPDATE_PASSWD, (new_passwd, username)) cursor.execute('FLUSH PRIVILEGES;') self.connection.commit() except MySQLdb.OperationalError as ex: raise MySQLSetPasswordError('Cannot update password: %s' % str(ex), ex) finally: cursor.close() # check the password was changed try: self.connect(user=username, password=new_passwd) self.execute('select 1;') except MySQLdb.OperationalError as ex: raise MySQLSetPasswordError(('Cannot connect using new password: ' '%s') % str(ex), ex) if not is_leader(): log('Only the leader can set a new password in the relation', level=DEBUG) return for key in self.passwd_keys(rel_username): _password = leader_get(key) if _password: log('Updating password for %s (%s)' % (key, rel_username), level=DEBUG) leader_set(settings={key: new_passwd})
[ "def", "set_mysql_password", "(", "self", ",", "username", ",", "password", ")", ":", "if", "username", "is", "None", ":", "username", "=", "'root'", "# get root password via leader-get, it may be that in the past (when", "# changes to root-password were not supported) the user changed the", "# password, so leader-get is more reliable source than", "# config.previous('root-password').", "rel_username", "=", "None", "if", "username", "==", "'root'", "else", "username", "cur_passwd", "=", "self", ".", "get_mysql_password", "(", "rel_username", ")", "# password that needs to be set", "new_passwd", "=", "password", "# update password for all users (e.g. root@localhost, root@::1, etc)", "try", ":", "self", ".", "connect", "(", "user", "=", "username", ",", "password", "=", "cur_passwd", ")", "cursor", "=", "self", ".", "connection", ".", "cursor", "(", ")", "except", "MySQLdb", ".", "OperationalError", "as", "ex", ":", "raise", "MySQLSetPasswordError", "(", "(", "'Cannot connect using password in '", "'leader settings (%s)'", ")", "%", "ex", ",", "ex", ")", "try", ":", "# NOTE(freyes): Due to skip-name-resolve root@$HOSTNAME account", "# fails when using SET PASSWORD so using UPDATE against the", "# mysql.user table is needed, but changes to this table are not", "# replicated across the cluster, so this update needs to run in", "# all the nodes. 
More info at", "# http://galeracluster.com/documentation-webpages/userchanges.html", "release", "=", "CompareHostReleases", "(", "lsb_release", "(", ")", "[", "'DISTRIB_CODENAME'", "]", ")", "if", "release", "<", "'bionic'", ":", "SQL_UPDATE_PASSWD", "=", "(", "\"UPDATE mysql.user SET password = \"", "\"PASSWORD( %s ) WHERE user = %s;\"", ")", "else", ":", "# PXC 5.7 (introduced in Bionic) uses authentication_string", "SQL_UPDATE_PASSWD", "=", "(", "\"UPDATE mysql.user SET \"", "\"authentication_string = \"", "\"PASSWORD( %s ) WHERE user = %s;\"", ")", "cursor", ".", "execute", "(", "SQL_UPDATE_PASSWD", ",", "(", "new_passwd", ",", "username", ")", ")", "cursor", ".", "execute", "(", "'FLUSH PRIVILEGES;'", ")", "self", ".", "connection", ".", "commit", "(", ")", "except", "MySQLdb", ".", "OperationalError", "as", "ex", ":", "raise", "MySQLSetPasswordError", "(", "'Cannot update password: %s'", "%", "str", "(", "ex", ")", ",", "ex", ")", "finally", ":", "cursor", ".", "close", "(", ")", "# check the password was changed", "try", ":", "self", ".", "connect", "(", "user", "=", "username", ",", "password", "=", "new_passwd", ")", "self", ".", "execute", "(", "'select 1;'", ")", "except", "MySQLdb", ".", "OperationalError", "as", "ex", ":", "raise", "MySQLSetPasswordError", "(", "(", "'Cannot connect using new password: '", "'%s'", ")", "%", "str", "(", "ex", ")", ",", "ex", ")", "if", "not", "is_leader", "(", ")", ":", "log", "(", "'Only the leader can set a new password in the relation'", ",", "level", "=", "DEBUG", ")", "return", "for", "key", "in", "self", ".", "passwd_keys", "(", "rel_username", ")", ":", "_password", "=", "leader_get", "(", "key", ")", "if", "_password", ":", "log", "(", "'Updating password for %s (%s)'", "%", "(", "key", ",", "rel_username", ")", ",", "level", "=", "DEBUG", ")", "leader_set", "(", "settings", "=", "{", "key", ":", "new_passwd", "}", ")" ]
Update a mysql password for the provided username changing the leader settings To update root's password pass `None` in the username
[ "Update", "a", "mysql", "password", "for", "the", "provided", "username", "changing", "the", "leader", "settings" ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/database/mysql.py#L299-L370
train
juju/charm-helpers
charmhelpers/contrib/database/mysql.py
MySQLHelper.get_allowed_units
def get_allowed_units(self, database, username, relation_id=None): """Get list of units with access grants for database with username. This is typically used to provide shared-db relations with a list of which units have been granted access to the given database. """ self.connect(password=self.get_mysql_root_password()) allowed_units = set() for unit in related_units(relation_id): settings = relation_get(rid=relation_id, unit=unit) # First check for setting with prefix, then without for attr in ["%s_hostname" % (database), 'hostname']: hosts = settings.get(attr, None) if hosts: break if hosts: # hostname can be json-encoded list of hostnames try: hosts = json.loads(hosts) except ValueError: hosts = [hosts] else: hosts = [settings['private-address']] if hosts: for host in hosts: host = self.normalize_address(host) if self.grant_exists(database, username, host): log("Grant exists for host '%s' on db '%s'" % (host, database), level=DEBUG) if unit not in allowed_units: allowed_units.add(unit) else: log("Grant does NOT exist for host '%s' on db '%s'" % (host, database), level=DEBUG) else: log("No hosts found for grant check", level=INFO) return allowed_units
python
def get_allowed_units(self, database, username, relation_id=None): """Get list of units with access grants for database with username. This is typically used to provide shared-db relations with a list of which units have been granted access to the given database. """ self.connect(password=self.get_mysql_root_password()) allowed_units = set() for unit in related_units(relation_id): settings = relation_get(rid=relation_id, unit=unit) # First check for setting with prefix, then without for attr in ["%s_hostname" % (database), 'hostname']: hosts = settings.get(attr, None) if hosts: break if hosts: # hostname can be json-encoded list of hostnames try: hosts = json.loads(hosts) except ValueError: hosts = [hosts] else: hosts = [settings['private-address']] if hosts: for host in hosts: host = self.normalize_address(host) if self.grant_exists(database, username, host): log("Grant exists for host '%s' on db '%s'" % (host, database), level=DEBUG) if unit not in allowed_units: allowed_units.add(unit) else: log("Grant does NOT exist for host '%s' on db '%s'" % (host, database), level=DEBUG) else: log("No hosts found for grant check", level=INFO) return allowed_units
[ "def", "get_allowed_units", "(", "self", ",", "database", ",", "username", ",", "relation_id", "=", "None", ")", ":", "self", ".", "connect", "(", "password", "=", "self", ".", "get_mysql_root_password", "(", ")", ")", "allowed_units", "=", "set", "(", ")", "for", "unit", "in", "related_units", "(", "relation_id", ")", ":", "settings", "=", "relation_get", "(", "rid", "=", "relation_id", ",", "unit", "=", "unit", ")", "# First check for setting with prefix, then without", "for", "attr", "in", "[", "\"%s_hostname\"", "%", "(", "database", ")", ",", "'hostname'", "]", ":", "hosts", "=", "settings", ".", "get", "(", "attr", ",", "None", ")", "if", "hosts", ":", "break", "if", "hosts", ":", "# hostname can be json-encoded list of hostnames", "try", ":", "hosts", "=", "json", ".", "loads", "(", "hosts", ")", "except", "ValueError", ":", "hosts", "=", "[", "hosts", "]", "else", ":", "hosts", "=", "[", "settings", "[", "'private-address'", "]", "]", "if", "hosts", ":", "for", "host", "in", "hosts", ":", "host", "=", "self", ".", "normalize_address", "(", "host", ")", "if", "self", ".", "grant_exists", "(", "database", ",", "username", ",", "host", ")", ":", "log", "(", "\"Grant exists for host '%s' on db '%s'\"", "%", "(", "host", ",", "database", ")", ",", "level", "=", "DEBUG", ")", "if", "unit", "not", "in", "allowed_units", ":", "allowed_units", ".", "add", "(", "unit", ")", "else", ":", "log", "(", "\"Grant does NOT exist for host '%s' on db '%s'\"", "%", "(", "host", ",", "database", ")", ",", "level", "=", "DEBUG", ")", "else", ":", "log", "(", "\"No hosts found for grant check\"", ",", "level", "=", "INFO", ")", "return", "allowed_units" ]
Get list of units with access grants for database with username. This is typically used to provide shared-db relations with a list of which units have been granted access to the given database.
[ "Get", "list", "of", "units", "with", "access", "grants", "for", "database", "with", "username", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/database/mysql.py#L387-L426
train
juju/charm-helpers
charmhelpers/contrib/database/mysql.py
MySQLHelper.configure_db
def configure_db(self, hostname, database, username, admin=False): """Configure access to database for username from hostname.""" self.connect(password=self.get_mysql_root_password()) if not self.database_exists(database): self.create_database(database) remote_ip = self.normalize_address(hostname) password = self.get_mysql_password(username) if not self.grant_exists(database, username, remote_ip): if not admin: self.create_grant(database, username, remote_ip, password) else: self.create_admin_grant(username, remote_ip, password) self.flush_priviledges() return password
python
def configure_db(self, hostname, database, username, admin=False): """Configure access to database for username from hostname.""" self.connect(password=self.get_mysql_root_password()) if not self.database_exists(database): self.create_database(database) remote_ip = self.normalize_address(hostname) password = self.get_mysql_password(username) if not self.grant_exists(database, username, remote_ip): if not admin: self.create_grant(database, username, remote_ip, password) else: self.create_admin_grant(username, remote_ip, password) self.flush_priviledges() return password
[ "def", "configure_db", "(", "self", ",", "hostname", ",", "database", ",", "username", ",", "admin", "=", "False", ")", ":", "self", ".", "connect", "(", "password", "=", "self", ".", "get_mysql_root_password", "(", ")", ")", "if", "not", "self", ".", "database_exists", "(", "database", ")", ":", "self", ".", "create_database", "(", "database", ")", "remote_ip", "=", "self", ".", "normalize_address", "(", "hostname", ")", "password", "=", "self", ".", "get_mysql_password", "(", "username", ")", "if", "not", "self", ".", "grant_exists", "(", "database", ",", "username", ",", "remote_ip", ")", ":", "if", "not", "admin", ":", "self", ".", "create_grant", "(", "database", ",", "username", ",", "remote_ip", ",", "password", ")", "else", ":", "self", ".", "create_admin_grant", "(", "username", ",", "remote_ip", ",", "password", ")", "self", ".", "flush_priviledges", "(", ")", "return", "password" ]
Configure access to database for username from hostname.
[ "Configure", "access", "to", "database", "for", "username", "from", "hostname", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/database/mysql.py#L428-L443
train
juju/charm-helpers
charmhelpers/contrib/database/mysql.py
PerconaClusterHelper.human_to_bytes
def human_to_bytes(self, human): """Convert human readable configuration options to bytes.""" num_re = re.compile('^[0-9]+$') if num_re.match(human): return human factors = { 'K': 1024, 'M': 1048576, 'G': 1073741824, 'T': 1099511627776 } modifier = human[-1] if modifier in factors: return int(human[:-1]) * factors[modifier] if modifier == '%': total_ram = self.human_to_bytes(self.get_mem_total()) if self.is_32bit_system() and total_ram > self.sys_mem_limit(): total_ram = self.sys_mem_limit() factor = int(human[:-1]) * 0.01 pctram = total_ram * factor return int(pctram - (pctram % self.DEFAULT_PAGE_SIZE)) raise ValueError("Can only convert K,M,G, or T")
python
def human_to_bytes(self, human): """Convert human readable configuration options to bytes.""" num_re = re.compile('^[0-9]+$') if num_re.match(human): return human factors = { 'K': 1024, 'M': 1048576, 'G': 1073741824, 'T': 1099511627776 } modifier = human[-1] if modifier in factors: return int(human[:-1]) * factors[modifier] if modifier == '%': total_ram = self.human_to_bytes(self.get_mem_total()) if self.is_32bit_system() and total_ram > self.sys_mem_limit(): total_ram = self.sys_mem_limit() factor = int(human[:-1]) * 0.01 pctram = total_ram * factor return int(pctram - (pctram % self.DEFAULT_PAGE_SIZE)) raise ValueError("Can only convert K,M,G, or T")
[ "def", "human_to_bytes", "(", "self", ",", "human", ")", ":", "num_re", "=", "re", ".", "compile", "(", "'^[0-9]+$'", ")", "if", "num_re", ".", "match", "(", "human", ")", ":", "return", "human", "factors", "=", "{", "'K'", ":", "1024", ",", "'M'", ":", "1048576", ",", "'G'", ":", "1073741824", ",", "'T'", ":", "1099511627776", "}", "modifier", "=", "human", "[", "-", "1", "]", "if", "modifier", "in", "factors", ":", "return", "int", "(", "human", "[", ":", "-", "1", "]", ")", "*", "factors", "[", "modifier", "]", "if", "modifier", "==", "'%'", ":", "total_ram", "=", "self", ".", "human_to_bytes", "(", "self", ".", "get_mem_total", "(", ")", ")", "if", "self", ".", "is_32bit_system", "(", ")", "and", "total_ram", ">", "self", ".", "sys_mem_limit", "(", ")", ":", "total_ram", "=", "self", ".", "sys_mem_limit", "(", ")", "factor", "=", "int", "(", "human", "[", ":", "-", "1", "]", ")", "*", "0.01", "pctram", "=", "total_ram", "*", "factor", "return", "int", "(", "pctram", "-", "(", "pctram", "%", "self", ".", "DEFAULT_PAGE_SIZE", ")", ")", "raise", "ValueError", "(", "\"Can only convert K,M,G, or T\"", ")" ]
Convert human readable configuration options to bytes.
[ "Convert", "human", "readable", "configuration", "options", "to", "bytes", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/database/mysql.py#L470-L494
train
juju/charm-helpers
charmhelpers/contrib/database/mysql.py
PerconaClusterHelper.sys_mem_limit
def sys_mem_limit(self): """Determine the default memory limit for the current service unit.""" if platform.machine() in ['armv7l']: _mem_limit = self.human_to_bytes('2700M') # experimentally determined else: # Limit for x86 based 32bit systems _mem_limit = self.human_to_bytes('4G') return _mem_limit
python
def sys_mem_limit(self): """Determine the default memory limit for the current service unit.""" if platform.machine() in ['armv7l']: _mem_limit = self.human_to_bytes('2700M') # experimentally determined else: # Limit for x86 based 32bit systems _mem_limit = self.human_to_bytes('4G') return _mem_limit
[ "def", "sys_mem_limit", "(", "self", ")", ":", "if", "platform", ".", "machine", "(", ")", "in", "[", "'armv7l'", "]", ":", "_mem_limit", "=", "self", ".", "human_to_bytes", "(", "'2700M'", ")", "# experimentally determined", "else", ":", "# Limit for x86 based 32bit systems", "_mem_limit", "=", "self", ".", "human_to_bytes", "(", "'4G'", ")", "return", "_mem_limit" ]
Determine the default memory limit for the current service unit.
[ "Determine", "the", "default", "memory", "limit", "for", "the", "current", "service", "unit", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/database/mysql.py#L503-L511
train
juju/charm-helpers
charmhelpers/contrib/database/mysql.py
PerconaClusterHelper.get_mem_total
def get_mem_total(self): """Calculate the total memory in the current service unit.""" with open('/proc/meminfo') as meminfo_file: for line in meminfo_file: key, mem = line.split(':', 2) if key == 'MemTotal': mtot, modifier = mem.strip().split(' ') return '%s%s' % (mtot, modifier[0].upper())
python
def get_mem_total(self): """Calculate the total memory in the current service unit.""" with open('/proc/meminfo') as meminfo_file: for line in meminfo_file: key, mem = line.split(':', 2) if key == 'MemTotal': mtot, modifier = mem.strip().split(' ') return '%s%s' % (mtot, modifier[0].upper())
[ "def", "get_mem_total", "(", "self", ")", ":", "with", "open", "(", "'/proc/meminfo'", ")", "as", "meminfo_file", ":", "for", "line", "in", "meminfo_file", ":", "key", ",", "mem", "=", "line", ".", "split", "(", "':'", ",", "2", ")", "if", "key", "==", "'MemTotal'", ":", "mtot", ",", "modifier", "=", "mem", ".", "strip", "(", ")", ".", "split", "(", "' '", ")", "return", "'%s%s'", "%", "(", "mtot", ",", "modifier", "[", "0", "]", ".", "upper", "(", ")", ")" ]
Calculate the total memory in the current service unit.
[ "Calculate", "the", "total", "memory", "in", "the", "current", "service", "unit", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/database/mysql.py#L513-L520
train
juju/charm-helpers
charmhelpers/contrib/database/mysql.py
PerconaClusterHelper.parse_config
def parse_config(self): """Parse charm configuration and calculate values for config files.""" config = config_get() mysql_config = {} if 'max-connections' in config: mysql_config['max_connections'] = config['max-connections'] if 'wait-timeout' in config: mysql_config['wait_timeout'] = config['wait-timeout'] if 'innodb-flush-log-at-trx-commit' in config: mysql_config['innodb_flush_log_at_trx_commit'] = \ config['innodb-flush-log-at-trx-commit'] elif 'tuning-level' in config: mysql_config['innodb_flush_log_at_trx_commit'] = \ self.INNODB_FLUSH_CONFIG_VALUES.get(config['tuning-level'], 1) if ('innodb-change-buffering' in config and config['innodb-change-buffering'] in self.INNODB_VALID_BUFFERING_VALUES): mysql_config['innodb_change_buffering'] = config['innodb-change-buffering'] if 'innodb-io-capacity' in config: mysql_config['innodb_io_capacity'] = config['innodb-io-capacity'] # Set a sane default key_buffer size mysql_config['key_buffer'] = self.human_to_bytes('32M') total_memory = self.human_to_bytes(self.get_mem_total()) dataset_bytes = config.get('dataset-size', None) innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None) if innodb_buffer_pool_size: innodb_buffer_pool_size = self.human_to_bytes( innodb_buffer_pool_size) elif dataset_bytes: log("Option 'dataset-size' has been deprecated, please use" "innodb_buffer_pool_size option instead", level="WARN") innodb_buffer_pool_size = self.human_to_bytes( dataset_bytes) else: # NOTE(jamespage): pick the smallest of 50% of RAM or 512MB # to ensure that deployments in containers # without constraints don't try to consume # silly amounts of memory. 
innodb_buffer_pool_size = min( int(total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR), self.DEFAULT_INNODB_BUFFER_SIZE_MAX ) if innodb_buffer_pool_size > total_memory: log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format( innodb_buffer_pool_size, total_memory), level='WARN') mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size return mysql_config
python
def parse_config(self): """Parse charm configuration and calculate values for config files.""" config = config_get() mysql_config = {} if 'max-connections' in config: mysql_config['max_connections'] = config['max-connections'] if 'wait-timeout' in config: mysql_config['wait_timeout'] = config['wait-timeout'] if 'innodb-flush-log-at-trx-commit' in config: mysql_config['innodb_flush_log_at_trx_commit'] = \ config['innodb-flush-log-at-trx-commit'] elif 'tuning-level' in config: mysql_config['innodb_flush_log_at_trx_commit'] = \ self.INNODB_FLUSH_CONFIG_VALUES.get(config['tuning-level'], 1) if ('innodb-change-buffering' in config and config['innodb-change-buffering'] in self.INNODB_VALID_BUFFERING_VALUES): mysql_config['innodb_change_buffering'] = config['innodb-change-buffering'] if 'innodb-io-capacity' in config: mysql_config['innodb_io_capacity'] = config['innodb-io-capacity'] # Set a sane default key_buffer size mysql_config['key_buffer'] = self.human_to_bytes('32M') total_memory = self.human_to_bytes(self.get_mem_total()) dataset_bytes = config.get('dataset-size', None) innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None) if innodb_buffer_pool_size: innodb_buffer_pool_size = self.human_to_bytes( innodb_buffer_pool_size) elif dataset_bytes: log("Option 'dataset-size' has been deprecated, please use" "innodb_buffer_pool_size option instead", level="WARN") innodb_buffer_pool_size = self.human_to_bytes( dataset_bytes) else: # NOTE(jamespage): pick the smallest of 50% of RAM or 512MB # to ensure that deployments in containers # without constraints don't try to consume # silly amounts of memory. 
innodb_buffer_pool_size = min( int(total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR), self.DEFAULT_INNODB_BUFFER_SIZE_MAX ) if innodb_buffer_pool_size > total_memory: log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format( innodb_buffer_pool_size, total_memory), level='WARN') mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size return mysql_config
[ "def", "parse_config", "(", "self", ")", ":", "config", "=", "config_get", "(", ")", "mysql_config", "=", "{", "}", "if", "'max-connections'", "in", "config", ":", "mysql_config", "[", "'max_connections'", "]", "=", "config", "[", "'max-connections'", "]", "if", "'wait-timeout'", "in", "config", ":", "mysql_config", "[", "'wait_timeout'", "]", "=", "config", "[", "'wait-timeout'", "]", "if", "'innodb-flush-log-at-trx-commit'", "in", "config", ":", "mysql_config", "[", "'innodb_flush_log_at_trx_commit'", "]", "=", "config", "[", "'innodb-flush-log-at-trx-commit'", "]", "elif", "'tuning-level'", "in", "config", ":", "mysql_config", "[", "'innodb_flush_log_at_trx_commit'", "]", "=", "self", ".", "INNODB_FLUSH_CONFIG_VALUES", ".", "get", "(", "config", "[", "'tuning-level'", "]", ",", "1", ")", "if", "(", "'innodb-change-buffering'", "in", "config", "and", "config", "[", "'innodb-change-buffering'", "]", "in", "self", ".", "INNODB_VALID_BUFFERING_VALUES", ")", ":", "mysql_config", "[", "'innodb_change_buffering'", "]", "=", "config", "[", "'innodb-change-buffering'", "]", "if", "'innodb-io-capacity'", "in", "config", ":", "mysql_config", "[", "'innodb_io_capacity'", "]", "=", "config", "[", "'innodb-io-capacity'", "]", "# Set a sane default key_buffer size", "mysql_config", "[", "'key_buffer'", "]", "=", "self", ".", "human_to_bytes", "(", "'32M'", ")", "total_memory", "=", "self", ".", "human_to_bytes", "(", "self", ".", "get_mem_total", "(", ")", ")", "dataset_bytes", "=", "config", ".", "get", "(", "'dataset-size'", ",", "None", ")", "innodb_buffer_pool_size", "=", "config", ".", "get", "(", "'innodb-buffer-pool-size'", ",", "None", ")", "if", "innodb_buffer_pool_size", ":", "innodb_buffer_pool_size", "=", "self", ".", "human_to_bytes", "(", "innodb_buffer_pool_size", ")", "elif", "dataset_bytes", ":", "log", "(", "\"Option 'dataset-size' has been deprecated, please use\"", "\"innodb_buffer_pool_size option instead\"", ",", "level", "=", "\"WARN\"", ")", 
"innodb_buffer_pool_size", "=", "self", ".", "human_to_bytes", "(", "dataset_bytes", ")", "else", ":", "# NOTE(jamespage): pick the smallest of 50% of RAM or 512MB", "# to ensure that deployments in containers", "# without constraints don't try to consume", "# silly amounts of memory.", "innodb_buffer_pool_size", "=", "min", "(", "int", "(", "total_memory", "*", "self", ".", "DEFAULT_INNODB_BUFFER_FACTOR", ")", ",", "self", ".", "DEFAULT_INNODB_BUFFER_SIZE_MAX", ")", "if", "innodb_buffer_pool_size", ">", "total_memory", ":", "log", "(", "\"innodb_buffer_pool_size; {} is greater than system available memory:{}\"", ".", "format", "(", "innodb_buffer_pool_size", ",", "total_memory", ")", ",", "level", "=", "'WARN'", ")", "mysql_config", "[", "'innodb_buffer_pool_size'", "]", "=", "innodb_buffer_pool_size", "return", "mysql_config" ]
Parse charm configuration and calculate values for config files.
[ "Parse", "charm", "configuration", "and", "calculate", "values", "for", "config", "files", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/database/mysql.py#L522-L577
train
juju/charm-helpers
charmhelpers/contrib/storage/linux/loopback.py
create_loopback
def create_loopback(file_path): ''' Create a loopback device for a given backing file. :returns: str: Full path to new loopback device (eg, /dev/loop0) ''' file_path = os.path.abspath(file_path) check_call(['losetup', '--find', file_path]) for d, f in six.iteritems(loopback_devices()): if f == file_path: return d
python
def create_loopback(file_path): ''' Create a loopback device for a given backing file. :returns: str: Full path to new loopback device (eg, /dev/loop0) ''' file_path = os.path.abspath(file_path) check_call(['losetup', '--find', file_path]) for d, f in six.iteritems(loopback_devices()): if f == file_path: return d
[ "def", "create_loopback", "(", "file_path", ")", ":", "file_path", "=", "os", ".", "path", ".", "abspath", "(", "file_path", ")", "check_call", "(", "[", "'losetup'", ",", "'--find'", ",", "file_path", "]", ")", "for", "d", ",", "f", "in", "six", ".", "iteritems", "(", "loopback_devices", "(", ")", ")", ":", "if", "f", "==", "file_path", ":", "return", "d" ]
Create a loopback device for a given backing file. :returns: str: Full path to new loopback device (eg, /dev/loop0)
[ "Create", "a", "loopback", "device", "for", "a", "given", "backing", "file", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/storage/linux/loopback.py#L48-L58
train
juju/charm-helpers
charmhelpers/contrib/storage/linux/loopback.py
ensure_loopback_device
def ensure_loopback_device(path, size): ''' Ensure a loopback device exists for a given backing file path and size. If it a loopback device is not mapped to file, a new one will be created. TODO: Confirm size of found loopback device. :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) ''' for d, f in six.iteritems(loopback_devices()): if f == path: return d if not os.path.exists(path): cmd = ['truncate', '--size', size, path] check_call(cmd) return create_loopback(path)
python
def ensure_loopback_device(path, size): ''' Ensure a loopback device exists for a given backing file path and size. If it a loopback device is not mapped to file, a new one will be created. TODO: Confirm size of found loopback device. :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) ''' for d, f in six.iteritems(loopback_devices()): if f == path: return d if not os.path.exists(path): cmd = ['truncate', '--size', size, path] check_call(cmd) return create_loopback(path)
[ "def", "ensure_loopback_device", "(", "path", ",", "size", ")", ":", "for", "d", ",", "f", "in", "six", ".", "iteritems", "(", "loopback_devices", "(", ")", ")", ":", "if", "f", "==", "path", ":", "return", "d", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "cmd", "=", "[", "'truncate'", ",", "'--size'", ",", "size", ",", "path", "]", "check_call", "(", "cmd", ")", "return", "create_loopback", "(", "path", ")" ]
Ensure a loopback device exists for a given backing file path and size. If it a loopback device is not mapped to file, a new one will be created. TODO: Confirm size of found loopback device. :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
[ "Ensure", "a", "loopback", "device", "exists", "for", "a", "given", "backing", "file", "path", "and", "size", ".", "If", "it", "a", "loopback", "device", "is", "not", "mapped", "to", "file", "a", "new", "one", "will", "be", "created", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/storage/linux/loopback.py#L61-L78
train
juju/charm-helpers
charmhelpers/contrib/peerstorage/__init__.py
leader_get
def leader_get(attribute=None, rid=None): """Wrapper to ensure that settings are migrated from the peer relation. This is to support upgrading an environment that does not support Juju leadership election to one that does. If a setting is not extant in the leader-get but is on the relation-get peer rel, it is migrated and marked as such so that it is not re-migrated. """ migration_key = '__leader_get_migrated_settings__' if not is_leader(): return _leader_get(attribute=attribute) settings_migrated = False leader_settings = _leader_get(attribute=attribute) previously_migrated = _leader_get(attribute=migration_key) if previously_migrated: migrated = set(json.loads(previously_migrated)) else: migrated = set([]) try: if migration_key in leader_settings: del leader_settings[migration_key] except TypeError: pass if attribute: if attribute in migrated: return leader_settings # If attribute not present in leader db, check if this unit has set # the attribute in the peer relation if not leader_settings: peer_setting = _relation_get(attribute=attribute, unit=local_unit(), rid=rid) if peer_setting: leader_set(settings={attribute: peer_setting}) leader_settings = peer_setting if leader_settings: settings_migrated = True migrated.add(attribute) else: r_settings = _relation_get(unit=local_unit(), rid=rid) if r_settings: for key in set(r_settings.keys()).difference(migrated): # Leader setting wins if not leader_settings.get(key): leader_settings[key] = r_settings[key] settings_migrated = True migrated.add(key) if settings_migrated: leader_set(**leader_settings) if migrated and settings_migrated: migrated = json.dumps(list(migrated)) leader_set(settings={migration_key: migrated}) return leader_settings
python
def leader_get(attribute=None, rid=None): """Wrapper to ensure that settings are migrated from the peer relation. This is to support upgrading an environment that does not support Juju leadership election to one that does. If a setting is not extant in the leader-get but is on the relation-get peer rel, it is migrated and marked as such so that it is not re-migrated. """ migration_key = '__leader_get_migrated_settings__' if not is_leader(): return _leader_get(attribute=attribute) settings_migrated = False leader_settings = _leader_get(attribute=attribute) previously_migrated = _leader_get(attribute=migration_key) if previously_migrated: migrated = set(json.loads(previously_migrated)) else: migrated = set([]) try: if migration_key in leader_settings: del leader_settings[migration_key] except TypeError: pass if attribute: if attribute in migrated: return leader_settings # If attribute not present in leader db, check if this unit has set # the attribute in the peer relation if not leader_settings: peer_setting = _relation_get(attribute=attribute, unit=local_unit(), rid=rid) if peer_setting: leader_set(settings={attribute: peer_setting}) leader_settings = peer_setting if leader_settings: settings_migrated = True migrated.add(attribute) else: r_settings = _relation_get(unit=local_unit(), rid=rid) if r_settings: for key in set(r_settings.keys()).difference(migrated): # Leader setting wins if not leader_settings.get(key): leader_settings[key] = r_settings[key] settings_migrated = True migrated.add(key) if settings_migrated: leader_set(**leader_settings) if migrated and settings_migrated: migrated = json.dumps(list(migrated)) leader_set(settings={migration_key: migrated}) return leader_settings
[ "def", "leader_get", "(", "attribute", "=", "None", ",", "rid", "=", "None", ")", ":", "migration_key", "=", "'__leader_get_migrated_settings__'", "if", "not", "is_leader", "(", ")", ":", "return", "_leader_get", "(", "attribute", "=", "attribute", ")", "settings_migrated", "=", "False", "leader_settings", "=", "_leader_get", "(", "attribute", "=", "attribute", ")", "previously_migrated", "=", "_leader_get", "(", "attribute", "=", "migration_key", ")", "if", "previously_migrated", ":", "migrated", "=", "set", "(", "json", ".", "loads", "(", "previously_migrated", ")", ")", "else", ":", "migrated", "=", "set", "(", "[", "]", ")", "try", ":", "if", "migration_key", "in", "leader_settings", ":", "del", "leader_settings", "[", "migration_key", "]", "except", "TypeError", ":", "pass", "if", "attribute", ":", "if", "attribute", "in", "migrated", ":", "return", "leader_settings", "# If attribute not present in leader db, check if this unit has set", "# the attribute in the peer relation", "if", "not", "leader_settings", ":", "peer_setting", "=", "_relation_get", "(", "attribute", "=", "attribute", ",", "unit", "=", "local_unit", "(", ")", ",", "rid", "=", "rid", ")", "if", "peer_setting", ":", "leader_set", "(", "settings", "=", "{", "attribute", ":", "peer_setting", "}", ")", "leader_settings", "=", "peer_setting", "if", "leader_settings", ":", "settings_migrated", "=", "True", "migrated", ".", "add", "(", "attribute", ")", "else", ":", "r_settings", "=", "_relation_get", "(", "unit", "=", "local_unit", "(", ")", ",", "rid", "=", "rid", ")", "if", "r_settings", ":", "for", "key", "in", "set", "(", "r_settings", ".", "keys", "(", ")", ")", ".", "difference", "(", "migrated", ")", ":", "# Leader setting wins", "if", "not", "leader_settings", ".", "get", "(", "key", ")", ":", "leader_settings", "[", "key", "]", "=", "r_settings", "[", "key", "]", "settings_migrated", "=", "True", "migrated", ".", "add", "(", "key", ")", "if", "settings_migrated", ":", "leader_set", "(", 
"*", "*", "leader_settings", ")", "if", "migrated", "and", "settings_migrated", ":", "migrated", "=", "json", ".", "dumps", "(", "list", "(", "migrated", ")", ")", "leader_set", "(", "settings", "=", "{", "migration_key", ":", "migrated", "}", ")", "return", "leader_settings" ]
Wrapper to ensure that settings are migrated from the peer relation. This is to support upgrading an environment that does not support Juju leadership election to one that does. If a setting is not extant in the leader-get but is on the relation-get peer rel, it is migrated and marked as such so that it is not re-migrated.
[ "Wrapper", "to", "ensure", "that", "settings", "are", "migrated", "from", "the", "peer", "relation", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/peerstorage/__init__.py#L60-L122
train
juju/charm-helpers
charmhelpers/contrib/peerstorage/__init__.py
relation_set
def relation_set(relation_id=None, relation_settings=None, **kwargs): """Attempt to use leader-set if supported in the current version of Juju, otherwise falls back on relation-set. Note that we only attempt to use leader-set if the provided relation_id is a peer relation id or no relation id is provided (in which case we assume we are within the peer relation context). """ try: if relation_id in relation_ids('cluster'): return leader_set(settings=relation_settings, **kwargs) else: raise NotImplementedError except NotImplementedError: return _relation_set(relation_id=relation_id, relation_settings=relation_settings, **kwargs)
python
def relation_set(relation_id=None, relation_settings=None, **kwargs): """Attempt to use leader-set if supported in the current version of Juju, otherwise falls back on relation-set. Note that we only attempt to use leader-set if the provided relation_id is a peer relation id or no relation id is provided (in which case we assume we are within the peer relation context). """ try: if relation_id in relation_ids('cluster'): return leader_set(settings=relation_settings, **kwargs) else: raise NotImplementedError except NotImplementedError: return _relation_set(relation_id=relation_id, relation_settings=relation_settings, **kwargs)
[ "def", "relation_set", "(", "relation_id", "=", "None", ",", "relation_settings", "=", "None", ",", "*", "*", "kwargs", ")", ":", "try", ":", "if", "relation_id", "in", "relation_ids", "(", "'cluster'", ")", ":", "return", "leader_set", "(", "settings", "=", "relation_settings", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "NotImplementedError", "except", "NotImplementedError", ":", "return", "_relation_set", "(", "relation_id", "=", "relation_id", ",", "relation_settings", "=", "relation_settings", ",", "*", "*", "kwargs", ")" ]
Attempt to use leader-set if supported in the current version of Juju, otherwise falls back on relation-set. Note that we only attempt to use leader-set if the provided relation_id is a peer relation id or no relation id is provided (in which case we assume we are within the peer relation context).
[ "Attempt", "to", "use", "leader", "-", "set", "if", "supported", "in", "the", "current", "version", "of", "Juju", "otherwise", "falls", "back", "on", "relation", "-", "set", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/peerstorage/__init__.py#L125-L140
train
juju/charm-helpers
charmhelpers/contrib/peerstorage/__init__.py
relation_get
def relation_get(attribute=None, unit=None, rid=None): """Attempt to use leader-get if supported in the current version of Juju, otherwise falls back on relation-get. Note that we only attempt to use leader-get if the provided rid is a peer relation id or no relation id is provided (in which case we assume we are within the peer relation context). """ try: if rid in relation_ids('cluster'): return leader_get(attribute, rid) else: raise NotImplementedError except NotImplementedError: return _relation_get(attribute=attribute, rid=rid, unit=unit)
python
def relation_get(attribute=None, unit=None, rid=None): """Attempt to use leader-get if supported in the current version of Juju, otherwise falls back on relation-get. Note that we only attempt to use leader-get if the provided rid is a peer relation id or no relation id is provided (in which case we assume we are within the peer relation context). """ try: if rid in relation_ids('cluster'): return leader_get(attribute, rid) else: raise NotImplementedError except NotImplementedError: return _relation_get(attribute=attribute, rid=rid, unit=unit)
[ "def", "relation_get", "(", "attribute", "=", "None", ",", "unit", "=", "None", ",", "rid", "=", "None", ")", ":", "try", ":", "if", "rid", "in", "relation_ids", "(", "'cluster'", ")", ":", "return", "leader_get", "(", "attribute", ",", "rid", ")", "else", ":", "raise", "NotImplementedError", "except", "NotImplementedError", ":", "return", "_relation_get", "(", "attribute", "=", "attribute", ",", "rid", "=", "rid", ",", "unit", "=", "unit", ")" ]
Attempt to use leader-get if supported in the current version of Juju, otherwise falls back on relation-get. Note that we only attempt to use leader-get if the provided rid is a peer relation id or no relation id is provided (in which case we assume we are within the peer relation context).
[ "Attempt", "to", "use", "leader", "-", "get", "if", "supported", "in", "the", "current", "version", "of", "Juju", "otherwise", "falls", "back", "on", "relation", "-", "get", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/peerstorage/__init__.py#L143-L157
train
juju/charm-helpers
charmhelpers/contrib/peerstorage/__init__.py
peer_retrieve
def peer_retrieve(key, relation_name='cluster'): """Retrieve a named key from peer relation `relation_name`.""" cluster_rels = relation_ids(relation_name) if len(cluster_rels) > 0: cluster_rid = cluster_rels[0] return relation_get(attribute=key, rid=cluster_rid, unit=local_unit()) else: raise ValueError('Unable to detect' 'peer relation {}'.format(relation_name))
python
def peer_retrieve(key, relation_name='cluster'): """Retrieve a named key from peer relation `relation_name`.""" cluster_rels = relation_ids(relation_name) if len(cluster_rels) > 0: cluster_rid = cluster_rels[0] return relation_get(attribute=key, rid=cluster_rid, unit=local_unit()) else: raise ValueError('Unable to detect' 'peer relation {}'.format(relation_name))
[ "def", "peer_retrieve", "(", "key", ",", "relation_name", "=", "'cluster'", ")", ":", "cluster_rels", "=", "relation_ids", "(", "relation_name", ")", "if", "len", "(", "cluster_rels", ")", ">", "0", ":", "cluster_rid", "=", "cluster_rels", "[", "0", "]", "return", "relation_get", "(", "attribute", "=", "key", ",", "rid", "=", "cluster_rid", ",", "unit", "=", "local_unit", "(", ")", ")", "else", ":", "raise", "ValueError", "(", "'Unable to detect'", "'peer relation {}'", ".", "format", "(", "relation_name", ")", ")" ]
Retrieve a named key from peer relation `relation_name`.
[ "Retrieve", "a", "named", "key", "from", "peer", "relation", "relation_name", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/peerstorage/__init__.py#L160-L169
train
juju/charm-helpers
charmhelpers/contrib/peerstorage/__init__.py
peer_echo
def peer_echo(includes=None, force=False): """Echo filtered attributes back onto the same relation for storage. This is a requirement to use the peerstorage module - it needs to be called from the peer relation's changed hook. If Juju leader support exists this will be a noop unless force is True. """ try: is_leader() except NotImplementedError: pass else: if not force: return # NOOP if leader-election is supported # Use original non-leader calls relation_get = _relation_get relation_set = _relation_set rdata = relation_get() echo_data = {} if includes is None: echo_data = rdata.copy() for ex in ['private-address', 'public-address']: if ex in echo_data: echo_data.pop(ex) else: for attribute, value in six.iteritems(rdata): for include in includes: if include in attribute: echo_data[attribute] = value if len(echo_data) > 0: relation_set(relation_settings=echo_data)
python
def peer_echo(includes=None, force=False): """Echo filtered attributes back onto the same relation for storage. This is a requirement to use the peerstorage module - it needs to be called from the peer relation's changed hook. If Juju leader support exists this will be a noop unless force is True. """ try: is_leader() except NotImplementedError: pass else: if not force: return # NOOP if leader-election is supported # Use original non-leader calls relation_get = _relation_get relation_set = _relation_set rdata = relation_get() echo_data = {} if includes is None: echo_data = rdata.copy() for ex in ['private-address', 'public-address']: if ex in echo_data: echo_data.pop(ex) else: for attribute, value in six.iteritems(rdata): for include in includes: if include in attribute: echo_data[attribute] = value if len(echo_data) > 0: relation_set(relation_settings=echo_data)
[ "def", "peer_echo", "(", "includes", "=", "None", ",", "force", "=", "False", ")", ":", "try", ":", "is_leader", "(", ")", "except", "NotImplementedError", ":", "pass", "else", ":", "if", "not", "force", ":", "return", "# NOOP if leader-election is supported", "# Use original non-leader calls", "relation_get", "=", "_relation_get", "relation_set", "=", "_relation_set", "rdata", "=", "relation_get", "(", ")", "echo_data", "=", "{", "}", "if", "includes", "is", "None", ":", "echo_data", "=", "rdata", ".", "copy", "(", ")", "for", "ex", "in", "[", "'private-address'", ",", "'public-address'", "]", ":", "if", "ex", "in", "echo_data", ":", "echo_data", ".", "pop", "(", "ex", ")", "else", ":", "for", "attribute", ",", "value", "in", "six", ".", "iteritems", "(", "rdata", ")", ":", "for", "include", "in", "includes", ":", "if", "include", "in", "attribute", ":", "echo_data", "[", "attribute", "]", "=", "value", "if", "len", "(", "echo_data", ")", ">", "0", ":", "relation_set", "(", "relation_settings", "=", "echo_data", ")" ]
Echo filtered attributes back onto the same relation for storage. This is a requirement to use the peerstorage module - it needs to be called from the peer relation's changed hook. If Juju leader support exists this will be a noop unless force is True.
[ "Echo", "filtered", "attributes", "back", "onto", "the", "same", "relation", "for", "storage", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/peerstorage/__init__.py#L204-L237
train
juju/charm-helpers
charmhelpers/contrib/peerstorage/__init__.py
peer_store_and_set
def peer_store_and_set(relation_id=None, peer_relation_name='cluster', peer_store_fatal=False, relation_settings=None, delimiter='_', **kwargs): """Store passed-in arguments both in argument relation and in peer storage. It functions like doing relation_set() and peer_store() at the same time, with the same data. @param relation_id: the id of the relation to store the data on. Defaults to the current relation. @param peer_store_fatal: Set to True, the function will raise an exception should the peer sotrage not be avialable.""" relation_settings = relation_settings if relation_settings else {} relation_set(relation_id=relation_id, relation_settings=relation_settings, **kwargs) if is_relation_made(peer_relation_name): for key, value in six.iteritems(dict(list(kwargs.items()) + list(relation_settings.items()))): key_prefix = relation_id or current_relation_id() peer_store(key_prefix + delimiter + key, value, relation_name=peer_relation_name) else: if peer_store_fatal: raise ValueError('Unable to detect ' 'peer relation {}'.format(peer_relation_name))
python
def peer_store_and_set(relation_id=None, peer_relation_name='cluster', peer_store_fatal=False, relation_settings=None, delimiter='_', **kwargs): """Store passed-in arguments both in argument relation and in peer storage. It functions like doing relation_set() and peer_store() at the same time, with the same data. @param relation_id: the id of the relation to store the data on. Defaults to the current relation. @param peer_store_fatal: Set to True, the function will raise an exception should the peer sotrage not be avialable.""" relation_settings = relation_settings if relation_settings else {} relation_set(relation_id=relation_id, relation_settings=relation_settings, **kwargs) if is_relation_made(peer_relation_name): for key, value in six.iteritems(dict(list(kwargs.items()) + list(relation_settings.items()))): key_prefix = relation_id or current_relation_id() peer_store(key_prefix + delimiter + key, value, relation_name=peer_relation_name) else: if peer_store_fatal: raise ValueError('Unable to detect ' 'peer relation {}'.format(peer_relation_name))
[ "def", "peer_store_and_set", "(", "relation_id", "=", "None", ",", "peer_relation_name", "=", "'cluster'", ",", "peer_store_fatal", "=", "False", ",", "relation_settings", "=", "None", ",", "delimiter", "=", "'_'", ",", "*", "*", "kwargs", ")", ":", "relation_settings", "=", "relation_settings", "if", "relation_settings", "else", "{", "}", "relation_set", "(", "relation_id", "=", "relation_id", ",", "relation_settings", "=", "relation_settings", ",", "*", "*", "kwargs", ")", "if", "is_relation_made", "(", "peer_relation_name", ")", ":", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "dict", "(", "list", "(", "kwargs", ".", "items", "(", ")", ")", "+", "list", "(", "relation_settings", ".", "items", "(", ")", ")", ")", ")", ":", "key_prefix", "=", "relation_id", "or", "current_relation_id", "(", ")", "peer_store", "(", "key_prefix", "+", "delimiter", "+", "key", ",", "value", ",", "relation_name", "=", "peer_relation_name", ")", "else", ":", "if", "peer_store_fatal", ":", "raise", "ValueError", "(", "'Unable to detect '", "'peer relation {}'", ".", "format", "(", "peer_relation_name", ")", ")" ]
Store passed-in arguments both in argument relation and in peer storage. It functions like doing relation_set() and peer_store() at the same time, with the same data. @param relation_id: the id of the relation to store the data on. Defaults to the current relation. @param peer_store_fatal: Set to True, the function will raise an exception should the peer sotrage not be avialable.
[ "Store", "passed", "-", "in", "arguments", "both", "in", "argument", "relation", "and", "in", "peer", "storage", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/peerstorage/__init__.py#L240-L267
train
juju/charm-helpers
charmhelpers/core/files.py
sed
def sed(filename, before, after, flags='g'): """ Search and replaces the given pattern on filename. :param filename: relative or absolute file path. :param before: expression to be replaced (see 'man sed') :param after: expression to replace with (see 'man sed') :param flags: sed-compatible regex flags in example, to make the search and replace case insensitive, specify ``flags="i"``. The ``g`` flag is always specified regardless, so you do not need to remember to include it when overriding this parameter. :returns: If the sed command exit code was zero then return, otherwise raise CalledProcessError. """ expression = r's/{0}/{1}/{2}'.format(before, after, flags) return subprocess.check_call(["sed", "-i", "-r", "-e", expression, os.path.expanduser(filename)])
python
def sed(filename, before, after, flags='g'): """ Search and replaces the given pattern on filename. :param filename: relative or absolute file path. :param before: expression to be replaced (see 'man sed') :param after: expression to replace with (see 'man sed') :param flags: sed-compatible regex flags in example, to make the search and replace case insensitive, specify ``flags="i"``. The ``g`` flag is always specified regardless, so you do not need to remember to include it when overriding this parameter. :returns: If the sed command exit code was zero then return, otherwise raise CalledProcessError. """ expression = r's/{0}/{1}/{2}'.format(before, after, flags) return subprocess.check_call(["sed", "-i", "-r", "-e", expression, os.path.expanduser(filename)])
[ "def", "sed", "(", "filename", ",", "before", ",", "after", ",", "flags", "=", "'g'", ")", ":", "expression", "=", "r's/{0}/{1}/{2}'", ".", "format", "(", "before", ",", "after", ",", "flags", ")", "return", "subprocess", ".", "check_call", "(", "[", "\"sed\"", ",", "\"-i\"", ",", "\"-r\"", ",", "\"-e\"", ",", "expression", ",", "os", ".", "path", ".", "expanduser", "(", "filename", ")", "]", ")" ]
Search and replaces the given pattern on filename. :param filename: relative or absolute file path. :param before: expression to be replaced (see 'man sed') :param after: expression to replace with (see 'man sed') :param flags: sed-compatible regex flags in example, to make the search and replace case insensitive, specify ``flags="i"``. The ``g`` flag is always specified regardless, so you do not need to remember to include it when overriding this parameter. :returns: If the sed command exit code was zero then return, otherwise raise CalledProcessError.
[ "Search", "and", "replaces", "the", "given", "pattern", "on", "filename", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/files.py#L24-L43
train
juju/charm-helpers
charmhelpers/contrib/hardening/ssh/checks/config.py
SSHConfigContext.get_listening
def get_listening(self, listen=['0.0.0.0']): """Returns a list of addresses SSH can list on Turns input into a sensible list of IPs SSH can listen on. Input must be a python list of interface names, IPs and/or CIDRs. :param listen: list of IPs, CIDRs, interface names :returns: list of IPs available on the host """ if listen == ['0.0.0.0']: return listen value = [] for network in listen: try: ip = get_address_in_network(network=network, fatal=True) except ValueError: if is_ip(network): ip = network else: try: ip = get_iface_addr(iface=network, fatal=False)[0] except IndexError: continue value.append(ip) if value == []: return ['0.0.0.0'] return value
python
def get_listening(self, listen=['0.0.0.0']): """Returns a list of addresses SSH can list on Turns input into a sensible list of IPs SSH can listen on. Input must be a python list of interface names, IPs and/or CIDRs. :param listen: list of IPs, CIDRs, interface names :returns: list of IPs available on the host """ if listen == ['0.0.0.0']: return listen value = [] for network in listen: try: ip = get_address_in_network(network=network, fatal=True) except ValueError: if is_ip(network): ip = network else: try: ip = get_iface_addr(iface=network, fatal=False)[0] except IndexError: continue value.append(ip) if value == []: return ['0.0.0.0'] return value
[ "def", "get_listening", "(", "self", ",", "listen", "=", "[", "'0.0.0.0'", "]", ")", ":", "if", "listen", "==", "[", "'0.0.0.0'", "]", ":", "return", "listen", "value", "=", "[", "]", "for", "network", "in", "listen", ":", "try", ":", "ip", "=", "get_address_in_network", "(", "network", "=", "network", ",", "fatal", "=", "True", ")", "except", "ValueError", ":", "if", "is_ip", "(", "network", ")", ":", "ip", "=", "network", "else", ":", "try", ":", "ip", "=", "get_iface_addr", "(", "iface", "=", "network", ",", "fatal", "=", "False", ")", "[", "0", "]", "except", "IndexError", ":", "continue", "value", ".", "append", "(", "ip", ")", "if", "value", "==", "[", "]", ":", "return", "[", "'0.0.0.0'", "]", "return", "value" ]
Returns a list of addresses SSH can list on Turns input into a sensible list of IPs SSH can listen on. Input must be a python list of interface names, IPs and/or CIDRs. :param listen: list of IPs, CIDRs, interface names :returns: list of IPs available on the host
[ "Returns", "a", "list", "of", "addresses", "SSH", "can", "list", "on" ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/hardening/ssh/checks/config.py#L135-L163
train
juju/charm-helpers
charmhelpers/contrib/openstack/templating.py
get_loader
def get_loader(templates_dir, os_release): """ Create a jinja2.ChoiceLoader containing template dirs up to and including os_release. If directory template directory is missing at templates_dir, it will be omitted from the loader. templates_dir is added to the bottom of the search list as a base loading dir. A charm may also ship a templates dir with this module and it will be appended to the bottom of the search list, eg:: hooks/charmhelpers/contrib/openstack/templates :param templates_dir (str): Base template directory containing release sub-directories. :param os_release (str): OpenStack release codename to construct template loader. :returns: jinja2.ChoiceLoader constructed with a list of jinja2.FilesystemLoaders, ordered in descending order by OpenStack release. """ tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) for rel in six.itervalues(OPENSTACK_CODENAMES)] if not os.path.isdir(templates_dir): log('Templates directory not found @ %s.' % templates_dir, level=ERROR) raise OSConfigException # the bottom contains tempaltes_dir and possibly a common templates dir # shipped with the helper. loaders = [FileSystemLoader(templates_dir)] helper_templates = os.path.join(os.path.dirname(__file__), 'templates') if os.path.isdir(helper_templates): loaders.append(FileSystemLoader(helper_templates)) for rel, tmpl_dir in tmpl_dirs: if os.path.isdir(tmpl_dir): loaders.insert(0, FileSystemLoader(tmpl_dir)) if rel == os_release: break # demote this log to the lowest level; we don't really need to see these # lots in production even when debugging. log('Creating choice loader with dirs: %s' % [l.searchpath for l in loaders], level=TRACE) return ChoiceLoader(loaders)
python
def get_loader(templates_dir, os_release): """ Create a jinja2.ChoiceLoader containing template dirs up to and including os_release. If directory template directory is missing at templates_dir, it will be omitted from the loader. templates_dir is added to the bottom of the search list as a base loading dir. A charm may also ship a templates dir with this module and it will be appended to the bottom of the search list, eg:: hooks/charmhelpers/contrib/openstack/templates :param templates_dir (str): Base template directory containing release sub-directories. :param os_release (str): OpenStack release codename to construct template loader. :returns: jinja2.ChoiceLoader constructed with a list of jinja2.FilesystemLoaders, ordered in descending order by OpenStack release. """ tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) for rel in six.itervalues(OPENSTACK_CODENAMES)] if not os.path.isdir(templates_dir): log('Templates directory not found @ %s.' % templates_dir, level=ERROR) raise OSConfigException # the bottom contains tempaltes_dir and possibly a common templates dir # shipped with the helper. loaders = [FileSystemLoader(templates_dir)] helper_templates = os.path.join(os.path.dirname(__file__), 'templates') if os.path.isdir(helper_templates): loaders.append(FileSystemLoader(helper_templates)) for rel, tmpl_dir in tmpl_dirs: if os.path.isdir(tmpl_dir): loaders.insert(0, FileSystemLoader(tmpl_dir)) if rel == os_release: break # demote this log to the lowest level; we don't really need to see these # lots in production even when debugging. log('Creating choice loader with dirs: %s' % [l.searchpath for l in loaders], level=TRACE) return ChoiceLoader(loaders)
[ "def", "get_loader", "(", "templates_dir", ",", "os_release", ")", ":", "tmpl_dirs", "=", "[", "(", "rel", ",", "os", ".", "path", ".", "join", "(", "templates_dir", ",", "rel", ")", ")", "for", "rel", "in", "six", ".", "itervalues", "(", "OPENSTACK_CODENAMES", ")", "]", "if", "not", "os", ".", "path", ".", "isdir", "(", "templates_dir", ")", ":", "log", "(", "'Templates directory not found @ %s.'", "%", "templates_dir", ",", "level", "=", "ERROR", ")", "raise", "OSConfigException", "# the bottom contains tempaltes_dir and possibly a common templates dir", "# shipped with the helper.", "loaders", "=", "[", "FileSystemLoader", "(", "templates_dir", ")", "]", "helper_templates", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'templates'", ")", "if", "os", ".", "path", ".", "isdir", "(", "helper_templates", ")", ":", "loaders", ".", "append", "(", "FileSystemLoader", "(", "helper_templates", ")", ")", "for", "rel", ",", "tmpl_dir", "in", "tmpl_dirs", ":", "if", "os", ".", "path", ".", "isdir", "(", "tmpl_dir", ")", ":", "loaders", ".", "insert", "(", "0", ",", "FileSystemLoader", "(", "tmpl_dir", ")", ")", "if", "rel", "==", "os_release", ":", "break", "# demote this log to the lowest level; we don't really need to see these", "# lots in production even when debugging.", "log", "(", "'Creating choice loader with dirs: %s'", "%", "[", "l", ".", "searchpath", "for", "l", "in", "loaders", "]", ",", "level", "=", "TRACE", ")", "return", "ChoiceLoader", "(", "loaders", ")" ]
Create a jinja2.ChoiceLoader containing template dirs up to and including os_release. If directory template directory is missing at templates_dir, it will be omitted from the loader. templates_dir is added to the bottom of the search list as a base loading dir. A charm may also ship a templates dir with this module and it will be appended to the bottom of the search list, eg:: hooks/charmhelpers/contrib/openstack/templates :param templates_dir (str): Base template directory containing release sub-directories. :param os_release (str): OpenStack release codename to construct template loader. :returns: jinja2.ChoiceLoader constructed with a list of jinja2.FilesystemLoaders, ordered in descending order by OpenStack release.
[ "Create", "a", "jinja2", ".", "ChoiceLoader", "containing", "template", "dirs", "up", "to", "and", "including", "os_release", ".", "If", "directory", "template", "directory", "is", "missing", "at", "templates_dir", "it", "will", "be", "omitted", "from", "the", "loader", ".", "templates_dir", "is", "added", "to", "the", "bottom", "of", "the", "search", "list", "as", "a", "base", "loading", "dir", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/templating.py#L43-L88
train
juju/charm-helpers
charmhelpers/contrib/openstack/templating.py
OSConfigTemplate.complete_contexts
def complete_contexts(self): ''' Return a list of interfaces that have satisfied contexts. ''' if self._complete_contexts: return self._complete_contexts self.context() return self._complete_contexts
python
def complete_contexts(self): ''' Return a list of interfaces that have satisfied contexts. ''' if self._complete_contexts: return self._complete_contexts self.context() return self._complete_contexts
[ "def", "complete_contexts", "(", "self", ")", ":", "if", "self", ".", "_complete_contexts", ":", "return", "self", ".", "_complete_contexts", "self", ".", "context", "(", ")", "return", "self", ".", "_complete_contexts" ]
Return a list of interfaces that have satisfied contexts.
[ "Return", "a", "list", "of", "interfaces", "that", "have", "satisfied", "contexts", "." ]
aa785c40c3b7a8c69dbfbc7921d6b9f30142e171
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/templating.py#L121-L128
train