Dataset fields (name : type, observed value ranges):

repository_name           : string, 7-55 chars
func_path_in_repository   : string, 4-223 chars
func_name                 : string, 1-134 chars
whole_func_string         : string, 75-104k chars
language                  : string, 1 class (python)
func_code_string          : string, 75-104k chars
func_code_tokens          : sequence, 19-28.4k tokens
func_documentation_string : string, 1-46.9k chars
func_documentation_tokens : sequence, 1-1.97k tokens
split_name                : string, 1 class (train)
func_code_url             : string, 87-315 chars
bjodah/pycompilation
pycompilation/util.py
find_binary_of_command
def find_binary_of_command(candidates):
    """ Calls `find_executable` from distutils for provided candidates
    and returns first hit. If no candidate matches, a RuntimeError is raised
    """
    from distutils.spawn import find_executable
    for c in candidates:
        binary_path = find_executable(c)
        if c and binary_path:
            return c, binary_path
    raise RuntimeError('No binary located for candidates: {}'.format(
        candidates))
python
[ "def", "find_binary_of_command", "(", "candidates", ")", ":", "from", "distutils", ".", "spawn", "import", "find_executable", "for", "c", "in", "candidates", ":", "binary_path", "=", "find_executable", "(", "c", ")", "if", "c", "and", "binary_path", ":", "return", "c", ",", "binary_path", "raise", "RuntimeError", "(", "'No binary located for candidates: {}'", ".", "format", "(", "candidates", ")", ")" ]
Calls `find_executable` from distutils for provided candidates and returns first hit. If no candidate matches, a RuntimeError is raised
[ "Calls", "find_executable", "from", "distuils", "for", "provided", "candidates", "and", "returns", "first", "hit", ".", "If", "no", "candidate", "mathces", "a", "RuntimeError", "is", "raised" ]
train
https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L321-L333
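A minimal usage sketch, assuming pycompilation is importable; the candidate names below are illustrative, not prescribed by the library:

from pycompilation.util import find_binary_of_command

# Returns (name, absolute path) of the first candidate found on PATH;
# 'cc', 'gcc' and 'clang' are example candidates chosen for this sketch.
name, path = find_binary_of_command(['cc', 'gcc', 'clang'])
print(name, path)  # e.g. cc /usr/bin/cc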
bjodah/pycompilation
pycompilation/util.py
pyx_is_cplus
def pyx_is_cplus(path):
    """
    Inspect a Cython source file (.pyx) and look for a comment line like:

    # distutils: language = c++

    Returns True if such a line is present in the file, else False.
    """
    with open(path, 'rt') as fh:  # close the file handle deterministically
        for line in fh:
            if line.startswith('#') and '=' in line:
                splitted = line.split('=')
                if len(splitted) != 2:
                    continue
                lhs, rhs = splitted
                if lhs.strip().split()[-1].lower() == 'language' and \
                        rhs.strip().split()[0].lower() == 'c++':
                    return True
    return False
python
[ "def", "pyx_is_cplus", "(", "path", ")", ":", "for", "line", "in", "open", "(", "path", ",", "'rt'", ")", ":", "if", "line", ".", "startswith", "(", "'#'", ")", "and", "'='", "in", "line", ":", "splitted", "=", "line", ".", "split", "(", "'='", ")", "if", "len", "(", "splitted", ")", "!=", "2", ":", "continue", "lhs", ",", "rhs", "=", "splitted", "if", "lhs", ".", "strip", "(", ")", ".", "split", "(", ")", "[", "-", "1", "]", ".", "lower", "(", ")", "==", "'language'", "and", "rhs", ".", "strip", "(", ")", ".", "split", "(", ")", "[", "0", "]", ".", "lower", "(", ")", "==", "'c++'", ":", "return", "True", "return", "False" ]
Inspect a Cython source file (.pyx) and look for a comment line like: # distutils: language = c++ Returns True if such a line is present in the file, else False.
[ "Inspect", "a", "Cython", "source", "file", "(", ".", "pyx", ")", "and", "look", "for", "comment", "line", "like", ":" ]
train
https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L336-L353
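A hedged example of detecting the C++ pragma; the temporary file and its contents are invented for illustration:

import os
import tempfile

from pycompilation.util import pyx_is_cplus

# Write a throwaway .pyx file carrying the distutils language pragma.
with tempfile.NamedTemporaryFile('w', suffix='.pyx', delete=False) as f:
    f.write("# distutils: language = c++\ncdef int x = 1\n")

print(pyx_is_cplus(f.name))  # True
os.unlink(f.name)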
bjodah/pycompilation
pycompilation/util.py
uniquify
def uniquify(l):
    """ Uniquify a list (skip duplicate items). """
    result = []
    for x in l:
        if x not in result:
            result.append(x)
    return result
python
[ "def", "uniquify", "(", "l", ")", ":", "result", "=", "[", "]", "for", "x", "in", "l", ":", "if", "x", "not", "in", "result", ":", "result", ".", "append", "(", "x", ")", "return", "result" ]
Uniquify a list (skip duplicate items).
[ "Uniquify", "a", "list", "(", "skip", "duplicate", "items", ")", "." ]
train
https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L356-L364
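Usage is straightforward; note that the `x not in result` membership test makes the function quadratic in the list length. For hashable items, an order-preserving linear-time alternative (not what the library uses) is dict.fromkeys:

from pycompilation.util import uniquify

print(uniquify([3, 1, 3, 2, 1]))             # [3, 1, 2]
# Linear-time alternative, hashable items only:
print(list(dict.fromkeys([3, 1, 3, 2, 1])))  # [3, 1, 2]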
bjodah/pycompilation
pycompilation/util.py
HasMetaData.get_from_metadata_file
def get_from_metadata_file(cls, dirpath, key):
    """ Get value of key in metadata file dict. """
    fullpath = os.path.join(dirpath, cls.metadata_filename)
    if os.path.exists(fullpath):
        d = pickle.load(open(fullpath, 'rb'))
        return d[key]
    else:
        raise FileNotFoundError(
            "No such file: {0}".format(fullpath))
python
[ "def", "get_from_metadata_file", "(", "cls", ",", "dirpath", ",", "key", ")", ":", "fullpath", "=", "os", ".", "path", ".", "join", "(", "dirpath", ",", "cls", ".", "metadata_filename", ")", "if", "os", ".", "path", ".", "exists", "(", "fullpath", ")", ":", "d", "=", "pickle", ".", "load", "(", "open", "(", "fullpath", ",", "'rb'", ")", ")", "return", "d", "[", "key", "]", "else", ":", "raise", "FileNotFoundError", "(", "\"No such file: {0}\"", ".", "format", "(", "fullpath", ")", ")" ]
Get value of key in metadata file dict.
[ "Get", "value", "of", "key", "in", "metadata", "file", "dict", "." ]
train
https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L249-L259
bjodah/pycompilation
pycompilation/util.py
HasMetaData.save_to_metadata_file
def save_to_metadata_file(cls, dirpath, key, value):
    """ Store `key: value` in metadata file dict. """
    fullpath = os.path.join(dirpath, cls.metadata_filename)
    if os.path.exists(fullpath):
        d = pickle.load(open(fullpath, 'rb'))
        d.update({key: value})
        pickle.dump(d, open(fullpath, 'wb'))
    else:
        pickle.dump({key: value}, open(fullpath, 'wb'))
python
[ "def", "save_to_metadata_file", "(", "cls", ",", "dirpath", ",", "key", ",", "value", ")", ":", "fullpath", "=", "os", ".", "path", ".", "join", "(", "dirpath", ",", "cls", ".", "metadata_filename", ")", "if", "os", ".", "path", ".", "exists", "(", "fullpath", ")", ":", "d", "=", "pickle", ".", "load", "(", "open", "(", "fullpath", ",", "'rb'", ")", ")", "d", ".", "update", "(", "{", "key", ":", "value", "}", ")", "pickle", ".", "dump", "(", "d", ",", "open", "(", "fullpath", ",", "'wb'", ")", ")", "else", ":", "pickle", ".", "dump", "(", "{", "key", ":", "value", "}", ",", "open", "(", "fullpath", ",", "'wb'", ")", ")" ]
Store `key: value` in metadata file dict.
[ "Store", "key", ":", "value", "in", "metadata", "file", "dict", "." ]
train
https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L262-L272
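A hedged round-trip sketch for the two classmethods above, assuming HasMetaData can be mixed in directly and that metadata_filename is a plain class attribute; the file name and keys are illustrative:

import tempfile

from pycompilation.util import HasMetaData

class Cache(HasMetaData):
    metadata_filename = '.metadata'  # illustrative file name

tmpdir = tempfile.mkdtemp()
Cache.save_to_metadata_file(tmpdir, 'compiler', 'gcc')
Cache.save_to_metadata_file(tmpdir, 'flags', '-O2')      # merges into the dict
print(Cache.get_from_metadata_file(tmpdir, 'compiler'))  # gcc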
chrisjsewell/jsonextended
jsonextended/plugins.py
view_interfaces
def view_interfaces(category=None):
    """ return a view of the plugin minimal class attribute interface(s)

    Parameters
    ----------
    category : None or str
        if str, apply for single plugin category

    Examples
    --------
    >>> from pprint import pprint
    >>> pprint(view_interfaces())
    {'decoders': ['plugin_name', 'plugin_descript', 'dict_signature'],
     'encoders': ['plugin_name', 'plugin_descript', 'objclass'],
     'parsers': ['plugin_name', 'plugin_descript', 'file_regex', 'read_file']}

    """
    if category is not None:
        return sorted(_plugins_interface[category][:])
    else:
        return {k: v[:] for k, v in _plugins_interface.items()}
python
[ "def", "view_interfaces", "(", "category", "=", "None", ")", ":", "if", "category", "is", "not", "None", ":", "return", "sorted", "(", "_plugins_interface", "[", "category", "]", "[", ":", "]", ")", "else", ":", "return", "{", "k", ":", "v", "[", ":", "]", "for", "k", ",", "v", "in", "_plugins_interface", ".", "items", "(", ")", "}" ]
return a view of the plugin minimal class attribute interface(s) Parameters ---------- category : None or str if str, apply for single plugin category Examples -------- >>> from pprint import pprint >>> pprint(view_interfaces()) {'decoders': ['plugin_name', 'plugin_descript', 'dict_signature'], 'encoders': ['plugin_name', 'plugin_descript', 'objclass'], 'parsers': ['plugin_name', 'plugin_descript', 'file_regex', 'read_file']}
[ "return", "a", "view", "of", "the", "plugin", "minimal", "class", "attribute", "interface", "(", "s", ")" ]
train
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/plugins.py#L56-L77
chrisjsewell/jsonextended
jsonextended/plugins.py
view_plugins
def view_plugins(category=None):
    """ return a view of the loaded plugin names and descriptions

    Parameters
    ----------
    category : None or str
        if str, apply for single plugin category

    Examples
    --------
    >>> from pprint import pprint
    >>> pprint(view_plugins())
    {'decoders': {}, 'encoders': {}, 'parsers': {}}

    >>> class DecoderPlugin(object):
    ...     plugin_name = 'example'
    ...     plugin_descript = 'a decoder for dicts containing _example_ key'
    ...     dict_signature = ('_example_',)
    ...
    >>> errors = load_plugin_classes([DecoderPlugin])

    >>> pprint(view_plugins())
    {'decoders': {'example': 'a decoder for dicts containing _example_ key'},
     'encoders': {},
     'parsers': {}}

    >>> view_plugins('decoders')
    {'example': 'a decoder for dicts containing _example_ key'}

    >>> unload_all_plugins()

    """
    if category is not None:
        if category == 'parsers':
            return {
                name: {"descript": klass.plugin_descript,
                       "regex": klass.file_regex}
                for name, klass in _all_plugins[category].items()
            }
        return {
            name: klass.plugin_descript
            for name, klass in _all_plugins[category].items()
        }
    else:
        return {cat: {name: klass.plugin_descript
                      for name, klass in plugins.items()}
                for cat, plugins in _all_plugins.items()}
python
[ "def", "view_plugins", "(", "category", "=", "None", ")", ":", "if", "category", "is", "not", "None", ":", "if", "category", "==", "'parsers'", ":", "return", "{", "name", ":", "{", "\"descript\"", ":", "klass", ".", "plugin_descript", ",", "\"regex\"", ":", "klass", ".", "file_regex", "}", "for", "name", ",", "klass", "in", "_all_plugins", "[", "category", "]", ".", "items", "(", ")", "}", "return", "{", "name", ":", "klass", ".", "plugin_descript", "for", "name", ",", "klass", "in", "_all_plugins", "[", "category", "]", ".", "items", "(", ")", "}", "else", ":", "return", "{", "cat", ":", "{", "name", ":", "klass", ".", "plugin_descript", "for", "name", ",", "klass", "in", "plugins", ".", "items", "(", ")", "}", "for", "cat", ",", "plugins", "in", "_all_plugins", ".", "items", "(", ")", "}" ]
return a view of the loaded plugin names and descriptions Parameters ---------- category : None or str if str, apply for single plugin category Examples -------- >>> from pprint import pprint >>> pprint(view_plugins()) {'decoders': {}, 'encoders': {}, 'parsers': {}} >>> class DecoderPlugin(object): ... plugin_name = 'example' ... plugin_descript = 'a decoder for dicts containing _example_ key' ... dict_signature = ('_example_',) ... >>> errors = load_plugin_classes([DecoderPlugin]) >>> pprint(view_plugins()) {'decoders': {'example': 'a decoder for dicts containing _example_ key'}, 'encoders': {}, 'parsers': {}} >>> view_plugins('decoders') {'example': 'a decoder for dicts containing _example_ key'} >>> unload_all_plugins()
[ "return", "a", "view", "of", "the", "loaded", "plugin", "names", "and", "descriptions" ]
train
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/plugins.py#L80-L127
chrisjsewell/jsonextended
jsonextended/plugins.py
unload_plugin
def unload_plugin(name, category=None):
    """ remove single plugin

    Parameters
    ----------
    name : str
        plugin name
    category : str
        plugin category

    Examples
    --------
    >>> from pprint import pprint
    >>> pprint(view_plugins())
    {'decoders': {}, 'encoders': {}, 'parsers': {}}

    >>> class DecoderPlugin(object):
    ...     plugin_name = 'example'
    ...     plugin_descript = 'a decoder for dicts containing _example_ key'
    ...     dict_signature = ('_example_',)
    ...
    >>> errors = load_plugin_classes([DecoderPlugin],category='decoders')

    >>> pprint(view_plugins())
    {'decoders': {'example': 'a decoder for dicts containing _example_ key'},
     'encoders': {},
     'parsers': {}}

    >>> unload_plugin('example','decoders')

    >>> pprint(view_plugins())
    {'decoders': {}, 'encoders': {}, 'parsers': {}}

    """
    if category is not None:
        _all_plugins[category].pop(name)
    else:
        for cat in _all_plugins:
            if name in _all_plugins[cat]:
                _all_plugins[cat].pop(name)
python
[ "def", "unload_plugin", "(", "name", ",", "category", "=", "None", ")", ":", "if", "category", "is", "not", "None", ":", "_all_plugins", "[", "category", "]", ".", "pop", "(", "name", ")", "else", ":", "for", "cat", "in", "_all_plugins", ":", "if", "name", "in", "_all_plugins", "[", "cat", "]", ":", "_all_plugins", "[", "cat", "]", ".", "pop", "(", "name", ")" ]
remove single plugin Parameters ---------- name : str plugin name category : str plugin category Examples -------- >>> from pprint import pprint >>> pprint(view_plugins()) {'decoders': {}, 'encoders': {}, 'parsers': {}} >>> class DecoderPlugin(object): ... plugin_name = 'example' ... plugin_descript = 'a decoder for dicts containing _example_ key' ... dict_signature = ('_example_',) ... >>> errors = load_plugin_classes([DecoderPlugin],category='decoders') >>> pprint(view_plugins()) {'decoders': {'example': 'a decoder for dicts containing _example_ key'}, 'encoders': {}, 'parsers': {}} >>> unload_plugin('example','decoders') >>> pprint(view_plugins()) {'decoders': {}, 'encoders': {}, 'parsers': {}}
[ "remove", "single", "plugin" ]
train
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/plugins.py#L174-L213
chrisjsewell/jsonextended
jsonextended/plugins.py
load_plugin_classes
def load_plugin_classes(classes, category=None, overwrite=False):
    """ load plugins from class objects

    Parameters
    ----------
    classes: list
        list of classes
    category : None or str
        if str, apply for single plugin category
    overwrite : bool
        if True, allow existing plugins to be overwritten

    Examples
    --------
    >>> from pprint import pprint
    >>> pprint(view_plugins())
    {'decoders': {}, 'encoders': {}, 'parsers': {}}

    >>> class DecoderPlugin(object):
    ...     plugin_name = 'example'
    ...     plugin_descript = 'a decoder for dicts containing _example_ key'
    ...     dict_signature = ('_example_',)
    ...
    >>> errors = load_plugin_classes([DecoderPlugin])

    >>> pprint(view_plugins())
    {'decoders': {'example': 'a decoder for dicts containing _example_ key'},
     'encoders': {},
     'parsers': {}}

    >>> unload_all_plugins()

    """
    load_errors = []
    for klass in classes:
        for pcat, pinterface in _plugins_interface.items():
            if category is not None and not pcat == category:
                continue
            if all([hasattr(klass, attr) for attr in pinterface]):
                if klass.plugin_name in _all_plugins[pcat] and not overwrite:
                    err = '{0} is already set for {1}'.format(
                        klass.plugin_name, pcat)
                    load_errors.append((klass.__name__, '{}'.format(err)))
                    continue
                _all_plugins[pcat][klass.plugin_name] = klass()
            else:
                load_errors.append((
                    klass.__name__,
                    'does not match {} interface: {}'.format(pcat, pinterface)
                ))
    return load_errors
python
[ "def", "load_plugin_classes", "(", "classes", ",", "category", "=", "None", ",", "overwrite", "=", "False", ")", ":", "load_errors", "=", "[", "]", "for", "klass", "in", "classes", ":", "for", "pcat", ",", "pinterface", "in", "_plugins_interface", ".", "items", "(", ")", ":", "if", "category", "is", "not", "None", "and", "not", "pcat", "==", "category", ":", "continue", "if", "all", "(", "[", "hasattr", "(", "klass", ",", "attr", ")", "for", "attr", "in", "pinterface", "]", ")", ":", "if", "klass", ".", "plugin_name", "in", "_all_plugins", "[", "pcat", "]", "and", "not", "overwrite", ":", "err", "=", "'{0} is already set for {1}'", ".", "format", "(", "klass", ".", "plugin_name", ",", "pcat", ")", "load_errors", ".", "append", "(", "(", "klass", ".", "__name__", ",", "'{}'", ".", "format", "(", "err", ")", ")", ")", "continue", "_all_plugins", "[", "pcat", "]", "[", "klass", ".", "plugin_name", "]", "=", "klass", "(", ")", "else", ":", "load_errors", ".", "append", "(", "(", "klass", ".", "__name__", ",", "'does not match {} interface: {}'", ".", "format", "(", "pcat", ",", "pinterface", ")", ")", ")", "return", "load_errors" ]
load plugins from class objects Parameters ---------- classes: list list of classes category : None or str if str, apply for single plugin category overwrite : bool if True, allow existing plugins to be overwritten Examples -------- >>> from pprint import pprint >>> pprint(view_plugins()) {'decoders': {}, 'encoders': {}, 'parsers': {}} >>> class DecoderPlugin(object): ... plugin_name = 'example' ... plugin_descript = 'a decoder for dicts containing _example_ key' ... dict_signature = ('_example_',) ... >>> errors = load_plugin_classes([DecoderPlugin]) >>> pprint(view_plugins()) {'decoders': {'example': 'a decoder for dicts containing _example_ key'}, 'encoders': {}, 'parsers': {}} >>> unload_all_plugins()
[ "load", "plugins", "from", "class", "objects" ]
train
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/plugins.py#L216-L267
chrisjsewell/jsonextended
jsonextended/plugins.py
plugins_context
def plugins_context(classes, category=None):
    """ context manager to load plugin class(es) then unload on exit

    Parameters
    ----------
    classes: list
        list of classes
    category : None or str
        if str, apply for single plugin category

    Examples
    --------
    >>> from pprint import pprint
    >>> class DecoderPlugin(object):
    ...     plugin_name = 'example'
    ...     plugin_descript = 'a decoder for dicts containing _example_ key'
    ...     dict_signature = ('_example_',)
    ...
    >>> with plugins_context([DecoderPlugin]):
    ...     pprint(view_plugins())
    {'decoders': {'example': 'a decoder for dicts containing _example_ key'},
     'encoders': {},
     'parsers': {}}

    >>> pprint(view_plugins())
    {'decoders': {}, 'encoders': {}, 'parsers': {}}

    """
    # NB: this generator is wrapped with ``contextlib.contextmanager`` in the
    # source module, so it can be entered via ``with`` as in the example above
    original = {cat: list(_all_plugins[cat].keys()) for cat in _all_plugins}
    load_plugin_classes(classes, category, overwrite=True)
    # if errors:
    #     for cat in _all_plugins:
    #         for name, kls in list(_all_plugins[cat].items()):
    #             if name not in original[cat]:
    #                 _all_plugins[cat].pop(name)
    #     raise RuntimeError(
    #         "errors occurred while loading plugins: {}".format(errors))
    yield
    for cat in _all_plugins:
        for name, kls in list(_all_plugins[cat].items()):
            if name not in original[cat]:
                _all_plugins[cat].pop(name)
python
[ "def", "plugins_context", "(", "classes", ",", "category", "=", "None", ")", ":", "original", "=", "{", "cat", ":", "list", "(", "_all_plugins", "[", "cat", "]", ".", "keys", "(", ")", ")", "for", "cat", "in", "_all_plugins", "}", "load_plugin_classes", "(", "classes", ",", "category", ",", "overwrite", "=", "True", ")", "# if errors:", "# for cat in _all_plugins:", "# for name, kls in list(_all_plugins[cat].items()):", "# if name not in original[cat]:", "# _all_plugins[cat].pop(name)", "# raise RuntimeError(", "# \"errors occurred while loading plugins: {}\".format(errors))", "yield", "for", "cat", "in", "_all_plugins", ":", "for", "name", ",", "kls", "in", "list", "(", "_all_plugins", "[", "cat", "]", ".", "items", "(", ")", ")", ":", "if", "name", "not", "in", "original", "[", "cat", "]", ":", "_all_plugins", "[", "cat", "]", ".", "pop", "(", "name", ")" ]
context manager to load plugin class(es) then unload on exit Parameters ---------- classes: list list of classes category : None or str if str, apply for single plugin category Examples -------- >>> from pprint import pprint >>> class DecoderPlugin(object): ... plugin_name = 'example' ... plugin_descript = 'a decoder for dicts containing _example_ key' ... dict_signature = ('_example_',) ... >>> with plugins_context([DecoderPlugin]): ... pprint(view_plugins()) {'decoders': {'example': 'a decoder for dicts containing _example_ key'}, 'encoders': {}, 'parsers': {}} >>> pprint(view_plugins()) {'decoders': {}, 'encoders': {}, 'parsers': {}}
[ "context", "manager", "to", "load", "plugin", "class", "(", "es", ")", "then", "unload", "on", "exit" ]
train
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/plugins.py#L271-L315
chrisjsewell/jsonextended
jsonextended/plugins.py
load_plugins_dir
def load_plugins_dir(path, category=None, overwrite=False):
    """ load plugins from a directory

    Parameters
    ----------
    path : str or path_like
    category : None or str
        if str, apply for single plugin category
    overwrite : bool
        if True, allow existing plugins to be overwritten

    """
    # get potential plugin python files
    if hasattr(path, 'glob'):
        pypaths = path.glob('*.py')
    else:
        pypaths = glob.glob(os.path.join(path, '*.py'))

    load_errors = []
    for pypath in pypaths:
        # use uuid to ensure no conflicts in name space
        mod_name = str(uuid.uuid4())
        try:
            if hasattr(pypath, 'resolve'):
                # Make the path absolute, resolving any symlinks
                pypath = pypath.resolve()
            with warnings.catch_warnings(record=True):
                warnings.filterwarnings("ignore", category=ImportWarning)
                # for MockPaths
                if hasattr(pypath, 'maketemp'):
                    with pypath.maketemp() as f:
                        module = load_source(mod_name, f.name)
                else:
                    module = load_source(mod_name, str(pypath))
        except Exception as err:
            load_errors.append((str(pypath), 'Load Error: {}'.format(err)))
            continue

        # only get classes that are local to the module
        class_members = inspect.getmembers(module, inspect.isclass)
        classes = [klass for klass_name, klass in class_members
                   if klass.__module__ == mod_name]
        load_errors += load_plugin_classes(classes, category, overwrite)

    return load_errors
python
[ "def", "load_plugins_dir", "(", "path", ",", "category", "=", "None", ",", "overwrite", "=", "False", ")", ":", "# get potential plugin python files", "if", "hasattr", "(", "path", ",", "'glob'", ")", ":", "pypaths", "=", "path", ".", "glob", "(", "'*.py'", ")", "else", ":", "pypaths", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'*.py'", ")", ")", "load_errors", "=", "[", "]", "for", "pypath", "in", "pypaths", ":", "# use uuid to ensure no conflicts in name space", "mod_name", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "try", ":", "if", "hasattr", "(", "pypath", ",", "'resolve'", ")", ":", "# Make the path absolute, resolving any symlinks", "pypath", "=", "pypath", ".", "resolve", "(", ")", "with", "warnings", ".", "catch_warnings", "(", "record", "=", "True", ")", ":", "warnings", ".", "filterwarnings", "(", "\"ignore\"", ",", "category", "=", "ImportWarning", ")", "# for MockPaths", "if", "hasattr", "(", "pypath", ",", "'maketemp'", ")", ":", "with", "pypath", ".", "maketemp", "(", ")", "as", "f", ":", "module", "=", "load_source", "(", "mod_name", ",", "f", ".", "name", ")", "else", ":", "module", "=", "load_source", "(", "mod_name", ",", "str", "(", "pypath", ")", ")", "except", "Exception", "as", "err", ":", "load_errors", ".", "append", "(", "(", "str", "(", "pypath", ")", ",", "'Load Error: {}'", ".", "format", "(", "err", ")", ")", ")", "continue", "# only get classes that are local to the module", "class_members", "=", "inspect", ".", "getmembers", "(", "module", ",", "inspect", ".", "isclass", ")", "classes", "=", "[", "klass", "for", "klass_name", ",", "klass", "in", "class_members", "if", "klass", ".", "__module__", "==", "mod_name", "]", "load_errors", "+=", "load_plugin_classes", "(", "classes", ",", "category", ",", "overwrite", ")", "return", "load_errors" ]
load plugins from a directory Parameters ---------- path : str or path_like category : None or str if str, apply for single plugin category overwrite : bool if True, allow existing plugins to be overwritten
[ "load", "plugins", "from", "a", "directory" ]
train
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/plugins.py#L318-L365
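A hedged sketch of loading a plugin from a directory; the directory, file name and plugin class below are invented for illustration:

import os
import tempfile

from jsonextended import plugins

tmpdir = tempfile.mkdtemp()
# Write a module containing one class matching the decoder interface.
with open(os.path.join(tmpdir, 'example_plugin.py'), 'w') as f:
    f.write(
        "class ExampleDecoder(object):\n"
        "    plugin_name = 'example'\n"
        "    plugin_descript = 'a decoder for dicts containing _example_ key'\n"
        "    dict_signature = ('_example_',)\n")

errors = plugins.load_plugins_dir(tmpdir, category='decoders')
print(errors)                            # [] on success
print(plugins.view_plugins('decoders'))  # {'example': 'a decoder for ...'}
plugins.unload_all_plugins()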
chrisjsewell/jsonextended
jsonextended/plugins.py
load_builtin_plugins
def load_builtin_plugins(category=None, overwrite=False):
    """load plugins from builtin directories

    Parameters
    ----------
    category : None or str
        if str, apply for single plugin category
    overwrite : bool
        if True, allow existing plugins to be overwritten

    Examples
    --------
    >>> from pprint import pprint
    >>> pprint(view_plugins())
    {'decoders': {}, 'encoders': {}, 'parsers': {}}

    >>> errors = load_builtin_plugins()
    >>> errors
    []

    >>> pprint(view_plugins(),width=200)
    {'decoders': {'decimal.Decimal': 'encode/decode Decimal type',
                  'fractions.Fraction': 'encode/decode Fraction type',
                  'numpy.ndarray': 'encode/decode numpy.ndarray',
                  'pint.Quantity': 'encode/decode pint.Quantity object',
                  'python.set': 'decode/encode python set'},
     'encoders': {'decimal.Decimal': 'encode/decode Decimal type',
                  'fractions.Fraction': 'encode/decode Fraction type',
                  'numpy.ndarray': 'encode/decode numpy.ndarray',
                  'pint.Quantity': 'encode/decode pint.Quantity object',
                  'python.set': 'decode/encode python set'},
     'parsers': {'csv.basic': 'read *.csv delimited file with headers to {header:[column_values]}',
                 'csv.literal': 'read *.literal.csv delimited files with headers to {header:column_values}, with number strings converted to int/float',
                 'hdf5.read': 'read *.hdf5 (in read mode) files using h5py',
                 'ipynb': 'read Jupyter Notebooks',
                 'json.basic': 'read *.json files using json.load',
                 'keypair': "read *.keypair, where each line should be; '<key> <pair>'",
                 'yaml.ruamel': 'read *.yaml files using ruamel.yaml'}}

    >>> unload_all_plugins()

    """  # noqa: E501
    load_errors = []
    for cat, path in _plugins_builtin.items():
        if cat != category and category is not None:
            continue
        load_errors += load_plugins_dir(path, cat, overwrite=overwrite)
    return load_errors
python
[ "def", "load_builtin_plugins", "(", "category", "=", "None", ",", "overwrite", "=", "False", ")", ":", "# noqa: E501", "load_errors", "=", "[", "]", "for", "cat", ",", "path", "in", "_plugins_builtin", ".", "items", "(", ")", ":", "if", "cat", "!=", "category", "and", "category", "is", "not", "None", ":", "continue", "load_errors", "+=", "load_plugins_dir", "(", "path", ",", "cat", ",", "overwrite", "=", "overwrite", ")", "return", "load_errors" ]
load plugins from builtin directories Parameters ---------- category : None or str if str, apply for single plugin category overwrite : bool if True, allow existing plugins to be overwritten Examples -------- >>> from pprint import pprint >>> pprint(view_plugins()) {'decoders': {}, 'encoders': {}, 'parsers': {}} >>> errors = load_builtin_plugins() >>> errors [] >>> pprint(view_plugins(),width=200) {'decoders': {'decimal.Decimal': 'encode/decode Decimal type', 'fractions.Fraction': 'encode/decode Fraction type', 'numpy.ndarray': 'encode/decode numpy.ndarray', 'pint.Quantity': 'encode/decode pint.Quantity object', 'python.set': 'decode/encode python set'}, 'encoders': {'decimal.Decimal': 'encode/decode Decimal type', 'fractions.Fraction': 'encode/decode Fraction type', 'numpy.ndarray': 'encode/decode numpy.ndarray', 'pint.Quantity': 'encode/decode pint.Quantity object', 'python.set': 'decode/encode python set'}, 'parsers': {'csv.basic': 'read *.csv delimited file with headers to {header:[column_values]}', 'csv.literal': 'read *.literal.csv delimited files with headers to {header:column_values}, with number strings converted to int/float', 'hdf5.read': 'read *.hdf5 (in read mode) files using h5py', 'ipynb': 'read Jupyter Notebooks', 'json.basic': 'read *.json files using json.load', 'keypair': "read *.keypair, where each line should be; '<key> <pair>'", 'yaml.ruamel': 'read *.yaml files using ruamel.yaml'}} >>> unload_all_plugins()
[ "load", "plugins", "from", "builtin", "directories" ]
train
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/plugins.py#L368-L416
chrisjsewell/jsonextended
jsonextended/plugins.py
encode
def encode(obj, outtype='json', raise_error=False):
    """ encode objects, via encoder plugins, to new types

    Parameters
    ----------
    outtype: str
        use encoder method to_<outtype> to encode
    raise_error : bool
        if True, raise ValueError if no suitable plugin found

    Examples
    --------
    >>> load_builtin_plugins('encoders')
    []
    >>> from decimal import Decimal
    >>> encode(Decimal('1.3425345'))
    {'_python_Decimal_': '1.3425345'}
    >>> encode(Decimal('1.3425345'),outtype='str')
    '1.3425345'

    >>> encode(set([1,2,3,4,4]))
    {'_python_set_': [1, 2, 3, 4]}
    >>> encode(set([1,2,3,4,4]),outtype='str')
    '{1, 2, 3, 4}'

    >>> unload_all_plugins()

    """
    # the first encoder whose objclass matches and which implements
    # to_<outtype> wins
    for encoder in get_plugins('encoders').values():
        if (isinstance(obj, encoder.objclass)
                and hasattr(encoder, 'to_{}'.format(outtype))):
            return getattr(encoder, 'to_{}'.format(outtype))(obj)

    if raise_error:
        raise ValueError(
            "No JSON serializer is available for "
            "{0} (of type {1})".format(obj, type(obj)))
    else:
        return obj
python
[ "def", "encode", "(", "obj", ",", "outtype", "=", "'json'", ",", "raise_error", "=", "False", ")", ":", "for", "encoder", "in", "get_plugins", "(", "'encoders'", ")", ".", "values", "(", ")", ":", "if", "(", "isinstance", "(", "obj", ",", "encoder", ".", "objclass", ")", "and", "hasattr", "(", "encoder", ",", "'to_{}'", ".", "format", "(", "outtype", ")", ")", ")", ":", "return", "getattr", "(", "encoder", ",", "'to_{}'", ".", "format", "(", "outtype", ")", ")", "(", "obj", ")", "break", "if", "raise_error", ":", "raise", "ValueError", "(", "\"No JSON serializer is available for\"", "\"{0} (of type {1})\"", ".", "format", "(", "obj", ",", "type", "(", "obj", ")", ")", ")", "else", ":", "return", "obj" ]
encode objects, via encoder plugins, to new types Parameters ---------- outtype: str use encoder method to_<outtype> to encode raise_error : bool if True, raise ValueError if no suitable plugin found Examples -------- >>> load_builtin_plugins('encoders') [] >>> from decimal import Decimal >>> encode(Decimal('1.3425345')) {'_python_Decimal_': '1.3425345'} >>> encode(Decimal('1.3425345'),outtype='str') '1.3425345' >>> encode(set([1,2,3,4,4])) {'_python_set_': [1, 2, 3, 4]} >>> encode(set([1,2,3,4,4]),outtype='str') '{1, 2, 3, 4}' >>> unload_all_plugins()
[ "encode", "objects", "via", "encoder", "plugins", "to", "new", "types" ]
train
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/plugins.py#L419-L459
chrisjsewell/jsonextended
jsonextended/plugins.py
decode
def decode(dct, intype='json', raise_error=False):
    """ decode dict objects, via decoder plugins, to new type

    Parameters
    ----------
    intype: str
        use decoder method from_<intype> to decode
    raise_error : bool
        if True, raise ValueError if no suitable plugin found

    Examples
    --------
    >>> load_builtin_plugins('decoders')
    []
    >>> from decimal import Decimal
    >>> decode({'_python_Decimal_':'1.3425345'})
    Decimal('1.3425345')

    >>> unload_all_plugins()

    """
    for decoder in get_plugins('decoders').values():
        # a decoder that allows extra keys matches when its signature is a
        # subset of the dict's keys; otherwise the keys must match exactly
        if (set(list(decoder.dict_signature)).issubset(dct.keys())
                and hasattr(decoder, 'from_{}'.format(intype))
                and getattr(decoder, 'allow_other_keys', False)):
            return getattr(decoder, 'from_{}'.format(intype))(dct)
        elif (sorted(list(decoder.dict_signature)) == sorted(dct.keys())
                and hasattr(decoder, 'from_{}'.format(intype))):
            return getattr(decoder, 'from_{}'.format(intype))(dct)

    if raise_error:
        raise ValueError('no suitable plugin found for: {}'.format(dct))
    else:
        return dct
python
[ "def", "decode", "(", "dct", ",", "intype", "=", "'json'", ",", "raise_error", "=", "False", ")", ":", "for", "decoder", "in", "get_plugins", "(", "'decoders'", ")", ".", "values", "(", ")", ":", "if", "(", "set", "(", "list", "(", "decoder", ".", "dict_signature", ")", ")", ".", "issubset", "(", "dct", ".", "keys", "(", ")", ")", "and", "hasattr", "(", "decoder", ",", "'from_{}'", ".", "format", "(", "intype", ")", ")", "and", "getattr", "(", "decoder", ",", "'allow_other_keys'", ",", "False", ")", ")", ":", "return", "getattr", "(", "decoder", ",", "'from_{}'", ".", "format", "(", "intype", ")", ")", "(", "dct", ")", "break", "elif", "(", "sorted", "(", "list", "(", "decoder", ".", "dict_signature", ")", ")", "==", "sorted", "(", "dct", ".", "keys", "(", ")", ")", "and", "hasattr", "(", "decoder", ",", "'from_{}'", ".", "format", "(", "intype", ")", ")", ")", ":", "return", "getattr", "(", "decoder", ",", "'from_{}'", ".", "format", "(", "intype", ")", ")", "(", "dct", ")", "break", "if", "raise_error", ":", "raise", "ValueError", "(", "'no suitable plugin found for: {}'", ".", "format", "(", "dct", ")", ")", "else", ":", "return", "dct" ]
decode dict objects, via decoder plugins, to new type Parameters ---------- intype: str use decoder method from_<intype> to decode raise_error : bool if True, raise ValueError if no suitable plugin found Examples -------- >>> load_builtin_plugins('decoders') [] >>> from decimal import Decimal >>> decode({'_python_Decimal_':'1.3425345'}) Decimal('1.3425345') >>> unload_all_plugins()
[ "decode", "dict", "objects", "via", "decoder", "plugins", "to", "new", "type" ]
train
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/plugins.py#L462-L498
chrisjsewell/jsonextended
jsonextended/plugins.py
parser_available
def parser_available(fpath):
    """ test if parser plugin available for fpath

    Examples
    --------
    >>> load_builtin_plugins('parsers')
    []
    >>> test_file = StringIO('{"a":[1,2,3.4]}')
    >>> test_file.name = 'test.json'
    >>> parser_available(test_file)
    True
    >>> test_file.name = 'test.other'
    >>> parser_available(test_file)
    False

    >>> unload_all_plugins()

    """
    if isinstance(fpath, basestring):
        fname = fpath
    elif hasattr(fpath, 'open') and hasattr(fpath, 'name'):
        fname = fpath.name
    elif hasattr(fpath, 'readline') and hasattr(fpath, 'name'):
        fname = fpath.name
    else:
        raise ValueError(
            'fpath should be a str or file_like object: {}'.format(fpath))

    for parser in get_plugins('parsers').values():
        if fnmatch(fname, parser.file_regex):
            return True
    return False
python
[ "def", "parser_available", "(", "fpath", ")", ":", "if", "isinstance", "(", "fpath", ",", "basestring", ")", ":", "fname", "=", "fpath", "elif", "hasattr", "(", "fpath", ",", "'open'", ")", "and", "hasattr", "(", "fpath", ",", "'name'", ")", ":", "fname", "=", "fpath", ".", "name", "elif", "hasattr", "(", "fpath", ",", "'readline'", ")", "and", "hasattr", "(", "fpath", ",", "'name'", ")", ":", "fname", "=", "fpath", ".", "name", "else", ":", "raise", "ValueError", "(", "'fpath should be a str or file_like object: {}'", ".", "format", "(", "fpath", ")", ")", "for", "parser", "in", "get_plugins", "(", "'parsers'", ")", ".", "values", "(", ")", ":", "if", "fnmatch", "(", "fname", ",", "parser", ".", "file_regex", ")", ":", "return", "True", "return", "False" ]
test if parser plugin available for fpath Examples -------- >>> load_builtin_plugins('parsers') [] >>> test_file = StringIO('{"a":[1,2,3.4]}') >>> test_file.name = 'test.json' >>> parser_available(test_file) True >>> test_file.name = 'test.other' >>> parser_available(test_file) False >>> unload_all_plugins()
[ "test", "if", "parser", "plugin", "available", "for", "fpath" ]
train
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/plugins.py#L501-L534
chrisjsewell/jsonextended
jsonextended/plugins.py
parse
def parse(fpath, **kwargs):
    """ parse file contents, via parser plugins, to dict like object

    NB: the longest file regex will be used from plugins

    Parameters
    ----------
    fpath : file_like
        string, object with 'open' and 'name' attributes, or
        object with 'readline' and 'name' attributes
    kwargs :
        to pass to parser plugin

    Examples
    --------
    >>> load_builtin_plugins('parsers')
    []

    >>> from pprint import pformat
    >>> json_file = StringIO('{"a":[1,2,3.4]}')
    >>> json_file.name = 'test.json'
    >>> dct = parse(json_file)
    >>> print(pformat(dct).replace("u'","'"))
    {'a': [1, 2, 3.4]}

    >>> reset = json_file.seek(0)
    >>> from decimal import Decimal
    >>> dct = parse(json_file, parse_float=Decimal,other=1)
    >>> print(pformat(dct).replace("u'","'"))
    {'a': [1, 2, Decimal('3.4')]}

    >>> class NewParser(object):
    ...     plugin_name = 'example'
    ...     plugin_descript = 'loads test.json files'
    ...     file_regex = 'test.json'
    ...     def read_file(self, file_obj, **kwargs):
    ...         return {'example':1}
    >>> load_plugin_classes([NewParser],'parsers')
    []
    >>> reset = json_file.seek(0)
    >>> parse(json_file)
    {'example': 1}

    >>> unload_all_plugins()

    """
    if isinstance(fpath, basestring):
        fname = fpath
    elif hasattr(fpath, 'open') and hasattr(fpath, 'name'):
        fname = fpath.name
    elif hasattr(fpath, 'readline') and hasattr(fpath, 'name'):
        fname = fpath.name
    else:
        raise ValueError(
            'fpath should be a str or file_like object: {}'.format(fpath))

    parser_dict = {
        plugin.file_regex: plugin
        for plugin in get_plugins('parsers').values()}

    # find longest match first
    for regex in sorted(parser_dict.keys(), key=len, reverse=True):
        parser = parser_dict[regex]
        if fnmatch(fname, regex):
            if isinstance(fpath, basestring):
                with open(fpath, 'r') as file_obj:
                    data = parser.read_file(file_obj, **kwargs)
            elif hasattr(fpath, 'open'):
                with fpath.open('r') as file_obj:
                    data = parser.read_file(file_obj, **kwargs)
            elif hasattr(fpath, 'readline'):
                data = parser.read_file(fpath, **kwargs)
            return data

    raise ValueError('{} does not match any regex'.format(fname))
python
[ "def", "parse", "(", "fpath", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "fpath", ",", "basestring", ")", ":", "fname", "=", "fpath", "elif", "hasattr", "(", "fpath", ",", "'open'", ")", "and", "hasattr", "(", "fpath", ",", "'name'", ")", ":", "fname", "=", "fpath", ".", "name", "elif", "hasattr", "(", "fpath", ",", "'readline'", ")", "and", "hasattr", "(", "fpath", ",", "'name'", ")", ":", "fname", "=", "fpath", ".", "name", "else", ":", "raise", "ValueError", "(", "'fpath should be a str or file_like object: {}'", ".", "format", "(", "fpath", ")", ")", "parser_dict", "=", "{", "plugin", ".", "file_regex", ":", "plugin", "for", "plugin", "in", "get_plugins", "(", "'parsers'", ")", ".", "values", "(", ")", "}", "# find longest match first", "for", "regex", "in", "sorted", "(", "parser_dict", ".", "keys", "(", ")", ",", "key", "=", "len", ",", "reverse", "=", "True", ")", ":", "parser", "=", "parser_dict", "[", "regex", "]", "if", "fnmatch", "(", "fname", ",", "regex", ")", ":", "if", "isinstance", "(", "fpath", ",", "basestring", ")", ":", "with", "open", "(", "fpath", ",", "'r'", ")", "as", "file_obj", ":", "data", "=", "parser", ".", "read_file", "(", "file_obj", ",", "*", "*", "kwargs", ")", "elif", "hasattr", "(", "fpath", ",", "'open'", ")", ":", "with", "fpath", ".", "open", "(", "'r'", ")", "as", "file_obj", ":", "data", "=", "parser", ".", "read_file", "(", "file_obj", ",", "*", "*", "kwargs", ")", "elif", "hasattr", "(", "fpath", ",", "'readline'", ")", ":", "data", "=", "parser", ".", "read_file", "(", "fpath", ",", "*", "*", "kwargs", ")", "return", "data", "raise", "ValueError", "(", "'{} does not match any regex'", ".", "format", "(", "fname", ")", ")" ]
parse file contents, via parser plugins, to dict like object NB: the longest file regex will be used from plugins Parameters ---------- fpath : file_like string, object with 'open' and 'name' attributes, or object with 'readline' and 'name' attributes kwargs : to pass to parser plugin Examples -------- >>> load_builtin_plugins('parsers') [] >>> from pprint import pformat >>> json_file = StringIO('{"a":[1,2,3.4]}') >>> json_file.name = 'test.json' >>> dct = parse(json_file) >>> print(pformat(dct).replace("u'","'")) {'a': [1, 2, 3.4]} >>> reset = json_file.seek(0) >>> from decimal import Decimal >>> dct = parse(json_file, parse_float=Decimal,other=1) >>> print(pformat(dct).replace("u'","'")) {'a': [1, 2, Decimal('3.4')]} >>> class NewParser(object): ... plugin_name = 'example' ... plugin_descript = 'loads test.json files' ... file_regex = 'test.json' ... def read_file(self, file_obj, **kwargs): ... return {'example':1} >>> load_plugin_classes([NewParser],'parsers') [] >>> reset = json_file.seek(0) >>> parse(json_file) {'example': 1} >>> unload_all_plugins()
[ "parse", "file", "contents", "via", "parser", "plugins", "to", "dict", "like", "object", "NB", ":", "the", "longest", "file", "regex", "will", "be", "used", "from", "plugins" ]
train
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/plugins.py#L537-L613
chrisjsewell/jsonextended
jsonextended/mockpath.py
colortxt
def colortxt(text, color=None, on_color=None, attrs=None):
    """Colorize text.

    Available text colors:
        red, green, yellow, blue, magenta, cyan, white.

    Available text highlights:
        on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.

    Available attributes:
        bold, dark, underline, blink, reverse, concealed.

    Examples
    --------
    >>> txt = colortxt('Hello, World!', 'red', 'on_grey', ['bold'])
    >>> print(txt)
    \x1b[1m\x1b[40m\x1b[31mHello, World!\x1b[0m

    """
    _RESET = '\033[0m'
    __ISON = True
    if __ISON and os.getenv('ANSI_COLORS_DISABLED') is None:
        fmt_str = '\033[%dm%s'
        if color is not None:
            text = fmt_str % (_COLORS[color], text)
        if on_color is not None:
            text = fmt_str % (_HIGHLIGHTS[on_color], text)
        if attrs is not None:
            for attr in attrs:
                text = fmt_str % (_ATTRIBUTES[attr], text)
        text += _RESET
    return text
python
[ "def", "colortxt", "(", "text", ",", "color", "=", "None", ",", "on_color", "=", "None", ",", "attrs", "=", "None", ")", ":", "_RESET", "=", "'\\033[0m'", "__ISON", "=", "True", "if", "__ISON", "and", "os", ".", "getenv", "(", "'ANSI_COLORS_DISABLED'", ")", "is", "None", ":", "fmt_str", "=", "'\\033[%dm%s'", "if", "color", "is", "not", "None", ":", "text", "=", "fmt_str", "%", "(", "_COLORS", "[", "color", "]", ",", "text", ")", "if", "on_color", "is", "not", "None", ":", "text", "=", "fmt_str", "%", "(", "_HIGHLIGHTS", "[", "on_color", "]", ",", "text", ")", "if", "attrs", "is", "not", "None", ":", "for", "attr", "in", "attrs", ":", "text", "=", "fmt_str", "%", "(", "_ATTRIBUTES", "[", "attr", "]", ",", "text", ")", "text", "+=", "_RESET", "return", "text" ]
Colorize text. Available text colors: red, green, yellow, blue, magenta, cyan, white. Available text highlights: on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white. Available attributes: bold, dark, underline, blink, reverse, concealed. Examples -------- >>> txt = colortxt('Hello, World!', 'red', 'on_grey', ['bold']) >>> print(txt) \x1b[1m\x1b[40m\x1b[31mHello, World!\x1b[0m
[ "Colorize", "text", "." ]
train
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/mockpath.py#L994-L1028
portfoliome/foil
foil/fileio.py
DelimitedReader.from_zipfile
def from_zipfile(cls, path, filename, encoding, dialect, fields, converters): """Read delimited text from zipfile.""" stream = ZipReader(path, filename).readlines(encoding) return cls(stream, dialect, fields, converters)
python
def from_zipfile(cls, path, filename, encoding, dialect, fields, converters): """Read delimited text from zipfile.""" stream = ZipReader(path, filename).readlines(encoding) return cls(stream, dialect, fields, converters)
[ "def", "from_zipfile", "(", "cls", ",", "path", ",", "filename", ",", "encoding", ",", "dialect", ",", "fields", ",", "converters", ")", ":", "stream", "=", "ZipReader", "(", "path", ",", "filename", ")", ".", "readlines", "(", "encoding", ")", "return", "cls", "(", "stream", ",", "dialect", ",", "fields", ",", "converters", ")" ]
Read delimited text from zipfile.
[ "Read", "delimited", "text", "from", "zipfile", "." ]
train
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/fileio.py#L77-L81
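A minimal usage sketch for the classmethod above, assuming DelimitedReader is importable from foil.fileio and that the resulting reader is iterable; the archive name, field names, and converters here are all hypothetical:

import csv
from foil.fileio import DelimitedReader  # assumed import path, per the record above

reader = DelimitedReader.from_zipfile(
    path='archive.zip',        # hypothetical archive
    filename='data.csv',       # hypothetical member file
    encoding='utf-8',
    dialect=csv.excel,
    fields=('name', 'count'),  # hypothetical field names
    converters=(str, int),     # hypothetical converters
)
for record in reader:          # assumes the reader is iterable
    print(record)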
portfoliome/foil
foil/fileio.py
DelimitedSubsetReader.from_file
def from_file(cls, path, encoding, dialect, fields, converters, field_index): """Read delimited text from a text file.""" return cls(open(path, 'r', encoding=encoding), dialect, fields, converters, field_index)
python
def from_file(cls, path, encoding, dialect, fields, converters, field_index): """Read delimited text from a text file.""" return cls(open(path, 'r', encoding=encoding), dialect, fields, converters, field_index)
[ "def", "from_file", "(", "cls", ",", "path", ",", "encoding", ",", "dialect", ",", "fields", ",", "converters", ",", "field_index", ")", ":", "return", "cls", "(", "open", "(", "path", ",", "'r'", ",", "encoding", "=", "encoding", ")", ",", "dialect", ",", "fields", ",", "converters", ",", "field_index", ")" ]
Read delimited text from a text file.
[ "Read", "delimited", "text", "from", "a", "text", "file", "." ]
train
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/fileio.py#L125-L128
portfoliome/foil
foil/fileio.py
ZipReader.read_bytes
def read_bytes(self): """Read content into byte string.""" with ZipFile(self.path, mode='r') as archive: return archive.read(self.filename)
python
def read_bytes(self): """Read content into byte string.""" with ZipFile(self.path, mode='r') as archive: return archive.read(self.filename)
[ "def", "read_bytes", "(", "self", ")", ":", "with", "ZipFile", "(", "self", ".", "path", ",", "mode", "=", "'r'", ")", "as", "archive", ":", "return", "archive", ".", "read", "(", "self", ".", "filename", ")" ]
Read content into byte string.
[ "Read", "content", "into", "byte", "string", "." ]
train
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/fileio.py#L156-L160
portfoliome/foil
foil/fileio.py
ZipReader.readlines_bytes
def readlines_bytes(self): """Read content into byte str line iterator.""" with open_zipfile_archive(self.path, self.filename) as file: for line in file: yield line.rstrip(b'\r\n')
python
def readlines_bytes(self): """Read content into byte str line iterator.""" with open_zipfile_archive(self.path, self.filename) as file: for line in file: yield line.rstrip(b'\r\n')
[ "def", "readlines_bytes", "(", "self", ")", ":", "with", "open_zipfile_archive", "(", "self", ".", "path", ",", "self", ".", "filename", ")", "as", "file", ":", "for", "line", "in", "file", ":", "yield", "line", ".", "rstrip", "(", "b'\\r\\n'", ")" ]
Read content into byte str line iterator.
[ "Read", "content", "into", "byte", "str", "line", "iterator", "." ]
train
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/fileio.py#L167-L172
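The two ZipReader methods above pair naturally; a short sketch, assuming ZipReader is importable from foil.fileio and that 'archive.zip' and its member 'notes.txt' exist (both names are hypothetical):

from foil.fileio import ZipReader  # assumed import path

reader = ZipReader('archive.zip', 'notes.txt')
raw = reader.read_bytes()               # the whole member as one byte string
for line in reader.readlines_bytes():   # lazy, line by line
    print(line)                         # bytes, trailing CR/LF stripped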
Capitains/Nautilus
capitains_nautilus/cts/resolver/base.py
ProtoNautilusCtsResolver.xmlparse
def xmlparse(self, file): """ Parse a XML file :param file: Opened File :return: Tree """ if self.CACHE_FULL_TEI is True: return self.get_or( _cache_key("Nautilus", self.name, "File", "Tree", file.name), super(ProtoNautilusCtsResolver, self).xmlparse, file ) return super(ProtoNautilusCtsResolver, self).xmlparse(file)
python
def xmlparse(self, file): """ Parse a XML file :param file: Opened File :return: Tree """ if self.CACHE_FULL_TEI is True: return self.get_or( _cache_key("Nautilus", self.name, "File", "Tree", file.name), super(ProtoNautilusCtsResolver, self).xmlparse, file ) return super(ProtoNautilusCtsResolver, self).xmlparse(file)
[ "def", "xmlparse", "(", "self", ",", "file", ")", ":", "if", "self", ".", "CACHE_FULL_TEI", "is", "True", ":", "return", "self", ".", "get_or", "(", "_cache_key", "(", "\"Nautilus\"", ",", "self", ".", "name", ",", "\"File\"", ",", "\"Tree\"", ",", "file", ".", "name", ")", ",", "super", "(", "ProtoNautilusCtsResolver", ",", "self", ")", ".", "xmlparse", ",", "file", ")", "return", "super", "(", "ProtoNautilusCtsResolver", ",", "self", ")", ".", "xmlparse", "(", "file", ")" ]
Parse a XML file :param file: Opened File :return: Tree
[ "Parse", "a", "XML", "file" ]
train
https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/cts/resolver/base.py#L48-L59
Capitains/Nautilus
capitains_nautilus/cts/resolver/base.py
ProtoNautilusCtsResolver.get_or
def get_or(self, cache_key, callback, *args, **kwargs): """ Get or set the cache using callback and arguments :param cache_key: Cache key for given resource :param callback: Callback if object does not exist :param args: Ordered Argument for the callback :param kwargs: Keyword argument for the callback :return: Output of the callback """ cached = self.cache.get(cache_key) if cached is not None: return cached else: try: output = callback(*args, **kwargs) except MyCapytain.errors.UnknownCollection as E: raise UnknownCollection(str(E)) except Exception as E: raise E self.cache.set(cache_key, output, self.TIMEOUT) return output
python
def get_or(self, cache_key, callback, *args, **kwargs): """ Get or set the cache using callback and arguments :param cache_key: Cache key for given resource :param callback: Callback if object does not exist :param args: Ordered Argument for the callback :param kwargs: Keyword argument for the callback :return: Output of the callback """ cached = self.cache.get(cache_key) if cached is not None: return cached else: try: output = callback(*args, **kwargs) except MyCapytain.errors.UnknownCollection as E: raise UnknownCollection(str(E)) except Exception as E: raise E self.cache.set(cache_key, output, self.TIMEOUT) return output
[ "def", "get_or", "(", "self", ",", "cache_key", ",", "callback", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "cached", "=", "self", ".", "cache", ".", "get", "(", "cache_key", ")", "if", "cached", "is", "not", "None", ":", "return", "cached", "else", ":", "try", ":", "output", "=", "callback", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "MyCapytain", ".", "errors", ".", "UnknownCollection", "as", "E", ":", "raise", "UnknownCollection", "(", "str", "(", "E", ")", ")", "except", "Exception", "as", "E", ":", "raise", "E", "self", ".", "cache", ".", "set", "(", "cache_key", ",", "output", ",", "self", ".", "TIMEOUT", ")", "return", "output" ]
Get or set the cache using callback and arguments :param cache_key: Cache key for given resource :param callback: Callback if object does not exist :param args: Ordered Argument for the callback :param kwargs: Keyword argument for the callback :return: Output of the callback
[ "Get", "or", "set", "the", "cache", "using", "callback", "and", "arguments" ]
train
https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/cts/resolver/base.py#L61-L81
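get_or implements a read-through cache: serve the cached value when present, otherwise compute it, store it with the resolver's TIMEOUT, and return it. A self-contained sketch of the same pattern, using a plain dict as a stand-in for the resolver's real cache backend:

def read_through(cache, key, callback, *args, **kwargs):
    # Return the cached value if present, otherwise compute and store it.
    cached = cache.get(key)
    if cached is not None:
        return cached
    output = callback(*args, **kwargs)
    cache[key] = output  # the resolver's version also passes self.TIMEOUT here
    return output

cache = {}
print(read_through(cache, 'answer', lambda: 6 * 7))  # computed: 42
print(read_through(cache, 'answer', lambda: 0))      # served from cache: 42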
Capitains/Nautilus
capitains_nautilus/cts/resolver/base.py
ProtoNautilusCtsResolver.read
def read(self, identifier, path=None): """ Read a text object given an identifier and a path :param identifier: Identifier of the text :param path: Path of the text files :return: Text """ if self.CACHE_FULL_TEI is True: o = self.cache.get(_cache_key(self.texts_parsed_cache_key, identifier)) if o is not None: return o else: with open(path) as f: o = Text(urn=identifier, resource=self.xmlparse(f)) self.cache.set(_cache_key(self.texts_parsed_cache_key, identifier), o) else: with open(path) as f: o = Text(urn=identifier, resource=self.xmlparse(f)) return o
python
def read(self, identifier, path=None): """ Read a text object given an identifier and a path :param identifier: Identifier of the text :param path: Path of the text files :return: Text """ if self.CACHE_FULL_TEI is True: o = self.cache.get(_cache_key(self.texts_parsed_cache_key, identifier)) if o is not None: return o else: with open(path) as f: o = Text(urn=identifier, resource=self.xmlparse(f)) self.cache.set(_cache_key(self.texts_parsed_cache_key, identifier), o) else: with open(path) as f: o = Text(urn=identifier, resource=self.xmlparse(f)) return o
[ "def", "read", "(", "self", ",", "identifier", ",", "path", "=", "None", ")", ":", "if", "self", ".", "CACHE_FULL_TEI", "is", "True", ":", "o", "=", "self", ".", "cache", ".", "get", "(", "_cache_key", "(", "self", ".", "texts_parsed_cache_key", ",", "identifier", ")", ")", "if", "o", "is", "not", "None", ":", "return", "o", "else", ":", "with", "open", "(", "path", ")", "as", "f", ":", "o", "=", "Text", "(", "urn", "=", "identifier", ",", "resource", "=", "self", ".", "xmlparse", "(", "f", ")", ")", "self", ".", "cache", ".", "set", "(", "_cache_key", "(", "self", ".", "texts_parsed_cache_key", ",", "identifier", ")", ",", "o", ")", "else", ":", "with", "open", "(", "path", ")", "as", "f", ":", "o", "=", "Text", "(", "urn", "=", "identifier", ",", "resource", "=", "self", ".", "xmlparse", "(", "f", ")", ")", "return", "o" ]
Read a text object given an identifier and a path :param identifier: Identifier of the text :param path: Path of the text files :return: Text
[ "Read", "a", "text", "object", "given", "an", "identifier", "and", "a", "path" ]
train
https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/cts/resolver/base.py#L83-L101
Capitains/Nautilus
capitains_nautilus/cts/resolver/base.py
ProtoNautilusCtsResolver.parse
def parse(self, resource=None): """ Parse a list of directories ans :param resource: List of folders """ if resource is None: resource = self.__resources__ self.inventory = self.dispatcher.collection try: self._parse(resource) except MyCapytain.errors.UndispatchedTextError as E: if self.RAISE_ON_UNDISPATCHED is True: raise UndispatchedTextError(E) self.inventory = self.dispatcher.collection return self.inventory
python
def parse(self, resource=None): """ Parse a list of directories ans :param resource: List of folders """ if resource is None: resource = self.__resources__ self.inventory = self.dispatcher.collection try: self._parse(resource) except MyCapytain.errors.UndispatchedTextError as E: if self.RAISE_ON_UNDISPATCHED is True: raise UndispatchedTextError(E) self.inventory = self.dispatcher.collection return self.inventory
[ "def", "parse", "(", "self", ",", "resource", "=", "None", ")", ":", "if", "resource", "is", "None", ":", "resource", "=", "self", ".", "__resources__", "self", ".", "inventory", "=", "self", ".", "dispatcher", ".", "collection", "try", ":", "self", ".", "_parse", "(", "resource", ")", "except", "MyCapytain", ".", "errors", ".", "UndispatchedTextError", "as", "E", ":", "if", "self", ".", "RAISE_ON_UNDISPATCHED", "is", "True", ":", "raise", "UndispatchedTextError", "(", "E", ")", "self", ".", "inventory", "=", "self", ".", "dispatcher", ".", "collection", "return", "self", ".", "inventory" ]
Parse a list of directories :param resource: List of folders
[ "Parse", "a", "list", "of", "directories", "ans" ]
train
https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/cts/resolver/base.py#L103-L120
Capitains/Nautilus
capitains_nautilus/cts/resolver/base.py
ProtoNautilusCtsResolver.getReffs
def getReffs(self, textId, level=1, subreference=None): """ Retrieve the siblings of a textual node :param textId: PrototypeText Identifier :type textId: str :param level: Depth for retrieval :type level: int :param subreference: Passage Reference :type subreference: str :return: List of references :rtype: [str] """ return self.get_or( self.__cache_key_reffs__(textId, level, subreference), super(ProtoNautilusCtsResolver, self).getReffs, textId, level, subreference )
python
def getReffs(self, textId, level=1, subreference=None): """ Retrieve the siblings of a textual node :param textId: PrototypeText Identifier :type textId: str :param level: Depth for retrieval :type level: int :param subreference: Passage Reference :type subreference: str :return: List of references :rtype: [str] """ return self.get_or( self.__cache_key_reffs__(textId, level, subreference), super(ProtoNautilusCtsResolver, self).getReffs, textId, level, subreference )
[ "def", "getReffs", "(", "self", ",", "textId", ",", "level", "=", "1", ",", "subreference", "=", "None", ")", ":", "return", "self", ".", "get_or", "(", "self", ".", "__cache_key_reffs__", "(", "textId", ",", "level", ",", "subreference", ")", ",", "super", "(", "ProtoNautilusCtsResolver", ",", "self", ")", ".", "getReffs", ",", "textId", ",", "level", ",", "subreference", ")" ]
Retrieve the references of a textual node :param textId: PrototypeText Identifier :type textId: str :param level: Depth for retrieval :type level: int :param subreference: Passage Reference :type subreference: str :return: List of references :rtype: [str]
[ "Retrieve", "the", "siblings", "of", "a", "textual", "node" ]
train
https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/cts/resolver/base.py#L186-L201
Capitains/Nautilus
capitains_nautilus/cts/resolver/base.py
ProtoNautilusCtsResolver.getTextualNode
def getTextualNode(self, textId, subreference=None, prevnext=False, metadata=False): """ Retrieve a text node from the API :param textId: PrototypeText Identifier :type textId: str :param subreference: Passage Reference :type subreference: str :param prevnext: Retrieve graph representing previous and next passage :type prevnext: boolean :param metadata: Retrieve metadata about the passage and the text :type metadata: boolean :return: Passage :rtype: Passage """ key = _cache_key("Nautilus", self.name, "Passage", textId, subreference) o = self.cache.get(key) if o is not None: return o text, text_metadata = self.__getText__(textId) if subreference is not None: subreference = Reference(subreference) passage = text.getTextualNode(subreference) passage.set_metadata_from_collection(text_metadata) self.cache.set(key, passage) return passage
python
def getTextualNode(self, textId, subreference=None, prevnext=False, metadata=False): """ Retrieve a text node from the API :param textId: PrototypeText Identifier :type textId: str :param subreference: Passage Reference :type subreference: str :param prevnext: Retrieve graph representing previous and next passage :type prevnext: boolean :param metadata: Retrieve metadata about the passage and the text :type metadata: boolean :return: Passage :rtype: Passage """ key = _cache_key("Nautilus", self.name, "Passage", textId, subreference) o = self.cache.get(key) if o is not None: return o text, text_metadata = self.__getText__(textId) if subreference is not None: subreference = Reference(subreference) passage = text.getTextualNode(subreference) passage.set_metadata_from_collection(text_metadata) self.cache.set(key, passage) return passage
[ "def", "getTextualNode", "(", "self", ",", "textId", ",", "subreference", "=", "None", ",", "prevnext", "=", "False", ",", "metadata", "=", "False", ")", ":", "key", "=", "_cache_key", "(", "\"Nautilus\"", ",", "self", ".", "name", ",", "\"Passage\"", ",", "textId", ",", "subreference", ")", "o", "=", "self", ".", "cache", ".", "get", "(", "key", ")", "if", "o", "is", "not", "None", ":", "return", "o", "text", ",", "text_metadata", "=", "self", ".", "__getText__", "(", "textId", ")", "if", "subreference", "is", "not", "None", ":", "subreference", "=", "Reference", "(", "subreference", ")", "passage", "=", "text", ".", "getTextualNode", "(", "subreference", ")", "passage", ".", "set_metadata_from_collection", "(", "text_metadata", ")", "self", ".", "cache", ".", "set", "(", "key", ",", "passage", ")", "return", "passage" ]
Retrieve a text node from the API :param textId: PrototypeText Identifier :type textId: str :param subreference: Passage Reference :type subreference: str :param prevnext: Retrieve graph representing previous and next passage :type prevnext: boolean :param metadata: Retrieve metadata about the passage and the text :type metadata: boolean :return: Passage :rtype: Passage
[ "Retrieve", "a", "text", "node", "from", "the", "API" ]
train
https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/cts/resolver/base.py#L206-L231
Capitains/Nautilus
capitains_nautilus/cts/resolver/base.py
ProtoNautilusCtsResolver.getSiblings
def getSiblings(self, textId, subreference): """ Retrieve the siblings of a textual node :param textId: PrototypeText Identifier :type textId: str :param subreference: Passage Reference :type subreference: str :return: Tuple of references :rtype: (str, str) """ key = _cache_key("Nautilus", self.name, "Siblings", textId, subreference) o = self.cache.get(key) if o is not None: return o passage = self.getTextualNode(textId, subreference, prevnext=True) siblings = passage.siblingsId self.cache.set(key, siblings) return siblings
python
def getSiblings(self, textId, subreference): """ Retrieve the siblings of a textual node :param textId: PrototypeText Identifier :type textId: str :param subreference: Passage Reference :type subreference: str :return: Tuple of references :rtype: (str, str) """ key = _cache_key("Nautilus", self.name, "Siblings", textId, subreference) o = self.cache.get(key) if o is not None: return o passage = self.getTextualNode(textId, subreference, prevnext=True) siblings = passage.siblingsId self.cache.set(key, siblings) return siblings
[ "def", "getSiblings", "(", "self", ",", "textId", ",", "subreference", ")", ":", "key", "=", "_cache_key", "(", "\"Nautilus\"", ",", "self", ".", "name", ",", "\"Siblings\"", ",", "textId", ",", "subreference", ")", "o", "=", "self", ".", "cache", ".", "get", "(", "key", ")", "if", "o", "is", "not", "None", ":", "return", "o", "passage", "=", "self", ".", "getTextualNode", "(", "textId", ",", "subreference", ",", "prevnext", "=", "True", ")", "siblings", "=", "passage", ".", "siblingsId", "self", ".", "cache", ".", "set", "(", "key", ",", "siblings", ")", "return", "siblings" ]
Retrieve the siblings of a textual node :param textId: PrototypeText Identifier :type textId: str :param subreference: Passage Reference :type subreference: str :return: Tuple of references :rtype: (str, str)
[ "Retrieve", "the", "siblings", "of", "a", "textual", "node" ]
train
https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/cts/resolver/base.py#L233-L250
Capitains/Nautilus
capitains_nautilus/cts/resolver/base.py
NautilusCtsResolver.getMetadata
def getMetadata(self, objectId=None, **filters): """ Request metadata about a text or a collection :param objectId: Object Identifier to filter on :type objectId: str :param filters: Kwargs parameters. :type filters: dict :return: Collection """ return self.get_or( _cache_key("Nautilus", self.name, "GetMetadata", objectId), super(ProtoNautilusCtsResolver, self).getMetadata, objectId )
python
def getMetadata(self, objectId=None, **filters): """ Request metadata about a text or a collection :param objectId: Object Identifier to filter on :type objectId: str :param filters: Kwargs parameters. :type filters: dict :return: Collection """ return self.get_or( _cache_key("Nautilus", self.name, "GetMetadata", objectId), super(ProtoNautilusCtsResolver, self).getMetadata, objectId )
[ "def", "getMetadata", "(", "self", ",", "objectId", "=", "None", ",", "*", "*", "filters", ")", ":", "return", "self", ".", "get_or", "(", "_cache_key", "(", "\"Nautilus\"", ",", "self", ".", "name", ",", "\"GetMetadata\"", ",", "objectId", ")", ",", "super", "(", "ProtoNautilusCtsResolver", ",", "self", ")", ".", "getMetadata", ",", "objectId", ")" ]
Request metadata about a text or a collection :param objectId: Object Identifier to filter on :type objectId: str :param filters: Kwargs parameters. :type filters: dict :return: Collection
[ "Request", "metadata", "about", "a", "text", "or", "a", "collection" ]
train
https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/cts/resolver/base.py#L288-L300
Capitains/Nautilus
capitains_nautilus/cts/resolver/base.py
_SparqlSharedResolver._dispatch
def _dispatch(self, textgroup, directory): """ Sparql dispatcher do not need to dispatch works, as the link is DB stored through Textgroup :param textgroup: A Textgroup object :param directory: The path in which we found the textgroup :return: """ self.dispatcher.dispatch(textgroup, path=directory)
python
def _dispatch(self, textgroup, directory): """ Sparql dispatcher do not need to dispatch works, as the link is DB stored through Textgroup :param textgroup: A Textgroup object :param directory: The path in which we found the textgroup :return: """ self.dispatcher.dispatch(textgroup, path=directory)
[ "def", "_dispatch", "(", "self", ",", "textgroup", ",", "directory", ")", ":", "self", ".", "dispatcher", ".", "dispatch", "(", "textgroup", ",", "path", "=", "directory", ")" ]
Sparql dispatcher does not need to dispatch works, as the link is stored in the DB through the Textgroup :param textgroup: A Textgroup object :param directory: The path in which we found the textgroup :return:
[ "Sparql", "dispatcher", "do", "not", "need", "to", "dispatch", "works", "as", "the", "link", "is", "DB", "stored", "through", "Textgroup" ]
train
https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/cts/resolver/base.py#L319-L326
PolyJIT/benchbuild
benchbuild/utils/user_interface.py
ask
def ask(question, default_answer=False, default_answer_str="no"): """ Ask for user input. This asks a yes/no question with a preset default. You can bypass the user-input and fetch the default answer, if you set Args: question: The question to ask on stdout. default_answer: The default value to return. default_answer_str: The default answer string that we present to the user. Tests: >>> os.putenv("TEST", "yes"); ask("Test?", default_answer=True) True >>> os.putenv("TEST", "yes"); ask("Test?", default_answer=False) False """ response = default_answer def should_ignore_tty(): """ Check, if we want to ignore an opened tty result. """ ret_to_bool = {"yes": True, "no": False, "true": True, "false": False} envs = [os.getenv("CI", default="no"), os.getenv("TEST", default="no")] vals = [ret_to_bool[val] for val in envs if val in ret_to_bool] return any(vals) ignore_stdin_istty = should_ignore_tty() has_tty = sys.stdin.isatty() and not ignore_stdin_istty if has_tty: response = query_yes_no(question, default_answer_str) else: LOG.debug("NoTTY: %s -> %s", question, response) return response
python
def ask(question, default_answer=False, default_answer_str="no"): """ Ask for user input. This asks a yes/no question with a preset default. You can bypass the user-input and fetch the default answer, if you set Args: question: The question to ask on stdout. default_answer: The default value to return. default_answer_str: The default answer string that we present to the user. Tests: >>> os.putenv("TEST", "yes"); ask("Test?", default_answer=True) True >>> os.putenv("TEST", "yes"); ask("Test?", default_answer=False) False """ response = default_answer def should_ignore_tty(): """ Check, if we want to ignore an opened tty result. """ ret_to_bool = {"yes": True, "no": False, "true": True, "false": False} envs = [os.getenv("CI", default="no"), os.getenv("TEST", default="no")] vals = [ret_to_bool[val] for val in envs if val in ret_to_bool] return any(vals) ignore_stdin_istty = should_ignore_tty() has_tty = sys.stdin.isatty() and not ignore_stdin_istty if has_tty: response = query_yes_no(question, default_answer_str) else: LOG.debug("NoTTY: %s -> %s", question, response) return response
[ "def", "ask", "(", "question", ",", "default_answer", "=", "False", ",", "default_answer_str", "=", "\"no\"", ")", ":", "response", "=", "default_answer", "def", "should_ignore_tty", "(", ")", ":", "\"\"\"\n Check, if we want to ignore an opened tty result.\n \"\"\"", "ret_to_bool", "=", "{", "\"yes\"", ":", "True", ",", "\"no\"", ":", "False", ",", "\"true\"", ":", "True", ",", "\"false\"", ":", "False", "}", "envs", "=", "[", "os", ".", "getenv", "(", "\"CI\"", ",", "default", "=", "\"no\"", ")", ",", "os", ".", "getenv", "(", "\"TEST\"", ",", "default", "=", "\"no\"", ")", "]", "vals", "=", "[", "ret_to_bool", "[", "val", "]", "for", "val", "in", "envs", "if", "val", "in", "ret_to_bool", "]", "return", "any", "(", "vals", ")", "ignore_stdin_istty", "=", "should_ignore_tty", "(", ")", "has_tty", "=", "sys", ".", "stdin", ".", "isatty", "(", ")", "and", "not", "ignore_stdin_istty", "if", "has_tty", ":", "response", "=", "query_yes_no", "(", "question", ",", "default_answer_str", ")", "else", ":", "LOG", ".", "debug", "(", "\"NoTTY: %s -> %s\"", ",", "question", ",", "response", ")", "return", "response" ]
Ask for user input. This asks a yes/no question with a preset default. You can bypass the user-input and fetch the default answer, if you set Args: question: The question to ask on stdout. default_answer: The default value to return. default_answer_str: The default answer string that we present to the user. Tests: >>> os.putenv("TEST", "yes"); ask("Test?", default_answer=True) True >>> os.putenv("TEST", "yes"); ask("Test?", default_answer=False) False
[ "Ask", "for", "user", "input", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/user_interface.py#L46-L84
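Because ask() checks the CI and TEST environment variables before touching the TTY, scripts can force the default answer without any user input; a short sketch mirroring the doctests above (the import path is assumed from the record):

import os
from benchbuild.utils.user_interface import ask  # assumed import path

os.environ['TEST'] = 'yes'  # mark the session as non-interactive
assert ask('Delete results?', default_answer=False) is False
assert ask('Continue?', default_answer=True) is True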
davidhuser/dhis2.py
dhis2/utils.py
load_csv
def load_csv(path, delimiter=','): """ Load CSV file from path and yield CSV rows Usage: for row in load_csv('/path/to/file'): print(row) or list(load_csv('/path/to/file')) :param path: file path :param delimiter: CSV delimiter :return: a generator where __next__ is a row of the CSV """ try: with open(path, 'rb') as csvfile: reader = DictReader(csvfile, delimiter=delimiter) for row in reader: yield row except (OSError, IOError): raise ClientException("File not found: {}".format(path))
python
def load_csv(path, delimiter=','): """ Load CSV file from path and yield CSV rows Usage: for row in load_csv('/path/to/file'): print(row) or list(load_csv('/path/to/file')) :param path: file path :param delimiter: CSV delimiter :return: a generator where __next__ is a row of the CSV """ try: with open(path, 'rb') as csvfile: reader = DictReader(csvfile, delimiter=delimiter) for row in reader: yield row except (OSError, IOError): raise ClientException("File not found: {}".format(path))
[ "def", "load_csv", "(", "path", ",", "delimiter", "=", "','", ")", ":", "try", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "csvfile", ":", "reader", "=", "DictReader", "(", "csvfile", ",", "delimiter", "=", "delimiter", ")", "for", "row", "in", "reader", ":", "yield", "row", "except", "(", "OSError", ",", "IOError", ")", ":", "raise", "ClientException", "(", "\"File not found: {}\"", ".", "format", "(", "path", ")", ")" ]
Load CSV file from path and yield CSV rows Usage: for row in load_csv('/path/to/file'): print(row) or list(load_csv('/path/to/file')) :param path: file path :param delimiter: CSV delimiter :return: a generator where __next__ is a row of the CSV
[ "Load", "CSV", "file", "from", "path", "and", "yield", "CSV", "rows" ]
train
https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L25-L46
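load_csv yields DictReader rows lazily, so it suits both streaming and list(...); a minimal sketch, assuming '/tmp/people.csv' exists with a header row and a 'name' column (both hypothetical). Note the 'rb' open mode matches csv.DictReader on Python 2; on Python 3 the file would need to be opened in text mode:

from dhis2.utils import load_csv  # assumed import path

for row in load_csv('/tmp/people.csv', delimiter=';'):
    print(row['name'])  # hypothetical column name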
davidhuser/dhis2.py
dhis2/utils.py
load_json
def load_json(path): """ Load JSON file from path :param path: file path :return: A Python object (e.g. a dict) """ try: with open(path, 'r') as json_file: return json.load(json_file) except (OSError, IOError): raise ClientException("File not found: {}".format(path))
python
def load_json(path): """ Load JSON file from path :param path: file path :return: A Python object (e.g. a dict) """ try: with open(path, 'r') as json_file: return json.load(json_file) except (OSError, IOError): raise ClientException("File not found: {}".format(path))
[ "def", "load_json", "(", "path", ")", ":", "try", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "json_file", ":", "return", "json", ".", "load", "(", "json_file", ")", "except", "(", "OSError", ",", "IOError", ")", ":", "raise", "ClientException", "(", "\"File not found: {}\"", ".", "format", "(", "path", ")", ")" ]
Load JSON file from path :param path: file path :return: A Python object (e.g. a dict)
[ "Load", "JSON", "file", "from", "path", ":", "param", "path", ":", "file", "path", ":", "return", ":", "A", "Python", "object", "(", "e", ".", "g", ".", "a", "dict", ")" ]
train
https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L49-L59
davidhuser/dhis2.py
dhis2/utils.py
partition_payload
def partition_payload(data, key, thresh): """ Yield partitions of a payload e.g. with a threshold of 2: { "dataElements": [1, 2, 3] } --> { "dataElements": [1, 2] } and { "dataElements": [3] } :param data: the payload :param key: the key of the dict to partition :param thresh: the maximum value of a chunk :return: a generator where __next__ is a partition of the payload """ data = data[key] for i in range(0, len(data), thresh): yield {key: data[i:i + thresh]}
python
def partition_payload(data, key, thresh): """ Yield partitions of a payload e.g. with a threshold of 2: { "dataElements": [1, 2, 3] } --> { "dataElements": [1, 2] } and { "dataElements": [3] } :param data: the payload :param key: the key of the dict to partition :param thresh: the maximum value of a chunk :return: a generator where __next__ is a partition of the payload """ data = data[key] for i in range(0, len(data), thresh): yield {key: data[i:i + thresh]}
[ "def", "partition_payload", "(", "data", ",", "key", ",", "thresh", ")", ":", "data", "=", "data", "[", "key", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "data", ")", ",", "thresh", ")", ":", "yield", "{", "key", ":", "data", "[", "i", ":", "i", "+", "thresh", "]", "}" ]
Yield partitions of a payload e.g. with a threshold of 2: { "dataElements": [1, 2, 3] } --> { "dataElements": [1, 2] } and { "dataElements": [3] } :param data: the payload :param key: the key of the dict to partition :param thresh: the maximum value of a chunk :return: a generator where __next__ is a partition of the payload
[ "Yield", "partitions", "of", "a", "payload" ]
train
https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L62-L81
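A worked call against the implementation above, matching its docstring (assuming partition_payload is in scope):

payload = {'dataElements': [1, 2, 3, 4, 5]}
chunks = list(partition_payload(payload, key='dataElements', thresh=2))
# -> [{'dataElements': [1, 2]}, {'dataElements': [3, 4]}, {'dataElements': [5]}]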
davidhuser/dhis2.py
dhis2/utils.py
search_auth_file
def search_auth_file(filename='dish.json'): """ Search filename in - A) DHIS_HOME (env variable) - B) current user's home folder :param filename: the filename to search for :return: full path of filename """ if 'DHIS_HOME' in os.environ: return os.path.join(os.environ['DHIS_HOME'], filename) else: home_path = os.path.expanduser(os.path.join('~')) for root, dirs, files in os.walk(home_path): if filename in files: return os.path.join(root, filename) raise ClientException("'{}' not found - searched in $DHIS_HOME and in home folder".format(filename))
python
def search_auth_file(filename='dish.json'): """ Search filename in - A) DHIS_HOME (env variable) - B) current user's home folder :param filename: the filename to search for :return: full path of filename """ if 'DHIS_HOME' in os.environ: return os.path.join(os.environ['DHIS_HOME'], filename) else: home_path = os.path.expanduser(os.path.join('~')) for root, dirs, files in os.walk(home_path): if filename in files: return os.path.join(root, filename) raise ClientException("'{}' not found - searched in $DHIS_HOME and in home folder".format(filename))
[ "def", "search_auth_file", "(", "filename", "=", "'dish.json'", ")", ":", "if", "'DHIS_HOME'", "in", "os", ".", "environ", ":", "return", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'DHIS_HOME'", "]", ",", "filename", ")", "else", ":", "home_path", "=", "os", ".", "path", ".", "expanduser", "(", "os", ".", "path", ".", "join", "(", "'~'", ")", ")", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "home_path", ")", ":", "if", "filename", "in", "files", ":", "return", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", "raise", "ClientException", "(", "\"'{}' not found - searched in $DHIS_HOME and in home folder\"", ".", "format", "(", "filename", ")", ")" ]
Search filename in - A) DHIS_HOME (env variable) - B) current user's home folder :param filename: the filename to search for :return: full path of filename
[ "Search", "filename", "in", "-", "A", ")", "DHIS_HOME", "(", "env", "variable", ")", "-", "B", ")", "current", "user", "s", "home", "folder", ":", "param", "filename", ":", "the", "filename", "to", "search", "for", ":", "return", ":", "full", "path", "of", "filename" ]
train
https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L84-L99
davidhuser/dhis2.py
dhis2/utils.py
version_to_int
def version_to_int(value): """ Convert version info to integer :param value: the version received from system/info, e.g. "2.28" :return: integer from version, e.g. 28, None if it couldn't be parsed """ # remove '-SNAPSHOT' value = value.replace('-SNAPSHOT', '') # remove '-RCx' if '-RC' in value: value = value.split('-RC', 1)[0] try: return int(value.split('.')[1]) except (ValueError, IndexError): return
python
def version_to_int(value): """ Convert version info to integer :param value: the version received from system/info, e.g. "2.28" :return: integer from version, e.g. 28, None if it couldn't be parsed """ # remove '-SNAPSHOT' value = value.replace('-SNAPSHOT', '') # remove '-RCx' if '-RC' in value: value = value.split('-RC', 1)[0] try: return int(value.split('.')[1]) except (ValueError, IndexError): return
[ "def", "version_to_int", "(", "value", ")", ":", "# remove '-SNAPSHOT'", "value", "=", "value", ".", "replace", "(", "'-SNAPSHOT'", ",", "''", ")", "# remove '-RCx'", "if", "'-RC'", "in", "value", ":", "value", "=", "value", ".", "split", "(", "'-RC'", ",", "1", ")", "[", "0", "]", "try", ":", "return", "int", "(", "value", ".", "split", "(", "'.'", ")", "[", "1", "]", ")", "except", "(", "ValueError", ",", "IndexError", ")", ":", "return" ]
Convert version info to integer :param value: the version received from system/info, e.g. "2.28" :return: integer from version, e.g. 28, None if it couldn't be parsed
[ "Convert", "version", "info", "to", "integer", ":", "param", "value", ":", "the", "version", "received", "from", "system", "/", "info", "e", ".", "g", ".", "2", ".", "28", ":", "return", ":", "integer", "from", "version", "e", ".", "g", ".", "28", "None", "if", "it", "couldn", "t", "be", "parsed" ]
train
https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L102-L116
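A few worked calls, consistent with the implementation above (assuming version_to_int is in scope):

version_to_int('2.28')           # -> 28
version_to_int('2.31-SNAPSHOT')  # -> 31  ('-SNAPSHOT' stripped first)
version_to_int('2.33-RC1')       # -> 33  (split on '-RC')
version_to_int('banana')         # -> None (no '.' component, IndexError path)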
davidhuser/dhis2.py
dhis2/utils.py
generate_uid
def generate_uid(): """ Create DHIS2 UID matching to Regex ^[A-Za-z][A-Za-z0-9]{10}$ :return: UID string """ # first must be a letter first = random.choice(string.ascii_letters) # rest must be letters or numbers rest = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(10)) return first + rest
python
def generate_uid(): """ Create DHIS2 UID matching to Regex ^[A-Za-z][A-Za-z0-9]{10}$ :return: UID string """ # first must be a letter first = random.choice(string.ascii_letters) # rest must be letters or numbers rest = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(10)) return first + rest
[ "def", "generate_uid", "(", ")", ":", "# first must be a letter", "first", "=", "random", ".", "choice", "(", "string", ".", "ascii_letters", ")", "# rest must be letters or numbers", "rest", "=", "''", ".", "join", "(", "random", ".", "choice", "(", "string", ".", "ascii_letters", "+", "string", ".", "digits", ")", "for", "_", "in", "range", "(", "10", ")", ")", "return", "first", "+", "rest" ]
Create DHIS2 UID matching to Regex ^[A-Za-z][A-Za-z0-9]{10}$ :return: UID string
[ "Create", "DHIS2", "UID", "matching", "to", "Regex", "^", "[", "A", "-", "Za", "-", "z", "]", "[", "A", "-", "Za", "-", "z0", "-", "9", "]", "{", "10", "}", "$", ":", "return", ":", "UID", "string" ]
train
https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L119-L129
davidhuser/dhis2.py
dhis2/utils.py
is_valid_uid
def is_valid_uid(uid): """ :return: True if it is a valid DHIS2 UID, False if not """ pattern = r'^[A-Za-z][A-Za-z0-9]{10}$' if not isinstance(uid, string_types): return False return bool(re.compile(pattern).match(uid))
python
def is_valid_uid(uid): """ :return: True if it is a valid DHIS2 UID, False if not """ pattern = r'^[A-Za-z][A-Za-z0-9]{10}$' if not isinstance(uid, string_types): return False return bool(re.compile(pattern).match(uid))
[ "def", "is_valid_uid", "(", "uid", ")", ":", "pattern", "=", "r'^[A-Za-z][A-Za-z0-9]{10}$'", "if", "not", "isinstance", "(", "uid", ",", "string_types", ")", ":", "return", "False", "return", "bool", "(", "re", ".", "compile", "(", "pattern", ")", ".", "match", "(", "uid", ")", ")" ]
:return: True if it is a valid DHIS2 UID, False if not
[ ":", "return", ":", "True", "if", "it", "is", "a", "valid", "DHIS2", "UID", "False", "if", "not" ]
train
https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L132-L139
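The two helpers above round-trip by construction, since generate_uid emits one letter followed by ten alphanumerics (assuming both functions are in scope):

uid = generate_uid()
assert is_valid_uid(uid)
assert not is_valid_uid('123abcdefgh')  # 11 chars, but must start with a letter
assert not is_valid_uid('abc')          # must be exactly 11 characters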
davidhuser/dhis2.py
dhis2/utils.py
pretty_json
def pretty_json(obj): """ Print JSON with indentation and colours :param obj: the object to print - can be a dict or a string """ if isinstance(obj, string_types): try: obj = json.loads(obj) except ValueError: raise ClientException("`obj` is not a json string") json_str = json.dumps(obj, sort_keys=True, indent=2) print(highlight(json_str, JsonLexer(), TerminalFormatter()))
python
def pretty_json(obj): """ Print JSON with indentation and colours :param obj: the object to print - can be a dict or a string """ if isinstance(obj, string_types): try: obj = json.loads(obj) except ValueError: raise ClientException("`obj` is not a json string") json_str = json.dumps(obj, sort_keys=True, indent=2) print(highlight(json_str, JsonLexer(), TerminalFormatter()))
[ "def", "pretty_json", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "string_types", ")", ":", "try", ":", "obj", "=", "json", ".", "loads", "(", "obj", ")", "except", "ValueError", ":", "raise", "ClientException", "(", "\"`obj` is not a json string\"", ")", "json_str", "=", "json", ".", "dumps", "(", "obj", ",", "sort_keys", "=", "True", ",", "indent", "=", "2", ")", "print", "(", "highlight", "(", "json_str", ",", "JsonLexer", "(", ")", ",", "TerminalFormatter", "(", ")", ")", ")" ]
Print JSON with indentation and colours :param obj: the object to print - can be a dict or a string
[ "Print", "JSON", "with", "indentation", "and", "colours", ":", "param", "obj", ":", "the", "object", "to", "print", "-", "can", "be", "a", "dict", "or", "a", "string" ]
train
https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L142-L153
davidhuser/dhis2.py
dhis2/utils.py
clean_obj
def clean_obj(obj, remove): """ Recursively remove keys from list/dict/dict-of-lists/list-of-keys/nested ..., e.g. remove all sharing keys or remove all 'user' fields This should result in the same as if running in bash: `jq del(.. | .publicAccess?, .userGroupAccesses?)` :param obj: the dict to remove keys from :param remove: keys to remove - can be a string or iterable """ if isinstance(remove, string_types): remove = [remove] try: iter(remove) except TypeError: raise ClientException("`remove` could not be removed from object: {}".format(repr(remove))) else: if isinstance(obj, dict): obj = { key: clean_obj(value, remove) for key, value in iteritems(obj) if key not in remove } elif isinstance(obj, list): obj = [ clean_obj(item, remove) for item in obj if item not in remove ] return obj
python
def clean_obj(obj, remove): """ Recursively remove keys from list/dict/dict-of-lists/list-of-keys/nested ..., e.g. remove all sharing keys or remove all 'user' fields This should result in the same as if running in bash: `jq del(.. | .publicAccess?, .userGroupAccesses?)` :param obj: the dict to remove keys from :param remove: keys to remove - can be a string or iterable """ if isinstance(remove, string_types): remove = [remove] try: iter(remove) except TypeError: raise ClientException("`remove` could not be removed from object: {}".format(repr(remove))) else: if isinstance(obj, dict): obj = { key: clean_obj(value, remove) for key, value in iteritems(obj) if key not in remove } elif isinstance(obj, list): obj = [ clean_obj(item, remove) for item in obj if item not in remove ] return obj
[ "def", "clean_obj", "(", "obj", ",", "remove", ")", ":", "if", "isinstance", "(", "remove", ",", "string_types", ")", ":", "remove", "=", "[", "remove", "]", "try", ":", "iter", "(", "remove", ")", "except", "TypeError", ":", "raise", "ClientException", "(", "\"`remove` could not be removed from object: {}\"", ".", "format", "(", "repr", "(", "remove", ")", ")", ")", "else", ":", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "obj", "=", "{", "key", ":", "clean_obj", "(", "value", ",", "remove", ")", "for", "key", ",", "value", "in", "iteritems", "(", "obj", ")", "if", "key", "not", "in", "remove", "}", "elif", "isinstance", "(", "obj", ",", "list", ")", ":", "obj", "=", "[", "clean_obj", "(", "item", ",", "remove", ")", "for", "item", "in", "obj", "if", "item", "not", "in", "remove", "]", "return", "obj" ]
Recursively remove keys from list/dict/dict-of-lists/list-of-keys/nested ..., e.g. remove all sharing keys or remove all 'user' fields This should result in the same as if running in bash: `jq del(.. | .publicAccess?, .userGroupAccesses?)` :param obj: the dict to remove keys from :param remove: keys to remove - can be a string or iterable
[ "Recursively", "remove", "keys", "from", "list", "/", "dict", "/", "dict", "-", "of", "-", "lists", "/", "list", "-", "of", "-", "keys", "/", "nested", "...", "e", ".", "g", ".", "remove", "all", "sharing", "keys", "or", "remove", "all", "user", "fields", "This", "should", "result", "in", "the", "same", "as", "if", "running", "in", "bash", ":", "jq", "del", "(", "..", "|", ".", "publicAccess?", ".", "userGroupAccesses?", ")", ":", "param", "obj", ":", "the", "dict", "to", "remove", "keys", "from", ":", "param", "remove", ":", "keys", "to", "remove", "-", "can", "be", "a", "string", "or", "iterable" ]
train
https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L156-L183
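A sketch of clean_obj stripping sharing metadata recursively, mirroring the jq comparison in its docstring (assuming clean_obj is in scope):

obj = {
    'name': 'ANC 1st visit',
    'publicAccess': 'rw------',
    'userGroupAccesses': [{'id': 'abc'}],
    'items': [{'id': 'x', 'publicAccess': 'r-------'}],
}
cleaned = clean_obj(obj, remove=('publicAccess', 'userGroupAccesses'))
# -> {'name': 'ANC 1st visit', 'items': [{'id': 'x'}]}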
BlueBrain/hpcbench
hpcbench/driver/benchmark.py
BenchmarkCategoryDriver.commands
def commands(self): """Get all commands of the benchmark category :return generator of string """ for child in self._children: with open(osp.join(child, YAML_REPORT_FILE)) as istr: command = yaml.safe_load(istr)['command'] yield ' '.join(map(six.moves.shlex_quote, command))
python
def commands(self): """Get all commands of the benchmark category :return generator of string """ for child in self._children: with open(osp.join(child, YAML_REPORT_FILE)) as istr: command = yaml.safe_load(istr)['command'] yield ' '.join(map(six.moves.shlex_quote, command))
[ "def", "commands", "(", "self", ")", ":", "for", "child", "in", "self", ".", "_children", ":", "with", "open", "(", "osp", ".", "join", "(", "child", ",", "YAML_REPORT_FILE", ")", ")", "as", "istr", ":", "command", "=", "yaml", ".", "safe_load", "(", "istr", ")", "[", "'command'", "]", "yield", "' '", ".", "join", "(", "map", "(", "six", ".", "moves", ".", "shlex_quote", ",", "command", ")", ")" ]
Get all commands of the benchmark category :return generator of string
[ "Get", "all", "commands", "of", "the", "benchmark", "category" ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/driver/benchmark.py#L101-L109
BlueBrain/hpcbench
hpcbench/driver/benchmark.py
BenchmarkCategoryDriver._module_env
def _module_env(self, execution): """Set current process environment according to execution `environment` and `modules` """ env = copy.copy(os.environ) try: for mod in execution.get('modules') or []: Module.load(mod) os.environ.update(execution.get('environment') or {}) yield finally: os.environ = env
python
def _module_env(self, execution): """Set current process environment according to execution `environment` and `modules` """ env = copy.copy(os.environ) try: for mod in execution.get('modules') or []: Module.load(mod) os.environ.update(execution.get('environment') or {}) yield finally: os.environ = env
[ "def", "_module_env", "(", "self", ",", "execution", ")", ":", "env", "=", "copy", ".", "copy", "(", "os", ".", "environ", ")", "try", ":", "for", "mod", "in", "execution", ".", "get", "(", "'modules'", ")", "or", "[", "]", ":", "Module", ".", "load", "(", "mod", ")", "os", ".", "environ", ".", "update", "(", "execution", ".", "get", "(", "'environment'", ")", "or", "{", "}", ")", "yield", "finally", ":", "os", ".", "environ", "=", "env" ]
Set current process environment according to execution `environment` and `modules`
[ "Set", "current", "process", "environment", "according", "to", "execution", "environment", "and", "modules" ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/driver/benchmark.py#L245-L256
BlueBrain/hpcbench
hpcbench/driver/benchmark.py
BenchmarkCategoryDriver.gather_metrics
def gather_metrics(self, runs): """Write a JSON file with the result of every runs """ for run_dirs in runs.values(): with open(JSON_METRICS_FILE, 'w') as ostr: ostr.write('[\n') for i in range(len(run_dirs)): with open(osp.join(run_dirs[i], YAML_REPORT_FILE)) as istr: data = yaml.safe_load(istr) data.pop('category', None) data.pop('command', None) data['id'] = run_dirs[i] json.dump(data, ostr, indent=2) if i != len(run_dirs) - 1: ostr.write(',') ostr.write('\n') ostr.write(']\n')
python
def gather_metrics(self, runs): """Write a JSON file with the result of every runs """ for run_dirs in runs.values(): with open(JSON_METRICS_FILE, 'w') as ostr: ostr.write('[\n') for i in range(len(run_dirs)): with open(osp.join(run_dirs[i], YAML_REPORT_FILE)) as istr: data = yaml.safe_load(istr) data.pop('category', None) data.pop('command', None) data['id'] = run_dirs[i] json.dump(data, ostr, indent=2) if i != len(run_dirs) - 1: ostr.write(',') ostr.write('\n') ostr.write(']\n')
[ "def", "gather_metrics", "(", "self", ",", "runs", ")", ":", "for", "run_dirs", "in", "runs", ".", "values", "(", ")", ":", "with", "open", "(", "JSON_METRICS_FILE", ",", "'w'", ")", "as", "ostr", ":", "ostr", ".", "write", "(", "'[\\n'", ")", "for", "i", "in", "range", "(", "len", "(", "run_dirs", ")", ")", ":", "with", "open", "(", "osp", ".", "join", "(", "run_dirs", "[", "i", "]", ",", "YAML_REPORT_FILE", ")", ")", "as", "istr", ":", "data", "=", "yaml", ".", "safe_load", "(", "istr", ")", "data", ".", "pop", "(", "'category'", ",", "None", ")", "data", ".", "pop", "(", "'command'", ",", "None", ")", "data", "[", "'id'", "]", "=", "run_dirs", "[", "i", "]", "json", ".", "dump", "(", "data", ",", "ostr", ",", "indent", "=", "2", ")", "if", "i", "!=", "len", "(", "run_dirs", ")", "-", "1", ":", "ostr", ".", "write", "(", "','", ")", "ostr", ".", "write", "(", "'\\n'", ")", "ostr", ".", "write", "(", "']\\n'", ")" ]
Write a JSON file with the result of every run
[ "Write", "a", "JSON", "file", "with", "the", "result", "of", "every", "runs" ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/driver/benchmark.py#L295-L311
BlueBrain/hpcbench
hpcbench/driver/benchmark.py
MetricsDriver._check_metrics
def _check_metrics(cls, schema, metrics): """Ensure that returned metrics are properly exposed """ for name, value in metrics.items(): metric = schema.get(name) if not metric: message = "Unexpected metric '{}' returned".format(name) raise Exception(message) cls._check_metric(schema, metric, name, value)
python
def _check_metrics(cls, schema, metrics): """Ensure that returned metrics are properly exposed """ for name, value in metrics.items(): metric = schema.get(name) if not metric: message = "Unexpected metric '{}' returned".format(name) raise Exception(message) cls._check_metric(schema, metric, name, value)
[ "def", "_check_metrics", "(", "cls", ",", "schema", ",", "metrics", ")", ":", "for", "name", ",", "value", "in", "metrics", ".", "items", "(", ")", ":", "metric", "=", "schema", ".", "get", "(", "name", ")", "if", "not", "metric", ":", "message", "=", "\"Unexpected metric '{}' returned\"", ".", "format", "(", "name", ")", "raise", "Exception", "(", "message", ")", "cls", ".", "_check_metric", "(", "schema", ",", "metric", ",", "name", ",", "value", ")" ]
Ensure that returned metrics are properly exposed
[ "Ensure", "that", "returned", "metrics", "are", "properly", "exposed" ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/driver/benchmark.py#L400-L408
eng-tools/sfsimodels
sfsimodels/loader.py
add_inputs_to_object
def add_inputs_to_object(obj, values): """ A generic function to load object parameters based on a dictionary list. :param obj: Object :param values: Dictionary :return: """ for item in obj.inputs: if hasattr(obj, item): # print(item) setattr(obj, item, values[item])
python
def add_inputs_to_object(obj, values): """ A generic function to load object parameters based on a dictionary list. :param obj: Object :param values: Dictionary :return: """ for item in obj.inputs: if hasattr(obj, item): # print(item) setattr(obj, item, values[item])
[ "def", "add_inputs_to_object", "(", "obj", ",", "values", ")", ":", "for", "item", "in", "obj", ".", "inputs", ":", "if", "hasattr", "(", "obj", ",", "item", ")", ":", "# print(item)", "setattr", "(", "obj", ",", "item", ",", "values", "[", "item", "]", ")" ]
A generic function to load object parameters based on a dictionary list. :param obj: Object :param values: Dictionary :return:
[ "A", "generic", "function", "to", "load", "object", "parameters", "based", "on", "a", "dictionary", "list", ".", ":", "param", "obj", ":", "Object", ":", "param", "values", ":", "Dictionary", ":", "return", ":" ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/loader.py#L7-L17
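A toy sketch of add_inputs_to_object: the object's inputs list names which attributes may be filled from the dictionary, and dictionary keys outside that list are ignored (the Widget class here is hypothetical; note the values dict must supply every key named in inputs, or a KeyError is raised):

class Widget(object):
    inputs = ['width', 'height']
    width = None
    height = None

w = Widget()
add_inputs_to_object(w, {'width': 2.0, 'height': 3.0, 'ignored': 99})
print(w.width, w.height)  # 2.0 3.0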
eng-tools/sfsimodels
sfsimodels/loader.py
load_sample_data
def load_sample_data(sss): """ Sample data for the SoilStructureSystem object :param sss: :return: """ load_soil_sample_data(sss.sp) # soil load_foundation_sample_data(sss.fd) # foundation load_structure_sample_data(sss.bd) # structure load_hazard_sample_data(sss.hz)
python
def load_sample_data(sss): """ Sample data for the SoilStructureSystem object :param sss: :return: """ load_soil_sample_data(sss.sp) # soil load_foundation_sample_data(sss.fd) # foundation load_structure_sample_data(sss.bd) # structure load_hazard_sample_data(sss.hz)
[ "def", "load_sample_data", "(", "sss", ")", ":", "load_soil_sample_data", "(", "sss", ".", "sp", ")", "# soil", "load_foundation_sample_data", "(", "sss", ".", "fd", ")", "# foundation", "load_structure_sample_data", "(", "sss", ".", "bd", ")", "# structure", "load_hazard_sample_data", "(", "sss", ".", "hz", ")" ]
Sample data for the SoilStructureSystem object :param sss: :return:
[ "Sample", "data", "for", "the", "SoilStructureSystem", "object", ":", "param", "sss", ":", ":", "return", ":" ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/loader.py#L20-L30
eng-tools/sfsimodels
sfsimodels/loader.py
load_soil_sample_data
def load_soil_sample_data(sp): """ Sample data for the Soil object :param sp: Soil Object :return: """ # soil sp.g_mod = 60.0e6 # [Pa] sp.phi = 30 # [degrees] sp.relative_density = .40 # [decimal] sp.gwl = 2. # [m], ground water level sp.unit_dry_weight = 17000 # [N/m3] sp.unit_sat_weight = 18000 # [N/m3] sp.unit_weight_water = 9800 # [N/m3] sp.cohesion = 10.0 # [Pa] sp.poissons_ratio = 0.22 sp.e_min = 0.55 sp.e_max = 0.95 sp.e_critical0 = 0.79 # Jin et al. 2015 sp.p_critical0 = 0.7 # Jin et al. 2015 sp.lamb_crl = 0.015
python
def load_soil_sample_data(sp): """ Sample data for the Soil object :param sp: Soil Object :return: """ # soil sp.g_mod = 60.0e6 # [Pa] sp.phi = 30 # [degrees] sp.relative_density = .40 # [decimal] sp.gwl = 2. # [m], ground water level sp.unit_dry_weight = 17000 # [N/m3] sp.unit_sat_weight = 18000 # [N/m3] sp.unit_weight_water = 9800 # [N/m3] sp.cohesion = 10.0 # [Pa] sp.poissons_ratio = 0.22 sp.e_min = 0.55 sp.e_max = 0.95 sp.e_critical0 = 0.79 # Jin et al. 2015 sp.p_critical0 = 0.7 # Jin et al. 2015 sp.lamb_crl = 0.015
[ "def", "load_soil_sample_data", "(", "sp", ")", ":", "# soil", "sp", ".", "g_mod", "=", "60.0e6", "# [Pa]", "sp", ".", "phi", "=", "30", "# [degrees]", "sp", ".", "relative_density", "=", ".40", "# [decimal]", "sp", ".", "gwl", "=", "2.", "# [m], ground water level", "sp", ".", "unit_dry_weight", "=", "17000", "# [N/m3]", "sp", ".", "unit_sat_weight", "=", "18000", "# [N/m3]", "sp", ".", "unit_weight_water", "=", "9800", "# [N/m3]", "sp", ".", "cohesion", "=", "10.0", "# [Pa]", "sp", ".", "poissons_ratio", "=", "0.22", "sp", ".", "e_min", "=", "0.55", "sp", ".", "e_max", "=", "0.95", "sp", ".", "e_critical0", "=", "0.79", "# Jin et al. 2015", "sp", ".", "p_critical0", "=", "0.7", "# Jin et al. 2015", "sp", ".", "lamb_crl", "=", "0.015" ]
Sample data for the Soil object :param sp: Soil Object :return:
[ "Sample", "data", "for", "the", "Soil", "object", ":", "param", "sp", ":", "Soil", "Object", ":", "return", ":" ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/loader.py#L33-L53
eng-tools/sfsimodels
sfsimodels/loader.py
load_foundation_sample_data
def load_foundation_sample_data(fd): """ Sample data for the Foundation object :param fd: Foundation Object :return: """ # foundation fd.width = 16.0 # m fd.length = 18.0 # m fd.depth = 0.0 # m fd.mass = 0.0
python
def load_foundation_sample_data(fd): """ Sample data for the Foundation object :param fd: Foundation Object :return: """ # foundation fd.width = 16.0 # m fd.length = 18.0 # m fd.depth = 0.0 # m fd.mass = 0.0
[ "def", "load_foundation_sample_data", "(", "fd", ")", ":", "# foundation", "fd", ".", "width", "=", "16.0", "# m", "fd", ".", "length", "=", "18.0", "# m", "fd", ".", "depth", "=", "0.0", "# m", "fd", ".", "mass", "=", "0.0" ]
Sample data for the Foundation object :param fd: Foundation Object :return:
[ "Sample", "data", "for", "the", "Foundation", "object", ":", "param", "fd", ":", "Foundation", "Object", ":", "return", ":" ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/loader.py#L56-L66
eng-tools/sfsimodels
sfsimodels/loader.py
load_structure_sample_data
def load_structure_sample_data(st): """ Sample data for the Structure object :param st: Structure Object :return: """ # structure st.h_eff = 9.0 # m st.mass_eff = 120e3 # kg st.t_eff = 1.0 # s st.mass_ratio = 1.0
python
def load_structure_sample_data(st): """ Sample data for the Structure object :param st: Structure Object :return: """ # structure st.h_eff = 9.0 # m st.mass_eff = 120e3 # kg st.t_eff = 1.0 # s st.mass_ratio = 1.0
[ "def", "load_structure_sample_data", "(", "st", ")", ":", "# structure", "st", ".", "h_eff", "=", "9.0", "# m", "st", ".", "mass_eff", "=", "120e3", "# kg", "st", ".", "t_eff", "=", "1.0", "# s", "st", ".", "mass_ratio", "=", "1.0" ]
Sample data for the Structure object :param st: Structure Object :return:
[ "Sample", "data", "for", "the", "Structure", "object", ":", "param", "st", ":", "Structure", "Object", ":", "return", ":" ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/loader.py#L69-L79
eng-tools/sfsimodels
sfsimodels/loader.py
load_hazard_sample_data
def load_hazard_sample_data(hz): """ Sample data for the Hazard object :param hz: Hazard Object :return: """ # hazard hz.z_factor = 0.3 # Hazard factor hz.r_factor = 1.0 # Return period factor hz.n_factor = 1.0 # Near-fault factor hz.magnitude = 7.5 # Magnitude of earthquake hz.corner_period = 4.0 # s hz.corner_acc_factor = 0.55 return hz
python
def load_hazard_sample_data(hz): """ Sample data for the Hazard object :param hz: Hazard Object :return: """ # hazard hz.z_factor = 0.3 # Hazard factor hz.r_factor = 1.0 # Return period factor hz.n_factor = 1.0 # Near-fault factor hz.magnitude = 7.5 # Magnitude of earthquake hz.corner_period = 4.0 # s hz.corner_acc_factor = 0.55 return hz
[ "def", "load_hazard_sample_data", "(", "hz", ")", ":", "# hazard", "hz", ".", "z_factor", "=", "0.3", "# Hazard factor", "hz", ".", "r_factor", "=", "1.0", "# Return period factor", "hz", ".", "n_factor", "=", "1.0", "# Near-fault factor", "hz", ".", "magnitude", "=", "7.5", "# Magnitude of earthquake", "hz", ".", "corner_period", "=", "4.0", "# s", "hz", ".", "corner_acc_factor", "=", "0.55", "return", "hz" ]
Sample data for the Hazard object :param hz: Hazard Object :return:
[ "Sample", "data", "for", "the", "Hazard", "object", ":", "param", "hz", ":", "Hazard", "Object", ":", "return", ":" ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/loader.py#L82-L95
eng-tools/sfsimodels
sfsimodels/loader.py
load_building_sample_data
def load_building_sample_data(bd): """ Sample data for the Building object :param bd: :return: """ number_of_storeys = 6 interstorey_height = 3.4 # m masses = 40.0e3 # kg bd.interstorey_heights = interstorey_height * np.ones(number_of_storeys) bd.floor_length = 18.0 # m bd.floor_width = 16.0 # m bd.storey_masses = masses * np.ones(number_of_storeys)
python
def load_building_sample_data(bd): """ Sample data for the Building object :param bd: :return: """ number_of_storeys = 6 interstorey_height = 3.4 # m masses = 40.0e3 # kg bd.interstorey_heights = interstorey_height * np.ones(number_of_storeys) bd.floor_length = 18.0 # m bd.floor_width = 16.0 # m bd.storey_masses = masses * np.ones(number_of_storeys)
[ "def", "load_building_sample_data", "(", "bd", ")", ":", "number_of_storeys", "=", "6", "interstorey_height", "=", "3.4", "# m", "masses", "=", "40.0e3", "# kg", "bd", ".", "interstorey_heights", "=", "interstorey_height", "*", "np", ".", "ones", "(", "number_of_storeys", ")", "bd", ".", "floor_length", "=", "18.0", "# m", "bd", ".", "floor_width", "=", "16.0", "# m", "bd", ".", "storey_masses", "=", "masses", "*", "np", ".", "ones", "(", "number_of_storeys", ")" ]
Sample data for the Building object :param bd: :return:
[ "Sample", "data", "for", "the", "Building", "object", ":", "param", "bd", ":", ":", "return", ":" ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/loader.py#L98-L111
eng-tools/sfsimodels
sfsimodels/loader.py
load_frame_building_sample_data
def load_frame_building_sample_data(): """ Sample data for the BuildingFrame object :return: """ number_of_storeys = 6 interstorey_height = 3.4 # m masses = 40.0e3 # kg n_bays = 3 fb = models.BuildingFrame(number_of_storeys, n_bays) fb.interstorey_heights = interstorey_height * np.ones(number_of_storeys) fb.floor_length = 18.0 # m fb.floor_width = 16.0 # m fb.storey_masses = masses * np.ones(number_of_storeys) # kg fb.bay_lengths = [6., 6.0, 6.0] fb.set_beam_prop("depth", [0.5, 0.5, 0.5], repeat="up") fb.set_beam_prop("width", [0.4, 0.4, 0.4], repeat="up") fb.set_column_prop("width", [0.5, 0.5, 0.5, 0.5], repeat="up") fb.set_column_prop("depth", [0.5, 0.5, 0.5, 0.5], repeat="up") fb.n_seismic_frames = 3 fb.n_gravity_frames = 0 return fb
python
def load_frame_building_sample_data(): """ Sample data for the BuildingFrame object :return: """ number_of_storeys = 6 interstorey_height = 3.4 # m masses = 40.0e3 # kg n_bays = 3 fb = models.BuildingFrame(number_of_storeys, n_bays) fb.interstorey_heights = interstorey_height * np.ones(number_of_storeys) fb.floor_length = 18.0 # m fb.floor_width = 16.0 # m fb.storey_masses = masses * np.ones(number_of_storeys) # kg fb.bay_lengths = [6., 6.0, 6.0] fb.set_beam_prop("depth", [0.5, 0.5, 0.5], repeat="up") fb.set_beam_prop("width", [0.4, 0.4, 0.4], repeat="up") fb.set_column_prop("width", [0.5, 0.5, 0.5, 0.5], repeat="up") fb.set_column_prop("depth", [0.5, 0.5, 0.5, 0.5], repeat="up") fb.n_seismic_frames = 3 fb.n_gravity_frames = 0 return fb
[ "def", "load_frame_building_sample_data", "(", ")", ":", "number_of_storeys", "=", "6", "interstorey_height", "=", "3.4", "# m", "masses", "=", "40.0e3", "# kg", "n_bays", "=", "3", "fb", "=", "models", ".", "BuildingFrame", "(", "number_of_storeys", ",", "n_bays", ")", "fb", ".", "interstorey_heights", "=", "interstorey_height", "*", "np", ".", "ones", "(", "number_of_storeys", ")", "fb", ".", "floor_length", "=", "18.0", "# m", "fb", ".", "floor_width", "=", "16.0", "# m", "fb", ".", "storey_masses", "=", "masses", "*", "np", ".", "ones", "(", "number_of_storeys", ")", "# kg", "fb", ".", "bay_lengths", "=", "[", "6.", ",", "6.0", ",", "6.0", "]", "fb", ".", "set_beam_prop", "(", "\"depth\"", ",", "[", "0.5", ",", "0.5", ",", "0.5", "]", ",", "repeat", "=", "\"up\"", ")", "fb", ".", "set_beam_prop", "(", "\"width\"", ",", "[", "0.4", ",", "0.4", ",", "0.4", "]", ",", "repeat", "=", "\"up\"", ")", "fb", ".", "set_column_prop", "(", "\"width\"", ",", "[", "0.5", ",", "0.5", ",", "0.5", ",", "0.5", "]", ",", "repeat", "=", "\"up\"", ")", "fb", ".", "set_column_prop", "(", "\"depth\"", ",", "[", "0.5", ",", "0.5", ",", "0.5", ",", "0.5", "]", ",", "repeat", "=", "\"up\"", ")", "fb", ".", "n_seismic_frames", "=", "3", "fb", ".", "n_gravity_frames", "=", "0", "return", "fb" ]
Sample data for the BuildingFrame object :return:
[ "Sample", "data", "for", "the", "BuildingFrame", "object" ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/loader.py#L114-L138
lazygunner/xunleipy
xunleipy/fp.py
_get_random_fp_raw
def _get_random_fp_raw(): ''' Generate a random raw fingerprint list ''' fp_list = [] fp_list.append(get_random_ua()) # ua fp_list.append('zh-CN') # language fp_list.append('24') # color depth fp_list.append(__get_random_screen_resolution()) fp_list.append('-480') # time zone offset fp_list.append('true') # session storage fp_list.append('true') # local storage fp_list.append('true') # indexed db fp_list.append('') # add behavior fp_list.append('function') # open database fp_list.append('') # cpu class fp_list.append('MacIntel') # platform fp_list.append('') # do not track fp_list.append( 'Widevine Content Decryption Module::Enables Widevine \ licenses for playback of HTML audio/video content. \ (version: 1.4.8.962)::application/x-ppapi-widevine-cdm~;' ) # plugin string return fp_list
python
def _get_random_fp_raw(): ''' Generate a random raw fingerprint list ''' fp_list = [] fp_list.append(get_random_ua()) # ua fp_list.append('zh-CN') # language fp_list.append('24') # color depth fp_list.append(__get_random_screen_resolution()) fp_list.append('-480') # time zone offset fp_list.append('true') # session storage fp_list.append('true') # local storage fp_list.append('true') # indexed db fp_list.append('') # add behavior fp_list.append('function') # open database fp_list.append('') # cpu class fp_list.append('MacIntel') # platform fp_list.append('') # do not track fp_list.append( 'Widevine Content Decryption Module::Enables Widevine \ licenses for playback of HTML audio/video content. \ (version: 1.4.8.962)::application/x-ppapi-widevine-cdm~;' ) # plugin string return fp_list
[ "def", "_get_random_fp_raw", "(", ")", ":", "fp_list", "=", "[", "]", "fp_list", ".", "append", "(", "get_random_ua", "(", ")", ")", "# ua", "fp_list", ".", "append", "(", "'zh-CN'", ")", "# language", "fp_list", ".", "append", "(", "'24'", ")", "# color depth", "fp_list", ".", "append", "(", "__get_random_screen_resolution", "(", ")", ")", "fp_list", ".", "append", "(", "'-480'", ")", "# time zone offsite", "fp_list", ".", "append", "(", "'true'", ")", "# session storage", "fp_list", ".", "append", "(", "'true'", ")", "# local storage", "fp_list", ".", "append", "(", "'true'", ")", "# indexed db", "fp_list", ".", "append", "(", "''", ")", "# add behavior", "fp_list", ".", "append", "(", "'function'", ")", "# open database", "fp_list", ".", "append", "(", "''", ")", "# cpu class", "fp_list", ".", "append", "(", "'MacIntel'", ")", "# platform", "fp_list", ".", "append", "(", "''", ")", "# do not track", "fp_list", ".", "append", "(", "'Widevine Content Decryption Module::Enables Widevine \\\n licenses for playback of HTML audio/video content. \\\n (version: 1.4.8.962)::application/x-ppapi-widevine-cdm~;'", ")", "# plugin string", "return", "fp_list" ]
Generate a random raw fingerprint list
[ "Generate", "a", "random", "raw", "fingerprint", "list" ]
train
https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/fp.py#L23-L47
lazygunner/xunleipy
xunleipy/fp.py
get_fp_raw
def get_fp_raw(): ''' Generate fp_raw_str ''' fp_file_path = os.path.expanduser('~/.xunleipy_fp') fp_list = [] try: with open(fp_file_path, 'r') as fp_file: fp_str = fp_file.readline() if len(fp_str) > 0: fp_list = fp_str.split('###') except IOError: pass if len(fp_list) < 14: fp_list = _get_random_fp_raw() fp_str = '###'.join(fp_list) with open(fp_file_path, 'w') as fp_file: fp_file.write(fp_str) source = fp_str.strip() if six.PY3: source = source.encode('utf-8') fp_raw = base64.b64encode(source) return fp_raw
python
def get_fp_raw(): ''' Generate fp_raw_str ''' fp_file_path = os.path.expanduser('~/.xunleipy_fp') fp_list = [] try: with open(fp_file_path, 'r') as fp_file: fp_str = fp_file.readline() if len(fp_str) > 0: fp_list = fp_str.split('###') except IOError: pass if len(fp_list) < 14: fp_list = _get_random_fp_raw() fp_str = '###'.join(fp_list) with open(fp_file_path, 'w') as fp_file: fp_file.write(fp_str) source = fp_str.strip() if six.PY3: source = source.encode('utf-8') fp_raw = base64.b64encode(source) return fp_raw
[ "def", "get_fp_raw", "(", ")", ":", "fp_file_path", "=", "os", ".", "path", ".", "expanduser", "(", "'~/.xunleipy_fp'", ")", "fp_list", "=", "[", "]", "try", ":", "with", "open", "(", "fp_file_path", ",", "'r'", ")", "as", "fp_file", ":", "fp_str", "=", "fp_file", ".", "readline", "(", ")", "if", "len", "(", "fp_str", ")", ">", "0", ":", "fp_list", "=", "fp_str", ".", "split", "(", "'###'", ")", "except", "IOError", ":", "pass", "if", "len", "(", "fp_list", ")", "<", "14", ":", "fp_list", "=", "_get_random_fp_raw", "(", ")", "fp_str", "=", "'###'", ".", "join", "(", "fp_list", ")", "with", "open", "(", "fp_file_path", ",", "'w'", ")", "as", "fp_file", ":", "fp_file", ".", "write", "(", "fp_str", ")", "source", "=", "fp_str", ".", "strip", "(", ")", "if", "six", ".", "PY3", ":", "source", "=", "source", ".", "encode", "(", "'utf-8'", ")", "fp_raw", "=", "base64", ".", "b64encode", "(", "source", ")", "return", "fp_raw" ]
Generate fp_raw_str
[ "Generate", "fp_raw_str" ]
train
https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/fp.py#L50-L75
BlueBrain/hpcbench
hpcbench/toolbox/functools_ext.py
compose
def compose(*functions): """Define function composition like f ∘ g ∘ h :return: callable object that will perform function composition of the callables given as arguments. """ def _compose2(f, g): # pylint: disable=invalid-name return lambda x: f(g(x)) return functools.reduce(_compose2, functions, lambda x: x)
python
def compose(*functions): """Define function composition like f ∘ g ∘ h :return: callable object that will perform function composition of the callables given as arguments. """ def _compose2(f, g): # pylint: disable=invalid-name return lambda x: f(g(x)) return functools.reduce(_compose2, functions, lambda x: x)
[ "def", "compose", "(", "*", "functions", ")", ":", "def", "_compose2", "(", "f", ",", "g", ")", ":", "# pylint: disable=invalid-name", "return", "lambda", "x", ":", "f", "(", "g", "(", "x", ")", ")", "return", "functools", ".", "reduce", "(", "_compose2", ",", "functions", ",", "lambda", "x", ":", "x", ")" ]
Define function composition like f ∘ g ∘ h :return: callable object that will perform function composition of the callables given as arguments.
[ "Define", "function", "composition", "like", "f", "∘", "g", "∘", "h", ":", "return", ":", "callable", "object", "that", "will", "perform", "function", "composition", "of", "the", "callables", "given", "as", "arguments", "." ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/toolbox/functools_ext.py#L8-L17
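A minimal usage sketch of `compose`: functions apply right-to-left, so `compose(f, g)(x)` computes `f(g(x))`. The helper body is restated from the record above so the snippet runs standalone; the example lambdas are illustrative assumptions.

import functools

def compose(*functions):
    # Fold pairwise composition over all callables, seeded with identity.
    def _compose2(f, g):
        return lambda x: f(g(x))
    return functools.reduce(_compose2, functions, lambda x: x)

inc = lambda x: x + 1       # illustrative example functions
double = lambda x: x * 2
f = compose(double, inc)    # rightmost callable runs first
assert f(3) == 8            # double(inc(3)) == double(4) == 8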
BlueBrain/hpcbench
hpcbench/toolbox/functools_ext.py
chunks
def chunks(iterator, size): """Split an iterator into chunks with `size` elements each. Warning: ``iterator`` must be an actual iterator; passing a concrete sequence will get you repeating elements. So ``chunks(iter(range(1000)), 10)`` is fine, but ``chunks(range(1000), 10)`` is not. Example: # size == 2 >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2) >>> list(x) [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]] # size == 3 >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3) >>> list(x) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]] """ for item in iterator: yield [item] + list(islice(iterator, size - 1))
python
def chunks(iterator, size): """Split an iterator into chunks with `size` elements each. Warning: ``iterator`` must be an actual iterator; passing a concrete sequence will get you repeating elements. So ``chunks(iter(range(1000)), 10)`` is fine, but ``chunks(range(1000), 10)`` is not. Example: # size == 2 >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2) >>> list(x) [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]] # size == 3 >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3) >>> list(x) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]] """ for item in iterator: yield [item] + list(islice(iterator, size - 1))
[ "def", "chunks", "(", "iterator", ",", "size", ")", ":", "for", "item", "in", "iterator", ":", "yield", "[", "item", "]", "+", "list", "(", "islice", "(", "iterator", ",", "size", "-", "1", ")", ")" ]
Split an iterator into chunks with `size` elements each. Warning: ``iterator`` must be an actual iterator; passing a concrete sequence will get you repeating elements. So ``chunks(iter(range(1000)), 10)`` is fine, but ``chunks(range(1000), 10)`` is not. Example: # size == 2 >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2) >>> list(x) [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]] # size == 3 >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3) >>> list(x) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]
[ "Split", "an", "iterator", "into", "chunks", "with", "size", "elements", "each", ".", "Warning", ":", "iterator", "must", "be", "an", "actual", "iterator", "passing", "a", "concrete", "sequence", "will", "get", "you", "repeating", "elements", ".", "So", "chunks", "(", "iter", "(", "range", "(", "1000", "))", "10", ")", "is", "fine", "but", "chunks", "(", "range", "(", "1000", ")", "10", ")", "is", "not", ".", "Example", ":", "#", "size", "==", "2", ">>>", "x", "=", "chunks", "(", "iter", "(", "[", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "]", ")", "2", ")", ">>>", "list", "(", "x", ")", "[[", "0", "1", "]", "[", "2", "3", "]", "[", "4", "5", "]", "[", "6", "7", "]", "[", "8", "9", "]", "[", "10", "]]", "#", "size", "==", "3", ">>>", "x", "=", "chunks", "(", "iter", "(", "[", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "]", ")", "3", ")", ">>>", "list", "(", "x", ")", "[[", "0", "1", "2", "]", "[", "3", "4", "5", "]", "[", "6", "7", "8", "]", "[", "9", "10", "]]" ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/toolbox/functools_ext.py#L20-L38
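A runnable sketch of `chunks`, restated from the record above, plus a demonstration of the docstring's warning: a non-iterator input makes `islice` restart from index 0 on every pass.

from itertools import islice

def chunks(iterator, size):
    # One item is consumed by the for-loop, then islice drains size - 1 more
    # from the same shared iterator, so consecutive chunks never overlap.
    for item in iterator:
        yield [item] + list(islice(iterator, size - 1))

assert list(chunks(iter(range(7)), 3)) == [[0, 1, 2], [3, 4, 5], [6]]
# Passing a sequence instead of an iterator repeats elements:
assert list(chunks(range(3), 2)) == [[0, 0], [1, 0], [2, 0]]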
BlueBrain/hpcbench
hpcbench/toolbox/functools_ext.py
listify
def listify(func=None, wrapper=list): """ A decorator which wraps a function's return value in ``list(...)``. Useful when an algorithm can be expressed more cleanly as a generator but the function should return a list. Example:: >>> @listify ... def get_lengths(iterable): ... for i in iterable: ... yield len(i) >>> get_lengths(["spam", "eggs"]) [4, 4] >>> >>> @listify(wrapper=tuple) ... def get_lengths_tuple(iterable): ... for i in iterable: ... yield len(i) >>> get_lengths_tuple(["foo", "bar"]) (3, 3) """ def _listify_return(func): @functools.wraps(func) def _listify_helper(*args, **kw): return wrapper(func(*args, **kw)) return _listify_helper if func is None: return _listify_return return _listify_return(func)
python
def listify(func=None, wrapper=list): """ A decorator which wraps a function's return value in ``list(...)``. Useful when an algorithm can be expressed more cleanly as a generator but the function should return a list. Example:: >>> @listify ... def get_lengths(iterable): ... for i in iterable: ... yield len(i) >>> get_lengths(["spam", "eggs"]) [4, 4] >>> >>> @listify(wrapper=tuple) ... def get_lengths_tuple(iterable): ... for i in iterable: ... yield len(i) >>> get_lengths_tuple(["foo", "bar"]) (3, 3) """ def _listify_return(func): @functools.wraps(func) def _listify_helper(*args, **kw): return wrapper(func(*args, **kw)) return _listify_helper if func is None: return _listify_return return _listify_return(func)
[ "def", "listify", "(", "func", "=", "None", ",", "wrapper", "=", "list", ")", ":", "def", "_listify_return", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "_listify_helper", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "return", "wrapper", "(", "func", "(", "*", "args", ",", "*", "*", "kw", ")", ")", "return", "_listify_helper", "if", "func", "is", "None", ":", "return", "_listify_return", "return", "_listify_return", "(", "func", ")" ]
A decorator which wraps a function's return value in ``list(...)``. Useful when an algorithm can be expressed more cleanly as a generator but the function should return a list. Example:: >>> @listify ... def get_lengths(iterable): ... for i in iterable: ... yield len(i) >>> get_lengths(["spam", "eggs"]) [4, 4] >>> >>> @listify(wrapper=tuple) ... def get_lengths_tuple(iterable): ... for i in iterable: ... yield len(i) >>> get_lengths_tuple(["foo", "bar"]) (3, 3)
[ "A", "decorator", "which", "wraps", "a", "function", "s", "return", "value", "in", "list", "(", "...", ")", ".", "Useful", "when", "an", "algorithm", "can", "be", "expressed", "more", "cleanly", "as", "a", "generator", "but", "the", "function", "should", "return", "a", "list", ".", "Example", "::", ">>>" ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/toolbox/functools_ext.py#L41-L71
portfoliome/foil
foil/patterns.py
match_subgroup
def match_subgroup(sequence, pattern): """Yield the sub-group dictionary for each element that matches a regex pattern.""" for element in sequence: match = re.match(pattern, element) if match: yield match.groupdict()
python
def match_subgroup(sequence, pattern): """Yield the sub-group dictionary for each element that matches a regex pattern.""" for element in sequence: match = re.match(pattern, element) if match: yield match.groupdict()
[ "def", "match_subgroup", "(", "sequence", ",", "pattern", ")", ":", "for", "element", "in", "sequence", ":", "match", "=", "re", ".", "match", "(", "pattern", ",", "element", ")", "if", "match", ":", "yield", "match", ".", "groupdict", "(", ")" ]
Yield the sub-group dictionary for each element that matches a regex pattern.
[ "Yield", "the", "sub", "-", "group", "dictionary", "for", "each", "element", "that", "matches", "a", "regex", "pattern", "." ]
train
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/patterns.py#L10-L17
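A self-contained sketch of `match_subgroup`; the filename pattern and inputs are hypothetical, chosen only to show how named groups surface via `groupdict()`.

import re

def match_subgroup(sequence, pattern):
    # re.match anchors at the start; non-matching elements are skipped.
    for element in sequence:
        match = re.match(pattern, element)
        if match:
            yield match.groupdict()

pattern = r'(?P<name>[a-z]+)_(?P<year>\d{4})'   # hypothetical pattern
files = ['report_2020', 'notes', 'audit_2019']  # hypothetical inputs
assert list(match_subgroup(files, pattern)) == [
    {'name': 'report', 'year': '2020'},
    {'name': 'audit', 'year': '2019'},
]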
portfoliome/foil
foil/patterns.py
add_regex_start_end
def add_regex_start_end(pattern_function): """Decorator for adding regex pattern start and end characters.""" @wraps(pattern_function) def func_wrapper(*args, **kwargs): return r'^{}$'.format(pattern_function(*args, **kwargs)) return func_wrapper
python
def add_regex_start_end(pattern_function): """Decorator for adding regex pattern start and end characters.""" @wraps(pattern_function) def func_wrapper(*args, **kwargs): return r'^{}$'.format(pattern_function(*args, **kwargs)) return func_wrapper
[ "def", "add_regex_start_end", "(", "pattern_function", ")", ":", "@", "wraps", "(", "pattern_function", ")", "def", "func_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "r'^{}$'", ".", "format", "(", "pattern_function", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "return", "func_wrapper" ]
Decorator for adding regex pattern start and end characters.
[ "Decorator", "for", "adding", "regex", "pattern", "start", "and", "end", "characters", "." ]
train
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/patterns.py#L24-L30
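A sketch of the decorator anchoring a generated pattern with `^` and `$`; `digits_pattern` is a made-up pattern builder for illustration.

import re
from functools import wraps

def add_regex_start_end(pattern_function):
    @wraps(pattern_function)
    def func_wrapper(*args, **kwargs):
        # Wrap whatever pattern the function builds in start/end anchors.
        return r'^{}$'.format(pattern_function(*args, **kwargs))
    return func_wrapper

@add_regex_start_end
def digits_pattern():          # hypothetical pattern builder
    return r'\d+'

assert digits_pattern() == r'^\d+$'
assert re.match(digits_pattern(), '123')
assert re.match(digits_pattern(), '123x') is None  # '$' rejects the suffix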
eng-tools/sfsimodels
sfsimodels/functions.py
convert_stress_to_mass
def convert_stress_to_mass(q, width, length, gravity): """ Converts a foundation stress to an equivalent mass. :param q: applied stress [Pa] :param width: foundation width [m] :param length: foundation length [m] :param gravity: applied gravitational acceleration [m/s2] :return: """ mass = q * width * length / gravity return mass
python
def convert_stress_to_mass(q, width, length, gravity): """ Converts a foundation stress to an equivalent mass. :param q: applied stress [Pa] :param width: foundation width [m] :param length: foundation length [m] :param gravity: applied gravitational acceleration [m/s2] :return: """ mass = q * width * length / gravity return mass
[ "def", "convert_stress_to_mass", "(", "q", ",", "width", ",", "length", ",", "gravity", ")", ":", "mass", "=", "q", "*", "width", "*", "length", "/", "gravity", "return", "mass" ]
Converts a foundation stress to an equivalent mass. :param q: applied stress [Pa] :param width: foundation width [m] :param length: foundation length [m] :param gravity: applied gravitational acceleration [m/s2] :return:
[ "Converts", "a", "foundation", "stress", "to", "an", "equivalent", "mass", "." ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/functions.py#L4-L15
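A worked example of the conversion: mass = q * width * length / g, so 100 kPa over a hypothetical 2 m by 3 m footing at g = 9.8 m/s2 comes to about 61.2 tonnes. The numbers are illustrative, not from the source.

def convert_stress_to_mass(q, width, length, gravity):
    # force = stress * plan area; mass = force / g
    return q * width * length / gravity

mass = convert_stress_to_mass(q=100.0e3, width=2.0, length=3.0, gravity=9.8)
assert abs(mass - 61224.49) < 0.01   # kg, roughly 61.2 tonnes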
eng-tools/sfsimodels
sfsimodels/functions.py
add_to_obj
def add_to_obj(obj, dictionary, objs=None, exceptions=None, verbose=0): """ Cycles through a dictionary and adds the key-value pairs to an object. :param obj: :param dictionary: :param exceptions: :param verbose: :return: """ if exceptions is None: exceptions = [] for item in dictionary: if item in exceptions: continue if dictionary[item] is not None: if verbose: print("process: ", item, dictionary[item]) key, value = get_key_value(dictionary[item], objs, key=item) if verbose: print("assign: ", key, value) try: setattr(obj, key, value) except AttributeError: raise AttributeError("Can't set {0}={1} on object: {2}".format(key, value, obj))
python
def add_to_obj(obj, dictionary, objs=None, exceptions=None, verbose=0): """ Cycles through a dictionary and adds the key-value pairs to an object. :param obj: :param dictionary: :param exceptions: :param verbose: :return: """ if exceptions is None: exceptions = [] for item in dictionary: if item in exceptions: continue if dictionary[item] is not None: if verbose: print("process: ", item, dictionary[item]) key, value = get_key_value(dictionary[item], objs, key=item) if verbose: print("assign: ", key, value) try: setattr(obj, key, value) except AttributeError: raise AttributeError("Can't set {0}={1} on object: {2}".format(key, value, obj))
[ "def", "add_to_obj", "(", "obj", ",", "dictionary", ",", "objs", "=", "None", ",", "exceptions", "=", "None", ",", "verbose", "=", "0", ")", ":", "if", "exceptions", "is", "None", ":", "exceptions", "=", "[", "]", "for", "item", "in", "dictionary", ":", "if", "item", "in", "exceptions", ":", "continue", "if", "dictionary", "[", "item", "]", "is", "not", "None", ":", "if", "verbose", ":", "print", "(", "\"process: \"", ",", "item", ",", "dictionary", "[", "item", "]", ")", "key", ",", "value", "=", "get_key_value", "(", "dictionary", "[", "item", "]", ",", "objs", ",", "key", "=", "item", ")", "if", "verbose", ":", "print", "(", "\"assign: \"", ",", "key", ",", "value", ")", "try", ":", "setattr", "(", "obj", ",", "key", ",", "value", ")", "except", "AttributeError", ":", "raise", "AttributeError", "(", "\"Can't set {0}={1} on object: {2}\"", ".", "format", "(", "key", ",", "value", ",", "obj", ")", ")" ]
Cycles through a dictionary and adds the key-value pairs to an object. :param obj: :param dictionary: :param exceptions: :param verbose: :return:
[ "Cycles", "through", "a", "dictionary", "and", "adds", "the", "key", "-", "value", "pairs", "to", "an", "object", "." ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/functions.py#L76-L100
portfoliome/foil
foil/compose.py
create_quantiles
def create_quantiles(items: Sequence, lower_bound, upper_bound): """Create quantile start and end boundaries.""" interval = (upper_bound - lower_bound) / len(items) quantiles = ((g, (x - interval, x)) for g, x in zip(items, accumulate(repeat(interval, len(items))))) return quantiles
python
def create_quantiles(items: Sequence, lower_bound, upper_bound): """Create quantile start and end boundaries.""" interval = (upper_bound - lower_bound) / len(items) quantiles = ((g, (x - interval, x)) for g, x in zip(items, accumulate(repeat(interval, len(items))))) return quantiles
[ "def", "create_quantiles", "(", "items", ":", "Sequence", ",", "lower_bound", ",", "upper_bound", ")", ":", "interval", "=", "(", "upper_bound", "-", "lower_bound", ")", "/", "len", "(", "items", ")", "quantiles", "=", "(", "(", "g", ",", "(", "x", "-", "interval", ",", "x", ")", ")", "for", "g", ",", "x", "in", "zip", "(", "items", ",", "accumulate", "(", "repeat", "(", "interval", ",", "len", "(", "items", ")", ")", ")", ")", ")", "return", "quantiles" ]
Create quantile start and end boundaries.
[ "Create", "quantile", "start", "and", "end", "boundaries", "." ]
train
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/compose.py#L5-L13
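A runnable sketch of `create_quantiles`. One quirk worth noting: the accumulated boundaries start at 0 rather than at `lower_bound`, so the bins only span [lower_bound, upper_bound] when lower_bound is 0, as in this example.

from itertools import accumulate, repeat

def create_quantiles(items, lower_bound, upper_bound):
    interval = (upper_bound - lower_bound) / len(items)
    # accumulate(repeat(interval, n)) yields interval, 2*interval, ...
    return ((g, (x - interval, x))
            for g, x in zip(items, accumulate(repeat(interval, len(items)))))

assert list(create_quantiles(['low', 'high'], 0, 1.0)) == [
    ('low', (0.0, 0.5)),
    ('high', (0.5, 1.0)),
]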
portfoliome/foil
foil/compose.py
tupleize
def tupleize(element, ignore_types=(str, bytes)): """Cast a single element to a tuple.""" if hasattr(element, '__iter__') and not isinstance(element, ignore_types): return element else: return tuple((element,))
python
def tupleize(element, ignore_types=(str, bytes)): """Cast a single element to a tuple.""" if hasattr(element, '__iter__') and not isinstance(element, ignore_types): return element else: return tuple((element,))
[ "def", "tupleize", "(", "element", ",", "ignore_types", "=", "(", "str", ",", "bytes", ")", ")", ":", "if", "hasattr", "(", "element", ",", "'__iter__'", ")", "and", "not", "isinstance", "(", "element", ",", "ignore_types", ")", ":", "return", "element", "else", ":", "return", "tuple", "(", "(", "element", ",", ")", ")" ]
Cast a single element to a tuple.
[ "Cast", "a", "single", "element", "to", "a", "tuple", "." ]
train
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/compose.py#L16-L21
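A sketch of `tupleize` behaviour: scalars and strings get wrapped, while other iterables pass through unchanged (returned as-is rather than converted to tuples).

def tupleize(element, ignore_types=(str, bytes)):
    if hasattr(element, '__iter__') and not isinstance(element, ignore_types):
        return element           # already iterable: returned untouched
    return (element,)            # scalar, str or bytes: wrapped in a 1-tuple

assert tupleize(5) == (5,)
assert tupleize('abc') == ('abc',)   # strings are treated as atoms
assert tupleize([1, 2]) == [1, 2]    # note: still a list, not a tuple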
portfoliome/foil
foil/compose.py
dictionize
def dictionize(fields: Sequence, records: Sequence) -> Generator: """Create dictionaries mapping fields to record data.""" return (dict(zip(fields, rec)) for rec in records)
python
def dictionize(fields: Sequence, records: Sequence) -> Generator: """Create dictionaries mapping fields to record data.""" return (dict(zip(fields, rec)) for rec in records)
[ "def", "dictionize", "(", "fields", ":", "Sequence", ",", "records", ":", "Sequence", ")", "->", "Generator", ":", "return", "(", "dict", "(", "zip", "(", "fields", ",", "rec", ")", ")", "for", "rec", "in", "records", ")" ]
Create dictionaries mapping fields to record data.
[ "Create", "dictionaries", "mapping", "fields", "to", "record", "data", "." ]
train
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/compose.py#L43-L46
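A quick usage sketch of `dictionize`; the field names and rows are illustrative. The generator builds each dict lazily as it is consumed.

def dictionize(fields, records):
    return (dict(zip(fields, rec)) for rec in records)

rows = [(1, 'a'), (2, 'b')]          # hypothetical records
assert list(dictionize(['id', 'tag'], rows)) == [
    {'id': 1, 'tag': 'a'},
    {'id': 2, 'tag': 'b'},
]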
portfoliome/foil
foil/compose.py
flip_iterable_dict
def flip_iterable_dict(d: dict) -> dict: """Flip a dictionary of iterables so that each value maps back to its key.""" value_keys = disjoint_union((cartesian_product((v, k)) for k, v in d.items())) return dict(value_keys)
python
def flip_iterable_dict(d: dict) -> dict: """Flip a dictionary of iterables so that each value maps back to its key.""" value_keys = disjoint_union((cartesian_product((v, k)) for k, v in d.items())) return dict(value_keys)
[ "def", "flip_iterable_dict", "(", "d", ":", "dict", ")", "->", "dict", ":", "value_keys", "=", "disjoint_union", "(", "(", "cartesian_product", "(", "(", "v", ",", "k", ")", ")", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ")", ")", "return", "dict", "(", "value_keys", ")" ]
Flip a dictionary of iterables so that each value maps back to its key.
[ "Flip", "a", "dictionary", "of", "iterables", "so", "that", "each", "value", "maps", "back", "to", "its", "key", "." ]
train
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/compose.py#L53-L58
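The record depends on `disjoint_union` and `cartesian_product` helpers that are not shown here, so this is a behaviourally equivalent sketch under the assumption that every value is an iterable of hashable items and no item repeats across keys.

def flip_iterable_dict(d):
    # Each item inside a value iterable becomes a key pointing back at its key.
    return {v: k for k, vs in d.items() for v in vs}

assert flip_iterable_dict({'vowel': 'ae', 'consonant': 'bc'}) == {
    'a': 'vowel', 'e': 'vowel', 'b': 'consonant', 'c': 'consonant'
}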
sci-bots/svg-model
svg_model/loop.py
Loop.get_signed_area
def get_signed_area(self): """ Return area of a simple (i.e. non-self-intersecting) polygon. If verts wind anti-clockwise, this returns a negative number. Assume y-axis points up. """ accum = 0.0 for i in range(len(self.verts)): j = (i + 1) % len(self.verts) accum += ( self.verts[j][0] * self.verts[i][1] - self.verts[i][0] * self.verts[j][1]) return accum / 2
python
def get_signed_area(self): """ Return area of a simple (i.e. non-self-intersecting) polygon. If verts wind anti-clockwise, this returns a negative number. Assume y-axis points up. """ accum = 0.0 for i in range(len(self.verts)): j = (i + 1) % len(self.verts) accum += ( self.verts[j][0] * self.verts[i][1] - self.verts[i][0] * self.verts[j][1]) return accum / 2
[ "def", "get_signed_area", "(", "self", ")", ":", "accum", "=", "0.0", "for", "i", "in", "range", "(", "len", "(", "self", ".", "verts", ")", ")", ":", "j", "=", "(", "i", "+", "1", ")", "%", "len", "(", "self", ".", "verts", ")", "accum", "+=", "(", "self", ".", "verts", "[", "j", "]", "[", "0", "]", "*", "self", ".", "verts", "[", "i", "]", "[", "1", "]", "-", "self", ".", "verts", "[", "i", "]", "[", "0", "]", "*", "self", ".", "verts", "[", "j", "]", "[", "1", "]", ")", "return", "accum", "/", "2" ]
Return area of a simple (i.e. non-self-intersecting) polygon. If verts wind anti-clockwise, this returns a negative number. Assume y-axis points up.
[ "Return", "area", "of", "a", "simple", "(", "i.e", ".", "non", "-", "self", "-", "intersecting", ")", "polygon", ".", "If", "verts", "wind", "anti", "-", "clockwise", "this", "returns", "a", "negative", "number", ".", "Assume", "y", "-", "axis", "points", "up", "." ]
train
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/loop.py#L34-L46
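A standalone check of the signed-area (shoelace) computation, restated as a free function over a vertex list. Per the docstring's convention, an anti-clockwise unit square (y-axis up) comes out negative, and reversing the winding flips the sign.

def signed_area(verts):
    # Same accumulation as Loop.get_signed_area, over (x, y) pairs.
    accum = 0.0
    for i in range(len(verts)):
        j = (i + 1) % len(verts)
        accum += verts[j][0] * verts[i][1] - verts[i][0] * verts[j][1]
    return accum / 2

ccw_square = [(0, 0), (1, 0), (1, 1), (0, 1)]   # anti-clockwise, y-axis up
assert signed_area(ccw_square) == -1.0
assert signed_area(list(reversed(ccw_square))) == 1.0  # clockwise is positive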
PolyJIT/benchbuild
benchbuild/extensions/base.py
Extension.call_next
def call_next(self, *args, **kwargs) -> t.List[run.RunInfo]: """Call all child extensions with the given arguments. This calls all child extensions and collects the results for our own parent. Use this to control the execution of your nested extensions from your own extension. Returns: :obj:`list` of :obj:`RunInfo`: A list of collected results of our child extensions. """ all_results = [] for ext in self.next_extensions: LOG.debug(" %s ", ext) results = ext(*args, **kwargs) LOG.debug(" %s => %s", ext, results) if results is None: LOG.warning("No result from: %s", ext) continue result_list = [] if isinstance(results, c.Iterable): result_list.extend(results) else: result_list.append(results) all_results.extend(result_list) return all_results
python
def call_next(self, *args, **kwargs) -> t.List[run.RunInfo]: """Call all child extensions with the given arguments. This calls all child extensions and collects the results for our own parent. Use this to control the execution of your nested extensions from your own extension. Returns: :obj:`list` of :obj:`RunInfo`: A list of collected results of our child extensions. """ all_results = [] for ext in self.next_extensions: LOG.debug(" %s ", ext) results = ext(*args, **kwargs) LOG.debug(" %s => %s", ext, results) if results is None: LOG.warning("No result from: %s", ext) continue result_list = [] if isinstance(results, c.Iterable): result_list.extend(results) else: result_list.append(results) all_results.extend(result_list) return all_results
[ "def", "call_next", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "->", "t", ".", "List", "[", "run", ".", "RunInfo", "]", ":", "all_results", "=", "[", "]", "for", "ext", "in", "self", ".", "next_extensions", ":", "LOG", ".", "debug", "(", "\" %s \"", ",", "ext", ")", "results", "=", "ext", "(", "*", "args", ",", "*", "*", "kwargs", ")", "LOG", ".", "debug", "(", "\" %s => %s\"", ",", "ext", ",", "results", ")", "if", "results", "is", "None", ":", "LOG", ".", "warning", "(", "\"No result from: %s\"", ",", "ext", ")", "continue", "result_list", "=", "[", "]", "if", "isinstance", "(", "results", ",", "c", ".", "Iterable", ")", ":", "result_list", ".", "extend", "(", "results", ")", "else", ":", "result_list", ".", "append", "(", "results", ")", "all_results", ".", "extend", "(", "result_list", ")", "return", "all_results" ]
Call all child extensions with the given arguments. This calls all child extensions and collects the results for our own parent. Use this to control the execution of your nested extensions from your own extension. Returns: :obj:`list` of :obj:`RunInfo`: A list of collected results of our child extensions.
[ "Call", "all", "child", "extensions", "with", "the", "given", "arguments", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/extensions/base.py#L41-L67
PolyJIT/benchbuild
benchbuild/extensions/base.py
Extension.print
def print(self, indent=0): """Print a structural view of the registered extensions.""" LOG.info("%s:: %s", indent * " ", self.__class__) for ext in self.next_extensions: ext.print(indent=indent + 2)
python
def print(self, indent=0): """Print a structural view of the registered extensions.""" LOG.info("%s:: %s", indent * " ", self.__class__) for ext in self.next_extensions: ext.print(indent=indent + 2)
[ "def", "print", "(", "self", ",", "indent", "=", "0", ")", ":", "LOG", ".", "info", "(", "\"%s:: %s\"", ",", "indent", "*", "\" \"", ",", "self", ".", "__class__", ")", "for", "ext", "in", "self", ".", "next_extensions", ":", "ext", ".", "print", "(", "indent", "=", "indent", "+", "2", ")" ]
Print a structural view of the registered extensions.
[ "Print", "a", "structural", "view", "of", "the", "registered", "extensions", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/extensions/base.py#L73-L77
mromanello/hucitlib
knowledge_base/cli.py
print_results
def print_results(matches): """ :param matches: a list of tuples. """ output = "" for n, match in enumerate(matches): matched_text = match[0][:40]+"..." if len(match[0]) > 40 else match[0] search_result = match[1] if search_result.uri==surf.ns.EFRBROO['F10_Person']: label = unicode(search_result)[:40]+"..." if len(unicode(search_result)) > 40 else unicode(search_result) print("\n{:5}) {:50} {:40} (Matched: \"{}\")\n".format(n+1, label, search_result.get_urn(), matched_text)) elif search_result.uri==surf.ns.EFRBROO['F1_Work']: label = "{}, {}".format(search_result.author, search_result) label = label[:40]+"..." if len(label) > 40 else label print("\n{:5}) {:50} {:40} (Matched: \"{}\")\n".format(n+1, label, search_result.get_urn(), matched_text))
python
def print_results(matches): """ :param matches: a list of tuples. """ output = "" for n, match in enumerate(matches): matched_text = match[0][:40]+"..." if len(match[0]) > 40 else match[0] search_result = match[1] if search_result.uri==surf.ns.EFRBROO['F10_Person']: label = unicode(search_result)[:40]+"..." if len(unicode(search_result)) > 40 else unicode(search_result) print("\n{:5}) {:50} {:40} (Matched: \"{}\")\n".format(n+1, label, search_result.get_urn(), matched_text)) elif search_result.uri==surf.ns.EFRBROO['F1_Work']: label = "{}, {}".format(search_result.author, search_result) label = label[:40]+"..." if len(label) > 40 else label print("\n{:5}) {:50} {:40} (Matched: \"{}\")\n".format(n+1, label, search_result.get_urn(), matched_text))
[ "def", "print_results", "(", "matches", ")", ":", "output", "=", "\"\"", "for", "n", ",", "match", "in", "enumerate", "(", "matches", ")", ":", "matched_text", "=", "match", "[", "0", "]", "[", ":", "40", "]", "+", "\"...\"", "if", "len", "(", "match", "[", "0", "]", ")", ">", "40", "else", "match", "[", "0", "]", "search_result", "=", "match", "[", "1", "]", "if", "search_result", ".", "uri", "==", "surf", ".", "ns", ".", "EFRBROO", "[", "'F10_Person'", "]", ":", "label", "=", "unicode", "(", "search_result", ")", "[", ":", "40", "]", "+", "\"...\"", "if", "len", "(", "unicode", "(", "search_result", ")", ")", ">", "40", "else", "unicode", "(", "search_result", ")", "print", "(", "\"\\n{:5}) {:50} {:40} (Matched: \\\"{}\\\")\\n\"", ".", "format", "(", "n", "+", "1", ",", "label", ",", "search_result", ".", "get_urn", "(", ")", ",", "matched_text", ")", ")", "elif", "search_result", ".", "uri", "==", "surf", ".", "ns", ".", "EFRBROO", "[", "'F1_Work'", "]", ":", "label", "=", "\"{}, {}\"", ".", "format", "(", "search_result", ".", "author", ",", "search_result", ")", "label", "=", "label", "[", ":", "40", "]", "+", "\"...\"", "if", "len", "(", "label", ")", ">", "40", "else", "label", "print", "(", "\"\\n{:5}) {:50} {:40} (Matched: \\\"{}\\\")\\n\"", ".", "format", "(", "n", "+", "1", ",", "label", ",", "search_result", ".", "get_urn", "(", ")", ",", "matched_text", ")", ")" ]
:param matches: a list of tuples.
[ ":", "param", "matches", ":", "a", "list", "of", "tuples", "." ]
train
https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/cli.py#L34-L48
mromanello/hucitlib
knowledge_base/cli.py
show_result
def show_result(resource, verbose=False): """ TODO """ if resource.uri == surf.ns.EFRBROO['F10_Person']: print("\n{} ({})\n".format(unicode(resource), resource.get_urn())) works = resource.get_works() print("Works by {} ({}):\n".format(resource, len(works))) [show_result(work) for work in works] print("\n") elif resource.uri == surf.ns.EFRBROO['F1_Work']: if verbose: print("\n{} ({})".format(unicode(resource), resource.get_urn())) print("\nTitles:") print("\n".join(["{:20} ({})".format(title, lang) for lang, title in resource.get_titles()])) if len(resource.get_abbreviations()) > 0: print("\nAbbreviations: {}\n".format(", ".join(["{}".format(abbr) for abbr in resource.get_abbreviations()]))) else: print("{:50} {:40}".format(unicode(resource), resource.get_urn()))
python
def show_result(resource, verbose=False): """ TODO """ if resource.uri == surf.ns.EFRBROO['F10_Person']: print("\n{} ({})\n".format(unicode(resource), resource.get_urn())) works = resource.get_works() print("Works by {} ({}):\n".format(resource, len(works))) [show_result(work) for work in works] print("\n") elif resource.uri == surf.ns.EFRBROO['F1_Work']: if verbose: print("\n{} ({})".format(unicode(resource), resource.get_urn())) print("\nTitles:") print("\n".join(["{:20} ({})".format(title, lang) for lang, title in resource.get_titles()])) if len(resource.get_abbreviations()) > 0: print("\nAbbreviations: {}\n".format(", ".join(["{}".format(abbr) for abbr in resource.get_abbreviations()]))) else: print("{:50} {:40}".format(unicode(resource), resource.get_urn()))
[ "def", "show_result", "(", "resource", ",", "verbose", "=", "False", ")", ":", "if", "resource", ".", "uri", "==", "surf", ".", "ns", ".", "EFRBROO", "[", "'F10_Person'", "]", ":", "print", "(", "\"\\n{} ({})\\n\"", ".", "format", "(", "unicode", "(", "resource", ")", ",", "resource", ".", "get_urn", "(", ")", ")", ")", "works", "=", "resource", ".", "get_works", "(", ")", "print", "(", "\"Works by {} ({}):\\n\"", ".", "format", "(", "resource", ",", "len", "(", "works", ")", ")", ")", "[", "show_result", "(", "work", ")", "for", "work", "in", "works", "]", "print", "(", "\"\\n\"", ")", "elif", "resource", ".", "uri", "==", "surf", ".", "ns", ".", "EFRBROO", "[", "'F1_Work'", "]", ":", "if", "verbose", ":", "print", "(", "\"\\n{} ({})\"", ".", "format", "(", "unicode", "(", "resource", ")", ",", "resource", ".", "get_urn", "(", ")", ")", ")", "print", "(", "\"\\nTitles:\"", ")", "print", "(", "\"\\n\"", ".", "join", "(", "[", "\"{:20} ({})\"", ".", "format", "(", "title", ",", "lang", ")", "for", "lang", ",", "title", "in", "resource", ".", "get_titles", "(", ")", "]", ")", ")", "if", "len", "(", "resource", ".", "get_abbreviations", "(", ")", ")", ">", "0", ":", "print", "(", "\"\\nAbbreviations: {}\\n\"", ".", "format", "(", "\", \"", ".", "join", "(", "[", "\"{}\"", ".", "format", "(", "abbr", ")", "for", "abbr", "in", "resource", ".", "get_abbreviations", "(", ")", "]", ")", ")", ")", "else", ":", "print", "(", "\"{:50} {:40}\"", ".", "format", "(", "unicode", "(", "resource", ")", ",", "resource", ".", "get_urn", "(", ")", ")", ")" ]
TODO
[ "TODO" ]
train
https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/cli.py#L50-L68
mromanello/hucitlib
knowledge_base/cli.py
main
def main(): """Define the CLI interface/commands.""" arguments = docopt(__doc__) cfg_filename = pkg_resources.resource_filename( 'knowledge_base', 'config/virtuoso.ini' ) kb = KnowledgeBase(cfg_filename) # the user has issued a `find` command if arguments["find"]: search_string = arguments["<search_string>"] try: urn = CTS_URN(search_string) match = kb.get_resource_by_urn(str(urn)) show_result(match, verbose=True) return except BadCtsUrnSyntax as e: pass except IndexError as e: raise e print("\nNo records with this CTS URN!\n") return try: matches = kb.search(search_string) print("\nSearching for \"%s\" yielded %s results" % ( search_string, len(matches) )) print_results(matches) return except SparqlReaderException as e: print("\nWildcard word needs at least 4 leading characters") # the user has issued an `add` command elif arguments["add"]: input_urn = arguments["--to"] # first let's check if it's a valid URN try: urn = CTS_URN(input_urn) except Exception as e: print("The provided URN ({}) is invalid!".format(input_urn)) return try: resource = kb.get_resource_by_urn(urn) assert resource is not None except ResourceNotFound: print("The KB does not contain a resource identified by {}".format( urn )) return print(arguments) #if arguments[""] pass
python
def main(): """Define the CLI interface/commands.""" arguments = docopt(__doc__) cfg_filename = pkg_resources.resource_filename( 'knowledge_base', 'config/virtuoso.ini' ) kb = KnowledgeBase(cfg_filename) # the user has issued a `find` command if arguments["find"]: search_string = arguments["<search_string>"] try: urn = CTS_URN(search_string) match = kb.get_resource_by_urn(str(urn)) show_result(match, verbose=True) return except BadCtsUrnSyntax as e: pass except IndexError as e: raise e print("\nNo records with this CTS URN!\n") return try: matches = kb.search(search_string) print("\nSearching for \"%s\" yielded %s results" % ( search_string, len(matches) )) print_results(matches) return except SparqlReaderException as e: print("\nWildcard word needs at least 4 leading characters") # the user has issued an `add` command elif arguments["add"]: input_urn = arguments["--to"] # first let's check if it's a valid URN try: urn = CTS_URN(input_urn) except Exception as e: print("The provided URN ({}) is invalid!".format(input_urn)) return try: resource = kb.get_resource_by_urn(urn) assert resource is not None except ResourceNotFound: print("The KB does not contain a resource identified by {}".format( urn )) return print(arguments) #if arguments[""] pass
[ "def", "main", "(", ")", ":", "arguments", "=", "docopt", "(", "__doc__", ")", "cfg_filename", "=", "pkg_resources", ".", "resource_filename", "(", "'knowledge_base'", ",", "'config/virtuoso.ini'", ")", "kb", "=", "KnowledgeBase", "(", "cfg_filename", ")", "# the user has issued a `find` command", "if", "arguments", "[", "\"find\"", "]", ":", "search_string", "=", "arguments", "[", "\"<search_string>\"", "]", "try", ":", "urn", "=", "CTS_URN", "(", "search_string", ")", "match", "=", "kb", ".", "get_resource_by_urn", "(", "str", "(", "urn", ")", ")", "show_result", "(", "match", ",", "verbose", "=", "True", ")", "return", "except", "BadCtsUrnSyntax", "as", "e", ":", "pass", "except", "IndexError", "as", "e", ":", "raise", "e", "print", "(", "\"\\nNo records with this CTS URN!\\n\"", ")", "return", "try", ":", "matches", "=", "kb", ".", "search", "(", "search_string", ")", "print", "(", "\"\\nSearching for \\\"%s\\\" yielded %s results\"", "%", "(", "search_string", ",", "len", "(", "matches", ")", ")", ")", "print_results", "(", "matches", ")", "return", "except", "SparqlReaderException", "as", "e", ":", "print", "(", "\"\\nWildcard word needs at least 4 leading characters\"", ")", "# the user has issued an `add` command", "elif", "arguments", "[", "\"add\"", "]", ":", "input_urn", "=", "arguments", "[", "\"--to\"", "]", "# first let's check if it's a valid URN", "try", ":", "urn", "=", "CTS_URN", "(", "input_urn", ")", "except", "Exception", "as", "e", ":", "print", "(", "\"The provided URN ({}) is invalid!\"", ".", "format", "(", "input_urn", ")", ")", "return", "try", ":", "resource", "=", "kb", ".", "get_resource_by_urn", "(", "urn", ")", "assert", "resource", "is", "not", "None", "except", "ResourceNotFound", ":", "print", "(", "\"The KB does not contain a resource identified by {}\"", ".", "format", "(", "urn", ")", ")", "return", "print", "(", "arguments", ")", "#if arguments[\"\"]", "pass" ]
Define the CLI interface/commands.
[ "Define", "the", "CLI", "interface", "/", "commands", "." ]
train
https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/cli.py#L71-L127
PolyJIT/benchbuild
benchbuild/utils/container.py
cached
def cached(func): """Memoize a function result.""" ret = None def call_or_cache(*args, **kwargs): nonlocal ret if ret is None: ret = func(*args, **kwargs) return ret return call_or_cache
python
def cached(func): """Memoize a function result.""" ret = None def call_or_cache(*args, **kwargs): nonlocal ret if ret is None: ret = func(*args, **kwargs) return ret return call_or_cache
[ "def", "cached", "(", "func", ")", ":", "ret", "=", "None", "def", "call_or_cache", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nonlocal", "ret", "if", "ret", "is", "None", ":", "ret", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "ret", "return", "call_or_cache" ]
Memoize a function result.
[ "Memoize", "a", "function", "result", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/container.py#L15-L25
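A sketch of the `cached` decorator in use: the wrapped function runs once and its first result is served to every later call. Arguments after the first call are ignored, so it best suits nullary initialisers; a genuine None result would also re-run the body.

def cached(func):
    ret = None
    def call_or_cache(*args, **kwargs):
        nonlocal ret
        if ret is None:
            ret = func(*args, **kwargs)
        return ret
    return call_or_cache

calls = []

@cached
def expensive():
    calls.append(1)      # side effect to count real invocations
    return 42

assert expensive() == 42
assert expensive() == 42
assert len(calls) == 1   # the body executed only once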
PolyJIT/benchbuild
benchbuild/utils/container.py
is_valid
def is_valid(container, path): """ Checks if a container exists and is unpacked. Args: path: The location where the container is expected. Returns: True if the container is valid, False if the container needs to be unpacked or if the path does not exist yet. """ try: tmp_hash_path = container.filename + ".hash" with open(tmp_hash_path, 'r') as tmp_file: tmp_hash = tmp_file.readline() except IOError: LOG.info("No .hash-file in the tmp-directory.") container_hash_path = local.path(path) / "gentoo.tar.bz2.hash" if container_hash_path.exists(): with open(container_hash_path, 'r') as hash_file: container_hash = hash_file.readline() return container_hash == tmp_hash return False
python
def is_valid(container, path): """ Checks if a container exists and is unpacked. Args: path: The location where the container is expected. Returns: True if the container is valid, False if the container needs to be unpacked or if the path does not exist yet. """ try: tmp_hash_path = container.filename + ".hash" with open(tmp_hash_path, 'r') as tmp_file: tmp_hash = tmp_file.readline() except IOError: LOG.info("No .hash-file in the tmp-directory.") container_hash_path = local.path(path) / "gentoo.tar.bz2.hash" if container_hash_path.exists(): with open(container_hash_path, 'r') as hash_file: container_hash = hash_file.readline() return container_hash == tmp_hash return False
[ "def", "is_valid", "(", "container", ",", "path", ")", ":", "try", ":", "tmp_hash_path", "=", "container", ".", "filename", "+", "\".hash\"", "with", "open", "(", "tmp_hash_path", ",", "'r'", ")", "as", "tmp_file", ":", "tmp_hash", "=", "tmp_file", ".", "readline", "(", ")", "except", "IOError", ":", "LOG", ".", "info", "(", "\"No .hash-file in the tmp-directory.\"", ")", "container_hash_path", "=", "local", ".", "path", "(", "path", ")", "/", "\"gentoo.tar.bz2.hash\"", "if", "container_hash_path", ".", "exists", "(", ")", ":", "with", "open", "(", "container_hash_path", ",", "'r'", ")", "as", "hash_file", ":", "container_hash", "=", "hash_file", ".", "readline", "(", ")", "return", "container_hash", "==", "tmp_hash", "return", "False" ]
Checks if a container exists and is unpacked. Args: path: The location where the container is expected. Returns: True if the container is valid, False if the container needs to be unpacked or if the path does not exist yet.
[ "Checks", "if", "a", "container", "exists", "and", "is", "unpacked", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/container.py#L111-L134
PolyJIT/benchbuild
benchbuild/utils/container.py
unpack
def unpack(container, path): """ Unpack a container usable by uchroot. Method that checks if a directory for the container exists, checks if erlent support is needed and then unpacks the container accordingly. Args: path: The location of the container that needs to be unpacked. """ from benchbuild.utils.run import run from benchbuild.utils.uchroot import no_args path = local.path(path) c_filename = local.path(container.filename) name = c_filename.basename if not path.exists(): path.mkdir() with local.cwd(path): Wget(container.remote, name) uchroot = no_args() uchroot = uchroot["-E", "-A", "-C", "-r", "/", "-w", os.path.abspath("."), "--"] # Check, if we need erlent support for this archive. has_erlent = bash[ "-c", "tar --list -f './{0}' | grep --silent '.erlent'".format( name)] has_erlent = (has_erlent & TF) untar = local["/bin/tar"]["xf", "./" + name] if not has_erlent: untar = uchroot[untar] run(untar["--exclude=dev/*"]) if not os.path.samefile(name, container.filename): rm(name) else: LOG.warning("File contents do not match: %s != %s", name, container.filename) cp(container.filename + ".hash", path)
python
def unpack(container, path): """ Unpack a container usable by uchroot. Method that checks if a directory for the container exists, checks if erlent support is needed and then unpacks the container accordingly. Args: path: The location of the container that needs to be unpacked. """ from benchbuild.utils.run import run from benchbuild.utils.uchroot import no_args path = local.path(path) c_filename = local.path(container.filename) name = c_filename.basename if not path.exists(): path.mkdir() with local.cwd(path): Wget(container.remote, name) uchroot = no_args() uchroot = uchroot["-E", "-A", "-C", "-r", "/", "-w", os.path.abspath("."), "--"] # Check, if we need erlent support for this archive. has_erlent = bash[ "-c", "tar --list -f './{0}' | grep --silent '.erlent'".format( name)] has_erlent = (has_erlent & TF) untar = local["/bin/tar"]["xf", "./" + name] if not has_erlent: untar = uchroot[untar] run(untar["--exclude=dev/*"]) if not os.path.samefile(name, container.filename): rm(name) else: LOG.warning("File contents do not match: %s != %s", name, container.filename) cp(container.filename + ".hash", path)
[ "def", "unpack", "(", "container", ",", "path", ")", ":", "from", "benchbuild", ".", "utils", ".", "run", "import", "run", "from", "benchbuild", ".", "utils", ".", "uchroot", "import", "no_args", "path", "=", "local", ".", "path", "(", "path", ")", "c_filename", "=", "local", ".", "path", "(", "container", ".", "filename", ")", "name", "=", "c_filename", ".", "basename", "if", "not", "path", ".", "exists", "(", ")", ":", "path", ".", "mkdir", "(", ")", "with", "local", ".", "cwd", "(", "path", ")", ":", "Wget", "(", "container", ".", "remote", ",", "name", ")", "uchroot", "=", "no_args", "(", ")", "uchroot", "=", "uchroot", "[", "\"-E\"", ",", "\"-A\"", ",", "\"-C\"", ",", "\"-r\"", ",", "\"/\"", ",", "\"-w\"", ",", "os", ".", "path", ".", "abspath", "(", "\".\"", ")", ",", "\"--\"", "]", "# Check, if we need erlent support for this archive.", "has_erlent", "=", "bash", "[", "\"-c\"", ",", "\"tar --list -f './{0}' | grep --silent '.erlent'\"", ".", "format", "(", "name", ")", "]", "has_erlent", "=", "(", "has_erlent", "&", "TF", ")", "untar", "=", "local", "[", "\"/bin/tar\"", "]", "[", "\"xf\"", ",", "\"./\"", "+", "name", "]", "if", "not", "has_erlent", ":", "untar", "=", "uchroot", "[", "untar", "]", "run", "(", "untar", "[", "\"--exclude=dev/*\"", "]", ")", "if", "not", "os", ".", "path", ".", "samefile", "(", "name", ",", "container", ".", "filename", ")", ":", "rm", "(", "name", ")", "else", ":", "LOG", ".", "warning", "(", "\"File contents do not match: %s != %s\"", ",", "name", ",", "container", ".", "filename", ")", "cp", "(", "container", ".", "filename", "+", "\".hash\"", ",", "path", ")" ]
Unpack a container usable by uchroot. Method that checks if a directory for the container exists, checks if erlent support is needed and then unpacks the container accordingly. Args: path: The location of the container that needs to be unpacked.
[ "Unpack", "a", "container", "usable", "by", "uchroot", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/container.py#L137-L182
PolyJIT/benchbuild
benchbuild/utils/container.py
Container.local
def local(self): """ Finds the current location of a container. Also unpacks the container if necessary. Returns: target: The path where the container ends up. """ assert self.name in CFG["container"]["images"].value tmp_dir = local.path(str(CFG["tmp_dir"])) target_dir = tmp_dir / self.name if not target_dir.exists() or not is_valid(self, target_dir): unpack(self, target_dir) return target_dir
python
def local(self): """ Finds the current location of a container. Also unpacks the container if necessary. Returns: target: The path where the container ends up. """ assert self.name in CFG["container"]["images"].value tmp_dir = local.path(str(CFG["tmp_dir"])) target_dir = tmp_dir / self.name if not target_dir.exists() or not is_valid(self, target_dir): unpack(self, target_dir) return target_dir
[ "def", "local", "(", "self", ")", ":", "assert", "self", ".", "name", "in", "CFG", "[", "\"container\"", "]", "[", "\"images\"", "]", ".", "value", "tmp_dir", "=", "local", ".", "path", "(", "str", "(", "CFG", "[", "\"tmp_dir\"", "]", ")", ")", "target_dir", "=", "tmp_dir", "/", "self", ".", "name", "if", "not", "target_dir", ".", "exists", "(", ")", "or", "not", "is_valid", "(", "self", ",", "target_dir", ")", ":", "unpack", "(", "self", ",", "target_dir", ")", "return", "target_dir" ]
Finds the current location of a container. Also unpacks the container if necessary. Returns: target: The path where the container ends up.
[ "Finds", "the", "current", "location", "of", "a", "container", ".", "Also", "unpacks", "the", "container", "if", "necessary", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/container.py#L46-L61
PolyJIT/benchbuild
benchbuild/utils/container.py
Gentoo.src_file
def src_file(self): """ Get the latest src_uri for a stage 3 tarball. Returns (str): Latest src_uri from gentoo's distfiles mirror. """ try: src_uri = (curl[Gentoo._LATEST_TXT] | tail["-n", "+3"] | cut["-f1", "-d "])().strip() except ProcessExecutionError as proc_ex: src_uri = "NOT-FOUND" LOG.error("Could not determine latest stage3 src uri: %s", str(proc_ex)) return src_uri
python
def src_file(self): """ Get the latest src_uri for a stage 3 tarball. Returns (str): Latest src_uri from gentoo's distfiles mirror. """ try: src_uri = (curl[Gentoo._LATEST_TXT] | tail["-n", "+3"] | cut["-f1", "-d "])().strip() except ProcessExecutionError as proc_ex: src_uri = "NOT-FOUND" LOG.error("Could not determine latest stage3 src uri: %s", str(proc_ex)) return src_uri
[ "def", "src_file", "(", "self", ")", ":", "try", ":", "src_uri", "=", "(", "curl", "[", "Gentoo", ".", "_LATEST_TXT", "]", "|", "tail", "[", "\"-n\"", ",", "\"+3\"", "]", "|", "cut", "[", "\"-f1\"", ",", "\"-d \"", "]", ")", "(", ")", ".", "strip", "(", ")", "except", "ProcessExecutionError", "as", "proc_ex", ":", "src_uri", "=", "\"NOT-FOUND\"", "LOG", ".", "error", "(", "\"Could not determine latest stage3 src uri: %s\"", ",", "str", "(", "proc_ex", ")", ")", "return", "src_uri" ]
Get the latest src_uri for a stage 3 tarball. Returns (str): Latest src_uri from gentoo's distfiles mirror.
[ "Get", "the", "latest", "src_uri", "for", "a", "stage", "3", "tarball", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/container.py#L72-L86
PolyJIT/benchbuild
benchbuild/utils/container.py
Gentoo.version
def version(self): """Return the build date of the gentoo container.""" try: _version = (curl[Gentoo._LATEST_TXT] | \ awk['NR==2{print}'] | \ cut["-f2", "-d="])().strip() _version = datetime.utcfromtimestamp(int(_version))\ .strftime("%Y-%m-%d") except ProcessExecutionError as proc_ex: _version = "unknown" LOG.error("Could not determine timestamp: %s", str(proc_ex)) return _version
python
def version(self): """Return the build date of the gentoo container.""" try: _version = (curl[Gentoo._LATEST_TXT] | \ awk['NR==2{print}'] | \ cut["-f2", "-d="])().strip() _version = datetime.utcfromtimestamp(int(_version))\ .strftime("%Y-%m-%d") except ProcessExecutionError as proc_ex: _version = "unknown" LOG.error("Could not determine timestamp: %s", str(proc_ex)) return _version
[ "def", "version", "(", "self", ")", ":", "try", ":", "_version", "=", "(", "curl", "[", "Gentoo", ".", "_LATEST_TXT", "]", "|", "awk", "[", "'NR==2{print}'", "]", "|", "cut", "[", "\"-f2\"", ",", "\"-d=\"", "]", ")", "(", ")", ".", "strip", "(", ")", "_version", "=", "datetime", ".", "utcfromtimestamp", "(", "int", "(", "_version", ")", ")", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", "except", "ProcessExecutionError", "as", "proc_ex", ":", "_version", "=", "\"unknown\"", "LOG", ".", "error", "(", "\"Could not determine timestamp: %s\"", ",", "str", "(", "proc_ex", ")", ")", "return", "_version" ]
Return the build date of the gentoo container.
[ "Return", "the", "build", "date", "of", "the", "gentoo", "container", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/container.py#L90-L102
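The date-formatting step in `version` is easy to check in isolation; this worked example uses a made-up timestamp in place of the value parsed from Gentoo's latest-stage3 listing:

    from datetime import datetime

    stamp = "1514764800"  # hypothetical value taken from the listing
    print(datetime.utcfromtimestamp(int(stamp)).strftime("%Y-%m-%d"))  # -> 2018-01-01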
BlueBrain/hpcbench
hpcbench/cli/bencsv.py
main
def main(argv=None): """ben-csv entry point""" arguments = cli_common(__doc__, argv=argv) csv_export = CSVExporter(arguments['CAMPAIGN-DIR'], arguments['--output']) if arguments['--peek']: csv_export.peek() else: fieldsstr = arguments.get('--fields') fields = fieldsstr.split(',') if fieldsstr else None csv_export.export(fields) if argv is not None: return csv_export
python
def main(argv=None): """ben-csv entry point""" arguments = cli_common(__doc__, argv=argv) csv_export = CSVExporter(arguments['CAMPAIGN-DIR'], arguments['--output']) if arguments['--peek']: csv_export.peek() else: fieldsstr = arguments.get('--fields') fields = fieldsstr.split(',') if fieldsstr else None csv_export.export(fields) if argv is not None: return csv_export
[ "def", "main", "(", "argv", "=", "None", ")", ":", "arguments", "=", "cli_common", "(", "__doc__", ",", "argv", "=", "argv", ")", "csv_export", "=", "CSVExporter", "(", "arguments", "[", "'CAMPAIGN-DIR'", "]", ",", "arguments", "[", "'--output'", "]", ")", "if", "arguments", "[", "'--peek'", "]", ":", "csv_export", ".", "peek", "(", ")", "else", ":", "fieldsstr", "=", "arguments", ".", "get", "(", "'--fields'", ")", "fields", "=", "fieldsstr", ".", "split", "(", "','", ")", "if", "fieldsstr", "else", "None", "csv_export", ".", "export", "(", "fields", ")", "if", "argv", "is", "not", "None", ":", "return", "csv_export" ]
ben-csv entry point
[ "ben", "-", "csv", "entry", "point" ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/cli/bencsv.py#L24-L35
eng-tools/sfsimodels
sfsimodels/models/time.py
time_indices
def time_indices(npts, dt, start, end, index): """ Determine the new start and end indices of the time series. :param npts: Number of points in original time series :param dt: Time step of original time series :param start: int or float, optional, New start point :param end: int or float, optional, New end point :param index: bool, optional, if False then start and end are considered values in time. :return: tuple, start index, end index """ if index is False: # Convert time values into indices if end != -1: e_index = int(end / dt) + 1 else: e_index = end s_index = int(start / dt) else: s_index = start e_index = end if e_index > npts: raise exceptions.ModelWarning("Cut point is greater than time series length") return s_index, e_index
python
def time_indices(npts, dt, start, end, index): """ Determine the new start and end indices of the time series. :param npts: Number of points in original time series :param dt: Time step of original time series :param start: int or float, optional, New start point :param end: int or float, optional, New end point :param index: bool, optional, if False then start and end are considered values in time. :return: tuple, start index, end index """ if index is False: # Convert time values into indices if end != -1: e_index = int(end / dt) + 1 else: e_index = end s_index = int(start / dt) else: s_index = start e_index = end if e_index > npts: raise exceptions.ModelWarning("Cut point is greater than time series length") return s_index, e_index
[ "def", "time_indices", "(", "npts", ",", "dt", ",", "start", ",", "end", ",", "index", ")", ":", "if", "index", "is", "False", ":", "# Convert time values into indices", "if", "end", "!=", "-", "1", ":", "e_index", "=", "int", "(", "end", "/", "dt", ")", "+", "1", "else", ":", "e_index", "=", "end", "s_index", "=", "int", "(", "start", "/", "dt", ")", "else", ":", "s_index", "=", "start", "e_index", "=", "end", "if", "e_index", ">", "npts", ":", "raise", "exceptions", ".", "ModelWarning", "(", "\"Cut point is greater than time series length\"", ")", "return", "s_index", ",", "e_index" ]
Determine the new start and end indices of the time series. :param npts: Number of points in original time series :param dt: Time step of original time series :param start: int or float, optional, New start point :param end: int or float, optional, New end point :param index: bool, optional, if False then start and end are considered values in time. :return: tuple, start index, end index
[ "Determine", "the", "new", "start", "and", "end", "indices", "of", "the", "time", "series", "." ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/time.py#L64-L86
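A worked example of `time_indices`, assuming a 10 s record sampled at dt = 0.01 s; note the +1 on the end index, which makes the cut inclusive of the requested end time:

    s_index, e_index = time_indices(npts=1000, dt=0.01, start=0.5, end=2.0, index=False)
    assert (s_index, e_index) == (50, 201)  # int(0.5 / 0.01) = 50; int(2.0 / 0.01) + 1 = 201
    # end=-1 keeps the tail of the record; index=True treats start/end as raw indices.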
eng-tools/sfsimodels
sfsimodels/models/time.py
TimeSeries.cut
def cut(self, start=0, end=-1, index=False): """ The method cuts the time series to reduce its length. :param start: int or float, optional, New start point :param end: int or float, optional, New end point :param index: bool, optional, if False then start and end are considered values in time. """ s_index, e_index = time_indices(self.npts, self.dt, start, end, index) self._values = np.array(self.values[s_index:e_index])
python
def cut(self, start=0, end=-1, index=False): """ The method cuts the time series to reduce its length. :param start: int or float, optional, New start point :param end: int or float, optional, New end point :param index: bool, optional, if False then start and end are considered values in time. """ s_index, e_index = time_indices(self.npts, self.dt, start, end, index) self._values = np.array(self.values[s_index:e_index])
[ "def", "cut", "(", "self", ",", "start", "=", "0", ",", "end", "=", "-", "1", ",", "index", "=", "False", ")", ":", "s_index", ",", "e_index", "=", "time_indices", "(", "self", ".", "npts", ",", "self", ".", "dt", ",", "start", ",", "end", ",", "index", ")", "self", ".", "_values", "=", "np", ".", "array", "(", "self", ".", "values", "[", "s_index", ":", "e_index", "]", ")" ]
The method cuts the time series to reduce its length. :param start: int or float, optional, New start point :param end: int or float, optional, New end point :param index: bool, optional, if False then start and end are considered values in time.
[ "The", "method", "cuts", "the", "time", "series", "to", "reduce", "its", "length", ".", ":", "param", "start", ":", "int", "or", "float", "optional", "New", "start", "point", ":", "param", "end", ":", "int", "or", "float", "optional", "New", "end", "point", ":", "param", "index", ":", "bool", "optional", "if", "False", "then", "start", "and", "end", "are", "considered", "values", "in", "time", "." ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/time.py#L53-L61
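A usage sketch for `cut`; the TimeSeries constructor is not shown in this record, so the (values, dt) signature below is an assumption:

    import numpy as np

    ts = TimeSeries(np.arange(1000), dt=0.01)  # constructor signature assumed
    ts.cut(start=0.5, end=2.0)                 # keep the window from 0.5 s to 2.0 s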
BlueBrain/hpcbench
hpcbench/toolbox/slurm/job.py
Job.finished
def finished(cls, jobid): """Check whether a SLURM job is finished or not""" output = subprocess.check_output( [SACCT, '-n', '-X', '-o', "end", '-j', str(jobid)] ) end = output.strip().decode() return end not in {'Unknown', ''}
python
def finished(cls, jobid): """Check whether a SLURM job is finished or not""" output = subprocess.check_output( [SACCT, '-n', '-X', '-o', "end", '-j', str(jobid)] ) end = output.strip().decode() return end not in {'Unknown', ''}
[ "def", "finished", "(", "cls", ",", "jobid", ")", ":", "output", "=", "subprocess", ".", "check_output", "(", "[", "SACCT", ",", "'-n'", ",", "'-X'", ",", "'-o'", ",", "\"end\"", ",", "'-j'", ",", "str", "(", "jobid", ")", "]", ")", "end", "=", "output", ".", "strip", "(", ")", ".", "decode", "(", ")", "return", "end", "not", "in", "{", "'Unknown'", ",", "''", "}" ]
Check whether a SLURM job is finished or not
[ "Check", "whether", "a", "SLURM", "job", "is", "finished", "or", "not" ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/toolbox/slurm/job.py#L53-L59
PolyJIT/benchbuild
benchbuild/utils/run.py
begin_run_group
def begin_run_group(project): """ Begin a run_group in the database. A run_group groups a set of runs for a given project. This models a series of runs that form a complete binary runtime test. Args: project: The project we begin a new run_group for. Returns: ``(group, session)`` where group is the created group in the database and session is the database session this group lives in. """ from benchbuild.utils.db import create_run_group from datetime import datetime group, session = create_run_group(project) group.begin = datetime.now() group.status = 'running' session.commit() return group, session
python
def begin_run_group(project): """ Begin a run_group in the database. A run_group groups a set of runs for a given project. This models a series of runs that form a complete binary runtime test. Args: project: The project we begin a new run_group for. Returns: ``(group, session)`` where group is the created group in the database and session is the database session this group lives in. """ from benchbuild.utils.db import create_run_group from datetime import datetime group, session = create_run_group(project) group.begin = datetime.now() group.status = 'running' session.commit() return group, session
[ "def", "begin_run_group", "(", "project", ")", ":", "from", "benchbuild", ".", "utils", ".", "db", "import", "create_run_group", "from", "datetime", "import", "datetime", "group", ",", "session", "=", "create_run_group", "(", "project", ")", "group", ".", "begin", "=", "datetime", ".", "now", "(", ")", "group", ".", "status", "=", "'running'", "session", ".", "commit", "(", ")", "return", "group", ",", "session" ]
Begin a run_group in the database. A run_group groups a set of runs for a given project. This models a series of runs that form a complete binary runtime test. Args: project: The project we begin a new run_group for. Returns: ``(group, session)`` where group is the created group in the database and session is the database session this group lives in.
[ "Begin", "a", "run_group", "in", "the", "database", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/run.py#L205-L227
PolyJIT/benchbuild
benchbuild/utils/run.py
end_run_group
def end_run_group(group, session): """ End the run_group successfully. Args: group: The run_group we want to complete. session: The database transaction we will finish. """ from datetime import datetime group.end = datetime.now() group.status = 'completed' session.commit()
python
def end_run_group(group, session): """ End the run_group successfully. Args: group: The run_group we want to complete. session: The database transaction we will finish. """ from datetime import datetime group.end = datetime.now() group.status = 'completed' session.commit()
[ "def", "end_run_group", "(", "group", ",", "session", ")", ":", "from", "datetime", "import", "datetime", "group", ".", "end", "=", "datetime", ".", "now", "(", ")", "group", ".", "status", "=", "'completed'", "session", ".", "commit", "(", ")" ]
End the run_group successfully. Args: group: The run_group we want to complete. session: The database transaction we will finish.
[ "End", "the", "run_group", "successfully", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/run.py#L230-L242
PolyJIT/benchbuild
benchbuild/utils/run.py
fail_run_group
def fail_run_group(group, session): """ End the run_group unsuccessfully. Args: group: The run_group we want to complete. session: The database transaction we will finish. """ from datetime import datetime group.end = datetime.now() group.status = 'failed' session.commit()
python
def fail_run_group(group, session): """ End the run_group unsuccessfully. Args: group: The run_group we want to complete. session: The database transaction we will finish. """ from datetime import datetime group.end = datetime.now() group.status = 'failed' session.commit()
[ "def", "fail_run_group", "(", "group", ",", "session", ")", ":", "from", "datetime", "import", "datetime", "group", ".", "end", "=", "datetime", ".", "now", "(", ")", "group", ".", "status", "=", "'failed'", "session", ".", "commit", "(", ")" ]
End the run_group unsuccessfully. Args: group: The run_group we want to complete. session: The database transaction we will finish.
[ "End", "the", "run_group", "unsuccessfully", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/run.py#L245-L257
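A lifecycle sketch tying `begin_run_group`, `end_run_group`, and `fail_run_group` together; `project` stands for a benchbuild project instance and `run_all_binaries` is a hypothetical workload:

    group, session = begin_run_group(project)
    try:
        run_all_binaries()                 # hypothetical: execute the runtime test
        end_run_group(group, session)      # marks the group 'completed'
    except Exception:
        fail_run_group(group, session)     # marks the group 'failed'
        raise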
PolyJIT/benchbuild
benchbuild/utils/run.py
exit_code_from_run_infos
def exit_code_from_run_infos(run_infos: t.List[RunInfo]) -> int: """Generate a single exit code from a list of RunInfo objects. Takes a list of RunInfos and returns the exit code that is furthest away from 0. Args: run_infos (t.List[RunInfo]): The run infos to aggregate. Returns: int: The maximum return code, or the minimum one when the maximum is 0. """ assert run_infos is not None if not hasattr(run_infos, "__iter__"): return run_infos.retcode rcs = [ri.retcode for ri in run_infos] max_rc = max(rcs) min_rc = min(rcs) if max_rc == 0: return min_rc return max_rc
python
def exit_code_from_run_infos(run_infos: t.List[RunInfo]) -> int: """Generate a single exit code from a list of RunInfo objects. Takes a list of RunInfos and returns the exit code that is furthest away from 0. Args: run_infos (t.List[RunInfo]): The run infos to aggregate. Returns: int: The maximum return code, or the minimum one when the maximum is 0. """ assert run_infos is not None if not hasattr(run_infos, "__iter__"): return run_infos.retcode rcs = [ri.retcode for ri in run_infos] max_rc = max(rcs) min_rc = min(rcs) if max_rc == 0: return min_rc return max_rc
[ "def", "exit_code_from_run_infos", "(", "run_infos", ":", "t", ".", "List", "[", "RunInfo", "]", ")", "->", "int", ":", "assert", "run_infos", "is", "not", "None", "if", "not", "hasattr", "(", "run_infos", ",", "\"__iter__\"", ")", ":", "return", "run_infos", ".", "retcode", "rcs", "=", "[", "ri", ".", "retcode", "for", "ri", "in", "run_infos", "]", "max_rc", "=", "max", "(", "rcs", ")", "min_rc", "=", "min", "(", "rcs", ")", "if", "max_rc", "==", "0", ":", "return", "min_rc", "return", "max_rc" ]
Generate a single exit code from a list of RunInfo objects. Takes a list of RunInfos and returns the exit code that is furthest away from 0. Args: run_infos (t.List[RunInfo]): The run infos to aggregate. Returns: int: The maximum return code, or the minimum one when the maximum is 0.
[ "Generate", "a", "single", "exit", "code", "from", "a", "list", "of", "RunInfo", "objects", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/run.py#L260-L282
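A worked example for `exit_code_from_run_infos` using a namedtuple stand-in for RunInfo (the real class carries much more state); it shows that a positive failure code dominates, and a negative code only wins when the maximum is 0:

    from collections import namedtuple

    RI = namedtuple("RI", "retcode")  # stand-in for RunInfo
    assert exit_code_from_run_infos([RI(0), RI(0), RI(2)]) == 2  # max_rc != 0 -> max wins
    assert exit_code_from_run_infos([RI(-1), RI(0)]) == -1       # max_rc == 0 -> min wins
    assert exit_code_from_run_infos([RI(0), RI(0)]) == 0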
PolyJIT/benchbuild
benchbuild/utils/run.py
track_execution
def track_execution(cmd, project, experiment, **kwargs): """Guard the execution of the given command. The given command (`cmd`) will be executed inside a database context. As soon as you leave the context we will commit the transaction. Any necessary modifications to the database can be identified inside the context with the RunInfo object. Args: cmd: The command we guard. project: The project we track for. experiment: The experiment we track for. Yields: RunInfo: A context object that carries the necessary database transaction. """ runner = RunInfo(cmd=cmd, project=project, experiment=experiment, **kwargs) yield runner runner.commit()
python
def track_execution(cmd, project, experiment, **kwargs): """Guard the execution of the given command. The given command (`cmd`) will be executed inside a database context. As soon as you leave the context we will commit the transaction. Any necessary modifications to the database can be identified inside the context with the RunInfo object. Args: cmd: The command we guard. project: The project we track for. experiment: The experiment we track for. Yields: RunInfo: A context object that carries the necessary database transaction. """ runner = RunInfo(cmd=cmd, project=project, experiment=experiment, **kwargs) yield runner runner.commit()
[ "def", "track_execution", "(", "cmd", ",", "project", ",", "experiment", ",", "*", "*", "kwargs", ")", ":", "runner", "=", "RunInfo", "(", "cmd", "=", "cmd", ",", "project", "=", "project", ",", "experiment", "=", "experiment", ",", "*", "*", "kwargs", ")", "yield", "runner", "runner", ".", "commit", "(", ")" ]
Guard the execution of the given command. The given command (`cmd`) will be executed inside a database context. As soon as you leave the context we will commit the transaction. Any necessary modifications to the database can be identified inside the context with the RunInfo object. Args: cmd: The command we guard. project: The project we track for. experiment: The experiment we track for. Yields: RunInfo: A context object that carries the necessary database transaction.
[ "Guard", "the", "execution", "of", "the", "given", "command", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/run.py#L286-L306
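The yield in `track_execution` suggests it is wrapped with contextlib.contextmanager in the full source; under that assumption, a caller would use it as a with-block, with the database commit happening on exit:

    # Sketch: cmd is a plumbum command; project/experiment are benchbuild objects.
    with track_execution(cmd, project, experiment) as run_info:
        # interact with run_info here, e.g. to record the run's outcome;
        # the transaction is committed when the block ends.
        ...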
PolyJIT/benchbuild
benchbuild/utils/run.py
with_env_recursive
def with_env_recursive(cmd, **envvars): """ Recursively updates the environment of cmd and all its subcommands. Args: cmd - A plumbum command-like object **envvars - The environment variables to update Returns: The updated command. """ from plumbum.commands.base import BoundCommand, BoundEnvCommand if isinstance(cmd, BoundCommand): cmd.cmd = with_env_recursive(cmd.cmd, **envvars) elif isinstance(cmd, BoundEnvCommand): cmd.envvars.update(envvars) cmd.cmd = with_env_recursive(cmd.cmd, **envvars) return cmd
python
def with_env_recursive(cmd, **envvars): """ Recursively updates the environment of cmd and all its subcommands. Args: cmd - A plumbum command-like object **envvars - The environment variables to update Returns: The updated command. """ from plumbum.commands.base import BoundCommand, BoundEnvCommand if isinstance(cmd, BoundCommand): cmd.cmd = with_env_recursive(cmd.cmd, **envvars) elif isinstance(cmd, BoundEnvCommand): cmd.envvars.update(envvars) cmd.cmd = with_env_recursive(cmd.cmd, **envvars) return cmd
[ "def", "with_env_recursive", "(", "cmd", ",", "*", "*", "envvars", ")", ":", "from", "plumbum", ".", "commands", ".", "base", "import", "BoundCommand", ",", "BoundEnvCommand", "if", "isinstance", "(", "cmd", ",", "BoundCommand", ")", ":", "cmd", ".", "cmd", "=", "with_env_recursive", "(", "cmd", ".", "cmd", ",", "*", "*", "envvars", ")", "elif", "isinstance", "(", "cmd", ",", "BoundEnvCommand", ")", ":", "cmd", ".", "envvars", ".", "update", "(", "envvars", ")", "cmd", ".", "cmd", "=", "with_env_recursive", "(", "cmd", ".", "cmd", ",", "*", "*", "envvars", ")", "return", "cmd" ]
Recursively updates the environment of cmd and all its subcommands. Args: cmd - A plumbum command-like object **envvars - The environment variables to update Returns: The updated command.
[ "Recursively", "updates", "the", "environment", "of", "cmd", "and", "all", "its", "subcommands", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/run.py#L318-L335
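Note that environment variables are only stored on BoundEnvCommand nodes, so a plain bound command is traversed but not modified; the sketch below therefore builds a command with .with_env first. The exact plumbum incantation is an assumption:

    from plumbum import local

    cmd = local["printenv"].with_env(FOO="1")["BAR"]  # BoundCommand wrapping a BoundEnvCommand
    cmd = with_env_recursive(cmd, BAR="2")            # updates the nested env node in place
    print(cmd())                                      # expected to print '2'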
PolyJIT/benchbuild
benchbuild/utils/run.py
in_builddir
def in_builddir(sub='.'): """ Decorate a project phase with a local working directory change. Args: sub: An optional subdirectory to change into. """ from functools import wraps def wrap_in_builddir(func): """Wrap the function for the new build directory.""" @wraps(func) def wrap_in_builddir_func(self, *args, **kwargs): """The actual function inside the wrapper for the new builddir.""" p = local.path(self.builddir) / sub if not p.exists(): LOG.error("%s does not exist.", p) if p == local.cwd: LOG.debug("CWD already is %s", p) return func(self, *args, **kwargs) with local.cwd(p): return func(self, *args, **kwargs) return wrap_in_builddir_func return wrap_in_builddir
python
def in_builddir(sub='.'): """ Decorate a project phase with a local working directory change. Args: sub: An optional subdirectory to change into. """ from functools import wraps def wrap_in_builddir(func): """Wrap the function for the new build directory.""" @wraps(func) def wrap_in_builddir_func(self, *args, **kwargs): """The actual function inside the wrapper for the new builddir.""" p = local.path(self.builddir) / sub if not p.exists(): LOG.error("%s does not exist.", p) if p == local.cwd: LOG.debug("CWD already is %s", p) return func(self, *args, **kwargs) with local.cwd(p): return func(self, *args, **kwargs) return wrap_in_builddir_func return wrap_in_builddir
[ "def", "in_builddir", "(", "sub", "=", "'.'", ")", ":", "from", "functools", "import", "wraps", "def", "wrap_in_builddir", "(", "func", ")", ":", "\"\"\"Wrap the function for the new build directory.\"\"\"", "@", "wraps", "(", "func", ")", "def", "wrap_in_builddir_func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"The actual function inside the wrapper for the new builddir.\"\"\"", "p", "=", "local", ".", "path", "(", "self", ".", "builddir", ")", "/", "sub", "if", "not", "p", ".", "exists", "(", ")", ":", "LOG", ".", "error", "(", "\"%s does not exist.\"", ",", "p", ")", "if", "p", "==", "local", ".", "cwd", ":", "LOG", ".", "debug", "(", "\"CWD already is %s\"", ",", "p", ")", "return", "func", "(", "self", ",", "*", "args", ",", "*", "kwargs", ")", "with", "local", ".", "cwd", "(", "p", ")", ":", "return", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrap_in_builddir_func", "return", "wrap_in_builddir" ]
Decorate a project phase with a local working directory change. Args: sub: An optional subdirectory to change into.
[ "Decorate", "a", "project", "phase", "with", "a", "local", "working", "directory", "change", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/run.py#L338-L365
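A decorator-usage sketch for `in_builddir`; MyProject and the make invocation are placeholders for a real benchbuild project phase:

    class MyProject(Project):            # Project base class assumed
        @in_builddir('src')
        def compile(self):
            make("all")                  # runs with CWD = <builddir>/src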
PolyJIT/benchbuild
benchbuild/utils/run.py
store_config
def store_config(func): """Decorator for storing the configuration in the project's builddir.""" from functools import wraps @wraps(func) def wrap_store_config(self, *args, **kwargs): """Wrapper that contains the actual storage call for the config.""" CFG.store(local.path(self.builddir) / ".benchbuild.yml") return func(self, *args, **kwargs) return wrap_store_config
python
def store_config(func): """Decorator for storing the configuration in the project's builddir.""" from functools import wraps @wraps(func) def wrap_store_config(self, *args, **kwargs): """Wrapper that contains the actual storage call for the config.""" CFG.store(local.path(self.builddir) / ".benchbuild.yml") return func(self, *args, **kwargs) return wrap_store_config
[ "def", "store_config", "(", "func", ")", ":", "from", "functools", "import", "wraps", "@", "wraps", "(", "func", ")", "def", "wrap_store_config", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Wrapper that contains the actual storage call for the config.\"\"\"", "CFG", ".", "store", "(", "local", ".", "path", "(", "self", ".", "builddir", ")", "/", "\".benchbuild.yml\"", ")", "return", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrap_store_config" ]
Decorator for storing the configuration in the project's builddir.
[ "Decorator", "for", "storing", "the", "configuration", "in", "the", "project", "s", "builddir", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/run.py#L368-L378
PolyJIT/benchbuild
benchbuild/container.py
clean_directories
def clean_directories(builddir, in_dir=True, out_dir=True): """Remove the in and out of the container if confirmed by the user.""" container_in = local.path(builddir) / "container-in" container_out = local.path(builddir) / "container-out" if in_dir and container_in.exists(): if ui.ask("Should I delete '{0}'?".format(container_in)): container_in.delete() if out_dir and container_out.exists(): if ui.ask("Should I delete '{0}'?".format(container_out)): container_out.delete()
python
def clean_directories(builddir, in_dir=True, out_dir=True): """Remove the in and out of the container if confirmed by the user.""" container_in = local.path(builddir) / "container-in" container_out = local.path(builddir) / "container-out" if in_dir and container_in.exists(): if ui.ask("Should I delete '{0}'?".format(container_in)): container_in.delete() if out_dir and container_out.exists(): if ui.ask("Should I delete '{0}'?".format(container_out)): container_out.delete()
[ "def", "clean_directories", "(", "builddir", ",", "in_dir", "=", "True", ",", "out_dir", "=", "True", ")", ":", "container_in", "=", "local", ".", "path", "(", "builddir", ")", "/", "\"container-in\"", "container_out", "=", "local", ".", "path", "(", "builddir", ")", "/", "\"container-out\"", "if", "in_dir", "and", "container_in", ".", "exists", "(", ")", ":", "if", "ui", ".", "ask", "(", "\"Should I delete '{0}'?\"", ".", "format", "(", "container_in", ")", ")", ":", "container_in", ".", "delete", "(", ")", "if", "out_dir", "and", "container_out", ".", "exists", "(", ")", ":", "if", "ui", ".", "ask", "(", "\"Should I delete '{0}'?\"", ".", "format", "(", "container_out", ")", ")", ":", "container_out", ".", "delete", "(", ")" ]
Remove the in and out of the container if confirmed by the user.
[ "Remove", "the", "in", "and", "out", "of", "the", "container", "if", "confirmed", "by", "the", "user", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/container.py#L23-L33
PolyJIT/benchbuild
benchbuild/container.py
setup_directories
def setup_directories(builddir): """Create the in and out directories of the container.""" build_dir = local.path(builddir) in_dir = build_dir / "container-in" out_dir = build_dir / "container-out" if not in_dir.exists(): in_dir.mkdir() if not out_dir.exists(): out_dir.mkdir()
python
def setup_directories(builddir): """Create the in and out directories of the container.""" build_dir = local.path(builddir) in_dir = build_dir / "container-in" out_dir = build_dir / "container-out" if not in_dir.exists(): in_dir.mkdir() if not out_dir.exists(): out_dir.mkdir()
[ "def", "setup_directories", "(", "builddir", ")", ":", "build_dir", "=", "local", ".", "path", "(", "builddir", ")", "in_dir", "=", "build_dir", "/", "\"container-in\"", "out_dir", "=", "build_dir", "/", "\"container-out\"", "if", "not", "in_dir", ".", "exists", "(", ")", ":", "in_dir", ".", "mkdir", "(", ")", "if", "not", "out_dir", ".", "exists", "(", ")", ":", "out_dir", ".", "mkdir", "(", ")" ]
Create the in and out directories of the container.
[ "Create", "the", "in", "and", "out", "directories", "of", "the", "container", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/container.py#L36-L45
PolyJIT/benchbuild
benchbuild/container.py
setup_container
def setup_container(builddir, _container): """Prepare the container and return the path where it can be found.""" build_dir = local.path(builddir) in_dir = build_dir / "container-in" container_path = local.path(_container) with local.cwd(builddir): container_bin = container_path.basename container_in = in_dir / container_bin download.Copy(_container, container_in) uchrt = uchroot.no_args() with local.cwd("container-in"): uchrt = uchrt["-E", "-A", "-u", "0", "-g", "0", "-C", "-r", "/", "-w", os.path.abspath("."), "--"] # Check, if we need erlent support for this archive. has_erlent = bash[ "-c", "tar --list -f './{0}' | grep --silent '.erlent'".format( container_in)] has_erlent = (has_erlent & TF) # Unpack input container to: container-in if not has_erlent: cmd = local["/bin/tar"]["xf"] cmd = uchrt[cmd[container_bin]] else: cmd = tar["xf"] cmd = cmd[container_in] with local.cwd("container-in"): cmd("--exclude=dev/*") rm(container_in) return in_dir
python
def setup_container(builddir, _container): """Prepare the container and return the path where it can be found.""" build_dir = local.path(builddir) in_dir = build_dir / "container-in" container_path = local.path(_container) with local.cwd(builddir): container_bin = container_path.basename container_in = in_dir / container_bin download.Copy(_container, container_in) uchrt = uchroot.no_args() with local.cwd("container-in"): uchrt = uchrt["-E", "-A", "-u", "0", "-g", "0", "-C", "-r", "/", "-w", os.path.abspath("."), "--"] # Check, if we need erlent support for this archive. has_erlent = bash[ "-c", "tar --list -f './{0}' | grep --silent '.erlent'".format( container_in)] has_erlent = (has_erlent & TF) # Unpack input container to: container-in if not has_erlent: cmd = local["/bin/tar"]["xf"] cmd = uchrt[cmd[container_bin]] else: cmd = tar["xf"] cmd = cmd[container_in] with local.cwd("container-in"): cmd("--exclude=dev/*") rm(container_in) return in_dir
[ "def", "setup_container", "(", "builddir", ",", "_container", ")", ":", "build_dir", "=", "local", ".", "path", "(", "builddir", ")", "in_dir", "=", "build_dir", "/", "\"container-in\"", "container_path", "=", "local", ".", "path", "(", "_container", ")", "with", "local", ".", "cwd", "(", "builddir", ")", ":", "container_bin", "=", "container_path", ".", "basename", "container_in", "=", "in_dir", "/", "container_bin", "download", ".", "Copy", "(", "_container", ",", "container_in", ")", "uchrt", "=", "uchroot", ".", "no_args", "(", ")", "with", "local", ".", "cwd", "(", "\"container-in\"", ")", ":", "uchrt", "=", "uchrt", "[", "\"-E\"", ",", "\"-A\"", ",", "\"-u\"", ",", "\"0\"", ",", "\"-g\"", ",", "\"0\"", ",", "\"-C\"", ",", "\"-r\"", ",", "\"/\"", ",", "\"-w\"", ",", "os", ".", "path", ".", "abspath", "(", "\".\"", ")", ",", "\"--\"", "]", "# Check, if we need erlent support for this archive.", "has_erlent", "=", "bash", "[", "\"-c\"", ",", "\"tar --list -f './{0}' | grep --silent '.erlent'\"", ".", "format", "(", "container_in", ")", "]", "has_erlent", "=", "(", "has_erlent", "&", "TF", ")", "# Unpack input container to: container-in", "if", "not", "has_erlent", ":", "cmd", "=", "local", "[", "\"/bin/tar\"", "]", "[", "\"xf\"", "]", "cmd", "=", "uchrt", "[", "cmd", "[", "container_bin", "]", "]", "else", ":", "cmd", "=", "tar", "[", "\"xf\"", "]", "cmd", "=", "cmd", "[", "container_in", "]", "with", "local", ".", "cwd", "(", "\"container-in\"", ")", ":", "cmd", "(", "\"--exclude=dev/*\"", ")", "rm", "(", "container_in", ")", "return", "in_dir" ]
Prepare the container and return the path where it can be found.
[ "Prepare", "the", "container", "and", "returns", "the", "path", "where", "it", "can", "be", "found", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/container.py#L48-L81
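The erlent detection in `setup_container` hinges on plumbum's TF modifier, which runs a command immediately and maps exit code 0 to True; a standalone sketch with a hypothetical archive name:

    from plumbum import TF, local

    bash = local["bash"]
    has_erlent = bash["-c", "tar --list -f './image.tar' | grep --silent '.erlent'"] & TF
    # has_erlent is True iff grep matched, i.e. the archive contains an .erlent entry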
PolyJIT/benchbuild
benchbuild/container.py
run_in_container
def run_in_container(command, container_dir): """ Run a given command inside a container. Mounts a directory as a container at the given mountpoint and tries to run the given command inside the new container. """ container_p = local.path(container_dir) with local.cwd(container_p): uchrt = uchroot.with_mounts() uchrt = uchrt["-E", "-A", "-u", "0", "-g", "0", "-C", "-w", "/", "-r", container_p] uchrt = uchrt["--"] cmd_path = container_p / command[0].lstrip('/') if not cmd_path.exists(): LOG.error("The command does not exist inside the container! %s", cmd_path) return cmd = uchrt[command] return cmd & FG
python
def run_in_container(command, container_dir): """ Run a given command inside a container. Mounts a directory as a container at the given mountpoint and tries to run the given command inside the new container. """ container_p = local.path(container_dir) with local.cwd(container_p): uchrt = uchroot.with_mounts() uchrt = uchrt["-E", "-A", "-u", "0", "-g", "0", "-C", "-w", "/", "-r", container_p] uchrt = uchrt["--"] cmd_path = container_p / command[0].lstrip('/') if not cmd_path.exists(): LOG.error("The command does not exist inside the container! %s", cmd_path) return cmd = uchrt[command] return cmd & FG
[ "def", "run_in_container", "(", "command", ",", "container_dir", ")", ":", "container_p", "=", "local", ".", "path", "(", "container_dir", ")", "with", "local", ".", "cwd", "(", "container_p", ")", ":", "uchrt", "=", "uchroot", ".", "with_mounts", "(", ")", "uchrt", "=", "uchrt", "[", "\"-E\"", ",", "\"-A\"", ",", "\"-u\"", ",", "\"0\"", ",", "\"-g\"", ",", "\"0\"", ",", "\"-C\"", ",", "\"-w\"", ",", "\"/\"", ",", "\"-r\"", ",", "container_p", "]", "uchrt", "=", "uchrt", "[", "\"--\"", "]", "cmd_path", "=", "container_p", "/", "command", "[", "0", "]", ".", "lstrip", "(", "'/'", ")", "if", "not", "cmd_path", ".", "exists", "(", ")", ":", "LOG", ".", "error", "(", "\"The command does not exist inside the container! %s\"", ",", "cmd_path", ")", "return", "cmd", "=", "uchrt", "[", "command", "]", "return", "cmd", "&", "FG" ]
Run a given command inside a container. Mounts a directory as a container at the given mountpoint and tries to run the given command inside the new container.
[ "Run", "a", "given", "command", "inside", "a", "container", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/container.py#L84-L105
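A call sketch for `run_in_container`; the directory is a hypothetical unpacked container tree such as the one produced by setup_container above:

    run_in_container(["/bin/ls", "-la"], "/tmp/benchbuild/container-in")
    # Fails fast (with a log message) if /bin/ls does not exist inside the tree.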
PolyJIT/benchbuild
benchbuild/container.py
pack_container
def pack_container(in_container, out_file): """ Pack a container image into a .tar.bz2 archive. Args: in_container (str): Path string to the container image. out_file (str): Output file name. """ container_filename = local.path(out_file).basename out_container = local.cwd / "container-out" / container_filename out_dir = out_container.dirname # Pack the results to: container-out with local.cwd(in_container): tar("cjf", out_container, ".") c_hash = download.update_hash(out_container) if not out_dir.exists(): mkdir("-p", out_dir) mv(out_container, out_file) mv(out_container + ".hash", out_file + ".hash") new_container = {"path": out_file, "hash": str(c_hash)} CFG["container"]["known"] += new_container
python
def pack_container(in_container, out_file): """ Pack a container image into a .tar.bz2 archive. Args: in_container (str): Path string to the container image. out_file (str): Output file name. """ container_filename = local.path(out_file).basename out_container = local.cwd / "container-out" / container_filename out_dir = out_container.dirname # Pack the results to: container-out with local.cwd(in_container): tar("cjf", out_container, ".") c_hash = download.update_hash(out_container) if not out_dir.exists(): mkdir("-p", out_dir) mv(out_container, out_file) mv(out_container + ".hash", out_file + ".hash") new_container = {"path": out_file, "hash": str(c_hash)} CFG["container"]["known"] += new_container
[ "def", "pack_container", "(", "in_container", ",", "out_file", ")", ":", "container_filename", "=", "local", ".", "path", "(", "out_file", ")", ".", "basename", "out_container", "=", "local", ".", "cwd", "/", "\"container-out\"", "/", "container_filename", "out_dir", "=", "out_container", ".", "dirname", "# Pack the results to: container-out", "with", "local", ".", "cwd", "(", "in_container", ")", ":", "tar", "(", "\"cjf\"", ",", "out_container", ",", "\".\"", ")", "c_hash", "=", "download", ".", "update_hash", "(", "out_container", ")", "if", "out_dir", ".", "exists", "(", ")", ":", "mkdir", "(", "\"-p\"", ",", "out_dir", ")", "mv", "(", "out_container", ",", "out_file", ")", "mv", "(", "out_container", "+", "\".hash\"", ",", "out_file", "+", "\".hash\"", ")", "new_container", "=", "{", "\"path\"", ":", "out_file", ",", "\"hash\"", ":", "str", "(", "c_hash", ")", "}", "CFG", "[", "\"container\"", "]", "[", "\"known\"", "]", "+=", "new_container" ]
Pack a container image into a .tar.bz2 archive. Args: in_container (str): Path string to the container image. out_file (str): Output file name.
[ "Pack", "a", "container", "image", "into", "a", ".", "tar", ".", "bz2", "archive", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/container.py#L108-L130
PolyJIT/benchbuild
benchbuild/container.py
setup_bash_in_container
def setup_bash_in_container(builddir, _container, outfile, shell): """ Setup a bash environment inside a container. Creates a new chroot which the user can use as a bash shell to run the wanted projects inside the mounted container, which then gets packed and stored afterwards. """ with local.cwd(builddir): # Switch to bash inside uchroot print("Entering bash inside User-Chroot. Prepare your image and " "type 'exit' when you are done. If bash exits with a non-zero " "exit code, no new container will be stored.") store_new_container = True try: run_in_container(shell, _container) except ProcessExecutionError: store_new_container = False if store_new_container: print("Packing new container image.") pack_container(_container, outfile) config_path = str(CFG["config_file"]) CFG.store(config_path) print("Storing config in {0}".format(os.path.abspath(config_path)))
python
def setup_bash_in_container(builddir, _container, outfile, shell): """ Setup a bash environment inside a container. Creates a new chroot which the user can use as a bash shell to run the wanted projects inside the mounted container, which then gets packed and stored afterwards. """ with local.cwd(builddir): # Switch to bash inside uchroot print("Entering bash inside User-Chroot. Prepare your image and " "type 'exit' when you are done. If bash exits with a non-zero " "exit code, no new container will be stored.") store_new_container = True try: run_in_container(shell, _container) except ProcessExecutionError: store_new_container = False if store_new_container: print("Packing new container image.") pack_container(_container, outfile) config_path = str(CFG["config_file"]) CFG.store(config_path) print("Storing config in {0}".format(os.path.abspath(config_path)))
[ "def", "setup_bash_in_container", "(", "builddir", ",", "_container", ",", "outfile", ",", "shell", ")", ":", "with", "local", ".", "cwd", "(", "builddir", ")", ":", "# Switch to bash inside uchroot", "print", "(", "\"Entering bash inside User-Chroot. Prepare your image and \"", "\"type 'exit' when you are done. If bash exits with a non-zero\"", "\"exit code, no new container will be stored.\"", ")", "store_new_container", "=", "True", "try", ":", "run_in_container", "(", "shell", ",", "_container", ")", "except", "ProcessExecutionError", ":", "store_new_container", "=", "False", "if", "store_new_container", ":", "print", "(", "\"Packing new container image.\"", ")", "pack_container", "(", "_container", ",", "outfile", ")", "config_path", "=", "str", "(", "CFG", "[", "\"config_file\"", "]", ")", "CFG", ".", "store", "(", "config_path", ")", "print", "(", "\"Storing config in {0}\"", ".", "format", "(", "os", ".", "path", ".", "abspath", "(", "config_path", ")", ")", ")" ]
Setup a bash environment inside a container. Creates a new chroot which the user can use as a bash shell to run the wanted projects inside the mounted container, which then gets packed and stored afterwards.
[ "Setup", "a", "bash", "environment", "inside", "a", "container", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/container.py#L133-L156
PolyJIT/benchbuild
benchbuild/container.py
set_input_container
def set_input_container(_container, cfg): """Save the input for the container in the configurations.""" if not _container: return False if _container.exists(): cfg["container"]["input"] = str(_container) return True return False
python
def set_input_container(_container, cfg): """Save the input for the container in the configurations.""" if not _container: return False if _container.exists(): cfg["container"]["input"] = str(_container) return True return False
[ "def", "set_input_container", "(", "_container", ",", "cfg", ")", ":", "if", "not", "_container", ":", "return", "False", "if", "_container", ".", "exists", "(", ")", ":", "cfg", "[", "\"container\"", "]", "[", "\"input\"", "]", "=", "str", "(", "_container", ")", "return", "True", "return", "False" ]
Save the input for the container in the configurations.
[ "Save", "the", "input", "for", "the", "container", "in", "the", "configurations", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/container.py#L167-L174
PolyJIT/benchbuild
benchbuild/container.py
SetupPolyJITGentooStrategy.run
def run(self, context): """Setup a gentoo container suitable for PolyJIT.""" # Don't do something when running non-interactive. if not sys.stdout.isatty(): return with local.cwd(context.in_container): from benchbuild.projects.gentoo import gentoo gentoo.setup_networking() gentoo.configure_portage() sed_in_chroot = uchroot.uchroot()["/bin/sed"] emerge_in_chroot = uchroot.uchroot()["/usr/bin/emerge"] has_pkg = uchroot.uchroot()["/usr/bin/qlist", "-I"] run.run(sed_in_chroot["-i", '/CC=/d', "/etc/portage/make.conf"]) run.run(sed_in_chroot["-i", '/CXX=/d', "/etc/portage/make.conf"]) want_sync = bool(CFG["container"]["strategy"]["polyjit"]["sync"]) want_upgrade = bool( CFG["container"]["strategy"]["polyjit"]["upgrade"]) packages = \ CFG["container"]["strategy"]["polyjit"]["packages"].value with local.env(MAKEOPTS="-j{0}".format(int(CFG["jobs"]))): if want_sync: LOG.debug("Synchronizing portage.") run.run(emerge_in_chroot["--sync"]) if want_upgrade: LOG.debug("Upgrading world.") run.run(emerge_in_chroot["--autounmask-only=y", "-uUDN", "--with-bdeps=y", "@world"]) for pkg in packages: if has_pkg[pkg["name"]] & TF: continue env = pkg["env"] with local.env(**env): run.run(emerge_in_chroot[pkg["name"]]) gentoo.setup_benchbuild() print("Packing new container image.") with local.cwd(context.builddir): pack_container(context.in_container, context.out_container)
python
def run(self, context): """Setup a gentoo container suitable for PolyJIT.""" # Don't do something when running non-interactive. if not sys.stdout.isatty(): return with local.cwd(context.in_container): from benchbuild.projects.gentoo import gentoo gentoo.setup_networking() gentoo.configure_portage() sed_in_chroot = uchroot.uchroot()["/bin/sed"] emerge_in_chroot = uchroot.uchroot()["/usr/bin/emerge"] has_pkg = uchroot.uchroot()["/usr/bin/qlist", "-I"] run.run(sed_in_chroot["-i", '/CC=/d', "/etc/portage/make.conf"]) run.run(sed_in_chroot["-i", '/CXX=/d', "/etc/portage/make.conf"]) want_sync = bool(CFG["container"]["strategy"]["polyjit"]["sync"]) want_upgrade = bool( CFG["container"]["strategy"]["polyjit"]["upgrade"]) packages = \ CFG["container"]["strategy"]["polyjit"]["packages"].value with local.env(MAKEOPTS="-j{0}".format(int(CFG["jobs"]))): if want_sync: LOG.debug("Synchronizing portage.") run.run(emerge_in_chroot["--sync"]) if want_upgrade: LOG.debug("Upgrading world.") run.run(emerge_in_chroot["--autounmask-only=y", "-uUDN", "--with-bdeps=y", "@world"]) for pkg in packages: if has_pkg[pkg["name"]] & TF: continue env = pkg["env"] with local.env(**env): run.run(emerge_in_chroot[pkg["name"]]) gentoo.setup_benchbuild() print("Packing new container image.") with local.cwd(context.builddir): pack_container(context.in_container, context.out_container)
[ "def", "run", "(", "self", ",", "context", ")", ":", "# Don't do something when running non-interactive.", "if", "not", "sys", ".", "stdout", ".", "isatty", "(", ")", ":", "return", "with", "local", ".", "cwd", "(", "context", ".", "in_container", ")", ":", "from", "benchbuild", ".", "projects", ".", "gentoo", "import", "gentoo", "gentoo", ".", "setup_networking", "(", ")", "gentoo", ".", "configure_portage", "(", ")", "sed_in_chroot", "=", "uchroot", ".", "uchroot", "(", ")", "[", "\"/bin/sed\"", "]", "emerge_in_chroot", "=", "uchroot", ".", "uchroot", "(", ")", "[", "\"/usr/bin/emerge\"", "]", "has_pkg", "=", "uchroot", ".", "uchroot", "(", ")", "[", "\"/usr/bin/qlist\"", ",", "\"-I\"", "]", "run", ".", "run", "(", "sed_in_chroot", "[", "\"-i\"", ",", "'/CC=/d'", ",", "\"/etc/portage/make.conf\"", "]", ")", "run", ".", "run", "(", "sed_in_chroot", "[", "\"-i\"", ",", "'/CXX=/d'", ",", "\"/etc/portage/make.conf\"", "]", ")", "want_sync", "=", "bool", "(", "CFG", "[", "\"container\"", "]", "[", "\"strategy\"", "]", "[", "\"polyjit\"", "]", "[", "\"sync\"", "]", ")", "want_upgrade", "=", "bool", "(", "CFG", "[", "\"container\"", "]", "[", "\"strategy\"", "]", "[", "\"polyjit\"", "]", "[", "\"upgrade\"", "]", ")", "packages", "=", "CFG", "[", "\"container\"", "]", "[", "\"strategy\"", "]", "[", "\"polyjit\"", "]", "[", "\"packages\"", "]", ".", "value", "with", "local", ".", "env", "(", "MAKEOPTS", "=", "\"-j{0}\"", ".", "format", "(", "int", "(", "CFG", "[", "\"jobs\"", "]", ")", ")", ")", ":", "if", "want_sync", ":", "LOG", ".", "debug", "(", "\"Synchronizing portage.\"", ")", "run", ".", "run", "(", "emerge_in_chroot", "[", "\"--sync\"", "]", ")", "if", "want_upgrade", ":", "LOG", ".", "debug", "(", "\"Upgrading world.\"", ")", "run", ".", "run", "(", "emerge_in_chroot", "[", "\"--autounmask-only=y\"", ",", "\"-uUDN\"", ",", "\"--with-bdeps=y\"", ",", "\"@world\"", "]", ")", "for", "pkg", "in", "packages", ":", "if", "has_pkg", "[", "pkg", "[", "\"name\"", "]", "]", "&", "TF", ":", "continue", "env", "=", "pkg", "[", "\"env\"", "]", "with", "local", ".", "env", "(", "*", "*", "env", ")", ":", "run", ".", "run", "(", "emerge_in_chroot", "[", "pkg", "[", "\"name\"", "]", "]", ")", "gentoo", ".", "setup_benchbuild", "(", ")", "print", "(", "\"Packing new container image.\"", ")", "with", "local", ".", "cwd", "(", "context", ".", "builddir", ")", ":", "pack_container", "(", "context", ".", "in_container", ",", "context", ".", "out_container", ")" ]
Setup a gentoo container suitable for PolyJIT.
[ "Setup", "a", "gentoo", "container", "suitable", "for", "PolyJIT", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/container.py#L213-L256
PolyJIT/benchbuild
benchbuild/container.py
Container.input_file
def input_file(self, _container): """Find the input path of a uchroot container.""" p = local.path(_container) if set_input_container(p, CFG): return p = find_hash(CFG["container"]["known"].value, _container) if set_input_container(p, CFG): return raise ValueError("The path '{0}' does not exist.".format(p))
python
def input_file(self, _container): """Find the input path of a uchroot container.""" p = local.path(_container) if set_input_container(p, CFG): return p = find_hash(CFG["container"]["known"].value, _container) if set_input_container(p, CFG): return raise ValueError("The path '{0}' does not exist.".format(p))
[ "def", "input_file", "(", "self", ",", "_container", ")", ":", "p", "=", "local", ".", "path", "(", "_container", ")", "if", "set_input_container", "(", "p", ",", "CFG", ")", ":", "return", "p", "=", "find_hash", "(", "CFG", "[", "\"container\"", "]", "[", "\"known\"", "]", ".", "value", ",", "container", ")", "if", "set_input_container", "(", "p", ",", "CFG", ")", ":", "return", "raise", "ValueError", "(", "\"The path '{0}' does not exist.\"", ".", "format", "(", "p", ")", ")" ]
Find the input path of a uchroot container.
[ "Find", "the", "input", "path", "of", "a", "uchroot", "container", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/container.py#L265-L275
PolyJIT/benchbuild
benchbuild/container.py
Container.output_file
def output_file(self, _container): """Find and write the output path of a chroot container.""" p = local.path(_container) if p.exists(): if not ui.ask("Path '{0}' already exists." " Overwrite?".format(p)): sys.exit(0) CFG["container"]["output"] = str(p)
python
def output_file(self, _container): """Find and write the output path of a chroot container.""" p = local.path(_container) if p.exists(): if not ui.ask("Path '{0}' already exists." " Overwrite?".format(p)): sys.exit(0) CFG["container"]["output"] = str(p)
[ "def", "output_file", "(", "self", ",", "_container", ")", ":", "p", "=", "local", ".", "path", "(", "_container", ")", "if", "p", ".", "exists", "(", ")", ":", "if", "not", "ui", ".", "ask", "(", "\"Path '{0}' already exists.\"", "\" Overwrite?\"", ".", "format", "(", "p", ")", ")", ":", "sys", ".", "exit", "(", "0", ")", "CFG", "[", "\"container\"", "]", "[", "\"output\"", "]", "=", "str", "(", "p", ")" ]
Find and write the output path of a chroot container.
[ "Find", "and", "writes", "the", "output", "path", "of", "a", "chroot", "container", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/container.py#L278-L285
eng-tools/sfsimodels
sfsimodels/models/soils.py
discretize_soil_profile
def discretize_soil_profile(sp, incs=None, target=1.0): """ Splits the soil profile into slices and stores as dictionary :param sp: SoilProfile :param incs: array_like, increments of depth to use for each layer :param target: target depth increment size :return: dict """ if incs is None: incs = np.ones(sp.n_layers) * target dd = {} dd["thickness"] = [] dd["unit_mass"] = [] dd["shear_vel"] = [] cum_thickness = 0 for i in range(sp.n_layers): sl = sp.layer(i + 1) thickness = sp.layer_height(i + 1) n_slices = max(int(thickness / incs[i]), 1) slice_thickness = float(thickness) / n_slices for j in range(n_slices): cum_thickness += slice_thickness if cum_thickness >= sp.gwl: rho = sl.unit_sat_mass saturation = True else: rho = sl.unit_dry_mass saturation = False if hasattr(sl, "get_shear_vel_at_v_eff_stress"): v_eff = sp.vertical_effective_stress(cum_thickness) vs = sl.get_shear_vel_at_v_eff_stress(v_eff, saturation) else: vs = sl.calc_shear_vel(saturation) dd["shear_vel"].append(vs) dd["unit_mass"].append(rho) dd["thickness"].append(slice_thickness) for item in dd: dd[item] = np.array(dd[item]) return dd
python
def discretize_soil_profile(sp, incs=None, target=1.0): """ Splits the soil profile into slices and stores as dictionary :param sp: SoilProfile :param incs: array_like, increments of depth to use for each layer :param target: target depth increment size :return: dict """ if incs is None: incs = np.ones(sp.n_layers) * target dd = {} dd["thickness"] = [] dd["unit_mass"] = [] dd["shear_vel"] = [] cum_thickness = 0 for i in range(sp.n_layers): sl = sp.layer(i + 1) thickness = sp.layer_height(i + 1) n_slices = max(int(thickness / incs[i]), 1) slice_thickness = float(thickness) / n_slices for j in range(n_slices): cum_thickness += slice_thickness if cum_thickness >= sp.gwl: rho = sl.unit_sat_mass saturation = True else: rho = sl.unit_dry_mass saturation = False if hasattr(sl, "get_shear_vel_at_v_eff_stress"): v_eff = sp.vertical_effective_stress(cum_thickness) vs = sl.get_shear_vel_at_v_eff_stress(v_eff, saturation) else: vs = sl.calc_shear_vel(saturation) dd["shear_vel"].append(vs) dd["unit_mass"].append(rho) dd["thickness"].append(slice_thickness) for item in dd: dd[item] = np.array(dd[item]) return dd
[ "def", "discretize_soil_profile", "(", "sp", ",", "incs", "=", "None", ",", "target", "=", "1.0", ")", ":", "if", "incs", "is", "None", ":", "incs", "=", "np", ".", "ones", "(", "sp", ".", "n_layers", ")", "*", "target", "dd", "=", "{", "}", "dd", "[", "\"thickness\"", "]", "=", "[", "]", "dd", "[", "\"unit_mass\"", "]", "=", "[", "]", "dd", "[", "\"shear_vel\"", "]", "=", "[", "]", "cum_thickness", "=", "0", "for", "i", "in", "range", "(", "sp", ".", "n_layers", ")", ":", "sl", "=", "sp", ".", "layer", "(", "i", "+", "1", ")", "thickness", "=", "sp", ".", "layer_height", "(", "i", "+", "1", ")", "n_slices", "=", "max", "(", "int", "(", "thickness", "/", "incs", "[", "i", "]", ")", ",", "1", ")", "slice_thickness", "=", "float", "(", "thickness", ")", "/", "n_slices", "for", "j", "in", "range", "(", "n_slices", ")", ":", "cum_thickness", "+=", "slice_thickness", "if", "cum_thickness", ">=", "sp", ".", "gwl", ":", "rho", "=", "sl", ".", "unit_sat_mass", "saturation", "=", "True", "else", ":", "rho", "=", "sl", ".", "unit_dry_mass", "saturation", "=", "False", "if", "hasattr", "(", "sl", ",", "\"get_shear_vel_at_v_eff_stress\"", ")", ":", "v_eff", "=", "sp", ".", "vertical_effective_stress", "(", "cum_thickness", ")", "vs", "=", "sl", ".", "get_shear_vel_at_v_eff_stress", "(", "v_eff", ",", "saturation", ")", "else", ":", "vs", "=", "sl", ".", "calc_shear_vel", "(", "saturation", ")", "dd", "[", "\"shear_vel\"", "]", ".", "append", "(", "vs", ")", "dd", "[", "\"unit_mass\"", "]", ".", "append", "(", "rho", ")", "dd", "[", "\"thickness\"", "]", ".", "append", "(", "slice_thickness", ")", "for", "item", "in", "dd", ":", "dd", "[", "item", "]", "=", "np", ".", "array", "(", "dd", "[", "item", "]", ")", "return", "dd" ]
Splits the soil profile into slices and stores as dictionary :param sp: SoilProfile :param incs: array_like, increments of depth to use for each layer :param target: target depth increment size :return: dict
[ "Splits", "the", "soil", "profile", "into", "slices", "and", "stores", "as", "dictionary" ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L1272-L1312
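A usage sketch for `discretize_soil_profile`; the Soil/SoilProfile attribute and method names below follow sfsimodels conventions but are assumptions, and the water table is placed below the profile base so only the dry branch is exercised:

    import sfsimodels as sm  # assuming the package's usual import name

    sl = sm.Soil()
    sl.unit_dry_weight = 17000.0   # N/m3 (attribute name assumed)
    sl.g_mod = 40.0e6              # Pa
    sl.poissons_ratio = 0.3

    sp = sm.SoilProfile()
    sp.add_layer(0, sl)            # single layer starting at the surface
    sp.height = 10.0               # 10 m profile
    sp.gwl = 12.0                  # water table below the base -> dry everywhere

    dd = discretize_soil_profile(sp, target=0.5)
    print(dd["thickness"], dd["shear_vel"])   # 20 slices of 0.5 m each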