Dataset schema (one record per function; string-length ranges as reported for the full dataset):

  repo              string, 7-55 chars
  path              string, 4-127 chars
  func_name         string, 1-88 chars
  original_string   string, 75-19.8k chars
  language          string, 1 distinct value
  code              string, 75-19.8k chars
  code_tokens       list
  docstring         string, 3-17.3k chars
  docstring_tokens  list
  sha               string, 40 chars
  url               string, 87-242 chars
  partition         string, 1 distinct value
openearth/mmi-python
mmi/mmi_client.py
MMIClient.subscribe
python
def subscribe(self, topic=b''):
    """subscribe to the SUB socket, to listen for incoming variables,
    return a stream that can be listened to."""
    self.sockets[zmq.SUB].setsockopt(zmq.SUBSCRIBE, topic)
    poller = self.pollers[zmq.SUB]
    return poller
[ "def", "subscribe", "(", "self", ",", "topic", "=", "b''", ")", ":", "self", ".", "sockets", "[", "zmq", ".", "SUB", "]", ".", "setsockopt", "(", "zmq", ".", "SUBSCRIBE", ",", "topic", ")", "poller", "=", "self", ".", "pollers", "[", "zmq", ".", "SUB", "]", "return", "poller" ]
subscribe to the SUB socket, to listen for incoming variables, return a stream that can be listened to.
[ "subscribe", "to", "the", "SUB", "socket", "to", "listen", "for", "incomming", "variables", "return", "a", "stream", "that", "can", "be", "listened", "to", "." ]
a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d
https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/mmi_client.py#L349-L353
train
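For context, a minimal standalone sketch of the ZeroMQ pattern that `subscribe` wraps: setting the SUBSCRIBE option on a SUB socket (b'' matches every topic) and polling it. The endpoint is a placeholder, not something this record specifies.

import zmq

# Sketch of the pattern subscribe() relies on: a SUB socket with the
# SUBSCRIBE option set, registered on a poller and checked for input.
ctx = zmq.Context.instance()
sub = ctx.socket(zmq.SUB)
sub.connect('tcp://localhost:5558')  # placeholder endpoint
sub.setsockopt(zmq.SUBSCRIBE, b'')  # empty prefix: receive every topic

poller = zmq.Poller()
poller.register(sub, zmq.POLLIN)
# poll() takes milliseconds and reports only sockets that are ready
if dict(poller.poll(timeout=100)).get(sub):
    print(sub.recv_multipart())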
djaodjin/djaodjin-deployutils
deployutils/copy.py
download
python
def download(remote_location, remotes=None, prefix="", dry_run=False):
    """
    Download resources from a stage server.
    """
    if remotes is None:
        remotes, _ = _resources_files(
            abs_paths=remote_location.startswith('s3://'))
    if remote_location.startswith('s3://'):
        from .s3 import S3Backend
        backend = S3Backend(remote_location, dry_run=dry_run)
        backend.download(list_local(remotes, prefix), prefix)
    else:
        dest_root = '.'
        shell_command([
            '/usr/bin/rsync',
            '-thrRvz', '--rsync-path', '/usr/bin/rsync',
            '%s/./' % remote_location, dest_root],
            dry_run=dry_run)
[ "def", "download", "(", "remote_location", ",", "remotes", "=", "None", ",", "prefix", "=", "\"\"", ",", "dry_run", "=", "False", ")", ":", "if", "remotes", "is", "None", ":", "remotes", ",", "_", "=", "_resources_files", "(", "abs_paths", "=", "remote_location", ".", "startswith", "(", "'s3://'", ")", ")", "if", "remote_location", ".", "startswith", "(", "'s3://'", ")", ":", "from", ".", "s3", "import", "S3Backend", "backend", "=", "S3Backend", "(", "remote_location", ",", "dry_run", "=", "dry_run", ")", "backend", ".", "download", "(", "list_local", "(", "remotes", ",", "prefix", ")", ",", "prefix", ")", "else", ":", "dest_root", "=", "'.'", "shell_command", "(", "[", "'/usr/bin/rsync'", ",", "'-thrRvz'", ",", "'--rsync-path'", ",", "'/usr/bin/rsync'", ",", "'%s/./'", "%", "remote_location", ",", "dest_root", "]", ",", "dry_run", "=", "dry_run", ")" ]
Download resources from a stage server.
[ "Download", "resources", "from", "a", "stage", "server", "." ]
a0fe3cf3030dbbf09025c69ce75a69b326565dd8
https://github.com/djaodjin/djaodjin-deployutils/blob/a0fe3cf3030dbbf09025c69ce75a69b326565dd8/deployutils/copy.py#L58-L74
train
djaodjin/djaodjin-deployutils
deployutils/copy.py
upload
python
def upload(remote_location, remotes=None, ignores=None,
           static_root="/static/", prefix="",
           dry_run=False):
    # pylint:disable=too-many-arguments
    """
    Upload resources to a stage server.
    """
    if remotes is None:
        remotes, ignores = _resources_files(
            abs_paths=remote_location.startswith('s3://'))
    if remote_location.startswith('s3://'):
        from deployutils.s3 import S3Backend
        backend = S3Backend(remote_location,
            static_root=static_root, dry_run=dry_run)
        backend.upload(list_local(remotes, prefix), prefix)
    else:
        excludes = []
        if ignores:
            for ignore in ignores:
                excludes += ['--exclude', ignore]
        # -O omit to set mod times on directories to avoid permissions error.
        shell_command(['/usr/bin/rsync'] + excludes
            + ['-pOthrRvz', '--rsync-path', '/usr/bin/rsync']
            + remotes + [remote_location], dry_run=dry_run)
[ "def", "upload", "(", "remote_location", ",", "remotes", "=", "None", ",", "ignores", "=", "None", ",", "static_root", "=", "\"/static/\"", ",", "prefix", "=", "\"\"", ",", "dry_run", "=", "False", ")", ":", "# pylint:disable=too-many-arguments", "if", "remotes", "is", "None", ":", "remotes", ",", "ignores", "=", "_resources_files", "(", "abs_paths", "=", "remote_location", ".", "startswith", "(", "'s3://'", ")", ")", "if", "remote_location", ".", "startswith", "(", "'s3://'", ")", ":", "from", "deployutils", ".", "s3", "import", "S3Backend", "backend", "=", "S3Backend", "(", "remote_location", ",", "static_root", "=", "static_root", ",", "dry_run", "=", "dry_run", ")", "backend", ".", "upload", "(", "list_local", "(", "remotes", ",", "prefix", ")", ",", "prefix", ")", "else", ":", "excludes", "=", "[", "]", "if", "ignores", ":", "for", "ignore", "in", "ignores", ":", "excludes", "+=", "[", "'--exclude'", ",", "ignore", "]", "# -O omit to set mod times on directories to avoid permissions error.", "shell_command", "(", "[", "'/usr/bin/rsync'", "]", "+", "excludes", "+", "[", "'-pOthrRvz'", ",", "'--rsync-path'", ",", "'/usr/bin/rsync'", "]", "+", "remotes", "+", "[", "remote_location", "]", ",", "dry_run", "=", "dry_run", ")" ]
Upload resources to a stage server.
[ "Upload", "resources", "to", "a", "stage", "server", "." ]
a0fe3cf3030dbbf09025c69ce75a69b326565dd8
https://github.com/djaodjin/djaodjin-deployutils/blob/a0fe3cf3030dbbf09025c69ce75a69b326565dd8/deployutils/copy.py#L92-L114
train
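A hedged usage sketch for `download` and `upload` above: the bucket and host names are placeholders, and `dry_run=True` keeps both calls from actually transferring files.

from deployutils.copy import download, upload

# An 's3://' location dispatches to S3Backend; any other location is
# handed to rsync via shell_command.
download('s3://example-bucket/stage', dry_run=True)
upload('user@stage.example.com:/var/www', ignores=['*.pyc'], dry_run=True)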
JosuaKrause/quick_server
quick_server/quick_server.py
json_dumps
python
def json_dumps(obj):
    """A safe JSON dump function that provides correct diverging numbers
    for an ECMAScript consumer.
    """
    try:
        return json.dumps(obj, indent=2, sort_keys=True, allow_nan=False)
    except ValueError:
        pass
    # we don't want to call do_map on the original object since it can
    # contain objects that need to be converted for JSON. after reading
    # in the created JSON we get a limited set of possible types we
    # can encounter
    json_str = json.dumps(obj, indent=2, sort_keys=True, allow_nan=True)
    json_obj = json.loads(json_str)

    def do_map(obj):
        if obj is None:
            return None
        if isinstance(obj, basestring):
            return obj
        if isinstance(obj, dict):
            res = {}
            for (key, value) in obj.items():
                res[key] = do_map(value)
            return res
        if isinstance(obj, collections.Iterable):
            res = []
            for el in obj:
                res.append(do_map(el))
            return res
        # diverging numbers need to be passed as strings otherwise it
        # will throw a parsing error on the ECMAScript consumer side
        if math.isnan(obj):
            return "NaN"
        if math.isinf(obj):
            return "Infinity" if obj > 0 else "-Infinity"
        return obj

    return json.dumps(
        do_map(json_obj), indent=2, sort_keys=True, allow_nan=False)
[ "def", "json_dumps", "(", "obj", ")", ":", "try", ":", "return", "json", ".", "dumps", "(", "obj", ",", "indent", "=", "2", ",", "sort_keys", "=", "True", ",", "allow_nan", "=", "False", ")", "except", "ValueError", ":", "pass", "# we don't want to call do_map on the original object since it can", "# contain objects that need to be converted for JSON. after reading", "# in the created JSON we get a limited set of possible types we", "# can encounter", "json_str", "=", "json", ".", "dumps", "(", "obj", ",", "indent", "=", "2", ",", "sort_keys", "=", "True", ",", "allow_nan", "=", "True", ")", "json_obj", "=", "json", ".", "loads", "(", "json_str", ")", "def", "do_map", "(", "obj", ")", ":", "if", "obj", "is", "None", ":", "return", "None", "if", "isinstance", "(", "obj", ",", "basestring", ")", ":", "return", "obj", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "res", "=", "{", "}", "for", "(", "key", ",", "value", ")", "in", "obj", ".", "items", "(", ")", ":", "res", "[", "key", "]", "=", "do_map", "(", "value", ")", "return", "res", "if", "isinstance", "(", "obj", ",", "collections", ".", "Iterable", ")", ":", "res", "=", "[", "]", "for", "el", "in", "obj", ":", "res", ".", "append", "(", "do_map", "(", "el", ")", ")", "return", "res", "# diverging numbers need to be passed as strings otherwise it", "# will throw a parsing error on the ECMAscript consumer side", "if", "math", ".", "isnan", "(", "obj", ")", ":", "return", "\"NaN\"", "if", "math", ".", "isinf", "(", "obj", ")", ":", "return", "\"Infinity\"", "if", "obj", ">", "0", "else", "\"-Infinity\"", "return", "obj", "return", "json", ".", "dumps", "(", "do_map", "(", "json_obj", ")", ",", "indent", "=", "2", ",", "sort_keys", "=", "True", ",", "allow_nan", "=", "False", ")" ]
A safe JSON dump function that provides correct diverging numbers for an ECMAScript consumer.
[ "A", "safe", "JSON", "dump", "function", "that", "provides", "correct", "diverging", "numbers", "for", "a", "ECMAscript", "consumer", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L148-L187
train
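To see why the fallback path exists: plain json.dumps(..., allow_nan=False) raises ValueError on diverging numbers, while json_dumps re-encodes them as strings that an ECMAScript consumer can special-case. A small sketch of the expected behavior, assuming json_dumps is importable from the module above:

from quick_server.quick_server import json_dumps

payload = {"value": float("nan"), "limit": float("inf")}
print(json_dumps(payload))
# With sort_keys=True and indent=2 this should print:
# {
#   "limit": "Infinity",
#   "value": "NaN"
# }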
JosuaKrause/quick_server
quick_server/quick_server.py
msg
python
def msg(message, *args, **kwargs):
    """Prints a message from the server to the log file."""
    global log_file
    if log_file is None:
        log_file = sys.stderr
    if long_msg:
        file_name, line = caller_trace()
        file_name, file_type = os.path.splitext(file_name)
        if file_name.endswith('/__init__'):
            file_name = os.path.basename(os.path.dirname(file_name))
        elif file_name.endswith('/__main__'):
            file_name = "(-m) {0}".format(
                os.path.basename(os.path.dirname(file_name)))
        else:
            file_name = os.path.basename(file_name)
        head = '{0}{1} ({2}): '.format(file_name, file_type, line)
    else:
        head = '[SERVER] '
    out = StringIO()
    for line in message.format(*args, **kwargs).split('\n'):
        out.write('{0}{1}\n'.format(head, line))
    out.flush()
    out.seek(0)
    if _msg_stderr:
        sys.stderr.write(out.read())
        sys.stderr.flush()
    else:
        log_file.write(out.read())
        log_file.flush()
    out.close()
[ "def", "msg", "(", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "global", "log_file", "if", "log_file", "is", "None", ":", "log_file", "=", "sys", ".", "stderr", "if", "long_msg", ":", "file_name", ",", "line", "=", "caller_trace", "(", ")", "file_name", ",", "file_type", "=", "os", ".", "path", ".", "splitext", "(", "file_name", ")", "if", "file_name", ".", "endswith", "(", "'/__init__'", ")", ":", "file_name", "=", "os", ".", "path", ".", "basename", "(", "os", ".", "path", ".", "dirname", "(", "file_name", ")", ")", "elif", "file_name", ".", "endswith", "(", "'/__main__'", ")", ":", "file_name", "=", "\"(-m) {0}\"", ".", "format", "(", "os", ".", "path", ".", "basename", "(", "os", ".", "path", ".", "dirname", "(", "file_name", ")", ")", ")", "else", ":", "file_name", "=", "os", ".", "path", ".", "basename", "(", "file_name", ")", "head", "=", "'{0}{1} ({2}): '", ".", "format", "(", "file_name", ",", "file_type", ",", "line", ")", "else", ":", "head", "=", "'[SERVER] '", "out", "=", "StringIO", "(", ")", "for", "line", "in", "message", ".", "format", "(", "*", "args", ",", "*", "*", "kwargs", ")", ".", "split", "(", "'\\n'", ")", ":", "out", ".", "write", "(", "'{0}{1}\\n'", ".", "format", "(", "head", ",", "line", ")", ")", "out", ".", "flush", "(", ")", "out", ".", "seek", "(", "0", ")", "if", "_msg_stderr", ":", "sys", ".", "stderr", ".", "write", "(", "out", ".", "read", "(", ")", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "else", ":", "log_file", ".", "write", "(", "out", ".", "read", "(", ")", ")", "log_file", ".", "flush", "(", ")", "out", ".", "close", "(", ")" ]
Prints a message from the server to the log file.
[ "Prints", "a", "message", "from", "the", "server", "to", "the", "log", "file", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L232-L261
train
JosuaKrause/quick_server
quick_server/quick_server.py
setup_restart
python
def setup_restart():
    """Sets up restart functionality that doesn't keep the first process
    alive. The function needs to be called before the actual process starts
    but after loading the program. It will restart the program in a child
    process and immediately return in the child process. The call in the
    parent process never returns. Calling this function is not necessary
    for using restart functionality but avoids potential errors originating
    from rogue threads.
    """
    exit_code = os.environ.get('QUICK_SERVER_RESTART', None)
    if exit_code is None:
        try:
            atexit.unregister(_on_exit)
        except AttributeError:
            atexit._exithandlers = filter(
                lambda exit_hnd: exit_hnd[0] != _on_exit,
                atexit._exithandlers)
        _start_restart_loop(None, in_atexit=False)
[ "def", "setup_restart", "(", ")", ":", "exit_code", "=", "os", ".", "environ", ".", "get", "(", "'QUICK_SERVER_RESTART'", ",", "None", ")", "if", "exit_code", "is", "None", ":", "try", ":", "atexit", ".", "unregister", "(", "_on_exit", ")", "except", "AttributeError", ":", "atexit", ".", "_exithandlers", "=", "filter", "(", "lambda", "exit_hnd", ":", "exit_hnd", "[", "0", "]", "!=", "_on_exit", ",", "atexit", ".", "_exithandlers", ")", "_start_restart_loop", "(", "None", ",", "in_atexit", "=", "False", ")" ]
Sets up restart functionality that doesn't keep the first process alive. The function needs to be called before the actual process starts but after loading the program. It will restart the program in a child process and immediately return in the child process. The call in the parent process never returns. Calling this function is not necessary for using restart functionality but avoids potential errors originating from rogue threads.
[ "Sets", "up", "restart", "functionality", "that", "doesn", "t", "keep", "the", "first", "process", "alive", ".", "The", "function", "needs", "to", "be", "called", "before", "the", "actual", "process", "starts", "but", "after", "loading", "the", "program", ".", "It", "will", "restart", "the", "program", "in", "a", "child", "process", "and", "immediately", "returns", "in", "the", "child", "process", ".", "The", "call", "in", "the", "parent", "process", "never", "returns", ".", "Calling", "this", "function", "is", "not", "necessary", "for", "using", "restart", "functionality", "but", "avoids", "potential", "errors", "originating", "from", "rogue", "threads", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L393-L409
train
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServerRequestHandler.convert_argmap
python
def convert_argmap(self, query):
    """Converts the query string of a URL to a map.

    Parameters
    ----------
    query : string
        The URL to parse.

    Returns
    -------
    A map object containing all fields as keys with their value. Fields
    without '=' in the URL are interpreted as flags and the value is set
    to True.
    """
    res = {}
    if isinstance(query, bytes):
        query = query.decode('utf8')
    for section in query.split('&'):
        eqs = section.split('=', 1)
        name = urlparse_unquote(eqs[0])
        if len(eqs) > 1:
            res[name] = urlparse_unquote(eqs[1])
        else:
            res[name] = True
    return res
[ "def", "convert_argmap", "(", "self", ",", "query", ")", ":", "res", "=", "{", "}", "if", "isinstance", "(", "query", ",", "bytes", ")", ":", "query", "=", "query", ".", "decode", "(", "'utf8'", ")", "for", "section", "in", "query", ".", "split", "(", "'&'", ")", ":", "eqs", "=", "section", ".", "split", "(", "'='", ",", "1", ")", "name", "=", "urlparse_unquote", "(", "eqs", "[", "0", "]", ")", "if", "len", "(", "eqs", ")", ">", "1", ":", "res", "[", "name", "]", "=", "urlparse_unquote", "(", "eqs", "[", "1", "]", ")", "else", ":", "res", "[", "name", "]", "=", "True", "return", "res" ]
Converts the query string of a URL to a map. Parameters ---------- query : string The URL to parse. Returns ------- A map object containing all fields as keys with their value. Fields without '=' in the URL are interpreted as flags and the value is set to True.
[ "Converts", "the", "query", "string", "of", "an", "URL", "to", "a", "map", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L452-L476
train
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServerRequestHandler.convert_args
python
def convert_args(self, rem_path, args):
    """Splits the rest of a URL into its argument parts. The URL is
    assumed to start with the dynamic request prefix already removed.

    Parameters
    ----------
    rem_path : string
        The URL to parse. The URL must start with the dynamic request
        prefix already removed.

    args : map
        The map to fill.

    Returns
    -------
    args enriched with 'paths', an array containing the remaining path
    segments, 'query', a map containing the query fields and flags, and
    'fragment' containing the fragment part as a string.
    """
    fragment_split = rem_path.split('#', 1)
    query_split = fragment_split[0].split('?', 1)
    segs = filter(
        lambda p: len(p) and p != '.',
        os.path.normpath(query_split[0]).split('/'))
    paths = [urlparse_unquote(p) for p in segs]
    query = self.convert_argmap(query_split[1]) \
        if len(query_split) > 1 else {}
    args['paths'] = paths
    args['query'] = query
    args['fragment'] = urlparse_unquote(fragment_split[1]).decode('utf8') \
        if len(fragment_split) > 1 else ''
    return args
[ "def", "convert_args", "(", "self", ",", "rem_path", ",", "args", ")", ":", "fragment_split", "=", "rem_path", ".", "split", "(", "'#'", ",", "1", ")", "query_split", "=", "fragment_split", "[", "0", "]", ".", "split", "(", "'?'", ",", "1", ")", "segs", "=", "filter", "(", "lambda", "p", ":", "len", "(", "p", ")", "and", "p", "!=", "'.'", ",", "os", ".", "path", ".", "normpath", "(", "query_split", "[", "0", "]", ")", ".", "split", "(", "'/'", ")", ")", "paths", "=", "[", "urlparse_unquote", "(", "p", ")", "for", "p", "in", "segs", "]", "query", "=", "self", ".", "convert_argmap", "(", "query_split", "[", "1", "]", ")", "if", "len", "(", "query_split", ")", ">", "1", "else", "{", "}", "args", "[", "'paths'", "]", "=", "paths", "args", "[", "'query'", "]", "=", "query", "args", "[", "'fragment'", "]", "=", "urlparse_unquote", "(", "fragment_split", "[", "1", "]", ")", ".", "decode", "(", "'utf8'", ")", "if", "len", "(", "fragment_split", ")", ">", "1", "else", "''", "return", "args" ]
Splits the rest of a URL into its argument parts. The URL is assumed to start with the dynamic request prefix already removed. Parameters ---------- rem_path : string The URL to parse. The URL must start with the dynamic request prefix already removed. args : map The map to fill. Returns ------- args enriched with 'paths', an array containing the remaining path segments, 'query', a map containing the query fields and flags, and 'fragment' containing the fragment part as a string.
[ "Splits", "the", "rest", "of", "a", "URL", "into", "its", "argument", "parts", ".", "The", "URL", "is", "assumed", "to", "start", "with", "the", "dynamic", "request", "prefix", "already", "removed", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L478-L509
train
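The query handling in `convert_argmap` is easy to verify in isolation. A self-contained sketch of the same steps, using urllib.parse.unquote in place of the module's urlparse_unquote alias (an assumption; the module's imports are not shown in these records):

from urllib.parse import unquote

def convert_argmap(query):
    # fields without '=' become boolean flags, exactly as above
    res = {}
    for section in query.split('&'):
        eqs = section.split('=', 1)
        res[unquote(eqs[0])] = unquote(eqs[1]) if len(eqs) > 1 else True
    return res

print(convert_argmap('x=1&verbose&name=a%20b'))
# {'x': '1', 'verbose': True, 'name': 'a b'}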
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServerRequestHandler.handle_special
python
def handle_special(self, send_body, method_str):
    """Handles a dynamic request. If this method returns False the
    request is interpreted as a static file request. Methods can be
    registered using the `add_TYPE_METHOD_mask` methods of QuickServer.

    Parameters
    ----------
    send_body : bool
        Whether to actually send the result body. This is False if the
        URL was requested as HEAD.

    method_str : string
        The method as string: POST, GET, or HEAD.

    Returns
    -------
    A bool whether the request was handled. If it was not handled the
    requested URL is interpreted as a static file.
    """
    ongoing = True
    if self.server.report_slow_requests:
        path = self.path

        def do_report():
            if not ongoing:
                return
            msg("request takes longer than expected: \"{0} {1}\"",
                method_str, path)

        alarm = threading.Timer(5.0, do_report)
        alarm.start()
    else:
        alarm = None
    try:
        return self._handle_special(send_body, method_str)
    finally:
        if alarm is not None:
            alarm.cancel()
        ongoing = False
[ "def", "handle_special", "(", "self", ",", "send_body", ",", "method_str", ")", ":", "ongoing", "=", "True", "if", "self", ".", "server", ".", "report_slow_requests", ":", "path", "=", "self", ".", "path", "def", "do_report", "(", ")", ":", "if", "not", "ongoing", ":", "return", "msg", "(", "\"request takes longer than expected: \\\"{0} {1}\\\"\"", ",", "method_str", ",", "path", ")", "alarm", "=", "threading", ".", "Timer", "(", "5.0", ",", "do_report", ")", "alarm", ".", "start", "(", ")", "else", ":", "alarm", "=", "None", "try", ":", "return", "self", ".", "_handle_special", "(", "send_body", ",", "method_str", ")", "finally", ":", "if", "alarm", "is", "not", "None", ":", "alarm", ".", "cancel", "(", ")", "ongoing", "=", "False" ]
Handles a dynamic request. If this method returns False the request is interpreted as a static file request. Methods can be registered using the `add_TYPE_METHOD_mask` methods of QuickServer. Parameters ---------- send_body : bool Whether to actually send the result body. This is False if the URL was requested as HEAD. method_str : string The method as string: POST, GET, or HEAD. Returns ------- A bool whether the request was handled. If it was not handled the requested URL is interpreted as a static file.
[ "Handles", "a", "dynamic", "request", ".", "If", "this", "method", "returns", "False", "the", "request", "is", "interpreted", "as", "static", "file", "request", ".", "Methods", "can", "be", "registered", "using", "the", "add_TYPE_METHOD_mask", "methods", "of", "QuickServer", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L635-L673
train
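The slow-request reporting above is a general watchdog pattern: arm a one-shot threading.Timer before the real work and cancel it afterwards. A standalone sketch (the five-second deadline mirrors the hard-coded value above):

import threading
import time

def with_slow_warning(func, *args, deadline=5.0):
    ongoing = True

    def report():
        # fires only if the wrapped call is still running at the deadline
        if ongoing:
            print('call takes longer than expected:', func.__name__)

    alarm = threading.Timer(deadline, report)
    alarm.start()
    try:
        return func(*args)
    finally:
        alarm.cancel()
        ongoing = False

with_slow_warning(time.sleep, 0.1)  # fast enough: no warning is printed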
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServerRequestHandler.check_cache
python
def check_cache(self, e_tag, match):
    """Checks the ETag and sends a cache match response if it matches."""
    if e_tag != match:
        return False
    self.send_response(304)
    self.send_header("ETag", e_tag)
    self.send_header("Cache-Control",
                     "max-age={0}".format(self.server.max_age))
    self.end_headers()
    thread_local.size = 0
    return True
[ "def", "check_cache", "(", "self", ",", "e_tag", ",", "match", ")", ":", "if", "e_tag", "!=", "match", ":", "return", "False", "self", ".", "send_response", "(", "304", ")", "self", ".", "send_header", "(", "\"ETag\"", ",", "e_tag", ")", "self", ".", "send_header", "(", "\"Cache-Control\"", ",", "\"max-age={0}\"", ".", "format", "(", "self", ".", "server", ".", "max_age", ")", ")", "self", ".", "end_headers", "(", ")", "thread_local", ".", "size", "=", "0", "return", "True" ]
Checks the ETag and sends a cache match response if it matches.
[ "Checks", "the", "ETag", "and", "sends", "a", "cache", "match", "response", "if", "it", "matches", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L878-L888
train
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServerRequestHandler.handle_error
python
def handle_error(self):
    """Tries to send a 500 error after encountering an exception."""
    if self.server.can_ignore_error(self):
        return
    if thread_local.status_code is None:
        msg("ERROR: Cannot send error status code! " +
            "Header already sent!\n{0}", traceback.format_exc())
    else:
        msg("ERROR: Error while processing request:\n{0}",
            traceback.format_exc())
        try:
            self.send_error(500, "Internal Error")
        except:  # nopep8
            if self.server.can_ignore_error(self):
                return
            msg("ERROR: Cannot send error status code:\n{0}",
                traceback.format_exc())
[ "def", "handle_error", "(", "self", ")", ":", "if", "self", ".", "server", ".", "can_ignore_error", "(", "self", ")", ":", "return", "if", "thread_local", ".", "status_code", "is", "None", ":", "msg", "(", "\"ERROR: Cannot send error status code! \"", "+", "\"Header already sent!\\n{0}\"", ",", "traceback", ".", "format_exc", "(", ")", ")", "else", ":", "msg", "(", "\"ERROR: Error while processing request:\\n{0}\"", ",", "traceback", ".", "format_exc", "(", ")", ")", "try", ":", "self", ".", "send_error", "(", "500", ",", "\"Internal Error\"", ")", "except", ":", "# nopep8", "if", "self", ".", "server", ".", "can_ignore_error", "(", "self", ")", ":", "return", "msg", "(", "\"ERROR: Cannot send error status code:\\n{0}\"", ",", "traceback", ".", "format_exc", "(", ")", ")" ]
Tries to send a 500 error after encountering an exception.
[ "Tries", "to", "send", "an", "500", "error", "after", "encountering", "an", "exception", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L925-L941
train
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServerRequestHandler.cross_origin_headers
python
def cross_origin_headers(self):
    """Sends cross origin headers."""
    if not self.is_cross_origin():
        return False
    # we allow everything
    self.send_header("Access-Control-Allow-Methods",
                     "GET, POST, PUT, DELETE, HEAD")
    allow_headers = _getheader(self.headers,
                               'access-control-request-headers')
    if allow_headers is not None:
        self.send_header("Access-Control-Allow-Headers", allow_headers)
    self.send_header("Access-Control-Allow-Origin", "*")
    self.send_header("Access-Control-Allow-Credentials", "true")
    return allow_headers is not None
[ "def", "cross_origin_headers", "(", "self", ")", ":", "if", "not", "self", ".", "is_cross_origin", "(", ")", ":", "return", "False", "# we allow everything", "self", ".", "send_header", "(", "\"Access-Control-Allow-Methods\"", ",", "\"GET, POST, PUT, DELETE, HEAD\"", ")", "allow_headers", "=", "_getheader", "(", "self", ".", "headers", ",", "'access-control-request-headers'", ")", "if", "allow_headers", "is", "not", "None", ":", "self", ".", "send_header", "(", "\"Access-Control-Allow-Headers\"", ",", "allow_headers", ")", "self", ".", "send_header", "(", "\"Access-Control-Allow-Origin\"", ",", "\"*\"", ")", "self", ".", "send_header", "(", "\"Access-Control-Allow-Credentials\"", ",", "\"true\"", ")", "return", "allow_headers", "is", "not", "None" ]
Sends cross origin headers.
[ "Sends", "cross", "origin", "headers", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L946-L959
train
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServerRequestHandler.do_OPTIONS
python
def do_OPTIONS(self):
    """Handles an OPTIONS request."""
    thread_local.clock_start = get_time()
    thread_local.status_code = 200
    thread_local.message = None
    thread_local.headers = []
    thread_local.end_headers = []
    thread_local.size = -1
    thread_local.method = 'OPTIONS'
    self.send_response(200)
    if self.is_cross_origin():
        no_caching = self.cross_origin_headers()
        # ten minutes if no custom headers requested
        self.send_header("Access-Control-Max-Age",
                         0 if no_caching else 10*60)
    self.send_header("Content-Length", 0)
    self.end_headers()
    thread_local.size = 0
[ "def", "do_OPTIONS", "(", "self", ")", ":", "thread_local", ".", "clock_start", "=", "get_time", "(", ")", "thread_local", ".", "status_code", "=", "200", "thread_local", ".", "message", "=", "None", "thread_local", ".", "headers", "=", "[", "]", "thread_local", ".", "end_headers", "=", "[", "]", "thread_local", ".", "size", "=", "-", "1", "thread_local", ".", "method", "=", "'OPTIONS'", "self", ".", "send_response", "(", "200", ")", "if", "self", ".", "is_cross_origin", "(", ")", ":", "no_caching", "=", "self", ".", "cross_origin_headers", "(", ")", "# ten minutes if no custom headers requested", "self", ".", "send_header", "(", "\"Access-Control-Max-Age\"", ",", "0", "if", "no_caching", "else", "10", "*", "60", ")", "self", ".", "send_header", "(", "\"Content-Length\"", ",", "0", ")", "self", ".", "end_headers", "(", ")", "thread_local", ".", "size", "=", "0" ]
Handles an OPTIONS request.
[ "Handles", "an", "OPTIONS", "request", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L961-L978
train
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServerRequestHandler.do_GET
python
def do_GET(self):
    """Handles a GET request."""
    thread_local.clock_start = get_time()
    thread_local.status_code = 200
    thread_local.message = None
    thread_local.headers = []
    thread_local.end_headers = []
    thread_local.size = -1
    thread_local.method = 'GET'
    try:
        self.cross_origin_headers()
        if self.handle_special(True, 'GET'):
            return
        SimpleHTTPRequestHandler.do_GET(self)
    except PreventDefaultResponse as pdr:
        if pdr.code:
            self.send_error(pdr.code, pdr.msg)
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        self.handle_error()
[ "def", "do_GET", "(", "self", ")", ":", "thread_local", ".", "clock_start", "=", "get_time", "(", ")", "thread_local", ".", "status_code", "=", "200", "thread_local", ".", "message", "=", "None", "thread_local", ".", "headers", "=", "[", "]", "thread_local", ".", "end_headers", "=", "[", "]", "thread_local", ".", "size", "=", "-", "1", "thread_local", ".", "method", "=", "'GET'", "try", ":", "self", ".", "cross_origin_headers", "(", ")", "if", "self", ".", "handle_special", "(", "True", ",", "'GET'", ")", ":", "return", "SimpleHTTPRequestHandler", ".", "do_GET", "(", "self", ")", "except", "PreventDefaultResponse", "as", "pdr", ":", "if", "pdr", ".", "code", ":", "self", ".", "send_error", "(", "pdr", ".", "code", ",", "pdr", ".", "msg", ")", "except", "(", "KeyboardInterrupt", ",", "SystemExit", ")", ":", "raise", "except", "Exception", ":", "self", ".", "handle_error", "(", ")" ]
Handles a GET request.
[ "Handles", "a", "GET", "request", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L1040-L1060
train
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServerRequestHandler.log_request
python
def log_request(self, code='-', size='-'):
    """Logs the current request."""
    print_size = getattr(thread_local, 'size', -1)
    if size != '-':
        size_str = ' (%s)' % size
    elif print_size >= 0:
        size_str = self.log_size_string(print_size) + ' '
    else:
        size_str = ''
    if not self.server.suppress_noise or (code != 200 and code != 304):
        self.log_message(
            '%s"%s" %s', size_str, self.requestline, str(code))
    if print_size >= 0:
        thread_local.size = -1
[ "def", "log_request", "(", "self", ",", "code", "=", "'-'", ",", "size", "=", "'-'", ")", ":", "print_size", "=", "getattr", "(", "thread_local", ",", "'size'", ",", "-", "1", ")", "if", "size", "!=", "'-'", ":", "size_str", "=", "' (%s)'", "%", "size", "elif", "print_size", ">=", "0", ":", "size_str", "=", "self", ".", "log_size_string", "(", "print_size", ")", "+", "' '", "else", ":", "size_str", "=", "''", "if", "not", "self", ".", "server", ".", "suppress_noise", "or", "(", "code", "!=", "200", "and", "code", "!=", "304", ")", ":", "self", ".", "log_message", "(", "'%s\"%s\" %s'", ",", "size_str", ",", "self", ".", "requestline", ",", "str", "(", "code", ")", ")", "if", "print_size", ">=", "0", ":", "thread_local", ".", "size", "=", "-", "1" ]
Logs the current request.
[ "Logs", "the", "current", "request", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L1189-L1202
train
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer._process_request
python
def _process_request(self, request, client_address):
    """Actually processes the request."""
    try:
        self.finish_request(request, client_address)
    except Exception:
        self.handle_error(request, client_address)
    finally:
        self.shutdown_request(request)
[ "def", "_process_request", "(", "self", ",", "request", ",", "client_address", ")", ":", "try", ":", "self", ".", "finish_request", "(", "request", ",", "client_address", ")", "except", "Exception", ":", "self", ".", "handle_error", "(", "request", ",", "client_address", ")", "finally", ":", "self", ".", "shutdown_request", "(", "request", ")" ]
Actually processes the request.
[ "Actually", "processes", "the", "request", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L1355-L1362
train
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer.process_request
python
def process_request(self, request, client_address):
    """Processes the request by delegating to `_process_request`."""
    if not self._parallel:
        self._process_request(request, client_address)
        return
    t = self._thread_factory(
        target=self._process_request, args=(request, client_address))
    t.daemon = True
    t.start()
[ "def", "process_request", "(", "self", ",", "request", ",", "client_address", ")", ":", "if", "not", "self", ".", "_parallel", ":", "self", ".", "_process_request", "(", "request", ",", "client_address", ")", "return", "t", "=", "self", ".", "_thread_factory", "(", "target", "=", "self", ".", "_process_request", ",", "args", "=", "(", "request", ",", "client_address", ")", ")", "t", ".", "daemon", "=", "True", "t", ".", "start", "(", ")" ]
Processes the request by delegating to `_process_request`.
[ "Processes", "the", "request", "by", "delegating", "to", "_process_request", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L1364-L1372
train
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer.add_file_patterns
python
def add_file_patterns(self, patterns, blacklist):
    """Adds a list of file patterns to either the black- or white-list.
    Note that this pattern is applied to the absolute path of the file
    that will be delivered. For including or excluding folders use
    `add_folder_mask` or `add_folder_fallback`.
    """
    bl = self._pattern_black if blacklist else self._pattern_white
    for pattern in patterns:
        bl.append(pattern)
[ "def", "add_file_patterns", "(", "self", ",", "patterns", ",", "blacklist", ")", ":", "bl", "=", "self", ".", "_pattern_black", "if", "blacklist", "else", "self", ".", "_pattern_white", "for", "pattern", "in", "patterns", ":", "bl", ".", "append", "(", "pattern", ")" ]
Adds a list of file patterns to either the black- or white-list. Note that this pattern is applied to the absolute path of the file that will be delivered. For including or excluding folders use `add_folder_mask` or `add_folder_fallback`.
[ "Adds", "a", "list", "of", "file", "patterns", "to", "either", "the", "black", "-", "or", "white", "-", "list", ".", "Note", "that", "this", "pattern", "is", "applied", "to", "the", "absolute", "path", "of", "the", "file", "that", "will", "be", "delivered", ".", "For", "including", "or", "excluding", "folders", "use", "add_folder_mask", "or", "add_folder_fallback", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L1376-L1384
train
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer.bind_path
python
def bind_path(self, name, folder):
    """Adds a mask that maps to a given folder relative to `base_path`."""
    if not len(name) or name[0] != '/' or name[-1] != '/':
        raise ValueError(
            "name must start and end with '/': {0}".format(name))
    self._folder_masks.insert(0, (name, folder))
[ "def", "bind_path", "(", "self", ",", "name", ",", "folder", ")", ":", "if", "not", "len", "(", "name", ")", "or", "name", "[", "0", "]", "!=", "'/'", "or", "name", "[", "-", "1", "]", "!=", "'/'", ":", "raise", "ValueError", "(", "\"name must start and end with '/': {0}\"", ".", "format", "(", "name", ")", ")", "self", ".", "_folder_masks", ".", "insert", "(", "0", ",", "(", "name", ",", "folder", ")", ")" ]
Adds a mask that maps to a given folder relative to `base_path`.
[ "Adds", "a", "mask", "that", "maps", "to", "a", "given", "folder", "relative", "to", "base_path", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L1412-L1417
train
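A fragment illustrating the masking rules above; `server` stands for a QuickServer instance constructed elsewhere (its constructor does not appear in these records), so this is not runnable on its own.

# bind_path inserts at the front of the mask list, so the newest binding
# takes precedence; bind_path_fallback (next record) appends to the end.
server.bind_path('/img/', 'static/images/')
# names must both start and end with '/':
# server.bind_path('/img', 'static/images/')  # would raise ValueError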
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer.bind_path_fallback
python
def bind_path_fallback(self, name, folder):
    """Adds a fallback for a given folder relative to `base_path`."""
    if not len(name) or name[0] != '/' or name[-1] != '/':
        raise ValueError(
            "name must start and end with '/': {0}".format(name))
    self._folder_masks.append((name, folder))
[ "def", "bind_path_fallback", "(", "self", ",", "name", ",", "folder", ")", ":", "if", "not", "len", "(", "name", ")", "or", "name", "[", "0", "]", "!=", "'/'", "or", "name", "[", "-", "1", "]", "!=", "'/'", ":", "raise", "ValueError", "(", "\"name must start and end with '/': {0}\"", ".", "format", "(", "name", ")", ")", "self", ".", "_folder_masks", ".", "append", "(", "(", "name", ",", "folder", ")", ")" ]
Adds a fallback for a given folder relative to `base_path`.
[ "Adds", "a", "fallback", "for", "a", "given", "folder", "relative", "to", "base_path", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L1419-L1424
train
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer.bind_proxy
python
def bind_proxy(self, name, proxy):
    """Adds a mask that maps to a given proxy."""
    if not len(name) or name[0] != '/' or name[-1] != '/':
        raise ValueError(
            "name must start and end with '/': {0}".format(name))
    self._folder_proxys.insert(0, (name, proxy))
[ "def", "bind_proxy", "(", "self", ",", "name", ",", "proxy", ")", ":", "if", "not", "len", "(", "name", ")", "or", "name", "[", "0", "]", "!=", "'/'", "or", "name", "[", "-", "1", "]", "!=", "'/'", ":", "raise", "ValueError", "(", "\"name must start and end with '/': {0}\"", ".", "format", "(", "name", ")", ")", "self", ".", "_folder_proxys", ".", "insert", "(", "0", ",", "(", "name", ",", "proxy", ")", ")" ]
Adds a mask that maps to a given proxy.
[ "Adds", "a", "mask", "that", "maps", "to", "a", "given", "proxy", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L1426-L1431
train
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer.add_cmd_method
python
def add_cmd_method(self, name, method, argc=None, complete=None):
    """Adds a command to the command line interface loop.

    Parameters
    ----------
    name : string
        The command.

    method : function(args)
        The function to execute when this command is issued. The argument
        of the function is a list of space separated arguments to the
        command.

    argc : int, optional (default=None)
        The number of expected further arguments. If None arguments are
        not restricted.

    complete : function(args, text), optional (default=None)
        A function that is called to complete further arguments. If None
        no suggestions are made. The function gets the arguments up to
        the incomplete argument (args). text contains the argument to be
        completed. The function must return a list of suggestions or None
        if text is already valid and there are no further suggestions.
    """
    if ' ' in name:
        raise ValueError("' ' cannot be in command name {0}".format(name))
    self._cmd_methods[name] = method
    self._cmd_argc[name] = argc
    self._cmd_complete[name] = complete
[ "def", "add_cmd_method", "(", "self", ",", "name", ",", "method", ",", "argc", "=", "None", ",", "complete", "=", "None", ")", ":", "if", "' '", "in", "name", ":", "raise", "ValueError", "(", "\"' ' cannot be in command name {0}\"", ".", "format", "(", "name", ")", ")", "self", ".", "_cmd_methods", "[", "name", "]", "=", "method", "self", ".", "_cmd_argc", "[", "name", "]", "=", "argc", "self", ".", "_cmd_complete", "[", "name", "]", "=", "complete" ]
Adds a command to the command line interface loop. Parameters ---------- name : string The command. method : function(args) The function to execute when this command is issued. The argument of the function is a list of space separated arguments to the command. argc : int, optional (default=None) The number of expected further arguments. If None arguments are not restricted. complete : function(args, text), optional (default=None) A function that is called to complete further arguments. If None no suggestions are made. The function gets the arguments up to the incomplete argument (args). text contains the argument to be completed. The function must return a list of suggestions or None if text is already valid and there are no further suggestions.
[ "Adds", "a", "command", "to", "the", "command", "line", "interface", "loop", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L1433-L1461
train
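A hedged sketch of registering a command per the docstring above; `server` is again an assumed, already-constructed QuickServer instance, and the command name and flags are made up for illustration.

def restart_cmd(args):
    # args is the list of space separated arguments after the command
    print('restart requested with flags:', args)

def restart_complete(args, text):
    # suggest flags matching the partially typed argument, or None
    suggestions = [f for f in ('--hard', '--soft') if f.startswith(text)]
    return suggestions or None

server.add_cmd_method('restart', restart_cmd,
                      argc=1, complete=restart_complete)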
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer._add_file_mask
python
def _add_file_mask(self, start, method_str, method):
    """Adds a raw file mask for dynamic requests.

    Parameters
    ----------
    start : string
        The URL prefix that must be matched to perform this request.

    method_str : string
        The HTTP method for which to trigger the request.

    method : function(esrh, args)
        The function to execute to perform the request. The function
        takes two arguments. esrh is the QuickServerRequestHandler
        object that called the function. args is a map containing the
        arguments to the request (i.e., the rest of the URL as path
        segment array 'paths', a map of all query fields / flags
        'query', the fragment string 'fragment', and if the method was
        a POST the JSON form content 'post'). The function must return
        a file object containing the response (preferably BytesIO). If
        the result is None no response body is sent. In this case make
        sure to send an appropriate error code.
    """
    fm = self._f_mask.get(method_str, [])
    fm.append((start, method))
    fm.sort(key=lambda k: len(k[0]), reverse=True)
    self._f_mask[method_str] = fm
    self._f_argc[method_str] = None
[ "def", "_add_file_mask", "(", "self", ",", "start", ",", "method_str", ",", "method", ")", ":", "fm", "=", "self", ".", "_f_mask", ".", "get", "(", "method_str", ",", "[", "]", ")", "fm", ".", "append", "(", "(", "start", ",", "method", ")", ")", "fm", ".", "sort", "(", "key", "=", "lambda", "k", ":", "len", "(", "k", "[", "0", "]", ")", ",", "reverse", "=", "True", ")", "self", ".", "_f_mask", "[", "method_str", "]", "=", "fm", "self", ".", "_f_argc", "[", "method_str", "]", "=", "None" ]
Adds a raw file mask for dynamic requests. Parameters ---------- start : string The URL prefix that must be matched to perform this request. method_str : string The HTTP method for which to trigger the request. method : function(esrh, args) The function to execute to perform the request. The function takes two arguments. esrh is the QuickServerRequestHandler object that called the function. args is a map containing the arguments to the request (i.e., the rest of the URL as path segment array 'paths', a map of all query fields / flags 'query', the fragment string 'fragment', and if the method was a POST the JSON form content 'post'). The function must return a file object containing the response (preferably BytesIO). If the result is None no response body is sent. In this case make sure to send an appropriate error code.
[ "Adds", "a", "raw", "file", "mask", "for", "dynamic", "requests", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L1477-L1504
train
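A minimal sketch of how such a raw mask might be registered, assuming a QuickServer instance named `server` already exists (its construction is not part of this record); the '/raw/' prefix and `raw_handler` are hypothetical. Note that masks are sorted by prefix length, so longer prefixes take precedence, and — following the pattern of the built-in handlers shown below — the handler itself sends the response headers before returning the body file object:

from io import BytesIO

def raw_handler(esrh, args):
    # args['paths'] holds the URL segments that follow the '/raw/' prefix
    body = "/".join(args['paths']).encode('utf8')
    esrh.send_response(200)
    esrh.send_header("Content-Type", "text/plain")
    esrh.send_header("Content-Length", len(body))
    esrh.end_headers()
    return BytesIO(body)   # must be a file object; BytesIO is preferred

server._add_file_mask('/raw/', 'GET', raw_handler)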
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer.add_json_mask
def add_json_mask(self, start, method_str, json_producer): """Adds a handler that produces a JSON response. Parameters ---------- start : string The URL prefix that must be matched to perform this request. method_str : string The HTTP method for which to trigger the request. json_producer : function(esrh, args) A function returning an object that can be converted to JSON. The function takes two arguments. esrh is the QuickServerRequestHandler object that called the function. args is a map containing the arguments to the request (i.e., the rest of the URL as path segment array 'paths', a map of all query fields / flags 'query', the fragment string 'fragment', and if the method was a POST the JSON form content 'post'). If the result is None a 404 error is sent. """ def send_json(drh, rem_path): obj = json_producer(drh, rem_path) if not isinstance(obj, Response): obj = Response(obj) ctype = obj.get_ctype("application/json") code = obj.code obj = obj.response if obj is None: drh.send_error(404, "File not found") return None f = BytesIO() json_str = json_dumps(obj) if isinstance(json_str, (str, unicode)): try: json_str = json_str.decode('utf8') except AttributeError: pass json_str = json_str.encode('utf8') f.write(json_str) f.flush() size = f.tell() f.seek(0) # handle ETag caching if drh.request_version >= "HTTP/1.1": e_tag = "{0:x}".format(zlib.crc32(f.read()) & 0xFFFFFFFF) f.seek(0) match = _getheader(drh.headers, 'if-none-match') if match is not None: if drh.check_cache(e_tag, match): f.close() return None drh.send_header("ETag", e_tag, end_header=True) drh.send_header("Cache-Control", "max-age={0}".format(self.max_age), end_header=True) drh.send_response(code) drh.send_header("Content-Type", ctype) drh.send_header("Content-Length", size) drh.end_headers() return f self._add_file_mask(start, method_str, send_json)
python
def add_json_mask(self, start, method_str, json_producer): """Adds a handler that produces a JSON response. Parameters ---------- start : string The URL prefix that must be matched to perform this request. method_str : string The HTTP method for which to trigger the request. json_producer : function(esrh, args) A function returning an object that can be converted to JSON. The function takes two arguments. esrh is the QuickServerRequestHandler object that called the function. args is a map containing the arguments to the request (i.e., the rest of the URL as path segment array 'paths', a map of all query fields / flags 'query', the fragment string 'fragment', and if the method was a POST the JSON form content 'post'). If the result is None a 404 error is sent. """ def send_json(drh, rem_path): obj = json_producer(drh, rem_path) if not isinstance(obj, Response): obj = Response(obj) ctype = obj.get_ctype("application/json") code = obj.code obj = obj.response if obj is None: drh.send_error(404, "File not found") return None f = BytesIO() json_str = json_dumps(obj) if isinstance(json_str, (str, unicode)): try: json_str = json_str.decode('utf8') except AttributeError: pass json_str = json_str.encode('utf8') f.write(json_str) f.flush() size = f.tell() f.seek(0) # handle ETag caching if drh.request_version >= "HTTP/1.1": e_tag = "{0:x}".format(zlib.crc32(f.read()) & 0xFFFFFFFF) f.seek(0) match = _getheader(drh.headers, 'if-none-match') if match is not None: if drh.check_cache(e_tag, match): f.close() return None drh.send_header("ETag", e_tag, end_header=True) drh.send_header("Cache-Control", "max-age={0}".format(self.max_age), end_header=True) drh.send_response(code) drh.send_header("Content-Type", ctype) drh.send_header("Content-Length", size) drh.end_headers() return f self._add_file_mask(start, method_str, send_json)
[ "def", "add_json_mask", "(", "self", ",", "start", ",", "method_str", ",", "json_producer", ")", ":", "def", "send_json", "(", "drh", ",", "rem_path", ")", ":", "obj", "=", "json_producer", "(", "drh", ",", "rem_path", ")", "if", "not", "isinstance", "(", "obj", ",", "Response", ")", ":", "obj", "=", "Response", "(", "obj", ")", "ctype", "=", "obj", ".", "get_ctype", "(", "\"application/json\"", ")", "code", "=", "obj", ".", "code", "obj", "=", "obj", ".", "response", "if", "obj", "is", "None", ":", "drh", ".", "send_error", "(", "404", ",", "\"File not found\"", ")", "return", "None", "f", "=", "BytesIO", "(", ")", "json_str", "=", "json_dumps", "(", "obj", ")", "if", "isinstance", "(", "json_str", ",", "(", "str", ",", "unicode", ")", ")", ":", "try", ":", "json_str", "=", "json_str", ".", "decode", "(", "'utf8'", ")", "except", "AttributeError", ":", "pass", "json_str", "=", "json_str", ".", "encode", "(", "'utf8'", ")", "f", ".", "write", "(", "json_str", ")", "f", ".", "flush", "(", ")", "size", "=", "f", ".", "tell", "(", ")", "f", ".", "seek", "(", "0", ")", "# handle ETag caching", "if", "drh", ".", "request_version", ">=", "\"HTTP/1.1\"", ":", "e_tag", "=", "\"{0:x}\"", ".", "format", "(", "zlib", ".", "crc32", "(", "f", ".", "read", "(", ")", ")", "&", "0xFFFFFFFF", ")", "f", ".", "seek", "(", "0", ")", "match", "=", "_getheader", "(", "drh", ".", "headers", ",", "'if-none-match'", ")", "if", "match", "is", "not", "None", ":", "if", "drh", ".", "check_cache", "(", "e_tag", ",", "match", ")", ":", "f", ".", "close", "(", ")", "return", "None", "drh", ".", "send_header", "(", "\"ETag\"", ",", "e_tag", ",", "end_header", "=", "True", ")", "drh", ".", "send_header", "(", "\"Cache-Control\"", ",", "\"max-age={0}\"", ".", "format", "(", "self", ".", "max_age", ")", ",", "end_header", "=", "True", ")", "drh", ".", "send_response", "(", "code", ")", "drh", ".", "send_header", "(", "\"Content-Type\"", ",", "ctype", ")", "drh", ".", "send_header", "(", "\"Content-Length\"", ",", "size", ")", "drh", ".", "end_headers", "(", ")", "return", "f", "self", ".", "_add_file_mask", "(", "start", ",", "method_str", ",", "send_json", ")" ]
Adds a handler that produces a JSON response. Parameters ---------- start : string The URL prefix that must be matched to perform this request. method_str : string The HTTP method for which to trigger the request. json_producer : function(esrh, args) A function returning an object that can be converted to JSON. The function takes two arguments. esrh is the QuickServerRequestHandler object that called the function. args is a map containing the arguments to the request (i.e., the rest of the URL as path segment array 'paths', a map of all query fields / flags 'query', the fragment string 'fragment', and if the method was a POST the JSON form content 'post'). If the result is None a 404 error is sent.
[ "Adds", "a", "handler", "that", "produces", "a", "JSON", "response", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L1506-L1566
train
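A hedged usage sketch for the method above, again assuming an existing server instance `server` (hypothetical name, endpoint path likewise). A plain dict return value is wrapped in a Response automatically, and returning None produces a 404; on HTTP/1.1 the response also carries an ETag, as the implementation shows:

def status(esrh, args):
    # 'query' maps query fields / flags, per the docstring above
    return {"status": "ok", "flags": sorted(args['query'].keys())}

server.add_json_mask('/api/status/', 'GET', status)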
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer.add_text_mask
def add_text_mask(self, start, method_str, text_producer): """Adds a handler that produces a plain text response. Parameters ---------- start : string The URL prefix that must be matched to perform this request. method_str : string The HTTP method for which to trigger the request. text_producer : function(esrh, args) A function returning a string. The function takes two arguments. esrh is the QuickServerRequestHandler object that called the function. args is a map containing the arguments to the request (i.e., the rest of the URL as path segment array 'paths', a map of all query fields / flags 'query', the fragment string 'fragment', and if the method was a POST the JSON form content 'post'). If the result is None a 404 error is sent. """ def send_text(drh, rem_path): text = text_producer(drh, rem_path) if not isinstance(text, Response): text = Response(text) ctype = text.get_ctype("text/plain") code = text.code text = text.response if text is None: drh.send_error(404, "File not found") return None f = BytesIO() if isinstance(text, (str, unicode)): try: text = text.decode('utf8') except AttributeError: pass text = text.encode('utf8') f.write(text) f.flush() size = f.tell() f.seek(0) # handle ETag caching if drh.request_version >= "HTTP/1.1": e_tag = "{0:x}".format(zlib.crc32(f.read()) & 0xFFFFFFFF) f.seek(0) match = _getheader(drh.headers, 'if-none-match') if match is not None: if drh.check_cache(e_tag, match): f.close() return None drh.send_header("ETag", e_tag, end_header=True) drh.send_header("Cache-Control", "max-age={0}".format(self.max_age), end_header=True) drh.send_response(code) drh.send_header("Content-Type", ctype) drh.send_header("Content-Length", size) drh.end_headers() return f self._add_file_mask(start, method_str, send_text)
python
def add_text_mask(self, start, method_str, text_producer): """Adds a handler that produces a plain text response. Parameters ---------- start : string The URL prefix that must be matched to perform this request. method_str : string The HTTP method for which to trigger the request. text_producer : function(esrh, args) A function returning a string. The function takes two arguments. esrh is the QuickServerRequestHandler object that called the function. args is a map containing the arguments to the request (i.e., the rest of the URL as path segment array 'paths', a map of all query fields / flags 'query', the fragment string 'fragment', and if the method was a POST the JSON form content 'post'). If the result is None a 404 error is sent. """ def send_text(drh, rem_path): text = text_producer(drh, rem_path) if not isinstance(text, Response): text = Response(text) ctype = text.get_ctype("text/plain") code = text.code text = text.response if text is None: drh.send_error(404, "File not found") return None f = BytesIO() if isinstance(text, (str, unicode)): try: text = text.decode('utf8') except AttributeError: pass text = text.encode('utf8') f.write(text) f.flush() size = f.tell() f.seek(0) # handle ETag caching if drh.request_version >= "HTTP/1.1": e_tag = "{0:x}".format(zlib.crc32(f.read()) & 0xFFFFFFFF) f.seek(0) match = _getheader(drh.headers, 'if-none-match') if match is not None: if drh.check_cache(e_tag, match): f.close() return None drh.send_header("ETag", e_tag, end_header=True) drh.send_header("Cache-Control", "max-age={0}".format(self.max_age), end_header=True) drh.send_response(code) drh.send_header("Content-Type", ctype) drh.send_header("Content-Length", size) drh.end_headers() return f self._add_file_mask(start, method_str, send_text)
[ "def", "add_text_mask", "(", "self", ",", "start", ",", "method_str", ",", "text_producer", ")", ":", "def", "send_text", "(", "drh", ",", "rem_path", ")", ":", "text", "=", "text_producer", "(", "drh", ",", "rem_path", ")", "if", "not", "isinstance", "(", "text", ",", "Response", ")", ":", "text", "=", "Response", "(", "text", ")", "ctype", "=", "text", ".", "get_ctype", "(", "\"text/plain\"", ")", "code", "=", "text", ".", "code", "text", "=", "text", ".", "response", "if", "text", "is", "None", ":", "drh", ".", "send_error", "(", "404", ",", "\"File not found\"", ")", "return", "None", "f", "=", "BytesIO", "(", ")", "if", "isinstance", "(", "text", ",", "(", "str", ",", "unicode", ")", ")", ":", "try", ":", "text", "=", "text", ".", "decode", "(", "'utf8'", ")", "except", "AttributeError", ":", "pass", "text", "=", "text", ".", "encode", "(", "'utf8'", ")", "f", ".", "write", "(", "text", ")", "f", ".", "flush", "(", ")", "size", "=", "f", ".", "tell", "(", ")", "f", ".", "seek", "(", "0", ")", "# handle ETag caching", "if", "drh", ".", "request_version", ">=", "\"HTTP/1.1\"", ":", "e_tag", "=", "\"{0:x}\"", ".", "format", "(", "zlib", ".", "crc32", "(", "f", ".", "read", "(", ")", ")", "&", "0xFFFFFFFF", ")", "f", ".", "seek", "(", "0", ")", "match", "=", "_getheader", "(", "drh", ".", "headers", ",", "'if-none-match'", ")", "if", "match", "is", "not", "None", ":", "if", "drh", ".", "check_cache", "(", "e_tag", ",", "match", ")", ":", "f", ".", "close", "(", ")", "return", "None", "drh", ".", "send_header", "(", "\"ETag\"", ",", "e_tag", ",", "end_header", "=", "True", ")", "drh", ".", "send_header", "(", "\"Cache-Control\"", ",", "\"max-age={0}\"", ".", "format", "(", "self", ".", "max_age", ")", ",", "end_header", "=", "True", ")", "drh", ".", "send_response", "(", "code", ")", "drh", ".", "send_header", "(", "\"Content-Type\"", ",", "ctype", ")", "drh", ".", "send_header", "(", "\"Content-Length\"", ",", "size", ")", "drh", ".", "end_headers", "(", ")", "return", "f", "self", ".", "_add_file_mask", "(", "start", ",", "method_str", ",", "send_text", ")" ]
Adds a handler that produces a plain text response. Parameters ---------- start : string The URL prefix that must be matched to perform this request. method_str : string The HTTP method for which to trigger the request. text_producer : function(esrh, args) A function returning a string. The function takes two arguments. esrh is the QuickServerRequestHandler object that called the function. args is a map containing the arguments to the request (i.e., the rest of the URL as path segment array 'paths', a map of all query fields / flags 'query', the fragment string 'fragment', and if the method was a POST the JSON form content 'post'). If the result is None a 404 error is sent.
[ "Adds", "a", "handler", "that", "produces", "a", "plain", "text", "response", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L1644-L1703
train
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer.add_special_file
def add_special_file(self, mask, path, from_quick_server, ctype=None): """Adds a special file that might have a different actual path than its address. Parameters ---------- mask : string The URL that must be matched to perform this request. path : string The actual file path. from_quick_server : bool If set the file path is relative to *this* script otherwise it is relative to the process. ctype : string Optional content type. """ full_path = path if not from_quick_server else os.path.join( os.path.dirname(__file__), path) def read_file(_req, _args): with open(full_path, 'rb') as f_out: return Response(f_out.read(), ctype=ctype) self.add_text_get_mask(mask, read_file) self.set_file_argc(mask, 0)
python
def add_special_file(self, mask, path, from_quick_server, ctype=None): """Adds a special file that might have a different actual path than its address. Parameters ---------- mask : string The URL that must be matched to perform this request. path : string The actual file path. from_quick_server : bool If set the file path is relative to *this* script otherwise it is relative to the process. ctype : string Optional content type. """ full_path = path if not from_quick_server else os.path.join( os.path.dirname(__file__), path) def read_file(_req, _args): with open(full_path, 'rb') as f_out: return Response(f_out.read(), ctype=ctype) self.add_text_get_mask(mask, read_file) self.set_file_argc(mask, 0)
[ "def", "add_special_file", "(", "self", ",", "mask", ",", "path", ",", "from_quick_server", ",", "ctype", "=", "None", ")", ":", "full_path", "=", "path", "if", "not", "from_quick_server", "else", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "path", ")", "def", "read_file", "(", "_req", ",", "_args", ")", ":", "with", "open", "(", "full_path", ",", "'rb'", ")", "as", "f_out", ":", "return", "Response", "(", "f_out", ".", "read", "(", ")", ",", "ctype", "=", "ctype", ")", "self", ".", "add_text_get_mask", "(", "mask", ",", "read_file", ")", "self", ".", "set_file_argc", "(", "mask", ",", "0", ")" ]
Adds a special file that might have a different actual path than its address. Parameters ---------- mask : string The URL that must be matched to perform this request. path : string The actual file path. from_quick_server : bool If set the file path is relative to *this* script otherwise it is relative to the process. ctype : string Optional content type.
[ "Adds", "a", "special", "file", "that", "might", "have", "a", "different", "actual", "path", "than", "its", "address", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L1846-L1873
train
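Usage sketch with hypothetical paths: serve a project-local file under a fixed address with an explicit content type. With from_quick_server=False the file path is resolved relative to the running process rather than the quick_server package:

server.add_special_file(
    '/logo.png', 'static/logo.png',
    from_quick_server=False, ctype='image/png')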
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer.mirror_file
def mirror_file(self, path_to, path_from, from_quick_server=True): """Mirrors a file to a different location. Each time the file changes while the process is running it will be copied to 'path_to', overwriting the destination. Parameters ---------- path_to : string The mirror destination. path_from : string The mirror origin. from_quick_server : bool If set the origin path is relative to *this* script otherwise it is relative to the process. """ full_path = path_from if not from_quick_server else os.path.join( os.path.dirname(__file__), path_from) if self._mirror is None: if not self._symlink_mirror(path_to, full_path, init=True): self._poll_mirror(path_to, full_path, init=True) return impl = self._mirror["impl"] if impl == "symlink": self._symlink_mirror(path_to, full_path, init=False) elif impl == "poll": self._poll_mirror(path_to, full_path, init=False) else: raise ValueError("unknown mirror implementation: {0}".format(impl))
python
def mirror_file(self, path_to, path_from, from_quick_server=True): """Mirrors a file to a different location. Each time the file changes while the process is running it will be copied to 'path_to', overwriting the destination. Parameters ---------- path_to : string The mirror destination. path_from : string The mirror origin. from_quick_server : bool If set the origin path is relative to *this* script otherwise it is relative to the process. """ full_path = path_from if not from_quick_server else os.path.join( os.path.dirname(__file__), path_from) if self._mirror is None: if not self._symlink_mirror(path_to, full_path, init=True): self._poll_mirror(path_to, full_path, init=True) return impl = self._mirror["impl"] if impl == "symlink": self._symlink_mirror(path_to, full_path, init=False) elif impl == "poll": self._poll_mirror(path_to, full_path, init=False) else: raise ValueError("unknown mirror implementation: {0}".format(impl))
[ "def", "mirror_file", "(", "self", ",", "path_to", ",", "path_from", ",", "from_quick_server", "=", "True", ")", ":", "full_path", "=", "path_from", "if", "not", "from_quick_server", "else", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "path_from", ")", "if", "self", ".", "_mirror", "is", "None", ":", "if", "not", "self", ".", "_symlink_mirror", "(", "path_to", ",", "full_path", ",", "init", "=", "True", ")", ":", "self", ".", "_poll_mirror", "(", "path_to", ",", "full_path", ",", "init", "=", "True", ")", "return", "impl", "=", "self", ".", "_mirror", "[", "\"impl\"", "]", "if", "impl", "==", "\"symlink\"", ":", "self", ".", "_symlink_mirror", "(", "path_to", ",", "full_path", ",", "init", "=", "False", ")", "elif", "impl", "==", "\"poll\"", ":", "self", ".", "_poll_mirror", "(", "path_to", ",", "full_path", ",", "init", "=", "False", ")", "else", ":", "raise", "ValueError", "(", "\"unknown mirror implementation: {0}\"", ".", "format", "(", "impl", ")", ")" ]
Mirrors a file to a different location. Each time the file changes while the process is running it will be copied to 'path_to', overwriting the destination. Parameters ---------- path_to : string The mirror destination. path_from : string The mirror origin. from_quick_server : bool If set the origin path is relative to *this* script otherwise it is relative to the process.
[ "Mirrors", "a", "file", "to", "a", "different", "location", ".", "Each", "time", "the", "file", "changes", "while", "the", "process", "is", "running", "it", "will", "be", "copied", "to", "path_to", "overwriting", "the", "destination", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L1875-L1904
train
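Usage sketch (file names hypothetical): keep a copy of a bundled script inside the web root. As the implementation above shows, a symlink mirror is tried first and a polling copy is used as fallback:

# origin is relative to the quick_server package by default
server.mirror_file('www/worker.js', 'worker.js')
# origin relative to the running process instead:
server.mirror_file('www/app.js', 'assets/app.js', from_quick_server=False)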
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer.link_empty_favicon_fallback
def link_empty_favicon_fallback(self): """Links the empty favicon as default favicon.""" self.favicon_fallback = os.path.join( os.path.dirname(__file__), 'favicon.ico')
python
def link_empty_favicon_fallback(self): """Links the empty favicon as default favicon.""" self.favicon_fallback = os.path.join( os.path.dirname(__file__), 'favicon.ico')
[ "def", "link_empty_favicon_fallback", "(", "self", ")", ":", "self", ".", "favicon_fallback", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'favicon.ico'", ")" ]
Links the empty favicon as default favicon.
[ "Links", "the", "empty", "favicon", "as", "default", "favicon", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L1980-L1983
train
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer.get_token_obj
def get_token_obj(self, token, expire=_token_default): """Returns or creates the object associated with the given token. Parameters ---------- token : string The token for the object as returned by `create_token`. expire : number or None The number of seconds until the object associated with the token expires or `None` if it should not expire. If the argument is omitted the value returned by `get_default_token_expiration` is used. The expiration of an object is lazy. That means the memory of the expired object is not freed until the next call of `get_token_obj`. An expiration of 0 or less immediately frees the memory of the token. """ if expire == _token_default: expire = self.get_default_token_expiration() now = get_time() until = now + expire if expire is not None else None with self._token_lock: # _token_timings is keys sorted by time first_valid = None for (pos, k) in enumerate(self._token_timings): t = self._token_map[k][0] if t is None or t > now: first_valid = pos break if first_valid is None: self._token_map = {} self._token_timings = [] else: for k in self._token_timings[:first_valid]: del self._token_map[k] self._token_timings = self._token_timings[first_valid:] if until is None or until > now: if token not in self._token_map: self._token_map[token] = (until, {}) self._token_timings.append(token) else: self._token_map[token] = (until, self._token_map[token][1]) self._token_timings.sort(key=lambda k: ( 1 if self._token_map[k][0] is None else 0, self._token_map[k][0] )) return self._token_map[token][1] else: if token in self._token_map: self._token_timings = [ k for k in self._token_timings if k != token ] del self._token_map[token] return {}
python
def get_token_obj(self, token, expire=_token_default): """Returns or creates the object associated with the given token. Parameters ---------- token : string The token for the object as returned by `create_token`. expire : number or None The number of seconds until the object associated with the token expires or `None` if it should not expire. If the argument is omitted the value returned by `get_default_token_expiration` is used. The expiration of an object is lazy. That means the memory of the expired object is not freed until the next call of `get_token_obj`. An expiration of 0 or less immediately frees the memory of the token. """ if expire == _token_default: expire = self.get_default_token_expiration() now = get_time() until = now + expire if expire is not None else None with self._token_lock: # _token_timings is keys sorted by time first_valid = None for (pos, k) in enumerate(self._token_timings): t = self._token_map[k][0] if t is None or t > now: first_valid = pos break if first_valid is None: self._token_map = {} self._token_timings = [] else: for k in self._token_timings[:first_valid]: del self._token_map[k] self._token_timings = self._token_timings[first_valid:] if until is None or until > now: if token not in self._token_map: self._token_map[token] = (until, {}) self._token_timings.append(token) else: self._token_map[token] = (until, self._token_map[token][1]) self._token_timings.sort(key=lambda k: ( 1 if self._token_map[k][0] is None else 0, self._token_map[k][0] )) return self._token_map[token][1] else: if token in self._token_map: self._token_timings = [ k for k in self._token_timings if k != token ] del self._token_map[token] return {}
[ "def", "get_token_obj", "(", "self", ",", "token", ",", "expire", "=", "_token_default", ")", ":", "if", "expire", "==", "_token_default", ":", "expire", "=", "self", ".", "get_default_token_expiration", "(", ")", "now", "=", "get_time", "(", ")", "until", "=", "now", "+", "expire", "if", "expire", "is", "not", "None", "else", "None", "with", "self", ".", "_token_lock", ":", "# _token_timings is keys sorted by time", "first_valid", "=", "None", "for", "(", "pos", ",", "k", ")", "in", "enumerate", "(", "self", ".", "_token_timings", ")", ":", "t", "=", "self", ".", "_token_map", "[", "k", "]", "[", "0", "]", "if", "t", "is", "None", "or", "t", ">", "now", ":", "first_valid", "=", "pos", "break", "if", "first_valid", "is", "None", ":", "self", ".", "_token_map", "=", "{", "}", "self", ".", "_token_timings", "=", "[", "]", "else", ":", "for", "k", "in", "self", ".", "_token_timings", "[", ":", "first_valid", "]", ":", "del", "self", ".", "_token_map", "[", "k", "]", "self", ".", "_token_timings", "=", "self", ".", "_token_timings", "[", "first_valid", ":", "]", "if", "until", "is", "None", "or", "until", ">", "now", ":", "if", "token", "not", "in", "self", ".", "_token_map", ":", "self", ".", "_token_map", "[", "token", "]", "=", "(", "until", ",", "{", "}", ")", "self", ".", "_token_timings", ".", "append", "(", "token", ")", "else", ":", "self", ".", "_token_map", "[", "token", "]", "=", "(", "until", ",", "self", ".", "_token_map", "[", "token", "]", "[", "1", "]", ")", "self", ".", "_token_timings", ".", "sort", "(", "key", "=", "lambda", "k", ":", "(", "1", "if", "self", ".", "_token_map", "[", "k", "]", "[", "0", "]", "is", "None", "else", "0", ",", "self", ".", "_token_map", "[", "k", "]", "[", "0", "]", ")", ")", "return", "self", ".", "_token_map", "[", "token", "]", "[", "1", "]", "else", ":", "if", "token", "in", "self", ".", "_token_map", ":", "self", ".", "_token_timings", "=", "[", "k", "for", "k", "in", "self", ".", "_token_timings", "if", "k", "!=", "token", "]", "del", "self", ".", "_token_map", "[", "token", "]", "return", "{", "}" ]
Returns or creates the object associated with the given token. Parameters ---------- token : string The token for the object as returned by `create_token`. expire : number or None The number of seconds until the object associated with the token expires or `None` if it should not expire. If the argument is omitted the value returned by `get_default_token_expiration` is used. The expiration of an object is lazy. That means the memory of the expired object is not freed until the next call of `get_token_obj`. An expiration of 0 or less immediately frees the memory of the token.
[ "Returns", "or", "creates", "the", "object", "associaten", "with", "the", "given", "token", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L2356-L2409
train
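A usage sketch assuming a server instance `server`; `create_token` is the companion method named in the docstring above. The returned dict is shared across calls until the token expires, and an expiration of 0 frees it immediately:

token = server.create_token()
obj = server.get_token_obj(token)        # default expiration
obj['user'] = 'alice'
assert server.get_token_obj(token).get('user') == 'alice'
server.get_token_obj(token, expire=0)    # drop the token right away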
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer.handle_cmd
def handle_cmd(self, cmd): """Handles a single server command.""" cmd = cmd.strip() segments = [] for s in cmd.split(): # remove bash-like comments if s.startswith('#'): break # TODO implement escape sequences (also for \#) segments.append(s) args = [] if not len(segments): return # process more specific commands first while segments: cur_cmd = "_".join(segments) if cur_cmd in self._cmd_methods: argc = self._cmd_argc[cur_cmd] if argc is not None and len(args) != argc: msg('command {0} expects {1} argument(s), got {2}', " ".join(segments), argc, len(args)) return self._cmd_methods[cur_cmd](args) return args.insert(0, segments.pop()) # invalid command prefix = '_'.join(args) + '_' matches = filter( lambda cmd: cmd.startswith(prefix), self._cmd_methods.keys()) candidates = set([]) for m in matches: if len(m) <= len(prefix): continue m = m[len(prefix):] if '_' in m: m = m[:m.index('_')] candidates.add(m) if len(candidates): msg('command "{0}" needs more arguments:', ' '.join(args)) for c in candidates: msg(' {0}', c) else: msg('command "{0}" invalid; type ' + 'help or use <TAB> for a list of commands', ' '.join(args))
python
def handle_cmd(self, cmd): """Handles a single server command.""" cmd = cmd.strip() segments = [] for s in cmd.split(): # remove bash-like comments if s.startswith('#'): break # TODO implement escape sequences (also for \#) segments.append(s) args = [] if not len(segments): return # process more specific commands first while segments: cur_cmd = "_".join(segments) if cur_cmd in self._cmd_methods: argc = self._cmd_argc[cur_cmd] if argc is not None and len(args) != argc: msg('command {0} expects {1} argument(s), got {2}', " ".join(segments), argc, len(args)) return self._cmd_methods[cur_cmd](args) return args.insert(0, segments.pop()) # invalid command prefix = '_'.join(args) + '_' matches = filter( lambda cmd: cmd.startswith(prefix), self._cmd_methods.keys()) candidates = set([]) for m in matches: if len(m) <= len(prefix): continue m = m[len(prefix):] if '_' in m: m = m[:m.index('_')] candidates.add(m) if len(candidates): msg('command "{0}" needs more arguments:', ' '.join(args)) for c in candidates: msg(' {0}', c) else: msg('command "{0}" invalid; type ' + 'help or use <TAB> for a list of commands', ' '.join(args))
[ "def", "handle_cmd", "(", "self", ",", "cmd", ")", ":", "cmd", "=", "cmd", ".", "strip", "(", ")", "segments", "=", "[", "]", "for", "s", "in", "cmd", ".", "split", "(", ")", ":", "# remove bash-like comments", "if", "s", ".", "startswith", "(", "'#'", ")", ":", "break", "# TODO implement escape sequences (also for \\#)", "segments", ".", "append", "(", "s", ")", "args", "=", "[", "]", "if", "not", "len", "(", "segments", ")", ":", "return", "# process more specific commands first", "while", "segments", ":", "cur_cmd", "=", "\"_\"", ".", "join", "(", "segments", ")", "if", "cur_cmd", "in", "self", ".", "_cmd_methods", ":", "argc", "=", "self", ".", "_cmd_argc", "[", "cur_cmd", "]", "if", "argc", "is", "not", "None", "and", "len", "(", "args", ")", "!=", "argc", ":", "msg", "(", "'command {0} expects {1} argument(s), got {2}'", ",", "\" \"", ".", "join", "(", "segments", ")", ",", "argc", ",", "len", "(", "args", ")", ")", "return", "self", ".", "_cmd_methods", "[", "cur_cmd", "]", "(", "args", ")", "return", "args", ".", "insert", "(", "0", ",", "segments", ".", "pop", "(", ")", ")", "# invalid command", "prefix", "=", "'_'", ".", "join", "(", "args", ")", "+", "'_'", "matches", "=", "filter", "(", "lambda", "cmd", ":", "cmd", ".", "startswith", "(", "prefix", ")", ",", "self", ".", "_cmd_methods", ".", "keys", "(", ")", ")", "candidates", "=", "set", "(", "[", "]", ")", "for", "m", "in", "matches", ":", "if", "len", "(", "m", ")", "<=", "len", "(", "prefix", ")", ":", "continue", "m", "=", "m", "[", "len", "(", "prefix", ")", ":", "]", "if", "'_'", "in", "m", ":", "m", "=", "m", "[", ":", "m", ".", "index", "(", "'_'", ")", "]", "candidates", ".", "add", "(", "m", ")", "if", "len", "(", "candidates", ")", ":", "msg", "(", "'command \"{0}\" needs more arguments:'", ",", "' '", ".", "join", "(", "args", ")", ")", "for", "c", "in", "candidates", ":", "msg", "(", "' {0}'", ",", "c", ")", "else", ":", "msg", "(", "'command \"{0}\" invalid; type '", "+", "'help or use <TAB> for a list of commands'", ",", "' '", ".", "join", "(", "args", ")", ")" ]
Handles a single server command.
[ "Handles", "a", "single", "server", "command", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L2514-L2558
train
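The dispatch rule above — join segments with '_', the longest registered command wins, and trailing words become arguments — distilled into a self-contained sketch; `dispatch` and the sample commands are illustrations, not library code:

def dispatch(cmd, methods):
    segments = cmd.split()
    args = []
    while segments:
        name = "_".join(segments)
        if name in methods:                 # longest prefix checked first
            return methods[name](args)
        args.insert(0, segments.pop())      # last word becomes an argument
    raise KeyError("unknown command: %s" % cmd)

methods = {
    'restart': lambda args: 'soft restart',
    'restart_hard': lambda args: 'hard restart',
}
print(dispatch('restart hard', methods))  # -> 'hard restart', args == []
print(dispatch('restart now', methods))   # -> 'soft restart', args == ['now']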
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer.handle_request
def handle_request(self): """Handles an HTTP request. The actual HTTP request is handled using a different thread. """ timeout = self.socket.gettimeout() if timeout is None: timeout = self.timeout elif self.timeout is not None: timeout = min(timeout, self.timeout) ctime = get_time() done_req = False shutdown_latency = self.shutdown_latency if timeout is not None: shutdown_latency = min(shutdown_latency, timeout) \ if shutdown_latency is not None else timeout while not (self.done or done_req) and (timeout is None or timeout == 0 or (get_time() - ctime) < timeout): try: fd_sets = select.select([self], [], [], shutdown_latency) except (OSError, select.error) as e: if e.args[0] != errno.EINTR: raise # treat EINTR as shutdown_latency timeout fd_sets = [[], [], []] for _fd in fd_sets[0]: done_req = True self._handle_request_noblock() if timeout == 0: break if not (self.done or done_req): # don't handle timeouts if we should shut down the server instead self.handle_timeout()
python
def handle_request(self): """Handles an HTTP request. The actual HTTP request is handled using a different thread. """ timeout = self.socket.gettimeout() if timeout is None: timeout = self.timeout elif self.timeout is not None: timeout = min(timeout, self.timeout) ctime = get_time() done_req = False shutdown_latency = self.shutdown_latency if timeout is not None: shutdown_latency = min(shutdown_latency, timeout) \ if shutdown_latency is not None else timeout while not (self.done or done_req) and (timeout is None or timeout == 0 or (get_time() - ctime) < timeout): try: fd_sets = select.select([self], [], [], shutdown_latency) except (OSError, select.error) as e: if e.args[0] != errno.EINTR: raise # treat EINTR as shutdown_latency timeout fd_sets = [[], [], []] for _fd in fd_sets[0]: done_req = True self._handle_request_noblock() if timeout == 0: break if not (self.done or done_req): # don't handle timeouts if we should shut down the server instead self.handle_timeout()
[ "def", "handle_request", "(", "self", ")", ":", "timeout", "=", "self", ".", "socket", ".", "gettimeout", "(", ")", "if", "timeout", "is", "None", ":", "timeout", "=", "self", ".", "timeout", "elif", "self", ".", "timeout", "is", "not", "None", ":", "timeout", "=", "min", "(", "timeout", ",", "self", ".", "timeout", ")", "ctime", "=", "get_time", "(", ")", "done_req", "=", "False", "shutdown_latency", "=", "self", ".", "shutdown_latency", "if", "timeout", "is", "not", "None", ":", "shutdown_latency", "=", "min", "(", "shutdown_latency", ",", "timeout", ")", "if", "shutdown_latency", "is", "not", "None", "else", "timeout", "while", "not", "(", "self", ".", "done", "or", "done_req", ")", "and", "(", "timeout", "is", "None", "or", "timeout", "==", "0", "or", "(", "get_time", "(", ")", "-", "ctime", ")", "<", "timeout", ")", ":", "try", ":", "fd_sets", "=", "select", ".", "select", "(", "[", "self", "]", ",", "[", "]", ",", "[", "]", ",", "shutdown_latency", ")", "except", "(", "OSError", ",", "select", ".", "error", ")", "as", "e", ":", "if", "e", ".", "args", "[", "0", "]", "!=", "errno", ".", "EINTR", ":", "raise", "# treat EINTR as shutdown_latency timeout", "fd_sets", "=", "[", "[", "]", ",", "[", "]", ",", "[", "]", "]", "for", "_fd", "in", "fd_sets", "[", "0", "]", ":", "done_req", "=", "True", "self", ".", "_handle_request_noblock", "(", ")", "if", "timeout", "==", "0", ":", "break", "if", "not", "(", "self", ".", "done", "or", "done_req", ")", ":", "# don't handle timeouts if we should shut down the server instead", "self", ".", "handle_timeout", "(", ")" ]
Handles an HTTP request. The actual HTTP request is handled using a different thread.
[ "Handles", "an", "HTTP", "request", ".", "The", "actual", "HTTP", "request", "is", "handled", "using", "a", "different", "thread", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L2716-L2748
train
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer.serve_forever
def serve_forever(self): """Starts the server handling commands and HTTP requests. The server will loop until done is True or a KeyboardInterrupt is received. """ self.start_cmd_loop() try: while not self.done: self.handle_request() except KeyboardInterrupt: # clean error output if log file is STD_ERR if log_file == sys.stderr: log_file.write("\n") finally: if self._clean_up_call is not None: self._clean_up_call() self.done = True
python
def serve_forever(self): """Starts the server handling commands and HTTP requests. The server will loop until done is True or a KeyboardInterrupt is received. """ self.start_cmd_loop() try: while not self.done: self.handle_request() except KeyboardInterrupt: # clean error output if log file is STD_ERR if log_file == sys.stderr: log_file.write("\n") finally: if self._clean_up_call is not None: self._clean_up_call() self.done = True
[ "def", "serve_forever", "(", "self", ")", ":", "self", ".", "start_cmd_loop", "(", ")", "try", ":", "while", "not", "self", ".", "done", ":", "self", ".", "handle_request", "(", ")", "except", "KeyboardInterrupt", ":", "# clean error output if log file is STD_ERR", "if", "log_file", "==", "sys", ".", "stderr", ":", "log_file", ".", "write", "(", "\"\\n\"", ")", "finally", ":", "if", "self", ".", "_clean_up_call", "is", "not", "None", ":", "self", ".", "_clean_up_call", "(", ")", "self", ".", "done", "=", "True" ]
Starts the server handling commands and HTTP requests. The server will loop until done is True or a KeyboardInterrupt is received.
[ "Starts", "the", "server", "handling", "commands", "and", "HTTP", "requests", ".", "The", "server", "will", "loop", "until", "done", "is", "True", "or", "a", "KeyboardInterrupt", "is", "received", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L2750-L2766
train
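Putting the pieces together, hedged as before (server construction and endpoint name not shown in these records): the loop exits once `done` becomes True, so a handler registered via add_text_mask (documented above) can shut the server down:

def stop(esrh, args):
    server.done = True       # makes the serve_forever loop terminate
    return "shutting down"

server.add_text_mask('/api/stop/', 'POST', stop)
server.serve_forever()       # blocks until done is set or Ctrl+C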
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer.can_ignore_error
def can_ignore_error(self, reqhnd=None): """Tests whether the error can be ignored rather than reported. """ value = sys.exc_info()[1] try: if isinstance(value, BrokenPipeError) or \ isinstance(value, ConnectionResetError): return True except NameError: pass if not self.done: return False if not isinstance(value, socket.error): return False need_close = value.errno == 9 if need_close and reqhnd is not None: reqhnd.close_connection = 1 return need_close
python
def can_ignore_error(self, reqhnd=None): """Tests whether the error can be ignored rather than reported. """ value = sys.exc_info()[1] try: if isinstance(value, BrokenPipeError) or \ isinstance(value, ConnectionResetError): return True except NameError: pass if not self.done: return False if not isinstance(value, socket.error): return False need_close = value.errno == 9 if need_close and reqhnd is not None: reqhnd.close_connection = 1 return need_close
[ "def", "can_ignore_error", "(", "self", ",", "reqhnd", "=", "None", ")", ":", "value", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "try", ":", "if", "isinstance", "(", "value", ",", "BrokenPipeError", ")", "or", "isinstance", "(", "value", ",", "ConnectionResetError", ")", ":", "return", "True", "except", "NameError", ":", "pass", "if", "not", "self", ".", "done", ":", "return", "False", "if", "not", "isinstance", "(", "value", ",", "socket", ".", "error", ")", ":", "return", "False", "need_close", "=", "value", ".", "errno", "==", "9", "if", "need_close", "and", "reqhnd", "is", "not", "None", ":", "reqhnd", ".", "close_connection", "=", "1", "return", "need_close" ]
Tests whether the error can be ignored rather than reported.
[ "Tests", "if", "the", "error", "is", "worth", "reporting", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L2768-L2785
train
JosuaKrause/quick_server
quick_server/quick_server.py
QuickServer.handle_error
def handle_error(self, request, client_address): """Handle an error gracefully. """ if self.can_ignore_error(): return thread = threading.current_thread() msg("Error in request ({0}): {1} in {2}\n{3}", client_address, repr(request), thread.name, traceback.format_exc())
python
def handle_error(self, request, client_address): """Handle an error gracefully. """ if self.can_ignore_error(): return thread = threading.current_thread() msg("Error in request ({0}): {1} in {2}\n{3}", client_address, repr(request), thread.name, traceback.format_exc())
[ "def", "handle_error", "(", "self", ",", "request", ",", "client_address", ")", ":", "if", "self", ".", "can_ignore_error", "(", ")", ":", "return", "thread", "=", "threading", ".", "current_thread", "(", ")", "msg", "(", "\"Error in request ({0}): {1} in {2}\\n{3}\"", ",", "client_address", ",", "repr", "(", "request", ")", ",", "thread", ".", "name", ",", "traceback", ".", "format_exc", "(", ")", ")" ]
Handle an error gracefully.
[ "Handle", "an", "error", "gracefully", "." ]
55dc7c5fe726a341f8476f749fe0f9da156fc1cb
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L2787-L2794
train
mgoral/subconvert
src/subconvert/gui/tools/Synchronizer.py
_findRow
def _findRow(subNo, model): """Finds the row in a given model whose first column holds the given subtitle number.""" items = model.findItems(str(subNo)) if len(items) == 0: return None if len(items) > 1: raise IndexError("Too many items with sub number %s" % subNo) return items[0].row()
python
def _findRow(subNo, model): """Finds the row in a given model whose first column holds the given subtitle number.""" items = model.findItems(str(subNo)) if len(items) == 0: return None if len(items) > 1: raise IndexError("Too many items with sub number %s" % subNo) return items[0].row()
[ "def", "_findRow", "(", "subNo", ",", "model", ")", ":", "items", "=", "model", ".", "findItems", "(", "str", "(", "subNo", ")", ")", "if", "len", "(", "items", ")", "==", "0", ":", "return", "None", "if", "len", "(", "items", ")", ">", "1", ":", "raise", "IndexError", "(", "\"Too many items with sub number %s\"", "%", "subNo", ")", "return", "items", "[", "0", "]", ".", "row", "(", ")" ]
Finds the row in a given model whose first column holds the given subtitle number.
[ "Finds", "a", "row", "in", "a", "given", "model", "which", "has", "a", "column", "with", "a", "given", "number", "." ]
59701e5e69ef1ca26ce7d1d766c936664aa2cb32
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/gui/tools/Synchronizer.py#L317-L325
train
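An illustrative sketch, assuming PyQt5 (subconvert is a Qt application), a running Qt application context, and that subtitle numbers live in the model's first column — the column findItems searches by default:

from PyQt5.QtGui import QStandardItemModel, QStandardItem

model = QStandardItemModel()
model.appendRow([QStandardItem('3'), QStandardItem('00:00:01')])
model.appendRow([QStandardItem('7'), QStandardItem('00:00:05')])

print(_findRow(7, model))  # -> 1
print(_findRow(4, model))  # -> None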
mgoral/subconvert
src/subconvert/gui/tools/Synchronizer.py
Synchronizer._subtitlesAdded
def _subtitlesAdded(self, path, subNos): """When a subtitle is added, all syncPoints greater than or equal to the new subtitle are incremented.""" def action(current, count, model, row): _setSubNo(current + count, model, row) def count(current, nos): ret = 0 for no in nos: if current >= no: ret += 1 # consider: current = 0, nos = [0, 1, 2, 3] # in that case, current should be prepended by all nos current += 1 return ret self._changeSubNos(path, subNos, count, action)
python
def _subtitlesAdded(self, path, subNos): """When a subtitle is added, all syncPoints greater than or equal to the new subtitle are incremented.""" def action(current, count, model, row): _setSubNo(current + count, model, row) def count(current, nos): ret = 0 for no in nos: if current >= no: ret += 1 # consider: current = 0, nos = [0, 1, 2, 3] # in that case, current should be prepended by all nos current += 1 return ret self._changeSubNos(path, subNos, count, action)
[ "def", "_subtitlesAdded", "(", "self", ",", "path", ",", "subNos", ")", ":", "def", "action", "(", "current", ",", "count", ",", "model", ",", "row", ")", ":", "_setSubNo", "(", "current", "+", "count", ",", "model", ",", "row", ")", "def", "count", "(", "current", ",", "nos", ")", ":", "ret", "=", "0", "for", "no", "in", "nos", ":", "if", "current", ">=", "no", ":", "ret", "+=", "1", "# consider: current = 0, nos = [0, 1, 2, 3]", "# in that case, current should be prepended by all nos", "current", "+=", "1", "return", "ret", "self", ".", "_changeSubNos", "(", "path", ",", "subNos", ",", "count", ",", "action", ")" ]
When a subtitle is added, all syncPoints greater than or equal to the new subtitle are incremented.
[ "When", "subtitle", "is", "added", "all", "syncPoints", "greater", "or", "equal", "than", "a", "new", "subtitle", "are", "incremented", "." ]
59701e5e69ef1ca26ce7d1d766c936664aa2cb32
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/gui/tools/Synchronizer.py#L222-L238
train
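The nested `count` helper above, in isolation under a hypothetical standalone name: it computes how far an existing sync point shifts when new subtitle numbers are inserted at or before it. Incrementing `current` inside the loop handles runs of consecutive insertions, per the inline comment:

def shift_for_insert(current, nos):
    ret = 0
    for no in nos:
        if current >= no:
            ret += 1
            current += 1   # the point has already moved past this insertion
    return ret

print(shift_for_insert(0, [0, 1, 2, 3]))  # -> 4: the point is pushed past all four
print(shift_for_insert(5, [2, 9]))        # -> 1: only the insertion at 2 shifts it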
mgoral/subconvert
src/subconvert/gui/tools/Synchronizer.py
Synchronizer._subtitlesRemoved
def _subtitlesRemoved(self, path, subNos): """When a subtitle is removed, all syncPoints greater than the removed subtitle are decremented. A syncPoint equal to the removed subtitle is also removed.""" def action(current, count, model, row): if count.equal > 0: model.removeRow(row) else: _setSubNo(current - count.greater_equal, model, row) def count(current, nos): return _GtEqCount(current, nos) self._changeSubNos(path, subNos, count, action)
python
def _subtitlesRemoved(self, path, subNos): """When a subtitle is removed, all syncPoints greater than the removed subtitle are decremented. A syncPoint equal to the removed subtitle is also removed.""" def action(current, count, model, row): if count.equal > 0: model.removeRow(row) else: _setSubNo(current - count.greater_equal, model, row) def count(current, nos): return _GtEqCount(current, nos) self._changeSubNos(path, subNos, count, action)
[ "def", "_subtitlesRemoved", "(", "self", ",", "path", ",", "subNos", ")", ":", "def", "action", "(", "current", ",", "count", ",", "model", ",", "row", ")", ":", "if", "count", ".", "equal", ">", "0", ":", "model", ".", "removeRow", "(", "row", ")", "else", ":", "_setSubNo", "(", "current", "-", "count", ".", "greater_equal", ",", "model", ",", "row", ")", "def", "count", "(", "current", ",", "nos", ")", ":", "return", "_GtEqCount", "(", "current", ",", "nos", ")", "self", ".", "_changeSubNos", "(", "path", ",", "subNos", ",", "count", ",", "action", ")" ]
When a subtitle is removed, all syncPoints greater than the removed subtitle are decremented. A syncPoint equal to the removed subtitle is also removed.
[ "When", "subtitle", "is", "removed", "all", "syncPoints", "greater", "than", "removed", "subtitle", "are", "decremented", ".", "SyncPoint", "equal", "to", "removed", "subtitle", "is", "also", "removed", "." ]
59701e5e69ef1ca26ce7d1d766c936664aa2cb32
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/gui/tools/Synchronizer.py#L240-L253
train
mkoura/dump2polarion
dump2polarion/results/csvtools.py
_get_csv_fieldnames
def _get_csv_fieldnames(csv_reader): """Finds fieldnames in Polarion exported csv file.""" fieldnames = [] for row in csv_reader: for col in row: field = ( col.strip() .replace('"', "") .replace(" ", "") .replace("(", "") .replace(")", "") .lower() ) fieldnames.append(field) if "id" in fieldnames: break else: # this is not a row with fieldnames del fieldnames[:] if not fieldnames: return None # remove trailing unannotated fields while True: field = fieldnames.pop() if field: fieldnames.append(field) break # name unannotated fields suffix = 1 for index, field in enumerate(fieldnames): if not field: fieldnames[index] = "field{}".format(suffix) suffix += 1 return fieldnames
python
def _get_csv_fieldnames(csv_reader): """Finds fieldnames in Polarion exported csv file.""" fieldnames = [] for row in csv_reader: for col in row: field = ( col.strip() .replace('"', "") .replace(" ", "") .replace("(", "") .replace(")", "") .lower() ) fieldnames.append(field) if "id" in fieldnames: break else: # this is not a row with fieldnames del fieldnames[:] if not fieldnames: return None # remove trailing unannotated fields while True: field = fieldnames.pop() if field: fieldnames.append(field) break # name unannotated fields suffix = 1 for index, field in enumerate(fieldnames): if not field: fieldnames[index] = "field{}".format(suffix) suffix += 1 return fieldnames
[ "def", "_get_csv_fieldnames", "(", "csv_reader", ")", ":", "fieldnames", "=", "[", "]", "for", "row", "in", "csv_reader", ":", "for", "col", "in", "row", ":", "field", "=", "(", "col", ".", "strip", "(", ")", ".", "replace", "(", "'\"'", ",", "\"\"", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ".", "replace", "(", "\"(\"", ",", "\"\"", ")", ".", "replace", "(", "\")\"", ",", "\"\"", ")", ".", "lower", "(", ")", ")", "fieldnames", ".", "append", "(", "field", ")", "if", "\"id\"", "in", "fieldnames", ":", "break", "else", ":", "# this is not a row with fieldnames", "del", "fieldnames", "[", ":", "]", "if", "not", "fieldnames", ":", "return", "None", "# remove trailing unannotated fields", "while", "True", ":", "field", "=", "fieldnames", ".", "pop", "(", ")", "if", "field", ":", "fieldnames", ".", "append", "(", "field", ")", "break", "# name unannotated fields", "suffix", "=", "1", "for", "index", ",", "field", "in", "enumerate", "(", "fieldnames", ")", ":", "if", "not", "field", ":", "fieldnames", "[", "index", "]", "=", "\"field{}\"", ".", "format", "(", "suffix", ")", "suffix", "+=", "1", "return", "fieldnames" ]
Finds fieldnames in Polarion exported csv file.
[ "Finds", "fieldnames", "in", "Polarion", "exported", "csv", "file", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/results/csvtools.py#L18-L52
train
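Since the function above only iterates over rows, a plain list of rows illustrates the behavior (the sample rows are hypothetical): banner rows before the header are discarded, trailing blank columns are dropped, and interior blanks receive generated names:

rows = [
    ['Exported by Polarion'],   # no 'id' in it -> not the header row
    ['ID', '', 'Title', ''],    # header row: contains 'id'
]
print(_get_csv_fieldnames(iter(rows)))
# -> ['id', 'field1', 'title']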
mkoura/dump2polarion
dump2polarion/results/csvtools.py
_get_results
def _get_results(csv_reader, fieldnames): """Maps data to fieldnames. The reader needs to be at position after fieldnames, before the results data. """ fieldnames_count = len(fieldnames) results = [] for row in csv_reader: for col in row: if col: break else: # empty row, skip it continue record = OrderedDict(list(zip(fieldnames, row))) # skip rows that were already exported if record.get("exported") == "yes": continue row_len = len(row) if fieldnames_count > row_len: for key in fieldnames[row_len:]: record[key] = None results.append(record) return results
python
def _get_results(csv_reader, fieldnames): """Maps data to fieldnames. The reader needs to be at position after fieldnames, before the results data. """ fieldnames_count = len(fieldnames) results = [] for row in csv_reader: for col in row: if col: break else: # empty row, skip it continue record = OrderedDict(list(zip(fieldnames, row))) # skip rows that were already exported if record.get("exported") == "yes": continue row_len = len(row) if fieldnames_count > row_len: for key in fieldnames[row_len:]: record[key] = None results.append(record) return results
[ "def", "_get_results", "(", "csv_reader", ",", "fieldnames", ")", ":", "fieldnames_count", "=", "len", "(", "fieldnames", ")", "results", "=", "[", "]", "for", "row", "in", "csv_reader", ":", "for", "col", "in", "row", ":", "if", "col", ":", "break", "else", ":", "# empty row, skip it", "continue", "record", "=", "OrderedDict", "(", "list", "(", "zip", "(", "fieldnames", ",", "row", ")", ")", ")", "# skip rows that were already exported", "if", "record", ".", "get", "(", "\"exported\"", ")", "==", "\"yes\"", ":", "continue", "row_len", "=", "len", "(", "row", ")", "if", "fieldnames_count", ">", "row_len", ":", "for", "key", "in", "fieldnames", "[", "row_len", ":", "]", ":", "record", "[", "key", "]", "=", "None", "results", ".", "append", "(", "record", ")", "return", "results" ]
Maps data to fieldnames. The reader needs to be at position after fieldnames, before the results data.
[ "Maps", "data", "to", "fieldnames", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/results/csvtools.py#L83-L107
train
mkoura/dump2polarion
dump2polarion/results/csvtools.py
get_imported_data
def get_imported_data(csv_file, **kwargs): """Reads the content of the Polarion exported csv file and returns imported data.""" open_args = [] open_kwargs = {} try: # pylint: disable=pointless-statement unicode open_args.append("rb") except NameError: open_kwargs["encoding"] = "utf-8" with open(os.path.expanduser(csv_file), *open_args, **open_kwargs) as input_file: reader = _get_csv_reader(input_file) fieldnames = _get_csv_fieldnames(reader) if not fieldnames: raise Dump2PolarionException( "Cannot find field names in CSV file '{}'".format(csv_file) ) results = _get_results(reader, fieldnames) if not results: raise Dump2PolarionException("No results read from CSV file '{}'".format(csv_file)) testrun = _get_testrun_from_csv(input_file, reader) return xunit_exporter.ImportedData(results=results, testrun=testrun)
python
def get_imported_data(csv_file, **kwargs): """Reads the content of the Polarion exported csv file and returns imported data.""" open_args = [] open_kwargs = {} try: # pylint: disable=pointless-statement unicode open_args.append("rb") except NameError: open_kwargs["encoding"] = "utf-8" with open(os.path.expanduser(csv_file), *open_args, **open_kwargs) as input_file: reader = _get_csv_reader(input_file) fieldnames = _get_csv_fieldnames(reader) if not fieldnames: raise Dump2PolarionException( "Cannot find field names in CSV file '{}'".format(csv_file) ) results = _get_results(reader, fieldnames) if not results: raise Dump2PolarionException("No results read from CSV file '{}'".format(csv_file)) testrun = _get_testrun_from_csv(input_file, reader) return xunit_exporter.ImportedData(results=results, testrun=testrun)
[ "def", "get_imported_data", "(", "csv_file", ",", "*", "*", "kwargs", ")", ":", "open_args", "=", "[", "]", "open_kwargs", "=", "{", "}", "try", ":", "# pylint: disable=pointless-statement", "unicode", "open_args", ".", "append", "(", "\"rb\"", ")", "except", "NameError", ":", "open_kwargs", "[", "\"encoding\"", "]", "=", "\"utf-8\"", "with", "open", "(", "os", ".", "path", ".", "expanduser", "(", "csv_file", ")", ",", "*", "open_args", ",", "*", "*", "open_kwargs", ")", "as", "input_file", ":", "reader", "=", "_get_csv_reader", "(", "input_file", ")", "fieldnames", "=", "_get_csv_fieldnames", "(", "reader", ")", "if", "not", "fieldnames", ":", "raise", "Dump2PolarionException", "(", "\"Cannot find field names in CSV file '{}'\"", ".", "format", "(", "csv_file", ")", ")", "results", "=", "_get_results", "(", "reader", ",", "fieldnames", ")", "if", "not", "results", ":", "raise", "Dump2PolarionException", "(", "\"No results read from CSV file '{}'\"", ".", "format", "(", "csv_file", ")", ")", "testrun", "=", "_get_testrun_from_csv", "(", "input_file", ",", "reader", ")", "return", "xunit_exporter", ".", "ImportedData", "(", "results", "=", "results", ",", "testrun", "=", "testrun", ")" ]
Reads the content of the Polarion exported csv file and returns imported data.
[ "Reads", "the", "content", "of", "the", "Polarion", "exported", "csv", "file", "and", "returns", "imported", "data", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/results/csvtools.py#L118-L143
train
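A hedged usage sketch (the file name and the 'verdict' column are hypothetical); per _get_csv_fieldnames above, result keys are lowercased with spaces, quotes, and parentheses stripped:

from dump2polarion.results import csvtools

data = csvtools.get_imported_data('~/exports/polarion_run.csv')
print(data.testrun)                # test run info parsed from the file
for record in data.results[:3]:
    print(record['id'], record.get('verdict'))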
mkoura/dump2polarion
dump2polarion/results/csvtools.py
import_csv
def import_csv(csv_file, **kwargs): """Imports data and checks that all required columns are there.""" records = get_imported_data(csv_file, **kwargs) _check_required_columns(csv_file, records.results) return records
python
def import_csv(csv_file, **kwargs): """Imports data and checks that all required columns are there.""" records = get_imported_data(csv_file, **kwargs) _check_required_columns(csv_file, records.results) return records
[ "def", "import_csv", "(", "csv_file", ",", "*", "*", "kwargs", ")", ":", "records", "=", "get_imported_data", "(", "csv_file", ",", "*", "*", "kwargs", ")", "_check_required_columns", "(", "csv_file", ",", "records", ".", "results", ")", "return", "records" ]
Imports data and checks that all required columns are there.
[ "Imports", "data", "and", "checks", "that", "all", "required", "columns", "are", "there", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/results/csvtools.py#L157-L161
train
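A hypothetical call to the wrapper above, assuming dump2polarion is installed; the CSV path is a placeholder, and Dump2PolarionException is assumed to live in dump2polarion.exceptions:

```python
from dump2polarion.exceptions import Dump2PolarionException  # assumed location
from dump2polarion.results.csvtools import import_csv

try:
    records = import_csv("exported_results.csv")  # placeholder path
    print(len(records.results), "results for run", records.testrun)
except Dump2PolarionException as err:
    print("CSV import failed:", err)
```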
oemof/oemof.db
oemof/db/config.py
load_config
def load_config(filename): """ Load data from config file to `cfg` that can be accessed by get, set afterwards. Specify absolute or relative path to your config file. :param filename: Relative or absolute path :type filename: str """ if filename is None: filename = '' abs_filename = os.path.join(os.getcwd(), filename) global FILE # find the config file if os.path.isfile(filename): FILE = filename elif os.path.isfile(abs_filename): FILE = abs_filename elif os.path.isfile(FILE): pass else: if os.path.dirname(filename): file_not_found = filename else: file_not_found = abs_filename file_not_found_message(file_not_found) # load config init(FILE)
python
def load_config(filename): """ Load data from config file to `cfg` that can be accessed by get, set afterwards. Specify absolute or relative path to your config file. :param filename: Relative or absolute path :type filename: str """ if filename is None: filename = '' abs_filename = os.path.join(os.getcwd(), filename) global FILE # find the config file if os.path.isfile(filename): FILE = filename elif os.path.isfile(abs_filename): FILE = abs_filename elif os.path.isfile(FILE): pass else: if os.path.dirname(filename): file_not_found = filename else: file_not_found = abs_filename file_not_found_message(file_not_found) # load config init(FILE)
[ "def", "load_config", "(", "filename", ")", ":", "if", "filename", "is", "None", ":", "filename", "=", "''", "abs_filename", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "filename", ")", "global", "FILE", "# find the config file", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "FILE", "=", "filename", "elif", "os", ".", "path", ".", "isfile", "(", "abs_filename", ")", ":", "FILE", "=", "abs_filename", "elif", "os", ".", "path", ".", "isfile", "(", "FILE", ")", ":", "pass", "else", ":", "if", "os", ".", "path", ".", "dirname", "(", "filename", ")", ":", "file_not_found", "=", "filename", "else", ":", "file_not_found", "=", "abs_filename", "file_not_found_message", "(", "file_not_found", ")", "# load config", "init", "(", "FILE", ")" ]
Load data from config file to `cfg` that can be accessed by get, set afterwards. Specify absolute or relative path to your config file. :param filename: Relative or absolute path :type filename: str
[ "Load", "data", "from", "config", "file", "to", "cfg", "that", "can", "be", "accessed", "by", "get", "set", "afterwards", "." ]
d51ac50187f03a875bd7ce5991ed4772e8b77b93
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/config.py#L48-L81
train
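The lookup order in load_config can be read off the if/elif chain: the path as given, then the same path joined to the current working directory, then whatever FILE already points at. A standalone sketch of that order (the names here are illustrative, not the module's API):

```python
import os

def resolve_config(filename, remembered_file=""):
    """Illustrative restatement of load_config's search order."""
    filename = filename or ""
    for candidate in (filename,
                      os.path.join(os.getcwd(), filename),
                      remembered_file):
        if candidate and os.path.isfile(candidate):
            return candidate
    return None  # load_config would print a file-not-found message here

print(resolve_config("definitely_missing.ini"))  # None
```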
oemof/oemof.db
oemof/db/config.py
init
def init(FILE): """ Read config file :param FILE: Absolute path to config file (incl. filename) :type FILE: str """ try: cfg.read(FILE) global _loaded _loaded = True except: file_not_found_message(FILE)
python
def init(FILE): """ Read config file :param FILE: Absolute path to config file (incl. filename) :type FILE: str """ try: cfg.read(FILE) global _loaded _loaded = True except: file_not_found_message(FILE)
[ "def", "init", "(", "FILE", ")", ":", "try", ":", "cfg", ".", "read", "(", "FILE", ")", "global", "_loaded", "_loaded", "=", "True", "except", ":", "file_not_found_message", "(", "FILE", ")" ]
Read config file :param FILE: Absolute path to config file (incl. filename) :type FILE: str
[ "Read", "config", "file" ]
d51ac50187f03a875bd7ce5991ed4772e8b77b93
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/config.py#L113-L125
train
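Worth noting about init above: ConfigParser.read() does not raise when the file is missing, it simply omits that filename from its return value, so the bare except rarely fires. A stricter check, as a sketch:

```python
from configparser import ConfigParser

cfg = ConfigParser()
loaded = cfg.read("/no/such/config.ini")  # missing files are skipped silently
if not loaded:
    print("config file not found")        # reached: read() returned []
```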
oemof/oemof.db
oemof/db/config.py
get
def get(section, key): """ returns the value of a given key of a given section of the main config file. :param section: the section. :type section: str. :param key: the key. :type key: str. :returns: the value which will be casted to float, int or boolean. if no cast is successfull, the raw string will be returned. """ # FILE = 'config_misc' if not _loaded: init(FILE) try: return cfg.getfloat(section, key) except Exception: try: return cfg.getint(section, key) except: try: return cfg.getboolean(section, key) except: return cfg.get(section, key)
python
def get(section, key): """ returns the value of a given key of a given section of the main config file. :param section: the section. :type section: str. :param key: the key. :type key: str. :returns: the value which will be casted to float, int or boolean. if no cast is successfull, the raw string will be returned. """ # FILE = 'config_misc' if not _loaded: init(FILE) try: return cfg.getfloat(section, key) except Exception: try: return cfg.getint(section, key) except: try: return cfg.getboolean(section, key) except: return cfg.get(section, key)
[ "def", "get", "(", "section", ",", "key", ")", ":", "# FILE = 'config_misc'", "if", "not", "_loaded", ":", "init", "(", "FILE", ")", "try", ":", "return", "cfg", ".", "getfloat", "(", "section", ",", "key", ")", "except", "Exception", ":", "try", ":", "return", "cfg", ".", "getint", "(", "section", ",", "key", ")", "except", ":", "try", ":", "return", "cfg", ".", "getboolean", "(", "section", ",", "key", ")", "except", ":", "return", "cfg", ".", "get", "(", "section", ",", "key", ")" ]
returns the value of a given key of a given section of the main config file. :param section: the section. :type section: str. :param key: the key. :type key: str. :returns: the value which will be cast to float, int or boolean. if no cast is successful, the raw string will be returned.
[ "returns", "the", "value", "of", "a", "given", "key", "of", "a", "given", "section", "of", "the", "main", "config", "file", "." ]
d51ac50187f03a875bd7ce5991ed4772e8b77b93
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/config.py#L128-L154
train
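The cast cascade in get above (float, then int, then boolean, then raw string) can be exercised standalone; note that because getfloat runs first, an integer-looking value such as a port comes back as a float:

```python
from configparser import ConfigParser

cfg = ConfigParser()
cfg.read_string("[postGIS]\nport = 5432\nssl = yes\nhost = localhost\n")

def typed_get(section, key):
    # same fallback order as oemof.db's get()
    for getter in (cfg.getfloat, cfg.getint, cfg.getboolean):
        try:
            return getter(section, key)
        except ValueError:
            pass
    return cfg.get(section, key)

print(typed_get("postGIS", "port"))  # 5432.0 -- float wins before int
print(typed_get("postGIS", "ssl"))   # True
print(typed_get("postGIS", "host"))  # 'localhost'
```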
praekeltfoundation/seed-message-sender
message_sender/serializers.py
InboundSerializer.to_internal_value
def to_internal_value(self, data): """ Adds extra data to the helper_metadata field. """ if "session_event" in data: data["helper_metadata"]["session_event"] = data["session_event"] return super(InboundSerializer, self).to_internal_value(data)
python
def to_internal_value(self, data): """ Adds extra data to the helper_metadata field. """ if "session_event" in data: data["helper_metadata"]["session_event"] = data["session_event"] return super(InboundSerializer, self).to_internal_value(data)
[ "def", "to_internal_value", "(", "self", ",", "data", ")", ":", "if", "\"session_event\"", "in", "data", ":", "data", "[", "\"helper_metadata\"", "]", "[", "\"session_event\"", "]", "=", "data", "[", "\"session_event\"", "]", "return", "super", "(", "InboundSerializer", ",", "self", ")", ".", "to_internal_value", "(", "data", ")" ]
Adds extra data to the helper_metadata field.
[ "Adds", "extra", "data", "to", "the", "helper_metadata", "field", "." ]
257b01635171b9dbe1f5f13baa810c971bb2620e
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/serializers.py#L97-L104
train
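Outside the serializer machinery, the promotion step above is just a dict copy that runs before normal validation; in isolation:

```python
# The helper_metadata promotion in isolation (no Django/DRF needed).
data = {"session_event": "new", "helper_metadata": {}, "content": "hi"}
if "session_event" in data:
    data["helper_metadata"]["session_event"] = data["session_event"]
print(data["helper_metadata"])  # {'session_event': 'new'}
```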
mgoral/subconvert
src/subconvert/utils/Alias.py
acceptAlias
def acceptAlias(decoratedFunction): """This function should be used as a decorator. Each class method that is decorated will be able to accept alias or original names as a first function positional parameter.""" def wrapper(self, *args, **kwargs): SubAssert(isinstance(self, AliasBase)) if len(args) > 0: key = args[0] if args[0] in self._aliases.keys(): key = self._aliases[args[0]] return decoratedFunction(self, key, *args[1:], **kwargs) return decoratedFunction(self, *args, **kwargs) return wrapper
python
def acceptAlias(decoratedFunction): """This function should be used as a decorator. Each class method that is decorated will be able to accept alias or original names as a first function positional parameter.""" def wrapper(self, *args, **kwargs): SubAssert(isinstance(self, AliasBase)) if len(args) > 0: key = args[0] if args[0] in self._aliases.keys(): key = self._aliases[args[0]] return decoratedFunction(self, key, *args[1:], **kwargs) return decoratedFunction(self, *args, **kwargs) return wrapper
[ "def", "acceptAlias", "(", "decoratedFunction", ")", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "SubAssert", "(", "isinstance", "(", "self", ",", "AliasBase", ")", ")", "if", "len", "(", "args", ")", ">", "0", ":", "key", "=", "args", "[", "0", "]", "if", "args", "[", "0", "]", "in", "self", ".", "_aliases", ".", "keys", "(", ")", ":", "key", "=", "self", ".", "_aliases", "[", "args", "[", "0", "]", "]", "return", "decoratedFunction", "(", "self", ",", "key", ",", "*", "args", "[", "1", ":", "]", ",", "*", "*", "kwargs", ")", "return", "decoratedFunction", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
This function should be used as a decorator. Each class method that is decorated will be able to accept alias or original names as a first function positional parameter.
[ "This", "function", "should", "be", "used", "as", "a", "decorator", ".", "Each", "class", "method", "that", "is", "decorated", "will", "be", "able", "to", "accept", "alias", "or", "original", "names", "as", "a", "first", "function", "positional", "parameter", "." ]
59701e5e69ef1ca26ce7d1d766c936664aa2cb32
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/utils/Alias.py#L24-L36
train
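The decorator above rewrites only the first positional argument. A self-contained sketch of the same pattern, without subconvert's SubAssert/AliasBase machinery:

```python
def accept_alias(func):
    """Resolve the first positional argument through self._aliases."""
    def wrapper(self, *args, **kwargs):
        if args and args[0] in self._aliases:
            return func(self, self._aliases[args[0]], *args[1:], **kwargs)
        return func(self, *args, **kwargs)
    return wrapper

class Registry:
    def __init__(self):
        self._aliases = {"py": "python"}
        self._items = {"python": "CPython 3"}

    @accept_alias
    def lookup(self, key):
        return self._items[key]

print(Registry().lookup("py"))  # 'CPython 3' -- alias resolved first
```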
romanorac/discomll
discomll/ensemble/core/measures.py
h
def h(values): """ Function calculates entropy. values: list of integers """ ent = np.true_divide(values, np.sum(values)) return -np.sum(np.multiply(ent, np.log2(ent)))
python
def h(values): """ Function calculates entropy. values: list of integers """ ent = np.true_divide(values, np.sum(values)) return -np.sum(np.multiply(ent, np.log2(ent)))
[ "def", "h", "(", "values", ")", ":", "ent", "=", "np", ".", "true_divide", "(", "values", ",", "np", ".", "sum", "(", "values", ")", ")", "return", "-", "np", ".", "sum", "(", "np", ".", "multiply", "(", "ent", ",", "np", ".", "log2", "(", "ent", ")", ")", ")" ]
Function calculates entropy. values: list of integers
[ "Function", "calculates", "entropy", "." ]
a4703daffb2ba3c9f614bc3dbe45ae55884aea00
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/ensemble/core/measures.py#L56-L63
train
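Two quick checks of the entropy helper above; note the formula yields NaN if any count is zero, since log2(0) is evaluated before the sum:

```python
from collections import Counter
import numpy as np

def h(values):
    ent = np.true_divide(values, np.sum(values))
    return -np.sum(np.multiply(ent, np.log2(ent)))

print(h(list(Counter("aabb").values())))  # 1.0 bit for a 50/50 split
print(h([1, 3]))                          # ~0.811 -- skewed is less uncertain
```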
romanorac/discomll
discomll/ensemble/core/measures.py
info_gain_nominal
def info_gain_nominal(x, y, separate_max): """ Function calculates information gain for discrete features. If feature is continuous it is firstly discretized. x: numpy array - numerical or discrete feature y: numpy array - labels ft: string - feature type ("c" - continuous, "d" - discrete) split_fun: function - function for discretization of numerical features """ x_vals = np.unique(x) # unique values if len(x_vals) < 3: # if there is just one unique value return None y_dist = Counter(y) # label distribution h_y = h(y_dist.values()) # class entropy # calculate distributions and splits in accordance with feature type dist, splits = nominal_splits(x, y, x_vals, y_dist, separate_max) indices, repeat = (range(1, len(dist)), 1) if len(dist) < 50 else (range(1, len(dist), len(dist) / 10), 3) interval = len(dist) / 10 max_ig, max_i, iteration = 0, 1, 0 while iteration < repeat: for i in indices: dist0 = np.sum([el for el in dist[:i]]) # iter 0: take first distribution dist1 = np.sum([el for el in dist[i:]]) # iter 0: take the other distributions without first coef = np.true_divide([np.sum(dist0.values()), np.sum(dist1.values())], len(y)) ig = h_y - np.dot(coef, [h(dist0.values()), h(dist1.values())]) # calculate information gain if ig > max_ig: max_ig, max_i = ig, i # store index and value of maximal information gain iteration += 1 if repeat > 1: interval = int(interval * 0.5) if max_i in indices and interval > 0: middle_index = indices.index(max_i) else: break min_index = middle_index if middle_index == 0 else middle_index - 1 max_index = middle_index if middle_index == len(indices) - 1 else middle_index + 1 indices = range(indices[min_index], indices[max_index], interval) # store splits of maximal information gain in accordance with feature type return float(max_ig), [splits[:max_i], splits[max_i:]]
python
def info_gain_nominal(x, y, separate_max): """ Function calculates information gain for discrete features. If feature is continuous it is firstly discretized. x: numpy array - numerical or discrete feature y: numpy array - labels ft: string - feature type ("c" - continuous, "d" - discrete) split_fun: function - function for discretization of numerical features """ x_vals = np.unique(x) # unique values if len(x_vals) < 3: # if there is just one unique value return None y_dist = Counter(y) # label distribution h_y = h(y_dist.values()) # class entropy # calculate distributions and splits in accordance with feature type dist, splits = nominal_splits(x, y, x_vals, y_dist, separate_max) indices, repeat = (range(1, len(dist)), 1) if len(dist) < 50 else (range(1, len(dist), len(dist) / 10), 3) interval = len(dist) / 10 max_ig, max_i, iteration = 0, 1, 0 while iteration < repeat: for i in indices: dist0 = np.sum([el for el in dist[:i]]) # iter 0: take first distribution dist1 = np.sum([el for el in dist[i:]]) # iter 0: take the other distributions without first coef = np.true_divide([np.sum(dist0.values()), np.sum(dist1.values())], len(y)) ig = h_y - np.dot(coef, [h(dist0.values()), h(dist1.values())]) # calculate information gain if ig > max_ig: max_ig, max_i = ig, i # store index and value of maximal information gain iteration += 1 if repeat > 1: interval = int(interval * 0.5) if max_i in indices and interval > 0: middle_index = indices.index(max_i) else: break min_index = middle_index if middle_index == 0 else middle_index - 1 max_index = middle_index if middle_index == len(indices) - 1 else middle_index + 1 indices = range(indices[min_index], indices[max_index], interval) # store splits of maximal information gain in accordance with feature type return float(max_ig), [splits[:max_i], splits[max_i:]]
[ "def", "info_gain_nominal", "(", "x", ",", "y", ",", "separate_max", ")", ":", "x_vals", "=", "np", ".", "unique", "(", "x", ")", "# unique values", "if", "len", "(", "x_vals", ")", "<", "3", ":", "# if there is just one unique value", "return", "None", "y_dist", "=", "Counter", "(", "y", ")", "# label distribution", "h_y", "=", "h", "(", "y_dist", ".", "values", "(", ")", ")", "# class entropy", "# calculate distributions and splits in accordance with feature type", "dist", ",", "splits", "=", "nominal_splits", "(", "x", ",", "y", ",", "x_vals", ",", "y_dist", ",", "separate_max", ")", "indices", ",", "repeat", "=", "(", "range", "(", "1", ",", "len", "(", "dist", ")", ")", ",", "1", ")", "if", "len", "(", "dist", ")", "<", "50", "else", "(", "range", "(", "1", ",", "len", "(", "dist", ")", ",", "len", "(", "dist", ")", "/", "10", ")", ",", "3", ")", "interval", "=", "len", "(", "dist", ")", "/", "10", "max_ig", ",", "max_i", ",", "iteration", "=", "0", ",", "1", ",", "0", "while", "iteration", "<", "repeat", ":", "for", "i", "in", "indices", ":", "dist0", "=", "np", ".", "sum", "(", "[", "el", "for", "el", "in", "dist", "[", ":", "i", "]", "]", ")", "# iter 0: take first distribution", "dist1", "=", "np", ".", "sum", "(", "[", "el", "for", "el", "in", "dist", "[", "i", ":", "]", "]", ")", "# iter 0: take the other distributions without first", "coef", "=", "np", ".", "true_divide", "(", "[", "np", ".", "sum", "(", "dist0", ".", "values", "(", ")", ")", ",", "np", ".", "sum", "(", "dist1", ".", "values", "(", ")", ")", "]", ",", "len", "(", "y", ")", ")", "ig", "=", "h_y", "-", "np", ".", "dot", "(", "coef", ",", "[", "h", "(", "dist0", ".", "values", "(", ")", ")", ",", "h", "(", "dist1", ".", "values", "(", ")", ")", "]", ")", "# calculate information gain", "if", "ig", ">", "max_ig", ":", "max_ig", ",", "max_i", "=", "ig", ",", "i", "# store index and value of maximal information gain", "iteration", "+=", "1", "if", "repeat", ">", "1", ":", "interval", "=", "int", "(", "interval", "*", "0.5", ")", "if", "max_i", "in", "indices", "and", "interval", ">", "0", ":", "middle_index", "=", "indices", ".", "index", "(", "max_i", ")", "else", ":", "break", "min_index", "=", "middle_index", "if", "middle_index", "==", "0", "else", "middle_index", "-", "1", "max_index", "=", "middle_index", "if", "middle_index", "==", "len", "(", "indices", ")", "-", "1", "else", "middle_index", "+", "1", "indices", "=", "range", "(", "indices", "[", "min_index", "]", ",", "indices", "[", "max_index", "]", ",", "interval", ")", "# store splits of maximal information gain in accordance with feature type", "return", "float", "(", "max_ig", ")", ",", "[", "splits", "[", ":", "max_i", "]", ",", "splits", "[", "max_i", ":", "]", "]" ]
Function calculates information gain for discrete features. If feature is continuous it is firstly discretized. x: numpy array - numerical or discrete feature y: numpy array - labels ft: string - feature type ("c" - continuous, "d" - discrete) split_fun: function - function for discretization of numerical features
[ "Function", "calculates", "information", "gain", "for", "discrete", "features", ".", "If", "feature", "is", "continuous", "it", "is", "firstly", "discretized", "." ]
a4703daffb2ba3c9f614bc3dbe45ae55884aea00
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/ensemble/core/measures.py#L66-L109
train
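The inner computation of info_gain_nominal is the textbook gain formula; note too that the `len(dist) / 10` range step implies Python 2 integer division (on Python 3 it is a float and would break range). One split's gain, worked by hand:

```python
import numpy as np

def entropy(counts):
    p = np.true_divide(counts, np.sum(counts))
    return -np.sum(p * np.log2(p))

# Labels split 4/4 overall; a candidate cut leaves {3, 1} vs {1, 3}.
h_y = entropy([4, 4])                                    # 1.0
coef = np.true_divide([4, 4], 8)                         # side weights
ig = h_y - np.dot(coef, [entropy([3, 1]), entropy([1, 3])])
print(round(float(ig), 3))                               # 0.189
```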
romanorac/discomll
discomll/ensemble/core/measures.py
multinomLog2
def multinomLog2(selectors): """ Function calculates logarithm 2 of a kind of multinom. selectors: list of integers """ ln2 = 0.69314718055994528622 noAll = sum(selectors) lgNf = math.lgamma(noAll + 1.0) / ln2 # log2(N!) lgnFac = [] for selector in selectors: if selector == 0 or selector == 1: lgnFac.append(0.0) elif selector == 2: lgnFac.append(1.0) elif selector == noAll: lgnFac.append(lgNf) else: lgnFac.append(math.lgamma(selector + 1.0) / ln2) return lgNf - sum(lgnFac)
python
def multinomLog2(selectors): """ Function calculates logarithm 2 of a kind of multinom. selectors: list of integers """ ln2 = 0.69314718055994528622 noAll = sum(selectors) lgNf = math.lgamma(noAll + 1.0) / ln2 # log2(N!) lgnFac = [] for selector in selectors: if selector == 0 or selector == 1: lgnFac.append(0.0) elif selector == 2: lgnFac.append(1.0) elif selector == noAll: lgnFac.append(lgNf) else: lgnFac.append(math.lgamma(selector + 1.0) / ln2) return lgNf - sum(lgnFac)
[ "def", "multinomLog2", "(", "selectors", ")", ":", "ln2", "=", "0.69314718055994528622", "noAll", "=", "sum", "(", "selectors", ")", "lgNf", "=", "math", ".", "lgamma", "(", "noAll", "+", "1.0", ")", "/", "ln2", "# log2(N!)", "lgnFac", "=", "[", "]", "for", "selector", "in", "selectors", ":", "if", "selector", "==", "0", "or", "selector", "==", "1", ":", "lgnFac", ".", "append", "(", "0.0", ")", "elif", "selector", "==", "2", ":", "lgnFac", ".", "append", "(", "1.0", ")", "elif", "selector", "==", "noAll", ":", "lgnFac", ".", "append", "(", "lgNf", ")", "else", ":", "lgnFac", ".", "append", "(", "math", ".", "lgamma", "(", "selector", "+", "1.0", ")", "/", "ln2", ")", "return", "lgNf", "-", "sum", "(", "lgnFac", ")" ]
Function calculates the base-2 logarithm of a multinomial coefficient. selectors: list of integers
[ "Function", "calculates", "logarithm", "2", "of", "a", "kind", "of", "multinom", "." ]
a4703daffb2ba3c9f614bc3dbe45ae55884aea00
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/ensemble/core/measures.py#L154-L175
train
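multinomLog2 computes log2(N! / prod(k_i!)) via lgamma; the branches for 0, 1, 2 and N are just shortcuts for values lgamma would return anyway. A check against the exact binomial coefficient:

```python
import math

def multinom_log2(selectors):
    # lgamma-only version, without the record's shortcut branches
    ln2 = math.log(2)
    total = sum(selectors)
    return (math.lgamma(total + 1.0)
            - sum(math.lgamma(s + 1.0) for s in selectors)) / ln2

print(multinom_log2([2, 2]))   # ~2.585
exact = math.factorial(4) // (math.factorial(2) * math.factorial(2))  # C(4,2) = 6
print(math.log2(exact))        # 2.5849... matches
```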
romanorac/discomll
discomll/ensemble/core/measures.py
calc_mdl
def calc_mdl(yx_dist, y_dist): """ Function calculates mdl with given label distributions. yx_dist: list of dictionaries - for every split it contains a dictionary with label distributions y_dist: dictionary - all label distributions Reference: Igor Kononenko. On biases in estimating multi-valued attributes. In IJCAI, volume 95, pages 1034-1040, 1995. """ prior = multinomLog2(y_dist.values()) prior += multinomLog2([len(y_dist.keys()) - 1, sum(y_dist.values())]) post = 0 for x_val in yx_dist: post += multinomLog2([x_val.get(c, 0) for c in y_dist.keys()]) post += multinomLog2([len(y_dist.keys()) - 1, sum(x_val.values())]) return (prior - post) / float(sum(y_dist.values()))
python
def calc_mdl(yx_dist, y_dist): """ Function calculates mdl with given label distributions. yx_dist: list of dictionaries - for every split it contains a dictionary with label distributions y_dist: dictionary - all label distributions Reference: Igor Kononenko. On biases in estimating multi-valued attributes. In IJCAI, volume 95, pages 1034-1040, 1995. """ prior = multinomLog2(y_dist.values()) prior += multinomLog2([len(y_dist.keys()) - 1, sum(y_dist.values())]) post = 0 for x_val in yx_dist: post += multinomLog2([x_val.get(c, 0) for c in y_dist.keys()]) post += multinomLog2([len(y_dist.keys()) - 1, sum(x_val.values())]) return (prior - post) / float(sum(y_dist.values()))
[ "def", "calc_mdl", "(", "yx_dist", ",", "y_dist", ")", ":", "prior", "=", "multinomLog2", "(", "y_dist", ".", "values", "(", ")", ")", "prior", "+=", "multinomLog2", "(", "[", "len", "(", "y_dist", ".", "keys", "(", ")", ")", "-", "1", ",", "sum", "(", "y_dist", ".", "values", "(", ")", ")", "]", ")", "post", "=", "0", "for", "x_val", "in", "yx_dist", ":", "post", "+=", "multinomLog2", "(", "[", "x_val", ".", "get", "(", "c", ",", "0", ")", "for", "c", "in", "y_dist", ".", "keys", "(", ")", "]", ")", "post", "+=", "multinomLog2", "(", "[", "len", "(", "y_dist", ".", "keys", "(", ")", ")", "-", "1", ",", "sum", "(", "x_val", ".", "values", "(", ")", ")", "]", ")", "return", "(", "prior", "-", "post", ")", "/", "float", "(", "sum", "(", "y_dist", ".", "values", "(", ")", ")", ")" ]
Function calculates mdl with given label distributions. yx_dist: list of dictionaries - for every split it contains a dictionary with label distributions y_dist: dictionary - all label distributions Reference: Igor Kononenko. On biases in estimating multi-valued attributes. In IJCAI, volume 95, pages 1034-1040, 1995.
[ "Function", "calculates", "mdl", "with", "given", "label", "distributions", "." ]
a4703daffb2ba3c9f614bc3dbe45ae55884aea00
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/ensemble/core/measures.py#L178-L195
train
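A tiny end-to-end check of calc_mdl's convention (yx_dist is one Counter per split, y_dist the pooled counts); mlog2 below is a compact stand-in for multinomLog2 from the earlier record:

```python
import math
from collections import Counter

def mlog2(sel):
    ln2 = math.log(2)
    return (math.lgamma(sum(sel) + 1.0)
            - sum(math.lgamma(s + 1.0) for s in sel)) / ln2

def calc_mdl(yx_dist, y_dist):
    prior = mlog2(list(y_dist.values()))
    prior += mlog2([len(y_dist) - 1, sum(y_dist.values())])
    post = 0.0
    for x_val in yx_dist:
        post += mlog2([x_val.get(c, 0) for c in y_dist])
        post += mlog2([len(y_dist) - 1, sum(x_val.values())])
    return (prior - post) / float(sum(y_dist.values()))

y = Counter({"a": 4, "b": 4})
splits = [Counter({"a": 3, "b": 1}), Counter({"a": 1, "b": 3})]
print(calc_mdl(splits, y))  # ~0.082: positive, so the split is informative
```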
romanorac/discomll
discomll/ensemble/core/measures.py
mdl_nominal
def mdl_nominal(x, y, separate_max): """ Function calculates minimum description length for discrete features. If feature is continuous it is firstly discretized. x: numpy array - numerical or discrete feature y: numpy array - labels """ x_vals = np.unique(x) # unique values if len(x_vals) == 1: # if there is just one unique value return None y_dist = Counter(y) # label distribution # calculate distributions and splits in accordance with feature type dist, splits = nominal_splits(x, y, x_vals, y_dist, separate_max) prior_mdl = calc_mdl(dist, y_dist) max_mdl, max_i = 0, 1 for i in range(1, len(dist)): # iter 0: take first distribution dist0_x = [el for el in dist[:i]] dist0_y = np.sum(dist0_x) post_mdl0 = calc_mdl(dist0_x, dist0_y) # iter 0: take the other distributions without first dist1_x = [el for el in dist[i:]] dist1_y = np.sum(dist1_x) post_mdl1 = calc_mdl(dist1_x, dist1_y) coef = np.true_divide([sum(dist0_y.values()), sum(dist1_y.values())], len(x)) mdl_val = prior_mdl - np.dot(coef, [post_mdl0, post_mdl1]) # calculate mdl if mdl_val > max_mdl: max_mdl, max_i = mdl_val, i # store splits of maximal mdl in accordance with feature type split = [splits[:max_i], splits[max_i:]] return (max_mdl, split)
python
def mdl_nominal(x, y, separate_max): """ Function calculates minimum description length for discrete features. If feature is continuous it is firstly discretized. x: numpy array - numerical or discrete feature y: numpy array - labels """ x_vals = np.unique(x) # unique values if len(x_vals) == 1: # if there is just one unique value return None y_dist = Counter(y) # label distribution # calculate distributions and splits in accordance with feature type dist, splits = nominal_splits(x, y, x_vals, y_dist, separate_max) prior_mdl = calc_mdl(dist, y_dist) max_mdl, max_i = 0, 1 for i in range(1, len(dist)): # iter 0: take first distribution dist0_x = [el for el in dist[:i]] dist0_y = np.sum(dist0_x) post_mdl0 = calc_mdl(dist0_x, dist0_y) # iter 0: take the other distributions without first dist1_x = [el for el in dist[i:]] dist1_y = np.sum(dist1_x) post_mdl1 = calc_mdl(dist1_x, dist1_y) coef = np.true_divide([sum(dist0_y.values()), sum(dist1_y.values())], len(x)) mdl_val = prior_mdl - np.dot(coef, [post_mdl0, post_mdl1]) # calculate mdl if mdl_val > max_mdl: max_mdl, max_i = mdl_val, i # store splits of maximal mdl in accordance with feature type split = [splits[:max_i], splits[max_i:]] return (max_mdl, split)
[ "def", "mdl_nominal", "(", "x", ",", "y", ",", "separate_max", ")", ":", "x_vals", "=", "np", ".", "unique", "(", "x", ")", "# unique values", "if", "len", "(", "x_vals", ")", "==", "1", ":", "# if there is just one unique value", "return", "None", "y_dist", "=", "Counter", "(", "y", ")", "# label distribution", "# calculate distributions and splits in accordance with feature type", "dist", ",", "splits", "=", "nominal_splits", "(", "x", ",", "y", ",", "x_vals", ",", "y_dist", ",", "separate_max", ")", "prior_mdl", "=", "calc_mdl", "(", "dist", ",", "y_dist", ")", "max_mdl", ",", "max_i", "=", "0", ",", "1", "for", "i", "in", "range", "(", "1", ",", "len", "(", "dist", ")", ")", ":", "# iter 0: take first distribution", "dist0_x", "=", "[", "el", "for", "el", "in", "dist", "[", ":", "i", "]", "]", "dist0_y", "=", "np", ".", "sum", "(", "dist0_x", ")", "post_mdl0", "=", "calc_mdl", "(", "dist0_x", ",", "dist0_y", ")", "# iter 0: take the other distributions without first", "dist1_x", "=", "[", "el", "for", "el", "in", "dist", "[", "i", ":", "]", "]", "dist1_y", "=", "np", ".", "sum", "(", "dist1_x", ")", "post_mdl1", "=", "calc_mdl", "(", "dist1_x", ",", "dist1_y", ")", "coef", "=", "np", ".", "true_divide", "(", "[", "sum", "(", "dist0_y", ".", "values", "(", ")", ")", ",", "sum", "(", "dist1_y", ".", "values", "(", ")", ")", "]", ",", "len", "(", "x", ")", ")", "mdl_val", "=", "prior_mdl", "-", "np", ".", "dot", "(", "coef", ",", "[", "post_mdl0", ",", "post_mdl1", "]", ")", "# calculate mdl", "if", "mdl_val", ">", "max_mdl", ":", "max_mdl", ",", "max_i", "=", "mdl_val", ",", "i", "# store splits of maximal mdl in accordance with feature type", "split", "=", "[", "splits", "[", ":", "max_i", "]", ",", "splits", "[", "max_i", ":", "]", "]", "return", "(", "max_mdl", ",", "split", ")" ]
Function calculates minimum description length for discrete features. If feature is continuous it is firstly discretized. x: numpy array - numerical or discrete feature y: numpy array - labels
[ "Function", "calculates", "minimum", "description", "length", "for", "discrete", "features", ".", "If", "feature", "is", "continuous", "it", "is", "firstly", "discretized", "." ]
a4703daffb2ba3c9f614bc3dbe45ae55884aea00
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/ensemble/core/measures.py#L198-L233
train
oemof/oemof.db
oemof/db/__init__.py
url
def url(section="postGIS", config_file=None): """ Retrieve the URL used to connect to the database. Use this if you have your own means of accessing the database and do not want to use :func:`engine` or :func:`connection`. Parameters ---------- section : str, optional The `config.ini` section corresponding to the targeted database. It should contain all the details that needed to set up a connection. Returns ------- database URL : str The URL with which one can connect to the database. Be careful as this will probably contain sensitive data like the username/password combination. config_file : str, optional Relative of absolute of config.ini. If not specified, it tries to read from .oemof/config.ini in your HOME dir Notes ----- For documentation on config.ini see the README section on :ref:`configuring <readme#configuration>` :mod:`oemof.db`. """ cfg.load_config(config_file) try: pw = keyring.get_password(cfg.get(section, "database"), cfg.get(section, "username")) except NoSectionError as e: print("There is no section {section} in your config file. Please " "choose one available section from your config file or " "specify a new one!".format( section=section)) exit(-1) if pw is None: try: pw = cfg.get(section, "pw") except option: pw = getpass.getpass(prompt="No password available in your "\ "keyring for database {database}. " "\n\nEnter your password to " \ "store it in " "keyring:".format(database=section)) keyring.set_password(section, cfg.get(section, "username"), pw) except NoSectionError: print("Unable to find the 'postGIS' section in oemof's config." + "\nExiting.") exit(-1) return "postgresql+psycopg2://{user}:{passwd}@{host}:{port}/{db}".format( user=cfg.get(section, "username"), passwd=pw, host=cfg.get(section, "host"), db=cfg.get(section, "database"), port=int(cfg.get(section, "port")))
python
def url(section="postGIS", config_file=None): """ Retrieve the URL used to connect to the database. Use this if you have your own means of accessing the database and do not want to use :func:`engine` or :func:`connection`. Parameters ---------- section : str, optional The `config.ini` section corresponding to the targeted database. It should contain all the details that needed to set up a connection. Returns ------- database URL : str The URL with which one can connect to the database. Be careful as this will probably contain sensitive data like the username/password combination. config_file : str, optional Relative of absolute of config.ini. If not specified, it tries to read from .oemof/config.ini in your HOME dir Notes ----- For documentation on config.ini see the README section on :ref:`configuring <readme#configuration>` :mod:`oemof.db`. """ cfg.load_config(config_file) try: pw = keyring.get_password(cfg.get(section, "database"), cfg.get(section, "username")) except NoSectionError as e: print("There is no section {section} in your config file. Please " "choose one available section from your config file or " "specify a new one!".format( section=section)) exit(-1) if pw is None: try: pw = cfg.get(section, "pw") except option: pw = getpass.getpass(prompt="No password available in your "\ "keyring for database {database}. " "\n\nEnter your password to " \ "store it in " "keyring:".format(database=section)) keyring.set_password(section, cfg.get(section, "username"), pw) except NoSectionError: print("Unable to find the 'postGIS' section in oemof's config." + "\nExiting.") exit(-1) return "postgresql+psycopg2://{user}:{passwd}@{host}:{port}/{db}".format( user=cfg.get(section, "username"), passwd=pw, host=cfg.get(section, "host"), db=cfg.get(section, "database"), port=int(cfg.get(section, "port")))
[ "def", "url", "(", "section", "=", "\"postGIS\"", ",", "config_file", "=", "None", ")", ":", "cfg", ".", "load_config", "(", "config_file", ")", "try", ":", "pw", "=", "keyring", ".", "get_password", "(", "cfg", ".", "get", "(", "section", ",", "\"database\"", ")", ",", "cfg", ".", "get", "(", "section", ",", "\"username\"", ")", ")", "except", "NoSectionError", "as", "e", ":", "print", "(", "\"There is no section {section} in your config file. Please \"", "\"choose one available section from your config file or \"", "\"specify a new one!\"", ".", "format", "(", "section", "=", "section", ")", ")", "exit", "(", "-", "1", ")", "if", "pw", "is", "None", ":", "try", ":", "pw", "=", "cfg", ".", "get", "(", "section", ",", "\"pw\"", ")", "except", "option", ":", "pw", "=", "getpass", ".", "getpass", "(", "prompt", "=", "\"No password available in your \"", "\"keyring for database {database}. \"", "\"\\n\\nEnter your password to \"", "\"store it in \"", "\"keyring:\"", ".", "format", "(", "database", "=", "section", ")", ")", "keyring", ".", "set_password", "(", "section", ",", "cfg", ".", "get", "(", "section", ",", "\"username\"", ")", ",", "pw", ")", "except", "NoSectionError", ":", "print", "(", "\"Unable to find the 'postGIS' section in oemof's config.\"", "+", "\"\\nExiting.\"", ")", "exit", "(", "-", "1", ")", "return", "\"postgresql+psycopg2://{user}:{passwd}@{host}:{port}/{db}\"", ".", "format", "(", "user", "=", "cfg", ".", "get", "(", "section", ",", "\"username\"", ")", ",", "passwd", "=", "pw", ",", "host", "=", "cfg", ".", "get", "(", "section", ",", "\"host\"", ")", ",", "db", "=", "cfg", ".", "get", "(", "section", ",", "\"database\"", ")", ",", "port", "=", "int", "(", "cfg", ".", "get", "(", "section", ",", "\"port\"", ")", ")", ")" ]
Retrieve the URL used to connect to the database. Use this if you have your own means of accessing the database and do not want to use :func:`engine` or :func:`connection`. Parameters ---------- section : str, optional The `config.ini` section corresponding to the targeted database. It should contain all the details needed to set up a connection. Returns ------- database URL : str The URL with which one can connect to the database. Be careful as this will probably contain sensitive data like the username/password combination. config_file : str, optional Relative or absolute path of config.ini. If not specified, it tries to read from .oemof/config.ini in your HOME dir Notes ----- For documentation on config.ini see the README section on :ref:`configuring <readme#configuration>` :mod:`oemof.db`.
[ "Retrieve", "the", "URL", "used", "to", "connect", "to", "the", "database", "." ]
d51ac50187f03a875bd7ce5991ed4772e8b77b93
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/__init__.py#L11-L73
train
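For the record above, the config section needs username, host, database and port, with pw consulted only when the keyring lookup returns nothing; note also that `except option:` in the body looks like a typo for configparser's NoOptionError. The URL assembly on placeholder values:

```python
from configparser import ConfigParser

# Placeholder section with the keys url() reads (values are made up).
cfg = ConfigParser()
cfg.read_string("""
[postGIS]
username = oemof
database = oemof_db
host = localhost
port = 5432
pw = secret
""")

print("postgresql+psycopg2://{user}:{passwd}@{host}:{port}/{db}".format(
    user=cfg.get("postGIS", "username"),
    passwd=cfg.get("postGIS", "pw"),
    host=cfg.get("postGIS", "host"),
    db=cfg.get("postGIS", "database"),
    port=int(cfg.get("postGIS", "port"))))
# postgresql+psycopg2://oemof:secret@localhost:5432/oemof_db
```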
ResidentMario/pysocrata
pysocrata/pysocrata.py
get_endpoints_using_raw_json_emission
def get_endpoints_using_raw_json_emission(domain): """ Implements a raw HTTP GET against the entire Socrata portal for the domain in question. This method uses the first of the two ways of getting this information, the raw JSON endpoint. Parameters ---------- domain: str A Socrata data portal domain. "data.seattle.gov" or "data.cityofnewyork.us" for example. Returns ------- Portal dataset metadata from the JSON endpoint. """ uri = "http://{0}/data.json".format(domain) r = requests.get(uri) r.raise_for_status() return r.json()
python
def get_endpoints_using_raw_json_emission(domain): """ Implements a raw HTTP GET against the entire Socrata portal for the domain in question. This method uses the first of the two ways of getting this information, the raw JSON endpoint. Parameters ---------- domain: str A Socrata data portal domain. "data.seattle.gov" or "data.cityofnewyork.us" for example. Returns ------- Portal dataset metadata from the JSON endpoint. """ uri = "http://{0}/data.json".format(domain) r = requests.get(uri) r.raise_for_status() return r.json()
[ "def", "get_endpoints_using_raw_json_emission", "(", "domain", ")", ":", "uri", "=", "\"http://{0}/data.json\"", ".", "format", "(", "domain", ")", "r", "=", "requests", ".", "get", "(", "uri", ")", "r", ".", "raise_for_status", "(", ")", "return", "r", ".", "json", "(", ")" ]
Implements a raw HTTP GET against the entire Socrata portal for the domain in question. This method uses the first of the two ways of getting this information, the raw JSON endpoint. Parameters ---------- domain: str A Socrata data portal domain. "data.seattle.gov" or "data.cityofnewyork.us" for example. Returns ------- Portal dataset metadata from the JSON endpoint.
[ "Implements", "a", "raw", "HTTP", "GET", "against", "the", "entire", "Socrata", "portal", "for", "the", "domain", "in", "question", ".", "This", "method", "uses", "the", "first", "of", "the", "two", "ways", "of", "getting", "this", "information", "the", "raw", "JSON", "endpoint", "." ]
78d31ed24f9966284043eee45acebd62aa67e5b1
https://github.com/ResidentMario/pysocrata/blob/78d31ed24f9966284043eee45acebd62aa67e5b1/pysocrata/pysocrata.py#L31-L48
train
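The data.json endpoint is plain HTTP, so the record above reduces to a short requests call; data.seattle.gov is just the docstring's example domain, and the result shape assumes the portal publishes a DCAT-style catalog:

```python
import requests

r = requests.get("http://data.seattle.gov/data.json")
r.raise_for_status()
catalog = r.json()
print(len(catalog.get("dataset", [])), "datasets listed")
```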
ResidentMario/pysocrata
pysocrata/pysocrata.py
get_endpoints_using_catalog_api
def get_endpoints_using_catalog_api(domain, token): """ Implements a raw HTTP GET against the entire Socrata portal for the domain in question. This method uses the second of the two ways of getting this information, the catalog API. Parameters ---------- domain: str A Socrata data portal domain. "data.seattle.gov" or "data.cityofnewyork.us" for example. token: str A Socrata application token. Application tokens can be registered by going onto the Socrata portal in question, creating an account, logging in, going to developer tools, and spawning a token. Returns ------- Portal dataset metadata from the catalog API. """ # Token required for all requests. Providing login info instead is also possible but I didn't implement it. headers = {"X-App-Token": token} # The API will return only 100 requests at a time by default. We can ask for more, but the server seems to start # to lag after a certain N requested. Instead, let's pick a less conservative pagination limit and spool up with # offsets. # # At the time this library was written, Socrata would return all of its results in a contiguous list. Once you # maxed out, you wouldn't get any more list items. Later on this was changed so that now if you exhaust portal # entities, it will actually take you back to the beginning of the list again! # # As a result we need to perform our own set-wise check to make sure that what we get isn't just a bit of the # same list all over again. uri = "http://api.us.socrata.com/api/catalog/v1?domains={0}&offset={1}&limit=1000" ret = [] endpoints_thus_far = set() offset = 0 while True: try: r = requests.get(uri.format(domain, offset), headers=headers) r.raise_for_status() except requests.HTTPError: raise requests.HTTPError("An HTTP error was raised during Socrata API ingestion.".format(domain)) data = r.json() endpoints_returned = {r['resource']['id'] for r in data['results']} new_endpoints = endpoints_returned.difference(endpoints_thus_far) if len(new_endpoints) >= 999: # we are continuing to stream # TODO: 999 not 1000 b/c the API suffers off-by-one errors. Can also do worse, however. Compensate? # cf. https://github.com/ResidentMario/pysocrata/issues/1 ret += data['results'] endpoints_thus_far.update(new_endpoints) offset += 1000 continue else: # we are ending on a stream with some old endpoints on it ret += [r for r in data['results'] if r['resource']['id'] in new_endpoints] break return ret
python
def get_endpoints_using_catalog_api(domain, token): """ Implements a raw HTTP GET against the entire Socrata portal for the domain in question. This method uses the second of the two ways of getting this information, the catalog API. Parameters ---------- domain: str A Socrata data portal domain. "data.seattle.gov" or "data.cityofnewyork.us" for example. token: str A Socrata application token. Application tokens can be registered by going onto the Socrata portal in question, creating an account, logging in, going to developer tools, and spawning a token. Returns ------- Portal dataset metadata from the catalog API. """ # Token required for all requests. Providing login info instead is also possible but I didn't implement it. headers = {"X-App-Token": token} # The API will return only 100 requests at a time by default. We can ask for more, but the server seems to start # to lag after a certain N requested. Instead, let's pick a less conservative pagination limit and spool up with # offsets. # # At the time this library was written, Socrata would return all of its results in a contiguous list. Once you # maxed out, you wouldn't get any more list items. Later on this was changed so that now if you exhaust portal # entities, it will actually take you back to the beginning of the list again! # # As a result we need to perform our own set-wise check to make sure that what we get isn't just a bit of the # same list all over again. uri = "http://api.us.socrata.com/api/catalog/v1?domains={0}&offset={1}&limit=1000" ret = [] endpoints_thus_far = set() offset = 0 while True: try: r = requests.get(uri.format(domain, offset), headers=headers) r.raise_for_status() except requests.HTTPError: raise requests.HTTPError("An HTTP error was raised during Socrata API ingestion.".format(domain)) data = r.json() endpoints_returned = {r['resource']['id'] for r in data['results']} new_endpoints = endpoints_returned.difference(endpoints_thus_far) if len(new_endpoints) >= 999: # we are continuing to stream # TODO: 999 not 1000 b/c the API suffers off-by-one errors. Can also do worse, however. Compensate? # cf. https://github.com/ResidentMario/pysocrata/issues/1 ret += data['results'] endpoints_thus_far.update(new_endpoints) offset += 1000 continue else: # we are ending on a stream with some old endpoints on it ret += [r for r in data['results'] if r['resource']['id'] in new_endpoints] break return ret
[ "def", "get_endpoints_using_catalog_api", "(", "domain", ",", "token", ")", ":", "# Token required for all requests. Providing login info instead is also possible but I didn't implement it.", "headers", "=", "{", "\"X-App-Token\"", ":", "token", "}", "# The API will return only 100 requests at a time by default. We can ask for more, but the server seems to start", "# to lag after a certain N requested. Instead, let's pick a less conservative pagination limit and spool up with", "# offsets.", "#", "# At the time this library was written, Socrata would return all of its results in a contiguous list. Once you", "# maxed out, you wouldn't get any more list items. Later on this was changed so that now if you exhaust portal", "# entities, it will actually take you back to the beginning of the list again!", "#", "# As a result we need to perform our own set-wise check to make sure that what we get isn't just a bit of the", "# same list all over again.", "uri", "=", "\"http://api.us.socrata.com/api/catalog/v1?domains={0}&offset={1}&limit=1000\"", "ret", "=", "[", "]", "endpoints_thus_far", "=", "set", "(", ")", "offset", "=", "0", "while", "True", ":", "try", ":", "r", "=", "requests", ".", "get", "(", "uri", ".", "format", "(", "domain", ",", "offset", ")", ",", "headers", "=", "headers", ")", "r", ".", "raise_for_status", "(", ")", "except", "requests", ".", "HTTPError", ":", "raise", "requests", ".", "HTTPError", "(", "\"An HTTP error was raised during Socrata API ingestion.\"", ".", "format", "(", "domain", ")", ")", "data", "=", "r", ".", "json", "(", ")", "endpoints_returned", "=", "{", "r", "[", "'resource'", "]", "[", "'id'", "]", "for", "r", "in", "data", "[", "'results'", "]", "}", "new_endpoints", "=", "endpoints_returned", ".", "difference", "(", "endpoints_thus_far", ")", "if", "len", "(", "new_endpoints", ")", ">=", "999", ":", "# we are continuing to stream", "# TODO: 999 not 1000 b/c the API suffers off-by-one errors. Can also do worse, however. Compensate?", "# cf. https://github.com/ResidentMario/pysocrata/issues/1", "ret", "+=", "data", "[", "'results'", "]", "endpoints_thus_far", ".", "update", "(", "new_endpoints", ")", "offset", "+=", "1000", "continue", "else", ":", "# we are ending on a stream with some old endpoints on it", "ret", "+=", "[", "r", "for", "r", "in", "data", "[", "'results'", "]", "if", "r", "[", "'resource'", "]", "[", "'id'", "]", "in", "new_endpoints", "]", "break", "return", "ret" ]
Implements a raw HTTP GET against the entire Socrata portal for the domain in question. This method uses the second of the two ways of getting this information, the catalog API. Parameters ---------- domain: str A Socrata data portal domain. "data.seattle.gov" or "data.cityofnewyork.us" for example. token: str A Socrata application token. Application tokens can be registered by going onto the Socrata portal in question, creating an account, logging in, going to developer tools, and spawning a token. Returns ------- Portal dataset metadata from the catalog API.
[ "Implements", "a", "raw", "HTTP", "GET", "against", "the", "entire", "Socrata", "portal", "for", "the", "domain", "in", "question", ".", "This", "method", "uses", "the", "second", "of", "the", "two", "ways", "of", "getting", "this", "information", "the", "catalog", "API", "." ]
78d31ed24f9966284043eee45acebd62aa67e5b1
https://github.com/ResidentMario/pysocrata/blob/78d31ed24f9966284043eee45acebd62aa67e5b1/pysocrata/pysocrata.py#L51-L108
train
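The loop above defends against a catalog API that wraps around to the start of the list once the portal is exhausted: page until a response stops contributing (almost) a full batch of unseen ids. The same defensive pattern reduced to its shape, with fetch_page standing in for the HTTP call:

```python
def drain(fetch_page, page_size=1000):
    seen, out, offset = set(), [], 0
    while True:
        results = fetch_page(offset)
        new = {r["id"] for r in results} - seen
        if len(new) >= page_size - 1:   # tolerate the API's off-by-one
            out += results
            seen |= new
            offset += page_size
        else:                           # ending on a page of mostly old ids
            out += [r for r in results if r["id"] in new]
            break
    return out

pages = [[{"id": i} for i in range(1000)],
         [{"id": i} for i in range(900, 1000)]]  # wrap-around: all seen already
print(len(drain(lambda offset: pages[min(offset // 1000, 1)])))  # 1000
```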
ResidentMario/pysocrata
pysocrata/pysocrata.py
count_resources
def count_resources(domain, token): """ Given the domain in question, generates counts for that domain of each of the different data types. Parameters ---------- domain: str A Socrata data portal domain. "data.seattle.gov" or "data.cityofnewyork.us" for example. token: str A Socrata application token. Application tokens can be registered by going onto the Socrata portal in question, creating an account, logging in, going to developer tools, and spawning a token. Returns ------- A dict with counts of the different endpoint types classifiable as published public datasets. """ resources = get_resources(domain, token) return dict(Counter([r['resource']['type'] for r in resources if r['resource']['type'] != 'story']))
python
def count_resources(domain, token): """ Given the domain in question, generates counts for that domain of each of the different data types. Parameters ---------- domain: str A Socrata data portal domain. "data.seattle.gov" or "data.cityofnewyork.us" for example. token: str A Socrata application token. Application tokens can be registered by going onto the Socrata portal in question, creating an account, logging in, going to developer tools, and spawning a token. Returns ------- A dict with counts of the different endpoint types classifiable as published public datasets. """ resources = get_resources(domain, token) return dict(Counter([r['resource']['type'] for r in resources if r['resource']['type'] != 'story']))
[ "def", "count_resources", "(", "domain", ",", "token", ")", ":", "resources", "=", "get_resources", "(", "domain", ",", "token", ")", "return", "dict", "(", "Counter", "(", "[", "r", "[", "'resource'", "]", "[", "'type'", "]", "for", "r", "in", "resources", "if", "r", "[", "'resource'", "]", "[", "'type'", "]", "!=", "'story'", "]", ")", ")" ]
Given the domain in question, generates counts for that domain of each of the different data types. Parameters ---------- domain: str A Socrata data portal domain. "data.seattle.gov" or "data.cityofnewyork.us" for example. token: str A Socrata application token. Application tokens can be registered by going onto the Socrata portal in question, creating an account, logging in, going to developer tools, and spawning a token. Returns ------- A dict with counts of the different endpoint types classifiable as published public datasets.
[ "Given", "the", "domain", "in", "question", "generates", "counts", "for", "that", "domain", "of", "each", "of", "the", "different", "data", "types", "." ]
78d31ed24f9966284043eee45acebd62aa67e5b1
https://github.com/ResidentMario/pysocrata/blob/78d31ed24f9966284043eee45acebd62aa67e5b1/pysocrata/pysocrata.py#L162-L179
train
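The tallying step of count_resources in isolation, on stand-in records shaped like the catalog API's results:

```python
from collections import Counter

# Count resource types, dropping 'story' entries as the record does.
resources = [{"resource": {"type": t}} for t in
             ["dataset", "dataset", "map", "story", "chart"]]
print(dict(Counter(r["resource"]["type"] for r in resources
                   if r["resource"]["type"] != "story")))
# {'dataset': 2, 'map': 1, 'chart': 1}
```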
ngmarchant/oasis
oasis/stratification.py
stratify_by_features
def stratify_by_features(features, n_strata, **kwargs): """Stratify by clustering the items in feature space Parameters ---------- features : array-like, shape=(n_items,n_features) feature matrix for the pool, where rows correspond to items and columns correspond to features. n_strata : int number of strata to create. **kwargs : passed to sklearn.cluster.KMeans Returns ------- Strata instance """ n_items = features.shape[0] km = KMeans(n_clusters=n_strata, **kwargs) allocations = km.fit_predict(X=features) return Strata(allocations)
python
def stratify_by_features(features, n_strata, **kwargs): """Stratify by clustering the items in feature space Parameters ---------- features : array-like, shape=(n_items,n_features) feature matrix for the pool, where rows correspond to items and columns correspond to features. n_strata : int number of strata to create. **kwargs : passed to sklearn.cluster.KMeans Returns ------- Strata instance """ n_items = features.shape[0] km = KMeans(n_clusters=n_strata, **kwargs) allocations = km.fit_predict(X=features) return Strata(allocations)
[ "def", "stratify_by_features", "(", "features", ",", "n_strata", ",", "*", "*", "kwargs", ")", ":", "n_items", "=", "features", ".", "shape", "[", "0", "]", "km", "=", "KMeans", "(", "n_clusters", "=", "n_strata", ",", "*", "*", "kwargs", ")", "allocations", "=", "km", ".", "fit_predict", "(", "X", "=", "features", ")", "return", "Strata", "(", "allocations", ")" ]
Stratify by clustering the items in feature space Parameters ---------- features : array-like, shape=(n_items,n_features) feature matrix for the pool, where rows correspond to items and columns correspond to features. n_strata : int number of strata to create. **kwargs : passed to sklearn.cluster.KMeans Returns ------- Strata instance
[ "Stratify", "by", "clustering", "the", "items", "in", "feature", "space" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/stratification.py#L7-L29
train
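A runnable version of the record above on random data; n_init is pinned explicitly because newer scikit-learn releases changed its default:

```python
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.RandomState(0)
features = rng.rand(100, 2)                    # 100 items, 2 features
km = KMeans(n_clusters=3, n_init=10, random_state=0)
allocations = km.fit_predict(X=features)       # stratum index per item
print(np.bincount(allocations))                # pool size of each stratum
```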
ngmarchant/oasis
oasis/stratification.py
_heuristic_bin_width
def _heuristic_bin_width(obs): """Optimal histogram bin width based on the Freedman-Diaconis rule""" IQR = sp.percentile(obs, 75) - sp.percentile(obs, 25) N = len(obs) return 2*IQR*N**(-1/3)
python
def _heuristic_bin_width(obs): """Optimal histogram bin width based on the Freedman-Diaconis rule""" IQR = sp.percentile(obs, 75) - sp.percentile(obs, 25) N = len(obs) return 2*IQR*N**(-1/3)
[ "def", "_heuristic_bin_width", "(", "obs", ")", ":", "IQR", "=", "sp", ".", "percentile", "(", "obs", ",", "75", ")", "-", "sp", ".", "percentile", "(", "obs", ",", "25", ")", "N", "=", "len", "(", "obs", ")", "return", "2", "*", "IQR", "*", "N", "**", "(", "-", "1", "/", "3", ")" ]
Optimal histogram bin width based on the Freedman-Diaconis rule
[ "Optimal", "histogram", "bin", "width", "based", "on", "the", "Freedman", "-", "Diaconis", "rule" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/stratification.py#L31-L35
train
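One caveat on the record above: N**(-1/3) only equals the Freedman-Diaconis exponent under true division; on Python 2 without `from __future__ import division`, -1/3 floors to -1. The rule as it behaves on Python 3:

```python
import numpy as np

obs = np.random.RandomState(0).normal(size=1000)
iqr = np.percentile(obs, 75) - np.percentile(obs, 25)
width = 2 * iqr * len(obs) ** (-1 / 3)           # Freedman-Diaconis bin width
print(width, int(np.ceil(np.ptp(obs) / width)))  # width and implied bin count
```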
ngmarchant/oasis
oasis/stratification.py
stratify_by_scores
def stratify_by_scores(scores, goal_n_strata='auto', method='cum_sqrt_F', n_bins = 'auto'): """Stratify by binning the items based on their scores Parameters ---------- scores : array-like, shape=(n_items,) ordered array of scores which quantify the classifier confidence for the items in the pool. High scores indicate a high confidence that the true label is a "1" (and vice versa for label "0"). goal_n_strata : int or 'auto', optional, default 'auto' desired number of strata. If set to 'auto', the number is selected using the Freedman-Diaconis rule. Note that for the 'cum_sqrt_F' method this number is a goal -- the actual number of strata created may be less than the goal. method : {'cum_sqrt_F' or 'equal_size'}, optional, default 'cum_sqrt_F' stratification method to use. 'equal_size' aims to create s Other Parameters ---------------- n_bins : int or 'auto', optional, default 'auto' specify the number of bins to use when estimating the distribution of the score function. This is used when ``goal_n_strata = 'auto'`` and/or when ``method = 'cum_sqrt_F'``. If set to 'auto', the number is selected using the Freedman-Diaconis rule. Returns ------- Strata instance """ available_methods = ['equal_size', 'cum_sqrt_F'] if method not in available_methods: raise ValueError("method argument is invalid") if (method == 'cum_sqrt_F') or (goal_n_strata == 'auto'): # computation below is needed for cum_sqrt_F method OR if we need to # determine the number of strata for equal_size method automatically if n_bins == 'auto': # choose n_bins heuristically width_score = _heuristic_bin_width(scores) n_bins = np.ceil(sp.ptp(scores)/width_score).astype(int) print("Automatically setting n_bins = {}.".format(n_bins)) # approx distribution of scores -- called F counts, score_bins = np.histogram(scores, bins=n_bins) # generate cumulative dist of sqrt(F) sqrt_counts = np.sqrt(counts) csf = np.cumsum(sqrt_counts) if goal_n_strata == 'auto': # choose heuristically width_csf = _heuristic_bin_width(csf) goal_n_strata = np.ceil(sp.ptp(csf)/width_csf).astype(int) print("Automatically setting goal_n_strata = {}.".format(goal_n_strata)) elif method == 'cum_sqrt_F': width_csf = csf[-1]/goal_n_strata # goal_n_strata is now guaranteed to have a valid integer value if method == 'equal_size': sorted_ids = scores.argsort() n_items = len(sorted_ids) quotient = n_items // goal_n_strata remainder = n_items % goal_n_strata allocations = np.empty(n_items, dtype='int') st_pops = [quotient for i in range(goal_n_strata)] for i in range(remainder): st_pops[i] += 1 j = 0 for k,nk in enumerate(st_pops): start = j end = j + nk allocations[sorted_ids[start:end]] = k j = end if method == 'cum_sqrt_F': if goal_n_strata > n_bins: warnings.warn("goal_n_strata > n_bins. " "Consider increasing n_bins.") # calculate roughly equal bins on cum sqrt(F) scale csf_bins = [x * width_csf for x in np.arange(goal_n_strata + 1)] # map cum sqrt(F) bins to score bins j = 0 new_bins = [] for (idx,value) in enumerate(csf): if j == (len(csf_bins) - 1) or idx == (len(csf) - 1): new_bins.append(score_bins[-1]) break if value >= csf_bins[j]: new_bins.append(score_bins[idx]) j += 1 new_bins[0] -= 0.01 new_bins[-1] += 0.01 # bin scores based on new_bins allocations = np.digitize(scores, bins=new_bins, right=True) - 1 # remove empty strata nonempty_ids = np.unique(allocations) n_strata = len(nonempty_ids) indices = np.arange(n_strata) allocations = np.digitize(allocations, nonempty_ids, right=True) if n_strata < goal_n_strata: warnings.warn("Failed to create {} strata".format(goal_n_strata)) return Strata(allocations)
python
def stratify_by_scores(scores, goal_n_strata='auto', method='cum_sqrt_F', n_bins = 'auto'): """Stratify by binning the items based on their scores Parameters ---------- scores : array-like, shape=(n_items,) ordered array of scores which quantify the classifier confidence for the items in the pool. High scores indicate a high confidence that the true label is a "1" (and vice versa for label "0"). goal_n_strata : int or 'auto', optional, default 'auto' desired number of strata. If set to 'auto', the number is selected using the Freedman-Diaconis rule. Note that for the 'cum_sqrt_F' method this number is a goal -- the actual number of strata created may be less than the goal. method : {'cum_sqrt_F' or 'equal_size'}, optional, default 'cum_sqrt_F' stratification method to use. 'equal_size' aims to create s Other Parameters ---------------- n_bins : int or 'auto', optional, default 'auto' specify the number of bins to use when estimating the distribution of the score function. This is used when ``goal_n_strata = 'auto'`` and/or when ``method = 'cum_sqrt_F'``. If set to 'auto', the number is selected using the Freedman-Diaconis rule. Returns ------- Strata instance """ available_methods = ['equal_size', 'cum_sqrt_F'] if method not in available_methods: raise ValueError("method argument is invalid") if (method == 'cum_sqrt_F') or (goal_n_strata == 'auto'): # computation below is needed for cum_sqrt_F method OR if we need to # determine the number of strata for equal_size method automatically if n_bins == 'auto': # choose n_bins heuristically width_score = _heuristic_bin_width(scores) n_bins = np.ceil(sp.ptp(scores)/width_score).astype(int) print("Automatically setting n_bins = {}.".format(n_bins)) # approx distribution of scores -- called F counts, score_bins = np.histogram(scores, bins=n_bins) # generate cumulative dist of sqrt(F) sqrt_counts = np.sqrt(counts) csf = np.cumsum(sqrt_counts) if goal_n_strata == 'auto': # choose heuristically width_csf = _heuristic_bin_width(csf) goal_n_strata = np.ceil(sp.ptp(csf)/width_csf).astype(int) print("Automatically setting goal_n_strata = {}.".format(goal_n_strata)) elif method == 'cum_sqrt_F': width_csf = csf[-1]/goal_n_strata # goal_n_strata is now guaranteed to have a valid integer value if method == 'equal_size': sorted_ids = scores.argsort() n_items = len(sorted_ids) quotient = n_items // goal_n_strata remainder = n_items % goal_n_strata allocations = np.empty(n_items, dtype='int') st_pops = [quotient for i in range(goal_n_strata)] for i in range(remainder): st_pops[i] += 1 j = 0 for k,nk in enumerate(st_pops): start = j end = j + nk allocations[sorted_ids[start:end]] = k j = end if method == 'cum_sqrt_F': if goal_n_strata > n_bins: warnings.warn("goal_n_strata > n_bins. " "Consider increasing n_bins.") # calculate roughly equal bins on cum sqrt(F) scale csf_bins = [x * width_csf for x in np.arange(goal_n_strata + 1)] # map cum sqrt(F) bins to score bins j = 0 new_bins = [] for (idx,value) in enumerate(csf): if j == (len(csf_bins) - 1) or idx == (len(csf) - 1): new_bins.append(score_bins[-1]) break if value >= csf_bins[j]: new_bins.append(score_bins[idx]) j += 1 new_bins[0] -= 0.01 new_bins[-1] += 0.01 # bin scores based on new_bins allocations = np.digitize(scores, bins=new_bins, right=True) - 1 # remove empty strata nonempty_ids = np.unique(allocations) n_strata = len(nonempty_ids) indices = np.arange(n_strata) allocations = np.digitize(allocations, nonempty_ids, right=True) if n_strata < goal_n_strata: warnings.warn("Failed to create {} strata".format(goal_n_strata)) return Strata(allocations)
[ "def", "stratify_by_scores", "(", "scores", ",", "goal_n_strata", "=", "'auto'", ",", "method", "=", "'cum_sqrt_F'", ",", "n_bins", "=", "'auto'", ")", ":", "available_methods", "=", "[", "'equal_size'", ",", "'cum_sqrt_F'", "]", "if", "method", "not", "in", "available_methods", ":", "raise", "ValueError", "(", "\"method argument is invalid\"", ")", "if", "(", "method", "==", "'cum_sqrt_F'", ")", "or", "(", "goal_n_strata", "==", "'auto'", ")", ":", "# computation below is needed for cum_sqrt_F method OR if we need to", "# determine the number of strata for equal_size method automatically", "if", "n_bins", "==", "'auto'", ":", "# choose n_bins heuristically", "width_score", "=", "_heuristic_bin_width", "(", "scores", ")", "n_bins", "=", "np", ".", "ceil", "(", "sp", ".", "ptp", "(", "scores", ")", "/", "width_score", ")", ".", "astype", "(", "int", ")", "print", "(", "\"Automatically setting n_bins = {}.\"", ".", "format", "(", "n_bins", ")", ")", "# approx distribution of scores -- called F", "counts", ",", "score_bins", "=", "np", ".", "histogram", "(", "scores", ",", "bins", "=", "n_bins", ")", "# generate cumulative dist of sqrt(F)", "sqrt_counts", "=", "np", ".", "sqrt", "(", "counts", ")", "csf", "=", "np", ".", "cumsum", "(", "sqrt_counts", ")", "if", "goal_n_strata", "==", "'auto'", ":", "# choose heuristically", "width_csf", "=", "_heuristic_bin_width", "(", "csf", ")", "goal_n_strata", "=", "np", ".", "ceil", "(", "sp", ".", "ptp", "(", "csf", ")", "/", "width_csf", ")", ".", "astype", "(", "int", ")", "print", "(", "\"Automatically setting goal_n_strata = {}.\"", ".", "format", "(", "goal_n_strata", ")", ")", "elif", "method", "==", "'cum_sqrt_F'", ":", "width_csf", "=", "csf", "[", "-", "1", "]", "/", "goal_n_strata", "# goal_n_strata is now guaranteed to have a valid integer value", "if", "method", "==", "'equal_size'", ":", "sorted_ids", "=", "scores", ".", "argsort", "(", ")", "n_items", "=", "len", "(", "sorted_ids", ")", "quotient", "=", "n_items", "//", "goal_n_strata", "remainder", "=", "n_items", "%", "goal_n_strata", "allocations", "=", "np", ".", "empty", "(", "n_items", ",", "dtype", "=", "'int'", ")", "st_pops", "=", "[", "quotient", "for", "i", "in", "range", "(", "goal_n_strata", ")", "]", "for", "i", "in", "range", "(", "remainder", ")", ":", "st_pops", "[", "i", "]", "+=", "1", "j", "=", "0", "for", "k", ",", "nk", "in", "enumerate", "(", "st_pops", ")", ":", "start", "=", "j", "end", "=", "j", "+", "nk", "allocations", "[", "sorted_ids", "[", "start", ":", "end", "]", "]", "=", "k", "j", "=", "end", "if", "method", "==", "'cum_sqrt_F'", ":", "if", "goal_n_strata", ">", "n_bins", ":", "warnings", ".", "warn", "(", "\"goal_n_strata > n_bins. 
\"", "\"Consider increasing n_bins.\"", ")", "# calculate roughly equal bins on cum sqrt(F) scale", "csf_bins", "=", "[", "x", "*", "width_csf", "for", "x", "in", "np", ".", "arange", "(", "goal_n_strata", "+", "1", ")", "]", "# map cum sqrt(F) bins to score bins", "j", "=", "0", "new_bins", "=", "[", "]", "for", "(", "idx", ",", "value", ")", "in", "enumerate", "(", "csf", ")", ":", "if", "j", "==", "(", "len", "(", "csf_bins", ")", "-", "1", ")", "or", "idx", "==", "(", "len", "(", "csf", ")", "-", "1", ")", ":", "new_bins", ".", "append", "(", "score_bins", "[", "-", "1", "]", ")", "break", "if", "value", ">=", "csf_bins", "[", "j", "]", ":", "new_bins", ".", "append", "(", "score_bins", "[", "idx", "]", ")", "j", "+=", "1", "new_bins", "[", "0", "]", "-=", "0.01", "new_bins", "[", "-", "1", "]", "+=", "0.01", "# bin scores based on new_bins", "allocations", "=", "np", ".", "digitize", "(", "scores", ",", "bins", "=", "new_bins", ",", "right", "=", "True", ")", "-", "1", "# remove empty strata", "nonempty_ids", "=", "np", ".", "unique", "(", "allocations", ")", "n_strata", "=", "len", "(", "nonempty_ids", ")", "indices", "=", "np", ".", "arange", "(", "n_strata", ")", "allocations", "=", "np", ".", "digitize", "(", "allocations", ",", "nonempty_ids", ",", "right", "=", "True", ")", "if", "n_strata", "<", "goal_n_strata", ":", "warnings", ".", "warn", "(", "\"Failed to create {} strata\"", ".", "format", "(", "goal_n_strata", ")", ")", "return", "Strata", "(", "allocations", ")" ]
Stratify by binning the items based on their scores Parameters ---------- scores : array-like, shape=(n_items,) ordered array of scores which quantify the classifier confidence for the items in the pool. High scores indicate a high confidence that the true label is a "1" (and vice versa for label "0"). goal_n_strata : int or 'auto', optional, default 'auto' desired number of strata. If set to 'auto', the number is selected using the Freedman-Diaconis rule. Note that for the 'cum_sqrt_F' method this number is a goal -- the actual number of strata created may be less than the goal. method : {'cum_sqrt_F' or 'equal_size'}, optional, default 'cum_sqrt_F' stratification method to use. 'equal_size' aims to create strata containing equal numbers of items, while 'cum_sqrt_F' uses the cumulative square root frequency method. Other Parameters ---------------- n_bins : int or 'auto', optional, default 'auto' specify the number of bins to use when estimating the distribution of the score function. This is used when ``goal_n_strata = 'auto'`` and/or when ``method = 'cum_sqrt_F'``. If set to 'auto', the number is selected using the Freedman-Diaconis rule. Returns ------- Strata instance
[ "Stratify", "by", "binning", "the", "items", "based", "on", "their", "scores" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/stratification.py#L37-L150
train
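A minimal usage sketch for the stratify_by_scores record above -- not part of the dataset. It assumes numpy is installed and that the function is importable from oasis.stratification, as the url field indicates; the synthetic score distribution is purely illustrative.

import numpy as np
from oasis.stratification import stratify_by_scores

# Bimodal synthetic classifier scores in [0, 1]; cum_sqrt_F should place
# narrower strata where the score density is high.
rng = np.random.RandomState(0)
scores = np.concatenate([rng.beta(2, 8, size=5000), rng.beta(8, 2, size=5000)])

strata = stratify_by_scores(scores, goal_n_strata=8, method='cum_sqrt_F')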
ngmarchant/oasis
oasis/stratification.py
auto_stratify
def auto_stratify(scores, **kwargs): """Generate Strata instance automatically Parameters ---------- scores : array-like, shape=(n_items,) ordered array of scores which quantify the classifier confidence for the items in the pool. High scores indicate a high confidence that the true label is a "1" (and vice versa for label "0"). **kwargs : optional keyword arguments. May include 'stratification_method', 'stratification_n_strata', 'stratification_n_bins'. Returns ------- Strata instance """ if 'stratification_method' in kwargs: method = kwargs['stratification_method'] else: method = 'cum_sqrt_F' if 'stratification_n_strata' in kwargs: n_strata = kwargs['stratification_n_strata'] else: n_strata = 'auto' if 'stratification_n_bins' in kwargs: n_bins = kwargs['stratification_n_bins'] strata = stratify_by_scores(scores, n_strata, method = method, \ n_bins = n_bins) else: strata = stratify_by_scores(scores, n_strata, method = method) return strata
python
def auto_stratify(scores, **kwargs): """Generate Strata instance automatically Parameters ---------- scores : array-like, shape=(n_items,) ordered array of scores which quantify the classifier confidence for the items in the pool. High scores indicate a high confidence that the true label is a "1" (and vice versa for label "0"). **kwargs : optional keyword arguments. May include 'stratification_method', 'stratification_n_strata', 'stratification_n_bins'. Returns ------- Strata instance """ if 'stratification_method' in kwargs: method = kwargs['stratification_method'] else: method = 'cum_sqrt_F' if 'stratification_n_strata' in kwargs: n_strata = kwargs['stratification_n_strata'] else: n_strata = 'auto' if 'stratification_n_bins' in kwargs: n_bins = kwargs['stratification_n_bins'] strata = stratify_by_scores(scores, n_strata, method = method, \ n_bins = n_bins) else: strata = stratify_by_scores(scores, n_strata, method = method) return strata
[ "def", "auto_stratify", "(", "scores", ",", "*", "*", "kwargs", ")", ":", "if", "'stratification_method'", "in", "kwargs", ":", "method", "=", "kwargs", "[", "'stratification_method'", "]", "else", ":", "method", "=", "'cum_sqrt_F'", "if", "'stratification_n_strata'", "in", "kwargs", ":", "n_strata", "=", "kwargs", "[", "'stratification_n_strata'", "]", "else", ":", "n_strata", "=", "'auto'", "if", "'stratification_n_bins'", "in", "kwargs", ":", "n_bins", "=", "kwargs", "[", "'stratification_n_bins'", "]", "strata", "=", "stratify_by_scores", "(", "scores", ",", "n_strata", ",", "method", "=", "method", ",", "n_bins", "=", "n_bins", ")", "else", ":", "strata", "=", "stratify_by_scores", "(", "scores", ",", "n_strata", ",", "method", "=", "method", ")", "return", "strata" ]
Generate Strata instance automatically Parameters ---------- scores : array-like, shape=(n_items,) ordered array of scores which quantify the classifier confidence for the items in the pool. High scores indicate a high confidence that the true label is a "1" (and vice versa for label "0"). **kwargs : optional keyword arguments. May include 'stratification_method', 'stratification_n_strata', 'stratification_n_bins'. Returns ------- Strata instance
[ "Generate", "Strata", "instance", "automatically" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/stratification.py#L152-L184
train
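auto_stratify above is a thin keyword-argument adapter over stratify_by_scores; a sketch of the call it enables, continuing the scores array from the previous sketch:

strata = auto_stratify(scores,
                       stratification_method='equal_size',
                       stratification_n_strata=10)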
ngmarchant/oasis
oasis/stratification.py
Strata._sample_stratum
def _sample_stratum(self, pmf=None, replace=True): """Sample a stratum Parameters ---------- pmf : array-like, shape=(n_strata,), optional, default None probability distribution to use when sampling from the strata. If not given, use the stratum weights. replace : bool, optional, default True whether to sample with replacement Returns ------- int a randomly selected stratum index """ if pmf is None: # Use weights pmf = self.weights_ if not replace: # Find strata which have been fully sampled (i.e. are now empty) empty = (self._n_sampled >= self.sizes_) if np.any(empty): pmf = copy.copy(pmf) pmf[empty] = 0 if np.sum(pmf) == 0: raise(RuntimeError) pmf /= np.sum(pmf) return np.random.choice(self.indices_, p = pmf)
python
def _sample_stratum(self, pmf=None, replace=True): """Sample a stratum Parameters ---------- pmf : array-like, shape=(n_strata,), optional, default None probability distribution to use when sampling from the strata. If not given, use the stratum weights. replace : bool, optional, default True whether to sample with replacement Returns ------- int a randomly selected stratum index """ if pmf is None: # Use weights pmf = self.weights_ if not replace: # Find strata which have been fully sampled (i.e. are now empty) empty = (self._n_sampled >= self.sizes_) if np.any(empty): pmf = copy.copy(pmf) pmf[empty] = 0 if np.sum(pmf) == 0: raise(RuntimeError) pmf /= np.sum(pmf) return np.random.choice(self.indices_, p = pmf)
[ "def", "_sample_stratum", "(", "self", ",", "pmf", "=", "None", ",", "replace", "=", "True", ")", ":", "if", "pmf", "is", "None", ":", "# Use weights", "pmf", "=", "self", ".", "weights_", "if", "not", "replace", ":", "# Find strata which have been fully sampled (i.e. are now empty)", "empty", "=", "(", "self", ".", "_n_sampled", ">=", "self", ".", "sizes_", ")", "if", "np", ".", "any", "(", "empty", ")", ":", "pmf", "=", "copy", ".", "copy", "(", "pmf", ")", "pmf", "[", "empty", "]", "=", "0", "if", "np", ".", "sum", "(", "pmf", ")", "==", "0", ":", "raise", "(", "RuntimeError", ")", "pmf", "/=", "np", ".", "sum", "(", "pmf", ")", "return", "np", ".", "random", ".", "choice", "(", "self", ".", "indices_", ",", "p", "=", "pmf", ")" ]
Sample a stratum Parameters ---------- pmf : array-like, shape=(n_strata,), optional, default None probability distribution to use when sampling from the strata. If not given, use the stratum weights. replace : bool, optional, default True whether to sample with replacement Returns ------- int a randomly selected stratum index
[ "Sample", "a", "stratum" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/stratification.py#L258-L289
train
ngmarchant/oasis
oasis/stratification.py
Strata._sample_in_stratum
def _sample_in_stratum(self, stratum_idx, replace = True): """Sample an item uniformly from a stratum Parameters ---------- stratum_idx : int stratum index to sample from replace : bool, optional, default True whether to sample with replacement Returns ------- int location of the randomly selected item in the original input array """ if replace: stratum_loc = np.random.choice(self.sizes_[stratum_idx]) else: # Extract only the unsampled items stratum_locs = np.where(~self._sampled[stratum_idx])[0] stratum_loc = np.random.choice(stratum_locs) # Record that item has been sampled self._sampled[stratum_idx][stratum_loc] = True self._n_sampled[stratum_idx] += 1 # Get generic location loc = self.allocations_[stratum_idx][stratum_loc] return loc
python
def _sample_in_stratum(self, stratum_idx, replace = True): """Sample an item uniformly from a stratum Parameters ---------- stratum_idx : int stratum index to sample from replace : bool, optional, default True whether to sample with replacement Returns ------- int location of the randomly selected item in the original input array """ if replace: stratum_loc = np.random.choice(self.sizes_[stratum_idx]) else: # Extract only the unsampled items stratum_locs = np.where(~self._sampled[stratum_idx])[0] stratum_loc = np.random.choice(stratum_locs) # Record that item has been sampled self._sampled[stratum_idx][stratum_loc] = True self._n_sampled[stratum_idx] += 1 # Get generic location loc = self.allocations_[stratum_idx][stratum_loc] return loc
[ "def", "_sample_in_stratum", "(", "self", ",", "stratum_idx", ",", "replace", "=", "True", ")", ":", "if", "replace", ":", "stratum_loc", "=", "np", ".", "random", ".", "choice", "(", "self", ".", "sizes_", "[", "stratum_idx", "]", ")", "else", ":", "# Extract only the unsampled items", "stratum_locs", "=", "np", ".", "where", "(", "~", "self", ".", "_sampled", "[", "stratum_idx", "]", ")", "[", "0", "]", "stratum_loc", "=", "np", ".", "random", ".", "choice", "(", "stratum_locs", ")", "# Record that item has been sampled", "self", ".", "_sampled", "[", "stratum_idx", "]", "[", "stratum_loc", "]", "=", "True", "self", ".", "_n_sampled", "[", "stratum_idx", "]", "+=", "1", "# Get generic location", "loc", "=", "self", ".", "allocations_", "[", "stratum_idx", "]", "[", "stratum_loc", "]", "return", "loc" ]
Sample an item uniformly from a stratum Parameters ---------- stratum_idx : int stratum index to sample from replace : bool, optional, default True whether to sample with replacement Returns ------- int location of the randomly selected item in the original input array
[ "Sample", "an", "item", "uniformly", "from", "a", "stratum" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/stratification.py#L291-L319
train
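_sample_stratum and _sample_in_stratum compose into the usual two-stage stratified draw: pick a stratum from a pmf, then pick an item uniformly inside it. A sketch of that loop over the strata object built earlier; both methods are private, so this mirrors internal usage rather than a public API:

# Sample 100 item locations without replacement, stratum first, item second.
locations = []
for _ in range(100):
    k = strata._sample_stratum(replace=False)          # stratum index
    loc = strata._sample_in_stratum(k, replace=False)  # location in the pool
    locations.append(loc)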
ngmarchant/oasis
oasis/stratification.py
Strata.intra_mean
def intra_mean(self, values): """Calculate the mean of a quantity within strata Parameters ---------- values : array-like, shape=(n_items,n_class) array containing the values of the quantity for each item in the pool Returns ------- numpy.ndarray, shape=(n_strata,n_class) array containing the mean value of the quantity within each stratum """ # TODO Check that quantity is valid if values.ndim > 1: return np.array([np.mean(values[x,:], axis=0) for x in self.allocations_]) else: return np.array([np.mean(values[x]) for x in self.allocations_])
python
def intra_mean(self, values): """Calculate the mean of a quantity within strata Parameters ---------- values : array-like, shape=(n_items,n_class) array containing the values of the quantity for each item in the pool Returns ------- numpy.ndarray, shape=(n_strata,n_class) array containing the mean value of the quantity within each stratum """ # TODO Check that quantity is valid if values.ndim > 1: return np.array([np.mean(values[x,:], axis=0) for x in self.allocations_]) else: return np.array([np.mean(values[x]) for x in self.allocations_])
[ "def", "intra_mean", "(", "self", ",", "values", ")", ":", "# TODO Check that quantity is valid", "if", "values", ".", "ndim", ">", "1", ":", "return", "np", ".", "array", "(", "[", "np", ".", "mean", "(", "values", "[", "x", ",", ":", "]", ",", "axis", "=", "0", ")", "for", "x", "in", "self", ".", "allocations_", "]", ")", "else", ":", "return", "np", ".", "array", "(", "[", "np", ".", "mean", "(", "values", "[", "x", "]", ")", "for", "x", "in", "self", ".", "allocations_", "]", ")" ]
Calculate the mean of a quantity within strata Parameters ---------- values : array-like, shape=(n_items,n_class) array containing the values of the quantity for each item in the pool Returns ------- numpy.ndarray, shape=(n_strata,n_class) array containing the mean value of the quantity within each stratum
[ "Calculate", "the", "mean", "of", "a", "quantity", "within", "strata" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/stratification.py#L345-L363
train
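intra_mean is handy as a sanity check that the strata really are ordered by classifier confidence; a sketch reusing the scores and strata from the earlier records:

import numpy as np

stratum_means = strata.intra_mean(scores)
print(np.round(stratum_means, 3))  # should increase (roughly) across strata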
ngmarchant/oasis
oasis/stratification.py
Strata.reset
def reset(self): """Reset the instance to begin sampling from scratch""" self._sampled = [np.repeat(False, x) for x in self.sizes_] self._n_sampled = np.zeros(self.n_strata_, dtype=int)
python
def reset(self): """Reset the instance to begin sampling from scratch""" self._sampled = [np.repeat(False, x) for x in self.sizes_] self._n_sampled = np.zeros(self.n_strata_, dtype=int)
[ "def", "reset", "(", "self", ")", ":", "self", ".", "_sampled", "=", "[", "np", ".", "repeat", "(", "False", ",", "x", ")", "for", "x", "in", "self", ".", "sizes_", "]", "self", ".", "_n_sampled", "=", "np", ".", "zeros", "(", "self", ".", "n_strata_", ",", "dtype", "=", "int", ")" ]
Reset the instance to begin sampling from scratch
[ "Reset", "the", "instance", "to", "begin", "sampling", "from", "scratch" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/stratification.py#L365-L368
train
ludeeus/GHLocalApi
examples/bluetooth_scan_network.py
bluetooth_scan
async def bluetooth_scan(): """ Get devices from all GH units on the network. This will scan the IPRANGE defined above for GH units. Then do a multirun scan on each unit, compiling all devices from all units so you can see which unit has the strongest signal to the device. """ devices = {} async with aiohttp.ClientSession() as session: ghlocalapi = NetworkScan(LOOP, session) result = await ghlocalapi.scan_for_units(IPRANGE) for host in result: if host['assistant_supported']: async with aiohttp.ClientSession() as session: ghlocalapi = DeviceInfo(LOOP, session, host['host']) await ghlocalapi.get_device_info() ghname = ghlocalapi.device_info.get('name') async with aiohttp.ClientSession() as session: ghlocalapi = Bluetooth(LOOP, session, host['host']) await ghlocalapi.scan_for_devices_multi_run() await ghlocalapi.get_scan_result() for device in ghlocalapi.devices: mac = device['mac_address'] if not devices.get(mac, False): # New device devices[mac] = {} devices[mac]['rssi'] = device['rssi'] devices[mac]['ghunit'] = ghname elif devices[mac]['rssi'] < device['rssi']: # Better RSSI value on this device devices[mac]['rssi'] = device['rssi'] devices[mac]['ghunit'] = ghname print(devices)
python
async def bluetooth_scan(): """ Get devices from all GH units on the network. This will scan the IPRANGE defined above for GH units. Then do a multirun scan on each unit, compiling all devices from all units so you can see which unit has the strongest signal to the device. """ devices = {} async with aiohttp.ClientSession() as session: ghlocalapi = NetworkScan(LOOP, session) result = await ghlocalapi.scan_for_units(IPRANGE) for host in result: if host['assistant_supported']: async with aiohttp.ClientSession() as session: ghlocalapi = DeviceInfo(LOOP, session, host['host']) await ghlocalapi.get_device_info() ghname = ghlocalapi.device_info.get('name') async with aiohttp.ClientSession() as session: ghlocalapi = Bluetooth(LOOP, session, host['host']) await ghlocalapi.scan_for_devices_multi_run() await ghlocalapi.get_scan_result() for device in ghlocalapi.devices: mac = device['mac_address'] if not devices.get(mac, False): # New device devices[mac] = {} devices[mac]['rssi'] = device['rssi'] devices[mac]['ghunit'] = ghname elif devices[mac]['rssi'] < device['rssi']: # Better RSSI value on this device devices[mac]['rssi'] = device['rssi'] devices[mac]['ghunit'] = ghname print(devices)
[ "async", "def", "bluetooth_scan", "(", ")", ":", "devices", "=", "{", "}", "async", "with", "aiohttp", ".", "ClientSession", "(", ")", "as", "session", ":", "ghlocalapi", "=", "NetworkScan", "(", "LOOP", ",", "session", ")", "result", "=", "await", "ghlocalapi", ".", "scan_for_units", "(", "IPRANGE", ")", "for", "host", "in", "result", ":", "if", "host", "[", "'assistant_supported'", "]", ":", "async", "with", "aiohttp", ".", "ClientSession", "(", ")", "as", "session", ":", "ghlocalapi", "=", "DeviceInfo", "(", "LOOP", ",", "session", ",", "host", "[", "'host'", "]", ")", "await", "ghlocalapi", ".", "get_device_info", "(", ")", "ghname", "=", "ghlocalapi", ".", "device_info", ".", "get", "(", "'name'", ")", "async", "with", "aiohttp", ".", "ClientSession", "(", ")", "as", "session", ":", "ghlocalapi", "=", "Bluetooth", "(", "LOOP", ",", "session", ",", "host", "[", "'host'", "]", ")", "await", "ghlocalapi", ".", "scan_for_devices_multi_run", "(", ")", "await", "ghlocalapi", ".", "get_scan_result", "(", ")", "for", "device", "in", "ghlocalapi", ".", "devices", ":", "mac", "=", "device", "[", "'mac_address'", "]", "if", "not", "devices", ".", "get", "(", "mac", ",", "False", ")", ":", "# New device", "devices", "[", "mac", "]", "=", "{", "}", "devices", "[", "mac", "]", "[", "'rssi'", "]", "=", "device", "[", "'rssi'", "]", "devices", "[", "mac", "]", "[", "'ghunit'", "]", "=", "ghname", "elif", "devices", "[", "mac", "]", "[", "'rssi'", "]", "<", "device", "[", "'rssi'", "]", ":", "# Better RSSI value on this device", "devices", "[", "mac", "]", "[", "'rssi'", "]", "=", "device", "[", "'rssi'", "]", "devices", "[", "mac", "]", "[", "'ghunit'", "]", "=", "ghname", "print", "(", "devices", ")" ]
Get devices from all GH units on the network. This will scan the IPRANGE defined above for GH units. Then do a multirun scan on each unit, compiling all devices from all units so you can see which unit has the strongest signal to the device.
[ "Get", "devices", "from", "all", "GH", "units", "on", "the", "network", "." ]
93abdee299c4a4b65aa9dd03c77ec34e174e3c56
https://github.com/ludeeus/GHLocalApi/blob/93abdee299c4a4b65aa9dd03c77ec34e174e3c56/examples/bluetooth_scan_network.py#L11-L45
train
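The bluetooth_scan coroutine above is an example script; a minimal way to drive it, assuming the IPRANGE and LOOP globals it references are defined at module level (the CIDR value here is a placeholder, and NetworkScan, DeviceInfo and Bluetooth are expected to come from the ghlocalapi package per the repo field):

import asyncio

IPRANGE = '192.168.1.0/24'  # placeholder network range
LOOP = asyncio.get_event_loop()
LOOP.run_until_complete(bluetooth_scan())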
mkoura/dump2polarion
dump2polarion/verify.py
get_queue_obj
def get_queue_obj(session, queue_url, log_url): """Checks that all the data that is needed for submit verification is available.""" skip = False if not queue_url: logger.error("The queue url is not configured, skipping submit verification") skip = True if not session: logger.error("Missing requests session, skipping submit verification") skip = True queue = QueueSearch(session=session, queue_url=queue_url, log_url=log_url) queue.skip = skip return queue
python
def get_queue_obj(session, queue_url, log_url): """Checks that all the data that is needed for submit verification is available.""" skip = False if not queue_url: logger.error("The queue url is not configured, skipping submit verification") skip = True if not session: logger.error("Missing requests session, skipping submit verification") skip = True queue = QueueSearch(session=session, queue_url=queue_url, log_url=log_url) queue.skip = skip return queue
[ "def", "get_queue_obj", "(", "session", ",", "queue_url", ",", "log_url", ")", ":", "skip", "=", "False", "if", "not", "queue_url", ":", "logger", ".", "error", "(", "\"The queue url is not configured, skipping submit verification\"", ")", "skip", "=", "True", "if", "not", "session", ":", "logger", ".", "error", "(", "\"Missing requests session, skipping submit verification\"", ")", "skip", "=", "True", "queue", "=", "QueueSearch", "(", "session", "=", "session", ",", "queue_url", "=", "queue_url", ",", "log_url", "=", "log_url", ")", "queue", ".", "skip", "=", "skip", "return", "queue" ]
Checks that all the data that is needed for submit verification is available.
[ "Checks", "that", "all", "the", "data", "that", "is", "needed", "for", "submit", "verification", "is", "available", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/verify.py#L176-L188
train
mkoura/dump2polarion
dump2polarion/verify.py
QueueSearch.download_queue
def download_queue(self, job_ids): """Downloads data of completed jobs.""" if self.skip: return None url = "{}?jobtype=completed&jobIds={}".format( self.queue_url, ",".join(str(x) for x in job_ids) ) try: response = self.session.get(url, headers={"Accept": "application/json"}) if response: response = response.json() else: response = None # pylint: disable=broad-except except Exception as err: logger.error(err) response = None return response
python
def download_queue(self, job_ids): """Downloads data of completed jobs.""" if self.skip: return None url = "{}?jobtype=completed&jobIds={}".format( self.queue_url, ",".join(str(x) for x in job_ids) ) try: response = self.session.get(url, headers={"Accept": "application/json"}) if response: response = response.json() else: response = None # pylint: disable=broad-except except Exception as err: logger.error(err) response = None return response
[ "def", "download_queue", "(", "self", ",", "job_ids", ")", ":", "if", "self", ".", "skip", ":", "return", "None", "url", "=", "\"{}?jobtype=completed&jobIds={}\"", ".", "format", "(", "self", ".", "queue_url", ",", "\",\"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "job_ids", ")", ")", "try", ":", "response", "=", "self", ".", "session", ".", "get", "(", "url", ",", "headers", "=", "{", "\"Accept\"", ":", "\"application/json\"", "}", ")", "if", "response", ":", "response", "=", "response", ".", "json", "(", ")", "else", ":", "response", "=", "None", "# pylint: disable=broad-except", "except", "Exception", "as", "err", ":", "logger", ".", "error", "(", "err", ")", "response", "=", "None", "return", "response" ]
Downloads data of completed jobs.
[ "Downloads", "data", "of", "completed", "jobs", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/verify.py#L31-L50
train
mkoura/dump2polarion
dump2polarion/verify.py
QueueSearch.find_jobs
def find_jobs(self, job_ids): """Finds the jobs in the completed job queue.""" matched_jobs = [] if self.skip: return matched_jobs json_data = self.download_queue(job_ids) if not json_data: return matched_jobs jobs = json_data["jobs"] for job in jobs: if ( job.get("id") in job_ids and job.get("status", "").lower() not in _NOT_FINISHED_STATUSES ): matched_jobs.append(job) return matched_jobs
python
def find_jobs(self, job_ids): """Finds the jobs in the completed job queue.""" matched_jobs = [] if self.skip: return matched_jobs json_data = self.download_queue(job_ids) if not json_data: return matched_jobs jobs = json_data["jobs"] for job in jobs: if ( job.get("id") in job_ids and job.get("status", "").lower() not in _NOT_FINISHED_STATUSES ): matched_jobs.append(job) return matched_jobs
[ "def", "find_jobs", "(", "self", ",", "job_ids", ")", ":", "matched_jobs", "=", "[", "]", "if", "self", ".", "skip", ":", "return", "matched_jobs", "json_data", "=", "self", ".", "download_queue", "(", "job_ids", ")", "if", "not", "json_data", ":", "return", "matched_jobs", "jobs", "=", "json_data", "[", "\"jobs\"", "]", "for", "job", "in", "jobs", ":", "if", "(", "job", ".", "get", "(", "\"id\"", ")", "in", "job_ids", "and", "job", ".", "get", "(", "\"status\"", ",", "\"\"", ")", ".", "lower", "(", ")", "not", "in", "_NOT_FINISHED_STATUSES", ")", ":", "matched_jobs", ".", "append", "(", "job", ")", "return", "matched_jobs" ]
Finds the jobs in the completed job queue.
[ "Finds", "the", "jobs", "in", "the", "completed", "job", "queue", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/verify.py#L52-L70
train
mkoura/dump2polarion
dump2polarion/verify.py
QueueSearch.wait_for_jobs
def wait_for_jobs(self, job_ids, timeout, delay): """Waits until the jobs appear in the completed job queue.""" if self.skip: return logger.debug("Waiting up to %d sec for completion of the job IDs %s", timeout, job_ids) remaining_job_ids = set(job_ids) found_jobs = [] countdown = timeout while countdown > 0: matched_jobs = self.find_jobs(remaining_job_ids) if matched_jobs: remaining_job_ids.difference_update({job["id"] for job in matched_jobs}) found_jobs.extend(matched_jobs) if not remaining_job_ids: return found_jobs time.sleep(delay) countdown -= delay logger.error( "Timed out while waiting for completion of the job IDs %s. Results not updated.", list(remaining_job_ids), )
python
def wait_for_jobs(self, job_ids, timeout, delay): """Waits until the jobs appear in the completed job queue.""" if self.skip: return logger.debug("Waiting up to %d sec for completion of the job IDs %s", timeout, job_ids) remaining_job_ids = set(job_ids) found_jobs = [] countdown = timeout while countdown > 0: matched_jobs = self.find_jobs(remaining_job_ids) if matched_jobs: remaining_job_ids.difference_update({job["id"] for job in matched_jobs}) found_jobs.extend(matched_jobs) if not remaining_job_ids: return found_jobs time.sleep(delay) countdown -= delay logger.error( "Timed out while waiting for completion of the job IDs %s. Results not updated.", list(remaining_job_ids), )
[ "def", "wait_for_jobs", "(", "self", ",", "job_ids", ",", "timeout", ",", "delay", ")", ":", "if", "self", ".", "skip", ":", "return", "logger", ".", "debug", "(", "\"Waiting up to %d sec for completion of the job IDs %s\"", ",", "timeout", ",", "job_ids", ")", "remaining_job_ids", "=", "set", "(", "job_ids", ")", "found_jobs", "=", "[", "]", "countdown", "=", "timeout", "while", "countdown", ">", "0", ":", "matched_jobs", "=", "self", ".", "find_jobs", "(", "remaining_job_ids", ")", "if", "matched_jobs", ":", "remaining_job_ids", ".", "difference_update", "(", "{", "job", "[", "\"id\"", "]", "for", "job", "in", "matched_jobs", "}", ")", "found_jobs", ".", "extend", "(", "matched_jobs", ")", "if", "not", "remaining_job_ids", ":", "return", "found_jobs", "time", ".", "sleep", "(", "delay", ")", "countdown", "-=", "delay", "logger", ".", "error", "(", "\"Timed out while waiting for completion of the job IDs %s. Results not updated.\"", ",", "list", "(", "remaining_job_ids", ")", ",", ")" ]
Waits until the jobs appear in the completed job queue.
[ "Waits", "until", "the", "jobs", "appears", "in", "the", "completed", "job", "queue", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/verify.py#L73-L97
train
mkoura/dump2polarion
dump2polarion/verify.py
QueueSearch._check_outcome
def _check_outcome(self, jobs): """Parses returned messages and checks submit outcome.""" if self.skip: return False if not jobs: logger.error("Import failed!") return False failed_jobs = [] for job in jobs: status = job.get("status") if not status: failed_jobs.append(job) continue if status.lower() != "success": failed_jobs.append(job) for job in failed_jobs: logger.error("job: %s; status: %s", job.get("id"), job.get("status")) if len(failed_jobs) == len(jobs): logger.error("Import failed!") elif failed_jobs: logger.error("Some import jobs failed!") else: logger.info("Results successfully updated!") return not failed_jobs
python
def _check_outcome(self, jobs): """Parses returned messages and checks submit outcome.""" if self.skip: return False if not jobs: logger.error("Import failed!") return False failed_jobs = [] for job in jobs: status = job.get("status") if not status: failed_jobs.append(job) continue if status.lower() != "success": failed_jobs.append(job) for job in failed_jobs: logger.error("job: %s; status: %s", job.get("id"), job.get("status")) if len(failed_jobs) == len(jobs): logger.error("Import failed!") elif failed_jobs: logger.error("Some import jobs failed!") else: logger.info("Results successfully updated!") return not failed_jobs
[ "def", "_check_outcome", "(", "self", ",", "jobs", ")", ":", "if", "self", ".", "skip", ":", "return", "False", "if", "not", "jobs", ":", "logger", ".", "error", "(", "\"Import failed!\"", ")", "return", "False", "failed_jobs", "=", "[", "]", "for", "job", "in", "jobs", ":", "status", "=", "job", ".", "get", "(", "\"status\"", ")", "if", "not", "status", ":", "failed_jobs", ".", "append", "(", "job", ")", "continue", "if", "status", ".", "lower", "(", ")", "!=", "\"success\"", ":", "failed_jobs", ".", "append", "(", "job", ")", "for", "job", "in", "failed_jobs", ":", "logger", ".", "error", "(", "\"job: %s; status: %s\"", ",", "job", ".", "get", "(", "\"id\"", ")", ",", "job", ".", "get", "(", "\"status\"", ")", ")", "if", "len", "(", "failed_jobs", ")", "==", "len", "(", "jobs", ")", ":", "logger", ".", "error", "(", "\"Import failed!\"", ")", "elif", "failed_jobs", ":", "logger", ".", "error", "(", "\"Some import jobs failed!\"", ")", "else", ":", "logger", ".", "info", "(", "\"Results successfully updated!\"", ")", "return", "not", "failed_jobs" ]
Parses returned messages and checks submit outcome.
[ "Parses", "returned", "messages", "and", "checks", "submit", "outcome", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/verify.py#L99-L127
train
mkoura/dump2polarion
dump2polarion/verify.py
QueueSearch._download_log
def _download_log(self, url, output_file): """Saves log returned by the message bus.""" logger.info("Saving log %s to %s", url, output_file) def _do_log_download(): try: return self.session.get(url) # pylint: disable=broad-except except Exception as err: logger.error(err) # log file may not be ready yet, wait a bit for __ in range(5): log_data = _do_log_download() if log_data or log_data is None: break time.sleep(2) if not (log_data and log_data.content): logger.error("Failed to download log file %s.", url) return with open(os.path.expanduser(output_file), "ab") as out: out.write(log_data.content)
python
def _download_log(self, url, output_file): """Saves log returned by the message bus.""" logger.info("Saving log %s to %s", url, output_file) def _do_log_download(): try: return self.session.get(url) # pylint: disable=broad-except except Exception as err: logger.error(err) # log file may not be ready yet, wait a bit for __ in range(5): log_data = _do_log_download() if log_data or log_data is None: break time.sleep(2) if not (log_data and log_data.content): logger.error("Failed to download log file %s.", url) return with open(os.path.expanduser(output_file), "ab") as out: out.write(log_data.content)
[ "def", "_download_log", "(", "self", ",", "url", ",", "output_file", ")", ":", "logger", ".", "info", "(", "\"Saving log %s to %s\"", ",", "url", ",", "output_file", ")", "def", "_do_log_download", "(", ")", ":", "try", ":", "return", "self", ".", "session", ".", "get", "(", "url", ")", "# pylint: disable=broad-except", "except", "Exception", "as", "err", ":", "logger", ".", "error", "(", "err", ")", "# log file may not be ready yet, wait a bit", "for", "__", "in", "range", "(", "5", ")", ":", "log_data", "=", "_do_log_download", "(", ")", "if", "log_data", "or", "log_data", "is", "None", ":", "break", "time", ".", "sleep", "(", "2", ")", "if", "not", "(", "log_data", "and", "log_data", ".", "content", ")", ":", "logger", ".", "error", "(", "\"Failed to download log file %s.\"", ",", "url", ")", "return", "with", "open", "(", "os", ".", "path", ".", "expanduser", "(", "output_file", ")", ",", "\"ab\"", ")", "as", "out", ":", "out", ".", "write", "(", "log_data", ".", "content", ")" ]
Saves log returned by the message bus.
[ "Saves", "log", "returned", "by", "the", "message", "bus", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/verify.py#L129-L151
train
mkoura/dump2polarion
dump2polarion/verify.py
QueueSearch.get_logs
def get_logs(self, jobs, log_file=None): """Get log or log url of the jobs.""" if not (jobs and self.log_url): return for job in jobs: url = "{}?jobId={}".format(self.log_url, job.get("id")) if log_file: self._download_log("{}&download".format(url), log_file) else: logger.info("Submit log for job %s: %s", job.get("id"), url)
python
def get_logs(self, jobs, log_file=None): """Get log or log url of the jobs.""" if not (jobs and self.log_url): return for job in jobs: url = "{}?jobId={}".format(self.log_url, job.get("id")) if log_file: self._download_log("{}&download".format(url), log_file) else: logger.info("Submit log for job %s: %s", job.get("id"), url)
[ "def", "get_logs", "(", "self", ",", "jobs", ",", "log_file", "=", "None", ")", ":", "if", "not", "(", "jobs", "and", "self", ".", "log_url", ")", ":", "return", "for", "job", "in", "jobs", ":", "url", "=", "\"{}?jobId={}\"", ".", "format", "(", "self", ".", "log_url", ",", "job", ".", "get", "(", "\"id\"", ")", ")", "if", "log_file", ":", "self", ".", "_download_log", "(", "\"{}&download\"", ".", "format", "(", "url", ")", ",", "log_file", ")", "else", ":", "logger", ".", "info", "(", "\"Submit log for job %s: %s\"", ",", "job", ".", "get", "(", "\"id\"", ")", ",", "url", ")" ]
Get log or log url of the jobs.
[ "Get", "log", "or", "log", "url", "of", "the", "jobs", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/verify.py#L153-L163
train
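Taken together, the QueueSearch records above form a poll-and-verify flow: build the helper, wait for the job IDs to reach the completed queue, check the outcome, then fetch logs. A wiring sketch with placeholder URLs and job IDs; _check_outcome is private, so calling it directly just mirrors what the module's own verification entry point would do:

import requests

session = requests.Session()
queue = get_queue_obj(
    session=session,
    queue_url='https://polarion.example.com/import-queue',  # placeholder
    log_url='https://polarion.example.com/import-log',      # placeholder
)

jobs = queue.wait_for_jobs(job_ids=[1234, 1235], timeout=300, delay=10)
if queue._check_outcome(jobs):
    queue.get_logs(jobs, log_file='~/submit.log')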
Genida/dependenpy
src/dependenpy/node.py
RootNode.submodules
def submodules(self): """ Property to return all sub-modules of the node, recursively. Returns: list of Module: the sub-modules. """ submodules = [] submodules.extend(self.modules) for p in self.packages: submodules.extend(p.submodules) return submodules
python
def submodules(self): """ Property to return all sub-modules of the node, recursively. Returns: list of Module: the sub-modules. """ submodules = [] submodules.extend(self.modules) for p in self.packages: submodules.extend(p.submodules) return submodules
[ "def", "submodules", "(", "self", ")", ":", "submodules", "=", "[", "]", "submodules", ".", "extend", "(", "self", ".", "modules", ")", "for", "p", "in", "self", ".", "packages", ":", "submodules", ".", "extend", "(", "p", ".", "submodules", ")", "return", "submodules" ]
Property to return all sub-modules of the node, recursively. Returns: list of Module: the sub-modules.
[ "Property", "to", "return", "all", "sub", "-", "modules", "of", "the", "node", "recursively", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/node.py#L112-L123
train
Genida/dependenpy
src/dependenpy/node.py
RootNode.get_target
def get_target(self, target): """ Get the result of _get_target, cache it and return it. Args: target (str): target to find. Returns: Package/Module: package containing target or corresponding module. """ if target not in self._target_cache: self._target_cache[target] = self._get_target(target) return self._target_cache[target]
python
def get_target(self, target): """ Get the result of _get_target, cache it and return it. Args: target (str): target to find. Returns: Package/Module: package containing target or corresponding module. """ if target not in self._target_cache: self._target_cache[target] = self._get_target(target) return self._target_cache[target]
[ "def", "get_target", "(", "self", ",", "target", ")", ":", "if", "target", "not", "in", "self", ".", "_target_cache", ":", "self", ".", "_target_cache", "[", "target", "]", "=", "self", ".", "_get_target", "(", "target", ")", "return", "self", ".", "_target_cache", "[", "target", "]" ]
Get the result of _get_target, cache it and return it. Args: target (str): target to find. Returns: Package/Module: package containing target or corresponding module.
[ "Get", "the", "result", "of", "_get_target", "cache", "it", "and", "return", "it", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/node.py#L167-L179
train
Genida/dependenpy
src/dependenpy/node.py
RootNode._get_target
def _get_target(self, target): """ Get the Package or Module related to given target. Args: target (str): target to find. Returns: Package/Module: package containing target or corresponding module. """ depth = target.count('.') + 1 parts = target.split('.', 1) for m in self.modules: if parts[0] == m.name: if depth < 3: return m for p in self.packages: if parts[0] == p.name: if depth == 1: return p # pylama:ignore=W0212 target = p._get_target(parts[1]) if target: return target # FIXME: can lead to internal dep instead of external # see example with django.contrib.auth.forms # importing forms from django # Idea: when parsing files with ast, record what objects # are defined in the module. Then check here if the given # part is one of these objects. if depth < 3: return p return None
python
def _get_target(self, target): """ Get the Package or Module related to given target. Args: target (str): target to find. Returns: Package/Module: package containing target or corresponding module. """ depth = target.count('.') + 1 parts = target.split('.', 1) for m in self.modules: if parts[0] == m.name: if depth < 3: return m for p in self.packages: if parts[0] == p.name: if depth == 1: return p # pylama:ignore=W0212 target = p._get_target(parts[1]) if target: return target # FIXME: can lead to internal dep instead of external # see example with django.contrib.auth.forms # importing forms from django # Idea: when parsing files with ast, record what objects # are defined in the module. Then check here if the given # part is one of these objects. if depth < 3: return p return None
[ "def", "_get_target", "(", "self", ",", "target", ")", ":", "depth", "=", "target", ".", "count", "(", "'.'", ")", "+", "1", "parts", "=", "target", ".", "split", "(", "'.'", ",", "1", ")", "for", "m", "in", "self", ".", "modules", ":", "if", "parts", "[", "0", "]", "==", "m", ".", "name", ":", "if", "depth", "<", "3", ":", "return", "m", "for", "p", "in", "self", ".", "packages", ":", "if", "parts", "[", "0", "]", "==", "p", ".", "name", ":", "if", "depth", "==", "1", ":", "return", "p", "# pylama:ignore=W0212", "target", "=", "p", ".", "_get_target", "(", "parts", "[", "1", "]", ")", "if", "target", ":", "return", "target", "# FIXME: can lead to internal dep instead of external", "# see example with django.contrib.auth.forms", "# importing forms from django", "# Idea: when parsing files with ast, record what objects", "# are defined in the module. Then check here if the given", "# part is one of these objects.", "if", "depth", "<", "3", ":", "return", "p", "return", "None" ]
Get the Package or Module related to given target. Args: target (str): target to find. Returns: Package/Module: package containing target or corresponding module.
[ "Get", "the", "Package", "or", "Module", "related", "to", "given", "target", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/node.py#L181-L213
train
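_get_target counts dots to decide how deep a dotted target can match: a depth-1 target may match a package directly, targets up to depth 2 may resolve to a module, and deeper parts recurse into sub-packages. A small illustration through the cached public wrapper, assuming a root node named root built from real packages (the name and target are hypothetical):

node = root.get_target('django.contrib.auth')  # resolves via _get_target, then caches
if node is not None:
    print(node.name)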
Genida/dependenpy
src/dependenpy/node.py
RootNode.build_dependencies
def build_dependencies(self): """ Recursively build the dependencies for sub-modules and sub-packages. Iterate on node's modules then packages and call their build_dependencies methods. """ for m in self.modules: m.build_dependencies() for p in self.packages: p.build_dependencies()
python
def build_dependencies(self): """ Recursively build the dependencies for sub-modules and sub-packages. Iterate on node's modules then packages and call their build_dependencies methods. """ for m in self.modules: m.build_dependencies() for p in self.packages: p.build_dependencies()
[ "def", "build_dependencies", "(", "self", ")", ":", "for", "m", "in", "self", ".", "modules", ":", "m", ".", "build_dependencies", "(", ")", "for", "p", "in", "self", ".", "packages", ":", "p", ".", "build_dependencies", "(", ")" ]
Recursively build the dependencies for sub-modules and sub-packages. Iterate on node's modules then packages and call their build_dependencies methods.
[ "Recursively", "build", "the", "dependencies", "for", "sub", "-", "modules", "and", "sub", "-", "packages", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/node.py#L215-L225
train
Genida/dependenpy
src/dependenpy/node.py
RootNode.print_graph
def print_graph(self, format=None, output=sys.stdout, depth=0, **kwargs): """ Print the graph for self's nodes. Args: format (str): output format (csv, json or text). output (file): file descriptor on which to write. depth (int): depth of the graph. """ graph = self.as_graph(depth=depth) graph.print(format=format, output=output, **kwargs)
python
def print_graph(self, format=None, output=sys.stdout, depth=0, **kwargs): """ Print the graph for self's nodes. Args: format (str): output format (csv, json or text). output (file): file descriptor on which to write. depth (int): depth of the graph. """ graph = self.as_graph(depth=depth) graph.print(format=format, output=output, **kwargs)
[ "def", "print_graph", "(", "self", ",", "format", "=", "None", ",", "output", "=", "sys", ".", "stdout", ",", "depth", "=", "0", ",", "*", "*", "kwargs", ")", ":", "graph", "=", "self", ".", "as_graph", "(", "depth", "=", "depth", ")", "graph", ".", "print", "(", "format", "=", "format", ",", "output", "=", "output", ",", "*", "*", "kwargs", ")" ]
Print the graph for self's nodes. Args: format (str): output format (csv, json or text). output (file): file descriptor on which to write. depth (int): depth of the graph.
[ "Print", "the", "graph", "for", "self", "s", "nodes", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/node.py#L227-L237
train
Genida/dependenpy
src/dependenpy/node.py
RootNode.as_graph
def as_graph(self, depth=0): """ Create a graph with self as node, cache it, return it. Args: depth (int): depth of the graph. Returns: Graph: an instance of Graph. """ if depth in self._graph_cache: return self._graph_cache[depth] self._graph_cache[depth] = graph = Graph(self, depth=depth) return graph
python
def as_graph(self, depth=0): """ Create a graph with self as node, cache it, return it. Args: depth (int): depth of the graph. Returns: Graph: an instance of Graph. """ if depth in self._graph_cache: return self._graph_cache[depth] self._graph_cache[depth] = graph = Graph(self, depth=depth) return graph
[ "def", "as_graph", "(", "self", ",", "depth", "=", "0", ")", ":", "if", "depth", "in", "self", ".", "_graph_cache", ":", "return", "self", ".", "_graph_cache", "[", "depth", "]", "self", ".", "_graph_cache", "[", "depth", "]", "=", "graph", "=", "Graph", "(", "self", ",", "depth", "=", "depth", ")", "return", "graph" ]
Create a graph with self as node, cache it, return it. Args: depth (int): depth of the graph. Returns: Graph: an instance of Graph.
[ "Create", "a", "graph", "with", "self", "as", "node", "cache", "it", "return", "it", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/node.py#L300-L313
train
Genida/dependenpy
src/dependenpy/node.py
RootNode.as_matrix
def as_matrix(self, depth=0): """ Create a matrix with self as node, cache it, return it. Args: depth (int): depth of the matrix. Returns: Matrix: an instance of Matrix. """ if depth in self._matrix_cache: return self._matrix_cache[depth] self._matrix_cache[depth] = matrix = Matrix(self, depth=depth) return matrix
python
def as_matrix(self, depth=0): """ Create a matrix with self as node, cache it, return it. Args: depth (int): depth of the matrix. Returns: Matrix: an instance of Matrix. """ if depth in self._matrix_cache: return self._matrix_cache[depth] self._matrix_cache[depth] = matrix = Matrix(self, depth=depth) return matrix
[ "def", "as_matrix", "(", "self", ",", "depth", "=", "0", ")", ":", "if", "depth", "in", "self", ".", "_matrix_cache", ":", "return", "self", ".", "_matrix_cache", "[", "depth", "]", "self", ".", "_matrix_cache", "[", "depth", "]", "=", "matrix", "=", "Matrix", "(", "self", ",", "depth", "=", "depth", ")", "return", "matrix" ]
Create a matrix with self as node, cache it, return it. Args: depth (int): depth of the matrix. Returns: Matrix: an instance of Matrix.
[ "Create", "a", "matrix", "with", "self", "as", "node", "cache", "it", "return", "it", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/node.py#L315-L328
train
Genida/dependenpy
src/dependenpy/node.py
RootNode.as_treemap
def as_treemap(self): """ Return the dependencies as a TreeMap. Returns: TreeMap: instance of TreeMap. """ if self._treemap_cache: return self._treemap_cache self._treemap_cache = treemap = TreeMap(self) return treemap
python
def as_treemap(self): """ Return the dependencies as a TreeMap. Returns: TreeMap: instance of TreeMap. """ if self._treemap_cache: return self._treemap_cache self._treemap_cache = treemap = TreeMap(self) return treemap
[ "def", "as_treemap", "(", "self", ")", ":", "if", "self", ".", "_treemap_cache", ":", "return", "self", ".", "_treemap_cache", "self", ".", "_treemap_cache", "=", "treemap", "=", "TreeMap", "(", "self", ")", "return", "treemap" ]
Return the dependencies as a TreeMap. Returns: TreeMap: instance of TreeMap.
[ "Return", "the", "dependencies", "as", "a", "TreeMap", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/node.py#L330-L340
train
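as_graph, as_matrix and as_treemap all share one cache-then-build shape: look up the cache key (the depth, or a single slot for the treemap), build on a miss, and return the cached object thereafter. The pattern in isolation, with the built object stubbed out:

class CachedByDepth:
    def __init__(self):
        self._cache = {}

    def as_thing(self, depth=0):
        # Build at most once per depth, then always return the same object.
        if depth not in self._cache:
            self._cache[depth] = ('thing', depth)  # stand-in for Graph/Matrix
        return self._cache[depth]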
Genida/dependenpy
src/dependenpy/node.py
LeafNode.root
def root(self): """ Property to return the root of this node. Returns: Package: this node's root package. """ node = self while node.package is not None: node = node.package return node
python
def root(self): """ Property to return the root of this node. Returns: Package: this node's root package. """ node = self while node.package is not None: node = node.package return node
[ "def", "root", "(", "self", ")", ":", "node", "=", "self", "while", "node", ".", "package", "is", "not", "None", ":", "node", "=", "node", ".", "package", "return", "node" ]
Property to return the root of this node. Returns: Package: this node's root package.
[ "Property", "to", "return", "the", "root", "of", "this", "node", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/node.py#L355-L365
train
Genida/dependenpy
src/dependenpy/node.py
LeafNode.depth
def depth(self): """ Property to tell the depth of the node in the tree. Returns: int: the node's depth in the tree. """ if self._depth_cache is not None: return self._depth_cache depth, node = 1, self while node.package is not None: depth += 1 node = node.package self._depth_cache = depth return depth
python
def depth(self): """ Property to tell the depth of the node in the tree. Returns: int: the node's depth in the tree. """ if self._depth_cache is not None: return self._depth_cache depth, node = 1, self while node.package is not None: depth += 1 node = node.package self._depth_cache = depth return depth
[ "def", "depth", "(", "self", ")", ":", "if", "self", ".", "_depth_cache", "is", "not", "None", ":", "return", "self", ".", "_depth_cache", "depth", ",", "node", "=", "1", ",", "self", "while", "node", ".", "package", "is", "not", "None", ":", "depth", "+=", "1", "node", "=", "node", ".", "package", "self", ".", "_depth_cache", "=", "depth", "return", "depth" ]
Property to tell the depth of the node in the tree. Returns: int: the node's depth in the tree.
[ "Property", "to", "tell", "the", "depth", "of", "the", "node", "in", "the", "tree", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/node.py#L368-L382
train
Genida/dependenpy
src/dependenpy/node.py
LeafNode.absolute_name
def absolute_name(self, depth=0): """ Return the absolute name of the node. Concatenate names from root to self within depth. Args: depth (int): maximum depth to go to. Returns: str: absolute name of the node (until given depth is reached). """ node, node_depth = self, self.depth if depth < 1: depth = node_depth while node_depth > depth and node.package is not None: node = node.package node_depth -= 1 names = [] while node is not None: names.append(node.name) node = node.package return '.'.join(reversed(names))
python
def absolute_name(self, depth=0): """ Return the absolute name of the node. Concatenate names from root to self within depth. Args: depth (int): maximum depth to go to. Returns: str: absolute name of the node (until given depth is reached). """ node, node_depth = self, self.depth if depth < 1: depth = node_depth while node_depth > depth and node.package is not None: node = node.package node_depth -= 1 names = [] while node is not None: names.append(node.name) node = node.package return '.'.join(reversed(names))
[ "def", "absolute_name", "(", "self", ",", "depth", "=", "0", ")", ":", "node", ",", "node_depth", "=", "self", ",", "self", ".", "depth", "if", "depth", "<", "1", ":", "depth", "=", "node_depth", "while", "node_depth", ">", "depth", "and", "node", ".", "package", "is", "not", "None", ":", "node", "=", "node", ".", "package", "node_depth", "-=", "1", "names", "=", "[", "]", "while", "node", "is", "not", "None", ":", "names", ".", "append", "(", "node", ".", "name", ")", "node", "=", "node", ".", "package", "return", "'.'", ".", "join", "(", "reversed", "(", "names", ")", ")" ]
Return the absolute name of the node. Concatenate names from root to self within depth. Args: depth (int): maximum depth to go to. Returns: str: absolute name of the node (until given depth is reached).
[ "Return", "the", "absolute", "name", "of", "the", "node", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/node.py#L384-L406
train
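absolute_name walks the package links twice: once to truncate the node to the requested depth, once to collect names from leaf to root before reversing. Expected behaviour on an illustrative module at depth 3 (the names are made up):

# m represents 'package.sub.module'
m.absolute_name()         # 'package.sub.module'
m.absolute_name(depth=2)  # 'package.sub'
m.absolute_name(depth=1)  # 'package'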
klen/zeta-library
zetalibrary/utils.py
color_msg
def color_msg(msg, color): " Return colored message " return ''.join((COLORS.get(color, COLORS['endc']), msg, COLORS['endc']))
python
def color_msg(msg, color): " Return colored message " return ''.join((COLORS.get(color, COLORS['endc']), msg, COLORS['endc']))
[ "def", "color_msg", "(", "msg", ",", "color", ")", ":", "return", "''", ".", "join", "(", "(", "COLORS", ".", "get", "(", "color", ",", "COLORS", "[", "'endc'", "]", ")", ",", "msg", ",", "COLORS", "[", "'endc'", "]", ")", ")" ]
Return colored message
[ "Return", "colored", "message" ]
b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/utils.py#L9-L11
train
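color_msg wraps the message in ANSI escape sequences taken from a module-level COLORS dict; only the 'endc' key is visible in the record, so the other entries below are assumed values for a self-contained run:

COLORS = {'green': '\033[92m', 'red': '\033[91m', 'endc': '\033[0m'}  # assumed keys/values

def color_msg(msg, color):
    " Return colored message "
    return ''.join((COLORS.get(color, COLORS['endc']), msg, COLORS['endc']))

print(color_msg('zeta: packed', 'green'))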
klen/zeta-library
zetalibrary/utils.py
gen_files
def gen_files(path, prefix="_"): " Return file generator " if op.isdir(path): for name in listdir(path): fpath = op.join(path, name) if is_parsed_file(fpath): yield op.abspath(fpath) elif is_parsed_file(path): yield op.abspath(path)
python
def gen_files(path, prefix="_"): " Return file generator " if op.isdir(path): for name in listdir(path): fpath = op.join(path, name) if is_parsed_file(fpath): yield op.abspath(fpath) elif is_parsed_file(path): yield op.abspath(path)
[ "def", "gen_files", "(", "path", ",", "prefix", "=", "\"_\"", ")", ":", "if", "op", ".", "isdir", "(", "path", ")", ":", "for", "name", "in", "listdir", "(", "path", ")", ":", "fpath", "=", "op", ".", "join", "(", "path", ",", "name", ")", "if", "is_parsed_file", "(", "fpath", ")", ":", "yield", "op", ".", "abspath", "(", "fpath", ")", "elif", "is_parsed_file", "(", "path", ")", ":", "yield", "op", ".", "abspath", "(", "path", ")" ]
Return file generator
[ "Return", "file", "generator" ]
b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/utils.py#L19-L29
train
klen/zeta-library
zetalibrary/utils.py
pack
def pack(args): " Pack files. " from zetalibrary.packer import Packer args = parse_config(args) for path in gen_files(args.source, prefix=args.prefix): Packer(path, args).pack()
python
def pack(args): " Pack files. " from zetalibrary.packer import Packer args = parse_config(args) for path in gen_files(args.source, prefix=args.prefix): Packer(path, args).pack()
[ "def", "pack", "(", "args", ")", ":", "from", "zetalibrary", ".", "packer", "import", "Packer", "args", "=", "parse_config", "(", "args", ")", "for", "path", "in", "gen_files", "(", "args", ".", "source", ",", "prefix", "=", "args", ".", "prefix", ")", ":", "Packer", "(", "path", ",", "args", ")", ".", "pack", "(", ")" ]
Pack files.
[ "Pack", "files", "." ]
b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/utils.py#L41-L47
train
skioo/django-customer-billing
billing/total.py
Total.nonzero_monies
def nonzero_monies(self): """Get a list of the underlying ``Money`` instances that are not zero Returns: ([Money]): A list of zero or more money instances. Currencies will be unique. """ return [copy.copy(m) for m in self._money_obs if m.amount != 0]
python
def nonzero_monies(self): """Get a list of the underlying ``Money`` instances that are not zero Returns: ([Money]): A list of zero or more money instances. Currencies will be unique. """ return [copy.copy(m) for m in self._money_obs if m.amount != 0]
[ "def", "nonzero_monies", "(", "self", ")", ":", "return", "[", "copy", ".", "copy", "(", "m", ")", "for", "m", "in", "self", ".", "_money_obs", "if", "m", ".", "amount", "!=", "0", "]" ]
Get a list of the underlying ``Money`` instances that are not zero Returns: ([Money]): A list of zero or more money instances. Currencies will be unique.
[ "Get", "a", "list", "of", "the", "underlying", "Money", "instances", "that", "are", "not", "zero" ]
6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d
https://github.com/skioo/django-customer-billing/blob/6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d/billing/total.py#L117-L123
train
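A minimal sketch of the same filter with a stand-in `Money` value type (the real class in django-customer-billing comes from its money library):

    import copy
    from collections import namedtuple

    Money = namedtuple('Money', ['amount', 'currency'])  # illustrative stand-in

    class Total:
        def __init__(self, money_obs):
            self._money_obs = list(money_obs)

        def nonzero_monies(self):
            # copies keep callers from mutating the Total's internal state
            return [copy.copy(m) for m in self._money_obs if m.amount != 0]

    t = Total([Money(0, 'CHF'), Money(15, 'EUR')])
    print(t.nonzero_monies())  # [Money(amount=15, currency='EUR')]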
lashex/pyutu
pyutu/cli.py
index
def index(pc): """ Show details about the Pricing API Index. """ click.echo("Format Version: {0}".format(pc.idx['formatVersion'])) click.echo("Publication Date: {0}".format(pc.idx['publicationDate'])) olist = '' for i,o in enumerate(pc.idx['offers']): if i < len(pc.idx['offers']) - 1: olist += o + ", " else: olist += o click.echo("Services Offered: {0}".format(olist))
python
def index(pc): """ Show details about the Pricing API Index. """ click.echo("Format Version: {0}".format(pc.idx['formatVersion'])) click.echo("Publication Date: {0}".format(pc.idx['publicationDate'])) olist = '' for i,o in enumerate(pc.idx['offers']): if i < len(pc.idx['offers']) - 1: olist += o + ", " else: olist += o click.echo("Services Offered: {0}".format(olist))
[ "def", "index", "(", "pc", ")", ":", "click", ".", "echo", "(", "\"Format Version: {0}\"", ".", "format", "(", "pc", ".", "idx", "[", "'formatVersion'", "]", ")", ")", "click", ".", "echo", "(", "\"Publication Date: {0}\"", ".", "format", "(", "pc", ".", "idx", "[", "'publicationDate'", "]", ")", ")", "olist", "=", "''", "for", "i", ",", "o", "in", "enumerate", "(", "pc", ".", "idx", "[", "'offers'", "]", ")", ":", "if", "i", "<", "len", "(", "pc", ".", "idx", "[", "'offers'", "]", ")", "-", "1", ":", "olist", "+=", "o", "+", "\", \"", "else", ":", "olist", "+=", "o", "click", ".", "echo", "(", "\"Services Offered: {0}\"", ".", "format", "(", "olist", ")", ")" ]
Show details about the Pricing API Index.
[ "Show", "details", "about", "the", "Pricing", "API", "Index", "." ]
22a9e2e5473e316aa6fffe67f0069b2a7757a441
https://github.com/lashex/pyutu/blob/22a9e2e5473e316aa6fffe67f0069b2a7757a441/pyutu/cli.py#L36-L49
train
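The index/length bookkeeping that builds `olist` is equivalent to one `str.join`; a self-contained sketch with stand-in offer names:

    import click

    offers = ['AmazonEC2', 'AmazonS3', 'AmazonRDS']  # stand-in for pc.idx['offers']
    click.echo('Services Offered: {0}'.format(', '.join(offers)))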
lashex/pyutu
pyutu/cli.py
product
def product(pc, service, attrib, sku): """ Get a list of a service's products. The list will be in the given region, matching the specific terms and any given attribute filters or a SKU. """ pc.service = service.lower() pc.sku = sku pc.add_attributes(attribs=attrib) click.echo("Service Alias: {0}".format(pc.service_alias)) click.echo("URL: {0}".format(pc.service_url)) click.echo("Region: {0}".format(pc.region)) click.echo("Product Terms: {0}".format(pc.terms)) click.echo("Filtering Attributes: {0}".format(pc.attributes)) prods = pyutu.find_products(pc) for p in prods: click.echo("Product SKU: {0} product: {1}".format( p, json.dumps(prods[p], indent=2, sort_keys=True)) ) click.echo("Total Products Found: {0}".format(len(prods))) click.echo("Time: {0} secs".format(time.process_time()))
python
def product(pc, service, attrib, sku): """ Get a list of a service's products. The list will be in the given region, matching the specific terms and any given attribute filters or a SKU. """ pc.service = service.lower() pc.sku = sku pc.add_attributes(attribs=attrib) click.echo("Service Alias: {0}".format(pc.service_alias)) click.echo("URL: {0}".format(pc.service_url)) click.echo("Region: {0}".format(pc.region)) click.echo("Product Terms: {0}".format(pc.terms)) click.echo("Filtering Attributes: {0}".format(pc.attributes)) prods = pyutu.find_products(pc) for p in prods: click.echo("Product SKU: {0} product: {1}".format( p, json.dumps(prods[p], indent=2, sort_keys=True)) ) click.echo("Total Products Found: {0}".format(len(prods))) click.echo("Time: {0} secs".format(time.process_time()))
[ "def", "product", "(", "pc", ",", "service", ",", "attrib", ",", "sku", ")", ":", "pc", ".", "service", "=", "service", ".", "lower", "(", ")", "pc", ".", "sku", "=", "sku", "pc", ".", "add_attributes", "(", "attribs", "=", "attrib", ")", "click", ".", "echo", "(", "\"Service Alias: {0}\"", ".", "format", "(", "pc", ".", "service_alias", ")", ")", "click", ".", "echo", "(", "\"URL: {0}\"", ".", "format", "(", "pc", ".", "service_url", ")", ")", "click", ".", "echo", "(", "\"Region: {0}\"", ".", "format", "(", "pc", ".", "region", ")", ")", "click", ".", "echo", "(", "\"Product Terms: {0}\"", ".", "format", "(", "pc", ".", "terms", ")", ")", "click", ".", "echo", "(", "\"Filtering Attributes: {0}\"", ".", "format", "(", "pc", ".", "attributes", ")", ")", "prods", "=", "pyutu", ".", "find_products", "(", "pc", ")", "for", "p", "in", "prods", ":", "click", ".", "echo", "(", "\"Product SKU: {0} product: {1}\"", ".", "format", "(", "p", ",", "json", ".", "dumps", "(", "prods", "[", "p", "]", ",", "indent", "=", "2", ",", "sort_keys", "=", "True", ")", ")", ")", "click", ".", "echo", "(", "\"Total Products Found: {0}\"", ".", "format", "(", "len", "(", "prods", ")", ")", ")", "click", ".", "echo", "(", "\"Time: {0} secs\"", ".", "format", "(", "time", ".", "process_time", "(", ")", ")", ")" ]
Get a list of a service's products. The list will be in the given region, matching the specific terms and any given attribute filters or a SKU.
[ "Get", "a", "list", "of", "a", "service", "s", "products", ".", "The", "list", "will", "be", "in", "the", "given", "region", "matching", "the", "specific", "terms", "and", "any", "given", "attribute", "filters", "or", "a", "SKU", "." ]
22a9e2e5473e316aa6fffe67f0069b2a7757a441
https://github.com/lashex/pyutu/blob/22a9e2e5473e316aa6fffe67f0069b2a7757a441/pyutu/cli.py#L60-L82
train
lashex/pyutu
pyutu/cli.py
price
def price(pc, service, attrib, sku): """ Get a list of a service's prices. The list will be in the given region, matching the specific terms and any given attribute filters or a SKU. """ pc.service = service.lower() pc.sku = sku pc.add_attributes(attribs=attrib) click.echo("Service Alias: {0}".format(pc.service_alias)) click.echo("URL: {0}".format(pc.service_url)) click.echo("Region: {0}".format(pc.region)) click.echo("Product Terms: {0}".format(pc.terms)) click.echo("Filtering Attributes: {0}".format(pc.attributes)) prices = pyutu.get_prices(pc) for p in prices: click.echo("Rate Code: {0} price: {1}".format( p, json.dumps(prices[p], indent=2, sort_keys=True)) ) click.echo("Total Prices Found: {0}".format(len(prices))) if sys.version_info >= (3, 3): click.echo("Time: {0} secs".format(time.process_time()))
python
def price(pc, service, attrib, sku): """ Get a list of a service's prices. The list will be in the given region, matching the specific terms and any given attribute filters or a SKU. """ pc.service = service.lower() pc.sku = sku pc.add_attributes(attribs=attrib) click.echo("Service Alias: {0}".format(pc.service_alias)) click.echo("URL: {0}".format(pc.service_url)) click.echo("Region: {0}".format(pc.region)) click.echo("Product Terms: {0}".format(pc.terms)) click.echo("Filtering Attributes: {0}".format(pc.attributes)) prices = pyutu.get_prices(pc) for p in prices: click.echo("Rate Code: {0} price: {1}".format( p, json.dumps(prices[p], indent=2, sort_keys=True)) ) click.echo("Total Prices Found: {0}".format(len(prices))) if sys.version_info >= (3, 3): click.echo("Time: {0} secs".format(time.process_time()))
[ "def", "price", "(", "pc", ",", "service", ",", "attrib", ",", "sku", ")", ":", "pc", ".", "service", "=", "service", ".", "lower", "(", ")", "pc", ".", "sku", "=", "sku", "pc", ".", "add_attributes", "(", "attribs", "=", "attrib", ")", "click", ".", "echo", "(", "\"Service Alias: {0}\"", ".", "format", "(", "pc", ".", "service_alias", ")", ")", "click", ".", "echo", "(", "\"URL: {0}\"", ".", "format", "(", "pc", ".", "service_url", ")", ")", "click", ".", "echo", "(", "\"Region: {0}\"", ".", "format", "(", "pc", ".", "region", ")", ")", "click", ".", "echo", "(", "\"Product Terms: {0}\"", ".", "format", "(", "pc", ".", "terms", ")", ")", "click", ".", "echo", "(", "\"Filtering Attributes: {0}\"", ".", "format", "(", "pc", ".", "attributes", ")", ")", "prices", "=", "pyutu", ".", "get_prices", "(", "pc", ")", "for", "p", "in", "prices", ":", "click", ".", "echo", "(", "\"Rate Code: {0} price: {1}\"", ".", "format", "(", "p", ",", "json", ".", "dumps", "(", "prices", "[", "p", "]", ",", "indent", "=", "2", ",", "sort_keys", "=", "True", ")", ")", ")", "click", ".", "echo", "(", "\"Total Prices Found: {0}\"", ".", "format", "(", "len", "(", "prices", ")", ")", ")", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "3", ")", ":", "click", ".", "echo", "(", "\"Time: {0} secs\"", ".", "format", "(", "time", ".", "process_time", "(", ")", ")", ")" ]
Get a list of a service's prices. The list will be in the given region, matching the specific terms and any given attribute filters or a SKU.
[ "Get", "a", "list", "of", "a", "service", "s", "prices", ".", "The", "list", "will", "be", "in", "the", "given", "region", "matching", "the", "specific", "terms", "and", "any", "given", "attribute", "filters", "or", "a", "SKU", "." ]
22a9e2e5473e316aa6fffe67f0069b2a7757a441
https://github.com/lashex/pyutu/blob/22a9e2e5473e316aa6fffe67f0069b2a7757a441/pyutu/cli.py#L93-L116
train
romanorac/discomll
discomll/ensemble/distributed_weighted_forest_rand.py
map_init
def map_init(interface, params): """Initialize random number generator with the given seed `params.seed`.""" import numpy as np import random np.random.seed(params['seed']) random.seed(params['seed']) return params
python
def map_init(interface, params): """Initialize random number generator with the given seed `params.seed`.""" import numpy as np import random np.random.seed(params['seed']) random.seed(params['seed']) return params
[ "def", "map_init", "(", "interface", ",", "params", ")", ":", "import", "numpy", "as", "np", "import", "random", "np", ".", "random", ".", "seed", "(", "params", "[", "'seed'", "]", ")", "random", ".", "seed", "(", "params", "[", "'seed'", "]", ")", "return", "params" ]
Initialize random number generator with the given seed `params.seed`.
[ "Intialize", "random", "number", "generator", "with", "given", "seed", "params", ".", "seed", "." ]
a4703daffb2ba3c9f614bc3dbe45ae55884aea00
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/ensemble/distributed_weighted_forest_rand.py#L38-L44
train
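Seeding both numpy's and the stdlib's generators is what makes a distributed mapper reproducible; a short demonstration of the effect, mirroring the `params['seed']` convention above (`interface` is unused here, as in the original):

    import random
    import numpy as np

    def map_init(interface, params):
        # seed both generators so every run of the mapper draws the same stream
        np.random.seed(params['seed'])
        random.seed(params['seed'])
        return params

    map_init(None, {'seed': 42})
    a = (random.random(), np.random.rand())
    map_init(None, {'seed': 42})
    b = (random.random(), np.random.rand())
    assert a == b  # identical seeds, identical draws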
davidfokkema/artist
artist/utils.py
create_graph_name
def create_graph_name(suffix='', dirname=None): """Create a graph name using the name of the caller. :param suffix: optional suffix to add to name :param dirname: optional directory name :return: path for the named graph :rtype: string """ if suffix: suffix = '-%s' % suffix caller = get_callers_name(level=3) name = '%s%s%s%s' % (__prefix, caller, suffix, __suffix) if dirname: name = os.path.join(dirname, name) return name
python
def create_graph_name(suffix='', dirname=None): """Create a graph name using the name of the caller. :param suffix: optional suffix to add to name :param dirname: optional directory name :return: path for the named graph :rtype: string """ if suffix: suffix = '-%s' % suffix caller = get_callers_name(level=3) name = '%s%s%s%s' % (__prefix, caller, suffix, __suffix) if dirname: name = os.path.join(dirname, name) return name
[ "def", "create_graph_name", "(", "suffix", "=", "''", ",", "dirname", "=", "None", ")", ":", "if", "suffix", ":", "suffix", "=", "'-%s'", "%", "suffix", "caller", "=", "get_callers_name", "(", "level", "=", "3", ")", "name", "=", "'%s%s%s%s'", "%", "(", "__prefix", ",", "caller", ",", "suffix", ",", "__suffix", ")", "if", "dirname", ":", "name", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "name", ")", "return", "name" ]
Create a graph name using the name of the caller. :param suffix: optional suffix to add to name :param dirname: optional directory name :return: path for the named graph :rtype: string
[ "Create", "a", "graph", "name", "using", "the", "name", "of", "the", "caller", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/utils.py#L51-L67
train
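`get_callers_name` and the module-level `__prefix`/`__suffix` are defined elsewhere in artist; a hedged sketch of how caller-based naming can be built from the interpreter's stack (the prefix/suffix values and the stack level are assumptions for this standalone version):

    import inspect
    import os

    __prefix, __suffix = 'plot-', ''  # illustrative defaults

    def get_callers_name(level=2):
        # walk `level` frames up the call stack and return that function's name
        return inspect.stack()[level].function

    def create_graph_name(suffix='', dirname=None):
        if suffix:
            suffix = '-%s' % suffix
        caller = get_callers_name(level=2)  # artist uses level=3; one less wrapper here
        name = '%s%s%s%s' % (__prefix, caller, suffix, __suffix)
        if dirname:
            name = os.path.join(dirname, name)
        return name

    def plot_energy_spectrum():
        return create_graph_name(suffix='log')

    print(plot_energy_spectrum())  # plot-plot_energy_spectrum-log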
davidfokkema/artist
artist/utils.py
save_graph
def save_graph(graph, suffix='', dirname=None, pdf=False): """Save a graph using caller's name. :type graph: GraphArtist instance :param suffix: optional suffix to add to name :param dirname: optional directory name :param pdf: if True, the saved graph is additionally rendered and saved as a pdf, alongside the LaTeX file. """ name = create_graph_name(suffix, dirname) graph.save(name) if pdf: graph.save_as_pdf(name)
python
def save_graph(graph, suffix='', dirname=None, pdf=False): """Save a graph using caller's name. :type graph: GraphArtist instance :param suffix: optional suffix to add to name :param dirname: optional directory name :param pdf: if True, the saved graph is additionally rendered and saved as a pdf, alongside the LaTeX file. """ name = create_graph_name(suffix, dirname) graph.save(name) if pdf: graph.save_as_pdf(name)
[ "def", "save_graph", "(", "graph", ",", "suffix", "=", "''", ",", "dirname", "=", "None", ",", "pdf", "=", "False", ")", ":", "name", "=", "create_graph_name", "(", "suffix", ",", "dirname", ")", "graph", ".", "save", "(", "name", ")", "if", "pdf", ":", "graph", ".", "save_as_pdf", "(", "name", ")" ]
Save a graph using caller's name. :type graph: GraphArtist instance :param suffix: optional suffix to add to name :param dirname: optional directory name :param pdf: if True, the saved graph is additionally rendered and saved as a pdf, alongside the LaTeX file.
[ "Save", "a", "graph", "using", "caller", "s", "name", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/utils.py#L70-L83
train
davidfokkema/artist
artist/utils.py
save_data
def save_data(data, suffix='', dirname=None): """Save a dataset using caller's name. :param data: a list or numpy array containing the data :param suffix: optional suffix to add to name :param dirname: optional directory name """ if type(data) == list: data = np.array(data).T name = create_graph_name(suffix, dirname) + '.txt' np.savetxt(name, data)
python
def save_data(data, suffix='', dirname=None): """Save a dataset using caller's name. :param data: a list or numpy array containing the data :param suffix: optional suffix to add to name :param dirname: optional directory name """ if type(data) == list: data = np.array(data).T name = create_graph_name(suffix, dirname) + '.txt' np.savetxt(name, data)
[ "def", "save_data", "(", "data", ",", "suffix", "=", "''", ",", "dirname", "=", "None", ")", ":", "if", "type", "(", "data", ")", "==", "list", ":", "data", "=", "np", ".", "array", "(", "data", ")", ".", "T", "name", "=", "create_graph_name", "(", "suffix", ",", "dirname", ")", "+", "'.txt'", "np", ".", "savetxt", "(", "name", ",", "data", ")" ]
Save a dataset using caller's name. :param data: a list or numpy array containing the data :param suffix: optional suffix to add to name :param dirname: optional directory name
[ "Save", "a", "dataset", "using", "caller", "s", "name", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/utils.py#L86-L98
train
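The list branch transposes before saving, so a list of per-variable sequences comes out as one (x, y) pair per row in the text file; a short illustration (the file name is hypothetical):

    import numpy as np

    x = [0, 1, 2]
    y = [10, 20, 30]
    data = np.array([x, y]).T  # shape (3, 2): rows are (x, y) pairs
    np.savetxt('demo-data.txt', data)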
klen/zeta-library
zetalibrary/parser.py
Parser.read
def read(path, savedir): " Read file from path " if path.startswith('http://'): name = op.basename(path) save_path = op.join(savedir, name) if not op.exists(save_path): src = urllib2.urlopen(path).read() try: open(save_path, 'w').write(src) except IOError: return src path = save_path return open(path, 'r').read()
python
def read(path, savedir): " Read file from path " if path.startswith('http://'): name = op.basename(path) save_path = op.join(savedir, name) if not op.exists(save_path): src = urllib2.urlopen(path).read() try: open(save_path, 'w').write(src) except IOError: return src path = save_path return open(path, 'r').read()
[ "def", "read", "(", "path", ",", "savedir", ")", ":", "if", "path", ".", "startswith", "(", "'http://'", ")", ":", "name", "=", "op", ".", "basename", "(", "path", ")", "save_path", "=", "op", ".", "join", "(", "savedir", ",", "name", ")", "if", "not", "op", ".", "exists", "(", "save_path", ")", ":", "src", "=", "urllib2", ".", "urlopen", "(", "path", ")", ".", "read", "(", ")", "try", ":", "open", "(", "save_path", ",", "'w'", ")", ".", "write", "(", "src", ")", "except", "IOError", ":", "return", "src", "path", "=", "save_path", "return", "open", "(", "path", ",", "'r'", ")", ".", "read", "(", ")" ]
Read file from path
[ "Read", "file", "from", "path" ]
b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/parser.py#L25-L37
train
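`read` uses Python 2's urllib2 and caches remote files under `savedir`, serving the source from memory when the cache is not writable. A hedged Python 3 port of the same download-and-cache logic:

    import os.path as op
    from urllib.request import urlopen

    def read(path, savedir):
        " Read file from path "
        if path.startswith('http://'):
            name = op.basename(path)
            save_path = op.join(savedir, name)
            if not op.exists(save_path):
                src = urlopen(path).read().decode('utf-8')
                try:
                    with open(save_path, 'w') as fh:
                        fh.write(src)
                except IOError:
                    return src  # cache directory unwritable: fall back to memory
            path = save_path
        with open(path) as fh:
            return fh.read()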
klen/zeta-library
zetalibrary/parser.py
Parser.parse_imports
def parse_imports(self, src): " Parse imports from source. " result = [] def child(obj): result.append(obj.group(1)) src = self.import_re.sub(child, src) return src, result
python
def parse_imports(self, src): " Parse imports from source. " result = [] def child(obj): result.append(obj.group(1)) src = self.import_re.sub(child, src) return src, result
[ "def", "parse_imports", "(", "self", ",", "src", ")", ":", "result", "=", "[", "]", "def", "child", "(", "obj", ")", ":", "result", ".", "append", "(", "obj", ".", "group", "(", "1", ")", ")", "src", "=", "self", ".", "import_re", ".", "sub", "(", "child", ",", "src", ")", "return", "src", ",", "result" ]
Parse imports from source.
[ "Parse", "imports", "from", "source", "." ]
b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/parser.py#L43-L50
train
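The technique here is `re.sub` with a callable: the callback records each import while the substitution deletes it from the source. Note that `re.sub` requires the replacement callable to return a string, so the callback presumably returns `''` in the full source; the flattened snippet above appears to have lost that return. A self-contained sketch with an assumed CSS-style import pattern (zetalibrary's actual `import_re` may differ):

    import re

    import_re = re.compile(r'@import\s+url\(\s*["\']?([^"\')]+)["\']?\s*\)\s*;?')

    def parse_imports(src):
        " Parse imports from source. "
        result = []

        def child(obj):
            result.append(obj.group(1))
            return ''  # strip the matched @import from the source

        src = import_re.sub(child, src)
        return src, result

    css = '@import url("base.css");\nbody { color: red }'
    print(parse_imports(css))  # ('\nbody { color: red }', ['base.css'])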
carta/ldap_tools
src/ldap_tools/audit.py
API.__get_users
def __get_users(self): # pragma: no cover """Get user list.""" filter = ['(objectclass=posixAccount)'] results = self.client.search(filter, ['uid']) for result in results: yield result.uid.value
python
def __get_users(self): # pragma: no cover """Get user list.""" filter = ['(objectclass=posixAccount)'] results = self.client.search(filter, ['uid']) for result in results: yield result.uid.value
[ "def", "__get_users", "(", "self", ")", ":", "# pragma: no cover", "filter", "=", "[", "'(objectclass=posixAccount)'", "]", "results", "=", "self", ".", "client", ".", "search", "(", "filter", ",", "[", "'uid'", "]", ")", "for", "result", "in", "results", ":", "yield", "result", ".", "uid", ".", "value" ]
Get user list.
[ "Get", "user", "list", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/audit.py#L63-L68
train
carta/ldap_tools
src/ldap_tools/audit.py
CLI.by_user
def by_user(config): """Display LDAP group membership sorted by user.""" client = Client() client.prepare_connection() audit_api = API(client) CLI.parse_membership('Groups by User', audit_api.by_user())
python
def by_user(config): """Display LDAP group membership sorted by user.""" client = Client() client.prepare_connection() audit_api = API(client) CLI.parse_membership('Groups by User', audit_api.by_user())
[ "def", "by_user", "(", "config", ")", ":", "client", "=", "Client", "(", ")", "client", ".", "prepare_connection", "(", ")", "audit_api", "=", "API", "(", "client", ")", "CLI", ".", "parse_membership", "(", "'Groups by User'", ",", "audit_api", ".", "by_user", "(", ")", ")" ]
Display LDAP group membership sorted by user.
[ "Display", "LDAP", "group", "membership", "sorted", "by", "user", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/audit.py#L82-L87
train
carta/ldap_tools
src/ldap_tools/audit.py
CLI.raw
def raw(config): # pragma: no cover """Dump the contents of LDAP to console in raw format.""" client = Client() client.prepare_connection() audit_api = API(client) print(audit_api.raw())
python
def raw(config): # pragma: no cover """Dump the contents of LDAP to console in raw format.""" client = Client() client.prepare_connection() audit_api = API(client) print(audit_api.raw())
[ "def", "raw", "(", "config", ")", ":", "# pragma: no cover", "client", "=", "Client", "(", ")", "client", ".", "prepare_connection", "(", ")", "audit_api", "=", "API", "(", "client", ")", "print", "(", "audit_api", ".", "raw", "(", ")", ")" ]
Dump the contents of LDAP to console in raw format.
[ "Dump", "the", "contents", "of", "LDAP", "to", "console", "in", "raw", "format", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/audit.py#L100-L105
train
klichukb/django-migrate-sql
migrate_sql/operations.py
MigrateSQLMixin.get_sql_state
def get_sql_state(self, state): """ Get SQLStateGraph from state. """ if not hasattr(state, 'sql_state'): setattr(state, 'sql_state', SQLStateGraph()) return state.sql_state
python
def get_sql_state(self, state): """ Get SQLStateGraph from state. """ if not hasattr(state, 'sql_state'): setattr(state, 'sql_state', SQLStateGraph()) return state.sql_state
[ "def", "get_sql_state", "(", "self", ",", "state", ")", ":", "if", "not", "hasattr", "(", "state", ",", "'sql_state'", ")", ":", "setattr", "(", "state", ",", "'sql_state'", ",", "SQLStateGraph", "(", ")", ")", "return", "state", ".", "sql_state" ]
Get SQLStateGraph from state.
[ "Get", "SQLStateGraph", "from", "state", "." ]
be48ff2c9283404e3d951128c459c3496d1ba25d
https://github.com/klichukb/django-migrate-sql/blob/be48ff2c9283404e3d951128c459c3496d1ba25d/migrate_sql/operations.py#L12-L18
train
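This is the lazy-attribute pattern: the auxiliary SQL graph is attached to Django's migration state on first access and reused afterwards. A minimal sketch with stand-in state and graph classes:

    class SQLStateGraph:
        # stand-in for migrate_sql's dependency graph of SQL items
        def __init__(self):
            self.nodes = {}

    def get_sql_state(state):
        # create the graph on first access, then keep returning the same one
        if not hasattr(state, 'sql_state'):
            setattr(state, 'sql_state', SQLStateGraph())
        return state.sql_state

    class FakeState:  # stand-in for django.db.migrations.state.ProjectState
        pass

    s = FakeState()
    assert get_sql_state(s) is get_sql_state(s)  # one graph per state object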
smnorris/bcdata
bcdata/wfs.py
get_sortkey
def get_sortkey(table): """Get a field to sort by """ # Just pick the first column in the table in alphabetical order. # Ideally we would get the primary key from bcdc api, but it doesn't # seem to be available wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0") return sorted(wfs.get_schema("pub:" + table)["properties"].keys())[0]
python
def get_sortkey(table): """Get a field to sort by """ # Just pick the first column in the table in alphabetical order. # Ideally we would get the primary key from bcdc api, but it doesn't # seem to be available wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0") return sorted(wfs.get_schema("pub:" + table)["properties"].keys())[0]
[ "def", "get_sortkey", "(", "table", ")", ":", "# Just pick the first column in the table in alphabetical order.", "# Ideally we would get the primary key from bcdc api, but it doesn't", "# seem to be available", "wfs", "=", "WebFeatureService", "(", "url", "=", "bcdata", ".", "OWS_URL", ",", "version", "=", "\"2.0.0\"", ")", "return", "sorted", "(", "wfs", ".", "get_schema", "(", "\"pub:\"", "+", "table", ")", "[", "\"properties\"", "]", ".", "keys", "(", ")", ")", "[", "0", "]" ]
Get a field to sort by
[ "Get", "a", "field", "to", "sort", "by" ]
de6b5bbc28d85e36613b51461911ee0a72a146c5
https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wfs.py#L26-L33
train
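`sorted(...)[0]` makes the sort column deterministic, if arbitrary; a tiny illustration on a stand-in schema dict (the real one is returned by OWSLib's `get_schema`):

    schema = {'properties': {'OBJECTID': 'int', 'AREA_HA': 'float', 'FEATURE_CODE': 'str'}}
    sortkey = sorted(schema['properties'].keys())[0]
    print(sortkey)  # AREA_HA -- alphabetically first property name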