repository_name
stringlengths
7
55
func_path_in_repository
stringlengths
4
223
func_name
stringlengths
1
134
whole_func_string
stringlengths
75
104k
language
stringclasses
1 value
func_code_string
stringlengths
75
104k
func_code_tokens
sequencelengths
19
28.4k
func_documentation_string
stringlengths
1
46.9k
func_documentation_tokens
sequencelengths
1
1.97k
split_name
stringclasses
1 value
func_code_url
stringlengths
87
315
amcat/amcatclient
amcatclient/amcatclient.py
serialize
def serialize(obj):
    """JSON serializer that accepts datetime & date.

    Plain dates are promoted to midnight datetimes and both are rendered
    as ISO-8601 strings.  Any other type raises TypeError, which is the
    contract json.dumps expects from a ``default`` hook (the original
    silently returned None, turning unserializable values into null).

    @param obj: the object json.dumps could not serialize
    @return: an ISO-8601 string for date/datetime values
    @raise TypeError: for any other type
    """
    from datetime import datetime, date, time
    if isinstance(obj, date) and not isinstance(obj, datetime):
        # A bare date has no time component; use midnight
        obj = datetime.combine(obj, time.min)
    if isinstance(obj, datetime):
        return obj.isoformat()
    raise TypeError("Object of type {} is not JSON serializable"
                    .format(type(obj).__name__))
python
def serialize(obj): """JSON serializer that accepts datetime & date""" from datetime import datetime, date, time if isinstance(obj, date) and not isinstance(obj, datetime): obj = datetime.combine(obj, time.min) if isinstance(obj, datetime): return obj.isoformat()
[ "def", "serialize", "(", "obj", ")", ":", "from", "datetime", "import", "datetime", ",", "date", ",", "time", "if", "isinstance", "(", "obj", ",", "date", ")", "and", "not", "isinstance", "(", "obj", ",", "datetime", ")", ":", "obj", "=", "datetime", ".", "combine", "(", "obj", ",", "time", ".", "min", ")", "if", "isinstance", "(", "obj", ",", "datetime", ")", ":", "return", "obj", ".", "isoformat", "(", ")" ]
JSON serializer that accepts datetime & date
[ "JSON", "serializer", "that", "accepts", "datetime", "&", "date" ]
train
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L48-L54
amcat/amcatclient
amcatclient/amcatclient.py
check
def check(response, expected_status=200, url=None):
    """
    Check whether the status code of the response equals expected_status
    and raise an APIError otherwise.

    @param response: a requests.Response(-like) object
    @param expected_status: the status code that signals success
    @param url: The url of the response (for error messages).
                Defaults to response.url
    @return: the decoded json body if the Content-Type header says json,
             otherwise the raw response text
    """
    if response.status_code != expected_status:
        if url is None:
            url = response.url
        try:
            err = response.json()
        except Exception:  # was a bare except; body may not be json at all
            err = {}  # force generic error below
        if all(x in err for x in ("status", "message", "description", "details")):
            # The server sent a structured AmCAT error; re-raise it as such
            raise _APIError(err["status"], err['message'], url, err,
                            err["description"], err["details"])
        else:
            # Generic error: include (a snippet of) the body in the message
            suffix = ".html" if "<html" in response.text else ".txt"
            msg = response.text
            if len(msg) > 200:
                # Long bodies go to a temp file to keep the message readable
                with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as f:
                    f.write(response.text.encode("utf-8"))
                # fixed: original message was missing the closing bracket
                msg = "{}...\n\n[snipped; full response written to {f.name}]".format(msg[:100], **locals())
            msg = ("Request {url!r} returned code {response.status_code},"
                   " expected {expected_status}. \n{msg}".format(**locals()))
            raise _APIError(response.status_code, msg, url, response.text)
    if response.headers.get('Content-Type') == 'application/json':
        try:
            return response.json()
        except Exception:  # was a bare except
            raise Exception("Cannot decode json; text={response.text!r}"
                            .format(**locals()))
    else:
        return response.text
python
def check(response, expected_status=200, url=None): """ Check whether the status code of the response equals expected_status and raise an APIError otherwise. @param url: The url of the response (for error messages). Defaults to response.url @param json: if True, return r.json(), otherwise return r.text """ if response.status_code != expected_status: if url is None: url = response.url try: err = response.json() except: err = {} # force generic error if all(x in err for x in ("status", "message", "description", "details")): raise _APIError(err["status"], err['message'], url, err, err["description"], err["details"]) else: # generic error suffix = ".html" if "<html" in response.text else ".txt" msg = response.text if len(msg) > 200: with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as f: f.write(response.text.encode("utf-8")) msg = "{}...\n\n[snipped; full response written to {f.name}".format(msg[:100], **locals()) msg = ("Request {url!r} returned code {response.status_code}," " expected {expected_status}. \n{msg}".format(**locals())) raise _APIError(response.status_code, msg, url, response.text) if response.headers.get('Content-Type') == 'application/json': try: return response.json() except: raise Exception("Cannot decode json; text={response.text!r}" .format(**locals())) else: return response.text
[ "def", "check", "(", "response", ",", "expected_status", "=", "200", ",", "url", "=", "None", ")", ":", "if", "response", ".", "status_code", "!=", "expected_status", ":", "if", "url", "is", "None", ":", "url", "=", "response", ".", "url", "try", ":", "err", "=", "response", ".", "json", "(", ")", "except", ":", "err", "=", "{", "}", "# force generic error", "if", "all", "(", "x", "in", "err", "for", "x", "in", "(", "\"status\"", ",", "\"message\"", ",", "\"description\"", ",", "\"details\"", ")", ")", ":", "raise", "_APIError", "(", "err", "[", "\"status\"", "]", ",", "err", "[", "'message'", "]", ",", "url", ",", "err", ",", "err", "[", "\"description\"", "]", ",", "err", "[", "\"details\"", "]", ")", "else", ":", "# generic error", "suffix", "=", "\".html\"", "if", "\"<html\"", "in", "response", ".", "text", "else", "\".txt\"", "msg", "=", "response", ".", "text", "if", "len", "(", "msg", ")", ">", "200", ":", "with", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "suffix", ",", "delete", "=", "False", ")", "as", "f", ":", "f", ".", "write", "(", "response", ".", "text", ".", "encode", "(", "\"utf-8\"", ")", ")", "msg", "=", "\"{}...\\n\\n[snipped; full response written to {f.name}\"", ".", "format", "(", "msg", "[", ":", "100", "]", ",", "*", "*", "locals", "(", ")", ")", "msg", "=", "(", "\"Request {url!r} returned code {response.status_code},\"", "\" expected {expected_status}. \\n{msg}\"", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ")", "raise", "_APIError", "(", "response", ".", "status_code", ",", "msg", ",", "url", ",", "response", ".", "text", ")", "if", "response", ".", "headers", ".", "get", "(", "'Content-Type'", ")", "==", "'application/json'", ":", "try", ":", "return", "response", ".", "json", "(", ")", "except", ":", "raise", "Exception", "(", "\"Cannot decode json; text={response.text!r}\"", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ")", "else", ":", "return", "response", ".", "text" ]
Check whether the status code of the response equals expected_status and raise an APIError otherwise. @param url: The url of the response (for error messages). Defaults to response.url @param json: if True, return r.json(), otherwise return r.text
[ "Check", "whether", "the", "status", "code", "of", "the", "response", "equals", "expected_status", "and", "raise", "an", "APIError", "otherwise", "." ]
train
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L94-L132
amcat/amcatclient
amcatclient/amcatclient.py
AmcatAPI._get_auth
def _get_auth(self, user=None, password=None):
    """
    Get the authentication info for the current user, from
    1) a ~/.amcatauth file, which should be a csv file containing
       host, username, password entries
    2) the AMCAT_USER (or USER) and AMCAT_PASSWORD environment variables

    @param user: restrict the lookup to this username (optional)
    @param password: use this password instead of looking one up (optional)
    @return: a (username, password) pair
    @raise Exception: if no credentials can be found
    """
    fn = os.path.expanduser(AUTH_FILE)
    if os.path.exists(fn):
        # fix: the original called open(fn) without ever closing the handle
        with open(fn) as auth_file:
            for i, line in enumerate(csv.reader(auth_file)):
                if len(line) != 3:
                    log.warning("Cannot parse line {i} in {fn}".format(**locals()))
                    continue
                hostname, username, pwd = line
                # "" and "*" act as wildcards matching any host
                if (hostname in ("", "*", self.host)
                        and (user is None or username == user)):
                    return (username, pwd)
    if user is None:
        user = os.environ.get("AMCAT_USER", os.environ.get("USER"))
    if password is None:
        password = os.environ.get("AMCAT_PASSWORD")
    if user is None or password is None:
        raise Exception("No authentication info for {user}@{self.host} "
                        "from {fn} or AMCAT_USER / AMCAT_PASSWORD "
                        "variables".format(**locals()))
    return user, password
python
def _get_auth(self, user=None, password=None): """ Get the authentication info for the current user, from 1) a ~/.amcatauth file, which should be a csv file containing host, username, password entries 2) the AMCAT_USER (or USER) and AMCAT_PASSWORD environment variables """ fn = os.path.expanduser(AUTH_FILE) if os.path.exists(fn): for i, line in enumerate(csv.reader(open(fn))): if len(line) != 3: log.warning("Cannot parse line {i} in {fn}".format(**locals())) continue hostname, username, pwd = line if (hostname in ("", "*", self.host) and (user is None or username == user)): return (username, pwd) if user is None: user = os.environ.get("AMCAT_USER", os.environ.get("USER")) if password is None: password = os.environ.get("AMCAT_PASSWORD") if user is None or password is None: raise Exception("No authentication info for {user}@{self.host} " "from {fn} or AMCAT_USER / AMCAT_PASSWORD " "variables".format(**locals())) return user, password
[ "def", "_get_auth", "(", "self", ",", "user", "=", "None", ",", "password", "=", "None", ")", ":", "fn", "=", "os", ".", "path", ".", "expanduser", "(", "AUTH_FILE", ")", "if", "os", ".", "path", ".", "exists", "(", "fn", ")", ":", "for", "i", ",", "line", "in", "enumerate", "(", "csv", ".", "reader", "(", "open", "(", "fn", ")", ")", ")", ":", "if", "len", "(", "line", ")", "!=", "3", ":", "log", ".", "warning", "(", "\"Cannot parse line {i} in {fn}\"", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ")", "continue", "hostname", ",", "username", ",", "pwd", "=", "line", "if", "(", "hostname", "in", "(", "\"\"", ",", "\"*\"", ",", "self", ".", "host", ")", "and", "(", "user", "is", "None", "or", "username", "==", "user", ")", ")", ":", "return", "(", "username", ",", "pwd", ")", "if", "user", "is", "None", ":", "user", "=", "os", ".", "environ", ".", "get", "(", "\"AMCAT_USER\"", ",", "os", ".", "environ", ".", "get", "(", "\"USER\"", ")", ")", "if", "password", "is", "None", ":", "password", "=", "os", ".", "environ", ".", "get", "(", "\"AMCAT_PASSWORD\"", ")", "if", "user", "is", "None", "or", "password", "is", "None", ":", "raise", "Exception", "(", "\"No authentication info for {user}@{self.host} \"", "\"from {fn} or AMCAT_USER / AMCAT_PASSWORD \"", "\"variables\"", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ")", "return", "user", ",", "password" ]
Get the authentication info for the current user, from 1) a ~/.amcatauth file, which should be a csv file containing host, username, password entries 2) the AMCAT_USER (or USER) and AMCAT_PASSWORD environment variables
[ "Get", "the", "authentication", "info", "for", "the", "current", "user", "from", "1", ")", "a", "~", "/", ".", "amcatauth", "file", "which", "should", "be", "a", "csv", "file", "containing", "host", "username", "password", "entries", "2", ")", "the", "AMCAT_USER", "(", "or", "USER", ")", "and", "AMCAT_PASSWORD", "environment", "variables" ]
train
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L170-L195
amcat/amcatclient
amcatclient/amcatclient.py
AmcatAPI.request
def request(self, url, method="get", format="json", data=None,
            expected_status=None, headers=None, use_xpost=True, **options):
    """
    Make an HTTP request to the given relative URL with the host, user,
    and password information. Returns the deserialized json if successful,
    and raises an exception otherwise.

    @param url: a relative (to self.host) or absolute ("http...") URL
    @param method: HTTP method, "get" or "post"
    @param format: requested response format, added to the query string
           unless None
    @param data: request body; must be None when use_xpost tunnels a GET
    @param expected_status: status code signalling success; derived from
           the method (get->200, post->201) when None
    @param headers: extra HTTP headers (Authorization is added here)
    @param use_xpost: tunnel GET queries through POST via
           X-HTTP-METHOD-OVERRIDE so large parameter sets fit
    @param options: additional query string parameters (None values dropped)
    """
    if expected_status is None:
        if method == "get":
            expected_status = 200
        elif method == "post":
            expected_status = 201
        else:
            raise ValueError("No expected status supplied and method unknown.")
    if not url.startswith("http"):
        url = "{self.host}/api/v4/{url}".format(**locals())
    if format is not None:
        options = dict({'format': format}, **options)
    # Drop parameters that were explicitly set to None
    options = {field: value for field, value in options.items()
               if value is not None}
    headers = dict(headers or {}, Authorization="Token {}".format(self.token))
    if method == "get" and use_xpost:
        # If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our
        # query via POST. This allows for a large number of parameters to be supplied
        if data is not None:
            # was a bare `assert`, which is stripped under python -O
            raise ValueError("Cannot combine a request body with use_xpost")
        headers.update({"X-HTTP-METHOD-OVERRIDE": method})
        data = options
        options = None
        method = "post"
    r = requests.request(method, url, data=data, params=options, headers=headers)
    log.debug(
        "HTTP {method} {url} (options={options!r}, data={data!r},"
        "headers={headers}) -> {r.status_code}".format(**locals())
    )
    return check(r, expected_status=expected_status)
python
def request(self, url, method="get", format="json", data=None, expected_status=None, headers=None, use_xpost=True, **options): """ Make an HTTP request to the given relative URL with the host, user, and password information. Returns the deserialized json if successful, and raises an exception otherwise """ if expected_status is None: if method == "get": expected_status = 200 elif method == "post": expected_status = 201 else: raise ValueError("No expected status supplied and method unknown.") if not url.startswith("http"): url = "{self.host}/api/v4/{url}".format(**locals()) if format is not None: options = dict({'format': format}, **options) options = {field: value for field, value in options.items() if value is not None} headers = dict(headers or {}, Authorization="Token {}".format(self.token)) #headers['Accept-encoding'] = 'gzip' if method == "get" and use_xpost: # If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our # query via POST. This allows for a large number of parameters to be supplied assert(data is None) headers.update({"X-HTTP-METHOD-OVERRIDE": method}) data = options options = None method = "post" r = requests.request(method, url, data=data, params=options, headers=headers) log.debug( "HTTP {method} {url} (options={options!r}, data={data!r}," "headers={headers}) -> {r.status_code}".format(**locals()) ) return check(r, expected_status=expected_status)
[ "def", "request", "(", "self", ",", "url", ",", "method", "=", "\"get\"", ",", "format", "=", "\"json\"", ",", "data", "=", "None", ",", "expected_status", "=", "None", ",", "headers", "=", "None", ",", "use_xpost", "=", "True", ",", "*", "*", "options", ")", ":", "if", "expected_status", "is", "None", ":", "if", "method", "==", "\"get\"", ":", "expected_status", "=", "200", "elif", "method", "==", "\"post\"", ":", "expected_status", "=", "201", "else", ":", "raise", "ValueError", "(", "\"No expected status supplied and method unknown.\"", ")", "if", "not", "url", ".", "startswith", "(", "\"http\"", ")", ":", "url", "=", "\"{self.host}/api/v4/{url}\"", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "if", "format", "is", "not", "None", ":", "options", "=", "dict", "(", "{", "'format'", ":", "format", "}", ",", "*", "*", "options", ")", "options", "=", "{", "field", ":", "value", "for", "field", ",", "value", "in", "options", ".", "items", "(", ")", "if", "value", "is", "not", "None", "}", "headers", "=", "dict", "(", "headers", "or", "{", "}", ",", "Authorization", "=", "\"Token {}\"", ".", "format", "(", "self", ".", "token", ")", ")", "#headers['Accept-encoding'] = 'gzip'", "if", "method", "==", "\"get\"", "and", "use_xpost", ":", "# If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our", "# query via POST. This allows for a large number of parameters to be supplied", "assert", "(", "data", "is", "None", ")", "headers", ".", "update", "(", "{", "\"X-HTTP-METHOD-OVERRIDE\"", ":", "method", "}", ")", "data", "=", "options", "options", "=", "None", "method", "=", "\"post\"", "r", "=", "requests", ".", "request", "(", "method", ",", "url", ",", "data", "=", "data", ",", "params", "=", "options", ",", "headers", "=", "headers", ")", "log", ".", "debug", "(", "\"HTTP {method} {url} (options={options!r}, data={data!r},\"", "\"headers={headers}) -> {r.status_code}\"", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ")", "return", "check", "(", "r", ",", "expected_status", "=", "expected_status", ")" ]
Make an HTTP request to the given relative URL with the host, user, and password information. Returns the deserialized json if successful, and raises an exception otherwise
[ "Make", "an", "HTTP", "request", "to", "the", "given", "relative", "URL", "with", "the", "host", "user", "and", "password", "information", ".", "Returns", "the", "deserialized", "json", "if", "successful", "and", "raises", "an", "exception", "otherwise" ]
train
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L211-L250
amcat/amcatclient
amcatclient/amcatclient.py
AmcatAPI.get_pages
def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):
    """
    Get all pages at url, yielding individual results.

    :param url: the url to fetch
    :param page: start from this page
    :param page_size: results per page
    :param yield_pages: yield whole pages rather than individual results
    :param filters: additional filters
    :return: a generator of objects (dicts) from the API
    """
    total_seen = 0
    for current in itertools.count(page):
        result = self.request(url, page=current, page_size=page_size, **filters)
        total_seen += len(result['results'])
        log.debug("Got {url} page {page} / {pages}".format(url=url, **result))
        if not yield_pages:
            for item in result['results']:
                yield item
        else:
            yield result
        # The API reports a null 'next' link on the final page
        if result['next'] is None:
            break
python
def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters): """ Get all pages at url, yielding individual results :param url: the url to fetch :param page: start from this page :param page_size: results per page :param yield_pages: yield whole pages rather than individual results :param filters: additional filters :return: a generator of objects (dicts) from the API """ n = 0 for page in itertools.count(page): r = self.request(url, page=page, page_size=page_size, **filters) n += len(r['results']) log.debug("Got {url} page {page} / {pages}".format(url=url, **r)) if yield_pages: yield r else: for row in r['results']: yield row if r['next'] is None: break
[ "def", "get_pages", "(", "self", ",", "url", ",", "page", "=", "1", ",", "page_size", "=", "100", ",", "yield_pages", "=", "False", ",", "*", "*", "filters", ")", ":", "n", "=", "0", "for", "page", "in", "itertools", ".", "count", "(", "page", ")", ":", "r", "=", "self", ".", "request", "(", "url", ",", "page", "=", "page", ",", "page_size", "=", "page_size", ",", "*", "*", "filters", ")", "n", "+=", "len", "(", "r", "[", "'results'", "]", ")", "log", ".", "debug", "(", "\"Got {url} page {page} / {pages}\"", ".", "format", "(", "url", "=", "url", ",", "*", "*", "r", ")", ")", "if", "yield_pages", ":", "yield", "r", "else", ":", "for", "row", "in", "r", "[", "'results'", "]", ":", "yield", "row", "if", "r", "[", "'next'", "]", "is", "None", ":", "break" ]
Get all pages at url, yielding individual results :param url: the url to fetch :param page: start from this page :param page_size: results per page :param yield_pages: yield whole pages rather than individual results :param filters: additional filters :return: a generator of objects (dicts) from the API
[ "Get", "all", "pages", "at", "url", "yielding", "individual", "results", ":", "param", "url", ":", "the", "url", "to", "fetch", ":", "param", "page", ":", "start", "from", "this", "page", ":", "param", "page_size", ":", "results", "per", "page", ":", "param", "yield_pages", ":", "yield", "whole", "pages", "rather", "than", "individual", "results", ":", "param", "filters", ":", "additional", "filters", ":", "return", ":", "a", "generator", "of", "objects", "(", "dicts", ")", "from", "the", "API" ]
train
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L254-L275
amcat/amcatclient
amcatclient/amcatclient.py
AmcatAPI.get_scroll
def get_scroll(self, url, page_size=100, yield_pages=False, **filters):
    """
    Scroll through the resource at url and yield the individual results.

    :param url: url to scroll through
    :param page_size: results per page
    :param yield_pages: yield whole pages rather than individual results
    :param filters: Additional filters
    :return: a generator of objects (dicts) from the API
    """
    # (removed a dead `format = filters.get('format')` assignment that was
    # never read)
    n = 0
    options = dict(page_size=page_size, **filters)
    while True:
        r = self.request(url, use_xpost=False, **options)
        n += len(r['results'])
        log.debug("Got {} {n}/{total}".format(url.split("?")[0],
                                              total=r['total'], **locals()))
        if yield_pages:
            yield r
        else:
            for row in r['results']:
                yield row
        if r['next'] is None:
            break
        # Follow the server-provided next link; it already carries all
        # query parameters, so suppress ours (format=None is filtered
        # out by request()).
        url = r['next']
        options = {'format': None}
python
def get_scroll(self, url, page_size=100, yield_pages=False, **filters): """ Scroll through the resource at url and yield the individual results :param url: url to scroll through :param page_size: results per page :param yield_pages: yield whole pages rather than individual results :param filters: Additional filters :return: a generator of objects (dicts) from the API """ n = 0 options = dict(page_size=page_size, **filters) format = filters.get('format') while True: r = self.request(url, use_xpost=False, **options) n += len(r['results']) log.debug("Got {} {n}/{total}".format(url.split("?")[0], total=r['total'], **locals())) if yield_pages: yield r else: for row in r['results']: yield row if r['next'] is None: break url = r['next'] options = {'format': None}
[ "def", "get_scroll", "(", "self", ",", "url", ",", "page_size", "=", "100", ",", "yield_pages", "=", "False", ",", "*", "*", "filters", ")", ":", "n", "=", "0", "options", "=", "dict", "(", "page_size", "=", "page_size", ",", "*", "*", "filters", ")", "format", "=", "filters", ".", "get", "(", "'format'", ")", "while", "True", ":", "r", "=", "self", ".", "request", "(", "url", ",", "use_xpost", "=", "False", ",", "*", "*", "options", ")", "n", "+=", "len", "(", "r", "[", "'results'", "]", ")", "log", ".", "debug", "(", "\"Got {} {n}/{total}\"", ".", "format", "(", "url", ".", "split", "(", "\"?\"", ")", "[", "0", "]", ",", "total", "=", "r", "[", "'total'", "]", ",", "*", "*", "locals", "(", ")", ")", ")", "if", "yield_pages", ":", "yield", "r", "else", ":", "for", "row", "in", "r", "[", "'results'", "]", ":", "yield", "row", "if", "r", "[", "'next'", "]", "is", "None", ":", "break", "url", "=", "r", "[", "'next'", "]", "options", "=", "{", "'format'", ":", "None", "}" ]
Scroll through the resource at url and yield the individual results :param url: url to scroll through :param page_size: results per page :param yield_pages: yield whole pages rather than individual results :param filters: Additional filters :return: a generator of objects (dicts) from the API
[ "Scroll", "through", "the", "resource", "at", "url", "and", "yield", "the", "individual", "results", ":", "param", "url", ":", "url", "to", "scroll", "through", ":", "param", "page_size", ":", "results", "per", "page", ":", "param", "yield_pages", ":", "yield", "whole", "pages", "rather", "than", "individual", "results", ":", "param", "filters", ":", "Additional", "filters", ":", "return", ":", "a", "generator", "of", "objects", "(", "dicts", ")", "from", "the", "API" ]
train
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L277-L301
amcat/amcatclient
amcatclient/amcatclient.py
AmcatAPI.get_status
def get_status(self):
    """Get the AmCAT status page"""
    # NOTE(review): every other endpoint goes through self.request /
    # self.get_pages; confirm a `get_request` method actually exists on
    # this class — this may be a typo for self.request.
    url = URL.status.format(**locals())
    return self.get_request(url)
python
def get_status(self): """Get the AmCAT status page""" url = URL.status.format(**locals()) return self.get_request(url)
[ "def", "get_status", "(", "self", ")", ":", "url", "=", "URL", ".", "status", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "return", "self", ".", "get_request", "(", "url", ")" ]
Get the AmCAT status page
[ "Get", "the", "AmCAT", "status", "page" ]
train
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L303-L306
amcat/amcatclient
amcatclient/amcatclient.py
AmcatAPI.aggregate
def aggregate(self, **filters):
    """Conduct an aggregate query"""
    endpoint = URL.aggregate.format(**locals())
    return self.get_pages(endpoint, **filters)
python
def aggregate(self, **filters): """Conduct an aggregate query""" url = URL.aggregate.format(**locals()) return self.get_pages(url, **filters)
[ "def", "aggregate", "(", "self", ",", "*", "*", "filters", ")", ":", "url", "=", "URL", ".", "aggregate", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "return", "self", ".", "get_pages", "(", "url", ",", "*", "*", "filters", ")" ]
Conduct an aggregate query
[ "Conduct", "an", "aggregate", "query" ]
train
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L308-L311
amcat/amcatclient
amcatclient/amcatclient.py
AmcatAPI.list_sets
def list_sets(self, project, **filters):
    """List the articlesets in a project"""
    # Paginate through the articlesets endpoint for this project
    return self.get_pages(URL.articlesets.format(**locals()), **filters)
python
def list_sets(self, project, **filters): """List the articlesets in a project""" url = URL.articlesets.format(**locals()) return self.get_pages(url, **filters)
[ "def", "list_sets", "(", "self", ",", "project", ",", "*", "*", "filters", ")", ":", "url", "=", "URL", ".", "articlesets", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "return", "self", ".", "get_pages", "(", "url", ",", "*", "*", "filters", ")" ]
List the articlesets in a project
[ "List", "the", "articlesets", "in", "a", "project" ]
train
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L313-L316
amcat/amcatclient
amcatclient/amcatclient.py
AmcatAPI.get_set
def get_set(self, project, articleset, **filters):
    """Retrieve a single articleset from a project.

    (The original docstring was copy-pasted from list_sets; this
    endpoint fetches one set, not a listing.)

    @param project: the project containing the set
    @param articleset: the articleset to retrieve
    @param filters: extra query parameters
    """
    url = URL.articleset.format(**locals())
    return self.request(url, **filters)
python
def get_set(self, project, articleset, **filters): """List the articlesets in a project""" url = URL.articleset.format(**locals()) return self.request(url, **filters)
[ "def", "get_set", "(", "self", ",", "project", ",", "articleset", ",", "*", "*", "filters", ")", ":", "url", "=", "URL", ".", "articleset", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "return", "self", ".", "request", "(", "url", ",", "*", "*", "filters", ")" ]
List the articlesets in a project
[ "List", "the", "articlesets", "in", "a", "project" ]
train
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L318-L321
amcat/amcatclient
amcatclient/amcatclient.py
AmcatAPI.list_articles
def list_articles(self, project, articleset, page=1, **filters):
    """List the articles in a set"""
    # Paginate through the article endpoint, starting at `page`
    endpoint = URL.article.format(**locals())
    return self.get_pages(endpoint, page=page, **filters)
python
def list_articles(self, project, articleset, page=1, **filters): """List the articles in a set""" url = URL.article.format(**locals()) return self.get_pages(url, page=page, **filters)
[ "def", "list_articles", "(", "self", ",", "project", ",", "articleset", ",", "page", "=", "1", ",", "*", "*", "filters", ")", ":", "url", "=", "URL", ".", "article", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "return", "self", ".", "get_pages", "(", "url", ",", "page", "=", "page", ",", "*", "*", "filters", ")" ]
List the articles in a set
[ "List", "the", "articles", "in", "a", "set" ]
train
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L323-L326
amcat/amcatclient
amcatclient/amcatclient.py
AmcatAPI.create_set
def create_set(self, project, json_data=None, **options):
    """
    Create a new article set. Provide the needed arguments using
    post_data or with key-value pairs.

    @param project: the project to create the set in
    @param json_data: a json-serializable object or a pre-encoded json
           string; if None, `options` is sent as a form-encoded POST
           body instead
    @return: the API response for the created set
    """
    url = URL.articlesets.format(**locals())
    if json_data is None:
        # form encoded request
        return self.request(url, method="post", data=options)
    else:
        if not isinstance(json_data, string_types):
            # serialize handles datetime/date values
            json_data = json.dumps(json_data, default=serialize)
        headers = {'content-type': 'application/json'}
        return self.request(url, method='post', data=json_data,
                            headers=headers)
python
def create_set(self, project, json_data=None, **options): """ Create a new article set. Provide the needed arguments using post_data or with key-value pairs """ url = URL.articlesets.format(**locals()) if json_data is None: # form encoded request return self.request(url, method="post", data=options) else: if not isinstance(json_data, (string_types)): json_data = json.dumps(json_data,default = serialize) headers = {'content-type': 'application/json'} return self.request( url, method='post', data=json_data, headers=headers)
[ "def", "create_set", "(", "self", ",", "project", ",", "json_data", "=", "None", ",", "*", "*", "options", ")", ":", "url", "=", "URL", ".", "articlesets", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "if", "json_data", "is", "None", ":", "# form encoded request", "return", "self", ".", "request", "(", "url", ",", "method", "=", "\"post\"", ",", "data", "=", "options", ")", "else", ":", "if", "not", "isinstance", "(", "json_data", ",", "(", "string_types", ")", ")", ":", "json_data", "=", "json", ".", "dumps", "(", "json_data", ",", "default", "=", "serialize", ")", "headers", "=", "{", "'content-type'", ":", "'application/json'", "}", "return", "self", ".", "request", "(", "url", ",", "method", "=", "'post'", ",", "data", "=", "json_data", ",", "headers", "=", "headers", ")" ]
Create a new article set. Provide the needed arguments using post_data or with key-value pairs
[ "Create", "a", "new", "article", "set", ".", "Provide", "the", "needed", "arguments", "using", "post_data", "or", "with", "key", "-", "value", "pairs" ]
train
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L333-L347
amcat/amcatclient
amcatclient/amcatclient.py
AmcatAPI.create_articles
def create_articles(self, project, articleset, json_data=None, **options):
    """
    Create one or more articles in the set. Provide the needed arguments
    using the json_data or with key-value pairs

    @param json_data: A dictionary or list of dictionaries. Each dict
                      can contain a 'children' attribute which
                      is another list of dictionaries.
    """
    url = URL.article.format(**locals())
    # TODO duplicated from create_set, move into requests
    # (or separate post method?)
    if json_data is None:
        # form encoded request
        return self.request(url, method="post", data=options)
    if not isinstance(json_data, string_types):
        json_data = json.dumps(json_data, default=serialize)
    return self.request(url, method='post', data=json_data,
                        headers={'content-type': 'application/json'})
python
def create_articles(self, project, articleset, json_data=None, **options): """ Create one or more articles in the set. Provide the needed arguments using the json_data or with key-value pairs @param json_data: A dictionary or list of dictionaries. Each dict can contain a 'children' attribute which is another list of dictionaries. """ url = URL.article.format(**locals()) # TODO duplicated from create_set, move into requests # (or separate post method?) if json_data is None: # form encoded request return self.request(url, method="post", data=options) else: if not isinstance(json_data, string_types): json_data = json.dumps(json_data, default=serialize) headers = {'content-type': 'application/json'} return self.request(url, method='post', data=json_data, headers=headers)
[ "def", "create_articles", "(", "self", ",", "project", ",", "articleset", ",", "json_data", "=", "None", ",", "*", "*", "options", ")", ":", "url", "=", "URL", ".", "article", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "# TODO duplicated from create_set, move into requests", "# (or separate post method?)", "if", "json_data", "is", "None", ":", "# form encoded request", "return", "self", ".", "request", "(", "url", ",", "method", "=", "\"post\"", ",", "data", "=", "options", ")", "else", ":", "if", "not", "isinstance", "(", "json_data", ",", "string_types", ")", ":", "json_data", "=", "json", ".", "dumps", "(", "json_data", ",", "default", "=", "serialize", ")", "headers", "=", "{", "'content-type'", ":", "'application/json'", "}", "return", "self", ".", "request", "(", "url", ",", "method", "=", "'post'", ",", "data", "=", "json_data", ",", "headers", "=", "headers", ")" ]
Create one or more articles in the set. Provide the needed arguments using the json_data or with key-value pairs @param json_data: A dictionary or list of dictionaries. Each dict can contain a 'children' attribute which is another list of dictionaries.
[ "Create", "one", "or", "more", "articles", "in", "the", "set", ".", "Provide", "the", "needed", "arguments", "using", "the", "json_data", "or", "with", "key", "-", "value", "pairs" ]
train
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L349-L367
walkr/nanoservice
nanoservice/crypto.py
Authenticator.sign
def sign(self, encoded): """ Return authentication signature of encoded bytes """ signature = self._hmac.copy() signature.update(encoded) return signature.hexdigest().encode('utf-8')
python
def sign(self, encoded): """ Return authentication signature of encoded bytes """ signature = self._hmac.copy() signature.update(encoded) return signature.hexdigest().encode('utf-8')
[ "def", "sign", "(", "self", ",", "encoded", ")", ":", "signature", "=", "self", ".", "_hmac", ".", "copy", "(", ")", "signature", ".", "update", "(", "encoded", ")", "return", "signature", ".", "hexdigest", "(", ")", ".", "encode", "(", "'utf-8'", ")" ]
Return authentication signature of encoded bytes
[ "Return", "authentication", "signature", "of", "encoded", "bytes" ]
train
https://github.com/walkr/nanoservice/blob/e2098986b1baa5f283167ae487d14f3c6c21961a/nanoservice/crypto.py#L42-L46
walkr/nanoservice
nanoservice/crypto.py
Authenticator.split
def split(self, encoded): """ Split into signature and message """ maxlen = len(encoded) - self.sig_size message = encoded[:maxlen] signature = encoded[-self.sig_size:] return message, signature
python
def split(self, encoded): """ Split into signature and message """ maxlen = len(encoded) - self.sig_size message = encoded[:maxlen] signature = encoded[-self.sig_size:] return message, signature
[ "def", "split", "(", "self", ",", "encoded", ")", ":", "maxlen", "=", "len", "(", "encoded", ")", "-", "self", ".", "sig_size", "message", "=", "encoded", "[", ":", "maxlen", "]", "signature", "=", "encoded", "[", "-", "self", ".", "sig_size", ":", "]", "return", "message", ",", "signature" ]
Split into signature and message
[ "Split", "into", "signature", "and", "message" ]
train
https://github.com/walkr/nanoservice/blob/e2098986b1baa5f283167ae487d14f3c6c21961a/nanoservice/crypto.py#L58-L63
walkr/nanoservice
nanoservice/crypto.py
Authenticator.auth
def auth(self, encoded): """ Validate integrity of encoded bytes """ message, signature = self.split(encoded) computed = self.sign(message) if not hmac.compare_digest(signature, computed): raise AuthenticatorInvalidSignature
python
def auth(self, encoded): """ Validate integrity of encoded bytes """ message, signature = self.split(encoded) computed = self.sign(message) if not hmac.compare_digest(signature, computed): raise AuthenticatorInvalidSignature
[ "def", "auth", "(", "self", ",", "encoded", ")", ":", "message", ",", "signature", "=", "self", ".", "split", "(", "encoded", ")", "computed", "=", "self", ".", "sign", "(", "message", ")", "if", "not", "hmac", ".", "compare_digest", "(", "signature", ",", "computed", ")", ":", "raise", "AuthenticatorInvalidSignature" ]
Validate integrity of encoded bytes
[ "Validate", "integrity", "of", "encoded", "bytes" ]
train
https://github.com/walkr/nanoservice/blob/e2098986b1baa5f283167ae487d14f3c6c21961a/nanoservice/crypto.py#L65-L70
agile4you/bottle-neck
bottle_neck/cbv.py
cached_classproperty
def cached_classproperty(fun): """A memorization decorator for class properties. It implements the above `classproperty` decorator, with the difference that the function result is computed and attached to class as direct attribute. (Lazy loading and caching.) """ @functools.wraps(fun) def get(cls): try: return cls.__cache[fun] except AttributeError: cls.__cache = {} except KeyError: # pragma: no cover pass ret = cls.__cache[fun] = fun(cls) return ret return classproperty(get)
python
def cached_classproperty(fun): """A memorization decorator for class properties. It implements the above `classproperty` decorator, with the difference that the function result is computed and attached to class as direct attribute. (Lazy loading and caching.) """ @functools.wraps(fun) def get(cls): try: return cls.__cache[fun] except AttributeError: cls.__cache = {} except KeyError: # pragma: no cover pass ret = cls.__cache[fun] = fun(cls) return ret return classproperty(get)
[ "def", "cached_classproperty", "(", "fun", ")", ":", "@", "functools", ".", "wraps", "(", "fun", ")", "def", "get", "(", "cls", ")", ":", "try", ":", "return", "cls", ".", "__cache", "[", "fun", "]", "except", "AttributeError", ":", "cls", ".", "__cache", "=", "{", "}", "except", "KeyError", ":", "# pragma: no cover", "pass", "ret", "=", "cls", ".", "__cache", "[", "fun", "]", "=", "fun", "(", "cls", ")", "return", "ret", "return", "classproperty", "(", "get", ")" ]
A memorization decorator for class properties. It implements the above `classproperty` decorator, with the difference that the function result is computed and attached to class as direct attribute. (Lazy loading and caching.)
[ "A", "memorization", "decorator", "for", "class", "properties", "." ]
train
https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/cbv.py#L92-L109
agile4you/bottle-neck
bottle_neck/cbv.py
plugin_method
def plugin_method(*plugin_names): """Plugin Method decorator. Signs a web handler function with the plugins to be applied as attributes. Args: plugin_names (list): A list of plugin callable names Returns: A wrapped handler callable. Examples: >>> @plugin_method('json', 'bill') ... def method(): ... return "Hello!" ... >>> print method.json True >>> print method.bill True """ def wrapper(callable_obj): for plugin_name in plugin_names: if not hasattr(callable_obj, plugin_name): setattr(callable_obj, plugin_name, True) return callable_obj return wrapper
python
def plugin_method(*plugin_names): """Plugin Method decorator. Signs a web handler function with the plugins to be applied as attributes. Args: plugin_names (list): A list of plugin callable names Returns: A wrapped handler callable. Examples: >>> @plugin_method('json', 'bill') ... def method(): ... return "Hello!" ... >>> print method.json True >>> print method.bill True """ def wrapper(callable_obj): for plugin_name in plugin_names: if not hasattr(callable_obj, plugin_name): setattr(callable_obj, plugin_name, True) return callable_obj return wrapper
[ "def", "plugin_method", "(", "*", "plugin_names", ")", ":", "def", "wrapper", "(", "callable_obj", ")", ":", "for", "plugin_name", "in", "plugin_names", ":", "if", "not", "hasattr", "(", "callable_obj", ",", "plugin_name", ")", ":", "setattr", "(", "callable_obj", ",", "plugin_name", ",", "True", ")", "return", "callable_obj", "return", "wrapper" ]
Plugin Method decorator. Signs a web handler function with the plugins to be applied as attributes. Args: plugin_names (list): A list of plugin callable names Returns: A wrapped handler callable. Examples: >>> @plugin_method('json', 'bill') ... def method(): ... return "Hello!" ... >>> print method.json True >>> print method.bill True
[ "Plugin", "Method", "decorator", ".", "Signs", "a", "web", "handler", "function", "with", "the", "plugins", "to", "be", "applied", "as", "attributes", "." ]
train
https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/cbv.py#L112-L138
agile4you/bottle-neck
bottle_neck/cbv.py
route_method
def route_method(method_name, extra_part=False): """Custom handler routing decorator. Signs a web handler callable with the http method as attribute. Args: method_name (str): HTTP method name (i.e GET, POST) extra_part (bool): Indicates if wrapped callable name should be a part of the actual endpoint. Returns: A wrapped handler callable. examples: >>> @route_method('GET') ... def method(): ... return "Hello!" ... >>> method.http_method 'GET' >>> method.url_extra_part None """ def wrapper(callable_obj): if method_name.lower() not in DEFAULT_ROUTES: raise HandlerHTTPMethodError( 'Invalid http method in method: {}'.format(method_name) ) callable_obj.http_method = method_name.upper() callable_obj.url_extra_part = callable_obj.__name__ if extra_part\ else None return classmethod(callable_obj) return wrapper
python
def route_method(method_name, extra_part=False): """Custom handler routing decorator. Signs a web handler callable with the http method as attribute. Args: method_name (str): HTTP method name (i.e GET, POST) extra_part (bool): Indicates if wrapped callable name should be a part of the actual endpoint. Returns: A wrapped handler callable. examples: >>> @route_method('GET') ... def method(): ... return "Hello!" ... >>> method.http_method 'GET' >>> method.url_extra_part None """ def wrapper(callable_obj): if method_name.lower() not in DEFAULT_ROUTES: raise HandlerHTTPMethodError( 'Invalid http method in method: {}'.format(method_name) ) callable_obj.http_method = method_name.upper() callable_obj.url_extra_part = callable_obj.__name__ if extra_part\ else None return classmethod(callable_obj) return wrapper
[ "def", "route_method", "(", "method_name", ",", "extra_part", "=", "False", ")", ":", "def", "wrapper", "(", "callable_obj", ")", ":", "if", "method_name", ".", "lower", "(", ")", "not", "in", "DEFAULT_ROUTES", ":", "raise", "HandlerHTTPMethodError", "(", "'Invalid http method in method: {}'", ".", "format", "(", "method_name", ")", ")", "callable_obj", ".", "http_method", "=", "method_name", ".", "upper", "(", ")", "callable_obj", ".", "url_extra_part", "=", "callable_obj", ".", "__name__", "if", "extra_part", "else", "None", "return", "classmethod", "(", "callable_obj", ")", "return", "wrapper" ]
Custom handler routing decorator. Signs a web handler callable with the http method as attribute. Args: method_name (str): HTTP method name (i.e GET, POST) extra_part (bool): Indicates if wrapped callable name should be a part of the actual endpoint. Returns: A wrapped handler callable. examples: >>> @route_method('GET') ... def method(): ... return "Hello!" ... >>> method.http_method 'GET' >>> method.url_extra_part None
[ "Custom", "handler", "routing", "decorator", ".", "Signs", "a", "web", "handler", "callable", "with", "the", "http", "method", "as", "attribute", "." ]
train
https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/cbv.py#L141-L175
GaretJax/lancet
lancet/commands/jira.py
issue_add
def issue_add(lancet, assign, add_to_sprint, summary): """ Create a new issue on the issue tracker. """ summary = " ".join(summary) issue = create_issue( lancet, summary, # project_id=project_id, add_to_active_sprint=add_to_sprint, ) if assign: if assign == "me": username = lancet.tracker.whoami() else: username = assign assign_issue(lancet, issue, username) click.echo("Created issue")
python
def issue_add(lancet, assign, add_to_sprint, summary): """ Create a new issue on the issue tracker. """ summary = " ".join(summary) issue = create_issue( lancet, summary, # project_id=project_id, add_to_active_sprint=add_to_sprint, ) if assign: if assign == "me": username = lancet.tracker.whoami() else: username = assign assign_issue(lancet, issue, username) click.echo("Created issue")
[ "def", "issue_add", "(", "lancet", ",", "assign", ",", "add_to_sprint", ",", "summary", ")", ":", "summary", "=", "\" \"", ".", "join", "(", "summary", ")", "issue", "=", "create_issue", "(", "lancet", ",", "summary", ",", "# project_id=project_id,", "add_to_active_sprint", "=", "add_to_sprint", ",", ")", "if", "assign", ":", "if", "assign", "==", "\"me\"", ":", "username", "=", "lancet", ".", "tracker", ".", "whoami", "(", ")", "else", ":", "username", "=", "assign", "assign_issue", "(", "lancet", ",", "issue", ",", "username", ")", "click", ".", "echo", "(", "\"Created issue\"", ")" ]
Create a new issue on the issue tracker.
[ "Create", "a", "new", "issue", "on", "the", "issue", "tracker", "." ]
train
https://github.com/GaretJax/lancet/blob/cf438c5c6166b18ee0dc5ffce55220793019bb95/lancet/commands/jira.py#L31-L49
edx/edx-django-extensions
edx_management_commands/management_commands/management/commands/manage_user.py
Command._maybe_update
def _maybe_update(self, user, attribute, new_value): """ DRY helper. If the specified attribute of the user differs from the specified value, it will be updated. """ old_value = getattr(user, attribute) if new_value != old_value: self.stderr.write( _('Setting {attribute} for user "{username}" to "{new_value}"').format( attribute=attribute, username=user.username, new_value=new_value ) ) setattr(user, attribute, new_value)
python
def _maybe_update(self, user, attribute, new_value): """ DRY helper. If the specified attribute of the user differs from the specified value, it will be updated. """ old_value = getattr(user, attribute) if new_value != old_value: self.stderr.write( _('Setting {attribute} for user "{username}" to "{new_value}"').format( attribute=attribute, username=user.username, new_value=new_value ) ) setattr(user, attribute, new_value)
[ "def", "_maybe_update", "(", "self", ",", "user", ",", "attribute", ",", "new_value", ")", ":", "old_value", "=", "getattr", "(", "user", ",", "attribute", ")", "if", "new_value", "!=", "old_value", ":", "self", ".", "stderr", ".", "write", "(", "_", "(", "'Setting {attribute} for user \"{username}\" to \"{new_value}\"'", ")", ".", "format", "(", "attribute", "=", "attribute", ",", "username", "=", "user", ".", "username", ",", "new_value", "=", "new_value", ")", ")", "setattr", "(", "user", ",", "attribute", ",", "new_value", ")" ]
DRY helper. If the specified attribute of the user differs from the specified value, it will be updated.
[ "DRY", "helper", ".", "If", "the", "specified", "attribute", "of", "the", "user", "differs", "from", "the", "specified", "value", "it", "will", "be", "updated", "." ]
train
https://github.com/edx/edx-django-extensions/blob/35bbf7f95453c0e2c07acf3539722a92e7b6f548/edx_management_commands/management_commands/management/commands/manage_user.py#L26-L38
edx/edx-django-extensions
edx_management_commands/management_commands/management/commands/manage_user.py
Command._check_email_match
def _check_email_match(self, user, email): """ DRY helper. Requiring the user to specify both username and email will help catch certain issues, for example if the expected username has already been taken by someone else. """ if user.email != email: # The passed email address doesn't match this username's email address. # Assume a problem and fail. raise CommandError( _( 'Skipping user "{}" because the specified and existing email ' 'addresses do not match.' ).format(user.username) )
python
def _check_email_match(self, user, email): """ DRY helper. Requiring the user to specify both username and email will help catch certain issues, for example if the expected username has already been taken by someone else. """ if user.email != email: # The passed email address doesn't match this username's email address. # Assume a problem and fail. raise CommandError( _( 'Skipping user "{}" because the specified and existing email ' 'addresses do not match.' ).format(user.username) )
[ "def", "_check_email_match", "(", "self", ",", "user", ",", "email", ")", ":", "if", "user", ".", "email", "!=", "email", ":", "# The passed email address doesn't match this username's email address.", "# Assume a problem and fail.", "raise", "CommandError", "(", "_", "(", "'Skipping user \"{}\" because the specified and existing email '", "'addresses do not match.'", ")", ".", "format", "(", "user", ".", "username", ")", ")" ]
DRY helper. Requiring the user to specify both username and email will help catch certain issues, for example if the expected username has already been taken by someone else.
[ "DRY", "helper", "." ]
train
https://github.com/edx/edx-django-extensions/blob/35bbf7f95453c0e2c07acf3539722a92e7b6f548/edx_management_commands/management_commands/management/commands/manage_user.py#L40-L56
GaretJax/lancet
lancet/timer.py
credentials_checker
def credentials_checker(url, username, password): """Check the provided credentials using the Harvest API.""" api = HarvestAPI(url, (username, password)) try: api.whoami() except HarvestError: return False else: return True
python
def credentials_checker(url, username, password): """Check the provided credentials using the Harvest API.""" api = HarvestAPI(url, (username, password)) try: api.whoami() except HarvestError: return False else: return True
[ "def", "credentials_checker", "(", "url", ",", "username", ",", "password", ")", ":", "api", "=", "HarvestAPI", "(", "url", ",", "(", "username", ",", "password", ")", ")", "try", ":", "api", ".", "whoami", "(", ")", "except", "HarvestError", ":", "return", "False", "else", ":", "return", "True" ]
Check the provided credentials using the Harvest API.
[ "Check", "the", "provided", "credentials", "using", "the", "Harvest", "API", "." ]
train
https://github.com/GaretJax/lancet/blob/cf438c5c6166b18ee0dc5ffce55220793019bb95/lancet/timer.py#L219-L227
GaretJax/lancet
lancet/timer.py
harvest
def harvest(lancet, config_section): """Construct a new Harvest client.""" url, username, password = lancet.get_credentials( config_section, credentials_checker ) project_id_getter = lancet.get_instance_from_config( "timer", "project_id_getter", lancet ) task_id_getter = lancet.get_instance_from_config( "timer", "task_id_getter", lancet ) client = HarvestPlatform( server=url, basic_auth=(username, password), project_id_getter=project_id_getter, task_id_getter=task_id_getter, ) lancet.call_on_close(client.close) return client
python
def harvest(lancet, config_section): """Construct a new Harvest client.""" url, username, password = lancet.get_credentials( config_section, credentials_checker ) project_id_getter = lancet.get_instance_from_config( "timer", "project_id_getter", lancet ) task_id_getter = lancet.get_instance_from_config( "timer", "task_id_getter", lancet ) client = HarvestPlatform( server=url, basic_auth=(username, password), project_id_getter=project_id_getter, task_id_getter=task_id_getter, ) lancet.call_on_close(client.close) return client
[ "def", "harvest", "(", "lancet", ",", "config_section", ")", ":", "url", ",", "username", ",", "password", "=", "lancet", ".", "get_credentials", "(", "config_section", ",", "credentials_checker", ")", "project_id_getter", "=", "lancet", ".", "get_instance_from_config", "(", "\"timer\"", ",", "\"project_id_getter\"", ",", "lancet", ")", "task_id_getter", "=", "lancet", ".", "get_instance_from_config", "(", "\"timer\"", ",", "\"task_id_getter\"", ",", "lancet", ")", "client", "=", "HarvestPlatform", "(", "server", "=", "url", ",", "basic_auth", "=", "(", "username", ",", "password", ")", ",", "project_id_getter", "=", "project_id_getter", ",", "task_id_getter", "=", "task_id_getter", ",", ")", "lancet", ".", "call_on_close", "(", "client", ".", "close", ")", "return", "client" ]
Construct a new Harvest client.
[ "Construct", "a", "new", "Harvest", "client", "." ]
train
https://github.com/GaretJax/lancet/blob/cf438c5c6166b18ee0dc5ffce55220793019bb95/lancet/timer.py#L230-L250
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/path.py
temp_dir
def temp_dir(folder=None, delete=True): # type: (Optional[str], bool) -> str """Get a temporary directory optionally with folder appended (and created if it doesn't exist) Args: folder (Optional[str]): Folder to create in temporary folder. Defaults to None. delete (bool): Whether to delete folder on exiting with statement Returns: str: A temporary directory """ tempdir = get_temp_dir() if folder: tempdir = join(tempdir, folder) if not exists(tempdir): makedirs(tempdir) try: yield tempdir finally: if delete: rmtree(tempdir)
python
def temp_dir(folder=None, delete=True): # type: (Optional[str], bool) -> str """Get a temporary directory optionally with folder appended (and created if it doesn't exist) Args: folder (Optional[str]): Folder to create in temporary folder. Defaults to None. delete (bool): Whether to delete folder on exiting with statement Returns: str: A temporary directory """ tempdir = get_temp_dir() if folder: tempdir = join(tempdir, folder) if not exists(tempdir): makedirs(tempdir) try: yield tempdir finally: if delete: rmtree(tempdir)
[ "def", "temp_dir", "(", "folder", "=", "None", ",", "delete", "=", "True", ")", ":", "# type: (Optional[str], bool) -> str", "tempdir", "=", "get_temp_dir", "(", ")", "if", "folder", ":", "tempdir", "=", "join", "(", "tempdir", ",", "folder", ")", "if", "not", "exists", "(", "tempdir", ")", ":", "makedirs", "(", "tempdir", ")", "try", ":", "yield", "tempdir", "finally", ":", "if", "delete", ":", "rmtree", "(", "tempdir", ")" ]
Get a temporary directory optionally with folder appended (and created if it doesn't exist) Args: folder (Optional[str]): Folder to create in temporary folder. Defaults to None. delete (bool): Whether to delete folder on exiting with statement Returns: str: A temporary directory
[ "Get", "a", "temporary", "directory", "optionally", "with", "folder", "appended", "(", "and", "created", "if", "it", "doesn", "t", "exist", ")" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/path.py#L60-L80
martinkou/applepushnotification
applepushnotification/service.py
NotificationService.send
def send(self, obj): """Send a push notification""" if not isinstance(obj, NotificationMessage): raise ValueError, u"You can only send NotificationMessage objects." self._send_queue.put(obj)
python
def send(self, obj): """Send a push notification""" if not isinstance(obj, NotificationMessage): raise ValueError, u"You can only send NotificationMessage objects." self._send_queue.put(obj)
[ "def", "send", "(", "self", ",", "obj", ")", ":", "if", "not", "isinstance", "(", "obj", ",", "NotificationMessage", ")", ":", "raise", "ValueError", ",", "u\"You can only send NotificationMessage objects.\"", "self", ".", "_send_queue", ".", "put", "(", "obj", ")" ]
Send a push notification
[ "Send", "a", "push", "notification" ]
train
https://github.com/martinkou/applepushnotification/blob/43ebe5963fa9c48990dd57f6aaba6056a1e7470f/applepushnotification/service.py#L156-L160
martinkou/applepushnotification
applepushnotification/service.py
NotificationService.get_error
def get_error(self, block = True, timeout = None): """ Gets the next error message. Each error message is a 2-tuple of (status, identifier).""" return self._error_queue.get(block = block, timeout = timeout)
python
def get_error(self, block = True, timeout = None): """ Gets the next error message. Each error message is a 2-tuple of (status, identifier).""" return self._error_queue.get(block = block, timeout = timeout)
[ "def", "get_error", "(", "self", ",", "block", "=", "True", ",", "timeout", "=", "None", ")", ":", "return", "self", ".", "_error_queue", ".", "get", "(", "block", "=", "block", ",", "timeout", "=", "timeout", ")" ]
Gets the next error message. Each error message is a 2-tuple of (status, identifier).
[ "Gets", "the", "next", "error", "message", ".", "Each", "error", "message", "is", "a", "2", "-", "tuple", "of", "(", "status", "identifier", ")", "." ]
train
https://github.com/martinkou/applepushnotification/blob/43ebe5963fa9c48990dd57f6aaba6056a1e7470f/applepushnotification/service.py#L162-L167
martinkou/applepushnotification
applepushnotification/service.py
NotificationService.get_feedback
def get_feedback(self, block = True, timeout = None): """ Gets the next feedback message. Each feedback message is a 2-tuple of (timestamp, device_token).""" if self._feedback_greenlet is None: self._feedback_greenlet = gevent.spawn(self._feedback_loop) return self._feedback_queue.get(block = block, timeout = timeout)
python
def get_feedback(self, block = True, timeout = None): """ Gets the next feedback message. Each feedback message is a 2-tuple of (timestamp, device_token).""" if self._feedback_greenlet is None: self._feedback_greenlet = gevent.spawn(self._feedback_loop) return self._feedback_queue.get(block = block, timeout = timeout)
[ "def", "get_feedback", "(", "self", ",", "block", "=", "True", ",", "timeout", "=", "None", ")", ":", "if", "self", ".", "_feedback_greenlet", "is", "None", ":", "self", ".", "_feedback_greenlet", "=", "gevent", ".", "spawn", "(", "self", ".", "_feedback_loop", ")", "return", "self", ".", "_feedback_queue", ".", "get", "(", "block", "=", "block", ",", "timeout", "=", "timeout", ")" ]
Gets the next feedback message. Each feedback message is a 2-tuple of (timestamp, device_token).
[ "Gets", "the", "next", "feedback", "message", "." ]
train
https://github.com/martinkou/applepushnotification/blob/43ebe5963fa9c48990dd57f6aaba6056a1e7470f/applepushnotification/service.py#L169-L176
martinkou/applepushnotification
applepushnotification/service.py
NotificationService.wait_send
def wait_send(self, timeout = None): """Wait until all queued messages are sent.""" self._send_queue_cleared.clear() self._send_queue_cleared.wait(timeout = timeout)
python
def wait_send(self, timeout = None): """Wait until all queued messages are sent.""" self._send_queue_cleared.clear() self._send_queue_cleared.wait(timeout = timeout)
[ "def", "wait_send", "(", "self", ",", "timeout", "=", "None", ")", ":", "self", ".", "_send_queue_cleared", ".", "clear", "(", ")", "self", ".", "_send_queue_cleared", ".", "wait", "(", "timeout", "=", "timeout", ")" ]
Wait until all queued messages are sent.
[ "Wait", "until", "all", "queued", "messages", "are", "sent", "." ]
train
https://github.com/martinkou/applepushnotification/blob/43ebe5963fa9c48990dd57f6aaba6056a1e7470f/applepushnotification/service.py#L178-L181
martinkou/applepushnotification
applepushnotification/service.py
NotificationService.start
def start(self): """Start the message sending loop.""" if self._send_greenlet is None: self._send_greenlet = gevent.spawn(self._send_loop)
python
def start(self): """Start the message sending loop.""" if self._send_greenlet is None: self._send_greenlet = gevent.spawn(self._send_loop)
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "_send_greenlet", "is", "None", ":", "self", ".", "_send_greenlet", "=", "gevent", ".", "spawn", "(", "self", ".", "_send_loop", ")" ]
Start the message sending loop.
[ "Start", "the", "message", "sending", "loop", "." ]
train
https://github.com/martinkou/applepushnotification/blob/43ebe5963fa9c48990dd57f6aaba6056a1e7470f/applepushnotification/service.py#L183-L186
martinkou/applepushnotification
applepushnotification/service.py
NotificationService.stop
def stop(self, timeout = 10.0): """ Send all pending messages, close connection. Returns True if no message left to sent. False if dirty. - timeout: seconds to wait for sending remaining messages. disconnect immedately if None. """ if (self._send_greenlet is not None) and \ (self._send_queue.qsize() > 0): self.wait_send(timeout = timeout) if self._send_greenlet is not None: gevent.kill(self._send_greenlet) self._send_greenlet = None if self._error_greenlet is not None: gevent.kill(self._error_greenlet) self._error_greenlet = None if self._feedback_greenlet is not None: gevent.kill(self._feedback_greenlet) self._feedback_greenlet = None return self._send_queue.qsize() < 1
python
def stop(self, timeout = 10.0): """ Send all pending messages, close connection. Returns True if no message left to sent. False if dirty. - timeout: seconds to wait for sending remaining messages. disconnect immedately if None. """ if (self._send_greenlet is not None) and \ (self._send_queue.qsize() > 0): self.wait_send(timeout = timeout) if self._send_greenlet is not None: gevent.kill(self._send_greenlet) self._send_greenlet = None if self._error_greenlet is not None: gevent.kill(self._error_greenlet) self._error_greenlet = None if self._feedback_greenlet is not None: gevent.kill(self._feedback_greenlet) self._feedback_greenlet = None return self._send_queue.qsize() < 1
[ "def", "stop", "(", "self", ",", "timeout", "=", "10.0", ")", ":", "if", "(", "self", ".", "_send_greenlet", "is", "not", "None", ")", "and", "(", "self", ".", "_send_queue", ".", "qsize", "(", ")", ">", "0", ")", ":", "self", ".", "wait_send", "(", "timeout", "=", "timeout", ")", "if", "self", ".", "_send_greenlet", "is", "not", "None", ":", "gevent", ".", "kill", "(", "self", ".", "_send_greenlet", ")", "self", ".", "_send_greenlet", "=", "None", "if", "self", ".", "_error_greenlet", "is", "not", "None", ":", "gevent", ".", "kill", "(", "self", ".", "_error_greenlet", ")", "self", ".", "_error_greenlet", "=", "None", "if", "self", ".", "_feedback_greenlet", "is", "not", "None", ":", "gevent", ".", "kill", "(", "self", ".", "_feedback_greenlet", ")", "self", ".", "_feedback_greenlet", "=", "None", "return", "self", ".", "_send_queue", ".", "qsize", "(", ")", "<", "1" ]
Send all pending messages, close connection. Returns True if no message left to sent. False if dirty. - timeout: seconds to wait for sending remaining messages. disconnect immedately if None.
[ "Send", "all", "pending", "messages", "close", "connection", ".", "Returns", "True", "if", "no", "message", "left", "to", "sent", ".", "False", "if", "dirty", ".", "-", "timeout", ":", "seconds", "to", "wait", "for", "sending", "remaining", "messages", ".", "disconnect", "immedately", "if", "None", "." ]
train
https://github.com/martinkou/applepushnotification/blob/43ebe5963fa9c48990dd57f6aaba6056a1e7470f/applepushnotification/service.py#L188-L210
nbedi/typecaster
typecaster/ssml.py
convert_to_ssml
def convert_to_ssml(text, text_format): """ Convert text to SSML based on the text's current format. NOTE: This module is extremely limited at the moment and will be expanded. :param text: The text to convert. :param text_format: The text format of the text. Currently supports 'plain', 'html' or None for skipping SSML conversion. """ if text_format is None: return text elif text_format == 'plain': return plain_to_ssml(text) elif text_format == 'html': return html_to_ssml(text) else: raise ValueError(text_format + ': text format not found.')
python
def convert_to_ssml(text, text_format): """ Convert text to SSML based on the text's current format. NOTE: This module is extremely limited at the moment and will be expanded. :param text: The text to convert. :param text_format: The text format of the text. Currently supports 'plain', 'html' or None for skipping SSML conversion. """ if text_format is None: return text elif text_format == 'plain': return plain_to_ssml(text) elif text_format == 'html': return html_to_ssml(text) else: raise ValueError(text_format + ': text format not found.')
[ "def", "convert_to_ssml", "(", "text", ",", "text_format", ")", ":", "if", "text_format", "is", "None", ":", "return", "text", "elif", "text_format", "==", "'plain'", ":", "return", "plain_to_ssml", "(", "text", ")", "elif", "text_format", "==", "'html'", ":", "return", "html_to_ssml", "(", "text", ")", "else", ":", "raise", "ValueError", "(", "text_format", "+", "': text format not found.'", ")" ]
Convert text to SSML based on the text's current format. NOTE: This module is extremely limited at the moment and will be expanded. :param text: The text to convert. :param text_format: The text format of the text. Currently supports 'plain', 'html' or None for skipping SSML conversion.
[ "Convert", "text", "to", "SSML", "based", "on", "the", "text", "s", "current", "format", ".", "NOTE", ":", "This", "module", "is", "extremely", "limited", "at", "the", "moment", "and", "will", "be", "expanded", "." ]
train
https://github.com/nbedi/typecaster/blob/09eee6d4fbad9f70c90364ea89ab39917f903afc/typecaster/ssml.py#L19-L37
nbedi/typecaster
typecaster/ssml.py
html_to_ssml
def html_to_ssml(text): """ Replaces specific html tags with probable SSML counterparts. """ ssml_text = reduce(lambda x, y: x.replace(y, html_to_ssml_maps[y]), html_to_ssml_maps, text) return ssml_text
python
def html_to_ssml(text): """ Replaces specific html tags with probable SSML counterparts. """ ssml_text = reduce(lambda x, y: x.replace(y, html_to_ssml_maps[y]), html_to_ssml_maps, text) return ssml_text
[ "def", "html_to_ssml", "(", "text", ")", ":", "ssml_text", "=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", ".", "replace", "(", "y", ",", "html_to_ssml_maps", "[", "y", "]", ")", ",", "html_to_ssml_maps", ",", "text", ")", "return", "ssml_text" ]
Replaces specific html tags with probable SSML counterparts.
[ "Replaces", "specific", "html", "tags", "with", "probable", "SSML", "counterparts", "." ]
train
https://github.com/nbedi/typecaster/blob/09eee6d4fbad9f70c90364ea89ab39917f903afc/typecaster/ssml.py#L48-L53
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/text.py
multiple_replace
def multiple_replace(string, replacements): # type: (str, Dict[str,str]) -> str """Simultaneously replace multiple strigns in a string Args: string (str): Input string replacements (Dict[str,str]): Replacements dictionary Returns: str: String with replacements """ pattern = re.compile("|".join([re.escape(k) for k in sorted(replacements, key=len, reverse=True)]), flags=re.DOTALL) return pattern.sub(lambda x: replacements[x.group(0)], string)
python
def multiple_replace(string, replacements): # type: (str, Dict[str,str]) -> str """Simultaneously replace multiple strigns in a string Args: string (str): Input string replacements (Dict[str,str]): Replacements dictionary Returns: str: String with replacements """ pattern = re.compile("|".join([re.escape(k) for k in sorted(replacements, key=len, reverse=True)]), flags=re.DOTALL) return pattern.sub(lambda x: replacements[x.group(0)], string)
[ "def", "multiple_replace", "(", "string", ",", "replacements", ")", ":", "# type: (str, Dict[str,str]) -> str", "pattern", "=", "re", ".", "compile", "(", "\"|\"", ".", "join", "(", "[", "re", ".", "escape", "(", "k", ")", "for", "k", "in", "sorted", "(", "replacements", ",", "key", "=", "len", ",", "reverse", "=", "True", ")", "]", ")", ",", "flags", "=", "re", ".", "DOTALL", ")", "return", "pattern", ".", "sub", "(", "lambda", "x", ":", "replacements", "[", "x", ".", "group", "(", "0", ")", "]", ",", "string", ")" ]
Simultaneously replace multiple strigns in a string Args: string (str): Input string replacements (Dict[str,str]): Replacements dictionary Returns: str: String with replacements
[ "Simultaneously", "replace", "multiple", "strigns", "in", "a", "string" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/text.py#L9-L22
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/text.py
get_matching_text_in_strs
def get_matching_text_in_strs(a, b, match_min_size=30, ignore='', end_characters=''): # type: (str, str, int, str, str) -> List[str] """Returns a list of matching blocks of text in a and b Args: a (str): First string to match b (str): Second string to match match_min_size (int): Minimum block size to match on. Defaults to 30. ignore (str): Any characters to ignore in matching. Defaults to ''. end_characters (str): End characters to look for. Defaults to ''. Returns: List[str]: List of matching blocks of text """ compare = difflib.SequenceMatcher(lambda x: x in ignore) compare.set_seqs(a=a, b=b) matching_text = list() for match in compare.get_matching_blocks(): start = match.a text = a[start: start+match.size] if end_characters: prev_text = text while len(text) != 0 and text[0] in end_characters: text = text[1:] while len(text) != 0 and text[-1] not in end_characters: text = text[:-1] if len(text) == 0: text = prev_text if len(text) >= match_min_size: matching_text.append(text) return matching_text
python
def get_matching_text_in_strs(a, b, match_min_size=30, ignore='', end_characters=''): # type: (str, str, int, str, str) -> List[str] """Returns a list of matching blocks of text in a and b Args: a (str): First string to match b (str): Second string to match match_min_size (int): Minimum block size to match on. Defaults to 30. ignore (str): Any characters to ignore in matching. Defaults to ''. end_characters (str): End characters to look for. Defaults to ''. Returns: List[str]: List of matching blocks of text """ compare = difflib.SequenceMatcher(lambda x: x in ignore) compare.set_seqs(a=a, b=b) matching_text = list() for match in compare.get_matching_blocks(): start = match.a text = a[start: start+match.size] if end_characters: prev_text = text while len(text) != 0 and text[0] in end_characters: text = text[1:] while len(text) != 0 and text[-1] not in end_characters: text = text[:-1] if len(text) == 0: text = prev_text if len(text) >= match_min_size: matching_text.append(text) return matching_text
[ "def", "get_matching_text_in_strs", "(", "a", ",", "b", ",", "match_min_size", "=", "30", ",", "ignore", "=", "''", ",", "end_characters", "=", "''", ")", ":", "# type: (str, str, int, str, str) -> List[str]", "compare", "=", "difflib", ".", "SequenceMatcher", "(", "lambda", "x", ":", "x", "in", "ignore", ")", "compare", ".", "set_seqs", "(", "a", "=", "a", ",", "b", "=", "b", ")", "matching_text", "=", "list", "(", ")", "for", "match", "in", "compare", ".", "get_matching_blocks", "(", ")", ":", "start", "=", "match", ".", "a", "text", "=", "a", "[", "start", ":", "start", "+", "match", ".", "size", "]", "if", "end_characters", ":", "prev_text", "=", "text", "while", "len", "(", "text", ")", "!=", "0", "and", "text", "[", "0", "]", "in", "end_characters", ":", "text", "=", "text", "[", "1", ":", "]", "while", "len", "(", "text", ")", "!=", "0", "and", "text", "[", "-", "1", "]", "not", "in", "end_characters", ":", "text", "=", "text", "[", ":", "-", "1", "]", "if", "len", "(", "text", ")", "==", "0", ":", "text", "=", "prev_text", "if", "len", "(", "text", ")", ">=", "match_min_size", ":", "matching_text", ".", "append", "(", "text", ")", "return", "matching_text" ]
Returns a list of matching blocks of text in a and b Args: a (str): First string to match b (str): Second string to match match_min_size (int): Minimum block size to match on. Defaults to 30. ignore (str): Any characters to ignore in matching. Defaults to ''. end_characters (str): End characters to look for. Defaults to ''. Returns: List[str]: List of matching blocks of text
[ "Returns", "a", "list", "of", "matching", "blocks", "of", "text", "in", "a", "and", "b" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/text.py#L39-L71
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/text.py
get_matching_text
def get_matching_text(string_list, match_min_size=30, ignore='', end_characters='.!\r\n'): # type: (List[str], int, str, str) -> str """Returns a string containing matching blocks of text in a list of strings followed by non-matching. Args: string_list (List[str]): List of strings to match match_min_size (int): Minimum block size to match on. Defaults to 30. ignore (str): Any characters to ignore in matching. Defaults to ''. end_characters (str): End characters to look for. Defaults to '.\r\n'. Returns: str: String containing matching blocks of text followed by non-matching """ a = string_list[0] for i in range(1, len(string_list)): b = string_list[i] result = get_matching_text_in_strs(a, b, match_min_size=match_min_size, ignore=ignore, end_characters=end_characters) a = ''.join(result) return a
python
def get_matching_text(string_list, match_min_size=30, ignore='', end_characters='.!\r\n'): # type: (List[str], int, str, str) -> str """Returns a string containing matching blocks of text in a list of strings followed by non-matching. Args: string_list (List[str]): List of strings to match match_min_size (int): Minimum block size to match on. Defaults to 30. ignore (str): Any characters to ignore in matching. Defaults to ''. end_characters (str): End characters to look for. Defaults to '.\r\n'. Returns: str: String containing matching blocks of text followed by non-matching """ a = string_list[0] for i in range(1, len(string_list)): b = string_list[i] result = get_matching_text_in_strs(a, b, match_min_size=match_min_size, ignore=ignore, end_characters=end_characters) a = ''.join(result) return a
[ "def", "get_matching_text", "(", "string_list", ",", "match_min_size", "=", "30", ",", "ignore", "=", "''", ",", "end_characters", "=", "'.!\\r\\n'", ")", ":", "# type: (List[str], int, str, str) -> str", "a", "=", "string_list", "[", "0", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "string_list", ")", ")", ":", "b", "=", "string_list", "[", "i", "]", "result", "=", "get_matching_text_in_strs", "(", "a", ",", "b", ",", "match_min_size", "=", "match_min_size", ",", "ignore", "=", "ignore", ",", "end_characters", "=", "end_characters", ")", "a", "=", "''", ".", "join", "(", "result", ")", "return", "a" ]
Returns a string containing matching blocks of text in a list of strings followed by non-matching. Args: string_list (List[str]): List of strings to match match_min_size (int): Minimum block size to match on. Defaults to 30. ignore (str): Any characters to ignore in matching. Defaults to ''. end_characters (str): End characters to look for. Defaults to '.\r\n'. Returns: str: String containing matching blocks of text followed by non-matching
[ "Returns", "a", "string", "containing", "matching", "blocks", "of", "text", "in", "a", "list", "of", "strings", "followed", "by", "non", "-", "matching", "." ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/text.py#L74-L94
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/text.py
get_matching_then_nonmatching_text
def get_matching_then_nonmatching_text(string_list, separator='', match_min_size=30, ignore='', end_characters='.!\r\n'): # type: (List[str], str, int, str, str) -> str """Returns a string containing matching blocks of text in a list of strings followed by non-matching. Args: string_list (List[str]): List of strings to match separator (str): Separator to add between blocks of text. Defaults to ''. match_min_size (int): Minimum block size to match on. Defaults to 30. ignore (str): Any characters to ignore in matching. Defaults to ''. end_characters (str): End characters to look for. Defaults to '.\r\n'. Returns: str: String containing matching blocks of text followed by non-matching """ def add_separator_if_needed(text_list): if separator and len(text_list) > 0 and text_list[-1][-len(separator):] != separator: text_list.append(separator) a = string_list[0] for i in range(1, len(string_list)): b = string_list[i] combined_len = len(a) + len(b) result = get_matching_text_in_strs(a, b, match_min_size=match_min_size, ignore=ignore, end_characters=end_characters) new_a = a new_b = b for text in result: new_a = new_a.replace(text, '') new_b = new_b.replace(text, '') if new_a and new_a in a: pos_a = a.index(new_a) else: pos_a = combined_len if new_b and new_b in b: pos_b = b.index(new_b) else: pos_b = combined_len if pos_b > pos_a: text_1 = new_b pos_1 = pos_b text_2 = new_a pos_2 = pos_a else: text_1 = new_a pos_1 = pos_a text_2 = new_b pos_2 = pos_b output = list() pos = 0 for text in result: output.append(text) pos += len(text) if text_1 and pos >= pos_1: add_separator_if_needed(output) output.append(text_1) pos += len(text_1) text_1 = None if text_2 and pos >= pos_2: add_separator_if_needed(output) output.append(text_2) pos += len(text_2) text_2 = None if text_1 and pos_1 == combined_len: add_separator_if_needed(output) output.append(text_1) if text_2 and pos_2 == combined_len: add_separator_if_needed(output) output.append(text_2) a = ''.join(output) return a
python
def get_matching_then_nonmatching_text(string_list, separator='', match_min_size=30, ignore='', end_characters='.!\r\n'): # type: (List[str], str, int, str, str) -> str """Returns a string containing matching blocks of text in a list of strings followed by non-matching. Args: string_list (List[str]): List of strings to match separator (str): Separator to add between blocks of text. Defaults to ''. match_min_size (int): Minimum block size to match on. Defaults to 30. ignore (str): Any characters to ignore in matching. Defaults to ''. end_characters (str): End characters to look for. Defaults to '.\r\n'. Returns: str: String containing matching blocks of text followed by non-matching """ def add_separator_if_needed(text_list): if separator and len(text_list) > 0 and text_list[-1][-len(separator):] != separator: text_list.append(separator) a = string_list[0] for i in range(1, len(string_list)): b = string_list[i] combined_len = len(a) + len(b) result = get_matching_text_in_strs(a, b, match_min_size=match_min_size, ignore=ignore, end_characters=end_characters) new_a = a new_b = b for text in result: new_a = new_a.replace(text, '') new_b = new_b.replace(text, '') if new_a and new_a in a: pos_a = a.index(new_a) else: pos_a = combined_len if new_b and new_b in b: pos_b = b.index(new_b) else: pos_b = combined_len if pos_b > pos_a: text_1 = new_b pos_1 = pos_b text_2 = new_a pos_2 = pos_a else: text_1 = new_a pos_1 = pos_a text_2 = new_b pos_2 = pos_b output = list() pos = 0 for text in result: output.append(text) pos += len(text) if text_1 and pos >= pos_1: add_separator_if_needed(output) output.append(text_1) pos += len(text_1) text_1 = None if text_2 and pos >= pos_2: add_separator_if_needed(output) output.append(text_2) pos += len(text_2) text_2 = None if text_1 and pos_1 == combined_len: add_separator_if_needed(output) output.append(text_1) if text_2 and pos_2 == combined_len: add_separator_if_needed(output) output.append(text_2) a = ''.join(output) return a
[ "def", "get_matching_then_nonmatching_text", "(", "string_list", ",", "separator", "=", "''", ",", "match_min_size", "=", "30", ",", "ignore", "=", "''", ",", "end_characters", "=", "'.!\\r\\n'", ")", ":", "# type: (List[str], str, int, str, str) -> str", "def", "add_separator_if_needed", "(", "text_list", ")", ":", "if", "separator", "and", "len", "(", "text_list", ")", ">", "0", "and", "text_list", "[", "-", "1", "]", "[", "-", "len", "(", "separator", ")", ":", "]", "!=", "separator", ":", "text_list", ".", "append", "(", "separator", ")", "a", "=", "string_list", "[", "0", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "string_list", ")", ")", ":", "b", "=", "string_list", "[", "i", "]", "combined_len", "=", "len", "(", "a", ")", "+", "len", "(", "b", ")", "result", "=", "get_matching_text_in_strs", "(", "a", ",", "b", ",", "match_min_size", "=", "match_min_size", ",", "ignore", "=", "ignore", ",", "end_characters", "=", "end_characters", ")", "new_a", "=", "a", "new_b", "=", "b", "for", "text", "in", "result", ":", "new_a", "=", "new_a", ".", "replace", "(", "text", ",", "''", ")", "new_b", "=", "new_b", ".", "replace", "(", "text", ",", "''", ")", "if", "new_a", "and", "new_a", "in", "a", ":", "pos_a", "=", "a", ".", "index", "(", "new_a", ")", "else", ":", "pos_a", "=", "combined_len", "if", "new_b", "and", "new_b", "in", "b", ":", "pos_b", "=", "b", ".", "index", "(", "new_b", ")", "else", ":", "pos_b", "=", "combined_len", "if", "pos_b", ">", "pos_a", ":", "text_1", "=", "new_b", "pos_1", "=", "pos_b", "text_2", "=", "new_a", "pos_2", "=", "pos_a", "else", ":", "text_1", "=", "new_a", "pos_1", "=", "pos_a", "text_2", "=", "new_b", "pos_2", "=", "pos_b", "output", "=", "list", "(", ")", "pos", "=", "0", "for", "text", "in", "result", ":", "output", ".", "append", "(", "text", ")", "pos", "+=", "len", "(", "text", ")", "if", "text_1", "and", "pos", ">=", "pos_1", ":", "add_separator_if_needed", "(", "output", ")", "output", ".", "append", "(", 
"text_1", ")", "pos", "+=", "len", "(", "text_1", ")", "text_1", "=", "None", "if", "text_2", "and", "pos", ">=", "pos_2", ":", "add_separator_if_needed", "(", "output", ")", "output", ".", "append", "(", "text_2", ")", "pos", "+=", "len", "(", "text_2", ")", "text_2", "=", "None", "if", "text_1", "and", "pos_1", "==", "combined_len", ":", "add_separator_if_needed", "(", "output", ")", "output", ".", "append", "(", "text_1", ")", "if", "text_2", "and", "pos_2", "==", "combined_len", ":", "add_separator_if_needed", "(", "output", ")", "output", ".", "append", "(", "text_2", ")", "a", "=", "''", ".", "join", "(", "output", ")", "return", "a" ]
Returns a string containing matching blocks of text in a list of strings followed by non-matching. Args: string_list (List[str]): List of strings to match separator (str): Separator to add between blocks of text. Defaults to ''. match_min_size (int): Minimum block size to match on. Defaults to 30. ignore (str): Any characters to ignore in matching. Defaults to ''. end_characters (str): End characters to look for. Defaults to '.\r\n'. Returns: str: String containing matching blocks of text followed by non-matching
[ "Returns", "a", "string", "containing", "matching", "blocks", "of", "text", "in", "a", "list", "of", "strings", "followed", "by", "non", "-", "matching", "." ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/text.py#L97-L168
facelessuser/bracex
bracex/__init__.py
iexpand
def iexpand(string, keep_escapes=False): """Expand braces and return an iterator.""" if isinstance(string, bytes): is_bytes = True string = string.decode('latin-1') else: is_bytes = False if is_bytes: return (entry.encode('latin-1') for entry in ExpandBrace(keep_escapes).expand(string)) else: return (entry for entry in ExpandBrace(keep_escapes).expand(string))
python
def iexpand(string, keep_escapes=False): """Expand braces and return an iterator.""" if isinstance(string, bytes): is_bytes = True string = string.decode('latin-1') else: is_bytes = False if is_bytes: return (entry.encode('latin-1') for entry in ExpandBrace(keep_escapes).expand(string)) else: return (entry for entry in ExpandBrace(keep_escapes).expand(string))
[ "def", "iexpand", "(", "string", ",", "keep_escapes", "=", "False", ")", ":", "if", "isinstance", "(", "string", ",", "bytes", ")", ":", "is_bytes", "=", "True", "string", "=", "string", ".", "decode", "(", "'latin-1'", ")", "else", ":", "is_bytes", "=", "False", "if", "is_bytes", ":", "return", "(", "entry", ".", "encode", "(", "'latin-1'", ")", "for", "entry", "in", "ExpandBrace", "(", "keep_escapes", ")", ".", "expand", "(", "string", ")", ")", "else", ":", "return", "(", "entry", "for", "entry", "in", "ExpandBrace", "(", "keep_escapes", ")", ".", "expand", "(", "string", ")", ")" ]
Expand braces and return an iterator.
[ "Expand", "braces", "and", "return", "an", "iterator", "." ]
train
https://github.com/facelessuser/bracex/blob/1fdf83e2bdfb939e78ba9966bcef80cd7a5c8534/bracex/__init__.py#L50-L64
facelessuser/bracex
bracex/__init__.py
ExpandBrace.set_expanding
def set_expanding(self): """Set that we are expanding a sequence, and return whether a release is required by the caller.""" status = not self.expanding if status: self.expanding = True return status
python
def set_expanding(self): """Set that we are expanding a sequence, and return whether a release is required by the caller.""" status = not self.expanding if status: self.expanding = True return status
[ "def", "set_expanding", "(", "self", ")", ":", "status", "=", "not", "self", ".", "expanding", "if", "status", ":", "self", ".", "expanding", "=", "True", "return", "status" ]
Set that we are expanding a sequence, and return whether a release is required by the caller.
[ "Set", "that", "we", "are", "expanding", "a", "sequence", "and", "return", "whether", "a", "release", "is", "required", "by", "the", "caller", "." ]
train
https://github.com/facelessuser/bracex/blob/1fdf83e2bdfb939e78ba9966bcef80cd7a5c8534/bracex/__init__.py#L140-L146
facelessuser/bracex
bracex/__init__.py
ExpandBrace.get_escape
def get_escape(self, c, i): """Get an escape.""" try: escaped = next(i) except StopIteration: escaped = '' return c + escaped if self.keep_escapes else escaped
python
def get_escape(self, c, i): """Get an escape.""" try: escaped = next(i) except StopIteration: escaped = '' return c + escaped if self.keep_escapes else escaped
[ "def", "get_escape", "(", "self", ",", "c", ",", "i", ")", ":", "try", ":", "escaped", "=", "next", "(", "i", ")", "except", "StopIteration", ":", "escaped", "=", "''", "return", "c", "+", "escaped", "if", "self", ".", "keep_escapes", "else", "escaped" ]
Get an escape.
[ "Get", "an", "escape", "." ]
train
https://github.com/facelessuser/bracex/blob/1fdf83e2bdfb939e78ba9966bcef80cd7a5c8534/bracex/__init__.py#L159-L166
facelessuser/bracex
bracex/__init__.py
ExpandBrace.squash
def squash(self, a, b): """ Returns a generator that squashes two iterables into one. ``` ['this', 'that'], [[' and', ' or']] => ['this and', 'this or', 'that and', 'that or'] ``` """ return ((''.join(x) if isinstance(x, tuple) else x) for x in itertools.product(a, b))
python
def squash(self, a, b): """ Returns a generator that squashes two iterables into one. ``` ['this', 'that'], [[' and', ' or']] => ['this and', 'this or', 'that and', 'that or'] ``` """ return ((''.join(x) if isinstance(x, tuple) else x) for x in itertools.product(a, b))
[ "def", "squash", "(", "self", ",", "a", ",", "b", ")", ":", "return", "(", "(", "''", ".", "join", "(", "x", ")", "if", "isinstance", "(", "x", ",", "tuple", ")", "else", "x", ")", "for", "x", "in", "itertools", ".", "product", "(", "a", ",", "b", ")", ")" ]
Returns a generator that squashes two iterables into one. ``` ['this', 'that'], [[' and', ' or']] => ['this and', 'this or', 'that and', 'that or'] ```
[ "Returns", "a", "generator", "that", "squashes", "two", "iterables", "into", "one", "." ]
train
https://github.com/facelessuser/bracex/blob/1fdf83e2bdfb939e78ba9966bcef80cd7a5c8534/bracex/__init__.py#L168-L177
facelessuser/bracex
bracex/__init__.py
ExpandBrace.get_literals
def get_literals(self, c, i, depth): """ Get a string literal. Gather all the literal chars up to opening curly or closing brace. Also gather chars between braces and commas within a group (is_expanding). """ result = [''] is_dollar = False try: while c: ignore_brace = is_dollar is_dollar = False if c == '$': is_dollar = True elif c == '\\': c = [self.get_escape(c, i)] elif not ignore_brace and c == '{': # Try and get the group index = i.index try: seq = self.get_sequence(next(i), i, depth + 1) if seq: c = seq except StopIteration: # Searched to end of string # and still didn't find it. i.rewind(i.index - index) elif self.is_expanding() and c in (',', '}'): # We are Expanding within a group and found a group delimiter # Return what we gathered before the group delimiters. i.rewind(1) return (x for x in result) # Squash the current set of literals. result = self.squash(result, [c] if isinstance(c, str) else c) c = next(i) except StopIteration: if self.is_expanding(): return None return (x for x in result)
python
def get_literals(self, c, i, depth): """ Get a string literal. Gather all the literal chars up to opening curly or closing brace. Also gather chars between braces and commas within a group (is_expanding). """ result = [''] is_dollar = False try: while c: ignore_brace = is_dollar is_dollar = False if c == '$': is_dollar = True elif c == '\\': c = [self.get_escape(c, i)] elif not ignore_brace and c == '{': # Try and get the group index = i.index try: seq = self.get_sequence(next(i), i, depth + 1) if seq: c = seq except StopIteration: # Searched to end of string # and still didn't find it. i.rewind(i.index - index) elif self.is_expanding() and c in (',', '}'): # We are Expanding within a group and found a group delimiter # Return what we gathered before the group delimiters. i.rewind(1) return (x for x in result) # Squash the current set of literals. result = self.squash(result, [c] if isinstance(c, str) else c) c = next(i) except StopIteration: if self.is_expanding(): return None return (x for x in result)
[ "def", "get_literals", "(", "self", ",", "c", ",", "i", ",", "depth", ")", ":", "result", "=", "[", "''", "]", "is_dollar", "=", "False", "try", ":", "while", "c", ":", "ignore_brace", "=", "is_dollar", "is_dollar", "=", "False", "if", "c", "==", "'$'", ":", "is_dollar", "=", "True", "elif", "c", "==", "'\\\\'", ":", "c", "=", "[", "self", ".", "get_escape", "(", "c", ",", "i", ")", "]", "elif", "not", "ignore_brace", "and", "c", "==", "'{'", ":", "# Try and get the group", "index", "=", "i", ".", "index", "try", ":", "seq", "=", "self", ".", "get_sequence", "(", "next", "(", "i", ")", ",", "i", ",", "depth", "+", "1", ")", "if", "seq", ":", "c", "=", "seq", "except", "StopIteration", ":", "# Searched to end of string", "# and still didn't find it.", "i", ".", "rewind", "(", "i", ".", "index", "-", "index", ")", "elif", "self", ".", "is_expanding", "(", ")", "and", "c", "in", "(", "','", ",", "'}'", ")", ":", "# We are Expanding within a group and found a group delimiter", "# Return what we gathered before the group delimiters.", "i", ".", "rewind", "(", "1", ")", "return", "(", "x", "for", "x", "in", "result", ")", "# Squash the current set of literals.", "result", "=", "self", ".", "squash", "(", "result", ",", "[", "c", "]", "if", "isinstance", "(", "c", ",", "str", ")", "else", "c", ")", "c", "=", "next", "(", "i", ")", "except", "StopIteration", ":", "if", "self", ".", "is_expanding", "(", ")", ":", "return", "None", "return", "(", "x", "for", "x", "in", "result", ")" ]
Get a string literal. Gather all the literal chars up to opening curly or closing brace. Also gather chars between braces and commas within a group (is_expanding).
[ "Get", "a", "string", "literal", "." ]
train
https://github.com/facelessuser/bracex/blob/1fdf83e2bdfb939e78ba9966bcef80cd7a5c8534/bracex/__init__.py#L179-L227
facelessuser/bracex
bracex/__init__.py
ExpandBrace.combine
def combine(self, a, b): """A generator that combines two iterables.""" for l in (a, b): for x in l: yield x
python
def combine(self, a, b): """A generator that combines two iterables.""" for l in (a, b): for x in l: yield x
[ "def", "combine", "(", "self", ",", "a", ",", "b", ")", ":", "for", "l", "in", "(", "a", ",", "b", ")", ":", "for", "x", "in", "l", ":", "yield", "x" ]
A generator that combines two iterables.
[ "A", "generator", "that", "combines", "two", "iterables", "." ]
train
https://github.com/facelessuser/bracex/blob/1fdf83e2bdfb939e78ba9966bcef80cd7a5c8534/bracex/__init__.py#L229-L234
facelessuser/bracex
bracex/__init__.py
ExpandBrace.get_sequence
def get_sequence(self, c, i, depth): """ Get the sequence. Get sequence between `{}`, such as: `{a,b}`, `{1..2[..inc]}`, etc. It will basically crawl to the end or find a valid series. """ result = [] release = self.set_expanding() has_comma = False # Used to indicate validity of group (`{1..2}` are an exception). is_empty = True # Tracks whether the current slot is empty `{slot,slot,slot}`. # Detect numerical and alphabetic series: `{1..2}` etc. i.rewind(1) item = self.get_range(i) i.advance(1) if item is not None: self.release_expanding(release) return (x for x in item) try: while c: # Bash has some special top level logic. if `}` follows `{` but hasn't matched # a group yet, keep going except when the first 2 bytes are `{}` which gets # completely ignored. keep_looking = depth == 1 and not has_comma # and i.index not in self.skip_index if (c == '}' and (not keep_looking or i.index == 2)): # If there is no comma, we know the sequence is bogus. if is_empty: result = (x for x in self.combine(result, [''])) if not has_comma: result = ('{' + literal + '}' for literal in result) self.release_expanding(release) return (x for x in result) elif c == ',': # Must be the first element in the list. has_comma = True if is_empty: result = (x for x in self.combine(result, [''])) else: is_empty = True else: if c == '}': # Top level: If we didn't find a comma, we haven't # completed the top level group. Request more and # append to what we already have for the first slot. if not result: result = (x for x in self.combine(result, [c])) else: result = self.squash(result, [c]) value = self.get_literals(next(i), i, depth) if value is not None: result = self.squash(result, value) is_empty = False else: # Lower level: Try to find group, but give up if cannot acquire. value = self.get_literals(c, i, depth) if value is not None: result = (x for x in self.combine(result, value)) is_empty = False c = next(i) except StopIteration: self.release_expanding(release) raise
python
def get_sequence(self, c, i, depth): """ Get the sequence. Get sequence between `{}`, such as: `{a,b}`, `{1..2[..inc]}`, etc. It will basically crawl to the end or find a valid series. """ result = [] release = self.set_expanding() has_comma = False # Used to indicate validity of group (`{1..2}` are an exception). is_empty = True # Tracks whether the current slot is empty `{slot,slot,slot}`. # Detect numerical and alphabetic series: `{1..2}` etc. i.rewind(1) item = self.get_range(i) i.advance(1) if item is not None: self.release_expanding(release) return (x for x in item) try: while c: # Bash has some special top level logic. if `}` follows `{` but hasn't matched # a group yet, keep going except when the first 2 bytes are `{}` which gets # completely ignored. keep_looking = depth == 1 and not has_comma # and i.index not in self.skip_index if (c == '}' and (not keep_looking or i.index == 2)): # If there is no comma, we know the sequence is bogus. if is_empty: result = (x for x in self.combine(result, [''])) if not has_comma: result = ('{' + literal + '}' for literal in result) self.release_expanding(release) return (x for x in result) elif c == ',': # Must be the first element in the list. has_comma = True if is_empty: result = (x for x in self.combine(result, [''])) else: is_empty = True else: if c == '}': # Top level: If we didn't find a comma, we haven't # completed the top level group. Request more and # append to what we already have for the first slot. if not result: result = (x for x in self.combine(result, [c])) else: result = self.squash(result, [c]) value = self.get_literals(next(i), i, depth) if value is not None: result = self.squash(result, value) is_empty = False else: # Lower level: Try to find group, but give up if cannot acquire. value = self.get_literals(c, i, depth) if value is not None: result = (x for x in self.combine(result, value)) is_empty = False c = next(i) except StopIteration: self.release_expanding(release) raise
[ "def", "get_sequence", "(", "self", ",", "c", ",", "i", ",", "depth", ")", ":", "result", "=", "[", "]", "release", "=", "self", ".", "set_expanding", "(", ")", "has_comma", "=", "False", "# Used to indicate validity of group (`{1..2}` are an exception).", "is_empty", "=", "True", "# Tracks whether the current slot is empty `{slot,slot,slot}`.", "# Detect numerical and alphabetic series: `{1..2}` etc.", "i", ".", "rewind", "(", "1", ")", "item", "=", "self", ".", "get_range", "(", "i", ")", "i", ".", "advance", "(", "1", ")", "if", "item", "is", "not", "None", ":", "self", ".", "release_expanding", "(", "release", ")", "return", "(", "x", "for", "x", "in", "item", ")", "try", ":", "while", "c", ":", "# Bash has some special top level logic. if `}` follows `{` but hasn't matched", "# a group yet, keep going except when the first 2 bytes are `{}` which gets", "# completely ignored.", "keep_looking", "=", "depth", "==", "1", "and", "not", "has_comma", "# and i.index not in self.skip_index", "if", "(", "c", "==", "'}'", "and", "(", "not", "keep_looking", "or", "i", ".", "index", "==", "2", ")", ")", ":", "# If there is no comma, we know the sequence is bogus.", "if", "is_empty", ":", "result", "=", "(", "x", "for", "x", "in", "self", ".", "combine", "(", "result", ",", "[", "''", "]", ")", ")", "if", "not", "has_comma", ":", "result", "=", "(", "'{'", "+", "literal", "+", "'}'", "for", "literal", "in", "result", ")", "self", ".", "release_expanding", "(", "release", ")", "return", "(", "x", "for", "x", "in", "result", ")", "elif", "c", "==", "','", ":", "# Must be the first element in the list.", "has_comma", "=", "True", "if", "is_empty", ":", "result", "=", "(", "x", "for", "x", "in", "self", ".", "combine", "(", "result", ",", "[", "''", "]", ")", ")", "else", ":", "is_empty", "=", "True", "else", ":", "if", "c", "==", "'}'", ":", "# Top level: If we didn't find a comma, we haven't", "# completed the top level group. 
Request more and", "# append to what we already have for the first slot.", "if", "not", "result", ":", "result", "=", "(", "x", "for", "x", "in", "self", ".", "combine", "(", "result", ",", "[", "c", "]", ")", ")", "else", ":", "result", "=", "self", ".", "squash", "(", "result", ",", "[", "c", "]", ")", "value", "=", "self", ".", "get_literals", "(", "next", "(", "i", ")", ",", "i", ",", "depth", ")", "if", "value", "is", "not", "None", ":", "result", "=", "self", ".", "squash", "(", "result", ",", "value", ")", "is_empty", "=", "False", "else", ":", "# Lower level: Try to find group, but give up if cannot acquire.", "value", "=", "self", ".", "get_literals", "(", "c", ",", "i", ",", "depth", ")", "if", "value", "is", "not", "None", ":", "result", "=", "(", "x", "for", "x", "in", "self", ".", "combine", "(", "result", ",", "value", ")", ")", "is_empty", "=", "False", "c", "=", "next", "(", "i", ")", "except", "StopIteration", ":", "self", ".", "release_expanding", "(", "release", ")", "raise" ]
Get the sequence. Get sequence between `{}`, such as: `{a,b}`, `{1..2[..inc]}`, etc. It will basically crawl to the end or find a valid series.
[ "Get", "the", "sequence", "." ]
train
https://github.com/facelessuser/bracex/blob/1fdf83e2bdfb939e78ba9966bcef80cd7a5c8534/bracex/__init__.py#L236-L303
facelessuser/bracex
bracex/__init__.py
ExpandBrace.get_range
def get_range(self, i): """ Check and retrieve range if value is a valid range. Here we are looking to see if the value is series or range. We look for `{1..2[..inc]}` or `{a..z[..inc]}` (negative numbers are fine). """ try: m = i.match(RE_INT_ITER) if m: return self.get_int_range(*m.groups()) m = i.match(RE_CHR_ITER) if m: return self.get_char_range(*m.groups()) except Exception: # pragma: no cover # TODO: We really should never fail here, # but if we do, assume the sequence range # was invalid. This catch can probably # be removed in the future with more testing. pass return None
python
def get_range(self, i): """ Check and retrieve range if value is a valid range. Here we are looking to see if the value is series or range. We look for `{1..2[..inc]}` or `{a..z[..inc]}` (negative numbers are fine). """ try: m = i.match(RE_INT_ITER) if m: return self.get_int_range(*m.groups()) m = i.match(RE_CHR_ITER) if m: return self.get_char_range(*m.groups()) except Exception: # pragma: no cover # TODO: We really should never fail here, # but if we do, assume the sequence range # was invalid. This catch can probably # be removed in the future with more testing. pass return None
[ "def", "get_range", "(", "self", ",", "i", ")", ":", "try", ":", "m", "=", "i", ".", "match", "(", "RE_INT_ITER", ")", "if", "m", ":", "return", "self", ".", "get_int_range", "(", "*", "m", ".", "groups", "(", ")", ")", "m", "=", "i", ".", "match", "(", "RE_CHR_ITER", ")", "if", "m", ":", "return", "self", ".", "get_char_range", "(", "*", "m", ".", "groups", "(", ")", ")", "except", "Exception", ":", "# pragma: no cover", "# TODO: We really should never fail here,", "# but if we do, assume the sequence range", "# was invalid. This catch can probably", "# be removed in the future with more testing.", "pass", "return", "None" ]
Check and retrieve range if value is a valid range. Here we are looking to see if the value is series or range. We look for `{1..2[..inc]}` or `{a..z[..inc]}` (negative numbers are fine).
[ "Check", "and", "retrieve", "range", "if", "value", "is", "a", "valid", "range", "." ]
train
https://github.com/facelessuser/bracex/blob/1fdf83e2bdfb939e78ba9966bcef80cd7a5c8534/bracex/__init__.py#L305-L328
facelessuser/bracex
bracex/__init__.py
ExpandBrace.format_value
def format_value(self, value, padding): """Get padding adjusting for negative values.""" # padding = padding - 1 if value < 0 and padding > 0 else padding # prefix = '-' if value < 0 else '' if padding: return "{:0{pad}d}".format(value, pad=padding) else: return str(value)
python
def format_value(self, value, padding): """Get padding adjusting for negative values.""" # padding = padding - 1 if value < 0 and padding > 0 else padding # prefix = '-' if value < 0 else '' if padding: return "{:0{pad}d}".format(value, pad=padding) else: return str(value)
[ "def", "format_value", "(", "self", ",", "value", ",", "padding", ")", ":", "# padding = padding - 1 if value < 0 and padding > 0 else padding", "# prefix = '-' if value < 0 else ''", "if", "padding", ":", "return", "\"{:0{pad}d}\"", ".", "format", "(", "value", ",", "pad", "=", "padding", ")", "else", ":", "return", "str", "(", "value", ")" ]
Get padding adjusting for negative values.
[ "Get", "padding", "adjusting", "for", "negative", "values", "." ]
train
https://github.com/facelessuser/bracex/blob/1fdf83e2bdfb939e78ba9966bcef80cd7a5c8534/bracex/__init__.py#L330-L340
facelessuser/bracex
bracex/__init__.py
ExpandBrace.get_int_range
def get_int_range(self, start, end, increment=None): """Get an integer range between start and end and increments of increment.""" first, last = int(start), int(end) increment = int(increment) if increment is not None else 1 max_length = max(len(start), len(end)) # Zero doesn't make sense as an incrementer # but like bash, just assume one if increment == 0: increment = 1 if start[0] == '-': start = start[1:] if end[0] == '-': end = end[1:] if (len(start) > 1 and start[0] == '0') or (len(end) > 1 and end[0] == '0'): padding = max_length else: padding = 0 if first < last: r = range(first, last + 1, -increment if increment < 0 else increment) else: r = range(first, last - 1, increment if increment < 0 else -increment) return (self.format_value(value, padding) for value in r)
python
def get_int_range(self, start, end, increment=None): """Get an integer range between start and end and increments of increment.""" first, last = int(start), int(end) increment = int(increment) if increment is not None else 1 max_length = max(len(start), len(end)) # Zero doesn't make sense as an incrementer # but like bash, just assume one if increment == 0: increment = 1 if start[0] == '-': start = start[1:] if end[0] == '-': end = end[1:] if (len(start) > 1 and start[0] == '0') or (len(end) > 1 and end[0] == '0'): padding = max_length else: padding = 0 if first < last: r = range(first, last + 1, -increment if increment < 0 else increment) else: r = range(first, last - 1, increment if increment < 0 else -increment) return (self.format_value(value, padding) for value in r)
[ "def", "get_int_range", "(", "self", ",", "start", ",", "end", ",", "increment", "=", "None", ")", ":", "first", ",", "last", "=", "int", "(", "start", ")", ",", "int", "(", "end", ")", "increment", "=", "int", "(", "increment", ")", "if", "increment", "is", "not", "None", "else", "1", "max_length", "=", "max", "(", "len", "(", "start", ")", ",", "len", "(", "end", ")", ")", "# Zero doesn't make sense as an incrementer", "# but like bash, just assume one", "if", "increment", "==", "0", ":", "increment", "=", "1", "if", "start", "[", "0", "]", "==", "'-'", ":", "start", "=", "start", "[", "1", ":", "]", "if", "end", "[", "0", "]", "==", "'-'", ":", "end", "=", "end", "[", "1", ":", "]", "if", "(", "len", "(", "start", ")", ">", "1", "and", "start", "[", "0", "]", "==", "'0'", ")", "or", "(", "len", "(", "end", ")", ">", "1", "and", "end", "[", "0", "]", "==", "'0'", ")", ":", "padding", "=", "max_length", "else", ":", "padding", "=", "0", "if", "first", "<", "last", ":", "r", "=", "range", "(", "first", ",", "last", "+", "1", ",", "-", "increment", "if", "increment", "<", "0", "else", "increment", ")", "else", ":", "r", "=", "range", "(", "first", ",", "last", "-", "1", ",", "increment", "if", "increment", "<", "0", "else", "-", "increment", ")", "return", "(", "self", ".", "format_value", "(", "value", ",", "padding", ")", "for", "value", "in", "r", ")" ]
Get an integer range between start and end and increments of increment.
[ "Get", "an", "integer", "range", "between", "start", "and", "end", "and", "increments", "of", "increment", "." ]
train
https://github.com/facelessuser/bracex/blob/1fdf83e2bdfb939e78ba9966bcef80cd7a5c8534/bracex/__init__.py#L342-L372
facelessuser/bracex
bracex/__init__.py
ExpandBrace.get_char_range
def get_char_range(self, start, end, increment=None): """Get a range of alphabetic characters.""" increment = int(increment) if increment else 1 if increment < 0: increment = -increment # Zero doesn't make sense as an incrementer # but like bash, just assume one if increment == 0: increment = 1 inverse = start > end alpha = _nalpha if inverse else _alpha start = alpha.index(start) end = alpha.index(end) if start < end: return (c for c in alpha[start:end + 1:increment]) else: return (c for c in alpha[end:start + 1:increment])
python
def get_char_range(self, start, end, increment=None): """Get a range of alphabetic characters.""" increment = int(increment) if increment else 1 if increment < 0: increment = -increment # Zero doesn't make sense as an incrementer # but like bash, just assume one if increment == 0: increment = 1 inverse = start > end alpha = _nalpha if inverse else _alpha start = alpha.index(start) end = alpha.index(end) if start < end: return (c for c in alpha[start:end + 1:increment]) else: return (c for c in alpha[end:start + 1:increment])
[ "def", "get_char_range", "(", "self", ",", "start", ",", "end", ",", "increment", "=", "None", ")", ":", "increment", "=", "int", "(", "increment", ")", "if", "increment", "else", "1", "if", "increment", "<", "0", ":", "increment", "=", "-", "increment", "# Zero doesn't make sense as an incrementer", "# but like bash, just assume one", "if", "increment", "==", "0", ":", "increment", "=", "1", "inverse", "=", "start", ">", "end", "alpha", "=", "_nalpha", "if", "inverse", "else", "_alpha", "start", "=", "alpha", ".", "index", "(", "start", ")", "end", "=", "alpha", ".", "index", "(", "end", ")", "if", "start", "<", "end", ":", "return", "(", "c", "for", "c", "in", "alpha", "[", "start", ":", "end", "+", "1", ":", "increment", "]", ")", "else", ":", "return", "(", "c", "for", "c", "in", "alpha", "[", "end", ":", "start", "+", "1", ":", "increment", "]", ")" ]
Get a range of alphabetic characters.
[ "Get", "a", "range", "of", "alphabetic", "characters", "." ]
train
https://github.com/facelessuser/bracex/blob/1fdf83e2bdfb939e78ba9966bcef80cd7a5c8534/bracex/__init__.py#L374-L396
facelessuser/bracex
bracex/__init__.py
ExpandBrace.expand
def expand(self, string): """Expand.""" self.expanding = False empties = [] found_literal = False if string: i = iter(StringIter(string)) for x in self.get_literals(next(i), i, 0): # We don't want to return trailing empty strings. # Store empty strings and output only when followed by a literal. if not x: empties.append(x) continue found_literal = True while empties: yield empties.pop(0) yield x empties = [] # We found no literals so return an empty string if not found_literal: yield ""
python
def expand(self, string): """Expand.""" self.expanding = False empties = [] found_literal = False if string: i = iter(StringIter(string)) for x in self.get_literals(next(i), i, 0): # We don't want to return trailing empty strings. # Store empty strings and output only when followed by a literal. if not x: empties.append(x) continue found_literal = True while empties: yield empties.pop(0) yield x empties = [] # We found no literals so return an empty string if not found_literal: yield ""
[ "def", "expand", "(", "self", ",", "string", ")", ":", "self", ".", "expanding", "=", "False", "empties", "=", "[", "]", "found_literal", "=", "False", "if", "string", ":", "i", "=", "iter", "(", "StringIter", "(", "string", ")", ")", "for", "x", "in", "self", ".", "get_literals", "(", "next", "(", "i", ")", ",", "i", ",", "0", ")", ":", "# We don't want to return trailing empty strings.", "# Store empty strings and output only when followed by a literal.", "if", "not", "x", ":", "empties", ".", "append", "(", "x", ")", "continue", "found_literal", "=", "True", "while", "empties", ":", "yield", "empties", ".", "pop", "(", "0", ")", "yield", "x", "empties", "=", "[", "]", "# We found no literals so return an empty string", "if", "not", "found_literal", ":", "yield", "\"\"" ]
Expand.
[ "Expand", "." ]
train
https://github.com/facelessuser/bracex/blob/1fdf83e2bdfb939e78ba9966bcef80cd7a5c8534/bracex/__init__.py#L398-L420
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
merge_two_dictionaries
def merge_two_dictionaries(a, b, merge_lists=False): # type: (DictUpperBound, DictUpperBound, bool) -> DictUpperBound """Merges b into a and returns merged result NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen Args: a (DictUpperBound): dictionary to merge into b (DictUpperBound): dictionary to merge from merge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False. Returns: DictUpperBound: Merged dictionary """ key = None # ## debug output # sys.stderr.write('DEBUG: %s to %s\n' %(b,a)) try: if a is None or isinstance(a, (six.string_types, six.text_type, six.integer_types, float)): # border case for first run or if a is a primitive a = b elif isinstance(a, list): # lists can be appended or replaced if isinstance(b, list): if merge_lists: # merge lists a.extend(b) else: # replace list a = b else: # append to list a.append(b) elif isinstance(a, (dict, UserDict)): # dicts must be merged if isinstance(b, (dict, UserDict)): for key in b: if key in a: a[key] = merge_two_dictionaries(a[key], b[key], merge_lists=merge_lists) else: a[key] = b[key] else: raise ValueError('Cannot merge non-dict "%s" into dict "%s"' % (b, a)) else: raise ValueError('NOT IMPLEMENTED "%s" into "%s"' % (b, a)) except TypeError as e: raise ValueError('TypeError "%s" in key "%s" when merging "%s" into "%s"' % (e, key, b, a)) return a
python
def merge_two_dictionaries(a, b, merge_lists=False): # type: (DictUpperBound, DictUpperBound, bool) -> DictUpperBound """Merges b into a and returns merged result NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen Args: a (DictUpperBound): dictionary to merge into b (DictUpperBound): dictionary to merge from merge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False. Returns: DictUpperBound: Merged dictionary """ key = None # ## debug output # sys.stderr.write('DEBUG: %s to %s\n' %(b,a)) try: if a is None or isinstance(a, (six.string_types, six.text_type, six.integer_types, float)): # border case for first run or if a is a primitive a = b elif isinstance(a, list): # lists can be appended or replaced if isinstance(b, list): if merge_lists: # merge lists a.extend(b) else: # replace list a = b else: # append to list a.append(b) elif isinstance(a, (dict, UserDict)): # dicts must be merged if isinstance(b, (dict, UserDict)): for key in b: if key in a: a[key] = merge_two_dictionaries(a[key], b[key], merge_lists=merge_lists) else: a[key] = b[key] else: raise ValueError('Cannot merge non-dict "%s" into dict "%s"' % (b, a)) else: raise ValueError('NOT IMPLEMENTED "%s" into "%s"' % (b, a)) except TypeError as e: raise ValueError('TypeError "%s" in key "%s" when merging "%s" into "%s"' % (e, key, b, a)) return a
[ "def", "merge_two_dictionaries", "(", "a", ",", "b", ",", "merge_lists", "=", "False", ")", ":", "# type: (DictUpperBound, DictUpperBound, bool) -> DictUpperBound", "key", "=", "None", "# ## debug output", "# sys.stderr.write('DEBUG: %s to %s\\n' %(b,a))", "try", ":", "if", "a", "is", "None", "or", "isinstance", "(", "a", ",", "(", "six", ".", "string_types", ",", "six", ".", "text_type", ",", "six", ".", "integer_types", ",", "float", ")", ")", ":", "# border case for first run or if a is a primitive", "a", "=", "b", "elif", "isinstance", "(", "a", ",", "list", ")", ":", "# lists can be appended or replaced", "if", "isinstance", "(", "b", ",", "list", ")", ":", "if", "merge_lists", ":", "# merge lists", "a", ".", "extend", "(", "b", ")", "else", ":", "# replace list", "a", "=", "b", "else", ":", "# append to list", "a", ".", "append", "(", "b", ")", "elif", "isinstance", "(", "a", ",", "(", "dict", ",", "UserDict", ")", ")", ":", "# dicts must be merged", "if", "isinstance", "(", "b", ",", "(", "dict", ",", "UserDict", ")", ")", ":", "for", "key", "in", "b", ":", "if", "key", "in", "a", ":", "a", "[", "key", "]", "=", "merge_two_dictionaries", "(", "a", "[", "key", "]", ",", "b", "[", "key", "]", ",", "merge_lists", "=", "merge_lists", ")", "else", ":", "a", "[", "key", "]", "=", "b", "[", "key", "]", "else", ":", "raise", "ValueError", "(", "'Cannot merge non-dict \"%s\" into dict \"%s\"'", "%", "(", "b", ",", "a", ")", ")", "else", ":", "raise", "ValueError", "(", "'NOT IMPLEMENTED \"%s\" into \"%s\"'", "%", "(", "b", ",", "a", ")", ")", "except", "TypeError", "as", "e", ":", "raise", "ValueError", "(", "'TypeError \"%s\" in key \"%s\" when merging \"%s\" into \"%s\"'", "%", "(", "e", ",", "key", ",", "b", ",", "a", ")", ")", "return", "a" ]
Merges b into a and returns merged result NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen Args: a (DictUpperBound): dictionary to merge into b (DictUpperBound): dictionary to merge from merge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False. Returns: DictUpperBound: Merged dictionary
[ "Merges", "b", "into", "a", "and", "returns", "merged", "result" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L16-L63
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
merge_dictionaries
def merge_dictionaries(dicts, merge_lists=False): # type: (List[DictUpperBound], bool) -> DictUpperBound """Merges all dictionaries in dicts into a single dictionary and returns result Args: dicts (List[DictUpperBound]): Dictionaries to merge into the first one in the list merge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False. Returns: DictUpperBound: Merged dictionary """ dict1 = dicts[0] for other_dict in dicts[1:]: merge_two_dictionaries(dict1, other_dict, merge_lists=merge_lists) return dict1
python
def merge_dictionaries(dicts, merge_lists=False): # type: (List[DictUpperBound], bool) -> DictUpperBound """Merges all dictionaries in dicts into a single dictionary and returns result Args: dicts (List[DictUpperBound]): Dictionaries to merge into the first one in the list merge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False. Returns: DictUpperBound: Merged dictionary """ dict1 = dicts[0] for other_dict in dicts[1:]: merge_two_dictionaries(dict1, other_dict, merge_lists=merge_lists) return dict1
[ "def", "merge_dictionaries", "(", "dicts", ",", "merge_lists", "=", "False", ")", ":", "# type: (List[DictUpperBound], bool) -> DictUpperBound", "dict1", "=", "dicts", "[", "0", "]", "for", "other_dict", "in", "dicts", "[", "1", ":", "]", ":", "merge_two_dictionaries", "(", "dict1", ",", "other_dict", ",", "merge_lists", "=", "merge_lists", ")", "return", "dict1" ]
Merges all dictionaries in dicts into a single dictionary and returns result Args: dicts (List[DictUpperBound]): Dictionaries to merge into the first one in the list merge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False. Returns: DictUpperBound: Merged dictionary
[ "Merges", "all", "dictionaries", "in", "dicts", "into", "a", "single", "dictionary", "and", "returns", "result" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L66-L81
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
dict_diff
def dict_diff(d1, d2, no_key='<KEYNOTFOUND>'): # type: (DictUpperBound, DictUpperBound, str) -> Dict """Compares two dictionaries Args: d1 (DictUpperBound): First dictionary to compare d2 (DictUpperBound): Second dictionary to compare no_key (str): What value to use if key is not found Defaults to '<KEYNOTFOUND>'. Returns: Dict: Comparison dictionary """ d1keys = set(d1.keys()) d2keys = set(d2.keys()) both = d1keys & d2keys diff = {k: (d1[k], d2[k]) for k in both if d1[k] != d2[k]} diff.update({k: (d1[k], no_key) for k in d1keys - both}) diff.update({k: (no_key, d2[k]) for k in d2keys - both}) return diff
python
def dict_diff(d1, d2, no_key='<KEYNOTFOUND>'): # type: (DictUpperBound, DictUpperBound, str) -> Dict """Compares two dictionaries Args: d1 (DictUpperBound): First dictionary to compare d2 (DictUpperBound): Second dictionary to compare no_key (str): What value to use if key is not found Defaults to '<KEYNOTFOUND>'. Returns: Dict: Comparison dictionary """ d1keys = set(d1.keys()) d2keys = set(d2.keys()) both = d1keys & d2keys diff = {k: (d1[k], d2[k]) for k in both if d1[k] != d2[k]} diff.update({k: (d1[k], no_key) for k in d1keys - both}) diff.update({k: (no_key, d2[k]) for k in d2keys - both}) return diff
[ "def", "dict_diff", "(", "d1", ",", "d2", ",", "no_key", "=", "'<KEYNOTFOUND>'", ")", ":", "# type: (DictUpperBound, DictUpperBound, str) -> Dict", "d1keys", "=", "set", "(", "d1", ".", "keys", "(", ")", ")", "d2keys", "=", "set", "(", "d2", ".", "keys", "(", ")", ")", "both", "=", "d1keys", "&", "d2keys", "diff", "=", "{", "k", ":", "(", "d1", "[", "k", "]", ",", "d2", "[", "k", "]", ")", "for", "k", "in", "both", "if", "d1", "[", "k", "]", "!=", "d2", "[", "k", "]", "}", "diff", ".", "update", "(", "{", "k", ":", "(", "d1", "[", "k", "]", ",", "no_key", ")", "for", "k", "in", "d1keys", "-", "both", "}", ")", "diff", ".", "update", "(", "{", "k", ":", "(", "no_key", ",", "d2", "[", "k", "]", ")", "for", "k", "in", "d2keys", "-", "both", "}", ")", "return", "diff" ]
Compares two dictionaries Args: d1 (DictUpperBound): First dictionary to compare d2 (DictUpperBound): Second dictionary to compare no_key (str): What value to use if key is not found Defaults to '<KEYNOTFOUND>'. Returns: Dict: Comparison dictionary
[ "Compares", "two", "dictionaries" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L84-L103
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
dict_of_lists_add
def dict_of_lists_add(dictionary, key, value): # type: (DictUpperBound, Any, Any) -> None """Add value to a list in a dictionary by key Args: dictionary (DictUpperBound): Dictionary to which to add values key (Any): Key within dictionary value (Any): Value to add to list in dictionary Returns: None """ list_objs = dictionary.get(key, list()) list_objs.append(value) dictionary[key] = list_objs
python
def dict_of_lists_add(dictionary, key, value): # type: (DictUpperBound, Any, Any) -> None """Add value to a list in a dictionary by key Args: dictionary (DictUpperBound): Dictionary to which to add values key (Any): Key within dictionary value (Any): Value to add to list in dictionary Returns: None """ list_objs = dictionary.get(key, list()) list_objs.append(value) dictionary[key] = list_objs
[ "def", "dict_of_lists_add", "(", "dictionary", ",", "key", ",", "value", ")", ":", "# type: (DictUpperBound, Any, Any) -> None", "list_objs", "=", "dictionary", ".", "get", "(", "key", ",", "list", "(", ")", ")", "list_objs", ".", "append", "(", "value", ")", "dictionary", "[", "key", "]", "=", "list_objs" ]
Add value to a list in a dictionary by key Args: dictionary (DictUpperBound): Dictionary to which to add values key (Any): Key within dictionary value (Any): Value to add to list in dictionary Returns: None
[ "Add", "value", "to", "a", "list", "in", "a", "dictionary", "by", "key" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L106-L121
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
dict_of_sets_add
def dict_of_sets_add(dictionary, key, value): # type: (DictUpperBound, Any, Any) -> None """Add value to a set in a dictionary by key Args: dictionary (DictUpperBound): Dictionary to which to add values key (Any): Key within dictionary value (Any): Value to add to set in dictionary Returns: None """ set_objs = dictionary.get(key, set()) set_objs.add(value) dictionary[key] = set_objs
python
def dict_of_sets_add(dictionary, key, value): # type: (DictUpperBound, Any, Any) -> None """Add value to a set in a dictionary by key Args: dictionary (DictUpperBound): Dictionary to which to add values key (Any): Key within dictionary value (Any): Value to add to set in dictionary Returns: None """ set_objs = dictionary.get(key, set()) set_objs.add(value) dictionary[key] = set_objs
[ "def", "dict_of_sets_add", "(", "dictionary", ",", "key", ",", "value", ")", ":", "# type: (DictUpperBound, Any, Any) -> None", "set_objs", "=", "dictionary", ".", "get", "(", "key", ",", "set", "(", ")", ")", "set_objs", ".", "add", "(", "value", ")", "dictionary", "[", "key", "]", "=", "set_objs" ]
Add value to a set in a dictionary by key Args: dictionary (DictUpperBound): Dictionary to which to add values key (Any): Key within dictionary value (Any): Value to add to set in dictionary Returns: None
[ "Add", "value", "to", "a", "set", "in", "a", "dictionary", "by", "key" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L124-L139
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
list_distribute_contents_simple
def list_distribute_contents_simple(input_list, function=lambda x: x): # type: (List, Callable[[Any], Any]) -> List """Distribute the contents of a list eg. [1, 1, 1, 2, 2, 3] -> [1, 2, 3, 1, 2, 1]. List can contain complex types like dictionaries in which case the function can return the appropriate value eg. lambda x: x[KEY] Args: input_list (List): List to distribute values function (Callable[[Any], Any]): Return value to use for distributing. Defaults to lambda x: x. Returns: List: Distributed list """ dictionary = dict() for obj in input_list: dict_of_lists_add(dictionary, function(obj), obj) output_list = list() i = 0 done = False while not done: found = False for key in sorted(dictionary): if i < len(dictionary[key]): output_list.append(dictionary[key][i]) found = True if found: i += 1 else: done = True return output_list
python
def list_distribute_contents_simple(input_list, function=lambda x: x): # type: (List, Callable[[Any], Any]) -> List """Distribute the contents of a list eg. [1, 1, 1, 2, 2, 3] -> [1, 2, 3, 1, 2, 1]. List can contain complex types like dictionaries in which case the function can return the appropriate value eg. lambda x: x[KEY] Args: input_list (List): List to distribute values function (Callable[[Any], Any]): Return value to use for distributing. Defaults to lambda x: x. Returns: List: Distributed list """ dictionary = dict() for obj in input_list: dict_of_lists_add(dictionary, function(obj), obj) output_list = list() i = 0 done = False while not done: found = False for key in sorted(dictionary): if i < len(dictionary[key]): output_list.append(dictionary[key][i]) found = True if found: i += 1 else: done = True return output_list
[ "def", "list_distribute_contents_simple", "(", "input_list", ",", "function", "=", "lambda", "x", ":", "x", ")", ":", "# type: (List, Callable[[Any], Any]) -> List", "dictionary", "=", "dict", "(", ")", "for", "obj", "in", "input_list", ":", "dict_of_lists_add", "(", "dictionary", ",", "function", "(", "obj", ")", ",", "obj", ")", "output_list", "=", "list", "(", ")", "i", "=", "0", "done", "=", "False", "while", "not", "done", ":", "found", "=", "False", "for", "key", "in", "sorted", "(", "dictionary", ")", ":", "if", "i", "<", "len", "(", "dictionary", "[", "key", "]", ")", ":", "output_list", ".", "append", "(", "dictionary", "[", "key", "]", "[", "i", "]", ")", "found", "=", "True", "if", "found", ":", "i", "+=", "1", "else", ":", "done", "=", "True", "return", "output_list" ]
Distribute the contents of a list eg. [1, 1, 1, 2, 2, 3] -> [1, 2, 3, 1, 2, 1]. List can contain complex types like dictionaries in which case the function can return the appropriate value eg. lambda x: x[KEY] Args: input_list (List): List to distribute values function (Callable[[Any], Any]): Return value to use for distributing. Defaults to lambda x: x. Returns: List: Distributed list
[ "Distribute", "the", "contents", "of", "a", "list", "eg", ".", "[", "1", "1", "1", "2", "2", "3", "]", "-", ">", "[", "1", "2", "3", "1", "2", "1", "]", ".", "List", "can", "contain", "complex", "types", "like", "dictionaries", "in", "which", "case", "the", "function", "can", "return", "the", "appropriate", "value", "eg", ".", "lambda", "x", ":", "x", "[", "KEY", "]" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L142-L171
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
list_distribute_contents
def list_distribute_contents(input_list, function=lambda x: x): # type: (List, Callable[[Any], Any]) -> List """Distribute the contents of a list eg. [1, 1, 1, 2, 2, 3] -> [1, 2, 1, 2, 1, 3]. List can contain complex types like dictionaries in which case the function can return the appropriate value eg. lambda x: x[KEY] Args: input_list (List): List to distribute values function (Callable[[Any], Any]): Return value to use for distributing. Defaults to lambda x: x. Returns: List: Distributed list """ def riffle_shuffle(piles_list): def grouper(n, iterable, fillvalue=None): args = [iter(iterable)] * n return zip_longest(fillvalue=fillvalue, *args) if not piles_list: return [] piles_list.sort(key=len, reverse=True) width = len(piles_list[0]) pile_iters_list = [iter(pile) for pile in piles_list] pile_sizes_list = [[pile_position] * len(pile) for pile_position, pile in enumerate(piles_list)] grouped_rows = grouper(width, itertools.chain.from_iterable(pile_sizes_list)) grouped_columns = zip_longest(*grouped_rows) shuffled_pile = [next(pile_iters_list[position]) for position in itertools.chain.from_iterable(grouped_columns) if position is not None] return shuffled_pile dictionary = dict() for obj in input_list: dict_of_lists_add(dictionary, function(obj), obj) intermediate_list = list() for key in sorted(dictionary): intermediate_list.append(dictionary[key]) return riffle_shuffle(intermediate_list)
python
def list_distribute_contents(input_list, function=lambda x: x): # type: (List, Callable[[Any], Any]) -> List """Distribute the contents of a list eg. [1, 1, 1, 2, 2, 3] -> [1, 2, 1, 2, 1, 3]. List can contain complex types like dictionaries in which case the function can return the appropriate value eg. lambda x: x[KEY] Args: input_list (List): List to distribute values function (Callable[[Any], Any]): Return value to use for distributing. Defaults to lambda x: x. Returns: List: Distributed list """ def riffle_shuffle(piles_list): def grouper(n, iterable, fillvalue=None): args = [iter(iterable)] * n return zip_longest(fillvalue=fillvalue, *args) if not piles_list: return [] piles_list.sort(key=len, reverse=True) width = len(piles_list[0]) pile_iters_list = [iter(pile) for pile in piles_list] pile_sizes_list = [[pile_position] * len(pile) for pile_position, pile in enumerate(piles_list)] grouped_rows = grouper(width, itertools.chain.from_iterable(pile_sizes_list)) grouped_columns = zip_longest(*grouped_rows) shuffled_pile = [next(pile_iters_list[position]) for position in itertools.chain.from_iterable(grouped_columns) if position is not None] return shuffled_pile dictionary = dict() for obj in input_list: dict_of_lists_add(dictionary, function(obj), obj) intermediate_list = list() for key in sorted(dictionary): intermediate_list.append(dictionary[key]) return riffle_shuffle(intermediate_list)
[ "def", "list_distribute_contents", "(", "input_list", ",", "function", "=", "lambda", "x", ":", "x", ")", ":", "# type: (List, Callable[[Any], Any]) -> List", "def", "riffle_shuffle", "(", "piles_list", ")", ":", "def", "grouper", "(", "n", ",", "iterable", ",", "fillvalue", "=", "None", ")", ":", "args", "=", "[", "iter", "(", "iterable", ")", "]", "*", "n", "return", "zip_longest", "(", "fillvalue", "=", "fillvalue", ",", "*", "args", ")", "if", "not", "piles_list", ":", "return", "[", "]", "piles_list", ".", "sort", "(", "key", "=", "len", ",", "reverse", "=", "True", ")", "width", "=", "len", "(", "piles_list", "[", "0", "]", ")", "pile_iters_list", "=", "[", "iter", "(", "pile", ")", "for", "pile", "in", "piles_list", "]", "pile_sizes_list", "=", "[", "[", "pile_position", "]", "*", "len", "(", "pile", ")", "for", "pile_position", ",", "pile", "in", "enumerate", "(", "piles_list", ")", "]", "grouped_rows", "=", "grouper", "(", "width", ",", "itertools", ".", "chain", ".", "from_iterable", "(", "pile_sizes_list", ")", ")", "grouped_columns", "=", "zip_longest", "(", "*", "grouped_rows", ")", "shuffled_pile", "=", "[", "next", "(", "pile_iters_list", "[", "position", "]", ")", "for", "position", "in", "itertools", ".", "chain", ".", "from_iterable", "(", "grouped_columns", ")", "if", "position", "is", "not", "None", "]", "return", "shuffled_pile", "dictionary", "=", "dict", "(", ")", "for", "obj", "in", "input_list", ":", "dict_of_lists_add", "(", "dictionary", ",", "function", "(", "obj", ")", ",", "obj", ")", "intermediate_list", "=", "list", "(", ")", "for", "key", "in", "sorted", "(", "dictionary", ")", ":", "intermediate_list", ".", "append", "(", "dictionary", "[", "key", "]", ")", "return", "riffle_shuffle", "(", "intermediate_list", ")" ]
Distribute the contents of a list eg. [1, 1, 1, 2, 2, 3] -> [1, 2, 1, 2, 1, 3]. List can contain complex types like dictionaries in which case the function can return the appropriate value eg. lambda x: x[KEY] Args: input_list (List): List to distribute values function (Callable[[Any], Any]): Return value to use for distributing. Defaults to lambda x: x. Returns: List: Distributed list
[ "Distribute", "the", "contents", "of", "a", "list", "eg", ".", "[", "1", "1", "1", "2", "2", "3", "]", "-", ">", "[", "1", "2", "1", "2", "1", "3", "]", ".", "List", "can", "contain", "complex", "types", "like", "dictionaries", "in", "which", "case", "the", "function", "can", "return", "the", "appropriate", "value", "eg", ".", "lambda", "x", ":", "x", "[", "KEY", "]" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L174-L211
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
extract_list_from_list_of_dict
def extract_list_from_list_of_dict(list_of_dict, key): # type: (List[DictUpperBound], Any) -> List """Extract a list by looking up key in each member of a list of dictionaries Args: list_of_dict (List[DictUpperBound]): List of dictionaries key (Any): Key to find in each dictionary Returns: List: List containing values returned from each dictionary """ result = list() for dictionary in list_of_dict: result.append(dictionary[key]) return result
python
def extract_list_from_list_of_dict(list_of_dict, key): # type: (List[DictUpperBound], Any) -> List """Extract a list by looking up key in each member of a list of dictionaries Args: list_of_dict (List[DictUpperBound]): List of dictionaries key (Any): Key to find in each dictionary Returns: List: List containing values returned from each dictionary """ result = list() for dictionary in list_of_dict: result.append(dictionary[key]) return result
[ "def", "extract_list_from_list_of_dict", "(", "list_of_dict", ",", "key", ")", ":", "# type: (List[DictUpperBound], Any) -> List", "result", "=", "list", "(", ")", "for", "dictionary", "in", "list_of_dict", ":", "result", ".", "append", "(", "dictionary", "[", "key", "]", ")", "return", "result" ]
Extract a list by looking up key in each member of a list of dictionaries Args: list_of_dict (List[DictUpperBound]): List of dictionaries key (Any): Key to find in each dictionary Returns: List: List containing values returned from each dictionary
[ "Extract", "a", "list", "by", "looking", "up", "key", "in", "each", "member", "of", "a", "list", "of", "dictionaries" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L214-L229
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
key_value_convert
def key_value_convert(dictin, keyfn=lambda x: x, valuefn=lambda x: x, dropfailedkeys=False, dropfailedvalues=False, exception=ValueError): # type: (DictUpperBound, Callable[[Any], Any], Callable[[Any], Any], bool, bool, ExceptionUpperBound) -> Dict """Convert keys and/or values of dictionary using functions passed in as parameters Args: dictin (DictUpperBound): Input dictionary keyfn (Callable[[Any], Any]): Function to convert keys. Defaults to lambda x: x valuefn (Callable[[Any], Any]): Function to convert values. Defaults to lambda x: x dropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. dropfailedvalues (bool): Whether to drop dictionary entries where value conversion fails. Defaults to False. exception (ExceptionUpperBound): The exception to expect if keyfn or valuefn fail. Defaults to ValueError. Returns: Dict: Dictionary with converted keys and/or values """ dictout = dict() for key in dictin: try: new_key = keyfn(key) except exception: if dropfailedkeys: continue new_key = key value = dictin[key] try: new_value = valuefn(value) except exception: if dropfailedvalues: continue new_value = value dictout[new_key] = new_value return dictout
python
def key_value_convert(dictin, keyfn=lambda x: x, valuefn=lambda x: x, dropfailedkeys=False, dropfailedvalues=False, exception=ValueError): # type: (DictUpperBound, Callable[[Any], Any], Callable[[Any], Any], bool, bool, ExceptionUpperBound) -> Dict """Convert keys and/or values of dictionary using functions passed in as parameters Args: dictin (DictUpperBound): Input dictionary keyfn (Callable[[Any], Any]): Function to convert keys. Defaults to lambda x: x valuefn (Callable[[Any], Any]): Function to convert values. Defaults to lambda x: x dropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. dropfailedvalues (bool): Whether to drop dictionary entries where value conversion fails. Defaults to False. exception (ExceptionUpperBound): The exception to expect if keyfn or valuefn fail. Defaults to ValueError. Returns: Dict: Dictionary with converted keys and/or values """ dictout = dict() for key in dictin: try: new_key = keyfn(key) except exception: if dropfailedkeys: continue new_key = key value = dictin[key] try: new_value = valuefn(value) except exception: if dropfailedvalues: continue new_value = value dictout[new_key] = new_value return dictout
[ "def", "key_value_convert", "(", "dictin", ",", "keyfn", "=", "lambda", "x", ":", "x", ",", "valuefn", "=", "lambda", "x", ":", "x", ",", "dropfailedkeys", "=", "False", ",", "dropfailedvalues", "=", "False", ",", "exception", "=", "ValueError", ")", ":", "# type: (DictUpperBound, Callable[[Any], Any], Callable[[Any], Any], bool, bool, ExceptionUpperBound) -> Dict", "dictout", "=", "dict", "(", ")", "for", "key", "in", "dictin", ":", "try", ":", "new_key", "=", "keyfn", "(", "key", ")", "except", "exception", ":", "if", "dropfailedkeys", ":", "continue", "new_key", "=", "key", "value", "=", "dictin", "[", "key", "]", "try", ":", "new_value", "=", "valuefn", "(", "value", ")", "except", "exception", ":", "if", "dropfailedvalues", ":", "continue", "new_value", "=", "value", "dictout", "[", "new_key", "]", "=", "new_value", "return", "dictout" ]
Convert keys and/or values of dictionary using functions passed in as parameters Args: dictin (DictUpperBound): Input dictionary keyfn (Callable[[Any], Any]): Function to convert keys. Defaults to lambda x: x valuefn (Callable[[Any], Any]): Function to convert values. Defaults to lambda x: x dropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. dropfailedvalues (bool): Whether to drop dictionary entries where value conversion fails. Defaults to False. exception (ExceptionUpperBound): The exception to expect if keyfn or valuefn fail. Defaults to ValueError. Returns: Dict: Dictionary with converted keys and/or values
[ "Convert", "keys", "and", "/", "or", "values", "of", "dictionary", "using", "functions", "passed", "in", "as", "parameters" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L232-L265
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
integer_key_convert
def integer_key_convert(dictin, dropfailedkeys=False): # type: (DictUpperBound, bool) -> Dict """Convert keys of dictionary to integers Args: dictin (DictUpperBound): Input dictionary dropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. Returns: Dict: Dictionary with keys converted to integers """ return key_value_convert(dictin, keyfn=int, dropfailedkeys=dropfailedkeys)
python
def integer_key_convert(dictin, dropfailedkeys=False): # type: (DictUpperBound, bool) -> Dict """Convert keys of dictionary to integers Args: dictin (DictUpperBound): Input dictionary dropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. Returns: Dict: Dictionary with keys converted to integers """ return key_value_convert(dictin, keyfn=int, dropfailedkeys=dropfailedkeys)
[ "def", "integer_key_convert", "(", "dictin", ",", "dropfailedkeys", "=", "False", ")", ":", "# type: (DictUpperBound, bool) -> Dict", "return", "key_value_convert", "(", "dictin", ",", "keyfn", "=", "int", ",", "dropfailedkeys", "=", "dropfailedkeys", ")" ]
Convert keys of dictionary to integers Args: dictin (DictUpperBound): Input dictionary dropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. Returns: Dict: Dictionary with keys converted to integers
[ "Convert", "keys", "of", "dictionary", "to", "integers" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L268-L280
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
integer_value_convert
def integer_value_convert(dictin, dropfailedvalues=False): # type: (DictUpperBound, bool) -> Dict """Convert values of dictionary to integers Args: dictin (DictUpperBound): Input dictionary dropfailedvalues (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. Returns: Dict: Dictionary with values converted to integers """ return key_value_convert(dictin, valuefn=int, dropfailedvalues=dropfailedvalues)
python
def integer_value_convert(dictin, dropfailedvalues=False): # type: (DictUpperBound, bool) -> Dict """Convert values of dictionary to integers Args: dictin (DictUpperBound): Input dictionary dropfailedvalues (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. Returns: Dict: Dictionary with values converted to integers """ return key_value_convert(dictin, valuefn=int, dropfailedvalues=dropfailedvalues)
[ "def", "integer_value_convert", "(", "dictin", ",", "dropfailedvalues", "=", "False", ")", ":", "# type: (DictUpperBound, bool) -> Dict", "return", "key_value_convert", "(", "dictin", ",", "valuefn", "=", "int", ",", "dropfailedvalues", "=", "dropfailedvalues", ")" ]
Convert values of dictionary to integers Args: dictin (DictUpperBound): Input dictionary dropfailedvalues (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. Returns: Dict: Dictionary with values converted to integers
[ "Convert", "values", "of", "dictionary", "to", "integers" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L283-L295
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
float_value_convert
def float_value_convert(dictin, dropfailedvalues=False): # type: (DictUpperBound, bool) -> Dict """Convert values of dictionary to floats Args: dictin (DictUpperBound): Input dictionary dropfailedvalues (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. Returns: Dict: Dictionary with values converted to floats """ return key_value_convert(dictin, valuefn=float, dropfailedvalues=dropfailedvalues)
python
def float_value_convert(dictin, dropfailedvalues=False): # type: (DictUpperBound, bool) -> Dict """Convert values of dictionary to floats Args: dictin (DictUpperBound): Input dictionary dropfailedvalues (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. Returns: Dict: Dictionary with values converted to floats """ return key_value_convert(dictin, valuefn=float, dropfailedvalues=dropfailedvalues)
[ "def", "float_value_convert", "(", "dictin", ",", "dropfailedvalues", "=", "False", ")", ":", "# type: (DictUpperBound, bool) -> Dict", "return", "key_value_convert", "(", "dictin", ",", "valuefn", "=", "float", ",", "dropfailedvalues", "=", "dropfailedvalues", ")" ]
Convert values of dictionary to floats Args: dictin (DictUpperBound): Input dictionary dropfailedvalues (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. Returns: Dict: Dictionary with values converted to floats
[ "Convert", "values", "of", "dictionary", "to", "floats" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L298-L310
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
avg_dicts
def avg_dicts(dictin1, dictin2, dropmissing=True): # type: (DictUpperBound, DictUpperBound, bool) -> Dict """Create a new dictionary from two dictionaries by averaging values Args: dictin1 (DictUpperBound): First input dictionary dictin2 (DictUpperBound): Second input dictionary dropmissing (bool): Whether to drop keys missing in one dictionary. Defaults to True. Returns: Dict: Dictionary with values being average of 2 input dictionaries """ dictout = dict() for key in dictin1: if key in dictin2: dictout[key] = (dictin1[key] + dictin2[key]) / 2 elif not dropmissing: dictout[key] = dictin1[key] if not dropmissing: for key in dictin2: if key not in dictin1: dictout[key] = dictin2[key] return dictout
python
def avg_dicts(dictin1, dictin2, dropmissing=True): # type: (DictUpperBound, DictUpperBound, bool) -> Dict """Create a new dictionary from two dictionaries by averaging values Args: dictin1 (DictUpperBound): First input dictionary dictin2 (DictUpperBound): Second input dictionary dropmissing (bool): Whether to drop keys missing in one dictionary. Defaults to True. Returns: Dict: Dictionary with values being average of 2 input dictionaries """ dictout = dict() for key in dictin1: if key in dictin2: dictout[key] = (dictin1[key] + dictin2[key]) / 2 elif not dropmissing: dictout[key] = dictin1[key] if not dropmissing: for key in dictin2: if key not in dictin1: dictout[key] = dictin2[key] return dictout
[ "def", "avg_dicts", "(", "dictin1", ",", "dictin2", ",", "dropmissing", "=", "True", ")", ":", "# type: (DictUpperBound, DictUpperBound, bool) -> Dict", "dictout", "=", "dict", "(", ")", "for", "key", "in", "dictin1", ":", "if", "key", "in", "dictin2", ":", "dictout", "[", "key", "]", "=", "(", "dictin1", "[", "key", "]", "+", "dictin2", "[", "key", "]", ")", "/", "2", "elif", "not", "dropmissing", ":", "dictout", "[", "key", "]", "=", "dictin1", "[", "key", "]", "if", "not", "dropmissing", ":", "for", "key", "in", "dictin2", ":", "if", "key", "not", "in", "dictin1", ":", "dictout", "[", "key", "]", "=", "dictin2", "[", "key", "]", "return", "dictout" ]
Create a new dictionary from two dictionaries by averaging values Args: dictin1 (DictUpperBound): First input dictionary dictin2 (DictUpperBound): Second input dictionary dropmissing (bool): Whether to drop keys missing in one dictionary. Defaults to True. Returns: Dict: Dictionary with values being average of 2 input dictionaries
[ "Create", "a", "new", "dictionary", "from", "two", "dictionaries", "by", "averaging", "values" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L313-L336
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
read_list_from_csv
def read_list_from_csv(filepath, dict_form=False, headers=None, **kwargs): # type: (str, bool, Union[int, List[int], List[str], None], Any) -> List[Union[Dict, List]] """Read a list of rows in dict or list form from a csv. (The headers argument is either a row number or list of row numbers (in case of multi-line headers) to be considered as headers (rows start counting at 1), or the actual headers defined a list of strings. If not set, all rows will be treated as containing values.) Args: filepath (str): Path to read from dict_form (bool): Return in dict form. Defaults to False. headers (Union[int, List[int], List[str], None]): Row number of headers. Defaults to None. **kwargs: Other arguments to pass to Tabulator Stream Returns: List[Union[Dict, List]]: List of rows in dict or list form """ stream = Stream(filepath, headers=headers, **kwargs) stream.open() result = stream.read(keyed=dict_form) stream.close() return result
python
def read_list_from_csv(filepath, dict_form=False, headers=None, **kwargs): # type: (str, bool, Union[int, List[int], List[str], None], Any) -> List[Union[Dict, List]] """Read a list of rows in dict or list form from a csv. (The headers argument is either a row number or list of row numbers (in case of multi-line headers) to be considered as headers (rows start counting at 1), or the actual headers defined a list of strings. If not set, all rows will be treated as containing values.) Args: filepath (str): Path to read from dict_form (bool): Return in dict form. Defaults to False. headers (Union[int, List[int], List[str], None]): Row number of headers. Defaults to None. **kwargs: Other arguments to pass to Tabulator Stream Returns: List[Union[Dict, List]]: List of rows in dict or list form """ stream = Stream(filepath, headers=headers, **kwargs) stream.open() result = stream.read(keyed=dict_form) stream.close() return result
[ "def", "read_list_from_csv", "(", "filepath", ",", "dict_form", "=", "False", ",", "headers", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# type: (str, bool, Union[int, List[int], List[str], None], Any) -> List[Union[Dict, List]]", "stream", "=", "Stream", "(", "filepath", ",", "headers", "=", "headers", ",", "*", "*", "kwargs", ")", "stream", ".", "open", "(", ")", "result", "=", "stream", ".", "read", "(", "keyed", "=", "dict_form", ")", "stream", ".", "close", "(", ")", "return", "result" ]
Read a list of rows in dict or list form from a csv. (The headers argument is either a row number or list of row numbers (in case of multi-line headers) to be considered as headers (rows start counting at 1), or the actual headers defined a list of strings. If not set, all rows will be treated as containing values.) Args: filepath (str): Path to read from dict_form (bool): Return in dict form. Defaults to False. headers (Union[int, List[int], List[str], None]): Row number of headers. Defaults to None. **kwargs: Other arguments to pass to Tabulator Stream Returns: List[Union[Dict, List]]: List of rows in dict or list form
[ "Read", "a", "list", "of", "rows", "in", "dict", "or", "list", "form", "from", "a", "csv", ".", "(", "The", "headers", "argument", "is", "either", "a", "row", "number", "or", "list", "of", "row", "numbers", "(", "in", "case", "of", "multi", "-", "line", "headers", ")", "to", "be", "considered", "as", "headers", "(", "rows", "start", "counting", "at", "1", ")", "or", "the", "actual", "headers", "defined", "a", "list", "of", "strings", ".", "If", "not", "set", "all", "rows", "will", "be", "treated", "as", "containing", "values", ".", ")" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L339-L360
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
write_list_to_csv
def write_list_to_csv(list_of_rows, filepath, headers=None): # type: (List[Union[DictUpperBound, List]], str, Union[int, List[int], List[str], None]) -> None """Write a list of rows in dict or list form to a csv. (The headers argument is either a row number or list of row numbers (in case of multi-line headers) to be considered as headers (rows start counting at 1), or the actual headers defined a list of strings. If not set, all rows will be treated as containing values.) Args: list_of_rows (List[Union[DictUpperBound, List]]): List of rows in dict or list form filepath (str): Path to write to headers (Union[int, List[int], List[str], None]): Headers to write. Defaults to None. Returns: None """ stream = Stream(list_of_rows, headers=headers) stream.open() stream.save(filepath, format='csv') stream.close()
python
def write_list_to_csv(list_of_rows, filepath, headers=None): # type: (List[Union[DictUpperBound, List]], str, Union[int, List[int], List[str], None]) -> None """Write a list of rows in dict or list form to a csv. (The headers argument is either a row number or list of row numbers (in case of multi-line headers) to be considered as headers (rows start counting at 1), or the actual headers defined a list of strings. If not set, all rows will be treated as containing values.) Args: list_of_rows (List[Union[DictUpperBound, List]]): List of rows in dict or list form filepath (str): Path to write to headers (Union[int, List[int], List[str], None]): Headers to write. Defaults to None. Returns: None """ stream = Stream(list_of_rows, headers=headers) stream.open() stream.save(filepath, format='csv') stream.close()
[ "def", "write_list_to_csv", "(", "list_of_rows", ",", "filepath", ",", "headers", "=", "None", ")", ":", "# type: (List[Union[DictUpperBound, List]], str, Union[int, List[int], List[str], None]) -> None", "stream", "=", "Stream", "(", "list_of_rows", ",", "headers", "=", "headers", ")", "stream", ".", "open", "(", ")", "stream", ".", "save", "(", "filepath", ",", "format", "=", "'csv'", ")", "stream", ".", "close", "(", ")" ]
Write a list of rows in dict or list form to a csv. (The headers argument is either a row number or list of row numbers (in case of multi-line headers) to be considered as headers (rows start counting at 1), or the actual headers defined a list of strings. If not set, all rows will be treated as containing values.) Args: list_of_rows (List[Union[DictUpperBound, List]]): List of rows in dict or list form filepath (str): Path to write to headers (Union[int, List[int], List[str], None]): Headers to write. Defaults to None. Returns: None
[ "Write", "a", "list", "of", "rows", "in", "dict", "or", "list", "form", "to", "a", "csv", ".", "(", "The", "headers", "argument", "is", "either", "a", "row", "number", "or", "list", "of", "row", "numbers", "(", "in", "case", "of", "multi", "-", "line", "headers", ")", "to", "be", "considered", "as", "headers", "(", "rows", "start", "counting", "at", "1", ")", "or", "the", "actual", "headers", "defined", "a", "list", "of", "strings", ".", "If", "not", "set", "all", "rows", "will", "be", "treated", "as", "containing", "values", ".", ")" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L363-L382
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
args_to_dict
def args_to_dict(args): # type: (str) -> DictUpperBound[str,str] """Convert command line arguments in a comma separated string to a dictionary Args: args (str): Command line arguments Returns: DictUpperBound[str,str]: Dictionary of arguments """ arguments = dict() for arg in args.split(','): key, value = arg.split('=') arguments[key] = value return arguments
python
def args_to_dict(args): # type: (str) -> DictUpperBound[str,str] """Convert command line arguments in a comma separated string to a dictionary Args: args (str): Command line arguments Returns: DictUpperBound[str,str]: Dictionary of arguments """ arguments = dict() for arg in args.split(','): key, value = arg.split('=') arguments[key] = value return arguments
[ "def", "args_to_dict", "(", "args", ")", ":", "# type: (str) -> DictUpperBound[str,str]", "arguments", "=", "dict", "(", ")", "for", "arg", "in", "args", ".", "split", "(", "','", ")", ":", "key", ",", "value", "=", "arg", ".", "split", "(", "'='", ")", "arguments", "[", "key", "]", "=", "value", "return", "arguments" ]
Convert command line arguments in a comma separated string to a dictionary Args: args (str): Command line arguments Returns: DictUpperBound[str,str]: Dictionary of arguments
[ "Convert", "command", "line", "arguments", "in", "a", "comma", "separated", "string", "to", "a", "dictionary" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L385-L400
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/compare.py
compare_files
def compare_files(path1, path2): # type: (str, str) -> List[str] """Returns the delta between two files using -, ?, + format excluding lines that are the same Args: path1 (str): Path to first file path2 (str): Path to second file Returns: List[str]: Delta between the two files """ diff = difflib.ndiff(open(path1).readlines(), open(path2).readlines()) return [x for x in diff if x[0] in ['-', '+', '?']]
python
def compare_files(path1, path2): # type: (str, str) -> List[str] """Returns the delta between two files using -, ?, + format excluding lines that are the same Args: path1 (str): Path to first file path2 (str): Path to second file Returns: List[str]: Delta between the two files """ diff = difflib.ndiff(open(path1).readlines(), open(path2).readlines()) return [x for x in diff if x[0] in ['-', '+', '?']]
[ "def", "compare_files", "(", "path1", ",", "path2", ")", ":", "# type: (str, str) -> List[str]", "diff", "=", "difflib", ".", "ndiff", "(", "open", "(", "path1", ")", ".", "readlines", "(", ")", ",", "open", "(", "path2", ")", ".", "readlines", "(", ")", ")", "return", "[", "x", "for", "x", "in", "diff", "if", "x", "[", "0", "]", "in", "[", "'-'", ",", "'+'", ",", "'?'", "]", "]" ]
Returns the delta between two files using -, ?, + format excluding lines that are the same Args: path1 (str): Path to first file path2 (str): Path to second file Returns: List[str]: Delta between the two files
[ "Returns", "the", "delta", "between", "two", "files", "using", "-", "?", "+", "format", "excluding", "lines", "that", "are", "the", "same" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/compare.py#L7-L21
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/compare.py
assert_files_same
def assert_files_same(path1, path2): # type: (str, str) -> None """Asserts that two files are the same and returns delta using -, ?, + format if not Args: path1 (str): Path to first file path2 (str): Path to second file Returns: None """ difflines = compare_files(path1, path2) assert len(difflines) == 0, ''.join(['\n'] + difflines)
python
def assert_files_same(path1, path2): # type: (str, str) -> None """Asserts that two files are the same and returns delta using -, ?, + format if not Args: path1 (str): Path to first file path2 (str): Path to second file Returns: None """ difflines = compare_files(path1, path2) assert len(difflines) == 0, ''.join(['\n'] + difflines)
[ "def", "assert_files_same", "(", "path1", ",", "path2", ")", ":", "# type: (str, str) -> None", "difflines", "=", "compare_files", "(", "path1", ",", "path2", ")", "assert", "len", "(", "difflines", ")", "==", "0", ",", "''", ".", "join", "(", "[", "'\\n'", "]", "+", "difflines", ")" ]
Asserts that two files are the same and returns delta using -, ?, + format if not Args: path1 (str): Path to first file path2 (str): Path to second file Returns: None
[ "Asserts", "that", "two", "files", "are", "the", "same", "and", "returns", "delta", "using", "-", "?", "+", "format", "if", "not" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/compare.py#L24-L38
VisTrails/tej
tej/main.py
main
def main(): """Entry point when called on the command-line. """ # Locale locale.setlocale(locale.LC_ALL, '') # Encoding for output streams if str == bytes: # PY2 writer = codecs.getwriter(locale.getpreferredencoding()) o_stdout, o_stderr = sys.stdout, sys.stderr sys.stdout = writer(sys.stdout) sys.stdout.buffer = o_stdout sys.stderr = writer(sys.stderr) sys.stderr.buffer = o_stderr else: # PY3 sys.stdin = sys.stdin.buffer # Parses command-line # Runtime to setup def add_runtime_option(opt): opt.add_argument( '-r', '--runtime', action='store', help="runtime to deploy on the server if the queue doesn't exist. " "If unspecified, will auto-detect what is appropriate, and " "fallback on 'default'.") # Destination selection def add_destination_option(opt): opt.add_argument('destination', action='store', help="Machine to SSH into; [user@]host[:port]") opt.add_argument('--queue', action='store', default=DEFAULT_TEJ_DIR, help="Directory for tej's files") # Root parser parser = argparse.ArgumentParser( description="Trivial Extensible Job-submission") parser.add_argument('--version', action='version', version="tej version %s" % tej_version) parser.add_argument('-v', '--verbose', action='count', default=1, dest='verbosity', help="augments verbosity level") subparsers = parser.add_subparsers(title="commands", metavar='') # Setup action parser_setup = subparsers.add_parser( 'setup', help="Sets up tej on a remote machine") add_destination_option(parser_setup) add_runtime_option(parser_setup) parser_setup.add_argument('--make-link', action='append', dest='make_link') parser_setup.add_argument('--make-default-link', action='append_const', dest='make_link', const=DEFAULT_TEJ_DIR) parser_setup.add_argument('--force', action='store_true') parser_setup.add_argument('--only-links', action='store_true') parser_setup.set_defaults(func=_setup) # Submit action parser_submit = subparsers.add_parser( 'submit', help="Submits a job to a remote machine") add_destination_option(parser_submit) 
add_runtime_option(parser_submit) parser_submit.add_argument('--id', action='store', help="Identifier for the new job") parser_submit.add_argument('--script', action='store', help="Relative name of the script in the " "directory") parser_submit.add_argument('directory', action='store', help="Job directory to upload") parser_submit.set_defaults(func=_submit) # Status action parser_status = subparsers.add_parser( 'status', help="Gets the status of a job") add_destination_option(parser_status) parser_status.add_argument('--id', action='store', help="Identifier of the running job") parser_status.set_defaults(func=_status) # Download action parser_download = subparsers.add_parser( 'download', help="Downloads files from finished job") add_destination_option(parser_download) parser_download.add_argument('--id', action='store', help="Identifier of the job") parser_download.add_argument('files', action='store', nargs=argparse.ONE_OR_MORE, help="Files to download") parser_download.set_defaults(func=_download) # Kill action parser_kill = subparsers.add_parser( 'kill', help="Kills a running job") add_destination_option(parser_kill) parser_kill.add_argument('--id', action='store', help="Identifier of the running job") parser_kill.set_defaults(func=_kill) # Delete action parser_delete = subparsers.add_parser( 'delete', help="Deletes a finished job") add_destination_option(parser_delete) parser_delete.add_argument('--id', action='store', help="Identifier of the finished job") parser_delete.set_defaults(func=_delete) # List action parser_list = subparsers.add_parser( 'list', help="Lists remote jobs") add_destination_option(parser_list) parser_list.set_defaults(func=_list) args = parser.parse_args() setup_logging(args.verbosity) try: args.func(args) except Error as e: # No need to show a traceback here, this is not an internal error logger.critical(e) sys.exit(1) sys.exit(0)
python
def main(): """Entry point when called on the command-line. """ # Locale locale.setlocale(locale.LC_ALL, '') # Encoding for output streams if str == bytes: # PY2 writer = codecs.getwriter(locale.getpreferredencoding()) o_stdout, o_stderr = sys.stdout, sys.stderr sys.stdout = writer(sys.stdout) sys.stdout.buffer = o_stdout sys.stderr = writer(sys.stderr) sys.stderr.buffer = o_stderr else: # PY3 sys.stdin = sys.stdin.buffer # Parses command-line # Runtime to setup def add_runtime_option(opt): opt.add_argument( '-r', '--runtime', action='store', help="runtime to deploy on the server if the queue doesn't exist. " "If unspecified, will auto-detect what is appropriate, and " "fallback on 'default'.") # Destination selection def add_destination_option(opt): opt.add_argument('destination', action='store', help="Machine to SSH into; [user@]host[:port]") opt.add_argument('--queue', action='store', default=DEFAULT_TEJ_DIR, help="Directory for tej's files") # Root parser parser = argparse.ArgumentParser( description="Trivial Extensible Job-submission") parser.add_argument('--version', action='version', version="tej version %s" % tej_version) parser.add_argument('-v', '--verbose', action='count', default=1, dest='verbosity', help="augments verbosity level") subparsers = parser.add_subparsers(title="commands", metavar='') # Setup action parser_setup = subparsers.add_parser( 'setup', help="Sets up tej on a remote machine") add_destination_option(parser_setup) add_runtime_option(parser_setup) parser_setup.add_argument('--make-link', action='append', dest='make_link') parser_setup.add_argument('--make-default-link', action='append_const', dest='make_link', const=DEFAULT_TEJ_DIR) parser_setup.add_argument('--force', action='store_true') parser_setup.add_argument('--only-links', action='store_true') parser_setup.set_defaults(func=_setup) # Submit action parser_submit = subparsers.add_parser( 'submit', help="Submits a job to a remote machine") add_destination_option(parser_submit) 
add_runtime_option(parser_submit) parser_submit.add_argument('--id', action='store', help="Identifier for the new job") parser_submit.add_argument('--script', action='store', help="Relative name of the script in the " "directory") parser_submit.add_argument('directory', action='store', help="Job directory to upload") parser_submit.set_defaults(func=_submit) # Status action parser_status = subparsers.add_parser( 'status', help="Gets the status of a job") add_destination_option(parser_status) parser_status.add_argument('--id', action='store', help="Identifier of the running job") parser_status.set_defaults(func=_status) # Download action parser_download = subparsers.add_parser( 'download', help="Downloads files from finished job") add_destination_option(parser_download) parser_download.add_argument('--id', action='store', help="Identifier of the job") parser_download.add_argument('files', action='store', nargs=argparse.ONE_OR_MORE, help="Files to download") parser_download.set_defaults(func=_download) # Kill action parser_kill = subparsers.add_parser( 'kill', help="Kills a running job") add_destination_option(parser_kill) parser_kill.add_argument('--id', action='store', help="Identifier of the running job") parser_kill.set_defaults(func=_kill) # Delete action parser_delete = subparsers.add_parser( 'delete', help="Deletes a finished job") add_destination_option(parser_delete) parser_delete.add_argument('--id', action='store', help="Identifier of the finished job") parser_delete.set_defaults(func=_delete) # List action parser_list = subparsers.add_parser( 'list', help="Lists remote jobs") add_destination_option(parser_list) parser_list.set_defaults(func=_list) args = parser.parse_args() setup_logging(args.verbosity) try: args.func(args) except Error as e: # No need to show a traceback here, this is not an internal error logger.critical(e) sys.exit(1) sys.exit(0)
[ "def", "main", "(", ")", ":", "# Locale", "locale", ".", "setlocale", "(", "locale", ".", "LC_ALL", ",", "''", ")", "# Encoding for output streams", "if", "str", "==", "bytes", ":", "# PY2", "writer", "=", "codecs", ".", "getwriter", "(", "locale", ".", "getpreferredencoding", "(", ")", ")", "o_stdout", ",", "o_stderr", "=", "sys", ".", "stdout", ",", "sys", ".", "stderr", "sys", ".", "stdout", "=", "writer", "(", "sys", ".", "stdout", ")", "sys", ".", "stdout", ".", "buffer", "=", "o_stdout", "sys", ".", "stderr", "=", "writer", "(", "sys", ".", "stderr", ")", "sys", ".", "stderr", ".", "buffer", "=", "o_stderr", "else", ":", "# PY3", "sys", ".", "stdin", "=", "sys", ".", "stdin", ".", "buffer", "# Parses command-line", "# Runtime to setup", "def", "add_runtime_option", "(", "opt", ")", ":", "opt", ".", "add_argument", "(", "'-r'", ",", "'--runtime'", ",", "action", "=", "'store'", ",", "help", "=", "\"runtime to deploy on the server if the queue doesn't exist. \"", "\"If unspecified, will auto-detect what is appropriate, and \"", "\"fallback on 'default'.\"", ")", "# Destination selection", "def", "add_destination_option", "(", "opt", ")", ":", "opt", ".", "add_argument", "(", "'destination'", ",", "action", "=", "'store'", ",", "help", "=", "\"Machine to SSH into; [user@]host[:port]\"", ")", "opt", ".", "add_argument", "(", "'--queue'", ",", "action", "=", "'store'", ",", "default", "=", "DEFAULT_TEJ_DIR", ",", "help", "=", "\"Directory for tej's files\"", ")", "# Root parser", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Trivial Extensible Job-submission\"", ")", "parser", ".", "add_argument", "(", "'--version'", ",", "action", "=", "'version'", ",", "version", "=", "\"tej version %s\"", "%", "tej_version", ")", "parser", ".", "add_argument", "(", "'-v'", ",", "'--verbose'", ",", "action", "=", "'count'", ",", "default", "=", "1", ",", "dest", "=", "'verbosity'", ",", "help", "=", "\"augments verbosity level\"", ")", 
"subparsers", "=", "parser", ".", "add_subparsers", "(", "title", "=", "\"commands\"", ",", "metavar", "=", "''", ")", "# Setup action", "parser_setup", "=", "subparsers", ".", "add_parser", "(", "'setup'", ",", "help", "=", "\"Sets up tej on a remote machine\"", ")", "add_destination_option", "(", "parser_setup", ")", "add_runtime_option", "(", "parser_setup", ")", "parser_setup", ".", "add_argument", "(", "'--make-link'", ",", "action", "=", "'append'", ",", "dest", "=", "'make_link'", ")", "parser_setup", ".", "add_argument", "(", "'--make-default-link'", ",", "action", "=", "'append_const'", ",", "dest", "=", "'make_link'", ",", "const", "=", "DEFAULT_TEJ_DIR", ")", "parser_setup", ".", "add_argument", "(", "'--force'", ",", "action", "=", "'store_true'", ")", "parser_setup", ".", "add_argument", "(", "'--only-links'", ",", "action", "=", "'store_true'", ")", "parser_setup", ".", "set_defaults", "(", "func", "=", "_setup", ")", "# Submit action", "parser_submit", "=", "subparsers", ".", "add_parser", "(", "'submit'", ",", "help", "=", "\"Submits a job to a remote machine\"", ")", "add_destination_option", "(", "parser_submit", ")", "add_runtime_option", "(", "parser_submit", ")", "parser_submit", ".", "add_argument", "(", "'--id'", ",", "action", "=", "'store'", ",", "help", "=", "\"Identifier for the new job\"", ")", "parser_submit", ".", "add_argument", "(", "'--script'", ",", "action", "=", "'store'", ",", "help", "=", "\"Relative name of the script in the \"", "\"directory\"", ")", "parser_submit", ".", "add_argument", "(", "'directory'", ",", "action", "=", "'store'", ",", "help", "=", "\"Job directory to upload\"", ")", "parser_submit", ".", "set_defaults", "(", "func", "=", "_submit", ")", "# Status action", "parser_status", "=", "subparsers", ".", "add_parser", "(", "'status'", ",", "help", "=", "\"Gets the status of a job\"", ")", "add_destination_option", "(", "parser_status", ")", "parser_status", ".", "add_argument", "(", "'--id'", ",", "action", 
"=", "'store'", ",", "help", "=", "\"Identifier of the running job\"", ")", "parser_status", ".", "set_defaults", "(", "func", "=", "_status", ")", "# Download action", "parser_download", "=", "subparsers", ".", "add_parser", "(", "'download'", ",", "help", "=", "\"Downloads files from finished job\"", ")", "add_destination_option", "(", "parser_download", ")", "parser_download", ".", "add_argument", "(", "'--id'", ",", "action", "=", "'store'", ",", "help", "=", "\"Identifier of the job\"", ")", "parser_download", ".", "add_argument", "(", "'files'", ",", "action", "=", "'store'", ",", "nargs", "=", "argparse", ".", "ONE_OR_MORE", ",", "help", "=", "\"Files to download\"", ")", "parser_download", ".", "set_defaults", "(", "func", "=", "_download", ")", "# Kill action", "parser_kill", "=", "subparsers", ".", "add_parser", "(", "'kill'", ",", "help", "=", "\"Kills a running job\"", ")", "add_destination_option", "(", "parser_kill", ")", "parser_kill", ".", "add_argument", "(", "'--id'", ",", "action", "=", "'store'", ",", "help", "=", "\"Identifier of the running job\"", ")", "parser_kill", ".", "set_defaults", "(", "func", "=", "_kill", ")", "# Delete action", "parser_delete", "=", "subparsers", ".", "add_parser", "(", "'delete'", ",", "help", "=", "\"Deletes a finished job\"", ")", "add_destination_option", "(", "parser_delete", ")", "parser_delete", ".", "add_argument", "(", "'--id'", ",", "action", "=", "'store'", ",", "help", "=", "\"Identifier of the finished job\"", ")", "parser_delete", ".", "set_defaults", "(", "func", "=", "_delete", ")", "# List action", "parser_list", "=", "subparsers", ".", "add_parser", "(", "'list'", ",", "help", "=", "\"Lists remote jobs\"", ")", "add_destination_option", "(", "parser_list", ")", "parser_list", ".", "set_defaults", "(", "func", "=", "_list", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "setup_logging", "(", "args", ".", "verbosity", ")", "try", ":", "args", ".", "func", "(", "args", ")", "except", 
"Error", "as", "e", ":", "# No need to show a traceback here, this is not an internal error", "logger", ".", "critical", "(", "e", ")", "sys", ".", "exit", "(", "1", ")", "sys", ".", "exit", "(", "0", ")" ]
Entry point when called on the command-line.
[ "Entry", "point", "when", "called", "on", "the", "command", "-", "line", "." ]
train
https://github.com/VisTrails/tej/blob/b8dedaeb6bdeb650b46cfe6d85e5aa9284fc7f0b/tej/main.py#L103-L231
agile4you/bottle-neck
bottle_neck/plugins.py
WrapErrorPlugin.apply
def apply(self, callback, context): # pragma: no cover """Apply the HTTPError wrapper to the callback. """ def wrapper(*args, **kwargs): try: return callback(*args, **kwargs) except bottle.HTTPError as error: return self.error_wrapper.from_status( status_line=error.status_line, msg=error.body ) return wrapper
python
def apply(self, callback, context): # pragma: no cover """Apply the HTTPError wrapper to the callback. """ def wrapper(*args, **kwargs): try: return callback(*args, **kwargs) except bottle.HTTPError as error: return self.error_wrapper.from_status( status_line=error.status_line, msg=error.body ) return wrapper
[ "def", "apply", "(", "self", ",", "callback", ",", "context", ")", ":", "# pragma: no cover", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "callback", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "bottle", ".", "HTTPError", "as", "error", ":", "return", "self", ".", "error_wrapper", ".", "from_status", "(", "status_line", "=", "error", ".", "status_line", ",", "msg", "=", "error", ".", "body", ")", "return", "wrapper" ]
Apply the HTTPError wrapper to the callback.
[ "Apply", "the", "HTTPError", "wrapper", "to", "the", "callback", "." ]
train
https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/plugins.py#L61-L74
nbedi/typecaster
typecaster/models.py
Podcast.add_episode
def add_episode(self, text, text_format, title, author, summary=None, publish_date=None, synthesizer='watson', synth_args=None, sentence_break='. '): """ Add a new episode to the podcast. :param text: See :meth:`Episode`. :param text_format: See :meth:`Episode`. :param title: See :meth:`Episode`. :param author: See :meth:`Episode`. :param summary: See :meth:`Episode`. :param publish_date: See :meth:`Episode`. :param synthesizer: See :meth:`typecaster.utils.text_to_speech`. :param synth_args: See :meth:`typecaster.utils.text_to_speech`. :param sentence_break: See :meth:`typecaster.utils.text_to_speech`. """ if title in self.episodes: raise ValueError('"' + title + '" already exists as an episode title.') link = self.output_path + '/' + title.replace(' ', '_').lower() + '.mp3' episode_text = convert_to_ssml(text, text_format) new_episode = Episode(episode_text, text_format, title, author, link, summary, publish_date, synthesizer, synth_args, sentence_break) self.episodes[title] = new_episode
python
def add_episode(self, text, text_format, title, author, summary=None, publish_date=None, synthesizer='watson', synth_args=None, sentence_break='. '): """ Add a new episode to the podcast. :param text: See :meth:`Episode`. :param text_format: See :meth:`Episode`. :param title: See :meth:`Episode`. :param author: See :meth:`Episode`. :param summary: See :meth:`Episode`. :param publish_date: See :meth:`Episode`. :param synthesizer: See :meth:`typecaster.utils.text_to_speech`. :param synth_args: See :meth:`typecaster.utils.text_to_speech`. :param sentence_break: See :meth:`typecaster.utils.text_to_speech`. """ if title in self.episodes: raise ValueError('"' + title + '" already exists as an episode title.') link = self.output_path + '/' + title.replace(' ', '_').lower() + '.mp3' episode_text = convert_to_ssml(text, text_format) new_episode = Episode(episode_text, text_format, title, author, link, summary, publish_date, synthesizer, synth_args, sentence_break) self.episodes[title] = new_episode
[ "def", "add_episode", "(", "self", ",", "text", ",", "text_format", ",", "title", ",", "author", ",", "summary", "=", "None", ",", "publish_date", "=", "None", ",", "synthesizer", "=", "'watson'", ",", "synth_args", "=", "None", ",", "sentence_break", "=", "'. '", ")", ":", "if", "title", "in", "self", ".", "episodes", ":", "raise", "ValueError", "(", "'\"'", "+", "title", "+", "'\" already exists as an episode title.'", ")", "link", "=", "self", ".", "output_path", "+", "'/'", "+", "title", ".", "replace", "(", "' '", ",", "'_'", ")", ".", "lower", "(", ")", "+", "'.mp3'", "episode_text", "=", "convert_to_ssml", "(", "text", ",", "text_format", ")", "new_episode", "=", "Episode", "(", "episode_text", ",", "text_format", ",", "title", ",", "author", ",", "link", ",", "summary", ",", "publish_date", ",", "synthesizer", ",", "synth_args", ",", "sentence_break", ")", "self", ".", "episodes", "[", "title", "]", "=", "new_episode" ]
Add a new episode to the podcast. :param text: See :meth:`Episode`. :param text_format: See :meth:`Episode`. :param title: See :meth:`Episode`. :param author: See :meth:`Episode`. :param summary: See :meth:`Episode`. :param publish_date: See :meth:`Episode`. :param synthesizer: See :meth:`typecaster.utils.text_to_speech`. :param synth_args: See :meth:`typecaster.utils.text_to_speech`. :param sentence_break: See :meth:`typecaster.utils.text_to_speech`.
[ "Add", "a", "new", "episode", "to", "the", "podcast", "." ]
train
https://github.com/nbedi/typecaster/blob/09eee6d4fbad9f70c90364ea89ab39917f903afc/typecaster/models.py#L70-L101
nbedi/typecaster
typecaster/models.py
Podcast.add_scheduled_job
def add_scheduled_job(self, text_source, cron_args, text_format, title, author, summary=None, synthesizer='watson', synth_args=None, sentence_break='. '): """ Add and start a new scheduled job to dynamically generate podcasts. Note: scheduling will end when the process ends. This works best when run inside an existing application. :param text_source: A function that generates podcast text. Examples: a function that opens a file with today's date as a filename or a function that requests a specific url and extracts the main text. Also see :meth:`Episode`. :param cron_args: A dictionary of cron parameters. Keys can be: 'year', 'month', 'day', 'week', 'day_of_week', 'hour', 'minute' and 'second'. Keys that are not specified will be parsed as 'any'/'*'. :param text_format: See :meth:`Episode`. :param title: See :meth:`Episode`. Since titles need to be unique, a timestamp will be appended to the title for each episode. :param author: See :meth:`Episode`. :param summary: See :meth:`Episode`. :param publish_date: See :meth:`Episode`. :param synthesizer: See :meth:`typecaster.utils.text_to_speech`. :param synth_args: See :meth:`typecaster.utils.text_to_speech`. :param sentence_break: See :meth:`typecaster.utils.text_to_speech`. """ if not callable(text_source): raise TypeError('Argument "text" must be a function') def add_episode(): episode_text = text_source() episode_title = title + '_' + datetime.utcnow().strftime('%Y%m%d%H%M%S') self.add_episode(episode_text, text_format, episode_title, author, summary, datetime.utcnow(), synthesizer, synth_args, sentence_break) self.scheduled_jobs[title] = self._scheduler.add_job(add_episode, 'cron', id=title, **cron_args) if not self._scheduler.running: self._scheduler.start()
python
def add_scheduled_job(self, text_source, cron_args, text_format, title, author, summary=None, synthesizer='watson', synth_args=None, sentence_break='. '): """ Add and start a new scheduled job to dynamically generate podcasts. Note: scheduling will end when the process ends. This works best when run inside an existing application. :param text_source: A function that generates podcast text. Examples: a function that opens a file with today's date as a filename or a function that requests a specific url and extracts the main text. Also see :meth:`Episode`. :param cron_args: A dictionary of cron parameters. Keys can be: 'year', 'month', 'day', 'week', 'day_of_week', 'hour', 'minute' and 'second'. Keys that are not specified will be parsed as 'any'/'*'. :param text_format: See :meth:`Episode`. :param title: See :meth:`Episode`. Since titles need to be unique, a timestamp will be appended to the title for each episode. :param author: See :meth:`Episode`. :param summary: See :meth:`Episode`. :param publish_date: See :meth:`Episode`. :param synthesizer: See :meth:`typecaster.utils.text_to_speech`. :param synth_args: See :meth:`typecaster.utils.text_to_speech`. :param sentence_break: See :meth:`typecaster.utils.text_to_speech`. """ if not callable(text_source): raise TypeError('Argument "text" must be a function') def add_episode(): episode_text = text_source() episode_title = title + '_' + datetime.utcnow().strftime('%Y%m%d%H%M%S') self.add_episode(episode_text, text_format, episode_title, author, summary, datetime.utcnow(), synthesizer, synth_args, sentence_break) self.scheduled_jobs[title] = self._scheduler.add_job(add_episode, 'cron', id=title, **cron_args) if not self._scheduler.running: self._scheduler.start()
[ "def", "add_scheduled_job", "(", "self", ",", "text_source", ",", "cron_args", ",", "text_format", ",", "title", ",", "author", ",", "summary", "=", "None", ",", "synthesizer", "=", "'watson'", ",", "synth_args", "=", "None", ",", "sentence_break", "=", "'. '", ")", ":", "if", "not", "callable", "(", "text_source", ")", ":", "raise", "TypeError", "(", "'Argument \"text\" must be a function'", ")", "def", "add_episode", "(", ")", ":", "episode_text", "=", "text_source", "(", ")", "episode_title", "=", "title", "+", "'_'", "+", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "'%Y%m%d%H%M%S'", ")", "self", ".", "add_episode", "(", "episode_text", ",", "text_format", ",", "episode_title", ",", "author", ",", "summary", ",", "datetime", ".", "utcnow", "(", ")", ",", "synthesizer", ",", "synth_args", ",", "sentence_break", ")", "self", ".", "scheduled_jobs", "[", "title", "]", "=", "self", ".", "_scheduler", ".", "add_job", "(", "add_episode", ",", "'cron'", ",", "id", "=", "title", ",", "*", "*", "cron_args", ")", "if", "not", "self", ".", "_scheduler", ".", "running", ":", "self", ".", "_scheduler", ".", "start", "(", ")" ]
Add and start a new scheduled job to dynamically generate podcasts. Note: scheduling will end when the process ends. This works best when run inside an existing application. :param text_source: A function that generates podcast text. Examples: a function that opens a file with today's date as a filename or a function that requests a specific url and extracts the main text. Also see :meth:`Episode`. :param cron_args: A dictionary of cron parameters. Keys can be: 'year', 'month', 'day', 'week', 'day_of_week', 'hour', 'minute' and 'second'. Keys that are not specified will be parsed as 'any'/'*'. :param text_format: See :meth:`Episode`. :param title: See :meth:`Episode`. Since titles need to be unique, a timestamp will be appended to the title for each episode. :param author: See :meth:`Episode`. :param summary: See :meth:`Episode`. :param publish_date: See :meth:`Episode`. :param synthesizer: See :meth:`typecaster.utils.text_to_speech`. :param synth_args: See :meth:`typecaster.utils.text_to_speech`. :param sentence_break: See :meth:`typecaster.utils.text_to_speech`.
[ "Add", "and", "start", "a", "new", "scheduled", "job", "to", "dynamically", "generate", "podcasts", "." ]
train
https://github.com/nbedi/typecaster/blob/09eee6d4fbad9f70c90364ea89ab39917f903afc/typecaster/models.py#L103-L150
nbedi/typecaster
typecaster/models.py
Podcast.publish
def publish(self, titles): """ Publish a set of episodes to the Podcast's RSS feed. :param titles: Either a single episode title or a sequence of episode titles to publish. """ if isinstance(titles, Sequence) and not isinstance(titles, six.string_types): for title in titles: self.episodes[title].publish() elif isinstance(titles, six.string_types): self.episodes[titles].publish() else: raise TypeError('titles must be a string or a sequence of strings.') self.update_rss_feed()
python
def publish(self, titles): """ Publish a set of episodes to the Podcast's RSS feed. :param titles: Either a single episode title or a sequence of episode titles to publish. """ if isinstance(titles, Sequence) and not isinstance(titles, six.string_types): for title in titles: self.episodes[title].publish() elif isinstance(titles, six.string_types): self.episodes[titles].publish() else: raise TypeError('titles must be a string or a sequence of strings.') self.update_rss_feed()
[ "def", "publish", "(", "self", ",", "titles", ")", ":", "if", "isinstance", "(", "titles", ",", "Sequence", ")", "and", "not", "isinstance", "(", "titles", ",", "six", ".", "string_types", ")", ":", "for", "title", "in", "titles", ":", "self", ".", "episodes", "[", "title", "]", ".", "publish", "(", ")", "elif", "isinstance", "(", "titles", ",", "six", ".", "string_types", ")", ":", "self", ".", "episodes", "[", "titles", "]", ".", "publish", "(", ")", "else", ":", "raise", "TypeError", "(", "'titles must be a string or a sequence of strings.'", ")", "self", ".", "update_rss_feed", "(", ")" ]
Publish a set of episodes to the Podcast's RSS feed. :param titles: Either a single episode title or a sequence of episode titles to publish.
[ "Publish", "a", "set", "of", "episodes", "to", "the", "Podcast", "s", "RSS", "feed", "." ]
train
https://github.com/nbedi/typecaster/blob/09eee6d4fbad9f70c90364ea89ab39917f903afc/typecaster/models.py#L152-L168
nbedi/typecaster
typecaster/models.py
Episode.render_audio
def render_audio(self): """ Synthesize audio from the episode's text. """ segment = text_to_speech(self._text, self.synthesizer, self.synth_args, self.sentence_break) milli = len(segment) seconds = '{0:.1f}'.format(float(milli) / 1000 % 60).zfill(2) minutes = '{0:.0f}'.format((milli / (1000 * 60)) % 60).zfill(2) hours = '{0:.0f}'.format((milli / (1000 * 60 * 60)) % 24).zfill(2) self.duration = hours + ':' + minutes + ':' + seconds segment.export(self.link, format='mp3') self.length = os.path.getsize(self.link)
python
def render_audio(self): """ Synthesize audio from the episode's text. """ segment = text_to_speech(self._text, self.synthesizer, self.synth_args, self.sentence_break) milli = len(segment) seconds = '{0:.1f}'.format(float(milli) / 1000 % 60).zfill(2) minutes = '{0:.0f}'.format((milli / (1000 * 60)) % 60).zfill(2) hours = '{0:.0f}'.format((milli / (1000 * 60 * 60)) % 24).zfill(2) self.duration = hours + ':' + minutes + ':' + seconds segment.export(self.link, format='mp3') self.length = os.path.getsize(self.link)
[ "def", "render_audio", "(", "self", ")", ":", "segment", "=", "text_to_speech", "(", "self", ".", "_text", ",", "self", ".", "synthesizer", ",", "self", ".", "synth_args", ",", "self", ".", "sentence_break", ")", "milli", "=", "len", "(", "segment", ")", "seconds", "=", "'{0:.1f}'", ".", "format", "(", "float", "(", "milli", ")", "/", "1000", "%", "60", ")", ".", "zfill", "(", "2", ")", "minutes", "=", "'{0:.0f}'", ".", "format", "(", "(", "milli", "/", "(", "1000", "*", "60", ")", ")", "%", "60", ")", ".", "zfill", "(", "2", ")", "hours", "=", "'{0:.0f}'", ".", "format", "(", "(", "milli", "/", "(", "1000", "*", "60", "*", "60", ")", ")", "%", "24", ")", ".", "zfill", "(", "2", ")", "self", ".", "duration", "=", "hours", "+", "':'", "+", "minutes", "+", "':'", "+", "seconds", "segment", ".", "export", "(", "self", ".", "link", ",", "format", "=", "'mp3'", ")", "self", ".", "length", "=", "os", ".", "path", ".", "getsize", "(", "self", ".", "link", ")" ]
Synthesize audio from the episode's text.
[ "Synthesize", "audio", "from", "the", "episode", "s", "text", "." ]
train
https://github.com/nbedi/typecaster/blob/09eee6d4fbad9f70c90364ea89ab39917f903afc/typecaster/models.py#L261-L274
nbedi/typecaster
typecaster/models.py
Episode.publish
def publish(self): """ Mark an episode as published. """ if self.published is False: self.published = True else: raise Warning(self.title + ' is already published.')
python
def publish(self): """ Mark an episode as published. """ if self.published is False: self.published = True else: raise Warning(self.title + ' is already published.')
[ "def", "publish", "(", "self", ")", ":", "if", "self", ".", "published", "is", "False", ":", "self", ".", "published", "=", "True", "else", ":", "raise", "Warning", "(", "self", ".", "title", "+", "' is already published.'", ")" ]
Mark an episode as published.
[ "Mark", "an", "episode", "as", "published", "." ]
train
https://github.com/nbedi/typecaster/blob/09eee6d4fbad9f70c90364ea89ab39917f903afc/typecaster/models.py#L276-L283
nbedi/typecaster
typecaster/models.py
Episode.unpublish
def unpublish(self): """ Mark an episode as not published. """ if self.published is True: self.published = False else: raise Warning(self.title + ' is already not published.')
python
def unpublish(self): """ Mark an episode as not published. """ if self.published is True: self.published = False else: raise Warning(self.title + ' is already not published.')
[ "def", "unpublish", "(", "self", ")", ":", "if", "self", ".", "published", "is", "True", ":", "self", ".", "published", "=", "False", "else", ":", "raise", "Warning", "(", "self", ".", "title", "+", "' is already not published.'", ")" ]
Mark an episode as not published.
[ "Mark", "an", "episode", "as", "not", "published", "." ]
train
https://github.com/nbedi/typecaster/blob/09eee6d4fbad9f70c90364ea89ab39917f903afc/typecaster/models.py#L285-L292
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/useragent.py
UserAgent._environment_variables
def _environment_variables(**kwargs): # type: (Any) -> Any """ Overwrite keyword arguments with environment variables Args: **kwargs: See below user_agent (str): User agent string. Returns: kwargs: Changed keyword arguments """ user_agent = os.getenv('USER_AGENT') if user_agent is not None: kwargs['user_agent'] = user_agent preprefix = os.getenv('PREPREFIX') if preprefix is not None: kwargs['preprefix'] = preprefix return kwargs
python
def _environment_variables(**kwargs): # type: (Any) -> Any """ Overwrite keyword arguments with environment variables Args: **kwargs: See below user_agent (str): User agent string. Returns: kwargs: Changed keyword arguments """ user_agent = os.getenv('USER_AGENT') if user_agent is not None: kwargs['user_agent'] = user_agent preprefix = os.getenv('PREPREFIX') if preprefix is not None: kwargs['preprefix'] = preprefix return kwargs
[ "def", "_environment_variables", "(", "*", "*", "kwargs", ")", ":", "# type: (Any) -> Any", "user_agent", "=", "os", ".", "getenv", "(", "'USER_AGENT'", ")", "if", "user_agent", "is", "not", "None", ":", "kwargs", "[", "'user_agent'", "]", "=", "user_agent", "preprefix", "=", "os", ".", "getenv", "(", "'PREPREFIX'", ")", "if", "preprefix", "is", "not", "None", ":", "kwargs", "[", "'preprefix'", "]", "=", "preprefix", "return", "kwargs" ]
Overwrite keyword arguments with environment variables Args: **kwargs: See below user_agent (str): User agent string. Returns: kwargs: Changed keyword arguments
[ "Overwrite", "keyword", "arguments", "with", "environment", "variables" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/useragent.py#L23-L42
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/useragent.py
UserAgent._construct
def _construct(configdict, prefix, ua): # type: (Dict, str, str) -> str """ Construct user agent Args: configdict (str): Additional configuration for user agent prefix (str): Text to put at start of user agent ua (str): Custom user agent text Returns: str: Full user agent string """ if not ua: raise UserAgentError("User_agent parameter missing. It can be your project's name for example.") preprefix = configdict.get('preprefix') if preprefix: user_agent = '%s:' % preprefix else: user_agent = '' if prefix: user_agent = '%s%s-' % (user_agent, prefix) user_agent = '%s%s' % (user_agent, ua) return user_agent
python
def _construct(configdict, prefix, ua): # type: (Dict, str, str) -> str """ Construct user agent Args: configdict (str): Additional configuration for user agent prefix (str): Text to put at start of user agent ua (str): Custom user agent text Returns: str: Full user agent string """ if not ua: raise UserAgentError("User_agent parameter missing. It can be your project's name for example.") preprefix = configdict.get('preprefix') if preprefix: user_agent = '%s:' % preprefix else: user_agent = '' if prefix: user_agent = '%s%s-' % (user_agent, prefix) user_agent = '%s%s' % (user_agent, ua) return user_agent
[ "def", "_construct", "(", "configdict", ",", "prefix", ",", "ua", ")", ":", "# type: (Dict, str, str) -> str", "if", "not", "ua", ":", "raise", "UserAgentError", "(", "\"User_agent parameter missing. It can be your project's name for example.\"", ")", "preprefix", "=", "configdict", ".", "get", "(", "'preprefix'", ")", "if", "preprefix", ":", "user_agent", "=", "'%s:'", "%", "preprefix", "else", ":", "user_agent", "=", "''", "if", "prefix", ":", "user_agent", "=", "'%s%s-'", "%", "(", "user_agent", ",", "prefix", ")", "user_agent", "=", "'%s%s'", "%", "(", "user_agent", ",", "ua", ")", "return", "user_agent" ]
Construct user agent Args: configdict (str): Additional configuration for user agent prefix (str): Text to put at start of user agent ua (str): Custom user agent text Returns: str: Full user agent string
[ "Construct", "user", "agent" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/useragent.py#L45-L69
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/useragent.py
UserAgent._load
def _load(cls, prefix, user_agent_config_yaml, user_agent_lookup=None): # type: (str, str, Optional[str]) -> str """ Load user agent YAML file Args: prefix (str): Text to put at start of user agent user_agent_config_yaml (str): Path to user agent YAML file user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: user agent """ if not user_agent_config_yaml: user_agent_config_yaml = cls.default_user_agent_config_yaml logger.info( 'No user agent or user agent config file given. Using default user agent config file: %s.' % user_agent_config_yaml) if not isfile(user_agent_config_yaml): raise UserAgentError( "User_agent should be supplied in a YAML config file. It can be your project's name for example.") logger.info('Loading user agent config from: %s' % user_agent_config_yaml) user_agent_config_dict = load_yaml(user_agent_config_yaml) if user_agent_lookup: user_agent_config_dict = user_agent_config_dict.get(user_agent_lookup) if not user_agent_config_dict: raise UserAgentError("No user agent information read from: %s" % user_agent_config_yaml) ua = user_agent_config_dict.get('user_agent') return cls._construct(user_agent_config_dict, prefix, ua)
python
def _load(cls, prefix, user_agent_config_yaml, user_agent_lookup=None): # type: (str, str, Optional[str]) -> str """ Load user agent YAML file Args: prefix (str): Text to put at start of user agent user_agent_config_yaml (str): Path to user agent YAML file user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: user agent """ if not user_agent_config_yaml: user_agent_config_yaml = cls.default_user_agent_config_yaml logger.info( 'No user agent or user agent config file given. Using default user agent config file: %s.' % user_agent_config_yaml) if not isfile(user_agent_config_yaml): raise UserAgentError( "User_agent should be supplied in a YAML config file. It can be your project's name for example.") logger.info('Loading user agent config from: %s' % user_agent_config_yaml) user_agent_config_dict = load_yaml(user_agent_config_yaml) if user_agent_lookup: user_agent_config_dict = user_agent_config_dict.get(user_agent_lookup) if not user_agent_config_dict: raise UserAgentError("No user agent information read from: %s" % user_agent_config_yaml) ua = user_agent_config_dict.get('user_agent') return cls._construct(user_agent_config_dict, prefix, ua)
[ "def", "_load", "(", "cls", ",", "prefix", ",", "user_agent_config_yaml", ",", "user_agent_lookup", "=", "None", ")", ":", "# type: (str, str, Optional[str]) -> str", "if", "not", "user_agent_config_yaml", ":", "user_agent_config_yaml", "=", "cls", ".", "default_user_agent_config_yaml", "logger", ".", "info", "(", "'No user agent or user agent config file given. Using default user agent config file: %s.'", "%", "user_agent_config_yaml", ")", "if", "not", "isfile", "(", "user_agent_config_yaml", ")", ":", "raise", "UserAgentError", "(", "\"User_agent should be supplied in a YAML config file. It can be your project's name for example.\"", ")", "logger", ".", "info", "(", "'Loading user agent config from: %s'", "%", "user_agent_config_yaml", ")", "user_agent_config_dict", "=", "load_yaml", "(", "user_agent_config_yaml", ")", "if", "user_agent_lookup", ":", "user_agent_config_dict", "=", "user_agent_config_dict", ".", "get", "(", "user_agent_lookup", ")", "if", "not", "user_agent_config_dict", ":", "raise", "UserAgentError", "(", "\"No user agent information read from: %s\"", "%", "user_agent_config_yaml", ")", "ua", "=", "user_agent_config_dict", ".", "get", "(", "'user_agent'", ")", "return", "cls", ".", "_construct", "(", "user_agent_config_dict", ",", "prefix", ",", "ua", ")" ]
Load user agent YAML file Args: prefix (str): Text to put at start of user agent user_agent_config_yaml (str): Path to user agent YAML file user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: user agent
[ "Load", "user", "agent", "YAML", "file" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/useragent.py#L72-L100
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/useragent.py
UserAgent._create
def _create(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs): # type: (Optional[str], Optional[str], Optional[str], Any) -> str """ Get full user agent string Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: Full user agent string """ kwargs = UserAgent._environment_variables(**kwargs) if 'user_agent' in kwargs: user_agent = kwargs['user_agent'] del kwargs['user_agent'] prefix = kwargs.get('prefix') if prefix: del kwargs['prefix'] else: prefix = 'HDXPythonUtilities/%s' % get_utils_version() if not user_agent: ua = cls._load(prefix, user_agent_config_yaml, user_agent_lookup) else: ua = cls._construct(kwargs, prefix, user_agent) return ua
python
def _create(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs): # type: (Optional[str], Optional[str], Optional[str], Any) -> str """ Get full user agent string Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: Full user agent string """ kwargs = UserAgent._environment_variables(**kwargs) if 'user_agent' in kwargs: user_agent = kwargs['user_agent'] del kwargs['user_agent'] prefix = kwargs.get('prefix') if prefix: del kwargs['prefix'] else: prefix = 'HDXPythonUtilities/%s' % get_utils_version() if not user_agent: ua = cls._load(prefix, user_agent_config_yaml, user_agent_lookup) else: ua = cls._construct(kwargs, prefix, user_agent) return ua
[ "def", "_create", "(", "cls", ",", "user_agent", "=", "None", ",", "user_agent_config_yaml", "=", "None", ",", "user_agent_lookup", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# type: (Optional[str], Optional[str], Optional[str], Any) -> str", "kwargs", "=", "UserAgent", ".", "_environment_variables", "(", "*", "*", "kwargs", ")", "if", "'user_agent'", "in", "kwargs", ":", "user_agent", "=", "kwargs", "[", "'user_agent'", "]", "del", "kwargs", "[", "'user_agent'", "]", "prefix", "=", "kwargs", ".", "get", "(", "'prefix'", ")", "if", "prefix", ":", "del", "kwargs", "[", "'prefix'", "]", "else", ":", "prefix", "=", "'HDXPythonUtilities/%s'", "%", "get_utils_version", "(", ")", "if", "not", "user_agent", ":", "ua", "=", "cls", ".", "_load", "(", "prefix", ",", "user_agent_config_yaml", ",", "user_agent_lookup", ")", "else", ":", "ua", "=", "cls", ".", "_construct", "(", "kwargs", ",", "prefix", ",", "user_agent", ")", "return", "ua" ]
Get full user agent string Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: Full user agent string
[ "Get", "full", "user", "agent", "string" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/useragent.py#L103-L131
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/useragent.py
UserAgent.set_global
def set_global(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs): # type: (Optional[str], Optional[str], Optional[str], Any) -> None """ Set global user agent string Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: None """ cls.user_agent = cls._create(user_agent, user_agent_config_yaml, user_agent_lookup, **kwargs)
python
def set_global(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs): # type: (Optional[str], Optional[str], Optional[str], Any) -> None """ Set global user agent string Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: None """ cls.user_agent = cls._create(user_agent, user_agent_config_yaml, user_agent_lookup, **kwargs)
[ "def", "set_global", "(", "cls", ",", "user_agent", "=", "None", ",", "user_agent_config_yaml", "=", "None", ",", "user_agent_lookup", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# type: (Optional[str], Optional[str], Optional[str], Any) -> None", "cls", ".", "user_agent", "=", "cls", ".", "_create", "(", "user_agent", ",", "user_agent_config_yaml", ",", "user_agent_lookup", ",", "*", "*", "kwargs", ")" ]
Set global user agent string Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: None
[ "Set", "global", "user", "agent", "string" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/useragent.py#L146-L160
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/useragent.py
UserAgent.get
def get(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs): # type: (Optional[str], Optional[str], Optional[str], Any) -> str """ Get full user agent string from parameters if supplied falling back on global user agent if set. Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: Full user agent string """ if user_agent or user_agent_config_yaml or 'user_agent' in UserAgent._environment_variables(**kwargs): return UserAgent._create(user_agent, user_agent_config_yaml, user_agent_lookup, **kwargs) if cls.user_agent: return cls.user_agent else: raise UserAgentError( 'You must either set the global user agent: UserAgent.set_global(...) or pass in user agent parameters!')
python
def get(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs): # type: (Optional[str], Optional[str], Optional[str], Any) -> str """ Get full user agent string from parameters if supplied falling back on global user agent if set. Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: Full user agent string """ if user_agent or user_agent_config_yaml or 'user_agent' in UserAgent._environment_variables(**kwargs): return UserAgent._create(user_agent, user_agent_config_yaml, user_agent_lookup, **kwargs) if cls.user_agent: return cls.user_agent else: raise UserAgentError( 'You must either set the global user agent: UserAgent.set_global(...) or pass in user agent parameters!')
[ "def", "get", "(", "cls", ",", "user_agent", "=", "None", ",", "user_agent_config_yaml", "=", "None", ",", "user_agent_lookup", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# type: (Optional[str], Optional[str], Optional[str], Any) -> str", "if", "user_agent", "or", "user_agent_config_yaml", "or", "'user_agent'", "in", "UserAgent", ".", "_environment_variables", "(", "*", "*", "kwargs", ")", ":", "return", "UserAgent", ".", "_create", "(", "user_agent", ",", "user_agent_config_yaml", ",", "user_agent_lookup", ",", "*", "*", "kwargs", ")", "if", "cls", ".", "user_agent", ":", "return", "cls", ".", "user_agent", "else", ":", "raise", "UserAgentError", "(", "'You must either set the global user agent: UserAgent.set_global(...) or pass in user agent parameters!'", ")" ]
Get full user agent string from parameters if supplied falling back on global user agent if set. Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: Full user agent string
[ "Get", "full", "user", "agent", "string", "from", "parameters", "if", "supplied", "falling", "back", "on", "global", "user", "agent", "if", "set", "." ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/useragent.py#L163-L183
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/saver.py
save_yaml
def save_yaml(dictionary, path, pretty=False, sortkeys=False): # type: (Dict, str, bool, bool) -> None """Save dictionary to YAML file preserving order if it is an OrderedDict Args: dictionary (Dict): Python dictionary to save path (str): Path to YAML file pretty (bool): Whether to pretty print. Defaults to False. sortkeys (bool): Whether to sort dictionary keys. Defaults to False. Returns: None """ if sortkeys: dictionary = dict(dictionary) with open(path, 'w') as f: if pretty: pyaml.dump(dictionary, f) else: yaml.dump(dictionary, f, default_flow_style=None, Dumper=yamlloader.ordereddict.CDumper)
python
def save_yaml(dictionary, path, pretty=False, sortkeys=False): # type: (Dict, str, bool, bool) -> None """Save dictionary to YAML file preserving order if it is an OrderedDict Args: dictionary (Dict): Python dictionary to save path (str): Path to YAML file pretty (bool): Whether to pretty print. Defaults to False. sortkeys (bool): Whether to sort dictionary keys. Defaults to False. Returns: None """ if sortkeys: dictionary = dict(dictionary) with open(path, 'w') as f: if pretty: pyaml.dump(dictionary, f) else: yaml.dump(dictionary, f, default_flow_style=None, Dumper=yamlloader.ordereddict.CDumper)
[ "def", "save_yaml", "(", "dictionary", ",", "path", ",", "pretty", "=", "False", ",", "sortkeys", "=", "False", ")", ":", "# type: (Dict, str, bool, bool) -> None", "if", "sortkeys", ":", "dictionary", "=", "dict", "(", "dictionary", ")", "with", "open", "(", "path", ",", "'w'", ")", "as", "f", ":", "if", "pretty", ":", "pyaml", ".", "dump", "(", "dictionary", ",", "f", ")", "else", ":", "yaml", ".", "dump", "(", "dictionary", ",", "f", ",", "default_flow_style", "=", "None", ",", "Dumper", "=", "yamlloader", ".", "ordereddict", ".", "CDumper", ")" ]
Save dictionary to YAML file preserving order if it is an OrderedDict Args: dictionary (Dict): Python dictionary to save path (str): Path to YAML file pretty (bool): Whether to pretty print. Defaults to False. sortkeys (bool): Whether to sort dictionary keys. Defaults to False. Returns: None
[ "Save", "dictionary", "to", "YAML", "file", "preserving", "order", "if", "it", "is", "an", "OrderedDict" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/saver.py#L15-L34
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/saver.py
save_json
def save_json(dictionary, path, pretty=False, sortkeys=False): # type: (Dict, str, bool, bool) -> None """Save dictionary to JSON file preserving order if it is an OrderedDict Args: dictionary (Dict): Python dictionary to save path (str): Path to JSON file pretty (bool): Whether to pretty print. Defaults to False. sortkeys (bool): Whether to sort dictionary keys. Defaults to False. Returns: None """ with open(path, 'w') as f: if pretty: indent = 2 separators = (',', ': ') else: indent = None separators = (', ', ': ') json.dump(dictionary, f, indent=indent, sort_keys=sortkeys, separators=separators)
python
def save_json(dictionary, path, pretty=False, sortkeys=False): # type: (Dict, str, bool, bool) -> None """Save dictionary to JSON file preserving order if it is an OrderedDict Args: dictionary (Dict): Python dictionary to save path (str): Path to JSON file pretty (bool): Whether to pretty print. Defaults to False. sortkeys (bool): Whether to sort dictionary keys. Defaults to False. Returns: None """ with open(path, 'w') as f: if pretty: indent = 2 separators = (',', ': ') else: indent = None separators = (', ', ': ') json.dump(dictionary, f, indent=indent, sort_keys=sortkeys, separators=separators)
[ "def", "save_json", "(", "dictionary", ",", "path", ",", "pretty", "=", "False", ",", "sortkeys", "=", "False", ")", ":", "# type: (Dict, str, bool, bool) -> None", "with", "open", "(", "path", ",", "'w'", ")", "as", "f", ":", "if", "pretty", ":", "indent", "=", "2", "separators", "=", "(", "','", ",", "': '", ")", "else", ":", "indent", "=", "None", "separators", "=", "(", "', '", ",", "': '", ")", "json", ".", "dump", "(", "dictionary", ",", "f", ",", "indent", "=", "indent", ",", "sort_keys", "=", "sortkeys", ",", "separators", "=", "separators", ")" ]
Save dictionary to JSON file preserving order if it is an OrderedDict Args: dictionary (Dict): Python dictionary to save path (str): Path to JSON file pretty (bool): Whether to pretty print. Defaults to False. sortkeys (bool): Whether to sort dictionary keys. Defaults to False. Returns: None
[ "Save", "dictionary", "to", "JSON", "file", "preserving", "order", "if", "it", "is", "an", "OrderedDict" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/saver.py#L37-L57
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/loader.py
load_yaml
def load_yaml(path): # type: (str) -> OrderedDict """Load YAML file into an ordered dictionary Args: path (str): Path to YAML file Returns: OrderedDict: Ordered dictionary containing loaded YAML file """ with open(path, 'rt') as f: yamldict = yaml.load(f.read(), Loader=yamlloader.ordereddict.CSafeLoader) if not yamldict: raise (LoadError('YAML file: %s is empty!' % path)) return yamldict
python
def load_yaml(path): # type: (str) -> OrderedDict """Load YAML file into an ordered dictionary Args: path (str): Path to YAML file Returns: OrderedDict: Ordered dictionary containing loaded YAML file """ with open(path, 'rt') as f: yamldict = yaml.load(f.read(), Loader=yamlloader.ordereddict.CSafeLoader) if not yamldict: raise (LoadError('YAML file: %s is empty!' % path)) return yamldict
[ "def", "load_yaml", "(", "path", ")", ":", "# type: (str) -> OrderedDict", "with", "open", "(", "path", ",", "'rt'", ")", "as", "f", ":", "yamldict", "=", "yaml", ".", "load", "(", "f", ".", "read", "(", ")", ",", "Loader", "=", "yamlloader", ".", "ordereddict", ".", "CSafeLoader", ")", "if", "not", "yamldict", ":", "raise", "(", "LoadError", "(", "'YAML file: %s is empty!'", "%", "path", ")", ")", "return", "yamldict" ]
Load YAML file into an ordered dictionary Args: path (str): Path to YAML file Returns: OrderedDict: Ordered dictionary containing loaded YAML file
[ "Load", "YAML", "file", "into", "an", "ordered", "dictionary" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/loader.py#L79-L93
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/loader.py
load_json
def load_json(path): # type: (str) -> OrderedDict """Load JSON file into an ordered dictionary Args: path (str): Path to JSON file Returns: OrderedDict: Ordered dictionary containing loaded JSON file """ with open(path, 'rt') as f: jsondict = json.loads(f.read(), object_pairs_hook=OrderedDict) if not jsondict: raise (LoadError('JSON file: %s is empty!' % path)) return jsondict
python
def load_json(path): # type: (str) -> OrderedDict """Load JSON file into an ordered dictionary Args: path (str): Path to JSON file Returns: OrderedDict: Ordered dictionary containing loaded JSON file """ with open(path, 'rt') as f: jsondict = json.loads(f.read(), object_pairs_hook=OrderedDict) if not jsondict: raise (LoadError('JSON file: %s is empty!' % path)) return jsondict
[ "def", "load_json", "(", "path", ")", ":", "# type: (str) -> OrderedDict", "with", "open", "(", "path", ",", "'rt'", ")", "as", "f", ":", "jsondict", "=", "json", ".", "loads", "(", "f", ".", "read", "(", ")", ",", "object_pairs_hook", "=", "OrderedDict", ")", "if", "not", "jsondict", ":", "raise", "(", "LoadError", "(", "'JSON file: %s is empty!'", "%", "path", ")", ")", "return", "jsondict" ]
Load JSON file into an ordered dictionary Args: path (str): Path to JSON file Returns: OrderedDict: Ordered dictionary containing loaded JSON file
[ "Load", "JSON", "file", "into", "an", "ordered", "dictionary" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/loader.py#L96-L110
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/loader.py
load_file_to_str
def load_file_to_str(path): # type: (str) -> str """ Load file into a string removing newlines Args: path (str): Path to file Returns: str: String contents of file """ with open(path, 'rt') as f: string = f.read().replace(linesep, '') if not string: raise LoadError('%s file is empty!' % path) return string
python
def load_file_to_str(path): # type: (str) -> str """ Load file into a string removing newlines Args: path (str): Path to file Returns: str: String contents of file """ with open(path, 'rt') as f: string = f.read().replace(linesep, '') if not string: raise LoadError('%s file is empty!' % path) return string
[ "def", "load_file_to_str", "(", "path", ")", ":", "# type: (str) -> str", "with", "open", "(", "path", ",", "'rt'", ")", "as", "f", ":", "string", "=", "f", ".", "read", "(", ")", ".", "replace", "(", "linesep", ",", "''", ")", "if", "not", "string", ":", "raise", "LoadError", "(", "'%s file is empty!'", "%", "path", ")", "return", "string" ]
Load file into a string removing newlines Args: path (str): Path to file Returns: str: String contents of file
[ "Load", "file", "into", "a", "string", "removing", "newlines" ]
train
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/loader.py#L113-L129
GaretJax/lancet
lancet/contrib/packaging_tools.py
contributors
def contributors(lancet, output): """ List all contributors visible in the git history. """ sorting = pygit2.GIT_SORT_TIME | pygit2.GIT_SORT_REVERSE commits = lancet.repo.walk(lancet.repo.head.target, sorting) contributors = ((c.author.name, c.author.email) for c in commits) contributors = OrderedDict(contributors) template_content = content_from_path( lancet.config.get('packaging', 'contributors_template')) template = Template(template_content) output.write(template.render(contributors=contributors).encode('utf-8'))
python
def contributors(lancet, output): """ List all contributors visible in the git history. """ sorting = pygit2.GIT_SORT_TIME | pygit2.GIT_SORT_REVERSE commits = lancet.repo.walk(lancet.repo.head.target, sorting) contributors = ((c.author.name, c.author.email) for c in commits) contributors = OrderedDict(contributors) template_content = content_from_path( lancet.config.get('packaging', 'contributors_template')) template = Template(template_content) output.write(template.render(contributors=contributors).encode('utf-8'))
[ "def", "contributors", "(", "lancet", ",", "output", ")", ":", "sorting", "=", "pygit2", ".", "GIT_SORT_TIME", "|", "pygit2", ".", "GIT_SORT_REVERSE", "commits", "=", "lancet", ".", "repo", ".", "walk", "(", "lancet", ".", "repo", ".", "head", ".", "target", ",", "sorting", ")", "contributors", "=", "(", "(", "c", ".", "author", ".", "name", ",", "c", ".", "author", ".", "email", ")", "for", "c", "in", "commits", ")", "contributors", "=", "OrderedDict", "(", "contributors", ")", "template_content", "=", "content_from_path", "(", "lancet", ".", "config", ".", "get", "(", "'packaging'", ",", "'contributors_template'", ")", ")", "template", "=", "Template", "(", "template_content", ")", "output", ".", "write", "(", "template", ".", "render", "(", "contributors", "=", "contributors", ")", ".", "encode", "(", "'utf-8'", ")", ")" ]
List all contributors visible in the git history.
[ "List", "all", "contributors", "visible", "in", "the", "git", "history", "." ]
train
https://github.com/GaretJax/lancet/blob/cf438c5c6166b18ee0dc5ffce55220793019bb95/lancet/contrib/packaging_tools.py#L14-L26
nbedi/typecaster
typecaster/utils.py
text_to_speech
def text_to_speech(text, synthesizer, synth_args, sentence_break): """ Converts given text to a pydub AudioSegment using a specified speech synthesizer. At the moment, IBM Watson's text-to-speech API is the only available synthesizer. :param text: The text that will be synthesized to audio. :param synthesizer: The text-to-speech synthesizer to use. At the moment, 'watson' is the only available input. :param synth_args: A dictionary of arguments to pass to the synthesizer. Parameters for authorization (username/password) should be passed here. :param sentence_break: A string that identifies a sentence break or another logical break in the text. Necessary for text longer than 50 words. Defaults to '. '. """ if len(text.split()) < 50: if synthesizer == 'watson': with open('.temp.wav', 'wb') as temp: temp.write(watson_request(text=text, synth_args=synth_args).content) response = AudioSegment.from_wav('.temp.wav') os.remove('.temp.wav') return response else: raise ValueError('"' + synthesizer + '" synthesizer not found.') else: segments = [] for i, sentence in enumerate(text.split(sentence_break)): if synthesizer == 'watson': with open('.temp' + str(i) + '.wav', 'wb') as temp: temp.write(watson_request(text=sentence, synth_args=synth_args).content) segments.append(AudioSegment.from_wav('.temp' + str(i) + '.wav')) os.remove('.temp' + str(i) + '.wav') else: raise ValueError('"' + synthesizer + '" synthesizer not found.') response = segments[0] for segment in segments[1:]: response = response + segment return response
python
def text_to_speech(text, synthesizer, synth_args, sentence_break): """ Converts given text to a pydub AudioSegment using a specified speech synthesizer. At the moment, IBM Watson's text-to-speech API is the only available synthesizer. :param text: The text that will be synthesized to audio. :param synthesizer: The text-to-speech synthesizer to use. At the moment, 'watson' is the only available input. :param synth_args: A dictionary of arguments to pass to the synthesizer. Parameters for authorization (username/password) should be passed here. :param sentence_break: A string that identifies a sentence break or another logical break in the text. Necessary for text longer than 50 words. Defaults to '. '. """ if len(text.split()) < 50: if synthesizer == 'watson': with open('.temp.wav', 'wb') as temp: temp.write(watson_request(text=text, synth_args=synth_args).content) response = AudioSegment.from_wav('.temp.wav') os.remove('.temp.wav') return response else: raise ValueError('"' + synthesizer + '" synthesizer not found.') else: segments = [] for i, sentence in enumerate(text.split(sentence_break)): if synthesizer == 'watson': with open('.temp' + str(i) + '.wav', 'wb') as temp: temp.write(watson_request(text=sentence, synth_args=synth_args).content) segments.append(AudioSegment.from_wav('.temp' + str(i) + '.wav')) os.remove('.temp' + str(i) + '.wav') else: raise ValueError('"' + synthesizer + '" synthesizer not found.') response = segments[0] for segment in segments[1:]: response = response + segment return response
[ "def", "text_to_speech", "(", "text", ",", "synthesizer", ",", "synth_args", ",", "sentence_break", ")", ":", "if", "len", "(", "text", ".", "split", "(", ")", ")", "<", "50", ":", "if", "synthesizer", "==", "'watson'", ":", "with", "open", "(", "'.temp.wav'", ",", "'wb'", ")", "as", "temp", ":", "temp", ".", "write", "(", "watson_request", "(", "text", "=", "text", ",", "synth_args", "=", "synth_args", ")", ".", "content", ")", "response", "=", "AudioSegment", ".", "from_wav", "(", "'.temp.wav'", ")", "os", ".", "remove", "(", "'.temp.wav'", ")", "return", "response", "else", ":", "raise", "ValueError", "(", "'\"'", "+", "synthesizer", "+", "'\" synthesizer not found.'", ")", "else", ":", "segments", "=", "[", "]", "for", "i", ",", "sentence", "in", "enumerate", "(", "text", ".", "split", "(", "sentence_break", ")", ")", ":", "if", "synthesizer", "==", "'watson'", ":", "with", "open", "(", "'.temp'", "+", "str", "(", "i", ")", "+", "'.wav'", ",", "'wb'", ")", "as", "temp", ":", "temp", ".", "write", "(", "watson_request", "(", "text", "=", "sentence", ",", "synth_args", "=", "synth_args", ")", ".", "content", ")", "segments", ".", "append", "(", "AudioSegment", ".", "from_wav", "(", "'.temp'", "+", "str", "(", "i", ")", "+", "'.wav'", ")", ")", "os", ".", "remove", "(", "'.temp'", "+", "str", "(", "i", ")", "+", "'.wav'", ")", "else", ":", "raise", "ValueError", "(", "'\"'", "+", "synthesizer", "+", "'\" synthesizer not found.'", ")", "response", "=", "segments", "[", "0", "]", "for", "segment", "in", "segments", "[", "1", ":", "]", ":", "response", "=", "response", "+", "segment", "return", "response" ]
Converts given text to a pydub AudioSegment using a specified speech synthesizer. At the moment, IBM Watson's text-to-speech API is the only available synthesizer. :param text: The text that will be synthesized to audio. :param synthesizer: The text-to-speech synthesizer to use. At the moment, 'watson' is the only available input. :param synth_args: A dictionary of arguments to pass to the synthesizer. Parameters for authorization (username/password) should be passed here. :param sentence_break: A string that identifies a sentence break or another logical break in the text. Necessary for text longer than 50 words. Defaults to '. '.
[ "Converts", "given", "text", "to", "a", "pydub", "AudioSegment", "using", "a", "specified", "speech", "synthesizer", ".", "At", "the", "moment", "IBM", "Watson", "s", "text", "-", "to", "-", "speech", "API", "is", "the", "only", "available", "synthesizer", "." ]
train
https://github.com/nbedi/typecaster/blob/09eee6d4fbad9f70c90364ea89ab39917f903afc/typecaster/utils.py#L11-L53
nbedi/typecaster
typecaster/utils.py
watson_request
def watson_request(text, synth_args): """ Makes a single request to the IBM Watson text-to-speech API. :param text: The text that will be synthesized to audio. :param synth_args: A dictionary of arguments to add to the request. These should include username and password for authentication. """ params = { 'text': text, 'accept': 'audio/wav' } if synth_args is not None: params.update(synth_args) if 'username' in params: username = params.pop('username') else: raise Warning('The IBM Watson API requires credentials that should be passed as "username" and "password" in "synth_args"') if 'password' in params: password = params.pop('password') else: raise Warning('The IBM Watson API requires credentials that should be passed as "username" and "password" in "synth_args"') return requests.get(watson_url, auth=(username, password), params=params)
python
def watson_request(text, synth_args): """ Makes a single request to the IBM Watson text-to-speech API. :param text: The text that will be synthesized to audio. :param synth_args: A dictionary of arguments to add to the request. These should include username and password for authentication. """ params = { 'text': text, 'accept': 'audio/wav' } if synth_args is not None: params.update(synth_args) if 'username' in params: username = params.pop('username') else: raise Warning('The IBM Watson API requires credentials that should be passed as "username" and "password" in "synth_args"') if 'password' in params: password = params.pop('password') else: raise Warning('The IBM Watson API requires credentials that should be passed as "username" and "password" in "synth_args"') return requests.get(watson_url, auth=(username, password), params=params)
[ "def", "watson_request", "(", "text", ",", "synth_args", ")", ":", "params", "=", "{", "'text'", ":", "text", ",", "'accept'", ":", "'audio/wav'", "}", "if", "synth_args", "is", "not", "None", ":", "params", ".", "update", "(", "synth_args", ")", "if", "'username'", "in", "params", ":", "username", "=", "params", ".", "pop", "(", "'username'", ")", "else", ":", "raise", "Warning", "(", "'The IBM Watson API requires credentials that should be passed as \"username\" and \"password\" in \"synth_args\"'", ")", "if", "'password'", "in", "params", ":", "password", "=", "params", ".", "pop", "(", "'password'", ")", "else", ":", "raise", "Warning", "(", "'The IBM Watson API requires credentials that should be passed as \"username\" and \"password\" in \"synth_args\"'", ")", "return", "requests", ".", "get", "(", "watson_url", ",", "auth", "=", "(", "username", ",", "password", ")", ",", "params", "=", "params", ")" ]
Makes a single request to the IBM Watson text-to-speech API. :param text: The text that will be synthesized to audio. :param synth_args: A dictionary of arguments to add to the request. These should include username and password for authentication.
[ "Makes", "a", "single", "request", "to", "the", "IBM", "Watson", "text", "-", "to", "-", "speech", "API", "." ]
train
https://github.com/nbedi/typecaster/blob/09eee6d4fbad9f70c90364ea89ab39917f903afc/typecaster/utils.py#L56-L82
nbedi/typecaster
typecaster/utils.py
build_rss_feed
def build_rss_feed(podcast): """ Builds a podcast RSS feed and returns an xml file. :param podcast: A Podcast model to build the RSS feed from. """ if not os.path.exists(podcast.output_path): os.makedirs(podcast.output_path) rss = ET.Element('rss', attrib={'xmlns:itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd', 'version': '2.0'}) channel = ET.SubElement(rss, 'channel') ET.SubElement(channel, 'title').text = podcast.title ET.SubElement(channel, 'link').text = podcast.link ET.SubElement(channel, 'copyright').text = podcast.copyright ET.SubElement(channel, 'itunes:subtitle').text = podcast.subtitle ET.SubElement(channel, 'itunes:author').text = podcast.author ET.SubElement(channel, 'itunes:summary').text = podcast.description ET.SubElement(channel, 'description').text = podcast.description owner = ET.SubElement(channel, 'itunes:owner') ET.SubElement(owner, 'itunes:name').text = podcast.owner_name ET.SubElement(owner, 'itunes:email').text = podcast.owner_email ET.SubElement(channel, 'itunes:image').text = podcast.image for category in podcast.categories: ET.SubElement(channel, 'itunes:category').text = category for episode in sorted(podcast.episodes.values(), key=lambda x: x.publish_date): if episode.published is True: item = ET.SubElement(channel, 'item') ET.SubElement(item, 'title').text = episode.title ET.SubElement(item, 'author').text = episode.author ET.SubElement(item, 'summary').text = episode.summary ET.SubElement(item, 'enclosure', attrib={'url': podcast.link + '/' + episode.link, 'length': str(episode.length), 'type': 'audio/x-mp3'}) ET.SubElement(item, 'guid').text = podcast.link + '/' + episode.link ET.SubElement(item, 'pubDate').text = episode.publish_date.strftime('%a, %d %b %Y %H:%M:%S UTC') ET.SubElement(item, 'itunes:duration').text = episode.duration tree = ET.ElementTree(rss) with open(podcast.output_path + '/feed.xml', 'wb') as feed: tree.write(feed)
python
def build_rss_feed(podcast): """ Builds a podcast RSS feed and returns an xml file. :param podcast: A Podcast model to build the RSS feed from. """ if not os.path.exists(podcast.output_path): os.makedirs(podcast.output_path) rss = ET.Element('rss', attrib={'xmlns:itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd', 'version': '2.0'}) channel = ET.SubElement(rss, 'channel') ET.SubElement(channel, 'title').text = podcast.title ET.SubElement(channel, 'link').text = podcast.link ET.SubElement(channel, 'copyright').text = podcast.copyright ET.SubElement(channel, 'itunes:subtitle').text = podcast.subtitle ET.SubElement(channel, 'itunes:author').text = podcast.author ET.SubElement(channel, 'itunes:summary').text = podcast.description ET.SubElement(channel, 'description').text = podcast.description owner = ET.SubElement(channel, 'itunes:owner') ET.SubElement(owner, 'itunes:name').text = podcast.owner_name ET.SubElement(owner, 'itunes:email').text = podcast.owner_email ET.SubElement(channel, 'itunes:image').text = podcast.image for category in podcast.categories: ET.SubElement(channel, 'itunes:category').text = category for episode in sorted(podcast.episodes.values(), key=lambda x: x.publish_date): if episode.published is True: item = ET.SubElement(channel, 'item') ET.SubElement(item, 'title').text = episode.title ET.SubElement(item, 'author').text = episode.author ET.SubElement(item, 'summary').text = episode.summary ET.SubElement(item, 'enclosure', attrib={'url': podcast.link + '/' + episode.link, 'length': str(episode.length), 'type': 'audio/x-mp3'}) ET.SubElement(item, 'guid').text = podcast.link + '/' + episode.link ET.SubElement(item, 'pubDate').text = episode.publish_date.strftime('%a, %d %b %Y %H:%M:%S UTC') ET.SubElement(item, 'itunes:duration').text = episode.duration tree = ET.ElementTree(rss) with open(podcast.output_path + '/feed.xml', 'wb') as feed: tree.write(feed)
[ "def", "build_rss_feed", "(", "podcast", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "podcast", ".", "output_path", ")", ":", "os", ".", "makedirs", "(", "podcast", ".", "output_path", ")", "rss", "=", "ET", ".", "Element", "(", "'rss'", ",", "attrib", "=", "{", "'xmlns:itunes'", ":", "'http://www.itunes.com/dtds/podcast-1.0.dtd'", ",", "'version'", ":", "'2.0'", "}", ")", "channel", "=", "ET", ".", "SubElement", "(", "rss", ",", "'channel'", ")", "ET", ".", "SubElement", "(", "channel", ",", "'title'", ")", ".", "text", "=", "podcast", ".", "title", "ET", ".", "SubElement", "(", "channel", ",", "'link'", ")", ".", "text", "=", "podcast", ".", "link", "ET", ".", "SubElement", "(", "channel", ",", "'copyright'", ")", ".", "text", "=", "podcast", ".", "copyright", "ET", ".", "SubElement", "(", "channel", ",", "'itunes:subtitle'", ")", ".", "text", "=", "podcast", ".", "subtitle", "ET", ".", "SubElement", "(", "channel", ",", "'itunes:author'", ")", ".", "text", "=", "podcast", ".", "author", "ET", ".", "SubElement", "(", "channel", ",", "'itunes:summary'", ")", ".", "text", "=", "podcast", ".", "description", "ET", ".", "SubElement", "(", "channel", ",", "'description'", ")", ".", "text", "=", "podcast", ".", "description", "owner", "=", "ET", ".", "SubElement", "(", "channel", ",", "'itunes:owner'", ")", "ET", ".", "SubElement", "(", "owner", ",", "'itunes:name'", ")", ".", "text", "=", "podcast", ".", "owner_name", "ET", ".", "SubElement", "(", "owner", ",", "'itunes:email'", ")", ".", "text", "=", "podcast", ".", "owner_email", "ET", ".", "SubElement", "(", "channel", ",", "'itunes:image'", ")", ".", "text", "=", "podcast", ".", "image", "for", "category", "in", "podcast", ".", "categories", ":", "ET", ".", "SubElement", "(", "channel", ",", "'itunes:category'", ")", ".", "text", "=", "category", "for", "episode", "in", "sorted", "(", "podcast", ".", "episodes", ".", "values", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", ".", 
"publish_date", ")", ":", "if", "episode", ".", "published", "is", "True", ":", "item", "=", "ET", ".", "SubElement", "(", "channel", ",", "'item'", ")", "ET", ".", "SubElement", "(", "item", ",", "'title'", ")", ".", "text", "=", "episode", ".", "title", "ET", ".", "SubElement", "(", "item", ",", "'author'", ")", ".", "text", "=", "episode", ".", "author", "ET", ".", "SubElement", "(", "item", ",", "'summary'", ")", ".", "text", "=", "episode", ".", "summary", "ET", ".", "SubElement", "(", "item", ",", "'enclosure'", ",", "attrib", "=", "{", "'url'", ":", "podcast", ".", "link", "+", "'/'", "+", "episode", ".", "link", ",", "'length'", ":", "str", "(", "episode", ".", "length", ")", ",", "'type'", ":", "'audio/x-mp3'", "}", ")", "ET", ".", "SubElement", "(", "item", ",", "'guid'", ")", ".", "text", "=", "podcast", ".", "link", "+", "'/'", "+", "episode", ".", "link", "ET", ".", "SubElement", "(", "item", ",", "'pubDate'", ")", ".", "text", "=", "episode", ".", "publish_date", ".", "strftime", "(", "'%a, %d %b %Y %H:%M:%S UTC'", ")", "ET", ".", "SubElement", "(", "item", ",", "'itunes:duration'", ")", ".", "text", "=", "episode", ".", "duration", "tree", "=", "ET", ".", "ElementTree", "(", "rss", ")", "with", "open", "(", "podcast", ".", "output_path", "+", "'/feed.xml'", ",", "'wb'", ")", "as", "feed", ":", "tree", ".", "write", "(", "feed", ")" ]
Builds a podcast RSS feed and returns an xml file. :param podcast: A Podcast model to build the RSS feed from.
[ "Builds", "a", "podcast", "RSS", "feed", "and", "returns", "an", "xml", "file", "." ]
train
https://github.com/nbedi/typecaster/blob/09eee6d4fbad9f70c90364ea89ab39917f903afc/typecaster/utils.py#L85-L128
agile4you/bottle-neck
example.py
ResourceHandler.get
def get(self, uid=None): """Example retrieve API method. """ # Return resource collection if uid is None: return self.response_factory.ok(data=resource_db) # Return resource based on UID. try: record = [r for r in resource_db if r.get('id') == uid].pop() except IndexError: return self.response_factory.not_found(errors=['Resource with UID {} does not exist.'.format(uid)]) return self.response_factory.ok(data=record)
python
def get(self, uid=None): """Example retrieve API method. """ # Return resource collection if uid is None: return self.response_factory.ok(data=resource_db) # Return resource based on UID. try: record = [r for r in resource_db if r.get('id') == uid].pop() except IndexError: return self.response_factory.not_found(errors=['Resource with UID {} does not exist.'.format(uid)]) return self.response_factory.ok(data=record)
[ "def", "get", "(", "self", ",", "uid", "=", "None", ")", ":", "# Return resource collection", "if", "uid", "is", "None", ":", "return", "self", ".", "response_factory", ".", "ok", "(", "data", "=", "resource_db", ")", "# Return resource based on UID.", "try", ":", "record", "=", "[", "r", "for", "r", "in", "resource_db", "if", "r", ".", "get", "(", "'id'", ")", "==", "uid", "]", ".", "pop", "(", ")", "except", "IndexError", ":", "return", "self", ".", "response_factory", ".", "not_found", "(", "errors", "=", "[", "'Resource with UID {} does not exist.'", ".", "format", "(", "uid", ")", "]", ")", "return", "self", ".", "response_factory", ".", "ok", "(", "data", "=", "record", ")" ]
Example retrieve API method.
[ "Example", "retrieve", "API", "method", "." ]
train
https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/example.py#L27-L43
agile4you/bottle-neck
example.py
ResourceHandler.post
def post(self): """Example POST method. """ resource_data = self.request.json record = {'id': str(len(resource_db) + 1), 'name': resource_data.get('name')} resource_db.append(record) return self.response_factory.ok(data=record)
python
def post(self): """Example POST method. """ resource_data = self.request.json record = {'id': str(len(resource_db) + 1), 'name': resource_data.get('name')} resource_db.append(record) return self.response_factory.ok(data=record)
[ "def", "post", "(", "self", ")", ":", "resource_data", "=", "self", ".", "request", ".", "json", "record", "=", "{", "'id'", ":", "str", "(", "len", "(", "resource_db", ")", "+", "1", ")", ",", "'name'", ":", "resource_data", ".", "get", "(", "'name'", ")", "}", "resource_db", ".", "append", "(", "record", ")", "return", "self", ".", "response_factory", ".", "ok", "(", "data", "=", "record", ")" ]
Example POST method.
[ "Example", "POST", "method", "." ]
train
https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/example.py#L45-L56
agile4you/bottle-neck
example.py
ResourceHandler.put
def put(self, uid): """Example PUT method. """ resource_data = self.request.json try: record = resource_db[uid] except KeyError: return self.response_factory.not_found(errors=['Resource with UID {} does not exist!']) record['name'] = resource_data.get('name') return self.response_factory.ok(data=record)
python
def put(self, uid): """Example PUT method. """ resource_data = self.request.json try: record = resource_db[uid] except KeyError: return self.response_factory.not_found(errors=['Resource with UID {} does not exist!']) record['name'] = resource_data.get('name') return self.response_factory.ok(data=record)
[ "def", "put", "(", "self", ",", "uid", ")", ":", "resource_data", "=", "self", ".", "request", ".", "json", "try", ":", "record", "=", "resource_db", "[", "uid", "]", "except", "KeyError", ":", "return", "self", ".", "response_factory", ".", "not_found", "(", "errors", "=", "[", "'Resource with UID {} does not exist!'", "]", ")", "record", "[", "'name'", "]", "=", "resource_data", ".", "get", "(", "'name'", ")", "return", "self", ".", "response_factory", ".", "ok", "(", "data", "=", "record", ")" ]
Example PUT method.
[ "Example", "PUT", "method", "." ]
train
https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/example.py#L58-L72
agile4you/bottle-neck
example.py
ResourceHandler.delete
def delete(self, uid): """Example DELETE method. """ try: record = resource_db[uid].copy() except KeyError: return self.response_factory.not_found(errors=['Resource with UID {} does not exist!']) del resource_db[uid] return self.response_factory.ok(data=record)
python
def delete(self, uid): """Example DELETE method. """ try: record = resource_db[uid].copy() except KeyError: return self.response_factory.not_found(errors=['Resource with UID {} does not exist!']) del resource_db[uid] return self.response_factory.ok(data=record)
[ "def", "delete", "(", "self", ",", "uid", ")", ":", "try", ":", "record", "=", "resource_db", "[", "uid", "]", ".", "copy", "(", ")", "except", "KeyError", ":", "return", "self", ".", "response_factory", ".", "not_found", "(", "errors", "=", "[", "'Resource with UID {} does not exist!'", "]", ")", "del", "resource_db", "[", "uid", "]", "return", "self", ".", "response_factory", ".", "ok", "(", "data", "=", "record", ")" ]
Example DELETE method.
[ "Example", "DELETE", "method", "." ]
train
https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/example.py#L74-L85
amcat/amcatclient
demo_wikinews_scraper.py
get_pages
def get_pages(url): """ Return the 'pages' from the starting url Technically, look for the 'next 50' link, yield and download it, repeat """ while True: yield url doc = html.parse(url).find("body") links = [a for a in doc.findall(".//a") if a.text and a.text.startswith("next ")] if not links: break url = urljoin(url, links[0].get('href'))
python
def get_pages(url): """ Return the 'pages' from the starting url Technically, look for the 'next 50' link, yield and download it, repeat """ while True: yield url doc = html.parse(url).find("body") links = [a for a in doc.findall(".//a") if a.text and a.text.startswith("next ")] if not links: break url = urljoin(url, links[0].get('href'))
[ "def", "get_pages", "(", "url", ")", ":", "while", "True", ":", "yield", "url", "doc", "=", "html", ".", "parse", "(", "url", ")", ".", "find", "(", "\"body\"", ")", "links", "=", "[", "a", "for", "a", "in", "doc", ".", "findall", "(", "\".//a\"", ")", "if", "a", ".", "text", "and", "a", ".", "text", ".", "startswith", "(", "\"next \"", ")", "]", "if", "not", "links", ":", "break", "url", "=", "urljoin", "(", "url", ",", "links", "[", "0", "]", ".", "get", "(", "'href'", ")", ")" ]
Return the 'pages' from the starting url Technically, look for the 'next 50' link, yield and download it, repeat
[ "Return", "the", "pages", "from", "the", "starting", "url", "Technically", "look", "for", "the", "next", "50", "link", "yield", "and", "download", "it", "repeat" ]
train
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/demo_wikinews_scraper.py#L45-L56
amcat/amcatclient
demo_wikinews_scraper.py
get_article_urls
def get_article_urls(url): """ Return the articles from a page Technically, look for a div with class mw-search-result-heading and get the first link from this div """ doc = html.parse(url).getroot() for div in doc.cssselect("div.mw-search-result-heading"): href = div.cssselect("a")[0].get('href') if ":" in href: continue # skip Category: links href = urljoin(url, href) yield href
python
def get_article_urls(url): """ Return the articles from a page Technically, look for a div with class mw-search-result-heading and get the first link from this div """ doc = html.parse(url).getroot() for div in doc.cssselect("div.mw-search-result-heading"): href = div.cssselect("a")[0].get('href') if ":" in href: continue # skip Category: links href = urljoin(url, href) yield href
[ "def", "get_article_urls", "(", "url", ")", ":", "doc", "=", "html", ".", "parse", "(", "url", ")", ".", "getroot", "(", ")", "for", "div", "in", "doc", ".", "cssselect", "(", "\"div.mw-search-result-heading\"", ")", ":", "href", "=", "div", ".", "cssselect", "(", "\"a\"", ")", "[", "0", "]", ".", "get", "(", "'href'", ")", "if", "\":\"", "in", "href", ":", "continue", "# skip Category: links", "href", "=", "urljoin", "(", "url", ",", "href", ")", "yield", "href" ]
Return the articles from a page Technically, look for a div with class mw-search-result-heading and get the first link from this div
[ "Return", "the", "articles", "from", "a", "page", "Technically", "look", "for", "a", "div", "with", "class", "mw", "-", "search", "-", "result", "-", "heading", "and", "get", "the", "first", "link", "from", "this", "div" ]
train
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/demo_wikinews_scraper.py#L58-L70
amcat/amcatclient
demo_wikinews_scraper.py
get_article
def get_article(url): """ Return a single article as a 'amcat-ready' dict Uses the 'export' function of wikinews to get an xml article """ a = html.parse(url).getroot() title = a.cssselect(".firstHeading")[0].text_content() date = a.cssselect(".published")[0].text_content() date = datetime.datetime.strptime(date, "%A, %B %d, %Y").isoformat() paras = a.cssselect("#mw-content-text p") paras = paras[1:] # skip first paragraph, which contains date text = "\n\n".join(p.text_content().strip() for p in paras) return dict(headline=title, date=date, url=url, text=text, medium="Wikinews")
python
def get_article(url): """ Return a single article as a 'amcat-ready' dict Uses the 'export' function of wikinews to get an xml article """ a = html.parse(url).getroot() title = a.cssselect(".firstHeading")[0].text_content() date = a.cssselect(".published")[0].text_content() date = datetime.datetime.strptime(date, "%A, %B %d, %Y").isoformat() paras = a.cssselect("#mw-content-text p") paras = paras[1:] # skip first paragraph, which contains date text = "\n\n".join(p.text_content().strip() for p in paras) return dict(headline=title, date=date, url=url, text=text, medium="Wikinews")
[ "def", "get_article", "(", "url", ")", ":", "a", "=", "html", ".", "parse", "(", "url", ")", ".", "getroot", "(", ")", "title", "=", "a", ".", "cssselect", "(", "\".firstHeading\"", ")", "[", "0", "]", ".", "text_content", "(", ")", "date", "=", "a", ".", "cssselect", "(", "\".published\"", ")", "[", "0", "]", ".", "text_content", "(", ")", "date", "=", "datetime", ".", "datetime", ".", "strptime", "(", "date", ",", "\"%A, %B %d, %Y\"", ")", ".", "isoformat", "(", ")", "paras", "=", "a", ".", "cssselect", "(", "\"#mw-content-text p\"", ")", "paras", "=", "paras", "[", "1", ":", "]", "# skip first paragraph, which contains date", "text", "=", "\"\\n\\n\"", ".", "join", "(", "p", ".", "text_content", "(", ")", ".", "strip", "(", ")", "for", "p", "in", "paras", ")", "return", "dict", "(", "headline", "=", "title", ",", "date", "=", "date", ",", "url", "=", "url", ",", "text", "=", "text", ",", "medium", "=", "\"Wikinews\"", ")" ]
Return a single article as a 'amcat-ready' dict Uses the 'export' function of wikinews to get an xml article
[ "Return", "a", "single", "article", "as", "a", "amcat", "-", "ready", "dict", "Uses", "the", "export", "function", "of", "wikinews", "to", "get", "an", "xml", "article" ]
train
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/demo_wikinews_scraper.py#L89-L106
amcat/amcatclient
demo_wikinews_scraper.py
scrape_wikinews
def scrape_wikinews(conn, project, articleset, query): """ Scrape wikinews articles from the given query @param conn: The AmcatAPI object @param articleset: The target articleset ID @param category: The wikinews category name """ url = "http://en.wikinews.org/w/index.php?search={}&limit=50".format(query) logging.info(url) for page in get_pages(url): urls = get_article_urls(page) arts = list(get_articles(urls)) logging.info("Adding {} articles to set {}:{}" .format(len(arts), project, articleset)) conn.create_articles(project=project, articleset=articleset, json_data=arts)
python
def scrape_wikinews(conn, project, articleset, query): """ Scrape wikinews articles from the given query @param conn: The AmcatAPI object @param articleset: The target articleset ID @param category: The wikinews category name """ url = "http://en.wikinews.org/w/index.php?search={}&limit=50".format(query) logging.info(url) for page in get_pages(url): urls = get_article_urls(page) arts = list(get_articles(urls)) logging.info("Adding {} articles to set {}:{}" .format(len(arts), project, articleset)) conn.create_articles(project=project, articleset=articleset, json_data=arts)
[ "def", "scrape_wikinews", "(", "conn", ",", "project", ",", "articleset", ",", "query", ")", ":", "url", "=", "\"http://en.wikinews.org/w/index.php?search={}&limit=50\"", ".", "format", "(", "query", ")", "logging", ".", "info", "(", "url", ")", "for", "page", "in", "get_pages", "(", "url", ")", ":", "urls", "=", "get_article_urls", "(", "page", ")", "arts", "=", "list", "(", "get_articles", "(", "urls", ")", ")", "logging", ".", "info", "(", "\"Adding {} articles to set {}:{}\"", ".", "format", "(", "len", "(", "arts", ")", ",", "project", ",", "articleset", ")", ")", "conn", ".", "create_articles", "(", "project", "=", "project", ",", "articleset", "=", "articleset", ",", "json_data", "=", "arts", ")" ]
Scrape wikinews articles from the given query @param conn: The AmcatAPI object @param articleset: The target articleset ID @param category: The wikinews category name
[ "Scrape", "wikinews", "articles", "from", "the", "given", "query" ]
train
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/demo_wikinews_scraper.py#L118-L133
walkr/nanoservice
benchmarks/bench_req_rep_raw.py
start_service
def start_service(addr, n): """ Start a service """ s = Service(addr) started = time.time() for _ in range(n): msg = s.socket.recv() s.socket.send(msg) s.socket.close() duration = time.time() - started print('Raw REP service stats:') util.print_stats(n, duration) return
python
def start_service(addr, n): """ Start a service """ s = Service(addr) started = time.time() for _ in range(n): msg = s.socket.recv() s.socket.send(msg) s.socket.close() duration = time.time() - started print('Raw REP service stats:') util.print_stats(n, duration) return
[ "def", "start_service", "(", "addr", ",", "n", ")", ":", "s", "=", "Service", "(", "addr", ")", "started", "=", "time", ".", "time", "(", ")", "for", "_", "in", "range", "(", "n", ")", ":", "msg", "=", "s", ".", "socket", ".", "recv", "(", ")", "s", ".", "socket", ".", "send", "(", "msg", ")", "s", ".", "socket", ".", "close", "(", ")", "duration", "=", "time", ".", "time", "(", ")", "-", "started", "print", "(", "'Raw REP service stats:'", ")", "util", ".", "print_stats", "(", "n", ",", "duration", ")", "return" ]
Start a service
[ "Start", "a", "service" ]
train
https://github.com/walkr/nanoservice/blob/e2098986b1baa5f283167ae487d14f3c6c21961a/benchmarks/bench_req_rep_raw.py#L8-L21
walkr/nanoservice
benchmarks/bench_req_rep_raw.py
bench
def bench(client, n): """ Benchmark n requests """ items = list(range(n)) # Time client publish operations # ------------------------------ started = time.time() msg = b'x' for i in items: client.socket.send(msg) res = client.socket.recv() assert msg == res duration = time.time() - started print('Raw REQ client stats:') util.print_stats(n, duration)
python
def bench(client, n): """ Benchmark n requests """ items = list(range(n)) # Time client publish operations # ------------------------------ started = time.time() msg = b'x' for i in items: client.socket.send(msg) res = client.socket.recv() assert msg == res duration = time.time() - started print('Raw REQ client stats:') util.print_stats(n, duration)
[ "def", "bench", "(", "client", ",", "n", ")", ":", "items", "=", "list", "(", "range", "(", "n", ")", ")", "# Time client publish operations", "# ------------------------------", "started", "=", "time", ".", "time", "(", ")", "msg", "=", "b'x'", "for", "i", "in", "items", ":", "client", ".", "socket", ".", "send", "(", "msg", ")", "res", "=", "client", ".", "socket", ".", "recv", "(", ")", "assert", "msg", "==", "res", "duration", "=", "time", ".", "time", "(", ")", "-", "started", "print", "(", "'Raw REQ client stats:'", ")", "util", ".", "print_stats", "(", "n", ",", "duration", ")" ]
Benchmark n requests
[ "Benchmark", "n", "requests" ]
train
https://github.com/walkr/nanoservice/blob/e2098986b1baa5f283167ae487d14f3c6c21961a/benchmarks/bench_req_rep_raw.py#L24-L39