Dataset columns:
repo: string (length 7-55)
path: string (length 4-127)
func_name: string (length 1-88)
original_string: string (length 75-19.8k)
language: string (1 distinct value)
code: string (length 75-19.8k)
code_tokens: list
docstring: string (length 3-17.3k)
docstring_tokens: list
sha: string (length 40)
url: string (length 87-242)
partition: string (1 distinct value)
jstitch/MambuPy
MambuPy/rest/mambuloan.py
MambuLoan.setUser
def setUser(self, *args, **kwargs): """Adds the user for this loan to a 'user' field. User is a MambuUser object. Returns the number of requests done to Mambu. """ try: user = self.mambuuserclass(entid=self['assignedUserKey'], *args, **kwargs) except KeyError as kerr: err = MambuError("La cuenta %s no tiene asignado un usuario" % self['id']) err.noUser = True raise err except AttributeError as ae: from .mambuuser import MambuUser self.mambuuserclass = MambuUser try: user = self.mambuuserclass(entid=self['assignedUserKey'], *args, **kwargs) except KeyError as kerr: err = MambuError("La cuenta %s no tiene asignado un usuario" % self['id']) err.noUser = True raise err self['user'] = user return 1
python
def setUser(self, *args, **kwargs): """Adds the user for this loan to a 'user' field. User is a MambuUser object. Returns the number of requests done to Mambu. """ try: user = self.mambuuserclass(entid=self['assignedUserKey'], *args, **kwargs) except KeyError as kerr: err = MambuError("La cuenta %s no tiene asignado un usuario" % self['id']) err.noUser = True raise err except AttributeError as ae: from .mambuuser import MambuUser self.mambuuserclass = MambuUser try: user = self.mambuuserclass(entid=self['assignedUserKey'], *args, **kwargs) except KeyError as kerr: err = MambuError("La cuenta %s no tiene asignado un usuario" % self['id']) err.noUser = True raise err self['user'] = user return 1
[ "def", "setUser", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "user", "=", "self", ".", "mambuuserclass", "(", "entid", "=", "self", "[", "'assignedUserKey'", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "KeyError", "as", "kerr", ":", "err", "=", "MambuError", "(", "\"La cuenta %s no tiene asignado un usuario\"", "%", "self", "[", "'id'", "]", ")", "err", ".", "noUser", "=", "True", "raise", "err", "except", "AttributeError", "as", "ae", ":", "from", ".", "mambuuser", "import", "MambuUser", "self", ".", "mambuuserclass", "=", "MambuUser", "try", ":", "user", "=", "self", ".", "mambuuserclass", "(", "entid", "=", "self", "[", "'assignedUserKey'", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "KeyError", "as", "kerr", ":", "err", "=", "MambuError", "(", "\"La cuenta %s no tiene asignado un usuario\"", "%", "self", "[", "'id'", "]", ")", "err", ".", "noUser", "=", "True", "raise", "err", "self", "[", "'user'", "]", "=", "user", "return", "1" ]
Adds the user for this loan to a 'user' field. User is a MambuUser object. Returns the number of requests done to Mambu.
[ "Adds", "the", "user", "for", "this", "loan", "to", "a", "user", "field", "." ]
2af98cc12e7ed5ec183b3e97644e880e70b79ee8
https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/rest/mambuloan.py#L183-L208
train
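The except AttributeError branch in setUser above is a lazy-class-lookup idiom: if self.mambuuserclass was never set, the class is imported on demand, cached on the instance, and the call retried. A minimal standalone sketch of that idiom (class and attribute names here are illustrative, not MambuPy's):

class DefaultUser(object):
    """Stand-in for the class that would otherwise be imported lazily."""
    def __init__(self, entid=None, **kwargs):
        self.entid = entid

class Holder(object):
    def get_user(self, entid):
        try:
            cls = self.userclass                 # not set until first use
        except AttributeError:
            cls = self.userclass = DefaultUser   # "import" and cache the fallback class
        return cls(entid=entid)

print(Holder().get_user("abc123").entid)         # -> abc123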
jstitch/MambuPy
MambuPy/rest/mambuloan.py
MambuLoan.setProduct
def setProduct(self, cache=False, *args, **kwargs): """Adds the product for this loan to a 'product' field. Product is a MambuProduct object. cache argument allows to use AllMambuProducts singleton to retrieve the products. See mambuproduct.AllMambuProducts code and pydoc for further information. Returns the number of requests done to Mambu. """ if cache: try: prods = self.allmambuproductsclass(*args, **kwargs) except AttributeError as ae: from .mambuproduct import AllMambuProducts self.allmambuproductsclass = AllMambuProducts prods = self.allmambuproductsclass(*args, **kwargs) for prod in prods: if prod['encodedKey'] == self['productTypeKey']: self['product'] = prod try: # asked for cache, but cache was originally empty prods.noinit except AttributeError: return 1 return 0 try: product = self.mambuproductclass(entid=self['productTypeKey'], *args, **kwargs) except AttributeError as ae: from .mambuproduct import MambuProduct self.mambuproductclass = MambuProduct product = self.mambuproductclass(entid=self['productTypeKey'], *args, **kwargs) self['product'] = product return 1
python
def setProduct(self, cache=False, *args, **kwargs): """Adds the product for this loan to a 'product' field. Product is a MambuProduct object. cache argument allows to use AllMambuProducts singleton to retrieve the products. See mambuproduct.AllMambuProducts code and pydoc for further information. Returns the number of requests done to Mambu. """ if cache: try: prods = self.allmambuproductsclass(*args, **kwargs) except AttributeError as ae: from .mambuproduct import AllMambuProducts self.allmambuproductsclass = AllMambuProducts prods = self.allmambuproductsclass(*args, **kwargs) for prod in prods: if prod['encodedKey'] == self['productTypeKey']: self['product'] = prod try: # asked for cache, but cache was originally empty prods.noinit except AttributeError: return 1 return 0 try: product = self.mambuproductclass(entid=self['productTypeKey'], *args, **kwargs) except AttributeError as ae: from .mambuproduct import MambuProduct self.mambuproductclass = MambuProduct product = self.mambuproductclass(entid=self['productTypeKey'], *args, **kwargs) self['product'] = product return 1
[ "def", "setProduct", "(", "self", ",", "cache", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "cache", ":", "try", ":", "prods", "=", "self", ".", "allmambuproductsclass", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "AttributeError", "as", "ae", ":", "from", ".", "mambuproduct", "import", "AllMambuProducts", "self", ".", "allmambuproductsclass", "=", "AllMambuProducts", "prods", "=", "self", ".", "allmambuproductsclass", "(", "*", "args", ",", "*", "*", "kwargs", ")", "for", "prod", "in", "prods", ":", "if", "prod", "[", "'encodedKey'", "]", "==", "self", "[", "'productTypeKey'", "]", ":", "self", "[", "'product'", "]", "=", "prod", "try", ":", "# asked for cache, but cache was originally empty", "prods", ".", "noinit", "except", "AttributeError", ":", "return", "1", "return", "0", "try", ":", "product", "=", "self", ".", "mambuproductclass", "(", "entid", "=", "self", "[", "'productTypeKey'", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "AttributeError", "as", "ae", ":", "from", ".", "mambuproduct", "import", "MambuProduct", "self", ".", "mambuproductclass", "=", "MambuProduct", "product", "=", "self", ".", "mambuproductclass", "(", "entid", "=", "self", "[", "'productTypeKey'", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", "[", "'product'", "]", "=", "product", "return", "1" ]
Adds the product for this loan to a 'product' field. Product is a MambuProduct object. cache argument allows to use AllMambuProducts singleton to retrieve the products. See mambuproduct.AllMambuProducts code and pydoc for further information. Returns the number of requests done to Mambu.
[ "Adds", "the", "product", "for", "this", "loan", "to", "a", "product", "field", "." ]
2af98cc12e7ed5ec183b3e97644e880e70b79ee8
https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/rest/mambuloan.py#L211-L247
train
jstitch/MambuPy
MambuPy/rest/mambuloan.py
MambuLoan.getClientDetails
def getClientDetails(self, *args, **kwargs): """Gets the loan details for every client holder of the account. As default, assigns the whole loan amount to each client. This works fine for Client holders of the loan account. When Group holders, this is perhaps not ideal, but I cannot tell. If you inherit MambuLoan you should override this method to determine another way to assign particular amounts to each client. You can also use the overridden version of this method to add several other fields with information you wish to associate to each client holder of the loan account. BEWARE: for group loan accounts, this code assumes the holder (the group) currently has all the client members. But for accounts where the holder has changed over time, you may stumble upon accounts which assume certain group members which weren't the members that belonged to the group when it was disbursed. """ loannames = [] holder = kwargs['holder'] for client in holder['clients']: loannames.append({'id' : client['id'], 'name' : client['name'], 'client' : client, 'amount' : self['loanAmount'] }) return loannames
python
def getClientDetails(self, *args, **kwargs): """Gets the loan details for every client holder of the account. As default, assigns the whole loan amount to each client. This works fine for Client holders of the loan account. When Group holders, this is perhaps not ideal, but I cannot tell. If you inherit MambuLoan you should override this method to determine another way to assign particular amounts to each client. You can also use the overridden version of this method to add several other fields with information you wish to associate to each client holder of the loan account. BEWARE: for group loan accounts, this code assumes the holder (the group) currently has all the client members. But for accounts where the holder has changed over time, you may stumble upon accounts which assume certain group members which weren't the members that belonged to the group when it was disbursed. """ loannames = [] holder = kwargs['holder'] for client in holder['clients']: loannames.append({'id' : client['id'], 'name' : client['name'], 'client' : client, 'amount' : self['loanAmount'] }) return loannames
[ "def", "getClientDetails", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "loannames", "=", "[", "]", "holder", "=", "kwargs", "[", "'holder'", "]", "for", "client", "in", "holder", "[", "'clients'", "]", ":", "loannames", ".", "append", "(", "{", "'id'", ":", "client", "[", "'id'", "]", ",", "'name'", ":", "client", "[", "'name'", "]", ",", "'client'", ":", "client", ",", "'amount'", ":", "self", "[", "'loanAmount'", "]", "}", ")", "return", "loannames" ]
Gets the loan details for every client holder of the account. As default, assigns the whole loan amount to each client. This works fine for Client holders of the loan account. When Group holders, this is perhaps not ideal, but I cannot tell. If you inherit MambuLoan you should override this method to determine another way to assign particular amounts to each client. You can also use the overridden version of this method to add several other fields with information you wish to associate to each client holder of the loan account. BEWARE: for group loan accounts, this code assumes the holder (the group) currently has all the client members. But for accounts where the holder has changed over time, you may stumble upon accounts which assume certain group members which weren't the members that belonged to the group when it was disbursed.
[ "Gets", "the", "loan", "details", "for", "every", "client", "holder", "of", "the", "account", "." ]
2af98cc12e7ed5ec183b3e97644e880e70b79ee8
https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/rest/mambuloan.py#L412-L444
train
inveniosoftware/invenio-query-parser
invenio_query_parser/contrib/elasticsearch/walkers/dsl.py
ElasticSearchDSL.get_fields_for_keyword
def get_fields_for_keyword(self, keyword, mode='a'): """Convert keyword to fields.""" field = self.keyword_to_fields.get(keyword, keyword) if isinstance(field, dict): return field[mode] elif isinstance(field, (list, tuple)): return field return [field]
python
def get_fields_for_keyword(self, keyword, mode='a'): """Convert keyword to fields.""" field = self.keyword_to_fields.get(keyword, keyword) if isinstance(field, dict): return field[mode] elif isinstance(field, (list, tuple)): return field return [field]
[ "def", "get_fields_for_keyword", "(", "self", ",", "keyword", ",", "mode", "=", "'a'", ")", ":", "field", "=", "self", ".", "keyword_to_fields", ".", "get", "(", "keyword", ",", "keyword", ")", "if", "isinstance", "(", "field", ",", "dict", ")", ":", "return", "field", "[", "mode", "]", "elif", "isinstance", "(", "field", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "field", "return", "[", "field", "]" ]
Convert keyword to fields.
[ "Convert", "keyword", "to", "fields", "." ]
21a2c36318003ff52d2e18e7196bb420db8ecb4b
https://github.com/inveniosoftware/invenio-query-parser/blob/21a2c36318003ff52d2e18e7196bb420db8ecb4b/invenio_query_parser/contrib/elasticsearch/walkers/dsl.py#L46-L53
train
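A small standalone sketch of how that lookup behaves, using a made-up keyword_to_fields table (the walker's real table is not shown in this record):

keyword_to_fields = {
    'author': ['authors.full_name', 'authors.alternative_name'],   # hypothetical mapping
    'title': {'a': ['titles.title'], 'p': ['titles.title.raw']},   # per-mode mapping
}

def fields_for(keyword, mode='a'):
    field = keyword_to_fields.get(keyword, keyword)
    if isinstance(field, dict):
        return field[mode]
    elif isinstance(field, (list, tuple)):
        return field
    return [field]

print(fields_for('author'))      # -> ['authors.full_name', 'authors.alternative_name']
print(fields_for('title', 'p'))  # -> ['titles.title.raw']
print(fields_for('year'))        # -> ['year']  (unknown keywords map to themselves)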
pyslackers/sir-bot-a-lot
sirbot/utils/__init__.py
merge_dict
def merge_dict(a, b, path=None): """ Merge dict b into a """ if not path: path = [] for key in b: if key in a: if isinstance(a[key], dict) and isinstance(b[key], dict): merge_dict(a[key], b[key], path + [str(key)]) else: continue else: a[key] = b[key] return a
python
def merge_dict(a, b, path=None): """ Merge dict b into a """ if not path: path = [] for key in b: if key in a: if isinstance(a[key], dict) and isinstance(b[key], dict): merge_dict(a[key], b[key], path + [str(key)]) else: continue else: a[key] = b[key] return a
[ "def", "merge_dict", "(", "a", ",", "b", ",", "path", "=", "None", ")", ":", "if", "not", "path", ":", "path", "=", "[", "]", "for", "key", "in", "b", ":", "if", "key", "in", "a", ":", "if", "isinstance", "(", "a", "[", "key", "]", ",", "dict", ")", "and", "isinstance", "(", "b", "[", "key", "]", ",", "dict", ")", ":", "merge_dict", "(", "a", "[", "key", "]", ",", "b", "[", "key", "]", ",", "path", "+", "[", "str", "(", "key", ")", "]", ")", "else", ":", "continue", "else", ":", "a", "[", "key", "]", "=", "b", "[", "key", "]", "return", "a" ]
Merge dict b into a
[ "Merge", "dict", "b", "into", "a" ]
22dfdd6a14d61dbe29423fd131b7a23e618b68d7
https://github.com/pyslackers/sir-bot-a-lot/blob/22dfdd6a14d61dbe29423fd131b7a23e618b68d7/sirbot/utils/__init__.py#L8-L22
train
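A quick usage sketch (assuming the sirbot package above is importable); note that when a key exists in both dicts and either value is not itself a dict, the value already in a wins:

from sirbot.utils import merge_dict   # module path per the record above

a = {'slack': {'token': 'abc', 'timeout': 10}, 'port': 8080}
b = {'slack': {'token': 'xyz', 'retries': 3}, 'debug': True}
merge_dict(a, b)
# a == {'slack': {'token': 'abc', 'timeout': 10, 'retries': 3}, 'port': 8080, 'debug': True}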
BernardFW/bernard
src/bernard/i18n/_formatter.py
make_date
def make_date(obj: Union[date, datetime, Text], timezone: tzinfo=None): """ A flexible method to get a date object. It accepts either an ISO 8601 date/time string, either a Python `datetime`, either a Python `date`. If the input is a date/time and a timezone is specified, the resulting date object will be in the specified time zone. """ if isinstance(obj, datetime): if hasattr(obj, 'astimezone') and timezone: obj = obj.astimezone(timezone) return obj.date() elif isinstance(obj, date): return obj elif isinstance(obj, str): return make_date(parse_date(obj), timezone)
python
def make_date(obj: Union[date, datetime, Text], timezone: tzinfo=None): """ A flexible method to get a date object. It accepts either an ISO 8601 date/time string, either a Python `datetime`, either a Python `date`. If the input is a date/time and a timezone is specified, the resulting date object will be in the specified time zone. """ if isinstance(obj, datetime): if hasattr(obj, 'astimezone') and timezone: obj = obj.astimezone(timezone) return obj.date() elif isinstance(obj, date): return obj elif isinstance(obj, str): return make_date(parse_date(obj), timezone)
[ "def", "make_date", "(", "obj", ":", "Union", "[", "date", ",", "datetime", ",", "Text", "]", ",", "timezone", ":", "tzinfo", "=", "None", ")", ":", "if", "isinstance", "(", "obj", ",", "datetime", ")", ":", "if", "hasattr", "(", "obj", ",", "'astimezone'", ")", "and", "timezone", ":", "obj", "=", "obj", ".", "astimezone", "(", "timezone", ")", "return", "obj", ".", "date", "(", ")", "elif", "isinstance", "(", "obj", ",", "date", ")", ":", "return", "obj", "elif", "isinstance", "(", "obj", ",", "str", ")", ":", "return", "make_date", "(", "parse_date", "(", "obj", ")", ",", "timezone", ")" ]
A flexible method to get a date object. It accepts either an ISO 8601 date/time string, either a Python `datetime`, either a Python `date`. If the input is a date/time and a timezone is specified, the resulting date object will be in the specified time zone.
[ "A", "flexible", "method", "to", "get", "a", "date", "object", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/i18n/_formatter.py#L22-L40
train
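A brief usage sketch with datetime/date inputs (assuming bernard.i18n._formatter is importable; the string branch additionally depends on a parse_date helper that is not part of this record):

from datetime import date, datetime, timedelta, timezone
from bernard.i18n._formatter import make_date   # module path per the record above

utc_dt = datetime(2024, 3, 31, 23, 30, tzinfo=timezone.utc)
tokyo = timezone(timedelta(hours=9))

make_date(utc_dt)             # -> date(2024, 3, 31)
make_date(utc_dt, tokyo)      # -> date(2024, 4, 1), converted to UTC+9 before taking .date()
make_date(date(2024, 3, 31))  # -> date(2024, 3, 31), returned unchanged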
BernardFW/bernard
src/bernard/i18n/_formatter.py
I18nFormatter.format_date
def format_date(self, value, format_): """ Format the date using Babel """ date_ = make_date(value) return dates.format_date(date_, format_, locale=self.lang)
python
def format_date(self, value, format_): """ Format the date using Babel """ date_ = make_date(value) return dates.format_date(date_, format_, locale=self.lang)
[ "def", "format_date", "(", "self", ",", "value", ",", "format_", ")", ":", "date_", "=", "make_date", "(", "value", ")", "return", "dates", ".", "format_date", "(", "date_", ",", "format_", ",", "locale", "=", "self", ".", "lang", ")" ]
Format the date using Babel
[ "Format", "the", "date", "using", "Babel" ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/i18n/_formatter.py#L80-L85
train
BernardFW/bernard
src/bernard/i18n/_formatter.py
I18nFormatter.format_datetime
def format_datetime(self, value, format_): """ Format the datetime using Babel """ date_ = make_datetime(value) return dates.format_datetime(date_, format_, locale=self.lang)
python
def format_datetime(self, value, format_): """ Format the datetime using Babel """ date_ = make_datetime(value) return dates.format_datetime(date_, format_, locale=self.lang)
[ "def", "format_datetime", "(", "self", ",", "value", ",", "format_", ")", ":", "date_", "=", "make_datetime", "(", "value", ")", "return", "dates", ".", "format_datetime", "(", "date_", ",", "format_", ",", "locale", "=", "self", ".", "lang", ")" ]
Format the datetime using Babel
[ "Format", "the", "datetime", "using", "Babel" ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/i18n/_formatter.py#L87-L92
train
BernardFW/bernard
src/bernard/i18n/_formatter.py
I18nFormatter.format_field
def format_field(self, value, spec): """ Provide the additional formatters for localization. """ if spec.startswith('date:'): _, format_ = spec.split(':', 1) return self.format_date(value, format_) elif spec.startswith('datetime:'): _, format_ = spec.split(':', 1) return self.format_datetime(value, format_) elif spec == 'number': return self.format_number(value) else: return super(I18nFormatter, self).format_field(value, spec)
python
def format_field(self, value, spec): """ Provide the additional formatters for localization. """ if spec.startswith('date:'): _, format_ = spec.split(':', 1) return self.format_date(value, format_) elif spec.startswith('datetime:'): _, format_ = spec.split(':', 1) return self.format_datetime(value, format_) elif spec == 'number': return self.format_number(value) else: return super(I18nFormatter, self).format_field(value, spec)
[ "def", "format_field", "(", "self", ",", "value", ",", "spec", ")", ":", "if", "spec", ".", "startswith", "(", "'date:'", ")", ":", "_", ",", "format_", "=", "spec", ".", "split", "(", "':'", ",", "1", ")", "return", "self", ".", "format_date", "(", "value", ",", "format_", ")", "elif", "spec", ".", "startswith", "(", "'datetime:'", ")", ":", "_", ",", "format_", "=", "spec", ".", "split", "(", "':'", ",", "1", ")", "return", "self", ".", "format_datetime", "(", "value", ",", "format_", ")", "elif", "spec", "==", "'number'", ":", "return", "self", ".", "format_number", "(", "value", ")", "else", ":", "return", "super", "(", "I18nFormatter", ",", "self", ")", ".", "format_field", "(", "value", ",", "spec", ")" ]
Provide the additional formatters for localization.
[ "Provide", "the", "additional", "formatters", "for", "localization", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/i18n/_formatter.py#L100-L114
train
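A standalone sketch of the spec dispatch only: specs of the form 'date:FORMAT', 'datetime:FORMAT' or 'number' are routed to the localized helpers, everything else falls through to string.Formatter. The class below is illustrative and does not use Babel or the real I18nFormatter:

import string

class SpecDispatchFormatter(string.Formatter):
    def format_field(self, value, spec):
        if spec.startswith('date:'):
            _, fmt = spec.split(':', 1)
            return 'date<%s|%s>' % (value, fmt)        # real code: Babel format_date
        elif spec.startswith('datetime:'):
            _, fmt = spec.split(':', 1)
            return 'datetime<%s|%s>' % (value, fmt)    # real code: Babel format_datetime
        elif spec == 'number':
            return 'number<%s>' % value                # real code: locale-aware number
        return super(SpecDispatchFormatter, self).format_field(value, spec)

f = SpecDispatchFormatter()
print(f.format('{0:date:yyyy-MM-dd}', '2024-03-31'))   # -> date<2024-03-31|yyyy-MM-dd>
print(f.format('{0:>6}', 'hi'))                        # ordinary format specs still work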
klmitch/turnstile
turnstile/limits.py
BucketKey._decode
def _decode(cls, value): """Decode the given value, reverting '%'-encoded groups.""" value = cls._DEC_RE.sub(lambda x: '%c' % int(x.group(1), 16), value) return json.loads(value)
python
def _decode(cls, value): """Decode the given value, reverting '%'-encoded groups.""" value = cls._DEC_RE.sub(lambda x: '%c' % int(x.group(1), 16), value) return json.loads(value)
[ "def", "_decode", "(", "cls", ",", "value", ")", ":", "value", "=", "cls", ".", "_DEC_RE", ".", "sub", "(", "lambda", "x", ":", "'%c'", "%", "int", "(", "x", ".", "group", "(", "1", ")", ",", "16", ")", ",", "value", ")", "return", "json", ".", "loads", "(", "value", ")" ]
Decode the given value, reverting '%'-encoded groups.
[ "Decode", "the", "given", "value", "reverting", "%", "-", "encoded", "groups", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/limits.py#L153-L157
train
klmitch/turnstile
turnstile/limits.py
BucketKey.decode
def decode(cls, key): """ Decode a bucket key into a BucketKey instance. :param key: The string form of a bucket key. :returns: A suitable instance of BucketKey corresponding to the passed-in key. """ # Determine bucket key version prefix, sep, param_str = key.partition(':') if sep != ':' or prefix not in cls._prefix_to_version: raise ValueError("%r is not a bucket key" % key) version = cls._prefix_to_version[prefix] # Take the parameters apart... parts = param_str.split('/') uuid = parts.pop(0) params = {} for part in parts: name, sep, value = part.partition('=') # Make sure it's well-formed if sep != '=': raise ValueError("Cannot interpret parameter expression %r" % part) params[name] = cls._decode(value) # Return a BucketKey return cls(uuid, params, version=version)
python
def decode(cls, key): """ Decode a bucket key into a BucketKey instance. :param key: The string form of a bucket key. :returns: A suitable instance of BucketKey corresponding to the passed-in key. """ # Determine bucket key version prefix, sep, param_str = key.partition(':') if sep != ':' or prefix not in cls._prefix_to_version: raise ValueError("%r is not a bucket key" % key) version = cls._prefix_to_version[prefix] # Take the parameters apart... parts = param_str.split('/') uuid = parts.pop(0) params = {} for part in parts: name, sep, value = part.partition('=') # Make sure it's well-formed if sep != '=': raise ValueError("Cannot interpret parameter expression %r" % part) params[name] = cls._decode(value) # Return a BucketKey return cls(uuid, params, version=version)
[ "def", "decode", "(", "cls", ",", "key", ")", ":", "# Determine bucket key version", "prefix", ",", "sep", ",", "param_str", "=", "key", ".", "partition", "(", "':'", ")", "if", "sep", "!=", "':'", "or", "prefix", "not", "in", "cls", ".", "_prefix_to_version", ":", "raise", "ValueError", "(", "\"%r is not a bucket key\"", "%", "key", ")", "version", "=", "cls", ".", "_prefix_to_version", "[", "prefix", "]", "# Take the parameters apart...", "parts", "=", "param_str", ".", "split", "(", "'/'", ")", "uuid", "=", "parts", ".", "pop", "(", "0", ")", "params", "=", "{", "}", "for", "part", "in", "parts", ":", "name", ",", "sep", ",", "value", "=", "part", ".", "partition", "(", "'='", ")", "# Make sure it's well-formed", "if", "sep", "!=", "'='", ":", "raise", "ValueError", "(", "\"Cannot interpret parameter expression %r\"", "%", "part", ")", "params", "[", "name", "]", "=", "cls", ".", "_decode", "(", "value", ")", "# Return a BucketKey", "return", "cls", "(", "uuid", ",", "params", ",", "version", "=", "version", ")" ]
Decode a bucket key into a BucketKey instance. :param key: The string form of a bucket key. :returns: A suitable instance of BucketKey corresponding to the passed-in key.
[ "Decode", "a", "bucket", "key", "into", "a", "BucketKey", "instance", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/limits.py#L198-L229
train
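The key layout this parser expects is '<version-prefix>:<uuid>/<name>=<value>/...'. A rough standalone walk-through with a made-up prefix and key string (the real prefix-to-version table and the value decoding via cls._decode are internal to BucketKey):

key = 'bucket_v2:deadbeef/userid="abc"/count=5'    # hypothetical key string
prefix, sep, param_str = key.partition(':')        # 'bucket_v2', ':', 'deadbeef/userid="abc"/count=5'
parts = param_str.split('/')
uuid_part = parts.pop(0)                           # 'deadbeef'
params = {}
for part in parts:
    name, sep, value = part.partition('=')
    params[name] = value                           # the real code decodes value via cls._decode
print(prefix, uuid_part, params)                   # -> bucket_v2 deadbeef {'userid': '"abc"', 'count': '5'}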
klmitch/turnstile
turnstile/limits.py
BucketLoader.need_summary
def need_summary(self, now, max_updates, max_age): """ Helper method to determine if a "summarize" record should be added. :param now: The current time. :param max_updates: Maximum number of updates before a summarize is required. :param max_age: Maximum age of the last summarize record. This is used in the case where a summarize request has been lost by the compactor. :returns: True if a "summarize" record should be added, False otherwise. """ # Handle the case where an old summarize record exists if self.summarized is True and self.last_summarize_ts + max_age <= now: return True return self.summarized is False and self.updates >= max_updates
python
def need_summary(self, now, max_updates, max_age): """ Helper method to determine if a "summarize" record should be added. :param now: The current time. :param max_updates: Maximum number of updates before a summarize is required. :param max_age: Maximum age of the last summarize record. This is used in the case where a summarize request has been lost by the compactor. :returns: True if a "summarize" record should be added, False otherwise. """ # Handle the case where an old summarize record exists if self.summarized is True and self.last_summarize_ts + max_age <= now: return True return self.summarized is False and self.updates >= max_updates
[ "def", "need_summary", "(", "self", ",", "now", ",", "max_updates", ",", "max_age", ")", ":", "# Handle the case where an old summarize record exists", "if", "self", ".", "summarized", "is", "True", "and", "self", ".", "last_summarize_ts", "+", "max_age", "<=", "now", ":", "return", "True", "return", "self", ".", "summarized", "is", "False", "and", "self", ".", "updates", ">=", "max_updates" ]
Helper method to determine if a "summarize" record should be added. :param now: The current time. :param max_updates: Maximum number of updates before a summarize is required. :param max_age: Maximum age of the last summarize record. This is used in the case where a summarize request has been lost by the compactor. :returns: True if a "summarize" record should be added, False otherwise.
[ "Helper", "method", "to", "determine", "if", "a", "summarize", "record", "should", "be", "added", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/limits.py#L403-L423
train
klmitch/turnstile
turnstile/limits.py
Bucket.dehydrate
def dehydrate(self): """Return a dict representing this bucket.""" # Only concerned about very specific attributes result = {} for attr in self.attrs: result[attr] = getattr(self, attr) return result
python
def dehydrate(self): """Return a dict representing this bucket.""" # Only concerned about very specific attributes result = {} for attr in self.attrs: result[attr] = getattr(self, attr) return result
[ "def", "dehydrate", "(", "self", ")", ":", "# Only concerned about very specific attributes", "result", "=", "{", "}", "for", "attr", "in", "self", ".", "attrs", ":", "result", "[", "attr", "]", "=", "getattr", "(", "self", ",", "attr", ")", "return", "result" ]
Return a dict representing this bucket.
[ "Return", "a", "dict", "representing", "this", "bucket", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/limits.py#L464-L472
train
klmitch/turnstile
turnstile/limits.py
Bucket.delay
def delay(self, params, now=None): """Determine delay until next request.""" if now is None: now = time.time() # Initialize last... if not self.last: self.last = now elif now < self.last: now = self.last # How much has leaked out? leaked = now - self.last # Update the last message time self.last = now # Update the water level self.level = max(self.level - leaked, 0) # Are we too full? difference = self.level + self.limit.cost - self.limit.unit_value if difference >= self.eps: self.next = now + difference return difference # OK, raise the water level and set next to an appropriate # value self.level += self.limit.cost self.next = now return None
python
def delay(self, params, now=None): """Determine delay until next request.""" if now is None: now = time.time() # Initialize last... if not self.last: self.last = now elif now < self.last: now = self.last # How much has leaked out? leaked = now - self.last # Update the last message time self.last = now # Update the water level self.level = max(self.level - leaked, 0) # Are we too full? difference = self.level + self.limit.cost - self.limit.unit_value if difference >= self.eps: self.next = now + difference return difference # OK, raise the water level and set next to an appropriate # value self.level += self.limit.cost self.next = now return None
[ "def", "delay", "(", "self", ",", "params", ",", "now", "=", "None", ")", ":", "if", "now", "is", "None", ":", "now", "=", "time", ".", "time", "(", ")", "# Initialize last...", "if", "not", "self", ".", "last", ":", "self", ".", "last", "=", "now", "elif", "now", "<", "self", ".", "last", ":", "now", "=", "self", ".", "last", "# How much has leaked out?", "leaked", "=", "now", "-", "self", ".", "last", "# Update the last message time", "self", ".", "last", "=", "now", "# Update the water level", "self", ".", "level", "=", "max", "(", "self", ".", "level", "-", "leaked", ",", "0", ")", "# Are we too full?", "difference", "=", "self", ".", "level", "+", "self", ".", "limit", ".", "cost", "-", "self", ".", "limit", ".", "unit_value", "if", "difference", ">=", "self", ".", "eps", ":", "self", ".", "next", "=", "now", "+", "difference", "return", "difference", "# OK, raise the water level and set next to an appropriate", "# value", "self", ".", "level", "+=", "self", ".", "limit", ".", "cost", "self", ".", "next", "=", "now", "return", "None" ]
Determine delay until next request.
[ "Determine", "delay", "until", "next", "request", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/limits.py#L474-L506
train
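delay() is a leaky-bucket check: the level drains by one unit per elapsed second, each request adds limit.cost, and if the resulting level would exceed limit.unit_value the overflow (in seconds) is returned as the required delay. The same arithmetic with made-up numbers:

unit_value, cost = 60.0, 10.0              # hypothetical bucket capacity and per-request cost
level, last = 55.0, 100.0                  # current water level and time of the previous request

now = 103.0                                # a request arrives 3 seconds later
level = max(level - (now - last), 0)       # leak: 55 - 3 = 52
difference = level + cost - unit_value     # 52 + 10 - 60 = 2
if difference >= 0:                        # (the real code compares against self.eps)
    print('delay for %.1f seconds' % difference)   # too full: caller must wait ~2 seconds
else:
    level += cost                          # room left: accept the request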
klmitch/turnstile
turnstile/limits.py
Bucket.messages
def messages(self): """Return remaining messages before limiting.""" return int(math.floor(((self.limit.unit_value - self.level) / self.limit.unit_value) * self.limit.value))
python
def messages(self): """Return remaining messages before limiting.""" return int(math.floor(((self.limit.unit_value - self.level) / self.limit.unit_value) * self.limit.value))
[ "def", "messages", "(", "self", ")", ":", "return", "int", "(", "math", ".", "floor", "(", "(", "(", "self", ".", "limit", ".", "unit_value", "-", "self", ".", "level", ")", "/", "self", ".", "limit", ".", "unit_value", ")", "*", "self", ".", "limit", ".", "value", ")", ")" ]
Return remaining messages before limiting.
[ "Return", "remaining", "messages", "before", "limiting", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/limits.py#L509-L513
train
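The remaining-request count is just the unused fraction of the bucket scaled by the limit's value; for example:

import math
unit_value, level, value = 60.0, 52.0, 12   # hypothetical: 12 requests allowed per 60-second unit
remaining = int(math.floor(((unit_value - level) / unit_value) * value))
print(remaining)                            # (8/60) * 12 = 1.6 -> 1 message left before limiting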
klmitch/turnstile
turnstile/limits.py
Limit.dehydrate
def dehydrate(self): """Return a dict representing this limit.""" # Only concerned about very specific attributes result = dict(limit_class=self._limit_full_name) for attr in self.attrs: # Using getattr allows the properties to come into play result[attr] = getattr(self, attr) return result
python
def dehydrate(self): """Return a dict representing this limit.""" # Only concerned about very specific attributes result = dict(limit_class=self._limit_full_name) for attr in self.attrs: # Using getattr allows the properties to come into play result[attr] = getattr(self, attr) return result
[ "def", "dehydrate", "(", "self", ")", ":", "# Only concerned about very specific attributes", "result", "=", "dict", "(", "limit_class", "=", "self", ".", "_limit_full_name", ")", "for", "attr", "in", "self", ".", "attrs", ":", "# Using getattr allows the properties to come into play", "result", "[", "attr", "]", "=", "getattr", "(", "self", ",", "attr", ")", "return", "result" ]
Return a dict representing this limit.
[ "Return", "a", "dict", "representing", "this", "limit", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/limits.py#L729-L738
train
klmitch/turnstile
turnstile/limits.py
Limit.load
def load(self, key): """ Given a bucket key, load the corresponding bucket. :param key: The bucket key. This may be either a string or a BucketKey object. :returns: A Bucket object. """ # Turn the key into a BucketKey if isinstance(key, basestring): key = BucketKey.decode(key) # Make sure the uuids match if key.uuid != self.uuid: raise ValueError("%s is not a bucket corresponding to this limit" % key) # If the key is a version 1 key, load it straight from the # database if key.version == 1: raw = self.db.get(str(key)) if raw is None: return self.bucket_class(self.db, self, str(key)) return self.bucket_class.hydrate(self.db, msgpack.loads(raw), self, str(key)) # OK, use a BucketLoader records = self.db.lrange(str(key), 0, -1) loader = BucketLoader(self.bucket_class, self.db, self, str(key), records) return loader.bucket
python
def load(self, key): """ Given a bucket key, load the corresponding bucket. :param key: The bucket key. This may be either a string or a BucketKey object. :returns: A Bucket object. """ # Turn the key into a BucketKey if isinstance(key, basestring): key = BucketKey.decode(key) # Make sure the uuids match if key.uuid != self.uuid: raise ValueError("%s is not a bucket corresponding to this limit" % key) # If the key is a version 1 key, load it straight from the # database if key.version == 1: raw = self.db.get(str(key)) if raw is None: return self.bucket_class(self.db, self, str(key)) return self.bucket_class.hydrate(self.db, msgpack.loads(raw), self, str(key)) # OK, use a BucketLoader records = self.db.lrange(str(key), 0, -1) loader = BucketLoader(self.bucket_class, self.db, self, str(key), records) return loader.bucket
[ "def", "load", "(", "self", ",", "key", ")", ":", "# Turn the key into a BucketKey", "if", "isinstance", "(", "key", ",", "basestring", ")", ":", "key", "=", "BucketKey", ".", "decode", "(", "key", ")", "# Make sure the uuids match", "if", "key", ".", "uuid", "!=", "self", ".", "uuid", ":", "raise", "ValueError", "(", "\"%s is not a bucket corresponding to this limit\"", "%", "key", ")", "# If the key is a version 1 key, load it straight from the", "# database", "if", "key", ".", "version", "==", "1", ":", "raw", "=", "self", ".", "db", ".", "get", "(", "str", "(", "key", ")", ")", "if", "raw", "is", "None", ":", "return", "self", ".", "bucket_class", "(", "self", ".", "db", ",", "self", ",", "str", "(", "key", ")", ")", "return", "self", ".", "bucket_class", ".", "hydrate", "(", "self", ".", "db", ",", "msgpack", ".", "loads", "(", "raw", ")", ",", "self", ",", "str", "(", "key", ")", ")", "# OK, use a BucketLoader", "records", "=", "self", ".", "db", ".", "lrange", "(", "str", "(", "key", ")", ",", "0", ",", "-", "1", ")", "loader", "=", "BucketLoader", "(", "self", ".", "bucket_class", ",", "self", ".", "db", ",", "self", ",", "str", "(", "key", ")", ",", "records", ")", "return", "loader", ".", "bucket" ]
Given a bucket key, load the corresponding bucket. :param key: The bucket key. This may be either a string or a BucketKey object. :returns: A Bucket object.
[ "Given", "a", "bucket", "key", "load", "the", "corresponding", "bucket", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/limits.py#L780-L813
train
klmitch/turnstile
turnstile/limits.py
Limit.decode
def decode(self, key): """ Given a bucket key, compute the parameters used to compute that key. Note: Deprecated. Use BucketKey.decode() instead. :param key: The bucket key. Note that the UUID must match the UUID of this limit; a ValueError will be raised if this is not the case. """ # Parse the bucket key key = BucketKey.decode(key) # Make sure the uuids match if key.uuid != self.uuid: raise ValueError("%s is not a bucket corresponding to this limit" % key) return key.params
python
def decode(self, key): """ Given a bucket key, compute the parameters used to compute that key. Note: Deprecated. Use BucketKey.decode() instead. :param key: The bucket key. Note that the UUID must match the UUID of this limit; a ValueError will be raised if this is not the case. """ # Parse the bucket key key = BucketKey.decode(key) # Make sure the uuids match if key.uuid != self.uuid: raise ValueError("%s is not a bucket corresponding to this limit" % key) return key.params
[ "def", "decode", "(", "self", ",", "key", ")", ":", "# Parse the bucket key", "key", "=", "BucketKey", ".", "decode", "(", "key", ")", "# Make sure the uuids match", "if", "key", ".", "uuid", "!=", "self", ".", "uuid", ":", "raise", "ValueError", "(", "\"%s is not a bucket corresponding to this limit\"", "%", "key", ")", "return", "key", ".", "params" ]
Given a bucket key, compute the parameters used to compute that key. Note: Deprecated. Use BucketKey.decode() instead. :param key: The bucket key. Note that the UUID must match the UUID of this limit; a ValueError will be raised if this is not the case.
[ "Given", "a", "bucket", "key", "compute", "the", "parameters", "used", "to", "compute", "that", "key", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/limits.py#L815-L835
train
klmitch/turnstile
turnstile/limits.py
Limit._filter
def _filter(self, environ, params): """ Performs final filtering of the request to determine if this limit applies. Returns False if the limit does not apply or if the call should not be limited, or True to apply the limit. """ # Search for required query arguments if self.queries: # No query string available if 'QUERY_STRING' not in environ: return False # Extract the list of provided query arguments from the # QUERY_STRING available = set(qstr.partition('=')[0] for qstr in environ['QUERY_STRING'].split('&')) # Check if we have the required query arguments required = set(self.queries) if not required.issubset(available): return False # Use only the parameters listed in use; we'll add the others # back later unused = {} for key, value in params.items(): if key not in self.use: unused[key] = value # Do this in a separate step so we avoid changing a # dictionary during traversal for key in unused: del params[key] # First, we need to set up any additional params required to # get the bucket. If the DeferLimit exception is thrown, no # further processing is performed. try: additional = self.filter(environ, params, unused) or {} except DeferLimit: return False # Compute the bucket key key = self.key(params) # Update the parameters... params.update(unused) params.update(additional) # Get the current time now = time.time() # Allow up to a minute to mutate the bucket record. If no # bucket exists currently, this is essentially a no-op, and # the bucket won't expire anyway, once the update record is # pushed. self.db.expire(key, 60) # Push an update record update_uuid = str(uuid.uuid4()) update = { 'uuid': update_uuid, 'update': { 'params': params, 'time': now, }, } self.db.rpush(key, msgpack.dumps(update)) # Now suck in the bucket records = self.db.lrange(key, 0, -1) loader = BucketLoader(self.bucket_class, self.db, self, key, records) # Determine if we should initialize the compactor algorithm on # this bucket if 'turnstile.conf' in environ: config = environ['turnstile.conf']['compactor'] try: max_updates = int(config['max_updates']) except (KeyError, ValueError): max_updates = None try: max_age = int(config['max_age']) except (KeyError, ValueError): max_age = 600 if max_updates and loader.need_summary(now, max_updates, max_age): # Add a summary record; we want to do this before # instructing the compactor to compact. If we did the # compactor instruction first, and a crash occurred # before adding the summarize record, the lack of # quiesence could cause two compactor threads to run # on the same bucket, leading to a race condition that # could corrupt the bucket. With this ordering, if a # crash occurs before the compactor instruction, the # maximum aging applied to summarize records will # cause this logic to eventually be retriggered, which # should allow the compactor instruction to be issued. summarize = dict(summarize=now, uuid=str(uuid.uuid4())) self.db.rpush(key, msgpack.dumps(summarize)) # Instruct the compactor to compact this record compactor_key = config.get('compactor_key', 'compactor') self.db.zadd(compactor_key, int(math.ceil(now)), key) # Set the expire on the bucket self.db.expireat(key, loader.bucket.expire) # If we found a delay, store the particulars in the # environment; this will later be sorted and an error message # corresponding to the longest delay returned. if loader.delay is not None: environ.setdefault('turnstile.delay', []) environ['turnstile.delay'].append((loader.delay, self, loader.bucket)) # Finally, if desired, add the bucket key to a desired # database set set_name = environ.get('turnstile.bucket_set') if set_name: self.db.zadd(set_name, loader.bucket.expire, key) # Should we continue the route scan? return not self.continue_scan
python
def _filter(self, environ, params): """ Performs final filtering of the request to determine if this limit applies. Returns False if the limit does not apply or if the call should not be limited, or True to apply the limit. """ # Search for required query arguments if self.queries: # No query string available if 'QUERY_STRING' not in environ: return False # Extract the list of provided query arguments from the # QUERY_STRING available = set(qstr.partition('=')[0] for qstr in environ['QUERY_STRING'].split('&')) # Check if we have the required query arguments required = set(self.queries) if not required.issubset(available): return False # Use only the parameters listed in use; we'll add the others # back later unused = {} for key, value in params.items(): if key not in self.use: unused[key] = value # Do this in a separate step so we avoid changing a # dictionary during traversal for key in unused: del params[key] # First, we need to set up any additional params required to # get the bucket. If the DeferLimit exception is thrown, no # further processing is performed. try: additional = self.filter(environ, params, unused) or {} except DeferLimit: return False # Compute the bucket key key = self.key(params) # Update the parameters... params.update(unused) params.update(additional) # Get the current time now = time.time() # Allow up to a minute to mutate the bucket record. If no # bucket exists currently, this is essentially a no-op, and # the bucket won't expire anyway, once the update record is # pushed. self.db.expire(key, 60) # Push an update record update_uuid = str(uuid.uuid4()) update = { 'uuid': update_uuid, 'update': { 'params': params, 'time': now, }, } self.db.rpush(key, msgpack.dumps(update)) # Now suck in the bucket records = self.db.lrange(key, 0, -1) loader = BucketLoader(self.bucket_class, self.db, self, key, records) # Determine if we should initialize the compactor algorithm on # this bucket if 'turnstile.conf' in environ: config = environ['turnstile.conf']['compactor'] try: max_updates = int(config['max_updates']) except (KeyError, ValueError): max_updates = None try: max_age = int(config['max_age']) except (KeyError, ValueError): max_age = 600 if max_updates and loader.need_summary(now, max_updates, max_age): # Add a summary record; we want to do this before # instructing the compactor to compact. If we did the # compactor instruction first, and a crash occurred # before adding the summarize record, the lack of # quiesence could cause two compactor threads to run # on the same bucket, leading to a race condition that # could corrupt the bucket. With this ordering, if a # crash occurs before the compactor instruction, the # maximum aging applied to summarize records will # cause this logic to eventually be retriggered, which # should allow the compactor instruction to be issued. summarize = dict(summarize=now, uuid=str(uuid.uuid4())) self.db.rpush(key, msgpack.dumps(summarize)) # Instruct the compactor to compact this record compactor_key = config.get('compactor_key', 'compactor') self.db.zadd(compactor_key, int(math.ceil(now)), key) # Set the expire on the bucket self.db.expireat(key, loader.bucket.expire) # If we found a delay, store the particulars in the # environment; this will later be sorted and an error message # corresponding to the longest delay returned. if loader.delay is not None: environ.setdefault('turnstile.delay', []) environ['turnstile.delay'].append((loader.delay, self, loader.bucket)) # Finally, if desired, add the bucket key to a desired # database set set_name = environ.get('turnstile.bucket_set') if set_name: self.db.zadd(set_name, loader.bucket.expire, key) # Should we continue the route scan? return not self.continue_scan
[ "def", "_filter", "(", "self", ",", "environ", ",", "params", ")", ":", "# Search for required query arguments", "if", "self", ".", "queries", ":", "# No query string available", "if", "'QUERY_STRING'", "not", "in", "environ", ":", "return", "False", "# Extract the list of provided query arguments from the", "# QUERY_STRING", "available", "=", "set", "(", "qstr", ".", "partition", "(", "'='", ")", "[", "0", "]", "for", "qstr", "in", "environ", "[", "'QUERY_STRING'", "]", ".", "split", "(", "'&'", ")", ")", "# Check if we have the required query arguments", "required", "=", "set", "(", "self", ".", "queries", ")", "if", "not", "required", ".", "issubset", "(", "available", ")", ":", "return", "False", "# Use only the parameters listed in use; we'll add the others", "# back later", "unused", "=", "{", "}", "for", "key", ",", "value", "in", "params", ".", "items", "(", ")", ":", "if", "key", "not", "in", "self", ".", "use", ":", "unused", "[", "key", "]", "=", "value", "# Do this in a separate step so we avoid changing a", "# dictionary during traversal", "for", "key", "in", "unused", ":", "del", "params", "[", "key", "]", "# First, we need to set up any additional params required to", "# get the bucket. If the DeferLimit exception is thrown, no", "# further processing is performed.", "try", ":", "additional", "=", "self", ".", "filter", "(", "environ", ",", "params", ",", "unused", ")", "or", "{", "}", "except", "DeferLimit", ":", "return", "False", "# Compute the bucket key", "key", "=", "self", ".", "key", "(", "params", ")", "# Update the parameters...", "params", ".", "update", "(", "unused", ")", "params", ".", "update", "(", "additional", ")", "# Get the current time", "now", "=", "time", ".", "time", "(", ")", "# Allow up to a minute to mutate the bucket record. If no", "# bucket exists currently, this is essentially a no-op, and", "# the bucket won't expire anyway, once the update record is", "# pushed.", "self", ".", "db", ".", "expire", "(", "key", ",", "60", ")", "# Push an update record", "update_uuid", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "update", "=", "{", "'uuid'", ":", "update_uuid", ",", "'update'", ":", "{", "'params'", ":", "params", ",", "'time'", ":", "now", ",", "}", ",", "}", "self", ".", "db", ".", "rpush", "(", "key", ",", "msgpack", ".", "dumps", "(", "update", ")", ")", "# Now suck in the bucket", "records", "=", "self", ".", "db", ".", "lrange", "(", "key", ",", "0", ",", "-", "1", ")", "loader", "=", "BucketLoader", "(", "self", ".", "bucket_class", ",", "self", ".", "db", ",", "self", ",", "key", ",", "records", ")", "# Determine if we should initialize the compactor algorithm on", "# this bucket", "if", "'turnstile.conf'", "in", "environ", ":", "config", "=", "environ", "[", "'turnstile.conf'", "]", "[", "'compactor'", "]", "try", ":", "max_updates", "=", "int", "(", "config", "[", "'max_updates'", "]", ")", "except", "(", "KeyError", ",", "ValueError", ")", ":", "max_updates", "=", "None", "try", ":", "max_age", "=", "int", "(", "config", "[", "'max_age'", "]", ")", "except", "(", "KeyError", ",", "ValueError", ")", ":", "max_age", "=", "600", "if", "max_updates", "and", "loader", ".", "need_summary", "(", "now", ",", "max_updates", ",", "max_age", ")", ":", "# Add a summary record; we want to do this before", "# instructing the compactor to compact. 
If we did the", "# compactor instruction first, and a crash occurred", "# before adding the summarize record, the lack of", "# quiesence could cause two compactor threads to run", "# on the same bucket, leading to a race condition that", "# could corrupt the bucket. With this ordering, if a", "# crash occurs before the compactor instruction, the", "# maximum aging applied to summarize records will", "# cause this logic to eventually be retriggered, which", "# should allow the compactor instruction to be issued.", "summarize", "=", "dict", "(", "summarize", "=", "now", ",", "uuid", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", ")", "self", ".", "db", ".", "rpush", "(", "key", ",", "msgpack", ".", "dumps", "(", "summarize", ")", ")", "# Instruct the compactor to compact this record", "compactor_key", "=", "config", ".", "get", "(", "'compactor_key'", ",", "'compactor'", ")", "self", ".", "db", ".", "zadd", "(", "compactor_key", ",", "int", "(", "math", ".", "ceil", "(", "now", ")", ")", ",", "key", ")", "# Set the expire on the bucket", "self", ".", "db", ".", "expireat", "(", "key", ",", "loader", ".", "bucket", ".", "expire", ")", "# If we found a delay, store the particulars in the", "# environment; this will later be sorted and an error message", "# corresponding to the longest delay returned.", "if", "loader", ".", "delay", "is", "not", "None", ":", "environ", ".", "setdefault", "(", "'turnstile.delay'", ",", "[", "]", ")", "environ", "[", "'turnstile.delay'", "]", ".", "append", "(", "(", "loader", ".", "delay", ",", "self", ",", "loader", ".", "bucket", ")", ")", "# Finally, if desired, add the bucket key to a desired", "# database set", "set_name", "=", "environ", ".", "get", "(", "'turnstile.bucket_set'", ")", "if", "set_name", ":", "self", ".", "db", ".", "zadd", "(", "set_name", ",", "loader", ".", "bucket", ".", "expire", ",", "key", ")", "# Should we continue the route scan?", "return", "not", "self", ".", "continue_scan" ]
Performs final filtering of the request to determine if this limit applies. Returns False if the limit does not apply or if the call should not be limited, or True to apply the limit.
[ "Performs", "final", "filtering", "of", "the", "request", "to", "determine", "if", "this", "limit", "applies", ".", "Returns", "False", "if", "the", "limit", "does", "not", "apply", "or", "if", "the", "call", "should", "not", "be", "limited", "or", "True", "to", "apply", "the", "limit", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/limits.py#L849-L972
train
klmitch/turnstile
turnstile/limits.py
Limit.format
def format(self, status, headers, environ, bucket, delay): """ Formats a response entity. Returns a tuple of the desired status code and the formatted entity. The default status code is passed in, as is a dictionary of headers. :param status: The default status code. Should be returned to the caller, or an alternate selected. The status code should include both the number and the message, separated by a single space. :param headers: A dictionary of headers for the response. Should update the 'Content-Type' header at a minimum. :param environ: The WSGI environment for the request. :param bucket: The bucket containing the data which caused the delay decision to be made. This can be used to obtain such information as the next time the request can be made. :param delay: The number of seconds by which the request should be delayed. """ # This is a default response entity, which can be overridden # by limit subclasses. entity = ("This request was rate-limited. " "Please retry your request after %s." % time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(bucket.next))) headers['Content-Type'] = 'text/plain' return status, entity
python
def format(self, status, headers, environ, bucket, delay): """ Formats a response entity. Returns a tuple of the desired status code and the formatted entity. The default status code is passed in, as is a dictionary of headers. :param status: The default status code. Should be returned to the caller, or an alternate selected. The status code should include both the number and the message, separated by a single space. :param headers: A dictionary of headers for the response. Should update the 'Content-Type' header at a minimum. :param environ: The WSGI environment for the request. :param bucket: The bucket containing the data which caused the delay decision to be made. This can be used to obtain such information as the next time the request can be made. :param delay: The number of seconds by which the request should be delayed. """ # This is a default response entity, which can be overridden # by limit subclasses. entity = ("This request was rate-limited. " "Please retry your request after %s." % time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(bucket.next))) headers['Content-Type'] = 'text/plain' return status, entity
[ "def", "format", "(", "self", ",", "status", ",", "headers", ",", "environ", ",", "bucket", ",", "delay", ")", ":", "# This is a default response entity, which can be overridden", "# by limit subclasses.", "entity", "=", "(", "\"This request was rate-limited. \"", "\"Please retry your request after %s.\"", "%", "time", ".", "strftime", "(", "\"%Y-%m-%dT%H:%M:%SZ\"", ",", "time", ".", "gmtime", "(", "bucket", ".", "next", ")", ")", ")", "headers", "[", "'Content-Type'", "]", "=", "'text/plain'", "return", "status", ",", "entity" ]
Formats a response entity. Returns a tuple of the desired status code and the formatted entity. The default status code is passed in, as is a dictionary of headers. :param status: The default status code. Should be returned to the caller, or an alternate selected. The status code should include both the number and the message, separated by a single space. :param headers: A dictionary of headers for the response. Should update the 'Content-Type' header at a minimum. :param environ: The WSGI environment for the request. :param bucket: The bucket containing the data which caused the delay decision to be made. This can be used to obtain such information as the next time the request can be made. :param delay: The number of seconds by which the request should be delayed.
[ "Formats", "a", "response", "entity", ".", "Returns", "a", "tuple", "of", "the", "desired", "status", "code", "and", "the", "formatted", "entity", ".", "The", "default", "status", "code", "is", "passed", "in", "as", "is", "a", "dictionary", "of", "headers", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/limits.py#L993-L1023
train
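The default entity just embeds the bucket's next-allowed time as an ISO-8601 UTC timestamp, e.g.:

import time
next_allowed = 1704067200          # hypothetical bucket.next (2024-01-01 00:00:00 UTC)
retry_at = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(next_allowed))
print("This request was rate-limited. Please retry your request after %s." % retry_at)
# -> ... Please retry your request after 2024-01-01T00:00:00Z.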
openvax/varlens
varlens/util.py
drop_prefix
def drop_prefix(strings): """ Removes common prefix from a collection of strings """ strings_without_extensions = [ s.split(".", 2)[0] for s in strings ] if len(strings_without_extensions) == 1: return [os.path.basename(strings_without_extensions[0])] prefix_len = len(os.path.commonprefix(strings_without_extensions)) result = [string[prefix_len:] for string in strings_without_extensions] if len(set(result)) != len(strings): # If these operations resulted in a collision, just return the original # strings. return strings return result
python
def drop_prefix(strings): """ Removes common prefix from a collection of strings """ strings_without_extensions = [ s.split(".", 2)[0] for s in strings ] if len(strings_without_extensions) == 1: return [os.path.basename(strings_without_extensions[0])] prefix_len = len(os.path.commonprefix(strings_without_extensions)) result = [string[prefix_len:] for string in strings_without_extensions] if len(set(result)) != len(strings): # If these operations resulted in a collision, just return the original # strings. return strings return result
[ "def", "drop_prefix", "(", "strings", ")", ":", "strings_without_extensions", "=", "[", "s", ".", "split", "(", "\".\"", ",", "2", ")", "[", "0", "]", "for", "s", "in", "strings", "]", "if", "len", "(", "strings_without_extensions", ")", "==", "1", ":", "return", "[", "os", ".", "path", ".", "basename", "(", "strings_without_extensions", "[", "0", "]", ")", "]", "prefix_len", "=", "len", "(", "os", ".", "path", ".", "commonprefix", "(", "strings_without_extensions", ")", ")", "result", "=", "[", "string", "[", "prefix_len", ":", "]", "for", "string", "in", "strings_without_extensions", "]", "if", "len", "(", "set", "(", "result", ")", ")", "!=", "len", "(", "strings", ")", ":", "# If these operations resulted in a collision, just return the original", "# strings.", "return", "strings", "return", "result" ]
Removes common prefix from a collection of strings
[ "Removes", "common", "prefix", "from", "a", "collection", "of", "strings" ]
715d3ede5893757b2fcba4117515621bca7b1e5d
https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/util.py#L52-L68
train
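A quick usage sketch for the drop_prefix record above. The import path is inferred from the record's path field (varlens/util.py) and the filenames are invented for illustration; after extensions are stripped, the shared character-wise prefix is removed.

```python
from varlens.util import drop_prefix  # module path taken from the record above

# Two inputs: extensions are stripped, then the shared "data/" prefix is dropped.
print(drop_prefix(["data/tumor.bam", "data/normal.bam"]))  # ['tumor', 'normal']

# A single input is reduced to its basename (without extension).
print(drop_prefix(["data/tumor.bam"]))                     # ['tumor']
```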
ossobv/dutree
dutree/dutree.py
DuNode.count
def count(self): "Return how many nodes this contains, including self." if self._nodes is None: return 1 return sum(i.count() for i in self._nodes)
python
def count(self): "Return how many nodes this contains, including self." if self._nodes is None: return 1 return sum(i.count() for i in self._nodes)
[ "def", "count", "(", "self", ")", ":", "if", "self", ".", "_nodes", "is", "None", ":", "return", "1", "return", "sum", "(", "i", ".", "count", "(", ")", "for", "i", "in", "self", ".", "_nodes", ")" ]
Return how many nodes this contains, including self.
[ "Return", "how", "many", "nodes", "this", "contains", "including", "self", "." ]
adceeeb17f9fd70a7ed9c674850d7015d820eb2a
https://github.com/ossobv/dutree/blob/adceeeb17f9fd70a7ed9c674850d7015d820eb2a/dutree/dutree.py#L107-L111
train
ossobv/dutree
dutree/dutree.py
DuNode.app_size
def app_size(self): "Return the total apparent size, including children." if self._nodes is None: return self._app_size return sum(i.app_size() for i in self._nodes)
python
def app_size(self): "Return the total apparent size, including children." if self._nodes is None: return self._app_size return sum(i.app_size() for i in self._nodes)
[ "def", "app_size", "(", "self", ")", ":", "if", "self", ".", "_nodes", "is", "None", ":", "return", "self", ".", "_app_size", "return", "sum", "(", "i", ".", "app_size", "(", ")", "for", "i", "in", "self", ".", "_nodes", ")" ]
Return the total apparent size, including children.
[ "Return", "the", "total", "apparent", "size", "including", "children", "." ]
adceeeb17f9fd70a7ed9c674850d7015d820eb2a
https://github.com/ossobv/dutree/blob/adceeeb17f9fd70a7ed9c674850d7015d820eb2a/dutree/dutree.py#L120-L124
train
ossobv/dutree
dutree/dutree.py
DuNode.use_size
def use_size(self): "Return the total used size, including children." if self._nodes is None: return self._use_size return sum(i.use_size() for i in self._nodes)
python
def use_size(self): "Return the total used size, including children." if self._nodes is None: return self._use_size return sum(i.use_size() for i in self._nodes)
[ "def", "use_size", "(", "self", ")", ":", "if", "self", ".", "_nodes", "is", "None", ":", "return", "self", ".", "_use_size", "return", "sum", "(", "i", ".", "use_size", "(", ")", "for", "i", "in", "self", ".", "_nodes", ")" ]
Return the total used size, including children.
[ "Return", "the", "total", "used", "size", "including", "children", "." ]
adceeeb17f9fd70a7ed9c674850d7015d820eb2a
https://github.com/ossobv/dutree/blob/adceeeb17f9fd70a7ed9c674850d7015d820eb2a/dutree/dutree.py#L127-L131
train
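The three DuNode accessors above (count, app_size, use_size) share one recursive pattern: a leaf answers from its own fields, a branch aggregates over self._nodes. A minimal standalone sketch of that pattern, independent of dutree, with invented names:

```python
class Node:
    def __init__(self, size, children=None):
        self._size = size          # own size; only consulted when this is a leaf
        self._children = children  # None for a leaf, a list for a directory

    def total_size(self):
        # Same shape as DuNode.count()/app_size()/use_size(): leaves report
        # themselves, branches sum over their children (own size is ignored).
        if self._children is None:
            return self._size
        return sum(child.total_size() for child in self._children)


root = Node(0, [Node(10), Node(20, [Node(5), Node(7)])])
assert root.total_size() == 22  # 10 + 5 + 7; the inner node's own 20 is unused
```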
ossobv/dutree
dutree/dutree.py
DuNode._prune_all_if_small
def _prune_all_if_small(self, small_size, a_or_u): "Return True and delete children if small enough." if self._nodes is None: return True total_size = (self.app_size() if a_or_u else self.use_size()) if total_size < small_size: if a_or_u: self._set_size(total_size, self.use_size()) else: self._set_size(self.app_size(), total_size) return True return False
python
def _prune_all_if_small(self, small_size, a_or_u): "Return True and delete children if small enough." if self._nodes is None: return True total_size = (self.app_size() if a_or_u else self.use_size()) if total_size < small_size: if a_or_u: self._set_size(total_size, self.use_size()) else: self._set_size(self.app_size(), total_size) return True return False
[ "def", "_prune_all_if_small", "(", "self", ",", "small_size", ",", "a_or_u", ")", ":", "if", "self", ".", "_nodes", "is", "None", ":", "return", "True", "total_size", "=", "(", "self", ".", "app_size", "(", ")", "if", "a_or_u", "else", "self", ".", "use_size", "(", ")", ")", "if", "total_size", "<", "small_size", ":", "if", "a_or_u", ":", "self", ".", "_set_size", "(", "total_size", ",", "self", ".", "use_size", "(", ")", ")", "else", ":", "self", ".", "_set_size", "(", "self", ".", "app_size", "(", ")", ",", "total_size", ")", "return", "True", "return", "False" ]
Return True and delete children if small enough.
[ "Return", "True", "and", "delete", "children", "if", "small", "enough", "." ]
adceeeb17f9fd70a7ed9c674850d7015d820eb2a
https://github.com/ossobv/dutree/blob/adceeeb17f9fd70a7ed9c674850d7015d820eb2a/dutree/dutree.py#L153-L166
train
ossobv/dutree
dutree/dutree.py
DuNode._prune_some_if_small
def _prune_some_if_small(self, small_size, a_or_u): "Merge some nodes in the directory, whilst keeping others." # Assert that we're not messing things up. prev_app_size = self.app_size() prev_use_size = self.use_size() keep_nodes = [] prune_app_size = 0 prune_use_size = 0 for node in self._nodes: node_size = node.app_size() if a_or_u else node.use_size() if node_size < small_size: if a_or_u: prune_app_size += node_size prune_use_size += node.use_size() else: prune_app_size += node.app_size() prune_use_size += node_size else: keep_nodes.append(node) # Last "leftover" node? Merge with parent. if len(keep_nodes) == 1 and keep_nodes[-1]._isdir is None: prune_app_size += keep_nodes[-1]._app_size prune_use_size += keep_nodes[-1]._use_size keep_nodes = [] if prune_app_size: if not keep_nodes: # The only node to keep, no "leftovers" here. Move data # to the parent. keep_nodes = None assert self._isdir and self._nodes is not None self._set_size(prune_app_size, prune_use_size) elif keep_nodes and keep_nodes[-1]._isdir is None: # There was already a leftover node. Add the new leftovers. keep_nodes[-1]._add_size(prune_app_size, prune_use_size) else: # Create a new leftover node. keep_nodes.append(DuNode.new_leftovers( self._path, prune_app_size, prune_use_size)) # Update nodes and do the actual assertion. self._nodes = keep_nodes assert prev_app_size == self.app_size(), ( prev_app_size, self.app_size()) assert prev_use_size == self.use_size(), ( prev_use_size, self.use_size())
python
def _prune_some_if_small(self, small_size, a_or_u): "Merge some nodes in the directory, whilst keeping others." # Assert that we're not messing things up. prev_app_size = self.app_size() prev_use_size = self.use_size() keep_nodes = [] prune_app_size = 0 prune_use_size = 0 for node in self._nodes: node_size = node.app_size() if a_or_u else node.use_size() if node_size < small_size: if a_or_u: prune_app_size += node_size prune_use_size += node.use_size() else: prune_app_size += node.app_size() prune_use_size += node_size else: keep_nodes.append(node) # Last "leftover" node? Merge with parent. if len(keep_nodes) == 1 and keep_nodes[-1]._isdir is None: prune_app_size += keep_nodes[-1]._app_size prune_use_size += keep_nodes[-1]._use_size keep_nodes = [] if prune_app_size: if not keep_nodes: # The only node to keep, no "leftovers" here. Move data # to the parent. keep_nodes = None assert self._isdir and self._nodes is not None self._set_size(prune_app_size, prune_use_size) elif keep_nodes and keep_nodes[-1]._isdir is None: # There was already a leftover node. Add the new leftovers. keep_nodes[-1]._add_size(prune_app_size, prune_use_size) else: # Create a new leftover node. keep_nodes.append(DuNode.new_leftovers( self._path, prune_app_size, prune_use_size)) # Update nodes and do the actual assertion. self._nodes = keep_nodes assert prev_app_size == self.app_size(), ( prev_app_size, self.app_size()) assert prev_use_size == self.use_size(), ( prev_use_size, self.use_size())
[ "def", "_prune_some_if_small", "(", "self", ",", "small_size", ",", "a_or_u", ")", ":", "# Assert that we're not messing things up.", "prev_app_size", "=", "self", ".", "app_size", "(", ")", "prev_use_size", "=", "self", ".", "use_size", "(", ")", "keep_nodes", "=", "[", "]", "prune_app_size", "=", "0", "prune_use_size", "=", "0", "for", "node", "in", "self", ".", "_nodes", ":", "node_size", "=", "node", ".", "app_size", "(", ")", "if", "a_or_u", "else", "node", ".", "use_size", "(", ")", "if", "node_size", "<", "small_size", ":", "if", "a_or_u", ":", "prune_app_size", "+=", "node_size", "prune_use_size", "+=", "node", ".", "use_size", "(", ")", "else", ":", "prune_app_size", "+=", "node", ".", "app_size", "(", ")", "prune_use_size", "+=", "node_size", "else", ":", "keep_nodes", ".", "append", "(", "node", ")", "# Last \"leftover\" node? Merge with parent.", "if", "len", "(", "keep_nodes", ")", "==", "1", "and", "keep_nodes", "[", "-", "1", "]", ".", "_isdir", "is", "None", ":", "prune_app_size", "+=", "keep_nodes", "[", "-", "1", "]", ".", "_app_size", "prune_use_size", "+=", "keep_nodes", "[", "-", "1", "]", ".", "_use_size", "keep_nodes", "=", "[", "]", "if", "prune_app_size", ":", "if", "not", "keep_nodes", ":", "# The only node to keep, no \"leftovers\" here. Move data", "# to the parent.", "keep_nodes", "=", "None", "assert", "self", ".", "_isdir", "and", "self", ".", "_nodes", "is", "not", "None", "self", ".", "_set_size", "(", "prune_app_size", ",", "prune_use_size", ")", "elif", "keep_nodes", "and", "keep_nodes", "[", "-", "1", "]", ".", "_isdir", "is", "None", ":", "# There was already a leftover node. Add the new leftovers.", "keep_nodes", "[", "-", "1", "]", ".", "_add_size", "(", "prune_app_size", ",", "prune_use_size", ")", "else", ":", "# Create a new leftover node.", "keep_nodes", ".", "append", "(", "DuNode", ".", "new_leftovers", "(", "self", ".", "_path", ",", "prune_app_size", ",", "prune_use_size", ")", ")", "# Update nodes and do the actual assertion.", "self", ".", "_nodes", "=", "keep_nodes", "assert", "prev_app_size", "==", "self", ".", "app_size", "(", ")", ",", "(", "prev_app_size", ",", "self", ".", "app_size", "(", ")", ")", "assert", "prev_use_size", "==", "self", ".", "use_size", "(", ")", ",", "(", "prev_use_size", ",", "self", ".", "use_size", "(", ")", ")" ]
Merge some nodes in the directory, whilst keeping others.
[ "Merge", "some", "nodes", "in", "the", "directory", "whilst", "keeping", "others", "." ]
adceeeb17f9fd70a7ed9c674850d7015d820eb2a
https://github.com/ossobv/dutree/blob/adceeeb17f9fd70a7ed9c674850d7015d820eb2a/dutree/dutree.py#L168-L215
train
ossobv/dutree
dutree/dutree.py
DuNode.merge_upwards_if_smaller_than
def merge_upwards_if_smaller_than(self, small_size, a_or_u): """After prune_if_smaller_than is run, we may still have excess nodes. For example, with a small_size of 609710690: 7 /* 28815419 /data/* 32 /data/srv/* 925746 /data/srv/docker.bak/* 12 /data/srv/docker.bak/shared/* 682860348 /data/srv/docker.bak/shared/standalone/* This is reduced to: 31147487 /* 682860355 /data/srv/docker.bak/shared/standalone/* Run this only when done with the scanning.""" # Assert that we're not messing things up. prev_app_size = self.app_size() prev_use_size = self.use_size() small_nodes = self._find_small_nodes(small_size, (), a_or_u) for node, parents in small_nodes: # Check immediate grandparent for isdir=None and if it # exists, move this there. The isdir=None node is always # last. if len(parents) >= 2: tail = parents[-2]._nodes[-1] if tail._isdir is None: assert tail._app_size is not None, tail tail._add_size(node.app_size(), node.use_size()) parents[-1]._nodes.remove(node) assert len(parents[-1]._nodes) # The actual assertion. assert prev_app_size == self.app_size(), ( prev_app_size, self.app_size()) assert prev_use_size == self.use_size(), ( prev_use_size, self.use_size())
python
def merge_upwards_if_smaller_than(self, small_size, a_or_u): """After prune_if_smaller_than is run, we may still have excess nodes. For example, with a small_size of 609710690: 7 /* 28815419 /data/* 32 /data/srv/* 925746 /data/srv/docker.bak/* 12 /data/srv/docker.bak/shared/* 682860348 /data/srv/docker.bak/shared/standalone/* This is reduced to: 31147487 /* 682860355 /data/srv/docker.bak/shared/standalone/* Run this only when done with the scanning.""" # Assert that we're not messing things up. prev_app_size = self.app_size() prev_use_size = self.use_size() small_nodes = self._find_small_nodes(small_size, (), a_or_u) for node, parents in small_nodes: # Check immediate grandparent for isdir=None and if it # exists, move this there. The isdir=None node is always # last. if len(parents) >= 2: tail = parents[-2]._nodes[-1] if tail._isdir is None: assert tail._app_size is not None, tail tail._add_size(node.app_size(), node.use_size()) parents[-1]._nodes.remove(node) assert len(parents[-1]._nodes) # The actual assertion. assert prev_app_size == self.app_size(), ( prev_app_size, self.app_size()) assert prev_use_size == self.use_size(), ( prev_use_size, self.use_size())
[ "def", "merge_upwards_if_smaller_than", "(", "self", ",", "small_size", ",", "a_or_u", ")", ":", "# Assert that we're not messing things up.", "prev_app_size", "=", "self", ".", "app_size", "(", ")", "prev_use_size", "=", "self", ".", "use_size", "(", ")", "small_nodes", "=", "self", ".", "_find_small_nodes", "(", "small_size", ",", "(", ")", ",", "a_or_u", ")", "for", "node", ",", "parents", "in", "small_nodes", ":", "# Check immediate grandparent for isdir=None and if it", "# exists, move this there. The isdir=None node is always", "# last.", "if", "len", "(", "parents", ")", ">=", "2", ":", "tail", "=", "parents", "[", "-", "2", "]", ".", "_nodes", "[", "-", "1", "]", "if", "tail", ".", "_isdir", "is", "None", ":", "assert", "tail", ".", "_app_size", "is", "not", "None", ",", "tail", "tail", ".", "_add_size", "(", "node", ".", "app_size", "(", ")", ",", "node", ".", "use_size", "(", ")", ")", "parents", "[", "-", "1", "]", ".", "_nodes", ".", "remove", "(", "node", ")", "assert", "len", "(", "parents", "[", "-", "1", "]", ".", "_nodes", ")", "# The actual assertion.", "assert", "prev_app_size", "==", "self", ".", "app_size", "(", ")", ",", "(", "prev_app_size", ",", "self", ".", "app_size", "(", ")", ")", "assert", "prev_use_size", "==", "self", ".", "use_size", "(", ")", ",", "(", "prev_use_size", ",", "self", ".", "use_size", "(", ")", ")" ]
After prune_if_smaller_than is run, we may still have excess nodes. For example, with a small_size of 609710690: 7 /* 28815419 /data/* 32 /data/srv/* 925746 /data/srv/docker.bak/* 12 /data/srv/docker.bak/shared/* 682860348 /data/srv/docker.bak/shared/standalone/* This is reduced to: 31147487 /* 682860355 /data/srv/docker.bak/shared/standalone/* Run this only when done with the scanning.
[ "After", "prune_if_smaller_than", "is", "run", "we", "may", "still", "have", "excess", "nodes", "." ]
adceeeb17f9fd70a7ed9c674850d7015d820eb2a
https://github.com/ossobv/dutree/blob/adceeeb17f9fd70a7ed9c674850d7015d820eb2a/dutree/dutree.py#L217-L258
train
ossobv/dutree
dutree/dutree.py
DuNode.as_tree
def as_tree(self): "Return the nodes as a list of lists." if self._nodes is None: return [self] ret = [self] for node in self._nodes: ret.append(node.as_tree()) return ret
python
def as_tree(self): "Return the nodes as a list of lists." if self._nodes is None: return [self] ret = [self] for node in self._nodes: ret.append(node.as_tree()) return ret
[ "def", "as_tree", "(", "self", ")", ":", "if", "self", ".", "_nodes", "is", "None", ":", "return", "[", "self", "]", "ret", "=", "[", "self", "]", "for", "node", "in", "self", ".", "_nodes", ":", "ret", ".", "append", "(", "node", ".", "as_tree", "(", ")", ")", "return", "ret" ]
Return the nodes as a list of lists.
[ "Return", "the", "nodes", "as", "a", "list", "of", "lists", "." ]
adceeeb17f9fd70a7ed9c674850d7015d820eb2a
https://github.com/ossobv/dutree/blob/adceeeb17f9fd70a7ed9c674850d7015d820eb2a/dutree/dutree.py#L272-L279
train
ossobv/dutree
dutree/dutree.py
DuScan._check_path
def _check_path(self): "Immediately check if we can access path. Otherwise bail." if not path.isdir(self._path or '/'): raise OSError('Path {!r} is not a directory'.format(self._path))
python
def _check_path(self): "Immediately check if we can access path. Otherwise bail." if not path.isdir(self._path or '/'): raise OSError('Path {!r} is not a directory'.format(self._path))
[ "def", "_check_path", "(", "self", ")", ":", "if", "not", "path", ".", "isdir", "(", "self", ".", "_path", "or", "'/'", ")", ":", "raise", "OSError", "(", "'Path {!r} is not a directory'", ".", "format", "(", "self", ".", "_path", ")", ")" ]
Immediately check if we can access path. Otherwise bail.
[ "Immediately", "check", "if", "we", "can", "access", "path", ".", "Otherwise", "bail", "." ]
adceeeb17f9fd70a7ed9c674850d7015d820eb2a
https://github.com/ossobv/dutree/blob/adceeeb17f9fd70a7ed9c674850d7015d820eb2a/dutree/dutree.py#L320-L323
train
allonhadaya/dmenu-python
dmenu/dmenu.py
version
def version(command='dmenu'): '''The dmenu command's version message. Raises: DmenuCommandError Example: >>> import dmenu >>> dmenu.version() 'dmenu-4.5, \xc2\xa9 2006-2012 dmenu engineers, see LICENSE for details' ''' args = [command, '-v'] try: # start the dmenu process proc = subprocess.Popen( args, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as err: # something went wrong with starting the process raise DmenuCommandError(args, err) if proc.wait() == 0: # version information from stdout return proc.stdout.read().rstrip('\n') # error from dmenu raise DmenuCommandError(args, proc.stderr.read())
python
def version(command='dmenu'): '''The dmenu command's version message. Raises: DmenuCommandError Example: >>> import dmenu >>> dmenu.version() 'dmenu-4.5, \xc2\xa9 2006-2012 dmenu engineers, see LICENSE for details' ''' args = [command, '-v'] try: # start the dmenu process proc = subprocess.Popen( args, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as err: # something went wrong with starting the process raise DmenuCommandError(args, err) if proc.wait() == 0: # version information from stdout return proc.stdout.read().rstrip('\n') # error from dmenu raise DmenuCommandError(args, proc.stderr.read())
[ "def", "version", "(", "command", "=", "'dmenu'", ")", ":", "args", "=", "[", "command", ",", "'-v'", "]", "try", ":", "# start the dmenu process", "proc", "=", "subprocess", ".", "Popen", "(", "args", ",", "universal_newlines", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "except", "OSError", "as", "err", ":", "# something went wrong with starting the process", "raise", "DmenuCommandError", "(", "args", ",", "err", ")", "if", "proc", ".", "wait", "(", ")", "==", "0", ":", "# version information from stdout", "return", "proc", ".", "stdout", ".", "read", "(", ")", ".", "rstrip", "(", "'\\n'", ")", "# error from dmenu", "raise", "DmenuCommandError", "(", "args", ",", "proc", ".", "stderr", ".", "read", "(", ")", ")" ]
The dmenu command's version message. Raises: DmenuCommandError Example: >>> import dmenu >>> dmenu.version() 'dmenu-4.5, \xc2\xa9 2006-2012 dmenu engineers, see LICENSE for details'
[ "The", "dmenu", "command", "s", "version", "message", "." ]
30eca49a9368c61e13e87f530cc3785a369536c4
https://github.com/allonhadaya/dmenu-python/blob/30eca49a9368c61e13e87f530cc3785a369536c4/dmenu/dmenu.py#L28-L59
train
allonhadaya/dmenu-python
dmenu/dmenu.py
show
def show( items, command='dmenu', bottom=None, fast=None, case_insensitive=None, lines=None, monitor=None, prompt=None, font=None, background=None, foreground=None, background_selected=None, foreground_selected=None): '''Present a dmenu to the user. Args: items (Iterable[str]): defines the menu items being presented to the user. items should not contain the newline character. command (Optional[str]): defines the path to the dmenu executable. Defaults to 'dmenu'. bottom (Optional[bool]): dmenu appears at the bottom of the screen. fast (Optional[bool]): dmenu grabs the keyboard before reading stdin. This is faster, but will lock up X until stdin reaches end-of-file. case_insensitive (Optional[bool]): dmenu matches menu items case insensitively. lines (Optional[int]): dmenu lists items vertically, with the given number of lines. monitor (Optional[int]): dmenu is displayed on the monitor number supplied. Monitor numbers are starting from 0. prompt (Optional[str]): defines the prompt to be displayed to the left of the input field. font (Optional[str]): defines the font or font set used. eg. "fixed" or "Monospace-12:normal" (an xft font) background (Optional[str]): defines the normal background color. #RGB, #RRGGBB, and X color names are supported. foreground (Optional[str]): defines the normal foreground color. background_selected (Optional[str]): defines the selected background color. foreground_selected (Optional[str]): defines the selected foreground color. Raises: DmenuCommandError DmenuUsageError Returns: The user's selected menu item, their own typed item, or None if they hit escape. Examples: >>> import dmenu >>> dmenu.show(['a', 'b', 'c']) 'a' # user selected a >>> dmenu.show(['a', 'b', 'c'], prompt='pick a letter') 'b' # user selected b >>> dmenu.show(['a', 'b', 'c']) None # user hit escape >>> dmenu.show(['a', 'b', 'c']) 'd' # user typed their own selection, d >>> dmenu.show(['a', 'b', 'c'], command='not_a_valid_dmenu') Traceback (most recent call last): ... dmenu.dmenu.DmenuCommandError: The provided dmenu command could not be used (['not_a_valid_dmenu']): [Errno 2] No such file or directory: 'not_a_valid_dmenu' >>> dmenu.show(['a', 'b', 'c'], monitor=2) Traceback (most recent call last): ... 
dmenu.dmenu.DmenuUsageError: This version of dmenu does not support your usage (['dmenu', '-m', '2']): usage: dmenu [-b] [-f] [-i] [-l lines] [-p prompt] [-fn font] [-nb color] [-nf color] [-sb color] [-sf color] [-v] Consider configuring show using partial application: >>> import functools >>> show = functools.partial(dmenu.show, bottom=True) >>> show(['we', 'show', 'up', 'below']) >>> show(['us', 'too']) ''' # construct args args = [command] if bottom: args.append('-b') if fast: args.append('-f') if case_insensitive: args.append('-i') if lines is not None: args.extend(('-l', str(lines))) if monitor is not None: args.extend(('-m', str(monitor))) if prompt is not None: args.extend(('-p', prompt)) if font is not None: args.extend(('-fn', font)) if background is not None: args.extend(('-nb', background)) if foreground is not None: args.extend(('-nf', foreground)) if background_selected is not None: args.extend(('-sb', background_selected)) if foreground_selected is not None: args.extend(('-sf', foreground_selected)) try: # start the dmenu process proc = subprocess.Popen( args, universal_newlines=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as err: # something went wrong with starting the process raise DmenuCommandError(args, err) # write items over to dmenu with proc.stdin: for item in items: proc.stdin.write(item) proc.stdin.write('\n') if proc.wait() == 0: # user made a selection return proc.stdout.read().rstrip('\n') stderr = proc.stderr.read() if stderr == '': # user hit escape return None if re.match('usage', stderr, re.I): # usage error raise DmenuUsageError(args, stderr) # other error from dmenu raise DmenuCommandError(args, stderr)
python
def show( items, command='dmenu', bottom=None, fast=None, case_insensitive=None, lines=None, monitor=None, prompt=None, font=None, background=None, foreground=None, background_selected=None, foreground_selected=None): '''Present a dmenu to the user. Args: items (Iterable[str]): defines the menu items being presented to the user. items should not contain the newline character. command (Optional[str]): defines the path to the dmenu executable. Defaults to 'dmenu'. bottom (Optional[bool]): dmenu appears at the bottom of the screen. fast (Optional[bool]): dmenu grabs the keyboard before reading stdin. This is faster, but will lock up X until stdin reaches end-of-file. case_insensitive (Optional[bool]): dmenu matches menu items case insensitively. lines (Optional[int]): dmenu lists items vertically, with the given number of lines. monitor (Optional[int]): dmenu is displayed on the monitor number supplied. Monitor numbers are starting from 0. prompt (Optional[str]): defines the prompt to be displayed to the left of the input field. font (Optional[str]): defines the font or font set used. eg. "fixed" or "Monospace-12:normal" (an xft font) background (Optional[str]): defines the normal background color. #RGB, #RRGGBB, and X color names are supported. foreground (Optional[str]): defines the normal foreground color. background_selected (Optional[str]): defines the selected background color. foreground_selected (Optional[str]): defines the selected foreground color. Raises: DmenuCommandError DmenuUsageError Returns: The user's selected menu item, their own typed item, or None if they hit escape. Examples: >>> import dmenu >>> dmenu.show(['a', 'b', 'c']) 'a' # user selected a >>> dmenu.show(['a', 'b', 'c'], prompt='pick a letter') 'b' # user selected b >>> dmenu.show(['a', 'b', 'c']) None # user hit escape >>> dmenu.show(['a', 'b', 'c']) 'd' # user typed their own selection, d >>> dmenu.show(['a', 'b', 'c'], command='not_a_valid_dmenu') Traceback (most recent call last): ... dmenu.dmenu.DmenuCommandError: The provided dmenu command could not be used (['not_a_valid_dmenu']): [Errno 2] No such file or directory: 'not_a_valid_dmenu' >>> dmenu.show(['a', 'b', 'c'], monitor=2) Traceback (most recent call last): ... 
dmenu.dmenu.DmenuUsageError: This version of dmenu does not support your usage (['dmenu', '-m', '2']): usage: dmenu [-b] [-f] [-i] [-l lines] [-p prompt] [-fn font] [-nb color] [-nf color] [-sb color] [-sf color] [-v] Consider configuring show using partial application: >>> import functools >>> show = functools.partial(dmenu.show, bottom=True) >>> show(['we', 'show', 'up', 'below']) >>> show(['us', 'too']) ''' # construct args args = [command] if bottom: args.append('-b') if fast: args.append('-f') if case_insensitive: args.append('-i') if lines is not None: args.extend(('-l', str(lines))) if monitor is not None: args.extend(('-m', str(monitor))) if prompt is not None: args.extend(('-p', prompt)) if font is not None: args.extend(('-fn', font)) if background is not None: args.extend(('-nb', background)) if foreground is not None: args.extend(('-nf', foreground)) if background_selected is not None: args.extend(('-sb', background_selected)) if foreground_selected is not None: args.extend(('-sf', foreground_selected)) try: # start the dmenu process proc = subprocess.Popen( args, universal_newlines=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as err: # something went wrong with starting the process raise DmenuCommandError(args, err) # write items over to dmenu with proc.stdin: for item in items: proc.stdin.write(item) proc.stdin.write('\n') if proc.wait() == 0: # user made a selection return proc.stdout.read().rstrip('\n') stderr = proc.stderr.read() if stderr == '': # user hit escape return None if re.match('usage', stderr, re.I): # usage error raise DmenuUsageError(args, stderr) # other error from dmenu raise DmenuCommandError(args, stderr)
[ "def", "show", "(", "items", ",", "command", "=", "'dmenu'", ",", "bottom", "=", "None", ",", "fast", "=", "None", ",", "case_insensitive", "=", "None", ",", "lines", "=", "None", ",", "monitor", "=", "None", ",", "prompt", "=", "None", ",", "font", "=", "None", ",", "background", "=", "None", ",", "foreground", "=", "None", ",", "background_selected", "=", "None", ",", "foreground_selected", "=", "None", ")", ":", "# construct args", "args", "=", "[", "command", "]", "if", "bottom", ":", "args", ".", "append", "(", "'-b'", ")", "if", "fast", ":", "args", ".", "append", "(", "'-f'", ")", "if", "case_insensitive", ":", "args", ".", "append", "(", "'-i'", ")", "if", "lines", "is", "not", "None", ":", "args", ".", "extend", "(", "(", "'-l'", ",", "str", "(", "lines", ")", ")", ")", "if", "monitor", "is", "not", "None", ":", "args", ".", "extend", "(", "(", "'-m'", ",", "str", "(", "monitor", ")", ")", ")", "if", "prompt", "is", "not", "None", ":", "args", ".", "extend", "(", "(", "'-p'", ",", "prompt", ")", ")", "if", "font", "is", "not", "None", ":", "args", ".", "extend", "(", "(", "'-fn'", ",", "font", ")", ")", "if", "background", "is", "not", "None", ":", "args", ".", "extend", "(", "(", "'-nb'", ",", "background", ")", ")", "if", "foreground", "is", "not", "None", ":", "args", ".", "extend", "(", "(", "'-nf'", ",", "foreground", ")", ")", "if", "background_selected", "is", "not", "None", ":", "args", ".", "extend", "(", "(", "'-sb'", ",", "background_selected", ")", ")", "if", "foreground_selected", "is", "not", "None", ":", "args", ".", "extend", "(", "(", "'-sf'", ",", "foreground_selected", ")", ")", "try", ":", "# start the dmenu process", "proc", "=", "subprocess", ".", "Popen", "(", "args", ",", "universal_newlines", "=", "True", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "except", "OSError", "as", "err", ":", "# something went wrong with starting the process", "raise", "DmenuCommandError", "(", "args", ",", "err", ")", "# write items over to dmenu", "with", "proc", ".", "stdin", ":", "for", "item", "in", "items", ":", "proc", ".", "stdin", ".", "write", "(", "item", ")", "proc", ".", "stdin", ".", "write", "(", "'\\n'", ")", "if", "proc", ".", "wait", "(", ")", "==", "0", ":", "# user made a selection", "return", "proc", ".", "stdout", ".", "read", "(", ")", ".", "rstrip", "(", "'\\n'", ")", "stderr", "=", "proc", ".", "stderr", ".", "read", "(", ")", "if", "stderr", "==", "''", ":", "# user hit escape", "return", "None", "if", "re", ".", "match", "(", "'usage'", ",", "stderr", ",", "re", ".", "I", ")", ":", "# usage error", "raise", "DmenuUsageError", "(", "args", ",", "stderr", ")", "# other error from dmenu", "raise", "DmenuCommandError", "(", "args", ",", "stderr", ")" ]
Present a dmenu to the user. Args: items (Iterable[str]): defines the menu items being presented to the user. items should not contain the newline character. command (Optional[str]): defines the path to the dmenu executable. Defaults to 'dmenu'. bottom (Optional[bool]): dmenu appears at the bottom of the screen. fast (Optional[bool]): dmenu grabs the keyboard before reading stdin. This is faster, but will lock up X until stdin reaches end-of-file. case_insensitive (Optional[bool]): dmenu matches menu items case insensitively. lines (Optional[int]): dmenu lists items vertically, with the given number of lines. monitor (Optional[int]): dmenu is displayed on the monitor number supplied. Monitor numbers are starting from 0. prompt (Optional[str]): defines the prompt to be displayed to the left of the input field. font (Optional[str]): defines the font or font set used. eg. "fixed" or "Monospace-12:normal" (an xft font) background (Optional[str]): defines the normal background color. #RGB, #RRGGBB, and X color names are supported. foreground (Optional[str]): defines the normal foreground color. background_selected (Optional[str]): defines the selected background color. foreground_selected (Optional[str]): defines the selected foreground color. Raises: DmenuCommandError DmenuUsageError Returns: The user's selected menu item, their own typed item, or None if they hit escape. Examples: >>> import dmenu >>> dmenu.show(['a', 'b', 'c']) 'a' # user selected a >>> dmenu.show(['a', 'b', 'c'], prompt='pick a letter') 'b' # user selected b >>> dmenu.show(['a', 'b', 'c']) None # user hit escape >>> dmenu.show(['a', 'b', 'c']) 'd' # user typed their own selection, d >>> dmenu.show(['a', 'b', 'c'], command='not_a_valid_dmenu') Traceback (most recent call last): ... dmenu.dmenu.DmenuCommandError: The provided dmenu command could not be used (['not_a_valid_dmenu']): [Errno 2] No such file or directory: 'not_a_valid_dmenu' >>> dmenu.show(['a', 'b', 'c'], monitor=2) Traceback (most recent call last): ... dmenu.dmenu.DmenuUsageError: This version of dmenu does not support your usage (['dmenu', '-m', '2']): usage: dmenu [-b] [-f] [-i] [-l lines] [-p prompt] [-fn font] [-nb color] [-nf color] [-sb color] [-sf color] [-v] Consider configuring show using partial application: >>> import functools >>> show = functools.partial(dmenu.show, bottom=True) >>> show(['we', 'show', 'up', 'below']) >>> show(['us', 'too'])
[ "Present", "a", "dmenu", "to", "the", "user", "." ]
30eca49a9368c61e13e87f530cc3785a369536c4
https://github.com/allonhadaya/dmenu-python/blob/30eca49a9368c61e13e87f530cc3785a369536c4/dmenu/dmenu.py#L62-L206
train
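A short, hedged usage sketch for the show record above, based only on the documented return values (the selected string, or None on escape). It assumes a working dmenu binary and an X display; the exception classes referenced in the docstring live in dmenu.dmenu and may or may not be re-exported at package level, so they are not caught here.

```python
import dmenu

# Present a vertical, case-insensitive menu; None means the user hit escape.
choice = dmenu.show(['open', 'save', 'quit'], prompt='action', lines=3,
                    case_insensitive=True)
if choice is None:
    print('cancelled')
else:
    print('picked:', choice)
```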
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/model/filtered_network.py
FilteredNetwork.get_upregulated_genes_network
def get_upregulated_genes_network(self) -> Graph: """Get the graph of up-regulated genes. :return Graph: Graph of up-regulated genes. """ logger.info("In get_upregulated_genes_network()") deg_graph = self.graph.copy() # deep copy graph not_diff_expr = self.graph.vs(up_regulated_eq=False) # delete genes which are not differentially expressed or have no connections to others deg_graph.delete_vertices(not_diff_expr.indices) deg_graph.delete_vertices(deg_graph.vs.select(_degree_eq=0)) return deg_graph
python
def get_upregulated_genes_network(self) -> Graph: """Get the graph of up-regulated genes. :return Graph: Graph of up-regulated genes. """ logger.info("In get_upregulated_genes_network()") deg_graph = self.graph.copy() # deep copy graph not_diff_expr = self.graph.vs(up_regulated_eq=False) # delete genes which are not differentially expressed or have no connections to others deg_graph.delete_vertices(not_diff_expr.indices) deg_graph.delete_vertices(deg_graph.vs.select(_degree_eq=0)) return deg_graph
[ "def", "get_upregulated_genes_network", "(", "self", ")", "->", "Graph", ":", "logger", ".", "info", "(", "\"In get_upregulated_genes_network()\"", ")", "deg_graph", "=", "self", ".", "graph", ".", "copy", "(", ")", "# deep copy graph", "not_diff_expr", "=", "self", ".", "graph", ".", "vs", "(", "up_regulated_eq", "=", "False", ")", "# delete genes which are not differentially expressed or have no connections to others", "deg_graph", ".", "delete_vertices", "(", "not_diff_expr", ".", "indices", ")", "deg_graph", ".", "delete_vertices", "(", "deg_graph", ".", "vs", ".", "select", "(", "_degree_eq", "=", "0", ")", ")", "return", "deg_graph" ]
Get the graph of up-regulated genes. :return Graph: Graph of up-regulated genes.
[ "Get", "the", "graph", "of", "up", "-", "regulated", "genes", "." ]
4d7b6713485f2d0a0957e6457edc1b1b5a237460
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/model/filtered_network.py#L27-L41
train
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/model/filtered_network.py
FilteredNetwork.get_downregulated_genes_network
def get_downregulated_genes_network(self) -> Graph: """Get the graph of down-regulated genes. :return Graph: Graph of down-regulated genes. """ logger.info("In get_downregulated_genes_network()") deg_graph = self.graph.copy() # deep copy graph not_diff_expr = self.graph.vs(down_regulated_eq=False) # delete genes which are not differentially expressed or have no connections to others deg_graph.delete_vertices(not_diff_expr.indices) deg_graph.delete_vertices(deg_graph.vs.select(_degree_eq=0)) return deg_graph
python
def get_downregulated_genes_network(self) -> Graph: """Get the graph of down-regulated genes. :return Graph: Graph of down-regulated genes. """ logger.info("In get_downregulated_genes_network()") deg_graph = self.graph.copy() # deep copy graph not_diff_expr = self.graph.vs(down_regulated_eq=False) # delete genes which are not differentially expressed or have no connections to others deg_graph.delete_vertices(not_diff_expr.indices) deg_graph.delete_vertices(deg_graph.vs.select(_degree_eq=0)) return deg_graph
[ "def", "get_downregulated_genes_network", "(", "self", ")", "->", "Graph", ":", "logger", ".", "info", "(", "\"In get_downregulated_genes_network()\"", ")", "deg_graph", "=", "self", ".", "graph", ".", "copy", "(", ")", "# deep copy graph", "not_diff_expr", "=", "self", ".", "graph", ".", "vs", "(", "down_regulated_eq", "=", "False", ")", "# delete genes which are not differentially expressed or have no connections to others", "deg_graph", ".", "delete_vertices", "(", "not_diff_expr", ".", "indices", ")", "deg_graph", ".", "delete_vertices", "(", "deg_graph", ".", "vs", ".", "select", "(", "_degree_eq", "=", "0", ")", ")", "return", "deg_graph" ]
Get the graph of down-regulated genes. :return Graph: Graph of down-regulated genes.
[ "Get", "the", "graph", "of", "down", "-", "regulated", "genes", "." ]
4d7b6713485f2d0a0957e6457edc1b1b5a237460
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/model/filtered_network.py#L43-L57
train
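Both FilteredNetwork methods above use the same python-igraph idiom: copy the graph, delete vertices whose boolean attribute is False, then delete whatever is left with degree 0. A small self-contained sketch of that idiom; the toy graph and attribute values are made up:

```python
from igraph import Graph

g = Graph.Full(4)                                  # 4 mutually connected vertices
g.vs['up_regulated'] = [True, False, True, False]  # toy attribute values

sub = g.copy()                                     # work on a deep copy
sub.delete_vertices(sub.vs(up_regulated_eq=False).indices)
sub.delete_vertices(sub.vs.select(_degree_eq=0))   # drop isolated leftovers

print(sub.vcount())  # 2 -- the two up-regulated vertices, still connected
```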
BernardFW/bernard
src/bernard/misc/main/_base.py
make_parser
def make_parser(): """ Generate the parser for all sub-commands """ parser = argparse.ArgumentParser(description='BERNARD CLI utility') sp = parser.add_subparsers(help='Sub-command') parser_run = sp.add_parser('run', help='Run the BERNARD server') parser_run.set_defaults(action='run') parser_sheet = sp.add_parser('sheet', help='Import files from Google ' 'Sheets') parser_sheet.set_defaults(action='sheet') parser_sheet.add_argument( '--auth_host_name', default='localhost', help='Hostname when running a local web server.' ) parser_sheet.add_argument( '--noauth_local_webserver', action='store_true', default=False, help='Do not run a local web server.' ) parser_sheet.add_argument( '--auth_host_port', default=[8080, 8090], type=int, nargs='*', help='Port web server should listen on.' ) parser_sheet.add_argument( '--logging_level', default='ERROR', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], help='Set the logging level of detail.' ) parser_sp = sp.add_parser('start_project', help='Starts a project') parser_sp.set_defaults(action='start_project') parser_sp.add_argument( 'project_name', help='A snake-case name for your project' ) parser_sp.add_argument( 'dir', help='Directory to store the project' ) return parser
python
def make_parser(): """ Generate the parser for all sub-commands """ parser = argparse.ArgumentParser(description='BERNARD CLI utility') sp = parser.add_subparsers(help='Sub-command') parser_run = sp.add_parser('run', help='Run the BERNARD server') parser_run.set_defaults(action='run') parser_sheet = sp.add_parser('sheet', help='Import files from Google ' 'Sheets') parser_sheet.set_defaults(action='sheet') parser_sheet.add_argument( '--auth_host_name', default='localhost', help='Hostname when running a local web server.' ) parser_sheet.add_argument( '--noauth_local_webserver', action='store_true', default=False, help='Do not run a local web server.' ) parser_sheet.add_argument( '--auth_host_port', default=[8080, 8090], type=int, nargs='*', help='Port web server should listen on.' ) parser_sheet.add_argument( '--logging_level', default='ERROR', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], help='Set the logging level of detail.' ) parser_sp = sp.add_parser('start_project', help='Starts a project') parser_sp.set_defaults(action='start_project') parser_sp.add_argument( 'project_name', help='A snake-case name for your project' ) parser_sp.add_argument( 'dir', help='Directory to store the project' ) return parser
[ "def", "make_parser", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'BERNARD CLI utility'", ")", "sp", "=", "parser", ".", "add_subparsers", "(", "help", "=", "'Sub-command'", ")", "parser_run", "=", "sp", ".", "add_parser", "(", "'run'", ",", "help", "=", "'Run the BERNARD server'", ")", "parser_run", ".", "set_defaults", "(", "action", "=", "'run'", ")", "parser_sheet", "=", "sp", ".", "add_parser", "(", "'sheet'", ",", "help", "=", "'Import files from Google '", "'Sheets'", ")", "parser_sheet", ".", "set_defaults", "(", "action", "=", "'sheet'", ")", "parser_sheet", ".", "add_argument", "(", "'--auth_host_name'", ",", "default", "=", "'localhost'", ",", "help", "=", "'Hostname when running a local web server.'", ")", "parser_sheet", ".", "add_argument", "(", "'--noauth_local_webserver'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Do not run a local web server.'", ")", "parser_sheet", ".", "add_argument", "(", "'--auth_host_port'", ",", "default", "=", "[", "8080", ",", "8090", "]", ",", "type", "=", "int", ",", "nargs", "=", "'*'", ",", "help", "=", "'Port web server should listen on.'", ")", "parser_sheet", ".", "add_argument", "(", "'--logging_level'", ",", "default", "=", "'ERROR'", ",", "choices", "=", "[", "'DEBUG'", ",", "'INFO'", ",", "'WARNING'", ",", "'ERROR'", ",", "'CRITICAL'", "]", ",", "help", "=", "'Set the logging level of detail.'", ")", "parser_sp", "=", "sp", ".", "add_parser", "(", "'start_project'", ",", "help", "=", "'Starts a project'", ")", "parser_sp", ".", "set_defaults", "(", "action", "=", "'start_project'", ")", "parser_sp", ".", "add_argument", "(", "'project_name'", ",", "help", "=", "'A snake-case name for your project'", ")", "parser_sp", ".", "add_argument", "(", "'dir'", ",", "help", "=", "'Directory to store the project'", ")", "return", "parser" ]
Generate the parser for all sub-commands
[ "Generate", "the", "parser", "for", "all", "sub", "-", "commands" ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/misc/main/_base.py#L5-L55
train
BernardFW/bernard
src/bernard/misc/main/_base.py
main
def main(): """ Run the appropriate main function according to the output of the parser. """ parser = make_parser() args = parser.parse_args() if not hasattr(args, 'action'): parser.print_help() exit(1) if args.action == 'sheet': from bernard.misc.sheet_sync import main as main_sheet main_sheet(args) elif args.action == 'run': from bernard.cli import main as main_run main_run() elif args.action == 'start_project': from bernard.misc.start_project import main as main_sp main_sp(args)
python
def main(): """ Run the appropriate main function according to the output of the parser. """ parser = make_parser() args = parser.parse_args() if not hasattr(args, 'action'): parser.print_help() exit(1) if args.action == 'sheet': from bernard.misc.sheet_sync import main as main_sheet main_sheet(args) elif args.action == 'run': from bernard.cli import main as main_run main_run() elif args.action == 'start_project': from bernard.misc.start_project import main as main_sp main_sp(args)
[ "def", "main", "(", ")", ":", "parser", "=", "make_parser", "(", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "not", "hasattr", "(", "args", ",", "'action'", ")", ":", "parser", ".", "print_help", "(", ")", "exit", "(", "1", ")", "if", "args", ".", "action", "==", "'sheet'", ":", "from", "bernard", ".", "misc", ".", "sheet_sync", "import", "main", "as", "main_sheet", "main_sheet", "(", "args", ")", "elif", "args", ".", "action", "==", "'run'", ":", "from", "bernard", ".", "cli", "import", "main", "as", "main_run", "main_run", "(", ")", "elif", "args", ".", "action", "==", "'start_project'", ":", "from", "bernard", ".", "misc", ".", "start_project", "import", "main", "as", "main_sp", "main_sp", "(", "args", ")" ]
Run the appropriate main function according to the output of the parser.
[ "Run", "the", "appropriate", "main", "function", "according", "to", "the", "output", "of", "the", "parser", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/misc/main/_base.py#L58-L78
train
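make_parser and main above implement a common argparse pattern: each sub-parser stamps an 'action' default and main() branches on it after parse_args(). A minimal reproduction of that dispatch, with demo command names invented here:

```python
import argparse

parser = argparse.ArgumentParser(description='demo')
sub = parser.add_subparsers(help='Sub-command')

run = sub.add_parser('run', help='Run something')
run.set_defaults(action='run')

greet = sub.add_parser('greet', help='Greet someone')
greet.set_defaults(action='greet')
greet.add_argument('name')

args = parser.parse_args(['greet', 'world'])
if not hasattr(args, 'action'):      # no sub-command given, as in main() above
    parser.print_help()
elif args.action == 'greet':
    print('hello', args.name)
```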
sthysel/knobs
src/environment.py
load_dotenv
def load_dotenv(dotenv_path, verbose=False): """ Read a .env file and load into os.environ. :param dotenv_path: :type dotenv_path: str :param verbose: verbosity flag, raise warning if path does not exist :return: success flag """ if not os.path.exists(dotenv_path): if verbose: warnings.warn(f"Not loading {dotenv_path}, it doesn't exist.") return None for k, v in dotenv_values(dotenv_path).items(): os.environ.setdefault(k, v) return True
python
def load_dotenv(dotenv_path, verbose=False): """ Read a .env file and load into os.environ. :param dotenv_path: :type dotenv_path: str :param verbose: verbosity flag, raise warning if path does not exist :return: success flag """ if not os.path.exists(dotenv_path): if verbose: warnings.warn(f"Not loading {dotenv_path}, it doesn't exist.") return None for k, v in dotenv_values(dotenv_path).items(): os.environ.setdefault(k, v) return True
[ "def", "load_dotenv", "(", "dotenv_path", ",", "verbose", "=", "False", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "dotenv_path", ")", ":", "if", "verbose", ":", "warnings", ".", "warn", "(", "f\"Not loading {dotenv_path}, it doesn't exist.\"", ")", "return", "None", "for", "k", ",", "v", "in", "dotenv_values", "(", "dotenv_path", ")", ".", "items", "(", ")", ":", "os", ".", "environ", ".", "setdefault", "(", "k", ",", "v", ")", "return", "True" ]
Read a .env file and load into os.environ. :param dotenv_path: :type dotenv_path: str :param verbose: verbosity flag, raise warning if path does not exist :return: success flag
[ "Read", "a", ".", "env", "file", "and", "load", "into", "os", ".", "environ", "." ]
1d01f50f643068076e38118a93fed9375ea3ac81
https://github.com/sthysel/knobs/blob/1d01f50f643068076e38118a93fed9375ea3ac81/src/environment.py#L20-L35
train
sthysel/knobs
src/environment.py
get_key
def get_key(dotenv_path, key_to_get, verbose=False): """ Gets the value of a given key from the given .env If the .env path given doesn't exist, fails :param dotenv_path: path :param key_to_get: key :param verbose: verbosity flag, raise warning if path does not exist :return: value of variable from environment file or None """ key_to_get = str(key_to_get) if not os.path.exists(dotenv_path): if verbose: warnings.warn(f"Can't read {dotenv_path}, it doesn't exist.") return None dotenv_as_dict = dotenv_values(dotenv_path) if key_to_get in dotenv_as_dict: return dotenv_as_dict[key_to_get] else: if verbose: warnings.warn(f"key {key_to_get} not found in {dotenv_path}.") return None
python
def get_key(dotenv_path, key_to_get, verbose=False): """ Gets the value of a given key from the given .env If the .env path given doesn't exist, fails :param dotenv_path: path :param key_to_get: key :param verbose: verbosity flag, raise warning if path does not exist :return: value of variable from environment file or None """ key_to_get = str(key_to_get) if not os.path.exists(dotenv_path): if verbose: warnings.warn(f"Can't read {dotenv_path}, it doesn't exist.") return None dotenv_as_dict = dotenv_values(dotenv_path) if key_to_get in dotenv_as_dict: return dotenv_as_dict[key_to_get] else: if verbose: warnings.warn(f"key {key_to_get} not found in {dotenv_path}.") return None
[ "def", "get_key", "(", "dotenv_path", ",", "key_to_get", ",", "verbose", "=", "False", ")", ":", "key_to_get", "=", "str", "(", "key_to_get", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dotenv_path", ")", ":", "if", "verbose", ":", "warnings", ".", "warn", "(", "f\"Can't read {dotenv_path}, it doesn't exist.\"", ")", "return", "None", "dotenv_as_dict", "=", "dotenv_values", "(", "dotenv_path", ")", "if", "key_to_get", "in", "dotenv_as_dict", ":", "return", "dotenv_as_dict", "[", "key_to_get", "]", "else", ":", "if", "verbose", ":", "warnings", ".", "warn", "(", "f\"key {key_to_get} not found in {dotenv_path}.\"", ")", "return", "None" ]
Gets the value of a given key from the given .env If the .env path given doesn't exist, fails :param dotenv_path: path :param key_to_get: key :param verbose: verbosity flag, raise warning if path does not exist :return: value of variable from environment file or None
[ "Gets", "the", "value", "of", "a", "given", "key", "from", "the", "given", ".", "env" ]
1d01f50f643068076e38118a93fed9375ea3ac81
https://github.com/sthysel/knobs/blob/1d01f50f643068076e38118a93fed9375ea3ac81/src/environment.py#L38-L59
train
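A hedged usage sketch for load_dotenv and get_key above. The import name (environment) is taken from the record's path field, and the sketch assumes the module's dotenv_values helper (not shown in these records) parses plain KEY=VALUE lines.

```python
import os
from environment import load_dotenv, get_key  # module path per the record

with open('.env', 'w') as fh:
    fh.write('API_TOKEN=abc123\n')

load_dotenv('.env')                    # fills os.environ via setdefault only,
print(os.environ['API_TOKEN'])         # so pre-existing variables always win
print(get_key('.env', 'API_TOKEN'))    # 'abc123'
print(get_key('.env', 'MISSING', verbose=True))  # warns and returns None
```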
sthysel/knobs
src/environment.py
_get_format
def _get_format(value, quote_mode='always'): """ Returns the quote format depending on the quote_mode. This determines if the key value will be quoted when written to the env file. :param value: :param quote_mode: :return: str :raises: KeyError if the quote_mode is unknown """ formats = {'always': '{key}="{value}"\n', 'auto': '{key}={value}\n'} if quote_mode not in formats.keys(): return KeyError(f'quote_mode {quote_mode} is invalid') _mode = quote_mode if quote_mode == 'auto' and ' ' in value: _mode = 'always' return formats.get(_mode)
python
def _get_format(value, quote_mode='always'): """ Returns the quote format depending on the quote_mode. This determines if the key value will be quoted when written to the env file. :param value: :param quote_mode: :return: str :raises: KeyError if the quote_mode is unknown """ formats = {'always': '{key}="{value}"\n', 'auto': '{key}={value}\n'} if quote_mode not in formats.keys(): return KeyError(f'quote_mode {quote_mode} is invalid') _mode = quote_mode if quote_mode == 'auto' and ' ' in value: _mode = 'always' return formats.get(_mode)
[ "def", "_get_format", "(", "value", ",", "quote_mode", "=", "'always'", ")", ":", "formats", "=", "{", "'always'", ":", "'{key}=\"{value}\"\\n'", ",", "'auto'", ":", "'{key}={value}\\n'", "}", "if", "quote_mode", "not", "in", "formats", ".", "keys", "(", ")", ":", "return", "KeyError", "(", "f'quote_mode {quote_mode} is invalid'", ")", "_mode", "=", "quote_mode", "if", "quote_mode", "==", "'auto'", "and", "' '", "in", "value", ":", "_mode", "=", "'always'", "return", "formats", ".", "get", "(", "_mode", ")" ]
Returns the quote format depending on the quote_mode. This determines if the key value will be quoted when written to the env file. :param value: :param quote_mode: :return: str :raises: KeyError if the quote_mode is unknown
[ "Returns", "the", "quote", "format", "depending", "on", "the", "quote_mode", ".", "This", "determines", "if", "the", "key", "value", "will", "be", "quoted", "when", "written", "to", "the", "env", "file", "." ]
1d01f50f643068076e38118a93fed9375ea3ac81
https://github.com/sthysel/knobs/blob/1d01f50f643068076e38118a93fed9375ea3ac81/src/environment.py#L181-L201
train
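A small sketch of the quoting behaviour implemented by _get_format above ('always' quotes unconditionally, 'auto' only switches to quoting when the value contains a space). It is a private helper, so importing it directly is purely for illustration, and the import name is again assumed from the record's path field.

```python
from environment import _get_format  # private helper, imported for illustration

print(_get_format('hello', quote_mode='auto').format(key='K', value='hello'))
# K=hello

print(_get_format('hello world', quote_mode='auto')
      .format(key='K', value='hello world'))
# K="hello world"   <- 'auto' falls back to quoting because of the space
```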
sthysel/knobs
src/environment.py
find_dotenv
def find_dotenv(filename='.env', raise_error_if_not_found=False, usecwd=False): """ Search in increasingly higher folders for the given file Returns path to the file if found, or an empty string otherwise """ if usecwd or '__file__' not in globals(): # should work without __file__, e.g. in REPL or IPython notebook path = os.getcwd() else: # will work for .py files frame_filename = sys._getframe().f_back.f_code.co_filename path = os.path.dirname(os.path.abspath(frame_filename)) for dirname in _walk_to_root(path): check_path = os.path.join(dirname, filename) if os.path.exists(check_path): return check_path if raise_error_if_not_found: raise IOError('File not found') return ''
python
def find_dotenv(filename='.env', raise_error_if_not_found=False, usecwd=False): """ Search in increasingly higher folders for the given file Returns path to the file if found, or an empty string otherwise """ if usecwd or '__file__' not in globals(): # should work without __file__, e.g. in REPL or IPython notebook path = os.getcwd() else: # will work for .py files frame_filename = sys._getframe().f_back.f_code.co_filename path = os.path.dirname(os.path.abspath(frame_filename)) for dirname in _walk_to_root(path): check_path = os.path.join(dirname, filename) if os.path.exists(check_path): return check_path if raise_error_if_not_found: raise IOError('File not found') return ''
[ "def", "find_dotenv", "(", "filename", "=", "'.env'", ",", "raise_error_if_not_found", "=", "False", ",", "usecwd", "=", "False", ")", ":", "if", "usecwd", "or", "'__file__'", "not", "in", "globals", "(", ")", ":", "# should work without __file__, e.g. in REPL or IPython notebook", "path", "=", "os", ".", "getcwd", "(", ")", "else", ":", "# will work for .py files", "frame_filename", "=", "sys", ".", "_getframe", "(", ")", ".", "f_back", ".", "f_code", ".", "co_filename", "path", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "frame_filename", ")", ")", "for", "dirname", "in", "_walk_to_root", "(", "path", ")", ":", "check_path", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "filename", ")", "if", "os", ".", "path", ".", "exists", "(", "check_path", ")", ":", "return", "check_path", "if", "raise_error_if_not_found", ":", "raise", "IOError", "(", "'File not found'", ")", "return", "''" ]
Search in increasingly higher folders for the given file Returns path to the file if found, or an empty string otherwise
[ "Search", "in", "increasingly", "higher", "folders", "for", "the", "given", "file" ]
1d01f50f643068076e38118a93fed9375ea3ac81
https://github.com/sthysel/knobs/blob/1d01f50f643068076e38118a93fed9375ea3ac81/src/environment.py#L237-L259
train
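find_dotenv above walks upward from the caller's directory (or from the current working directory with usecwd=True) toward the filesystem root and returns the first match, or an empty string. A small hedged sketch combining it with load_dotenv, under the same assumed module name:

```python
from environment import find_dotenv, load_dotenv  # module path per the records

dotenv_path = find_dotenv(usecwd=True)  # '' if no .env exists up the tree
if dotenv_path:
    load_dotenv(dotenv_path, verbose=True)
else:
    print('no .env found; relying on the process environment')
```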
garenchan/policy
policy/_parser.py
reducer
def reducer(*tokens): """Decorator for reduction methods. Arguments are a sequence of tokens, which should trigger running this reduction method. """ def wrapper(func): # Make sure that we have a list of reducer sequences if not hasattr(func, 'reducers'): func.reducers = [] # Add the token to the list of reducer sequences func.reducers.append(list(tokens)) return func return wrapper
python
def reducer(*tokens): """Decorator for reduction methods. Arguments are a sequence of tokens, which should trigger running this reduction method. """ def wrapper(func): # Make sure that we have a list of reducer sequences if not hasattr(func, 'reducers'): func.reducers = [] # Add the token to the list of reducer sequences func.reducers.append(list(tokens)) return func return wrapper
[ "def", "reducer", "(", "*", "tokens", ")", ":", "def", "wrapper", "(", "func", ")", ":", "# Make sure that we have a list of reducer sequences", "if", "not", "hasattr", "(", "func", ",", "'reducers'", ")", ":", "func", ".", "reducers", "=", "[", "]", "# Add the token to the list of reducer sequences", "func", ".", "reducers", ".", "append", "(", "list", "(", "tokens", ")", ")", "return", "func", "return", "wrapper" ]
Decorator for reduction methods. Arguments are a sequence of tokens, which should trigger running this reduction method.
[ "Decorator", "for", "reduction", "methods", "." ]
7709ae5f371146f8c90380d0877a5e59d731f644
https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_parser.py#L19-L36
train
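The reducer decorator above only records trigger token sequences on the decorated method; something else (not shown in these records) later collects them into the parser's self.reducers list. A minimal standalone sketch of that registration behaviour, with invented token names:

```python
def reducer(*tokens):
    # Same implementation as in the record above.
    def wrapper(func):
        if not hasattr(func, 'reducers'):
            func.reducers = []
        func.reducers.append(list(tokens))
        return func
    return wrapper


class Demo:
    @reducer('check', 'and', 'check')
    @reducer('and_expr', 'and', 'check')
    def _make_and(self, *values):
        return [('and_expr', values)]


# Decorators apply bottom-up, so the inner one registers first.
print(Demo._make_and.reducers)
# [['and_expr', 'and', 'check'], ['check', 'and', 'check']]
```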
garenchan/policy
policy/_parser.py
parse_rule
def parse_rule(rule: str, raise_error=False): """Parses policy to a tree of Check objects.""" parser = Parser(raise_error) return parser.parse(rule)
python
def parse_rule(rule: str, raise_error=False): """Parses policy to a tree of Check objects.""" parser = Parser(raise_error) return parser.parse(rule)
[ "def", "parse_rule", "(", "rule", ":", "str", ",", "raise_error", "=", "False", ")", ":", "parser", "=", "Parser", "(", "raise_error", ")", "return", "parser", ".", "parse", "(", "rule", ")" ]
Parses policy to a tree of Check objects.
[ "Parses", "policy", "to", "a", "tree", "of", "Check", "objects", "." ]
7709ae5f371146f8c90380d0877a5e59d731f644
https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_parser.py#L269-L273
train
garenchan/policy
policy/_parser.py
Parser._reduce
def _reduce(self): """Perform a greedy reduction of token stream. If a reducer method matches, it will be executed, then the :meth:`reduce` method will be called recursively to search for any more possible reductions. """ for reduction, methname in self.reducers: token_num = len(reduction) if (len(self.tokens) >= token_num and self.tokens[-token_num:] == reduction): # Get the reduction method meth = getattr(self, methname) # Reduce the token stream results = meth(*self.values[-token_num:]) self.tokens[-token_num:] = [r[0] for r in results] self.values[-token_num:] = [r[1] for r in results] # Check for any more reductions return self._reduce()
python
def _reduce(self): """Perform a greedy reduction of token stream. If a reducer method matches, it will be executed, then the :meth:`reduce` method will be called recursively to search for any more possible reductions. """ for reduction, methname in self.reducers: token_num = len(reduction) if (len(self.tokens) >= token_num and self.tokens[-token_num:] == reduction): # Get the reduction method meth = getattr(self, methname) # Reduce the token stream results = meth(*self.values[-token_num:]) self.tokens[-token_num:] = [r[0] for r in results] self.values[-token_num:] = [r[1] for r in results] # Check for any more reductions return self._reduce()
[ "def", "_reduce", "(", "self", ")", ":", "for", "reduction", ",", "methname", "in", "self", ".", "reducers", ":", "token_num", "=", "len", "(", "reduction", ")", "if", "(", "len", "(", "self", ".", "tokens", ")", ">=", "token_num", "and", "self", ".", "tokens", "[", "-", "token_num", ":", "]", "==", "reduction", ")", ":", "# Get the reduction method", "meth", "=", "getattr", "(", "self", ",", "methname", ")", "# Reduce the token stream", "results", "=", "meth", "(", "*", "self", ".", "values", "[", "-", "token_num", ":", "]", ")", "self", ".", "tokens", "[", "-", "token_num", ":", "]", "=", "[", "r", "[", "0", "]", "for", "r", "in", "results", "]", "self", ".", "values", "[", "-", "token_num", ":", "]", "=", "[", "r", "[", "1", "]", "for", "r", "in", "results", "]", "# Check for any more reductions", "return", "self", ".", "_reduce", "(", ")" ]
Perform a greedy reduction of the token stream. If a reducer method matches, it will be executed, then the :meth:`reduce` method will be called recursively to search for any more possible reductions.
[ "Perform", "a", "greedy", "reduction", "of", "token", "stream", "." ]
7709ae5f371146f8c90380d0877a5e59d731f644
https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_parser.py#L75-L97
train
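A standalone sketch of the greedy tail-reduction idea used by `_reduce`, with two made-up reducer sequences and a shift step; it mirrors the slice-assignment trick on the token and value stacks without importing the package.

```python
# Two made-up reducer sequences: strip parentheses, and fold 'check and check'.
reducers = [
    (['(', 'check', ')'], lambda _l, c, _r: [('check', c)]),
    (['check', 'and', 'check'], lambda a, _op, b: [('check', ('AND', a, b))]),
]

def reduce_tail(tokens, values):
    for pattern, meth in reducers:
        n = len(pattern)
        if len(tokens) >= n and tokens[-n:] == pattern:
            results = meth(*values[-n:])
            tokens[-n:] = [r[0] for r in results]
            values[-n:] = [r[1] for r in results]
            return reduce_tail(tokens, values)   # keep reducing greedily

def shift(token, value, tokens, values):
    tokens.append(token)
    values.append(value)
    reduce_tail(tokens, values)

tokens, values = [], []
for tok, val in [('(', '('), ('check', 'A'), (')', ')'),
                 ('and', 'and'), ('check', 'B')]:
    shift(tok, val, tokens, values)

print(tokens, values)   # ['check'] [('AND', 'A', 'B')]
```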
garenchan/policy
policy/_parser.py
Parser._parse_check
def _parse_check(self, rule): """Parse a single base check rule into an appropriate Check object.""" # Handle the special constant-type checks for check_cls in (checks.FalseCheck, checks.TrueCheck): check = check_cls() if rule == str(check): return check try: kind, match = rule.split(':', 1) except Exception: if self.raise_error: raise InvalidRuleException(rule) else: LOG.exception('Failed to understand rule %r', rule) # If the rule is invalid, we'll fail closed return checks.FalseCheck() if kind in checks.registered_checks: return checks.registered_checks[kind](kind, match) elif None in checks.registered_checks: return checks.registered_checks[None](kind, match) elif self.raise_error: raise InvalidRuleException(rule) else: LOG.error('No handler for matches of kind %r', kind) # If the rule is invalid, we'll fail closed return checks.FalseCheck()
python
def _parse_check(self, rule): """Parse a single base check rule into an appropriate Check object.""" # Handle the special constant-type checks for check_cls in (checks.FalseCheck, checks.TrueCheck): check = check_cls() if rule == str(check): return check try: kind, match = rule.split(':', 1) except Exception: if self.raise_error: raise InvalidRuleException(rule) else: LOG.exception('Failed to understand rule %r', rule) # If the rule is invalid, we'll fail closed return checks.FalseCheck() if kind in checks.registered_checks: return checks.registered_checks[kind](kind, match) elif None in checks.registered_checks: return checks.registered_checks[None](kind, match) elif self.raise_error: raise InvalidRuleException(rule) else: LOG.error('No handler for matches of kind %r', kind) # If the rule is invalid, we'll fail closed return checks.FalseCheck()
[ "def", "_parse_check", "(", "self", ",", "rule", ")", ":", "# Handle the special constant-type checks", "for", "check_cls", "in", "(", "checks", ".", "FalseCheck", ",", "checks", ".", "TrueCheck", ")", ":", "check", "=", "check_cls", "(", ")", "if", "rule", "==", "str", "(", "check", ")", ":", "return", "check", "try", ":", "kind", ",", "match", "=", "rule", ".", "split", "(", "':'", ",", "1", ")", "except", "Exception", ":", "if", "self", ".", "raise_error", ":", "raise", "InvalidRuleException", "(", "rule", ")", "else", ":", "LOG", ".", "exception", "(", "'Failed to understand rule %r'", ",", "rule", ")", "# If the rule is invalid, we'll fail closed", "return", "checks", ".", "FalseCheck", "(", ")", "if", "kind", "in", "checks", ".", "registered_checks", ":", "return", "checks", ".", "registered_checks", "[", "kind", "]", "(", "kind", ",", "match", ")", "elif", "None", "in", "checks", ".", "registered_checks", ":", "return", "checks", ".", "registered_checks", "[", "None", "]", "(", "kind", ",", "match", ")", "elif", "self", ".", "raise_error", ":", "raise", "InvalidRuleException", "(", "rule", ")", "else", ":", "LOG", ".", "error", "(", "'No handler for matches of kind %r'", ",", "kind", ")", "# If the rule is invalid, we'll fail closed", "return", "checks", ".", "FalseCheck", "(", ")" ]
Parse a single base check rule into an appropriate Check object.
[ "Parse", "a", "single", "base", "check", "rule", "into", "an", "appropriate", "Check", "object", "." ]
7709ae5f371146f8c90380d0877a5e59d731f644
https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_parser.py#L117-L145
train
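A standalone sketch of the `kind:match` dispatch performed by `_parse_check`; the check classes and the registry here are stand-ins for `policy.checks`, and the fail-closed branch simply returns False instead of a FalseCheck instance.

```python
class GenericCheck:
    def __init__(self, kind, match):
        self.kind, self.match = kind, match
    def __repr__(self):
        return "%s(%r, %r)" % (type(self).__name__, self.kind, self.match)

class RoleCheck(GenericCheck):
    pass

# 'role' has a dedicated class; None acts as the catch-all handler.
registered_checks = {'role': RoleCheck, None: GenericCheck}

def parse_check(rule):
    kind, sep, match = rule.partition(':')
    if not sep:                      # no ':' at all -> fail closed
        return False
    cls = registered_checks.get(kind, registered_checks[None])
    return cls(kind, match)

print(parse_check('role:admin'))          # RoleCheck('role', 'admin')
print(parse_check('project_id:%(id)s'))   # GenericCheck via the None fallback
print(parse_check('malformed'))           # False
```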
garenchan/policy
policy/_parser.py
Parser._parse_tokenize
def _parse_tokenize(self, rule): """Tokenizer for the policy language.""" for token in self._TOKENIZE_RE.split(rule): # Skip empty tokens if not token or token.isspace(): continue # Handle leading parens on the token clean = token.lstrip('(') for i in range(len(token) - len(clean)): yield '(', '(' # If it was only parentheses, continue if not clean: continue else: token = clean # Handle trailing parens on the token clean = token.rstrip(')') trail = len(token) - len(clean) # Yield the cleaned token lowered = clean.lower() if lowered in ('and', 'or', 'not'): # Special tokens yield lowered, clean elif clean: # Not a special token, but not composed solely of ')' if len(token) >= 2 and ((token[0], token[-1]) in [('"', '"'), ("'", "'")]): # It's a quoted string yield 'string', token[1:-1] else: yield 'check', self._parse_check(clean) # Yield the trailing parens for i in range(trail): yield ')', ')'
python
def _parse_tokenize(self, rule): """Tokenizer for the policy language.""" for token in self._TOKENIZE_RE.split(rule): # Skip empty tokens if not token or token.isspace(): continue # Handle leading parens on the token clean = token.lstrip('(') for i in range(len(token) - len(clean)): yield '(', '(' # If it was only parentheses, continue if not clean: continue else: token = clean # Handle trailing parens on the token clean = token.rstrip(')') trail = len(token) - len(clean) # Yield the cleaned token lowered = clean.lower() if lowered in ('and', 'or', 'not'): # Special tokens yield lowered, clean elif clean: # Not a special token, but not composed solely of ')' if len(token) >= 2 and ((token[0], token[-1]) in [('"', '"'), ("'", "'")]): # It's a quoted string yield 'string', token[1:-1] else: yield 'check', self._parse_check(clean) # Yield the trailing parens for i in range(trail): yield ')', ')'
[ "def", "_parse_tokenize", "(", "self", ",", "rule", ")", ":", "for", "token", "in", "self", ".", "_TOKENIZE_RE", ".", "split", "(", "rule", ")", ":", "# Skip empty tokens", "if", "not", "token", "or", "token", ".", "isspace", "(", ")", ":", "continue", "# Handle leading parens on the token", "clean", "=", "token", ".", "lstrip", "(", "'('", ")", "for", "i", "in", "range", "(", "len", "(", "token", ")", "-", "len", "(", "clean", ")", ")", ":", "yield", "'('", ",", "'('", "# If it was only parentheses, continue", "if", "not", "clean", ":", "continue", "else", ":", "token", "=", "clean", "# Handle trailing parens on the token", "clean", "=", "token", ".", "rstrip", "(", "')'", ")", "trail", "=", "len", "(", "token", ")", "-", "len", "(", "clean", ")", "# Yield the cleaned token", "lowered", "=", "clean", ".", "lower", "(", ")", "if", "lowered", "in", "(", "'and'", ",", "'or'", ",", "'not'", ")", ":", "# Special tokens", "yield", "lowered", ",", "clean", "elif", "clean", ":", "# Not a special token, but not composed solely of ')'", "if", "len", "(", "token", ")", ">=", "2", "and", "(", "(", "token", "[", "0", "]", ",", "token", "[", "-", "1", "]", ")", "in", "[", "(", "'\"'", ",", "'\"'", ")", ",", "(", "\"'\"", ",", "\"'\"", ")", "]", ")", ":", "# It's a quoted string", "yield", "'string'", ",", "token", "[", "1", ":", "-", "1", "]", "else", ":", "yield", "'check'", ",", "self", ".", "_parse_check", "(", "clean", ")", "# Yield the trailing parens", "for", "i", "in", "range", "(", "trail", ")", ":", "yield", "')'", ",", "')'" ]
Tokenizer for the policy language.
[ "Tokenizer", "for", "the", "policy", "language", "." ]
7709ae5f371146f8c90380d0877a5e59d731f644
https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_parser.py#L147-L186
train
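A simplified standalone sketch of the tokenizer's paren-peeling and keyword lowering; it splits on whitespace instead of the package's `_TOKENIZE_RE` and leaves 'check' tokens as raw text rather than parsed Check objects.

```python
def toy_tokenize(rule):
    for token in rule.split():                 # stand-in for _TOKENIZE_RE
        clean = token.lstrip('(')
        for _ in range(len(token) - len(clean)):
            yield '(', '('                     # one token per leading paren
        if not clean:
            continue
        token = clean
        clean = token.rstrip(')')
        trail = len(token) - len(clean)
        lowered = clean.lower()
        if lowered in ('and', 'or', 'not'):
            yield lowered, clean
        elif clean:
            yield 'check', clean               # the real code parses this part
        for _ in range(trail):
            yield ')', ')'                     # trailing parens come last

print(list(toy_tokenize('(role:admin or role:editor) and not is_owner:True')))
```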
garenchan/policy
policy/_parser.py
Parser.parse
def parse(self, rule: str): """Parses policy to tree. Translate a policy written in the policy language into a tree of Check objects. """ # Empty rule means always accept if not rule: return checks.TrueCheck() for token, value in self._parse_tokenize(rule): self._shift(token, value) try: return self.result except ValueError: LOG.exception('Failed to understand rule %r', rule) # Fail closed return checks.FalseCheck()
python
def parse(self, rule: str): """Parses policy to tree. Translate a policy written in the policy language into a tree of Check objects. """ # Empty rule means always accept if not rule: return checks.TrueCheck() for token, value in self._parse_tokenize(rule): self._shift(token, value) try: return self.result except ValueError: LOG.exception('Failed to understand rule %r', rule) # Fail closed return checks.FalseCheck()
[ "def", "parse", "(", "self", ",", "rule", ":", "str", ")", ":", "# Empty rule means always accept", "if", "not", "rule", ":", "return", "checks", ".", "TrueCheck", "(", ")", "for", "token", ",", "value", "in", "self", ".", "_parse_tokenize", "(", "rule", ")", ":", "self", ".", "_shift", "(", "token", ",", "value", ")", "try", ":", "return", "self", ".", "result", "except", "ValueError", ":", "LOG", ".", "exception", "(", "'Failed to understand rule %r'", ",", "rule", ")", "# Fail closed", "return", "checks", ".", "FalseCheck", "(", ")" ]
Parses a policy into a tree. Translates a policy written in the policy language into a tree of Check objects.
[ "Parses", "policy", "to", "tree", "." ]
7709ae5f371146f8c90380d0877a5e59d731f644
https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_parser.py#L188-L207
train
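A hypothetical behaviour sketch for `Parser.parse`, assuming the class is importable from `policy._parser` as the record's path suggests; the empty-rule and fail-closed cases follow directly from the code above.

```python
from policy._parser import Parser   # import path is an assumption

p = Parser(raise_error=False)
print(type(p.parse("")).__name__)                           # TrueCheck: empty rule
print(type(p.parse("role:admin or role:editor")).__name__)
print(type(p.parse("role:admin or")).__name__)              # dangling operator
# The last call should fall into the ValueError branch and return FalseCheck.
```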
garenchan/policy
policy/_parser.py
Parser._mix_or_and_expr
def _mix_or_and_expr(self, or_expr, _and, check): """Modify the case 'A or B and C' AND operator's priority is higher than OR operator. """ or_expr, check1 = or_expr.pop_check() if isinstance(check1, checks.AndCheck): and_expr = check1 and_expr.add_check(check) else: and_expr = checks.AndCheck(check1, check) return [('or_expr', or_expr.add_check(and_expr))]
python
def _mix_or_and_expr(self, or_expr, _and, check): """Modify the case 'A or B and C' AND operator's priority is higher than OR operator. """ or_expr, check1 = or_expr.pop_check() if isinstance(check1, checks.AndCheck): and_expr = check1 and_expr.add_check(check) else: and_expr = checks.AndCheck(check1, check) return [('or_expr', or_expr.add_check(and_expr))]
[ "def", "_mix_or_and_expr", "(", "self", ",", "or_expr", ",", "_and", ",", "check", ")", ":", "or_expr", ",", "check1", "=", "or_expr", ".", "pop_check", "(", ")", "if", "isinstance", "(", "check1", ",", "checks", ".", "AndCheck", ")", ":", "and_expr", "=", "check1", "and_expr", ".", "add_check", "(", "check", ")", "else", ":", "and_expr", "=", "checks", ".", "AndCheck", "(", "check1", ",", "check", ")", "return", "[", "(", "'or_expr'", ",", "or_expr", ".", "add_check", "(", "and_expr", ")", ")", "]" ]
Handle the case 'A or B and C': the AND operator's priority is higher than the OR operator's, so C is attached to B in an AndCheck before the result is added back to the OR expression.
[ "Modify", "the", "case", "A", "or", "B", "and", "C" ]
7709ae5f371146f8c90380d0877a5e59d731f644
https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_parser.py#L226-L238
train
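A standalone sketch showing the grouping `_mix_or_and_expr` enforces for 'A or B and C': C is folded into an AND with the OR expression's last operand, so the result is A or (B and C). The two check classes below are minimal stand-ins for `policy.checks`.

```python
class AndCheck:
    def __init__(self, *rules):
        self.rules = list(rules)
    def add_check(self, rule):
        self.rules.append(rule)
        return self
    def __repr__(self):
        return '(' + ' and '.join(map(repr, self.rules)) + ')'

class OrCheck:
    def __init__(self, *rules):
        self.rules = list(rules)
    def add_check(self, rule):
        self.rules.append(rule)
        return self
    def pop_check(self):
        return self, self.rules.pop()
    def __repr__(self):
        return '(' + ' or '.join(map(repr, self.rules)) + ')'

def mix_or_and(or_expr, check):
    # Same logic as the record: fold `check` into an AND with the OR's
    # last operand, then put the AND back into the OR expression.
    or_expr, check1 = or_expr.pop_check()
    if isinstance(check1, AndCheck):
        and_expr = check1.add_check(check)
    else:
        and_expr = AndCheck(check1, check)
    return or_expr.add_check(and_expr)

# The parser has already built (A or B) when it sees "and C".
print(mix_or_and(OrCheck('A', 'B'), 'C'))   # ('A' or ('B' and 'C'))
```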
inveniosoftware/invenio-query-parser
invenio_query_parser/utils.py
build_valid_keywords_grammar
def build_valid_keywords_grammar(keywords=None): """Update parser grammar to add a list of allowed keywords.""" from invenio_query_parser.parser import KeywordQuery, KeywordRule, \ NotKeywordValue, SimpleQuery, ValueQuery if keywords: KeywordRule.grammar = attr('value', re.compile( r"(\d\d\d\w{{0,3}}|{0})\b".format("|".join(keywords), re.I))) NotKeywordValue.grammar = attr('value', re.compile( r'\b(?!\d\d\d\w{{0,3}}|{0}:)\S+\b:'.format( ":|".join(keywords)))) SimpleQuery.grammar = attr( 'op', [NotKeywordValue, KeywordQuery, ValueQuery]) else: KeywordRule.grammar = attr('value', re.compile(r"[\w\d]+(\.[\w\d]+)*")) SimpleQuery.grammar = attr('op', [KeywordQuery, ValueQuery])
python
def build_valid_keywords_grammar(keywords=None): """Update parser grammar to add a list of allowed keywords.""" from invenio_query_parser.parser import KeywordQuery, KeywordRule, \ NotKeywordValue, SimpleQuery, ValueQuery if keywords: KeywordRule.grammar = attr('value', re.compile( r"(\d\d\d\w{{0,3}}|{0})\b".format("|".join(keywords), re.I))) NotKeywordValue.grammar = attr('value', re.compile( r'\b(?!\d\d\d\w{{0,3}}|{0}:)\S+\b:'.format( ":|".join(keywords)))) SimpleQuery.grammar = attr( 'op', [NotKeywordValue, KeywordQuery, ValueQuery]) else: KeywordRule.grammar = attr('value', re.compile(r"[\w\d]+(\.[\w\d]+)*")) SimpleQuery.grammar = attr('op', [KeywordQuery, ValueQuery])
[ "def", "build_valid_keywords_grammar", "(", "keywords", "=", "None", ")", ":", "from", "invenio_query_parser", ".", "parser", "import", "KeywordQuery", ",", "KeywordRule", ",", "NotKeywordValue", ",", "SimpleQuery", ",", "ValueQuery", "if", "keywords", ":", "KeywordRule", ".", "grammar", "=", "attr", "(", "'value'", ",", "re", ".", "compile", "(", "r\"(\\d\\d\\d\\w{{0,3}}|{0})\\b\"", ".", "format", "(", "\"|\"", ".", "join", "(", "keywords", ")", ",", "re", ".", "I", ")", ")", ")", "NotKeywordValue", ".", "grammar", "=", "attr", "(", "'value'", ",", "re", ".", "compile", "(", "r'\\b(?!\\d\\d\\d\\w{{0,3}}|{0}:)\\S+\\b:'", ".", "format", "(", "\":|\"", ".", "join", "(", "keywords", ")", ")", ")", ")", "SimpleQuery", ".", "grammar", "=", "attr", "(", "'op'", ",", "[", "NotKeywordValue", ",", "KeywordQuery", ",", "ValueQuery", "]", ")", "else", ":", "KeywordRule", ".", "grammar", "=", "attr", "(", "'value'", ",", "re", ".", "compile", "(", "r\"[\\w\\d]+(\\.[\\w\\d]+)*\"", ")", ")", "SimpleQuery", ".", "grammar", "=", "attr", "(", "'op'", ",", "[", "KeywordQuery", ",", "ValueQuery", "]", ")" ]
Update parser grammar to add a list of allowed keywords.
[ "Update", "parser", "grammar", "to", "add", "a", "list", "of", "allowed", "keywords", "." ]
21a2c36318003ff52d2e18e7196bb420db8ecb4b
https://github.com/inveniosoftware/invenio-query-parser/blob/21a2c36318003ff52d2e18e7196bb420db8ecb4b/invenio_query_parser/utils.py#L34-L51
train
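The keyword branch builds an alternation like `(\d\d\d\w{0,3}|author|title)\b`; the doubled braces in the record are `str.format` escapes. Note that, as written in the record, `re.I` is passed to `.format()` rather than to `re.compile()`, so the flag never takes effect. Below is a standalone sketch of the alternation with a made-up keyword list and with the flag given to `re.compile` so it actually applies.

```python
import re

keywords = ["author", "title", "year"]           # made-up keyword list
keyword_re = re.compile(
    r"(\d\d\d\w{{0,3}}|{0})\b".format("|".join(keywords)),
    re.I)                                        # flag passed to re.compile here

for probe in ["author", "TITLE", "035__a", "abstract"]:
    print(probe, bool(keyword_re.match(probe)))
# author True / TITLE True / 035__a True / abstract False
```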
frostming/marko
marko/renderer.py
Renderer.render
def render(self, element): """Renders the given element to string. :param element: a element to be rendered. :returns: the output string or any values. """ # Store the root node to provide some context to render functions if not self.root_node: self.root_node = element render_func = getattr( self, self._cls_to_func_name(element.__class__), None) if not render_func: render_func = self.render_children return render_func(element)
python
def render(self, element): """Renders the given element to string. :param element: a element to be rendered. :returns: the output string or any values. """ # Store the root node to provide some context to render functions if not self.root_node: self.root_node = element render_func = getattr( self, self._cls_to_func_name(element.__class__), None) if not render_func: render_func = self.render_children return render_func(element)
[ "def", "render", "(", "self", ",", "element", ")", ":", "# Store the root node to provide some context to render functions", "if", "not", "self", ".", "root_node", ":", "self", ".", "root_node", "=", "element", "render_func", "=", "getattr", "(", "self", ",", "self", ".", "_cls_to_func_name", "(", "element", ".", "__class__", ")", ",", "None", ")", "if", "not", "render_func", ":", "render_func", "=", "self", ".", "render_children", "return", "render_func", "(", "element", ")" ]
Renders the given element to a string. :param element: an element to be rendered. :returns: the output string or any other value returned by the matched render function.
[ "Renders", "the", "given", "element", "to", "string", "." ]
1cd030b665fa37bad1f8b3a25a89ce1a7c491dde
https://github.com/frostming/marko/blob/1cd030b665fa37bad1f8b3a25a89ce1a7c491dde/marko/renderer.py#L37-L50
train
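A standalone sketch of the class-name-based dispatch with a children fallback, in the same spirit as `Renderer.render`; the element classes, the name-mangling regex and the toy renderer below are illustrations, not marko's actual API.

```python
import re

class Text:
    def __init__(self, content):
        self.content = content

class Emphasis:
    def __init__(self, *children):
        self.children = children

class ToyRenderer:
    def render(self, element):
        # e.g. Emphasis -> render_emphasis; fall back to render_children.
        name = 'render_' + re.sub(r'(?<!^)(?=[A-Z])', '_',
                                  type(element).__name__).lower()
        render_func = getattr(self, name, self.render_children)
        return render_func(element)

    def render_children(self, element):
        return ''.join(self.render(child) for child in element.children)

    def render_text(self, element):
        return element.content

print(ToyRenderer().render(Emphasis(Text('hello'), Text('!'))))   # hello!
```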
frostming/marko
marko/renderer.py
Renderer.render_children
def render_children(self, element): """ Recursively renders child elements. Joins the rendered strings with no space in between. If newlines / spaces are needed between elements, add them in their respective templates, or override this function in the renderer subclass, so that whitespace won't seem to appear magically for anyone reading your program. :param element: a branch node who has children attribute. """ rendered = [self.render(child) for child in element.children] return ''.join(rendered)
python
def render_children(self, element): """ Recursively renders child elements. Joins the rendered strings with no space in between. If newlines / spaces are needed between elements, add them in their respective templates, or override this function in the renderer subclass, so that whitespace won't seem to appear magically for anyone reading your program. :param element: a branch node who has children attribute. """ rendered = [self.render(child) for child in element.children] return ''.join(rendered)
[ "def", "render_children", "(", "self", ",", "element", ")", ":", "rendered", "=", "[", "self", ".", "render", "(", "child", ")", "for", "child", "in", "element", ".", "children", "]", "return", "''", ".", "join", "(", "rendered", ")" ]
Recursively renders child elements. Joins the rendered strings with no space in between. If newlines / spaces are needed between elements, add them in their respective templates, or override this function in the renderer subclass, so that whitespace won't seem to appear magically for anyone reading your program. :param element: a branch node that has a children attribute.
[ "Recursively", "renders", "child", "elements", ".", "Joins", "the", "rendered", "strings", "with", "no", "space", "in", "between", "." ]
1cd030b665fa37bad1f8b3a25a89ce1a7c491dde
https://github.com/frostming/marko/blob/1cd030b665fa37bad1f8b3a25a89ce1a7c491dde/marko/renderer.py#L52-L65
train
jstitch/MambuPy
MambuPy/rest/mambuuser.py
MambuUser.setGroups
def setGroups(self, *args, **kwargs): """Adds the groups assigned to this user to a 'groups' field. Returns the number of requests done to Mambu. """ try: groups = self.mambugroupsclass(creditOfficerUsername=self['username'], *args, **kwargs) except AttributeError as ae: from .mambugroup import MambuGroups self.mambugroupsclass = MambuGroups groups = self.mambugroupsclass(creditOfficerUsername=self['username'], *args, **kwargs) self['groups'] = groups return 1
python
def setGroups(self, *args, **kwargs): """Adds the groups assigned to this user to a 'groups' field. Returns the number of requests done to Mambu. """ try: groups = self.mambugroupsclass(creditOfficerUsername=self['username'], *args, **kwargs) except AttributeError as ae: from .mambugroup import MambuGroups self.mambugroupsclass = MambuGroups groups = self.mambugroupsclass(creditOfficerUsername=self['username'], *args, **kwargs) self['groups'] = groups return 1
[ "def", "setGroups", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "groups", "=", "self", ".", "mambugroupsclass", "(", "creditOfficerUsername", "=", "self", "[", "'username'", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "AttributeError", "as", "ae", ":", "from", ".", "mambugroup", "import", "MambuGroups", "self", ".", "mambugroupsclass", "=", "MambuGroups", "groups", "=", "self", ".", "mambugroupsclass", "(", "creditOfficerUsername", "=", "self", "[", "'username'", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", "[", "'groups'", "]", "=", "groups", "return", "1" ]
Adds the groups assigned to this user to a 'groups' field. Returns the number of requests done to Mambu.
[ "Adds", "the", "groups", "assigned", "to", "this", "user", "to", "a", "groups", "field", "." ]
2af98cc12e7ed5ec183b3e97644e880e70b79ee8
https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/rest/mambuuser.py#L60-L73
train
jstitch/MambuPy
MambuPy/rest/mambuuser.py
MambuUser.setRoles
def setRoles(self, *args, **kwargs): """Adds the role assigned to this user to a 'role' field. Depends on the 'role' field that comes with a fullDetails=True build of the MambuUser. Returns the number of requests done to Mambu. """ try: role = self.mamburoleclass(entid=self['role']['encodedKey'], *args, **kwargs) except KeyError: return 0 except AttributeError as ae: from .mamburoles import MambuRole self.mamburoleclass = MambuRole try: role = self.mamburoleclass(entid=self['role']['encodedKey'], *args, **kwargs) except KeyError: return 0 self['role']['role'] = role return 1
python
def setRoles(self, *args, **kwargs): """Adds the role assigned to this user to a 'role' field. Depends on the 'role' field that comes with a fullDetails=True build of the MambuUser. Returns the number of requests done to Mambu. """ try: role = self.mamburoleclass(entid=self['role']['encodedKey'], *args, **kwargs) except KeyError: return 0 except AttributeError as ae: from .mamburoles import MambuRole self.mamburoleclass = MambuRole try: role = self.mamburoleclass(entid=self['role']['encodedKey'], *args, **kwargs) except KeyError: return 0 self['role']['role'] = role return 1
[ "def", "setRoles", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "role", "=", "self", ".", "mamburoleclass", "(", "entid", "=", "self", "[", "'role'", "]", "[", "'encodedKey'", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "KeyError", ":", "return", "0", "except", "AttributeError", "as", "ae", ":", "from", ".", "mamburoles", "import", "MambuRole", "self", ".", "mamburoleclass", "=", "MambuRole", "try", ":", "role", "=", "self", ".", "mamburoleclass", "(", "entid", "=", "self", "[", "'role'", "]", "[", "'encodedKey'", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "KeyError", ":", "return", "0", "self", "[", "'role'", "]", "[", "'role'", "]", "=", "role", "return", "1" ]
Adds the role assigned to this user to a 'role' field. Depends on the 'role' field that comes with a fullDetails=True build of the MambuUser. Returns the number of requests done to Mambu.
[ "Adds", "the", "role", "assigned", "to", "this", "user", "to", "a", "role", "field", "." ]
2af98cc12e7ed5ec183b3e97644e880e70b79ee8
https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/rest/mambuuser.py#L76-L97
train
jstitch/MambuPy
MambuPy/rest/mambuuser.py
MambuUser.create
def create(self, data, *args, **kwargs): """Creates an user in Mambu Parameters -data dictionary with data to send """ super(MambuUser, self).create(data) self['user'][self.customFieldName] = self['customInformation'] self.init(attrs=self['user'])
python
def create(self, data, *args, **kwargs): """Creates an user in Mambu Parameters -data dictionary with data to send """ super(MambuUser, self).create(data) self['user'][self.customFieldName] = self['customInformation'] self.init(attrs=self['user'])
[ "def", "create", "(", "self", ",", "data", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", "MambuUser", ",", "self", ")", ".", "create", "(", "data", ")", "self", "[", "'user'", "]", "[", "self", ".", "customFieldName", "]", "=", "self", "[", "'customInformation'", "]", "self", ".", "init", "(", "attrs", "=", "self", "[", "'user'", "]", ")" ]
Creates a user in Mambu. Parameters: -data: dictionary with the data to send
[ "Creates", "an", "user", "in", "Mambu" ]
2af98cc12e7ed5ec183b3e97644e880e70b79ee8
https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/rest/mambuuser.py#L100-L109
train
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/model/attribute_network.py
AttributeNetwork.write_attribute_adj_list
def write_attribute_adj_list(self, path): """Write the bipartite attribute graph to a file. :param str path: Path to the output file. """ att_mappings = self.get_attribute_mappings() with open(path, mode="w") as file: for k, v in att_mappings.items(): print("{} {}".format(k, " ".join(str(e) for e in v)), file=file)
python
def write_attribute_adj_list(self, path): """Write the bipartite attribute graph to a file. :param str path: Path to the output file. """ att_mappings = self.get_attribute_mappings() with open(path, mode="w") as file: for k, v in att_mappings.items(): print("{} {}".format(k, " ".join(str(e) for e in v)), file=file)
[ "def", "write_attribute_adj_list", "(", "self", ",", "path", ")", ":", "att_mappings", "=", "self", ".", "get_attribute_mappings", "(", ")", "with", "open", "(", "path", ",", "mode", "=", "\"w\"", ")", "as", "file", ":", "for", "k", ",", "v", "in", "att_mappings", ".", "items", "(", ")", ":", "print", "(", "\"{} {}\"", ".", "format", "(", "k", ",", "\" \"", ".", "join", "(", "str", "(", "e", ")", "for", "e", "in", "v", ")", ")", ",", "file", "=", "file", ")" ]
Write the bipartite attribute graph to a file. :param str path: Path to the output file.
[ "Write", "the", "bipartite", "attribute", "graph", "to", "a", "file", "." ]
4d7b6713485f2d0a0957e6457edc1b1b5a237460
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/model/attribute_network.py#L25-L34
train
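A standalone sketch of the adjacency-list format the method writes: one line per vertex index, followed by that vertex's enumerated attribute ids. The mapping below is made up; in the real pipeline it comes from `get_attribute_mappings`.

```python
from collections import defaultdict

att_mappings = defaultdict(list)
att_mappings[0].extend([11, 14])   # vertex 0: up-regulated + one disease id
att_mappings[1].extend([13])       # vertex 1: not differentially expressed

with open("attributes.adj", mode="w") as file:
    for k, v in att_mappings.items():
        print("{} {}".format(k, " ".join(str(e) for e in v)), file=file)

with open("attributes.adj") as file:
    print(file.read(), end="")
# 0 11 14
# 1 13
```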
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/model/attribute_network.py
AttributeNetwork.get_attribute_mappings
def get_attribute_mappings(self): """Get a dictionary of mappings between vertices and enumerated attributes. :return: Dictionary of mappings between vertices and enumerated attributes. """ att_ind_start = len(self.graph.vs) att_mappings = defaultdict(list) att_ind_end = self._add_differential_expression_attributes(att_ind_start, att_mappings) if "associated_diseases" in self.graph.vs.attributes(): self._add_disease_association_attributes(att_ind_end, att_mappings) return att_mappings
python
def get_attribute_mappings(self): """Get a dictionary of mappings between vertices and enumerated attributes. :return: Dictionary of mappings between vertices and enumerated attributes. """ att_ind_start = len(self.graph.vs) att_mappings = defaultdict(list) att_ind_end = self._add_differential_expression_attributes(att_ind_start, att_mappings) if "associated_diseases" in self.graph.vs.attributes(): self._add_disease_association_attributes(att_ind_end, att_mappings) return att_mappings
[ "def", "get_attribute_mappings", "(", "self", ")", ":", "att_ind_start", "=", "len", "(", "self", ".", "graph", ".", "vs", ")", "att_mappings", "=", "defaultdict", "(", "list", ")", "att_ind_end", "=", "self", ".", "_add_differential_expression_attributes", "(", "att_ind_start", ",", "att_mappings", ")", "if", "\"associated_diseases\"", "in", "self", ".", "graph", ".", "vs", ".", "attributes", "(", ")", ":", "self", ".", "_add_disease_association_attributes", "(", "att_ind_end", ",", "att_mappings", ")", "return", "att_mappings" ]
Get a dictionary of mappings between vertices and enumerated attributes. :return: Dictionary of mappings between vertices and enumerated attributes.
[ "Get", "a", "dictionary", "of", "mappings", "between", "vertices", "and", "enumerated", "attributes", "." ]
4d7b6713485f2d0a0957e6457edc1b1b5a237460
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/model/attribute_network.py#L36-L46
train
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/model/attribute_network.py
AttributeNetwork._add_differential_expression_attributes
def _add_differential_expression_attributes(self, att_ind_start, att_mappings): """Add differential expression information to the attribute mapping dictionary. :param int att_ind_start: Start index for enumerating the attributes. :param dict att_mappings: Dictionary of mappings between vertices and enumerated attributes. :return: End index for attribute enumeration. """ up_regulated_ind = self.graph.vs.select(up_regulated_eq=True).indices down_regulated_ind = self.graph.vs.select(down_regulated_eq=True).indices rest_ind = self.graph.vs.select(diff_expressed_eq=False).indices self._add_attribute_values(att_ind_start + 1, att_mappings, up_regulated_ind) self._add_attribute_values(att_ind_start + 2, att_mappings, down_regulated_ind) self._add_attribute_values(att_ind_start + 3, att_mappings, rest_ind) return att_ind_start + 4
python
def _add_differential_expression_attributes(self, att_ind_start, att_mappings): """Add differential expression information to the attribute mapping dictionary. :param int att_ind_start: Start index for enumerating the attributes. :param dict att_mappings: Dictionary of mappings between vertices and enumerated attributes. :return: End index for attribute enumeration. """ up_regulated_ind = self.graph.vs.select(up_regulated_eq=True).indices down_regulated_ind = self.graph.vs.select(down_regulated_eq=True).indices rest_ind = self.graph.vs.select(diff_expressed_eq=False).indices self._add_attribute_values(att_ind_start + 1, att_mappings, up_regulated_ind) self._add_attribute_values(att_ind_start + 2, att_mappings, down_regulated_ind) self._add_attribute_values(att_ind_start + 3, att_mappings, rest_ind) return att_ind_start + 4
[ "def", "_add_differential_expression_attributes", "(", "self", ",", "att_ind_start", ",", "att_mappings", ")", ":", "up_regulated_ind", "=", "self", ".", "graph", ".", "vs", ".", "select", "(", "up_regulated_eq", "=", "True", ")", ".", "indices", "down_regulated_ind", "=", "self", ".", "graph", ".", "vs", ".", "select", "(", "down_regulated_eq", "=", "True", ")", ".", "indices", "rest_ind", "=", "self", ".", "graph", ".", "vs", ".", "select", "(", "diff_expressed_eq", "=", "False", ")", ".", "indices", "self", ".", "_add_attribute_values", "(", "att_ind_start", "+", "1", ",", "att_mappings", ",", "up_regulated_ind", ")", "self", ".", "_add_attribute_values", "(", "att_ind_start", "+", "2", ",", "att_mappings", ",", "down_regulated_ind", ")", "self", ".", "_add_attribute_values", "(", "att_ind_start", "+", "3", ",", "att_mappings", ",", "rest_ind", ")", "return", "att_ind_start", "+", "4" ]
Add differential expression information to the attribute mapping dictionary. :param int att_ind_start: Start index for enumerating the attributes. :param dict att_mappings: Dictionary of mappings between vertices and enumerated attributes. :return: End index for attribute enumeration.
[ "Add", "differential", "expression", "information", "to", "the", "attribute", "mapping", "dictionary", "." ]
4d7b6713485f2d0a0957e6457edc1b1b5a237460
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/model/attribute_network.py#L48-L62
train
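A minimal sketch of the enumeration scheme: with `att_ind_start` set to the number of vertices, up-regulated, down-regulated and unchanged genes receive attribute ids start+1, start+2 and start+3 respectively. The vertex index lists below are invented.

```python
from collections import defaultdict

att_ind_start = 10                     # e.g. a graph with 10 vertices
att_mappings = defaultdict(list)

up, down, rest = [0, 3], [1], [2, 4]   # made-up vertex index lists
for value, indices in [(att_ind_start + 1, up),
                       (att_ind_start + 2, down),
                       (att_ind_start + 3, rest)]:
    for i in indices:
        att_mappings[i].append(value)

print(dict(att_mappings))   # {0: [11], 3: [11], 1: [12], 2: [13], 4: [13]}
```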
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/model/attribute_network.py
AttributeNetwork._add_attribute_values
def _add_attribute_values(self, value, att_mappings, indices): """Add an attribute value to the given vertices. :param int value: Attribute value. :param dict att_mappings: Dictionary of mappings between vertices and enumerated attributes. :param list indices: Indices of the vertices. """ for i in indices: att_mappings[i].append(value)
python
def _add_attribute_values(self, value, att_mappings, indices): """Add an attribute value to the given vertices. :param int value: Attribute value. :param dict att_mappings: Dictionary of mappings between vertices and enumerated attributes. :param list indices: Indices of the vertices. """ for i in indices: att_mappings[i].append(value)
[ "def", "_add_attribute_values", "(", "self", ",", "value", ",", "att_mappings", ",", "indices", ")", ":", "for", "i", "in", "indices", ":", "att_mappings", "[", "i", "]", ".", "append", "(", "value", ")" ]
Add an attribute value to the given vertices. :param int value: Attribute value. :param dict att_mappings: Dictionary of mappings between vertices and enumerated attributes. :param list indices: Indices of the vertices.
[ "Add", "an", "attribute", "value", "to", "the", "given", "vertices", "." ]
4d7b6713485f2d0a0957e6457edc1b1b5a237460
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/model/attribute_network.py#L64-L72
train
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/model/attribute_network.py
AttributeNetwork._add_disease_association_attributes
def _add_disease_association_attributes(self, att_ind_start, att_mappings): """Add disease association information to the attribute mapping dictionary. :param int att_ind_start: Start index for enumerating the attributes. :param dict att_mappings: Dictionary of mappings between vertices and enumerated attributes. """ disease_mappings = self.get_disease_mappings(att_ind_start) for vertex in self.graph.vs: assoc_diseases = vertex["associated_diseases"] if assoc_diseases is not None: assoc_disease_ids = [disease_mappings[disease] for disease in assoc_diseases] att_mappings[vertex.index].extend(assoc_disease_ids)
python
def _add_disease_association_attributes(self, att_ind_start, att_mappings): """Add disease association information to the attribute mapping dictionary. :param int att_ind_start: Start index for enumerating the attributes. :param dict att_mappings: Dictionary of mappings between vertices and enumerated attributes. """ disease_mappings = self.get_disease_mappings(att_ind_start) for vertex in self.graph.vs: assoc_diseases = vertex["associated_diseases"] if assoc_diseases is not None: assoc_disease_ids = [disease_mappings[disease] for disease in assoc_diseases] att_mappings[vertex.index].extend(assoc_disease_ids)
[ "def", "_add_disease_association_attributes", "(", "self", ",", "att_ind_start", ",", "att_mappings", ")", ":", "disease_mappings", "=", "self", ".", "get_disease_mappings", "(", "att_ind_start", ")", "for", "vertex", "in", "self", ".", "graph", ".", "vs", ":", "assoc_diseases", "=", "vertex", "[", "\"associated_diseases\"", "]", "if", "assoc_diseases", "is", "not", "None", ":", "assoc_disease_ids", "=", "[", "disease_mappings", "[", "disease", "]", "for", "disease", "in", "assoc_diseases", "]", "att_mappings", "[", "vertex", ".", "index", "]", ".", "extend", "(", "assoc_disease_ids", ")" ]
Add disease association information to the attribute mapping dictionary. :param int att_ind_start: Start index for enumerating the attributes. :param dict att_mappings: Dictionary of mappings between vertices and enumerated attributes.
[ "Add", "disease", "association", "information", "to", "the", "attribute", "mapping", "dictionary", "." ]
4d7b6713485f2d0a0957e6457edc1b1b5a237460
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/model/attribute_network.py#L74-L85
train
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/model/attribute_network.py
AttributeNetwork.get_disease_mappings
def get_disease_mappings(self, att_ind_start): """Get a dictionary of enumerations for diseases. :param int att_ind_start: Starting index for enumeration. :return: Dictionary of disease, number pairs. """ all_disease_ids = self.get_all_unique_diseases() disease_enum = enumerate(all_disease_ids, start=att_ind_start) disease_mappings = {} for num, dis in disease_enum: disease_mappings[dis] = num return disease_mappings
python
def get_disease_mappings(self, att_ind_start): """Get a dictionary of enumerations for diseases. :param int att_ind_start: Starting index for enumeration. :return: Dictionary of disease, number pairs. """ all_disease_ids = self.get_all_unique_diseases() disease_enum = enumerate(all_disease_ids, start=att_ind_start) disease_mappings = {} for num, dis in disease_enum: disease_mappings[dis] = num return disease_mappings
[ "def", "get_disease_mappings", "(", "self", ",", "att_ind_start", ")", ":", "all_disease_ids", "=", "self", ".", "get_all_unique_diseases", "(", ")", "disease_enum", "=", "enumerate", "(", "all_disease_ids", ",", "start", "=", "att_ind_start", ")", "disease_mappings", "=", "{", "}", "for", "num", ",", "dis", "in", "disease_enum", ":", "disease_mappings", "[", "dis", "]", "=", "num", "return", "disease_mappings" ]
Get a dictionary of enumerations for diseases. :param int att_ind_start: Starting index for enumeration. :return: Dictionary of disease, number pairs.
[ "Get", "a", "dictionary", "of", "enumerations", "for", "diseases", "." ]
4d7b6713485f2d0a0957e6457edc1b1b5a237460
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/model/attribute_network.py#L87-L98
train
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/model/attribute_network.py
AttributeNetwork.get_all_unique_diseases
def get_all_unique_diseases(self): """Get all unique diseases that are known to the network. :return: All unique disease identifiers. """ all_disease_ids = self.graph.vs["associated_diseases"] # remove None values from list all_disease_ids = [lst for lst in all_disease_ids if lst is not None] # flatten list of lists, get unique elements all_disease_ids = list(set([id for sublist in all_disease_ids for id in sublist])) return all_disease_ids
python
def get_all_unique_diseases(self): """Get all unique diseases that are known to the network. :return: All unique disease identifiers. """ all_disease_ids = self.graph.vs["associated_diseases"] # remove None values from list all_disease_ids = [lst for lst in all_disease_ids if lst is not None] # flatten list of lists, get unique elements all_disease_ids = list(set([id for sublist in all_disease_ids for id in sublist])) return all_disease_ids
[ "def", "get_all_unique_diseases", "(", "self", ")", ":", "all_disease_ids", "=", "self", ".", "graph", ".", "vs", "[", "\"associated_diseases\"", "]", "# remove None values from list", "all_disease_ids", "=", "[", "lst", "for", "lst", "in", "all_disease_ids", "if", "lst", "is", "not", "None", "]", "# flatten list of lists, get unique elements", "all_disease_ids", "=", "list", "(", "set", "(", "[", "id", "for", "sublist", "in", "all_disease_ids", "for", "id", "in", "sublist", "]", ")", ")", "return", "all_disease_ids" ]
Get all unique diseases that are known to the network. :return: All unique disease identifiers.
[ "Get", "all", "unique", "diseases", "that", "are", "known", "to", "the", "network", "." ]
4d7b6713485f2d0a0957e6457edc1b1b5a237460
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/model/attribute_network.py#L100-L110
train
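A minimal sketch of the flatten-and-deduplicate step, with invented disease identifiers and a None entry standing in for a vertex without associations.

```python
all_disease_ids = [["DOID:1612", "DOID:9352"], None, ["DOID:9352"]]
# remove None values, then flatten the list of lists and deduplicate
all_disease_ids = [lst for lst in all_disease_ids if lst is not None]
unique = list(set(d for sublist in all_disease_ids for d in sublist))
print(sorted(unique))   # ['DOID:1612', 'DOID:9352']
```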
BernardFW/bernard
src/bernard/analytics/_helpers.py
page_view
def page_view(url): """ Page view decorator. Put that around a state handler function in order to log a page view each time the handler gets called. :param url: simili-URL that you want to give to the state """ def decorator(func): @wraps(func) async def wrapper(self: BaseState, *args, **kwargs): user_id = self.request.user.id try: user_lang = await self.request.user.get_locale() except NotImplementedError: user_lang = '' title = self.__class__.__name__ # noinspection PyTypeChecker async for p in providers(): await p.page_view(url, title, user_id, user_lang) return await func(self, *args, **kwargs) return wrapper return decorator
python
def page_view(url): """ Page view decorator. Put that around a state handler function in order to log a page view each time the handler gets called. :param url: simili-URL that you want to give to the state """ def decorator(func): @wraps(func) async def wrapper(self: BaseState, *args, **kwargs): user_id = self.request.user.id try: user_lang = await self.request.user.get_locale() except NotImplementedError: user_lang = '' title = self.__class__.__name__ # noinspection PyTypeChecker async for p in providers(): await p.page_view(url, title, user_id, user_lang) return await func(self, *args, **kwargs) return wrapper return decorator
[ "def", "page_view", "(", "url", ")", ":", "def", "decorator", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "async", "def", "wrapper", "(", "self", ":", "BaseState", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "user_id", "=", "self", ".", "request", ".", "user", ".", "id", "try", ":", "user_lang", "=", "await", "self", ".", "request", ".", "user", ".", "get_locale", "(", ")", "except", "NotImplementedError", ":", "user_lang", "=", "''", "title", "=", "self", ".", "__class__", ".", "__name__", "# noinspection PyTypeChecker", "async", "for", "p", "in", "providers", "(", ")", ":", "await", "p", ".", "page_view", "(", "url", ",", "title", ",", "user_id", ",", "user_lang", ")", "return", "await", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "decorator" ]
Page view decorator. Put it around a state handler function in order to log a page view each time the handler gets called. :param url: pseudo-URL that you want to give to the state
[ "Page", "view", "decorator", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/analytics/_helpers.py#L14-L42
train
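A standalone sketch of an analytics page-view decorator in the same spirit: the provider registry, the state class and the locale handling here are simplified stand-ins, not bernard's real API.

```python
import asyncio
from functools import wraps

PROVIDERS = []                      # stand-in for the providers() registry

def page_view(url):
    def decorator(func):
        @wraps(func)
        async def wrapper(self, *args, **kwargs):
            for provider in PROVIDERS:
                await provider.page_view(url, type(self).__name__,
                                         self.user_id, 'en')
            return await func(self, *args, **kwargs)
        return wrapper
    return decorator

class PrintProvider:
    async def page_view(self, url, title, user_id, user_lang):
        print('page view:', url, title, user_id, user_lang)

class HelloState:
    user_id = 'u42'

    @page_view('/hello')
    async def handle(self):
        return 'hi'

PROVIDERS.append(PrintProvider())
print(asyncio.run(HelloState().handle()))   # logs the view, then prints 'hi'
```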
pyQode/pyqode.cobol
pyqode/cobol/api/parsers/pic.py
parse_cobol
def parse_cobol(lines): """ Parses the COBOL - converts the COBOL line into a dictionary containing the information - parses the pic information into type, length, precision - ~~handles redefines~~ -> our implementation does not do that anymore because we want to display item that was redefined. """ output = [] intify = ["level", "occurs"] # All in 1 line now, let's parse for row in lines: match = CobolPatterns.row_pattern.match(row.strip()) if not match: _logger().warning("Found unmatched row %s" % row.strip()) continue match = match.groupdict() for i in intify: match[i] = int(match[i]) if match[i] is not None else None if match['pic'] is not None: match['pic_info'] = parse_pic_string(match['pic']) output.append(match) return output
python
def parse_cobol(lines): """ Parses the COBOL - converts the COBOL line into a dictionary containing the information - parses the pic information into type, length, precision - ~~handles redefines~~ -> our implementation does not do that anymore because we want to display item that was redefined. """ output = [] intify = ["level", "occurs"] # All in 1 line now, let's parse for row in lines: match = CobolPatterns.row_pattern.match(row.strip()) if not match: _logger().warning("Found unmatched row %s" % row.strip()) continue match = match.groupdict() for i in intify: match[i] = int(match[i]) if match[i] is not None else None if match['pic'] is not None: match['pic_info'] = parse_pic_string(match['pic']) output.append(match) return output
[ "def", "parse_cobol", "(", "lines", ")", ":", "output", "=", "[", "]", "intify", "=", "[", "\"level\"", ",", "\"occurs\"", "]", "# All in 1 line now, let's parse", "for", "row", "in", "lines", ":", "match", "=", "CobolPatterns", ".", "row_pattern", ".", "match", "(", "row", ".", "strip", "(", ")", ")", "if", "not", "match", ":", "_logger", "(", ")", ".", "warning", "(", "\"Found unmatched row %s\"", "%", "row", ".", "strip", "(", ")", ")", "continue", "match", "=", "match", ".", "groupdict", "(", ")", "for", "i", "in", "intify", ":", "match", "[", "i", "]", "=", "int", "(", "match", "[", "i", "]", ")", "if", "match", "[", "i", "]", "is", "not", "None", "else", "None", "if", "match", "[", "'pic'", "]", "is", "not", "None", ":", "match", "[", "'pic_info'", "]", "=", "parse_pic_string", "(", "match", "[", "'pic'", "]", ")", "output", ".", "append", "(", "match", ")", "return", "output" ]
Parses the COBOL - converts the COBOL line into a dictionary containing the information - parses the pic information into type, length, precision - ~~handles redefines~~ -> our implementation does not do that anymore because we want to display the item that was redefined.
[ "Parses", "the", "COBOL", "-", "converts", "the", "COBOL", "line", "into", "a", "dictionary", "containing", "the", "information", "-", "parses", "the", "pic", "information", "into", "type", "length", "precision", "-", "~~handles", "redefines~~", "-", ">", "our", "implementation", "does", "not", "do", "that", "anymore", "because", "we", "want", "to", "display", "item", "that", "was", "redefined", "." ]
eedae4e320a4b2d0c44abb2c3061091321648fb7
https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/api/parsers/pic.py#L114-L143
train
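A greatly simplified standalone sketch of the line-matching step: the pattern below only captures level, name and an optional PIC clause, whereas the package's `CobolPatterns.row_pattern` also handles redefines, occurs, indexed-by and more. The sample lines are invented.

```python
import re

row_pattern = re.compile(
    r"^(?P<level>\d{2})\s+(?P<name>[\w-]+)(\s+PIC\s+(?P<pic>\S+))?$")

lines = [
    "01 CUSTOMER-RECORD.",
    "05 CUSTOMER-NAME PIC X(30).",
    "05 BALANCE PIC 9(5)V99.",
]

for row in lines:
    match = row_pattern.match(row.strip().rstrip('.'))
    if not match:
        print("unmatched row", row.strip())
        continue
    print(match.groupdict())
# {'level': '01', 'name': 'CUSTOMER-RECORD', 'pic': None}
# {'level': '05', 'name': 'CUSTOMER-NAME', 'pic': 'X(30)'}
# {'level': '05', 'name': 'BALANCE', 'pic': '9(5)V99'}
```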
pyQode/pyqode.cobol
pyqode/cobol/api/parsers/pic.py
clean_names
def clean_names(lines, ensure_unique_names=False, strip_prefix=False, make_database_safe=False): """ Clean the names. Options to: - strip prefixes on names - enforce unique names - make database safe names by converting - to _ """ names = {} for row in lines: if strip_prefix: row['name'] = row['name'][row['name'].find('-') + 1:] if row['indexed_by'] is not None: row['indexed_by'] = row['indexed_by'][row['indexed_by'].find( '-') + 1:] if ensure_unique_names: i = 1 while (row['name'] if i == 1 else row['name'] + "-" + str(i)) in names: i += 1 names[row['name'] if i == 1 else row['name'] + "-" + str(i)] = 1 if i > 1: row['name'] = row['name'] + "-" + str(i) if make_database_safe: row['name'] = row['name'].replace("-", "_") return lines
python
def clean_names(lines, ensure_unique_names=False, strip_prefix=False, make_database_safe=False): """ Clean the names. Options to: - strip prefixes on names - enforce unique names - make database safe names by converting - to _ """ names = {} for row in lines: if strip_prefix: row['name'] = row['name'][row['name'].find('-') + 1:] if row['indexed_by'] is not None: row['indexed_by'] = row['indexed_by'][row['indexed_by'].find( '-') + 1:] if ensure_unique_names: i = 1 while (row['name'] if i == 1 else row['name'] + "-" + str(i)) in names: i += 1 names[row['name'] if i == 1 else row['name'] + "-" + str(i)] = 1 if i > 1: row['name'] = row['name'] + "-" + str(i) if make_database_safe: row['name'] = row['name'].replace("-", "_") return lines
[ "def", "clean_names", "(", "lines", ",", "ensure_unique_names", "=", "False", ",", "strip_prefix", "=", "False", ",", "make_database_safe", "=", "False", ")", ":", "names", "=", "{", "}", "for", "row", "in", "lines", ":", "if", "strip_prefix", ":", "row", "[", "'name'", "]", "=", "row", "[", "'name'", "]", "[", "row", "[", "'name'", "]", ".", "find", "(", "'-'", ")", "+", "1", ":", "]", "if", "row", "[", "'indexed_by'", "]", "is", "not", "None", ":", "row", "[", "'indexed_by'", "]", "=", "row", "[", "'indexed_by'", "]", "[", "row", "[", "'indexed_by'", "]", ".", "find", "(", "'-'", ")", "+", "1", ":", "]", "if", "ensure_unique_names", ":", "i", "=", "1", "while", "(", "row", "[", "'name'", "]", "if", "i", "==", "1", "else", "row", "[", "'name'", "]", "+", "\"-\"", "+", "str", "(", "i", ")", ")", "in", "names", ":", "i", "+=", "1", "names", "[", "row", "[", "'name'", "]", "if", "i", "==", "1", "else", "row", "[", "'name'", "]", "+", "\"-\"", "+", "str", "(", "i", ")", "]", "=", "1", "if", "i", ">", "1", ":", "row", "[", "'name'", "]", "=", "row", "[", "'name'", "]", "+", "\"-\"", "+", "str", "(", "i", ")", "if", "make_database_safe", ":", "row", "[", "'name'", "]", "=", "row", "[", "'name'", "]", ".", "replace", "(", "\"-\"", ",", "\"_\"", ")", "return", "lines" ]
Clean the names. Options to: - strip prefixes on names - enforce unique names - make database safe names by converting - to _
[ "Clean", "the", "names", "." ]
eedae4e320a4b2d0c44abb2c3061091321648fb7
https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/api/parsers/pic.py#L230-L263
train
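A standalone sketch of the prefix stripping and uniqueness suffixing, run over two made-up rows that collide once their prefixes are removed.

```python
rows = [{'name': 'WS-CUSTOMER-NAME'}, {'name': 'REC-CUSTOMER-NAME'}]

names = {}
for row in rows:
    # strip everything up to and including the first '-'
    row['name'] = row['name'][row['name'].find('-') + 1:]
    # ensure uniqueness by appending -2, -3, ... on collisions
    i = 1
    while (row['name'] if i == 1 else row['name'] + "-" + str(i)) in names:
        i += 1
    names[row['name'] if i == 1 else row['name'] + "-" + str(i)] = 1
    if i > 1:
        row['name'] = row['name'] + "-" + str(i)

print([row['name'] for row in rows])   # ['CUSTOMER-NAME', 'CUSTOMER-NAME-2']
```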
ikalnytskyi/holocron
src/holocron/__main__.py
create_app_from_yml
def create_app_from_yml(path): """Return an application instance created from YAML.""" try: with open(path, "rt", encoding="UTF-8") as f: try: # Substitute ALL occurrences of '%(here)s' with a path to a # directory with '.holocron.yml'. Please note, we also want # wrap the result into 'io.StringIO' in order to preserve # original filename in 'yaml.safe_load()' errors. interpolated = io.StringIO(f.read() % { "here": os.path.abspath(os.path.dirname(path))}) interpolated.name = f.name conf = yaml.safe_load(interpolated) except yaml.YAMLError as exc: raise RuntimeError( "Cannot parse a configuration file. Context: " + str(exc)) except FileNotFoundError: conf = {"metadata": None, "pipes": {}} return core.create_app(conf["metadata"], pipes=conf["pipes"])
python
def create_app_from_yml(path): """Return an application instance created from YAML.""" try: with open(path, "rt", encoding="UTF-8") as f: try: # Substitute ALL occurrences of '%(here)s' with a path to a # directory with '.holocron.yml'. Please note, we also want # wrap the result into 'io.StringIO' in order to preserve # original filename in 'yaml.safe_load()' errors. interpolated = io.StringIO(f.read() % { "here": os.path.abspath(os.path.dirname(path))}) interpolated.name = f.name conf = yaml.safe_load(interpolated) except yaml.YAMLError as exc: raise RuntimeError( "Cannot parse a configuration file. Context: " + str(exc)) except FileNotFoundError: conf = {"metadata": None, "pipes": {}} return core.create_app(conf["metadata"], pipes=conf["pipes"])
[ "def", "create_app_from_yml", "(", "path", ")", ":", "try", ":", "with", "open", "(", "path", ",", "\"rt\"", ",", "encoding", "=", "\"UTF-8\"", ")", "as", "f", ":", "try", ":", "# Substitute ALL occurrences of '%(here)s' with a path to a", "# directory with '.holocron.yml'. Please note, we also want", "# wrap the result into 'io.StringIO' in order to preserve", "# original filename in 'yaml.safe_load()' errors.", "interpolated", "=", "io", ".", "StringIO", "(", "f", ".", "read", "(", ")", "%", "{", "\"here\"", ":", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "path", ")", ")", "}", ")", "interpolated", ".", "name", "=", "f", ".", "name", "conf", "=", "yaml", ".", "safe_load", "(", "interpolated", ")", "except", "yaml", ".", "YAMLError", "as", "exc", ":", "raise", "RuntimeError", "(", "\"Cannot parse a configuration file. Context: \"", "+", "str", "(", "exc", ")", ")", "except", "FileNotFoundError", ":", "conf", "=", "{", "\"metadata\"", ":", "None", ",", "\"pipes\"", ":", "{", "}", "}", "return", "core", ".", "create_app", "(", "conf", "[", "\"metadata\"", "]", ",", "pipes", "=", "conf", "[", "\"pipes\"", "]", ")" ]
Return an application instance created from YAML.
[ "Return", "an", "application", "instance", "created", "from", "YAML", "." ]
d202f6bccfeca64162857c6d0ee5bb53e773d2f2
https://github.com/ikalnytskyi/holocron/blob/d202f6bccfeca64162857c6d0ee5bb53e773d2f2/src/holocron/__main__.py#L18-L40
train
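A standalone sketch of the `%(here)s` interpolation and the `io.StringIO` trick that preserves the filename for YAML error messages; the configuration content and the path are invented, only the `metadata`/`pipes` keys follow the record.

```python
import io
import os
import yaml

raw = """
metadata:
  url: http://example.com
pipes:
  build:
    - name: source
      path: "%(here)s/content"
"""

path = "/projects/blog/.holocron.yml"     # hypothetical config location
interpolated = io.StringIO(
    raw % {"here": os.path.abspath(os.path.dirname(path))})
interpolated.name = path                  # keeps the filename in YAML errors

conf = yaml.safe_load(interpolated)
print(conf["pipes"]["build"][0]["path"])  # /projects/blog/content
```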
ikalnytskyi/holocron
src/holocron/__main__.py
configure_logger
def configure_logger(level): """ Configure a root logger to print records in pretty format. The format is more readable for end users, since it's not necessary at all to know a record's dateime and a source of the record. Examples:: [INFO] message [WARN] message [ERRO] message :param level: a minimum logging level to be printed """ class _Formatter(logging.Formatter): def format(self, record): record.levelname = record.levelname[:4] return super(_Formatter, self).format(record) # create stream handler with custom formatter stream_handler = logging.StreamHandler() stream_handler.setFormatter(_Formatter("[%(levelname)s] %(message)s")) # configure root logger logger = logging.getLogger() logger.addHandler(stream_handler) logger.setLevel(level) # capture warnings issued by 'warnings' module logging.captureWarnings(True)
python
def configure_logger(level): """ Configure a root logger to print records in pretty format. The format is more readable for end users, since it's not necessary at all to know a record's dateime and a source of the record. Examples:: [INFO] message [WARN] message [ERRO] message :param level: a minimum logging level to be printed """ class _Formatter(logging.Formatter): def format(self, record): record.levelname = record.levelname[:4] return super(_Formatter, self).format(record) # create stream handler with custom formatter stream_handler = logging.StreamHandler() stream_handler.setFormatter(_Formatter("[%(levelname)s] %(message)s")) # configure root logger logger = logging.getLogger() logger.addHandler(stream_handler) logger.setLevel(level) # capture warnings issued by 'warnings' module logging.captureWarnings(True)
[ "def", "configure_logger", "(", "level", ")", ":", "class", "_Formatter", "(", "logging", ".", "Formatter", ")", ":", "def", "format", "(", "self", ",", "record", ")", ":", "record", ".", "levelname", "=", "record", ".", "levelname", "[", ":", "4", "]", "return", "super", "(", "_Formatter", ",", "self", ")", ".", "format", "(", "record", ")", "# create stream handler with custom formatter", "stream_handler", "=", "logging", ".", "StreamHandler", "(", ")", "stream_handler", ".", "setFormatter", "(", "_Formatter", "(", "\"[%(levelname)s] %(message)s\"", ")", ")", "# configure root logger", "logger", "=", "logging", ".", "getLogger", "(", ")", "logger", ".", "addHandler", "(", "stream_handler", ")", "logger", ".", "setLevel", "(", "level", ")", "# capture warnings issued by 'warnings' module", "logging", ".", "captureWarnings", "(", "True", ")" ]
Configure a root logger to print records in pretty format. The format is more readable for end users, since it's not necessary at all to know a record's datetime or the source of the record. Examples:: [INFO] message [WARN] message [ERRO] message :param level: a minimum logging level to be printed
[ "Configure", "a", "root", "logger", "to", "print", "records", "in", "pretty", "format", "." ]
d202f6bccfeca64162857c6d0ee5bb53e773d2f2
https://github.com/ikalnytskyi/holocron/blob/d202f6bccfeca64162857c6d0ee5bb53e773d2f2/src/holocron/__main__.py#L43-L73
train
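A usage sketch, assuming `configure_logger` from the record is in scope (the exact import path, e.g. `holocron.__main__`, is an assumption); the custom formatter truncates level names to four characters.

```python
import logging
from holocron.__main__ import configure_logger   # import path is an assumption

configure_logger(logging.INFO)

log = logging.getLogger(__name__)
log.info("generating site")
log.warning("missing template, falling back to default")
log.debug("not shown: below the configured level")
# [INFO] generating site
# [WARN] missing template, falling back to default
```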
ikalnytskyi/holocron
src/holocron/__main__.py
parse_command_line
def parse_command_line(args): """ Builds a command line interface, and parses its arguments. Returns an object with attributes, that are represent CLI arguments. :param args: a list of command line arguments :returns: a parsed object with cli options """ parser = argparse.ArgumentParser( description=( "Holocron is an easy and lightweight static blog generator, " "based on markup text and Jinja2 templates."), epilog=( "With no CONF, read .holocron.yml in the current working dir. " "If no CONF found, the default settings will be used.")) parser.add_argument( "-c", "--conf", dest="conf", default=".holocron.yml", help="set path to the settings file") parser.add_argument( "-q", "--quiet", dest="verbosity", action="store_const", const=logging.CRITICAL, help="show only critical errors") parser.add_argument( "-v", "--verbose", dest="verbosity", action="store_const", const=logging.INFO, help="show additional messages") parser.add_argument( "-d", "--debug", dest="verbosity", action="store_const", const=logging.DEBUG, help="show all messages") parser.add_argument( "--version", action="version", version=pkg_resources.get_distribution("holocron").version, help="show the holocron version and exit") command_parser = parser.add_subparsers( dest="command", help="command to execute") run_parser = command_parser.add_parser("run") run_parser.add_argument("pipe", help="a pipe to run") # parse cli and form arguments object arguments = parser.parse_args(args) # if no commands are specified display help if arguments.command is None: parser.print_help() parser.exit(1) return arguments
python
def parse_command_line(args): """ Builds a command line interface, and parses its arguments. Returns an object with attributes, that are represent CLI arguments. :param args: a list of command line arguments :returns: a parsed object with cli options """ parser = argparse.ArgumentParser( description=( "Holocron is an easy and lightweight static blog generator, " "based on markup text and Jinja2 templates."), epilog=( "With no CONF, read .holocron.yml in the current working dir. " "If no CONF found, the default settings will be used.")) parser.add_argument( "-c", "--conf", dest="conf", default=".holocron.yml", help="set path to the settings file") parser.add_argument( "-q", "--quiet", dest="verbosity", action="store_const", const=logging.CRITICAL, help="show only critical errors") parser.add_argument( "-v", "--verbose", dest="verbosity", action="store_const", const=logging.INFO, help="show additional messages") parser.add_argument( "-d", "--debug", dest="verbosity", action="store_const", const=logging.DEBUG, help="show all messages") parser.add_argument( "--version", action="version", version=pkg_resources.get_distribution("holocron").version, help="show the holocron version and exit") command_parser = parser.add_subparsers( dest="command", help="command to execute") run_parser = command_parser.add_parser("run") run_parser.add_argument("pipe", help="a pipe to run") # parse cli and form arguments object arguments = parser.parse_args(args) # if no commands are specified display help if arguments.command is None: parser.print_help() parser.exit(1) return arguments
[ "def", "parse_command_line", "(", "args", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "(", "\"Holocron is an easy and lightweight static blog generator, \"", "\"based on markup text and Jinja2 templates.\"", ")", ",", "epilog", "=", "(", "\"With no CONF, read .holocron.yml in the current working dir. \"", "\"If no CONF found, the default settings will be used.\"", ")", ")", "parser", ".", "add_argument", "(", "\"-c\"", ",", "\"--conf\"", ",", "dest", "=", "\"conf\"", ",", "default", "=", "\".holocron.yml\"", ",", "help", "=", "\"set path to the settings file\"", ")", "parser", ".", "add_argument", "(", "\"-q\"", ",", "\"--quiet\"", ",", "dest", "=", "\"verbosity\"", ",", "action", "=", "\"store_const\"", ",", "const", "=", "logging", ".", "CRITICAL", ",", "help", "=", "\"show only critical errors\"", ")", "parser", ".", "add_argument", "(", "\"-v\"", ",", "\"--verbose\"", ",", "dest", "=", "\"verbosity\"", ",", "action", "=", "\"store_const\"", ",", "const", "=", "logging", ".", "INFO", ",", "help", "=", "\"show additional messages\"", ")", "parser", ".", "add_argument", "(", "\"-d\"", ",", "\"--debug\"", ",", "dest", "=", "\"verbosity\"", ",", "action", "=", "\"store_const\"", ",", "const", "=", "logging", ".", "DEBUG", ",", "help", "=", "\"show all messages\"", ")", "parser", ".", "add_argument", "(", "\"--version\"", ",", "action", "=", "\"version\"", ",", "version", "=", "pkg_resources", ".", "get_distribution", "(", "\"holocron\"", ")", ".", "version", ",", "help", "=", "\"show the holocron version and exit\"", ")", "command_parser", "=", "parser", ".", "add_subparsers", "(", "dest", "=", "\"command\"", ",", "help", "=", "\"command to execute\"", ")", "run_parser", "=", "command_parser", ".", "add_parser", "(", "\"run\"", ")", "run_parser", ".", "add_argument", "(", "\"pipe\"", ",", "help", "=", "\"a pipe to run\"", ")", "# parse cli and form arguments object", "arguments", "=", "parser", ".", "parse_args", "(", "args", ")", "# if no commands are specified display help", "if", "arguments", ".", "command", "is", "None", ":", "parser", ".", "print_help", "(", ")", "parser", ".", "exit", "(", "1", ")", "return", "arguments" ]
Builds a command line interface and parses its arguments. Returns an object with attributes that represent CLI arguments. :param args: a list of command line arguments :returns: a parsed object with cli options
[ "Builds", "a", "command", "line", "interface", "and", "parses", "its", "arguments", ".", "Returns", "an", "object", "with", "attributes", "that", "represent", "CLI", "arguments", "." ]
d202f6bccfeca64162857c6d0ee5bb53e773d2f2
https://github.com/ikalnytskyi/holocron/blob/d202f6bccfeca64162857c6d0ee5bb53e773d2f2/src/holocron/__main__.py#L76-L127
train
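A self-contained sketch of the same argparse pattern (subcommands plus print-help-and-exit when no command is given); the option and command names below are illustrative, not holocron's actual CLI.

import argparse
import logging

def parse_cli(argv):
    parser = argparse.ArgumentParser(description='demo CLI')
    parser.add_argument('-v', '--verbose', dest='verbosity',
                        action='store_const', const=logging.INFO)
    commands = parser.add_subparsers(dest='command')
    run = commands.add_parser('run')
    run.add_argument('pipe')
    args = parser.parse_args(argv)
    if args.command is None:
        # Mirror parse_command_line(): show help and exit when no subcommand was given.
        parser.print_help()
        parser.exit(1)
    return args

args = parse_cli(['-v', 'run', 'compile'])
print(args.command, args.pipe, args.verbosity)  # run compile 20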
BernardFW/bernard
src/bernard/cli/_live_reload.py
_list_syntax_error
def _list_syntax_error(): """ If we're going through a syntax error, add the directory of the error to the watchlist. """ _, e, _ = sys.exc_info() if isinstance(e, SyntaxError) and hasattr(e, 'filename'): yield path.dirname(e.filename)
python
def _list_syntax_error(): """ If we're going through a syntax error, add the directory of the error to the watchlist. """ _, e, _ = sys.exc_info() if isinstance(e, SyntaxError) and hasattr(e, 'filename'): yield path.dirname(e.filename)
[ "def", "_list_syntax_error", "(", ")", ":", "_", ",", "e", ",", "_", "=", "sys", ".", "exc_info", "(", ")", "if", "isinstance", "(", "e", ",", "SyntaxError", ")", "and", "hasattr", "(", "e", ",", "'filename'", ")", ":", "yield", "path", ".", "dirname", "(", "e", ".", "filename", ")" ]
If we're going through a syntax error, add the directory of the error to the watchlist.
[ "If", "we", "re", "going", "through", "a", "syntax", "error", "add", "the", "directory", "of", "the", "error", "to", "the", "watchlist", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/cli/_live_reload.py#L51-L59
train
BernardFW/bernard
src/bernard/cli/_live_reload.py
list_dirs
def list_dirs(): """ List all directories known to hold project code. """ out = set() out.update(_list_config_dirs()) out.update(_list_module_dirs()) out.update(_list_syntax_error()) return out
python
def list_dirs(): """ List all directories known to hold project code. """ out = set() out.update(_list_config_dirs()) out.update(_list_module_dirs()) out.update(_list_syntax_error()) return out
[ "def", "list_dirs", "(", ")", ":", "out", "=", "set", "(", ")", "out", ".", "update", "(", "_list_config_dirs", "(", ")", ")", "out", ".", "update", "(", "_list_module_dirs", "(", ")", ")", "out", ".", "update", "(", "_list_syntax_error", "(", ")", ")", "return", "out" ]
List all directories known to hold project code.
[ "List", "all", "directories", "known", "to", "hold", "project", "code", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/cli/_live_reload.py#L62-L71
train
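_list_config_dirs and _list_module_dirs are defined elsewhere in this module and are not shown here; a rough, self-contained sketch of how the module part of such a watchlist could be gathered from the interpreter's loaded modules (this is an assumption about their behaviour, not bernard's code):

import sys
from os import path

def guess_module_dirs():
    # Collect the directory of every loaded module that has a source file on disk.
    dirs = set()
    for module in list(sys.modules.values()):
        file_name = getattr(module, '__file__', None)
        if file_name:
            dirs.add(path.dirname(path.realpath(file_name)))
    return dirs

print(len(guess_module_dirs()) > 0)  # True in any normal interpreter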
BernardFW/bernard
src/bernard/cli/_live_reload.py
start_child
async def start_child(): """ Start the child process that will look for changes in modules. """ logger.info('Started to watch for code changes') loop = asyncio.get_event_loop() watcher = aionotify.Watcher() flags = ( aionotify.Flags.MODIFY | aionotify.Flags.DELETE | aionotify.Flags.ATTRIB | aionotify.Flags.MOVED_TO | aionotify.Flags.MOVED_FROM | aionotify.Flags.CREATE | aionotify.Flags.DELETE_SELF | aionotify.Flags.MOVE_SELF ) watched_dirs = list_dirs() for dir_name in watched_dirs: watcher.watch(path=dir_name, flags=flags) await watcher.setup(loop) while True: evt = await watcher.get_event() file_path = path.join(evt.alias, evt.name) if file_path in watched_dirs or file_path.endswith('.py'): await asyncio.sleep(settings.CODE_RELOAD_DEBOUNCE) break watcher.close() exit_for_reload()
python
async def start_child(): """ Start the child process that will look for changes in modules. """ logger.info('Started to watch for code changes') loop = asyncio.get_event_loop() watcher = aionotify.Watcher() flags = ( aionotify.Flags.MODIFY | aionotify.Flags.DELETE | aionotify.Flags.ATTRIB | aionotify.Flags.MOVED_TO | aionotify.Flags.MOVED_FROM | aionotify.Flags.CREATE | aionotify.Flags.DELETE_SELF | aionotify.Flags.MOVE_SELF ) watched_dirs = list_dirs() for dir_name in watched_dirs: watcher.watch(path=dir_name, flags=flags) await watcher.setup(loop) while True: evt = await watcher.get_event() file_path = path.join(evt.alias, evt.name) if file_path in watched_dirs or file_path.endswith('.py'): await asyncio.sleep(settings.CODE_RELOAD_DEBOUNCE) break watcher.close() exit_for_reload()
[ "async", "def", "start_child", "(", ")", ":", "logger", ".", "info", "(", "'Started to watch for code changes'", ")", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "watcher", "=", "aionotify", ".", "Watcher", "(", ")", "flags", "=", "(", "aionotify", ".", "Flags", ".", "MODIFY", "|", "aionotify", ".", "Flags", ".", "DELETE", "|", "aionotify", ".", "Flags", ".", "ATTRIB", "|", "aionotify", ".", "Flags", ".", "MOVED_TO", "|", "aionotify", ".", "Flags", ".", "MOVED_FROM", "|", "aionotify", ".", "Flags", ".", "CREATE", "|", "aionotify", ".", "Flags", ".", "DELETE_SELF", "|", "aionotify", ".", "Flags", ".", "MOVE_SELF", ")", "watched_dirs", "=", "list_dirs", "(", ")", "for", "dir_name", "in", "watched_dirs", ":", "watcher", ".", "watch", "(", "path", "=", "dir_name", ",", "flags", "=", "flags", ")", "await", "watcher", ".", "setup", "(", "loop", ")", "while", "True", ":", "evt", "=", "await", "watcher", ".", "get_event", "(", ")", "file_path", "=", "path", ".", "join", "(", "evt", ".", "alias", ",", "evt", ".", "name", ")", "if", "file_path", "in", "watched_dirs", "or", "file_path", ".", "endswith", "(", "'.py'", ")", ":", "await", "asyncio", ".", "sleep", "(", "settings", ".", "CODE_RELOAD_DEBOUNCE", ")", "break", "watcher", ".", "close", "(", ")", "exit_for_reload", "(", ")" ]
Start the child process that will look for changes in modules.
[ "Start", "the", "child", "process", "that", "will", "look", "for", "changes", "in", "modules", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/cli/_live_reload.py#L83-L120
train
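A trimmed-down sketch of the same aionotify usage (watch a single directory, wait for one event, then close), reusing only the calls that appear in the function above; it assumes aionotify is installed and the code runs on Linux.

import asyncio
import aionotify

async def watch_once(dir_name):
    loop = asyncio.get_event_loop()
    watcher = aionotify.Watcher()
    # Only modifications and new files, for brevity; start_child() watches many more flags.
    watcher.watch(path=dir_name, flags=aionotify.Flags.MODIFY | aionotify.Flags.CREATE)
    await watcher.setup(loop)
    evt = await watcher.get_event()  # blocks until something changes in dir_name
    watcher.close()
    return evt

# asyncio.get_event_loop().run_until_complete(watch_once('/tmp'))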
BernardFW/bernard
src/bernard/cli/_live_reload.py
start_parent
def start_parent(): """ Start the parent that will simply run the child forever until stopped. """ while True: args = [sys.executable] + sys.argv new_environ = environ.copy() new_environ["_IN_CHILD"] = 'yes' ret = subprocess.call(args, env=new_environ) if ret != settings.CODE_RELOAD_EXIT: return ret
python
def start_parent(): """ Start the parent that will simply run the child forever until stopped. """ while True: args = [sys.executable] + sys.argv new_environ = environ.copy() new_environ["_IN_CHILD"] = 'yes' ret = subprocess.call(args, env=new_environ) if ret != settings.CODE_RELOAD_EXIT: return ret
[ "def", "start_parent", "(", ")", ":", "while", "True", ":", "args", "=", "[", "sys", ".", "executable", "]", "+", "sys", ".", "argv", "new_environ", "=", "environ", ".", "copy", "(", ")", "new_environ", "[", "\"_IN_CHILD\"", "]", "=", "'yes'", "ret", "=", "subprocess", ".", "call", "(", "args", ",", "env", "=", "new_environ", ")", "if", "ret", "!=", "settings", ".", "CODE_RELOAD_EXIT", ":", "return", "ret" ]
Start the parent that will simply run the child forever until stopped.
[ "Start", "the", "parent", "that", "will", "simply", "run", "the", "child", "forever", "until", "stopped", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/cli/_live_reload.py#L123-L135
train
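The child-side counterpart, exit_for_reload(), is not included in this record; presumably it just exits with the agreed-upon status so the loop above re-spawns the process. A hedged sketch, with the exit-code value as an assumption:

import sys

CODE_RELOAD_EXIT = 42  # assumed stand-in for settings.CODE_RELOAD_EXIT

def exit_for_reload():
    # Exiting with CODE_RELOAD_EXIT makes start_parent() restart the child;
    # any other status ends the loop and becomes the parent's return value.
    sys.exit(CODE_RELOAD_EXIT)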
jpscaletti/authcode
authcode/wsgi/werkzeug.py
get_from_params
def get_from_params(request, key): """Try to read a value named ``key`` from the GET parameters. """ data = getattr(request, 'json', None) or request.values value = data.get(key) return to_native(value)
python
def get_from_params(request, key): """Try to read a value named ``key`` from the GET parameters. """ data = getattr(request, 'json', None) or request.values value = data.get(key) return to_native(value)
[ "def", "get_from_params", "(", "request", ",", "key", ")", ":", "data", "=", "getattr", "(", "request", ",", "'json'", ",", "None", ")", "or", "request", ".", "values", "value", "=", "data", ".", "get", "(", "key", ")", "return", "to_native", "(", "value", ")" ]
Try to read a value named ``key`` from the GET parameters.
[ "Try", "to", "read", "a", "value", "named", "key", "from", "the", "GET", "parameters", "." ]
91529b6d0caec07d1452758d937e1e0745826139
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/wsgi/werkzeug.py#L55-L60
train
jpscaletti/authcode
authcode/wsgi/werkzeug.py
get_from_headers
def get_from_headers(request, key): """Try to read a value named ``key`` from the headers. """ value = request.headers.get(key) return to_native(value)
python
def get_from_headers(request, key): """Try to read a value named ``key`` from the headers. """ value = request.headers.get(key) return to_native(value)
[ "def", "get_from_headers", "(", "request", ",", "key", ")", ":", "value", "=", "request", ".", "headers", ".", "get", "(", "key", ")", "return", "to_native", "(", "value", ")" ]
Try to read a value named ``key`` from the headers.
[ "Try", "to", "read", "a", "value", "named", "key", "from", "the", "headers", "." ]
91529b6d0caec07d1452758d937e1e0745826139
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/wsgi/werkzeug.py#L63-L67
train
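A small usage sketch of the header-then-parameters lookup these two helpers enable, built with werkzeug's test utilities; the token names and the fallback order are illustrative, and authcode's to_native() helper is left out.

from werkzeug.test import EnvironBuilder
from werkzeug.wrappers import Request

builder = EnvironBuilder(method='GET', query_string='csrf_token=from-params',
                         headers=[('X-CSRF-Token', 'from-header')])
request = Request(builder.get_environ())

# Prefer the header, fall back to the GET parameters, mirroring the two helpers above.
token = request.headers.get('X-CSRF-Token') or request.values.get('csrf_token')
print(token)  # from-header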
BernardFW/bernard
src/bernard/storage/redis.py
BaseRedisStore.async_init
async def async_init(self): """ Handle here the asynchronous part of the init. """ self.pool = await aioredis.create_pool( (self.host, self.port), db=self.db_id, minsize=self.min_pool_size, maxsize=self.max_pool_size, loop=asyncio.get_event_loop(), )
python
async def async_init(self): """ Handle here the asynchronous part of the init. """ self.pool = await aioredis.create_pool( (self.host, self.port), db=self.db_id, minsize=self.min_pool_size, maxsize=self.max_pool_size, loop=asyncio.get_event_loop(), )
[ "async", "def", "async_init", "(", "self", ")", ":", "self", ".", "pool", "=", "await", "aioredis", ".", "create_pool", "(", "(", "self", ".", "host", ",", "self", ".", "port", ")", ",", "db", "=", "self", ".", "db_id", ",", "minsize", "=", "self", ".", "min_pool_size", ",", "maxsize", "=", "self", ".", "max_pool_size", ",", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", ",", ")" ]
Handle here the asynchronous part of the init.
[ "Handle", "here", "the", "asynchronous", "part", "of", "the", "init", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/storage/redis.py#L41-L52
train
rsgalloway/grit
grit/util/util.py
serialize
def serialize(d): """ Attempts to serialize values from a dictionary, skipping private attrs. :param d: A dictionary of params to serialize, typically cls.__dict__ """ ret = {} for k,v in d.items(): if not k.startswith('_'): ret[k] = str(d[k]) #ret['__class__'] = obj.__class__.__name__ return ret
python
def serialize(d): """ Attempts to serialize values from a dictionary, skipping private attrs. :param d: A dictionary of params to serialize, typically cls.__dict__ """ ret = {} for k,v in d.items(): if not k.startswith('_'): ret[k] = str(d[k]) #ret['__class__'] = obj.__class__.__name__ return ret
[ "def", "serialize", "(", "d", ")", ":", "ret", "=", "{", "}", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ":", "if", "not", "k", ".", "startswith", "(", "'_'", ")", ":", "ret", "[", "k", "]", "=", "str", "(", "d", "[", "k", "]", ")", "#ret['__class__'] = obj.__class__.__name__", "return", "ret" ]
Attempts to serialize values from a dictionary, skipping private attrs. :param d: A dictionary of params to serialize, typically cls.__dict__
[ "Attempts", "to", "serialize", "values", "from", "a", "dictionary", "skipping", "private", "attrs", "." ]
e6434ad8a1f4ac5d0903ebad630c81f8a5164d78
https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/util/util.py#L31-L44
train
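A quick usage example of the behaviour described above (private keys skipped, the rest stringified); the import path is taken from the record and the input dict is made up.

from grit.util.util import serialize

example = {'name': 'grit', 'size': 42, '_cache': object()}
print(serialize(example))  # {'name': 'grit', 'size': '42'}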
rsgalloway/grit
grit/util/util.py
user_config
def user_config(**kwargs): """ Initialize Git user config file. :param kwargs: key/value pairs are stored in the git user config file. """ for kw in kwargs: git('config --global user.%s "%s"' %(kw, kwargs.get(kw))).wait()
python
def user_config(**kwargs): """ Initialize Git user config file. :param kwargs: key/value pairs are stored in the git user config file. """ for kw in kwargs: git('config --global user.%s "%s"' %(kw, kwargs.get(kw))).wait()
[ "def", "user_config", "(", "*", "*", "kwargs", ")", ":", "for", "kw", "in", "kwargs", ":", "git", "(", "'config --global user.%s \"%s\"'", "%", "(", "kw", ",", "kwargs", ".", "get", "(", "kw", ")", ")", ")", ".", "wait", "(", ")" ]
Initialize Git user config file. :param kwargs: key/value pairs are stored in the git user config file.
[ "Initialize", "Git", "user", "config", "file", "." ]
e6434ad8a1f4ac5d0903ebad630c81f8a5164d78
https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/util/util.py#L51-L58
train
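A usage sketch: each keyword argument becomes one `git config --global user.<key> "<value>"` call, so setting an identity might look like this (import path from the record above, placeholder values, and git must be on the PATH):

from grit.util.util import user_config

# Runs: git config --global user.name "Jane Doe"
#       git config --global user.email "jane@example.com"
user_config(name='Jane Doe', email='jane@example.com')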
blockstack-packages/jsontokens-py
jsontokens/token_signer.py
TokenSigner._make_header
def _make_header(self, token_type=None, signing_algorithm=None): """ Make a JWT header """ if not token_type: token_type = self.token_type if not signing_algorithm: signing_algorithm = self.signing_algorithm header = {'typ': token_type, 'alg': signing_algorithm} return header
python
def _make_header(self, token_type=None, signing_algorithm=None): """ Make a JWT header """ if not token_type: token_type = self.token_type if not signing_algorithm: signing_algorithm = self.signing_algorithm header = {'typ': token_type, 'alg': signing_algorithm} return header
[ "def", "_make_header", "(", "self", ",", "token_type", "=", "None", ",", "signing_algorithm", "=", "None", ")", ":", "if", "not", "token_type", ":", "token_type", "=", "self", ".", "token_type", "if", "not", "signing_algorithm", ":", "signing_algorithm", "=", "self", ".", "signing_algorithm", "header", "=", "{", "'typ'", ":", "token_type", ",", "'alg'", ":", "signing_algorithm", "}", "return", "header" ]
Make a JWT header
[ "Make", "a", "JWT", "header" ]
1a4e71ed63456e8381b7d3fd566ce38e6ebfa7d3
https://github.com/blockstack-packages/jsontokens-py/blob/1a4e71ed63456e8381b7d3fd566ce38e6ebfa7d3/jsontokens/token_signer.py#L38-L49
train
blockstack-packages/jsontokens-py
jsontokens/token_signer.py
TokenSigner._make_signature
def _make_signature(self, header_b64, payload_b64, signing_key): """ Sign a serialized header and payload. Return the urlsafe-base64-encoded signature. """ token_segments = [header_b64, payload_b64] signing_input = b'.'.join(token_segments) signer = self._get_signer(signing_key) signer.update(signing_input) signature = signer.finalize() raw_signature = der_to_raw_signature(signature, signing_key.curve) return base64url_encode(raw_signature)
python
def _make_signature(self, header_b64, payload_b64, signing_key): """ Sign a serialized header and payload. Return the urlsafe-base64-encoded signature. """ token_segments = [header_b64, payload_b64] signing_input = b'.'.join(token_segments) signer = self._get_signer(signing_key) signer.update(signing_input) signature = signer.finalize() raw_signature = der_to_raw_signature(signature, signing_key.curve) return base64url_encode(raw_signature)
[ "def", "_make_signature", "(", "self", ",", "header_b64", ",", "payload_b64", ",", "signing_key", ")", ":", "token_segments", "=", "[", "header_b64", ",", "payload_b64", "]", "signing_input", "=", "b'.'", ".", "join", "(", "token_segments", ")", "signer", "=", "self", ".", "_get_signer", "(", "signing_key", ")", "signer", ".", "update", "(", "signing_input", ")", "signature", "=", "signer", ".", "finalize", "(", ")", "raw_signature", "=", "der_to_raw_signature", "(", "signature", ",", "signing_key", ".", "curve", ")", "return", "base64url_encode", "(", "raw_signature", ")" ]
Sign a serialized header and payload. Return the urlsafe-base64-encoded signature.
[ "Sign", "a", "serialized", "header", "and", "payload", ".", "Return", "the", "urlsafe", "-", "base64", "-", "encoded", "signature", "." ]
1a4e71ed63456e8381b7d3fd566ce38e6ebfa7d3
https://github.com/blockstack-packages/jsontokens-py/blob/1a4e71ed63456e8381b7d3fd566ce38e6ebfa7d3/jsontokens/token_signer.py#L52-L65
train
blockstack-packages/jsontokens-py
jsontokens/token_signer.py
TokenSigner._sign_multi
def _sign_multi(self, payload, signing_keys): """ Make a multi-signature JWT. Returns a JSON-structured JWT. TODO: support multiple types of signatures """ if not isinstance(payload, Mapping): raise TypeError('Expecting a mapping object, as only ' 'JSON objects can be used as payloads.') if not isinstance(signing_keys, list): raise TypeError("Expecting a list of keys") headers = [] signatures = [] payload_b64 = base64url_encode(json_encode(payload)) for sk in signing_keys: signing_key = load_signing_key(sk, self.crypto_backend) header = self._make_header() header_b64 = base64url_encode(json_encode(header)) signature_b64 = self._make_signature(header_b64, payload_b64, signing_key) headers.append(header_b64) signatures.append(signature_b64) jwt = { "header": headers, "payload": payload_b64, "signature": signatures } return jwt
python
def _sign_multi(self, payload, signing_keys): """ Make a multi-signature JWT. Returns a JSON-structured JWT. TODO: support multiple types of signatures """ if not isinstance(payload, Mapping): raise TypeError('Expecting a mapping object, as only ' 'JSON objects can be used as payloads.') if not isinstance(signing_keys, list): raise TypeError("Expecting a list of keys") headers = [] signatures = [] payload_b64 = base64url_encode(json_encode(payload)) for sk in signing_keys: signing_key = load_signing_key(sk, self.crypto_backend) header = self._make_header() header_b64 = base64url_encode(json_encode(header)) signature_b64 = self._make_signature(header_b64, payload_b64, signing_key) headers.append(header_b64) signatures.append(signature_b64) jwt = { "header": headers, "payload": payload_b64, "signature": signatures } return jwt
[ "def", "_sign_multi", "(", "self", ",", "payload", ",", "signing_keys", ")", ":", "if", "not", "isinstance", "(", "payload", ",", "Mapping", ")", ":", "raise", "TypeError", "(", "'Expecting a mapping object, as only '", "'JSON objects can be used as payloads.'", ")", "if", "not", "isinstance", "(", "signing_keys", ",", "list", ")", ":", "raise", "TypeError", "(", "\"Expecting a list of keys\"", ")", "headers", "=", "[", "]", "signatures", "=", "[", "]", "payload_b64", "=", "base64url_encode", "(", "json_encode", "(", "payload", ")", ")", "for", "sk", "in", "signing_keys", ":", "signing_key", "=", "load_signing_key", "(", "sk", ",", "self", ".", "crypto_backend", ")", "header", "=", "self", ".", "_make_header", "(", ")", "header_b64", "=", "base64url_encode", "(", "json_encode", "(", "header", ")", ")", "signature_b64", "=", "self", ".", "_make_signature", "(", "header_b64", ",", "payload_b64", ",", "signing_key", ")", "headers", ".", "append", "(", "header_b64", ")", "signatures", ".", "append", "(", "signature_b64", ")", "jwt", "=", "{", "\"header\"", ":", "headers", ",", "\"payload\"", ":", "payload_b64", ",", "\"signature\"", ":", "signatures", "}", "return", "jwt" ]
Make a multi-signature JWT. Returns a JSON-structured JWT. TODO: support multiple types of signatures
[ "Make", "a", "multi", "-", "signature", "JWT", ".", "Returns", "a", "JSON", "-", "structured", "JWT", "." ]
1a4e71ed63456e8381b7d3fd566ce38e6ebfa7d3
https://github.com/blockstack-packages/jsontokens-py/blob/1a4e71ed63456e8381b7d3fd566ce38e6ebfa7d3/jsontokens/token_signer.py#L93-L127
train
blockstack-packages/jsontokens-py
jsontokens/token_signer.py
TokenSigner.sign
def sign(self, payload, signing_key_or_keys): """ Create a JWT with one or more keys. Returns a compact-form serialized JWT if there is only one key to sign with Returns a JSON-structured serialized JWT if there are multiple keys to sign with """ if isinstance(signing_key_or_keys, list): return self._sign_multi(payload, signing_key_or_keys) else: return self._sign_single(payload, signing_key_or_keys)
python
def sign(self, payload, signing_key_or_keys): """ Create a JWT with one or more keys. Returns a compact-form serialized JWT if there is only one key to sign with Returns a JSON-structured serialized JWT if there are multiple keys to sign with """ if isinstance(signing_key_or_keys, list): return self._sign_multi(payload, signing_key_or_keys) else: return self._sign_single(payload, signing_key_or_keys)
[ "def", "sign", "(", "self", ",", "payload", ",", "signing_key_or_keys", ")", ":", "if", "isinstance", "(", "signing_key_or_keys", ",", "list", ")", ":", "return", "self", ".", "_sign_multi", "(", "payload", ",", "signing_key_or_keys", ")", "else", ":", "return", "self", ".", "_sign_single", "(", "payload", ",", "signing_key_or_keys", ")" ]
Create a JWT with one or more keys. Returns a compact-form serialized JWT if there is only one key to sign with Returns a JSON-structured serialized JWT if there are multiple keys to sign with
[ "Create", "a", "JWT", "with", "one", "or", "more", "keys", ".", "Returns", "a", "compact", "-", "form", "serialized", "JWT", "if", "there", "is", "only", "one", "key", "to", "sign", "with", "Returns", "a", "JSON", "-", "structured", "serialized", "JWT", "if", "there", "are", "multiple", "keys", "to", "sign", "with" ]
1a4e71ed63456e8381b7d3fd566ce38e6ebfa7d3
https://github.com/blockstack-packages/jsontokens-py/blob/1a4e71ed63456e8381b7d3fd566ce38e6ebfa7d3/jsontokens/token_signer.py#L130-L140
train
pyslackers/sir-bot-a-lot
setup.py
parse_reqs
def parse_reqs(req_path='./requirements/requirements.txt'): """Recursively parse requirements from nested pip files.""" install_requires = [] with codecs.open(req_path, 'r') as handle: # remove comments and empty lines lines = (line.strip() for line in handle if line.strip() and not line.startswith('#')) for line in lines: # check for nested requirements files if line.startswith('-r'): # recursively call this function install_requires += parse_reqs(req_path=line[3:]) else: # add the line as a new requirement install_requires.append(line) return install_requires
python
def parse_reqs(req_path='./requirements/requirements.txt'): """Recursively parse requirements from nested pip files.""" install_requires = [] with codecs.open(req_path, 'r') as handle: # remove comments and empty lines lines = (line.strip() for line in handle if line.strip() and not line.startswith('#')) for line in lines: # check for nested requirements files if line.startswith('-r'): # recursively call this function install_requires += parse_reqs(req_path=line[3:]) else: # add the line as a new requirement install_requires.append(line) return install_requires
[ "def", "parse_reqs", "(", "req_path", "=", "'./requirements/requirements.txt'", ")", ":", "install_requires", "=", "[", "]", "with", "codecs", ".", "open", "(", "req_path", ",", "'r'", ")", "as", "handle", ":", "# remove comments and empty lines", "lines", "=", "(", "line", ".", "strip", "(", ")", "for", "line", "in", "handle", "if", "line", ".", "strip", "(", ")", "and", "not", "line", ".", "startswith", "(", "'#'", ")", ")", "for", "line", "in", "lines", ":", "# check for nested requirements files", "if", "line", ".", "startswith", "(", "'-r'", ")", ":", "# recursively call this function", "install_requires", "+=", "parse_reqs", "(", "req_path", "=", "line", "[", "3", ":", "]", ")", "else", ":", "# add the line as a new requirement", "install_requires", ".", "append", "(", "line", ")", "return", "install_requires" ]
Recursively parse requirements from nested pip files.
[ "Recursively", "parse", "requirements", "from", "nested", "pip", "files", "." ]
22dfdd6a14d61dbe29423fd131b7a23e618b68d7
https://github.com/pyslackers/sir-bot-a-lot/blob/22dfdd6a14d61dbe29423fd131b7a23e618b68d7/setup.py#L25-L42
train
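A self-contained re-run of the same recursive logic against temporary files, to make the `-r nested.txt` behaviour concrete; the file names and package names are invented for the demo.

import codecs
import os
import tempfile

def parse_reqs(req_path):
    install_requires = []
    with codecs.open(req_path, 'r') as handle:
        lines = (line.strip() for line in handle
                 if line.strip() and not line.startswith('#'))
        for line in lines:
            if line.startswith('-r'):
                # "-r " is three characters, so line[3:] is the nested file path.
                install_requires += parse_reqs(req_path=line[3:])
            else:
                install_requires.append(line)
    return install_requires

tmp = tempfile.mkdtemp()
base = os.path.join(tmp, 'base.txt')
main = os.path.join(tmp, 'requirements.txt')
with open(base, 'w') as f:
    f.write('aiohttp\n')
with open(main, 'w') as f:
    f.write('# pinned deps\n-r %s\nclick\n' % base)

print(parse_reqs(main))  # ['aiohttp', 'click']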
BernardFW/bernard
src/bernard/reporter/_raven.py
RavenReporter.report
def report(self, request: 'Request'=None, state: Text=None): """ Report current exception to Sentry. """ self._make_context(request, state) self.client.captureException() self._clear_context()
python
def report(self, request: 'Request'=None, state: Text=None): """ Report current exception to Sentry. """ self._make_context(request, state) self.client.captureException() self._clear_context()
[ "def", "report", "(", "self", ",", "request", ":", "'Request'", "=", "None", ",", "state", ":", "Text", "=", "None", ")", ":", "self", ".", "_make_context", "(", "request", ",", "state", ")", "self", ".", "client", ".", "captureException", "(", ")", "self", ".", "_clear_context", "(", ")" ]
Report current exception to Sentry.
[ "Report", "current", "exception", "to", "Sentry", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/reporter/_raven.py#L68-L74
train
BernardFW/bernard
src/bernard/misc/start_project/_base.py
vary_name
def vary_name(name: Text): """ Validates the name and creates variations """ snake = re.match(r'^[a-z][a-z0-9]*(?:_[a-z0-9]+)*$', name) if not snake: fail('The project name is not a valid snake-case Python variable name') camel = [x[0].upper() + x[1:] for x in name.split('_')] return { 'project_name_snake': name, 'project_name_camel': ''.join(camel), 'project_name_readable': ' '.join(camel), }
python
def vary_name(name: Text): """ Validates the name and creates variations """ snake = re.match(r'^[a-z][a-z0-9]*(?:_[a-z0-9]+)*$', name) if not snake: fail('The project name is not a valid snake-case Python variable name') camel = [x[0].upper() + x[1:] for x in name.split('_')] return { 'project_name_snake': name, 'project_name_camel': ''.join(camel), 'project_name_readable': ' '.join(camel), }
[ "def", "vary_name", "(", "name", ":", "Text", ")", ":", "snake", "=", "re", ".", "match", "(", "r'^[a-z][a-z0-9]*(?:_[a-z0-9]+)*$'", ",", "name", ")", "if", "not", "snake", ":", "fail", "(", "'The project name is not a valid snake-case Python variable name'", ")", "camel", "=", "[", "x", "[", "0", "]", ".", "upper", "(", ")", "+", "x", "[", "1", ":", "]", "for", "x", "in", "name", ".", "split", "(", "'_'", ")", "]", "return", "{", "'project_name_snake'", ":", "name", ",", "'project_name_camel'", ":", "''", ".", "join", "(", "camel", ")", ",", "'project_name_readable'", ":", "' '", ".", "join", "(", "camel", ")", ",", "}" ]
Validates the name and creates variations
[ "Validates", "the", "name", "and", "creates", "variations" ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/misc/start_project/_base.py#L30-L46
train
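A usage sketch showing the three derived name forms; the project name is invented and the expected output follows directly from the split/capitalise logic above (import path taken from the record).

from bernard.misc.start_project._base import vary_name

print(vary_name('my_cool_bot'))
# {'project_name_snake': 'my_cool_bot',
#  'project_name_camel': 'MyCoolBot',
#  'project_name_readable': 'My Cool Bot'}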
BernardFW/bernard
src/bernard/misc/start_project/_base.py
make_random_key
def make_random_key() -> Text: """ Generates a secure random string """ r = SystemRandom() allowed = \ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_+/[]' return ''.join([r.choice(allowed) for _ in range(0, 50)])
python
def make_random_key() -> Text: """ Generates a secure random string """ r = SystemRandom() allowed = \ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_+/[]' return ''.join([r.choice(allowed) for _ in range(0, 50)])
[ "def", "make_random_key", "(", ")", "->", "Text", ":", "r", "=", "SystemRandom", "(", ")", "allowed", "=", "'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_+/[]'", "return", "''", ".", "join", "(", "[", "r", ".", "choice", "(", "allowed", ")", "for", "_", "in", "range", "(", "0", ",", "50", ")", "]", ")" ]
Generates a secure random string
[ "Generates", "a", "secure", "random", "string" ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/misc/start_project/_base.py#L49-L58
train
BernardFW/bernard
src/bernard/misc/start_project/_base.py
make_dir_path
def make_dir_path(project_dir, root, project_name): """ Generates the target path for a directory """ root = root.replace('__project_name_snake__', project_name) real_dir = path.realpath(project_dir) return path.join(real_dir, root)
python
def make_dir_path(project_dir, root, project_name): """ Generates the target path for a directory """ root = root.replace('__project_name_snake__', project_name) real_dir = path.realpath(project_dir) return path.join(real_dir, root)
[ "def", "make_dir_path", "(", "project_dir", ",", "root", ",", "project_name", ")", ":", "root", "=", "root", ".", "replace", "(", "'__project_name_snake__'", ",", "project_name", ")", "real_dir", "=", "path", ".", "realpath", "(", "project_dir", ")", "return", "path", ".", "join", "(", "real_dir", ",", "root", ")" ]
Generates the target path for a directory
[ "Generates", "the", "target", "path", "for", "a", "directory" ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/misc/start_project/_base.py#L61-L68
train
BernardFW/bernard
src/bernard/misc/start_project/_base.py
make_file_path
def make_file_path(project_dir, project_name, root, name): """ Generates the target path for a file """ return path.join(make_dir_path(project_dir, root, project_name), name)
python
def make_file_path(project_dir, project_name, root, name): """ Generates the target path for a file """ return path.join(make_dir_path(project_dir, root, project_name), name)
[ "def", "make_file_path", "(", "project_dir", ",", "project_name", ",", "root", ",", "name", ")", ":", "return", "path", ".", "join", "(", "make_dir_path", "(", "project_dir", ",", "root", ",", "project_name", ")", ",", "name", ")" ]
Generates the target path for a file
[ "Generates", "the", "target", "path", "for", "a", "file" ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/misc/start_project/_base.py#L71-L76
train
BernardFW/bernard
src/bernard/misc/start_project/_base.py
generate_vars
def generate_vars(project_name, project_dir): """ Generates the variables to replace in files """ out = vary_name(project_name) out['random_key'] = make_random_key() out['settings_file'] = make_file_path( project_dir, project_name, path.join('src', project_name), 'settings.py', ) return out
python
def generate_vars(project_name, project_dir): """ Generates the variables to replace in files """ out = vary_name(project_name) out['random_key'] = make_random_key() out['settings_file'] = make_file_path( project_dir, project_name, path.join('src', project_name), 'settings.py', ) return out
[ "def", "generate_vars", "(", "project_name", ",", "project_dir", ")", ":", "out", "=", "vary_name", "(", "project_name", ")", "out", "[", "'random_key'", "]", "=", "make_random_key", "(", ")", "out", "[", "'settings_file'", "]", "=", "make_file_path", "(", "project_dir", ",", "project_name", ",", "path", ".", "join", "(", "'src'", ",", "project_name", ")", ",", "'settings.py'", ",", ")", "return", "out" ]
Generates the variables to replace in files
[ "Generates", "the", "variables", "to", "replace", "in", "files" ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/misc/start_project/_base.py#L79-L93
train
BernardFW/bernard
src/bernard/misc/start_project/_base.py
get_files
def get_files(): """ Read all the template's files """ files_root = path.join(path.dirname(__file__), 'files') for root, dirs, files in walk(files_root): rel_root = path.relpath(root, files_root) for file_name in files: try: f = open(path.join(root, file_name), 'r', encoding='utf-8') with f: yield rel_root, file_name, f.read(), True except UnicodeError: f = open(path.join(root, file_name), 'rb') with f: yield rel_root, file_name, f.read(), False
python
def get_files(): """ Read all the template's files """ files_root = path.join(path.dirname(__file__), 'files') for root, dirs, files in walk(files_root): rel_root = path.relpath(root, files_root) for file_name in files: try: f = open(path.join(root, file_name), 'r', encoding='utf-8') with f: yield rel_root, file_name, f.read(), True except UnicodeError: f = open(path.join(root, file_name), 'rb') with f: yield rel_root, file_name, f.read(), False
[ "def", "get_files", "(", ")", ":", "files_root", "=", "path", ".", "join", "(", "path", ".", "dirname", "(", "__file__", ")", ",", "'files'", ")", "for", "root", ",", "dirs", ",", "files", "in", "walk", "(", "files_root", ")", ":", "rel_root", "=", "path", ".", "relpath", "(", "root", ",", "files_root", ")", "for", "file_name", "in", "files", ":", "try", ":", "f", "=", "open", "(", "path", ".", "join", "(", "root", ",", "file_name", ")", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "with", "f", ":", "yield", "rel_root", ",", "file_name", ",", "f", ".", "read", "(", ")", ",", "True", "except", "UnicodeError", ":", "f", "=", "open", "(", "path", ".", "join", "(", "root", ",", "file_name", ")", ",", "'rb'", ")", "with", "f", ":", "yield", "rel_root", ",", "file_name", ",", "f", ".", "read", "(", ")", ",", "False" ]
Read all the template's files
[ "Read", "all", "the", "template", "s", "files" ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/misc/start_project/_base.py#L96-L114
train
BernardFW/bernard
src/bernard/misc/start_project/_base.py
check_target
def check_target(target_path): """ Checks that the target path is not empty """ if not path.exists(target_path): return with scandir(target_path) as d: for entry in d: if not entry.name.startswith('.'): fail(f'Target directory "{target_path}" is not empty')
python
def check_target(target_path): """ Checks that the target path is not empty """ if not path.exists(target_path): return with scandir(target_path) as d: for entry in d: if not entry.name.startswith('.'): fail(f'Target directory "{target_path}" is not empty')
[ "def", "check_target", "(", "target_path", ")", ":", "if", "not", "path", ".", "exists", "(", "target_path", ")", ":", "return", "with", "scandir", "(", "target_path", ")", "as", "d", ":", "for", "entry", "in", "d", ":", "if", "not", "entry", ".", "name", ".", "startswith", "(", "'.'", ")", ":", "fail", "(", "f'Target directory \"{target_path}\" is not empty'", ")" ]
Checks that the target path is not empty
[ "Checks", "that", "the", "target", "path", "is", "not", "empty" ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/misc/start_project/_base.py#L117-L128
train
BernardFW/bernard
src/bernard/misc/start_project/_base.py
replace_content
def replace_content(content, project_vars): """ Replaces variables inside the content. """ for k, v in project_vars.items(): content = content.replace(f'__{k}__', v) return content
python
def replace_content(content, project_vars): """ Replaces variables inside the content. """ for k, v in project_vars.items(): content = content.replace(f'__{k}__', v) return content
[ "def", "replace_content", "(", "content", ",", "project_vars", ")", ":", "for", "k", ",", "v", "in", "project_vars", ".", "items", "(", ")", ":", "content", "=", "content", ".", "replace", "(", "f'__{k}__'", ",", "v", ")", "return", "content" ]
Replaces variables inside the content.
[ "Replaces", "variables", "inside", "the", "content", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/misc/start_project/_base.py#L131-L139
train
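The substitution in action, re-derived inline with made-up values: every `__key__` placeholder in the content is replaced by its value from project_vars.

content = 'Project __project_name_readable__ uses key __random_key__.'
project_vars = {'project_name_readable': 'My Cool Bot', 'random_key': 'abc123'}

for k, v in project_vars.items():
    content = content.replace('__%s__' % k, v)

print(content)  # Project My Cool Bot uses key abc123.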
BernardFW/bernard
src/bernard/misc/start_project/_base.py
copy_files
def copy_files(project_vars, project_dir, files): """ Copies files from the template into their target location. Unicode files get their variables replaced here and files with a shebang are set to be executable. """ for root, name, content, is_unicode in files: project_name = project_vars['project_name_snake'] if is_unicode: content = replace_content(content, project_vars) file_path = make_file_path(project_dir, project_name, root, name) makedirs(make_dir_path(project_dir, root, project_name), exist_ok=True) if is_unicode: with open(file_path, 'w') as f: f.write(content) if content.startswith('#!'): chmod(file_path, 0o755) else: with open(file_path, 'wb') as f: f.write(content)
python
def copy_files(project_vars, project_dir, files): """ Copies files from the template into their target location. Unicode files get their variables replaced here and files with a shebang are set to be executable. """ for root, name, content, is_unicode in files: project_name = project_vars['project_name_snake'] if is_unicode: content = replace_content(content, project_vars) file_path = make_file_path(project_dir, project_name, root, name) makedirs(make_dir_path(project_dir, root, project_name), exist_ok=True) if is_unicode: with open(file_path, 'w') as f: f.write(content) if content.startswith('#!'): chmod(file_path, 0o755) else: with open(file_path, 'wb') as f: f.write(content)
[ "def", "copy_files", "(", "project_vars", ",", "project_dir", ",", "files", ")", ":", "for", "root", ",", "name", ",", "content", ",", "is_unicode", "in", "files", ":", "project_name", "=", "project_vars", "[", "'project_name_snake'", "]", "if", "is_unicode", ":", "content", "=", "replace_content", "(", "content", ",", "project_vars", ")", "file_path", "=", "make_file_path", "(", "project_dir", ",", "project_name", ",", "root", ",", "name", ")", "makedirs", "(", "make_dir_path", "(", "project_dir", ",", "root", ",", "project_name", ")", ",", "exist_ok", "=", "True", ")", "if", "is_unicode", ":", "with", "open", "(", "file_path", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "content", ")", "if", "content", ".", "startswith", "(", "'#!'", ")", ":", "chmod", "(", "file_path", ",", "0o755", ")", "else", ":", "with", "open", "(", "file_path", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "content", ")" ]
Copies files from the template into their target location. Unicode files get their variables replaced here and files with a shebang are set to be executable.
[ "Copies", "files", "from", "the", "template", "into", "their", "target", "location", ".", "Unicode", "files", "get", "their", "variables", "replaced", "here", "and", "files", "with", "a", "shebang", "are", "set", "to", "be", "executable", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/misc/start_project/_base.py#L142-L166
train
mattlong/hermes
hermes/chatroom.py
Chatroom.connect
def connect(self): """Connect to the chatroom's server, sets up handlers, invites members as needed.""" for m in self.params['MEMBERS']: m['ONLINE'] = 0 m.setdefault('STATUS', 'INVITED') self.client = xmpp.Client(self.jid.getDomain(), debug=[]) conn = self.client.connect(server=self.params['SERVER']) if not conn: raise Exception("could not connect to server") auth = self.client.auth(self.jid.getNode(), self.params['PASSWORD']) if not auth: raise Exception("could not authenticate as chat server") #self.client.RegisterDisconnectHandler(self.on_disconnect) self.client.RegisterHandler('message', self.on_message) self.client.RegisterHandler('presence',self.on_presence) self.client.sendInitPresence(requestRoster=0) roster = self.client.getRoster() for m in self.params['MEMBERS']: self.invite_user(m, roster=roster)
python
def connect(self): """Connect to the chatroom's server, sets up handlers, invites members as needed.""" for m in self.params['MEMBERS']: m['ONLINE'] = 0 m.setdefault('STATUS', 'INVITED') self.client = xmpp.Client(self.jid.getDomain(), debug=[]) conn = self.client.connect(server=self.params['SERVER']) if not conn: raise Exception("could not connect to server") auth = self.client.auth(self.jid.getNode(), self.params['PASSWORD']) if not auth: raise Exception("could not authenticate as chat server") #self.client.RegisterDisconnectHandler(self.on_disconnect) self.client.RegisterHandler('message', self.on_message) self.client.RegisterHandler('presence',self.on_presence) self.client.sendInitPresence(requestRoster=0) roster = self.client.getRoster() for m in self.params['MEMBERS']: self.invite_user(m, roster=roster)
[ "def", "connect", "(", "self", ")", ":", "for", "m", "in", "self", ".", "params", "[", "'MEMBERS'", "]", ":", "m", "[", "'ONLINE'", "]", "=", "0", "m", ".", "setdefault", "(", "'STATUS'", ",", "'INVITED'", ")", "self", ".", "client", "=", "xmpp", ".", "Client", "(", "self", ".", "jid", ".", "getDomain", "(", ")", ",", "debug", "=", "[", "]", ")", "conn", "=", "self", ".", "client", ".", "connect", "(", "server", "=", "self", ".", "params", "[", "'SERVER'", "]", ")", "if", "not", "conn", ":", "raise", "Exception", "(", "\"could not connect to server\"", ")", "auth", "=", "self", ".", "client", ".", "auth", "(", "self", ".", "jid", ".", "getNode", "(", ")", ",", "self", ".", "params", "[", "'PASSWORD'", "]", ")", "if", "not", "auth", ":", "raise", "Exception", "(", "\"could not authenticate as chat server\"", ")", "#self.client.RegisterDisconnectHandler(self.on_disconnect)", "self", ".", "client", ".", "RegisterHandler", "(", "'message'", ",", "self", ".", "on_message", ")", "self", ".", "client", ".", "RegisterHandler", "(", "'presence'", ",", "self", ".", "on_presence", ")", "self", ".", "client", ".", "sendInitPresence", "(", "requestRoster", "=", "0", ")", "roster", "=", "self", ".", "client", ".", "getRoster", "(", ")", "for", "m", "in", "self", ".", "params", "[", "'MEMBERS'", "]", ":", "self", ".", "invite_user", "(", "m", ",", "roster", "=", "roster", ")" ]
Connect to the chatroom's server, set up handlers, and invite members as needed.
[ "Connect", "to", "the", "chatroom", "s", "server", "set", "up", "handlers", "and", "invite", "members", "as", "needed", "." ]
63a5afcafe90ca99aeb44edeee9ed6f90baae431
https://github.com/mattlong/hermes/blob/63a5afcafe90ca99aeb44edeee9ed6f90baae431/hermes/chatroom.py#L31-L53
train
mattlong/hermes
hermes/chatroom.py
Chatroom.get_member
def get_member(self, jid, default=None): """Get a chatroom member by JID""" member = filter(lambda m: m['JID'] == jid, self.params['MEMBERS']) if len(member) == 1: return member[0] elif len(member) == 0: return default else: raise Exception('Multiple members have the same JID of [%s]' % (jid,))
python
def get_member(self, jid, default=None): """Get a chatroom member by JID""" member = filter(lambda m: m['JID'] == jid, self.params['MEMBERS']) if len(member) == 1: return member[0] elif len(member) == 0: return default else: raise Exception('Multiple members have the same JID of [%s]' % (jid,))
[ "def", "get_member", "(", "self", ",", "jid", ",", "default", "=", "None", ")", ":", "member", "=", "filter", "(", "lambda", "m", ":", "m", "[", "'JID'", "]", "==", "jid", ",", "self", ".", "params", "[", "'MEMBERS'", "]", ")", "if", "len", "(", "member", ")", "==", "1", ":", "return", "member", "[", "0", "]", "elif", "len", "(", "member", ")", "==", "0", ":", "return", "default", "else", ":", "raise", "Exception", "(", "'Multiple members have the same JID of [%s]'", "%", "(", "jid", ",", ")", ")" ]
Get a chatroom member by JID
[ "Get", "a", "chatroom", "member", "by", "JID" ]
63a5afcafe90ca99aeb44edeee9ed6f90baae431
https://github.com/mattlong/hermes/blob/63a5afcafe90ca99aeb44edeee9ed6f90baae431/hermes/chatroom.py#L55-L63
train
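Note that len(filter(...)) only works on Python 2, where filter() returns a list; a Python 3 equivalent of the same lookup might use a list comprehension (a sketch, not the project's code):

def get_member_py3(members, jid, default=None):
    matches = [m for m in members if m['JID'] == jid]
    if len(matches) == 1:
        return matches[0]
    if not matches:
        return default
    raise Exception('Multiple members have the same JID of [%s]' % (jid,))

print(get_member_py3([{'JID': 'a@example.com'}], 'a@example.com'))  # {'JID': 'a@example.com'}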
mattlong/hermes
hermes/chatroom.py
Chatroom.is_member
def is_member(self, m): """Check if a user is a member of the chatroom""" if not m: return False elif isinstance(m, basestring): jid = m else: jid = m['JID'] is_member = len(filter(lambda m: m['JID'] == jid and m.get('STATUS') in ('ACTIVE', 'INVITED'), self.params['MEMBERS'])) > 0 return is_member
python
def is_member(self, m): """Check if a user is a member of the chatroom""" if not m: return False elif isinstance(m, basestring): jid = m else: jid = m['JID'] is_member = len(filter(lambda m: m['JID'] == jid and m.get('STATUS') in ('ACTIVE', 'INVITED'), self.params['MEMBERS'])) > 0 return is_member
[ "def", "is_member", "(", "self", ",", "m", ")", ":", "if", "not", "m", ":", "return", "False", "elif", "isinstance", "(", "m", ",", "basestring", ")", ":", "jid", "=", "m", "else", ":", "jid", "=", "m", "[", "'JID'", "]", "is_member", "=", "len", "(", "filter", "(", "lambda", "m", ":", "m", "[", "'JID'", "]", "==", "jid", "and", "m", ".", "get", "(", "'STATUS'", ")", "in", "(", "'ACTIVE'", ",", "'INVITED'", ")", ",", "self", ".", "params", "[", "'MEMBERS'", "]", ")", ")", ">", "0", "return", "is_member" ]
Check if a user is a member of the chatroom
[ "Check", "if", "a", "user", "is", "a", "member", "of", "the", "chatroom" ]
63a5afcafe90ca99aeb44edeee9ed6f90baae431
https://github.com/mattlong/hermes/blob/63a5afcafe90ca99aeb44edeee9ed6f90baae431/hermes/chatroom.py#L65-L76
train
mattlong/hermes
hermes/chatroom.py
Chatroom.invite_user
def invite_user(self, new_member, inviter=None, roster=None): """Invites a new member to the chatroom""" roster = roster or self.client.getRoster() jid = new_member['JID'] logger.info('roster %s %s' % (jid, roster.getSubscription(jid))) if jid in roster.keys() and roster.getSubscription(jid) in ['both', 'to']: new_member['STATUS'] = 'ACTIVE' if inviter: self.send_message('%s is already a member' % (jid,), inviter) else: new_member['STATUS'] = 'INVITED' self.broadcast('inviting %s to the room' % (jid,)) #Add nickname according to http://xmpp.org/extensions/xep-0172.html subscribe_presence = xmpp.dispatcher.Presence(to=jid, typ='subscribe') if 'NICK' in self.params: subscribe_presence.addChild(name='nick', namespace=xmpp.protocol.NS_NICK, payload=self.params['NICK']) self.client.send(subscribe_presence) if not self.is_member(new_member): new_member.setdefault('NICK', jid.split('@')[0]) self.params['MEMBERS'].append(new_member)
python
def invite_user(self, new_member, inviter=None, roster=None): """Invites a new member to the chatroom""" roster = roster or self.client.getRoster() jid = new_member['JID'] logger.info('roster %s %s' % (jid, roster.getSubscription(jid))) if jid in roster.keys() and roster.getSubscription(jid) in ['both', 'to']: new_member['STATUS'] = 'ACTIVE' if inviter: self.send_message('%s is already a member' % (jid,), inviter) else: new_member['STATUS'] = 'INVITED' self.broadcast('inviting %s to the room' % (jid,)) #Add nickname according to http://xmpp.org/extensions/xep-0172.html subscribe_presence = xmpp.dispatcher.Presence(to=jid, typ='subscribe') if 'NICK' in self.params: subscribe_presence.addChild(name='nick', namespace=xmpp.protocol.NS_NICK, payload=self.params['NICK']) self.client.send(subscribe_presence) if not self.is_member(new_member): new_member.setdefault('NICK', jid.split('@')[0]) self.params['MEMBERS'].append(new_member)
[ "def", "invite_user", "(", "self", ",", "new_member", ",", "inviter", "=", "None", ",", "roster", "=", "None", ")", ":", "roster", "=", "roster", "or", "self", ".", "client", ".", "getRoster", "(", ")", "jid", "=", "new_member", "[", "'JID'", "]", "logger", ".", "info", "(", "'roster %s %s'", "%", "(", "jid", ",", "roster", ".", "getSubscription", "(", "jid", ")", ")", ")", "if", "jid", "in", "roster", ".", "keys", "(", ")", "and", "roster", ".", "getSubscription", "(", "jid", ")", "in", "[", "'both'", ",", "'to'", "]", ":", "new_member", "[", "'STATUS'", "]", "=", "'ACTIVE'", "if", "inviter", ":", "self", ".", "send_message", "(", "'%s is already a member'", "%", "(", "jid", ",", ")", ",", "inviter", ")", "else", ":", "new_member", "[", "'STATUS'", "]", "=", "'INVITED'", "self", ".", "broadcast", "(", "'inviting %s to the room'", "%", "(", "jid", ",", ")", ")", "#Add nickname according to http://xmpp.org/extensions/xep-0172.html", "subscribe_presence", "=", "xmpp", ".", "dispatcher", ".", "Presence", "(", "to", "=", "jid", ",", "typ", "=", "'subscribe'", ")", "if", "'NICK'", "in", "self", ".", "params", ":", "subscribe_presence", ".", "addChild", "(", "name", "=", "'nick'", ",", "namespace", "=", "xmpp", ".", "protocol", ".", "NS_NICK", ",", "payload", "=", "self", ".", "params", "[", "'NICK'", "]", ")", "self", ".", "client", ".", "send", "(", "subscribe_presence", ")", "if", "not", "self", ".", "is_member", "(", "new_member", ")", ":", "new_member", ".", "setdefault", "(", "'NICK'", ",", "jid", ".", "split", "(", "'@'", ")", "[", "0", "]", ")", "self", ".", "params", "[", "'MEMBERS'", "]", ".", "append", "(", "new_member", ")" ]
Invites a new member to the chatroom
[ "Invites", "a", "new", "member", "to", "the", "chatroom" ]
63a5afcafe90ca99aeb44edeee9ed6f90baae431
https://github.com/mattlong/hermes/blob/63a5afcafe90ca99aeb44edeee9ed6f90baae431/hermes/chatroom.py#L78-L100
train
mattlong/hermes
hermes/chatroom.py
Chatroom.kick_user
def kick_user(self, jid): """Kicks a member from the chatroom. Kicked user will receive no more messages.""" for member in filter(lambda m: m['JID'] == jid, self.params['MEMBERS']): member['STATUS'] = 'KICKED' self.send_message('You have been kicked from %s' % (self.name,), member) self.client.sendPresence(jid=member['JID'], typ='unsubscribed') self.client.sendPresence(jid=member['JID'], typ='unsubscribe') self.broadcast('kicking %s from the room' % (jid,))
python
def kick_user(self, jid): """Kicks a member from the chatroom. Kicked user will receive no more messages.""" for member in filter(lambda m: m['JID'] == jid, self.params['MEMBERS']): member['STATUS'] = 'KICKED' self.send_message('You have been kicked from %s' % (self.name,), member) self.client.sendPresence(jid=member['JID'], typ='unsubscribed') self.client.sendPresence(jid=member['JID'], typ='unsubscribe') self.broadcast('kicking %s from the room' % (jid,))
[ "def", "kick_user", "(", "self", ",", "jid", ")", ":", "for", "member", "in", "filter", "(", "lambda", "m", ":", "m", "[", "'JID'", "]", "==", "jid", ",", "self", ".", "params", "[", "'MEMBERS'", "]", ")", ":", "member", "[", "'STATUS'", "]", "=", "'KICKED'", "self", ".", "send_message", "(", "'You have been kicked from %s'", "%", "(", "self", ".", "name", ",", ")", ",", "member", ")", "self", ".", "client", ".", "sendPresence", "(", "jid", "=", "member", "[", "'JID'", "]", ",", "typ", "=", "'unsubscribed'", ")", "self", ".", "client", ".", "sendPresence", "(", "jid", "=", "member", "[", "'JID'", "]", ",", "typ", "=", "'unsubscribe'", ")", "self", ".", "broadcast", "(", "'kicking %s from the room'", "%", "(", "jid", ",", ")", ")" ]
Kicks a member from the chatroom. Kicked user will receive no more messages.
[ "Kicks", "a", "member", "from", "the", "chatroom", ".", "Kicked", "user", "will", "receive", "no", "more", "messages", "." ]
63a5afcafe90ca99aeb44edeee9ed6f90baae431
https://github.com/mattlong/hermes/blob/63a5afcafe90ca99aeb44edeee9ed6f90baae431/hermes/chatroom.py#L102-L110
train
mattlong/hermes
hermes/chatroom.py
Chatroom.send_message
def send_message(self, body, to, quiet=False, html_body=None): """Send a message to a single member""" if to.get('MUTED'): to['QUEUED_MESSAGES'].append(body) else: if not quiet: logger.info('message on %s to %s: %s' % (self.name, to['JID'], body)) message = xmpp.protocol.Message(to=to['JID'], body=body, typ='chat') if html_body: html = xmpp.Node('html', {'xmlns': 'http://jabber.org/protocol/xhtml-im'}) html.addChild(node=xmpp.simplexml.XML2Node("<body xmlns='http://www.w3.org/1999/xhtml'>" + html_body.encode('utf-8') + "</body>")) message.addChild(node=html) self.client.send(message)
python
def send_message(self, body, to, quiet=False, html_body=None): """Send a message to a single member""" if to.get('MUTED'): to['QUEUED_MESSAGES'].append(body) else: if not quiet: logger.info('message on %s to %s: %s' % (self.name, to['JID'], body)) message = xmpp.protocol.Message(to=to['JID'], body=body, typ='chat') if html_body: html = xmpp.Node('html', {'xmlns': 'http://jabber.org/protocol/xhtml-im'}) html.addChild(node=xmpp.simplexml.XML2Node("<body xmlns='http://www.w3.org/1999/xhtml'>" + html_body.encode('utf-8') + "</body>")) message.addChild(node=html) self.client.send(message)
[ "def", "send_message", "(", "self", ",", "body", ",", "to", ",", "quiet", "=", "False", ",", "html_body", "=", "None", ")", ":", "if", "to", ".", "get", "(", "'MUTED'", ")", ":", "to", "[", "'QUEUED_MESSAGES'", "]", ".", "append", "(", "body", ")", "else", ":", "if", "not", "quiet", ":", "logger", ".", "info", "(", "'message on %s to %s: %s'", "%", "(", "self", ".", "name", ",", "to", "[", "'JID'", "]", ",", "body", ")", ")", "message", "=", "xmpp", ".", "protocol", ".", "Message", "(", "to", "=", "to", "[", "'JID'", "]", ",", "body", "=", "body", ",", "typ", "=", "'chat'", ")", "if", "html_body", ":", "html", "=", "xmpp", ".", "Node", "(", "'html'", ",", "{", "'xmlns'", ":", "'http://jabber.org/protocol/xhtml-im'", "}", ")", "html", ".", "addChild", "(", "node", "=", "xmpp", ".", "simplexml", ".", "XML2Node", "(", "\"<body xmlns='http://www.w3.org/1999/xhtml'>\"", "+", "html_body", ".", "encode", "(", "'utf-8'", ")", "+", "\"</body>\"", ")", ")", "message", ".", "addChild", "(", "node", "=", "html", ")", "self", ".", "client", ".", "send", "(", "message", ")" ]
Send a message to a single member
[ "Send", "a", "message", "to", "a", "single", "member" ]
63a5afcafe90ca99aeb44edeee9ed6f90baae431
https://github.com/mattlong/hermes/blob/63a5afcafe90ca99aeb44edeee9ed6f90baae431/hermes/chatroom.py#L112-L126
train
mattlong/hermes
hermes/chatroom.py
Chatroom.broadcast
def broadcast(self, body, html_body=None, exclude=()): """Broadcast a message to users in the chatroom""" logger.info('broadcast on %s: %s' % (self.name, body,)) for member in filter(lambda m: m.get('STATUS') == 'ACTIVE' and m not in exclude, self.params['MEMBERS']): logger.debug(member['JID']) self.send_message(body, member, html_body=html_body, quiet=True)
python
def broadcast(self, body, html_body=None, exclude=()): """Broadcast a message to users in the chatroom""" logger.info('broadcast on %s: %s' % (self.name, body,)) for member in filter(lambda m: m.get('STATUS') == 'ACTIVE' and m not in exclude, self.params['MEMBERS']): logger.debug(member['JID']) self.send_message(body, member, html_body=html_body, quiet=True)
[ "def", "broadcast", "(", "self", ",", "body", ",", "html_body", "=", "None", ",", "exclude", "=", "(", ")", ")", ":", "logger", ".", "info", "(", "'broadcast on %s: %s'", "%", "(", "self", ".", "name", ",", "body", ",", ")", ")", "for", "member", "in", "filter", "(", "lambda", "m", ":", "m", ".", "get", "(", "'STATUS'", ")", "==", "'ACTIVE'", "and", "m", "not", "in", "exclude", ",", "self", ".", "params", "[", "'MEMBERS'", "]", ")", ":", "logger", ".", "debug", "(", "member", "[", "'JID'", "]", ")", "self", ".", "send_message", "(", "body", ",", "member", ",", "html_body", "=", "html_body", ",", "quiet", "=", "True", ")" ]
Broadcast a message to users in the chatroom
[ "Broadcast", "a", "message", "to", "users", "in", "the", "chatroom" ]
63a5afcafe90ca99aeb44edeee9ed6f90baae431
https://github.com/mattlong/hermes/blob/63a5afcafe90ca99aeb44edeee9ed6f90baae431/hermes/chatroom.py#L128-L133
train
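A sketch of how broadcast might be called, assuming `room.params['MEMBERS']` holds member dicts with the 'STATUS' and 'JID' keys the code above expects; `room` and `sender` are hypothetical.

# Announce to every ACTIVE member except the person who triggered the event.
room.broadcast('%s deployed to production' % sender['NICK'], exclude=(sender,))

# HTML variant; each delivery goes through send_message with quiet=True.
room.broadcast('deploy done', html_body=u'<em>deploy done</em>')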
mattlong/hermes
hermes/chatroom.py
Chatroom.do_invite
def do_invite(self, sender, body, args):
        """Invite members to the chatroom on a user's behalf"""
        for invitee in args:
            new_member = { 'JID': invitee }
            self.invite_user(new_member, inviter=sender)
python
def do_invite(self, sender, body, args):
        """Invite members to the chatroom on a user's behalf"""
        for invitee in args:
            new_member = { 'JID': invitee }
            self.invite_user(new_member, inviter=sender)
[ "def", "do_invite", "(", "self", ",", "sender", ",", "body", ",", "args", ")", ":", "for", "invitee", "in", "args", ":", "new_member", "=", "{", "'JID'", ":", "invitee", "}", "self", ".", "invite_user", "(", "new_member", ",", "inviter", "=", "sender", ")" ]
Invite members to the chatroom on a user's behalf
[ "Invite", "members", "to", "the", "chatroom", "on", "a", "user", "s", "behalf" ]
63a5afcafe90ca99aeb44edeee9ed6f90baae431
https://github.com/mattlong/hermes/blob/63a5afcafe90ca99aeb44edeee9ed6f90baae431/hermes/chatroom.py#L141-L145
train
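A sketch of the calling convention for do_invite, assuming command handlers receive the sending member's dict, the raw message body, and an already-split argument list; the dispatch code is not shown in this record, so the shape of the call is an assumption.

sender = {'JID': 'admin@example.com', 'NICK': 'admin'}   # hypothetical member dict
# A chat line like '/invite bob@example.com carol@example.com' would arrive roughly as:
room.do_invite(sender, '/invite bob@example.com carol@example.com',
               ['bob@example.com', 'carol@example.com'])   # invite_user called once per JID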
mattlong/hermes
hermes/chatroom.py
Chatroom.do_kick
def do_kick(self, sender, body, args):
        """Kick a member from the chatroom. Must be Admin to kick users"""
        if sender.get('ADMIN') != True: return
        for user in args:
            self.kick_user(user)
python
def do_kick(self, sender, body, args):
        """Kick a member from the chatroom. Must be Admin to kick users"""
        if sender.get('ADMIN') != True: return
        for user in args:
            self.kick_user(user)
[ "def", "do_kick", "(", "self", ",", "sender", ",", "body", ",", "args", ")", ":", "if", "sender", ".", "get", "(", "'ADMIN'", ")", "!=", "True", ":", "return", "for", "user", "in", "args", ":", "self", ".", "kick_user", "(", "user", ")" ]
Kick a member from the chatroom. Must be Admin to kick users
[ "Kick", "a", "member", "from", "the", "chatroom", ".", "Must", "be", "Admin", "to", "kick", "users" ]
63a5afcafe90ca99aeb44edeee9ed6f90baae431
https://github.com/mattlong/hermes/blob/63a5afcafe90ca99aeb44edeee9ed6f90baae431/hermes/chatroom.py#L147-L151
train
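do_kick silently returns unless the sender dict carries ADMIN=True, so a usage sketch needs that flag set; all names below are hypothetical.

admin = {'JID': 'admin@example.com', 'NICK': 'admin', 'ADMIN': True}
room.do_kick(admin, '/kick bob@example.com', ['bob@example.com'])          # calls kick_user once per argument

non_admin = {'JID': 'bob@example.com', 'NICK': 'bob'}
room.do_kick(non_admin, '/kick admin@example.com', ['admin@example.com'])  # no-op: the ADMIN check fails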
mattlong/hermes
hermes/chatroom.py
Chatroom.do_mute
def do_mute(self, sender, body, args):
        """Temporarily mutes chatroom for a user"""
        if sender.get('MUTED'):
            self.send_message('you are already muted', sender)
        else:
            self.broadcast('%s has muted this chatroom' % (sender['NICK'],))
            sender['QUEUED_MESSAGES'] = []
            sender['MUTED'] = True
python
def do_mute(self, sender, body, args):
        """Temporarily mutes chatroom for a user"""
        if sender.get('MUTED'):
            self.send_message('you are already muted', sender)
        else:
            self.broadcast('%s has muted this chatroom' % (sender['NICK'],))
            sender['QUEUED_MESSAGES'] = []
            sender['MUTED'] = True
[ "def", "do_mute", "(", "self", ",", "sender", ",", "body", ",", "args", ")", ":", "if", "sender", ".", "get", "(", "'MUTED'", ")", ":", "self", ".", "send_message", "(", "'you are already muted'", ",", "sender", ")", "else", ":", "self", ".", "broadcast", "(", "'%s has muted this chatroom'", "%", "(", "sender", "[", "'NICK'", "]", ",", ")", ")", "sender", "[", "'QUEUED_MESSAGES'", "]", "=", "[", "]", "sender", "[", "'MUTED'", "]", "=", "True" ]
Temporarily mutes chatroom for a user
[ "Temporarily", "mutes", "chatroom", "for", "a", "user" ]
63a5afcafe90ca99aeb44edeee9ed6f90baae431
https://github.com/mattlong/hermes/blob/63a5afcafe90ca99aeb44edeee9ed6f90baae431/hermes/chatroom.py#L153-L160
train
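A sketch of the queueing behaviour do_mute sets up: once MUTED is true on a member dict, send_message appends to QUEUED_MESSAGES instead of delivering. The member dict and `room` instance are hypothetical.

member = {'JID': 'alice@example.com', 'NICK': 'alice'}
room.do_mute(member, '/mute', [])                        # broadcasts the mute notice, sets the flags
room.send_message('you will not see this yet', member)   # queued rather than delivered
assert member['MUTED'] is True
assert member['QUEUED_MESSAGES'] == ['you will not see this yet']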
mattlong/hermes
hermes/chatroom.py
Chatroom.do_unmute
def do_unmute(self, sender, body, args):
        """Unmutes the chatroom for a user"""
        if sender.get('MUTED'):
            sender['MUTED'] = False
            self.broadcast('%s has unmuted this chatroom' % (sender['NICK'],))
            for msg in sender.get('QUEUED_MESSAGES', []):
                self.send_message(msg, sender)
            sender['QUEUED_MESSAGES'] = []
        else:
            self.send_message('you were not muted', sender)
python
def do_unmute(self, sender, body, args):
        """Unmutes the chatroom for a user"""
        if sender.get('MUTED'):
            sender['MUTED'] = False
            self.broadcast('%s has unmuted this chatroom' % (sender['NICK'],))
            for msg in sender.get('QUEUED_MESSAGES', []):
                self.send_message(msg, sender)
            sender['QUEUED_MESSAGES'] = []
        else:
            self.send_message('you were not muted', sender)
[ "def", "do_unmute", "(", "self", ",", "sender", ",", "body", ",", "args", ")", ":", "if", "sender", ".", "get", "(", "'MUTED'", ")", ":", "sender", "[", "'MUTED'", "]", "=", "False", "self", ".", "broadcast", "(", "'%s has unmuted this chatroom'", "%", "(", "sender", "[", "'NICK'", "]", ",", ")", ")", "for", "msg", "in", "sender", ".", "get", "(", "'QUEUED_MESSAGES'", ",", "[", "]", ")", ":", "self", ".", "send_message", "(", "msg", ",", "sender", ")", "sender", "[", "'QUEUED_MESSAGES'", "]", "=", "[", "]", "else", ":", "self", ".", "send_message", "(", "'you were not muted'", ",", "sender", ")" ]
Unmutes the chatroom for a user
[ "Unmutes", "the", "chatroom", "for", "a", "user" ]
63a5afcafe90ca99aeb44edeee9ed6f90baae431
https://github.com/mattlong/hermes/blob/63a5afcafe90ca99aeb44edeee9ed6f90baae431/hermes/chatroom.py#L162-L171
train
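Continuing the hypothetical sketch from the do_mute record above, do_unmute flushes whatever send_message queued while the member was muted.

room.do_unmute(member, '/unmute', [])   # broadcasts the unmute notice...
# ...then replays each queued message via send_message and clears the queue.
assert member['MUTED'] is False
assert member['QUEUED_MESSAGES'] == []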