Column            Type      Observed range / values
repo              string    7 to 55 chars
path              string    4 to 127 chars
func_name         string    1 to 88 chars
original_string   string    75 to 19.8k chars
language          string    1 distinct value ("python")
code              string    75 to 19.8k chars
code_tokens       list
docstring         string    3 to 17.3k chars
docstring_tokens  list
sha               string    40 chars (fixed)
url               string    87 to 242 chars
partition         string    1 distinct value ("train")
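The columns follow the CodeSearchNet record layout. Below is a minimal sketch of iterating a dump with this schema using the Hugging Face `datasets` library; the dataset id is a placeholder (not a real id), and the column accesses assume exactly the schema above.

# Hypothetical sketch: loading a dump with this schema via the `datasets`
# library. "your-namespace/your-dataset" is a placeholder, not a real id;
# the column names come from the schema table above.
from datasets import load_dataset

ds = load_dataset("your-namespace/your-dataset", split="train")

row = ds[0]
print(row["repo"], row["path"], row["func_name"])
print(row["docstring"])       # natural-language summary of the function
print(row["code"])            # full function source, same as original_string
assert len(row["sha"]) == 40  # git commit hash, always 40 chars per the schema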
mattlong/hermes
hermes/server.py
run_server
def run_server(chatrooms, use_default_logging=True):
    """Sets up and serves specified chatrooms. Main entrypoint to Hermes.

    :param chatrooms: Dictionary of chatrooms to serve.
    :param use_default_logging: (optional) Boolean. Set to True if Hermes should setup its default logging configuration.
    """
    if use_default_logging:
        configure_logging()

    logger.info('Starting Hermes chatroom server...')

    bots = []
    for name, params in chatrooms.items():
        bot_class = params.get('CLASS', 'hermes.Chatroom')
        if type(bot_class) == type:
            pass
        else:
            bot_class_path = bot_class.split('.')
            if len(bot_class_path) == 1:
                module, classname = '__main__', bot_class_path[-1]
            else:
                module, classname = '.'.join(bot_class_path[:-1]), bot_class_path[-1]
            _ = __import__(module, globals(), locals(), [classname])
            bot_class = getattr(_, classname)

        bot = bot_class(name, params)
        bots.append(bot)

    while True:
        try:
            logger.info("Connecting to servers...")
            sockets = _get_sockets(bots)
            if len(sockets.keys()) == 0:
                logger.info('No chatrooms defined. Exiting.')
                return

            _listen(sockets)
        except socket.error, ex:
            if ex.errno == 9:
                logger.exception('broken socket detected')
            else:
                logger.exception('Unknown socket error %d' % (ex.errno,))
        except Exception:
            logger.exception('Unexpected exception')
        time.sleep(1)
python
[ "def", "run_server", "(", "chatrooms", ",", "use_default_logging", "=", "True", ")", ":", "if", "use_default_logging", ":", "configure_logging", "(", ")", "logger", ".", "info", "(", "'Starting Hermes chatroom server...'", ")", "bots", "=", "[", "]", "for", "name", ",", "params", "in", "chatrooms", ".", "items", "(", ")", ":", "bot_class", "=", "params", ".", "get", "(", "'CLASS'", ",", "'hermes.Chatroom'", ")", "if", "type", "(", "bot_class", ")", "==", "type", ":", "pass", "else", ":", "bot_class_path", "=", "bot_class", ".", "split", "(", "'.'", ")", "if", "len", "(", "bot_class_path", ")", "==", "1", ":", "module", ",", "classname", "=", "'__main__'", ",", "bot_class_path", "[", "-", "1", "]", "else", ":", "module", ",", "classname", "=", "'.'", ".", "join", "(", "bot_class_path", "[", ":", "-", "1", "]", ")", ",", "bot_class_path", "[", "-", "1", "]", "_", "=", "__import__", "(", "module", ",", "globals", "(", ")", ",", "locals", "(", ")", ",", "[", "classname", "]", ")", "bot_class", "=", "getattr", "(", "_", ",", "classname", ")", "bot", "=", "bot_class", "(", "name", ",", "params", ")", "bots", ".", "append", "(", "bot", ")", "while", "True", ":", "try", ":", "logger", ".", "info", "(", "\"Connecting to servers...\"", ")", "sockets", "=", "_get_sockets", "(", "bots", ")", "if", "len", "(", "sockets", ".", "keys", "(", ")", ")", "==", "0", ":", "logger", ".", "info", "(", "'No chatrooms defined. Exiting.'", ")", "return", "_listen", "(", "sockets", ")", "except", "socket", ".", "error", ",", "ex", ":", "if", "ex", ".", "errno", "==", "9", ":", "logger", ".", "exception", "(", "'broken socket detected'", ")", "else", ":", "logger", ".", "exception", "(", "'Unknown socket error %d'", "%", "(", "ex", ".", "errno", ",", ")", ")", "except", "Exception", ":", "logger", ".", "exception", "(", "'Unexpected exception'", ")", "time", ".", "sleep", "(", "1", ")" ]
Sets up and serves specified chatrooms. Main entrypoint to Hermes. :param chatrooms: Dictionary of chatrooms to serve. :param use_default_logging: (optional) Boolean. Set to True if Hermes should setup its default logging configuration.
[ "Sets", "up", "and", "serves", "specified", "chatrooms", ".", "Main", "entrypoint", "to", "Hermes", "." ]
63a5afcafe90ca99aeb44edeee9ed6f90baae431
https://github.com/mattlong/hermes/blob/63a5afcafe90ca99aeb44edeee9ed6f90baae431/hermes/server.py#L23-L67
train
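In the record above, `chatrooms` is a dict mapping room names to parameter dicts; the only key run_server itself reads is 'CLASS' (a dotted path or a class object, defaulting to 'hermes.Chatroom'). A hedged usage sketch; the remaining parameters are left as a placeholder comment because their meaning is defined by the Chatroom class, not by run_server.

# Hypothetical usage sketch for run_server. Only 'CLASS' is interpreted by
# run_server itself; the other keys are consumed by the Chatroom class.
from hermes.server import run_server

chatrooms = {
    'dev-room': {
        'CLASS': 'hermes.Chatroom',     # the default; dotted path or class object
        # ... connection parameters consumed by the Chatroom class ...
    },
    'ops-room': {
        'CLASS': 'mybots.OpsChatroom',  # hypothetical subclass, resolved via __import__
    },
}

run_server(chatrooms)  # blocks; retries on socket errors once per second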
mattlong/hermes
hermes/server.py
_get_sockets
def _get_sockets(bots):
    """Connects and gathers sockets for all chatrooms"""
    sockets = {}
    #sockets[sys.stdin] = 'stdio'
    for bot in bots:
        bot.connect()
        sockets[bot.client.Connection._sock] = bot
    return sockets
python
[ "def", "_get_sockets", "(", "bots", ")", ":", "sockets", "=", "{", "}", "#sockets[sys.stdin] = 'stdio'", "for", "bot", "in", "bots", ":", "bot", ".", "connect", "(", ")", "sockets", "[", "bot", ".", "client", ".", "Connection", ".", "_sock", "]", "=", "bot", "return", "sockets" ]
Connects and gathers sockets for all chatrooms
[ "Connects", "and", "gathers", "sockets", "for", "all", "chatrooms" ]
63a5afcafe90ca99aeb44edeee9ed6f90baae431
https://github.com/mattlong/hermes/blob/63a5afcafe90ca99aeb44edeee9ed6f90baae431/hermes/server.py#L69-L76
train
mattlong/hermes
hermes/server.py
_listen
def _listen(sockets):
    """Main server loop. Listens for incoming events and dispatches them to appropriate chatroom"""
    while True:
        (i , o, e) = select.select(sockets.keys(),[],[],1)
        for socket in i:
            if isinstance(sockets[socket], Chatroom):
                data_len = sockets[socket].client.Process(1)
                if data_len is None or data_len == 0:
                    raise Exception('Disconnected from server')
            #elif sockets[socket] == 'stdio':
            #    msg = sys.stdin.readline().rstrip('\r\n')
            #    logger.info('stdin: [%s]' % (msg,))
            else:
                raise Exception("Unknown socket type: %s" % repr(sockets[socket]))
python
[ "def", "_listen", "(", "sockets", ")", ":", "while", "True", ":", "(", "i", ",", "o", ",", "e", ")", "=", "select", ".", "select", "(", "sockets", ".", "keys", "(", ")", ",", "[", "]", ",", "[", "]", ",", "1", ")", "for", "socket", "in", "i", ":", "if", "isinstance", "(", "sockets", "[", "socket", "]", ",", "Chatroom", ")", ":", "data_len", "=", "sockets", "[", "socket", "]", ".", "client", ".", "Process", "(", "1", ")", "if", "data_len", "is", "None", "or", "data_len", "==", "0", ":", "raise", "Exception", "(", "'Disconnected from server'", ")", "#elif sockets[socket] == 'stdio':", "# msg = sys.stdin.readline().rstrip('\\r\\n')", "# logger.info('stdin: [%s]' % (msg,))", "else", ":", "raise", "Exception", "(", "\"Unknown socket type: %s\"", "%", "repr", "(", "sockets", "[", "socket", "]", ")", ")" ]
Main server loop. Listens for incoming events and dispatches them to appropriate chatroom
[ "Main", "server", "loop", ".", "Listens", "for", "incoming", "events", "and", "dispatches", "them", "to", "appropriate", "chatroom" ]
63a5afcafe90ca99aeb44edeee9ed6f90baae431
https://github.com/mattlong/hermes/blob/63a5afcafe90ca99aeb44edeee9ed6f90baae431/hermes/server.py#L78-L91
train
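_listen multiplexes all chatroom sockets through select with a 1-second timeout and treats a zero-length read as a disconnect. The record above is Python 2; here is a self-contained Python 3 sketch of the same pattern, with generic names rather than Hermes APIs.

# Self-contained Python 3 sketch of the select-based dispatch pattern above:
# poll many sockets with a timeout and treat an empty read as a disconnect.
import select


def listen(socket_map):
    """socket_map: {socket object -> handler with a process() method}."""
    while True:
        readable, _, _ = select.select(list(socket_map), [], [], 1.0)
        for sock in readable:
            handler = socket_map[sock]
            data = sock.recv(4096)
            if not data:                  # empty read == peer closed the socket
                raise ConnectionError('Disconnected from server')
            handler.process(data)         # dispatch to the owning handler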
transifex/transifex-python-library
txlib/http/http_requests.py
HttpRequest._send
def _send(self, method, path, data, filename):
    """Send data to a remote server, either with a POST or a PUT request.

    Args:
        `method`: The method (POST or PUT) to use.
        `path`: The path to the resource.
        `data`: The data to send.
        `filename`: The filename of the file to send (if any).
    Returns:
        The content of the response.
    Raises:
        An exception depending on the HTTP status code of the response.
    """
    if filename is None:
        return self._send_json(method, path, data)
    else:
        return self._send_file(method, path, data, filename)
python
[ "def", "_send", "(", "self", ",", "method", ",", "path", ",", "data", ",", "filename", ")", ":", "if", "filename", "is", "None", ":", "return", "self", ".", "_send_json", "(", "method", ",", "path", ",", "data", ")", "else", ":", "return", "self", ".", "_send_file", "(", "method", ",", "path", ",", "data", ",", "filename", ")" ]
Send data to a remote server, either with a POST or a PUT request. Args: `method`: The method (POST or PUT) to use. `path`: The path to the resource. `data`: The data to send. `filename`: The filename of the file to send (if any). Returns: The content of the response. Raises: An exception depending on the HTTP status code of the response.
[ "Send", "data", "to", "a", "remote", "server", "either", "with", "a", "POST", "or", "a", "PUT", "request", "." ]
9fea86b718973de35ccca6d54bd1f445c9632406
https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/http/http_requests.py#L123-L139
train
BernardFW/bernard
src/bernard/platforms/management.py
get_platform_settings
def get_platform_settings():
    """
    Returns the content of `settings.PLATFORMS` with a twist.

    The platforms settings was created to stay compatible with the old way
    of declaring the FB configuration, in order not to break production
    bots. This function will convert the legacy configuration into the new
    configuration if required. As a result, it should be the only used way
    to access the platform configuration.
    """
    s = settings.PLATFORMS

    if hasattr(settings, 'FACEBOOK') and settings.FACEBOOK:
        s.append({
            'class': 'bernard.platforms.facebook.platform.Facebook',
            'settings': settings.FACEBOOK,
        })

    return s
python
[ "def", "get_platform_settings", "(", ")", ":", "s", "=", "settings", ".", "PLATFORMS", "if", "hasattr", "(", "settings", ",", "'FACEBOOK'", ")", "and", "settings", ".", "FACEBOOK", ":", "s", ".", "append", "(", "{", "'class'", ":", "'bernard.platforms.facebook.platform.Facebook'", ",", "'settings'", ":", "settings", ".", "FACEBOOK", ",", "}", ")", "return", "s" ]
Returns the content of `settings.PLATFORMS` with a twist. The platforms settings was created to stay compatible with the old way of declaring the FB configuration, in order not to break production bots. This function will convert the legacy configuration into the new configuration if required. As a result, it should be the only used way to access the platform configuration.
[ "Returns", "the", "content", "of", "settings", ".", "PLATFORMS", "with", "a", "twist", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/management.py#L39-L58
train
BernardFW/bernard
src/bernard/platforms/management.py
PlatformManager.run_checks
async def run_checks(self):
    """
    Run checks on itself and on the FSM
    """
    async for check in self.fsm.health_check():
        yield check

    async for check in self.self_check():
        yield check

    for check in MiddlewareManager.health_check():
        yield check
python
[ "async", "def", "run_checks", "(", "self", ")", ":", "async", "for", "check", "in", "self", ".", "fsm", ".", "health_check", "(", ")", ":", "yield", "check", "async", "for", "check", "in", "self", ".", "self_check", "(", ")", ":", "yield", "check", "for", "check", "in", "MiddlewareManager", ".", "health_check", "(", ")", ":", "yield", "check" ]
Run checks on itself and on the FSM
[ "Run", "checks", "on", "itself", "and", "on", "the", "FSM" ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/management.py#L109-L121
train
BernardFW/bernard
src/bernard/platforms/management.py
PlatformManager.self_check
async def self_check(self):
    """
    Checks that the platforms configuration is all right.
    """
    platforms = set()

    for platform in get_platform_settings():
        try:
            name = platform['class']
            cls: Type[Platform] = import_class(name)
        except KeyError:
            yield HealthCheckFail(
                '00004',
                'Missing platform `class` name in configuration.'
            )
        except (AttributeError, ImportError, ValueError):
            yield HealthCheckFail(
                '00003',
                f'Platform "{name}" cannot be imported.'
            )
        else:
            if cls in platforms:
                yield HealthCheckFail(
                    '00002',
                    f'Platform "{name}" is imported more than once.'
                )

            platforms.add(cls)

            # noinspection PyTypeChecker
            async for check in cls.self_check():
                yield check
python
[ "async", "def", "self_check", "(", "self", ")", ":", "platforms", "=", "set", "(", ")", "for", "platform", "in", "get_platform_settings", "(", ")", ":", "try", ":", "name", "=", "platform", "[", "'class'", "]", "cls", ":", "Type", "[", "Platform", "]", "=", "import_class", "(", "name", ")", "except", "KeyError", ":", "yield", "HealthCheckFail", "(", "'00004'", ",", "'Missing platform `class` name in configuration.'", ")", "except", "(", "AttributeError", ",", "ImportError", ",", "ValueError", ")", ":", "yield", "HealthCheckFail", "(", "'00003'", ",", "f'Platform \"{name}\" cannot be imported.'", ")", "else", ":", "if", "cls", "in", "platforms", ":", "yield", "HealthCheckFail", "(", "'00002'", ",", "f'Platform \"{name}\" is imported more than once.'", ")", "platforms", ".", "add", "(", "cls", ")", "# noinspection PyTypeChecker", "async", "for", "check", "in", "cls", ".", "self_check", "(", ")", ":", "yield", "check" ]
Checks that the platforms configuration is all right.
[ "Checks", "that", "the", "platforms", "configuration", "is", "all", "right", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/management.py#L123-L154
train
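run_checks and self_check are async generators that yield HealthCheckFail objects instead of raising, so the caller can collect every failure in one pass. A minimal, self-contained sketch of that reporting style and how a caller drains it; the class and check logic here are stand-ins, not BERNARD's actual types.

# Minimal stand-alone sketch of the async-generator health-check style above:
# checks yield failure objects instead of raising, and the caller drains them
# with `async for`. These types are illustrative stand-ins, not BERNARD's.
import asyncio
from dataclasses import dataclass


@dataclass
class HealthCheckFail:
    code: str
    reason: str


async def self_check(platform_settings):
    seen = set()
    for platform in platform_settings:
        if 'class' not in platform:
            yield HealthCheckFail('00004', 'Missing platform `class` name.')
            continue
        if platform['class'] in seen:
            yield HealthCheckFail('00002', f"{platform['class']} declared twice.")
        seen.add(platform['class'])


async def main():
    settings = [{'class': 'x.Y'}, {'class': 'x.Y'}, {}]
    async for check in self_check(settings):
        print(check.code, check.reason)   # prints 00002, then 00004

asyncio.run(main())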
BernardFW/bernard
src/bernard/platforms/management.py
PlatformManager._index_classes
def _index_classes(self) -> Dict[Text, Type[Platform]]:
    """
    Build a name index for all platform classes
    """
    out = {}

    for p in get_platform_settings():
        cls: Type[Platform] = import_class(p['class'])

        if 'name' in p:
            out[p['name']] = cls
        else:
            out[cls.NAME] = cls

    return out
python
[ "def", "_index_classes", "(", "self", ")", "->", "Dict", "[", "Text", ",", "Type", "[", "Platform", "]", "]", ":", "out", "=", "{", "}", "for", "p", "in", "get_platform_settings", "(", ")", ":", "cls", ":", "Type", "[", "Platform", "]", "=", "import_class", "(", "p", "[", "'class'", "]", ")", "if", "'name'", "in", "p", ":", "out", "[", "p", "[", "'name'", "]", "]", "=", "cls", "else", ":", "out", "[", "cls", ".", "NAME", "]", "=", "cls", "return", "out" ]
Build a name index for all platform classes
[ "Build", "a", "name", "index", "for", "all", "platform", "classes" ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/management.py#L156-L171
train
BernardFW/bernard
src/bernard/platforms/management.py
PlatformManager.build_platform
async def build_platform(self, cls: Type[Platform], custom_id):
    """
    Build the Facebook platform. Nothing fancy.
    """
    from bernard.server.http import router

    p = cls()

    if custom_id:
        p._id = custom_id

    await p.async_init()
    p.on_message(self.fsm.handle_message)
    p.hook_up(router)

    return p
python
[ "async", "def", "build_platform", "(", "self", ",", "cls", ":", "Type", "[", "Platform", "]", ",", "custom_id", ")", ":", "from", "bernard", ".", "server", ".", "http", "import", "router", "p", "=", "cls", "(", ")", "if", "custom_id", ":", "p", ".", "_id", "=", "custom_id", "await", "p", ".", "async_init", "(", ")", "p", ".", "on_message", "(", "self", ".", "fsm", ".", "handle_message", ")", "p", ".", "hook_up", "(", "router", ")", "return", "p" ]
Build the Facebook platform. Nothing fancy.
[ "Build", "the", "Facebook", "platform", ".", "Nothing", "fancy", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/management.py#L173-L188
train
BernardFW/bernard
src/bernard/platforms/management.py
PlatformManager.get_class
def get_class(self, platform) -> Type[Platform]:
    """
    For a given platform name, gets the matching class
    """
    if platform in self._classes:
        return self._classes[platform]

    raise PlatformDoesNotExist('Platform "{}" is not in configuration'
                               .format(platform))
python
[ "def", "get_class", "(", "self", ",", "platform", ")", "->", "Type", "[", "Platform", "]", ":", "if", "platform", "in", "self", ".", "_classes", ":", "return", "self", ".", "_classes", "[", "platform", "]", "raise", "PlatformDoesNotExist", "(", "'Platform \"{}\" is not in configuration'", ".", "format", "(", "platform", ")", ")" ]
For a given platform name, gets the matching class
[ "For", "a", "given", "platform", "name", "gets", "the", "matching", "class" ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/management.py#L190-L199
train
BernardFW/bernard
src/bernard/platforms/management.py
PlatformManager.get_platform
async def get_platform(self, name: Text):
    """
    Get a valid instance of the specified platform. Do not cache this
    object, it might change with configuration changes.
    """
    if not self._is_init:
        await self.init()

    if name not in self.platforms:
        self.platforms[name] = \
            await self.build_platform(self.get_class(name), name)

    return self.platforms[name]
python
[ "async", "def", "get_platform", "(", "self", ",", "name", ":", "Text", ")", ":", "if", "not", "self", ".", "_is_init", ":", "await", "self", ".", "init", "(", ")", "if", "name", "not", "in", "self", ".", "platforms", ":", "self", ".", "platforms", "[", "name", "]", "=", "await", "self", ".", "build_platform", "(", "self", ".", "get_class", "(", "name", ")", ",", "name", ")", "return", "self", ".", "platforms", "[", "name", "]" ]
Get a valid instance of the specified platform. Do not cache this object, it might change with configuration changes.
[ "Get", "a", "valid", "instance", "of", "the", "specified", "platform", ".", "Do", "not", "cache", "this", "object", "it", "might", "change", "with", "configuration", "changes", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/management.py#L201-L214
train
BernardFW/bernard
src/bernard/platforms/management.py
PlatformManager.get_all_platforms
async def get_all_platforms(self) -> AsyncIterator[Platform]:
    """
    Returns all platform instances
    """
    for name in self._classes.keys():
        yield await self.get_platform(name)
python
[ "async", "def", "get_all_platforms", "(", "self", ")", "->", "AsyncIterator", "[", "Platform", "]", ":", "for", "name", "in", "self", ".", "_classes", ".", "keys", "(", ")", ":", "yield", "await", "self", ".", "get_platform", "(", "name", ")" ]
Returns all platform instances
[ "Returns", "all", "platform", "instances" ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/management.py#L216-L222
train
BernardFW/bernard
src/bernard/platforms/management.py
PlatformManager.message_from_token
async def message_from_token(self, token: Text, payload: Any) \
        -> Tuple[Optional[BaseMessage], Optional[Platform]]:
    """
    Given an authentication token, find the right platform that can
    recognize this token and create a message for this platform.

    The payload will be inserted into a Postback layer.
    """
    async for platform in self.get_all_platforms():
        m = await platform.message_from_token(token, payload)

        if m:
            return m, platform

    return None, None
python
[ "async", "def", "message_from_token", "(", "self", ",", "token", ":", "Text", ",", "payload", ":", "Any", ")", "->", "Tuple", "[", "Optional", "[", "BaseMessage", "]", ",", "Optional", "[", "Platform", "]", "]", ":", "async", "for", "platform", "in", "self", ".", "get_all_platforms", "(", ")", ":", "m", "=", "await", "platform", ".", "message_from_token", "(", "token", ",", "payload", ")", "if", "m", ":", "return", "m", ",", "platform", "return", "None", ",", "None" ]
Given an authentication token, find the right platform that can recognize this token and create a message for this platform. The payload will be inserted into a Postback layer.
[ "Given", "an", "authentication", "token", "find", "the", "right", "platform", "that", "can", "recognize", "this", "token", "and", "create", "a", "message", "for", "this", "platform", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/management.py#L224-L239
train
openvax/varlens
varlens/reads_util.py
add_args
def add_args(parser, positional=False):
    """
    Extends a commandline argument parser with arguments for specifying
    read sources.
    """
    group = parser.add_argument_group("read loading")
    group.add_argument("reads" if positional else "--reads",
        nargs="+", default=[],
        help="Paths to bam files. Any number of paths may be specified.")
    group.add_argument(
        "--read-source-name", nargs="+",
        help="Names for each read source. The number of names specified "
        "must match the number of bam files. If not specified, filenames are "
        "used for names.")

    # Add filters
    group = parser.add_argument_group(
        "read filtering",
        "A number of read filters are available. See the pysam "
        "documentation (http://pysam.readthedocs.org/en/latest/api.html) "
        "for details on what these fields mean. When multiple filter "
        "options are specified, reads must match *all* filters.")
    for (name, (kind, message, function)) in READ_FILTERS.items():
        extra = {}
        if kind is bool:
            extra["action"] = "store_true"
            extra["default"] = None
        elif kind is int:
            extra["type"] = int
            extra["metavar"] = "N"
        elif kind is str:
            extra["metavar"] = "STRING"
        group.add_argument("--" + name.replace("_", "-"),
            help=message,
            **extra)
python
[ "def", "add_args", "(", "parser", ",", "positional", "=", "False", ")", ":", "group", "=", "parser", ".", "add_argument_group", "(", "\"read loading\"", ")", "group", ".", "add_argument", "(", "\"reads\"", "if", "positional", "else", "\"--reads\"", ",", "nargs", "=", "\"+\"", ",", "default", "=", "[", "]", ",", "help", "=", "\"Paths to bam files. Any number of paths may be specified.\"", ")", "group", ".", "add_argument", "(", "\"--read-source-name\"", ",", "nargs", "=", "\"+\"", ",", "help", "=", "\"Names for each read source. The number of names specified \"", "\"must match the number of bam files. If not specified, filenames are \"", "\"used for names.\"", ")", "# Add filters", "group", "=", "parser", ".", "add_argument_group", "(", "\"read filtering\"", ",", "\"A number of read filters are available. See the pysam \"", "\"documentation (http://pysam.readthedocs.org/en/latest/api.html) \"", "\"for details on what these fields mean. When multiple filter \"", "\"options are specified, reads must match *all* filters.\"", ")", "for", "(", "name", ",", "(", "kind", ",", "message", ",", "function", ")", ")", "in", "READ_FILTERS", ".", "items", "(", ")", ":", "extra", "=", "{", "}", "if", "kind", "is", "bool", ":", "extra", "[", "\"action\"", "]", "=", "\"store_true\"", "extra", "[", "\"default\"", "]", "=", "None", "elif", "kind", "is", "int", ":", "extra", "[", "\"type\"", "]", "=", "int", "extra", "[", "\"metavar\"", "]", "=", "\"N\"", "elif", "kind", "is", "str", ":", "extra", "[", "\"metavar\"", "]", "=", "\"STRING\"", "group", ".", "add_argument", "(", "\"--\"", "+", "name", ".", "replace", "(", "\"_\"", ",", "\"-\"", ")", ",", "help", "=", "message", ",", "*", "*", "extra", ")" ]
Extends a commandline argument parser with arguments for specifying read sources.
[ "Extends", "a", "commandline", "argument", "parser", "with", "arguments", "for", "specifying", "read", "sources", "." ]
715d3ede5893757b2fcba4117515621bca7b1e5d
https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/reads_util.py#L104-L141
train
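READ_FILTERS maps each filter name to a (type, help message, function) triple, and add_args turns every entry into a --flag whose argparse options depend on the type. A self-contained sketch of that registry-driven pattern with a toy registry; the entries are illustrative, not varlens' real filters.

# Self-contained sketch of the registry-driven flag generation used above:
# each (kind, message, function) entry becomes an argparse option whose
# settings depend on `kind`. The registry entries are illustrative only.
import argparse

READ_FILTERS = {
    "is_duplicate": (bool, "keep only duplicate reads", None),
    "min_mapping_quality": (int, "minimum mapping quality", None),
    "read_group": (str, "keep reads in this read group", None),
}

parser = argparse.ArgumentParser()
group = parser.add_argument_group("read filtering")
for name, (kind, message, _function) in READ_FILTERS.items():
    extra = {}
    if kind is bool:
        extra["action"] = "store_true"
        extra["default"] = None        # None means "filter not requested"
    elif kind is int:
        extra["type"] = int
        extra["metavar"] = "N"
    elif kind is str:
        extra["metavar"] = "STRING"
    group.add_argument("--" + name.replace("_", "-"), help=message, **extra)

args = parser.parse_args(["--min-mapping-quality", "30"])
assert args.min_mapping_quality == 30 and args.is_duplicate is None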
openvax/varlens
varlens/reads_util.py
load_from_args
def load_from_args(args):
    """
    Given parsed commandline arguments, returns a list of ReadSource
    objects
    """
    if not args.reads:
        return None

    if args.read_source_name:
        read_source_names = util.expand(
            args.read_source_name,
            'read_source_name',
            'read source',
            len(args.reads))
    else:
        read_source_names = util.drop_prefix(args.reads)

    filters = []
    for (name, info) in READ_FILTERS.items():
        value = getattr(args, name)
        if value is not None:
            filters.append(functools.partial(info[-1], value))

    return [
        load_bam(filename, name, filters)
        for (filename, name)
        in zip(args.reads, read_source_names)
    ]
python
[ "def", "load_from_args", "(", "args", ")", ":", "if", "not", "args", ".", "reads", ":", "return", "None", "if", "args", ".", "read_source_name", ":", "read_source_names", "=", "util", ".", "expand", "(", "args", ".", "read_source_name", ",", "'read_source_name'", ",", "'read source'", ",", "len", "(", "args", ".", "reads", ")", ")", "else", ":", "read_source_names", "=", "util", ".", "drop_prefix", "(", "args", ".", "reads", ")", "filters", "=", "[", "]", "for", "(", "name", ",", "info", ")", "in", "READ_FILTERS", ".", "items", "(", ")", ":", "value", "=", "getattr", "(", "args", ",", "name", ")", "if", "value", "is", "not", "None", ":", "filters", ".", "append", "(", "functools", ".", "partial", "(", "info", "[", "-", "1", "]", ",", "value", ")", ")", "return", "[", "load_bam", "(", "filename", ",", "name", ",", "filters", ")", "for", "(", "filename", ",", "name", ")", "in", "zip", "(", "args", ".", "reads", ",", "read_source_names", ")", "]" ]
Given parsed commandline arguments, returns a list of ReadSource objects
[ "Given", "parsed", "commandline", "arguments", "returns", "a", "list", "of", "ReadSource", "objects" ]
715d3ede5893757b2fcba4117515621bca7b1e5d
https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/reads_util.py#L143-L169
train
klmitch/turnstile
turnstile/compactor.py
get_int
def get_int(config, key, default):
    """
    A helper to retrieve an integer value from a given dictionary
    containing string values.  If the requested value is not present
    in the dictionary, or if it cannot be converted to an integer, a
    default value will be returned instead.

    :param config: The dictionary containing the desired value.
    :param key: The dictionary key for the desired value.
    :param default: The default value to return, if the key isn't set
                    in the dictionary, or if the value set isn't a
                    legal integer value.

    :returns: The desired integer value.
    """
    try:
        return int(config[key])
    except (KeyError, ValueError):
        return default
python
[ "def", "get_int", "(", "config", ",", "key", ",", "default", ")", ":", "try", ":", "return", "int", "(", "config", "[", "key", "]", ")", "except", "(", "KeyError", ",", "ValueError", ")", ":", "return", "default" ]
A helper to retrieve an integer value from a given dictionary containing string values. If the requested value is not present in the dictionary, or if it cannot be converted to an integer, a default value will be returned instead. :param config: The dictionary containing the desired value. :param key: The dictionary key for the desired value. :param default: The default value to return, if the key isn't set in the dictionary, or if the value set isn't a legal integer value. :returns: The desired integer value.
[ "A", "helper", "to", "retrieve", "an", "integer", "value", "from", "a", "given", "dictionary", "containing", "string", "values", ".", "If", "the", "requested", "value", "is", "not", "present", "in", "the", "dictionary", "or", "if", "it", "cannot", "be", "converted", "to", "an", "integer", "a", "default", "value", "will", "be", "returned", "instead", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/compactor.py#L64-L83
train
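get_int's contract is easy to pin down with a few cases. A standalone copy of the function from the record above, with its three behaviors exercised:

# Standalone copy of get_int with its three behaviors exercised: value
# present and parseable, key missing, and value unparseable.
def get_int(config, key, default):
    try:
        return int(config[key])
    except (KeyError, ValueError):
        return default

config = {'max_updates': '250', 'name': 'compactor'}
assert get_int(config, 'max_updates', 0) == 250   # string parsed to int
assert get_int(config, 'missing', 10) == 10       # KeyError -> default
assert get_int(config, 'name', -1) == -1          # ValueError -> default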
klmitch/turnstile
turnstile/compactor.py
compact_bucket
def compact_bucket(db, buck_key, limit):
    """
    Perform the compaction operation.  This reads in the bucket
    information from the database, builds a compacted bucket record,
    inserts that record in the appropriate place in the database, then
    removes outdated updates.

    :param db: A database handle for the Redis database.
    :param buck_key: A turnstile.limits.BucketKey instance containing
                     the bucket key.
    :param limit: The turnstile.limits.Limit object corresponding to
                  the bucket.
    """
    # Suck in the bucket records and generate our bucket
    records = db.lrange(str(buck_key), 0, -1)
    loader = limits.BucketLoader(limit.bucket_class, db, limit,
                                 str(buck_key), records,
                                 stop_summarize=True)

    # We now have the bucket loaded in; generate a 'bucket' record
    buck_record = msgpack.dumps(dict(bucket=loader.bucket.dehydrate(),
                                     uuid=str(uuid.uuid4())))

    # Now we need to insert it into the record list
    result = db.linsert(str(buck_key), 'after', loader.last_summarize_rec,
                        buck_record)

    # Were we successful?
    if result < 0:
        # Insert failed; we'll try again when max_age is hit
        LOG.warning("Bucket compaction on %s failed; will retry" % buck_key)
        return

    # OK, we have confirmed that the compacted bucket record has been
    # inserted correctly; now all we need to do is trim off the
    # outdated update records
    db.ltrim(str(buck_key), loader.last_summarize_idx + 1, -1)
python
[ "def", "compact_bucket", "(", "db", ",", "buck_key", ",", "limit", ")", ":", "# Suck in the bucket records and generate our bucket", "records", "=", "db", ".", "lrange", "(", "str", "(", "buck_key", ")", ",", "0", ",", "-", "1", ")", "loader", "=", "limits", ".", "BucketLoader", "(", "limit", ".", "bucket_class", ",", "db", ",", "limit", ",", "str", "(", "buck_key", ")", ",", "records", ",", "stop_summarize", "=", "True", ")", "# We now have the bucket loaded in; generate a 'bucket' record", "buck_record", "=", "msgpack", ".", "dumps", "(", "dict", "(", "bucket", "=", "loader", ".", "bucket", ".", "dehydrate", "(", ")", ",", "uuid", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", ")", ")", "# Now we need to insert it into the record list", "result", "=", "db", ".", "linsert", "(", "str", "(", "buck_key", ")", ",", "'after'", ",", "loader", ".", "last_summarize_rec", ",", "buck_record", ")", "# Were we successful?", "if", "result", "<", "0", ":", "# Insert failed; we'll try again when max_age is hit", "LOG", ".", "warning", "(", "\"Bucket compaction on %s failed; will retry\"", "%", "buck_key", ")", "return", "# OK, we have confirmed that the compacted bucket record has been", "# inserted correctly; now all we need to do is trim off the", "# outdated update records", "db", ".", "ltrim", "(", "str", "(", "buck_key", ")", ",", "loader", ".", "last_summarize_idx", "+", "1", ",", "-", "1", ")" ]
Perform the compaction operation. This reads in the bucket information from the database, builds a compacted bucket record, inserts that record in the appropriate place in the database, then removes outdated updates. :param db: A database handle for the Redis database. :param buck_key: A turnstile.limits.BucketKey instance containing the bucket key. :param limit: The turnstile.limits.Limit object corresponding to the bucket.
[ "Perform", "the", "compaction", "operation", ".", "This", "reads", "in", "the", "bucket", "information", "from", "the", "database", "builds", "a", "compacted", "bucket", "record", "inserts", "that", "record", "in", "the", "appropriate", "place", "in", "the", "database", "then", "removes", "outdated", "updates", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/compactor.py#L379-L415
train
klmitch/turnstile
turnstile/compactor.py
compactor
def compactor(conf):
    """
    The compactor daemon.  This fuction watches the sorted set
    containing bucket keys that need to be compacted, performing the
    necessary compaction.

    :param conf: A turnstile.config.Config instance containing the
                 configuration for the compactor daemon.  Note that a
                 ControlDaemon is also started, so appropriate
                 configuration for that must also be present, as must
                 appropriate Redis connection information.
    """
    # Get the database handle
    db = conf.get_database('compactor')

    # Get the limits container
    limit_map = LimitContainer(conf, db)

    # Get the compactor configuration
    config = conf['compactor']

    # Make sure compaction is enabled
    if get_int(config, 'max_updates', 0) <= 0:
        # We'll just warn about it, since they could be running
        # the compactor with a different configuration file
        LOG.warning("Compaction is not enabled. Enable it by "
                    "setting a positive integer value for "
                    "'compactor.max_updates' in the configuration.")

    # Select the bucket key getter
    key_getter = GetBucketKey.factory(config, db)

    LOG.info("Compactor initialized")

    # Now enter our loop
    while True:
        # Get a bucket key to compact
        try:
            buck_key = limits.BucketKey.decode(key_getter())
        except ValueError as exc:
            # Warn about invalid bucket keys
            LOG.warning("Error interpreting bucket key: %s" % exc)
            continue

        # Ignore version 1 keys--they can't be compacted
        if buck_key.version < 2:
            continue

        # Get the corresponding limit class
        try:
            limit = limit_map[buck_key.uuid]
        except KeyError:
            # Warn about missing limits
            LOG.warning("Unable to compact bucket for limit %s" %
                        buck_key.uuid)
            continue

        LOG.debug("Compacting bucket %s" % buck_key)

        # OK, we now have the limit (which we really only need for
        # the bucket class); let's compact the bucket
        try:
            compact_bucket(db, buck_key, limit)
        except Exception:
            LOG.exception("Failed to compact bucket %s" % buck_key)
        else:
            LOG.debug("Finished compacting bucket %s" % buck_key)
python
[ "def", "compactor", "(", "conf", ")", ":", "# Get the database handle", "db", "=", "conf", ".", "get_database", "(", "'compactor'", ")", "# Get the limits container", "limit_map", "=", "LimitContainer", "(", "conf", ",", "db", ")", "# Get the compactor configuration", "config", "=", "conf", "[", "'compactor'", "]", "# Make sure compaction is enabled", "if", "get_int", "(", "config", ",", "'max_updates'", ",", "0", ")", "<=", "0", ":", "# We'll just warn about it, since they could be running", "# the compactor with a different configuration file", "LOG", ".", "warning", "(", "\"Compaction is not enabled. Enable it by \"", "\"setting a positive integer value for \"", "\"'compactor.max_updates' in the configuration.\"", ")", "# Select the bucket key getter", "key_getter", "=", "GetBucketKey", ".", "factory", "(", "config", ",", "db", ")", "LOG", ".", "info", "(", "\"Compactor initialized\"", ")", "# Now enter our loop", "while", "True", ":", "# Get a bucket key to compact", "try", ":", "buck_key", "=", "limits", ".", "BucketKey", ".", "decode", "(", "key_getter", "(", ")", ")", "except", "ValueError", "as", "exc", ":", "# Warn about invalid bucket keys", "LOG", ".", "warning", "(", "\"Error interpreting bucket key: %s\"", "%", "exc", ")", "continue", "# Ignore version 1 keys--they can't be compacted", "if", "buck_key", ".", "version", "<", "2", ":", "continue", "# Get the corresponding limit class", "try", ":", "limit", "=", "limit_map", "[", "buck_key", ".", "uuid", "]", "except", "KeyError", ":", "# Warn about missing limits", "LOG", ".", "warning", "(", "\"Unable to compact bucket for limit %s\"", "%", "buck_key", ".", "uuid", ")", "continue", "LOG", ".", "debug", "(", "\"Compacting bucket %s\"", "%", "buck_key", ")", "# OK, we now have the limit (which we really only need for", "# the bucket class); let's compact the bucket", "try", ":", "compact_bucket", "(", "db", ",", "buck_key", ",", "limit", ")", "except", "Exception", ":", "LOG", ".", "exception", "(", "\"Failed to compact bucket %s\"", "%", "buck_key", ")", "else", ":", "LOG", ".", "debug", "(", "\"Finished compacting bucket %s\"", "%", "buck_key", ")" ]
The compactor daemon. This fuction watches the sorted set containing bucket keys that need to be compacted, performing the necessary compaction. :param conf: A turnstile.config.Config instance containing the configuration for the compactor daemon. Note that a ControlDaemon is also started, so appropriate configuration for that must also be present, as must appropriate Redis connection information.
[ "The", "compactor", "daemon", ".", "This", "fuction", "watches", "the", "sorted", "set", "containing", "bucket", "keys", "that", "need", "to", "be", "compacted", "performing", "the", "necessary", "compaction", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/compactor.py#L418-L485
train
klmitch/turnstile
turnstile/compactor.py
GetBucketKey.factory
def factory(cls, config, db):
    """
    Given a configuration and database, select and return an
    appropriate instance of a subclass of GetBucketKey.  This will
    ensure that both client and server support are available for the
    Lua script feature of Redis, and if not, a lock will be used.

    :param config: A dictionary of compactor options.
    :param db: A database handle for the Redis database.

    :returns: An instance of a subclass of GetBucketKey, dependent on
              the support for the Lua script feature of Redis.
    """
    # Make sure that the client supports register_script()
    if not hasattr(db, 'register_script'):
        LOG.debug("Redis client does not support register_script()")
        return GetBucketKeyByLock(config, db)

    # OK, the client supports register_script(); what about the
    # server?
    info = db.info()
    if version_greater('2.6', info['redis_version']):
        LOG.debug("Redis server supports register_script()")
        return GetBucketKeyByScript(config, db)

    # OK, use our fallback...
    LOG.debug("Redis server does not support register_script()")
    return GetBucketKeyByLock(config, db)
python
[ "def", "factory", "(", "cls", ",", "config", ",", "db", ")", ":", "# Make sure that the client supports register_script()", "if", "not", "hasattr", "(", "db", ",", "'register_script'", ")", ":", "LOG", ".", "debug", "(", "\"Redis client does not support register_script()\"", ")", "return", "GetBucketKeyByLock", "(", "config", ",", "db", ")", "# OK, the client supports register_script(); what about the", "# server?", "info", "=", "db", ".", "info", "(", ")", "if", "version_greater", "(", "'2.6'", ",", "info", "[", "'redis_version'", "]", ")", ":", "LOG", ".", "debug", "(", "\"Redis server supports register_script()\"", ")", "return", "GetBucketKeyByScript", "(", "config", ",", "db", ")", "# OK, use our fallback...", "LOG", ".", "debug", "(", "\"Redis server does not support register_script()\"", ")", "return", "GetBucketKeyByLock", "(", "config", ",", "db", ")" ]
Given a configuration and database, select and return an appropriate instance of a subclass of GetBucketKey. This will ensure that both client and server support are available for the Lua script feature of Redis, and if not, a lock will be used. :param config: A dictionary of compactor options. :param db: A database handle for the Redis database. :returns: An instance of a subclass of GetBucketKey, dependent on the support for the Lua script feature of Redis.
[ "Given", "a", "configuration", "and", "database", "select", "and", "return", "an", "appropriate", "instance", "of", "a", "subclass", "of", "GetBucketKey", ".", "This", "will", "ensure", "that", "both", "client", "and", "server", "support", "are", "available", "for", "the", "Lua", "script", "feature", "of", "Redis", "and", "if", "not", "a", "lock", "will", "be", "used", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/compactor.py#L99-L128
train
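factory picks a strategy by probing the client for a capability (hasattr on the handle) and the server for a version, falling back to the lock-based implementation otherwise. A self-contained sketch of that probe-then-fallback selection; `version_at_least` below is a local helper written for this sketch, not turnstile's `version_greater`, whose argument semantics the record does not spell out.

# Self-contained sketch of the probe-then-fallback strategy selection used by
# factory(): check the client for a capability, then the server version, and
# fall back to the lock-based path otherwise. `version_at_least` is a local
# helper for this sketch, not turnstile's version_greater.
def version_at_least(minimum, actual):
    def parse(v):
        return tuple(int(p) for p in v.split('.'))
    return parse(actual) >= parse(minimum)


def pick_key_getter(db):
    if not hasattr(db, 'register_script'):   # old client library
        return 'lock'
    info = db.info()                         # standard redis-py INFO call
    if version_at_least('2.6', info['redis_version']):  # Lua scripting needs Redis >= 2.6
        return 'script'
    return 'lock'                            # old server: fall back to the lock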
klmitch/turnstile
turnstile/compactor.py
GetBucketKeyByLock.get
def get(self, now):
    """
    Get a bucket key to compact.  If none are available, returns
    None.  This uses a configured lock to ensure that the bucket key
    is popped off the sorted set in an atomic fashion.

    :param now: The current time, as a float.  Used to ensure the
                bucket key has been aged sufficiently to be
                quiescent.

    :returns: A bucket key ready for compaction, or None if no bucket
              keys are available or none have aged sufficiently.
    """
    with self.lock:
        items = self.db.zrangebyscore(self.key, 0, now - self.min_age,
                                      start=0, num=1)

        # Did we get any items?
        if not items:
            return None

        # Drop the item we got
        item = items[0]
        self.db.zrem(item)

        return item
python
[ "def", "get", "(", "self", ",", "now", ")", ":", "with", "self", ".", "lock", ":", "items", "=", "self", ".", "db", ".", "zrangebyscore", "(", "self", ".", "key", ",", "0", ",", "now", "-", "self", ".", "min_age", ",", "start", "=", "0", ",", "num", "=", "1", ")", "# Did we get any items?", "if", "not", "items", ":", "return", "None", "# Drop the item we got", "item", "=", "items", "[", "0", "]", "self", ".", "db", ".", "zrem", "(", "item", ")", "return", "item" ]
Get a bucket key to compact. If none are available, returns None. This uses a configured lock to ensure that the bucket key is popped off the sorted set in an atomic fashion. :param now: The current time, as a float. Used to ensure the bucket key has been aged sufficiently to be quiescent. :returns: A bucket key ready for compaction, or None if no bucket keys are available or none have aged sufficiently.
[ "Get", "a", "bucket", "key", "to", "compact", ".", "If", "none", "are", "available", "returns", "None", ".", "This", "uses", "a", "configured", "lock", "to", "ensure", "that", "the", "bucket", "key", "is", "popped", "off", "the", "sorted", "set", "in", "an", "atomic", "fashion", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/compactor.py#L210-L236
train
klmitch/turnstile
turnstile/compactor.py
GetBucketKeyByScript.get
def get(self, now):
    """
    Get a bucket key to compact.  If none are available, returns
    None.  This uses a Lua script to ensure that the bucket key is
    popped off the sorted set in an atomic fashion.

    :param now: The current time, as a float.  Used to ensure the
                bucket key has been aged sufficiently to be
                quiescent.

    :returns: A bucket key ready for compaction, or None if no bucket
              keys are available or none have aged sufficiently.
    """
    items = self.script(keys=[self.key], args=[now - self.min_age])

    return items[0] if items else None
python
[ "def", "get", "(", "self", ",", "now", ")", ":", "items", "=", "self", ".", "script", "(", "keys", "=", "[", "self", ".", "key", "]", ",", "args", "=", "[", "now", "-", "self", ".", "min_age", "]", ")", "return", "items", "[", "0", "]", "if", "items", "else", "None" ]
Get a bucket key to compact. If none are available, returns None. This uses a Lua script to ensure that the bucket key is popped off the sorted set in an atomic fashion. :param now: The current time, as a float. Used to ensure the bucket key has been aged sufficiently to be quiescent. :returns: A bucket key ready for compaction, or None if no bucket keys are available or none have aged sufficiently.
[ "Get", "a", "bucket", "key", "to", "compact", ".", "If", "none", "are", "available", "returns", "None", ".", "This", "uses", "a", "Lua", "script", "to", "ensure", "that", "the", "bucket", "key", "is", "popped", "off", "the", "sorted", "set", "in", "an", "atomic", "fashion", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/compactor.py#L265-L281
train
frostming/marko
marko/inline_parser.py
parse
def parse(text, elements, fallback):
    """Parse given text and produce a list of inline elements.

    :param text: the text to be parsed.
    :param elements: the element types to be included in parsing
    :param fallback: fallback class when no other element type is matched.
    """
    # this is a raw list of elements that may contain overlaps.
    tokens = []
    for etype in elements:
        for match in etype.find(text):
            tokens.append(Token(etype, match, text, fallback))
    tokens.sort()
    tokens = _resolve_overlap(tokens)
    return make_elements(tokens, text, fallback=fallback)
python
[ "def", "parse", "(", "text", ",", "elements", ",", "fallback", ")", ":", "# this is a raw list of elements that may contain overlaps.", "tokens", "=", "[", "]", "for", "etype", "in", "elements", ":", "for", "match", "in", "etype", ".", "find", "(", "text", ")", ":", "tokens", ".", "append", "(", "Token", "(", "etype", ",", "match", ",", "text", ",", "fallback", ")", ")", "tokens", ".", "sort", "(", ")", "tokens", "=", "_resolve_overlap", "(", "tokens", ")", "return", "make_elements", "(", "tokens", ",", "text", ",", "fallback", "=", "fallback", ")" ]
Parse given text and produce a list of inline elements. :param text: the text to be parsed. :param elements: the element types to be included in parsing :param fallback: fallback class when no other element type is matched.
[ "Parse", "given", "text", "and", "produce", "a", "list", "of", "inline", "elements", "." ]
1cd030b665fa37bad1f8b3a25a89ce1a7c491dde
https://github.com/frostming/marko/blob/1cd030b665fa37bad1f8b3a25a89ce1a7c491dde/marko/inline_parser.py#L12-L26
train
frostming/marko
marko/inline_parser.py
make_elements
def make_elements(tokens, text, start=0, end=None, fallback=None): """Make elements from a list of parsed tokens. It will turn all unmatched holes into fallback elements. :param tokens: a list of parsed tokens. :param text: the original text. :param start: the offset of where parsing starts. Defaults to the start of text. :param end: the offset of where parsing ends. Defaults to the end of text. :param fallback: fallback element type. :returns: a list of inline elements. """ result = [] end = end or len(text) prev_end = start for token in tokens: if prev_end < token.start: result.append(fallback(text[prev_end:token.start])) result.append(token.as_element()) prev_end = token.end if prev_end < end: result.append(fallback(text[prev_end:end])) return result
python
def make_elements(tokens, text, start=0, end=None, fallback=None): """Make elements from a list of parsed tokens. It will turn all unmatched holes into fallback elements. :param tokens: a list of parsed tokens. :param text: the original text. :param start: the offset of where parsing starts. Defaults to the start of text. :param end: the offset of where parsing ends. Defaults to the end of text. :param fallback: fallback element type. :returns: a list of inline elements. """ result = [] end = end or len(text) prev_end = start for token in tokens: if prev_end < token.start: result.append(fallback(text[prev_end:token.start])) result.append(token.as_element()) prev_end = token.end if prev_end < end: result.append(fallback(text[prev_end:end])) return result
[ "def", "make_elements", "(", "tokens", ",", "text", ",", "start", "=", "0", ",", "end", "=", "None", ",", "fallback", "=", "None", ")", ":", "result", "=", "[", "]", "end", "=", "end", "or", "len", "(", "text", ")", "prev_end", "=", "start", "for", "token", "in", "tokens", ":", "if", "prev_end", "<", "token", ".", "start", ":", "result", ".", "append", "(", "fallback", "(", "text", "[", "prev_end", ":", "token", ".", "start", "]", ")", ")", "result", ".", "append", "(", "token", ".", "as_element", "(", ")", ")", "prev_end", "=", "token", ".", "end", "if", "prev_end", "<", "end", ":", "result", ".", "append", "(", "fallback", "(", "text", "[", "prev_end", ":", "end", "]", ")", ")", "return", "result" ]
Make elements from a list of parsed tokens. It will turn all unmatched holes into fallback elements. :param tokens: a list of parsed tokens. :param text: the original text. :param start: the offset of where parsing starts. Defaults to the start of text. :param end: the offset of where parsing ends. Defaults to the end of text. :param fallback: fallback element type. :returns: a list of inline elements.
[ "Make", "elements", "from", "a", "list", "of", "parsed", "tokens", ".", "It", "will", "turn", "all", "unmatched", "holes", "into", "fallback", "elements", "." ]
1cd030b665fa37bad1f8b3a25a89ce1a7c491dde
https://github.com/frostming/marko/blob/1cd030b665fa37bad1f8b3a25a89ce1a7c491dde/marko/inline_parser.py#L47-L68
train
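A self-contained sketch of the gap-filling logic in make_elements, with (start, end, label) tuples standing in for Token objects and a tagging lambda as the fallback:

text = "see *bold* and [link] here"
tokens = [(4, 10, "EMPH"), (15, 21, "LINK")]  # sorted, non-overlapping spans

def fill_gaps(tokens, text, fallback=lambda s: ("RAW", s)):
    result, prev_end = [], 0
    for start, end, label in tokens:
        if prev_end < start:                      # unmatched hole before the token
            result.append(fallback(text[prev_end:start]))
        result.append((label, text[start:end]))  # the matched element itself
        prev_end = end
    if prev_end < len(text):                      # trailing hole
        result.append(fallback(text[prev_end:]))
    return result

print(fill_gaps(tokens, text))
# [('RAW', 'see '), ('EMPH', '*bold*'), ('RAW', ' and '), ('LINK', '[link]'), ('RAW', ' here')]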
azogue/i2csense
i2csense/__main__.py
main_cli
def main_cli(): """Minimal CLI interface.""" # Get params args = _cli_argument_parser() delta_secs = args.delay i2cbus = args.bus i2c_address = args.address sensor_key = args.sensor sensor_params = args.params params = {} if sensor_params: def _parse_param(str_param): key, value = str_param.split('=') try: value = int(value) except ValueError: pass return {key.strip(): value} [params.update(_parse_param(sp)) for sp in sensor_params] if sensor_key: from time import sleep # Bus init try: # noinspection PyUnresolvedReferences import smbus bus_handler = smbus.SMBus(i2cbus) except ImportError as exc: print(exc, "\n", "Please install smbus-cffi before.") sys.exit(-1) # Sensor selection try: sensor_handler, i2c_default_address = SENSORS[sensor_key] except KeyError: print("'%s' is not recognized as an implemented i2c sensor." % sensor_key) sys.exit(-1) if i2c_address: i2c_address = hex(int(i2c_address, 0)) else: i2c_address = i2c_default_address # Sensor init sensor = sensor_handler(bus_handler, i2c_address, **params) # Infinite loop try: while True: sensor.update() if not sensor.sample_ok: print("An error has occurred.") break print(sensor.current_state_str) sleep(delta_secs) except KeyboardInterrupt: print("Bye!") else: # Run detection mode from subprocess import check_output cmd = '/usr/sbin/i2cdetect -y {}'.format(i2cbus) try: output = check_output(cmd.split()) print("Running i2cdetect utility in i2c bus {}:\n" "The command '{}' has returned:\n{}" .format(i2cbus, cmd, output.decode())) except FileNotFoundError: print("Please install i2cdetect before.") sys.exit(-1) # Parse output addresses = ['0x' + l for line in output.decode().splitlines()[1:] for l in line.split()[1:] if l != '--'] if addresses: print("{} sensors detected in {}" .format(len(addresses), ', '.join(addresses))) else: print("No i2c sensors detected.")
python
def main_cli(): """Minimal CLI interface.""" # Get params args = _cli_argument_parser() delta_secs = args.delay i2cbus = args.bus i2c_address = args.address sensor_key = args.sensor sensor_params = args.params params = {} if sensor_params: def _parse_param(str_param): key, value = str_param.split('=') try: value = int(value) except ValueError: pass return {key.strip(): value} [params.update(_parse_param(sp)) for sp in sensor_params] if sensor_key: from time import sleep # Bus init try: # noinspection PyUnresolvedReferences import smbus bus_handler = smbus.SMBus(i2cbus) except ImportError as exc: print(exc, "\n", "Please install smbus-cffi before.") sys.exit(-1) # Sensor selection try: sensor_handler, i2c_default_address = SENSORS[sensor_key] except KeyError: print("'%s' is not recognized as an implemented i2c sensor." % sensor_key) sys.exit(-1) if i2c_address: i2c_address = hex(int(i2c_address, 0)) else: i2c_address = i2c_default_address # Sensor init sensor = sensor_handler(bus_handler, i2c_address, **params) # Infinite loop try: while True: sensor.update() if not sensor.sample_ok: print("An error has occurred.") break print(sensor.current_state_str) sleep(delta_secs) except KeyboardInterrupt: print("Bye!") else: # Run detection mode from subprocess import check_output cmd = '/usr/sbin/i2cdetect -y {}'.format(i2cbus) try: output = check_output(cmd.split()) print("Running i2cdetect utility in i2c bus {}:\n" "The command '{}' has returned:\n{}" .format(i2cbus, cmd, output.decode())) except FileNotFoundError: print("Please install i2cdetect before.") sys.exit(-1) # Parse output addresses = ['0x' + l for line in output.decode().splitlines()[1:] for l in line.split()[1:] if l != '--'] if addresses: print("{} sensors detected in {}" .format(len(addresses), ', '.join(addresses))) else: print("No i2c sensors detected.")
[ "def", "main_cli", "(", ")", ":", "# Get params", "args", "=", "_cli_argument_parser", "(", ")", "delta_secs", "=", "args", ".", "delay", "i2cbus", "=", "args", ".", "bus", "i2c_address", "=", "args", ".", "address", "sensor_key", "=", "args", ".", "sensor", "sensor_params", "=", "args", ".", "params", "params", "=", "{", "}", "if", "sensor_params", ":", "def", "_parse_param", "(", "str_param", ")", ":", "key", ",", "value", "=", "str_param", ".", "split", "(", "'='", ")", "try", ":", "value", "=", "int", "(", "value", ")", "except", "ValueError", ":", "pass", "return", "{", "key", ".", "strip", "(", ")", ":", "value", "}", "[", "params", ".", "update", "(", "_parse_param", "(", "sp", ")", ")", "for", "sp", "in", "sensor_params", "]", "if", "sensor_key", ":", "from", "time", "import", "sleep", "# Bus init", "try", ":", "# noinspection PyUnresolvedReferences", "import", "smbus", "bus_handler", "=", "smbus", ".", "SMBus", "(", "i2cbus", ")", "except", "ImportError", "as", "exc", ":", "print", "(", "exc", ",", "\"\\n\"", ",", "\"Please install smbus-cffi before.\"", ")", "sys", ".", "exit", "(", "-", "1", ")", "# Sensor selection", "try", ":", "sensor_handler", ",", "i2c_default_address", "=", "SENSORS", "[", "sensor_key", "]", "except", "KeyError", ":", "print", "(", "\"'%s' is not recognized as an implemented i2c sensor.\"", "%", "sensor_key", ")", "sys", ".", "exit", "(", "-", "1", ")", "if", "i2c_address", ":", "i2c_address", "=", "hex", "(", "int", "(", "i2c_address", ",", "0", ")", ")", "else", ":", "i2c_address", "=", "i2c_default_address", "# Sensor init", "sensor", "=", "sensor_handler", "(", "bus_handler", ",", "i2c_address", ",", "*", "*", "params", ")", "# Infinite loop", "try", ":", "while", "True", ":", "sensor", ".", "update", "(", ")", "if", "not", "sensor", ".", "sample_ok", ":", "print", "(", "\"An error has occured.\"", ")", "break", "print", "(", "sensor", ".", "current_state_str", ")", "sleep", "(", "delta_secs", ")", "except", "KeyboardInterrupt", ":", "print", "(", "\"Bye!\"", ")", "else", ":", "# Run detection mode", "from", "subprocess", "import", "check_output", "cmd", "=", "'/usr/sbin/i2cdetect -y {}'", ".", "format", "(", "i2cbus", ")", "try", ":", "output", "=", "check_output", "(", "cmd", ".", "split", "(", ")", ")", "print", "(", "\"Running i2cdetect utility in i2c bus {}:\\n\"", "\"The command '{}' has returned:\\n{}\"", ".", "format", "(", "i2cbus", ",", "cmd", ",", "output", ".", "decode", "(", ")", ")", ")", "except", "FileNotFoundError", ":", "print", "(", "\"Please install i2cdetect before.\"", ")", "sys", ".", "exit", "(", "-", "1", ")", "# Parse output", "addresses", "=", "[", "'0x'", "+", "l", "for", "line", "in", "output", ".", "decode", "(", ")", ".", "splitlines", "(", ")", "[", "1", ":", "]", "for", "l", "in", "line", ".", "split", "(", ")", "[", "1", ":", "]", "if", "l", "!=", "'--'", "]", "if", "addresses", ":", "print", "(", "\"{} sensors detected in {}\"", ".", "format", "(", "len", "(", "addresses", ")", ",", "', '", ".", "join", "(", "addresses", ")", ")", ")", "else", ":", "print", "(", "\"No i2c sensors detected.\"", ")" ]
Minimal CLI interface.
[ "CLI", "minimal", "interface", "." ]
ecc6806dcee9de827a5414a9e836d271fedca9b9
https://github.com/azogue/i2csense/blob/ecc6806dcee9de827a5414a9e836d271fedca9b9/i2csense/__main__.py#L60-L139
train
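The key=value parameter handling in main_cli, isolated into a runnable sketch (the argument strings are hypothetical). Note the original drives _parse_param with a list comprehension used only for its side effect; a plain for loop, as below, is the more idiomatic spelling:

def parse_param(str_param):
    key, value = str_param.split('=')
    try:
        value = int(value)  # keep the string when it is not an integer
    except ValueError:
        pass
    return {key.strip(): value}

params = {}
for sp in ['delay=5', 'mode=fast']:
    params.update(parse_param(sp))
print(params)  # {'delay': 5, 'mode': 'fast'}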
BernardFW/bernard
examples/number_bot/src/number_bot/settings.py
extract_domain
def extract_domain(var_name, output): """ Extracts just the domain name from a URL and adds it to a list """ var = getenv(var_name) if var: p = urlparse(var) output.append(p.hostname)
python
def extract_domain(var_name, output): """ Extracts just the domain name from a URL and adds it to a list """ var = getenv(var_name) if var: p = urlparse(var) output.append(p.hostname)
[ "def", "extract_domain", "(", "var_name", ",", "output", ")", ":", "var", "=", "getenv", "(", "var_name", ")", "if", "var", ":", "p", "=", "urlparse", "(", "var", ")", "output", ".", "append", "(", "p", ".", "hostname", ")" ]
Extracts just the domain name from a URL and adds it to a list
[ "Extracts", "just", "the", "domain", "name", "from", "an", "URL", "and", "adds", "it", "to", "a", "list" ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/examples/number_bot/src/number_bot/settings.py#L11-L20
train
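A quick illustration of the extraction; the variable name and URL are hypothetical, and urlparse comes from urllib.parse on Python 3:

import os
from urllib.parse import urlparse

os.environ['API_URL'] = 'https://api.example.com:8080/v1'
domains = []
var = os.getenv('API_URL')
if var:
    domains.append(urlparse(var).hostname)  # hostname drops scheme, port, path
print(domains)  # ['api.example.com']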
gesellkammer/sndfileio
sndfileio/util.py
numchannels
def numchannels(samples:np.ndarray) -> int: """ return the number of channels present in samples samples: a numpy array as returned by sndread for multichannel audio, samples is always interleaved, meaning that samples[n] always returns a frame, which is either a single scalar for mono audio, or an array for multichannel audio. """ if len(samples.shape) == 1: return 1 else: return samples.shape[1]
python
def numchannels(samples:np.ndarray) -> int: """ return the number of channels present in samples samples: a numpy array as returned by sndread for multichannel audio, samples is always interleaved, meaning that samples[n] always returns a frame, which is either a single scalar for mono audio, or an array for multichannel audio. """ if len(samples.shape) == 1: return 1 else: return samples.shape[1]
[ "def", "numchannels", "(", "samples", ":", "np", ".", "ndarray", ")", "->", "int", ":", "if", "len", "(", "samples", ".", "shape", ")", "==", "1", ":", "return", "1", "else", ":", "return", "samples", ".", "shape", "[", "1", "]" ]
return the number of channels present in samples samples: a numpy array as returned by sndread for multichannel audio, samples is always interleaved, meaning that samples[n] always returns a frame, which is either a single scalar for mono audio, or an array for multichannel audio.
[ "return", "the", "number", "of", "channels", "present", "in", "samples" ]
8e2b264cadb652f09d2e775f54090c0a3cb2ced2
https://github.com/gesellkammer/sndfileio/blob/8e2b264cadb652f09d2e775f54090c0a3cb2ced2/sndfileio/util.py#L5-L19
train
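The shape convention is easy to verify; a minimal restatement of the function against mono and stereo arrays:

import numpy as np

def numchannels(samples: np.ndarray) -> int:
    return 1 if len(samples.shape) == 1 else samples.shape[1]

mono = np.zeros(1024)         # shape (1024,): each frame is a scalar
stereo = np.zeros((1024, 2))  # shape (1024, 2): each frame is a 2-element array
print(numchannels(mono), numchannels(stereo))  # 1 2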
klmitch/turnstile
turnstile/middleware.py
turnstile_filter
def turnstile_filter(global_conf, **local_conf): """ Factory function for turnstile. Returns a function which, when passed the application, returns an instance of the TurnstileMiddleware. """ # Select the appropriate middleware class to return klass = TurnstileMiddleware if 'turnstile' in local_conf: klass = utils.find_entrypoint('turnstile.middleware', local_conf['turnstile'], required=True) def wrapper(app): return klass(app, local_conf) return wrapper
python
def turnstile_filter(global_conf, **local_conf): """ Factory function for turnstile. Returns a function which, when passed the application, returns an instance of the TurnstileMiddleware. """ # Select the appropriate middleware class to return klass = TurnstileMiddleware if 'turnstile' in local_conf: klass = utils.find_entrypoint('turnstile.middleware', local_conf['turnstile'], required=True) def wrapper(app): return klass(app, local_conf) return wrapper
[ "def", "turnstile_filter", "(", "global_conf", ",", "*", "*", "local_conf", ")", ":", "# Select the appropriate middleware class to return", "klass", "=", "TurnstileMiddleware", "if", "'turnstile'", "in", "local_conf", ":", "klass", "=", "utils", ".", "find_entrypoint", "(", "'turnstile.middleware'", ",", "local_conf", "[", "'turnstile'", "]", ",", "required", "=", "True", ")", "def", "wrapper", "(", "app", ")", ":", "return", "klass", "(", "app", ",", "local_conf", ")", "return", "wrapper" ]
Factory function for turnstile. Returns a function which, when passed the application, returns an instance of the TurnstileMiddleware.
[ "Factory", "function", "for", "turnstile", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/middleware.py#L133-L150
train
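The shape here (config in, app-wrapper out) is the standard PasteDeploy filter-factory pattern. A self-contained toy factory, unrelated to turnstile's real classes, showing why the two-step call works:

def logging_filter(global_conf, **local_conf):
    prefix = local_conf.get('prefix', '[mw]')  # hypothetical option

    def wrapper(app):
        def middleware(environ, start_response):
            print(prefix, environ.get('PATH_INFO'))
            return app(environ, start_response)
        return middleware
    return wrapper

def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

wrapped = logging_filter({}, prefix='[demo]')(app)  # factory first, then the app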
klmitch/turnstile
turnstile/middleware.py
TurnstileMiddleware.format_delay
def format_delay(self, delay, limit, bucket, environ, start_response): """ Formats the over-limit response for the request. May be overridden in subclasses to allow alternate responses. """ # Set up the default status status = self.conf.status # Set up the retry-after header... headers = HeadersDict([('Retry-After', "%d" % math.ceil(delay))]) # Let format fiddle with the headers status, entity = limit.format(status, headers, environ, bucket, delay) # Return the response start_response(status, headers.items()) return entity
python
def format_delay(self, delay, limit, bucket, environ, start_response): """ Formats the over-limit response for the request. May be overridden in subclasses to allow alternate responses. """ # Set up the default status status = self.conf.status # Set up the retry-after header... headers = HeadersDict([('Retry-After', "%d" % math.ceil(delay))]) # Let format fiddle with the headers status, entity = limit.format(status, headers, environ, bucket, delay) # Return the response start_response(status, headers.items()) return entity
[ "def", "format_delay", "(", "self", ",", "delay", ",", "limit", ",", "bucket", ",", "environ", ",", "start_response", ")", ":", "# Set up the default status", "status", "=", "self", ".", "conf", ".", "status", "# Set up the retry-after header...", "headers", "=", "HeadersDict", "(", "[", "(", "'Retry-After'", ",", "\"%d\"", "%", "math", ".", "ceil", "(", "delay", ")", ")", "]", ")", "# Let format fiddle with the headers", "status", ",", "entity", "=", "limit", ".", "format", "(", "status", ",", "headers", ",", "environ", ",", "bucket", ",", "delay", ")", "# Return the response", "start_response", "(", "status", ",", "headers", ".", "items", "(", ")", ")", "return", "entity" ]
Formats the over-limit response for the request. May be overridden in subclasses to allow alternate responses.
[ "Formats", "the", "over", "-", "limit", "response", "for", "the", "request", ".", "May", "be", "overridden", "in", "subclasses", "to", "allow", "alternate", "responses", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/middleware.py#L337-L355
train
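The Retry-After value rounds the delay up so a compliant client never retries early; in isolation:

import math

delay = 12.3  # hypothetical seconds until the bucket frees up
headers = [('Retry-After', '%d' % math.ceil(delay))]
print(headers)  # [('Retry-After', '13')]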
klmitch/turnstile
turnstile/utils.py
find_entrypoint
def find_entrypoint(group, name, compat=True, required=False): """ Finds the first available entrypoint with the given name in the given group. :param group: The entrypoint group the name can be found in. If None, the name is not presumed to be an entrypoint. :param name: The name of the entrypoint. :param compat: If True, and if the name parameter contains a ':', the name will be interpreted as a module name and an object name, separated by a colon. This is provided for compatibility. :param required: If True, and no corresponding entrypoint can be found, an ImportError will be raised. If False (the default), None will be returned instead. :returns: The entrypoint object, or None if one could not be loaded. """ if group is None or (compat and ':' in name): try: return pkg_resources.EntryPoint.parse("x=" + name).load(False) except (ImportError, pkg_resources.UnknownExtra) as exc: pass else: for ep in pkg_resources.iter_entry_points(group, name): try: # Load and return the object return ep.load() except (ImportError, pkg_resources.UnknownExtra): # Couldn't load it; try the next one continue # Raise an ImportError if requested if required: raise ImportError("Cannot import %r entrypoint %r" % (group, name)) # Couldn't find one... return None
python
def find_entrypoint(group, name, compat=True, required=False): """ Finds the first available entrypoint with the given name in the given group. :param group: The entrypoint group the name can be found in. If None, the name is not presumed to be an entrypoint. :param name: The name of the entrypoint. :param compat: If True, and if the name parameter contains a ':', the name will be interpreted as a module name and an object name, separated by a colon. This is provided for compatibility. :param required: If True, and no corresponding entrypoint can be found, an ImportError will be raised. If False (the default), None will be returned instead. :returns: The entrypoint object, or None if one could not be loaded. """ if group is None or (compat and ':' in name): try: return pkg_resources.EntryPoint.parse("x=" + name).load(False) except (ImportError, pkg_resources.UnknownExtra) as exc: pass else: for ep in pkg_resources.iter_entry_points(group, name): try: # Load and return the object return ep.load() except (ImportError, pkg_resources.UnknownExtra): # Couldn't load it; try the next one continue # Raise an ImportError if requested if required: raise ImportError("Cannot import %r entrypoint %r" % (group, name)) # Couldn't find one... return None
[ "def", "find_entrypoint", "(", "group", ",", "name", ",", "compat", "=", "True", ",", "required", "=", "False", ")", ":", "if", "group", "is", "None", "or", "(", "compat", "and", "':'", "in", "name", ")", ":", "try", ":", "return", "pkg_resources", ".", "EntryPoint", ".", "parse", "(", "\"x=\"", "+", "name", ")", ".", "load", "(", "False", ")", "except", "(", "ImportError", ",", "pkg_resources", ".", "UnknownExtra", ")", "as", "exc", ":", "pass", "else", ":", "for", "ep", "in", "pkg_resources", ".", "iter_entry_points", "(", "group", ",", "name", ")", ":", "try", ":", "# Load and return the object", "return", "ep", ".", "load", "(", ")", "except", "(", "ImportError", ",", "pkg_resources", ".", "UnknownExtra", ")", ":", "# Couldn't load it; try the next one", "continue", "# Raise an ImportError if requested", "if", "required", ":", "raise", "ImportError", "(", "\"Cannot import %r entrypoint %r\"", "%", "(", "group", ",", "name", ")", ")", "# Couldn't find one...", "return", "None" ]
Finds the first available entrypoint with the given name in the given group. :param group: The entrypoint group the name can be found in. If None, the name is not presumed to be an entrypoint. :param name: The name of the entrypoint. :param compat: If True, and if the name parameter contains a ':', the name will be interpreted as a module name and an object name, separated by a colon. This is provided for compatibility. :param required: If True, and no corresponding entrypoint can be found, an ImportError will be raised. If False (the default), None will be returned instead. :returns: The entrypoint object, or None if one could not be loaded.
[ "Finds", "the", "first", "available", "entrypoint", "with", "the", "given", "name", "in", "the", "given", "group", "." ]
8fe9a359b45e505d3192ab193ecf9be177ab1a17
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/utils.py#L21-L60
train
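The compat branch resolves a bare 'module:attr' string without touching any entry-point group. With an older setuptools that still accepts load(False), as the code above assumes, the equivalent one-liner is:

import pkg_resources

fn = pkg_resources.EntryPoint.parse('x=json:dumps').load(False)
print(fn({'a': 1}))  # {"a": 1}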
COALAIP/pycoalaip
coalaip/entities.py
TransferrableEntity.transfer
def transfer(self, transfer_payload=None, *, from_user, to_user): """Transfer this entity to another owner on the backing persistence layer Args: transfer_payload (dict): Payload for the transfer from_user (any): A user based on the model specified by the persistence layer to_user (any): A user based on the model specified by the persistence layer Returns: str: Id of the resulting transfer action on the persistence layer Raises: :exc:`~.EntityNotYetPersistedError`: If the entity being transferred is not associated with an id on the persistence layer (:attr:`~Entity.persist_id`) yet :exc:`~.EntityNotFoundError`: If the entity could not be found on the persistence layer :exc:`~.EntityTransferError`: If the entity fails to be transferred on the persistence layer :exc:`~.PersistenceError`: If any other unhandled error in the plugin occurred """ if self.persist_id is None: raise EntityNotYetPersistedError(('Entities cannot be transferred ' 'until they have been ' 'persisted')) return self.plugin.transfer(self.persist_id, transfer_payload, from_user=from_user, to_user=to_user)
python
def transfer(self, transfer_payload=None, *, from_user, to_user): """Transfer this entity to another owner on the backing persistence layer Args: transfer_payload (dict): Payload for the transfer from_user (any): A user based on the model specified by the persistence layer to_user (any): A user based on the model specified by the persistence layer Returns: str: Id of the resulting transfer action on the persistence layer Raises: :exc:`~.EntityNotYetPersistedError`: If the entity being transferred is not associated with an id on the persistence layer (:attr:`~Entity.persist_id`) yet :exc:`~.EntityNotFoundError`: If the entity could not be found on the persistence layer :exc:`~.EntityTransferError`: If the entity fails to be transferred on the persistence layer :exc:`~.PersistenceError`: If any other unhandled error in the plugin occurred """ if self.persist_id is None: raise EntityNotYetPersistedError(('Entities cannot be transferred ' 'until they have been ' 'persisted')) return self.plugin.transfer(self.persist_id, transfer_payload, from_user=from_user, to_user=to_user)
[ "def", "transfer", "(", "self", ",", "transfer_payload", "=", "None", ",", "*", ",", "from_user", ",", "to_user", ")", ":", "if", "self", ".", "persist_id", "is", "None", ":", "raise", "EntityNotYetPersistedError", "(", "(", "'Entities cannot be transferred '", "'until they have been '", "'persisted'", ")", ")", "return", "self", ".", "plugin", ".", "transfer", "(", "self", ".", "persist_id", ",", "transfer_payload", ",", "from_user", "=", "from_user", ",", "to_user", "=", "to_user", ")" ]
Transfer this entity to another owner on the backing persistence layer Args: transfer_payload (dict): Payload for the transfer from_user (any): A user based on the model specified by the persistence layer to_user (any): A user based on the model specified by the persistence layer Returns: str: Id of the resulting transfer action on the persistence layer Raises: :exc:`~.EntityNotYetPersistedError`: If the entity being transferred is not associated with an id on the persistence layer (:attr:`~Entity.persist_id`) yet :exc:`~.EntityNotFoundError`: If the entity could not be found on the persistence layer :exc:`~.EntityTransferError`: If the entity fails to be transferred on the persistence layer :exc:`~.PersistenceError`: If any other unhandled error in the plugin occurred
[ "Transfer", "this", "entity", "to", "another", "owner", "on", "the", "backing", "persistence", "layer" ]
cecc8f6ff4733f0525fafcee63647753e832f0be
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/entities.py#L409-L442
train
COALAIP/pycoalaip
coalaip/entities.py
Right.transfer
def transfer(self, rights_assignment_data=None, *, from_user, to_user, rights_assignment_format='jsonld'): """Transfer this Right to another owner on the backing persistence layer. Args: rights_assignment_data (dict): Model data for the resulting :class:`~.RightsAssignment` from_user (any, keyword): A user based on the model specified by the persistence layer to_user (any, keyword): A user based on the model specified by the persistence layer rights_assignment_format (str, keyword, optional): Data format of the created entity; must be one of: - 'jsonld' (default) - 'json' - 'ipld' Returns: :class:`~.RightsAssignment`: The RightsAssignment entity created from this transfer Raises: See :meth:`~.TransferrableEntity.transfer` """ rights_assignment = RightsAssignment.from_data( rights_assignment_data or {}, plugin=self.plugin) transfer_payload = rights_assignment._to_format( data_format=rights_assignment_format) transfer_id = super().transfer(transfer_payload, from_user=from_user, to_user=to_user) rights_assignment.persist_id = transfer_id return rights_assignment
python
def transfer(self, rights_assignment_data=None, *, from_user, to_user, rights_assignment_format='jsonld'): """Transfer this Right to another owner on the backing persistence layer. Args: rights_assignment_data (dict): Model data for the resulting :class:`~.RightsAssignment` from_user (any, keyword): A user based on the model specified by the persistence layer to_user (any, keyword): A user based on the model specified by the persistence layer rights_assignment_format (str, keyword, optional): Data format of the created entity; must be one of: - 'jsonld' (default) - 'json' - 'ipld' Returns: :class:`~.RightsAssignment`: The RightsAssignment entity created from this transfer Raises: See :meth:`~.TransferrableEntity.transfer` """ rights_assignment = RightsAssignment.from_data( rights_assignment_data or {}, plugin=self.plugin) transfer_payload = rights_assignment._to_format( data_format=rights_assignment_format) transfer_id = super().transfer(transfer_payload, from_user=from_user, to_user=to_user) rights_assignment.persist_id = transfer_id return rights_assignment
[ "def", "transfer", "(", "self", ",", "rights_assignment_data", "=", "None", ",", "*", ",", "from_user", ",", "to_user", ",", "rights_assignment_format", "=", "'jsonld'", ")", ":", "rights_assignment", "=", "RightsAssignment", ".", "from_data", "(", "rights_assignment_data", "or", "{", "}", ",", "plugin", "=", "self", ".", "plugin", ")", "transfer_payload", "=", "rights_assignment", ".", "_to_format", "(", "data_format", "=", "rights_assignment_format", ")", "transfer_id", "=", "super", "(", ")", ".", "transfer", "(", "transfer_payload", ",", "from_user", "=", "from_user", ",", "to_user", "=", "to_user", ")", "rights_assignment", ".", "persist_id", "=", "transfer_id", "return", "rights_assignment" ]
Transfer this Right to another owner on the backing persistence layer. Args: rights_assignment_data (dict): Model data for the resulting :class:`~.RightsAssignment` from_user (any, keyword): A user based on the model specified by the persistence layer to_user (any, keyword): A user based on the model specified by the persistence layer rights_assignment_format (str, keyword, optional): Data format of the created entity; must be one of: - 'jsonld' (default) - 'json' - 'ipld' Returns: :class:`~.RightsAssignment`: The RightsAssignment entity created from this transfer Raises: See :meth:`~.TransferrableEntity.transfer`
[ "Transfer", "this", "Right", "to", "another", "owner", "on", "the", "backing", "persistence", "layer", "." ]
cecc8f6ff4733f0525fafcee63647753e832f0be
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/entities.py#L506-L542
train
rsgalloway/grit
grit/repo/base.py
Repo._set_repo
def _set_repo(self, url): """sets the underlying repo object""" if url.startswith('http'): try: self.repo = Proxy(url) except ProxyError, e: log.exception('Error setting repo: %s' % url) raise GritError(e) else: try: self.repo = Local(url) except NotGitRepository: raise GritError('Invalid url: %s' % url) except Exception, e: log.exception('Error setting repo: %s' % url) raise GritError(e)
python
def _set_repo(self, url): """sets the underlying repo object""" if url.startswith('http'): try: self.repo = Proxy(url) except ProxyError, e: log.exception('Error setting repo: %s' % url) raise GritError(e) else: try: self.repo = Local(url) except NotGitRepository: raise GritError('Invalid url: %s' % url) except Exception, e: log.exception('Error setting repo: %s' % url) raise GritError(e)
[ "def", "_set_repo", "(", "self", ",", "url", ")", ":", "if", "url", ".", "startswith", "(", "'http'", ")", ":", "try", ":", "self", ".", "repo", "=", "Proxy", "(", "url", ")", "except", "ProxyError", ",", "e", ":", "log", ".", "exception", "(", "'Error setting repo: %s'", "%", "url", ")", "raise", "GritError", "(", "e", ")", "else", ":", "try", ":", "self", ".", "repo", "=", "Local", "(", "url", ")", "except", "NotGitRepository", ":", "raise", "GritError", "(", "'Invalid url: %s'", "%", "url", ")", "except", "Exception", ",", "e", ":", "log", ".", "exception", "(", "'Error setting repo: %s'", "%", "url", ")", "raise", "GritError", "(", "e", ")" ]
sets the underlying repo object
[ "sets", "the", "underlying", "repo", "object" ]
e6434ad8a1f4ac5d0903ebad630c81f8a5164d78
https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/repo/base.py#L73-L88
train
rsgalloway/grit
grit/repo/base.py
Repo.new
def new(self, url, clone_from=None, bare=True): """ Creates a new Repo instance. :param url: Path or remote URL of new repo. :param clone_from: Path or URL of repo to clone from. :param bare: Create as bare repo. :returns: grit.Repo instance. For example: >>> r = Repo.new('/tmp/projects') >>> r <grit.Repo "/tmp/projects"> """ #note to self: look into using templates (--template) if clone_from: self.clone(path=url, bare=bare) else: if url.startswith('http'): proxy = Proxy(url) proxy.new(path=url, bare=bare) else: local = Local.new(path=url, bare=bare) return Repo(url)
python
def new(self, url, clone_from=None, bare=True): """ Creates a new Repo instance. :param url: Path or remote URL of new repo. :param clone_from: Path or URL of repo to clone from. :param bare: Create as bare repo. :returns: grit.Repo instance. For example: >>> r = Repo.new('/tmp/projects') >>> r <grit.Repo "/tmp/projects"> """ #note to self: look into using templates (--template) if clone_from: self.clone(path=url, bare=bare) else: if url.startswith('http'): proxy = Proxy(url) proxy.new(path=url, bare=bare) else: local = Local.new(path=url, bare=bare) return Repo(url)
[ "def", "new", "(", "self", ",", "url", ",", "clone_from", "=", "None", ",", "bare", "=", "True", ")", ":", "#note to self: look into using templates (--template)", "if", "clone_from", ":", "self", ".", "clone", "(", "path", "=", "url", ",", "bare", "=", "bare", ")", "else", ":", "if", "url", ".", "startswith", "(", "'http'", ")", ":", "proxy", "=", "Proxy", "(", "url", ")", "proxy", ".", "new", "(", "path", "=", "url", ",", "bare", "=", "bare", ")", "else", ":", "local", "=", "Local", ".", "new", "(", "path", "=", "url", ",", "bare", "=", "bare", ")", "return", "Repo", "(", "url", ")" ]
Creates a new Repo instance. :param url: Path or remote URL of new repo. :param clone_from: Path or URL of repo to clone from. :param bare: Create as bare repo. :returns: grit.Repo instance. For example: >>> r = Repo.new('/tmp/projects') >>> r <grit.Repo "/tmp/projects">
[ "Creates", "a", "new", "Repo", "instance", "." ]
e6434ad8a1f4ac5d0903ebad630c81f8a5164d78
https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/repo/base.py#L91-L117
train
xypnox/email_purifier
epurifier/email_checker.py
EmailPurifier.CheckEmails
def CheckEmails(self, checkTypo=False, fillWrong=True): '''Checks whether the emails in the list are correct or not''' self.wrong_emails = [] for email in self.emails: if self.CheckEmail(email, checkTypo) is False: self.wrong_emails.append(email)
python
def CheckEmails(self, checkTypo=False, fillWrong=True): '''Checks whether the emails in the list are correct or not''' self.wrong_emails = [] for email in self.emails: if self.CheckEmail(email, checkTypo) is False: self.wrong_emails.append(email)
[ "def", "CheckEmails", "(", "self", ",", "checkTypo", "=", "False", ",", "fillWrong", "=", "True", ")", ":", "self", ".", "wrong_emails", "=", "[", "]", "for", "email", "in", "self", ".", "emails", ":", "if", "self", ".", "CheckEmail", "(", "email", ",", "checkTypo", ")", "is", "False", ":", "self", ".", "wrong_emails", ".", "append", "(", "email", ")" ]
Checks whether the emails in the list are correct or not
[ "Checks", "Emails", "in", "List", "Wether", "they", "are", "Correct", "or", "not" ]
a9ecde9c5293b5c283e0c5b4cf8744c76418fb6f
https://github.com/xypnox/email_purifier/blob/a9ecde9c5293b5c283e0c5b4cf8744c76418fb6f/epurifier/email_checker.py#L31-L36
train
xypnox/email_purifier
epurifier/email_checker.py
EmailPurifier.CheckEmail
def CheckEmail(self, email, checkTypo=False): '''Checks a Single email if it is correct''' contents = email.split('@') if len(contents) == 2: if contents[1] in self.valid: return True return False
python
def CheckEmail(self, email, checkTypo=False): '''Checks a Single email if it is correct''' contents = email.split('@') if len(contents) == 2: if contents[1] in self.valid: return True return False
[ "def", "CheckEmail", "(", "self", ",", "email", ",", "checkTypo", "=", "False", ")", ":", "contents", "=", "email", ".", "split", "(", "'@'", ")", "if", "len", "(", "contents", ")", "==", "2", ":", "if", "contents", "[", "1", "]", "in", "self", ".", "valid", ":", "return", "True", "return", "False" ]
Checks a Single email if it is correct
[ "Checks", "a", "Single", "email", "if", "it", "is", "correct" ]
a9ecde9c5293b5c283e0c5b4cf8744c76418fb6f
https://github.com/xypnox/email_purifier/blob/a9ecde9c5293b5c283e0c5b4cf8744c76418fb6f/epurifier/email_checker.py#L38-L44
train
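Reduced to its essentials, the check is one split plus a whitelist lookup; a self-contained version with a hypothetical whitelist:

valid = {'gmail.com', 'yahoo.com'}

def check_email(email):
    contents = email.split('@')
    return len(contents) == 2 and contents[1] in valid

print(check_email('user@gmail.com'))  # True
print(check_email('user@gmial.com'))  # False: domain not whitelisted
print(check_email('usergmail.com'))   # False: no '@' at all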
xypnox/email_purifier
epurifier/email_checker.py
EmailPurifier.CorrectWrongEmails
def CorrectWrongEmails(self, askInput=True): '''Corrects Emails in wrong_emails''' for email in self.wrong_emails: corrected_email = self.CorrectEmail(email) self.emails[self.emails.index(email)] = corrected_email self.wrong_emails = []
python
def CorrectWrongEmails(self, askInput=True): '''Corrects Emails in wrong_emails''' for email in self.wrong_emails: corrected_email = self.CorrectEmail(email) self.emails[self.emails.index(email)] = corrected_email self.wrong_emails = []
[ "def", "CorrectWrongEmails", "(", "self", ",", "askInput", "=", "True", ")", ":", "for", "email", "in", "self", ".", "wrong_emails", ":", "corrected_email", "=", "self", ".", "CorrectEmail", "(", "email", ")", "self", ".", "emails", "[", "self", ".", "emails", ".", "index", "(", "email", ")", "]", "=", "corrected_email", "self", ".", "wrong_emails", "=", "[", "]" ]
Corrects Emails in wrong_emails
[ "Corrects", "Emails", "in", "wrong_emails" ]
a9ecde9c5293b5c283e0c5b4cf8744c76418fb6f
https://github.com/xypnox/email_purifier/blob/a9ecde9c5293b5c283e0c5b4cf8744c76418fb6f/epurifier/email_checker.py#L46-L52
train
xypnox/email_purifier
epurifier/email_checker.py
EmailPurifier.CorrectEmail
def CorrectEmail(self, email): '''Returns a Corrected email USER INPUT REQUIRED''' print("Wrong Email : "+email) contents = email.split('@') if len(contents) == 2: domain_data = contents[1].split('.') for vemail in self.valid: alters = perms(vemail.split('.', 1)[0]) if domain_data[0] in alters and qyn.query_yes_no("Did you mean : " + contents[0] + '@' + vemail) is True: return contents[0] + '@' + vemail corrected = input('Enter Corrected Email : ') while self.CheckEmail(corrected) is False: corrected = input('PLEASE Enter "Corrected" Email : ') return corrected else: print('Looks like you missed/overused `@`') if len(contents) == 1: for vemail in self.valid: if email[len(email) - len(vemail):] == vemail and qyn.query_yes_no("Did you mean : " + email[:len(email) - len(vemail)] + '@' + vemail) is True: return email[:len(email) - len(vemail)] + '@' + vemail corrected = input('Enter Corrected Email : ') while self.CheckEmail(corrected) is False: corrected = input('PLEASE Enter "Corrected" Email : ') return corrected
python
def CorrectEmail(self, email): '''Returns a Corrected email USER INPUT REQUIRED''' print("Wrong Email : "+email) contents = email.split('@') if len(contents) == 2: domain_data = contents[1].split('.') for vemail in self.valid: alters = perms(vemail.split('.', 1)[0]) if domain_data[0] in alters and qyn.query_yes_no("Did you mean : " + contents[0] + '@' + vemail) is True: return contents[0] + '@' + vemail corrected = input('Enter Corrected Email : ') while self.CheckEmail(corrected) is False: corrected = input('PLEASE Enter "Corrected" Email : ') return corrected else: print('Looks like you missed/overused `@`') if len(contents) == 1: for vemail in self.valid: if email[len(email) - len(vemail):] == vemail and qyn.query_yes_no("Did you mean : " + email[:len(email) - len(vemail)] + '@' + vemail) is True: return email[:len(email) - len(vemail)] + '@' + vemail corrected = input('Enter Corrected Email : ') while self.CheckEmail(corrected) is False: corrected = input('PLEASE Enter "Corrected" Email : ') return corrected
[ "def", "CorrectEmail", "(", "self", ",", "email", ")", ":", "print", "(", "\"Wrong Email : \"", "+", "email", ")", "contents", "=", "email", ".", "split", "(", "'@'", ")", "if", "len", "(", "contents", ")", "==", "2", ":", "domain_data", "=", "contents", "[", "1", "]", ".", "split", "(", "'.'", ")", "for", "vemail", "in", "self", ".", "valid", ":", "alters", "=", "perms", "(", "vemail", ".", "split", "(", "'.'", ",", "1", ")", "[", "0", "]", ")", "if", "domain_data", "[", "0", "]", "in", "alters", "and", "qyn", ".", "query_yes_no", "(", "\"Did you mean : \"", "+", "contents", "[", "0", "]", "+", "'@'", "+", "vemail", ")", "is", "True", ":", "return", "contents", "[", "0", "]", "+", "'@'", "+", "vemail", "corrected", "=", "input", "(", "'Enter Corrected Email : '", ")", "while", "self", ".", "CheckEmail", "(", "corrected", ")", "is", "False", ":", "corrected", "=", "input", "(", "'PLEASE Enter \"Corrected\" Email : '", ")", "return", "corrected", "else", ":", "print", "(", "'Looks like you missed/overused `@`'", ")", "if", "len", "(", "contents", ")", "==", "1", ":", "for", "vemail", "in", "self", ".", "valid", ":", "if", "email", "[", "len", "(", "email", ")", "-", "len", "(", "vemail", ")", ":", "]", "==", "vemail", "and", "qyn", ".", "query_yes_no", "(", "\"Did you mean : \"", "+", "email", "[", ":", "len", "(", "email", ")", "-", "len", "(", "vemail", ")", "]", "+", "'@'", "+", "vemail", ")", "is", "True", ":", "return", "email", "[", ":", "len", "(", "email", ")", "-", "len", "(", "vemail", ")", "]", "+", "'@'", "+", "vemail", "corrected", "=", "input", "(", "'Enter Corrected Email : '", ")", "while", "self", ".", "CheckEmail", "(", "corrected", ")", "is", "False", ":", "corrected", "=", "input", "(", "'PLEASE Enter \"Corrected\" Email : '", ")", "return", "corrected" ]
Returns a Corrected email USER INPUT REQUIRED
[ "Returns", "a", "Corrected", "email", "USER", "INPUT", "REQUIRED" ]
a9ecde9c5293b5c283e0c5b4cf8744c76418fb6f
https://github.com/xypnox/email_purifier/blob/a9ecde9c5293b5c283e0c5b4cf8744c76418fb6f/epurifier/email_checker.py#L54-L80
train
frostming/marko
marko/parser.py
Parser.add_element
def add_element(self, element, override=False): """Add an element to the parser. :param element: the element class. :param override: whether to replace the default element it is based on. .. note:: If one needs to call it inside ``__init__()``, please call it after ``super().__init__()`` is called. """ if issubclass(element, inline.InlineElement): dest = self.inline_elements elif issubclass(element, block.BlockElement): dest = self.block_elements else: raise TypeError( 'The element should be a subclass of either `BlockElement` or ' '`InlineElement`.' ) if not override: dest[element.__name__] = element else: for cls in element.__bases__: if cls in dest.values(): dest[cls.__name__] = element break else: dest[element.__name__] = element
python
def add_element(self, element, override=False): """Add an element to the parser. :param element: the element class. :param override: whether to replace the default element it is based on. .. note:: If one needs to call it inside ``__init__()``, please call it after ``super().__init__()`` is called. """ if issubclass(element, inline.InlineElement): dest = self.inline_elements elif issubclass(element, block.BlockElement): dest = self.block_elements else: raise TypeError( 'The element should be a subclass of either `BlockElement` or ' '`InlineElement`.' ) if not override: dest[element.__name__] = element else: for cls in element.__bases__: if cls in dest.values(): dest[cls.__name__] = element break else: dest[element.__name__] = element
[ "def", "add_element", "(", "self", ",", "element", ",", "override", "=", "False", ")", ":", "if", "issubclass", "(", "element", ",", "inline", ".", "InlineElement", ")", ":", "dest", "=", "self", ".", "inline_elements", "elif", "issubclass", "(", "element", ",", "block", ".", "BlockElement", ")", ":", "dest", "=", "self", ".", "block_elements", "else", ":", "raise", "TypeError", "(", "'The element should be a subclass of either `BlockElement` or '", "'`InlineElement`.'", ")", "if", "not", "override", ":", "dest", "[", "element", ".", "__name__", "]", "=", "element", "else", ":", "for", "cls", "in", "element", ".", "__bases__", ":", "if", "cls", "in", "dest", ".", "values", "(", ")", ":", "dest", "[", "cls", ".", "__name__", "]", "=", "element", "break", "else", ":", "dest", "[", "element", ".", "__name__", "]", "=", "element" ]
Add an element to the parser. :param element: the element class. :param override: whether to replace the default element it is based on. .. note:: If one needs to call it inside ``__init__()``, please call it after ``super().__init__()`` is called.
[ "Add", "an", "element", "to", "the", "parser", "." ]
1cd030b665fa37bad1f8b3a25a89ce1a7c491dde
https://github.com/frostming/marko/blob/1cd030b665fa37bad1f8b3a25a89ce1a7c491dde/marko/parser.py#L37-L63
train
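A self-contained sketch of the override resolution: with override=True the new class is registered under the name of the base class it derives from, so existing lookups transparently pick it up.

class RawText(str):
    pass

registry = {'RawText': RawText}  # stand-in for parser.inline_elements

class MyText(RawText):
    pass

for base in MyText.__bases__:
    if base in registry.values():
        registry[base.__name__] = MyText  # takes over the 'RawText' slot
        break
else:
    registry[MyText.__name__] = MyText    # unknown base: register under own name

print(registry)  # {'RawText': <class '__main__.MyText'>}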
frostming/marko
marko/parser.py
Parser.parse
def parse(self, source_or_text): """Do the actual parsing and return an AST or parsed element. :param source_or_text: the text or source object. Based on the type, it will do the following: - text: returns the parsed Document element. - source: parse the source and returns the parsed children as a list. """ if isinstance(source_or_text, string_types): block.parser = self inline.parser = self return self.block_elements['Document'](source_or_text) element_list = self._build_block_element_list() ast = [] while not source_or_text.exhausted: for ele_type in element_list: if ele_type.match(source_or_text): result = ele_type.parse(source_or_text) if not hasattr(result, 'priority'): result = ele_type(result) ast.append(result) break else: # Quit the current parsing and go back to the last level. break return ast
python
def parse(self, source_or_text): """Do the actual parsing and return an AST or parsed element. :param source_or_text: the text or source object. Based on the type, it will do the following: - text: returns the parsed Document element. - source: parse the source and returns the parsed children as a list. """ if isinstance(source_or_text, string_types): block.parser = self inline.parser = self return self.block_elements['Document'](source_or_text) element_list = self._build_block_element_list() ast = [] while not source_or_text.exhausted: for ele_type in element_list: if ele_type.match(source_or_text): result = ele_type.parse(source_or_text) if not hasattr(result, 'priority'): result = ele_type(result) ast.append(result) break else: # Quit the current parsing and go back to the last level. break return ast
[ "def", "parse", "(", "self", ",", "source_or_text", ")", ":", "if", "isinstance", "(", "source_or_text", ",", "string_types", ")", ":", "block", ".", "parser", "=", "self", "inline", ".", "parser", "=", "self", "return", "self", ".", "block_elements", "[", "'Document'", "]", "(", "source_or_text", ")", "element_list", "=", "self", ".", "_build_block_element_list", "(", ")", "ast", "=", "[", "]", "while", "not", "source_or_text", ".", "exhausted", ":", "for", "ele_type", "in", "element_list", ":", "if", "ele_type", ".", "match", "(", "source_or_text", ")", ":", "result", "=", "ele_type", ".", "parse", "(", "source_or_text", ")", "if", "not", "hasattr", "(", "result", ",", "'priority'", ")", ":", "result", "=", "ele_type", "(", "result", ")", "ast", ".", "append", "(", "result", ")", "break", "else", ":", "# Quit the current parsing and go back to the last level.", "break", "return", "ast" ]
Do the actual parsing and return an AST or parsed element. :param source_or_text: the text or source object. Based on the type, it will do the following: - text: returns the parsed Document element. - source: parse the source and returns the parsed children as a list.
[ "Do", "the", "actual", "parsing", "and", "returns", "an", "AST", "or", "parsed", "element", "." ]
1cd030b665fa37bad1f8b3a25a89ce1a7c491dde
https://github.com/frostming/marko/blob/1cd030b665fa37bad1f8b3a25a89ce1a7c491dde/marko/parser.py#L65-L90
train
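The dispatch loop leans on Python's for/else: the else clause fires only when no element type matched, which hands control back to the enclosing level. A toy version with line-based matchers (the input and matchers are hypothetical):

lines = ['# title', 'some text', '']
matchers = [('Heading', lambda l: l.startswith('#')),
            ('Paragraph', lambda l: bool(l.strip()))]

ast = []
for line in lines:
    for name, match in matchers:
        if match(line):
            ast.append((name, line))
            break
    else:
        break  # nothing matched: stop parsing at this level
print(ast)  # [('Heading', '# title'), ('Paragraph', 'some text')]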
frostming/marko
marko/parser.py
Parser.parse_inline
def parse_inline(self, text): """Parses text into inline elements. RawText is not considered in parsing but created as a wrapper of holes that don't match any other elements. :param text: the text to be parsed. :returns: a list of inline elements. """ element_list = self._build_inline_element_list() return inline_parser.parse( text, element_list, fallback=self.inline_elements['RawText'] )
python
def parse_inline(self, text): """Parses text into inline elements. RawText is not considered in parsing but created as a wrapper of holes that don't match any other elements. :param text: the text to be parsed. :returns: a list of inline elements. """ element_list = self._build_inline_element_list() return inline_parser.parse( text, element_list, fallback=self.inline_elements['RawText'] )
[ "def", "parse_inline", "(", "self", ",", "text", ")", ":", "element_list", "=", "self", ".", "_build_inline_element_list", "(", ")", "return", "inline_parser", ".", "parse", "(", "text", ",", "element_list", ",", "fallback", "=", "self", ".", "inline_elements", "[", "'RawText'", "]", ")" ]
Parses text into inline elements. RawText is not considered in parsing but created as a wrapper of holes that don't match any other elements. :param text: the text to be parsed. :returns: a list of inline elements.
[ "Parses", "text", "into", "inline", "elements", ".", "RawText", "is", "not", "considered", "in", "parsing", "but", "created", "as", "a", "wrapper", "of", "holes", "that", "don", "t", "match", "any", "other", "elements", "." ]
1cd030b665fa37bad1f8b3a25a89ce1a7c491dde
https://github.com/frostming/marko/blob/1cd030b665fa37bad1f8b3a25a89ce1a7c491dde/marko/parser.py#L92-L103
train
frostming/marko
marko/parser.py
Parser._build_block_element_list
def _build_block_element_list(self): """Return a list of block elements, ordered from highest priority to lowest. """ return sorted( [e for e in self.block_elements.values() if not e.virtual], key=lambda e: e.priority, reverse=True )
python
def _build_block_element_list(self): """Return a list of block elements, ordered from highest priority to lowest. """ return sorted( [e for e in self.block_elements.values() if not e.virtual], key=lambda e: e.priority, reverse=True )
[ "def", "_build_block_element_list", "(", "self", ")", ":", "return", "sorted", "(", "[", "e", "for", "e", "in", "self", ".", "block_elements", ".", "values", "(", ")", "if", "not", "e", ".", "virtual", "]", ",", "key", "=", "lambda", "e", ":", "e", ".", "priority", ",", "reverse", "=", "True", ")" ]
Return a list of block elements, ordered from highest priority to lowest.
[ "Return", "a", "list", "of", "block", "elements", "ordered", "from", "highest", "priority", "to", "lowest", "." ]
1cd030b665fa37bad1f8b3a25a89ce1a7c491dde
https://github.com/frostming/marko/blob/1cd030b665fa37bad1f8b3a25a89ce1a7c491dde/marko/parser.py#L105-L112
train
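In miniature, the ordering is a descending sort on priority with virtual elements filtered out:

class Elem:
    def __init__(self, name, priority, virtual=False):
        self.name, self.priority, self.virtual = name, priority, virtual

elements = {'Para': Elem('Para', 1), 'List': Elem('List', 6),
            'Doc': Elem('Doc', 0, virtual=True)}
ordered = sorted([e for e in elements.values() if not e.virtual],
                 key=lambda e: e.priority, reverse=True)
print([e.name for e in ordered])  # ['List', 'Para']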
rsgalloway/grit
grit/server/server.py
make_app
def make_app(*args, **kw): ''' Assembles basic WSGI-compatible application providing functionality of git-http-backend. content_path (Defaults to '.' = "current" directory) The path to the folder that will be the root of served files. Accepts relative paths. uri_marker (Defaults to '') Acts as a "virtual folder" separator between decorative URI portion and the actual (relative to content_path) path that will be appended to content_path and used for pulling an actual file. The URI does not have to start with contents of uri_marker. It can be preceded by any number of "virtual" folders. For --uri_marker 'my' all of these will take you to the same repo: http://localhost/my/HEAD http://localhost/admysf/mylar/zxmy/my/HEAD This WSGI handler will cut and rebase the URI when it's time to read from the file system. Default of '' means that no cutting marker is used, and whole URI after FQDN is used to find the file relative to content_path. returns WSGI application instance. ''' default_options = [ ['content_path','.'], ['uri_marker',''] ] args = list(args) options = dict(default_options) options.update(kw) while default_options and args: _d = default_options.pop(0) _a = args.pop(0) options[_d[0]] = _a options['content_path'] = os.path.abspath(options['content_path'].decode('utf8')) options['uri_marker'] = options['uri_marker'].decode('utf8') selector = WSGIHandlerSelector() git_inforefs_handler = GitHTTPBackendInfoRefs(**options) git_rpc_handler = GitHTTPBackendSmartHTTP(**options) static_handler = StaticServer(**options) file_handler = FileServer(**options) json_handler = JSONServer(**options) ui_handler = UIServer(**options) if options['uri_marker']: marker_regex = r'(?P<decorative_path>.*?)(?:/'+ options['uri_marker'] + ')' else: marker_regex = '' selector.add( marker_regex + r'(?P<working_path>.*?)/info/refs\?.*?service=(?P<git_command>git-[^&]+).*$', GET = git_inforefs_handler, HEAD = git_inforefs_handler ) selector.add( marker_regex + r'(?P<working_path>.*)/(?P<git_command>git-[^/]+)$', POST = git_rpc_handler ) selector.add( marker_regex + r'/static/(?P<working_path>.*)$', GET = static_handler, HEAD = static_handler) selector.add( marker_regex + r'(?P<working_path>.*)/file$', GET = file_handler, HEAD = file_handler) selector.add( marker_regex + r'(?P<working_path>.*)$', GET = ui_handler, POST = json_handler, HEAD = ui_handler) return selector
python
def make_app(*args, **kw): ''' Assembles basic WSGI-compatible application providing functionality of git-http-backend. content_path (Defaults to '.' = "current" directory) The path to the folder that will be the root of served files. Accepts relative paths. uri_marker (Defaults to '') Acts as a "virtual folder" separator between decorative URI portion and the actual (relative to content_path) path that will be appended to content_path and used for pulling an actual file. The URI does not have to start with contents of uri_marker. It can be preceded by any number of "virtual" folders. For --uri_marker 'my' all of these will take you to the same repo: http://localhost/my/HEAD http://localhost/admysf/mylar/zxmy/my/HEAD This WSGI handler will cut and rebase the URI when it's time to read from the file system. Default of '' means that no cutting marker is used, and whole URI after FQDN is used to find the file relative to content_path. returns WSGI application instance. ''' default_options = [ ['content_path','.'], ['uri_marker',''] ] args = list(args) options = dict(default_options) options.update(kw) while default_options and args: _d = default_options.pop(0) _a = args.pop(0) options[_d[0]] = _a options['content_path'] = os.path.abspath(options['content_path'].decode('utf8')) options['uri_marker'] = options['uri_marker'].decode('utf8') selector = WSGIHandlerSelector() git_inforefs_handler = GitHTTPBackendInfoRefs(**options) git_rpc_handler = GitHTTPBackendSmartHTTP(**options) static_handler = StaticServer(**options) file_handler = FileServer(**options) json_handler = JSONServer(**options) ui_handler = UIServer(**options) if options['uri_marker']: marker_regex = r'(?P<decorative_path>.*?)(?:/'+ options['uri_marker'] + ')' else: marker_regex = '' selector.add( marker_regex + r'(?P<working_path>.*?)/info/refs\?.*?service=(?P<git_command>git-[^&]+).*$', GET = git_inforefs_handler, HEAD = git_inforefs_handler ) selector.add( marker_regex + r'(?P<working_path>.*)/(?P<git_command>git-[^/]+)$', POST = git_rpc_handler ) selector.add( marker_regex + r'/static/(?P<working_path>.*)$', GET = static_handler, HEAD = static_handler) selector.add( marker_regex + r'(?P<working_path>.*)/file$', GET = file_handler, HEAD = file_handler) selector.add( marker_regex + r'(?P<working_path>.*)$', GET = ui_handler, POST = json_handler, HEAD = ui_handler) return selector
[ "def", "make_app", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "default_options", "=", "[", "[", "'content_path'", ",", "'.'", "]", ",", "[", "'uri_marker'", ",", "''", "]", "]", "args", "=", "list", "(", "args", ")", "options", "=", "dict", "(", "default_options", ")", "options", ".", "update", "(", "kw", ")", "while", "default_options", "and", "args", ":", "_d", "=", "default_options", ".", "pop", "(", "0", ")", "_a", "=", "args", ".", "pop", "(", "0", ")", "options", "[", "_d", "[", "0", "]", "]", "=", "_a", "options", "[", "'content_path'", "]", "=", "os", ".", "path", ".", "abspath", "(", "options", "[", "'content_path'", "]", ".", "decode", "(", "'utf8'", ")", ")", "options", "[", "'uri_marker'", "]", "=", "options", "[", "'uri_marker'", "]", ".", "decode", "(", "'utf8'", ")", "selector", "=", "WSGIHandlerSelector", "(", ")", "git_inforefs_handler", "=", "GitHTTPBackendInfoRefs", "(", "*", "*", "options", ")", "git_rpc_handler", "=", "GitHTTPBackendSmartHTTP", "(", "*", "*", "options", ")", "static_handler", "=", "StaticServer", "(", "*", "*", "options", ")", "file_handler", "=", "FileServer", "(", "*", "*", "options", ")", "json_handler", "=", "JSONServer", "(", "*", "*", "options", ")", "ui_handler", "=", "UIServer", "(", "*", "*", "options", ")", "if", "options", "[", "'uri_marker'", "]", ":", "marker_regex", "=", "r'(?P<decorative_path>.*?)(?:/'", "+", "options", "[", "'uri_marker'", "]", "+", "')'", "else", ":", "marker_regex", "=", "''", "selector", ".", "add", "(", "marker_regex", "+", "r'(?P<working_path>.*?)/info/refs\\?.*?service=(?P<git_command>git-[^&]+).*$'", ",", "GET", "=", "git_inforefs_handler", ",", "HEAD", "=", "git_inforefs_handler", ")", "selector", ".", "add", "(", "marker_regex", "+", "r'(?P<working_path>.*)/(?P<git_command>git-[^/]+)$'", ",", "POST", "=", "git_rpc_handler", ")", "selector", ".", "add", "(", "marker_regex", "+", "r'/static/(?P<working_path>.*)$'", ",", "GET", "=", "static_handler", ",", "HEAD", "=", "static_handler", ")", "selector", ".", "add", "(", "marker_regex", "+", "r'(?P<working_path>.*)/file$'", ",", "GET", "=", "file_handler", ",", "HEAD", "=", "file_handler", ")", "selector", ".", "add", "(", "marker_regex", "+", "r'(?P<working_path>.*)$'", ",", "GET", "=", "ui_handler", ",", "POST", "=", "json_handler", ",", "HEAD", "=", "ui_handler", ")", "return", "selector" ]
Assembles basic WSGI-compatible application providing functionality of git-http-backend. content_path (Defaults to '.' = "current" directory) The path to the folder that will be the root of served files. Accepts relative paths. uri_marker (Defaults to '') Acts as a "virtual folder" separator between decorative URI portion and the actual (relative to content_path) path that will be appended to content_path and used for pulling an actual file. The URI does not have to start with contents of uri_marker. It can be preceded by any number of "virtual" folders. For --uri_marker 'my' all of these will take you to the same repo: http://localhost/my/HEAD http://localhost/admysf/mylar/zxmy/my/HEAD This WSGI handler will cut and rebase the URI when it's time to read from file system. Default of '' means that no cutting marker is used, and whole URI after FQDN is used to find file relative to content_path. returns WSGI application instance.
[ "Assembles", "basic", "WSGI", "-", "compatible", "application", "providing", "functionality", "of", "git", "-", "http", "-", "backend", "." ]
e6434ad8a1f4ac5d0903ebad630c81f8a5164d78
https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/server/server.py#L41-L116
train
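Below is a minimal, self-contained sketch of the positional-defaults merging that make_app performs before wiring up its handlers. demo_make_options is an illustrative name, not part of grit, and the Python 2 .decode('utf8') calls are left out:

import os

def demo_make_options(*args, **kw):
    # Positional args fill the default slots in declaration order;
    # keyword args override the defaults first, as in make_app.
    default_options = [['content_path', '.'], ['uri_marker', '']]
    args = list(args)
    options = dict(default_options)
    options.update(kw)
    while default_options and args:
        name, _default = default_options.pop(0)
        options[name] = args.pop(0)
    options['content_path'] = os.path.abspath(options['content_path'])
    return options

print(demo_make_options('/srv/repos'))     # positional fills content_path
print(demo_make_options(uri_marker='my'))  # keyword overrides a default

One subtlety this mirrors: a positional argument wins over a keyword for the same slot, because the while loop runs after options.update(kw).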
polyaxon/hestia
hestia/tz_utils.py
now
def now(tzinfo=True): """ Return an aware or naive datetime.datetime, depending on settings.USE_TZ. """ if dj_now: return dj_now() if tzinfo: # timeit shows that datetime.now(tz=utc) is 24% slower return datetime.utcnow().replace(tzinfo=utc) return datetime.now()
python
def now(tzinfo=True): """ Return an aware or naive datetime.datetime, depending on settings.USE_TZ. """ if dj_now: return dj_now() if tzinfo: # timeit shows that datetime.now(tz=utc) is 24% slower return datetime.utcnow().replace(tzinfo=utc) return datetime.now()
[ "def", "now", "(", "tzinfo", "=", "True", ")", ":", "if", "dj_now", ":", "return", "dj_now", "(", ")", "if", "tzinfo", ":", "# timeit shows that datetime.now(tz=utc) is 24% slower", "return", "datetime", ".", "utcnow", "(", ")", ".", "replace", "(", "tzinfo", "=", "utc", ")", "return", "datetime", ".", "now", "(", ")" ]
Return an aware or naive datetime.datetime, depending on settings.USE_TZ.
[ "Return", "an", "aware", "or", "naive", "datetime", ".", "datetime", "depending", "on", "settings", ".", "USE_TZ", "." ]
382ed139cff8bf35c987cfc30a31b72c0d6b808e
https://github.com/polyaxon/hestia/blob/382ed139cff8bf35c987cfc30a31b72c0d6b808e/hestia/tz_utils.py#L21-L31
train
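A small sketch of the aware-versus-naive behaviour now() falls back to when Django's timezone helper is absent; datetime.timezone.utc stands in for the module-level utc object assumed by the original:

from datetime import datetime, timezone

def demo_now(tzinfo=True):
    # Aware UTC datetime when tzinfo=True, naive local datetime otherwise.
    if tzinfo:
        return datetime.utcnow().replace(tzinfo=timezone.utc)
    return datetime.now()

print(demo_now().tzinfo)               # UTC
print(demo_now(tzinfo=False).tzinfo)   # None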
inveniosoftware/invenio-query-parser
invenio_query_parser/walkers/match_unit.py
match_unit
def match_unit(data, p, m='a'): """Match data to basic match unit.""" if data is None: return p is None # compile search value only once for non exact search if m != 'e' and isinstance(p, six.string_types): p = re.compile(p) if isinstance(data, Sequence) and not isinstance(data, six.string_types): return any([match_unit(field, p, m=m) for field in data]) elif isinstance(data, MutableMapping): return any([match_unit(field, p, m=m) for field in data.values()]) # Inclusive range query if isinstance(p, tuple): left, right = p return (left <= data) and (data <= right) if m == 'e': return six.text_type(data) == p return p.search(six.text_type(data)) is not None
python
def match_unit(data, p, m='a'): """Match data to basic match unit.""" if data is None: return p is None # compile search value only once for non exact search if m != 'e' and isinstance(p, six.string_types): p = re.compile(p) if isinstance(data, Sequence) and not isinstance(data, six.string_types): return any([match_unit(field, p, m=m) for field in data]) elif isinstance(data, MutableMapping): return any([match_unit(field, p, m=m) for field in data.values()]) # Inclusive range query if isinstance(p, tuple): left, right = p return (left <= data) and (data <= right) if m == 'e': return six.text_type(data) == p return p.search(six.text_type(data)) is not None
[ "def", "match_unit", "(", "data", ",", "p", ",", "m", "=", "'a'", ")", ":", "if", "data", "is", "None", ":", "return", "p", "is", "None", "# compile search value only once for non exact search", "if", "m", "!=", "'e'", "and", "isinstance", "(", "p", ",", "six", ".", "string_types", ")", ":", "p", "=", "re", ".", "compile", "(", "p", ")", "if", "isinstance", "(", "data", ",", "Sequence", ")", "and", "not", "isinstance", "(", "data", ",", "six", ".", "string_types", ")", ":", "return", "any", "(", "[", "match_unit", "(", "field", ",", "p", ",", "m", "=", "m", ")", "for", "field", "in", "data", "]", ")", "elif", "isinstance", "(", "data", ",", "MutableMapping", ")", ":", "return", "any", "(", "[", "match_unit", "(", "field", ",", "p", ",", "m", "=", "m", ")", "for", "field", "in", "data", ".", "values", "(", ")", "]", ")", "# Inclusive range query", "if", "isinstance", "(", "p", ",", "tuple", ")", ":", "left", ",", "right", "=", "p", "return", "(", "left", "<=", "data", ")", "and", "(", "data", "<=", "right", ")", "if", "m", "==", "'e'", ":", "return", "six", ".", "text_type", "(", "data", ")", "==", "p", "return", "p", ".", "search", "(", "six", ".", "text_type", "(", "data", ")", ")", "is", "not", "None" ]
Match data to basic match unit.
[ "Match", "data", "to", "basic", "match", "unit", "." ]
21a2c36318003ff52d2e18e7196bb420db8ecb4b
https://github.com/inveniosoftware/invenio-query-parser/blob/21a2c36318003ff52d2e18e7196bb420db8ecb4b/invenio_query_parser/walkers/match_unit.py#L57-L80
train
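The recursive descent in match_unit is easy to exercise on its own. A Python 3 sketch without the six dependency and the exact/range branches (str replaces six.string_types):

import re
from collections.abc import Sequence, MutableMapping

def demo_match(data, pattern):
    # Lists match if any element matches, dicts if any value matches,
    # scalars are stringified and regex-searched, as in match_unit.
    if data is None:
        return pattern is None
    if isinstance(data, Sequence) and not isinstance(data, str):
        return any(demo_match(item, pattern) for item in data)
    if isinstance(data, MutableMapping):
        return any(demo_match(value, pattern) for value in data.values())
    return re.search(pattern, str(data)) is not None

record = {'title': 'Invenio query parser', 'keywords': ['search', 'parser']}
print(demo_match(record, 'pars'))   # True, found in a nested value
print(demo_match(record, 'xyz'))    # False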
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/pipeline.py
generate_ppi_network
def generate_ppi_network( ppi_graph_path: str, dge_list: List[Gene], max_adj_p: float, max_log2_fold_change: float, min_log2_fold_change: float, ppi_edge_min_confidence: Optional[float] = None, current_disease_ids_path: Optional[str] = None, disease_associations_path: Optional[str] = None, ) -> Network: """Generate the protein-protein interaction network. :return Network: Protein-protein interaction network with information on differential expression. """ # Compilation of a protein-protein interaction (PPI) graph (HIPPIE) protein_interactions = parsers.parse_ppi_graph(ppi_graph_path, ppi_edge_min_confidence) protein_interactions = protein_interactions.simplify() if disease_associations_path is not None and current_disease_ids_path is not None: current_disease_ids = parsers.parse_disease_ids(current_disease_ids_path) disease_associations = parsers.parse_disease_associations(disease_associations_path, current_disease_ids) else: disease_associations = None # Build an undirected weighted graph with the remaining interactions based on Entrez gene IDs network = Network( protein_interactions, max_adj_p=max_adj_p, max_l2fc=max_log2_fold_change, min_l2fc=min_log2_fold_change, ) network.set_up_network(dge_list, disease_associations=disease_associations) return network
python
def generate_ppi_network( ppi_graph_path: str, dge_list: List[Gene], max_adj_p: float, max_log2_fold_change: float, min_log2_fold_change: float, ppi_edge_min_confidence: Optional[float] = None, current_disease_ids_path: Optional[str] = None, disease_associations_path: Optional[str] = None, ) -> Network: """Generate the protein-protein interaction network. :return Network: Protein-protein interaction network with information on differential expression. """ # Compilation of a protein-protein interaction (PPI) graph (HIPPIE) protein_interactions = parsers.parse_ppi_graph(ppi_graph_path, ppi_edge_min_confidence) protein_interactions = protein_interactions.simplify() if disease_associations_path is not None and current_disease_ids_path is not None: current_disease_ids = parsers.parse_disease_ids(current_disease_ids_path) disease_associations = parsers.parse_disease_associations(disease_associations_path, current_disease_ids) else: disease_associations = None # Build an undirected weighted graph with the remaining interactions based on Entrez gene IDs network = Network( protein_interactions, max_adj_p=max_adj_p, max_l2fc=max_log2_fold_change, min_l2fc=min_log2_fold_change, ) network.set_up_network(dge_list, disease_associations=disease_associations) return network
[ "def", "generate_ppi_network", "(", "ppi_graph_path", ":", "str", ",", "dge_list", ":", "List", "[", "Gene", "]", ",", "max_adj_p", ":", "float", ",", "max_log2_fold_change", ":", "float", ",", "min_log2_fold_change", ":", "float", ",", "ppi_edge_min_confidence", ":", "Optional", "[", "float", "]", "=", "None", ",", "current_disease_ids_path", ":", "Optional", "[", "str", "]", "=", "None", ",", "disease_associations_path", ":", "Optional", "[", "str", "]", "=", "None", ",", ")", "->", "Network", ":", "# Compilation of a protein-protein interaction (PPI) graph (HIPPIE)", "protein_interactions", "=", "parsers", ".", "parse_ppi_graph", "(", "ppi_graph_path", ",", "ppi_edge_min_confidence", ")", "protein_interactions", "=", "protein_interactions", ".", "simplify", "(", ")", "if", "disease_associations_path", "is", "not", "None", "and", "current_disease_ids_path", "is", "not", "None", ":", "current_disease_ids", "=", "parsers", ".", "parse_disease_ids", "(", "current_disease_ids_path", ")", "disease_associations", "=", "parsers", ".", "parse_disease_associations", "(", "disease_associations_path", ",", "current_disease_ids", ")", "else", ":", "disease_associations", "=", "None", "# Build an undirected weighted graph with the remaining interactions based on Entrez gene IDs", "network", "=", "Network", "(", "protein_interactions", ",", "max_adj_p", "=", "max_adj_p", ",", "max_l2fc", "=", "max_log2_fold_change", ",", "min_l2fc", "=", "min_log2_fold_change", ",", ")", "network", ".", "set_up_network", "(", "dge_list", ",", "disease_associations", "=", "disease_associations", ")", "return", "network" ]
Generate the protein-protein interaction network. :return Network: Protein-protein interaction network with information on differential expression.
[ "Generate", "the", "protein", "-", "protein", "interaction", "network", "." ]
4d7b6713485f2d0a0957e6457edc1b1b5a237460
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/pipeline.py#L20-L54
train
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/pipeline.py
parse_dge
def parse_dge( dge_path: str, entrez_id_header: str, log2_fold_change_header: str, adj_p_header: str, entrez_delimiter: str, base_mean_header: Optional[str] = None ) -> List[Gene]: """Parse a differential expression file. :param dge_path: Path to the file. :param entrez_id_header: Header for the Entrez identifier column :param log2_fold_change_header: Header for the log2 fold change column :param adj_p_header: Header for the adjusted p-value column :param entrez_delimiter: Delimiter between Entrez ids. :param base_mean_header: Header for the base mean column. :return: A list of genes. """ if dge_path.endswith('.xlsx'): return parsers.parse_excel( dge_path, entrez_id_header=entrez_id_header, log_fold_change_header=log2_fold_change_header, adjusted_p_value_header=adj_p_header, entrez_delimiter=entrez_delimiter, base_mean_header=base_mean_header, ) if dge_path.endswith('.csv'): return parsers.parse_csv( dge_path, entrez_id_header=entrez_id_header, log_fold_change_header=log2_fold_change_header, adjusted_p_value_header=adj_p_header, entrez_delimiter=entrez_delimiter, base_mean_header=base_mean_header, ) if dge_path.endswith('.tsv'): return parsers.parse_csv( dge_path, entrez_id_header=entrez_id_header, log_fold_change_header=log2_fold_change_header, adjusted_p_value_header=adj_p_header, entrez_delimiter=entrez_delimiter, base_mean_header=base_mean_header, sep="\t" ) raise ValueError(f'Unsupported extension: {dge_path}')
python
def parse_dge( dge_path: str, entrez_id_header: str, log2_fold_change_header: str, adj_p_header: str, entrez_delimiter: str, base_mean_header: Optional[str] = None ) -> List[Gene]: """Parse a differential expression file. :param dge_path: Path to the file. :param entrez_id_header: Header for the Entrez identifier column :param log2_fold_change_header: Header for the log2 fold change column :param adj_p_header: Header for the adjusted p-value column :param entrez_delimiter: Delimiter between Entrez ids. :param base_mean_header: Header for the base mean column. :return: A list of genes. """ if dge_path.endswith('.xlsx'): return parsers.parse_excel( dge_path, entrez_id_header=entrez_id_header, log_fold_change_header=log2_fold_change_header, adjusted_p_value_header=adj_p_header, entrez_delimiter=entrez_delimiter, base_mean_header=base_mean_header, ) if dge_path.endswith('.csv'): return parsers.parse_csv( dge_path, entrez_id_header=entrez_id_header, log_fold_change_header=log2_fold_change_header, adjusted_p_value_header=adj_p_header, entrez_delimiter=entrez_delimiter, base_mean_header=base_mean_header, ) if dge_path.endswith('.tsv'): return parsers.parse_csv( dge_path, entrez_id_header=entrez_id_header, log_fold_change_header=log2_fold_change_header, adjusted_p_value_header=adj_p_header, entrez_delimiter=entrez_delimiter, base_mean_header=base_mean_header, sep="\t" ) raise ValueError(f'Unsupported extension: {dge_path}')
[ "def", "parse_dge", "(", "dge_path", ":", "str", ",", "entrez_id_header", ":", "str", ",", "log2_fold_change_header", ":", "str", ",", "adj_p_header", ":", "str", ",", "entrez_delimiter", ":", "str", ",", "base_mean_header", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "List", "[", "Gene", "]", ":", "if", "dge_path", ".", "endswith", "(", "'.xlsx'", ")", ":", "return", "parsers", ".", "parse_excel", "(", "dge_path", ",", "entrez_id_header", "=", "entrez_id_header", ",", "log_fold_change_header", "=", "log2_fold_change_header", ",", "adjusted_p_value_header", "=", "adj_p_header", ",", "entrez_delimiter", "=", "entrez_delimiter", ",", "base_mean_header", "=", "base_mean_header", ",", ")", "if", "dge_path", ".", "endswith", "(", "'.csv'", ")", ":", "return", "parsers", ".", "parse_csv", "(", "dge_path", ",", "entrez_id_header", "=", "entrez_id_header", ",", "log_fold_change_header", "=", "log2_fold_change_header", ",", "adjusted_p_value_header", "=", "adj_p_header", ",", "entrez_delimiter", "=", "entrez_delimiter", ",", "base_mean_header", "=", "base_mean_header", ",", ")", "if", "dge_path", ".", "endswith", "(", "'.tsv'", ")", ":", "return", "parsers", ".", "parse_csv", "(", "dge_path", ",", "entrez_id_header", "=", "entrez_id_header", ",", "log_fold_change_header", "=", "log2_fold_change_header", ",", "adjusted_p_value_header", "=", "adj_p_header", ",", "entrez_delimiter", "=", "entrez_delimiter", ",", "base_mean_header", "=", "base_mean_header", ",", "sep", "=", "\"\\t\"", ")", "raise", "ValueError", "(", "f'Unsupported extension: {dge_path}'", ")" ]
Parse a differential expression file. :param dge_path: Path to the file. :param entrez_id_header: Header for the Entrez identifier column :param log2_fold_change_header: Header for the log2 fold change column :param adj_p_header: Header for the adjusted p-value column :param entrez_delimiter: Delimiter between Entrez ids. :param base_mean_header: Header for the base mean column. :return: A list of genes.
[ "Parse", "a", "differential", "expression", "file", "." ]
4d7b6713485f2d0a0957e6457edc1b1b5a237460
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/pipeline.py#L57-L106
train
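parse_dge is plain dispatch-by-file-extension; a self-contained sketch of the same shape, with dummy readers standing in for the parsers module:

def demo_dispatch(path):
    # Each suffix maps to a reader; .tsv reuses the CSV reader with a
    # tab separator, exactly the shape of parse_dge's if-chain.
    readers = {
        '.xlsx': lambda p: ('excel', p, None),
        '.csv':  lambda p: ('csv', p, ','),
        '.tsv':  lambda p: ('csv', p, '\t'),
    }
    for suffix, reader in readers.items():
        if path.endswith(suffix):
            return reader(path)
    raise ValueError('Unsupported extension: %s' % path)

print(demo_dispatch('dge.tsv'))   # ('csv', 'dge.tsv', '\t')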
BernardFW/bernard
src/bernard/conf/loader.py
Settings._load
def _load(self, file_path: Text) -> None: """ Load the configuration from a plain Python file. This file is executed on its own. Only keys matching the CONFIG_ATTR will be loaded. Basically, it's CONFIG_KEYS_LIKE_THIS. :param file_path: Path to the file to load """ # noinspection PyUnresolvedReferences module_ = types.ModuleType('settings') module_.__file__ = file_path try: with open(file_path, encoding='utf-8') as f: exec(compile(f.read(), file_path, 'exec'), module_.__dict__) except IOError as e: e.strerror = 'Unable to load configuration file ({})'\ .format(e.strerror) raise for key in dir(module_): if CONFIG_ATTR.match(key): self[key] = getattr(module_, key)
python
def _load(self, file_path: Text) -> None: """ Load the configuration from a plain Python file. This file is executed on its own. Only keys matching the CONFIG_ATTR will be loaded. Basically, it's CONFIG_KEYS_LIKE_THIS. :param file_path: Path to the file to load """ # noinspection PyUnresolvedReferences module_ = types.ModuleType('settings') module_.__file__ = file_path try: with open(file_path, encoding='utf-8') as f: exec(compile(f.read(), file_path, 'exec'), module_.__dict__) except IOError as e: e.strerror = 'Unable to load configuration file ({})'\ .format(e.strerror) raise for key in dir(module_): if CONFIG_ATTR.match(key): self[key] = getattr(module_, key)
[ "def", "_load", "(", "self", ",", "file_path", ":", "Text", ")", "->", "None", ":", "# noinspection PyUnresolvedReferences", "module_", "=", "types", ".", "ModuleType", "(", "'settings'", ")", "module_", ".", "__file__", "=", "file_path", "try", ":", "with", "open", "(", "file_path", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "exec", "(", "compile", "(", "f", ".", "read", "(", ")", ",", "file_path", ",", "'exec'", ")", ",", "module_", ".", "__dict__", ")", "except", "IOError", "as", "e", ":", "e", ".", "strerror", "=", "'Unable to load configuration file ({})'", ".", "format", "(", "e", ".", "strerror", ")", "raise", "for", "key", "in", "dir", "(", "module_", ")", ":", "if", "CONFIG_ATTR", ".", "match", "(", "key", ")", ":", "self", "[", "key", "]", "=", "getattr", "(", "module_", ",", "key", ")" ]
Load the configuration from a plain Python file. This file is executed on its own. Only keys matching the CONFIG_ATTR will be loaded. Basically, it's CONFIG_KEYS_LIKE_THIS. :param file_path: Path to the file to load
[ "Load", "the", "configuration", "from", "a", "plain", "Python", "file", ".", "This", "file", "is", "executed", "on", "its", "own", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/conf/loader.py#L38-L63
train
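Settings._load boils down to "exec the file into a throwaway module, keep the SCREAMING_SNAKE_CASE names". A sketch of that core, with an assumed shape for the CONFIG_ATTR regex (its real definition is not shown here):

import re
import types

CONFIG_ATTR = re.compile(r'^[A-Z][A-Z0-9_]*$')  # assumption: uppercase names only

def demo_load_settings(source):
    # Run the configuration code in a fresh module namespace, then filter.
    module = types.ModuleType('settings')
    exec(compile(source, '<settings>', 'exec'), module.__dict__)
    return {key: getattr(module, key) for key in dir(module)
            if CONFIG_ATTR.match(key)}

print(demo_load_settings("DEBUG = True\n_private = 1\nPORT = 8080\n"))
# {'DEBUG': True, 'PORT': 8080}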
BernardFW/bernard
src/bernard/conf/loader.py
LazySettings._settings
def _settings(self) -> Settings: """ Return the actual settings object, or create it if missing. """ if self.__dict__['__settings'] is None: self.__dict__['__settings'] = Settings() for file_path in self._get_files(): if file_path: # noinspection PyProtectedMember self.__dict__['__settings']._load(file_path) return self.__dict__['__settings']
python
def _settings(self) -> Settings: """ Return the actual settings object, or create it if missing. """ if self.__dict__['__settings'] is None: self.__dict__['__settings'] = Settings() for file_path in self._get_files(): if file_path: # noinspection PyProtectedMember self.__dict__['__settings']._load(file_path) return self.__dict__['__settings']
[ "def", "_settings", "(", "self", ")", "->", "Settings", ":", "if", "self", ".", "__dict__", "[", "'__settings'", "]", "is", "None", ":", "self", ".", "__dict__", "[", "'__settings'", "]", "=", "Settings", "(", ")", "for", "file_path", "in", "self", ".", "_get_files", "(", ")", ":", "if", "file_path", ":", "# noinspection PyProtectedMember", "self", ".", "__dict__", "[", "'__settings'", "]", ".", "_load", "(", "file_path", ")", "return", "self", ".", "__dict__", "[", "'__settings'", "]" ]
Return the actual settings object, or create it if missing.
[ "Return", "the", "actual", "settings", "object", "or", "create", "it", "if", "missing", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/conf/loader.py#L84-L94
train
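LazySettings._settings is the classic build-on-first-access cache; a stripped-down sketch with the file loading replaced by a print so the one-time construction is visible:

class DemoLazy:
    def __init__(self):
        self.__dict__['__settings'] = None

    def _settings(self):
        # Build the settings object only once, then reuse it.
        if self.__dict__['__settings'] is None:
            print('building settings once')
            self.__dict__['__settings'] = {'DEBUG': True}
        return self.__dict__['__settings']

lazy = DemoLazy()
lazy._settings()   # prints "building settings once"
lazy._settings()   # cached; prints nothing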
BernardFW/bernard
src/bernard/storage/register/redis.py
RedisRegisterStore._start
async def _start(self, key: Text) -> None: """ Start the lock. Here we use a SETNX-based algorithm. It's quite shitty, change it. """ for _ in range(0, 1000): with await self.pool as r: just_set = await r.set( self.lock_key(key), '', expire=settings.REGISTER_LOCK_TIME, exist=r.SET_IF_NOT_EXIST, ) if just_set: break await asyncio.sleep(settings.REDIS_POLL_INTERVAL)
python
async def _start(self, key: Text) -> None: """ Start the lock. Here we use a SETNX-based algorithm. It's quite shitty, change it. """ for _ in range(0, 1000): with await self.pool as r: just_set = await r.set( self.lock_key(key), '', expire=settings.REGISTER_LOCK_TIME, exist=r.SET_IF_NOT_EXIST, ) if just_set: break await asyncio.sleep(settings.REDIS_POLL_INTERVAL)
[ "async", "def", "_start", "(", "self", ",", "key", ":", "Text", ")", "->", "None", ":", "for", "_", "in", "range", "(", "0", ",", "1000", ")", ":", "with", "await", "self", ".", "pool", "as", "r", ":", "just_set", "=", "await", "r", ".", "set", "(", "self", ".", "lock_key", "(", "key", ")", ",", "''", ",", "expire", "=", "settings", ".", "REGISTER_LOCK_TIME", ",", "exist", "=", "r", ".", "SET_IF_NOT_EXIST", ",", ")", "if", "just_set", ":", "break", "await", "asyncio", ".", "sleep", "(", "settings", ".", "REDIS_POLL_INTERVAL", ")" ]
Start the lock. Here we use a SETNX-based algorithm. It's quite shitty, change it.
[ "Start", "the", "lock", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/storage/register/redis.py#L42-L60
train
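The lock in _start is SET-if-not-exists with an expiry, polled until it succeeds. A synchronous sketch against redis-py (the original uses an async pool; the key name, TTL and poll interval here are illustrative):

import time
import redis  # assumes the redis-py package

def demo_acquire_lock(r, key, ttl=10, poll=0.05, attempts=1000):
    # SET key "" NX EX ttl succeeds only for the first caller; everyone
    # else sleeps and retries, like _start's loop.
    for _ in range(attempts):
        if r.set(key, '', nx=True, ex=ttl):
            return True
        time.sleep(poll)
    return False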
BernardFW/bernard
src/bernard/storage/register/redis.py
RedisRegisterStore._get
async def _get(self, key: Text) -> Dict[Text, Any]: """ Get the value for the key. It is automatically deserialized from JSON and returns an empty dictionary by default. """ try: with await self.pool as r: return ujson.loads(await r.get(self.register_key(key))) except (ValueError, TypeError): return {}
python
async def _get(self, key: Text) -> Dict[Text, Any]: """ Get the value for the key. It is automatically deserialized from JSON and returns an empty dictionary by default. """ try: with await self.pool as r: return ujson.loads(await r.get(self.register_key(key))) except (ValueError, TypeError): return {}
[ "async", "def", "_get", "(", "self", ",", "key", ":", "Text", ")", "->", "Dict", "[", "Text", ",", "Any", "]", ":", "try", ":", "with", "await", "self", ".", "pool", "as", "r", ":", "return", "ujson", ".", "loads", "(", "await", "r", ".", "get", "(", "self", ".", "register_key", "(", "key", ")", ")", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "return", "{", "}" ]
Get the value for the key. It is automatically deserialized from JSON and returns an empty dictionary by default.
[ "Get", "the", "value", "for", "the", "key", ".", "It", "is", "automatically", "deserialized", "from", "JSON", "and", "returns", "an", "empty", "dictionary", "by", "default", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/storage/register/redis.py#L70-L80
train
BernardFW/bernard
src/bernard/storage/register/redis.py
RedisRegisterStore._replace
async def _replace(self, key: Text, data: Dict[Text, Any]) -> None: """ Replace the register with a new value. """ with await self.pool as r: await r.set(self.register_key(key), ujson.dumps(data))
python
async def _replace(self, key: Text, data: Dict[Text, Any]) -> None: """ Replace the register with a new value. """ with await self.pool as r: await r.set(self.register_key(key), ujson.dumps(data))
[ "async", "def", "_replace", "(", "self", ",", "key", ":", "Text", ",", "data", ":", "Dict", "[", "Text", ",", "Any", "]", ")", "->", "None", ":", "with", "await", "self", ".", "pool", "as", "r", ":", "await", "r", ".", "set", "(", "self", ".", "register_key", "(", "key", ")", ",", "ujson", ".", "dumps", "(", "data", ")", ")" ]
Replace the register with a new value.
[ "Replace", "the", "register", "with", "a", "new", "value", "." ]
9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/storage/register/redis.py#L82-L88
train
giancosta86/Iris
info/gianlucacosta/iris/maven.py
MavenArtifact.getFileName
def getFileName(self, suffix=None, extension="jar"): """ Returns the basename of the artifact's file, using Maven's conventions. In particular, it will be: <artifactId>-<version>[-<suffix>][.<extension>] """ assert (self._artifactId is not None) assert (self._version is not None) return "{0}-{1}{2}{3}".format( self._artifactId, self._version.getRawString(), "-" + suffix.lstrip("-") if suffix is not None else "", "." + extension.lstrip(".") if extension is not None else "" )
python
def getFileName(self, suffix=None, extension="jar"): """ Returns the basename of the artifact's file, using Maven's conventions. In particular, it will be: <artifactId>-<version>[-<suffix>][.<extension>] """ assert (self._artifactId is not None) assert (self._version is not None) return "{0}-{1}{2}{3}".format( self._artifactId, self._version.getRawString(), "-" + suffix.lstrip("-") if suffix is not None else "", "." + extension.lstrip(".") if extension is not None else "" )
[ "def", "getFileName", "(", "self", ",", "suffix", "=", "None", ",", "extension", "=", "\"jar\"", ")", ":", "assert", "(", "self", ".", "_artifactId", "is", "not", "None", ")", "assert", "(", "self", ".", "_version", "is", "not", "None", ")", "return", "\"{0}-{1}{2}{3}\"", ".", "format", "(", "self", ".", "_artifactId", ",", "self", ".", "_version", ".", "getRawString", "(", ")", ",", "\"-\"", "+", "suffix", ".", "lstrip", "(", "\"-\"", ")", "if", "suffix", "is", "not", "None", "else", "\"\"", ",", "\".\"", "+", "extension", ".", "lstrip", "(", "\".\"", ")", "if", "extension", "is", "not", "None", "else", "\"\"", ")" ]
Returns the basename of the artifact's file, using Maven's conventions. In particular, it will be: <artifactId>-<version>[-<suffix>][.<extension>]
[ "Returns", "the", "basename", "of", "the", "artifact", "s", "file", "using", "Maven", "s", "conventions", "." ]
b3d92cca5cce3653519bd032346b211c46a57d05
https://github.com/giancosta86/Iris/blob/b3d92cca5cce3653519bd032346b211c46a57d05/info/gianlucacosta/iris/maven.py#L50-L67
train
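The naming convention getFileName implements can be checked with a few calls; a free-function sketch of the same formatting (demo_maven_filename is an illustrative name):

def demo_maven_filename(artifact_id, version, suffix=None, extension='jar'):
    # <artifactId>-<version>[-<suffix>][.<extension>]
    return '{0}-{1}{2}{3}'.format(
        artifact_id,
        version,
        '-' + suffix.lstrip('-') if suffix is not None else '',
        '.' + extension.lstrip('.') if extension is not None else '')

print(demo_maven_filename('iris', '1.2.3'))                   # iris-1.2.3.jar
print(demo_maven_filename('iris', '1.2.3', 'sources'))        # iris-1.2.3-sources.jar
print(demo_maven_filename('iris', '1.2.3', extension='pom'))  # iris-1.2.3.pom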
giancosta86/Iris
info/gianlucacosta/iris/maven.py
MavenArtifact.getPath
def getPath(self, suffix=None, extension="jar", separator=os.sep): """ Returns the full path, relative to the root of a Maven repository, of the current artifact, using Maven's conventions. In particular, it will be: <groupId with "." replaced by <separator>>[<separator><artifactId><separator>[<version><separator><basename obtained via getFileName()>]] By default, <separator>=os.sep """ assert (self._groupId is not None) resultComponents = [ self._groupId.replace(".", separator) ] if self._artifactId is not None: resultComponents.append(self._artifactId) version = self._version if version is not None: resultComponents.append(version.getRawString()) resultComponents.append(self.getFileName(suffix, extension)) return separator.join(resultComponents)
python
def getPath(self, suffix=None, extension="jar", separator=os.sep): """ Returns the full path, relative to the root of a Maven repository, of the current artifact, using Maven's conventions. In particular, it will be: <groupId with "." replaced by <separator>>[<separator><artifactId><separator>[<version><separator><basename obtained via getFileName()>]] By default, <separator>=os.sep """ assert (self._groupId is not None) resultComponents = [ self._groupId.replace(".", separator) ] if self._artifactId is not None: resultComponents.append(self._artifactId) version = self._version if version is not None: resultComponents.append(version.getRawString()) resultComponents.append(self.getFileName(suffix, extension)) return separator.join(resultComponents)
[ "def", "getPath", "(", "self", ",", "suffix", "=", "None", ",", "extension", "=", "\"jar\"", ",", "separator", "=", "os", ".", "sep", ")", ":", "assert", "(", "self", ".", "_groupId", "is", "not", "None", ")", "resultComponents", "=", "[", "self", ".", "_groupId", ".", "replace", "(", "\".\"", ",", "separator", ")", "]", "if", "self", ".", "_artifactId", "is", "not", "None", ":", "resultComponents", ".", "append", "(", "self", ".", "_artifactId", ")", "version", "=", "self", ".", "_version", "if", "version", "is", "not", "None", ":", "resultComponents", ".", "append", "(", "version", ".", "getRawString", "(", ")", ")", "resultComponents", ".", "append", "(", "self", ".", "getFileName", "(", "suffix", ",", "extension", ")", ")", "return", "separator", ".", "join", "(", "resultComponents", ")" ]
Returns the full path, relative to the root of a Maven repository, of the current artifact, using Maven's conventions. In particular, it will be: <groupId with "." replaced by <separator>>[<separator><artifactId><separator>[<version><separator><basename obtained via getFileName()>]] By default, <separator>=os.sep
[ "Returns", "the", "full", "path", "relative", "to", "the", "root", "of", "a", "Maven", "repository", "of", "the", "current", "artifact", "using", "Maven", "s", "conventions", "." ]
b3d92cca5cce3653519bd032346b211c46a57d05
https://github.com/giancosta86/Iris/blob/b3d92cca5cce3653519bd032346b211c46a57d05/info/gianlucacosta/iris/maven.py#L70-L96
train
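And the repository layout getPath produces, sketched the same way with a forward-slash separator for readability:

def demo_maven_path(group_id, artifact_id, version, filename, separator='/'):
    # groupId dots become path separators, then artifactId/version/filename.
    parts = [group_id.replace('.', separator), artifact_id, version, filename]
    return separator.join(parts)

print(demo_maven_path('info.gianlucacosta', 'iris', '1.2.3', 'iris-1.2.3.jar'))
# info/gianlucacosta/iris/1.2.3/iris-1.2.3.jar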
openvax/varlens
varlens/support.py
allele_support_df
def allele_support_df(loci, sources): """ Returns a DataFrame of allele counts for all given loci in the read sources """ return pandas.DataFrame( allele_support_rows(loci, sources), columns=EXPECTED_COLUMNS)
python
def allele_support_df(loci, sources): """ Returns a DataFrame of allele counts for all given loci in the read sources """ return pandas.DataFrame( allele_support_rows(loci, sources), columns=EXPECTED_COLUMNS)
[ "def", "allele_support_df", "(", "loci", ",", "sources", ")", ":", "return", "pandas", ".", "DataFrame", "(", "allele_support_rows", "(", "loci", ",", "sources", ")", ",", "columns", "=", "EXPECTED_COLUMNS", ")" ]
Returns a DataFrame of allele counts for all given loci in the read sources
[ "Returns", "a", "DataFrame", "of", "allele", "counts", "for", "all", "given", "loci", "in", "the", "read", "sources" ]
715d3ede5893757b2fcba4117515621bca7b1e5d
https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/support.py#L29-L35
train
openvax/varlens
varlens/support.py
variant_support
def variant_support(variants, allele_support_df, ignore_missing=False): ''' Collect the read evidence support for the given variants. Parameters ---------- variants : iterable of varcode.Variant allele_support_df : dataframe Allele support dataframe, as output by the varlens-allele-support tool. It should have columns: source, contig, interbase_start, interbase_end, allele. The remaining columns are interpreted as read counts of various subsets of reads (e.g. all reads, non-duplicate reads, etc.) ignore_missing : boolean If True, then variants with no allele counts will be interpreted as having 0 depth. If False, then an exception will be raised if any variants have no allele counts. Returns ---------- A pandas.Panel4D frame with these axes: labels (axis=0) : the type of read being counted, i.e. the read count fields in allele_support_df. items (axis=1) : the type of measurement (num_alt, num_ref, num_other, total_depth, alt_fraction, any_alt_fraction) major axis (axis=2) : the variants minor axis (axis=3) : the sources ''' missing = [ c for c in EXPECTED_COLUMNS if c not in allele_support_df.columns ] if missing: raise ValueError("Missing columns: %s" % " ".join(missing)) # Ensure our start and end fields are ints. allele_support_df[["interbase_start", "interbase_end"]] = ( allele_support_df[["interbase_start", "interbase_end"]].astype(int)) sources = sorted(allele_support_df["source"].unique()) allele_support_dict = collections.defaultdict(dict) for (i, row) in allele_support_df.iterrows(): key = ( row['source'], row.contig, row.interbase_start, row.interbase_end) allele_support_dict[key][row.allele] = row["count"] # We want an exception on bad lookups, so convert to a regular dict. allele_support_dict = dict(allele_support_dict) dataframe_dicts = collections.defaultdict( lambda: collections.defaultdict(list)) for variant in variants: for source in sources: key = (source, variant.contig, variant.start - 1, variant.end) try: alleles = allele_support_dict[key] except KeyError: message = ( "No allele counts in source %s for variant %s" % ( source, str(variant))) if ignore_missing: logging.warning(message) alleles = {} else: raise ValueError(message) alt = alleles.get(variant.alt, 0) ref = alleles.get(variant.ref, 0) total = sum(alleles.values()) other = total - alt - ref dataframe_dicts["num_alt"][source].append(alt) dataframe_dicts["num_ref"][source].append(ref) dataframe_dicts["num_other"][source].append(other) dataframe_dicts["total_depth"][source].append(total) dataframe_dicts["alt_fraction"][source].append( float(alt) / max(1, total)) dataframe_dicts["any_alt_fraction"][source].append( float(alt + other) / max(1, total)) dataframes = dict( (label, pandas.DataFrame(value, index=variants)) for (label, value) in dataframe_dicts.items()) return pandas.Panel(dataframes)
python
def variant_support(variants, allele_support_df, ignore_missing=False): ''' Collect the read evidence support for the given variants. Parameters ---------- variants : iterable of varcode.Variant allele_support_df : dataframe Allele support dataframe, as output by the varlens-allele-support tool. It should have columns: source, contig, interbase_start, interbase_end, allele. The remaining columns are interpreted as read counts of various subsets of reads (e.g. all reads, non-duplicate reads, etc.) ignore_missing : boolean If True, then variants with no allele counts will be interpreted as having 0 depth. If False, then an exception will be raised if any variants have no allele counts. Returns ---------- A pandas.Panel4D frame with these axes: labels (axis=0) : the type of read being counted, i.e. the read count fields in allele_support_df. items (axis=1) : the type of measurement (num_alt, num_ref, num_other, total_depth, alt_fraction, any_alt_fraction) major axis (axis=2) : the variants minor axis (axis=3) : the sources ''' missing = [ c for c in EXPECTED_COLUMNS if c not in allele_support_df.columns ] if missing: raise ValueError("Missing columns: %s" % " ".join(missing)) # Ensure our start and end fields are ints. allele_support_df[["interbase_start", "interbase_end"]] = ( allele_support_df[["interbase_start", "interbase_end"]].astype(int)) sources = sorted(allele_support_df["source"].unique()) allele_support_dict = collections.defaultdict(dict) for (i, row) in allele_support_df.iterrows(): key = ( row['source'], row.contig, row.interbase_start, row.interbase_end) allele_support_dict[key][row.allele] = row["count"] # We want an exception on bad lookups, so convert to a regular dict. allele_support_dict = dict(allele_support_dict) dataframe_dicts = collections.defaultdict( lambda: collections.defaultdict(list)) for variant in variants: for source in sources: key = (source, variant.contig, variant.start - 1, variant.end) try: alleles = allele_support_dict[key] except KeyError: message = ( "No allele counts in source %s for variant %s" % ( source, str(variant))) if ignore_missing: logging.warning(message) alleles = {} else: raise ValueError(message) alt = alleles.get(variant.alt, 0) ref = alleles.get(variant.ref, 0) total = sum(alleles.values()) other = total - alt - ref dataframe_dicts["num_alt"][source].append(alt) dataframe_dicts["num_ref"][source].append(ref) dataframe_dicts["num_other"][source].append(other) dataframe_dicts["total_depth"][source].append(total) dataframe_dicts["alt_fraction"][source].append( float(alt) / max(1, total)) dataframe_dicts["any_alt_fraction"][source].append( float(alt + other) / max(1, total)) dataframes = dict( (label, pandas.DataFrame(value, index=variants)) for (label, value) in dataframe_dicts.items()) return pandas.Panel(dataframes)
[ "def", "variant_support", "(", "variants", ",", "allele_support_df", ",", "ignore_missing", "=", "False", ")", ":", "missing", "=", "[", "c", "for", "c", "in", "EXPECTED_COLUMNS", "if", "c", "not", "in", "allele_support_df", ".", "columns", "]", "if", "missing", ":", "raise", "ValueError", "(", "\"Missing columns: %s\"", "%", "\" \"", ".", "join", "(", "missing", ")", ")", "# Ensure our start and end fields are ints.", "allele_support_df", "[", "[", "\"interbase_start\"", ",", "\"interbase_end\"", "]", "]", "=", "(", "allele_support_df", "[", "[", "\"interbase_start\"", ",", "\"interbase_end\"", "]", "]", ".", "astype", "(", "int", ")", ")", "sources", "=", "sorted", "(", "allele_support_df", "[", "\"source\"", "]", ".", "unique", "(", ")", ")", "allele_support_dict", "=", "collections", ".", "defaultdict", "(", "dict", ")", "for", "(", "i", ",", "row", ")", "in", "allele_support_df", ".", "iterrows", "(", ")", ":", "key", "=", "(", "row", "[", "'source'", "]", ",", "row", ".", "contig", ",", "row", ".", "interbase_start", ",", "row", ".", "interbase_end", ")", "allele_support_dict", "[", "key", "]", "[", "row", ".", "allele", "]", "=", "row", "[", "\"count\"", "]", "# We want an exception on bad lookups, so convert to a regular dict.", "allele_support_dict", "=", "dict", "(", "allele_support_dict", ")", "dataframe_dicts", "=", "collections", ".", "defaultdict", "(", "lambda", ":", "collections", ".", "defaultdict", "(", "list", ")", ")", "for", "variant", "in", "variants", ":", "for", "source", "in", "sources", ":", "key", "=", "(", "source", ",", "variant", ".", "contig", ",", "variant", ".", "start", "-", "1", ",", "variant", ".", "end", ")", "try", ":", "alleles", "=", "allele_support_dict", "[", "key", "]", "except", "KeyError", ":", "message", "=", "(", "\"No allele counts in source %s for variant %s\"", "%", "(", "source", ",", "str", "(", "variant", ")", ")", ")", "if", "ignore_missing", ":", "logging", ".", "warning", "(", "message", ")", "alleles", "=", "{", "}", "else", ":", "raise", "ValueError", "(", "message", ")", "alt", "=", "alleles", ".", "get", "(", "variant", ".", "alt", ",", "0", ")", "ref", "=", "alleles", ".", "get", "(", "variant", ".", "ref", ",", "0", ")", "total", "=", "sum", "(", "alleles", ".", "values", "(", ")", ")", "other", "=", "total", "-", "alt", "-", "ref", "dataframe_dicts", "[", "\"num_alt\"", "]", "[", "source", "]", ".", "append", "(", "alt", ")", "dataframe_dicts", "[", "\"num_ref\"", "]", "[", "source", "]", ".", "append", "(", "ref", ")", "dataframe_dicts", "[", "\"num_other\"", "]", "[", "source", "]", ".", "append", "(", "other", ")", "dataframe_dicts", "[", "\"total_depth\"", "]", "[", "source", "]", ".", "append", "(", "total", ")", "dataframe_dicts", "[", "\"alt_fraction\"", "]", "[", "source", "]", ".", "append", "(", "float", "(", "alt", ")", "/", "max", "(", "1", ",", "total", ")", ")", "dataframe_dicts", "[", "\"any_alt_fraction\"", "]", "[", "source", "]", ".", "append", "(", "float", "(", "alt", "+", "other", ")", "/", "max", "(", "1", ",", "total", ")", ")", "dataframes", "=", "dict", "(", "(", "label", ",", "pandas", ".", "DataFrame", "(", "value", ",", "index", "=", "variants", ")", ")", "for", "(", "label", ",", "value", ")", "in", "dataframe_dicts", ".", "items", "(", ")", ")", "return", "pandas", ".", "Panel", "(", "dataframes", ")" ]
Collect the read evidence support for the given variants. Parameters ---------- variants : iterable of varcode.Variant allele_support_df : dataframe Allele support dataframe, as output by the varlens-allele-support tool. It should have columns: source, contig, interbase_start, interbase_end, allele. The remaining columns are interpreted as read counts of various subsets of reads (e.g. all reads, non-duplicate reads, etc.) ignore_missing : boolean If True, then variants with no allele counts will be interpreted as having 0 depth. If False, then an exception will be raised if any variants have no allele counts. Returns ---------- A pandas.Panel4D frame with these axes: labels (axis=0) : the type of read being counted, i.e. the read count fields in allele_support_df. items (axis=1) : the type of measurement (num_alt, num_ref, num_other, total_depth, alt_fraction, any_alt_fraction) major axis (axis=2) : the variants minor axis (axis=3) : the sources
[ "Collect", "the", "read", "evidence", "support", "for", "the", "given", "variants", "." ]
715d3ede5893757b2fcba4117515621bca7b1e5d
https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/support.py#L57-L153
train
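The per-variant arithmetic at the heart of variant_support stands alone well. A sketch of that inner step, given one variant's allele-count dict:

def demo_variant_measurements(alleles, ref, alt):
    # Counts for ref, alt and everything else, plus depth-normalised
    # fractions; max(1, total) avoids division by zero at depth 0.
    alt_count = alleles.get(alt, 0)
    ref_count = alleles.get(ref, 0)
    total = sum(alleles.values())
    other = total - alt_count - ref_count
    return {
        'num_alt': alt_count,
        'num_ref': ref_count,
        'num_other': other,
        'total_depth': total,
        'alt_fraction': alt_count / max(1, total),
        'any_alt_fraction': (alt_count + other) / max(1, total),
    }

print(demo_variant_measurements({'A': 18, 'G': 6, 'T': 1}, ref='A', alt='G'))
# {'num_alt': 6, 'num_ref': 18, 'num_other': 1, 'total_depth': 25, ...}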
gesellkammer/sndfileio
sndfileio/sndfileio.py
sndinfo
def sndinfo(path:str) -> SndInfo: """ Get info about a soundfile path (str): the path to a soundfile RETURNS --> an instance of SndInfo: samplerate, nframes, channels, encoding, fileformat """ backend = _getBackend(path) logger.debug(f"sndinfo: using backend {backend.name}") return backend.getinfo(path)
python
def sndinfo(path:str) -> SndInfo: """ Get info about a soundfile path (str): the path to a soundfile RETURNS --> an instance of SndInfo: samplerate, nframes, channels, encoding, fileformat """ backend = _getBackend(path) logger.debug(f"sndinfo: using backend {backend.name}") return backend.getinfo(path)
[ "def", "sndinfo", "(", "path", ":", "str", ")", "->", "SndInfo", ":", "backend", "=", "_getBackend", "(", "path", ")", "logger", ".", "debug", "(", "f\"sndinfo: using backend {backend.name}\"", ")", "return", "backend", ".", "getinfo", "(", "path", ")" ]
Get info about a soundfile path (str): the path to a soundfile RETURNS --> an instance of SndInfo: samplerate, nframes, channels, encoding, fileformat
[ "Get", "info", "about", "a", "soundfile" ]
8e2b264cadb652f09d2e775f54090c0a3cb2ced2
https://github.com/gesellkammer/sndfileio/blob/8e2b264cadb652f09d2e775f54090c0a3cb2ced2/sndfileio/sndfileio.py#L215-L225
train
gesellkammer/sndfileio
sndfileio/sndfileio.py
asmono
def asmono(samples:np.ndarray, channel:Union[int, str]=0) -> np.ndarray: """ convert samples to mono if they are not mono already. The returned array will always have the shape (numframes,) channel: the channel number to use, or 'mix' to mix-down all channels """ if numchannels(samples) == 1: # it could be [1,2,3,4,...], or [[1], [2], [3], [4], ...] if isinstance(samples[0], float): return samples elif isinstance(samples[0], np.ndarray): return np.reshape(samples, (len(samples),)) else: raise TypeError("Samples should be numeric, found: %s" % str(type(samples[0]))) if isinstance(channel, int): return samples[:, channel] elif channel == 'mix': return _mix(samples, scale_by_numchannels=True) else: raise ValueError("channel has to be an integer indicating a channel," " or 'mix' to mix down all channels")
python
def asmono(samples:np.ndarray, channel:Union[int, str]=0) -> np.ndarray: """ convert samples to mono if they are not mono already. The returned array will always have the shape (numframes,) channel: the channel number to use, or 'mix' to mix-down all channels """ if numchannels(samples) == 1: # it could be [1,2,3,4,...], or [[1], [2], [3], [4], ...] if isinstance(samples[0], float): return samples elif isinstance(samples[0], np.ndarray): return np.reshape(samples, (len(samples),)) else: raise TypeError("Samples should be numeric, found: %s" % str(type(samples[0]))) if isinstance(channel, int): return samples[:, channel] elif channel == 'mix': return _mix(samples, scale_by_numchannels=True) else: raise ValueError("channel has to be an integer indicating a channel," " or 'mix' to mix down all channels")
[ "def", "asmono", "(", "samples", ":", "np", ".", "ndarray", ",", "channel", ":", "Union", "[", "int", ",", "str", "]", "=", "0", ")", "->", "np", ".", "ndarray", ":", "if", "numchannels", "(", "samples", ")", "==", "1", ":", "# it could be [1,2,3,4,...], or [[1], [2], [3], [4], ...]", "if", "isinstance", "(", "samples", "[", "0", "]", ",", "float", ")", ":", "return", "samples", "elif", "isinstance", "(", "samples", "[", "0", "]", ",", "np", ".", "dnarray", ")", ":", "return", "np", ".", "reshape", "(", "samples", ",", "(", "len", "(", "samples", ")", ",", ")", ")", "else", ":", "raise", "TypeError", "(", "\"Samples should be numeric, found: %s\"", "%", "str", "(", "type", "(", "samples", "[", "0", "]", ")", ")", ")", "if", "isinstance", "(", "channel", ",", "int", ")", ":", "return", "samples", "[", ":", ",", "channel", "]", "elif", "channel", "==", "'mix'", ":", "return", "_mix", "(", "samples", ",", "scale_by_numchannels", "=", "True", ")", "else", ":", "raise", "ValueError", "(", "\"channel has to be an integer indicating a channel,\"", "\" or 'mix' to mix down all channels\"", ")" ]
convert samples to mono if they are not mono already. The returned array will always have the shape (numframes,) channel: the channel number to use, or 'mix' to mix-down all channels
[ "convert", "samples", "to", "mono", "if", "they", "are", "not", "mono", "already", "." ]
8e2b264cadb652f09d2e775f54090c0a3cb2ced2
https://github.com/gesellkammer/sndfileio/blob/8e2b264cadb652f09d2e775f54090c0a3cb2ced2/sndfileio/sndfileio.py#L298-L322
train
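What asmono's two multichannel branches do, shown directly in numpy; the private _mix helper is not listed here, so the mixdown line assumes it averages channels when scale_by_numchannels=True:

import numpy as np

stereo = np.column_stack([np.ones(4), np.zeros(4)])  # 4 frames, 2 channels

left = stereo[:, 0]                         # channel selection, shape (4,)
mix = stereo.sum(axis=1) / stereo.shape[1]  # assumed mixdown: average channels

print(left)   # [1. 1. 1. 1.]
print(mix)    # [0.5 0.5 0.5 0.5]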
gesellkammer/sndfileio
sndfileio/sndfileio.py
getchannel
def getchannel(samples: np.ndarray, ch:int) -> np.ndarray: """ Returns a view into a channel of samples. samples : a numpy array representing the audio data ch : the channel to extract (channels begin with 0) """ N = numchannels(samples) if ch > (N - 1): raise ValueError("channel %d out of range" % ch) if N == 1: return samples return samples[:, ch]
python
def getchannel(samples: np.ndarray, ch:int) -> np.ndarray: """ Returns a view into a channel of samples. samples : a numpy array representing the audio data ch : the channel to extract (channels begin with 0) """ N = numchannels(samples) if ch > (N - 1): raise ValueError("channel %d out of range" % ch) if N == 1: return samples return samples[:, ch]
[ "def", "getchannel", "(", "samples", ":", "np", ".", "ndarray", ",", "ch", ":", "int", ")", "->", "np", ".", "ndarray", ":", "N", "=", "numchannels", "(", "samples", ")", "if", "ch", ">", "(", "N", "-", "1", ")", ":", "raise", "ValueError", "(", "\"channel %d out of range\"", "%", "ch", ")", "if", "N", "==", "1", ":", "return", "samples", "return", "samples", "[", ":", ",", "ch", "]" ]
Returns a view into a channel of samples. samples : a numpy array representing the audio data ch : the channel to extract (channels begin with 0)
[ "Returns", "a", "view", "into", "a", "channel", "of", "samples", "." ]
8e2b264cadb652f09d2e775f54090c0a3cb2ced2
https://github.com/gesellkammer/sndfileio/blob/8e2b264cadb652f09d2e775f54090c0a3cb2ced2/sndfileio/sndfileio.py#L325-L337
train
gesellkammer/sndfileio
sndfileio/sndfileio.py
bitdepth
def bitdepth(data:np.ndarray, snap:bool=True) -> int: """ returns the number of bits actually used to represent the data. data: a numpy.array (mono or multi-channel) snap: snap to 8, 16, 24 or 32 bits. """ data = asmono(data) maxitems = min(4096, data.shape[0]) maxbits = max(x.as_integer_ratio()[1] for x in data[:maxitems]).bit_length() if snap: if maxbits <= 8: maxbits = 8 elif maxbits <= 16: maxbits = 16 elif maxbits <= 24: maxbits = 24 elif maxbits <= 32: maxbits = 32 else: maxbits = 64 return maxbits
python
def bitdepth(data:np.ndarray, snap:bool=True) -> int: """ returns the number of bits actually used to represent the data. data: a numpy.array (mono or multi-channel) snap: snap to 8, 16, 24 or 32 bits. """ data = asmono(data) maxitems = min(4096, data.shape[0]) maxbits = max(x.as_integer_ratio()[1] for x in data[:maxitems]).bit_length() if snap: if maxbits <= 8: maxbits = 8 elif maxbits <= 16: maxbits = 16 elif maxbits <= 24: maxbits = 24 elif maxbits <= 32: maxbits = 32 else: maxbits = 64 return maxbits
[ "def", "bitdepth", "(", "data", ":", "np", ".", "ndarray", ",", "snap", ":", "bool", "=", "True", ")", "->", "int", ":", "data", "=", "asmono", "(", "data", ")", "maxitems", "=", "min", "(", "4096", ",", "data", ".", "shape", "[", "0", "]", ")", "maxbits", "=", "max", "(", "x", ".", "as_integer_ratio", "(", ")", "[", "1", "]", "for", "x", "in", "data", "[", ":", "maxitems", "]", ")", ".", "bit_length", "(", ")", "if", "snap", ":", "if", "maxbits", "<=", "8", ":", "maxbits", "=", "8", "elif", "maxbits", "<=", "16", ":", "maxbits", "=", "16", "elif", "maxbits", "<=", "24", ":", "maxbits", "=", "24", "elif", "maxbits", "<=", "32", ":", "maxbits", "=", "32", "else", ":", "maxbits", "=", "64", "return", "maxbits" ]
returns the number of bits actually used to represent the data. data: a numpy.array (mono or multi-channel) snap: snap to 8, 16, 24 or 32 bits.
[ "returns", "the", "number", "of", "bits", "actually", "used", "to", "represent", "the", "data", "." ]
8e2b264cadb652f09d2e775f54090c0a3cb2ced2
https://github.com/gesellkammer/sndfileio/blob/8e2b264cadb652f09d2e775f54090c0a3cb2ced2/sndfileio/sndfileio.py#L340-L362
train
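bitdepth's trick is worth spelling out: a float sampled from an n-bit integer scale has a denominator of at most 2**n, so the largest denominator's bit length bounds the bits actually in use. A distilled sketch:

import numpy as np

def demo_used_bits(data):
    # Largest denominator across samples, then its bit length.
    return max(float(x).as_integer_ratio()[1] for x in data).bit_length()

pcm16_like = np.array([0.5, -0.25, 12345 / 2 ** 15])
print(demo_used_bits(pcm16_like))   # 16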
gesellkammer/sndfileio
sndfileio/sndfileio.py
sndwrite_like
def sndwrite_like(samples:np.ndarray, likefile:str, outfile:str) -> None: """ Write samples to outfile with samplerate and encoding taken from likefile """ info = sndinfo(likefile) sndwrite(samples, info.samplerate, outfile, encoding=info.encoding)
python
def sndwrite_like(samples:np.ndarray, likefile:str, outfile:str) -> None: """ Write samples to outfile with samplerate and encoding taken from likefile """ info = sndinfo(likefile) sndwrite(samples, info.samplerate, outfile, encoding=info.encoding)
[ "def", "sndwrite_like", "(", "samples", ":", "np", ".", "ndarray", ",", "likefile", ":", "str", ",", "outfile", ":", "str", ")", "->", "None", ":", "info", "=", "sndinfo", "(", "likefile", ")", "sndwrite", "(", "samples", ",", "info", ".", "samplerate", ",", "outfile", ",", "encoding", "=", "info", ".", "encoding", ")" ]
Write samples to outfile with samplerate and encoding taken from likefile
[ "Write", "samples", "to", "outfile", "with", "samplerate", "and", "encoding", "taken", "from", "likefile" ]
8e2b264cadb652f09d2e775f54090c0a3cb2ced2
https://github.com/gesellkammer/sndfileio/blob/8e2b264cadb652f09d2e775f54090c0a3cb2ced2/sndfileio/sndfileio.py#L365-L371
train
gesellkammer/sndfileio
sndfileio/sndfileio.py
_wavReadData
def _wavReadData(fid, size:int, channels:int, encoding:str, bigendian:bool) -> np.ndarray: """ adapted from scipy.io.wavfile._read_data_chunk assume we are at the data (after having read the size) """ bits = int(encoding[3:]) if bits == 8: data = np.fromfile(fid, dtype=np.ubyte, count=size) if channels > 1: data = data.reshape(-1, channels) else: bytes = bits // 8 if encoding in ('pcm16', 'pcm32', 'pcm64'): if bigendian: dtype = '>i%d' % bytes else: dtype = '<i%d' % bytes data = np.fromfile(fid, dtype=dtype, count=size // bytes) if channels > 1: data = data.reshape(-1, channels) elif encoding[:3] == 'flt': print("flt32!") if bits == 32: if bigendian: dtype = '>f4' else: dtype = '<f4' else: raise NotImplementedError data = np.fromfile(fid, dtype=dtype, count=size // bytes) if channels > 1: data = data.reshape(-1, channels) elif encoding == 'pcm24': # this conversion approach is really bad for long files # TODO: do the same but in chunks data = _numpy24to32bit(np.fromfile(fid, dtype=np.ubyte, count=size), bigendian=False) if channels > 1: data = data.reshape(-1, channels) return data
python
def _wavReadData(fid, size:int, channels:int, encoding:str, bigendian:bool) -> np.ndarray: """ adapted from scipy.io.wavfile._read_data_chunk assume we are at the data (after having read the size) """ bits = int(encoding[3:]) if bits == 8: data = np.fromfile(fid, dtype=np.ubyte, count=size) if channels > 1: data = data.reshape(-1, channels) else: bytes = bits // 8 if encoding in ('pcm16', 'pcm32', 'pcm64'): if bigendian: dtype = '>i%d' % bytes else: dtype = '<i%d' % bytes data = np.fromfile(fid, dtype=dtype, count=size // bytes) if channels > 1: data = data.reshape(-1, channels) elif encoding[:3] == 'flt': print("flt32!") if bits == 32: if bigendian: dtype = '>f4' else: dtype = '<f4' else: raise NotImplementedError data = np.fromfile(fid, dtype=dtype, count=size // bytes) if channels > 1: data = data.reshape(-1, channels) elif encoding == 'pcm24': # this conversion approach is really bad for long files # TODO: do the same but in chunks data = _numpy24to32bit(np.fromfile(fid, dtype=np.ubyte, count=size), bigendian=False) if channels > 1: data = data.reshape(-1, channels) return data
[ "def", "_wavReadData", "(", "fid", ",", "size", ":", "int", ",", "channels", ":", "int", ",", "encoding", ":", "str", ",", "bigendian", ":", "bool", ")", "->", "np", ".", "ndarray", ":", "bits", "=", "int", "(", "encoding", "[", "3", ":", "]", ")", "if", "bits", "==", "8", ":", "data", "=", "np", ".", "fromfile", "(", "fid", ",", "dtype", "=", "np", ".", "ubyte", ",", "count", "=", "size", ")", "if", "channels", ">", "1", ":", "data", "=", "data", ".", "reshape", "(", "-", "1", ",", "channels", ")", "else", ":", "bytes", "=", "bits", "//", "8", "if", "encoding", "in", "(", "'pcm16'", ",", "'pcm32'", ",", "'pcm64'", ")", ":", "if", "bigendian", ":", "dtype", "=", "'>i%d'", "%", "bytes", "else", ":", "dtype", "=", "'<i%d'", "%", "bytes", "data", "=", "np", ".", "fromfile", "(", "fid", ",", "dtype", "=", "dtype", ",", "count", "=", "size", "//", "bytes", ")", "if", "channels", ">", "1", ":", "data", "=", "data", ".", "reshape", "(", "-", "1", ",", "channels", ")", "elif", "encoding", "[", ":", "3", "]", "==", "'flt'", ":", "print", "(", "\"flt32!\"", ")", "if", "bits", "==", "32", ":", "if", "bigendian", ":", "dtype", "=", "'>f4'", "else", ":", "dtype", "=", "'<f4'", "else", ":", "raise", "NotImplementedError", "data", "=", "np", ".", "fromfile", "(", "fid", ",", "dtype", "=", "dtype", ",", "count", "=", "size", "//", "bytes", ")", "if", "channels", ">", "1", ":", "data", "=", "data", ".", "reshape", "(", "-", "1", ",", "channels", ")", "elif", "encoding", "==", "'pcm24'", ":", "# this conversion approach is really bad for long files", "# TODO: do the same but in chunks", "data", "=", "_numpy24to32bit", "(", "np", ".", "fromfile", "(", "fid", ",", "dtype", "=", "np", ".", "ubyte", ",", "count", "=", "size", ")", ",", "bigendian", "=", "False", ")", "if", "channels", ">", "1", ":", "data", "=", "data", ".", "reshape", "(", "-", "1", ",", "channels", ")", "return", "data" ]
adapted from scipy.io.wavfile._read_data_chunk assume we are at the data (after having read the size)
[ "adapted", "from", "scipy", ".", "io", ".", "wavfile", ".", "_read_data_chunk" ]
8e2b264cadb652f09d2e775f54090c0a3cb2ced2
https://github.com/gesellkammer/sndfileio/blob/8e2b264cadb652f09d2e775f54090c0a3cb2ced2/sndfileio/sndfileio.py#L788-L832
train
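The pcm24 branch leans on the unshown _numpy24to32bit helper. One plausible implementation of that widening, a sketch rather than the library's actual code: pad each packed little-endian 3-byte sample to 4 bytes, reinterpret as int32, and shift to restore the sign:

import numpy as np

def demo_pcm24_to_int32(raw):
    # raw holds packed little-endian 24-bit samples as uint8 triplets.
    frames = raw.reshape(-1, 3)
    padded = np.zeros((frames.shape[0], 4), dtype=np.uint8)
    padded[:, 1:] = frames                  # zero byte in the low position
    return padded.view('<i4').ravel() >> 8  # arithmetic shift sign-extends

sample = np.array([0x01, 0x00, 0x80], dtype=np.uint8)
print(demo_pcm24_to_int32(sample))   # [-8388607]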
gesellkammer/sndfileio
sndfileio/sndfileio.py
_wavGetInfo
def _wavGetInfo(f:Union[IO, str]) -> Tuple[SndInfo, Dict[str, Any]]: """ Read the info of a wav file. taken mostly from scipy.io.wavfile if extended: returns also fsize and bigendian """ if isinstance(f, (str, bytes)): f = open(f, 'rb') needsclosing = True else: needsclosing = False fsize, bigendian = _wavReadRiff(f) fmt = ">i" if bigendian else "<i" while (f.tell() < fsize): chunk_id = f.read(4) if chunk_id == b'fmt ': chunksize, sampfmt, chans, sr, byterate, align, bits = _wavReadFmt(f, bigendian) elif chunk_id == b'data': datasize = _struct.unpack(fmt, f.read(4))[0] nframes = int(datasize / (chans * (bits / 8))) break else: _warnings.warn("chunk not understood: %s" % chunk_id) data = f.read(4) size = _struct.unpack(fmt, data)[0] f.seek(size, 1) encoding = _encoding(sampfmt, bits) if needsclosing: f.close() info = SndInfo(sr, nframes, chans, encoding, "wav") return info, {'fsize': fsize, 'bigendian': bigendian, 'datasize': datasize}
python
def _wavGetInfo(f:Union[IO, str]) -> Tuple[SndInfo, Dict[str, Any]]: """ Read the info of a wav file. taken mostly from scipy.io.wavfile if extended: returns also fsize and bigendian """ if isinstance(f, (str, bytes)): f = open(f, 'rb') needsclosing = True else: needsclosing = False fsize, bigendian = _wavReadRiff(f) fmt = ">i" if bigendian else "<i" while (f.tell() < fsize): chunk_id = f.read(4) if chunk_id == b'fmt ': chunksize, sampfmt, chans, sr, byterate, align, bits = _wavReadFmt(f, bigendian) elif chunk_id == b'data': datasize = _struct.unpack(fmt, f.read(4))[0] nframes = int(datasize / (chans * (bits / 8))) break else: _warnings.warn("chunk not understood: %s" % chunk_id) data = f.read(4) size = _struct.unpack(fmt, data)[0] f.seek(size, 1) encoding = _encoding(sampfmt, bits) if needsclosing: f.close() info = SndInfo(sr, nframes, chans, encoding, "wav") return info, {'fsize': fsize, 'bigendian': bigendian, 'datasize': datasize}
[ "def", "_wavGetInfo", "(", "f", ":", "Union", "[", "IO", ",", "str", "]", ")", "->", "Tuple", "[", "SndInfo", ",", "Dict", "[", "str", ",", "Any", "]", "]", ":", "if", "isinstance", "(", "f", ",", "(", "str", ",", "bytes", ")", ")", ":", "f", "=", "open", "(", "f", ",", "'rb'", ")", "needsclosing", "=", "True", "else", ":", "needsclosing", "=", "False", "fsize", ",", "bigendian", "=", "_wavReadRiff", "(", "f", ")", "fmt", "=", "\">i\"", "if", "bigendian", "else", "\"<i\"", "while", "(", "f", ".", "tell", "(", ")", "<", "fsize", ")", ":", "chunk_id", "=", "f", ".", "read", "(", "4", ")", "if", "chunk_id", "==", "b'fmt '", ":", "chunksize", ",", "sampfmt", ",", "chans", ",", "sr", ",", "byterate", ",", "align", ",", "bits", "=", "_wavReadFmt", "(", "f", ",", "bigendian", ")", "elif", "chunk_id", "==", "b'data'", ":", "datasize", "=", "_struct", ".", "unpack", "(", "fmt", ",", "f", ".", "read", "(", "4", ")", ")", "[", "0", "]", "nframes", "=", "int", "(", "datasize", "/", "(", "chans", "*", "(", "bits", "/", "8", ")", ")", ")", "break", "else", ":", "_warnings", ".", "warn", "(", "\"chunk not understood: %s\"", "%", "chunk_id", ")", "data", "=", "f", ".", "read", "(", "4", ")", "size", "=", "_struct", ".", "unpack", "(", "fmt", ",", "data", ")", "[", "0", "]", "f", ".", "seek", "(", "size", ",", "1", ")", "encoding", "=", "_encoding", "(", "sampfmt", ",", "bits", ")", "if", "needsclosing", ":", "f", ".", "close", "(", ")", "info", "=", "SndInfo", "(", "sr", ",", "nframes", ",", "chans", ",", "encoding", ",", "\"wav\"", ")", "return", "info", ",", "{", "'fsize'", ":", "fsize", ",", "'bigendian'", ":", "bigendian", ",", "'datasize'", ":", "datasize", "}" ]
Read the info of a wav file. taken mostly from scipy.io.wavfile

if extended: returns also fsize and bigendian
[ "Read", "the", "info", "of", "a", "wav", "file", ".", "taken", "mostly", "from", "scipy", ".", "io", ".", "wavfile" ]
8e2b264cadb652f09d2e775f54090c0a3cb2ced2
https://github.com/gesellkammer/sndfileio/blob/8e2b264cadb652f09d2e775f54090c0a3cb2ced2/sndfileio/sndfileio.py#L866-L896
train
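A minimal usage sketch (not part of the dataset record): the import path for this private helper is an assumption, and 'sound.wav' is a placeholder file. The keys of the extra dict come straight from the return statement above.

from sndfileio.sndfileio import _wavGetInfo   # private helper; import path assumed

info, extra = _wavGetInfo("sound.wav")        # accepts a path or an open binary file
print(info)                                   # SndInfo(sr, nframes, chans, encoding, "wav")
print(extra["fsize"], extra["bigendian"], extra["datasize"])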
bacher09/xrcon
xrcon/client.py
QuakeProtocol.connect
def connect(self):
    "Create connection to server"
    family, stype, proto, cname, sockaddr = self.best_connection_params(
        self.host, self.port)
    self.sock = socket.socket(family, stype)
    self.sock.settimeout(self.timeout)
    self.sock.connect(sockaddr)
python
def connect(self):
    "Create connection to server"
    family, stype, proto, cname, sockaddr = self.best_connection_params(
        self.host, self.port)
    self.sock = socket.socket(family, stype)
    self.sock.settimeout(self.timeout)
    self.sock.connect(sockaddr)
[ "def", "connect", "(", "self", ")", ":", "family", ",", "stype", ",", "proto", ",", "cname", ",", "sockaddr", "=", "self", ".", "best_connection_params", "(", "self", ".", "host", ",", "self", ".", "port", ")", "self", ".", "sock", "=", "socket", ".", "socket", "(", "family", ",", "stype", ")", "self", ".", "sock", ".", "settimeout", "(", "self", ".", "timeout", ")", "self", ".", "sock", ".", "connect", "(", "sockaddr", ")" ]
Create connection to server
[ "Create", "connection", "to", "server" ]
6a883f780265cbca31af7a379dc7cb28fdd8b73f
https://github.com/bacher09/xrcon/blob/6a883f780265cbca31af7a379dc7cb28fdd8b73f/xrcon/client.py#L53-L59
train
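A usage sketch, not taken from the repo: the constructor signature is an assumption inferred from the attributes (host, port, timeout) that connect() reads.

from xrcon.client import QuakeProtocol

proto = QuakeProtocol("203.0.113.5", 26000, timeout=5.0)  # hypothetical signature
proto.connect()   # resolves the address, then creates and connects the socket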
bacher09/xrcon
xrcon/client.py
QuakeProtocol.getchallenge
def getchallenge(self):
    "Return server challenge"
    self.sock.send(CHALLENGE_PACKET)
    # wait challenge response
    for packet in self.read_iterator(self.CHALLENGE_TIMEOUT):
        if packet.startswith(CHALLENGE_RESPONSE_HEADER):
            return parse_challenge_response(packet)
python
def getchallenge(self):
    "Return server challenge"
    self.sock.send(CHALLENGE_PACKET)
    # wait challenge response
    for packet in self.read_iterator(self.CHALLENGE_TIMEOUT):
        if packet.startswith(CHALLENGE_RESPONSE_HEADER):
            return parse_challenge_response(packet)
[ "def", "getchallenge", "(", "self", ")", ":", "self", ".", "sock", ".", "send", "(", "CHALLENGE_PACKET", ")", "# wait challenge response", "for", "packet", "in", "self", ".", "read_iterator", "(", "self", ".", "CHALLENGE_TIMEOUT", ")", ":", "if", "packet", ".", "startswith", "(", "CHALLENGE_RESPONSE_HEADER", ")", ":", "return", "parse_challenge_response", "(", "packet", ")" ]
Return server challenge
[ "Return", "server", "challenge" ]
6a883f780265cbca31af7a379dc7cb28fdd8b73f
https://github.com/bacher09/xrcon/blob/6a883f780265cbca31af7a379dc7cb28fdd8b73f/xrcon/client.py#L86-L92
train
bacher09/xrcon
xrcon/client.py
XRcon.send
def send(self, command):
    "Send rcon command to server"
    if self.secure_rcon == self.RCON_NOSECURE:
        self.sock.send(rcon_nosecure_packet(self.password, command))
    elif self.secure_rcon == self.RCON_SECURE_TIME:
        self.sock.send(rcon_secure_time_packet(self.password, command))
    elif self.secure_rcon == self.RCON_SECURE_CHALLENGE:
        challenge = self.getchallenge()
        self.sock.send(rcon_secure_challenge_packet(self.password,
                                                    challenge, command))
    else:
        raise ValueError("Bad value of secure_rcon")
python
def send(self, command):
    "Send rcon command to server"
    if self.secure_rcon == self.RCON_NOSECURE:
        self.sock.send(rcon_nosecure_packet(self.password, command))
    elif self.secure_rcon == self.RCON_SECURE_TIME:
        self.sock.send(rcon_secure_time_packet(self.password, command))
    elif self.secure_rcon == self.RCON_SECURE_CHALLENGE:
        challenge = self.getchallenge()
        self.sock.send(rcon_secure_challenge_packet(self.password,
                                                    challenge, command))
    else:
        raise ValueError("Bad value of secure_rcon")
[ "def", "send", "(", "self", ",", "command", ")", ":", "if", "self", ".", "secure_rcon", "==", "self", ".", "RCON_NOSECURE", ":", "self", ".", "sock", ".", "send", "(", "rcon_nosecure_packet", "(", "self", ".", "password", ",", "command", ")", ")", "elif", "self", ".", "secure_rcon", "==", "self", ".", "RCON_SECURE_TIME", ":", "self", ".", "sock", ".", "send", "(", "rcon_secure_time_packet", "(", "self", ".", "password", ",", "command", ")", ")", "elif", "self", ".", "secure_rcon", "==", "self", ".", "RCON_SECURE_CHALLENGE", ":", "challenge", "=", "self", ".", "getchallenge", "(", ")", "self", ".", "sock", ".", "send", "(", "rcon_secure_challenge_packet", "(", "self", ".", "password", ",", "challenge", ",", "command", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Bad value of secure_rcon\"", ")" ]
Send rcon command to server
[ "Send", "rcon", "command", "to", "server" ]
6a883f780265cbca31af7a379dc7cb28fdd8b73f
https://github.com/bacher09/xrcon/blob/6a883f780265cbca31af7a379dc7cb28fdd8b73f/xrcon/client.py#L174-L185
train
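A hedged sketch of mode selection, assuming an already-constructed XRcon instance `rcon` with its password set and socket connected (see connect() above); the RCON_* constants are the ones the method dispatches on.

rcon.secure_rcon = rcon.RCON_SECURE_TIME   # or RCON_NOSECURE / RCON_SECURE_CHALLENGE
rcon.send("status")                        # any other mode value raises ValueError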
romankoblov/leaf
leaf/__init__.py
parse
def parse(html_string, wrapper=Parser, *args, **kwargs):
    """ Parse html with wrapper """
    return Parser(lxml.html.fromstring(html_string), *args, **kwargs)
python
def parse(html_string, wrapper=Parser, *args, **kwargs):
    """ Parse html with wrapper """
    return Parser(lxml.html.fromstring(html_string), *args, **kwargs)
[ "def", "parse", "(", "html_string", ",", "wrapper", "=", "Parser", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "Parser", "(", "lxml", ".", "html", ".", "fromstring", "(", "html_string", ")", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Parse html with wrapper
[ "Parse", "html", "with", "wrapper" ]
e042d91ec462c834318d03f199fcc4a9f565cb84
https://github.com/romankoblov/leaf/blob/e042d91ec462c834318d03f199fcc4a9f565cb84/leaf/__init__.py#L112-L114
train
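A minimal sketch; note that although parse() accepts a `wrapper` argument, the body above always constructs Parser, so passing a custom wrapper currently has no effect.

import leaf

root = leaf.parse("<div><a href='#x'>link</a></div>")
link = root.get("a")              # CSS lookup via Parser.get (see below)
print(link.html(unicode=True))    # Parser.html, also shown below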
romankoblov/leaf
leaf/__init__.py
str2int
def str2int(string_with_int):
    """ Collect digits from a string """
    return int("".join([char for char in string_with_int if char in string.digits]) or 0)
python
def str2int(string_with_int):
    """ Collect digits from a string """
    return int("".join([char for char in string_with_int if char in string.digits]) or 0)
[ "def", "str2int", "(", "string_with_int", ")", ":", "return", "int", "(", "\"\"", ".", "join", "(", "[", "char", "for", "char", "in", "string_with_int", "if", "char", "in", "string", ".", "digits", "]", ")", "or", "0", ")" ]
Collect digits from a string
[ "Collect", "digits", "from", "a", "string" ]
e042d91ec462c834318d03f199fcc4a9f565cb84
https://github.com/romankoblov/leaf/blob/e042d91ec462c834318d03f199fcc4a9f565cb84/leaf/__init__.py#L117-L119
train
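A couple of illustrative calls; the `or 0` fallback covers strings with no digits at all.

from leaf import str2int   # import path assumed

str2int("price: 1,299 USD")   # -> 1299
str2int("no digits here")     # -> 0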
romankoblov/leaf
leaf/__init__.py
to_unicode
def to_unicode(obj, encoding='utf-8'):
    """ Convert string to unicode string """
    if isinstance(obj, string_types) or isinstance(obj, binary_type):
        if not isinstance(obj, text_type):
            obj = text_type(obj, encoding)
    return obj
python
def to_unicode(obj, encoding='utf-8'):
    """ Convert string to unicode string """
    if isinstance(obj, string_types) or isinstance(obj, binary_type):
        if not isinstance(obj, text_type):
            obj = text_type(obj, encoding)
    return obj
[ "def", "to_unicode", "(", "obj", ",", "encoding", "=", "'utf-8'", ")", ":", "if", "isinstance", "(", "obj", ",", "string_types", ")", "or", "isinstance", "(", "obj", ",", "binary_type", ")", ":", "if", "not", "isinstance", "(", "obj", ",", "text_type", ")", ":", "obj", "=", "text_type", "(", "obj", ",", "encoding", ")", "return", "obj" ]
Convert string to unicode string
[ "Convert", "string", "to", "unicode", "string" ]
e042d91ec462c834318d03f199fcc4a9f565cb84
https://github.com/romankoblov/leaf/blob/e042d91ec462c834318d03f199fcc4a9f565cb84/leaf/__init__.py#L122-L127
train
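Illustrative calls, assuming the six-style aliases (string_types, binary_type, text_type) used above:

from leaf import to_unicode   # import path assumed

to_unicode(b"caf\xc3\xa9")    # decoded with the default utf-8 -> u'café'
to_unicode(u"already text")   # text passes through unchanged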
romankoblov/leaf
leaf/__init__.py
strip_spaces
def strip_spaces(s):
    """ Strip excess spaces from a string """
    return u" ".join([c for c in s.split(u' ') if c])
python
def strip_spaces(s):
    """ Strip excess spaces from a string """
    return u" ".join([c for c in s.split(u' ') if c])
[ "def", "strip_spaces", "(", "s", ")", ":", "return", "u\" \"", ".", "join", "(", "[", "c", "for", "c", "in", "s", ".", "split", "(", "u' '", ")", "if", "c", "]", ")" ]
Strip excess spaces from a string
[ "Strip", "excess", "spaces", "from", "a", "string" ]
e042d91ec462c834318d03f199fcc4a9f565cb84
https://github.com/romankoblov/leaf/blob/e042d91ec462c834318d03f199fcc4a9f565cb84/leaf/__init__.py#L162-L164
train
romankoblov/leaf
leaf/__init__.py
strip_linebreaks
def strip_linebreaks(s):
    """ Strip excess line breaks from a string """
    return u"\n".join([c for c in s.split(u'\n') if c])
python
def strip_linebreaks(s):
    """ Strip excess line breaks from a string """
    return u"\n".join([c for c in s.split(u'\n') if c])
[ "def", "strip_linebreaks", "(", "s", ")", ":", "return", "u\"\\n\"", ".", "join", "(", "[", "c", "for", "c", "in", "s", ".", "split", "(", "u'\\n'", ")", "if", "c", "]", ")" ]
Strip excess line breaks from a string
[ "Strip", "excess", "line", "breaks", "from", "a", "string" ]
e042d91ec462c834318d03f199fcc4a9f565cb84
https://github.com/romankoblov/leaf/blob/e042d91ec462c834318d03f199fcc4a9f565cb84/leaf/__init__.py#L167-L169
train
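Illustrative calls for both strip helpers above; each drops the empty fragments that consecutive separators produce in split().

from leaf import strip_spaces, strip_linebreaks   # import paths assumed

strip_spaces(u"too   many    spaces")   # -> u'too many spaces'
strip_linebreaks(u"a\n\n\nb\n")         # -> u'a\nb'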
romankoblov/leaf
leaf/__init__.py
Parser.get
def get(self, selector, index=0, default=None):
    """ Get first element from CSSSelector """
    elements = self(selector)
    if elements:
        try:
            return elements[index]
        except (IndexError):
            pass
    return default
python
def get(self, selector, index=0, default=None):
    """ Get first element from CSSSelector """
    elements = self(selector)
    if elements:
        try:
            return elements[index]
        except (IndexError):
            pass
    return default
[ "def", "get", "(", "self", ",", "selector", ",", "index", "=", "0", ",", "default", "=", "None", ")", ":", "elements", "=", "self", "(", "selector", ")", "if", "elements", ":", "try", ":", "return", "elements", "[", "index", "]", "except", "(", "IndexError", ")", ":", "pass", "return", "default" ]
Get first element from CSSSelector
[ "Get", "first", "element", "from", "CSSSelector" ]
e042d91ec462c834318d03f199fcc4a9f565cb84
https://github.com/romankoblov/leaf/blob/e042d91ec462c834318d03f199fcc4a9f565cb84/leaf/__init__.py#L26-L34
train
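A sketch on a Parser instance such as the `root` from the leaf.parse example above; out-of-range indexes and empty matches both fall back to `default`.

first = root.get("li.item")                 # first match, or None
third = root.get("li.item", index=2)        # IndexError is swallowed -> default
missing = root.get("#nope", default="n/a")  # no match -> default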
romankoblov/leaf
leaf/__init__.py
Parser.html
def html(self, unicode=False):
    """ Return HTML of element """
    html = lxml.html.tostring(self.element, encoding=self.encoding)
    if unicode:
        html = html.decode(self.encoding)
    return html
python
def html(self, unicode=False):
    """ Return HTML of element """
    html = lxml.html.tostring(self.element, encoding=self.encoding)
    if unicode:
        html = html.decode(self.encoding)
    return html
[ "def", "html", "(", "self", ",", "unicode", "=", "False", ")", ":", "html", "=", "lxml", ".", "html", ".", "tostring", "(", "self", ".", "element", ",", "encoding", "=", "self", ".", "encoding", ")", "if", "unicode", ":", "html", "=", "html", ".", "decode", "(", "self", ".", "encoding", ")", "return", "html" ]
Return HTML of element
[ "Return", "HTML", "of", "element" ]
e042d91ec462c834318d03f199fcc4a9f565cb84
https://github.com/romankoblov/leaf/blob/e042d91ec462c834318d03f199fcc4a9f565cb84/leaf/__init__.py#L36-L41
train
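A sketch, again on a Parser instance `root`: without the flag you get bytes in the parser's encoding.

raw = root.html()               # encoded bytes
text = root.html(unicode=True)  # decoded text string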
romankoblov/leaf
leaf/__init__.py
Parser.parse
def parse(self, func, *args, **kwargs):
    """ Parse element with given function"""
    result = []
    for element in self.xpath('child::node()'):
        if isinstance(element, Parser):
            children = element.parse(func, *args, **kwargs)
            element_result = func(element, children, *args, **kwargs)
            if element_result:
                result.append(element_result)
        else:
            result.append(element)
    return u"".join(result)
python
def parse(self, func, *args, **kwargs):
    """ Parse element with given function"""
    result = []
    for element in self.xpath('child::node()'):
        if isinstance(element, Parser):
            children = element.parse(func, *args, **kwargs)
            element_result = func(element, children, *args, **kwargs)
            if element_result:
                result.append(element_result)
        else:
            result.append(element)
    return u"".join(result)
[ "def", "parse", "(", "self", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "result", "=", "[", "]", "for", "element", "in", "self", ".", "xpath", "(", "'child::node()'", ")", ":", "if", "isinstance", "(", "element", ",", "Parser", ")", ":", "children", "=", "element", ".", "parse", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", "element_result", "=", "func", "(", "element", ",", "children", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "element_result", ":", "result", ".", "append", "(", "element_result", ")", "else", ":", "result", ".", "append", "(", "element", ")", "return", "u\"\"", ".", "join", "(", "result", ")" ]
Parse element with given function
[ "Parse", "element", "with", "given", "function" ]
e042d91ec462c834318d03f199fcc4a9f565cb84
https://github.com/romankoblov/leaf/blob/e042d91ec462c834318d03f199fcc4a9f565cb84/leaf/__init__.py#L55-L66
train
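A visitor sketch for Parser.parse: `children` arrives as the already-flattened text of the node's children, and the visitor should return the text to emit for every element node (the falsy-return branch only makes sense for plain text nodes, which are appended as-is).

def visitor(element, children):
    if element.tag == "b":
        return children.upper()   # rewrite <b>...</b> content
    return children               # pass everything else through

flat = root.parse(visitor)        # `root` as in the examples above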
romankoblov/leaf
leaf/__init__.py
Parser._wrap_result
def _wrap_result(self, func):
    """ Wrap result in Parser instance """
    def wrapper(*args):
        result = func(*args)
        if hasattr(result, '__iter__') and not isinstance(result, etree._Element):
            return [self._wrap_element(element) for element in result]
        else:
            return self._wrap_element(result)
    return wrapper
python
def _wrap_result(self, func):
    """ Wrap result in Parser instance """
    def wrapper(*args):
        result = func(*args)
        if hasattr(result, '__iter__') and not isinstance(result, etree._Element):
            return [self._wrap_element(element) for element in result]
        else:
            return self._wrap_element(result)
    return wrapper
[ "def", "_wrap_result", "(", "self", ",", "func", ")", ":", "def", "wrapper", "(", "*", "args", ")", ":", "result", "=", "func", "(", "*", "args", ")", "if", "hasattr", "(", "result", ",", "'__iter__'", ")", "and", "not", "isinstance", "(", "result", ",", "etree", ".", "_Element", ")", ":", "return", "[", "self", ".", "_wrap_element", "(", "element", ")", "for", "element", "in", "result", "]", "else", ":", "return", "self", ".", "_wrap_element", "(", "result", ")", "return", "wrapper" ]
Wrap result in Parser instance
[ "Wrap", "result", "in", "Parser", "instance" ]
e042d91ec462c834318d03f199fcc4a9f565cb84
https://github.com/romankoblov/leaf/blob/e042d91ec462c834318d03f199fcc4a9f565cb84/leaf/__init__.py#L68-L76
train
romankoblov/leaf
leaf/__init__.py
Parser._wrap_element
def _wrap_element(self, result):
    """ Wrap single element in Parser instance """
    if isinstance(result, lxml.html.HtmlElement):
        return Parser(result)
    else:
        return result
python
def _wrap_element(self, result):
    """ Wrap single element in Parser instance """
    if isinstance(result, lxml.html.HtmlElement):
        return Parser(result)
    else:
        return result
[ "def", "_wrap_element", "(", "self", ",", "result", ")", ":", "if", "isinstance", "(", "result", ",", "lxml", ".", "html", ".", "HtmlElement", ")", ":", "return", "Parser", "(", "result", ")", "else", ":", "return", "result" ]
Wrap single element in Parser instance
[ "Wrap", "single", "element", "in", "Parser", "instance" ]
e042d91ec462c834318d03f199fcc4a9f565cb84
https://github.com/romankoblov/leaf/blob/e042d91ec462c834318d03f199fcc4a9f565cb84/leaf/__init__.py#L78-L83
train
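The net effect of the two _wrap_* helpers above, sketched: any lxml element coming out of a proxied call is re-wrapped, so chained access stays inside the Parser API.

link = root.get("a")   # underlying lxml HtmlElement comes back wrapped
print(type(link))      # the Parser class, not lxml.html.HtmlElement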
frostming/marko
marko/block.py
BlockElement.parse_inline
def parse_inline(self):
    """Inline parsing is postponed so that all link references
    are seen before that.
    """
    if self.inline_children:
        self.children = parser.parse_inline(self.children)
    elif isinstance(getattr(self, 'children', None), list):
        for child in self.children:
            if isinstance(child, BlockElement):
                child.parse_inline()
python
def parse_inline(self):
    """Inline parsing is postponed so that all link references
    are seen before that.
    """
    if self.inline_children:
        self.children = parser.parse_inline(self.children)
    elif isinstance(getattr(self, 'children', None), list):
        for child in self.children:
            if isinstance(child, BlockElement):
                child.parse_inline()
[ "def", "parse_inline", "(", "self", ")", ":", "if", "self", ".", "inline_children", ":", "self", ".", "children", "=", "parser", ".", "parse_inline", "(", "self", ".", "children", ")", "elif", "isinstance", "(", "getattr", "(", "self", ",", "'children'", ",", "None", ")", ",", "list", ")", ":", "for", "child", "in", "self", ".", "children", ":", "if", "isinstance", "(", "child", ",", "BlockElement", ")", ":", "child", ".", "parse_inline", "(", ")" ]
Inline parsing is postponed so that all link references are seen before that.
[ "Inline", "parsing", "is", "postponed", "so", "that", "all", "link", "references", "are", "seen", "before", "that", "." ]
1cd030b665fa37bad1f8b3a25a89ce1a7c491dde
https://github.com/frostming/marko/blob/1cd030b665fa37bad1f8b3a25a89ce1a7c491dde/marko/block.py#L59-L68
train
COALAIP/pycoalaip
coalaip/models.py
work_model_factory
def work_model_factory(*, validator=validators.is_work_model, **kwargs):
    """Generate a Work model.

    Expects ``data``, ``validator``, ``model_cls``, and ``ld_context`` as
    keyword arguments.

    Raises:
        :exc:`ModelError`: If a non-'AbstractWork' ``ld_type`` keyword
            argument is given.
    """
    kwargs['ld_type'] = 'AbstractWork'
    return _model_factory(validator=validator, **kwargs)
python
def work_model_factory(*, validator=validators.is_work_model, **kwargs):
    """Generate a Work model.

    Expects ``data``, ``validator``, ``model_cls``, and ``ld_context`` as
    keyword arguments.

    Raises:
        :exc:`ModelError`: If a non-'AbstractWork' ``ld_type`` keyword
            argument is given.
    """
    kwargs['ld_type'] = 'AbstractWork'
    return _model_factory(validator=validator, **kwargs)
[ "def", "work_model_factory", "(", "*", ",", "validator", "=", "validators", ".", "is_work_model", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'ld_type'", "]", "=", "'AbstractWork'", "return", "_model_factory", "(", "validator", "=", "validator", ",", "*", "*", "kwargs", ")" ]
Generate a Work model.

Expects ``data``, ``validator``, ``model_cls``, and ``ld_context`` as
keyword arguments.

Raises:
    :exc:`ModelError`: If a non-'AbstractWork' ``ld_type`` keyword
        argument is given.
[ "Generate", "a", "Work", "model", "." ]
cecc8f6ff4733f0525fafcee63647753e832f0be
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/models.py#L241-L252
train
COALAIP/pycoalaip
coalaip/models.py
manifestation_model_factory
def manifestation_model_factory(*, validator=validators.is_manifestation_model,
                                ld_type='CreativeWork', **kwargs):
    """Generate a Manifestation model.

    Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
    ``ld_context`` as keyword arguments.
    """
    return _model_factory(validator=validator, ld_type=ld_type, **kwargs)
python
def manifestation_model_factory(*, validator=validators.is_manifestation_model,
                                ld_type='CreativeWork', **kwargs):
    """Generate a Manifestation model.

    Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
    ``ld_context`` as keyword arguments.
    """
    return _model_factory(validator=validator, ld_type=ld_type, **kwargs)
[ "def", "manifestation_model_factory", "(", "*", ",", "validator", "=", "validators", ".", "is_manifestation_model", ",", "ld_type", "=", "'CreativeWork'", ",", "*", "*", "kwargs", ")", ":", "return", "_model_factory", "(", "validator", "=", "validator", ",", "ld_type", "=", "ld_type", ",", "*", "*", "kwargs", ")" ]
Generate a Manifestation model.

Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
``ld_context`` as keyword arguments.
[ "Generate", "a", "Manifestation", "model", "." ]
cecc8f6ff4733f0525fafcee63647753e832f0be
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/models.py#L255-L262
train
COALAIP/pycoalaip
coalaip/models.py
right_model_factory
def right_model_factory(*, validator=validators.is_right_model,
                        ld_type='Right', **kwargs):
    """Generate a Right model.

    Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
    ``ld_context`` as keyword arguments.
    """
    return _model_factory(validator=validator, ld_type=ld_type, **kwargs)
python
def right_model_factory(*, validator=validators.is_right_model,
                        ld_type='Right', **kwargs):
    """Generate a Right model.

    Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
    ``ld_context`` as keyword arguments.
    """
    return _model_factory(validator=validator, ld_type=ld_type, **kwargs)
[ "def", "right_model_factory", "(", "*", ",", "validator", "=", "validators", ".", "is_right_model", ",", "ld_type", "=", "'Right'", ",", "*", "*", "kwargs", ")", ":", "return", "_model_factory", "(", "validator", "=", "validator", ",", "ld_type", "=", "ld_type", ",", "*", "*", "kwargs", ")" ]
Generate a Right model.

Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
``ld_context`` as keyword arguments.
[ "Generate", "a", "Right", "model", "." ]
cecc8f6ff4733f0525fafcee63647753e832f0be
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/models.py#L265-L272
train
COALAIP/pycoalaip
coalaip/models.py
copyright_model_factory
def copyright_model_factory(*, validator=validators.is_copyright_model,
                            **kwargs):
    """Generate a Copyright model.

    Expects ``data``, ``validator``, ``model_cls``, and ``ld_context`` as
    keyword arguments.

    Raises:
        :exc:`ModelError`: If a non-'Copyright' ``ld_type`` keyword argument
            is given.
    """
    kwargs['ld_type'] = 'Copyright'
    return _model_factory(validator=validator, **kwargs)
python
def copyright_model_factory(*, validator=validators.is_copyright_model,
                            **kwargs):
    """Generate a Copyright model.

    Expects ``data``, ``validator``, ``model_cls``, and ``ld_context`` as
    keyword arguments.

    Raises:
        :exc:`ModelError`: If a non-'Copyright' ``ld_type`` keyword argument
            is given.
    """
    kwargs['ld_type'] = 'Copyright'
    return _model_factory(validator=validator, **kwargs)
[ "def", "copyright_model_factory", "(", "*", ",", "validator", "=", "validators", ".", "is_copyright_model", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'ld_type'", "]", "=", "'Copyright'", "return", "_model_factory", "(", "validator", "=", "validator", ",", "*", "*", "kwargs", ")" ]
Generate a Copyright model.

Expects ``data``, ``validator``, ``model_cls``, and ``ld_context`` as
keyword arguments.

Raises:
    :exc:`ModelError`: If a non-'Copyright' ``ld_type`` keyword argument
        is given.
[ "Generate", "a", "Copyright", "model", "." ]
cecc8f6ff4733f0525fafcee63647753e832f0be
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/models.py#L276-L288
train
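A sketch of the factory family covered by the four records above; the payload keys are illustrative, not a schema. Work and Copyright force their ld_type (a different ld_type raises ModelError), while Manifestation and Right only default theirs.

from coalaip import models   # import path assumed

work = models.work_model_factory(data={'name': 'Lagartija Nick'})
song = models.manifestation_model_factory(
    data={'name': 'Lagartija Nick'},
    ld_type='AudioObject')   # overridable, unlike the Work/Copyright factories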
Pylons/pyramid_retry
src/pyramid_retry/__init__.py
mark_error_retryable
def mark_error_retryable(error):
    """
    Mark an exception instance or type as retryable. If this exception
    is caught by ``pyramid_retry`` then it may retry the request.

    """
    if isinstance(error, Exception):
        alsoProvides(error, IRetryableError)
    elif inspect.isclass(error) and issubclass(error, Exception):
        classImplements(error, IRetryableError)
    else:
        raise ValueError(
            'only exception objects or types may be marked retryable')
python
def mark_error_retryable(error):
    """
    Mark an exception instance or type as retryable. If this exception
    is caught by ``pyramid_retry`` then it may retry the request.

    """
    if isinstance(error, Exception):
        alsoProvides(error, IRetryableError)
    elif inspect.isclass(error) and issubclass(error, Exception):
        classImplements(error, IRetryableError)
    else:
        raise ValueError(
            'only exception objects or types may be marked retryable')
[ "def", "mark_error_retryable", "(", "error", ")", ":", "if", "isinstance", "(", "error", ",", "Exception", ")", ":", "alsoProvides", "(", "error", ",", "IRetryableError", ")", "elif", "inspect", ".", "isclass", "(", "error", ")", "and", "issubclass", "(", "error", ",", "Exception", ")", ":", "classImplements", "(", "error", ",", "IRetryableError", ")", "else", ":", "raise", "ValueError", "(", "'only exception objects or types may be marked retryable'", ")" ]
Mark an exception instance or type as retryable. If this exception is caught by ``pyramid_retry`` then it may retry the request.
[ "Mark", "an", "exception", "instance", "or", "type", "as", "retryable", ".", "If", "this", "exception", "is", "caught", "by", "pyramid_retry", "then", "it", "may", "retry", "the", "request", "." ]
4518d0655159fcf5cf79c0d7d4c86e8315f16082
https://github.com/Pylons/pyramid_retry/blob/4518d0655159fcf5cf79c0d7d4c86e8315f16082/src/pyramid_retry/__init__.py#L149-L161
train
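A short sketch: classes and instances are both accepted, anything else is rejected.

from pyramid_retry import mark_error_retryable

class TransientBackendError(Exception):
    pass

mark_error_retryable(TransientBackendError)    # mark the whole class
mark_error_retryable(TransientBackendError())  # or a single instance
# mark_error_retryable("nope")                 # would raise ValueError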
Pylons/pyramid_retry
src/pyramid_retry/__init__.py
is_last_attempt
def is_last_attempt(request):
    """
    Return ``True`` if the request is on its last attempt, meaning that
    ``pyramid_retry`` will not be issuing any new attempts, regardless of
    what happens when executing this request.

    This will return ``True`` if ``pyramid_retry`` is inactive for the
    request.

    """
    environ = request.environ
    attempt = environ.get('retry.attempt')
    attempts = environ.get('retry.attempts')
    if attempt is None or attempts is None:
        return True

    return attempt + 1 == attempts
python
def is_last_attempt(request):
    """
    Return ``True`` if the request is on its last attempt, meaning that
    ``pyramid_retry`` will not be issuing any new attempts, regardless of
    what happens when executing this request.

    This will return ``True`` if ``pyramid_retry`` is inactive for the
    request.

    """
    environ = request.environ
    attempt = environ.get('retry.attempt')
    attempts = environ.get('retry.attempts')
    if attempt is None or attempts is None:
        return True

    return attempt + 1 == attempts
[ "def", "is_last_attempt", "(", "request", ")", ":", "environ", "=", "request", ".", "environ", "attempt", "=", "environ", ".", "get", "(", "'retry.attempt'", ")", "attempts", "=", "environ", ".", "get", "(", "'retry.attempts'", ")", "if", "attempt", "is", "None", "or", "attempts", "is", "None", ":", "return", "True", "return", "attempt", "+", "1", "==", "attempts" ]
Return ``True`` if the request is on its last attempt, meaning that
``pyramid_retry`` will not be issuing any new attempts, regardless of
what happens when executing this request.

This will return ``True`` if ``pyramid_retry`` is inactive for the
request.
[ "Return", "True", "if", "the", "request", "is", "on", "its", "last", "attempt", "meaning", "that", "pyramid_retry", "will", "not", "be", "issuing", "any", "new", "attempts", "regardless", "of", "what", "happens", "when", "executing", "this", "request", "." ]
4518d0655159fcf5cf79c0d7d4c86e8315f16082
https://github.com/Pylons/pyramid_retry/blob/4518d0655159fcf5cf79c0d7d4c86e8315f16082/src/pyramid_retry/__init__.py#L182-L198
train
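A sketch of the excview pattern this supports; render_error_page is a hypothetical helper, not part of the library.

from pyramid_retry import is_last_attempt

def failed_view(exc, request):
    if not is_last_attempt(request):
        raise exc                        # let the policy issue another attempt
    return render_error_page(request)    # hypothetical final-failure page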
Pylons/pyramid_retry
src/pyramid_retry/__init__.py
includeme
def includeme(config):
    """
    Activate the ``pyramid_retry`` execution policy in your application.

    This will add the :func:`pyramid_retry.RetryableErrorPolicy` with
    ``attempts`` pulled from the ``retry.attempts`` setting.

    The ``last_retry_attempt`` and ``retryable_error`` view predicates
    are registered.

    This should be included in your Pyramid application via
    ``config.include('pyramid_retry')``.

    """
    settings = config.get_settings()

    config.add_view_predicate('last_retry_attempt', LastAttemptPredicate)
    config.add_view_predicate('retryable_error', RetryableErrorPredicate)

    def register():
        attempts = int(settings.get('retry.attempts') or 3)
        settings['retry.attempts'] = attempts

        activate_hook = settings.get('retry.activate_hook')
        activate_hook = config.maybe_dotted(activate_hook)

        policy = RetryableExecutionPolicy(
            attempts,
            activate_hook=activate_hook,
        )
        config.set_execution_policy(policy)

    # defer registration to allow time to modify settings
    config.action(None, register, order=PHASE1_CONFIG)
python
def includeme(config):
    """
    Activate the ``pyramid_retry`` execution policy in your application.

    This will add the :func:`pyramid_retry.RetryableErrorPolicy` with
    ``attempts`` pulled from the ``retry.attempts`` setting.

    The ``last_retry_attempt`` and ``retryable_error`` view predicates
    are registered.

    This should be included in your Pyramid application via
    ``config.include('pyramid_retry')``.

    """
    settings = config.get_settings()

    config.add_view_predicate('last_retry_attempt', LastAttemptPredicate)
    config.add_view_predicate('retryable_error', RetryableErrorPredicate)

    def register():
        attempts = int(settings.get('retry.attempts') or 3)
        settings['retry.attempts'] = attempts

        activate_hook = settings.get('retry.activate_hook')
        activate_hook = config.maybe_dotted(activate_hook)

        policy = RetryableExecutionPolicy(
            attempts,
            activate_hook=activate_hook,
        )
        config.set_execution_policy(policy)

    # defer registration to allow time to modify settings
    config.action(None, register, order=PHASE1_CONFIG)
[ "def", "includeme", "(", "config", ")", ":", "settings", "=", "config", ".", "get_settings", "(", ")", "config", ".", "add_view_predicate", "(", "'last_retry_attempt'", ",", "LastAttemptPredicate", ")", "config", ".", "add_view_predicate", "(", "'retryable_error'", ",", "RetryableErrorPredicate", ")", "def", "register", "(", ")", ":", "attempts", "=", "int", "(", "settings", ".", "get", "(", "'retry.attempts'", ")", "or", "3", ")", "settings", "[", "'retry.attempts'", "]", "=", "attempts", "activate_hook", "=", "settings", ".", "get", "(", "'retry.activate_hook'", ")", "activate_hook", "=", "config", ".", "maybe_dotted", "(", "activate_hook", ")", "policy", "=", "RetryableExecutionPolicy", "(", "attempts", ",", "activate_hook", "=", "activate_hook", ",", ")", "config", ".", "set_execution_policy", "(", "policy", ")", "# defer registration to allow time to modify settings", "config", ".", "action", "(", "None", ",", "register", ",", "order", "=", "PHASE1_CONFIG", ")" ]
Activate the ``pyramid_retry`` execution policy in your application.

This will add the :func:`pyramid_retry.RetryableErrorPolicy` with
``attempts`` pulled from the ``retry.attempts`` setting.

The ``last_retry_attempt`` and ``retryable_error`` view predicates
are registered.

This should be included in your Pyramid application via
``config.include('pyramid_retry')``.
[ "Activate", "the", "pyramid_retry", "execution", "policy", "in", "your", "application", "." ]
4518d0655159fcf5cf79c0d7d4c86e8315f16082
https://github.com/Pylons/pyramid_retry/blob/4518d0655159fcf5cf79c0d7d4c86e8315f16082/src/pyramid_retry/__init__.py#L259-L292
train
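A typical activation sketch; the setting name comes from the code above, and the rest of the configuration is illustrative.

from pyramid.config import Configurator

config = Configurator(settings={'retry.attempts': '5'})
config.include('pyramid_retry')   # installs the policy and both view predicates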
gesellkammer/sndfileio
sndfileio/dsp.py
filter_butter_coeffs
def filter_butter_coeffs(filtertype, freq, samplerate, order=5):
    # type: (str, Union[float, Tuple[float, float]], int, int) -> Tuple[np.ndarray, np.ndarray]
    """
    calculates the coefficients for a digital butterworth filter

    filtertype: 'low', 'high', 'band'
    freq      : cutoff freq.
                in the case of 'band': (low, high)

    Returns --> (b, a)
    """
    assert filtertype in ('low', 'high', 'band')
    nyq = 0.5 * samplerate
    if isinstance(freq, tuple):
        assert filtertype == 'band'
        low, high = freq
        low /= nyq
        high /= nyq
        b, a = signal.butter(order, [low, high], btype='band')
    else:
        freq = freq / nyq
        b, a = signal.butter(order, freq, btype=filtertype)
    return b, a
python
def filter_butter_coeffs(filtertype, freq, samplerate, order=5):
    # type: (str, Union[float, Tuple[float, float]], int, int) -> Tuple[np.ndarray, np.ndarray]
    """
    calculates the coefficients for a digital butterworth filter

    filtertype: 'low', 'high', 'band'
    freq      : cutoff freq.
                in the case of 'band': (low, high)

    Returns --> (b, a)
    """
    assert filtertype in ('low', 'high', 'band')
    nyq = 0.5 * samplerate
    if isinstance(freq, tuple):
        assert filtertype == 'band'
        low, high = freq
        low /= nyq
        high /= nyq
        b, a = signal.butter(order, [low, high], btype='band')
    else:
        freq = freq / nyq
        b, a = signal.butter(order, freq, btype=filtertype)
    return b, a
[ "def", "filter_butter_coeffs", "(", "filtertype", ",", "freq", ",", "samplerate", ",", "order", "=", "5", ")", ":", "# type: (str, Union[float, Tuple[float, float]], int, int) -> Tuple[np.ndarray, np.ndarray]", "assert", "filtertype", "in", "(", "'low'", ",", "'high'", ",", "'band'", ")", "nyq", "=", "0.5", "*", "samplerate", "if", "isinstance", "(", "freq", ",", "tuple", ")", ":", "assert", "filtertype", "==", "'band'", "low", ",", "high", "=", "freq", "low", "/=", "nyq", "high", "/=", "nyq", "b", ",", "a", "=", "signal", ".", "butter", "(", "order", ",", "[", "low", ",", "high", "]", ",", "btype", "=", "'band'", ")", "else", ":", "freq", "=", "freq", "/", "nyq", "b", ",", "a", "=", "signal", ".", "butter", "(", "order", ",", "freq", ",", "btype", "=", "filtertype", ")", "return", "b", ",", "a" ]
calculates the coefficients for a digital butterworth filter

filtertype: 'low', 'high', 'band'
freq      : cutoff freq.
            in the case of 'band': (low, high)

Returns --> (b, a)
[ "calculates", "the", "coefficients", "for", "a", "digital", "butterworth", "filter" ]
8e2b264cadb652f09d2e775f54090c0a3cb2ced2
https://github.com/gesellkammer/sndfileio/blob/8e2b264cadb652f09d2e775f54090c0a3cb2ced2/sndfileio/dsp.py#L63-L85
train
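Illustrative calls, assuming the function is importable from sndfileio.dsp: a tuple selects a band-pass design, a scalar a low- or high-pass.

from sndfileio.dsp import filter_butter_coeffs   # import path assumed

b, a = filter_butter_coeffs('band', (300.0, 3000.0), 44100, order=4)
b, a = filter_butter_coeffs('low', 1000.0, 44100)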
gesellkammer/sndfileio
sndfileio/dsp.py
filter_butter
def filter_butter(samples, samplerate, filtertype, freq, order=5):
    # type: (np.ndarray, int, str, float, int) -> np.ndarray
    """
    Filters the samples with a digital butterworth filter

    samples   : mono samples
    filtertype: 'low', 'band', 'high'
    freq      : for low or high, the cutoff freq
                for band, (low, high)
    samplerate: the sampling-rate
    order     : the order of the butterworth filter

    Returns --> the filtered samples

    NB: calls filter_butter_coeffs to calculate the coefficients
    """
    assert filtertype in ('low', 'high', 'band')
    b, a = filter_butter_coeffs(filtertype, freq, samplerate, order=order)
    return apply_multichannel(samples, lambda data:signal.lfilter(b, a, data))
python
def filter_butter(samples, samplerate, filtertype, freq, order=5):
    # type: (np.ndarray, int, str, float, int) -> np.ndarray
    """
    Filters the samples with a digital butterworth filter

    samples   : mono samples
    filtertype: 'low', 'band', 'high'
    freq      : for low or high, the cutoff freq
                for band, (low, high)
    samplerate: the sampling-rate
    order     : the order of the butterworth filter

    Returns --> the filtered samples

    NB: calls filter_butter_coeffs to calculate the coefficients
    """
    assert filtertype in ('low', 'high', 'band')
    b, a = filter_butter_coeffs(filtertype, freq, samplerate, order=order)
    return apply_multichannel(samples, lambda data:signal.lfilter(b, a, data))
[ "def", "filter_butter", "(", "samples", ",", "samplerate", ",", "filtertype", ",", "freq", ",", "order", "=", "5", ")", ":", "# type: (np.ndarray, int, str, float, int) -> np.ndarray", "assert", "filtertype", "in", "(", "'low'", ",", "'high'", ",", "'band'", ")", "b", ",", "a", "=", "filter_butter_coeffs", "(", "filtertype", ",", "freq", ",", "samplerate", ",", "order", "=", "order", ")", "return", "apply_multichannel", "(", "samples", ",", "lambda", "data", ":", "signal", ".", "lfilter", "(", "b", ",", "a", ",", "data", ")", ")" ]
Filters the samples with a digital butterworth filter

samples   : mono samples
filtertype: 'low', 'band', 'high'
freq      : for low or high, the cutoff freq
            for band, (low, high)
samplerate: the sampling-rate
order     : the order of the butterworth filter

Returns --> the filtered samples

NB: calls filter_butter_coeffs to calculate the coefficients
[ "Filters", "the", "samples", "with", "a", "digital", "butterworth", "filter" ]
8e2b264cadb652f09d2e775f54090c0a3cb2ced2
https://github.com/gesellkammer/sndfileio/blob/8e2b264cadb652f09d2e775f54090c0a3cb2ced2/sndfileio/dsp.py#L88-L106
train
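A self-contained sketch: low-pass a two-tone signal so only the 440 Hz component survives.

import numpy as np
from sndfileio.dsp import filter_butter   # import path assumed

sr = 44100
t = np.arange(sr) / float(sr)
samples = np.sin(2 * np.pi * 440 * t) + np.sin(2 * np.pi * 8000 * t)
lowpassed = filter_butter(samples, sr, 'low', 2000.0)   # keeps the 440 Hz tone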
johnnoone/aioconsul
aioconsul/api.py
token_middleware
def token_middleware(ctx, get_response):
    """Reinject token and consistency into requests.
    """
    async def middleware(request):
        params = request.setdefault('params', {})
        if params.get("token") is None:
            params['token'] = ctx.token
        return await get_response(request)
    return middleware
python
def token_middleware(ctx, get_response):
    """Reinject token and consistency into requests.
    """
    async def middleware(request):
        params = request.setdefault('params', {})
        if params.get("token") is None:
            params['token'] = ctx.token
        return await get_response(request)
    return middleware
[ "def", "token_middleware", "(", "ctx", ",", "get_response", ")", ":", "async", "def", "middleware", "(", "request", ")", ":", "params", "=", "request", ".", "setdefault", "(", "'params'", ",", "{", "}", ")", "if", "params", ".", "get", "(", "\"token\"", ")", "is", "None", ":", "params", "[", "'token'", "]", "=", "ctx", ".", "token", "return", "await", "get_response", "(", "request", ")", "return", "middleware" ]
Reinject token and consistency into requests.
[ "Reinject", "token", "and", "consistency", "into", "requests", "." ]
02f7a529d7dc2e49bed942111067aa5faf320e90
https://github.com/johnnoone/aioconsul/blob/02f7a529d7dc2e49bed942111067aa5faf320e90/aioconsul/api.py#L212-L220
train
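A minimal sketch of the middleware contract implied above: `ctx` only needs a .token attribute, get_response is the next handler in the chain, and the request is a plain dict.

import asyncio
from types import SimpleNamespace
from aioconsul.api import token_middleware   # import path assumed

async def echo(request):
    return request['params']

mw = token_middleware(SimpleNamespace(token='s3cr3t'), echo)
print(asyncio.run(mw({})))   # {'token': 's3cr3t'}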
bitesofcode/projexui
projexui/widgets/xganttwidget/xganttdepitem.py
XGanttDepItem.rebuild
def rebuild( self ):
    """
    Rebuilds the dependency path for this item.
    """
    scene = self.scene()
    if ( not scene ):
        return

    sourcePos = self.sourceItem().viewItem().pos()
    sourceRect = self.sourceItem().viewItem().rect()

    targetPos = self.targetItem().viewItem().pos()
    targetRect = self.targetItem().viewItem().rect()

    cellWidth = scene.ganttWidget().cellWidth()

    startX = sourcePos.x() + sourceRect.width() - (cellWidth / 2.0)
    startY = sourcePos.y() + (sourceRect.height() / 2.0)

    endX = targetPos.x() - 2
    endY = targetPos.y() + (targetRect.height() / 2.0)

    path = QPainterPath()
    path.moveTo(startX, startY)
    path.lineTo(startX, endY)
    path.lineTo(endX, endY)

    a = QPointF(endX - 10, endY - 3)
    b = QPointF(endX, endY)
    c = QPointF(endX - 10, endY + 3)

    self._polygon = QPolygonF([a, b, c, a])
    path.addPolygon(self._polygon)

    self.setPath(path)
python
def rebuild( self ):
    """
    Rebuilds the dependency path for this item.
    """
    scene = self.scene()
    if ( not scene ):
        return

    sourcePos = self.sourceItem().viewItem().pos()
    sourceRect = self.sourceItem().viewItem().rect()

    targetPos = self.targetItem().viewItem().pos()
    targetRect = self.targetItem().viewItem().rect()

    cellWidth = scene.ganttWidget().cellWidth()

    startX = sourcePos.x() + sourceRect.width() - (cellWidth / 2.0)
    startY = sourcePos.y() + (sourceRect.height() / 2.0)

    endX = targetPos.x() - 2
    endY = targetPos.y() + (targetRect.height() / 2.0)

    path = QPainterPath()
    path.moveTo(startX, startY)
    path.lineTo(startX, endY)
    path.lineTo(endX, endY)

    a = QPointF(endX - 10, endY - 3)
    b = QPointF(endX, endY)
    c = QPointF(endX - 10, endY + 3)

    self._polygon = QPolygonF([a, b, c, a])
    path.addPolygon(self._polygon)

    self.setPath(path)
[ "def", "rebuild", "(", "self", ")", ":", "scene", "=", "self", ".", "scene", "(", ")", "if", "(", "not", "scene", ")", ":", "return", "sourcePos", "=", "self", ".", "sourceItem", "(", ")", ".", "viewItem", "(", ")", ".", "pos", "(", ")", "sourceRect", "=", "self", ".", "sourceItem", "(", ")", ".", "viewItem", "(", ")", ".", "rect", "(", ")", "targetPos", "=", "self", ".", "targetItem", "(", ")", ".", "viewItem", "(", ")", ".", "pos", "(", ")", "targetRect", "=", "self", ".", "targetItem", "(", ")", ".", "viewItem", "(", ")", ".", "rect", "(", ")", "cellWidth", "=", "scene", ".", "ganttWidget", "(", ")", ".", "cellWidth", "(", ")", "startX", "=", "sourcePos", ".", "x", "(", ")", "+", "sourceRect", ".", "width", "(", ")", "-", "(", "cellWidth", "/", "2.0", ")", "startY", "=", "sourcePos", ".", "y", "(", ")", "+", "(", "sourceRect", ".", "height", "(", ")", "/", "2.0", ")", "endX", "=", "targetPos", ".", "x", "(", ")", "-", "2", "endY", "=", "targetPos", ".", "y", "(", ")", "+", "(", "targetRect", ".", "height", "(", ")", "/", "2.0", ")", "path", "=", "QPainterPath", "(", ")", "path", ".", "moveTo", "(", "startX", ",", "startY", ")", "path", ".", "lineTo", "(", "startX", ",", "endY", ")", "path", ".", "lineTo", "(", "endX", ",", "endY", ")", "a", "=", "QPointF", "(", "endX", "-", "10", ",", "endY", "-", "3", ")", "b", "=", "QPointF", "(", "endX", ",", "endY", ")", "c", "=", "QPointF", "(", "endX", "-", "10", ",", "endY", "+", "3", ")", "self", ".", "_polygon", "=", "QPolygonF", "(", "[", "a", ",", "b", ",", "c", ",", "a", "]", ")", "path", ".", "addPolygon", "(", "self", ".", "_polygon", ")", "self", ".", "setPath", "(", "path", ")" ]
Rebuilds the dependency path for this item.
[ "Rebuilds", "the", "dependency", "path", "for", "this", "item", "." ]
f18a73bec84df90b034ca69b9deea118dbedfc4d
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xganttwidget/xganttdepitem.py#L60-L95
train
starling-lab/rnlp
rnlp/parse.py
_writeBlock
def _writeBlock(block, blockID):
    '''writes the block to a file with the id'''
    with open("blockIDs.txt", "a") as fp:
        fp.write("blockID: " + str(blockID) + "\n")
        sentences = ""
        for sentence in block:
            sentences += sentence+","
        fp.write("block sentences: "+sentences[:-1]+"\n")
        fp.write("\n")
python
def _writeBlock(block, blockID):
    '''writes the block to a file with the id'''
    with open("blockIDs.txt", "a") as fp:
        fp.write("blockID: " + str(blockID) + "\n")
        sentences = ""
        for sentence in block:
            sentences += sentence+","
        fp.write("block sentences: "+sentences[:-1]+"\n")
        fp.write("\n")
[ "def", "_writeBlock", "(", "block", ",", "blockID", ")", ":", "with", "open", "(", "\"blockIDs.txt\"", ",", "\"a\"", ")", "as", "fp", ":", "fp", ".", "write", "(", "\"blockID: \"", "+", "str", "(", "blockID", ")", "+", "\"\\n\"", ")", "sentences", "=", "\"\"", "for", "sentence", "in", "block", ":", "sentences", "+=", "sentence", "+", "\",\"", "fp", ".", "write", "(", "\"block sentences: \"", "+", "sentences", "[", ":", "-", "1", "]", "+", "\"\\n\"", ")", "fp", ".", "write", "(", "\"\\n\"", ")" ]
writes the block to a file with the id
[ "writes", "the", "block", "to", "a", "file", "with", "the", "id" ]
72054cc2c0cbaea1d281bf3d56b271d4da29fc4a
https://github.com/starling-lab/rnlp/blob/72054cc2c0cbaea1d281bf3d56b271d4da29fc4a/rnlp/parse.py#L38-L46
train
starling-lab/rnlp
rnlp/parse.py
_writeSentenceInBlock
def _writeSentenceInBlock(sentence, blockID, sentenceID):
    '''writes the sentence in a block to a file with the id'''
    with open("sentenceIDs.txt", "a") as fp:
        fp.write("sentenceID: "+str(blockID)+"_"+str(sentenceID)+"\n")
        fp.write("sentence string: "+sentence+"\n")
        fp.write("\n")
python
def _writeSentenceInBlock(sentence, blockID, sentenceID):
    '''writes the sentence in a block to a file with the id'''
    with open("sentenceIDs.txt", "a") as fp:
        fp.write("sentenceID: "+str(blockID)+"_"+str(sentenceID)+"\n")
        fp.write("sentence string: "+sentence+"\n")
        fp.write("\n")
[ "def", "_writeSentenceInBlock", "(", "sentence", ",", "blockID", ",", "sentenceID", ")", ":", "with", "open", "(", "\"sentenceIDs.txt\"", ",", "\"a\"", ")", "as", "fp", ":", "fp", ".", "write", "(", "\"sentenceID: \"", "+", "str", "(", "blockID", ")", "+", "\"_\"", "+", "str", "(", "sentenceID", ")", "+", "\"\\n\"", ")", "fp", ".", "write", "(", "\"sentence string: \"", "+", "sentence", "+", "\"\\n\"", ")", "fp", ".", "write", "(", "\"\\n\"", ")" ]
writes the sentence in a block to a file with the id
[ "writes", "the", "sentence", "in", "a", "block", "to", "a", "file", "with", "the", "id" ]
72054cc2c0cbaea1d281bf3d56b271d4da29fc4a
https://github.com/starling-lab/rnlp/blob/72054cc2c0cbaea1d281bf3d56b271d4da29fc4a/rnlp/parse.py#L49-L54
train
starling-lab/rnlp
rnlp/parse.py
_writeWordFromSentenceInBlock
def _writeWordFromSentenceInBlock(word, blockID, sentenceID, wordID):
    '''writes the word from a sentence in a block to a file with the id'''
    with open("wordIDs.txt", "a") as fp:
        fp.write("wordID: " + str(blockID) + "_" + str(sentenceID) + "_" +
                 str(wordID) + "\n")
        fp.write("wordString: " + word + "\n")
        fp.write("\n")
python
def _writeWordFromSentenceInBlock(word, blockID, sentenceID, wordID):
    '''writes the word from a sentence in a block to a file with the id'''
    with open("wordIDs.txt", "a") as fp:
        fp.write("wordID: " + str(blockID) + "_" + str(sentenceID) + "_" +
                 str(wordID) + "\n")
        fp.write("wordString: " + word + "\n")
        fp.write("\n")
[ "def", "_writeWordFromSentenceInBlock", "(", "word", ",", "blockID", ",", "sentenceID", ",", "wordID", ")", ":", "with", "open", "(", "\"wordIDs.txt\"", ",", "\"a\"", ")", "as", "fp", ":", "fp", ".", "write", "(", "\"wordID: \"", "+", "str", "(", "blockID", ")", "+", "\"_\"", "+", "str", "(", "sentenceID", ")", "+", "\"_\"", "+", "str", "(", "wordID", ")", "+", "\"\\n\"", ")", "fp", ".", "write", "(", "\"wordString: \"", "+", "word", "+", "\"\\n\"", ")", "fp", ".", "write", "(", "\"\\n\"", ")" ]
writes the word from a sentence in a block to a file with the id
[ "writes", "the", "word", "from", "a", "sentence", "in", "a", "block", "to", "a", "file", "with", "the", "id" ]
72054cc2c0cbaea1d281bf3d56b271d4da29fc4a
https://github.com/starling-lab/rnlp/blob/72054cc2c0cbaea1d281bf3d56b271d4da29fc4a/rnlp/parse.py#L57-L63
train
starling-lab/rnlp
rnlp/parse.py
_writeBk
def _writeBk(target="sentenceContainsTarget(+SID,+WID).",
             treeDepth="3", nodeSize="3", numOfClauses="8"):
    """
    Writes a background file to disk.

    :param target: Target predicate with modes.
    :type target: str.

    :param treeDepth: Depth of the tree.
    :type treeDepth: str.

    :param nodeSize: Maximum size of each node in the tree.
    :type nodeSize: str.

    :param numOfClauses: Number of clauses in total.
    :type numOfClauses: str.
    """
    with open('bk.txt', 'w') as bk:
        bk.write("useStdLogicVariables: true\n")
        bk.write("setParam: treeDepth=" + str(treeDepth) + '.\n')
        bk.write("setParam: nodeSize=" + str(nodeSize) + '.\n')
        bk.write("setParam: numOfClauses=" + str(numOfClauses) + '.\n')
        bk.write("mode: nextSentenceInBlock(+BID,+SID,-SID).\n")
        bk.write("mode: nextSentenceInBlock(+BID,-SID,+SID).\n")
        bk.write("mode: earlySentenceInBlock(+BID,-SID).\n")
        bk.write("mode: midWaySentenceInBlock(+BID,-SID).\n")
        bk.write("mode: lateSentenceInBlock(+BID,-SID).\n")
        bk.write("mode: sentenceInBlock(-SID,+BID).\n")
        bk.write("mode: wordString(+WID,#WSTR).\n")
        bk.write("mode: partOfSpeechTag(+WID,#WPOS).\n")
        bk.write("mode: nextWordInSentence(+SID,+WID,-WID).\n")
        bk.write("mode: earlyWordInSentence(+SID,-WID).\n")
        bk.write("mode: midWayWordInSentence(+SID,-WID).\n")
        bk.write("mode: lateWordInSentence(+SID,-WID).\n")
        bk.write("mode: wordInSentence(-WID,+SID).\n")
        bk.write("mode: " + target + "\n")
    return
python
def _writeBk(target="sentenceContainsTarget(+SID,+WID).",
             treeDepth="3", nodeSize="3", numOfClauses="8"):
    """
    Writes a background file to disk.

    :param target: Target predicate with modes.
    :type target: str.

    :param treeDepth: Depth of the tree.
    :type treeDepth: str.

    :param nodeSize: Maximum size of each node in the tree.
    :type nodeSize: str.

    :param numOfClauses: Number of clauses in total.
    :type numOfClauses: str.
    """
    with open('bk.txt', 'w') as bk:
        bk.write("useStdLogicVariables: true\n")
        bk.write("setParam: treeDepth=" + str(treeDepth) + '.\n')
        bk.write("setParam: nodeSize=" + str(nodeSize) + '.\n')
        bk.write("setParam: numOfClauses=" + str(numOfClauses) + '.\n')
        bk.write("mode: nextSentenceInBlock(+BID,+SID,-SID).\n")
        bk.write("mode: nextSentenceInBlock(+BID,-SID,+SID).\n")
        bk.write("mode: earlySentenceInBlock(+BID,-SID).\n")
        bk.write("mode: midWaySentenceInBlock(+BID,-SID).\n")
        bk.write("mode: lateSentenceInBlock(+BID,-SID).\n")
        bk.write("mode: sentenceInBlock(-SID,+BID).\n")
        bk.write("mode: wordString(+WID,#WSTR).\n")
        bk.write("mode: partOfSpeechTag(+WID,#WPOS).\n")
        bk.write("mode: nextWordInSentence(+SID,+WID,-WID).\n")
        bk.write("mode: earlyWordInSentence(+SID,-WID).\n")
        bk.write("mode: midWayWordInSentence(+SID,-WID).\n")
        bk.write("mode: lateWordInSentence(+SID,-WID).\n")
        bk.write("mode: wordInSentence(-WID,+SID).\n")
        bk.write("mode: " + target + "\n")
    return
[ "def", "_writeBk", "(", "target", "=", "\"sentenceContainsTarget(+SID,+WID).\"", ",", "treeDepth", "=", "\"3\"", ",", "nodeSize", "=", "\"3\"", ",", "numOfClauses", "=", "\"8\"", ")", ":", "with", "open", "(", "'bk.txt'", ",", "'w'", ")", "as", "bk", ":", "bk", ".", "write", "(", "\"useStdLogicVariables: true\\n\"", ")", "bk", ".", "write", "(", "\"setParam: treeDepth=\"", "+", "str", "(", "treeDepth", ")", "+", "'.\\n'", ")", "bk", ".", "write", "(", "\"setParam: nodeSize=\"", "+", "str", "(", "nodeSize", ")", "+", "'.\\n'", ")", "bk", ".", "write", "(", "\"setParam: numOfClauses=\"", "+", "str", "(", "numOfClauses", ")", "+", "'.\\n'", ")", "bk", ".", "write", "(", "\"mode: nextSentenceInBlock(+BID,+SID,-SID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: nextSentenceInBlock(+BID,-SID,+SID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: earlySentenceInBlock(+BID,-SID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: midWaySentenceInBlock(+BID,-SID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: lateSentenceInBlock(+BID,-SID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: sentenceInBlock(-SID,+BID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: wordString(+WID,#WSTR).\\n\"", ")", "bk", ".", "write", "(", "\"mode: partOfSpeechTag(+WID,#WPOS).\\n\"", ")", "bk", ".", "write", "(", "\"mode: nextWordInSentence(+SID,+WID,-WID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: earlyWordInSentence(+SID,-WID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: midWayWordInSentence(+SID,-WID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: lateWordInSentence(+SID,-WID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: wordInSentence(-WID,+SID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: \"", "+", "target", "+", "\"\\n\"", ")", "return" ]
Writes a background file to disk.

:param target: Target predicate with modes.
:type target: str.

:param treeDepth: Depth of the tree.
:type treeDepth: str.

:param nodeSize: Maximum size of each node in the tree.
:type nodeSize: str.

:param numOfClauses: Number of clauses in total.
:type numOfClauses: str.
[ "Writes", "a", "background", "file", "to", "disk", "." ]
72054cc2c0cbaea1d281bf3d56b271d4da29fc4a
https://github.com/starling-lab/rnlp/blob/72054cc2c0cbaea1d281bf3d56b271d4da29fc4a/rnlp/parse.py#L72-L111
train
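A sketch calling the module-private helper above (the three _write* helpers before it follow the same append-to-text-file pattern); it writes bk.txt with modes for a BoostSRL-style learner into the current directory.

from rnlp.parse import _writeBk   # private helper; import path assumed

_writeBk(target="sentenceContainsTarget(+SID,+WID).",
         treeDepth="8", nodeSize="3", numOfClauses="12")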
mikhaildubov/AST-text-analysis
east/asts/easa.py
EnhancedAnnotatedSuffixArray.traverse_depth_first_pre_order
def traverse_depth_first_pre_order(self, callback):
    """Visits the internal "nodes" of the enhanced suffix array in depth-first pre-order.

    Based on Abouelhoda et al. (2004).
    """
    n = len(self.suftab)
    root = [0, 0, n - 1, ""]  # <l, i, j, char>

    def _traverse_top_down(interval):
        # TODO: Rewrite with stack? As in bottom-up
        callback(interval)
        i, j = interval[1], interval[2]
        if i != j:
            children = self._get_child_intervals(i, j)
            children.sort(key=lambda child: child[3])
            for child in children:
                _traverse_top_down(child)
    _traverse_top_down(root)
python
def traverse_depth_first_pre_order(self, callback):
    """Visits the internal "nodes" of the enhanced suffix array in depth-first pre-order.

    Based on Abouelhoda et al. (2004).
    """
    n = len(self.suftab)
    root = [0, 0, n - 1, ""]  # <l, i, j, char>

    def _traverse_top_down(interval):
        # TODO: Rewrite with stack? As in bottom-up
        callback(interval)
        i, j = interval[1], interval[2]
        if i != j:
            children = self._get_child_intervals(i, j)
            children.sort(key=lambda child: child[3])
            for child in children:
                _traverse_top_down(child)
    _traverse_top_down(root)
[ "def", "traverse_depth_first_pre_order", "(", "self", ",", "callback", ")", ":", "n", "=", "len", "(", "self", ".", "suftab", ")", "root", "=", "[", "0", ",", "0", ",", "n", "-", "1", ",", "\"\"", "]", "# <l, i, j, char>", "def", "_traverse_top_down", "(", "interval", ")", ":", "# TODO: Rewrite with stack? As in bottom-up", "callback", "(", "interval", ")", "i", ",", "j", "=", "interval", "[", "1", "]", ",", "interval", "[", "2", "]", "if", "i", "!=", "j", ":", "children", "=", "self", ".", "_get_child_intervals", "(", "i", ",", "j", ")", "children", ".", "sort", "(", "key", "=", "lambda", "child", ":", "child", "[", "3", "]", ")", "for", "child", "in", "children", ":", "_traverse_top_down", "(", "child", ")", "_traverse_top_down", "(", "root", ")" ]
Visits the internal "nodes" of the enhanced suffix array in depth-first pre-order.

Based on Abouelhoda et al. (2004).
[ "Visits", "the", "internal", "nodes", "of", "the", "enhanced", "suffix", "array", "in", "depth", "-", "first", "pre", "-", "order", "." ]
055ad8d2492c100bbbaa25309ec1074bdf1dfaa5
https://github.com/mikhaildubov/AST-text-analysis/blob/055ad8d2492c100bbbaa25309ec1074bdf1dfaa5/east/asts/easa.py#L38-L55
train
mikhaildubov/AST-text-analysis
east/asts/easa.py
EnhancedAnnotatedSuffixArray.traverse_depth_first_post_order
def traverse_depth_first_post_order(self, callback):
    """Visits the internal "nodes" of the enhanced suffix array in depth-first post-order.

    Kasai et. al. (2001), Abouelhoda et al. (2004).
    """
    # a. Reimplement without python lists?..
    # b. Interface will require it to have not internal nodes only?..
    #    (but actually this implementation gives a ~2x gain of performance)
    last_interval = None
    n = len(self.suftab)
    stack = [[0, 0, None, []]]  # <l, i, j, children>
    for i in xrange(1, n):
        lb = i - 1
        while self.lcptab[i] < stack[-1][0]:
            stack[-1][2] = i - 1
            last_interval = stack.pop()
            callback(last_interval)
            lb = last_interval[1]
            if self.lcptab[i] <= stack[-1][0]:
                stack[-1][3].append(last_interval)
                last_interval = None
        if self.lcptab[i] > stack[-1][0]:
            if last_interval:
                stack.append([self.lcptab[i], lb, None, [last_interval]])
                last_interval = None
            else:
                stack.append([self.lcptab[i], lb, None, []])
    stack[-1][2] = n - 1
    callback(stack[-1])
python
def traverse_depth_first_post_order(self, callback):
    """Visits the internal "nodes" of the enhanced suffix array in depth-first post-order.

    Kasai et. al. (2001), Abouelhoda et al. (2004).
    """
    # a. Reimplement without python lists?..
    # b. Interface will require it to have not internal nodes only?..
    #    (but actually this implementation gives a ~2x gain of performance)
    last_interval = None
    n = len(self.suftab)
    stack = [[0, 0, None, []]]  # <l, i, j, children>
    for i in xrange(1, n):
        lb = i - 1
        while self.lcptab[i] < stack[-1][0]:
            stack[-1][2] = i - 1
            last_interval = stack.pop()
            callback(last_interval)
            lb = last_interval[1]
            if self.lcptab[i] <= stack[-1][0]:
                stack[-1][3].append(last_interval)
                last_interval = None
        if self.lcptab[i] > stack[-1][0]:
            if last_interval:
                stack.append([self.lcptab[i], lb, None, [last_interval]])
                last_interval = None
            else:
                stack.append([self.lcptab[i], lb, None, []])
    stack[-1][2] = n - 1
    callback(stack[-1])
[ "def", "traverse_depth_first_post_order", "(", "self", ",", "callback", ")", ":", "# a. Reimplement without python lists?..", "# b. Interface will require it to have not internal nodes only?..", "# (but actually this implementation gives a ~2x gain of performance)", "last_interval", "=", "None", "n", "=", "len", "(", "self", ".", "suftab", ")", "stack", "=", "[", "[", "0", ",", "0", ",", "None", ",", "[", "]", "]", "]", "# <l, i, j, children>", "for", "i", "in", "xrange", "(", "1", ",", "n", ")", ":", "lb", "=", "i", "-", "1", "while", "self", ".", "lcptab", "[", "i", "]", "<", "stack", "[", "-", "1", "]", "[", "0", "]", ":", "stack", "[", "-", "1", "]", "[", "2", "]", "=", "i", "-", "1", "last_interval", "=", "stack", ".", "pop", "(", ")", "callback", "(", "last_interval", ")", "lb", "=", "last_interval", "[", "1", "]", "if", "self", ".", "lcptab", "[", "i", "]", "<=", "stack", "[", "-", "1", "]", "[", "0", "]", ":", "stack", "[", "-", "1", "]", "[", "3", "]", ".", "append", "(", "last_interval", ")", "last_interval", "=", "None", "if", "self", ".", "lcptab", "[", "i", "]", ">", "stack", "[", "-", "1", "]", "[", "0", "]", ":", "if", "last_interval", ":", "stack", ".", "append", "(", "[", "self", ".", "lcptab", "[", "i", "]", ",", "lb", ",", "None", ",", "[", "last_interval", "]", "]", ")", "last_interval", "=", "None", "else", ":", "stack", ".", "append", "(", "[", "self", ".", "lcptab", "[", "i", "]", ",", "lb", ",", "None", ",", "[", "]", "]", ")", "stack", "[", "-", "1", "]", "[", "2", "]", "=", "n", "-", "1", "callback", "(", "stack", "[", "-", "1", "]", ")" ]
Visits the internal "nodes" of the enhanced suffix array in depth-first post-order.

Kasai et. al. (2001), Abouelhoda et al. (2004).
[ "Visits", "the", "internal", "nodes", "of", "the", "enhanced", "suffix", "array", "in", "depth", "-", "first", "post", "-", "order", "." ]
055ad8d2492c100bbbaa25309ec1074bdf1dfaa5
https://github.com/mikhaildubov/AST-text-analysis/blob/055ad8d2492c100bbbaa25309ec1074bdf1dfaa5/east/asts/easa.py#L57-L85
train
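A sketch covering both traversal records above, assuming `easa` is an already-built EnhancedAnnotatedSuffixArray instance; per the code, pre-order intervals end in a char and post-order intervals end in a children list, so only the common prefix is printed.

def show(interval):
    print(interval[:3])   # <l, i, j>: lcp value plus suffix-array bounds

easa.traverse_depth_first_pre_order(show)
easa.traverse_depth_first_post_order(show)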
talkincode/txradius
txradius/radius/packet.py
Packet._DecodeKey
def _DecodeKey(self, key):
    """Turn a key into a string if possible"""

    if self.dict.attrindex.HasBackward(key):
        return self.dict.attrindex.GetBackward(key)
    return key
python
def _DecodeKey(self, key):
    """Turn a key into a string if possible"""

    if self.dict.attrindex.HasBackward(key):
        return self.dict.attrindex.GetBackward(key)
    return key
[ "def", "_DecodeKey", "(", "self", ",", "key", ")", ":", "if", "self", ".", "dict", ".", "attrindex", ".", "HasBackward", "(", "key", ")", ":", "return", "self", ".", "dict", ".", "attrindex", ".", "GetBackward", "(", "key", ")", "return", "key" ]
Turn a key into a string if possible
[ "Turn", "a", "key", "into", "a", "string", "if", "possible" ]
b86fdbc9be41183680b82b07d3a8e8ea10926e01
https://github.com/talkincode/txradius/blob/b86fdbc9be41183680b82b07d3a8e8ea10926e01/txradius/radius/packet.py#L146-L151
train
talkincode/txradius
txradius/radius/packet.py
Packet.AddAttribute
def AddAttribute(self, key, value): """Add an attribute to the packet. :param key: attribute name or identification :type key: string, attribute code or (vendor code, attribute code) tuple :param value: value :type value: depends on type of attribute """ if isinstance(value, list): values = value else: values = [value] (key, values) = self._EncodeKeyValues(key, values) self.setdefault(key, []).extend(values)
python
def AddAttribute(self, key, value): """Add an attribute to the packet. :param key: attribute name or identifier :type key: string, attribute code or (vendor code, attribute code) tuple :param value: value :type value: depends on type of attribute """ if isinstance(value, list): values = value else: values = [value] (key, values) = self._EncodeKeyValues(key, values) self.setdefault(key, []).extend(values)
[ "def", "AddAttribute", "(", "self", ",", "key", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "values", "=", "value", "else", ":", "values", "=", "[", "value", "]", "(", "key", ",", "values", ")", "=", "self", ".", "_EncodeKeyValues", "(", "key", ",", "values", ")", "self", ".", "setdefault", "(", "key", ",", "[", "]", ")", ".", "extend", "(", "values", ")" ]
Add an attribute to the packet. :param key: attribute name or identifier :type key: string, attribute code or (vendor code, attribute code) tuple :param value: value :type value: depends on type of attribute
[ "Add", "an", "attribute", "to", "the", "packet", "." ]
b86fdbc9be41183680b82b07d3a8e8ea10926e01
https://github.com/talkincode/txradius/blob/b86fdbc9be41183680b82b07d3a8e8ea10926e01/txradius/radius/packet.py#L153-L167
train
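A stripped-down sketch of the accumulation behaviour above: repeated calls for the same key extend a single per-key list, preserving attribute order. The _EncodeKeyValues step is deliberately omitted here, so plain strings stand in for encoded attributes:

class TinyPacket(dict):
    def add_attribute(self, key, value):
        # mirror AddAttribute: wrap scalars, then extend the per-key list
        values = value if isinstance(value, list) else [value]
        self.setdefault(key, []).extend(values)

p = TinyPacket()
p.add_attribute('Reply-Message', 'hello')
p.add_attribute('Reply-Message', ['world', '!'])
print(p)   # {'Reply-Message': ['hello', 'world', '!']}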
talkincode/txradius
txradius/radius/packet.py
Packet.CreateAuthenticator
def CreateAuthenticator(): """Create a packet authenticator. All RADIUS packets contain a sixteen byte authenticator which is used to authenticate replies from the RADIUS server and in the password hiding algorithm. This function returns a suitable random string that can be used as an authenticator. :return: valid packet authenticator :rtype: binary string """ data = [] for i in range(16): data.append(random_generator.randrange(0, 256)) if six.PY3: return bytes(data) else: return ''.join(chr(b) for b in data)
python
def CreateAuthenticator(): """Create a packet authenticator. All RADIUS packets contain a sixteen byte authenticator which is used to authenticate replies from the RADIUS server and in the password hiding algorithm. This function returns a suitable random string that can be used as an authenticator. :return: valid packet authenticator :rtype: binary string """ data = [] for i in range(16): data.append(random_generator.randrange(0, 256)) if six.PY3: return bytes(data) else: return ''.join(chr(b) for b in data)
[ "def", "CreateAuthenticator", "(", ")", ":", "data", "=", "[", "]", "for", "i", "in", "range", "(", "16", ")", ":", "data", ".", "append", "(", "random_generator", ".", "randrange", "(", "0", ",", "256", ")", ")", "if", "six", ".", "PY3", ":", "return", "bytes", "(", "data", ")", "else", ":", "return", "''", ".", "join", "(", "chr", "(", "b", ")", "for", "b", "in", "data", ")" ]
Create a packet authenticator. All RADIUS packets contain a sixteen byte authenticator which is used to authenticate replies from the RADIUS server and in the password hiding algorithm. This function returns a suitable random string that can be used as an authenticator. :return: valid packet authenticator :rtype: binary string
[ "Create", "a", "packet", "autenticator", ".", "All", "RADIUS", "packets", "contain", "a", "sixteen", "byte", "authenticator", "which", "is", "used", "to", "authenticate", "replies", "from", "the", "RADIUS", "server", "and", "in", "the", "password", "hiding", "algorithm", ".", "This", "function", "returns", "a", "suitable", "random", "string", "that", "can", "be", "used", "as", "an", "authenticator", "." ]
b86fdbc9be41183680b82b07d3a8e8ea10926e01
https://github.com/talkincode/txradius/blob/b86fdbc9be41183680b82b07d3a8e8ea10926e01/txradius/radius/packet.py#L208-L224
train
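A standalone Python 3 equivalent, assuming a cryptographically secure byte source is acceptable where the module above uses its own random_generator:

import os

def create_authenticator():
    # 16 random bytes, the size the RADIUS packet header requires
    return os.urandom(16)

auth = create_authenticator()
assert isinstance(auth, bytes) and len(auth) == 16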
talkincode/txradius
txradius/radius/packet.py
Packet.DecodePacket
def DecodePacket(self, packet): """Initialize the object from raw packet data. Decode a packet as received from the network and populate the object from it. :param packet: raw packet :type packet: string""" try: (self.code, self.id, length, self.authenticator) = \ struct.unpack('!BBH16s', packet[0:20]) except struct.error: raise PacketError('Packet header is corrupt') if len(packet) != length: raise PacketError('Packet has invalid length') if length > 8192: raise PacketError('Packet length is too long (%d)' % length) self.clear() packet = packet[20:] while packet: try: (key, attrlen) = struct.unpack('!BB', packet[0:2]) except struct.error: raise PacketError('Attribute header is corrupt') if attrlen < 2: raise PacketError( 'Attribute length is too small (%d)' % attrlen) value = packet[2:attrlen] if key == 26: # 26 is the Vendor-Specific attribute (vendor, subattrs) = self._PktDecodeVendorAttribute(value) if vendor is None: self.setdefault(key, []).append(value) else: for (k, v) in subattrs: self.setdefault((vendor, k), []).append(v) else: self.setdefault(key, []).append(value) packet = packet[attrlen:]
python
def DecodePacket(self, packet): """Initialize the object from raw packet data. Decode a packet as received from the network and populate the object from it. :param packet: raw packet :type packet: string""" try: (self.code, self.id, length, self.authenticator) = \ struct.unpack('!BBH16s', packet[0:20]) except struct.error: raise PacketError('Packet header is corrupt') if len(packet) != length: raise PacketError('Packet has invalid length') if length > 8192: raise PacketError('Packet length is too long (%d)' % length) self.clear() packet = packet[20:] while packet: try: (key, attrlen) = struct.unpack('!BB', packet[0:2]) except struct.error: raise PacketError('Attribute header is corrupt') if attrlen < 2: raise PacketError( 'Attribute length is too small (%d)' % attrlen) value = packet[2:attrlen] if key == 26: # 26 is the Vendor-Specific attribute (vendor, subattrs) = self._PktDecodeVendorAttribute(value) if vendor is None: self.setdefault(key, []).append(value) else: for (k, v) in subattrs: self.setdefault((vendor, k), []).append(v) else: self.setdefault(key, []).append(value) packet = packet[attrlen:]
[ "def", "DecodePacket", "(", "self", ",", "packet", ")", ":", "try", ":", "(", "self", ".", "code", ",", "self", ".", "id", ",", "length", ",", "self", ".", "authenticator", ")", "=", "struct", ".", "unpack", "(", "'!BBH16s'", ",", "packet", "[", "0", ":", "20", "]", ")", "except", "struct", ".", "error", ":", "raise", "PacketError", "(", "'Packet header is corrupt'", ")", "if", "len", "(", "packet", ")", "!=", "length", ":", "raise", "PacketError", "(", "'Packet has invalid length'", ")", "if", "length", ">", "8192", ":", "raise", "PacketError", "(", "'Packet length is too long (%d)'", "%", "length", ")", "self", ".", "clear", "(", ")", "packet", "=", "packet", "[", "20", ":", "]", "while", "packet", ":", "try", ":", "(", "key", ",", "attrlen", ")", "=", "struct", ".", "unpack", "(", "'!BB'", ",", "packet", "[", "0", ":", "2", "]", ")", "except", "struct", ".", "error", ":", "raise", "PacketError", "(", "'Attribute header is corrupt'", ")", "if", "attrlen", "<", "2", ":", "raise", "PacketError", "(", "'Attribute length is too small (%d)'", "%", "attrlen", ")", "value", "=", "packet", "[", "2", ":", "attrlen", "]", "if", "key", "==", "26", ":", "# 26 is the Vendor-Specific attribute", "(", "vendor", ",", "subattrs", ")", "=", "self", ".", "_PktDecodeVendorAttribute", "(", "value", ")", "if", "vendor", "is", "None", ":", "self", ".", "setdefault", "(", "key", ",", "[", "]", ")", ".", "append", "(", "value", ")", "else", ":", "for", "(", "k", ",", "v", ")", "in", "subattrs", ":", "self", ".", "setdefault", "(", "(", "vendor", ",", "k", ")", ",", "[", "]", ")", ".", "append", "(", "v", ")", "else", ":", "self", ".", "setdefault", "(", "key", ",", "[", "]", ")", ".", "append", "(", "value", ")", "packet", "=", "packet", "[", "attrlen", ":", "]" ]
Initialize the object from raw packet data. Decode a packet as received from the network and populate the object from it. :param packet: raw packet :type packet: string
[ "Initialize", "the", "object", "from", "raw", "packet", "data", ".", "Decode", "a", "packet", "as", "received", "from", "the", "network", "and", "decode", "it", "." ]
b86fdbc9be41183680b82b07d3a8e8ea10926e01
https://github.com/talkincode/txradius/blob/b86fdbc9be41183680b82b07d3a8e8ea10926e01/txradius/radius/packet.py#L308-L350
train
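To make the wire format concrete, here is a self-contained illustration of the layout DecodePacket parses: a '!BBH16s' header (code, id, length, authenticator) followed by type-length-value attributes. Attribute type 1 (User-Name) and code 1 (Access-Request) follow RFC 2865; the id and payload below are made-up demo values:

import os
import struct

authenticator = os.urandom(16)
# one TLV attribute: type 1 (User-Name), length covers the 2-byte header
attr = struct.pack('!BB', 1, 2 + len(b'alice')) + b'alice'
# packet header: code 1 (Access-Request), id 42, total length, authenticator
raw = struct.pack('!BBH16s', 1, 42, 20 + len(attr), authenticator) + attr

code, pid, length, auth = struct.unpack('!BBH16s', raw[:20])
assert (code, pid, length) == (1, 42, len(raw))
atype, alen = struct.unpack('!BB', raw[20:22])
assert (atype, raw[22:20 + alen]) == (1, b'alice')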
talkincode/txradius
txradius/radius/packet.py
AuthPacket.PwDecrypt
def PwDecrypt(self, password): """Unobfuscate a RADIUS password. RADIUS hides passwords in packets by using an algorithm based on the MD5 hash of the packet authenticator and RADIUS secret. This function reverses the obfuscation process. :param password: obfuscated form of password :type password: binary string :return: plaintext password :rtype: unicode string """ buf = password pw = six.b('') last = self.authenticator while buf: hash = md5_constructor(self.secret + last).digest() if six.PY3: for i in range(16): pw += bytes((hash[i] ^ buf[i],)) else: for i in range(16): pw += chr(ord(hash[i]) ^ ord(buf[i])) (last, buf) = (buf[:16], buf[16:]) while pw.endswith(six.b('\x00')): pw = pw[:-1] return pw.decode('utf-8')
python
def PwDecrypt(self, password): """Unobfuscate a RADIUS password. RADIUS hides passwords in packets by using an algorithm based on the MD5 hash of the packet authenticator and RADIUS secret. This function reverses the obfuscation process. :param password: obfuscated form of password :type password: binary string :return: plaintext password :rtype: unicode string """ buf = password pw = six.b('') last = self.authenticator while buf: hash = md5_constructor(self.secret + last).digest() if six.PY3: for i in range(16): pw += bytes((hash[i] ^ buf[i],)) else: for i in range(16): pw += chr(ord(hash[i]) ^ ord(buf[i])) (last, buf) = (buf[:16], buf[16:]) while pw.endswith(six.b('\x00')): pw = pw[:-1] return pw.decode('utf-8')
[ "def", "PwDecrypt", "(", "self", ",", "password", ")", ":", "buf", "=", "password", "pw", "=", "six", ".", "b", "(", "''", ")", "last", "=", "self", ".", "authenticator", "while", "buf", ":", "hash", "=", "md5_constructor", "(", "self", ".", "secret", "+", "last", ")", ".", "digest", "(", ")", "if", "six", ".", "PY3", ":", "for", "i", "in", "range", "(", "16", ")", ":", "pw", "+=", "bytes", "(", "(", "hash", "[", "i", "]", "^", "buf", "[", "i", "]", ",", ")", ")", "else", ":", "for", "i", "in", "range", "(", "16", ")", ":", "pw", "+=", "chr", "(", "ord", "(", "hash", "[", "i", "]", ")", "^", "ord", "(", "buf", "[", "i", "]", ")", ")", "(", "last", ",", "buf", ")", "=", "(", "buf", "[", ":", "16", "]", ",", "buf", "[", "16", ":", "]", ")", "while", "pw", ".", "endswith", "(", "six", ".", "b", "(", "'\\x00'", ")", ")", ":", "pw", "=", "pw", "[", ":", "-", "1", "]", "return", "pw", ".", "decode", "(", "'utf-8'", ")" ]
Unobfuscate a RADIUS password. RADIUS hides passwords in packets by using an algorithm based on the MD5 hash of the packet authenticator and RADIUS secret. This function reverses the obfuscation process. :param password: obfuscated form of password :type password: binary string :return: plaintext password :rtype: unicode string
[ "Unobfuscate", "a", "RADIUS", "password", ".", "RADIUS", "hides", "passwords", "in", "packets", "by", "using", "an", "algorithm", "based", "on", "the", "MD5", "hash", "of", "the", "packet", "authenticator", "and", "RADIUS", "secret", ".", "This", "function", "reverses", "the", "obfuscation", "process", "." ]
b86fdbc9be41183680b82b07d3a8e8ea10926e01
https://github.com/talkincode/txradius/blob/b86fdbc9be41183680b82b07d3a8e8ea10926e01/txradius/radius/packet.py#L404-L432
train
talkincode/txradius
txradius/radius/packet.py
AuthPacket.PwCrypt
def PwCrypt(self, password): """Obfuscate password. RADIUS hides passwords in packets by using an algorithm based on the MD5 hash of the packet authenticator and RADIUS secret. If no authenticator has been set before calling PwCrypt one is created automatically. Changing the authenticator after setting a password that has been encrypted using this function will not work. :param password: plaintext password :type password: unicode string :return: obfuscated version of the password :rtype: binary string """ if self.authenticator is None: self.authenticator = self.CreateAuthenticator() if isinstance(password, six.text_type): password = password.encode('utf-8') buf = password if len(password) % 16 != 0: buf += six.b('\x00') * (16 - (len(password) % 16)) hash = md5_constructor(self.secret + self.authenticator).digest() result = six.b('') last = self.authenticator while buf: hash = md5_constructor(self.secret + last).digest() if six.PY3: for i in range(16): result += bytes((hash[i] ^ buf[i],)) else: for i in range(16): result += chr(ord(hash[i]) ^ ord(buf[i])) last = result[-16:] buf = buf[16:] return result
python
def PwCrypt(self, password): """Obfuscate password. RADIUS hides passwords in packets by using an algorithm based on the MD5 hash of the packet authenticator and RADIUS secret. If no authenticator has been set before calling PwCrypt one is created automatically. Changing the authenticator after setting a password that has been encrypted using this function will not work. :param password: plaintext password :type password: unicode string :return: obfuscated version of the password :rtype: binary string """ if self.authenticator is None: self.authenticator = self.CreateAuthenticator() if isinstance(password, six.text_type): password = password.encode('utf-8') buf = password if len(password) % 16 != 0: buf += six.b('\x00') * (16 - (len(password) % 16)) hash = md5_constructor(self.secret + self.authenticator).digest() result = six.b('') last = self.authenticator while buf: hash = md5_constructor(self.secret + last).digest() if six.PY3: for i in range(16): result += bytes((hash[i] ^ buf[i],)) else: for i in range(16): result += chr(ord(hash[i]) ^ ord(buf[i])) last = result[-16:] buf = buf[16:] return result
[ "def", "PwCrypt", "(", "self", ",", "password", ")", ":", "if", "self", ".", "authenticator", "is", "None", ":", "self", ".", "authenticator", "=", "self", ".", "CreateAuthenticator", "(", ")", "if", "isinstance", "(", "password", ",", "six", ".", "text_type", ")", ":", "password", "=", "password", ".", "encode", "(", "'utf-8'", ")", "buf", "=", "password", "if", "len", "(", "password", ")", "%", "16", "!=", "0", ":", "buf", "+=", "six", ".", "b", "(", "'\\x00'", ")", "*", "(", "16", "-", "(", "len", "(", "password", ")", "%", "16", ")", ")", "hash", "=", "md5_constructor", "(", "self", ".", "secret", "+", "self", ".", "authenticator", ")", ".", "digest", "(", ")", "result", "=", "six", ".", "b", "(", "''", ")", "last", "=", "self", ".", "authenticator", "while", "buf", ":", "hash", "=", "md5_constructor", "(", "self", ".", "secret", "+", "last", ")", ".", "digest", "(", ")", "if", "six", ".", "PY3", ":", "for", "i", "in", "range", "(", "16", ")", ":", "result", "+=", "bytes", "(", "(", "hash", "[", "i", "]", "^", "buf", "[", "i", "]", ",", ")", ")", "else", ":", "for", "i", "in", "range", "(", "16", ")", ":", "result", "+=", "chr", "(", "ord", "(", "hash", "[", "i", "]", ")", "^", "ord", "(", "buf", "[", "i", "]", ")", ")", "last", "=", "result", "[", "-", "16", ":", "]", "buf", "=", "buf", "[", "16", ":", "]", "return", "result" ]
Obfuscate password. RADIUS hides passwords in packets by using an algorithm based on the MD5 hash of the packet authenticator and RADIUS secret. If no authenticator has been set before calling PwCrypt one is created automatically. Changing the authenticator after setting a password that has been encrypted using this function will not work. :param password: plaintext password :type password: unicode string :return: obfuscated version of the password :rtype: binary string
[ "Obfuscate", "password", ".", "RADIUS", "hides", "passwords", "in", "packets", "by", "using", "an", "algorithm", "based", "on", "the", "MD5", "hash", "of", "the", "packet", "authenticator", "and", "RADIUS", "secret", ".", "If", "no", "authenticator", "has", "been", "set", "before", "calling", "PwCrypt", "one", "is", "created", "automatically", ".", "Changing", "the", "authenticator", "after", "setting", "a", "password", "that", "has", "been", "encrypted", "using", "this", "function", "will", "not", "work", "." ]
b86fdbc9be41183680b82b07d3a8e8ea10926e01
https://github.com/talkincode/txradius/blob/b86fdbc9be41183680b82b07d3a8e8ea10926e01/txradius/radius/packet.py#L434-L474
train
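A self-contained Python 3 round-trip of the RFC 2865 User-Password scheme that PwCrypt and PwDecrypt implement above; the secret and authenticator below are made-up demo values:

import hashlib
import os

def pw_crypt(secret, authenticator, password):
    buf = password.encode('utf-8')
    buf += b'\x00' * (-len(buf) % 16)        # pad to a 16-byte multiple
    out, last = b'', authenticator
    while buf:
        digest = hashlib.md5(secret + last).digest()
        out += bytes(h ^ b for h, b in zip(digest, buf[:16]))
        last, buf = out[-16:], buf[16:]      # chain on the ciphertext block
    return out

def pw_decrypt(secret, authenticator, data):
    out, last = b'', authenticator
    while data:
        digest = hashlib.md5(secret + last).digest()
        out += bytes(h ^ b for h, b in zip(digest, data[:16]))
        last, data = data[:16], data[16:]
    return out.rstrip(b'\x00').decode('utf-8')  # strip the NUL padding

secret, auth = b'sharedsecret', os.urandom(16)
assert pw_decrypt(secret, auth, pw_crypt(secret, auth, 'p4ssw0rd')) == 'p4ssw0rd'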
bitesofcode/projexui
projexui/widgets/xtoolbar.py
XToolBar.clear
def clear(self): """ Clears all actions and widgets from this toolbar, recreating the collapse button if the toolbar is collapsable. """ # preserve the collapse button super(XToolBar, self).clear() # clears out the toolbar if self.isCollapsable(): self._collapseButton = QToolButton(self) self._collapseButton.setAutoRaise(True) self._collapseButton.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) self.addWidget(self._collapseButton) self.refreshButton() # create connection self._collapseButton.clicked.connect(self.toggleCollapsed) elif self._collapseButton: self._collapseButton.setParent(None) self._collapseButton.deleteLater() self._collapseButton = None
python
def clear(self): """ Clears all actions and widgets from this toolbar, recreating the collapse button if the toolbar is collapsable. """ # preserve the collapse button super(XToolBar, self).clear() # clears out the toolbar if self.isCollapsable(): self._collapseButton = QToolButton(self) self._collapseButton.setAutoRaise(True) self._collapseButton.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) self.addWidget(self._collapseButton) self.refreshButton() # create connection self._collapseButton.clicked.connect(self.toggleCollapsed) elif self._collapseButton: self._collapseButton.setParent(None) self._collapseButton.deleteLater() self._collapseButton = None
[ "def", "clear", "(", "self", ")", ":", "# preserve the collapse button", "super", "(", "XToolBar", ",", "self", ")", ".", "clear", "(", ")", "# clears out the toolbar", "if", "self", ".", "isCollapsable", "(", ")", ":", "self", ".", "_collapseButton", "=", "QToolButton", "(", "self", ")", "self", ".", "_collapseButton", ".", "setAutoRaise", "(", "True", ")", "self", ".", "_collapseButton", ".", "setSizePolicy", "(", "QSizePolicy", ".", "Expanding", ",", "QSizePolicy", ".", "Expanding", ")", "self", ".", "addWidget", "(", "self", ".", "_collapseButton", ")", "self", ".", "refreshButton", "(", ")", "# create connection", "self", ".", "_collapseButton", ".", "clicked", ".", "connect", "(", "self", ".", "toggleCollapsed", ")", "elif", "self", ".", "_collapseButton", ":", "self", ".", "_collapseButton", ".", "setParent", "(", "None", ")", "self", ".", "_collapseButton", ".", "deleteLater", "(", ")", "self", ".", "_collapseButton", "=", "None" ]
Clears all actions and widgets from this toolbar, recreating the collapse button if the toolbar is collapsable.
[ "Clears", "out", "this", "toolbar", "from", "the", "system", "." ]
f18a73bec84df90b034ca69b9deea118dbedfc4d
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtoolbar.py#L64-L87
train