Dataset columns:
  repo              string (7–55 chars)
  path              string (4–127 chars)
  func_name         string (1–88 chars)
  original_string   string (75–19.8k chars)
  language          string (1 distinct value)
  code              string (75–19.8k chars)
  code_tokens       list
  docstring         string (3–17.3k chars)
  docstring_tokens  list
  sha               string (40 chars)
  url               string (87–242 chars)
  partition         string (1 distinct value)
projectshift/shift-boiler
boiler/user/models.py
User.password_link_expired
def password_link_expired(self, now=None):
    """ Check if password link expired """
    if not now:
        now = datetime.datetime.utcnow()
    return self.password_link_expires < now
python
def password_link_expired(self, now=None):
    """ Check if password link expired """
    if not now:
        now = datetime.datetime.utcnow()
    return self.password_link_expires < now
[ "def", "password_link_expired", "(", "self", ",", "now", "=", "None", ")", ":", "if", "not", "now", ":", "now", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "return", "self", ".", "password_link_expires", "<", "now" ]
Check if password link expired
[ "Check", "if", "password", "link", "expired" ]
8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L346-L349
train
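A minimal usage sketch for the record above. Only the attribute name (password_link_expires) comes from the code; the FakeUser stub and all values are illustrative, not part of shift-boiler.

import datetime

class FakeUser:
    # Minimal stand-in exposing the same method as boiler's User model.
    def __init__(self, expires):
        self.password_link_expires = expires

    def password_link_expired(self, now=None):
        if not now:
            now = datetime.datetime.utcnow()
        return self.password_link_expires < now

# A link that expired 30 minutes ago reports True.
user = FakeUser(datetime.datetime.utcnow() - datetime.timedelta(minutes=30))
print(user.password_link_expired())  # True

# Passing an explicit "now" makes boundary tests deterministic.
print(user.password_link_expired(now=datetime.datetime(2000, 1, 1)))  # False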
projectshift/shift-boiler
boiler/user/models.py
User.add_role
def add_role(self, role):
    """
    Add role to user
    Role must be valid and saved first, otherwise will raise an exception.
    """
    schema = RoleSchema()
    ok = schema.process(role)
    if not ok or not role.id:
        err = 'Role must be valid and saved before adding to user'
        raise x.UserException(err)
    self.__roles.append(role)
python
def add_role(self, role):
    """
    Add role to user
    Role must be valid and saved first, otherwise will raise an exception.
    """
    schema = RoleSchema()
    ok = schema.process(role)
    if not ok or not role.id:
        err = 'Role must be valid and saved before adding to user'
        raise x.UserException(err)
    self.__roles.append(role)
[ "def", "add_role", "(", "self", ",", "role", ")", ":", "schema", "=", "RoleSchema", "(", ")", "ok", "=", "schema", ".", "process", "(", "role", ")", "if", "not", "ok", "or", "not", "role", ".", "id", ":", "err", "=", "'Role must be valid and saved before adding to user'", "raise", "x", ".", "UserException", "(", "err", ")", "self", ".", "__roles", ".", "append", "(", "role", ")" ]
Add role to user Role must be valid and saved first, otherwise will raise an exception.
[ "Add", "role", "to", "user", "Role", "must", "be", "valid", "and", "saved", "first", "otherwise", "will", "raise", "an", "exception", "." ]
8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L363-L375
train
projectshift/shift-boiler
boiler/user/models.py
User.has_role
def has_role(self, role_or_handle):
    """ Checks if user has role """
    if not isinstance(role_or_handle, str):
        return role_or_handle in self.roles
    has_role = False
    for role in self.roles:
        if role.handle == role_or_handle:
            has_role = True
            break
    return has_role
python
def has_role(self, role_or_handle):
    """ Checks if user has role """
    if not isinstance(role_or_handle, str):
        return role_or_handle in self.roles
    has_role = False
    for role in self.roles:
        if role.handle == role_or_handle:
            has_role = True
            break
    return has_role
[ "def", "has_role", "(", "self", ",", "role_or_handle", ")", ":", "if", "not", "isinstance", "(", "role_or_handle", ",", "str", ")", ":", "return", "role_or_handle", "in", "self", ".", "roles", "has_role", "=", "False", "for", "role", "in", "self", ".", "roles", ":", "if", "role", ".", "handle", "==", "role_or_handle", ":", "has_role", "=", "True", "break", "return", "has_role" ]
Checks if user has role
[ "Checks", "if", "user", "has", "role" ]
8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L382-L393
train
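A self-contained sketch of the role-checking semantics in the two records above. The stand-in classes are hypothetical and only mirror the behaviour of boiler's models; the real add_role also validates through RoleSchema and raises x.UserException on failure.

class Role:
    # Hypothetical stand-in for boiler's Role model.
    def __init__(self, handle):
        self.handle = handle

class User:
    # Stand-in reproducing has_role() lookup by handle or by instance.
    def __init__(self, roles=None):
        self.roles = roles or []

    def has_role(self, role_or_handle):
        if not isinstance(role_or_handle, str):
            return role_or_handle in self.roles
        return any(r.handle == role_or_handle for r in self.roles)

admin = Role('admin')
user = User(roles=[admin])
print(user.has_role('admin'))         # True: lookup by handle string
print(user.has_role(admin))           # True: lookup by Role instance
print(user.has_role(Role('editor')))  # False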
ronhanson/python-tbx
fabfile/git.py
push
def push(remote='origin', branch='master'):
    """git push commit"""
    print(cyan("Pushing changes to repo ( %s / %s)..." % (remote, branch)))
    local("git push %s %s" % (remote, branch))
python
def push(remote='origin', branch='master'):
    """git push commit"""
    print(cyan("Pushing changes to repo ( %s / %s)..." % (remote, branch)))
    local("git push %s %s" % (remote, branch))
[ "def", "push", "(", "remote", "=", "'origin'", ",", "branch", "=", "'master'", ")", ":", "print", "(", "cyan", "(", "\"Pulling changes from repo ( %s / %s)...\"", "%", "(", "remote", ",", "branch", ")", ")", ")", "local", "(", "\"git push %s %s\"", "%", "(", "remote", ",", "branch", ")", ")" ]
git push commit
[ "git", "push", "commit" ]
87f72ae0cadecafbcd144f1e930181fba77f6b83
https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/fabfile/git.py#L7-L10
train
ronhanson/python-tbx
fabfile/git.py
pull
def pull(remote='origin', branch='master'):
    """git pull commit"""
    print(cyan("Pulling changes from repo ( %s / %s)..." % (remote, branch)))
    local("git pull %s %s" % (remote, branch))
python
def pull(remote='origin', branch='master'):
    """git pull commit"""
    print(cyan("Pulling changes from repo ( %s / %s)..." % (remote, branch)))
    local("git pull %s %s" % (remote, branch))
[ "def", "pull", "(", "remote", "=", "'origin'", ",", "branch", "=", "'master'", ")", ":", "print", "(", "cyan", "(", "\"Pulling changes from repo ( %s / %s)...\"", "%", "(", "remote", ",", "branch", ")", ")", ")", "local", "(", "\"git pull %s %s\"", "%", "(", "remote", ",", "branch", ")", ")" ]
git pull commit
[ "git", "pull", "commit" ]
87f72ae0cadecafbcd144f1e930181fba77f6b83
https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/fabfile/git.py#L14-L17
train
ronhanson/python-tbx
fabfile/git.py
sync
def sync(remote='origin', branch='master'):
    """git pull and push commit"""
    pull(remote, branch)
    push(remote, branch)
    print(cyan("Git Synced!"))
python
def sync(remote='origin', branch='master'):
    """git pull and push commit"""
    pull(remote, branch)
    push(remote, branch)
    print(cyan("Git Synced!"))
[ "def", "sync", "(", "remote", "=", "'origin'", ",", "branch", "=", "'master'", ")", ":", "pull", "(", "branch", ",", "remote", ")", "push", "(", "branch", ",", "remote", ")", "print", "(", "cyan", "(", "\"Git Synced!\"", ")", ")" ]
git pull and push commit
[ "git", "pull", "and", "push", "commit" ]
87f72ae0cadecafbcd144f1e930181fba77f6b83
https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/fabfile/git.py#L21-L25
train
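The three records above are Fabric 1.x tasks (local and cyan are Fabric helpers), normally invoked from the command line. As a sketch, a rough plain-Python equivalent of push would pass the git arguments as a list so unusual remote or branch names are never interpreted by a shell; this is an illustrative rewrite, not the library's code.

import subprocess

def push(remote='origin', branch='master'):
    # Illustrative equivalent of the Fabric task above.
    print('Pushing changes to repo (%s/%s)...' % (remote, branch))
    # A list of arguments avoids shell interpolation entirely.
    subprocess.run(['git', 'push', remote, branch], check=True)

push('origin', 'master')  # runs: git push origin master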
ZEDGR/pychal
challonge/attachments.py
update
def update(tournament, match, attachment, **params):
    """Update the attributes of a match attachment."""
    api.fetch(
        "PUT",
        "tournaments/%s/matches/%s/attachments/%s" % (tournament, match, attachment),
        "match_attachment",
        **params)
python
def update(tournament, match, attachment, **params):
    """Update the attributes of a match attachment."""
    api.fetch(
        "PUT",
        "tournaments/%s/matches/%s/attachments/%s" % (tournament, match, attachment),
        "match_attachment",
        **params)
[ "def", "update", "(", "tournament", ",", "match", ",", "attachment", ",", "*", "*", "params", ")", ":", "api", ".", "fetch", "(", "\"PUT\"", ",", "\"tournaments/%s/matches/%s/attachments/%s\"", "%", "(", "tournament", ",", "match", ",", "attachment", ")", ",", "\"match_attachment\"", ",", "*", "*", "params", ")" ]
Update the attributes of a match attachment.
[ "Update", "the", "attributes", "of", "a", "match", "attachment", "." ]
3600fa9e0557a2a14eb1ad0c0711d28dad3693d7
https://github.com/ZEDGR/pychal/blob/3600fa9e0557a2a14eb1ad0c0711d28dad3693d7/challonge/attachments.py#L27-L33
train
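A hedged usage sketch for the attachment update above, assuming pychal keeps pychallonge's set_credentials entry point; the tournament, match, and attachment identifiers and the description field are all illustrative.

import challonge

# Credentials are read by every subsequent API call (assumed API).
challonge.set_credentials('your_username', 'your_api_key')

# PUT tournaments/mytourney/matches/123/attachments/777 with the given
# match_attachment parameters.
challonge.attachments.update(
    'mytourney', 123, 777,
    description='Updated bracket notes',
)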
MacHu-GWU/sqlalchemy_mate-project
sqlalchemy_mate/crud/selecting.py
count_row
def count_row(engine, table):
    """
    Return number of rows in a table.

    Example::

        >>> count_row(engine, table_user)
        3

    **中文文档**

    返回一个表中的行数。
    """
    return engine.execute(select([func.count()]).select_from(table)).fetchone()[0]
python
def count_row(engine, table):
    """
    Return number of rows in a table.

    Example::

        >>> count_row(engine, table_user)
        3

    **中文文档**

    返回一个表中的行数。
    """
    return engine.execute(select([func.count()]).select_from(table)).fetchone()[0]
[ "def", "count_row", "(", "engine", ",", "table", ")", ":", "return", "engine", ".", "execute", "(", "select", "(", "[", "func", ".", "count", "(", ")", "]", ")", ".", "select_from", "(", "table", ")", ")", ".", "fetchone", "(", ")", "[", "0", "]" ]
Return number of rows in a table.

Example::

    >>> count_row(engine, table_user)
    3

**中文文档**

返回一个表中的行数。
[ "Return", "number", "of", "rows", "in", "a", "table", "." ]
946754744c8870f083fd7b4339fca15d1d6128b2
https://github.com/MacHu-GWU/sqlalchemy_mate-project/blob/946754744c8870f083fd7b4339fca15d1d6128b2/sqlalchemy_mate/crud/selecting.py#L10-L23
train
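A self-contained sketch of count_row against an in-memory SQLite database, written in the SQLAlchemy 1.x style the function itself uses (select([...]), engine.execute); the table definition is illustrative.

from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String
from sqlalchemy_mate.crud.selecting import count_row

engine = create_engine('sqlite:///:memory:')
metadata = MetaData()
table_user = Table(
    'users', metadata,
    Column('id', Integer, primary_key=True),
    Column('name', String(64)),
)
metadata.create_all(engine)
engine.execute(table_user.insert(), [{'name': 'alice'}, {'name': 'bob'}])

print(count_row(engine, table_user))  # 2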
projectshift/shift-boiler
boiler/user/util/oauth_providers.py
OauthProviders.get_providers
def get_providers(self):
    """
    Get OAuth providers
    Returns a dictionary of oauth applications ready to be registered
    with flask oauth extension at application bootstrap.
    """
    if self.providers:
        return self.providers

    providers = dict()
    for provider in self.config:
        configurator = provider.lower() + '_config'
        if not hasattr(self, configurator):
            err = 'Provider [{}] not recognized'.format(provider)
            raise ValueError(err)

        provider_config = self.config[provider]
        configurator = getattr(self, configurator)
        providers[provider] = configurator(
            id=provider_config.get('id'),
            secret=provider_config.get('secret'),
            scope=provider_config.get('scope'),
            offline=provider_config.get('offline')
        )

    self.providers = providers
    return self.providers
python
def get_providers(self):
    """
    Get OAuth providers
    Returns a dictionary of oauth applications ready to be registered
    with flask oauth extension at application bootstrap.
    """
    if self.providers:
        return self.providers

    providers = dict()
    for provider in self.config:
        configurator = provider.lower() + '_config'
        if not hasattr(self, configurator):
            err = 'Provider [{}] not recognized'.format(provider)
            raise ValueError(err)

        provider_config = self.config[provider]
        configurator = getattr(self, configurator)
        providers[provider] = configurator(
            id=provider_config.get('id'),
            secret=provider_config.get('secret'),
            scope=provider_config.get('scope'),
            offline=provider_config.get('offline')
        )

    self.providers = providers
    return self.providers
[ "def", "get_providers", "(", "self", ")", ":", "if", "self", ".", "providers", ":", "return", "self", ".", "providers", "providers", "=", "dict", "(", ")", "for", "provider", "in", "self", ".", "config", ":", "configurator", "=", "provider", ".", "lower", "(", ")", "+", "'_config'", "if", "not", "hasattr", "(", "self", ",", "configurator", ")", ":", "err", "=", "'Provider [{}] not recognized'", ".", "format", "(", "provider", ")", "raise", "ValueError", "(", "err", ")", "provider_config", "=", "self", ".", "config", "[", "provider", "]", "configurator", "=", "getattr", "(", "self", ",", "configurator", ")", "providers", "[", "provider", "]", "=", "configurator", "(", "id", "=", "provider_config", ".", "get", "(", "'id'", ")", ",", "secret", "=", "provider_config", ".", "get", "(", "'secret'", ")", ",", "scope", "=", "provider_config", ".", "get", "(", "'scope'", ")", ",", "offline", "=", "provider_config", ".", "get", "(", "'offline'", ")", ")", "self", ".", "providers", "=", "providers", "return", "self", ".", "providers" ]
Get OAuth providers Returns a dictionary of oauth applications ready to be registered with flask oauth extension at application bootstrap.
[ "Get", "OAuth", "providers", "Returns", "a", "dictionary", "of", "oauth", "applications", "ready", "to", "be", "registered", "with", "flask", "oauth", "extension", "at", "application", "bootstrap", "." ]
8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/util/oauth_providers.py#L21-L47
train
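A sketch of the configuration shape get_providers() consumes, using the two provider names whose configurators appear later in this section; the constructor signature and credential values are assumptions.

# Each top-level key must match a <provider>_config method on the class.
config = {
    'vkontakte': dict(id='vk-app-id', secret='vk-app-secret', offline=True),
    'instagram': dict(id='ig-client-id', secret='ig-client-secret'),
}

oauth_providers = OauthProviders(config)     # constructor signature assumed
providers = oauth_providers.get_providers()

# A second call returns the dict cached on the first call.
assert providers is oauth_providers.get_providers()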
projectshift/shift-boiler
boiler/user/util/oauth_providers.py
OauthProviders.token_getter
def token_getter(provider, token=None):
    """ Generic token getter for all the providers """
    session_key = provider + '_token'
    if token is None:
        token = session.get(session_key)
    return token
python
def token_getter(provider, token=None):
    """ Generic token getter for all the providers """
    session_key = provider + '_token'
    if token is None:
        token = session.get(session_key)
    return token
[ "def", "token_getter", "(", "provider", ",", "token", "=", "None", ")", ":", "session_key", "=", "provider", "+", "'_token'", "if", "token", "is", "None", ":", "token", "=", "session", ".", "get", "(", "session_key", ")", "return", "token" ]
Generic token getter for all the providers
[ "Generic", "token", "getter", "for", "all", "the", "providers" ]
8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/util/oauth_providers.py#L50-L55
train
projectshift/shift-boiler
boiler/user/util/oauth_providers.py
OauthProviders.register_token_getter
def register_token_getter(self, provider):
    """ Register callback to retrieve token from session """
    app = oauth.remote_apps[provider]
    decorator = getattr(app, 'tokengetter')

    def getter(token=None):
        return self.token_getter(provider, token)

    decorator(getter)
python
def register_token_getter(self, provider):
    """ Register callback to retrieve token from session """
    app = oauth.remote_apps[provider]
    decorator = getattr(app, 'tokengetter')

    def getter(token=None):
        return self.token_getter(provider, token)

    decorator(getter)
[ "def", "register_token_getter", "(", "self", ",", "provider", ")", ":", "app", "=", "oauth", ".", "remote_apps", "[", "provider", "]", "decorator", "=", "getattr", "(", "app", ",", "'tokengetter'", ")", "def", "getter", "(", "token", "=", "None", ")", ":", "return", "self", ".", "token_getter", "(", "provider", ",", "token", ")", "decorator", "(", "getter", ")" ]
Register callback to retrieve token from session
[ "Register", "callback", "to", "retrieve", "token", "from", "session" ]
8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/util/oauth_providers.py#L57-L65
train
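The session-key convention behind token_getter is '<provider>_token'. A runnable stand-in (a plain dict instead of flask.session) shows the lookup behaviour:

# Plain dict standing in for flask.session.
session = {'vkontakte_token': ('access-token', '')}

def token_getter(provider, token=None):
    if token is None:
        token = session.get(provider + '_token')
    return token

print(token_getter('vkontakte'))           # ('access-token', '')
print(token_getter('vkontakte', 'given'))  # an explicit token wins
print(token_getter('instagram'))           # None: nothing stored yet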
projectshift/shift-boiler
boiler/user/util/oauth_providers.py
OauthProviders.vkontakte_config
def vkontakte_config(self, id, secret, scope=None, offline=False, **_):
    """ Get config dictionary for vkontakte oauth """
    if scope is None:
        scope = 'email,offline'
    if offline:
        scope += ',offline'
    token_params = dict(scope=scope)
    config = dict(
        request_token_url=None,
        access_token_url='https://oauth.vk.com/access_token',
        authorize_url='https://oauth.vk.com/authorize',
        base_url='https://api.vk.com/method/',
        consumer_key=id,
        consumer_secret=secret,
        request_token_params=token_params
    )
    return config
python
def vkontakte_config(self, id, secret, scope=None, offline=False, **_):
    """ Get config dictionary for vkontakte oauth """
    if scope is None:
        scope = 'email,offline'
    if offline:
        scope += ',offline'
    token_params = dict(scope=scope)
    config = dict(
        request_token_url=None,
        access_token_url='https://oauth.vk.com/access_token',
        authorize_url='https://oauth.vk.com/authorize',
        base_url='https://api.vk.com/method/',
        consumer_key=id,
        consumer_secret=secret,
        request_token_params=token_params
    )
    return config
[ "def", "vkontakte_config", "(", "self", ",", "id", ",", "secret", ",", "scope", "=", "None", ",", "offline", "=", "False", ",", "*", "*", "_", ")", ":", "if", "scope", "is", "None", ":", "scope", "=", "'email,offline'", "if", "offline", ":", "scope", "+=", "',offline'", "token_params", "=", "dict", "(", "scope", "=", "scope", ")", "config", "=", "dict", "(", "request_token_url", "=", "None", ",", "access_token_url", "=", "'https://oauth.vk.com/access_token'", ",", "authorize_url", "=", "'https://oauth.vk.com/authorize'", ",", "base_url", "=", "'https://api.vk.com/method/'", ",", "consumer_key", "=", "id", ",", "consumer_secret", "=", "secret", ",", "request_token_params", "=", "token_params", ")", "return", "config" ]
Get config dictionary for vkontakte oauth
[ "Get", "config", "dictionary", "for", "vkontakte", "oauth" ]
8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/util/oauth_providers.py#L87-L102
train
projectshift/shift-boiler
boiler/user/util/oauth_providers.py
OauthProviders.instagram_config
def instagram_config(self, id, secret, scope=None, **_):
    """ Get config dictionary for instagram oauth """
    scope = scope if scope else 'basic'
    token_params = dict(scope=scope)
    config = dict(
        # request_token_url=None,
        access_token_url='/oauth/access_token/',
        authorize_url='/oauth/authorize/',
        base_url='https://api.instagram.com/',
        consumer_key=id,
        consumer_secret=secret,
        request_token_params=token_params
    )
    return config
python
def instagram_config(self, id, secret, scope=None, **_):
    """ Get config dictionary for instagram oauth """
    scope = scope if scope else 'basic'
    token_params = dict(scope=scope)
    config = dict(
        # request_token_url=None,
        access_token_url='/oauth/access_token/',
        authorize_url='/oauth/authorize/',
        base_url='https://api.instagram.com/',
        consumer_key=id,
        consumer_secret=secret,
        request_token_params=token_params
    )
    return config
[ "def", "instagram_config", "(", "self", ",", "id", ",", "secret", ",", "scope", "=", "None", ",", "*", "*", "_", ")", ":", "scope", "=", "scope", "if", "scope", "else", "'basic'", "token_params", "=", "dict", "(", "scope", "=", "scope", ")", "config", "=", "dict", "(", "# request_token_url=None,", "access_token_url", "=", "'/oauth/access_token/'", ",", "authorize_url", "=", "'/oauth/authorize/'", ",", "base_url", "=", "'https://api.instagram.com/'", ",", "consumer_key", "=", "id", ",", "consumer_secret", "=", "secret", ",", "request_token_params", "=", "token_params", ")", "return", "config" ]
Get config dictionary for instagram oauth
[ "Get", "config", "dictionary", "for", "instagram", "oauth" ]
8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/util/oauth_providers.py#L123-L137
train
Kortemme-Lab/klab
klab/bio/relatrix.py
ResidueRelatrix.convert
def convert(self, chain_id, residue_id, from_scheme, to_scheme):
    '''The API conversion function. This converts between the different residue ID schemes.'''

    # At the cost of three function calls, we ignore the case of the scheme parameters to be more user-friendly.
    from_scheme = from_scheme.lower()
    to_scheme = to_scheme.lower()
    assert(from_scheme in ResidueRelatrix.schemes)
    assert(to_scheme in ResidueRelatrix.schemes)
    return self._convert(chain_id, residue_id, from_scheme, to_scheme)
python
def convert(self, chain_id, residue_id, from_scheme, to_scheme):
    '''The API conversion function. This converts between the different residue ID schemes.'''

    # At the cost of three function calls, we ignore the case of the scheme parameters to be more user-friendly.
    from_scheme = from_scheme.lower()
    to_scheme = to_scheme.lower()
    assert(from_scheme in ResidueRelatrix.schemes)
    assert(to_scheme in ResidueRelatrix.schemes)
    return self._convert(chain_id, residue_id, from_scheme, to_scheme)
[ "def", "convert", "(", "self", ",", "chain_id", ",", "residue_id", ",", "from_scheme", ",", "to_scheme", ")", ":", "# At the cost of three function calls, we ignore the case of the scheme parameters to be more user-friendly.", "from_scheme", "=", "from_scheme", ".", "lower", "(", ")", "to_scheme", "=", "to_scheme", ".", "lower", "(", ")", "assert", "(", "from_scheme", "in", "ResidueRelatrix", ".", "schemes", ")", "assert", "(", "to_scheme", "in", "ResidueRelatrix", ".", "schemes", ")", "return", "self", ".", "_convert", "(", "chain_id", ",", "residue_id", ",", "from_scheme", ",", "to_scheme", ")" ]
The API conversion function. This converts between the different residue ID schemes.
[ "The", "API", "conversion", "function", ".", "This", "converts", "between", "the", "different", "residue", "ID", "schemes", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/relatrix.py#L143-L151
train
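A hedged usage sketch of the conversion API. The scheme names come from _convert in the next record ('rosetta', 'atom', 'seqres', 'uniparc'); the constructor arguments, chain ID, and residue ID are illustrative.

relatrix = ResidueRelatrix('1A2C')              # constructor args assumed
atom_id = relatrix.convert('A', 5, 'rosetta', 'atom')

# Scheme names are case-insensitive thanks to the lower() calls above.
assert relatrix.convert('A', atom_id, 'ATOM', 'Rosetta') == 5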
Kortemme-Lab/klab
klab/bio/relatrix.py
ResidueRelatrix._convert
def _convert(self, chain_id, residue_id, from_scheme, to_scheme):
    '''The actual 'private' conversion function.'''

    # There are 12 valid combinations but rather than write them all out explicitly,
    # we will use recursion, sacrificing speed for brevity
    if from_scheme == 'rosetta':
        atom_id = self.rosetta_to_atom_sequence_maps.get(chain_id, {})[residue_id]
        if to_scheme == 'atom':
            return atom_id
        else:
            return self._convert(chain_id, atom_id, 'atom', to_scheme)
    if from_scheme == 'atom':
        if to_scheme == 'rosetta':
            return self.atom_to_rosetta_sequence_maps.get(chain_id, {})[residue_id]
        else:
            seqres_id = self.atom_to_seqres_sequence_maps.get(chain_id, {})[residue_id]
            if to_scheme == 'seqres':
                return seqres_id
            return self.convert(chain_id, seqres_id, 'seqres', to_scheme)
    if from_scheme == 'seqres':
        if to_scheme == 'uniparc':
            return self.seqres_to_uniparc_sequence_maps.get(chain_id, {})[residue_id]
        else:
            atom_id = self.seqres_to_atom_sequence_maps.get(chain_id, {})[residue_id]
            if to_scheme == 'atom':
                return atom_id
            return self.convert(chain_id, atom_id, 'atom', to_scheme)
    if from_scheme == 'uniparc':
        seqres_id = self.uniparc_to_seqres_sequence_maps.get(chain_id, {})[residue_id]
        if to_scheme == 'seqres':
            return seqres_id
        else:
            return self._convert(chain_id, seqres_id, 'seqres', to_scheme)
    raise Exception("We should never reach this line.")
python
def _convert(self, chain_id, residue_id, from_scheme, to_scheme):
    '''The actual 'private' conversion function.'''

    # There are 12 valid combinations but rather than write them all out explicitly,
    # we will use recursion, sacrificing speed for brevity
    if from_scheme == 'rosetta':
        atom_id = self.rosetta_to_atom_sequence_maps.get(chain_id, {})[residue_id]
        if to_scheme == 'atom':
            return atom_id
        else:
            return self._convert(chain_id, atom_id, 'atom', to_scheme)
    if from_scheme == 'atom':
        if to_scheme == 'rosetta':
            return self.atom_to_rosetta_sequence_maps.get(chain_id, {})[residue_id]
        else:
            seqres_id = self.atom_to_seqres_sequence_maps.get(chain_id, {})[residue_id]
            if to_scheme == 'seqres':
                return seqres_id
            return self.convert(chain_id, seqres_id, 'seqres', to_scheme)
    if from_scheme == 'seqres':
        if to_scheme == 'uniparc':
            return self.seqres_to_uniparc_sequence_maps.get(chain_id, {})[residue_id]
        else:
            atom_id = self.seqres_to_atom_sequence_maps.get(chain_id, {})[residue_id]
            if to_scheme == 'atom':
                return atom_id
            return self.convert(chain_id, atom_id, 'atom', to_scheme)
    if from_scheme == 'uniparc':
        seqres_id = self.uniparc_to_seqres_sequence_maps.get(chain_id, {})[residue_id]
        if to_scheme == 'seqres':
            return seqres_id
        else:
            return self._convert(chain_id, seqres_id, 'seqres', to_scheme)
    raise Exception("We should never reach this line.")
[ "def", "_convert", "(", "self", ",", "chain_id", ",", "residue_id", ",", "from_scheme", ",", "to_scheme", ")", ":", "# There are 12 valid combinations but rather than write them all out explicitly, we will use recursion, sacrificing speed for brevity", "if", "from_scheme", "==", "'rosetta'", ":", "atom_id", "=", "self", ".", "rosetta_to_atom_sequence_maps", ".", "get", "(", "chain_id", ",", "{", "}", ")", "[", "residue_id", "]", "if", "to_scheme", "==", "'atom'", ":", "return", "atom_id", "else", ":", "return", "self", ".", "_convert", "(", "chain_id", ",", "atom_id", ",", "'atom'", ",", "to_scheme", ")", "if", "from_scheme", "==", "'atom'", ":", "if", "to_scheme", "==", "'rosetta'", ":", "return", "self", ".", "atom_to_rosetta_sequence_maps", ".", "get", "(", "chain_id", ",", "{", "}", ")", "[", "residue_id", "]", "else", ":", "seqres_id", "=", "self", ".", "atom_to_seqres_sequence_maps", ".", "get", "(", "chain_id", ",", "{", "}", ")", "[", "residue_id", "]", "if", "to_scheme", "==", "'seqres'", ":", "return", "seqres_id", "return", "self", ".", "convert", "(", "chain_id", ",", "seqres_id", ",", "'seqres'", ",", "to_scheme", ")", "if", "from_scheme", "==", "'seqres'", ":", "if", "to_scheme", "==", "'uniparc'", ":", "return", "self", ".", "seqres_to_uniparc_sequence_maps", ".", "get", "(", "chain_id", ",", "{", "}", ")", "[", "residue_id", "]", "else", ":", "atom_id", "=", "self", ".", "seqres_to_atom_sequence_maps", ".", "get", "(", "chain_id", ",", "{", "}", ")", "[", "residue_id", "]", "if", "to_scheme", "==", "'atom'", ":", "return", "atom_id", "return", "self", ".", "convert", "(", "chain_id", ",", "atom_id", ",", "'atom'", ",", "to_scheme", ")", "if", "from_scheme", "==", "'uniparc'", ":", "seqres_id", "=", "self", ".", "uniparc_to_seqres_sequence_maps", ".", "get", "(", "chain_id", ",", "{", "}", ")", "[", "residue_id", "]", "if", "to_scheme", "==", "'seqres'", ":", "return", "seqres_id", "else", ":", "return", "self", ".", "_convert", "(", "chain_id", ",", "seqres_id", ",", "'seqres'", ",", "to_scheme", ")", "raise", "Exception", "(", "\"We should never reach this line.\"", ")" ]
The actual 'private' conversion function.
[ "The", "actual", "private", "conversion", "function", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/relatrix.py#L153-L186
train
Kortemme-Lab/klab
klab/bio/relatrix.py
ResidueRelatrix.convert_from_rosetta
def convert_from_rosetta(self, residue_id, to_scheme):
    '''A simpler conversion function to convert from Rosetta numbering without requiring the chain identifier.'''
    assert(type(residue_id) == types.IntType)

    # Find the chain_id associated with the residue_id
    # Scan *all* sequences without breaking out to make sure that we do not have any duplicate maps
    chain_id = None
    for c, sequence in self.rosetta_sequences.iteritems():
        for id, r in sequence:
            if r.ResidueID == residue_id:
                assert(chain_id == None)
                chain_id = c

    if chain_id:
        return self.convert(chain_id, residue_id, 'rosetta', to_scheme)
    else:
        return None
python
def convert_from_rosetta(self, residue_id, to_scheme):
    '''A simpler conversion function to convert from Rosetta numbering without requiring the chain identifier.'''
    assert(type(residue_id) == types.IntType)

    # Find the chain_id associated with the residue_id
    # Scan *all* sequences without breaking out to make sure that we do not have any duplicate maps
    chain_id = None
    for c, sequence in self.rosetta_sequences.iteritems():
        for id, r in sequence:
            if r.ResidueID == residue_id:
                assert(chain_id == None)
                chain_id = c

    if chain_id:
        return self.convert(chain_id, residue_id, 'rosetta', to_scheme)
    else:
        return None
[ "def", "convert_from_rosetta", "(", "self", ",", "residue_id", ",", "to_scheme", ")", ":", "assert", "(", "type", "(", "residue_id", ")", "==", "types", ".", "IntType", ")", "# Find the chain_id associated with the residue_id", "# Scan *all* sequences without breaking out to make sure that we do not have any duplicate maps", "chain_id", "=", "None", "for", "c", ",", "sequence", "in", "self", ".", "rosetta_sequences", ".", "iteritems", "(", ")", ":", "for", "id", ",", "r", "in", "sequence", ":", "if", "r", ".", "ResidueID", "==", "residue_id", ":", "assert", "(", "chain_id", "==", "None", ")", "chain_id", "=", "c", "if", "chain_id", ":", "return", "self", ".", "convert", "(", "chain_id", ",", "residue_id", ",", "'rosetta'", ",", "to_scheme", ")", "else", ":", "return", "None" ]
A simpler conversion function to convert from Rosetta numbering without requiring the chain identifier.
[ "A", "simpler", "conversion", "function", "to", "convert", "from", "Rosetta", "numbering", "without", "requiring", "the", "chain", "identifier", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/relatrix.py#L189-L206
train
Kortemme-Lab/klab
klab/bio/relatrix.py
ResidueRelatrix._validate
def _validate(self):
    '''Validate the mappings.'''
    self._validate_fasta_vs_seqres()
    self._validate_mapping_signature()
    self._validate_id_types()
    self._validate_residue_types()
python
def _validate(self):
    '''Validate the mappings.'''
    self._validate_fasta_vs_seqres()
    self._validate_mapping_signature()
    self._validate_id_types()
    self._validate_residue_types()
[ "def", "_validate", "(", "self", ")", ":", "self", ".", "_validate_fasta_vs_seqres", "(", ")", "self", ".", "_validate_mapping_signature", "(", ")", "self", ".", "_validate_id_types", "(", ")", "self", ".", "_validate_residue_types", "(", ")" ]
Validate the mappings.
[ "Validate", "the", "mappings", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/relatrix.py#L210-L216
train
Kortemme-Lab/klab
klab/bio/relatrix.py
ResidueRelatrix._validate_id_types
def _validate_id_types(self):
    '''Check that the ID types are integers for Rosetta, SEQRES, and UniParc sequences and 6-character PDB IDs for the ATOM sequences.'''
    for sequences in [self.uniparc_sequences, self.fasta_sequences, self.seqres_sequences, self.rosetta_sequences]:
        for chain_id, sequence in sequences.iteritems():
            sequence_id_types = set(map(type, sequence.ids()))
            if sequence_id_types:
                assert(len(sequence_id_types) == 1)
                assert(sequence_id_types.pop() == types.IntType)

    for chain_id, sequence in self.atom_sequences.iteritems():
        sequence_id_types = set(map(type, sequence.ids()))
        assert(len(sequence_id_types) == 1)
        sequence_id_type = sequence_id_types.pop()
        assert(sequence_id_type == types.StringType or sequence_id_type == types.UnicodeType)
python
def _validate_id_types(self):
    '''Check that the ID types are integers for Rosetta, SEQRES, and UniParc sequences and 6-character PDB IDs for the ATOM sequences.'''
    for sequences in [self.uniparc_sequences, self.fasta_sequences, self.seqres_sequences, self.rosetta_sequences]:
        for chain_id, sequence in sequences.iteritems():
            sequence_id_types = set(map(type, sequence.ids()))
            if sequence_id_types:
                assert(len(sequence_id_types) == 1)
                assert(sequence_id_types.pop() == types.IntType)

    for chain_id, sequence in self.atom_sequences.iteritems():
        sequence_id_types = set(map(type, sequence.ids()))
        assert(len(sequence_id_types) == 1)
        sequence_id_type = sequence_id_types.pop()
        assert(sequence_id_type == types.StringType or sequence_id_type == types.UnicodeType)
[ "def", "_validate_id_types", "(", "self", ")", ":", "for", "sequences", "in", "[", "self", ".", "uniparc_sequences", ",", "self", ".", "fasta_sequences", ",", "self", ".", "seqres_sequences", ",", "self", ".", "rosetta_sequences", "]", ":", "for", "chain_id", ",", "sequence", "in", "sequences", ".", "iteritems", "(", ")", ":", "sequence_id_types", "=", "set", "(", "map", "(", "type", ",", "sequence", ".", "ids", "(", ")", ")", ")", "if", "sequence_id_types", ":", "assert", "(", "len", "(", "sequence_id_types", ")", "==", "1", ")", "assert", "(", "sequence_id_types", ".", "pop", "(", ")", "==", "types", ".", "IntType", ")", "for", "chain_id", ",", "sequence", "in", "self", ".", "atom_sequences", ".", "iteritems", "(", ")", ":", "sequence_id_types", "=", "set", "(", "map", "(", "type", ",", "sequence", ".", "ids", "(", ")", ")", ")", "assert", "(", "len", "(", "sequence_id_types", ")", "==", "1", ")", "sequence_id_type", "=", "sequence_id_types", ".", "pop", "(", ")", "assert", "(", "sequence_id_type", "==", "types", ".", "StringType", "or", "sequence_id_type", "==", "types", ".", "UnicodeType", ")" ]
Check that the ID types are integers for Rosetta, SEQRES, and UniParc sequences and 6-character PDB IDs for the ATOM sequences.
[ "Check", "that", "the", "ID", "types", "are", "integers", "for", "Rosetta", "SEQRES", "and", "UniParc", "sequences", "and", "6", "-", "character", "PDB", "IDs", "for", "the", "ATOM", "sequences", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/relatrix.py#L303-L317
train
Kortemme-Lab/klab
klab/bio/relatrix.py
ResidueRelatrix._validate_residue_types
def _validate_residue_types(self):
    '''Make sure all the residue types map through translation.'''
    for chain_id, sequence_map in self.rosetta_to_atom_sequence_maps.iteritems():
        rosetta_sequence = self.rosetta_sequences[chain_id]
        atom_sequence = self.atom_sequences[chain_id]
        for rosetta_id, atom_id, _ in sequence_map:
            assert(rosetta_sequence[rosetta_id].ResidueAA == atom_sequence[atom_id].ResidueAA)

    for chain_id, sequence_map in self.atom_to_seqres_sequence_maps.iteritems():
        atom_sequence = self.atom_sequences[chain_id]
        seqres_sequence = self.seqres_sequences[chain_id]
        for atom_id, seqres_id, _ in sorted(sequence_map):
            assert(atom_sequence[atom_id].ResidueAA == seqres_sequence[seqres_id].ResidueAA)

    for chain_id, sequence_map in self.seqres_to_uniparc_sequence_maps.iteritems():
        if self.pdb_chain_to_uniparc_chain_mapping.get(chain_id):
            seqres_sequence = self.seqres_sequences[chain_id]
            uniparc_sequence = self.uniparc_sequences[self.pdb_chain_to_uniparc_chain_mapping[chain_id]]
            for seqres_id, uniparc_id_resid_pair, substitution_match in sequence_map:
                uniparc_id = uniparc_id_resid_pair[1]
                # Some of the matches may not be identical but all the '*' Clustal Omega matches should be identical
                if substitution_match and substitution_match.clustal == 1:
                    assert(seqres_sequence[seqres_id].ResidueAA == uniparc_sequence[uniparc_id].ResidueAA)
python
def _validate_residue_types(self):
    '''Make sure all the residue types map through translation.'''
    for chain_id, sequence_map in self.rosetta_to_atom_sequence_maps.iteritems():
        rosetta_sequence = self.rosetta_sequences[chain_id]
        atom_sequence = self.atom_sequences[chain_id]
        for rosetta_id, atom_id, _ in sequence_map:
            assert(rosetta_sequence[rosetta_id].ResidueAA == atom_sequence[atom_id].ResidueAA)

    for chain_id, sequence_map in self.atom_to_seqres_sequence_maps.iteritems():
        atom_sequence = self.atom_sequences[chain_id]
        seqres_sequence = self.seqres_sequences[chain_id]
        for atom_id, seqres_id, _ in sorted(sequence_map):
            assert(atom_sequence[atom_id].ResidueAA == seqres_sequence[seqres_id].ResidueAA)

    for chain_id, sequence_map in self.seqres_to_uniparc_sequence_maps.iteritems():
        if self.pdb_chain_to_uniparc_chain_mapping.get(chain_id):
            seqres_sequence = self.seqres_sequences[chain_id]
            uniparc_sequence = self.uniparc_sequences[self.pdb_chain_to_uniparc_chain_mapping[chain_id]]
            for seqres_id, uniparc_id_resid_pair, substitution_match in sequence_map:
                uniparc_id = uniparc_id_resid_pair[1]
                # Some of the matches may not be identical but all the '*' Clustal Omega matches should be identical
                if substitution_match and substitution_match.clustal == 1:
                    assert(seqres_sequence[seqres_id].ResidueAA == uniparc_sequence[uniparc_id].ResidueAA)
[ "def", "_validate_residue_types", "(", "self", ")", ":", "for", "chain_id", ",", "sequence_map", "in", "self", ".", "rosetta_to_atom_sequence_maps", ".", "iteritems", "(", ")", ":", "rosetta_sequence", "=", "self", ".", "rosetta_sequences", "[", "chain_id", "]", "atom_sequence", "=", "self", ".", "atom_sequences", "[", "chain_id", "]", "for", "rosetta_id", ",", "atom_id", ",", "_", "in", "sequence_map", ":", "assert", "(", "rosetta_sequence", "[", "rosetta_id", "]", ".", "ResidueAA", "==", "atom_sequence", "[", "atom_id", "]", ".", "ResidueAA", ")", "for", "chain_id", ",", "sequence_map", "in", "self", ".", "atom_to_seqres_sequence_maps", ".", "iteritems", "(", ")", ":", "atom_sequence", "=", "self", ".", "atom_sequences", "[", "chain_id", "]", "seqres_sequence", "=", "self", ".", "seqres_sequences", "[", "chain_id", "]", "for", "atom_id", ",", "seqres_id", ",", "_", "in", "sorted", "(", "sequence_map", ")", ":", "assert", "(", "atom_sequence", "[", "atom_id", "]", ".", "ResidueAA", "==", "seqres_sequence", "[", "seqres_id", "]", ".", "ResidueAA", ")", "for", "chain_id", ",", "sequence_map", "in", "self", ".", "seqres_to_uniparc_sequence_maps", ".", "iteritems", "(", ")", ":", "if", "self", ".", "pdb_chain_to_uniparc_chain_mapping", ".", "get", "(", "chain_id", ")", ":", "seqres_sequence", "=", "self", ".", "seqres_sequences", "[", "chain_id", "]", "uniparc_sequence", "=", "self", ".", "uniparc_sequences", "[", "self", ".", "pdb_chain_to_uniparc_chain_mapping", "[", "chain_id", "]", "]", "for", "seqres_id", ",", "uniparc_id_resid_pair", ",", "substitution_match", "in", "sequence_map", ":", "uniparc_id", "=", "uniparc_id_resid_pair", "[", "1", "]", "# Some of the matches may not be identical but all the '*' Clustal Omega matches should be identical", "if", "substitution_match", "and", "substitution_match", ".", "clustal", "==", "1", ":", "assert", "(", "seqres_sequence", "[", "seqres_id", "]", ".", "ResidueAA", "==", "uniparc_sequence", "[", "uniparc_id", "]", ".", "ResidueAA", ")" ]
Make sure all the residue types map through translation.
[ "Make", "sure", "all", "the", "residue", "types", "map", "through", "translation", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/relatrix.py#L320-L343
train
Kortemme-Lab/klab
klab/bio/relatrix.py
ResidueRelatrix._create_sequences
def _create_sequences(self):
    '''Get all of the Sequences - Rosetta, ATOM, SEQRES, FASTA, UniParc.'''

    # Create the Rosetta sequences and the maps from the Rosetta sequences to the ATOM sequences
    try:
        self.pdb.construct_pdb_to_rosetta_residue_map(self.rosetta_scripts_path, rosetta_database_path = self.rosetta_database_path, cache_dir = self.cache_dir)
    except PDBMissingMainchainAtomsException:
        self.pdb_to_rosetta_residue_map_error = True

    # Get all the Sequences
    if self.pdb_id not in do_not_use_the_sequence_aligner:
        self.uniparc_sequences = self.PDB_UniParc_SA.uniparc_sequences
    else:
        self.uniparc_sequences = self.sifts.get_uniparc_sequences()
    self.fasta_sequences = self.FASTA.get_sequences(self.pdb_id)
    self.seqres_sequences = self.pdb.seqres_sequences
    self.atom_sequences = self.pdb.atom_sequences
    if self.pdb_to_rosetta_residue_map_error:
        self.rosetta_sequences = {}
        for c in self.atom_sequences.keys():
            self.rosetta_sequences[c] = Sequence()
    else:
        self.rosetta_sequences = self.pdb.rosetta_sequences

    # Update the chain types for the UniParc sequences
    uniparc_pdb_chain_mapping = {}
    if self.pdb_id not in do_not_use_the_sequence_aligner:
        for pdb_chain_id, matches in self.PDB_UniParc_SA.clustal_matches.iteritems():
            if matches:
                # we are not guaranteed to have a match e.g. the short chain J in 1A2C, chimeras, etc.
                uniparc_chain_id = matches.keys()[0]
                assert(len(matches) == 1)
                uniparc_pdb_chain_mapping[uniparc_chain_id] = uniparc_pdb_chain_mapping.get(uniparc_chain_id, [])
                uniparc_pdb_chain_mapping[uniparc_chain_id].append(pdb_chain_id)
    else:
        for pdb_chain_id, uniparc_chain_ids in self.sifts.get_pdb_chain_to_uniparc_id_map().iteritems():
            for uniparc_chain_id in uniparc_chain_ids:
                uniparc_pdb_chain_mapping[uniparc_chain_id] = uniparc_pdb_chain_mapping.get(uniparc_chain_id, [])
                uniparc_pdb_chain_mapping[uniparc_chain_id].append(pdb_chain_id)

    for uniparc_chain_id, pdb_chain_ids in uniparc_pdb_chain_mapping.iteritems():
        sequence_type = set([self.seqres_sequences[p].sequence_type for p in pdb_chain_ids])
        assert(len(sequence_type) == 1)
        sequence_type = sequence_type.pop()
        assert(self.uniparc_sequences[uniparc_chain_id].sequence_type == None)
        self.uniparc_sequences[uniparc_chain_id].set_type(sequence_type)
        for p in pdb_chain_ids:
            self.pdb_chain_to_uniparc_chain_mapping[p] = uniparc_chain_id

    # Update the chain types for the FASTA sequences
    for chain_id, sequence in self.seqres_sequences.iteritems():
        self.fasta_sequences[chain_id].set_type(sequence.sequence_type)
python
def _create_sequences(self):
    '''Get all of the Sequences - Rosetta, ATOM, SEQRES, FASTA, UniParc.'''

    # Create the Rosetta sequences and the maps from the Rosetta sequences to the ATOM sequences
    try:
        self.pdb.construct_pdb_to_rosetta_residue_map(self.rosetta_scripts_path, rosetta_database_path = self.rosetta_database_path, cache_dir = self.cache_dir)
    except PDBMissingMainchainAtomsException:
        self.pdb_to_rosetta_residue_map_error = True

    # Get all the Sequences
    if self.pdb_id not in do_not_use_the_sequence_aligner:
        self.uniparc_sequences = self.PDB_UniParc_SA.uniparc_sequences
    else:
        self.uniparc_sequences = self.sifts.get_uniparc_sequences()
    self.fasta_sequences = self.FASTA.get_sequences(self.pdb_id)
    self.seqres_sequences = self.pdb.seqres_sequences
    self.atom_sequences = self.pdb.atom_sequences
    if self.pdb_to_rosetta_residue_map_error:
        self.rosetta_sequences = {}
        for c in self.atom_sequences.keys():
            self.rosetta_sequences[c] = Sequence()
    else:
        self.rosetta_sequences = self.pdb.rosetta_sequences

    # Update the chain types for the UniParc sequences
    uniparc_pdb_chain_mapping = {}
    if self.pdb_id not in do_not_use_the_sequence_aligner:
        for pdb_chain_id, matches in self.PDB_UniParc_SA.clustal_matches.iteritems():
            if matches:
                # we are not guaranteed to have a match e.g. the short chain J in 1A2C, chimeras, etc.
                uniparc_chain_id = matches.keys()[0]
                assert(len(matches) == 1)
                uniparc_pdb_chain_mapping[uniparc_chain_id] = uniparc_pdb_chain_mapping.get(uniparc_chain_id, [])
                uniparc_pdb_chain_mapping[uniparc_chain_id].append(pdb_chain_id)
    else:
        for pdb_chain_id, uniparc_chain_ids in self.sifts.get_pdb_chain_to_uniparc_id_map().iteritems():
            for uniparc_chain_id in uniparc_chain_ids:
                uniparc_pdb_chain_mapping[uniparc_chain_id] = uniparc_pdb_chain_mapping.get(uniparc_chain_id, [])
                uniparc_pdb_chain_mapping[uniparc_chain_id].append(pdb_chain_id)

    for uniparc_chain_id, pdb_chain_ids in uniparc_pdb_chain_mapping.iteritems():
        sequence_type = set([self.seqres_sequences[p].sequence_type for p in pdb_chain_ids])
        assert(len(sequence_type) == 1)
        sequence_type = sequence_type.pop()
        assert(self.uniparc_sequences[uniparc_chain_id].sequence_type == None)
        self.uniparc_sequences[uniparc_chain_id].set_type(sequence_type)
        for p in pdb_chain_ids:
            self.pdb_chain_to_uniparc_chain_mapping[p] = uniparc_chain_id

    # Update the chain types for the FASTA sequences
    for chain_id, sequence in self.seqres_sequences.iteritems():
        self.fasta_sequences[chain_id].set_type(sequence.sequence_type)
[ "def", "_create_sequences", "(", "self", ")", ":", "# Create the Rosetta sequences and the maps from the Rosetta sequences to the ATOM sequences", "try", ":", "self", ".", "pdb", ".", "construct_pdb_to_rosetta_residue_map", "(", "self", ".", "rosetta_scripts_path", ",", "rosetta_database_path", "=", "self", ".", "rosetta_database_path", ",", "cache_dir", "=", "self", ".", "cache_dir", ")", "except", "PDBMissingMainchainAtomsException", ":", "self", ".", "pdb_to_rosetta_residue_map_error", "=", "True", "# Get all the Sequences", "if", "self", ".", "pdb_id", "not", "in", "do_not_use_the_sequence_aligner", ":", "self", ".", "uniparc_sequences", "=", "self", ".", "PDB_UniParc_SA", ".", "uniparc_sequences", "else", ":", "self", ".", "uniparc_sequences", "=", "self", ".", "sifts", ".", "get_uniparc_sequences", "(", ")", "self", ".", "fasta_sequences", "=", "self", ".", "FASTA", ".", "get_sequences", "(", "self", ".", "pdb_id", ")", "self", ".", "seqres_sequences", "=", "self", ".", "pdb", ".", "seqres_sequences", "self", ".", "atom_sequences", "=", "self", ".", "pdb", ".", "atom_sequences", "if", "self", ".", "pdb_to_rosetta_residue_map_error", ":", "self", ".", "rosetta_sequences", "=", "{", "}", "for", "c", "in", "self", ".", "atom_sequences", ".", "keys", "(", ")", ":", "self", ".", "rosetta_sequences", "[", "c", "]", "=", "Sequence", "(", ")", "else", ":", "self", ".", "rosetta_sequences", "=", "self", ".", "pdb", ".", "rosetta_sequences", "# Update the chain types for the UniParc sequences", "uniparc_pdb_chain_mapping", "=", "{", "}", "if", "self", ".", "pdb_id", "not", "in", "do_not_use_the_sequence_aligner", ":", "for", "pdb_chain_id", ",", "matches", "in", "self", ".", "PDB_UniParc_SA", ".", "clustal_matches", ".", "iteritems", "(", ")", ":", "if", "matches", ":", "# we are not guaranteed to have a match e.g. 
the short chain J in 1A2C, chimeras, etc.", "uniparc_chain_id", "=", "matches", ".", "keys", "(", ")", "[", "0", "]", "assert", "(", "len", "(", "matches", ")", "==", "1", ")", "uniparc_pdb_chain_mapping", "[", "uniparc_chain_id", "]", "=", "uniparc_pdb_chain_mapping", ".", "get", "(", "uniparc_chain_id", ",", "[", "]", ")", "uniparc_pdb_chain_mapping", "[", "uniparc_chain_id", "]", ".", "append", "(", "pdb_chain_id", ")", "else", ":", "for", "pdb_chain_id", ",", "uniparc_chain_ids", "in", "self", ".", "sifts", ".", "get_pdb_chain_to_uniparc_id_map", "(", ")", ".", "iteritems", "(", ")", ":", "for", "uniparc_chain_id", "in", "uniparc_chain_ids", ":", "uniparc_pdb_chain_mapping", "[", "uniparc_chain_id", "]", "=", "uniparc_pdb_chain_mapping", ".", "get", "(", "uniparc_chain_id", ",", "[", "]", ")", "uniparc_pdb_chain_mapping", "[", "uniparc_chain_id", "]", ".", "append", "(", "pdb_chain_id", ")", "for", "uniparc_chain_id", ",", "pdb_chain_ids", "in", "uniparc_pdb_chain_mapping", ".", "iteritems", "(", ")", ":", "sequence_type", "=", "set", "(", "[", "self", ".", "seqres_sequences", "[", "p", "]", ".", "sequence_type", "for", "p", "in", "pdb_chain_ids", "]", ")", "assert", "(", "len", "(", "sequence_type", ")", "==", "1", ")", "sequence_type", "=", "sequence_type", ".", "pop", "(", ")", "assert", "(", "self", ".", "uniparc_sequences", "[", "uniparc_chain_id", "]", ".", "sequence_type", "==", "None", ")", "self", ".", "uniparc_sequences", "[", "uniparc_chain_id", "]", ".", "set_type", "(", "sequence_type", ")", "for", "p", "in", "pdb_chain_ids", ":", "self", ".", "pdb_chain_to_uniparc_chain_mapping", "[", "p", "]", "=", "uniparc_chain_id", "# Update the chain types for the FASTA sequences", "for", "chain_id", ",", "sequence", "in", "self", ".", "seqres_sequences", ".", "iteritems", "(", ")", ":", "self", ".", "fasta_sequences", "[", "chain_id", "]", ".", "set_type", "(", "sequence", ".", "sequence_type", ")" ]
Get all of the Sequences - Rosetta, ATOM, SEQRES, FASTA, UniParc.
[ "Get", "all", "of", "the", "Sequences", "-", "Rosetta", "ATOM", "SEQRES", "FASTA", "UniParc", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/relatrix.py#L505-L558
train
kkinder/NdbSearchableBase
NdbSearchableBase/SearchableModel.py
SearchableModel.search
def search(cls, query_string, options=None, enable_facet_discovery=False,
           return_facets=None, facet_options=None, facet_refinements=None,
           deadline=None, **kwargs):
    """
    Searches the index. Conveniently searches only for documents that belong to instances of this class.

    :param query_string: The query to match against documents in the index. See search.Query() for details.
    :param options: A QueryOptions describing post-processing of search results.
    :param enable_facet_discovery: discover top relevant facets for this search query and return them.
    :param return_facets: An iterable of FacetRequest or basestring as facet name to return specific facet with the result.
    :param facet_options: A FacetOption describing processing of facets.
    :param facet_refinements: An iterable of FacetRefinement objects or refinement token strings used to filter out
        search results based on a facet value. Refinements for different facets will be conjunction and refinements
        for the same facet will be disjunction.
    :param deadline: Deadline for RPC call in seconds; if None use the default.
    :param kwargs: Additional keyword arguments passed through to the index search call.
    :return: A SearchResults containing a list of documents matched, number returned and number matched by the query.
    :raises: QueryError: If the query string is not parseable.
        TypeError: If any of the parameters have invalid types, or an unknown attribute is passed.
        ValueError: If any of the parameters have invalid values (e.g., a negative deadline).
    """
    search_class = cls.search_get_class_names()[-1]
    query_string += ' ' + 'class_name:%s' % (search_class,)
    q = search.Query(
        query_string=query_string,
        options=options,
        enable_facet_discovery=enable_facet_discovery,
        return_facets=return_facets,
        facet_options=facet_options,
        facet_refinements=facet_refinements
    )
    index = cls.search_get_index()
    return index.search(q, deadline=deadline, **kwargs)
python
def search(cls, query_string, options=None, enable_facet_discovery=False,
           return_facets=None, facet_options=None, facet_refinements=None,
           deadline=None, **kwargs):
    """
    Searches the index. Conveniently searches only for documents that belong to instances of this class.

    :param query_string: The query to match against documents in the index. See search.Query() for details.
    :param options: A QueryOptions describing post-processing of search results.
    :param enable_facet_discovery: discover top relevant facets for this search query and return them.
    :param return_facets: An iterable of FacetRequest or basestring as facet name to return specific facet with the result.
    :param facet_options: A FacetOption describing processing of facets.
    :param facet_refinements: An iterable of FacetRefinement objects or refinement token strings used to filter out
        search results based on a facet value. Refinements for different facets will be conjunction and refinements
        for the same facet will be disjunction.
    :param deadline: Deadline for RPC call in seconds; if None use the default.
    :param kwargs: Additional keyword arguments passed through to the index search call.
    :return: A SearchResults containing a list of documents matched, number returned and number matched by the query.
    :raises: QueryError: If the query string is not parseable.
        TypeError: If any of the parameters have invalid types, or an unknown attribute is passed.
        ValueError: If any of the parameters have invalid values (e.g., a negative deadline).
    """
    search_class = cls.search_get_class_names()[-1]
    query_string += ' ' + 'class_name:%s' % (search_class,)
    q = search.Query(
        query_string=query_string,
        options=options,
        enable_facet_discovery=enable_facet_discovery,
        return_facets=return_facets,
        facet_options=facet_options,
        facet_refinements=facet_refinements
    )
    index = cls.search_get_index()
    return index.search(q, deadline=deadline, **kwargs)
[ "def", "search", "(", "cls", ",", "query_string", ",", "options", "=", "None", ",", "enable_facet_discovery", "=", "False", ",", "return_facets", "=", "None", ",", "facet_options", "=", "None", ",", "facet_refinements", "=", "None", ",", "deadline", "=", "None", ",", "*", "*", "kwargs", ")", ":", "search_class", "=", "cls", ".", "search_get_class_names", "(", ")", "[", "-", "1", "]", "query_string", "+=", "' '", "+", "'class_name:%s'", "%", "(", "search_class", ",", ")", "q", "=", "search", ".", "Query", "(", "query_string", "=", "query_string", ",", "options", "=", "options", ",", "enable_facet_discovery", "=", "enable_facet_discovery", ",", "return_facets", "=", "return_facets", ",", "facet_options", "=", "facet_options", ",", "facet_refinements", "=", "facet_refinements", ")", "index", "=", "cls", ".", "search_get_index", "(", ")", "return", "index", ".", "search", "(", "q", ",", "deadline", "=", "deadline", ",", "*", "*", "kwargs", ")" ]
Searches the index. Conveniently searches only for documents that belong to instances of this class.

:param query_string: The query to match against documents in the index. See search.Query() for details.
:param options: A QueryOptions describing post-processing of search results.
:param enable_facet_discovery: discover top relevant facets for this search query and return them.
:param return_facets: An iterable of FacetRequest or basestring as facet name to return specific facet with the result.
:param facet_options: A FacetOption describing processing of facets.
:param facet_refinements: An iterable of FacetRefinement objects or refinement token strings used to filter out search results based on a facet value. Refinements for different facets will be conjunction and refinements for the same facet will be disjunction.
:param deadline: Deadline for RPC call in seconds; if None use the default.
:param kwargs: Additional keyword arguments passed through to the index search call.
:return: A SearchResults containing a list of documents matched, number returned and number matched by the query.
:raises: QueryError: If the query string is not parseable. TypeError: If any of the parameters have invalid types, or an unknown attribute is passed. ValueError: If any of the parameters have invalid values (e.g., a negative deadline).
[ "Searches", "the", "index", ".", "Conveniently", "searches", "only", "for", "documents", "that", "belong", "to", "instances", "of", "this", "class", "." ]
4f999336b464704a0929cec135c1f09fb1ddfb7c
https://github.com/kkinder/NdbSearchableBase/blob/4f999336b464704a0929cec135c1f09fb1ddfb7c/NdbSearchableBase/SearchableModel.py#L49-L94
train
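A hedged usage sketch, assuming SearchableModel is mixed into an ndb model (the model and field names are illustrative). Note that search() scopes the query to this class by appending a class_name: clause before running it.

from google.appengine.ext import ndb

class Article(SearchableModel, ndb.Model):
    # Illustrative model; the mixin pattern is an assumption.
    title = ndb.StringProperty()
    body = ndb.TextProperty()

results = Article.search('title:hello')
for doc in results:
    article = Article.get_from_search_doc(doc)  # back to the ndb entity
    print(article.title)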
kkinder/NdbSearchableBase
NdbSearchableBase/SearchableModel.py
SearchableModel.search_update_index
def search_update_index(self):
    """
    Updates the search index for this instance.
    This happens automatically on put.
    """
    doc_id = self.search_get_document_id(self.key)
    fields = [search.AtomField('class_name', name) for name in self.search_get_class_names()]
    index = self.search_get_index()

    if self.searchable_fields is None:
        searchable_fields = []
        for field, prop in self._properties.items():
            if field == 'class':
                continue
            for class_, field_type in SEARCHABLE_PROPERTY_TYPES.items():
                if isinstance(prop, class_):
                    searchable_fields.append(field)
    else:
        searchable_fields = self.searchable_fields

    for f in set(searchable_fields):
        prop = self._properties[f]
        value = getattr(self, f)
        field = None
        field_found = False
        for class_, field_type in SEARCHABLE_PROPERTY_TYPES.items():
            if isinstance(prop, class_):
                field_found = True
                if value is not None:
                    if isinstance(value, list) or isinstance(value, tuple) or isinstance(value, set):
                        for v in value:
                            field = field_type(name=f, value=v)
                    elif isinstance(value, ndb.Key):
                        field = field_type(name=f, value=value.urlsafe())
                    else:
                        field = field_type(name=f, value=value)
        if not field_found:
            raise ValueError('Cannot find field type for %r on %r' % (prop, self.__class__))
        if field is not None:
            fields.append(field)

    document = search.Document(doc_id, fields=fields)
    index.put(document)
python
def search_update_index(self):
    """
    Updates the search index for this instance.
    This happens automatically on put.
    """
    doc_id = self.search_get_document_id(self.key)
    fields = [search.AtomField('class_name', name) for name in self.search_get_class_names()]
    index = self.search_get_index()

    if self.searchable_fields is None:
        searchable_fields = []
        for field, prop in self._properties.items():
            if field == 'class':
                continue
            for class_, field_type in SEARCHABLE_PROPERTY_TYPES.items():
                if isinstance(prop, class_):
                    searchable_fields.append(field)
    else:
        searchable_fields = self.searchable_fields

    for f in set(searchable_fields):
        prop = self._properties[f]
        value = getattr(self, f)
        field = None
        field_found = False
        for class_, field_type in SEARCHABLE_PROPERTY_TYPES.items():
            if isinstance(prop, class_):
                field_found = True
                if value is not None:
                    if isinstance(value, list) or isinstance(value, tuple) or isinstance(value, set):
                        for v in value:
                            field = field_type(name=f, value=v)
                    elif isinstance(value, ndb.Key):
                        field = field_type(name=f, value=value.urlsafe())
                    else:
                        field = field_type(name=f, value=value)
        if not field_found:
            raise ValueError('Cannot find field type for %r on %r' % (prop, self.__class__))
        if field is not None:
            fields.append(field)

    document = search.Document(doc_id, fields=fields)
    index.put(document)
[ "def", "search_update_index", "(", "self", ")", ":", "doc_id", "=", "self", ".", "search_get_document_id", "(", "self", ".", "key", ")", "fields", "=", "[", "search", ".", "AtomField", "(", "'class_name'", ",", "name", ")", "for", "name", "in", "self", ".", "search_get_class_names", "(", ")", "]", "index", "=", "self", ".", "search_get_index", "(", ")", "if", "self", ".", "searchable_fields", "is", "None", ":", "searchable_fields", "=", "[", "]", "for", "field", ",", "prop", "in", "self", ".", "_properties", ".", "items", "(", ")", ":", "if", "field", "==", "'class'", ":", "continue", "for", "class_", ",", "field_type", "in", "SEARCHABLE_PROPERTY_TYPES", ".", "items", "(", ")", ":", "if", "isinstance", "(", "prop", ",", "class_", ")", ":", "searchable_fields", ".", "append", "(", "field", ")", "else", ":", "searchable_fields", "=", "self", ".", "searchable_fields", "for", "f", "in", "set", "(", "searchable_fields", ")", ":", "prop", "=", "self", ".", "_properties", "[", "f", "]", "value", "=", "getattr", "(", "self", ",", "f", ")", "field", "=", "None", "field_found", "=", "False", "for", "class_", ",", "field_type", "in", "SEARCHABLE_PROPERTY_TYPES", ".", "items", "(", ")", ":", "if", "isinstance", "(", "prop", ",", "class_", ")", ":", "field_found", "=", "True", "if", "value", "is", "not", "None", ":", "if", "isinstance", "(", "value", ",", "list", ")", "or", "isinstance", "(", "value", ",", "tuple", ")", "or", "isinstance", "(", "value", ",", "set", ")", ":", "for", "v", "in", "value", ":", "field", "=", "field_type", "(", "name", "=", "f", ",", "value", "=", "v", ")", "elif", "isinstance", "(", "value", ",", "ndb", ".", "Key", ")", ":", "field", "=", "field_type", "(", "name", "=", "f", ",", "value", "=", "value", ".", "urlsafe", "(", ")", ")", "else", ":", "field", "=", "field_type", "(", "name", "=", "f", ",", "value", "=", "value", ")", "if", "not", "field_found", ":", "raise", "ValueError", "(", "'Cannot find field type for %r on %r'", "%", "(", "prop", ",", "self", ".", "__class__", ")", ")", "if", "field", "is", "not", "None", ":", "fields", ".", "append", "(", "field", ")", "document", "=", "search", ".", "Document", "(", "doc_id", ",", "fields", "=", "fields", ")", "index", ".", "put", "(", "document", ")" ]
Updates the search index for this instance. This happens automatically on put.
[ "Updates", "the", "search", "index", "for", "this", "instance", "." ]
4f999336b464704a0929cec135c1f09fb1ddfb7c
https://github.com/kkinder/NdbSearchableBase/blob/4f999336b464704a0929cec135c1f09fb1ddfb7c/NdbSearchableBase/SearchableModel.py#L96-L143
train
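The searchable_fields handling above means a model can either index every property of a supported type (the None default) or opt in to an explicit whitelist. A minimal sketch, assuming an App Engine environment; the Book model, its fields, and the import path are illustrative assumptions:

from google.appengine.ext import ndb
from NdbSearchableBase.SearchableModel import SearchableModel  # import path assumed from the file layout

class Book(SearchableModel):  # hypothetical model
    title = ndb.StringProperty()
    notes = ndb.TextProperty()

    # Only 'title' ends up in the search document; leaving
    # searchable_fields as None would index both properties.
    searchable_fields = ['title']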
kkinder/NdbSearchableBase
NdbSearchableBase/SearchableModel.py
SearchableModel.search_get_class_names
def search_get_class_names(cls): """ Returns class names for use in document indexing. """ if hasattr(cls, '_class_key'): class_names = [] for n in cls._class_key(): class_names.append(n) return class_names else: return [cls.__name__]
python
def search_get_class_names(cls): """ Returns class names for use in document indexing. """ if hasattr(cls, '_class_key'): class_names = [] for n in cls._class_key(): class_names.append(n) return class_names else: return [cls.__name__]
[ "def", "search_get_class_names", "(", "cls", ")", ":", "if", "hasattr", "(", "cls", ",", "'_class_key'", ")", ":", "class_names", "=", "[", "]", "for", "n", "in", "cls", ".", "_class_key", "(", ")", ":", "class_names", ".", "append", "(", "n", ")", "return", "class_names", "else", ":", "return", "[", "cls", ".", "__name__", "]" ]
Returns class names for use in document indexing.
[ "Returns", "class", "names", "for", "use", "in", "document", "indexing", "." ]
4f999336b464704a0929cec135c1f09fb1ddfb7c
https://github.com/kkinder/NdbSearchableBase/blob/4f999336b464704a0929cec135c1f09fb1ddfb7c/NdbSearchableBase/SearchableModel.py#L146-L156
train
kkinder/NdbSearchableBase
NdbSearchableBase/SearchableModel.py
SearchableModel.from_urlsafe
def from_urlsafe(cls, urlsafe): """ Returns an instance of the model from a urlsafe string. :param urlsafe: urlsafe key :return: Instance of cls """ try: key = ndb.Key(urlsafe=urlsafe) except: return None obj = key.get() if obj and isinstance(obj, cls): return obj
python
def from_urlsafe(cls, urlsafe): """ Returns an instance of the model from a urlsafe string. :param urlsafe: urlsafe key :return: Instance of cls """ try: key = ndb.Key(urlsafe=urlsafe) except: return None obj = key.get() if obj and isinstance(obj, cls): return obj
[ "def", "from_urlsafe", "(", "cls", ",", "urlsafe", ")", ":", "try", ":", "key", "=", "ndb", ".", "Key", "(", "urlsafe", "=", "urlsafe", ")", "except", ":", "return", "None", "obj", "=", "key", ".", "get", "(", ")", "if", "obj", "and", "isinstance", "(", "obj", ",", "cls", ")", ":", "return", "obj" ]
Returns an instance of the model from a urlsafe string. :param urlsafe: urlsafe key :return: Instance of cls
[ "Returns", "an", "instance", "of", "the", "model", "from", "a", "urlsafe", "string", "." ]
4f999336b464704a0929cec135c1f09fb1ddfb7c
https://github.com/kkinder/NdbSearchableBase/blob/4f999336b464704a0929cec135c1f09fb1ddfb7c/NdbSearchableBase/SearchableModel.py#L159-L172
train
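A hedged round-trip sketch for from_urlsafe, under the same App Engine assumptions (testbed or production); Article is a hypothetical model:

from google.appengine.ext import ndb
from NdbSearchableBase.SearchableModel import SearchableModel  # import path assumed

class Article(SearchableModel):  # hypothetical model
    title = ndb.StringProperty()

key = Article(title='hello').put()

# Decodes the urlsafe key and fetches the entity; malformed input or a
# key belonging to a different model class yields None instead.
assert Article.from_urlsafe(key.urlsafe()).title == 'hello'
assert Article.from_urlsafe('not-a-key') is None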
kkinder/NdbSearchableBase
NdbSearchableBase/SearchableModel.py
SearchableModel.get_from_search_doc
def get_from_search_doc(cls, doc_id): """ Returns an instance of the model from a search document id. :param doc_id: Search document id :return: Instance of cls """ # If the document was passed instead of the doc_id, get the document. if hasattr(doc_id, 'doc_id'): doc_id = doc_id.doc_id return cls.from_urlsafe(doc_id)
python
def get_from_search_doc(cls, doc_id): """ Returns an instance of the model from a search document id. :param doc_id: Search document id :return: Instance of cls """ # If the document was passed instead of the doc_id, get the document. if hasattr(doc_id, 'doc_id'): doc_id = doc_id.doc_id return cls.from_urlsafe(doc_id)
[ "def", "get_from_search_doc", "(", "cls", ",", "doc_id", ")", ":", "# If the document was passed instead of the doc_id, get the document.", "if", "hasattr", "(", "doc_id", ",", "'doc_id'", ")", ":", "doc_id", "=", "doc_id", ".", "doc_id", "return", "cls", ".", "from_urlsafe", "(", "doc_id", ")" ]
Returns an instance of the model from a search document id. :param doc_id: Search document id :return: Instance of cls
[ "Returns", "an", "instance", "of", "the", "model", "from", "a", "search", "document", "id", "." ]
4f999336b464704a0929cec135c1f09fb1ddfb7c
https://github.com/kkinder/NdbSearchableBase/blob/4f999336b464704a0929cec135c1f09fb1ddfb7c/NdbSearchableBase/SearchableModel.py#L175-L185
train
kkinder/NdbSearchableBase
NdbSearchableBase/SearchableModel.py
SearchableModel._pre_delete_hook
def _pre_delete_hook(cls, key): """ Removes instance from index. """ if cls.searching_enabled: doc_id = cls.search_get_document_id(key) index = cls.search_get_index() index.delete(doc_id)
python
def _pre_delete_hook(cls, key): """ Removes instance from index. """ if cls.searching_enabled: doc_id = cls.search_get_document_id(key) index = cls.search_get_index() index.delete(doc_id)
[ "def", "_pre_delete_hook", "(", "cls", ",", "key", ")", ":", "if", "cls", ".", "searching_enabled", ":", "doc_id", "=", "cls", ".", "search_get_document_id", "(", "key", ")", "index", "=", "cls", ".", "search_get_index", "(", ")", "index", ".", "delete", "(", "doc_id", ")" ]
Removes instance from index.
[ "Removes", "instance", "from", "index", "." ]
4f999336b464704a0929cec135c1f09fb1ddfb7c
https://github.com/kkinder/NdbSearchableBase/blob/4f999336b464704a0929cec135c1f09fb1ddfb7c/NdbSearchableBase/SearchableModel.py#L188-L195
train
adaptive-learning/proso-apps
proso/models/environment.py
Environment.process_answer
def process_answer(self, user, item, asked, answered, time, answer, response_time, guess, **kwargs): """ This method is used during the answer streaming and is called after the predictive model for each answer. Args: user (int): identifier of the user answering the question asked (int): identifier of the asked item answered (int): identifier of the answered item or None if the user answered "I don't know" response_time (int): time the answer took in milliseconds time (datetime.datetime): time when the user answered the question guess (float): probability of correct response in case of random answer """ pass
python
def process_answer(self, user, item, asked, answered, time, answer, response_time, guess, **kwargs): """ This method is used during the answer streaming and is called after the predictive model for each answer. Args: user (int): identifier of the user answering the question asked (int): identifier of the asked item answered (int): identifier of the answered item or None if the user answered "I don't know" response_time (int): time the answer took in milliseconds time (datetime.datetime): time when the user answered the question guess (float): probability of correct response in case of random answer """ pass
[ "def", "process_answer", "(", "self", ",", "user", ",", "item", ",", "asked", ",", "answered", ",", "time", ",", "answer", ",", "response_time", ",", "guess", ",", "*", "*", "kwargs", ")", ":", "pass" ]
This method is used during the answer streaming and is called after the predictive model for each answer. Args: user (int): identifier of the user answering the question asked (int): identifier of the asked item answered (int): identifier of the answered item or None if the user answered "I don't know" response_time (int): time the answer took in milliseconds time (datetime.datetime): time when the user answered the question guess (float): probability of correct response in case of random answer
[ "This", "method", "is", "used", "during", "the", "answer", "streaming", "and", "is", "called", "after", "the", "predictive", "model", "for", "each", "answer", "." ]
8278c72e498d6ef8d392cc47b48473f4ec037142
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso/models/environment.py#L18-L38
train
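Because process_answer is a no-op hook, concrete environments override it. A minimal sketch of one way to do that; the LoggingEnvironment subclass is hypothetical, and it assumes Environment can be subclassed directly:

from proso.models.environment import Environment

class LoggingEnvironment(Environment):  # hypothetical subclass
    def __init__(self):
        self.answers = []

    def process_answer(self, user, item, asked, answered, time, answer,
                       response_time, guess, **kwargs):
        # Keep a record of each streamed answer for later inspection.
        self.answers.append({
            'user': user,
            'item': item,
            'correct': asked == answered,
            'response_time_ms': response_time,
            'guess': guess,
        })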
Apstra/aeon-venos
pylib/aeon/nxos/autoload/guestshell.py
_guestshell._get_sz_info
def _get_sz_info(self): """ Obtains the current resource allocations; assumes that the guestshell is in an 'Activated' state """ if 'None' == self._state: return None cmd = 'show virtual-service detail name guestshell+' got = self.cli(cmd) got = got['TABLE_detail']['ROW_detail'] sz_cpu = int(got['cpu_reservation']) sz_disk = int(got['disk_reservation']) sz_memory = int(got['memory_reservation']) self.sz_has = _guestshell.Resources( cpu=sz_cpu, memory=sz_memory, disk=sz_disk)
python
def _get_sz_info(self): """ Obtains the current resource allocations; assumes that the guestshell is in an 'Activated' state """ if 'None' == self._state: return None cmd = 'show virtual-service detail name guestshell+' got = self.cli(cmd) got = got['TABLE_detail']['ROW_detail'] sz_cpu = int(got['cpu_reservation']) sz_disk = int(got['disk_reservation']) sz_memory = int(got['memory_reservation']) self.sz_has = _guestshell.Resources( cpu=sz_cpu, memory=sz_memory, disk=sz_disk)
[ "def", "_get_sz_info", "(", "self", ")", ":", "if", "'None'", "==", "self", ".", "_state", ":", "return", "None", "cmd", "=", "'show virtual-service detail name guestshell+'", "got", "=", "self", ".", "cli", "(", "cmd", ")", "got", "=", "got", "[", "'TABLE_detail'", "]", "[", "'ROW_detail'", "]", "sz_cpu", "=", "int", "(", "got", "[", "'cpu_reservation'", "]", ")", "sz_disk", "=", "int", "(", "got", "[", "'disk_reservation'", "]", ")", "sz_memory", "=", "int", "(", "got", "[", "'memory_reservation'", "]", ")", "self", ".", "sz_has", "=", "_guestshell", ".", "Resources", "(", "cpu", "=", "sz_cpu", ",", "memory", "=", "sz_memory", ",", "disk", "=", "sz_disk", ")" ]
Obtains the current resource allocations; assumes that the guestshell is in an 'Activated' state
[ "Obtains", "the", "current", "resource", "allocations", "assumes", "that", "the", "guestshell", "is", "in", "an", "Activated", "state" ]
4d4f73d5904831ddc78c30922a8a226c90cf7d90
https://github.com/Apstra/aeon-venos/blob/4d4f73d5904831ddc78c30922a8a226c90cf7d90/pylib/aeon/nxos/autoload/guestshell.py#L198-L215
train
assamite/creamas
creamas/image.py
fractal_dimension
def fractal_dimension(image): '''Estimates the fractal dimension of an image with box counting. Counts pixels with value 0 as empty and everything else as non-empty. Input image has to be grayscale. See, e.g `Wikipedia <https://en.wikipedia.org/wiki/Fractal_dimension>`_. :param image: numpy.ndarray :returns: estimation of fractal dimension :rtype: float ''' pixels = [] for i in range(image.shape[0]): for j in range(image.shape[1]): if image[i, j] > 0: pixels.append((i, j)) lx = image.shape[1] ly = image.shape[0] pixels = np.array(pixels) if len(pixels) < 2: return 0 scales = np.logspace(1, 4, num=20, endpoint=False, base=2) Ns = [] for scale in scales: H, edges = np.histogramdd(pixels, bins=(np.arange(0, lx, scale), np.arange(0, ly, scale))) H_sum = np.sum(H > 0) if H_sum == 0: H_sum = 1 Ns.append(H_sum) coeffs = np.polyfit(np.log(scales), np.log(Ns), 1) hausdorff_dim = -coeffs[0] return hausdorff_dim
python
def fractal_dimension(image): '''Estimates the fractal dimension of an image with box counting. Counts pixels with value 0 as empty and everything else as non-empty. Input image has to be grayscale. See, e.g `Wikipedia <https://en.wikipedia.org/wiki/Fractal_dimension>`_. :param image: numpy.ndarray :returns: estimation of fractal dimension :rtype: float ''' pixels = [] for i in range(image.shape[0]): for j in range(image.shape[1]): if image[i, j] > 0: pixels.append((i, j)) lx = image.shape[1] ly = image.shape[0] pixels = np.array(pixels) if len(pixels) < 2: return 0 scales = np.logspace(1, 4, num=20, endpoint=False, base=2) Ns = [] for scale in scales: H, edges = np.histogramdd(pixels, bins=(np.arange(0, lx, scale), np.arange(0, ly, scale))) H_sum = np.sum(H > 0) if H_sum == 0: H_sum = 1 Ns.append(H_sum) coeffs = np.polyfit(np.log(scales), np.log(Ns), 1) hausdorff_dim = -coeffs[0] return hausdorff_dim
[ "def", "fractal_dimension", "(", "image", ")", ":", "pixels", "=", "[", "]", "for", "i", "in", "range", "(", "image", ".", "shape", "[", "0", "]", ")", ":", "for", "j", "in", "range", "(", "image", ".", "shape", "[", "1", "]", ")", ":", "if", "image", "[", "i", ",", "j", "]", ">", "0", ":", "pixels", ".", "append", "(", "(", "i", ",", "j", ")", ")", "lx", "=", "image", ".", "shape", "[", "1", "]", "ly", "=", "image", ".", "shape", "[", "0", "]", "pixels", "=", "np", ".", "array", "(", "pixels", ")", "if", "len", "(", "pixels", ")", "<", "2", ":", "return", "0", "scales", "=", "np", ".", "logspace", "(", "1", ",", "4", ",", "num", "=", "20", ",", "endpoint", "=", "False", ",", "base", "=", "2", ")", "Ns", "=", "[", "]", "for", "scale", "in", "scales", ":", "H", ",", "edges", "=", "np", ".", "histogramdd", "(", "pixels", ",", "bins", "=", "(", "np", ".", "arange", "(", "0", ",", "lx", ",", "scale", ")", ",", "np", ".", "arange", "(", "0", ",", "ly", ",", "scale", ")", ")", ")", "H_sum", "=", "np", ".", "sum", "(", "H", ">", "0", ")", "if", "H_sum", "==", "0", ":", "H_sum", "=", "1", "Ns", ".", "append", "(", "H_sum", ")", "coeffs", "=", "np", ".", "polyfit", "(", "np", ".", "log", "(", "scales", ")", ",", "np", ".", "log", "(", "Ns", ")", ",", "1", ")", "hausdorff_dim", "=", "-", "coeffs", "[", "0", "]", "return", "hausdorff_dim" ]
Estimates the fractal dimension of an image with box counting. Counts pixels with value 0 as empty and everything else as non-empty. Input image has to be grayscale. See, e.g `Wikipedia <https://en.wikipedia.org/wiki/Fractal_dimension>`_. :param image: numpy.ndarray :returns: estimation of fractal dimension :rtype: float
[ "Estimates", "the", "fractal", "dimension", "of", "an", "image", "with", "box", "counting", ".", "Counts", "pixels", "with", "value", "0", "as", "empty", "and", "everything", "else", "as", "non", "-", "empty", ".", "Input", "image", "has", "to", "be", "grayscale", "." ]
54dc3e31c97a3f938e58272f8ab80b6bcafeff58
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/image.py#L11-L46
train
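Two sanity checks for the box-counting estimate, assuming numpy is installed and the import path matches the file layout; the estimates are approximate, not exact:

import numpy as np
from creamas.image import fractal_dimension

# A fully filled grayscale image: the estimate should approach 2.
filled = np.full((128, 128), 255, dtype=np.uint8)
print(fractal_dimension(filled))

# A one-pixel-wide diagonal line: the estimate should be close to 1.
line = np.zeros((128, 128), dtype=np.uint8)
np.fill_diagonal(line, 255)
print(fractal_dimension(line))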
assamite/creamas
creamas/image.py
channel_portion
def channel_portion(image, channel): '''Estimates the amount of a color relative to other colors. :param image: numpy.ndarray :param channel: int :returns: portion of a channel in an image :rtype: float ''' # Separate color channels rgb = [] for i in range(3): rgb.append(image[:, :, i].astype(int)) ch = rgb.pop(channel) relative_values = ch - np.sum(rgb, axis=0) / 2 relative_values = np.maximum(np.zeros(ch.shape), relative_values) return float(np.average(relative_values) / 255)
python
def channel_portion(image, channel): '''Estimates the amount of a color relative to other colors. :param image: numpy.ndarray :param channel: int :returns: portion of a channel in an image :rtype: float ''' # Separate color channels rgb = [] for i in range(3): rgb.append(image[:, :, i].astype(int)) ch = rgb.pop(channel) relative_values = ch - np.sum(rgb, axis=0) / 2 relative_values = np.maximum(np.zeros(ch.shape), relative_values) return float(np.average(relative_values) / 255)
[ "def", "channel_portion", "(", "image", ",", "channel", ")", ":", "# Separate color channels", "rgb", "=", "[", "]", "for", "i", "in", "range", "(", "3", ")", ":", "rgb", ".", "append", "(", "image", "[", ":", ",", ":", ",", "i", "]", ".", "astype", "(", "int", ")", ")", "ch", "=", "rgb", ".", "pop", "(", "channel", ")", "relative_values", "=", "ch", "-", "np", ".", "sum", "(", "rgb", ",", "axis", "=", "0", ")", "/", "2", "relative_values", "=", "np", ".", "maximum", "(", "np", ".", "zeros", "(", "ch", ".", "shape", ")", ",", "relative_values", ")", "return", "float", "(", "np", ".", "average", "(", "relative_values", ")", "/", "255", ")" ]
Estimates the amount of a color relative to other colors. :param image: numpy.ndarray :param channel: int :returns: portion of a channel in an image :rtype: float
[ "Estimates", "the", "amount", "of", "a", "color", "relative", "to", "other", "colors", "." ]
54dc3e31c97a3f938e58272f8ab80b6bcafeff58
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/image.py#L49-L67
train
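A quick check of the relative-channel arithmetic, assuming RGB channel order (0=red, 1=green, 2=blue):

import numpy as np
from creamas.image import channel_portion

# Pure red: red minus half of (green + blue) is 255 everywhere, so the
# red portion is 1.0, while green and blue clamp to 0.0.
img = np.zeros((16, 16, 3), dtype=np.uint8)
img[:, :, 0] = 255
print(channel_portion(img, 0))  # 1.0
print(channel_portion(img, 1))  # 0.0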
assamite/creamas
creamas/image.py
intensity
def intensity(image): '''Calculates the average intensity of the pixels in an image. Accepts both RGB and grayscale images. :param image: numpy.ndarray :returns: image intensity :rtype: float ''' if len(image.shape) > 2: # Convert to grayscale image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) / 255 elif issubclass(image.dtype.type, np.integer): image = image / 255 return float(np.sum(image) / np.prod(image.shape))
python
def intensity(image): '''Calculates the average intensity of the pixels in an image. Accepts both RGB and grayscale images. :param image: numpy.ndarray :returns: image intensity :rtype: float ''' if len(image.shape) > 2: # Convert to grayscale image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) / 255 elif issubclass(image.dtype.type, np.integer): image = image / 255 return float(np.sum(image) / np.prod(image.shape))
[ "def", "intensity", "(", "image", ")", ":", "if", "len", "(", "image", ".", "shape", ")", ">", "2", ":", "# Convert to grayscale", "image", "=", "cv2", ".", "cvtColor", "(", "image", ",", "cv2", ".", "COLOR_RGB2GRAY", ")", "/", "255", "elif", "issubclass", "(", "image", ".", "dtype", ".", "type", ",", "np", ".", "integer", ")", ":", "image", "/=", "255", "return", "float", "(", "np", ".", "sum", "(", "image", ")", "/", "np", ".", "prod", "(", "image", ".", "shape", ")", ")" ]
Calculates the average intensity of the pixels in an image. Accepts both RGB and grayscale images. :param image: numpy.ndarray :returns: image intensity :rtype: float
[ "Calculates", "the", "average", "intensity", "of", "the", "pixels", "in", "an", "image", ".", "Accepts", "both", "RGB", "and", "grayscale", "images", "." ]
54dc3e31c97a3f938e58272f8ab80b6bcafeff58
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/image.py#L70-L83
train
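With the integer-normalisation fix above (image = image / 255), a short check on grayscale uint8 input, assuming numpy is available:

import numpy as np
from creamas.image import intensity

print(intensity(np.full((8, 8), 255, dtype=np.uint8)))  # 1.0
print(intensity(np.full((8, 8), 128, dtype=np.uint8)))  # ~0.502 (128 / 255)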
Kortemme-Lab/klab
klab/cloning/dna.py
sliding_window
def sliding_window(sequence, win_size, step=1): """ Returns a generator that will iterate through the defined chunks of input sequence. Input sequence must be iterable. Credit: http://scipher.wordpress.com/2010/12/02/simple-sliding-window-iterator-in-python/ https://github.com/xguse/scipherPyProj/blob/master/scipherSrc/defs/basicDefs.py """ # Verify the inputs try: it = iter(sequence) except TypeError: raise ValueError("sequence must be iterable.") if not isinstance(win_size, int): raise ValueError("type(win_size) must be int.") if not isinstance(step, int): raise ValueError("type(step) must be int.") if step > win_size: raise ValueError("step must not be larger than win_size.") if win_size > len(sequence): raise ValueError("win_size must not be larger than sequence length.") # Pre-compute number of chunks to emit num_chunks = ((len(sequence) - win_size) // step) + 1 # Do the work for i in range(0, num_chunks * step, step): yield sequence[i:i+win_size]
python
def sliding_window(sequence, win_size, step=1): """ Returns a generator that will iterate through the defined chunks of input sequence. Input sequence must be iterable. Credit: http://scipher.wordpress.com/2010/12/02/simple-sliding-window-iterator-in-python/ https://github.com/xguse/scipherPyProj/blob/master/scipherSrc/defs/basicDefs.py """ # Verify the inputs try: it = iter(sequence) except TypeError: raise ValueError("sequence must be iterable.") if not isinstance(win_size, int): raise ValueError("type(win_size) must be int.") if not isinstance(step, int): raise ValueError("type(step) must be int.") if step > win_size: raise ValueError("step must not be larger than win_size.") if win_size > len(sequence): raise ValueError("win_size must not be larger than sequence length.") # Pre-compute number of chunks to emit num_chunks = ((len(sequence) - win_size) // step) + 1 # Do the work for i in range(0, num_chunks * step, step): yield sequence[i:i+win_size]
[ "def", "sliding_window", "(", "sequence", ",", "win_size", ",", "step", "=", "1", ")", ":", "# Verify the inputs", "try", ":", "it", "=", "iter", "(", "sequence", ")", "except", "TypeError", ":", "raise", "ValueError", "(", "\"sequence must be iterable.\"", ")", "if", "not", "isinstance", "(", "win_size", ",", "int", ")", ":", "raise", "ValueError", "(", "\"type(win_size) must be int.\"", ")", "if", "not", "isinstance", "(", "step", ",", "int", ")", ":", "raise", "ValueError", "(", "\"type(step) must be int.\"", ")", "if", "step", ">", "win_size", ":", "raise", "ValueError", "(", "\"step must not be larger than win_size.\"", ")", "if", "win_size", ">", "len", "(", "sequence", ")", ":", "raise", "ValueError", "(", "\"win_size must not be larger than sequence length.\"", ")", "# Pre-compute number of chunks to emit", "num_chunks", "=", "(", "(", "len", "(", "sequence", ")", "-", "win_size", ")", "/", "step", ")", "+", "1", "# Do the work", "for", "i", "in", "range", "(", "0", ",", "num_chunks", "*", "step", ",", "step", ")", ":", "yield", "sequence", "[", "i", ":", "i", "+", "win_size", "]" ]
Returns a generator that will iterate through the defined chunks of input sequence. Input sequence must be iterable. Credit: http://scipher.wordpress.com/2010/12/02/simple-sliding-window-iterator-in-python/ https://github.com/xguse/scipherPyProj/blob/master/scipherSrc/defs/basicDefs.py
[ "Returns", "a", "generator", "that", "will", "iterate", "through", "the", "defined", "chunks", "of", "input", "sequence", ".", "Input", "sequence", "must", "be", "iterable", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/cloning/dna.py#L103-L132
train
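A short usage check, assuming the import path matches the file layout; an 8-character sequence with win_size=4 and step=2 yields ((8 - 4) // 2) + 1 = 3 windows:

from klab.cloning.dna import sliding_window  # import path assumed

print(list(sliding_window('ATGCATGC', win_size=4, step=2)))
# ['ATGC', 'GCAT', 'ATGC'] -- windows start at offsets 0, 2 and 4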
Kortemme-Lab/klab
klab/cloning/dna.py
dna_to_re
def dna_to_re(seq): """ Return a compiled regular expression that will match anything described by the input sequence. For example, a sequence that contains an 'N' matches any base at that position. """ seq = seq.replace('K', '[GT]') seq = seq.replace('M', '[AC]') seq = seq.replace('R', '[AG]') seq = seq.replace('Y', '[CT]') seq = seq.replace('S', '[CG]') seq = seq.replace('W', '[AT]') seq = seq.replace('B', '[CGT]') seq = seq.replace('V', '[ACG]') seq = seq.replace('H', '[ACT]') seq = seq.replace('D', '[AGT]') seq = seq.replace('X', '[GATC]') seq = seq.replace('N', '[GATC]') return re.compile(seq)
python
def dna_to_re(seq): """ Return a compiled regular expression that will match anything described by the input sequence. For example, a sequence that contains an 'N' matches any base at that position. """ seq = seq.replace('K', '[GT]') seq = seq.replace('M', '[AC]') seq = seq.replace('R', '[AG]') seq = seq.replace('Y', '[CT]') seq = seq.replace('S', '[CG]') seq = seq.replace('W', '[AT]') seq = seq.replace('B', '[CGT]') seq = seq.replace('V', '[ACG]') seq = seq.replace('H', '[ACT]') seq = seq.replace('D', '[AGT]') seq = seq.replace('X', '[GATC]') seq = seq.replace('N', '[GATC]') return re.compile(seq)
[ "def", "dna_to_re", "(", "seq", ")", ":", "seq", "=", "seq", ".", "replace", "(", "'K'", ",", "'[GT]'", ")", "seq", "=", "seq", ".", "replace", "(", "'M'", ",", "'[AC]'", ")", "seq", "=", "seq", ".", "replace", "(", "'R'", ",", "'[AG]'", ")", "seq", "=", "seq", ".", "replace", "(", "'Y'", ",", "'[CT]'", ")", "seq", "=", "seq", ".", "replace", "(", "'S'", ",", "'[CG]'", ")", "seq", "=", "seq", ".", "replace", "(", "'W'", ",", "'[AT]'", ")", "seq", "=", "seq", ".", "replace", "(", "'B'", ",", "'[CGT]'", ")", "seq", "=", "seq", ".", "replace", "(", "'V'", ",", "'[ACG]'", ")", "seq", "=", "seq", ".", "replace", "(", "'H'", ",", "'[ACT]'", ")", "seq", "=", "seq", ".", "replace", "(", "'D'", ",", "'[AGT]'", ")", "seq", "=", "seq", ".", "replace", "(", "'X'", ",", "'[GATC]'", ")", "seq", "=", "seq", ".", "replace", "(", "'N'", ",", "'[GATC]'", ")", "return", "re", ".", "compile", "(", "seq", ")" ]
Return a compiled regular expression that will match anything described by the input sequence. For example, a sequence that contains an 'N' matches any base at that position.
[ "Return", "a", "compiled", "regular", "expression", "that", "will", "match", "anything", "described", "by", "the", "input", "sequence", ".", "For", "example", "a", "sequence", "that", "contains", "a", "N", "matched", "any", "base", "at", "that", "position", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/cloning/dna.py#L139-L159
train
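A usage sketch under the same import-path assumption; 'GANTC' is the HinfI recognition site, and its N expands to '[GATC]':

from klab.cloning.dna import dna_to_re  # import path assumed

pattern = dna_to_re('GANTC')  # compiles to 'GA[GATC]TC'
print(bool(pattern.match('GAATC')))  # True
print(bool(pattern.match('GAGTC')))  # True
print(bool(pattern.match('GTATC')))  # False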
Kortemme-Lab/klab
klab/cloning/dna.py
case_highlight
def case_highlight(seq, subseq): """ Highlights all instances of subseq in seq by making them uppercase and everything else lowercase. """ return re.sub(subseq.lower(), subseq.upper(), seq.lower())
python
def case_highlight(seq, subseq): """ Highlights all instances of subseq in seq by making them uppercase and everything else lowercase. """ return re.sub(subseq.lower(), subseq.upper(), seq.lower())
[ "def", "case_highlight", "(", "seq", ",", "subseq", ")", ":", "return", "re", ".", "subs", "(", "subseq", ".", "lower", "(", ")", ",", "subseq", ".", "upper", "(", ")", ",", "seq", ".", "lower", "(", ")", ")" ]
Highlights all instances of subseq in seq by making them uppercase and everything else lowercase.
[ "Highlights", "all", "instances", "of", "subseq", "in", "seq", "by", "making", "them", "uppercase", "and", "everything", "else", "lowercase", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/cloning/dna.py#L161-L166
train
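With the re.sub fix above, a one-line demonstration (import path assumed as before); GAATTC is the EcoRI recognition site:

from klab.cloning.dna import case_highlight  # import path assumed

print(case_highlight('ccgaattcgg', 'gaattc'))
# 'ccGAATTCgg' -- the matched site upper-cased, everything else lower-cased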
inveniosoftware/invenio-pidrelations
invenio_pidrelations/indexers.py
index_relations
def index_relations(sender, pid_type, json=None, record=None, index=None, **kwargs): """Add relations to the indexed record.""" if not json: json = {} pid = PersistentIdentifier.query.filter( PersistentIdentifier.object_uuid == record.id, PersistentIdentifier.pid_type == pid_type, ).one_or_none() relations = None if pid: relations = serialize_relations(pid) if relations: json['relations'] = relations return json
python
def index_relations(sender, pid_type, json=None, record=None, index=None, **kwargs): """Add relations to the indexed record.""" if not json: json = {} pid = PersistentIdentifier.query.filter( PersistentIdentifier.object_uuid == record.id, PersistentIdentifier.pid_type == pid_type, ).one_or_none() relations = None if pid: relations = serialize_relations(pid) if relations: json['relations'] = relations return json
[ "def", "index_relations", "(", "sender", ",", "pid_type", ",", "json", "=", "None", ",", "record", "=", "None", ",", "index", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "json", ":", "json", "=", "{", "}", "pid", "=", "PersistentIdentifier", ".", "query", ".", "filter", "(", "PersistentIdentifier", ".", "object_uuid", "==", "record", ".", "id", ",", "PersistentIdentifier", ".", "pid_type", "==", "pid_type", ",", ")", ".", "one_or_none", "(", ")", "relations", "=", "None", "if", "pid", ":", "relations", "=", "serialize_relations", "(", "pid", ")", "if", "relations", ":", "json", "[", "'relations'", "]", "=", "relations", "return", "json" ]
Add relations to the indexed record.
[ "Add", "relations", "to", "the", "indexed", "record", "." ]
a49f3725cf595b663c5b04814280b231f88bc333
https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/indexers.py#L37-L51
train
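The receiver signature matches Invenio's indexing signal, which does not pass pid_type, so a partial is one way to bind it; a hedged sketch, with 'recid' as an assumed pid type:

from functools import partial
from invenio_indexer.signals import before_record_index

# weak=False keeps the partial alive in blinker's receiver registry.
before_record_index.connect(
    partial(index_relations, pid_type='recid'), weak=False)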
inveniosoftware/invenio-pidrelations
invenio_pidrelations/indexers.py
index_siblings
def index_siblings(pid, include_pid=False, children=None, neighbors_eager=False, eager=False, with_deposits=True): """Send sibling records of the passed pid for indexing. Note: By default does not index the 'pid' itself, only zero or more siblings. :param pid: PID (recid) whose siblings are to be indexed. :param children: Overrides children with a fixed list of PID. Children should contain the 'pid' itself if 'neighbors_eager' is to be used, otherwise the last child is treated as the only neighbor. :param eager: Index all siblings immediately. :param include_pid: If True, will index also the provided 'pid' (default:False). :param neighbors_eager: Index the neighboring PIDs w.r.t. 'pid' immediately, and the rest with a bulk_index (default: False) :param with_deposits: Reindex also corresponding record's deposits. """ assert not (neighbors_eager and eager), \ """Only one of the 'eager' and 'neighbors_eager' flags can be set to True, not both""" if children is None: parent_pid = PIDNodeVersioning(pid=pid).parents.first() children = PIDNodeVersioning(pid=parent_pid).children.all() objid = str(pid.object_uuid) children = [str(p.object_uuid) for p in children] idx = children.index(objid) if objid in children else len(children) # Split children (which can include the pid) into left and right siblings # If 'pid' is not in children, idx is the length of list, so 'left' # will be all children, and 'right' will be an empty list # [X X X] X [X X X] if include_pid: # [X X X X] [X X X] Includes pid in the 'left' set left = children[:idx + 1] else: # [X X X] X [X X X] left = children[:idx] right = children[idx + 1:] if eager: eager_uuids = left + right bulk_uuids = [] elif neighbors_eager: # neighbors are last of 'left' and first of 'right' siblings # X X [X] X [X] X X eager_uuids = left[-1:] + right[:1] # all of the siblings, except the neighbours # [X X] X X X [X X] bulk_uuids = left[:-1] + right[1:] else: eager_uuids = [] bulk_uuids = left + right def get_dep_uuids(rec_uuids): """Get corresponding deposit UUIDs from record's UUIDs.""" return [str(PersistentIdentifier.get( 'depid', Record.get_record(id_)['_deposit']['id']).object_uuid) for id_ in rec_uuids] if with_deposits: eager_uuids += get_dep_uuids(eager_uuids) bulk_uuids += get_dep_uuids(bulk_uuids) for id_ in eager_uuids: RecordIndexer().index_by_id(id_) if bulk_uuids: RecordIndexer().bulk_index(bulk_uuids)
python
def index_siblings(pid, include_pid=False, children=None, neighbors_eager=False, eager=False, with_deposits=True): """Send sibling records of the passed pid for indexing. Note: By default does not index the 'pid' itself, only zero or more siblings. :param pid: PID (recid) whose siblings are to be indexed. :param children: Overrides children with a fixed list of PID. Children should contain the 'pid' itself if 'neighbors_eager' is to be used, otherwise the last child is treated as the only neighbor. :param eager: Index all siblings immediately. :param include_pid: If True, will index also the provided 'pid' (default:False). :param neighbors_eager: Index the neighboring PIDs w.r.t. 'pid' immediately, and the rest with a bulk_index (default: False) :param with_deposits: Reindex also corresponding record's deposits. """ assert not (neighbors_eager and eager), \ """Only one of the 'eager' and 'neighbors_eager' flags can be set to True, not both""" if children is None: parent_pid = PIDNodeVersioning(pid=pid).parents.first() children = PIDNodeVersioning(pid=parent_pid).children.all() objid = str(pid.object_uuid) children = [str(p.object_uuid) for p in children] idx = children.index(objid) if objid in children else len(children) # Split children (which can include the pid) into left and right siblings # If 'pid' is not in children, idx is the length of list, so 'left' # will be all children, and 'right' will be an empty list # [X X X] X [X X X] if include_pid: # [X X X X] [X X X] Includes pid in the 'left' set left = children[:idx + 1] else: # [X X X] X [X X X] left = children[:idx] right = children[idx + 1:] if eager: eager_uuids = left + right bulk_uuids = [] elif neighbors_eager: # neighbors are last of 'left' and first of 'right' siblings # X X [X] X [X] X X eager_uuids = left[-1:] + right[:1] # all of the siblings, except the neighbours # [X X] X X X [X X] bulk_uuids = left[:-1] + right[1:] else: eager_uuids = [] bulk_uuids = left + right def get_dep_uuids(rec_uuids): """Get corresponding deposit UUIDs from record's UUIDs.""" return [str(PersistentIdentifier.get( 'depid', Record.get_record(id_)['_deposit']['id']).object_uuid) for id_ in rec_uuids] if with_deposits: eager_uuids += get_dep_uuids(eager_uuids) bulk_uuids += get_dep_uuids(bulk_uuids) for id_ in eager_uuids: RecordIndexer().index_by_id(id_) if bulk_uuids: RecordIndexer().bulk_index(bulk_uuids)
[ "def", "index_siblings", "(", "pid", ",", "include_pid", "=", "False", ",", "children", "=", "None", ",", "neighbors_eager", "=", "False", ",", "eager", "=", "False", ",", "with_deposits", "=", "True", ")", ":", "assert", "not", "(", "neighbors_eager", "and", "eager", ")", ",", "\"\"\"Only one of the 'eager' and 'neighbors_eager' flags\n can be set to True, not both\"\"\"", "if", "children", "is", "None", ":", "parent_pid", "=", "PIDNodeVersioning", "(", "pid", "=", "pid", ")", ".", "parents", ".", "first", "(", ")", "children", "=", "PIDNodeVersioning", "(", "pid", "=", "parent_pid", ")", ".", "children", ".", "all", "(", ")", "objid", "=", "str", "(", "pid", ".", "object_uuid", ")", "children", "=", "[", "str", "(", "p", ".", "object_uuid", ")", "for", "p", "in", "children", "]", "idx", "=", "children", ".", "index", "(", "objid", ")", "if", "objid", "in", "children", "else", "len", "(", "children", ")", "# Split children (which can include the pid) into left and right siblings", "# If 'pid' is not in children, idx is the length of list, so 'left'", "# will be all children, and 'right' will be an empty list", "# [X X X] X [X X X]", "if", "include_pid", ":", "# [X X X X] [X X X] Includes pid to the 'left' set", "left", "=", "children", "[", ":", "idx", "+", "1", "]", "else", ":", "# [X X X] X [X X X]", "left", "=", "children", "[", ":", "idx", "]", "right", "=", "children", "[", "idx", "+", "1", ":", "]", "if", "eager", ":", "eager_uuids", "=", "left", "+", "right", "bulk_uuids", "=", "[", "]", "elif", "neighbors_eager", ":", "# neighbors are last of 'left' and first or 'right' siblings", "# X X [X] X [X] X X", "eager_uuids", "=", "left", "[", "-", "1", ":", "]", "+", "right", "[", ":", "1", "]", "# all of the siblings, except the neighbours", "# [X X] X X X [X X]", "bulk_uuids", "=", "left", "[", ":", "-", "1", "]", "+", "right", "[", "1", ":", "]", "else", ":", "eager_uuids", "=", "[", "]", "bulk_uuids", "=", "left", "+", "right", "def", "get_dep_uuids", "(", "rec_uuids", ")", ":", "\"\"\"Get corresponding deposit UUIDs from record's UUIDs.\"\"\"", "return", "[", "str", "(", "PersistentIdentifier", ".", "get", "(", "'depid'", ",", "Record", ".", "get_record", "(", "id_", ")", "[", "'_deposit'", "]", "[", "'id'", "]", ")", ".", "object_uuid", ")", "for", "id_", "in", "rec_uuids", "]", "if", "with_deposits", ":", "eager_uuids", "+=", "get_dep_uuids", "(", "eager_uuids", ")", "bulk_uuids", "+=", "get_dep_uuids", "(", "bulk_uuids", ")", "for", "id_", "in", "eager_uuids", ":", "RecordIndexer", "(", ")", ".", "index_by_id", "(", "id_", ")", "if", "bulk_uuids", ":", "RecordIndexer", "(", ")", ".", "bulk_index", "(", "bulk_uuids", ")" ]
Send sibling records of the passed pid for indexing. Note: By default does not index the 'pid' itself, only zero or more siblings. :param pid: PID (recid) whose siblings are to be indexed. :param children: Overrides children with a fixed list of PID. Children should contain the 'pid' itself if 'neighbors_eager' is to be used, otherwise the last child is treated as the only neighbor. :param eager: Index all siblings immediately. :param include_pid: If True, will index also the provided 'pid' (default:False). :param neighbors_eager: Index the neighboring PIDs w.r.t. 'pid' immediately, and the rest with a bulk_index (default: False) :param with_deposits: Reindex also corresponding record's deposits.
[ "Send", "sibling", "records", "of", "the", "passed", "pid", "for", "indexing", "." ]
a49f3725cf595b663c5b04814280b231f88bc333
https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/indexers.py#L54-L124
train
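The left/right slicing is the subtle part of index_siblings; a standalone sketch of just that bookkeeping, independent of the Invenio APIs:

def split_siblings(children, objid, include_pid=False):
    # Mirrors the slicing above: children before the pid go left,
    # children after it go right.
    idx = children.index(objid) if objid in children else len(children)
    left = children[:idx + 1] if include_pid else children[:idx]
    right = children[idx + 1:]
    return left, right

children = ['a', 'b', 'c', 'd', 'e']
print(split_siblings(children, 'c'))                    # (['a', 'b'], ['d', 'e'])
print(split_siblings(children, 'c', include_pid=True))  # (['a', 'b', 'c'], ['d', 'e'])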
brunato/lograptor
lograptor/filemap.py
GlobDict.iter_paths
def iter_paths(self, pathnames=None, mapfunc=None): """ Special iteration on paths. Yields pairs of path and items. If an expanded path doesn't match any files, a pair of the path and `None` is returned. :param pathnames: Iterable with a set of pathnames. If `None`, uses all \ the stored pathnames. :param mapfunc: A mapping function for building the effective path from various \ wildcards (e.g. time spec wildcards). :return: Yields 2-tuples. """ pathnames = pathnames or self._pathnames if self.recursive and not pathnames: pathnames = ['.'] elif not pathnames: yield [] if mapfunc is not None: for mapped_paths in map(mapfunc, pathnames): for path in mapped_paths: if self.recursive and (os.path.isdir(path) or os.path.islink(path)): for t in os.walk(path, followlinks=self.follow_symlinks): for filename, values in self.iglob(os.path.join(t[0], '*')): yield filename, values else: empty_glob = True for filename, values in self.iglob(path): yield filename, values empty_glob = False if empty_glob: yield path, None else: for path in pathnames: if self.recursive and (os.path.isdir(path) or os.path.islink(path)): for t in os.walk(path, followlinks=self.follow_symlinks): for filename, values in self.iglob(os.path.join(t[0], '*')): yield filename, values else: empty_glob = True for filename, values in self.iglob(path): yield filename, values empty_glob = False if empty_glob: yield path, None
python
def iter_paths(self, pathnames=None, mapfunc=None): """ Special iteration on paths. Yields pairs of path and items. If an expanded path doesn't match any files, a pair of the path and `None` is returned. :param pathnames: Iterable with a set of pathnames. If `None`, uses all \ the stored pathnames. :param mapfunc: A mapping function for building the effective path from various \ wildcards (e.g. time spec wildcards). :return: Yields 2-tuples. """ pathnames = pathnames or self._pathnames if self.recursive and not pathnames: pathnames = ['.'] elif not pathnames: yield [] if mapfunc is not None: for mapped_paths in map(mapfunc, pathnames): for path in mapped_paths: if self.recursive and (os.path.isdir(path) or os.path.islink(path)): for t in os.walk(path, followlinks=self.follow_symlinks): for filename, values in self.iglob(os.path.join(t[0], '*')): yield filename, values else: empty_glob = True for filename, values in self.iglob(path): yield filename, values empty_glob = False if empty_glob: yield path, None else: for path in pathnames: if self.recursive and (os.path.isdir(path) or os.path.islink(path)): for t in os.walk(path, followlinks=self.follow_symlinks): for filename, values in self.iglob(os.path.join(t[0], '*')): yield filename, values else: empty_glob = True for filename, values in self.iglob(path): yield filename, values empty_glob = False if empty_glob: yield path, None
[ "def", "iter_paths", "(", "self", ",", "pathnames", "=", "None", ",", "mapfunc", "=", "None", ")", ":", "pathnames", "=", "pathnames", "or", "self", ".", "_pathnames", "if", "self", ".", "recursive", "and", "not", "pathnames", ":", "pathnames", "=", "[", "'.'", "]", "elif", "not", "pathnames", ":", "yield", "[", "]", "if", "mapfunc", "is", "not", "None", ":", "for", "mapped_paths", "in", "map", "(", "mapfunc", ",", "pathnames", ")", ":", "for", "path", "in", "mapped_paths", ":", "if", "self", ".", "recursive", "and", "(", "os", ".", "path", ".", "isdir", "(", "path", ")", "or", "os", ".", "path", ".", "islink", "(", "path", ")", ")", ":", "for", "t", "in", "os", ".", "walk", "(", "path", ",", "followlinks", "=", "self", ".", "follow_symlinks", ")", ":", "for", "filename", ",", "values", "in", "self", ".", "iglob", "(", "os", ".", "path", ".", "join", "(", "t", "[", "0", "]", ",", "'*'", ")", ")", ":", "yield", "filename", ",", "values", "else", ":", "empty_glob", "=", "True", "for", "filename", ",", "values", "in", "self", ".", "iglob", "(", "path", ")", ":", "yield", "filename", ",", "values", "empty_glob", "=", "False", "if", "empty_glob", ":", "yield", "path", ",", "None", "else", ":", "for", "path", "in", "pathnames", ":", "if", "self", ".", "recursive", "and", "(", "os", ".", "path", ".", "isdir", "(", "path", ")", "or", "os", ".", "path", ".", "islink", "(", "path", ")", ")", ":", "for", "t", "in", "os", ".", "walk", "(", "path", ",", "followlinks", "=", "self", ".", "follow_symlinks", ")", ":", "for", "filename", ",", "values", "in", "self", ".", "iglob", "(", "os", ".", "path", ".", "join", "(", "t", "[", "0", "]", ",", "'*'", ")", ")", ":", "yield", "filename", ",", "values", "else", ":", "empty_glob", "=", "True", "for", "filename", ",", "values", "in", "self", ".", "iglob", "(", "path", ")", ":", "yield", "filename", ",", "values", "empty_glob", "=", "False", "if", "empty_glob", ":", "yield", "path", ",", "None" ]
Special iteration on paths. Yields pairs of path and items. If an expanded path doesn't match any files, a pair of the path and `None` is returned. :param pathnames: Iterable with a set of pathnames. If `None`, uses all \ the stored pathnames. :param mapfunc: A mapping function for building the effective path from various \ wildcards (e.g. time spec wildcards). :return: Yields 2-tuples.
[ "Special", "iteration", "on", "paths", ".", "Yields", "couples", "of", "path", "and", "items", ".", "If", "a", "expanded", "path", "doesn", "t", "match", "with", "any", "files", "a", "couple", "with", "path", "and", "None", "is", "returned", "." ]
b1f09fe1b429ed15110610092704ef12d253f3c9
https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/filemap.py#L111-L154
train
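The mapfunc contract is the only non-obvious part of iter_paths: it takes one pattern and returns an iterable of concrete paths. A minimal sketch of such a function; the log-path pattern is hypothetical:

from datetime import datetime, timedelta

def last_two_days(pattern):
    # Expand strftime wildcards for today and yesterday.
    now = datetime.now()
    return [(now - timedelta(days=d)).strftime(pattern) for d in (0, 1)]

print(last_two_days('/var/log/app-%Y%m%d.log'))
# e.g. ['/var/log/app-20240102.log', '/var/log/app-20240101.log']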
brunato/lograptor
lograptor/filemap.py
FileMap.check_stat
def check_stat(self, path): """ Checks logfile stat information for excluding files not in the datetime period. On Linux it's possible to check only the modification time, because file creation info is not available, so it's possible to exclude only older files. On BSD Unix systems and Windows, information about file creation date and time is available, so it's possible to exclude newer files too. """ statinfo = os.stat(path) st_mtime = datetime.fromtimestamp(statinfo.st_mtime) if platform.system() == 'Linux': check = st_mtime >= self.start_dt else: st_ctime = datetime.fromtimestamp(statinfo.st_ctime) check = st_mtime >= self.start_dt and st_ctime <= self.end_dt if not check: logger.info("file %r not in datetime period!", path) return check
python
def check_stat(self, path): """ Checks logfile stat information for excluding files not in the datetime period. On Linux it's possible to check only the modification time, because file creation info is not available, so it's possible to exclude only older files. On BSD Unix systems and Windows, information about file creation date and time is available, so it's possible to exclude newer files too. """ statinfo = os.stat(path) st_mtime = datetime.fromtimestamp(statinfo.st_mtime) if platform.system() == 'Linux': check = st_mtime >= self.start_dt else: st_ctime = datetime.fromtimestamp(statinfo.st_ctime) check = st_mtime >= self.start_dt and st_ctime <= self.end_dt if not check: logger.info("file %r not in datetime period!", path) return check
[ "def", "check_stat", "(", "self", ",", "path", ")", ":", "statinfo", "=", "os", ".", "stat", "(", "path", ")", "st_mtime", "=", "datetime", ".", "fromtimestamp", "(", "statinfo", ".", "st_mtime", ")", "if", "platform", ".", "system", "(", ")", "==", "'Linux'", ":", "check", "=", "st_mtime", ">=", "self", ".", "start_dt", "else", ":", "st_ctime", "=", "datetime", ".", "fromtimestamp", "(", "statinfo", ".", "st_ctime", ")", "check", "=", "st_mtime", ">=", "self", ".", "start_dt", "and", "st_ctime", "<=", "self", ".", "end_dt", "if", "not", "check", ":", "logger", ".", "info", "(", "\"file %r not in datetime period!\"", ",", "path", ")", "return", "check" ]
Checks logfile stat information for excluding files not in the datetime period. On Linux it's possible to check only the modification time, because file creation info is not available, so it's possible to exclude only older files. On BSD Unix systems and Windows, information about file creation date and time is available, so it's possible to exclude newer files too.
[ "Checks", "logfile", "stat", "information", "for", "excluding", "files", "not", "in", "datetime", "period", ".", "On", "Linux", "it", "s", "possible", "to", "checks", "only", "modification", "time", "because", "file", "creation", "info", "are", "not", "available", "so", "it", "s", "possible", "to", "exclude", "only", "older", "files", ".", "In", "Unix", "BSD", "systems", "and", "windows", "information", "about", "file", "creation", "date", "and", "times", "are", "available", "so", "is", "possible", "to", "exclude", "too", "newer", "files", "." ]
b1f09fe1b429ed15110610092704ef12d253f3c9
https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/filemap.py#L192-L210
train
brunato/lograptor
lograptor/filemap.py
FileMap.add
def add(self, files, items): """ Add a list of files with a reference to a list of objects. """ if isinstance(files, (str, bytes)): files = iter([files]) for pathname in files: try: values = self._filemap[pathname] except KeyError: self._filemap[pathname] = items else: values.extend(items)
python
def add(self, files, items): """ Add a list of files with a reference to a list of objects. """ if isinstance(files, (str, bytes)): files = iter([files]) for pathname in files: try: values = self._filemap[pathname] except KeyError: self._filemap[pathname] = items else: values.extend(items)
[ "def", "add", "(", "self", ",", "files", ",", "items", ")", ":", "if", "isinstance", "(", "files", ",", "(", "str", ",", "bytes", ")", ")", ":", "files", "=", "iter", "(", "[", "files", "]", ")", "for", "pathname", "in", "files", ":", "try", ":", "values", "=", "self", ".", "_filemap", "[", "pathname", "]", "except", "KeyError", ":", "self", ".", "_filemap", "[", "pathname", "]", "=", "items", "else", ":", "values", ".", "extend", "(", "items", ")" ]
Add a list of files with a reference to a list of objects.
[ "Add", "a", "list", "of", "files", "with", "a", "reference", "to", "a", "list", "of", "objects", "." ]
b1f09fe1b429ed15110610092704ef12d253f3c9
https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/filemap.py#L212-L224
train
berkeley-cocosci/Wallace
examples/rogers/experiment.py
RogersExperiment.recruit
def recruit(self): """Recruit more participants.""" participants = Participant.query.\ with_entities(Participant.status).all() # if all networks are full, close recruitment, if not self.networks(full=False): print "All networks are full, closing recruitment." self.recruiter().close_recruitment() # if anyone is still working, don't recruit elif [p for p in participants if p.status < 100]: print "People are still participating: not recruiting." # we only need to recruit if the current generation is complete elif (len([p for p in participants if p.status == 101]) % self.generation_size) == 0: print "Recruiting another generation." self.recruiter().recruit_participants(n=self.generation_size) # otherwise do nothing else: print "not recruiting."
python
def recruit(self): """Recruit more participants.""" participants = Participant.query.\ with_entities(Participant.status).all() # if all networks are full, close recruitment, if not self.networks(full=False): print "All networks are full, closing recruitment." self.recruiter().close_recruitment() # if anyone is still working, don't recruit elif [p for p in participants if p.status < 100]: print "People are still participating: not recruiting." # we only need to recruit if the current generation is complete elif (len([p for p in participants if p.status == 101]) % self.generation_size) == 0: print "Recruiting another generation." self.recruiter().recruit_participants(n=self.generation_size) # otherwise do nothing else: print "not recruiting."
[ "def", "recruit", "(", "self", ")", ":", "participants", "=", "Participant", ".", "query", ".", "with_entities", "(", "Participant", ".", "status", ")", ".", "all", "(", ")", "# if all networks are full, close recruitment,", "if", "not", "self", ".", "networks", "(", "full", "=", "False", ")", ":", "print", "\"All networks are full, closing recruitment.\"", "self", ".", "recruiter", "(", ")", ".", "close_recruitment", "(", ")", "# if anyone is still working, don't recruit", "elif", "[", "p", "for", "p", "in", "participants", "if", "p", ".", "status", "<", "100", "]", ":", "print", "\"People are still participating: not recruiting.\"", "# we only need to recruit if the current generation is complete", "elif", "(", "len", "(", "[", "p", "for", "p", "in", "participants", "if", "p", ".", "status", "==", "101", "]", ")", "%", "self", ".", "generation_size", ")", "==", "0", ":", "print", "\"Recruiting another generation.\"", "self", ".", "recruiter", "(", ")", ".", "recruit_participants", "(", "n", "=", "self", ".", "generation_size", ")", "# otherwise do nothing", "else", ":", "print", "\"not recruiting.\"" ]
Recruit more participants.
[ "Recruit", "more", "participants", "." ]
3650c0bc3b0804d0adb1d178c5eba9992babb1b0
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/examples/rogers/experiment.py#L106-L127
train
berkeley-cocosci/Wallace
examples/rogers/experiment.py
RogersExperiment.data_check
def data_check(self, participant): """Check a participant's data.""" participant_id = participant.uniqueid nodes = Node.query.filter_by(participant_id=participant_id).all() if len(nodes) != self.experiment_repeats + self.practice_repeats: print("Error: Participant has {} nodes. Data check failed" .format(len(nodes))) return False nets = [n.network_id for n in nodes] if len(nets) != len(set(nets)): print "Error: Participant participated in the same network \ multiple times. Data check failed" return False if None in [n.fitness for n in nodes]: print "Error: some of participants nodes are missing a fitness. \ Data check failed." return False if None in [n.score for n in nodes]: print "Error: some of participants nodes are missing a score. \ Data check failed" return False return True
python
def data_check(self, participant): """Check a participant's data.""" participant_id = participant.uniqueid nodes = Node.query.filter_by(participant_id=participant_id).all() if len(nodes) != self.experiment_repeats + self.practice_repeats: print("Error: Participant has {} nodes. Data check failed" .format(len(nodes))) return False nets = [n.network_id for n in nodes] if len(nets) != len(set(nets)): print "Error: Participant participated in the same network \ multiple times. Data check failed" return False if None in [n.fitness for n in nodes]: print "Error: some of participants nodes are missing a fitness. \ Data check failed." return False if None in [n.score for n in nodes]: print "Error: some of participants nodes are missing a score. \ Data check failed" return False return True
[ "def", "data_check", "(", "self", ",", "participant", ")", ":", "participant_id", "=", "participant", ".", "uniqueid", "nodes", "=", "Node", ".", "query", ".", "filter_by", "(", "participant_id", "=", "participant_id", ")", ".", "all", "(", ")", "if", "len", "(", "nodes", ")", "!=", "self", ".", "experiment_repeats", "+", "self", ".", "practice_repeats", ":", "print", "(", "\"Error: Participant has {} nodes. Data check failed\"", ".", "format", "(", "len", "(", "nodes", ")", ")", ")", "return", "False", "nets", "=", "[", "n", ".", "network_id", "for", "n", "in", "nodes", "]", "if", "len", "(", "nets", ")", "!=", "len", "(", "set", "(", "nets", ")", ")", ":", "print", "\"Error: Participant participated in the same network \\\n multiple times. Data check failed\"", "return", "False", "if", "None", "in", "[", "n", ".", "fitness", "for", "n", "in", "nodes", "]", ":", "print", "\"Error: some of participants nodes are missing a fitness. \\\n Data check failed.\"", "return", "False", "if", "None", "in", "[", "n", ".", "score", "for", "n", "in", "nodes", "]", ":", "print", "\"Error: some of participants nodes are missing a score. \\\n Data check failed\"", "return", "False", "return", "True" ]
Check a participant's data.
[ "Check", "a", "participants", "data", "." ]
3650c0bc3b0804d0adb1d178c5eba9992babb1b0
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/examples/rogers/experiment.py#L168-L194
train
berkeley-cocosci/Wallace
examples/rogers/experiment.py
RogersExperiment.add_node_to_network
def add_node_to_network(self, node, network): """Add participant's node to a network.""" network.add_node(node) node.receive() environment = network.nodes(type=Environment)[0] environment.connect(whom=node) gene = node.infos(type=LearningGene)[0].contents if (gene == "social"): prev_agents = RogersAgent.query\ .filter(and_(RogersAgent.failed == False, RogersAgent.network_id == network.id, RogersAgent.generation == node.generation - 1))\ .all() parent = random.choice(prev_agents) parent.connect(whom=node) parent.transmit(what=Meme, to_whom=node) elif (gene == "asocial"): environment.transmit(to_whom=node) else: raise ValueError("{} has invalid learning gene value of {}" .format(node, gene)) node.receive()
python
def add_node_to_network(self, node, network): """Add participant's node to a network.""" network.add_node(node) node.receive() environment = network.nodes(type=Environment)[0] environment.connect(whom=node) gene = node.infos(type=LearningGene)[0].contents if (gene == "social"): prev_agents = RogersAgent.query\ .filter(and_(RogersAgent.failed == False, RogersAgent.network_id == network.id, RogersAgent.generation == node.generation - 1))\ .all() parent = random.choice(prev_agents) parent.connect(whom=node) parent.transmit(what=Meme, to_whom=node) elif (gene == "asocial"): environment.transmit(to_whom=node) else: raise ValueError("{} has invalid learning gene value of {}" .format(node, gene)) node.receive()
[ "def", "add_node_to_network", "(", "self", ",", "node", ",", "network", ")", ":", "network", ".", "add_node", "(", "node", ")", "node", ".", "receive", "(", ")", "environment", "=", "network", ".", "nodes", "(", "type", "=", "Environment", ")", "[", "0", "]", "environment", ".", "connect", "(", "whom", "=", "node", ")", "gene", "=", "node", ".", "infos", "(", "type", "=", "LearningGene", ")", "[", "0", "]", ".", "contents", "if", "(", "gene", "==", "\"social\"", ")", ":", "prev_agents", "=", "RogersAgent", ".", "query", ".", "filter", "(", "and_", "(", "RogersAgent", ".", "failed", "==", "False", ",", "RogersAgent", ".", "network_id", "==", "network", ".", "id", ",", "RogersAgent", ".", "generation", "==", "node", ".", "generation", "-", "1", ")", ")", ".", "all", "(", ")", "parent", "=", "random", ".", "choice", "(", "prev_agents", ")", "parent", ".", "connect", "(", "whom", "=", "node", ")", "parent", ".", "transmit", "(", "what", "=", "Meme", ",", "to_whom", "=", "node", ")", "elif", "(", "gene", "==", "\"asocial\"", ")", ":", "environment", ".", "transmit", "(", "to_whom", "=", "node", ")", "else", ":", "raise", "ValueError", "(", "\"{} has invalid learning gene value of {}\"", ".", "format", "(", "node", ",", "gene", ")", ")", "node", ".", "receive", "(", ")" ]
Add participant's node to a network.
[ "Add", "participant", "s", "node", "to", "a", "network", "." ]
3650c0bc3b0804d0adb1d178c5eba9992babb1b0
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/examples/rogers/experiment.py#L196-L219
train
berkeley-cocosci/Wallace
examples/rogers/experiment.py
RogersEnvironment.create_state
def create_state(self, proportion): """Create an environmental state.""" if random.random() < 0.5: proportion = 1 - proportion State(origin=self, contents=proportion)
python
def create_state(self, proportion): """Create an environmental state.""" if random.random() < 0.5: proportion = 1 - proportion State(origin=self, contents=proportion)
[ "def", "create_state", "(", "self", ",", "proportion", ")", ":", "if", "random", ".", "random", "(", ")", "<", "0.5", ":", "proportion", "=", "1", "-", "proportion", "State", "(", "origin", "=", "self", ",", "contents", "=", "proportion", ")" ]
Create an environmental state.
[ "Create", "an", "environmental", "state", "." ]
3650c0bc3b0804d0adb1d178c5eba9992babb1b0
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/examples/rogers/experiment.py#L363-L367
train
berkeley-cocosci/Wallace
examples/rogers/experiment.py
RogersEnvironment.step
def step(self): """Prompt the environment to change.""" current_state = max(self.infos(type=State), key=attrgetter('creation_time')) current_contents = float(current_state.contents) new_contents = 1 - current_contents info_out = State(origin=self, contents=new_contents) transformations.Mutation(info_in=current_state, info_out=info_out)
python
def step(self): """Prompt the environment to change.""" current_state = max(self.infos(type=State), key=attrgetter('creation_time')) current_contents = float(current_state.contents) new_contents = 1 - current_contents info_out = State(origin=self, contents=new_contents) transformations.Mutation(info_in=current_state, info_out=info_out)
[ "def", "step", "(", "self", ")", ":", "current_state", "=", "max", "(", "self", ".", "infos", "(", "type", "=", "State", ")", ",", "key", "=", "attrgetter", "(", "'creation_time'", ")", ")", "current_contents", "=", "float", "(", "current_state", ".", "contents", ")", "new_contents", "=", "1", "-", "current_contents", "info_out", "=", "State", "(", "origin", "=", "self", ",", "contents", "=", "new_contents", ")", "transformations", ".", "Mutation", "(", "info_in", "=", "current_state", ",", "info_out", "=", "info_out", ")" ]
Prompt the environment to change.
[ "Prompt", "the", "environment", "to", "change", "." ]
3650c0bc3b0804d0adb1d178c5eba9992babb1b0
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/examples/rogers/experiment.py#L369-L376
train
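To make the binary environment in the two records above concrete, here is a standalone sketch (plain Python, not the Wallace classes; names are illustrative): create_state starts the proportion on a random side of 0.5, and each call to step flips it to 1 - p, which the real code records as a Mutation transformation.

import random

def create_state(proportion):
    # start on a random side of 0.5, as RogersEnvironment.create_state does
    if random.random() < 0.5:
        proportion = 1 - proportion
    return proportion

state = create_state(0.6)        # becomes 0.6 or 0.4
for _ in range(3):
    state = 1 - state            # the flip that step() records as a Mutation
    print(state)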
Kortemme-Lab/klab
klab/cluster/python_script_template.py
print_subprocess_output
def print_subprocess_output(subp): '''Prints the stdout and stderr output.''' if subp: if subp.errorcode != 0: print('<error errorcode="%s">' % str(subp.errorcode)) print(subp.stderr) print("</error>") print_tag('stdout', '\n%s\n' % subp.stdout) else: print_tag('success', '\n%s\n' % subp.stdout) print_tag('warnings', '\n%s\n' % subp.stderr)
python
def print_subprocess_output(subp): '''Prints the stdout and stderr output.''' if subp: if subp.errorcode != 0: print('<error errorcode="%s">' % str(subp.errorcode)) print(subp.stderr) print("</error>") print_tag('stdout', '\n%s\n' % subp.stdout) else: print_tag('success', '\n%s\n' % subp.stdout) print_tag('warnings', '\n%s\n' % subp.stderr)
[ "def", "print_subprocess_output", "(", "subp", ")", ":", "if", "subp", ":", "if", "subp", ".", "errorcode", "!=", "0", ":", "print", "(", "'<error errorcode=\"%s\">'", "%", "str", "(", "subp", ".", "errorcode", ")", ")", "print", "(", "subp", ".", "stderr", ")", "print", "(", "\"</error>\"", ")", "print_tag", "(", "'stdout'", ",", "'\\n%s\\n'", "%", "subp", ".", "stdout", ")", "else", ":", "print_tag", "(", "'success'", ",", "'\\n%s\\n'", "%", "subp", ".", "stdout", ")", "print_tag", "(", "'warnings'", ",", "'\\n%s\\n'", "%", "subp", ".", "stderr", ")" ]
Prints the stdout and stderr output.
[ "Prints", "the", "stdout", "and", "stderr", "output", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/cluster/python_script_template.py#L48-L58
train
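A quick standalone check of the tag layout produced above; SimpleNamespace stands in for klab's subprocess wrapper, and print_tag is a minimal stand-in for the helper defined elsewhere in the same script template (both stand-ins are assumptions for illustration):

from types import SimpleNamespace

def print_tag(tag, content):
    # minimal stand-in for the print_tag helper used by the template
    print('<%s>%s</%s>' % (tag, content, tag))

def report(subp):
    # same logic as print_subprocess_output above
    if subp:
        if subp.errorcode != 0:
            print('<error errorcode="%s">' % str(subp.errorcode))
            print(subp.stderr)
            print('</error>')
            print_tag('stdout', '\n%s\n' % subp.stdout)
        else:
            print_tag('success', '\n%s\n' % subp.stdout)
            print_tag('warnings', '\n%s\n' % subp.stderr)

report(SimpleNamespace(errorcode=0, stdout='done', stderr=''))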
Alveo/pyalveo
pyalveo/objects.py
ItemGroup.get_all
def get_all(self, force_download=False): """ Retrieve the metadata for all items in this list from the server, as Item objects :rtype: List :returns: a List of the corresponding Item objects :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :raises: APIError if the API request is not successful """ cl = self.client return [cl.get_item(item, force_download) for item in self.item_urls]
python
def get_all(self, force_download=False): """ Retrieve the metadata for all items in this list from the server, as Item objects :rtype: List :returns: a List of the corresponding Item objects :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :raises: APIError if the API request is not successful """ cl = self.client return [cl.get_item(item, force_download) for item in self.item_urls]
[ "def", "get_all", "(", "self", ",", "force_download", "=", "False", ")", ":", "cl", "=", "self", ".", "client", "return", "[", "cl", ".", "get_item", "(", "item", ",", "force_download", ")", "for", "item", "in", "self", ".", "item_urls", "]" ]
Retrieve the metadata for all items in this list from the server, as Item objects :rtype: List :returns: a List of the corresponding Item objects :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :raises: APIError if the API request is not successful
[ "Retrieve", "the", "metadata", "for", "all", "items", "in", "this", "list", "from", "the", "server", "as", "Item", "objects" ]
1e9eec22bc031bc9a08066f9966565a546e6242e
https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/objects.py#L170-L185
train
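A minimal usage sketch for get_all(), assuming a configured pyalveo Client; the API key, server URL, and item-list URL are placeholders, and the exact Client constructor arguments may differ between pyalveo versions:

import pyalveo

# placeholder credentials/URLs -- substitute real Alveo values
client = pyalveo.Client(api_key='YOUR_KEY',
                        api_url='https://app.alveo.edu.au')
item_list = client.get_item_list('https://app.alveo.edu.au/item_lists/1')
for item in item_list.get_all():     # one Item object per URL in the list
    print(item.url())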
Alveo/pyalveo
pyalveo/objects.py
ItemGroup.get_item
def get_item(self, item_index, force_download=False): """ Retrieve the metadata for a specific item in this ItemGroup :type item_index: int :param item_index: the index of the item :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :rtype: Item :returns: the metadata, as an Item object :raises: APIError if the API request is not successful """ return self.client.get_item(self.item_urls[item_index], force_download)
python
def get_item(self, item_index, force_download=False): """ Retrieve the metadata for a specific item in this ItemGroup :type item_index: int :param item_index: the index of the item :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :rtype: Item :returns: the metadata, as an Item object :raises: APIError if the API request is not successful """ return self.client.get_item(self.item_urls[item_index], force_download)
[ "def", "get_item", "(", "self", ",", "item_index", ",", "force_download", "=", "False", ")", ":", "return", "self", ".", "client", ".", "get_item", "(", "self", ".", "item_urls", "[", "item_index", "]", ",", "force_download", ")" ]
Retrieve the metadata for a specific item in this ItemGroup :type item_index: int :param item_index: the index of the item :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :rtype: Item :returns: the metadata, as an Item object :raises: APIError if the API request is not successful
[ "Retrieve", "the", "metadata", "for", "a", "specific", "item", "in", "this", "ItemGroup" ]
1e9eec22bc031bc9a08066f9966565a546e6242e
https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/objects.py#L230-L246
train
Alveo/pyalveo
pyalveo/objects.py
ItemList.refresh
def refresh(self): """ Update this ItemList by re-downloading it from the server :rtype: ItemList :returns: this ItemList, after the refresh :raises: APIError if the API request is not successful """ refreshed = self.client.get_item_list(self.url()) self.item_urls = refreshed.urls() self.list_name = refreshed.name() return self
python
def refresh(self): """ Update this ItemList by re-downloading it from the server :rtype: ItemList :returns: this ItemList, after the refresh :raises: APIError if the API request is not successful """ refreshed = self.client.get_item_list(self.url()) self.item_urls = refreshed.urls() self.list_name = refreshed.name() return self
[ "def", "refresh", "(", "self", ")", ":", "refreshed", "=", "self", ".", "client", ".", "get_item_list", "(", "self", ".", "url", "(", ")", ")", "self", ".", "item_urls", "=", "refreshed", ".", "urls", "(", ")", "self", ".", "list_name", "=", "refreshed", ".", "name", "(", ")", "return", "self" ]
Update this ItemList by re-downloading it from the server :rtype: ItemList :returns: this ItemList, after the refresh :raises: APIError if the API request is not successful
[ "Update", "this", "ItemList", "by", "re", "-", "downloading", "it", "from", "the", "server" ]
1e9eec22bc031bc9a08066f9966565a546e6242e
https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/objects.py#L345-L358
train
Alveo/pyalveo
pyalveo/objects.py
ItemList.append
def append(self, items): """ Add some items to this ItemList and save the changes to the server :param items: the items to add, either as a List of Item objects, an ItemList, a List of item URLs as Strings, a single item URL as a String, or a single Item object :rtype: String :returns: the server success message :raises: APIError if the API request is not successful """ resp = self.client.add_to_item_list(items, self.url()) self.refresh() return resp
python
def append(self, items): """ Add some items to this ItemList and save the changes to the server :param items: the items to add, either as a List of Item objects, an ItemList, a List of item URLs as Strings, a single item URL as a String, or a single Item object :rtype: String :returns: the server success message :raises: APIError if the API request is not successful """ resp = self.client.add_to_item_list(items, self.url()) self.refresh() return resp
[ "def", "append", "(", "self", ",", "items", ")", ":", "resp", "=", "self", ".", "client", ".", "add_to_item_list", "(", "items", ",", "self", ".", "url", "(", ")", ")", "self", ".", "refresh", "(", ")", "return", "resp" ]
Add some items to this ItemList and save the changes to the server :param items: the items to add, either as a List of Item objects, an ItemList, a List of item URLs as Strings, a single item URL as a String, or a single Item object :rtype: String :returns: the server success message :raises: APIError if the API request is not successful
[ "Add", "some", "items", "to", "this", "ItemList", "and", "save", "the", "changes", "to", "the", "server" ]
1e9eec22bc031bc9a08066f9966565a546e6242e
https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/objects.py#L361-L377
train
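A sketch of append() under the same assumed client setup: the server is updated first, then refresh() pulls the new state back, so item_urls reflects the change immediately afterwards. URLs are placeholders:

import pyalveo

client = pyalveo.Client(api_key='YOUR_KEY',
                        api_url='https://app.alveo.edu.au')
item_list = client.get_item_list('https://app.alveo.edu.au/item_lists/1')
msg = item_list.append('https://app.alveo.edu.au/catalog/demo/item-1')
print(msg)                          # server success message
print(len(item_list.item_urls))     # already refreshed from the server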
Alveo/pyalveo
pyalveo/objects.py
Item.get_document
def get_document(self, index=0): """ Return the metadata for the specified document, as a Document object :type index: int :param index: the index of the document :rtype: Document :returns: the metadata for the specified document """ try: return Document(self.metadata()['alveo:documents'][index], self.client) except IndexError: raise ValueError('No document exists for this item with index: ' + str(index))
python
def get_document(self, index=0): """ Return the metadata for the specified document, as a Document object :type index: int :param index: the index of the document :rtype: Document :returns: the metadata for the specified document """ try: return Document(self.metadata()['alveo:documents'][index], self.client) except IndexError: raise ValueError('No document exists for this item with index: ' + str(index))
[ "def", "get_document", "(", "self", ",", "index", "=", "0", ")", ":", "try", ":", "return", "Document", "(", "self", ".", "metadata", "(", ")", "[", "'alveo:documents'", "]", "[", "index", "]", ",", "self", ".", "client", ")", "except", "IndexError", ":", "raise", "ValueError", "(", "'No document exists for this item with index: '", "+", "str", "(", "index", ")", ")" ]
Return the metadata for the specified document, as a Document object :type index: int :param index: the index of the document :rtype: Document :returns: the metadata for the specified document
[ "Return", "the", "metadata", "for", "the", "specified", "document", "as", "a", "Document", "object" ]
1e9eec22bc031bc9a08066f9966565a546e6242e
https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/objects.py#L462-L478
train
Alveo/pyalveo
pyalveo/objects.py
Item.get_primary_text
def get_primary_text(self, force_download=False): """ Retrieve the primary text for this item from the server :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :rtype: String :returns: the primary text :raises: APIError if the API request is not successful """ return self.client.get_primary_text(self.url(), force_download)
python
def get_primary_text(self, force_download=False): """ Retrieve the primary text for this item from the server :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :rtype: String :returns: the primary text :raises: APIError if the API request is not successful """ return self.client.get_primary_text(self.url(), force_download)
[ "def", "get_primary_text", "(", "self", ",", "force_download", "=", "False", ")", ":", "return", "self", ".", "client", ".", "get_primary_text", "(", "self", ".", "url", "(", ")", ",", "force_download", ")" ]
Retrieve the primary text for this item from the server :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :rtype: String :returns: the primary text :raises: APIError if the API request is not successful
[ "Retrieve", "the", "primary", "text", "for", "this", "item", "from", "the", "server" ]
1e9eec22bc031bc9a08066f9966565a546e6242e
https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/objects.py#L481-L495
train
Alveo/pyalveo
pyalveo/objects.py
Item.get_annotations
def get_annotations(self, atype=None, label=None): """ Retrieve the annotations for this item from the server :type atype: String :param atype: return only results with a matching Type field :type label: String :param label: return only results with a matching Label field :rtype: String :returns: the annotations as a JSON string :raises: APIError if the API request is not successful """ return self.client.get_item_annotations(self.url(), atype, label)
python
def get_annotations(self, atype=None, label=None): """ Retrieve the annotations for this item from the server :type atype: String :param atype: return only results with a matching Type field :type label: String :param label: return only results with a matching Label field :rtype: String :returns: the annotations as a JSON string :raises: APIError if the API request is not successful """ return self.client.get_item_annotations(self.url(), atype, label)
[ "def", "get_annotations", "(", "self", ",", "atype", "=", "None", ",", "label", "=", "None", ")", ":", "return", "self", ".", "client", ".", "get_item_annotations", "(", "self", ".", "url", "(", ")", ",", "atype", ",", "label", ")" ]
Retrieve the annotations for this item from the server :type atype: String :param atype: return only results with a matching Type field :type label: String :param label: return only results with a matching Label field :rtype: String :returns: the annotations as a JSON string :raises: APIError if the API request is not successful
[ "Retrieve", "the", "annotations", "for", "this", "item", "from", "the", "server" ]
1e9eec22bc031bc9a08066f9966565a546e6242e
https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/objects.py#L498-L513
train
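Sketch of fetching and parsing annotations under the same assumed client setup; the docstring above says a JSON string comes back, so json.loads turns it into Python data. The item URL and atype value are placeholders:

import json
import pyalveo

client = pyalveo.Client(api_key='YOUR_KEY',
                        api_url='https://app.alveo.edu.au')
item = client.get_item('https://app.alveo.edu.au/catalog/demo/item-1')
raw = item.get_annotations(atype='speaker')   # placeholder Type filter
print(json.loads(raw))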
Alveo/pyalveo
pyalveo/objects.py
Document.get_content
def get_content(self, force_download=False): """ Retrieve the content for this Document from the server :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :rtype: String :returns: the content data :raises: APIError if the API request is not successful """ return self.client.get_document(self.url(), force_download)
python
def get_content(self, force_download=False): """ Retrieve the content for this Document from the server :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :rtype: String :returns: the content data :raises: APIError if the API request is not successful """ return self.client.get_document(self.url(), force_download)
[ "def", "get_content", "(", "self", ",", "force_download", "=", "False", ")", ":", "return", "self", ".", "client", ".", "get_document", "(", "self", ".", "url", "(", ")", ",", "force_download", ")" ]
Retrieve the content for this Document from the server :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :rtype: String :returns: the content data :raises: APIError if the API request is not successful
[ "Retrieve", "the", "content", "for", "this", "Document", "from", "the", "server" ]
1e9eec22bc031bc9a08066f9966565a546e6242e
https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/objects.py#L702-L716
train
Alveo/pyalveo
pyalveo/objects.py
Document.download_content
def download_content(self, dir_path='', filename=None, force_download=False): """ Download the content for this document to a file :type dir_path: String :param dir_path: the path to which to write the data :type filename: String :param filename: filename to write to (if None, defaults to the document's name, as specified by its metadata) :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :rtype: String :returns: the path to the downloaded file :raises: APIError if the API request is not successful """ if filename is None: filename = self.get_filename() path = os.path.join(dir_path, filename) data = self.client.get_document(self.url(), force_download) with open(path, 'wb') as f: f.write(data) return path
python
def download_content(self, dir_path='', filename=None, force_download=False): """ Download the content for this document to a file :type dir_path: String :param dir_path: the path to which to write the data :type filename: String :param filename: filename to write to (if None, defaults to the document's name, as specified by its metadata) :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :rtype: String :returns: the path to the downloaded file :raises: APIError if the API request is not successful """ if filename is None: filename = self.get_filename() path = os.path.join(dir_path, filename) data = self.client.get_document(self.url(), force_download) with open(path, 'wb') as f: f.write(data) return path
[ "def", "download_content", "(", "self", ",", "dir_path", "=", "''", ",", "filename", "=", "None", ",", "force_download", "=", "False", ")", ":", "if", "filename", "is", "None", ":", "filename", "=", "self", ".", "get_filename", "(", ")", "path", "=", "os", ".", "path", ".", "join", "(", "dir_path", ",", "filename", ")", "data", "=", "self", ".", "client", ".", "get_document", "(", "self", ".", "url", "(", ")", ",", "force_download", ")", "with", "open", "(", "path", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "data", ")", "return", "path" ]
Download the content for this document to a file :type dir_path: String :param dir_path: the path to which to write the data :type filename: String :param filename: filename to write to (if None, defaults to the document's name, as specified by its metadata) :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :rtype: String :returns: the path to the downloaded file :raises: APIError if the API request is not successful
[ "Download", "the", "content", "for", "this", "document", "to", "a", "file" ]
1e9eec22bc031bc9a08066f9966565a546e6242e
https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/objects.py#L730-L756
train
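Sketch of download_content() with the same assumed client; note the target directory must exist, since the method opens os.path.join(dir_path, filename) directly. URLs are placeholders:

import os
import pyalveo

client = pyalveo.Client(api_key='YOUR_KEY',
                        api_url='https://app.alveo.edu.au')
item = client.get_item('https://app.alveo.edu.au/catalog/demo/item-1')
doc = item.get_document(0)
os.makedirs('downloads', exist_ok=True)   # download_content won't create it
path = doc.download_content(dir_path='downloads')  # filename from metadata
print('saved to', path)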
peergradeio/flask-mongo-profiler
flask_mongo_profiler/contrib/flask_admin/formatters/polymorphic_relations.py
generic_ref_formatter
def generic_ref_formatter(view, context, model, name, lazy=False): """ For GenericReferenceField and LazyGenericReferenceField See Also -------- diff_formatter """ try: if lazy: rel_model = getattr(model, name).fetch() else: rel_model = getattr(model, name) except (mongoengine.DoesNotExist, AttributeError) as e: # custom_field_type_formatters seems to fix the issue of stale references # crashing pages, since it intercepts the display of all ReferenceField's. return Markup( '<span class="label label-danger">Error</span> <small>%s</small>' % e ) if rel_model is None: return '' try: return Markup( '<a href="%s">%s</a>' % ( url_for( # Flask-Admin creates URL's namespaced w/ model class name, lowercase. '%s.details_view' % rel_model.__class__.__name__.lower(), id=rel_model.id, ), rel_model, ) ) except werkzeug.routing.BuildError as e: return Markup( '<span class="label label-danger">Error</span> <small>%s</small>' % e )
python
def generic_ref_formatter(view, context, model, name, lazy=False): """ For GenericReferenceField and LazyGenericReferenceField See Also -------- diff_formatter """ try: if lazy: rel_model = getattr(model, name).fetch() else: rel_model = getattr(model, name) except (mongoengine.DoesNotExist, AttributeError) as e: # custom_field_type_formatters seems to fix the issue of stale references # crashing pages, since it intercepts the display of all ReferenceField's. return Markup( '<span class="label label-danger">Error</span> <small>%s</small>' % e ) if rel_model is None: return '' try: return Markup( '<a href="%s">%s</a>' % ( url_for( # Flask-Admin creates URL's namespaced w/ model class name, lowercase. '%s.details_view' % rel_model.__class__.__name__.lower(), id=rel_model.id, ), rel_model, ) ) except werkzeug.routing.BuildError as e: return Markup( '<span class="label label-danger">Error</span> <small>%s</small>' % e )
[ "def", "generic_ref_formatter", "(", "view", ",", "context", ",", "model", ",", "name", ",", "lazy", "=", "False", ")", ":", "try", ":", "if", "lazy", ":", "rel_model", "=", "getattr", "(", "model", ",", "name", ")", ".", "fetch", "(", ")", "else", ":", "rel_model", "=", "getattr", "(", "model", ",", "name", ")", "except", "(", "mongoengine", ".", "DoesNotExist", ",", "AttributeError", ")", "as", "e", ":", "# custom_field_type_formatters seems to fix the issue of stale references", "# crashing pages, since it intercepts the display of all ReferenceField's.", "return", "Markup", "(", "'<span class=\"label label-danger\">Error</span> <small>%s</small>'", "%", "e", ")", "if", "rel_model", "is", "None", ":", "return", "''", "try", ":", "return", "Markup", "(", "'<a href=\"%s\">%s</a>'", "%", "(", "url_for", "(", "# Flask-Admin creates URL's namespaced w/ model class name, lowercase.", "'%s.details_view'", "%", "rel_model", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", ",", "id", "=", "rel_model", ".", "id", ",", ")", ",", "rel_model", ",", ")", ")", "except", "werkzeug", ".", "routing", ".", "BuildError", "as", "e", ":", "return", "Markup", "(", "'<span class=\"label label-danger\">Error</span> <small>%s</small>'", "%", "e", ")" ]
For GenericReferenceField and LazyGenericReferenceField See Also -------- diff_formatter
[ "For", "GenericReferenceField", "and", "LazyGenericReferenceField" ]
a267eeb49fea07c9a24fb370bd9d7a90ed313ccf
https://github.com/peergradeio/flask-mongo-profiler/blob/a267eeb49fea07c9a24fb370bd9d7a90ed313ccf/flask_mongo_profiler/contrib/flask_admin/formatters/polymorphic_relations.py#L27-L65
train
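How the formatter above might be wired into a Flask-Admin view; the view class and the 'user' field are illustrative, but the (view, context, model, name) calling convention matches Flask-Admin's column_formatters:

from flask_admin.contrib.mongoengine import ModelView
from flask_mongo_profiler.contrib.flask_admin.formatters.polymorphic_relations \
    import generic_ref_formatter

class AdminLogView(ModelView):
    column_formatters = {
        # 'user' is a hypothetical (Lazy)GenericReferenceField on the model
        'user': lambda v, c, m, n: generic_ref_formatter(v, c, m, n, lazy=True),
    }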
peergradeio/flask-mongo-profiler
flask_mongo_profiler/contrib/flask_admin/formatters/polymorphic_relations.py
generic_document_type_formatter
def generic_document_type_formatter(view, context, model, name): """Return AdminLog.document field wrapped in URL to its list view.""" _document_model = model.get('document').document_type url = _document_model.get_admin_list_url() return Markup('<a href="%s">%s</a>' % (url, _document_model.__name__))
python
def generic_document_type_formatter(view, context, model, name): """Return AdminLog.document field wrapped in URL to its list view.""" _document_model = model.get('document').document_type url = _document_model.get_admin_list_url() return Markup('<a href="%s">%s</a>' % (url, _document_model.__name__))
[ "def", "generic_document_type_formatter", "(", "view", ",", "context", ",", "model", ",", "name", ")", ":", "_document_model", "=", "model", ".", "get", "(", "'document'", ")", ".", "document_type", "url", "=", "_document_model", ".", "get_admin_list_url", "(", ")", "return", "Markup", "(", "'<a href=\"%s\">%s</a>'", "%", "(", "url", ",", "_document_model", ".", "__name__", ")", ")" ]
Return AdminLog.document field wrapped in URL to its list view.
[ "Return", "AdminLog", ".", "document", "field", "wrapped", "in", "URL", "to", "its", "list", "view", "." ]
a267eeb49fea07c9a24fb370bd9d7a90ed313ccf
https://github.com/peergradeio/flask-mongo-profiler/blob/a267eeb49fea07c9a24fb370bd9d7a90ed313ccf/flask_mongo_profiler/contrib/flask_admin/formatters/polymorphic_relations.py#L77-L81
train
berkeley-cocosci/Wallace
wallace/custom.py
return_page
def return_page(page): """Return a rendered template.""" try: hit_id = request.args['hit_id'] assignment_id = request.args['assignment_id'] worker_id = request.args['worker_id'] mode = request.args['mode'] return render_template( page, hit_id=hit_id, assignment_id=assignment_id, worker_id=worker_id, mode=mode ) except: try: participant_id = request.args['participant_id'] return render_template(page, participant_id=participant_id) except: return error_response(error_type="{} args missing".format(page))
python
def return_page(page): """Return a rendered template.""" try: hit_id = request.args['hit_id'] assignment_id = request.args['assignment_id'] worker_id = request.args['worker_id'] mode = request.args['mode'] return render_template( page, hit_id=hit_id, assignment_id=assignment_id, worker_id=worker_id, mode=mode ) except: try: participant_id = request.args['participant_id'] return render_template(page, participant_id=participant_id) except: return error_response(error_type="{} args missing".format(page))
[ "def", "return_page", "(", "page", ")", ":", "try", ":", "hit_id", "=", "request", ".", "args", "[", "'hit_id'", "]", "assignment_id", "=", "request", ".", "args", "[", "'assignment_id'", "]", "worker_id", "=", "request", ".", "args", "[", "'worker_id'", "]", "mode", "=", "request", ".", "args", "[", "'mode'", "]", "return", "render_template", "(", "page", ",", "hit_id", "=", "hit_id", ",", "assignment_id", "=", "assignment_id", ",", "worker_id", "=", "worker_id", ",", "mode", "=", "mode", ")", "except", ":", "try", ":", "participant_id", "=", "request", ".", "args", "[", "'participant_id'", "]", "return", "render_template", "(", "page", ",", "participant_id", "=", "participant_id", ")", "except", ":", "return", "error_response", "(", "error_type", "=", "\"{} args missing\"", ".", "format", "(", "page", ")", ")" ]
Return a rendered template.
[ "Return", "a", "rendered", "template", "." ]
3650c0bc3b0804d0adb1d178c5eba9992babb1b0
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/custom.py#L120-L139
train
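A sketch of how a route might delegate to return_page; the blueprint and template names are illustrative, not Wallace's actual routes:

from flask import Blueprint
from wallace.custom import return_page   # module per the record above

custom_code = Blueprint('custom_code', __name__)   # illustrative blueprint

@custom_code.route('/consent')
def consent():
    # hit as /consent?hit_id=...&assignment_id=...&worker_id=...&mode=debug
    return return_page('consent.html')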
berkeley-cocosci/Wallace
wallace/custom.py
quitter
def quitter(): """Override the psiTurk quitter route.""" exp = experiment(session) exp.log("Quitter route was hit.") return Response( dumps({"status": "success"}), status=200, mimetype='application/json')
python
def quitter(): """Override the psiTurk quitter route.""" exp = experiment(session) exp.log("Quitter route was hit.") return Response( dumps({"status": "success"}), status=200, mimetype='application/json')
[ "def", "quitter", "(", ")", ":", "exp", "=", "experiment", "(", "session", ")", "exp", ".", "log", "(", "\"Quitter route was hit.\"", ")", "return", "Response", "(", "dumps", "(", "{", "\"status\"", ":", "\"success\"", "}", ")", ",", "status", "=", "200", ",", "mimetype", "=", "'application/json'", ")" ]
Override the psiTurk quitter route.
[ "Overide", "the", "psiTurk", "quitter", "route", "." ]
3650c0bc3b0804d0adb1d178c5eba9992babb1b0
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/custom.py#L228-L236
train
berkeley-cocosci/Wallace
wallace/custom.py
ad_address
def ad_address(mode, hit_id): """Get the address of the ad on AWS. This is used at the end of the experiment to send participants back to AWS where they can complete and submit the HIT. """ if mode == "debug": address = '/complete' elif mode in ["sandbox", "live"]: username = os.getenv('psiturk_access_key_id', config.get("psiTurk Access", "psiturk_access_key_id")) password = os.getenv('psiturk_secret_access_id', config.get("psiTurk Access", "psiturk_secret_access_id")) try: req = requests.get( 'https://api.psiturk.org/api/ad/lookup/' + hit_id, auth=(username, password)) except: raise ValueError('api_server_not_reachable') else: if req.status_code == 200: hit_address = req.json()['ad_id'] else: raise ValueError("something here") if mode == "sandbox": address = ('https://sandbox.ad.psiturk.org/complete/' + str(hit_address)) elif mode == "live": address = 'https://ad.psiturk.org/complete/' + str(hit_address) else: raise ValueError("Unknown mode: {}".format(mode)) return success_response(field="address", data=address, request_type="ad_address")
python
def ad_address(mode, hit_id): """Get the address of the ad on AWS. This is used at the end of the experiment to send participants back to AWS where they can complete and submit the HIT. """ if mode == "debug": address = '/complete' elif mode in ["sandbox", "live"]: username = os.getenv('psiturk_access_key_id', config.get("psiTurk Access", "psiturk_access_key_id")) password = os.getenv('psiturk_secret_access_id', config.get("psiTurk Access", "psiturk_secret_access_id")) try: req = requests.get( 'https://api.psiturk.org/api/ad/lookup/' + hit_id, auth=(username, password)) except: raise ValueError('api_server_not_reachable') else: if req.status_code == 200: hit_address = req.json()['ad_id'] else: raise ValueError("something here") if mode == "sandbox": address = ('https://sandbox.ad.psiturk.org/complete/' + str(hit_address)) elif mode == "live": address = 'https://ad.psiturk.org/complete/' + str(hit_address) else: raise ValueError("Unknown mode: {}".format(mode)) return success_response(field="address", data=address, request_type="ad_address")
[ "def", "ad_address", "(", "mode", ",", "hit_id", ")", ":", "if", "mode", "==", "\"debug\"", ":", "address", "=", "'/complete'", "elif", "mode", "in", "[", "\"sandbox\"", ",", "\"live\"", "]", ":", "username", "=", "os", ".", "getenv", "(", "'psiturk_access_key_id'", ",", "config", ".", "get", "(", "\"psiTurk Access\"", ",", "\"psiturk_access_key_id\"", ")", ")", "password", "=", "os", ".", "getenv", "(", "'psiturk_secret_access_id'", ",", "config", ".", "get", "(", "\"psiTurk Access\"", ",", "\"psiturk_secret_access_id\"", ")", ")", "try", ":", "req", "=", "requests", ".", "get", "(", "'https://api.psiturk.org/api/ad/lookup/'", "+", "hit_id", ",", "auth", "=", "(", "username", ",", "password", ")", ")", "except", ":", "raise", "ValueError", "(", "'api_server_not_reachable'", ")", "else", ":", "if", "req", ".", "status_code", "==", "200", ":", "hit_address", "=", "req", ".", "json", "(", ")", "[", "'ad_id'", "]", "else", ":", "raise", "ValueError", "(", "\"something here\"", ")", "if", "mode", "==", "\"sandbox\"", ":", "address", "=", "(", "'https://sandbox.ad.psiturk.org/complete/'", "+", "str", "(", "hit_address", ")", ")", "elif", "mode", "==", "\"live\"", ":", "address", "=", "'https://ad.psiturk.org/complete/'", "+", "str", "(", "hit_address", ")", "else", ":", "raise", "ValueError", "(", "\"Unknown mode: {}\"", ".", "format", "(", "mode", ")", ")", "return", "success_response", "(", "field", "=", "\"address\"", ",", "data", "=", "address", ",", "request_type", "=", "\"ad_address\"", ")" ]
Get the address of the ad on AWS. This is used at the end of the experiment to send participants back to AWS where they can complete and submit the HIT.
[ "Get", "the", "address", "of", "the", "ad", "on", "AWS", "." ]
3650c0bc3b0804d0adb1d178c5eba9992babb1b0
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/custom.py#L248-L283
train
berkeley-cocosci/Wallace
wallace/custom.py
connect
def connect(node_id, other_node_id): """Connect to another node. The ids of both nodes must be specified in the url. You can also pass direction (to/from/both) as an argument. """ exp = experiment(session) # get the parameters direction = request_parameter(parameter="direction", default="to") if type(direction) == Response: return direction # check the nodes exist node = models.Node.query.get(node_id) if node is None: return error_response(error_type="/node/connect, node does not exist") other_node = models.Node.query.get(other_node_id) if other_node is None: return error_response( error_type="/node/connect, other node does not exist", participant=node.participant) # execute the request try: vectors = node.connect(whom=other_node, direction=direction) for v in vectors: assign_properties(v) # ping the experiment exp.vector_post_request( node=node, vectors=vectors) session.commit() except: return error_response(error_type="/vector POST server error", status=403, participant=node.participant) return success_response(field="vectors", data=[v.__json__() for v in vectors], request_type="vector post")
python
def connect(node_id, other_node_id): """Connect to another node. The ids of both nodes must be specified in the url. You can also pass direction (to/from/both) as an argument. """ exp = experiment(session) # get the parameters direction = request_parameter(parameter="direction", default="to") if type(direction) == Response: return direction # check the nodes exist node = models.Node.query.get(node_id) if node is None: return error_response(error_type="/node/connect, node does not exist") other_node = models.Node.query.get(other_node_id) if other_node is None: return error_response( error_type="/node/connect, other node does not exist", participant=node.participant) # execute the request try: vectors = node.connect(whom=other_node, direction=direction) for v in vectors: assign_properties(v) # ping the experiment exp.vector_post_request( node=node, vectors=vectors) session.commit() except: return error_response(error_type="/vector POST server error", status=403, participant=node.participant) return success_response(field="vectors", data=[v.__json__() for v in vectors], request_type="vector post")
[ "def", "connect", "(", "node_id", ",", "other_node_id", ")", ":", "exp", "=", "experiment", "(", "session", ")", "# get the parameters", "direction", "=", "request_parameter", "(", "parameter", "=", "\"direction\"", ",", "default", "=", "\"to\"", ")", "if", "type", "(", "direction", "==", "Response", ")", ":", "return", "direction", "# check the nodes exist", "node", "=", "models", ".", "Node", ".", "query", ".", "get", "(", "node_id", ")", "if", "node", "is", "None", ":", "return", "error_response", "(", "error_type", "=", "\"/node/connect, node does not exist\"", ")", "other_node", "=", "models", ".", "Node", ".", "query", ".", "get", "(", "other_node_id", ")", "if", "other_node", "is", "None", ":", "return", "error_response", "(", "error_type", "=", "\"/node/connect, other node does not exist\"", ",", "participant", "=", "node", ".", "participant", ")", "# execute the request", "try", ":", "vectors", "=", "node", ".", "connect", "(", "whom", "=", "other_node", ",", "direction", "=", "direction", ")", "for", "v", "in", "vectors", ":", "assign_properties", "(", "v", ")", "# ping the experiment", "exp", ".", "vector_post_request", "(", "node", "=", "node", ",", "vectors", "=", "vectors", ")", "session", ".", "commit", "(", ")", "except", ":", "return", "error_response", "(", "error_type", "=", "\"/vector POST server error\"", ",", "status", "=", "403", ",", "participant", "=", "node", ".", "participant", ")", "return", "success_response", "(", "field", "=", "\"vectors\"", ",", "data", "=", "[", "v", ".", "__json__", "(", ")", "for", "v", "in", "vectors", "]", ",", "request_type", "=", "\"vector post\"", ")" ]
Connect to another node. The ids of both nodes must be specified in the url. You can also pass direction (to/from/both) as an argument.
[ "Connect", "to", "another", "node", "." ]
3650c0bc3b0804d0adb1d178c5eba9992babb1b0
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/custom.py#L649-L692
train
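Driving the connect handler from a client, assuming a locally running experiment server; the /node/<id>/connect/<other_id> URL shape is inferred from the handler's arguments rather than shown in this record:

import requests

base = 'http://localhost:5000'    # assumed local server
resp = requests.post('%s/node/1/connect/2' % base,
                     data={'direction': 'to'})
print(resp.json()['vectors'])     # list of serialized vectors on success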
berkeley-cocosci/Wallace
wallace/custom.py
get_info
def get_info(node_id, info_id): """Get a specific info. Both the node and info id must be specified in the url. """ exp = experiment(session) # check the node exists node = models.Node.query.get(node_id) if node is None: return error_response(error_type="/info, node does not exist") # execute the experiment method: info = models.Info.query.get(info_id) if info is None: return error_response(error_type="/info GET, info does not exist", participant=node.participant) elif (info.origin_id != node.id and info.id not in [t.info_id for t in node.transmissions(direction="incoming", status="received")]): return error_response(error_type="/info GET, forbidden info", status=403, participant=node.participant) try: # ping the experiment exp.info_get_request(node=node, infos=info) session.commit() except: return error_response(error_type="/info GET server error", status=403, participant=node.participant) # return the data return success_response(field="info", data=info.__json__(), request_type="info get")
python
def get_info(node_id, info_id): """Get a specific info. Both the node and info id must be specified in the url. """ exp = experiment(session) # check the node exists node = models.Node.query.get(node_id) if node is None: return error_response(error_type="/info, node does not exist") # execute the experiment method: info = models.Info.query.get(info_id) if info is None: return error_response(error_type="/info GET, info does not exist", participant=node.participant) elif (info.origin_id != node.id and info.id not in [t.info_id for t in node.transmissions(direction="incoming", status="received")]): return error_response(error_type="/info GET, forbidden info", status=403, participant=node.participant) try: # ping the experiment exp.info_get_request(node=node, infos=info) session.commit() except: return error_response(error_type="/info GET server error", status=403, participant=node.participant) # return the data return success_response(field="info", data=info.__json__(), request_type="info get")
[ "def", "get_info", "(", "node_id", ",", "info_id", ")", ":", "exp", "=", "experiment", "(", "session", ")", "# check the node exists", "node", "=", "models", ".", "Node", ".", "query", ".", "get", "(", "node_id", ")", "if", "node", "is", "None", ":", "return", "error_response", "(", "error_type", "=", "\"/info, node does not exist\"", ")", "# execute the experiment method:", "info", "=", "models", ".", "Info", ".", "query", ".", "get", "(", "info_id", ")", "if", "info", "is", "None", ":", "return", "error_response", "(", "error_type", "=", "\"/info GET, info does not exist\"", ",", "participant", "=", "node", ".", "participant", ")", "elif", "(", "info", ".", "origin_id", "!=", "node", ".", "id", "and", "info", ".", "id", "not", "in", "[", "t", ".", "info_id", "for", "t", "in", "node", ".", "transmissions", "(", "direction", "=", "\"incoming\"", ",", "status", "=", "\"received\"", ")", "]", ")", ":", "return", "error_response", "(", "error_type", "=", "\"/info GET, forbidden info\"", ",", "status", "=", "403", ",", "participant", "=", "node", ".", "participant", ")", "try", ":", "# ping the experiment", "exp", ".", "info_get_request", "(", "node", "=", "node", ",", "infos", "=", "info", ")", "session", ".", "commit", "(", ")", "except", ":", "return", "error_response", "(", "error_type", "=", "\"/info GET server error\"", ",", "status", "=", "403", ",", "participant", "=", "node", ".", "participant", ")", "# return the data", "return", "success_response", "(", "field", "=", "\"info\"", ",", "data", "=", "info", ".", "__json__", "(", ")", ",", "request_type", "=", "\"info get\"", ")" ]
Get a specific info. Both the node and info id must be specified in the url.
[ "Get", "a", "specific", "info", "." ]
3650c0bc3b0804d0adb1d178c5eba9992babb1b0
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/custom.py#L696-L733
train
berkeley-cocosci/Wallace
wallace/custom.py
transformation_post
def transformation_post(node_id, info_in_id, info_out_id): """Transform an info. The ids of the node, info in and info out must all be in the url. You can also pass transformation_type. """ exp = experiment(session) # Get the parameters. transformation_type = request_parameter(parameter="transformation_type", parameter_type="known_class", default=models.Transformation) if type(transformation_type) == Response: return transformation_type # Check that the node etc. exists. node = models.Node.query.get(node_id) if node is None: return error_response( error_type="/transformation POST, node does not exist") info_in = models.Info.query.get(info_in_id) if info_in is None: return error_response( error_type="/transformation POST, info_in does not exist", participant=node.participant) info_out = models.Info.query.get(info_out_id) if info_out is None: return error_response( error_type="/transformation POST, info_out does not exist", participant=node.participant) try: # execute the request transformation = transformation_type(info_in=info_in, info_out=info_out) assign_properties(transformation) session.commit() # ping the experiment exp.transformation_post_request(node=node, transformation=transformation) session.commit() except: return error_response(error_type="/transformation POST failed", participant=node.participant) # return the data return success_response(field="transformation", data=transformation.__json__(), request_type="transformation post")
python
def transformation_post(node_id, info_in_id, info_out_id): """Transform an info. The ids of the node, info in and info out must all be in the url. You can also pass transformation_type. """ exp = experiment(session) # Get the parameters. transformation_type = request_parameter(parameter="transformation_type", parameter_type="known_class", default=models.Transformation) if type(transformation_type) == Response: return transformation_type # Check that the node etc. exists. node = models.Node.query.get(node_id) if node is None: return error_response( error_type="/transformation POST, node does not exist") info_in = models.Info.query.get(info_in_id) if info_in is None: return error_response( error_type="/transformation POST, info_in does not exist", participant=node.participant) info_out = models.Info.query.get(info_out_id) if info_out is None: return error_response( error_type="/transformation POST, info_out does not exist", participant=node.participant) try: # execute the request transformation = transformation_type(info_in=info_in, info_out=info_out) assign_properties(transformation) session.commit() # ping the experiment exp.transformation_post_request(node=node, transformation=transformation) session.commit() except: return error_response(error_type="/transformation POST failed", participant=node.participant) # return the data return success_response(field="transformation", data=transformation.__json__(), request_type="transformation post")
[ "def", "transformation_post", "(", "node_id", ",", "info_in_id", ",", "info_out_id", ")", ":", "exp", "=", "experiment", "(", "session", ")", "# Get the parameters.", "transformation_type", "=", "request_parameter", "(", "parameter", "=", "\"transformation_type\"", ",", "parameter_type", "=", "\"known_class\"", ",", "default", "=", "models", ".", "Transformation", ")", "if", "type", "(", "transformation_type", ")", "==", "Response", ":", "return", "transformation_type", "# Check that the node etc. exists.", "node", "=", "models", ".", "Node", ".", "query", ".", "get", "(", "node_id", ")", "if", "node", "is", "None", ":", "return", "error_response", "(", "error_type", "=", "\"/transformation POST, node does not exist\"", ")", "info_in", "=", "models", ".", "Info", ".", "query", ".", "get", "(", "info_in_id", ")", "if", "info_in", "is", "None", ":", "return", "error_response", "(", "error_type", "=", "\"/transformation POST, info_in does not exist\"", ",", "participant", "=", "node", ".", "participant", ")", "info_out", "=", "models", ".", "Info", ".", "query", ".", "get", "(", "info_out_id", ")", "if", "info_out", "is", "None", ":", "return", "error_response", "(", "error_type", "=", "\"/transformation POST, info_out does not exist\"", ",", "participant", "=", "node", ".", "participant", ")", "try", ":", "# execute the request", "transformation", "=", "transformation_type", "(", "info_in", "=", "info_in", ",", "info_out", "=", "info_out", ")", "assign_properties", "(", "transformation", ")", "session", ".", "commit", "(", ")", "# ping the experiment", "exp", ".", "transformation_post_request", "(", "node", "=", "node", ",", "transformation", "=", "transformation", ")", "session", ".", "commit", "(", ")", "except", ":", "return", "error_response", "(", "error_type", "=", "\"/tranaformation POST failed\"", ",", "participant", "=", "node", ".", "participant", ")", "# return the data", "return", "success_response", "(", "field", "=", "\"transformation\"", ",", "data", "=", "transformation", ".", "__json__", "(", ")", ",", "request_type", "=", "\"transformation post\"", ")" ]
Transform an info. The ids of the node, info in and info out must all be in the url. You can also pass transformation_type.
[ "Transform", "an", "info", "." ]
3650c0bc3b0804d0adb1d178c5eba9992babb1b0
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/custom.py#L1053-L1104
train
berkeley-cocosci/Wallace
wallace/custom.py
api_notifications
def api_notifications(): """Receive MTurk REST notifications.""" event_type = request.values['Event.1.EventType'] assignment_id = request.values['Event.1.AssignmentId'] # Add the notification to the queue. db.logger.debug('rq: Queueing %s with id: %s for worker_function', event_type, assignment_id) q.enqueue(worker_function, event_type, assignment_id, None) db.logger.debug('rq: Submitted Queue Length: %d (%s)', len(q), ', '.join(q.job_ids)) return success_response(request_type="notification")
python
def api_notifications(): """Receive MTurk REST notifications.""" event_type = request.values['Event.1.EventType'] assignment_id = request.values['Event.1.AssignmentId'] # Add the notification to the queue. db.logger.debug('rq: Queueing %s with id: %s for worker_function', event_type, assignment_id) q.enqueue(worker_function, event_type, assignment_id, None) db.logger.debug('rq: Submitted Queue Length: %d (%s)', len(q), ', '.join(q.job_ids)) return success_response(request_type="notification")
[ "def", "api_notifications", "(", ")", ":", "event_type", "=", "request", ".", "values", "[", "'Event.1.EventType'", "]", "assignment_id", "=", "request", ".", "values", "[", "'Event.1.AssignmentId'", "]", "# Add the notification to the queue.", "db", ".", "logger", ".", "debug", "(", "'rq: Queueing %s with id: %s for worker_function'", ",", "event_type", ",", "assignment_id", ")", "q", ".", "enqueue", "(", "worker_function", ",", "event_type", ",", "assignment_id", ",", "None", ")", "db", ".", "logger", ".", "debug", "(", "'rq: Submitted Queue Length: %d (%s)'", ",", "len", "(", "q", ")", ",", "', '", ".", "join", "(", "q", ".", "job_ids", ")", ")", "return", "success_response", "(", "request_type", "=", "\"notification\"", ")" ]
Receive MTurk REST notifications.
[ "Receive", "MTurk", "REST", "notifications", "." ]
3650c0bc3b0804d0adb1d178c5eba9992babb1b0
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/custom.py#L1108-L1120
train
uogbuji/versa
tools/py/writer/rdfs.py
process
def process(source, target, rdfsonly, base=None, logger=logging): ''' Prepare a statement into a triple ready for rdflib graph ''' for link in source.match(): s, p, o = link[:3] # Skip docheader statements if s == (base or '') + '@docheader': continue if p in RESOURCE_MAPPING: p = RESOURCE_MAPPING[p] if o in RESOURCE_MAPPING: o = RESOURCE_MAPPING[o] if p == VERSA_BASEIRI + 'refines': tlinks = list(source.match(s, TYPE_REL)) if tlinks: if tlinks[0][TARGET] == VERSA_BASEIRI + 'Resource': p = I(RDFS_NAMESPACE + 'subClassOf') elif tlinks[0][TARGET] == VERSA_BASEIRI + 'Property': p = I(RDFS_NAMESPACE + 'subPropertyOf') if p == VERSA_BASEIRI + 'properties': suri = I(iri.absolutize(s, base)) if base else s target.add((URIRef(o), URIRef(RDFS_NAMESPACE + 'domain'), URIRef(suri))) continue if p == VERSA_BASEIRI + 'value': if o not in ['Literal', 'IRI']: ouri = I(iri.absolutize(o, base)) if base else o target.add((URIRef(s), URIRef(RDFS_NAMESPACE + 'range'), URIRef(ouri))) continue s = URIRef(s) #Translate v:type to rdf:type p = RDF.type if p == TYPE_REL else URIRef(p) o = URIRef(o) if isinstance(o, I) else Literal(o) if not rdfsonly or p.startswith(RDF_NAMESPACE) or p.startswith(RDFS_NAMESPACE): target.add((s, p, o)) return
python
def process(source, target, rdfsonly, base=None, logger=logging): ''' Prepare a statement into a triple ready for rdflib graph ''' for link in source.match(): s, p, o = link[:3] # Skip docheader statements if s == (base or '') + '@docheader': continue if p in RESOURCE_MAPPING: p = RESOURCE_MAPPING[p] if o in RESOURCE_MAPPING: o = RESOURCE_MAPPING[o] if p == VERSA_BASEIRI + 'refines': tlinks = list(source.match(s, TYPE_REL)) if tlinks: if tlinks[0][TARGET] == VERSA_BASEIRI + 'Resource': p = I(RDFS_NAMESPACE + 'subClassOf') elif tlinks[0][TARGET] == VERSA_BASEIRI + 'Property': p = I(RDFS_NAMESPACE + 'subPropertyOf') if p == VERSA_BASEIRI + 'properties': suri = I(iri.absolutize(s, base)) if base else s target.add((URIRef(o), URIRef(RDFS_NAMESPACE + 'domain'), URIRef(suri))) continue if p == VERSA_BASEIRI + 'value': if o not in ['Literal', 'IRI']: ouri = I(iri.absolutize(o, base)) if base else o target.add((URIRef(s), URIRef(RDFS_NAMESPACE + 'range'), URIRef(ouri))) continue s = URIRef(s) #Translate v:type to rdf:type p = RDF.type if p == TYPE_REL else URIRef(p) o = URIRef(o) if isinstance(o, I) else Literal(o) if not rdfsonly or p.startswith(RDF_NAMESPACE) or p.startswith(RDFS_NAMESPACE): target.add((s, p, o)) return
[ "def", "process", "(", "source", ",", "target", ",", "rdfsonly", ",", "base", "=", "None", ",", "logger", "=", "logging", ")", ":", "for", "link", "in", "source", ".", "match", "(", ")", ":", "s", ",", "p", ",", "o", "=", "link", "[", ":", "3", "]", "#SKip docheader statements", "if", "s", "==", "(", "base", "or", "''", ")", "+", "'@docheader'", ":", "continue", "if", "p", "in", "RESOURCE_MAPPING", ":", "p", "=", "RESOURCE_MAPPING", "[", "p", "]", "if", "o", "in", "RESOURCE_MAPPING", ":", "o", "=", "RESOURCE_MAPPING", "[", "o", "]", "if", "p", "==", "VERSA_BASEIRI", "+", "'refines'", ":", "tlinks", "=", "list", "(", "source", ".", "match", "(", "s", ",", "TYPE_REL", ")", ")", "if", "tlinks", ":", "if", "tlinks", "[", "0", "]", "[", "TARGET", "]", "==", "VERSA_BASEIRI", "+", "'Resource'", ":", "p", "=", "I", "(", "RDFS_NAMESPACE", "+", "'subClassOf'", ")", "elif", "tlinks", "[", "0", "]", "[", "TARGET", "]", "==", "VERSA_BASEIRI", "+", "'Property'", ":", "p", "=", "I", "(", "RDFS_NAMESPACE", "+", "'subPropertyOf'", ")", "if", "p", "==", "VERSA_BASEIRI", "+", "'properties'", ":", "suri", "=", "I", "(", "iri", ".", "absolutize", "(", "s", ",", "base", ")", ")", "if", "base", "else", "s", "target", ".", "add", "(", "(", "URIRef", "(", "o", ")", ",", "URIRef", "(", "RDFS_NAMESPACE", "+", "'domain'", ")", ",", "URIRef", "(", "suri", ")", ")", ")", "continue", "if", "p", "==", "VERSA_BASEIRI", "+", "'value'", ":", "if", "o", "not", "in", "[", "'Literal'", ",", "'IRI'", "]", ":", "ouri", "=", "I", "(", "iri", ".", "absolutize", "(", "o", ",", "base", ")", ")", "if", "base", "else", "o", "target", ".", "add", "(", "(", "URIRef", "(", "s", ")", ",", "URIRef", "(", "RDFS_NAMESPACE", "+", "'range'", ")", ",", "URIRef", "(", "ouri", ")", ")", ")", "continue", "s", "=", "URIRef", "(", "s", ")", "#Translate v:type to rdf:type", "p", "=", "RDF", ".", "type", "if", "p", "==", "TYPE_REL", "else", "URIRef", "(", "p", ")", "o", "=", "URIRef", "(", "o", ")", "if", "isinstance", "(", "o", ",", "I", ")", "else", "Literal", "(", "o", ")", "if", "not", "rdfsonly", "or", "p", ".", "startswith", "(", "RDF_NAMESPACE", ")", "or", "p", ".", "startswith", "(", "RDFS_NAMESPACE", ")", ":", "target", ".", "add", "(", "(", "s", ",", "p", ",", "o", ")", ")", "return" ]
Prepare a statement into a triple ready for rdflib graph
[ "Prepare", "a", "statement", "into", "a", "triple", "ready", "for", "rdflib", "graph" ]
f092ffc7ed363a5b170890955168500f32de0dd5
https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/writer/rdfs.py#L50-L83
train
uogbuji/versa
tools/py/writer/rdfs.py
write
def write(models, base=None, graph=None, rdfsonly=False, prefixes=None, logger=logging): ''' See the command line help ''' prefixes = prefixes or {} g = graph or rdflib.Graph() #g.bind('bf', BFNS) #g.bind('bfc', BFCNS) #g.bind('bfd', BFDNS) g.bind('v', VNS) for k, v in prefixes.items(): g.bind(k, v) for m in models: base_out = m.base process(m, g, rdfsonly, base=base_out, logger=logger) return g
python
def write(models, base=None, graph=None, rdfsonly=False, prefixes=None, logger=logging): ''' See the command line help ''' prefixes = prefixes or {} g = graph or rdflib.Graph() #g.bind('bf', BFNS) #g.bind('bfc', BFCNS) #g.bind('bfd', BFDNS) g.bind('v', VNS) for k, v in prefixes.items(): g.bind(k, v) for m in models: base_out = m.base process(m, g, rdfsonly, base=base_out, logger=logger) return g
[ "def", "write", "(", "models", ",", "base", "=", "None", ",", "graph", "=", "None", ",", "rdfsonly", "=", "False", ",", "prefixes", "=", "None", ",", "logger", "=", "logging", ")", ":", "prefixes", "=", "prefixes", "or", "{", "}", "g", "=", "graph", "or", "rdflib", ".", "Graph", "(", ")", "#g.bind('bf', BFNS)", "#g.bind('bfc', BFCNS)", "#g.bind('bfd', BFDNS)", "g", ".", "bind", "(", "'v'", ",", "VNS", ")", "for", "k", ",", "v", "in", "prefixes", ".", "items", "(", ")", ":", "g", ".", "bind", "(", "k", ",", "v", ")", "for", "m", "in", "models", ":", "base_out", "=", "m", ".", "base", "process", "(", "m", ",", "g", ",", "rdfsonly", ",", "base", "=", "base_out", ",", "logger", "=", "logger", ")", "return", "g" ]
See the command line help
[ "See", "the", "command", "line", "help" ]
f092ffc7ed363a5b170890955168500f32de0dd5
https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/writer/rdfs.py#L86-L101
train
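Sketch of driving write() from a Versa in-memory model; the versa.writer.rdfs import path and the model-population step are assumptions based on how the repository lays out tools/py, not shown in this record:

from versa.driver import memory
from versa.writer.rdfs import write   # import path is an assumption

model = memory.connection()
# ... populate `model` with links before serializing ...
graph = write([model], prefixes={'ex': 'http://example.org/'})
print(graph.serialize(format='turtle'))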
projectshift/shift-boiler
boiler/feature/routing.py
routing_feature
def routing_feature(app): """ Add routing feature Allows you to define application routes in a urls.py file and use lazy views. Additionally enables regular expressions in route definitions """ # enable regex routes app.url_map.converters['regex'] = RegexConverter urls = app.name.rsplit('.', 1)[0] + '.urls.urls' # important issue ahead # see: https://github.com/projectshift/shift-boiler/issues/11 try: urls = import_string(urls) except ImportError as e: err = 'Failed to import {}. If it exists, check that it does not ' err += 'import something non-existent itself! ' err += 'Try to manually import it to debug.' raise ImportError(err.format(urls)) # add routes now for route in urls.keys(): route_options = urls[route] route_options['rule'] = route app.add_url_rule(**route_options)
python
def routing_feature(app): """ Add routing feature Allows you to define application routes in a urls.py file and use lazy views. Additionally enables regular expressions in route definitions """ # enable regex routes app.url_map.converters['regex'] = RegexConverter urls = app.name.rsplit('.', 1)[0] + '.urls.urls' # important issue ahead # see: https://github.com/projectshift/shift-boiler/issues/11 try: urls = import_string(urls) except ImportError as e: err = 'Failed to import {}. If it exists, check that it does not ' err += 'import something non-existent itself! ' err += 'Try to manually import it to debug.' raise ImportError(err.format(urls)) # add routes now for route in urls.keys(): route_options = urls[route] route_options['rule'] = route app.add_url_rule(**route_options)
[ "def", "routing_feature", "(", "app", ")", ":", "# enable regex routes", "app", ".", "url_map", ".", "converters", "[", "'regex'", "]", "=", "RegexConverter", "urls", "=", "app", ".", "name", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "0", "]", "+", "'.urls.urls'", "# important issue ahead", "# see: https://github.com/projectshift/shift-boiler/issues/11", "try", ":", "urls", "=", "import_string", "(", "urls", ")", "except", "ImportError", "as", "e", ":", "err", "=", "'Failed to import {}. If it exists, check that it does not '", "err", "+=", "'import something non-existent itself! '", "err", "+=", "'Try to manually import it to debug.'", "raise", "ImportError", "(", "err", ".", "format", "(", "urls", ")", ")", "# add routes now", "for", "route", "in", "urls", ".", "keys", "(", ")", ":", "route_options", "=", "urls", "[", "route", "]", "route_options", "[", "'rule'", "]", "=", "route", "app", ".", "add_url_rule", "(", "*", "*", "route_options", ")" ]
Add routing feature Allows you to define application routes in a urls.py file and use lazy views. Additionally enables regular expressions in route definitions
[ "Add", "routing", "feature", "Allows", "to", "define", "application", "routes", "un", "urls", ".", "py", "file", "and", "use", "lazy", "views", ".", "Additionally", "enables", "regular", "exceptions", "in", "route", "definitions" ]
8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/feature/routing.py#L5-L30
train
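
A minimal sketch of the urls.py module routing_feature expects. The view functions and rules below are hypothetical; only the dict-of-add_url_rule-kwargs shape and the regex converter come from the function above:

# urls.py -- hypothetical route map consumed by routing_feature (a sketch)
def home():
    return 'home'

def profile(handle):
    return 'profile of ' + handle

# keys are Flask rule strings; values are add_url_rule() keyword arguments;
# routing_feature() injects the 'rule' key itself before registration
urls = {
    '/': dict(view_func=home, endpoint='home'),
    '/u/<regex("[a-z0-9]+"):handle>/': dict(view_func=profile, endpoint='profile'),
}
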
rosshamish/undoredo
undoredo.py
undoable
def undoable(method): """ Decorator undoable allows an instance method to be undone. It does this by wrapping the method call as a Command, then calling self.do() on the command. Classes which use this decorator should implement a do() method like this: def do(self, command): return self.undo_manager.do(command) """ def undoable_method(self, *args): return self.do(Command(self, method, *args)) return undoable_method
python
def undoable(method): """ Decorator undoable allows an instance method to be undone. It does this by wrapping the method call as a Command, then calling self.do() on the command. Classes which use this decorator should implement a do() method like this: def do(self, command): return self.undo_manager.do(command) """ def undoable_method(self, *args): return self.do(Command(self, method, *args)) return undoable_method
[ "def", "undoable", "(", "method", ")", ":", "def", "undoable_method", "(", "self", ",", "*", "args", ")", ":", "return", "self", ".", "do", "(", "Command", "(", "self", ",", "method", ",", "*", "args", ")", ")", "return", "undoable_method" ]
Decorator undoable allows an instance method to be undone. It does this by wrapping the method call as a Command, then calling self.do() on the command. Classes which use this decorator should implement a do() method like this: def do(self, command): return self.undo_manager.do(command)
[ "Decorator", "undoable", "allows", "an", "instance", "method", "to", "be", "undone", "." ]
634941181a74477b7ebafa43d9900e6f21fd0458
https://github.com/rosshamish/undoredo/blob/634941181a74477b7ebafa43d9900e6f21fd0458/undoredo.py#L105-L118
train
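
A hypothetical class wired up the way the docstring describes. Command is defined in the module above, while UndoManager and its undo() call are assumptions about the rest of undoredo.py:

from undoredo import undoable, UndoManager  # UndoManager is an assumed helper

class Counter:
    def __init__(self):
        self.value = 0
        self.undo_manager = UndoManager()  # assumed history holder

    def do(self, command):
        # required by the decorator: route every Command through the manager
        return self.undo_manager.do(command)

    @undoable
    def increment(self, amount):
        self.value += amount

counter = Counter()
counter.increment(5)         # runs through do() as a Command
counter.undo_manager.undo()  # assumed API for rolling the command back
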
projectshift/shift-boiler
boiler/migrations/config.py
MigrationsConfig.get_template_directory
def get_template_directory(self): """ Get path to migrations templates This will get used when you run the db init command """ dir = os.path.join(os.path.dirname(__file__), 'templates') return dir
python
def get_template_directory(self): """ Get path to migrations templates This will get used when you run the db init command """ dir = os.path.join(os.path.dirname(__file__), 'templates') return dir
[ "def", "get_template_directory", "(", "self", ")", ":", "dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'templates'", ")", "return", "dir" ]
Get path to migrations templates This will get used when you run the db init command
[ "Get", "path", "to", "migrations", "templates", "This", "will", "get", "used", "when", "you", "run", "the", "db", "init", "command" ]
8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/migrations/config.py#L23-L29
train
CodersOfTheNight/oshino
oshino/agents/__init__.py
Agent.pull_metrics
async def pull_metrics(self, event_fn, loop=None): """ Method called by core. Should not be overridden. """ if self.lazy and not self.ready: return None logger = self.get_logger() ts = timer() logger.trace("Waiting for process event") result = await self.process(event_fn) td = int(timer() - ts) logger.trace("It took: {}ms".format(td)) self._last_run = current_ts() return result
python
async def pull_metrics(self, event_fn, loop=None): """ Method called by core. Should not be overridden. """ if self.lazy and not self.ready: return None logger = self.get_logger() ts = timer() logger.trace("Waiting for process event") result = await self.process(event_fn) td = int(timer() - ts) logger.trace("It took: {}ms".format(td)) self._last_run = current_ts() return result
[ "async", "def", "pull_metrics", "(", "self", ",", "event_fn", ",", "loop", "=", "None", ")", ":", "if", "self", ".", "lazy", "and", "not", "self", ".", "ready", ":", "return", "None", "logger", "=", "self", ".", "get_logger", "(", ")", "ts", "=", "timer", "(", ")", "logger", ".", "trace", "(", "\"Waiting for process event\"", ")", "result", "=", "await", "self", ".", "process", "(", "event_fn", ")", "td", "=", "int", "(", "timer", "(", ")", "-", "ts", ")", "logger", ".", "trace", "(", "\"It took: {}ms\"", ".", "format", "(", "td", ")", ")", "self", ".", "_last_run", "=", "current_ts", "(", ")", "return", "result" ]
Method called by core. Should not be overridden.
[ "Method", "called", "by", "core", ".", "Should", "not", "be", "overridden", "." ]
00f7e151e3ce1f3a7f43b353b695c4dba83c7f28
https://github.com/CodersOfTheNight/oshino/blob/00f7e151e3ce1f3a7f43b353b695c4dba83c7f28/oshino/agents/__init__.py#L40-L55
train
CodersOfTheNight/oshino
oshino/agents/__init__.py
Agent.ready
def ready(self): """ Function used when agent is `lazy`. It is being processed only when `ready` condition is satisfied """ logger = self.get_logger() now = current_ts() logger.trace("Current time: {0}".format(now)) logger.trace("Last Run: {0}".format(self._last_run)) delta = (now - self._last_run) logger.trace("Delta: {0}, Interval: {1}" .format(delta, self.interval * 1000)) return delta > self.interval * 1000
python
def ready(self): """ Function used when agent is `lazy`. It is being processed only when `ready` condition is satisfied """ logger = self.get_logger() now = current_ts() logger.trace("Current time: {0}".format(now)) logger.trace("Last Run: {0}".format(self._last_run)) delta = (now - self._last_run) logger.trace("Delta: {0}, Interval: {1}" .format(delta, self.interval * 1000)) return delta > self.interval * 1000
[ "def", "ready", "(", "self", ")", ":", "logger", "=", "self", ".", "get_logger", "(", ")", "now", "=", "current_ts", "(", ")", "logger", ".", "trace", "(", "\"Current time: {0}\"", ".", "format", "(", "now", ")", ")", "logger", ".", "trace", "(", "\"Last Run: {0}\"", ".", "format", "(", "self", ".", "_last_run", ")", ")", "delta", "=", "(", "now", "-", "self", ".", "_last_run", ")", "logger", ".", "trace", "(", "\"Delta: {0}, Interval: {1}\"", ".", "format", "(", "delta", ",", "self", ".", "interval", "*", "1000", ")", ")", "return", "delta", ">", "self", ".", "interval", "*", "1000" ]
Function used when agent is `lazy`. It is being processed only when `ready` condition is satisfied
[ "Function", "used", "when", "agent", "is", "lazy", ".", "It", "is", "being", "processed", "only", "when", "ready", "condition", "is", "satisfied" ]
00f7e151e3ce1f3a7f43b353b695c4dba83c7f28
https://github.com/CodersOfTheNight/oshino/blob/00f7e151e3ce1f3a7f43b353b695c4dba83c7f28/oshino/agents/__init__.py#L66-L78
train
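
The two methods above implement a simple millisecond throttle. Here is a standalone sketch of the same gating logic (not oshino's actual classes), useful for seeing why ready compares the elapsed delta against interval * 1000:

# standalone sketch mirroring Agent's lazy/ready throttling
import time

def current_ts():
    return int(time.time() * 1000)  # milliseconds, mirroring oshino's helper

class ThrottledTask:
    def __init__(self, interval_seconds):
        self.interval = interval_seconds
        self._last_run = 0

    @property
    def ready(self):
        # interval is configured in seconds, the delta is in milliseconds
        return (current_ts() - self._last_run) > self.interval * 1000

    def run(self):
        if not self.ready:
            return None
        self._last_run = current_ts()
        return 'did work'

task = ThrottledTask(interval_seconds=10)
print(task.run())  # 'did work'
print(task.run())  # None until 10 seconds have elapsed
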
mardix/Mocha
mocha/contrib/auth/__init__.py
UserModel.create_jwt
def create_jwt(self, expires_in=None): """ Create a secure timed JWT token that can be passed. It saves the user id, which will later be used to retrieve the data :param user: AuthUser, the user's object :param expires_in: time in seconds for the token to expire :return: string """ s = utils.sign_jwt(data={"id": self.user.id}, secret_key=get_jwt_secret(), salt=get_jwt_salt(), expires_in=expires_in or get_jwt_ttl()) return s
python
def create_jwt(self, expires_in=None): """ Create a secure timed JWT token that can be passed. It saves the user id, which will later be used to retrieve the data :param user: AuthUser, the user's object :param expires_in: time in seconds for the token to expire :return: string """ s = utils.sign_jwt(data={"id": self.user.id}, secret_key=get_jwt_secret(), salt=get_jwt_salt(), expires_in=expires_in or get_jwt_ttl()) return s
[ "def", "create_jwt", "(", "self", ",", "expires_in", "=", "None", ")", ":", "s", "=", "utils", ".", "sign_jwt", "(", "data", "=", "{", "\"id\"", ":", "self", ".", "user", ".", "id", "}", ",", "secret_key", "=", "get_jwt_secret", "(", ")", ",", "salt", "=", "get_jwt_salt", "(", ")", ",", "expires_in", "=", "expires_in", "or", "get_jwt_ttl", "(", ")", ")", "return", "s" ]
Create a secure timed JWT token that can be passed. It saves the user id, which will later be used to retrieve the data :param user: AuthUser, the user's object :param expires_in: time in seconds for the token to expire :return: string
[ "Create", "a", "secure", "timed", "JWT", "token", "that", "can", "be", "passed", ".", "It", "saves", "the", "user", "id", "which", "will", "later", "be", "used", "to", "retrieve", "the", "data" ]
bce481cb31a0972061dd99bc548701411dcb9de3
https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/contrib/auth/__init__.py#L449-L462
train
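
For illustration, a standalone salted, expiring token built with itsdangerous. Whether Mocha's utils.sign_jwt uses itsdangerous internally is an assumption, but the secret/salt/expiry trio mirrors the call above:

# sketch of a salted, expiring token analogous to create_jwt()
from itsdangerous import URLSafeTimedSerializer

serializer = URLSafeTimedSerializer('secret-key', salt='user-jwt')
token = serializer.dumps({'id': 42})          # sign the user id into a token
data = serializer.loads(token, max_age=3600)  # raises SignatureExpired after 1h
assert data == {'id': 42}
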
Kortemme-Lab/klab
klab/comms/mail.py
MailServer.sendgmail
def sendgmail(self, subject, recipients, plaintext, htmltext=None, cc=None, debug=False, useMIMEMultipart=True, gmail_account = '[email protected]', pw_filepath = None): '''For this function to work, the password for the gmail user must be colocated with this file or passed in.''' smtpserver = smtplib.SMTP("smtp.gmail.com", 587) smtpserver.ehlo() smtpserver.starttls() smtpserver.ehlo() if pw_filepath: smtpserver.login(gmail_account, read_file(pw_filepath)) else: smtpserver.login(gmail_account, read_file('pw')) for recipient in recipients: if htmltext: msg = MIMEText(htmltext, 'html') msg['From'] = gmail_account msg['To'] = recipient msg['Subject'] = subject smtpserver.sendmail(gmail_account, recipient, msg.as_string()) else: header = 'To:' + recipient + '\n' + 'From: ' + gmail_account + '\n' + 'Subject:' + subject + '\n' msg = header + '\n ' + plaintext + '\n\n' smtpserver.sendmail(gmail_account, recipient, msg) smtpserver.close()
python
def sendgmail(self, subject, recipients, plaintext, htmltext=None, cc=None, debug=False, useMIMEMultipart=True, gmail_account = '[email protected]', pw_filepath = None): '''For this function to work, the password for the gmail user must be colocated with this file or passed in.''' smtpserver = smtplib.SMTP("smtp.gmail.com", 587) smtpserver.ehlo() smtpserver.starttls() smtpserver.ehlo() if pw_filepath: smtpserver.login(gmail_account, read_file(pw_filepath)) else: smtpserver.login(gmail_account, read_file('pw')) for recipient in recipients: if htmltext: msg = MIMEText(htmltext, 'html') msg['From'] = gmail_account msg['To'] = recipient msg['Subject'] = subject smtpserver.sendmail(gmail_account, recipient, msg.as_string()) else: header = 'To:' + recipient + '\n' + 'From: ' + gmail_account + '\n' + 'Subject:' + subject + '\n' msg = header + '\n ' + plaintext + '\n\n' smtpserver.sendmail(gmail_account, recipient, msg) smtpserver.close()
[ "def", "sendgmail", "(", "self", ",", "subject", ",", "recipients", ",", "plaintext", ",", "htmltext", "=", "None", ",", "cc", "=", "None", ",", "debug", "=", "False", ",", "useMIMEMultipart", "=", "True", ",", "gmail_account", "=", "'[email protected]'", ",", "pw_filepath", "=", "None", ")", ":", "smtpserver", "=", "smtplib", ".", "SMTP", "(", "\"smtp.gmail.com\"", ",", "587", ")", "smtpserver", ".", "ehlo", "(", ")", "smtpserver", ".", "starttls", "(", ")", "smtpserver", ".", "ehlo", "(", ")", "if", "pw_filepath", ":", "smtpserver", ".", "login", "(", "gmail_account", ",", "read_file", "(", "pw_filepath", ")", ")", "else", ":", "smtpserver", ".", "login", "(", "gmail_account", ",", "read_file", "(", "'pw'", ")", ")", "for", "recipient", "in", "recipients", ":", "if", "htmltext", ":", "msg", "=", "MIMEText", "(", "htmltext", ",", "'html'", ")", "msg", "[", "'From'", "]", "=", "gmail_account", "msg", "[", "'To'", "]", "=", "recipient", "msg", "[", "'Subject'", "]", "=", "subject", "smtpserver", ".", "sendmail", "(", "gmail_account", ",", "recipient", ",", "msg", ".", "as_string", "(", ")", ")", "else", ":", "header", "=", "'To:'", "+", "recipient", "+", "'\\n'", "+", "'From: '", "+", "gmail_account", "+", "'\\n'", "+", "'Subject:'", "+", "subject", "+", "'\\n'", "msg", "=", "header", "+", "'\\n '", "+", "plaintext", "+", "'\\n\\n'", "smtpserver", ".", "sendmail", "(", "gmail_account", ",", "recipient", ",", "msg", ")", "smtpserver", ".", "close", "(", ")" ]
For this function to work, the password for the gmail user must be colocated with this file or passed in.
[ "For", "this", "function", "to", "work", "the", "password", "for", "the", "gmail", "user", "must", "be", "colocated", "with", "this", "file", "or", "passed", "in", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/comms/mail.py#L72-L95
train
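
A hypothetical call, assuming MailServer() needs no required constructor arguments; the recipient and password file path are placeholders:

from klab.comms.mail import MailServer

server = MailServer()  # assumed to construct without required arguments
server.sendgmail(
    subject='Job finished',
    recipients=['[email protected]'],
    plaintext='The run completed without errors.',
    pw_filepath='/path/to/gmail_password_file',  # file holding only the password
)
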
adaptive-learning/proso-apps
proso_common/views.py
show_one
def show_one(request, post_process_fun, object_class, id, template='common_json.html'): """ Return object of the given type with the specified identifier. GET parameters: user: identifier of the current user stats: turn on the enrichment of the objects by some statistics html: turn on the HTML version of the API """ obj = get_object_or_404(object_class, pk=id) json = post_process_fun(request, obj) return render_json(request, json, template=template, help_text=show_one.__doc__)
python
def show_one(request, post_process_fun, object_class, id, template='common_json.html'): """ Return object of the given type with the specified identifier. GET parameters: user: identifier of the current user stats: turn on the enrichment of the objects by some statistics html: turn on the HTML version of the API """ obj = get_object_or_404(object_class, pk=id) json = post_process_fun(request, obj) return render_json(request, json, template=template, help_text=show_one.__doc__)
[ "def", "show_one", "(", "request", ",", "post_process_fun", ",", "object_class", ",", "id", ",", "template", "=", "'common_json.html'", ")", ":", "obj", "=", "get_object_or_404", "(", "object_class", ",", "pk", "=", "id", ")", "json", "=", "post_process_fun", "(", "request", ",", "obj", ")", "return", "render_json", "(", "request", ",", "json", ",", "template", "=", "template", ",", "help_text", "=", "show_one", ".", "__doc__", ")" ]
Return object of the given type with the specified identifier. GET parameters: user: identifier of the current user stats: turn on the enrichment of the objects by some statistics html: turn on the HTML version of the API
[ "Return", "object", "of", "the", "given", "type", "with", "the", "specified", "identifier", "." ]
8278c72e498d6ef8d392cc47b48473f4ec037142
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_common/views.py#L29-L43
train
adaptive-learning/proso-apps
proso_common/views.py
show_more
def show_more(request, post_process_fun, get_fun, object_class, should_cache=True, template='common_json.html', to_json_kwargs=None): """ Return list of objects of the given type. GET parameters: limit: number of returned objects (default 10, maximum 100) page: current page number filter_column: column name used to filter the results filter_value: value for the specified column used to filter the results user: identifier of the current user all: return all objects available instead of paging; be aware this parameter can be used only for objects for which the caching is turned on db_orderby: database column which the result should be ordered by json_orderby: field of the JSON object which the result should be ordered by, it is less effective than the ordering via db_orderby; be aware this parameter can be used only for objects for which the caching is turned on desc: turn on the descending order stats: turn on the enrichment of the objects by some statistics html: turn on the HTML version of the API environment: turn on the enrichment of the related environment values """ if not should_cache and 'json_orderby' in request.GET: return render_json(request, { 'error': "Can't order the result according to the JSON field, because the caching for this type of object is turned off. See the documentation." }, template='questions_json.html', help_text=show_more.__doc__, status=501) if not should_cache and 'all' in request.GET: return render_json(request, { 'error': "Can't get all objects, because the caching for this type of object is turned off. See the documentation." }, template='questions_json.html', help_text=show_more.__doc__, status=501) if to_json_kwargs is None: to_json_kwargs = {} time_start = time_lib() limit = min(int(request.GET.get('limit', 10)), 100) page = int(request.GET.get('page', 0)) try: objs = get_fun(request, object_class) if 'db_orderby' in request.GET: objs = objs.order_by(('-' if 'desc' in request.GET else '') + request.GET['db_orderby'].strip('/')) if 'all' not in request.GET and 'json_orderby' not in request.GET: objs = objs[page * limit:(page + 1) * limit] cache_key = 'proso_common_sql_json_%s' % hashlib.sha1((str(objs.query) + str(to_json_kwargs)).encode()).hexdigest() cached = cache.get(cache_key) if should_cache and cached: list_objs = json_lib.loads(cached) else: list_objs = [x.to_json(**to_json_kwargs) for x in list(objs)] if should_cache: cache.set(cache_key, json_lib.dumps(list_objs), 60 * 60 * 24 * 30) LOGGER.debug('loading objects in show_more view took %s seconds', (time_lib() - time_start)) json = post_process_fun(request, list_objs) if 'json_orderby' in request.GET: time_before_json_sort = time_lib() json.sort(key=lambda x: (-1 if 'desc' in request.GET else 1) * x[request.GET['json_orderby']]) if 'all' not in request.GET: json = json[page * limit:(page + 1) * limit] LOGGER.debug('sorting objects according to JSON field took %s seconds', (time_lib() - time_before_json_sort)) return render_json( request, json, template=template, help_text=show_more.__doc__) except EmptyResultSet: return render_json(request, [], template=template, help_text=show_more.__doc__)
python
def show_more(request, post_process_fun, get_fun, object_class, should_cache=True, template='common_json.html', to_json_kwargs=None): """ Return list of objects of the given type. GET parameters: limit: number of returned objects (default 10, maximum 100) page: current page number filter_column: column name used to filter the results filter_value: value for the specified column used to filter the results user: identifier of the current user all: return all objects available instead of paging; be aware this parameter can be used only for objects for which the caching is turned on db_orderby: database column which the result should be ordered by json_orderby: field of the JSON object which the result should be ordered by, it is less effective than the ordering via db_orderby; be aware this parameter can be used only for objects for which the caching is turned on desc: turn on the descending order stats: turn on the enrichment of the objects by some statistics html: turn on the HTML version of the API environment: turn on the enrichment of the related environment values """ if not should_cache and 'json_orderby' in request.GET: return render_json(request, { 'error': "Can't order the result according to the JSON field, because the caching for this type of object is turned off. See the documentation." }, template='questions_json.html', help_text=show_more.__doc__, status=501) if not should_cache and 'all' in request.GET: return render_json(request, { 'error': "Can't get all objects, because the caching for this type of object is turned off. See the documentation." }, template='questions_json.html', help_text=show_more.__doc__, status=501) if to_json_kwargs is None: to_json_kwargs = {} time_start = time_lib() limit = min(int(request.GET.get('limit', 10)), 100) page = int(request.GET.get('page', 0)) try: objs = get_fun(request, object_class) if 'db_orderby' in request.GET: objs = objs.order_by(('-' if 'desc' in request.GET else '') + request.GET['db_orderby'].strip('/')) if 'all' not in request.GET and 'json_orderby' not in request.GET: objs = objs[page * limit:(page + 1) * limit] cache_key = 'proso_common_sql_json_%s' % hashlib.sha1((str(objs.query) + str(to_json_kwargs)).encode()).hexdigest() cached = cache.get(cache_key) if should_cache and cached: list_objs = json_lib.loads(cached) else: list_objs = [x.to_json(**to_json_kwargs) for x in list(objs)] if should_cache: cache.set(cache_key, json_lib.dumps(list_objs), 60 * 60 * 24 * 30) LOGGER.debug('loading objects in show_more view took %s seconds', (time_lib() - time_start)) json = post_process_fun(request, list_objs) if 'json_orderby' in request.GET: time_before_json_sort = time_lib() json.sort(key=lambda x: (-1 if 'desc' in request.GET else 1) * x[request.GET['json_orderby']]) if 'all' not in request.GET: json = json[page * limit:(page + 1) * limit] LOGGER.debug('sorting objects according to JSON field took %s seconds', (time_lib() - time_before_json_sort)) return render_json( request, json, template=template, help_text=show_more.__doc__) except EmptyResultSet: return render_json(request, [], template=template, help_text=show_more.__doc__)
[ "def", "show_more", "(", "request", ",", "post_process_fun", ",", "get_fun", ",", "object_class", ",", "should_cache", "=", "True", ",", "template", "=", "'common_json.html'", ",", "to_json_kwargs", "=", "None", ")", ":", "if", "not", "should_cache", "and", "'json_orderby'", "in", "request", ".", "GET", ":", "return", "render_json", "(", "request", ",", "{", "'error'", ":", "\"Can't order the result according to the JSON field, because the caching for this type of object is turned off. See the documentation.\"", "}", ",", "template", "=", "'questions_json.html'", ",", "help_text", "=", "show_more", ".", "__doc__", ",", "status", "=", "501", ")", "if", "not", "should_cache", "and", "'all'", "in", "request", ".", "GET", ":", "return", "render_json", "(", "request", ",", "{", "'error'", ":", "\"Can't get all objects, because the caching for this type of object is turned off. See the documentation.\"", "}", ",", "template", "=", "'questions_json.html'", ",", "help_text", "=", "show_more", ".", "__doc__", ",", "status", "=", "501", ")", "if", "to_json_kwargs", "is", "None", ":", "to_json_kwargs", "=", "{", "}", "time_start", "=", "time_lib", "(", ")", "limit", "=", "min", "(", "int", "(", "request", ".", "GET", ".", "get", "(", "'limit'", ",", "10", ")", ")", ",", "100", ")", "page", "=", "int", "(", "request", ".", "GET", ".", "get", "(", "'page'", ",", "0", ")", ")", "try", ":", "objs", "=", "get_fun", "(", "request", ",", "object_class", ")", "if", "'db_orderby'", "in", "request", ".", "GET", ":", "objs", "=", "objs", ".", "order_by", "(", "(", "'-'", "if", "'desc'", "in", "request", ".", "GET", "else", "''", ")", "+", "request", ".", "GET", "[", "'db_orderby'", "]", ".", "strip", "(", "'/'", ")", ")", "if", "'all'", "not", "in", "request", ".", "GET", "and", "'json_orderby'", "not", "in", "request", ".", "GET", ":", "objs", "=", "objs", "[", "page", "*", "limit", ":", "(", "page", "+", "1", ")", "*", "limit", "]", "cache_key", "=", "'proso_common_sql_json_%s'", "%", "hashlib", ".", "sha1", "(", "(", "str", "(", "objs", ".", "query", ")", "+", "str", "(", "to_json_kwargs", ")", ")", ".", "encode", "(", ")", ")", ".", "hexdigest", "(", ")", "cached", "=", "cache", ".", "get", "(", "cache_key", ")", "if", "should_cache", "and", "cached", ":", "list_objs", "=", "json_lib", ".", "loads", "(", "cached", ")", "else", ":", "list_objs", "=", "[", "x", ".", "to_json", "(", "*", "*", "to_json_kwargs", ")", "for", "x", "in", "list", "(", "objs", ")", "]", "if", "should_cache", ":", "cache", ".", "set", "(", "cache_key", ",", "json_lib", ".", "dumps", "(", "list_objs", ")", ",", "60", "*", "60", "*", "24", "*", "30", ")", "LOGGER", ".", "debug", "(", "'loading objects in show_more view took %s seconds'", ",", "(", "time_lib", "(", ")", "-", "time_start", ")", ")", "json", "=", "post_process_fun", "(", "request", ",", "list_objs", ")", "if", "'json_orderby'", "in", "request", ".", "GET", ":", "time_before_json_sort", "=", "time_lib", "(", ")", "json", ".", "sort", "(", "key", "=", "lambda", "x", ":", "(", "-", "1", "if", "'desc'", "in", "request", ".", "GET", "else", "1", ")", "*", "x", "[", "request", ".", "GET", "[", "'json_orderby'", "]", "]", ")", "if", "'all'", "not", "in", "request", ".", "GET", ":", "json", "=", "json", "[", "page", "*", "limit", ":", "(", "page", "+", "1", ")", "*", "limit", "]", "LOGGER", ".", "debug", "(", "'sorting objects according to JSON field took %s seconds'", ",", "(", "time_lib", "(", ")", "-", "time_before_json_sort", ")", ")", "return", "render_json", "(", 
"request", ",", "json", ",", "template", "=", "template", ",", "help_text", "=", "show_more", ".", "__doc__", ")", "except", "EmptyResultSet", ":", "return", "render_json", "(", "request", ",", "[", "]", ",", "template", "=", "template", ",", "help_text", "=", "show_more", ".", "__doc__", ")" ]
Return list of objects of the given type. GET parameters: limit: number of returned objects (default 10, maximum 100) page: current page number filter_column: column name used to filter the results filter_value: value for the specified column used to filter the results user: identifier of the current user all: return all objects available instead of paging; be aware this parameter can be used only for objects for which the caching is turned on db_orderby: database column which the result should be ordered by json_orderby: field of the JSON object which the result should be ordered by, it is less effective than the ordering via db_orderby; be aware this parameter can be used only for objects for which the caching is turned on desc: turn on the descending order stats: turn on the enrichment of the objects by some statistics html: turn on the HTML version of the API environment: turn on the enrichment of the related environment values
[ "Return", "list", "of", "objects", "of", "the", "given", "type", "." ]
8278c72e498d6ef8d392cc47b48473f4ec037142
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_common/views.py#L46-L118
train
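
Hypothetical client calls against an endpoint wired to show_more; the host and path are placeholders, only the GET parameters come from the docstring:

import requests

BASE = 'https://example.com/common/objects'  # placeholder endpoint

# second page of 20 items, ordered at the database level
requests.get(BASE, params={'limit': 20, 'page': 1, 'db_orderby': 'name'})

# everything at once, sorted on a JSON field -- only valid when caching is on
requests.get(BASE, params={'all': True, 'json_orderby': 'item_id', 'desc': True})
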
adaptive-learning/proso-apps
proso_common/views.py
log
def log(request): """ Log an event from the client to the server. POST parameters (JSON keys): message: description (str) of the logged event level: debug|info|warn|error data: additional data (JSON) describing the logged event """ if request.method == "POST": log_dict = json_body(request.body.decode("utf-8")) if 'message' not in log_dict: return HttpResponseBadRequest('There is no message to log!') levels = { 'debug': JAVASCRIPT_LOGGER.debug, 'info': JAVASCRIPT_LOGGER.info, 'warn': JAVASCRIPT_LOGGER.warn, 'error': JAVASCRIPT_LOGGER.error, } log_fun = JAVASCRIPT_LOGGER.info if 'level' in log_dict: log_fun = levels[log_dict['level']] log_fun(log_dict['message'], extra={ 'request': request, 'user': request.user.id if request.user.is_authenticated() else None, 'client_data': json_lib.dumps(log_dict.get('data', {})), }) return HttpResponse('ok', status=201) else: return render_json(request, {}, template='common_log_service.html', help_text=log.__doc__)
python
def log(request): """ Log an event from the client to the server. POST parameters (JSON keys): message: description (str) of the logged event level: debug|info|warn|error data: additional data (JSON) describing the logged event """ if request.method == "POST": log_dict = json_body(request.body.decode("utf-8")) if 'message' not in log_dict: return HttpResponseBadRequest('There is no message to log!') levels = { 'debug': JAVASCRIPT_LOGGER.debug, 'info': JAVASCRIPT_LOGGER.info, 'warn': JAVASCRIPT_LOGGER.warn, 'error': JAVASCRIPT_LOGGER.error, } log_fun = JAVASCRIPT_LOGGER.info if 'level' in log_dict: log_fun = levels[log_dict['level']] log_fun(log_dict['message'], extra={ 'request': request, 'user': request.user.id if request.user.is_authenticated() else None, 'client_data': json_lib.dumps(log_dict.get('data', {})), }) return HttpResponse('ok', status=201) else: return render_json(request, {}, template='common_log_service.html', help_text=log.__doc__)
[ "def", "log", "(", "request", ")", ":", "if", "request", ".", "method", "==", "\"POST\"", ":", "log_dict", "=", "json_body", "(", "request", ".", "body", ".", "decode", "(", "\"utf-8\"", ")", ")", "if", "'message'", "not", "in", "log_dict", ":", "return", "HttpResponseBadRequest", "(", "'There is no message to log!'", ")", "levels", "=", "{", "'debug'", ":", "JAVASCRIPT_LOGGER", ".", "debug", ",", "'info'", ":", "JAVASCRIPT_LOGGER", ".", "info", ",", "'warn'", ":", "JAVASCRIPT_LOGGER", ".", "warn", ",", "'error'", ":", "JAVASCRIPT_LOGGER", ".", "error", ",", "}", "log_fun", "=", "JAVASCRIPT_LOGGER", ".", "info", "if", "'level'", "in", "log_dict", ":", "log_fun", "=", "levels", "[", "log_dict", "[", "'level'", "]", "]", "log_fun", "(", "log_dict", "[", "'message'", "]", ",", "extra", "=", "{", "'request'", ":", "request", ",", "'user'", ":", "request", ".", "user", ".", "id", "if", "request", ".", "user", ".", "is_authenticated", "(", ")", "else", "None", ",", "'client_data'", ":", "json_lib", ".", "dumps", "(", "log_dict", ".", "get", "(", "'data'", ",", "{", "}", ")", ")", ",", "}", ")", "return", "HttpResponse", "(", "'ok'", ",", "status", "=", "201", ")", "else", ":", "return", "render_json", "(", "request", ",", "{", "}", ",", "template", "=", "'common_log_service.html'", ",", "help_text", "=", "log", ".", "__doc__", ")" ]
Log an event from the client to the server. POST parameters (JSON keys): message: description (str) of the logged event level: debug|info|warn|error data: additional data (JSON) describing the logged event
[ "Log", "an", "event", "from", "the", "client", "to", "the", "server", "." ]
8278c72e498d6ef8d392cc47b48473f4ec037142
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_common/views.py#L122-L154
train
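
A hypothetical client call; the /common/log path is a placeholder, while the JSON keys (message, level, data) come straight from the docstring:

import requests

requests.post('https://example.com/common/log', json={  # placeholder URL
    'message': 'practice session failed to load',
    'level': 'error',
    'data': {'context': 'world', 'retries': 3},
})
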
adaptive-learning/proso-apps
proso_common/views.py
custom_config
def custom_config(request): """ Save user-specific configuration property. POST parameters (JSON keys): app_name: application name for which the configuration property is valid (e.g., proso_models) key: name of the property (e.g., predictive_model.class) value: value of the property (number, string, boolean, ..., e.g, proso.models.prediction.PriorCurrentPredictiveModel) condition_key (optional): name of the condition which is used to filter the property (e.g., practice_filter) condition_value (optional): value for the condition filtering the property (e.g., [["context/world"],["category/state"]]) """ if request.method == 'POST': config_dict = json_body(request.body.decode('utf-8')) CustomConfig.objects.try_create( config_dict['app_name'], config_dict['key'], config_dict['value'], request.user.id, config_dict.get('condition_key') if config_dict.get('condition_key') else None, urllib.parse.unquote(config_dict.get('condition_value')) if config_dict.get('condition_value') else None ) return config(request) else: return render_json(request, {}, template='common_custom_config.html', help_text=custom_config.__doc__)
python
def custom_config(request): """ Save user-specific configuration property. POST parameters (JSON keys): app_name: application name for which the configuration property is valid (e.g., proso_models) key: name of the property (e.g., predictive_model.class) value: value of the property (number, string, boolean, ..., e.g, proso.models.prediction.PriorCurrentPredictiveModel) condition_key (optional): name of the condition which is used to filter the property (e.g., practice_filter) condition_value (optional): value for the condition filtering the property (e.g., [["context/world"],["category/state"]]) """ if request.method == 'POST': config_dict = json_body(request.body.decode('utf-8')) CustomConfig.objects.try_create( config_dict['app_name'], config_dict['key'], config_dict['value'], request.user.id, config_dict.get('condition_key') if config_dict.get('condition_key') else None, urllib.parse.unquote(config_dict.get('condition_value')) if config_dict.get('condition_value') else None ) return config(request) else: return render_json(request, {}, template='common_custom_config.html', help_text=custom_config.__doc__)
[ "def", "custom_config", "(", "request", ")", ":", "if", "request", ".", "method", "==", "'POST'", ":", "config_dict", "=", "json_body", "(", "request", ".", "body", ".", "decode", "(", "'utf-8'", ")", ")", "CustomConfig", ".", "objects", ".", "try_create", "(", "config_dict", "[", "'app_name'", "]", ",", "config_dict", "[", "'key'", "]", ",", "config_dict", "[", "'value'", "]", ",", "request", ".", "user", ".", "id", ",", "config_dict", ".", "get", "(", "'condition_key'", ")", "if", "config_dict", ".", "get", "(", "'condition_key'", ")", "else", "None", ",", "urllib", ".", "parse", ".", "unquote", "(", "config_dict", ".", "get", "(", "'condition_value'", ")", ")", "if", "config_dict", ".", "get", "(", "'condition_value'", ")", "else", "None", ")", "return", "config", "(", "request", ")", "else", ":", "return", "render_json", "(", "request", ",", "{", "}", ",", "template", "=", "'common_custom_config.html'", ",", "help_text", "=", "custom_config", ".", "__doc__", ")" ]
Save user-specific configuration property. POST parameters (JSON keys): app_name: application name for which the configuration property is valid (e.g., proso_models) key: name of the property (e.g., predictive_model.class) value: value of the property (number, string, boolean, ..., e.g., proso.models.prediction.PriorCurrentPredictiveModel) condition_key (optional): name of the condition which is used to filter the property (e.g., practice_filter) condition_value (optional): value for the condition filtering the property (e.g., [["context/world"],["category/state"]])
[ "Save", "user", "-", "specific", "configuration", "property", "." ]
8278c72e498d6ef8d392cc47b48473f4ec037142
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_common/views.py#L158-L185
train
adaptive-learning/proso-apps
proso_common/views.py
languages
def languages(request): """ Returns languages that are available in the system. Returns Dict: language_code -> domain """ return render_json(request, settings.LANGUAGE_DOMAINS if hasattr(settings, 'LANGUAGE_DOMAINS') else {"error": "Languages are not set. (Set LANGUAGE_DOMAINS in settings.py)"}, template='common_json.html', help_text=languages.__doc__)
python
def languages(request): """ Returns languages that are available in the system. Returns Dict: language_code -> domain """ return render_json(request, settings.LANGUAGE_DOMAINS if hasattr(settings, 'LANGUAGE_DOMAINS') else {"error": "Languages are not set. (Set LANGUAGE_DOMAINS in settings.py)"}, template='common_json.html', help_text=languages.__doc__)
[ "def", "languages", "(", "request", ")", ":", "return", "render_json", "(", "request", ",", "settings", ".", "LANGUAGE_DOMAINS", "if", "hasattr", "(", "settings", ",", "'LANGUAGE_DOMAINS'", ")", "else", "{", "\"error\"", ":", "\"Languages are not set. (Set LANGUAGE_DOMAINS in settings.py)\"", "}", ",", "template", "=", "'common_json.html'", ",", "help_text", "=", "languages", ".", "__doc__", ")" ]
Returns languages that are available in the system. Returns Dict: language_code -> domain
[ "Returns", "languages", "that", "are", "available", "in", "the", "system", "." ]
8278c72e498d6ef8d392cc47b48473f4ec037142
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_common/views.py#L192-L201
train
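
The view only echoes settings.LANGUAGE_DOMAINS, so configuring that setting is the whole story; a hypothetical settings.py entry:

# settings.py -- hypothetical mapping; keys are language codes, values domains
LANGUAGE_DOMAINS = {
    'en': 'example.com',
    'cs': 'priklad.cz',
}
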
reillysiemens/layabout
examples/early-connection/example.py
channel_to_id
def channel_to_id(slack, channel): """ Surely there's a better way to do this... """ channels = slack.api_call('channels.list').get('channels') or [] groups = slack.api_call('groups.list').get('groups') or [] if not channels and not groups: raise RuntimeError("Couldn't get channels and groups.") ids = [c['id'] for c in channels + groups if c['name'] == channel] if not ids: raise ValueError(f"Couldn't find #{channel}") return ids[0]
python
def channel_to_id(slack, channel): """ Surely there's a better way to do this... """ channels = slack.api_call('channels.list').get('channels') or [] groups = slack.api_call('groups.list').get('groups') or [] if not channels and not groups: raise RuntimeError("Couldn't get channels and groups.") ids = [c['id'] for c in channels + groups if c['name'] == channel] if not ids: raise ValueError(f"Couldn't find #{channel}") return ids[0]
[ "def", "channel_to_id", "(", "slack", ",", "channel", ")", ":", "channels", "=", "slack", ".", "api_call", "(", "'channels.list'", ")", ".", "get", "(", "'channels'", ")", "or", "[", "]", "groups", "=", "slack", ".", "api_call", "(", "'groups.list'", ")", ".", "get", "(", "'groups'", ")", "or", "[", "]", "if", "not", "channels", "and", "not", "groups", ":", "raise", "RuntimeError", "(", "\"Couldn't get channels and groups.\"", ")", "ids", "=", "[", "c", "[", "'id'", "]", "for", "c", "in", "channels", "+", "groups", "if", "c", "[", "'name'", "]", "==", "channel", "]", "if", "not", "ids", ":", "raise", "ValueError", "(", "f\"Couldn't find #{channel}\"", ")", "return", "ids", "[", "0", "]" ]
Surely there's a better way to do this...
[ "Surely", "there", "s", "a", "better", "way", "to", "do", "this", "..." ]
a146c47f2558e66bb51cf708d39909b93eaea7f4
https://github.com/reillysiemens/layabout/blob/a146c47f2558e66bb51cf708d39909b93eaea7f4/examples/early-connection/example.py#L15-L28
train
reillysiemens/layabout
examples/early-connection/example.py
send_message
def send_message(slack): """ Prompt for and send a message to a channel. """ channel = input('Which channel would you like to message? ') message = input('What should the message be? ') channel_id = channel_to_id(slack, channel) print(f"Sending message to #{channel} (id: {channel_id})!") slack.rtm_send_message(channel_id, message)
python
def send_message(slack): """ Prompt for and send a message to a channel. """ channel = input('Which channel would you like to message? ') message = input('What should the message be? ') channel_id = channel_to_id(slack, channel) print(f"Sending message to #{channel} (id: {channel_id})!") slack.rtm_send_message(channel_id, message)
[ "def", "send_message", "(", "slack", ")", ":", "channel", "=", "input", "(", "'Which channel would you like to message? '", ")", "message", "=", "input", "(", "'What should the message be? '", ")", "channel_id", "=", "channel_to_id", "(", "slack", ",", "channel", ")", "print", "(", "f\"Sending message to #{channel} (id: {channel_id})!\"", ")", "slack", ".", "rtm_send_message", "(", "channel_id", ",", "message", ")" ]
Prompt for and send a message to a channel.
[ "Prompt", "for", "and", "send", "a", "message", "to", "a", "channel", "." ]
a146c47f2558e66bb51cf708d39909b93eaea7f4
https://github.com/reillysiemens/layabout/blob/a146c47f2558e66bb51cf708d39909b93eaea7f4/examples/early-connection/example.py#L31-L38
train
truveris/py-mdstat
mdstat/device.py
parse_device
def parse_device(lines): """Parse all the lines of a device block. A device block is composed of a header line with the name of the device and at least one extra line describing the device and its status. The extra lines have a varying format depending on the status and personality of the device (e.g. RAID1 vs RAID5, healthy vs recovery/resync). """ name, status_line, device = parse_device_header(lines.pop(0)) # There are edge cases when the device list is empty and the status line is # merged with the header line, in those cases, the status line is returned # from parse_device_header(), the rest of the time, it's the next line. if not status_line: status_line = lines.pop(0) status = parse_device_status(status_line, device["personality"]) bitmap = None resync = None for line in lines: if line.startswith(" bitmap:"): bitmap = parse_device_bitmap(line) elif line.startswith(" ["): resync = parse_device_resync_progress(line) elif line.startswith(" \tresync="): resync = parse_device_resync_standby(line) else: raise NotImplementedError("unknown device line: {0}".format(line)) device.update({ "status": status, "bitmap": bitmap, "resync": resync, }) return (name, device)
python
def parse_device(lines): """Parse all the lines of a device block. A device block is composed of a header line with the name of the device and at least one extra line describing the device and its status. The extra lines have a varying format depending on the status and personality of the device (e.g. RAID1 vs RAID5, healthy vs recovery/resync). """ name, status_line, device = parse_device_header(lines.pop(0)) # There are edge cases when the device list is empty and the status line is # merged with the header line, in those cases, the status line is returned # from parse_device_header(), the rest of the time, it's the next line. if not status_line: status_line = lines.pop(0) status = parse_device_status(status_line, device["personality"]) bitmap = None resync = None for line in lines: if line.startswith(" bitmap:"): bitmap = parse_device_bitmap(line) elif line.startswith(" ["): resync = parse_device_resync_progress(line) elif line.startswith(" \tresync="): resync = parse_device_resync_standby(line) else: raise NotImplementedError("unknown device line: {0}".format(line)) device.update({ "status": status, "bitmap": bitmap, "resync": resync, }) return (name, device)
[ "def", "parse_device", "(", "lines", ")", ":", "name", ",", "status_line", ",", "device", "=", "parse_device_header", "(", "lines", ".", "pop", "(", "0", ")", ")", "# There are edge cases when the device list is empty and the status line is", "# merged with the header line, in those cases, the status line is returned", "# from parse_device_header(), the rest of the time, it's the next line.", "if", "not", "status_line", ":", "status_line", "=", "lines", ".", "pop", "(", "0", ")", "status", "=", "parse_device_status", "(", "status_line", ",", "device", "[", "\"personality\"", "]", ")", "bitmap", "=", "None", "resync", "=", "None", "for", "line", "in", "lines", ":", "if", "line", ".", "startswith", "(", "\" bitmap:\"", ")", ":", "bitmap", "=", "parse_device_bitmap", "(", "line", ")", "elif", "line", ".", "startswith", "(", "\" [\"", ")", ":", "resync", "=", "parse_device_resync_progress", "(", "line", ")", "elif", "line", ".", "startswith", "(", "\" \\tresync=\"", ")", ":", "resync", "=", "parse_device_resync_standby", "(", "line", ")", "else", ":", "raise", "NotImplementedError", "(", "\"unknown device line: {0}\"", ".", "format", "(", "line", ")", ")", "device", ".", "update", "(", "{", "\"status\"", ":", "status", ",", "\"bitmap\"", ":", "bitmap", ",", "\"resync\"", ":", "resync", ",", "}", ")", "return", "(", "name", ",", "device", ")" ]
Parse all the lines of a device block. A device block is composed of a header line with the name of the device and at least one extra line describing the device and its status. The extra lines have a varying format depending on the status and personality of the device (e.g. RAID1 vs RAID5, healthy vs recovery/resync).
[ "Parse", "all", "the", "lines", "of", "a", "device", "block", "." ]
881af99d1168694d2f38e606af377ef6cabe2297
https://github.com/truveris/py-mdstat/blob/881af99d1168694d2f38e606af377ef6cabe2297/mdstat/device.py#L14-L51
train
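
Feeding parse_device a small block. The device contents below are made up, and whether this exact text satisfies the header/status parsers is an assumption, but the line shapes (header, status, bitmap) mirror real /proc/mdstat output:

from mdstat.device import parse_device

lines = [
    'md0 : active raid1 sdb1[1] sda1[0]',
    '      104320 blocks [2/2] [UU]',
    '      bitmap: 1/1 pages [4KB], 65536KB chunk',
]
name, device = parse_device(lines)
print(name, device['status'], device['bitmap'])
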
trendels/rhino
rhino/http.py
match_etag
def match_etag(etag, header, weak=False): """Try to match an ETag against a header value. If `weak` is True, uses the weak comparison function. """ if etag is None: return False m = etag_re.match(etag) if not m: raise ValueError("Not a well-formed ETag: '%s'" % etag) (is_weak, etag) = m.groups() parsed_header = parse_etag_header(header) if parsed_header == '*': return True if is_weak and not weak: return False if weak: return etag in [t[1] for t in parsed_header] else: return etag in [t[1] for t in parsed_header if not t[0]]
python
def match_etag(etag, header, weak=False): """Try to match an ETag against a header value. If `weak` is True, uses the weak comparison function. """ if etag is None: return False m = etag_re.match(etag) if not m: raise ValueError("Not a well-formed ETag: '%s'" % etag) (is_weak, etag) = m.groups() parsed_header = parse_etag_header(header) if parsed_header == '*': return True if is_weak and not weak: return False if weak: return etag in [t[1] for t in parsed_header] else: return etag in [t[1] for t in parsed_header if not t[0]]
[ "def", "match_etag", "(", "etag", ",", "header", ",", "weak", "=", "False", ")", ":", "if", "etag", "is", "None", ":", "return", "False", "m", "=", "etag_re", ".", "match", "(", "etag", ")", "if", "not", "m", ":", "raise", "ValueError", "(", "\"Not a well-formed ETag: '%s'\"", "%", "etag", ")", "(", "is_weak", ",", "etag", ")", "=", "m", ".", "groups", "(", ")", "parsed_header", "=", "parse_etag_header", "(", "header", ")", "if", "parsed_header", "==", "'*'", ":", "return", "True", "if", "is_weak", "and", "not", "weak", ":", "return", "False", "if", "weak", ":", "return", "etag", "in", "[", "t", "[", "1", "]", "for", "t", "in", "parsed_header", "]", "else", ":", "return", "etag", "in", "[", "t", "[", "1", "]", "for", "t", "in", "parsed_header", "if", "not", "t", "[", "0", "]", "]" ]
Try to match an ETag against a header value. If `weak` is True, uses the weak comparison function.
[ "Try", "to", "match", "an", "ETag", "against", "a", "header", "value", "." ]
f1f0ef21b6080a2bd130b38b5bef163074c94aed
https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/http.py#L60-L79
train
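
A few calls showing the strong/weak split; the expected results follow RFC 7232 comparison rules and assume the header parser behaves the way the code above suggests:

from rhino.http import match_etag

print(match_etag('"abc"', '"abc", "def"'))          # expected: True
print(match_etag('W/"abc"', 'W/"abc"'))             # expected: False (strong compare)
print(match_etag('W/"abc"', 'W/"abc"', weak=True))  # expected: True
print(match_etag('"abc"', '*'))                     # expected: True (wildcard)
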
trendels/rhino
rhino/http.py
datetime_to_httpdate
def datetime_to_httpdate(dt): """Convert datetime.datetime or Unix timestamp to HTTP date.""" if isinstance(dt, (int, float)): return format_date_time(dt) elif isinstance(dt, datetime): return format_date_time(datetime_to_timestamp(dt)) else: raise TypeError("expected datetime.datetime or timestamp (int/float)," " got '%s'" % dt)
python
def datetime_to_httpdate(dt): """Convert datetime.datetime or Unix timestamp to HTTP date.""" if isinstance(dt, (int, float)): return format_date_time(dt) elif isinstance(dt, datetime): return format_date_time(datetime_to_timestamp(dt)) else: raise TypeError("expected datetime.datetime or timestamp (int/float)," " got '%s'" % dt)
[ "def", "datetime_to_httpdate", "(", "dt", ")", ":", "if", "isinstance", "(", "dt", ",", "(", "int", ",", "float", ")", ")", ":", "return", "format_date_time", "(", "dt", ")", "elif", "isinstance", "(", "dt", ",", "datetime", ")", ":", "return", "format_date_time", "(", "datetime_to_timestamp", "(", "dt", ")", ")", "else", ":", "raise", "TypeError", "(", "\"expected datetime.datetime or timestamp (int/float),\"", "\" got '%s'\"", "%", "dt", ")" ]
Convert datetime.datetime or Unix timestamp to HTTP date.
[ "Convert", "datetime", ".", "datetime", "or", "Unix", "timestamp", "to", "HTTP", "date", "." ]
f1f0ef21b6080a2bd130b38b5bef163074c94aed
https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/http.py#L99-L107
train
trendels/rhino
rhino/http.py
timedelta_to_httpdate
def timedelta_to_httpdate(td): """Convert datetime.timedelta or number of seconds to HTTP date. Returns an HTTP date in the future. """ if isinstance(td, (int, float)): return format_date_time(time.time() + td) elif isinstance(td, timedelta): return format_date_time(time.time() + total_seconds(td)) else: raise TypeError("expected datetime.timedelta or number of seconds" "(int/float), got '%s'" % td)
python
def timedelta_to_httpdate(td): """Convert datetime.timedelta or number of seconds to HTTP date. Returns an HTTP date in the future. """ if isinstance(td, (int, float)): return format_date_time(time.time() + td) elif isinstance(td, timedelta): return format_date_time(time.time() + total_seconds(td)) else: raise TypeError("expected datetime.timedelta or number of seconds" "(int/float), got '%s'" % td)
[ "def", "timedelta_to_httpdate", "(", "td", ")", ":", "if", "isinstance", "(", "td", ",", "(", "int", ",", "float", ")", ")", ":", "return", "format_date_time", "(", "time", ".", "time", "(", ")", "+", "td", ")", "elif", "isinstance", "(", "td", ",", "timedelta", ")", ":", "return", "format_date_time", "(", "time", ".", "time", "(", ")", "+", "total_seconds", "(", "td", ")", ")", "else", ":", "raise", "TypeError", "(", "\"expected datetime.timedelta or number of seconds\"", "\"(int/float), got '%s'\"", "%", "td", ")" ]
Convert datetime.timedelta or number of seconds to HTTP date. Returns an HTTP date in the future.
[ "Convert", "datetime", ".", "timedelta", "or", "number", "of", "seconds", "to", "HTTP", "date", "." ]
f1f0ef21b6080a2bd130b38b5bef163074c94aed
https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/http.py#L110-L121
train
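
The two converters side by side; the exact timezone handling of datetime_to_timestamp is an assumption (naive datetimes are presumably treated as UTC):

from datetime import datetime, timedelta
from rhino.http import datetime_to_httpdate, timedelta_to_httpdate

print(datetime_to_httpdate(0))                     # Thu, 01 Jan 1970 00:00:00 GMT
print(datetime_to_httpdate(datetime(2020, 1, 1)))  # an RFC 1123 date for 2020-01-01
print(timedelta_to_httpdate(timedelta(hours=1)))   # one hour from now, as HTTP date
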
trendels/rhino
rhino/http.py
cache_control
def cache_control(max_age=None, private=False, public=False, s_maxage=None, must_revalidate=False, proxy_revalidate=False, no_cache=False, no_store=False): """Generate the value for a Cache-Control header. Example: >>> from rhino.http import cache_control as cc >>> from datetime import timedelta >>> cc(public=1, max_age=3600) 'public, max-age=3600' >>> cc(public=1, max_age=timedelta(hours=1)) 'public, max-age=3600' >>> cc(private=True, no_cache=True, no_store=True) 'private, no-cache, no-store' """ if all([private, public]): raise ValueError("'private' and 'public' are mutually exclusive") if isinstance(max_age, timedelta): max_age = int(total_seconds(max_age)) if isinstance(s_maxage, timedelta): s_maxage = int(total_seconds(s_maxage)) directives = [] if public: directives.append('public') if private: directives.append('private') if max_age is not None: directives.append('max-age=%d' % max_age) if s_maxage is not None: directives.append('s-maxage=%d' % s_maxage) if no_cache: directives.append('no-cache') if no_store: directives.append('no-store') if must_revalidate: directives.append('must-revalidate') if proxy_revalidate: directives.append('proxy-revalidate') return ', '.join(directives)
python
def cache_control(max_age=None, private=False, public=False, s_maxage=None, must_revalidate=False, proxy_revalidate=False, no_cache=False, no_store=False): """Generate the value for a Cache-Control header. Example: >>> from rhino.http import cache_control as cc >>> from datetime import timedelta >>> cc(public=1, max_age=3600) 'public, max-age=3600' >>> cc(public=1, max_age=timedelta(hours=1)) 'public, max-age=3600' >>> cc(private=True, no_cache=True, no_store=True) 'private, no-cache, no-store' """ if all([private, public]): raise ValueError("'private' and 'public' are mutually exclusive") if isinstance(max_age, timedelta): max_age = int(total_seconds(max_age)) if isinstance(s_maxage, timedelta): s_maxage = int(total_seconds(s_maxage)) directives = [] if public: directives.append('public') if private: directives.append('private') if max_age is not None: directives.append('max-age=%d' % max_age) if s_maxage is not None: directives.append('s-maxage=%d' % s_maxage) if no_cache: directives.append('no-cache') if no_store: directives.append('no-store') if must_revalidate: directives.append('must-revalidate') if proxy_revalidate: directives.append('proxy-revalidate') return ', '.join(directives)
[ "def", "cache_control", "(", "max_age", "=", "None", ",", "private", "=", "False", ",", "public", "=", "False", ",", "s_maxage", "=", "None", ",", "must_revalidate", "=", "False", ",", "proxy_revalidate", "=", "False", ",", "no_cache", "=", "False", ",", "no_store", "=", "False", ")", ":", "if", "all", "(", "[", "private", ",", "public", "]", ")", ":", "raise", "ValueError", "(", "\"'private' and 'public' are mutually exclusive\"", ")", "if", "isinstance", "(", "max_age", ",", "timedelta", ")", ":", "max_age", "=", "int", "(", "total_seconds", "(", "max_age", ")", ")", "if", "isinstance", "(", "s_maxage", ",", "timedelta", ")", ":", "s_maxage", "=", "int", "(", "total_seconds", "(", "s_maxage", ")", ")", "directives", "=", "[", "]", "if", "public", ":", "directives", ".", "append", "(", "'public'", ")", "if", "private", ":", "directives", ".", "append", "(", "'private'", ")", "if", "max_age", "is", "not", "None", ":", "directives", ".", "append", "(", "'max-age=%d'", "%", "max_age", ")", "if", "s_maxage", "is", "not", "None", ":", "directives", ".", "append", "(", "'s-maxage=%d'", "%", "s_maxage", ")", "if", "no_cache", ":", "directives", ".", "append", "(", "'no-cache'", ")", "if", "no_store", ":", "directives", ".", "append", "(", "'no-store'", ")", "if", "must_revalidate", ":", "directives", ".", "append", "(", "'must-revalidate'", ")", "if", "proxy_revalidate", ":", "directives", ".", "append", "(", "'proxy-revalidate'", ")", "return", "', '", ".", "join", "(", "directives", ")" ]
Generate the value for a Cache-Control header. Example: >>> from rhino.http import cache_control as cc >>> from datetime import timedelta >>> cc(public=1, max_age=3600) 'public, max-age=3600' >>> cc(public=1, max_age=timedelta(hours=1)) 'public, max-age=3600' >>> cc(private=True, no_cache=True, no_store=True) 'private, no-cache, no-store'
[ "Generate", "the", "value", "for", "a", "Cache", "-", "Control", "header", "." ]
f1f0ef21b6080a2bd130b38b5bef163074c94aed
https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/http.py#L124-L156
train
chrillux/brottsplatskartan
brottsplatskartan/__init__.py
BrottsplatsKartan.get_incidents
def get_incidents(self) -> Union[list, bool]: """ Get today's incidents. """ brotts_entries_left = True incidents_today = [] url = self.url while brotts_entries_left: requests_response = requests.get( url, params=self.parameters) rate_limited = requests_response.headers.get('x-ratelimit-reset') if rate_limited: print("You have been rate limited until " + time.strftime( '%Y-%m-%d %H:%M:%S%z', time.localtime(float(rate_limited)) )) return False requests_response = requests_response.json() incidents = requests_response.get("data") if not incidents: break datetime_today = datetime.date.today() datetime_today_as_time = time.strptime( str(datetime_today), "%Y-%m-%d" ) today_date_ymd = self._get_datetime_as_ymd(datetime_today_as_time) for incident in incidents: incident_pubdate = incident["pubdate_iso8601"] incident_date = time.strptime( incident_pubdate, "%Y-%m-%dT%H:%M:%S%z" ) incident_date_ymd = self._get_datetime_as_ymd(incident_date) if today_date_ymd == incident_date_ymd: incidents_today.append(incident) else: brotts_entries_left = False break if requests_response.get("links"): url = requests_response["links"]["next_page_url"] else: break return incidents_today
python
def get_incidents(self) -> Union[list, bool]: """ Get today's incidents. """ brotts_entries_left = True incidents_today = [] url = self.url while brotts_entries_left: requests_response = requests.get( url, params=self.parameters) rate_limited = requests_response.headers.get('x-ratelimit-reset') if rate_limited: print("You have been rate limited until " + time.strftime( '%Y-%m-%d %H:%M:%S%z', time.localtime(float(rate_limited)) )) return False requests_response = requests_response.json() incidents = requests_response.get("data") if not incidents: break datetime_today = datetime.date.today() datetime_today_as_time = time.strptime( str(datetime_today), "%Y-%m-%d" ) today_date_ymd = self._get_datetime_as_ymd(datetime_today_as_time) for incident in incidents: incident_pubdate = incident["pubdate_iso8601"] incident_date = time.strptime( incident_pubdate, "%Y-%m-%dT%H:%M:%S%z" ) incident_date_ymd = self._get_datetime_as_ymd(incident_date) if today_date_ymd == incident_date_ymd: incidents_today.append(incident) else: brotts_entries_left = False break if requests_response.get("links"): url = requests_response["links"]["next_page_url"] else: break return incidents_today
[ "def", "get_incidents", "(", "self", ")", "->", "Union", "[", "list", ",", "bool", "]", ":", "brotts_entries_left", "=", "True", "incidents_today", "=", "[", "]", "url", "=", "self", ".", "url", "while", "brotts_entries_left", ":", "requests_response", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "self", ".", "parameters", ")", "rate_limited", "=", "requests_response", ".", "headers", ".", "get", "(", "'x-ratelimit-reset'", ")", "if", "rate_limited", ":", "print", "(", "\"You have been rate limited until \"", "+", "time", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S%z'", ",", "time", ".", "localtime", "(", "float", "(", "rate_limited", ")", ")", ")", ")", "return", "False", "requests_response", "=", "requests_response", ".", "json", "(", ")", "incidents", "=", "requests_response", ".", "get", "(", "\"data\"", ")", "if", "not", "incidents", ":", "break", "datetime_today", "=", "datetime", ".", "date", ".", "today", "(", ")", "datetime_today_as_time", "=", "time", ".", "strptime", "(", "str", "(", "datetime_today", ")", ",", "\"%Y-%m-%d\"", ")", "today_date_ymd", "=", "self", ".", "_get_datetime_as_ymd", "(", "datetime_today_as_time", ")", "for", "incident", "in", "incidents", ":", "incident_pubdate", "=", "incident", "[", "\"pubdate_iso8601\"", "]", "incident_date", "=", "time", ".", "strptime", "(", "incident_pubdate", ",", "\"%Y-%m-%dT%H:%M:%S%z\"", ")", "incident_date_ymd", "=", "self", ".", "_get_datetime_as_ymd", "(", "incident_date", ")", "if", "today_date_ymd", "==", "incident_date_ymd", ":", "incidents_today", ".", "append", "(", "incident", ")", "else", ":", "brotts_entries_left", "=", "False", "break", "if", "requests_response", ".", "get", "(", "\"links\"", ")", ":", "url", "=", "requests_response", "[", "\"links\"", "]", "[", "\"next_page_url\"", "]", "else", ":", "break", "return", "incidents_today" ]
Get today's incidents.
[ "Get", "today", "s", "incidents", "." ]
f38df4debd1799ddb384e467999d601cffbe9d94
https://github.com/chrillux/brottsplatskartan/blob/f38df4debd1799ddb384e467999d601cffbe9d94/brottsplatskartan/__init__.py#L42-L92
train
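A note on this record: `time.localtime(rate_limited)` is passed the raw header string, which raises a TypeError; a numeric cast is needed before formatting. The paginate-until-stale pattern the function uses can be sketched on its own. The payload shape ("data" plus "links.next_page_url") is taken from the record above, while fetch_while and keep are hypothetical names.

import requests

def fetch_while(url, params, keep):
    """Collect items from consecutive pages while keep(item) holds."""
    collected = []
    while url:
        response = requests.get(url, params=params)
        reset = response.headers.get('x-ratelimit-reset')
        if reset:
            # Header values are strings; cast before passing to time.localtime.
            return collected, float(reset)
        payload = response.json()
        for item in payload.get("data", []):
            if not keep(item):
                return collected, None  # entries are ordered; stop early
            collected.append(item)
        url = (payload.get("links") or {}).get("next_page_url")
    return collected, None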
TheGhouls/oct
oct/utilities/newproject.py
from_template
def from_template(args):
    """Create a new oct project from existing template

    :param Namespace args: command line arguments
    """
    project_name = args.name
    template = args.template

    with tarfile.open(template) as tar:
        prefix = os.path.commonprefix(tar.getnames())
        check_template(tar.getnames(), prefix)
        tar.extractall(project_name, members=get_members(tar, prefix))
python
def from_template(args):
    """Create a new oct project from existing template

    :param Namespace args: command line arguments
    """
    project_name = args.name
    template = args.template

    with tarfile.open(template) as tar:
        prefix = os.path.commonprefix(tar.getnames())
        check_template(tar.getnames(), prefix)
        tar.extractall(project_name, members=get_members(tar, prefix))
[ "def", "from_template", "(", "args", ")", ":", "project_name", "=", "args", ".", "name", "template", "=", "args", ".", "template", "with", "tarfile", ".", "open", "(", "template", ")", "as", "tar", ":", "prefix", "=", "os", ".", "path", ".", "commonprefix", "(", "tar", ".", "getnames", "(", ")", ")", "check_template", "(", "tar", ".", "getnames", "(", ")", ",", "prefix", ")", "tar", ".", "extractall", "(", "project_name", ",", "members", "=", "get_members", "(", "tar", ",", "prefix", ")", ")" ]
Create a new oct project from existing template

:param Namespace args: command line arguments
[ "Create", "a", "new", "oct", "project", "from", "existing", "template" ]
7e9bddeb3b8495a26442b1c86744e9fb187fe88f
https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/utilities/newproject.py#L39-L50
train
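check_template and get_members are helpers from the same module, not included in this record. A plausible reading of get_members, stripping the archive's common prefix so extracted files land directly under the project directory, might look like this sketch (a hypothetical reconstruction, not the project's actual code):

import os
import tarfile

def get_members(tar, prefix):
    """Yield tar members with the leading prefix stripped from their names."""
    for member in tar.getmembers():
        if prefix and member.name.startswith(prefix):
            member.name = os.path.relpath(member.name, prefix)
        yield member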
TheGhouls/oct
oct/utilities/newproject.py
from_oct
def from_oct(args):
    """Create a new oct project

    :param Namespace args: the command line arguments
    """
    project_name = args.name
    env = Environment(loader=PackageLoader('oct.utilities', 'templates'))
    config_content = env.get_template('configuration/config.json').render(script_name='v_user.py')
    script_content = env.get_template('scripts/v_user.j2').render()

    try:
        os.makedirs(project_name)
        os.makedirs(os.path.join(project_name, 'test_scripts'))
        os.makedirs(os.path.join(project_name, 'templates'))
        os.makedirs(os.path.join(project_name, 'templates', 'img'))
        shutil.copytree(os.path.join(BASE_DIR, 'templates', 'css'),
                        os.path.join(project_name, 'templates', 'css'))
        shutil.copytree(os.path.join(BASE_DIR, 'templates', 'javascript'),
                        os.path.join(project_name, 'templates', 'scripts'))
        shutil.copytree(os.path.join(BASE_DIR, 'templates', 'fonts'),
                        os.path.join(project_name, 'templates', 'fonts'))
        shutil.copy(os.path.join(BASE_DIR, 'templates', 'html', 'report.html'),
                    os.path.join(project_name, 'templates'))
    except OSError:
        print('ERROR: can not create directory for %r' % project_name, file=sys.stderr)
        raise

    with open(os.path.join(project_name, 'config.json'), 'w') as f:
        f.write(config_content)

    with open(os.path.join(project_name, 'test_scripts', 'v_user.py'), 'w') as f:
        f.write(script_content)
python
def from_oct(args):
    """Create a new oct project

    :param Namespace args: the command line arguments
    """
    project_name = args.name
    env = Environment(loader=PackageLoader('oct.utilities', 'templates'))
    config_content = env.get_template('configuration/config.json').render(script_name='v_user.py')
    script_content = env.get_template('scripts/v_user.j2').render()

    try:
        os.makedirs(project_name)
        os.makedirs(os.path.join(project_name, 'test_scripts'))
        os.makedirs(os.path.join(project_name, 'templates'))
        os.makedirs(os.path.join(project_name, 'templates', 'img'))
        shutil.copytree(os.path.join(BASE_DIR, 'templates', 'css'),
                        os.path.join(project_name, 'templates', 'css'))
        shutil.copytree(os.path.join(BASE_DIR, 'templates', 'javascript'),
                        os.path.join(project_name, 'templates', 'scripts'))
        shutil.copytree(os.path.join(BASE_DIR, 'templates', 'fonts'),
                        os.path.join(project_name, 'templates', 'fonts'))
        shutil.copy(os.path.join(BASE_DIR, 'templates', 'html', 'report.html'),
                    os.path.join(project_name, 'templates'))
    except OSError:
        print('ERROR: can not create directory for %r' % project_name, file=sys.stderr)
        raise

    with open(os.path.join(project_name, 'config.json'), 'w') as f:
        f.write(config_content)

    with open(os.path.join(project_name, 'test_scripts', 'v_user.py'), 'w') as f:
        f.write(script_content)
[ "def", "from_oct", "(", "args", ")", ":", "project_name", "=", "args", ".", "name", "env", "=", "Environment", "(", "loader", "=", "PackageLoader", "(", "'oct.utilities'", ",", "'templates'", ")", ")", "config_content", "=", "env", ".", "get_template", "(", "'configuration/config.json'", ")", ".", "render", "(", "script_name", "=", "'v_user.py'", ")", "script_content", "=", "env", ".", "get_template", "(", "'scripts/v_user.j2'", ")", ".", "render", "(", ")", "try", ":", "os", ".", "makedirs", "(", "project_name", ")", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "project_name", ",", "'test_scripts'", ")", ")", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "project_name", ",", "'templates'", ")", ")", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "project_name", ",", "'templates'", ",", "'img'", ")", ")", "shutil", ".", "copytree", "(", "os", ".", "path", ".", "join", "(", "BASE_DIR", ",", "'templates'", ",", "'css'", ")", ",", "os", ".", "path", ".", "join", "(", "project_name", ",", "'templates'", ",", "'css'", ")", ")", "shutil", ".", "copytree", "(", "os", ".", "path", ".", "join", "(", "BASE_DIR", ",", "'templates'", ",", "'javascript'", ")", ",", "os", ".", "path", ".", "join", "(", "project_name", ",", "'templates'", ",", "'scripts'", ")", ")", "shutil", ".", "copytree", "(", "os", ".", "path", ".", "join", "(", "BASE_DIR", ",", "'templates'", ",", "'fonts'", ")", ",", "os", ".", "path", ".", "join", "(", "project_name", ",", "'templates'", ",", "'fonts'", ")", ")", "shutil", ".", "copy", "(", "os", ".", "path", ".", "join", "(", "BASE_DIR", ",", "'templates'", ",", "'html'", ",", "'report.html'", ")", ",", "os", ".", "path", ".", "join", "(", "project_name", ",", "'templates'", ")", ")", "except", "OSError", ":", "print", "(", "'ERROR: can not create directory for %r'", "%", "project_name", ",", "file", "=", "sys", ".", "stderr", ")", "raise", "with", "open", "(", "os", ".", "path", ".", "join", "(", "project_name", ",", "'config.json'", ")", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "config_content", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "project_name", ",", "'test_scripts'", ",", "'v_user.py'", ")", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "script_content", ")" ]
Create a new oct project

:param Namespace args: the command line arguments
[ "Create", "a", "new", "oct", "project" ]
7e9bddeb3b8495a26442b1c86744e9fb187fe88f
https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/utilities/newproject.py#L53-L85
train
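The Jinja2 rendering step above can be exercised without the package's template files by swapping PackageLoader for an in-memory loader; the template text here is a made-up stand-in, not the real oct template.

from jinja2 import DictLoader, Environment

env = Environment(loader=DictLoader({
    'configuration/config.json': '{"script_name": "{{ script_name }}"}',
}))
print(env.get_template('configuration/config.json').render(script_name='v_user.py'))
# -> {"script_name": "v_user.py"}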
clement-alexandre/TotemBionet
totembionet/src/resource_table/resource_table_with_model.py
ResourceTableWithModel.as_data_frame
def as_data_frame(self) -> pandas.DataFrame:
    """ Create a pandas DataFrame representation of the resource table. """
    header_gene = {}
    header_multiplex = {}
    headr_transitions = {}
    for gene in self.influence_graph.genes:
        header_gene[gene] = repr(gene)
        header_multiplex[gene] = f"active multiplex on {gene!r}"
        headr_transitions[gene] = f"K_{gene!r}"
    columns = defaultdict(list)
    for state in self.table.keys():
        for gene in self.influence_graph.genes:
            columns[header_gene[gene]].append(state[gene])
            columns[header_multiplex[gene]].append(self._repr_multiplexes(gene, state))
            columns[headr_transitions[gene]].append(self._repr_transition(gene, state))
    header = list(header_gene.values()) + list(header_multiplex.values()) + list(headr_transitions.values())
    return pandas.DataFrame(columns, columns=header)
python
def as_data_frame(self) -> pandas.DataFrame:
    """ Create a pandas DataFrame representation of the resource table. """
    header_gene = {}
    header_multiplex = {}
    headr_transitions = {}
    for gene in self.influence_graph.genes:
        header_gene[gene] = repr(gene)
        header_multiplex[gene] = f"active multiplex on {gene!r}"
        headr_transitions[gene] = f"K_{gene!r}"
    columns = defaultdict(list)
    for state in self.table.keys():
        for gene in self.influence_graph.genes:
            columns[header_gene[gene]].append(state[gene])
            columns[header_multiplex[gene]].append(self._repr_multiplexes(gene, state))
            columns[headr_transitions[gene]].append(self._repr_transition(gene, state))
    header = list(header_gene.values()) + list(header_multiplex.values()) + list(headr_transitions.values())
    return pandas.DataFrame(columns, columns=header)
[ "def", "as_data_frame", "(", "self", ")", "->", "pandas", ".", "DataFrame", ":", "header_gene", "=", "{", "}", "header_multiplex", "=", "{", "}", "headr_transitions", "=", "{", "}", "for", "gene", "in", "self", ".", "influence_graph", ".", "genes", ":", "header_gene", "[", "gene", "]", "=", "repr", "(", "gene", ")", "header_multiplex", "[", "gene", "]", "=", "f\"active multiplex on {gene!r}\"", "headr_transitions", "[", "gene", "]", "=", "f\"K_{gene!r}\"", "columns", "=", "defaultdict", "(", "list", ")", "for", "state", "in", "self", ".", "table", ".", "keys", "(", ")", ":", "for", "gene", "in", "self", ".", "influence_graph", ".", "genes", ":", "columns", "[", "header_gene", "[", "gene", "]", "]", ".", "append", "(", "state", "[", "gene", "]", ")", "columns", "[", "header_multiplex", "[", "gene", "]", "]", ".", "append", "(", "self", ".", "_repr_multiplexes", "(", "gene", ",", "state", ")", ")", "columns", "[", "headr_transitions", "[", "gene", "]", "]", ".", "append", "(", "self", ".", "_repr_transition", "(", "gene", ",", "state", ")", ")", "header", "=", "list", "(", "header_gene", ".", "values", "(", ")", ")", "+", "list", "(", "header_multiplex", ".", "values", "(", ")", ")", "+", "list", "(", "headr_transitions", ".", "values", "(", ")", ")", "return", "pandas", ".", "DataFrame", "(", "columns", ",", "columns", "=", "header", ")" ]
Create a pandas DataFrame representation of the resource table.
[ "Create", "a", "panda", "DataFrame", "representation", "of", "the", "resource", "table", "." ]
f37a2f9358c1ce49f21c4a868b904da5dcd4614f
https://github.com/clement-alexandre/TotemBionet/blob/f37a2f9358c1ce49f21c4a868b904da5dcd4614f/totembionet/src/resource_table/resource_table_with_model.py#L25-L43
train
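The defaultdict-of-lists idiom above works because DataFrame accepts a mapping of column name to equal-length list, and the columns= argument pins the column order. A self-contained toy version, unrelated to the influence-graph model:

from collections import defaultdict

import pandas

columns = defaultdict(list)
for state in [{'a': 0, 'b': 1}, {'a': 1, 'b': 0}]:
    for gene in ('a', 'b'):
        columns[gene].append(state[gene])
        columns['K_' + gene].append(state[gene] + 1)

header = ['a', 'b', 'K_a', 'K_b']
print(pandas.DataFrame(columns, columns=header))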
assamite/creamas
creamas/examples/spiro/spiro_agent_mp.py
SpiroAgent.create
def create(self, r, r_, R=200):
    '''Create new spirograph image with given arguments. Returned image is
    scaled to agent's preferred image size.
    '''
    x, y = give_dots(R, r, r_, spins=20)
    xy = np.array([x, y]).T
    xy = np.array(np.around(xy), dtype=np.int64)
    xy = xy[(xy[:, 0] >= -250) & (xy[:, 1] >= -250) &
            (xy[:, 0] < 250) & (xy[:, 1] < 250)]
    xy = xy + 250
    img = np.ones([500, 500], dtype=np.uint8)
    img[:] = 255
    img[xy[:, 0], xy[:, 1]] = 0
    img = misc.imresize(img, [self.img_size, self.img_size])
    fimg = img / 255.0
    return fimg
python
def create(self, r, r_, R=200):
    '''Create new spirograph image with given arguments. Returned image is
    scaled to agent's preferred image size.
    '''
    x, y = give_dots(R, r, r_, spins=20)
    xy = np.array([x, y]).T
    xy = np.array(np.around(xy), dtype=np.int64)
    xy = xy[(xy[:, 0] >= -250) & (xy[:, 1] >= -250) &
            (xy[:, 0] < 250) & (xy[:, 1] < 250)]
    xy = xy + 250
    img = np.ones([500, 500], dtype=np.uint8)
    img[:] = 255
    img[xy[:, 0], xy[:, 1]] = 0
    img = misc.imresize(img, [self.img_size, self.img_size])
    fimg = img / 255.0
    return fimg
[ "def", "create", "(", "self", ",", "r", ",", "r_", ",", "R", "=", "200", ")", ":", "x", ",", "y", "=", "give_dots", "(", "R", ",", "r", ",", "r_", ",", "spins", "=", "20", ")", "xy", "=", "np", ".", "array", "(", "[", "x", ",", "y", "]", ")", ".", "T", "xy", "=", "np", ".", "array", "(", "np", ".", "around", "(", "xy", ")", ",", "dtype", "=", "np", ".", "int64", ")", "xy", "=", "xy", "[", "(", "xy", "[", ":", ",", "0", "]", ">=", "-", "250", ")", "&", "(", "xy", "[", ":", ",", "1", "]", ">=", "-", "250", ")", "&", "(", "xy", "[", ":", ",", "0", "]", "<", "250", ")", "&", "(", "xy", "[", ":", ",", "1", "]", "<", "250", ")", "]", "xy", "=", "xy", "+", "250", "img", "=", "np", ".", "ones", "(", "[", "500", ",", "500", "]", ",", "dtype", "=", "np", ".", "uint8", ")", "img", "[", ":", "]", "=", "255", "img", "[", "xy", "[", ":", ",", "0", "]", ",", "xy", "[", ":", ",", "1", "]", "]", "=", "0", "img", "=", "misc", ".", "imresize", "(", "img", ",", "[", "self", ".", "img_size", ",", "self", ".", "img_size", "]", ")", "fimg", "=", "img", "/", "255.0", "return", "fimg" ]
Create new spirograph image with given arguments. Returned image is scaled to agent's preferred image size.
[ "Create", "new", "spirograph", "image", "with", "given", "arguments", ".", "Returned", "image", "is", "scaled", "to", "agent", "s", "preferred", "image", "size", "." ]
54dc3e31c97a3f938e58272f8ab80b6bcafeff58
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/spiro/spiro_agent_mp.py#L140-L155
train
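give_dots is defined elsewhere in the module and not included in this record; the standard hypotrochoid parameterization below is one plausible reading of what it returns (an assumption, not the project's code). Note also that scipy.misc.imresize was removed in SciPy 1.3, so running this method today requires an alternative such as PIL.Image.resize.

import numpy as np

def give_dots(R, r, r_, spins=20):
    """Hypotrochoid: pen at offset r_ inside a circle of radius r rolling
    inside a circle of radius R (the classic spirograph curve)."""
    t = np.linspace(0, spins * 2 * np.pi, spins * 1000)
    x = (R - r) * np.cos(t) + r_ * np.cos(((R - r) / r) * t)
    y = (R - r) * np.sin(t) - r_ * np.sin(((R - r) / r) * t)
    return x, y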
assamite/creamas
creamas/examples/spiro/spiro_agent_mp.py
SpiroAgent.hedonic_value
def hedonic_value(self, novelty):
    '''Given the agent's desired novelty, how good the novelty value is.

    Not used if *desired_novelty*=-1
    '''
    lmax = gaus_pdf(self.desired_novelty, self.desired_novelty, 4)
    pdf = gaus_pdf(novelty, self.desired_novelty, 4)
    return pdf / lmax
python
def hedonic_value(self, novelty):
    '''Given the agent's desired novelty, how good the novelty value is.

    Not used if *desired_novelty*=-1
    '''
    lmax = gaus_pdf(self.desired_novelty, self.desired_novelty, 4)
    pdf = gaus_pdf(novelty, self.desired_novelty, 4)
    return pdf / lmax
[ "def", "hedonic_value", "(", "self", ",", "novelty", ")", ":", "lmax", "=", "gaus_pdf", "(", "self", ".", "desired_novelty", ",", "self", ".", "desired_novelty", ",", "4", ")", "pdf", "=", "gaus_pdf", "(", "novelty", ",", "self", ".", "desired_novelty", ",", "4", ")", "return", "pdf", "/", "lmax" ]
Given the agent's desired novelty, how good the novelty value is.

Not used if *desired_novelty*=-1
[ "Given", "the", "agent", "s", "desired", "novelty", "how", "good", "the", "novelty", "value", "is", "." ]
54dc3e31c97a3f938e58272f8ab80b6bcafeff58
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/spiro/spiro_agent_mp.py#L170-L177
train
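gaus_pdf is likewise not shown in this record. Assuming a standard Gaussian pdf with an (x, mean, std) signature, dividing by the density's peak value rescales the hedonic score into (0, 1], with the maximum reached exactly at the desired novelty:

import math

def gaus_pdf(x, mean, std):
    # Standard Gaussian probability density (an assumed stand-in).
    return math.exp(-((x - mean) ** 2) / (2 * std ** 2)) / (std * math.sqrt(2 * math.pi))

desired = 10
lmax = gaus_pdf(desired, desired, 4)      # peak of the density
print(gaus_pdf(10, desired, 4) / lmax)    # 1.0 at the desired novelty
print(gaus_pdf(18, desired, 4) / lmax)    # ~0.14 two standard deviations away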
assamite/creamas
creamas/examples/spiro/spiro_agent_mp.py
SpiroAgent.evaluate
def evaluate(self, artifact):
    '''Evaluate the artifact with respect to the agent's short term memory.

    Returns value in [0, 1].
    '''
    if self.desired_novelty > 0:
        return self.hedonic_value(self.novelty(artifact.obj))
    return self.novelty(artifact.obj) / self.img_size, None
python
def evaluate(self, artifact):
    '''Evaluate the artifact with respect to the agent's short term memory.

    Returns value in [0, 1].
    '''
    if self.desired_novelty > 0:
        return self.hedonic_value(self.novelty(artifact.obj))
    return self.novelty(artifact.obj) / self.img_size, None
[ "def", "evaluate", "(", "self", ",", "artifact", ")", ":", "if", "self", ".", "desired_novelty", ">", "0", ":", "return", "self", ".", "hedonic_value", "(", "self", ".", "novelty", "(", "artifact", ".", "obj", ")", ")", "return", "self", ".", "novelty", "(", "artifact", ".", "obj", ")", "/", "self", ".", "img_size", ",", "None" ]
Evaluate the artifact with respect to the agent's short term memory.

Returns value in [0, 1].
[ "Evaluate", "the", "artifact", "with", "respect", "to", "the", "agents", "short", "term", "memory", "." ]
54dc3e31c97a3f938e58272f8ab80b6bcafeff58
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/spiro/spiro_agent_mp.py#L186-L193
train
assamite/creamas
creamas/examples/spiro/spiro_agent_mp.py
SpiroAgent.learn
def learn(self, spiro, iterations=1):
    '''Train short term memory with given spirograph.

    :param spiro: :py:class:`SpiroArtifact` object
    '''
    for i in range(iterations):
        self.stmem.train_cycle(spiro.obj.flatten())
python
def learn(self, spiro, iterations=1):
    '''Train short term memory with given spirograph.

    :param spiro: :py:class:`SpiroArtifact` object
    '''
    for i in range(iterations):
        self.stmem.train_cycle(spiro.obj.flatten())
[ "def", "learn", "(", "self", ",", "spiro", ",", "iterations", "=", "1", ")", ":", "for", "i", "in", "range", "(", "iterations", ")", ":", "self", ".", "stmem", ".", "train_cycle", "(", "spiro", ".", "obj", ".", "flatten", "(", ")", ")" ]
Train short term memory with given spirograph.

:param spiro: :py:class:`SpiroArtifact` object
[ "Train", "short", "term", "memory", "with", "given", "spirograph", "." ]
54dc3e31c97a3f938e58272f8ab80b6bcafeff58
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/spiro/spiro_agent_mp.py#L292-L299
train
assamite/creamas
creamas/examples/spiro/spiro_agent_mp.py
SpiroAgent.plot_places
def plot_places(self):
    '''Plot places where the agent has been and generated a spirograph.
    '''
    from matplotlib import pyplot as plt
    fig, ax = plt.subplots()
    x = []
    y = []
    if len(self.arg_history) > 1:
        xs = []
        ys = []
        for p in self.arg_history:
            xs.append(p[0])
            ys.append(p[1])
        ax.plot(xs, ys, color=(0.0, 0.0, 1.0, 0.1))
    for a in self.A:
        if a.self_criticism == 'pass':
            args = a.framings[a.creator]['args']
            x.append(args[0])
            y.append(args[1])
    sc = ax.scatter(x, y, marker="x", color='red')
    ax.set_xlim([-200, 200])
    ax.set_ylim([-200, 200])
    agent_vars = "{}_{}_{}{}_last={}_stmem=list{}_veto={}_sc={}_jump={}_sw={}_mr={}_maxN".format(
        self.name, self.age, self.env_learning_method, self.env_learning_amount,
        self.env_learn_on_add, self.stmem.length, self._novelty_threshold,
        self._own_threshold, self.jump, self.search_width, self.move_radius)
    if self.logger is not None:
        imname = os.path.join(self.logger.folder, '{}.png'.format(agent_vars))
        plt.savefig(imname)
        plt.close()
        fname = os.path.join(self.logger.folder, '{}.txt'.format(agent_vars))
        with open(fname, "w") as f:
            f.write(" ".join([str(e) for e in xs]))
            f.write("\n")
            f.write(" ".join([str(e) for e in ys]))
            f.write("\n")
            f.write(" ".join([str(e) for e in x]))
            f.write("\n")
            f.write(" ".join([str(e) for e in y]))
            f.write("\n")
    else:
        plt.show()
python
def plot_places(self):
    '''Plot places where the agent has been and generated a spirograph.
    '''
    from matplotlib import pyplot as plt
    fig, ax = plt.subplots()
    x = []
    y = []
    if len(self.arg_history) > 1:
        xs = []
        ys = []
        for p in self.arg_history:
            xs.append(p[0])
            ys.append(p[1])
        ax.plot(xs, ys, color=(0.0, 0.0, 1.0, 0.1))
    for a in self.A:
        if a.self_criticism == 'pass':
            args = a.framings[a.creator]['args']
            x.append(args[0])
            y.append(args[1])
    sc = ax.scatter(x, y, marker="x", color='red')
    ax.set_xlim([-200, 200])
    ax.set_ylim([-200, 200])
    agent_vars = "{}_{}_{}{}_last={}_stmem=list{}_veto={}_sc={}_jump={}_sw={}_mr={}_maxN".format(
        self.name, self.age, self.env_learning_method, self.env_learning_amount,
        self.env_learn_on_add, self.stmem.length, self._novelty_threshold,
        self._own_threshold, self.jump, self.search_width, self.move_radius)
    if self.logger is not None:
        imname = os.path.join(self.logger.folder, '{}.png'.format(agent_vars))
        plt.savefig(imname)
        plt.close()
        fname = os.path.join(self.logger.folder, '{}.txt'.format(agent_vars))
        with open(fname, "w") as f:
            f.write(" ".join([str(e) for e in xs]))
            f.write("\n")
            f.write(" ".join([str(e) for e in ys]))
            f.write("\n")
            f.write(" ".join([str(e) for e in x]))
            f.write("\n")
            f.write(" ".join([str(e) for e in y]))
            f.write("\n")
    else:
        plt.show()
[ "def", "plot_places", "(", "self", ")", ":", "from", "matplotlib", "import", "pyplot", "as", "plt", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", ")", "x", "=", "[", "]", "y", "=", "[", "]", "if", "len", "(", "self", ".", "arg_history", ")", ">", "1", ":", "xs", "=", "[", "]", "ys", "=", "[", "]", "for", "p", "in", "self", ".", "arg_history", ":", "xs", ".", "append", "(", "p", "[", "0", "]", ")", "ys", ".", "append", "(", "p", "[", "1", "]", ")", "ax", ".", "plot", "(", "xs", ",", "ys", ",", "color", "=", "(", "0.0", ",", "0.0", ",", "1.0", ",", "0.1", ")", ")", "for", "a", "in", "self", ".", "A", ":", "if", "a", ".", "self_criticism", "==", "'pass'", ":", "args", "=", "a", ".", "framings", "[", "a", ".", "creator", "]", "[", "'args'", "]", "x", ".", "append", "(", "args", "[", "0", "]", ")", "y", ".", "append", "(", "args", "[", "1", "]", ")", "sc", "=", "ax", ".", "scatter", "(", "x", ",", "y", ",", "marker", "=", "\"x\"", ",", "color", "=", "'red'", ")", "ax", ".", "set_xlim", "(", "[", "-", "200", ",", "200", "]", ")", "ax", ".", "set_ylim", "(", "[", "-", "200", ",", "200", "]", ")", "agent_vars", "=", "\"{}_{}_{}{}_last={}_stmem=list{}_veto={}_sc={}_jump={}_sw={}_mr={}_maxN\"", ".", "format", "(", "self", ".", "name", ",", "self", ".", "age", ",", "self", ".", "env_learning_method", ",", "self", ".", "env_learning_amount", ",", "self", ".", "env_learn_on_add", ",", "self", ".", "stmem", ".", "length", ",", "self", ".", "_novelty_threshold", ",", "self", ".", "_own_threshold", ",", "self", ".", "jump", ",", "self", ".", "search_width", ",", "self", ".", "move_radius", ")", "if", "self", ".", "logger", "is", "not", "None", ":", "imname", "=", "os", ".", "path", ".", "join", "(", "self", ".", "logger", ".", "folder", ",", "'{}.png'", ".", "format", "(", "agent_vars", ")", ")", "plt", ".", "savefig", "(", "imname", ")", "plt", ".", "close", "(", ")", "fname", "=", "os", ".", "path", ".", "join", "(", "self", ".", "logger", ".", "folder", ",", "'{}.txt'", ".", "format", "(", "agent_vars", ")", ")", "with", "open", "(", "fname", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "\" \"", ".", "join", "(", "[", "str", "(", "e", ")", "for", "e", "in", "xs", "]", ")", ")", "f", ".", "write", "(", "\"\\n\"", ")", "f", ".", "write", "(", "\" \"", ".", "join", "(", "[", "str", "(", "e", ")", "for", "e", "in", "ys", "]", ")", ")", "f", ".", "write", "(", "\"\\n\"", ")", "f", ".", "write", "(", "\" \"", ".", "join", "(", "[", "str", "(", "e", ")", "for", "e", "in", "x", "]", ")", ")", "f", ".", "write", "(", "\"\\n\"", ")", "f", ".", "write", "(", "\" \"", ".", "join", "(", "[", "str", "(", "e", ")", "for", "e", "in", "y", "]", ")", ")", "f", ".", "write", "(", "\"\\n\"", ")", "else", ":", "plt", ".", "show", "(", ")" ]
Plot places where the agent has been and generated a spirograph.
[ "Plot", "places", "where", "the", "agent", "has", "been", "and", "generated", "a", "spirograph", "." ]
54dc3e31c97a3f938e58272f8ab80b6bcafeff58
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/spiro/spiro_agent_mp.py#L398-L445
train
assamite/creamas
creamas/examples/spiro/spiro_agent_mp.py
SpiroMultiEnvironment.destroy
def destroy(self, folder=None):
    '''Destroy the environment and the subprocesses.
    '''
    ameans = [(0, 0, 0) for _ in range(3)]
    ret = [self.save_info(folder, ameans)]
    aiomas.run(until=self.stop_slaves(folder))
    # Close and join the process pool nicely.
    self._pool.close()
    self._pool.terminate()
    self._pool.join()
    self._env.shutdown()
    return ret
python
def destroy(self, folder=None):
    '''Destroy the environment and the subprocesses.
    '''
    ameans = [(0, 0, 0) for _ in range(3)]
    ret = [self.save_info(folder, ameans)]
    aiomas.run(until=self.stop_slaves(folder))
    # Close and join the process pool nicely.
    self._pool.close()
    self._pool.terminate()
    self._pool.join()
    self._env.shutdown()
    return ret
[ "def", "destroy", "(", "self", ",", "folder", "=", "None", ")", ":", "ameans", "=", "[", "(", "0", ",", "0", ",", "0", ")", "for", "_", "in", "range", "(", "3", ")", "]", "ret", "=", "[", "self", ".", "save_info", "(", "folder", ",", "ameans", ")", "]", "aiomas", ".", "run", "(", "until", "=", "self", ".", "stop_slaves", "(", "folder", ")", ")", "# Close and join the process pool nicely.", "self", ".", "_pool", ".", "close", "(", ")", "self", ".", "_pool", ".", "terminate", "(", ")", "self", ".", "_pool", ".", "join", "(", ")", "self", ".", "_env", ".", "shutdown", "(", ")", "return", "ret" ]
Destroy the environment and the subprocesses.
[ "Destroy", "the", "environment", "and", "the", "subprocesses", "." ]
54dc3e31c97a3f938e58272f8ab80b6bcafeff58
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/spiro/spiro_agent_mp.py#L704-L715
train
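One quirk worth flagging: close() immediately followed by terminate() is redundant, since terminate() stops workers regardless of pending work. The two standard shutdown patterns for a multiprocessing pool, for comparison:

from multiprocessing import Pool

if __name__ == '__main__':
    pool = Pool(2)
    try:
        print(pool.map(abs, [-1, -2, -3]))   # [1, 2, 3]
    finally:
        pool.close()   # graceful: stop accepting work ...
        pool.join()    # ... and wait for workers to drain
    # Abortive variant: pool.terminate(); pool.join() -- no close() needed.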
Kortemme-Lab/klab
klab/bio/basics.py
Sequence.add
def add(self, r):
    '''Takes a Residue r, derives its id, and adds it to the Sequence.'''
    id = r.get_residue_id()
    if self.order:
        last_id = self.order[-1]
        # KAB - allow for multiresidue noncanonicals
        if id in self.order:
            raise colortext.Exception('Warning: using code to "allow for multiresidue noncanonicals" - check this case manually.')
            id = '%s.%d'%(str(id),self.special_insertion_count)
            self.special_insertion_count += 1
        assert(r.Chain == self.sequence[last_id].Chain)
        assert(r.residue_type == self.sequence[last_id].residue_type)
    self.order.append(id)
    self.sequence[id] = r
python
def add(self, r):
    '''Takes a Residue r, derives its id, and adds it to the Sequence.'''
    id = r.get_residue_id()
    if self.order:
        last_id = self.order[-1]
        # KAB - allow for multiresidue noncanonicals
        if id in self.order:
            raise colortext.Exception('Warning: using code to "allow for multiresidue noncanonicals" - check this case manually.')
            id = '%s.%d'%(str(id),self.special_insertion_count)
            self.special_insertion_count += 1
        assert(r.Chain == self.sequence[last_id].Chain)
        assert(r.residue_type == self.sequence[last_id].residue_type)
    self.order.append(id)
    self.sequence[id] = r
[ "def", "add", "(", "self", ",", "r", ")", ":", "id", "=", "r", ".", "get_residue_id", "(", ")", "if", "self", ".", "order", ":", "last_id", "=", "self", ".", "order", "[", "-", "1", "]", "# KAB - allow for multiresidue noncanonicals", "if", "id", "in", "self", ".", "order", ":", "raise", "colortext", ".", "Exception", "(", "'Warning: using code to \"allow for multiresidue noncanonicals\" - check this case manually.'", ")", "id", "=", "'%s.%d'", "%", "(", "str", "(", "id", ")", ",", "self", ".", "special_insertion_count", ")", "self", ".", "special_insertion_count", "+=", "1", "assert", "(", "r", ".", "Chain", "==", "self", ".", "sequence", "[", "last_id", "]", ".", "Chain", ")", "assert", "(", "r", ".", "residue_type", "==", "self", ".", "sequence", "[", "last_id", "]", ".", "residue_type", ")", "self", ".", "order", ".", "append", "(", "id", ")", "self", ".", "sequence", "[", "id", "]", "=", "r" ]
Takes a Residue r, derives its id, and adds it to the Sequence.
[ "Takes", "an", "id", "and", "a", "Residue", "r", "and", "adds", "them", "to", "the", "Sequence", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/basics.py#L381-L396
train
Kortemme-Lab/klab
klab/bio/basics.py
Sequence.set_type
def set_type(self, sequence_type):
    '''Set the type of a Sequence if it has not been set.'''
    if not(self.sequence_type):
        for id, r in self.sequence.iteritems():
            assert(r.residue_type == None)
            r.residue_type = sequence_type
        self.sequence_type = sequence_type
python
def set_type(self, sequence_type):
    '''Set the type of a Sequence if it has not been set.'''
    if not(self.sequence_type):
        for id, r in self.sequence.iteritems():
            assert(r.residue_type == None)
            r.residue_type = sequence_type
        self.sequence_type = sequence_type
[ "def", "set_type", "(", "self", ",", "sequence_type", ")", ":", "if", "not", "(", "self", ".", "sequence_type", ")", ":", "for", "id", ",", "r", "in", "self", ".", "sequence", ".", "iteritems", "(", ")", ":", "assert", "(", "r", ".", "residue_type", "==", "None", ")", "r", ".", "residue_type", "=", "sequence_type", "self", ".", "sequence_type", "=", "sequence_type" ]
Set the type of a Sequence if it has not been set.
[ "Set", "the", "type", "of", "a", "Sequence", "if", "it", "has", "not", "been", "set", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/basics.py#L398-L404
train
Kortemme-Lab/klab
klab/bio/basics.py
Sequence.from_sequence
def from_sequence(chain, list_of_residues, sequence_type = None):
    '''Takes in a chain identifier and protein sequence and returns a Sequence object of Residues, indexed from 1.'''
    s = Sequence(sequence_type)
    count = 1
    for ResidueAA in list_of_residues:
        s.add(Residue(chain, count, ResidueAA, sequence_type))
        count += 1
    return s
python
def from_sequence(chain, list_of_residues, sequence_type = None):
    '''Takes in a chain identifier and protein sequence and returns a Sequence object of Residues, indexed from 1.'''
    s = Sequence(sequence_type)
    count = 1
    for ResidueAA in list_of_residues:
        s.add(Residue(chain, count, ResidueAA, sequence_type))
        count += 1
    return s
[ "def", "from_sequence", "(", "chain", ",", "list_of_residues", ",", "sequence_type", "=", "None", ")", ":", "s", "=", "Sequence", "(", "sequence_type", ")", "count", "=", "1", "for", "ResidueAA", "in", "list_of_residues", ":", "s", ".", "add", "(", "Residue", "(", "chain", ",", "count", ",", "ResidueAA", ",", "sequence_type", ")", ")", "count", "+=", "1", "return", "s" ]
Takes in a chain identifier and protein sequence and returns a Sequence object of Residues, indexed from 1.
[ "Takes", "in", "a", "chain", "identifier", "and", "protein", "sequence", "and", "returns", "a", "Sequence", "object", "of", "Residues", "indexed", "from", "1", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/basics.py#L413-L420
train
Kortemme-Lab/klab
klab/bio/basics.py
SequenceMap.substitution_scores_match
def substitution_scores_match(self, other):
    '''Check to make sure that the substitution scores agree. If one map has a null score and the other has a non-null score, we trust the other's score and vice versa.'''
    overlap = set(self.substitution_scores.keys()).intersection(set(other.substitution_scores.keys()))
    for k in overlap:
        if not(self.substitution_scores[k] == None or other.substitution_scores[k] == None):
            if self.substitution_scores[k] != other.substitution_scores[k]:
                return False
    return True
python
def substitution_scores_match(self, other):
    '''Check to make sure that the substitution scores agree. If one map has a null score and the other has a non-null score, we trust the other's score and vice versa.'''
    overlap = set(self.substitution_scores.keys()).intersection(set(other.substitution_scores.keys()))
    for k in overlap:
        if not(self.substitution_scores[k] == None or other.substitution_scores[k] == None):
            if self.substitution_scores[k] != other.substitution_scores[k]:
                return False
    return True
[ "def", "substitution_scores_match", "(", "self", ",", "other", ")", ":", "overlap", "=", "set", "(", "self", ".", "substitution_scores", ".", "keys", "(", ")", ")", ".", "intersection", "(", "set", "(", "other", ".", "substitution_scores", ".", "keys", "(", ")", ")", ")", "for", "k", "in", "overlap", ":", "if", "not", "(", "self", ".", "substitution_scores", "[", "k", "]", "==", "None", "or", "other", ".", "substitution_scores", "[", "k", "]", "==", "None", ")", ":", "if", "self", ".", "substitution_scores", "[", "k", "]", "!=", "other", ".", "substitution_scores", "[", "k", "]", ":", "return", "False", "return", "True" ]
Check to make sure that the substitution scores agree. If one map has a null score and the other has a non-null score, we trust the other's score and vice versa.
[ "Check", "to", "make", "sure", "that", "the", "substitution", "scores", "agree", ".", "If", "one", "map", "has", "a", "null", "score", "and", "the", "other", "has", "a", "non", "-", "null", "score", "we", "trust", "the", "other", "s", "score", "and", "vice", "versa", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/basics.py#L465-L472
train
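The method treats None as a wildcard: keys present in both maps only conflict when both scores are non-null and differ. The same check over plain dicts, for illustration:

def scores_match(a, b):
    # Compare only keys held by both maps; None matches anything.
    for k in a.keys() & b.keys():
        if a[k] is not None and b[k] is not None and a[k] != b[k]:
            return False
    return True

print(scores_match({'x': 1, 'y': None}, {'x': 1, 'y': 7}))  # True: None is a wildcard
print(scores_match({'x': 1}, {'x': 2}))                     # False: genuine conflict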
Kortemme-Lab/klab
klab/bio/basics.py
ElementCounter.merge
def merge(self, other):
    '''Merge two element counters. For all elements, we take the max count from both counters.'''
    our_element_frequencies = self.items
    their_element_frequencies = other.items
    for element_name, freq in sorted(our_element_frequencies.iteritems()):
        our_element_frequencies[element_name] = max(our_element_frequencies.get(element_name, 0), their_element_frequencies.get(element_name, 0))
    for element_name, freq in sorted(their_element_frequencies.iteritems()):
        if element_name not in our_element_frequencies:
            our_element_frequencies[element_name] = their_element_frequencies[element_name]
python
def merge(self, other):
    '''Merge two element counters. For all elements, we take the max count from both counters.'''
    our_element_frequencies = self.items
    their_element_frequencies = other.items
    for element_name, freq in sorted(our_element_frequencies.iteritems()):
        our_element_frequencies[element_name] = max(our_element_frequencies.get(element_name, 0), their_element_frequencies.get(element_name, 0))
    for element_name, freq in sorted(their_element_frequencies.iteritems()):
        if element_name not in our_element_frequencies:
            our_element_frequencies[element_name] = their_element_frequencies[element_name]
[ "def", "merge", "(", "self", ",", "other", ")", ":", "our_element_frequencies", "=", "self", ".", "items", "their_element_frequencies", "=", "other", ".", "items", "for", "element_name", ",", "freq", "in", "sorted", "(", "our_element_frequencies", ".", "iteritems", "(", ")", ")", ":", "our_element_frequencies", "[", "element_name", "]", "=", "max", "(", "our_element_frequencies", ".", "get", "(", "element_name", ",", "0", ")", ",", "their_element_frequencies", ".", "get", "(", "element_name", ",", "0", ")", ")", "for", "element_name", ",", "freq", "in", "sorted", "(", "their_element_frequencies", ".", "iteritems", "(", ")", ")", ":", "if", "element_name", "not", "in", "our_element_frequencies", ":", "our_element_frequencies", "[", "element_name", "]", "=", "their_element_frequencies", "[", "element_name", "]" ]
Merge two element counters. For all elements, we take the max count from both counters.
[ "Merge", "two", "element", "counters", ".", "For", "all", "elements", "we", "take", "the", "max", "count", "from", "both", "counters", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/basics.py#L721-L729
train
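For plain integer counts, this take-the-max merge is exactly what collections.Counter union provides; an equivalent idiom, not the class's own implementation:

from collections import Counter

ours = Counter({'C': 8, 'H': 10, 'N': 4})
theirs = Counter({'C': 6, 'H': 12, 'O': 6})
print(ours | theirs)  # Counter({'H': 12, 'C': 8, 'O': 6, 'N': 4}) -- element-wise max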
gamechanger/avro_codec
avro_codec/__init__.py
AvroCodec.dump
def dump(self, obj, fp):
    """
    Serializes obj as an avro-format byte stream to the provided fp
    file-like object stream.
    """
    if not validate(obj, self._raw_schema):
        raise AvroTypeException(self._avro_schema, obj)
    fastavro_write_data(fp, obj, self._raw_schema)
python
def dump(self, obj, fp):
    """
    Serializes obj as an avro-format byte stream to the provided fp
    file-like object stream.
    """
    if not validate(obj, self._raw_schema):
        raise AvroTypeException(self._avro_schema, obj)
    fastavro_write_data(fp, obj, self._raw_schema)
[ "def", "dump", "(", "self", ",", "obj", ",", "fp", ")", ":", "if", "not", "validate", "(", "obj", ",", "self", ".", "_raw_schema", ")", ":", "raise", "AvroTypeException", "(", "self", ".", "_avro_schema", ",", "obj", ")", "fastavro_write_data", "(", "fp", ",", "obj", ",", "self", ".", "_raw_schema", ")" ]
Serializes obj as an avro-format byte stream to the provided fp file-like object stream.
[ "Serializes", "obj", "as", "an", "avro", "-", "format", "byte", "stream", "to", "the", "provided", "fp", "file", "-", "like", "object", "stream", "." ]
57468bee8972a26b31b16a3437b3eeaa5ace2af6
https://github.com/gamechanger/avro_codec/blob/57468bee8972a26b31b16a3437b3eeaa5ace2af6/avro_codec/__init__.py#L14-L21
train
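fastavro_write_data appears to be an aliased fastavro internal (the import is outside this record). With fastavro's public API, the same validate-then-serialize step might read as follows; the Point schema is a made-up example:

import io

from fastavro import parse_schema, schemaless_writer
from fastavro.validation import validate

schema = parse_schema({
    "type": "record", "name": "Point",
    "fields": [{"name": "x", "type": "int"}, {"name": "y", "type": "int"}],
})
record = {"x": 1, "y": 2}
buf = io.BytesIO()
if validate(record, schema):            # raises on invalid data by default
    schemaless_writer(buf, schema, record)
print(len(buf.getvalue()), "bytes written")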