repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (string, 1 class) | code (string, 75-19.8k chars) | code_tokens (sequence) | docstring (string, 3-17.3k chars) | docstring_tokens (sequence) | sha (string, 40 chars) | url (string, 87-242 chars) | partition (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|
uberVU/mongo-pool | mongo_pool/mongo_pool.py | MongoPool._validate_config | def _validate_config(config):
"""Validate that the provided configurtion is valid.
Each dictionary in the configuration list must have the following
mandatory entries :
{label: {host(string), port(int), dbpath(string|list of strings)}}
It can also contain 1 optional key:
{read_preference(string)}
Args:
config: the list of configurations provided at instantiation
Raises:
TypeError: a fault in the configurations is found
"""
if not isinstance(config, list):
raise TypeError('Config must be a list')
for config_dict in config:
if not isinstance(config_dict, dict):
raise TypeError('Config must be a list of dictionaries')
label = config_dict.keys()[0]
cfg = config_dict[label]
if not isinstance(cfg, dict):
raise TypeError('Config structure is broken')
if 'host' not in cfg:
raise TypeError('Config entries must have a value for host')
if not isinstance(cfg['host'], str) and not isinstance(cfg['host'], list):
raise TypeError('Host must be a string or a list.')
if 'port' not in cfg:
raise TypeError('Config entries must have a value for port')
if not isinstance(cfg['port'], int):
raise TypeError('Port must be an int')
if 'dbpath' not in cfg:
raise TypeError('Config entries must have a value for dbpath')
if not isinstance(cfg['dbpath'], str):
if not isinstance(cfg['dbpath'], list):
raise TypeError('Dbpath must be either a string or a list of '
'strings')
for dbpath in cfg['dbpath']:
if not isinstance(dbpath, str):
raise TypeError('Dbpath must be either a string or a list '
'of strings')
if ('read_preference' in cfg and
not isinstance(cfg['read_preference'], str)):
raise TypeError('Read_preference must be a string')
if ('replicaSet' in cfg and
not isinstance(cfg['replicaSet'], str)):
raise TypeError('replicaSet must be a string') | python | def _validate_config(config):
"""Validate that the provided configurtion is valid.
Each dictionary in the configuration list must have the following
mandatory entries :
{label: {host(string), port(int), dbpath(string|list of strings)}}
It can also contain 1 optional key:
{read_preference(string)}
Args:
config: the list of configurations provided at instantiation
Raises:
TypeError: a fault in the configurations is found
"""
if not isinstance(config, list):
raise TypeError('Config must be a list')
for config_dict in config:
if not isinstance(config_dict, dict):
raise TypeError('Config must be a list of dictionaries')
label = config_dict.keys()[0]
cfg = config_dict[label]
if not isinstance(cfg, dict):
raise TypeError('Config structure is broken')
if 'host' not in cfg:
raise TypeError('Config entries must have a value for host')
if not isinstance(cfg['host'], str) and not isinstance(cfg['host'], list):
raise TypeError('Host must be a string or a list.')
if 'port' not in cfg:
raise TypeError('Config entries must have a value for port')
if not isinstance(cfg['port'], int):
raise TypeError('Port must be an int')
if 'dbpath' not in cfg:
raise TypeError('Config entries must have a value for dbpath')
if not isinstance(cfg['dbpath'], str):
if not isinstance(cfg['dbpath'], list):
raise TypeError('Dbpath must be either a string or a list of '
'strings')
for dbpath in cfg['dbpath']:
if not isinstance(dbpath, str):
raise TypeError('Dbpath must be either a string or a list '
'of strings')
if ('read_preference' in cfg and
not isinstance(cfg['read_preference'], str)):
raise TypeError('Read_preference must be a string')
if ('replicaSet' in cfg and
not isinstance(cfg['replicaSet'], str)):
raise TypeError('replicaSet must be a string') | [
"def",
"_validate_config",
"(",
"config",
")",
":",
"if",
"not",
"isinstance",
"(",
"config",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"'Config must be a list'",
")",
"for",
"config_dict",
"in",
"config",
":",
"if",
"not",
"isinstance",
"(",
"config_dict",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"'Config must be a list of dictionaries'",
")",
"label",
"=",
"config_dict",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"cfg",
"=",
"config_dict",
"[",
"label",
"]",
"if",
"not",
"isinstance",
"(",
"cfg",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"'Config structure is broken'",
")",
"if",
"'host'",
"not",
"in",
"cfg",
":",
"raise",
"TypeError",
"(",
"'Config entries must have a value for host'",
")",
"if",
"not",
"isinstance",
"(",
"cfg",
"[",
"'host'",
"]",
",",
"str",
")",
"and",
"not",
"isinstance",
"(",
"cfg",
"[",
"'host'",
"]",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"'Host must be a string or a list.'",
")",
"if",
"'port'",
"not",
"in",
"cfg",
":",
"raise",
"TypeError",
"(",
"'Config entries must have a value for port'",
")",
"if",
"not",
"isinstance",
"(",
"cfg",
"[",
"'port'",
"]",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"'Port must be an int'",
")",
"if",
"'dbpath'",
"not",
"in",
"cfg",
":",
"raise",
"TypeError",
"(",
"'Config entries must have a value for dbpath'",
")",
"if",
"not",
"isinstance",
"(",
"cfg",
"[",
"'dbpath'",
"]",
",",
"str",
")",
":",
"if",
"not",
"isinstance",
"(",
"cfg",
"[",
"'dbpath'",
"]",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"'Dbpath must either a string or a list of '",
"'strings'",
")",
"for",
"dbpath",
"in",
"cfg",
"[",
"'dbpath'",
"]",
":",
"if",
"not",
"isinstance",
"(",
"dbpath",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"'Dbpath must either a string or a list '",
"'of strings'",
")",
"if",
"(",
"'read_preference'",
"in",
"cfg",
"and",
"not",
"isinstance",
"(",
"cfg",
"[",
"'read_preference'",
"]",
",",
"str",
")",
")",
":",
"raise",
"TypeError",
"(",
"'Read_preference must be a string'",
")",
"if",
"(",
"'replicaSet'",
"in",
"cfg",
"and",
"not",
"isinstance",
"(",
"cfg",
"[",
"'replicaSet'",
"]",
",",
"str",
")",
")",
":",
"raise",
"TypeError",
"(",
"'replicaSet must be a string'",
")"
] | Validate that the provided configuration is valid.
Each dictionary in the configuration list must have the following
mandatory entries:
{label: {host(string), port(int), dbpath(string|list of strings)}}
It can also contain optional keys:
{read_preference(string), replicaSet(string)}
Args:
config: the list of configurations provided at instantiation
Raises:
TypeError: a fault in the configurations is found | [
"Validate",
"that",
"the",
"provided",
"configurtion",
"is",
"valid",
"."
] | 286d1d8e0b3c17d5d7d4860487fe69358941067d | https://github.com/uberVU/mongo-pool/blob/286d1d8e0b3c17d5d7d4860487fe69358941067d/mongo_pool/mongo_pool.py#L45-L98 | train |
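A minimal usage sketch for the validator in the row above. The config values are hypothetical, the import assumes the `mongo_pool` package is installed, and the call assumes Python 2 semantics (the recorded code indexes `dict.keys()` directly, which Python 3 does not allow):

```python
from mongo_pool.mongo_pool import MongoPool

# One cluster labelled 'cluster1' with the mandatory host/port/dbpath entries.
good_config = [
    {'cluster1': {'host': '127.0.0.1', 'port': 27017,
                  'dbpath': ['blog', 'users']}},
]

# Same entry, but 'port' is a string instead of an int.
bad_config = [
    {'cluster1': {'host': '127.0.0.1', 'port': '27017', 'dbpath': 'blog'}},
]

MongoPool._validate_config(good_config)      # passes silently
try:
    MongoPool._validate_config(bad_config)
except TypeError as exc:
    print(exc)                               # Port must be an int
```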
uberVU/mongo-pool | mongo_pool/mongo_pool.py | MongoPool._parse_configs | def _parse_configs(self, config):
"""Builds a dict with information to connect to Clusters.
Parses the list of configuration dictionaries passed by the user and
builds an internal dict (_clusters) that holds information for creating
Clients connecting to Clusters and matching database names.
Args:
config: A list of dictionaries containing connecting and
identification information about Clusters.
A dict has the following structure:
{label: {host, port, read_preference, dbpath}}.
Raises:
Exception('No configuration provided'): no configuration provided.
"""
for config_dict in config:
label = config_dict.keys()[0]
cfg = config_dict[label]
# Transform dbpath to something digestible by regexp.
dbpath = cfg['dbpath']
pattern = self._parse_dbpath(dbpath)
read_preference = cfg.get('read_preference', 'primary').upper()
read_preference = self._get_read_preference(read_preference)
# Put all parameters that could be passed to pymongo.MongoClient
# in a separate dict, to ease MongoClient creation.
cluster_config = {
'params': {
'host': cfg['host'],
'port': cfg['port'],
'read_preference': read_preference,
'replicaSet': cfg.get('replicaSet')
},
'pattern': pattern,
'label': label
}
self._clusters.append(cluster_config) | python | def _parse_configs(self, config):
"""Builds a dict with information to connect to Clusters.
Parses the list of configuration dictionaries passed by the user and
builds an internal dict (_clusters) that holds information for creating
Clients connecting to Clusters and matching database names.
Args:
config: A list of dictionaries containing connecting and
identification information about Clusters.
A dict has the following structure:
{label: {host, port, read_preference, dbpath}}.
Raises:
Exception('No configuration provided'): no configuration provided.
"""
for config_dict in config:
label = config_dict.keys()[0]
cfg = config_dict[label]
# Transform dbpath to something digestible by regexp.
dbpath = cfg['dbpath']
pattern = self._parse_dbpath(dbpath)
read_preference = cfg.get('read_preference', 'primary').upper()
read_preference = self._get_read_preference(read_preference)
# Put all parameters that could be passed to pymongo.MongoClient
# in a separate dict, to ease MongoClient creation.
cluster_config = {
'params': {
'host': cfg['host'],
'port': cfg['port'],
'read_preference': read_preference,
'replicaSet': cfg.get('replicaSet')
},
'pattern': pattern,
'label': label
}
self._clusters.append(cluster_config) | [
"def",
"_parse_configs",
"(",
"self",
",",
"config",
")",
":",
"for",
"config_dict",
"in",
"config",
":",
"label",
"=",
"config_dict",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"cfg",
"=",
"config_dict",
"[",
"label",
"]",
"# Transform dbpath to something digestable by regexp.",
"dbpath",
"=",
"cfg",
"[",
"'dbpath'",
"]",
"pattern",
"=",
"self",
".",
"_parse_dbpath",
"(",
"dbpath",
")",
"read_preference",
"=",
"cfg",
".",
"get",
"(",
"'read_preference'",
",",
"'primary'",
")",
".",
"upper",
"(",
")",
"read_preference",
"=",
"self",
".",
"_get_read_preference",
"(",
"read_preference",
")",
"# Put all parameters that could be passed to pymongo.MongoClient",
"# in a separate dict, to ease MongoClient creation.",
"cluster_config",
"=",
"{",
"'params'",
":",
"{",
"'host'",
":",
"cfg",
"[",
"'host'",
"]",
",",
"'port'",
":",
"cfg",
"[",
"'port'",
"]",
",",
"'read_preference'",
":",
"read_preference",
",",
"'replicaSet'",
":",
"cfg",
".",
"get",
"(",
"'replicaSet'",
")",
"}",
",",
"'pattern'",
":",
"pattern",
",",
"'label'",
":",
"label",
"}",
"self",
".",
"_clusters",
".",
"append",
"(",
"cluster_config",
")"
] | Builds a dict with information to connect to Clusters.
Parses the list of configuration dictionaries passed by the user and
builds an internal dict (_clusters) that holds information for creating
Clients connecting to Clusters and matching database names.
Args:
config: A list of dictionaries containing connecting and
identification information about Clusters.
A dict has the following structure:
{label: {host, port, read_preference, dbpath}}.
Raises:
Exception('No configuration provided'): no configuration provided. | [
"Builds",
"a",
"dict",
"with",
"information",
"to",
"connect",
"to",
"Clusters",
"."
] | 286d1d8e0b3c17d5d7d4860487fe69358941067d | https://github.com/uberVU/mongo-pool/blob/286d1d8e0b3c17d5d7d4860487fe69358941067d/mongo_pool/mongo_pool.py#L100-L139 | train |
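To make the transformation concrete, here is a sketch of one hypothetical config entry and the cluster record `_parse_configs` would append to `self._clusters` for it (written out by hand, not executed; `pymongo` is assumed to be installed):

```python
import pymongo

# Hypothetical entry from the user-supplied config list:
entry = {'blogs': {'host': '127.0.0.1', 'port': 27017,
                   'dbpath': ['blog', 'users'],
                   'read_preference': 'secondary'}}

# Shape of the record appended to self._clusters for that entry:
expected_cluster = {
    'params': {'host': '127.0.0.1',
               'port': 27017,
               'read_preference': pymongo.ReadPreference.SECONDARY,
               'replicaSet': None},
    'pattern': '(blog|users)$',   # produced by _parse_dbpath
    'label': 'blogs',
}
```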
uberVU/mongo-pool | mongo_pool/mongo_pool.py | MongoPool._parse_dbpath | def _parse_dbpath(dbpath):
"""Converts the dbpath to a regexp pattern.
Transforms dbpath from a string or an array of strings to a
regexp pattern which will be used to match database names.
Args:
dbpath: a string or an array containing the databases to be matched
from a cluster.
Returns:
A regexp pattern that will match any of the desired databases
on a cluster.
"""
if isinstance(dbpath, list):
# Support dbpath param as an array.
dbpath = '|'.join(dbpath)
# Append $ (end of string) so that twit will not match twitter!
if not dbpath.endswith('$'):
dbpath = '(%s)$' % dbpath
return dbpath | python | def _parse_dbpath(dbpath):
"""Converts the dbpath to a regexp pattern.
Transforms dbpath from a string or an array of strings to a
regexp pattern which will be used to match database names.
Args:
dbpath: a string or an array containing the databases to be matched
from a cluster.
Returns:
A regexp pattern that will match any of the desired databases
on a cluster.
"""
if isinstance(dbpath, list):
# Support dbpath param as an array.
dbpath = '|'.join(dbpath)
# Append $ (end of string) so that twit will not match twitter!
if not dbpath.endswith('$'):
dbpath = '(%s)$' % dbpath
return dbpath | [
"def",
"_parse_dbpath",
"(",
"dbpath",
")",
":",
"if",
"isinstance",
"(",
"dbpath",
",",
"list",
")",
":",
"# Support dbpath param as an array.",
"dbpath",
"=",
"'|'",
".",
"join",
"(",
"dbpath",
")",
"# Append $ (end of string) so that twit will not match twitter!",
"if",
"not",
"dbpath",
".",
"endswith",
"(",
"'$'",
")",
":",
"dbpath",
"=",
"'(%s)$'",
"%",
"dbpath",
"return",
"dbpath"
] | Converts the dbpath to a regexp pattern.
Transforms dbpath from a string or an array of strings to a
regexp pattern which will be used to match database names.
Args:
dbpath: a string or an array containing the databases to be matched
from a cluster.
Returns:
A regexp pattern that will match any of the desired databases
on a cluster. | [
"Converts",
"the",
"dbpath",
"to",
"a",
"regexp",
"pattern",
"."
] | 286d1d8e0b3c17d5d7d4860487fe69358941067d | https://github.com/uberVU/mongo-pool/blob/286d1d8e0b3c17d5d7d4860487fe69358941067d/mongo_pool/mongo_pool.py#L142-L164 | train |
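A standalone sketch that mirrors `_parse_dbpath` (the local function name is mine, not the package API), showing why the pattern is anchored with `$`:

```python
import re

def parse_dbpath(dbpath):
    # Mirror of MongoPool._parse_dbpath: join list entries with '|' and
    # anchor the pattern so that 'twit' does not match 'twitter'.
    if isinstance(dbpath, list):
        dbpath = '|'.join(dbpath)
    if not dbpath.endswith('$'):
        dbpath = '(%s)$' % dbpath
    return dbpath

print(parse_dbpath(['blog', 'users']))                  # (blog|users)$
print(bool(re.match(parse_dbpath('twit'), 'twit')))     # True
print(bool(re.match(parse_dbpath('twit'), 'twitter')))  # False
```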
uberVU/mongo-pool | mongo_pool/mongo_pool.py | MongoPool._get_read_preference | def _get_read_preference(read_preference):
"""Converts read_preference from string to pymongo.ReadPreference value.
Args:
read_preference: string containing the read_preference from the
config file
Returns:
A value from the pymongo.ReadPreference enum
Raises:
Exception: Invalid read preference"""
read_preference = getattr(pymongo.ReadPreference, read_preference, None)
if read_preference is None:
raise ValueError('Invalid read preference: %s' % read_preference)
return read_preference | python | def _get_read_preference(read_preference):
"""Converts read_preference from string to pymongo.ReadPreference value.
Args:
read_preference: string containing the read_preference from the
config file
Returns:
A value from the pymongo.ReadPreference enum
Raises:
Exception: Invalid read preference"""
read_preference = getattr(pymongo.ReadPreference, read_preference, None)
if read_preference is None:
raise ValueError('Invalid read preference: %s' % read_preference)
return read_preference | [
"def",
"_get_read_preference",
"(",
"read_preference",
")",
":",
"read_preference",
"=",
"getattr",
"(",
"pymongo",
".",
"ReadPreference",
",",
"read_preference",
",",
"None",
")",
"if",
"read_preference",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Invalid read preference: %s'",
"%",
"read_preference",
")",
"return",
"read_preference"
] | Converts read_preference from string to pymongo.ReadPreference value.
Args:
read_preference: string containing the read_preference from the
config file
Returns:
A value from the pymongo.ReadPreference enum
Raises:
Exception: Invalid read preference | [
"Converts",
"read_preference",
"from",
"string",
"to",
"pymongo",
".",
"ReadPreference",
"value",
"."
] | 286d1d8e0b3c17d5d7d4860487fe69358941067d | https://github.com/uberVU/mongo-pool/blob/286d1d8e0b3c17d5d7d4860487fe69358941067d/mongo_pool/mongo_pool.py#L167-L181 | train |
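A standalone mirror of `_get_read_preference` (requires `pymongo`; the helper name is mine, and the result is kept in a separate variable so the error message can report the offending string):

```python
import pymongo

def get_read_preference(name):
    # Look the upper-cased preference name up on the pymongo.ReadPreference
    # enum; unknown names fail loudly instead of silently falling back.
    pref = getattr(pymongo.ReadPreference, name, None)
    if pref is None:
        raise ValueError('Invalid read preference: %s' % name)
    return pref

print(get_read_preference('SECONDARY_PREFERRED'))
try:
    get_read_preference('FASTEST')
except ValueError as exc:
    print(exc)    # Invalid read preference: FASTEST
```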
uberVU/mongo-pool | mongo_pool/mongo_pool.py | MongoPool.set_timeout | def set_timeout(self, network_timeout):
"""Set the timeout for existing and future Clients.
Close all current connections. This will cause future operations to
create new Clients with the network_timeout passed through
socketTimeoutMS optional parameter.
Args:
network_timeout: The new value in milliseconds for the timeout.
"""
# Do nothing if attempting to set the same timeout twice.
if network_timeout == self._network_timeout:
return
self._network_timeout = network_timeout
self._disconnect() | python | def set_timeout(self, network_timeout):
"""Set the timeout for existing and future Clients.
Close all current connections. This will cause future operations to
create new Clients with the network_timeout passed through
socketTimeoutMS optional parameter.
Args:
network_timeout: The new value in milliseconds for the timeout.
"""
# Do nothing if attempting to set the same timeout twice.
if network_timeout == self._network_timeout:
return
self._network_timeout = network_timeout
self._disconnect() | [
"def",
"set_timeout",
"(",
"self",
",",
"network_timeout",
")",
":",
"# Do nothing if attempting to set the same timeout twice.",
"if",
"network_timeout",
"==",
"self",
".",
"_network_timeout",
":",
"return",
"self",
".",
"_network_timeout",
"=",
"network_timeout",
"self",
".",
"_disconnect",
"(",
")"
] | Set the timeout for existing and future Clients.
Close all current connections. This will cause future operations to
create new Clients with the network_timeout passed through
socketTimeoutMS optional parameter.
Args:
network_timeout: The new value in milliseconds for the timeout. | [
"Set",
"the",
"timeout",
"for",
"existing",
"and",
"future",
"Clients",
"."
] | 286d1d8e0b3c17d5d7d4860487fe69358941067d | https://github.com/uberVU/mongo-pool/blob/286d1d8e0b3c17d5d7d4860487fe69358941067d/mongo_pool/mongo_pool.py#L183-L197 | train |
uberVU/mongo-pool | mongo_pool/mongo_pool.py | MongoPool._disconnect | def _disconnect(self):
"""Disconnect from all MongoDB Clients."""
for cluster in self._clusters:
if 'connection' in cluster:
connection = cluster.pop('connection')
connection.close()
# Remove all attributes that are database names so that next time
# when they are accessed, __getattr__ will be called and will create
# new Clients
for dbname in self._mapped_databases:
self.__delattr__(dbname)
self._mapped_databases = [] | python | def _disconnect(self):
"""Disconnect from all MongoDB Clients."""
for cluster in self._clusters:
if 'connection' in cluster:
connection = cluster.pop('connection')
connection.close()
# Remove all attributes that are database names so that next time
# when they are accessed, __getattr__ will be called and will create
# new Clients
for dbname in self._mapped_databases:
self.__delattr__(dbname)
self._mapped_databases = [] | [
"def",
"_disconnect",
"(",
"self",
")",
":",
"for",
"cluster",
"in",
"self",
".",
"_clusters",
":",
"if",
"'connection'",
"in",
"cluster",
":",
"connection",
"=",
"cluster",
".",
"pop",
"(",
"'connection'",
")",
"connection",
".",
"close",
"(",
")",
"# Remove all attributes that are database names so that next time",
"# when they are accessed, __getattr__ will be called and will create",
"# new Clients",
"for",
"dbname",
"in",
"self",
".",
"_mapped_databases",
":",
"self",
".",
"__delattr__",
"(",
"dbname",
")",
"self",
".",
"_mapped_databases",
"=",
"[",
"]"
] | Disconnect from all MongoDB Clients. | [
"Disconnect",
"from",
"all",
"MongoDB",
"Clients",
"."
] | 286d1d8e0b3c17d5d7d4860487fe69358941067d | https://github.com/uberVU/mongo-pool/blob/286d1d8e0b3c17d5d7d4860487fe69358941067d/mongo_pool/mongo_pool.py#L199-L210 | train |
uberVU/mongo-pool | mongo_pool/mongo_pool.py | MongoPool._get_connection | def _get_connection(self, cluster):
"""Return a connection to a Cluster.
Return a MongoClient or a MongoReplicaSetClient for the given Cluster.
This is done in a lazy manner (if there is already a Client connected to
the Cluster, it is returned and no other Client is created).
Args:
cluster: A dict containing information about a cluster.
Returns:
A MongoClient or MongoReplicaSetClient instance connected to the
desired cluster
"""
# w=1 because:
# http://stackoverflow.com/questions/14798552/is-mongodb-2-x-write-concern-w-1-truly-equals-to-safe-true
if 'connection' not in cluster:
cluster['connection'] = self._connection_class(
socketTimeoutMS=self._network_timeout,
w=1,
j=self.j,
**cluster['params'])
return cluster['connection'] | python | def _get_connection(self, cluster):
"""Return a connection to a Cluster.
Return a MongoClient or a MongoReplicaSetClient for the given Cluster.
This is done in a lazy manner (if there is already a Client connected to
the Cluster, it is returned and no other Client is created).
Args:
cluster: A dict containing information about a cluster.
Returns:
A MongoClient or MongoReplicaSetClient instance connected to the
desired cluster
"""
# w=1 because:
# http://stackoverflow.com/questions/14798552/is-mongodb-2-x-write-concern-w-1-truly-equals-to-safe-true
if 'connection' not in cluster:
cluster['connection'] = self._connection_class(
socketTimeoutMS=self._network_timeout,
w=1,
j=self.j,
**cluster['params'])
return cluster['connection'] | [
"def",
"_get_connection",
"(",
"self",
",",
"cluster",
")",
":",
"# w=1 because:",
"# http://stackoverflow.com/questions/14798552/is-mongodb-2-x-write-concern-w-1-truly-equals-to-safe-true",
"if",
"'connection'",
"not",
"in",
"cluster",
":",
"cluster",
"[",
"'connection'",
"]",
"=",
"self",
".",
"_connection_class",
"(",
"socketTimeoutMS",
"=",
"self",
".",
"_network_timeout",
",",
"w",
"=",
"1",
",",
"j",
"=",
"self",
".",
"j",
",",
"*",
"*",
"cluster",
"[",
"'params'",
"]",
")",
"return",
"cluster",
"[",
"'connection'",
"]"
] | Return a connection to a Cluster.
Return a MongoClient or a MongoReplicaSetClient for the given Cluster.
This is done in a lazy manner (if there is already a Client connected to
the Cluster, it is returned and no other Client is created).
Args:
cluster: A dict containing information about a cluster.
Returns:
A MongoClient or MongoReplicaSetClient instance connected to the
desired cluster | [
"Return",
"a",
"connection",
"to",
"a",
"Cluster",
"."
] | 286d1d8e0b3c17d5d7d4860487fe69358941067d | https://github.com/uberVU/mongo-pool/blob/286d1d8e0b3c17d5d7d4860487fe69358941067d/mongo_pool/mongo_pool.py#L212-L235 | train |
uberVU/mongo-pool | mongo_pool/mongo_pool.py | MongoPool._match_dbname | def _match_dbname(self, dbname):
"""Map a database name to the Cluster that holds the database.
Args:
dbname: A database name.
Returns:
A dict containing the information about the Cluster that holds the
database.
"""
for config in self._clusters:
if re.match(config['pattern'], dbname):
return config
raise Exception('No such database %s.' % dbname) | python | def _match_dbname(self, dbname):
"""Map a database name to the Cluster that holds the database.
Args:
dbname: A database name.
Returns:
A dict containing the information about the Cluster that holds the
database.
"""
for config in self._clusters:
if re.match(config['pattern'], dbname):
return config
raise Exception('No such database %s.' % dbname) | [
"def",
"_match_dbname",
"(",
"self",
",",
"dbname",
")",
":",
"for",
"config",
"in",
"self",
".",
"_clusters",
":",
"if",
"re",
".",
"match",
"(",
"config",
"[",
"'pattern'",
"]",
",",
"dbname",
")",
":",
"return",
"config",
"raise",
"Exception",
"(",
"'No such database %s.'",
"%",
"dbname",
")"
] | Map a database name to the Cluster that holds the database.
Args:
dbname: A database name.
Returns:
A dict containing the information about the Cluster that holds the
database. | [
"Map",
"a",
"database",
"name",
"to",
"the",
"Cluster",
"that",
"holds",
"the",
"database",
"."
] | 286d1d8e0b3c17d5d7d4860487fe69358941067d | https://github.com/uberVU/mongo-pool/blob/286d1d8e0b3c17d5d7d4860487fe69358941067d/mongo_pool/mongo_pool.py#L237-L250 | train |
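A standalone sketch of the routing `_match_dbname` performs over the `_clusters` table; the labels and patterns below are hypothetical:

```python
import re

# Hypothetical cluster table in the shape _parse_configs builds:
clusters = [
    {'label': 'content',  'pattern': '(blog|wiki)$'},
    {'label': 'accounts', 'pattern': '(users)$'},
]

def match_dbname(dbname):
    # Mirror of MongoPool._match_dbname: the first matching pattern wins.
    for config in clusters:
        if re.match(config['pattern'], dbname):
            return config['label']
    raise Exception('No such database %s.' % dbname)

print(match_dbname('wiki'))    # content
print(match_dbname('users'))   # accounts
```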
MacHu-GWU/single_file_module-project | sfm/flow.py | try_ntime | def try_ntime(max_try, func, *args, **kwargs):
"""
Try to execute a function n times, until no exception is raised or it has
been tried ``max_try`` times.
**Chinese documentation (translated)**
Repeatedly try to execute a function until it succeeds or has been retried
``max_try`` times. If any attempt succeeds, return normally; if every attempt
fails, behave exactly as the last call to ``func(*args, **kwargs)`` did.
"""
if max_try < 1:
raise ValueError
for i in range(max_try):
try:
return func(*args, **kwargs)
except Exception as e:
last_exception = e
raise last_exception | python | def try_ntime(max_try, func, *args, **kwargs):
"""
Try to execute a function n times, until no exception is raised or it has
been tried ``max_try`` times.
**Chinese documentation (translated)**
Repeatedly try to execute a function until it succeeds or has been retried
``max_try`` times. If any attempt succeeds, return normally; if every attempt
fails, behave exactly as the last call to ``func(*args, **kwargs)`` did.
"""
if max_try < 1:
raise ValueError
for i in range(max_try):
try:
return func(*args, **kwargs)
except Exception as e:
last_exception = e
raise last_exception | [
"def",
"try_ntime",
"(",
"max_try",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"max_try",
"<",
"1",
":",
"raise",
"ValueError",
"for",
"i",
"in",
"range",
"(",
"max_try",
")",
":",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"last_exception",
"=",
"e",
"raise",
"last_exception"
] | Try to execute a function n times, until no exception is raised or it has
been tried ``max_try`` times.
**Chinese documentation (translated)**
Repeatedly try to execute a function until it succeeds or has been retried
``max_try`` times. If any attempt succeeds, return normally; if every attempt
fails, behave exactly as the last call to ``func(*args, **kwargs)`` did. | [
"Try",
"execute",
"a",
"function",
"n",
"times",
"until",
"no",
"exception",
"raised",
"or",
"tried",
"max_try",
"times",
"."
] | 01f7a6b250853bebfd73de275895bf274325cfc1 | https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/flow.py#L9-L29 | train |
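A usage sketch for `try_ntime`, assuming the `sfm` package from the row above is importable; the flaky function is invented for illustration:

```python
import random

from sfm.flow import try_ntime

def flaky():
    # Fails roughly half the time; try_ntime keeps calling it until one
    # attempt succeeds or the retry budget is exhausted.
    if random.random() < 0.5:
        raise RuntimeError('transient failure')
    return 'ok'

print(try_ntime(5, flaky))          # almost always 'ok'

try:
    try_ntime(3, lambda: 1 / 0)     # every attempt fails
except ZeroDivisionError:
    print('re-raised after 3 failed attempts')
```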
MounirMesselmeni/django-highlightjs | highlightjs/templatetags/highlightjs.py | highlightjs_javascript | def highlightjs_javascript(jquery=None):
"""
Return HTML for highlightjs JavaScript.
Adjust url in settings. If no url is returned, we don't want this statement to return any HTML.
This is intended behavior.
Default value: ``None``
This value is configurable, see Settings section
**Tag name**::
highlightjs_javascript
**Parameters**:
:jquery: Truthy to include jQuery as well as highlightjs
**usage**::
{% highlightjs_javascript %}
**example**::
{% highlightjs_javascript jquery=1 %}
"""
javascript = ''
# See if we have to include jQuery
if jquery is None:
jquery = get_highlightjs_setting('include_jquery', False)
if jquery:
url = highlightjs_jquery_url()
if url:
javascript += '<script src="{url}"></script>'.format(url=url)
url = highlightjs_url()
if url:
javascript += '<script src="{url}"></script>'.format(url=url)
javascript += '<script>hljs.initHighlightingOnLoad();</script>'
return javascript | python | def highlightjs_javascript(jquery=None):
"""
Return HTML for highlightjs JavaScript.
Adjust url in settings. If no url is returned, we don't want this statement to return any HTML.
This is intended behavior.
Default value: ``None``
This value is configurable, see Settings section
**Tag name**::
highlightjs_javascript
**Parameters**:
:jquery: Truthy to include jQuery as well as highlightjs
**usage**::
{% highlightjs_javascript %}
**example**::
{% highlightjs_javascript jquery=1 %}
"""
javascript = ''
# See if we have to include jQuery
if jquery is None:
jquery = get_highlightjs_setting('include_jquery', False)
if jquery:
url = highlightjs_jquery_url()
if url:
javascript += '<script src="{url}"></script>'.format(url=url)
url = highlightjs_url()
if url:
javascript += '<script src="{url}"></script>'.format(url=url)
javascript += '<script>hljs.initHighlightingOnLoad();</script>'
return javascript | [
"def",
"highlightjs_javascript",
"(",
"jquery",
"=",
"None",
")",
":",
"javascript",
"=",
"''",
"# See if we have to include jQuery",
"if",
"jquery",
"is",
"None",
":",
"jquery",
"=",
"get_highlightjs_setting",
"(",
"'include_jquery'",
",",
"False",
")",
"if",
"jquery",
":",
"url",
"=",
"highlightjs_jquery_url",
"(",
")",
"if",
"url",
":",
"javascript",
"+=",
"'<script src=\"{url}\"></script>'",
".",
"format",
"(",
"url",
"=",
"url",
")",
"url",
"=",
"highlightjs_url",
"(",
")",
"if",
"url",
":",
"javascript",
"+=",
"'<script src=\"{url}\"></script>'",
".",
"format",
"(",
"url",
"=",
"url",
")",
"javascript",
"+=",
"'<script>hljs.initHighlightingOnLoad();</script>'",
"return",
"javascript"
] | Return HTML for highlightjs JavaScript.
Adjust url in settings. If no url is returned, we don't want this statement to return any HTML.
This is intended behavior.
Default value: ``None``
This value is configurable, see Settings section
**Tag name**::
highlightjs_javascript
**Parameters**:
:jquery: Truthy to include jQuery as well as highlightjs
**usage**::
{% highlightjs_javascript %}
**example**::
{% highlightjs_javascript jquery=1 %} | [
"Return",
"HTML",
"for",
"highlightjs",
"JavaScript",
"."
] | 3758cae67ed15f38641fb51a71ca9ed85af78345 | https://github.com/MounirMesselmeni/django-highlightjs/blob/3758cae67ed15f38641fb51a71ca9ed85af78345/highlightjs/templatetags/highlightjs.py#L39-L79 | train |
ariebovenberg/snug | tutorial/composed0.py | repo | def repo(name: str, owner: str) -> snug.Query[dict]:
"""a repository lookup by owner and name"""
return json.loads((yield f'/repos/{owner}/{name}').content) | python | def repo(name: str, owner: str) -> snug.Query[dict]:
"""a repository lookup by owner and name"""
return json.loads((yield f'/repos/{owner}/{name}').content) | [
"def",
"repo",
"(",
"name",
":",
"str",
",",
"owner",
":",
"str",
")",
"->",
"snug",
".",
"Query",
"[",
"dict",
"]",
":",
"return",
"json",
".",
"loads",
"(",
"(",
"yield",
"f'/repos/{owner}/{name}'",
")",
".",
"content",
")"
] | a repository lookup by owner and name | [
"a",
"repository",
"lookup",
"by",
"owner",
"and",
"name"
] | 4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef | https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/tutorial/composed0.py#L13-L15 | train |
ariebovenberg/snug | tutorial/executing_queries.py | repo | def repo(name: str, owner: str) -> snug.Query[dict]:
"""a repo lookup by owner and name"""
request = snug.GET(f'https://api.github.com/repos/{owner}/{name}')
response = yield request
return json.loads(response.content) | python | def repo(name: str, owner: str) -> snug.Query[dict]:
"""a repo lookup by owner and name"""
request = snug.GET(f'https://api.github.com/repos/{owner}/{name}')
response = yield request
return json.loads(response.content) | [
"def",
"repo",
"(",
"name",
":",
"str",
",",
"owner",
":",
"str",
")",
"->",
"snug",
".",
"Query",
"[",
"dict",
"]",
":",
"request",
"=",
"snug",
".",
"GET",
"(",
"f'https://api.github.com/repos/{owner}/{name}'",
")",
"response",
"=",
"yield",
"request",
"return",
"json",
".",
"loads",
"(",
"response",
".",
"content",
")"
] | a repo lookup by owner and name | [
"a",
"repo",
"lookup",
"by",
"owner",
"and",
"name"
] | 4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef | https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/tutorial/executing_queries.py#L6-L10 | train |
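To show how such a generator-based query is driven, here is a sketch using `snug.execute`, which sends the yielded request with snug's default HTTP client and feeds the response back into the generator (requires the `snug` package and network access to api.github.com):

```python
import json
import snug

def repo(name: str, owner: str) -> snug.Query[dict]:
    # Same query as in the record above.
    request = snug.GET(f'https://api.github.com/repos/{owner}/{name}')
    response = yield request
    return json.loads(response.content)

lookup = repo('Hello-World', owner='octocat')
data = snug.execute(lookup)
print(data['full_name'])     # octocat/Hello-World
```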
ariebovenberg/snug | tutorial/executing_queries.py | follow | def follow(name: str) -> snug.Query[bool]:
"""follow another user"""
request = snug.PUT(f'https://api.github.com/user/following/{name}')
response = yield request
return response.status_code == 204 | python | def follow(name: str) -> snug.Query[bool]:
"""follow another user"""
request = snug.PUT(f'https://api.github.com/user/following/{name}')
response = yield request
return response.status_code == 204 | [
"def",
"follow",
"(",
"name",
":",
"str",
")",
"->",
"snug",
".",
"Query",
"[",
"bool",
"]",
":",
"request",
"=",
"snug",
".",
"PUT",
"(",
"f'https://api.github.com/user/following/{name}'",
")",
"response",
"=",
"yield",
"request",
"return",
"response",
".",
"status_code",
"==",
"204"
] | follow another user | [
"follow",
"another",
"user"
] | 4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef | https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/tutorial/executing_queries.py#L13-L17 | train |
envi-idl/envipyengine | envipyengine/taskengine/task.py | Task.taskinfo | def taskinfo(self):
""" Retrieve the Task Information
"""
task_input = {'taskName': 'QueryTask',
'inputParameters': {"Task_Name": self._name}}
info = taskengine.execute(task_input, self._engine, cwd=self._cwd)
task_def = info['outputParameters']['DEFINITION']
task_def['name'] = str(task_def.pop('NAME'))
task_def['description'] = str(task_def.pop('DESCRIPTION'))
task_def['displayName'] = str(task_def.pop('DISPLAY_NAME'))
if 'COMMUTE_ON_SUBSET' in task_def:
task_def['commute_on_subset'] = task_def.pop('COMMUTE_ON_SUBSET')
if 'COMMUTE_ON_DOWNSAMPLE' in task_def:
task_def['commute_on_downsample'] = task_def.pop('COMMUTE_ON_DOWNSAMPLE')
# Convert PARAMETERS into a list instead of a dictionary
# which matches the gsf side things
task_def['parameters'] = \
[v for v in task_def['PARAMETERS'].values()]
task_def.pop('PARAMETERS')
parameters = task_def['parameters']
for parameter in parameters:
parameter['name'] = str(parameter.pop('NAME'))
parameter['description'] = str(parameter.pop('DESCRIPTION'))
parameter['display_name'] = str(parameter.pop('DISPLAY_NAME'))
parameter['required'] = bool(parameter.pop('REQUIRED'))
if 'MIN' in parameter:
parameter['min'] = parameter.pop('MIN')
if 'MAX' in parameter:
parameter['max'] = parameter.pop('MAX')
if parameter['TYPE'].count('['):
parameter['type'], parameter['dimensions'] = parameter.pop('TYPE').split('[')
parameter['dimensions'] = '[' + parameter['dimensions']
parameter['type'] = str(parameter['type'])
else:
parameter['type'] = str(parameter.pop('TYPE').split('ARRAY')[0])
if 'DIMENSIONS' in parameter:
parameter['dimensions'] = parameter.pop('DIMENSIONS')
if 'DIRECTION' in parameter:
parameter['direction'] = parameter.pop('DIRECTION').lower()
if 'DEFAULT' in parameter:
if parameter['DEFAULT'] is not None:
parameter['default_value'] = parameter.pop('DEFAULT')
else:
parameter.pop('DEFAULT')
if 'CHOICE_LIST' in parameter:
if parameter['CHOICE_LIST'] is not None:
parameter['choice_list'] = parameter.pop('CHOICE_LIST')
else:
parameter.pop('CHOICE_LIST')
if 'FOLD_CASE' in parameter:
parameter['fold_case'] = parameter.pop('FOLD_CASE')
if 'AUTO_EXTENSION' in parameter:
parameter['auto_extension'] = parameter.pop('AUTO_EXTENSION')
if 'IS_TEMPORARY' in parameter:
parameter['is_temporary'] = parameter.pop('IS_TEMPORARY')
if 'IS_DIRECTORY' in parameter:
parameter['is_directory'] = parameter.pop('IS_DIRECTORY')
return task_def | python | def taskinfo(self):
""" Retrieve the Task Information
"""
task_input = {'taskName': 'QueryTask',
'inputParameters': {"Task_Name": self._name}}
info = taskengine.execute(task_input, self._engine, cwd=self._cwd)
task_def = info['outputParameters']['DEFINITION']
task_def['name'] = str(task_def.pop('NAME'))
task_def['description'] = str(task_def.pop('DESCRIPTION'))
task_def['displayName'] = str(task_def.pop('DISPLAY_NAME'))
if 'COMMUTE_ON_SUBSET' in task_def:
task_def['commute_on_subset'] = task_def.pop('COMMUTE_ON_SUBSET')
if 'COMMUTE_ON_DOWNSAMPLE' in task_def:
task_def['commute_on_downsample'] = task_def.pop('COMMUTE_ON_DOWNSAMPLE')
# Convert PARAMETERS into a list instead of a dictionary
# which matches the GSF side of things
task_def['parameters'] = \
[v for v in task_def['PARAMETERS'].values()]
task_def.pop('PARAMETERS')
parameters = task_def['parameters']
for parameter in parameters:
parameter['name'] = str(parameter.pop('NAME'))
parameter['description'] = str(parameter.pop('DESCRIPTION'))
parameter['display_name'] = str(parameter.pop('DISPLAY_NAME'))
parameter['required'] = bool(parameter.pop('REQUIRED'))
if 'MIN' in parameter:
parameter['min'] = parameter.pop('MIN')
if 'MAX' in parameter:
parameter['max'] = parameter.pop('MAX')
if parameter['TYPE'].count('['):
parameter['type'], parameter['dimensions'] = parameter.pop('TYPE').split('[')
parameter['dimensions'] = '[' + parameter['dimensions']
parameter['type'] = str(parameter['type'])
else:
parameter['type'] = str(parameter.pop('TYPE').split('ARRAY')[0])
if 'DIMENSIONS' in parameter:
parameter['dimensions'] = parameter.pop('DIMENSIONS')
if 'DIRECTION' in parameter:
parameter['direction'] = parameter.pop('DIRECTION').lower()
if 'DEFAULT' in parameter:
if parameter['DEFAULT'] is not None:
parameter['default_value'] = parameter.pop('DEFAULT')
else:
parameter.pop('DEFAULT')
if 'CHOICE_LIST' in parameter:
if parameter['CHOICE_LIST'] is not None:
parameter['choice_list'] = parameter.pop('CHOICE_LIST')
else:
parameter.pop('CHOICE_LIST')
if 'FOLD_CASE' in parameter:
parameter['fold_case'] = parameter.pop('FOLD_CASE')
if 'AUTO_EXTENSION' in parameter:
parameter['auto_extension'] = parameter.pop('AUTO_EXTENSION')
if 'IS_TEMPORARY' in parameter:
parameter['is_temporary'] = parameter.pop('IS_TEMPORARY')
if 'IS_DIRECTORY' in parameter:
parameter['is_directory'] = parameter.pop('IS_DIRECTORY')
return task_def | [
"def",
"taskinfo",
"(",
"self",
")",
":",
"task_input",
"=",
"{",
"'taskName'",
":",
"'QueryTask'",
",",
"'inputParameters'",
":",
"{",
"\"Task_Name\"",
":",
"self",
".",
"_name",
"}",
"}",
"info",
"=",
"taskengine",
".",
"execute",
"(",
"task_input",
",",
"self",
".",
"_engine",
",",
"cwd",
"=",
"self",
".",
"_cwd",
")",
"task_def",
"=",
"info",
"[",
"'outputParameters'",
"]",
"[",
"'DEFINITION'",
"]",
"task_def",
"[",
"'name'",
"]",
"=",
"str",
"(",
"task_def",
".",
"pop",
"(",
"'NAME'",
")",
")",
"task_def",
"[",
"'description'",
"]",
"=",
"str",
"(",
"task_def",
".",
"pop",
"(",
"'DESCRIPTION'",
")",
")",
"task_def",
"[",
"'displayName'",
"]",
"=",
"str",
"(",
"task_def",
".",
"pop",
"(",
"'DISPLAY_NAME'",
")",
")",
"if",
"'COMMUTE_ON_SUBSET'",
"in",
"task_def",
":",
"task_def",
"[",
"'commute_on_subset'",
"]",
"=",
"task_def",
".",
"pop",
"(",
"'COMMUTE_ON_SUBSET'",
")",
"if",
"'COMMUTE_ON_DOWNSAMPLE'",
"in",
"task_def",
":",
"task_def",
"[",
"'commute_on_downsample'",
"]",
"=",
"task_def",
".",
"pop",
"(",
"'COMMUTE_ON_DOWNSAMPLE'",
")",
"# Convert PARAMETERS into a list instead of a dictionary",
"# which matches the gsf side things",
"task_def",
"[",
"'parameters'",
"]",
"=",
"[",
"v",
"for",
"v",
"in",
"task_def",
"[",
"'PARAMETERS'",
"]",
".",
"values",
"(",
")",
"]",
"task_def",
".",
"pop",
"(",
"'PARAMETERS'",
")",
"parameters",
"=",
"task_def",
"[",
"'parameters'",
"]",
"for",
"parameter",
"in",
"parameters",
":",
"parameter",
"[",
"'name'",
"]",
"=",
"str",
"(",
"parameter",
".",
"pop",
"(",
"'NAME'",
")",
")",
"parameter",
"[",
"'description'",
"]",
"=",
"str",
"(",
"parameter",
".",
"pop",
"(",
"'DESCRIPTION'",
")",
")",
"parameter",
"[",
"'display_name'",
"]",
"=",
"str",
"(",
"parameter",
".",
"pop",
"(",
"'DISPLAY_NAME'",
")",
")",
"parameter",
"[",
"'required'",
"]",
"=",
"bool",
"(",
"parameter",
".",
"pop",
"(",
"'REQUIRED'",
")",
")",
"if",
"'MIN'",
"in",
"parameter",
":",
"parameter",
"[",
"'min'",
"]",
"=",
"parameter",
".",
"pop",
"(",
"'MIN'",
")",
"if",
"'MAX'",
"in",
"parameter",
":",
"parameter",
"[",
"'max'",
"]",
"=",
"parameter",
".",
"pop",
"(",
"'MAX'",
")",
"if",
"parameter",
"[",
"'TYPE'",
"]",
".",
"count",
"(",
"'['",
")",
":",
"parameter",
"[",
"'type'",
"]",
",",
"parameter",
"[",
"'dimensions'",
"]",
"=",
"parameter",
".",
"pop",
"(",
"'TYPE'",
")",
".",
"split",
"(",
"'['",
")",
"parameter",
"[",
"'dimensions'",
"]",
"=",
"'['",
"+",
"parameter",
"[",
"'dimensions'",
"]",
"parameter",
"[",
"'type'",
"]",
"=",
"str",
"(",
"parameter",
"[",
"'type'",
"]",
")",
"else",
":",
"parameter",
"[",
"'type'",
"]",
"=",
"str",
"(",
"parameter",
".",
"pop",
"(",
"'TYPE'",
")",
".",
"split",
"(",
"'ARRAY'",
")",
"[",
"0",
"]",
")",
"if",
"'DIMENSIONS'",
"in",
"parameter",
":",
"parameter",
"[",
"'dimensions'",
"]",
"=",
"parameter",
".",
"pop",
"(",
"'DIMENSIONS'",
")",
"if",
"'DIRECTION'",
"in",
"parameter",
":",
"parameter",
"[",
"'direction'",
"]",
"=",
"parameter",
".",
"pop",
"(",
"'DIRECTION'",
")",
".",
"lower",
"(",
")",
"if",
"'DEFAULT'",
"in",
"parameter",
":",
"if",
"parameter",
"[",
"'DEFAULT'",
"]",
"is",
"not",
"None",
":",
"parameter",
"[",
"'default_value'",
"]",
"=",
"parameter",
".",
"pop",
"(",
"'DEFAULT'",
")",
"else",
":",
"parameter",
".",
"pop",
"(",
"'DEFAULT'",
")",
"if",
"'CHOICE_LIST'",
"in",
"parameter",
":",
"if",
"parameter",
"[",
"'CHOICE_LIST'",
"]",
"is",
"not",
"None",
":",
"parameter",
"[",
"'choice_list'",
"]",
"=",
"parameter",
".",
"pop",
"(",
"'CHOICE_LIST'",
")",
"else",
":",
"parameter",
".",
"pop",
"(",
"'CHOICE_LIST'",
")",
"if",
"'FOLD_CASE'",
"in",
"parameter",
":",
"parameter",
"[",
"'fold_case'",
"]",
"=",
"parameter",
".",
"pop",
"(",
"'FOLD_CASE'",
")",
"if",
"'AUTO_EXTENSION'",
"in",
"parameter",
":",
"parameter",
"[",
"'auto_extension'",
"]",
"=",
"parameter",
".",
"pop",
"(",
"'AUTO_EXTENSION'",
")",
"if",
"'IS_TEMPORARY'",
"in",
"parameter",
":",
"parameter",
"[",
"'is_temporary'",
"]",
"=",
"parameter",
".",
"pop",
"(",
"'IS_TEMPORARY'",
")",
"if",
"'IS_DIRECTORY'",
"in",
"parameter",
":",
"parameter",
"[",
"'is_directory'",
"]",
"=",
"parameter",
".",
"pop",
"(",
"'IS_DIRECTORY'",
")",
"return",
"task_def"
] | Retrieve the Task Information | [
"Retrieve",
"the",
"Task",
"Information"
] | 567b639d6592deec3289f6122a9e3d18f2f98432 | https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/taskengine/task.py#L57-L133 | train |
koszullab/metaTOR | metator/scripts/hicstuff.py | despeckle_simple | def despeckle_simple(B, th2=2):
"""Single-chromosome despeckling
Simple speckle-removing function on a single chromosome. It also works
for multiple chromosomes but trends may be disrupted.
Parameters
----------
B : array_like
The input matrix to despeckle
th2 : float
The number of standard deviations above the mean beyond which
despeckling should be performed
Returns
-------
array_like
The despeckled matrix
"""
A = np.copy(B)
n1 = A.shape[0]
dist = {u: np.diag(A, u) for u in range(n1)}
medians, stds = {}, {}
for u in dist:
medians[u] = np.median(dist[u])
stds[u] = np.std(dist[u])
for nw, j in itertools.product(range(n1), range(n1)):
lp = j + nw
kp = j - nw
if lp < n1:
if A[j, lp] > medians[nw] + th2 * stds[nw]:
A[j, lp] = medians[nw]
if kp >= 0:
if A[j, kp] > medians[nw] + th2 * stds[nw]:
A[j, kp] = medians[nw]
return A | python | def despeckle_simple(B, th2=2):
"""Single-chromosome despeckling
Simple speckle-removing function on a single chromosome. It also works
for multiple chromosomes but trends may be disrupted.
Parameters
----------
B : array_like
The input matrix to despeckle
th2 : float
The number of standard deviations above the mean beyond which
despeckling should be performed
Returns
-------
array_like
The despeckled matrix
"""
A = np.copy(B)
n1 = A.shape[0]
dist = {u: np.diag(A, u) for u in range(n1)}
medians, stds = {}, {}
for u in dist:
medians[u] = np.median(dist[u])
stds[u] = np.std(dist[u])
for nw, j in itertools.product(range(n1), range(n1)):
lp = j + nw
kp = j - nw
if lp < n1:
if A[j, lp] > medians[nw] + th2 * stds[nw]:
A[j, lp] = medians[nw]
if kp >= 0:
if A[j, kp] > medians[nw] + th2 * stds[nw]:
A[j, kp] = medians[nw]
return A | [
"def",
"despeckle_simple",
"(",
"B",
",",
"th2",
"=",
"2",
")",
":",
"A",
"=",
"np",
".",
"copy",
"(",
"B",
")",
"n1",
"=",
"A",
".",
"shape",
"[",
"0",
"]",
"dist",
"=",
"{",
"u",
":",
"np",
".",
"diag",
"(",
"A",
",",
"u",
")",
"for",
"u",
"in",
"range",
"(",
"n1",
")",
"}",
"medians",
",",
"stds",
"=",
"{",
"}",
",",
"{",
"}",
"for",
"u",
"in",
"dist",
":",
"medians",
"[",
"u",
"]",
"=",
"np",
".",
"median",
"(",
"dist",
"[",
"u",
"]",
")",
"stds",
"[",
"u",
"]",
"=",
"np",
".",
"std",
"(",
"dist",
"[",
"u",
"]",
")",
"for",
"nw",
",",
"j",
"in",
"itertools",
".",
"product",
"(",
"range",
"(",
"n1",
")",
",",
"range",
"(",
"n1",
")",
")",
":",
"lp",
"=",
"j",
"+",
"nw",
"kp",
"=",
"j",
"-",
"nw",
"if",
"lp",
"<",
"n1",
":",
"if",
"A",
"[",
"j",
",",
"lp",
"]",
">",
"medians",
"[",
"nw",
"]",
"+",
"th2",
"*",
"stds",
"[",
"nw",
"]",
":",
"A",
"[",
"j",
",",
"lp",
"]",
"=",
"medians",
"[",
"nw",
"]",
"if",
"kp",
">=",
"0",
":",
"if",
"A",
"[",
"j",
",",
"kp",
"]",
">",
"medians",
"[",
"nw",
"]",
"+",
"th2",
"*",
"stds",
"[",
"nw",
"]",
":",
"A",
"[",
"j",
",",
"kp",
"]",
"=",
"medians",
"[",
"nw",
"]",
"return",
"A"
] | Single-chromosome despeckling
Simple speckle-removing function on a single chromosome. It also works
for multiple chromosomes but trends may be disrupted.
Parameters
----------
B : array_like
The input matrix to despeckle
th2 : float
The number of standard deviations above the mean beyond which
despeckling should be performed
Returns
-------
array_like
The despeckled matrix | [
"Single",
"-",
"chromosome",
"despeckling"
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L32-L70 | train |
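A small demonstration of the despeckling above on synthetic data (the import path assumes the module location shown in the row; the matrix is made up):

```python
import numpy as np
from metator.scripts.hicstuff import despeckle_simple

# Flat synthetic contact map with one artificially bright, symmetric speckle.
M = np.ones((50, 50))
M[10, 20] = M[20, 10] = 500.0

cleaned = despeckle_simple(M, th2=2)
print(M[10, 20], cleaned[10, 20])   # 500.0 1.0 -> speckle reset to the diagonal median
```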
koszullab/metaTOR | metator/scripts/hicstuff.py | bin_sparse | def bin_sparse(M, subsampling_factor=3):
"""Perform the bin_dense procedure for sparse matrices. Remaining rows
and cols are lumped with the rest at the end.
"""
try:
from scipy.sparse import coo_matrix
except ImportError as e:
print(str(e))
print("I am peforming dense binning by default.")
return bin_dense(M.todense())
N = M.tocoo()
n, m = N.shape
row, col, data = N.row, N.col, N.data
# Divide row and column indices - duplicate coordinates are added in
# sparse matrix construction
binned_row = row // subsampling_factor
binned_col = col // subsampling_factor
binned_n = n // subsampling_factor
binned_m = m // subsampling_factor
# Attach remaining columns and rows to the last one
binned_row[binned_row >= binned_n] -= n % subsampling_factor
binned_col[binned_col >= binned_m] -= m % subsampling_factor
result = coo_matrix((data, (binned_row, binned_col)),
shape=(binned_n, binned_m))
return result | python | def bin_sparse(M, subsampling_factor=3):
"""Perform the bin_dense procedure for sparse matrices. Remaining rows
and cols are lumped with the rest at the end.
"""
try:
from scipy.sparse import coo_matrix
except ImportError as e:
print(str(e))
print("I am peforming dense binning by default.")
return bin_dense(M.todense())
N = M.tocoo()
n, m = N.shape
row, col, data = N.row, N.col, N.data
# Divide row and column indices - duplicate coordinates are added in
# sparse matrix construction
binned_row = row // subsampling_factor
binned_col = col // subsampling_factor
binned_n = n // subsampling_factor
binned_m = m // subsampling_factor
# Attach remaining columns and rows to the last one
binned_row[binned_row >= binned_n] -= n % subsampling_factor
binned_col[binned_col >= binned_m] -= m % subsampling_factor
result = coo_matrix((data, (binned_row, binned_col)),
shape=(binned_n, binned_m))
return result | [
"def",
"bin_sparse",
"(",
"M",
",",
"subsampling_factor",
"=",
"3",
")",
":",
"try",
":",
"from",
"scipy",
".",
"sparse",
"import",
"coo_matrix",
"except",
"ImportError",
"as",
"e",
":",
"print",
"(",
"str",
"(",
"e",
")",
")",
"print",
"(",
"\"I am peforming dense binning by default.\"",
")",
"return",
"bin_dense",
"(",
"M",
".",
"todense",
"(",
")",
")",
"N",
"=",
"M",
".",
"tocoo",
"(",
")",
"n",
",",
"m",
"=",
"N",
".",
"shape",
"row",
",",
"col",
",",
"data",
"=",
"N",
".",
"row",
",",
"N",
".",
"col",
",",
"N",
".",
"data",
"# Divide row and column indices - duplicate coordinates are added in",
"# sparse matrix construction",
"binned_row",
"=",
"row",
"//",
"subsampling_factor",
"binned_col",
"=",
"col",
"//",
"subsampling_factor",
"binned_n",
"=",
"n",
"//",
"subsampling_factor",
"binned_m",
"=",
"m",
"//",
"subsampling_factor",
"# Attach remaining columns and rows to the last one",
"binned_row",
"[",
"binned_row",
">=",
"binned_n",
"]",
"-=",
"n",
"%",
"subsampling_factor",
"binned_col",
"[",
"binned_col",
">=",
"binned_m",
"]",
"-=",
"m",
"%",
"subsampling_factor",
"result",
"=",
"coo_matrix",
"(",
"(",
"data",
",",
"(",
"binned_row",
",",
"binned_col",
")",
")",
",",
"shape",
"=",
"(",
"binned_n",
",",
"binned_m",
")",
")",
"return",
"result"
] | Perform the bin_dense procedure for sparse matrices. Remaining rows
and cols are lumped with the rest at the end. | [
"Perform",
"the",
"bin_dense",
"procedure",
"for",
"sparse",
"matrices",
".",
"Remaining",
"rows",
"and",
"cols",
"are",
"lumped",
"with",
"the",
"rest",
"at",
"the",
"end",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L174-L205 | train |
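A quick check of the sparse binning above (same import assumption, plus `scipy`); the 7x7 size is chosen so the leftover row and column get lumped into the last bin:

```python
import numpy as np
from scipy.sparse import coo_matrix
from metator.scripts.hicstuff import bin_sparse

M = coo_matrix(np.arange(49, dtype=float).reshape(7, 7))
binned = bin_sparse(M, subsampling_factor=3)

# 7 // 3 = 2 bins per axis; row/column 6 is folded into the last bin.
print(binned.shape)       # (2, 2)
print(binned.toarray())   # duplicate coordinates are summed on densification
```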
koszullab/metaTOR | metator/scripts/hicstuff.py | bin_matrix | def bin_matrix(M, subsampling_factor=3):
"""Bin either sparse or dense matrices.
"""
try:
from scipy.sparse import issparse
if issparse(M):
return bin_sparse(M, subsampling_factor=subsampling_factor)
else:
raise ImportError
except ImportError:
return bin_dense(M, subsampling_factor=subsampling_factor) | python | def bin_matrix(M, subsampling_factor=3):
"""Bin either sparse or dense matrices.
"""
try:
from scipy.sparse import issparse
if issparse(M):
return bin_sparse(M, subsampling_factor=subsampling_factor)
else:
raise ImportError
except ImportError:
return bin_dense(M, subsampling_factor=subsampling_factor) | [
"def",
"bin_matrix",
"(",
"M",
",",
"subsampling_factor",
"=",
"3",
")",
":",
"try",
":",
"from",
"scipy",
".",
"sparse",
"import",
"issparse",
"if",
"issparse",
"(",
"M",
")",
":",
"return",
"bin_sparse",
"(",
"M",
",",
"subsampling_factor",
"=",
"subsampling_factor",
")",
"else",
":",
"raise",
"ImportError",
"except",
"ImportError",
":",
"return",
"bin_dense",
"(",
"M",
",",
"subsampling_factor",
"=",
"subsampling_factor",
")"
] | Bin either sparse or dense matrices. | [
"Bin",
"either",
"sparse",
"or",
"dense",
"matrices",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L208-L219 | train |
koszullab/metaTOR | metator/scripts/hicstuff.py | bin_annotation | def bin_annotation(annotation=None, subsampling_factor=3):
"""Perform binning on genome annotations such as contig information or bin
positions.
"""
if annotation is None:
annotation = np.array([])
n = len(annotation)
binned_positions = [annotation[i] for i in range(n) if
i % subsampling_factor == 0]
if len(binned_positions) == 0:
binned_positions.append(0)
return np.array(binned_positions) | python | def bin_annotation(annotation=None, subsampling_factor=3):
"""Perform binning on genome annotations such as contig information or bin
positions.
"""
if annotation is None:
annotation = np.array([])
n = len(annotation)
binned_positions = [annotation[i] for i in range(n) if
i % subsampling_factor == 0]
if len(binned_positions) == 0:
binned_positions.append(0)
return np.array(binned_positions) | [
"def",
"bin_annotation",
"(",
"annotation",
"=",
"None",
",",
"subsampling_factor",
"=",
"3",
")",
":",
"if",
"annotation",
"is",
"None",
":",
"annotation",
"=",
"np",
".",
"array",
"(",
"[",
"]",
")",
"n",
"=",
"len",
"(",
"annotation",
")",
"binned_positions",
"=",
"[",
"annotation",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"n",
")",
"if",
"i",
"%",
"subsampling_factor",
"==",
"0",
"]",
"if",
"len",
"(",
"binned_positions",
")",
"==",
"0",
":",
"binned_positions",
".",
"append",
"(",
"0",
")",
"return",
"np",
".",
"array",
"(",
"binned_positions",
")"
] | Perform binning on genome annotations such as contig information or bin
positions. | [
"Perform",
"binning",
"on",
"genome",
"annotations",
"such",
"as",
"contig",
"information",
"or",
"bin",
"positions",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L222-L234 | train |
koszullab/metaTOR | metator/scripts/hicstuff.py | build_pyramid | def build_pyramid(M, subsampling_factor=3):
"""Iterate over a given number of times on matrix M
so as to compute smaller and smaller matrices with bin_dense.
"""
subs = int(subsampling_factor)
if subs < 1:
raise ValueError(
"Subsampling factor needs to be an integer greater than 1.")
N = [M]
while min(N[-1].shape) > 1:
N.append(bin_matrix(N[-1], subsampling_factor=subs))
return N | python | def build_pyramid(M, subsampling_factor=3):
"""Iterate over a given number of times on matrix M
so as to compute smaller and smaller matrices with bin_dense.
"""
subs = int(subsampling_factor)
if subs < 1:
raise ValueError(
"Subsampling factor needs to be an integer greater than 1.")
N = [M]
while min(N[-1].shape) > 1:
N.append(bin_matrix(N[-1], subsampling_factor=subs))
return N | [
"def",
"build_pyramid",
"(",
"M",
",",
"subsampling_factor",
"=",
"3",
")",
":",
"subs",
"=",
"int",
"(",
"subsampling_factor",
")",
"if",
"subs",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Subsampling factor needs to be an integer greater than 1.\"",
")",
"N",
"=",
"[",
"M",
"]",
"while",
"min",
"(",
"N",
"[",
"-",
"1",
"]",
".",
"shape",
")",
">",
"1",
":",
"N",
".",
"append",
"(",
"bin_matrix",
"(",
"N",
"[",
"-",
"1",
"]",
",",
"subsampling_factor",
"=",
"subs",
")",
")",
"return",
"N"
] | Iterate a given number of times on matrix M
so as to compute smaller and smaller matrices with bin_dense. | [
"Iterate",
"over",
"a",
"given",
"number",
"of",
"times",
"on",
"matrix",
"M",
"so",
"as",
"to",
"compute",
"smaller",
"and",
"smaller",
"matrices",
"with",
"bin_dense",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L251-L263 | train |
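A usage sketch for the pyramid construction (same import assumption; dense input is routed to `bin_dense`, which lives in the same hicstuff module but is not shown in this dump):

```python
import numpy as np
from metator.scripts.hicstuff import build_pyramid

M = np.random.random((81, 81))
pyramid = build_pyramid(M, subsampling_factor=3)

# Each level is 3x smaller than the previous one until a single bin remains.
print([level.shape for level in pyramid])
# expected: [(81, 81), (27, 27), (9, 9), (3, 3), (1, 1)]
```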
koszullab/metaTOR | metator/scripts/hicstuff.py | bin_exact_kb_dense | def bin_exact_kb_dense(M, positions, length=10):
"""Perform the kb-binning procedure with total bin lengths being exactly
set to that of the specified input. Fragments overlapping two potential
bins will be split and related contact counts will be divided according
to overlap proportions in each bin.
"""
unit = 10**3
ul = unit * length
units = positions / ul
n = len(positions)
idx = [i for i in range(
n - 1) if np.ceil(units[i]) < np.ceil(units[i + 1])]
m = len(idx) - 1
N = np.zeros((m, m))
remainders = [0] + [np.abs(units[i] - units[i + 1]) for i in range(m)]
for i in range(m):
N[i] = np.array([(M[idx[j]:idx[j + 1], idx[i]:idx[i + 1]].sum() -
remainders[j] * M[i][j] +
remainders[j + 1] * M[i + 1][j])
for j in range(m)])
return N | python | def bin_exact_kb_dense(M, positions, length=10):
"""Perform the kb-binning procedure with total bin lengths being exactly
set to that of the specified input. Fragments overlapping two potential
bins will be split and related contact counts will be divided according
to overlap proportions in each bin.
"""
unit = 10**3
ul = unit * length
units = positions / ul
n = len(positions)
idx = [i for i in range(
n - 1) if np.ceil(units[i]) < np.ceil(units[i + 1])]
m = len(idx) - 1
N = np.zeros((m, m))
remainders = [0] + [np.abs(units[i] - units[i + 1]) for i in range(m)]
for i in range(m):
N[i] = np.array([(M[idx[j]:idx[j + 1], idx[i]:idx[i + 1]].sum() -
remainders[j] * M[i][j] +
remainders[j + 1] * M[i + 1][j])
for j in range(m)])
return N | [
"def",
"bin_exact_kb_dense",
"(",
"M",
",",
"positions",
",",
"length",
"=",
"10",
")",
":",
"unit",
"=",
"10",
"**",
"3",
"ul",
"=",
"unit",
"*",
"length",
"units",
"=",
"positions",
"/",
"ul",
"n",
"=",
"len",
"(",
"positions",
")",
"idx",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"n",
"-",
"1",
")",
"if",
"np",
".",
"ceil",
"(",
"units",
"[",
"i",
"]",
")",
"<",
"np",
".",
"ceil",
"(",
"units",
"[",
"i",
"+",
"1",
"]",
")",
"]",
"m",
"=",
"len",
"(",
"idx",
")",
"-",
"1",
"N",
"=",
"np",
".",
"zeros",
"(",
"(",
"m",
",",
"m",
")",
")",
"remainders",
"=",
"[",
"0",
"]",
"+",
"[",
"np",
".",
"abs",
"(",
"units",
"[",
"i",
"]",
"-",
"units",
"[",
"i",
"+",
"1",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"m",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"m",
")",
":",
"N",
"[",
"i",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"M",
"[",
"idx",
"[",
"j",
"]",
":",
"idx",
"[",
"j",
"+",
"1",
"]",
",",
"idx",
"[",
"i",
"]",
":",
"idx",
"[",
"i",
"+",
"1",
"]",
"]",
".",
"sum",
"(",
")",
"-",
"remainders",
"[",
"j",
"]",
"*",
"M",
"[",
"i",
"]",
"[",
"j",
"]",
"+",
"remainders",
"[",
"j",
"+",
"1",
"]",
"*",
"M",
"[",
"i",
"+",
"1",
"]",
"[",
"j",
"]",
")",
"for",
"j",
"in",
"range",
"(",
"m",
")",
"]",
")",
"return",
"N"
] | Perform the kb-binning procedure with total bin lengths being exactly
set to that of the specified input. Fragments overlapping two potential
bins will be split and related contact counts will be divided according
to overlap proportions in each bin. | [
"Perform",
"the",
"kb",
"-",
"binning",
"procedure",
"with",
"total",
"bin",
"lengths",
"being",
"exactly",
"set",
"to",
"that",
"of",
"the",
"specified",
"input",
".",
"Fragments",
"overlapping",
"two",
"potential",
"bins",
"will",
"be",
"split",
"and",
"related",
"contact",
"counts",
"will",
"be",
"divided",
"according",
"to",
"overlap",
"proportions",
"in",
"each",
"bin",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L290-L311 | train |
koszullab/metaTOR | metator/scripts/hicstuff.py | bin_kb_sparse | def bin_kb_sparse(M, positions, length=10):
"""Perform the exact kb-binning procedure on a sparse matrix.
"""
try:
from scipy.sparse import coo_matrix
except ImportError as e:
print(str(e))
print("I am peforming dense normalization by default.")
return bin_kb_dense(M.todense(), positions=positions)
r = M.tocoo()
unit = 10**3
ul = unit * length
units = positions / ul
n = len(positions)
indices = np.floor(units)
row = [indices[np.floor(i)] for i in r.row / ul]
col = [indices[np.floor(j)] for j in r.col / ul]
binned_indices = positions[
[i for i in range(n - 1) if np.ceil(units[i]) < np.ceil(units[i + 1])]]
return coo_matrix((r.data, (row, col))), binned_indices | python | def bin_kb_sparse(M, positions, length=10):
"""Perform the exact kb-binning procedure on a sparse matrix.
"""
try:
from scipy.sparse import coo_matrix
except ImportError as e:
print(str(e))
print("I am peforming dense normalization by default.")
return bin_kb_dense(M.todense(), positions=positions)
r = M.tocoo()
unit = 10**3
ul = unit * length
units = positions / ul
n = len(positions)
indices = np.floor(units)
row = [indices[np.floor(i)] for i in r.row / ul]
col = [indices[np.floor(j)] for j in r.col / ul]
binned_indices = positions[
[i for i in range(n - 1) if np.ceil(units[i]) < np.ceil(units[i + 1])]]
return coo_matrix((r.data, (row, col))), binned_indices | [
"def",
"bin_kb_sparse",
"(",
"M",
",",
"positions",
",",
"length",
"=",
"10",
")",
":",
"try",
":",
"from",
"scipy",
".",
"sparse",
"import",
"coo_matrix",
"except",
"ImportError",
"as",
"e",
":",
"print",
"(",
"str",
"(",
"e",
")",
")",
"print",
"(",
"\"I am peforming dense normalization by default.\"",
")",
"return",
"bin_kb_dense",
"(",
"M",
".",
"todense",
"(",
")",
",",
"positions",
"=",
"positions",
")",
"r",
"=",
"M",
".",
"tocoo",
"(",
")",
"unit",
"=",
"10",
"**",
"3",
"ul",
"=",
"unit",
"*",
"length",
"units",
"=",
"positions",
"/",
"ul",
"n",
"=",
"len",
"(",
"positions",
")",
"indices",
"=",
"np",
".",
"floor",
"(",
"units",
")",
"row",
"=",
"[",
"indices",
"[",
"np",
".",
"floor",
"(",
"i",
")",
"]",
"for",
"i",
"in",
"r",
".",
"row",
"/",
"ul",
"]",
"col",
"=",
"[",
"indices",
"[",
"np",
".",
"floor",
"(",
"j",
")",
"]",
"for",
"j",
"in",
"r",
".",
"col",
"/",
"ul",
"]",
"binned_indices",
"=",
"positions",
"[",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"n",
"-",
"1",
")",
"if",
"np",
".",
"ceil",
"(",
"units",
"[",
"i",
"]",
")",
"<",
"np",
".",
"ceil",
"(",
"units",
"[",
"i",
"+",
"1",
"]",
")",
"]",
"]",
"return",
"coo_matrix",
"(",
"(",
"r",
".",
"data",
",",
"(",
"row",
",",
"col",
")",
")",
")",
",",
"binned_indices"
] | Perform the exact kb-binning procedure on a sparse matrix. | [
"Perform",
"the",
"exact",
"kb",
"-",
"binning",
"procedure",
"on",
"a",
"sparse",
"matrix",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L314-L334 | train |
koszullab/metaTOR | metator/scripts/hicstuff.py | trim_sparse | def trim_sparse(M, n_std=3, s_min=None, s_max=None):
"""Apply the trimming procedure to a sparse matrix.
"""
try:
from scipy.sparse import coo_matrix
except ImportError as e:
print(str(e))
print("I am peforming dense normalization by default.")
return trim_dense(M.todense())
r = M.tocoo()
sparsity = np.array(r.sum(axis=1)).flatten()
mean = np.mean(sparsity)
std = np.std(sparsity)
if s_min is None:
s_min = mean - n_std * std
if s_max is None:
s_max = mean + n_std * std
f = (sparsity > s_min) * (sparsity < s_max)
indices = [u for u in range(len(r.data)) if f[r.row[u]] and f[r.col[u]]]
rows = np.array([r.row[i] for i in indices])
cols = np.array([r.col[j] for j in indices])
data = np.array([r.data[k] for k in indices])
N = coo_matrix((data, (rows, cols)))
return N | python | def trim_sparse(M, n_std=3, s_min=None, s_max=None):
"""Apply the trimming procedure to a sparse matrix.
"""
try:
from scipy.sparse import coo_matrix
except ImportError as e:
print(str(e))
print("I am peforming dense normalization by default.")
return trim_dense(M.todense())
r = M.tocoo()
sparsity = np.array(r.sum(axis=1)).flatten()
mean = np.mean(sparsity)
std = np.std(sparsity)
if s_min is None:
s_min = mean - n_std * std
if s_max is None:
s_max = mean + n_std * std
f = (sparsity > s_min) * (sparsity < s_max)
indices = [u for u in range(len(r.data)) if f[r.row[u]] and f[r.col[u]]]
rows = np.array([r.row[i] for i in indices])
cols = np.array([r.col[j] for j in indices])
data = np.array([r.data[k] for k in indices])
N = coo_matrix((data, (rows, cols)))
return N | [
"def",
"trim_sparse",
"(",
"M",
",",
"n_std",
"=",
"3",
",",
"s_min",
"=",
"None",
",",
"s_max",
"=",
"None",
")",
":",
"try",
":",
"from",
"scipy",
".",
"sparse",
"import",
"coo_matrix",
"except",
"ImportError",
"as",
"e",
":",
"print",
"(",
"str",
"(",
"e",
")",
")",
"print",
"(",
"\"I am peforming dense normalization by default.\"",
")",
"return",
"trim_dense",
"(",
"M",
".",
"todense",
"(",
")",
")",
"r",
"=",
"M",
".",
"tocoo",
"(",
")",
"sparsity",
"=",
"np",
".",
"array",
"(",
"r",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
")",
".",
"flatten",
"(",
")",
"mean",
"=",
"np",
".",
"mean",
"(",
"sparsity",
")",
"std",
"=",
"np",
".",
"std",
"(",
"sparsity",
")",
"if",
"s_min",
"is",
"None",
":",
"s_min",
"=",
"mean",
"-",
"n_std",
"*",
"std",
"if",
"s_max",
"is",
"None",
":",
"s_max",
"=",
"mean",
"+",
"n_std",
"*",
"std",
"f",
"=",
"(",
"sparsity",
">",
"s_min",
")",
"*",
"(",
"sparsity",
"<",
"s_max",
")",
"indices",
"=",
"[",
"u",
"for",
"u",
"in",
"range",
"(",
"len",
"(",
"r",
".",
"data",
")",
")",
"if",
"f",
"[",
"r",
".",
"row",
"[",
"u",
"]",
"]",
"and",
"f",
"[",
"r",
".",
"col",
"[",
"u",
"]",
"]",
"]",
"rows",
"=",
"np",
".",
"array",
"(",
"[",
"r",
".",
"row",
"[",
"i",
"]",
"for",
"i",
"in",
"indices",
"]",
")",
"cols",
"=",
"np",
".",
"array",
"(",
"[",
"r",
".",
"col",
"[",
"j",
"]",
"for",
"j",
"in",
"indices",
"]",
")",
"data",
"=",
"np",
".",
"array",
"(",
"[",
"r",
".",
"data",
"[",
"k",
"]",
"for",
"k",
"in",
"indices",
"]",
")",
"N",
"=",
"coo_matrix",
"(",
"(",
"data",
",",
"(",
"rows",
",",
"cols",
")",
")",
")",
"return",
"N"
] | Apply the trimming procedure to a sparse matrix. | [
"Apply",
"the",
"trimming",
"procedure",
"to",
"a",
"sparse",
"matrix",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L361-L386 | train |
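The same trimming criterion is easy to illustrate on a small dense toy matrix (a sketch only, not the sparse implementation above; a tighter n_std is used because the toy matrix is tiny, whereas the function defaults to 3):

```python
import numpy as np

rng = np.random.default_rng(1)
M = rng.poisson(10, size=(8, 8))
M = M + M.T                      # symmetric toy contact map
M[3, :] = 0
M[:, 3] = 0                      # bin 3 has no coverage at all

coverage = M.sum(axis=1)
mean, std = coverage.mean(), coverage.std()
n_std = 1.5
keep = (coverage > mean - n_std * std) & (coverage < mean + n_std * std)
print(keep)                      # bin 3 is flagged for removal
trimmed = M[keep][:, keep]
print(M.shape, "->", trimmed.shape)
```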
koszullab/metaTOR | metator/scripts/hicstuff.py | normalize_dense | def normalize_dense(M, norm="frag", order=1, iterations=3):
"""Apply one of the many normalization types to input dense
matrix. Will also apply any callable norms such as a user-made
or a lambda function.
"""
s = np.array(M, np.float64)
floatorder = np.float64(order)
if norm == "SCN":
for _ in range(0, iterations):
sumrows = s.sum(axis=1)
maskrows = (sumrows != 0)[:, None] * (sumrows != 0)[None, :]
sums_row = sumrows[:, None] * np.ones(sumrows.shape)[None, :]
s[maskrows] = 1. * s[maskrows] / sums_row[maskrows]
sumcols = s.sum(axis=0)
maskcols = (sumcols != 0)[:, None] * (sumcols != 0)[None, :]
sums_col = sumcols[None, :] * np.ones(sumcols.shape)[:, None]
s[maskcols] = 1. * s[maskcols] / sums_col[maskcols]
elif norm == "mirnylib":
try:
from mirnylib import numutils as ntls
s = ntls.iterativeCorrection(s, iterations)[0]
except ImportError as e:
print(str(e))
print("I can't find mirnylib.")
print("Please install it from "
"https://bitbucket.org/mirnylab/mirnylib")
print("I will use default norm as fallback.")
return normalize_dense(M, order=order, iterations=iterations)
elif norm == "frag":
for _ in range(1, iterations):
s_norm_x = np.linalg.norm(s, ord=floatorder, axis=0)
s_norm_y = np.linalg.norm(s, ord=floatorder, axis=1)
s_norm = np.tensordot(s_norm_x, s_norm_y, axes=0)
s[s_norm != 0] = 1. * s[s_norm != 0] / s_norm[s_norm != 0]
elif norm == "global":
s_norm = np.linalg.norm(s, ord=floatorder)
s /= 1. * s_norm
elif callable(norm):
s = norm(M)
else:
print("Unknown norm. Returning input as fallback")
return (s + s.T) / 2 | python | def normalize_dense(M, norm="frag", order=1, iterations=3):
"""Apply one of the many normalization types to input dense
matrix. Will also apply any callable norms such as a user-made
or a lambda function.
"""
s = np.array(M, np.float64)
floatorder = np.float64(order)
if norm == "SCN":
for _ in range(0, iterations):
sumrows = s.sum(axis=1)
maskrows = (sumrows != 0)[:, None] * (sumrows != 0)[None, :]
sums_row = sumrows[:, None] * np.ones(sumrows.shape)[None, :]
s[maskrows] = 1. * s[maskrows] / sums_row[maskrows]
sumcols = s.sum(axis=0)
maskcols = (sumcols != 0)[:, None] * (sumcols != 0)[None, :]
sums_col = sumcols[None, :] * np.ones(sumcols.shape)[:, None]
s[maskcols] = 1. * s[maskcols] / sums_col[maskcols]
elif norm == "mirnylib":
try:
from mirnylib import numutils as ntls
s = ntls.iterativeCorrection(s, iterations)[0]
except ImportError as e:
print(str(e))
print("I can't find mirnylib.")
print("Please install it from "
"https://bitbucket.org/mirnylab/mirnylib")
print("I will use default norm as fallback.")
return normalize_dense(M, order=order, iterations=iterations)
elif norm == "frag":
for _ in range(1, iterations):
s_norm_x = np.linalg.norm(s, ord=floatorder, axis=0)
s_norm_y = np.linalg.norm(s, ord=floatorder, axis=1)
s_norm = np.tensordot(s_norm_x, s_norm_y, axes=0)
s[s_norm != 0] = 1. * s[s_norm != 0] / s_norm[s_norm != 0]
elif norm == "global":
s_norm = np.linalg.norm(s, ord=floatorder)
s /= 1. * s_norm
elif callable(norm):
s = norm(M)
else:
print("Unknown norm. Returning input as fallback")
return (s + s.T) / 2 | [
"def",
"normalize_dense",
"(",
"M",
",",
"norm",
"=",
"\"frag\"",
",",
"order",
"=",
"1",
",",
"iterations",
"=",
"3",
")",
":",
"s",
"=",
"np",
".",
"array",
"(",
"M",
",",
"np",
".",
"float64",
")",
"floatorder",
"=",
"np",
".",
"float64",
"(",
"order",
")",
"if",
"norm",
"==",
"\"SCN\"",
":",
"for",
"_",
"in",
"range",
"(",
"0",
",",
"iterations",
")",
":",
"sumrows",
"=",
"s",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"maskrows",
"=",
"(",
"sumrows",
"!=",
"0",
")",
"[",
":",
",",
"None",
"]",
"*",
"(",
"sumrows",
"!=",
"0",
")",
"[",
"None",
",",
":",
"]",
"sums_row",
"=",
"sumrows",
"[",
":",
",",
"None",
"]",
"*",
"np",
".",
"ones",
"(",
"sumrows",
".",
"shape",
")",
"[",
"None",
",",
":",
"]",
"s",
"[",
"maskrows",
"]",
"=",
"1.",
"*",
"s",
"[",
"maskrows",
"]",
"/",
"sums_row",
"[",
"maskrows",
"]",
"sumcols",
"=",
"s",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"maskcols",
"=",
"(",
"sumcols",
"!=",
"0",
")",
"[",
":",
",",
"None",
"]",
"*",
"(",
"sumcols",
"!=",
"0",
")",
"[",
"None",
",",
":",
"]",
"sums_col",
"=",
"sumcols",
"[",
"None",
",",
":",
"]",
"*",
"np",
".",
"ones",
"(",
"sumcols",
".",
"shape",
")",
"[",
":",
",",
"None",
"]",
"s",
"[",
"maskcols",
"]",
"=",
"1.",
"*",
"s",
"[",
"maskcols",
"]",
"/",
"sums_col",
"[",
"maskcols",
"]",
"elif",
"norm",
"==",
"\"mirnylib\"",
":",
"try",
":",
"from",
"mirnylib",
"import",
"numutils",
"as",
"ntls",
"s",
"=",
"ntls",
".",
"iterativeCorrection",
"(",
"s",
",",
"iterations",
")",
"[",
"0",
"]",
"except",
"ImportError",
"as",
"e",
":",
"print",
"(",
"str",
"(",
"e",
")",
")",
"print",
"(",
"\"I can't find mirnylib.\"",
")",
"print",
"(",
"\"Please install it from \"",
"\"https://bitbucket.org/mirnylab/mirnylib\"",
")",
"print",
"(",
"\"I will use default norm as fallback.\"",
")",
"return",
"normalize_dense",
"(",
"M",
",",
"order",
"=",
"order",
",",
"iterations",
"=",
"iterations",
")",
"elif",
"norm",
"==",
"\"frag\"",
":",
"for",
"_",
"in",
"range",
"(",
"1",
",",
"iterations",
")",
":",
"s_norm_x",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"s",
",",
"ord",
"=",
"floatorder",
",",
"axis",
"=",
"0",
")",
"s_norm_y",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"s",
",",
"ord",
"=",
"floatorder",
",",
"axis",
"=",
"1",
")",
"s_norm",
"=",
"np",
".",
"tensordot",
"(",
"s_norm_x",
",",
"s_norm_y",
",",
"axes",
"=",
"0",
")",
"s",
"[",
"s_norm",
"!=",
"0",
"]",
"=",
"1.",
"*",
"s",
"[",
"s_norm",
"!=",
"0",
"]",
"/",
"s_norm",
"[",
"s_norm",
"!=",
"0",
"]",
"elif",
"norm",
"==",
"\"global\"",
":",
"s_norm",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"s",
",",
"ord",
"=",
"floatorder",
")",
"s",
"/=",
"1.",
"*",
"s_norm",
"elif",
"callable",
"(",
"norm",
")",
":",
"s",
"=",
"norm",
"(",
"M",
")",
"else",
":",
"print",
"(",
"\"Unknown norm. Returning input as fallback\"",
")",
"return",
"(",
"s",
"+",
"s",
".",
"T",
")",
"/",
"2"
] | Apply one of the many normalization types to input dense
matrix. Will also apply any callable norms such as a user-made
or a lambda function. | [
"Apply",
"one",
"of",
"the",
"many",
"normalization",
"types",
"to",
"input",
"dense",
"matrix",
".",
"Will",
"also",
"apply",
"any",
"callable",
"norms",
"such",
"as",
"a",
"user",
"-",
"made",
"or",
"a",
"lambda",
"function",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L389-L440 | train |
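A stripped-down illustration of the SCN branch above: rows and columns are alternately divided by their sums until both converge towards one. The zero-row masking and the mirnylib/frag/global branches are omitted, and the toy matrix is invented for the example.

```python
import numpy as np

rng = np.random.default_rng(2)
M = rng.poisson(20, size=(5, 5)).astype(float)
M = (M + M.T) / 2

s = M.copy()
for _ in range(10):
    s /= s.sum(axis=1, keepdims=True)   # balance rows
    s /= s.sum(axis=0, keepdims=True)   # balance columns
s = (s + s.T) / 2                        # re-symmetrize, as normalize_dense does
print(np.round(s.sum(axis=1), 3))        # all close to 1
```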
koszullab/metaTOR | metator/scripts/hicstuff.py | normalize_sparse | def normalize_sparse(M, norm="frag", order=1, iterations=3):
"""Applies a normalization type to a sparse matrix.
"""
try:
from scipy.sparse import csr_matrix
except ImportError as e:
print(str(e))
print("I am peforming dense normalization by default.")
return normalize_dense(M.todense())
r = csr_matrix(M)
if norm == "SCN":
for _ in range(1, iterations):
row_sums = np.array(r.sum(axis=1)).flatten()
col_sums = np.array(r.sum(axis=0)).flatten()
row_indices, col_indices = r.nonzero()
r.data /= row_sums[row_indices] * col_sums[col_indices]
elif norm == "global":
try:
from scipy.sparse import linalg
r = linalg.norm(M, ord=order)
except (ImportError, AttributeError) as e:
print(str(e))
print("I can't import linalg tools for sparse matrices.")
print("Please upgrade your scipy version to 0.16.0.")
elif callable(norm):
r = norm(M)
else:
print("Unknown norm. Returning input as fallback")
return r | python | def normalize_sparse(M, norm="frag", order=1, iterations=3):
"""Applies a normalization type to a sparse matrix.
"""
try:
from scipy.sparse import csr_matrix
except ImportError as e:
print(str(e))
print("I am peforming dense normalization by default.")
return normalize_dense(M.todense())
r = csr_matrix(M)
if norm == "SCN":
for _ in range(1, iterations):
row_sums = np.array(r.sum(axis=1)).flatten()
col_sums = np.array(r.sum(axis=0)).flatten()
row_indices, col_indices = r.nonzero()
r.data /= row_sums[row_indices] * col_sums[col_indices]
elif norm == "global":
try:
from scipy.sparse import linalg
r = linalg.norm(M, ord=order)
except (ImportError, AttributeError) as e:
print(str(e))
print("I can't import linalg tools for sparse matrices.")
print("Please upgrade your scipy version to 0.16.0.")
elif callable(norm):
r = norm(M)
else:
print("Unknown norm. Returning input as fallback")
return r | [
"def",
"normalize_sparse",
"(",
"M",
",",
"norm",
"=",
"\"frag\"",
",",
"order",
"=",
"1",
",",
"iterations",
"=",
"3",
")",
":",
"try",
":",
"from",
"scipy",
".",
"sparse",
"import",
"csr_matrix",
"except",
"ImportError",
"as",
"e",
":",
"print",
"(",
"str",
"(",
"e",
")",
")",
"print",
"(",
"\"I am peforming dense normalization by default.\"",
")",
"return",
"normalize_dense",
"(",
"M",
".",
"todense",
"(",
")",
")",
"r",
"=",
"csr_matrix",
"(",
"M",
")",
"if",
"norm",
"==",
"\"SCN\"",
":",
"for",
"_",
"in",
"range",
"(",
"1",
",",
"iterations",
")",
":",
"row_sums",
"=",
"np",
".",
"array",
"(",
"r",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
")",
".",
"flatten",
"(",
")",
"col_sums",
"=",
"np",
".",
"array",
"(",
"r",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
")",
".",
"flatten",
"(",
")",
"row_indices",
",",
"col_indices",
"=",
"r",
".",
"nonzero",
"(",
")",
"r",
".",
"data",
"/=",
"row_sums",
"[",
"row_indices",
"]",
"*",
"col_sums",
"[",
"col_indices",
"]",
"elif",
"norm",
"==",
"\"global\"",
":",
"try",
":",
"from",
"scipy",
".",
"sparse",
"import",
"linalg",
"r",
"=",
"linalg",
".",
"norm",
"(",
"M",
",",
"ord",
"=",
"order",
")",
"except",
"(",
"ImportError",
",",
"AttributeError",
")",
"as",
"e",
":",
"print",
"(",
"str",
"(",
"e",
")",
")",
"print",
"(",
"\"I can't import linalg tools for sparse matrices.\"",
")",
"print",
"(",
"\"Please upgrade your scipy version to 0.16.0.\"",
")",
"elif",
"callable",
"(",
"norm",
")",
":",
"r",
"=",
"norm",
"(",
"M",
")",
"else",
":",
"print",
"(",
"\"Unknown norm. Returning input as fallback\"",
")",
"return",
"r"
] | Applies a normalization type to a sparse matrix. | [
"Applies",
"a",
"normalization",
"type",
"to",
"a",
"sparse",
"matrix",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L443-L476 | train |
koszullab/metaTOR | metator/scripts/hicstuff.py | GC_wide | def GC_wide(genome, window=1000):
"""Compute GC across a window of given length.
:note: Requires Biopython
"""
GC = []
from Bio import SeqIO
with open(genome) as handle:
sequence = "".join([str(record.seq)
for record in SeqIO.parse(handle, "fasta")])
n = len(sequence)
for i in range(0, n, window):
portion = sequence[i:min(i + window, n)]
GC.append(GC_partial(portion))
return GC | python | def GC_wide(genome, window=1000):
"""Compute GC across a window of given length.
:note: Requires Biopython
"""
GC = []
from Bio import SeqIO
with open(genome) as handle:
sequence = "".join([str(record.seq)
for record in SeqIO.parse(handle, "fasta")])
n = len(sequence)
for i in range(0, n, window):
portion = sequence[i:min(i + window, n)]
GC.append(GC_partial(portion))
return GC | [
"def",
"GC_wide",
"(",
"genome",
",",
"window",
"=",
"1000",
")",
":",
"GC",
"=",
"[",
"]",
"from",
"Bio",
"import",
"SeqIO",
"with",
"open",
"(",
"genome",
")",
"as",
"handle",
":",
"sequence",
"=",
"\"\"",
".",
"join",
"(",
"[",
"str",
"(",
"record",
".",
"seq",
")",
"for",
"record",
"in",
"SeqIO",
".",
"parse",
"(",
"handle",
",",
"\"fasta\"",
")",
"]",
")",
"n",
"=",
"len",
"(",
"sequence",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"n",
",",
"window",
")",
":",
"portion",
"=",
"sequence",
"[",
"i",
":",
"min",
"(",
"i",
"+",
"window",
",",
"n",
")",
"]",
"GC",
".",
"append",
"(",
"GC_partial",
"(",
"portion",
")",
")",
"return",
"GC"
] | Compute GC across a window of given length.
:note: Requires Biopython | [
"Compute",
"GC",
"across",
"a",
"window",
"of",
"given",
"length",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L492-L509 | train |
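For reference, windowed GC content can also be computed directly, without the GC_partial helper used above (which lives elsewhere in hicstuff.py and is not shown here); this is only an illustrative fraction-of-G/C-per-window sketch on an invented sequence.

```python
window = 10
sequence = "ATGCGCGCATATATGCGCGCATATATTTTTGCGCGCGCAT"
gc = [
    (chunk.count("G") + chunk.count("C")) / len(chunk)
    for chunk in (sequence[i:i + window] for i in range(0, len(sequence), window))
]
print(gc)
```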
koszullab/metaTOR | metator/scripts/hicstuff.py | to_dade_matrix | def to_dade_matrix(M, annotations="", filename=None):
"""Returns a Dade matrix from input numpy matrix. Any annotations are added
as header. If filename is provided and valid, said matrix is also saved
as text.
"""
n, m = M.shape
A = np.zeros((n + 1, m + 1))
A[1:, 1:] = M
if not annotations:
annotations = np.array(["" for _ in n], dtype=str)
A[0, :] = annotations
A[:, 0] = annotations.T
if filename:
try:
np.savetxt(filename, A, fmt='%i')
print("I saved input matrix in dade format as " + str(filename))
except ValueError as e:
print("I couldn't save input matrix.")
print(str(e))
finally:
return A
return A | python | def to_dade_matrix(M, annotations="", filename=None):
"""Returns a Dade matrix from input numpy matrix. Any annotations are added
as header. If filename is provided and valid, said matrix is also saved
as text.
"""
n, m = M.shape
A = np.zeros((n + 1, m + 1))
A[1:, 1:] = M
if not annotations:
annotations = np.array(["" for _ in n], dtype=str)
A[0, :] = annotations
A[:, 0] = annotations.T
if filename:
try:
np.savetxt(filename, A, fmt='%i')
print("I saved input matrix in dade format as " + str(filename))
except ValueError as e:
print("I couldn't save input matrix.")
print(str(e))
finally:
return A
return A | [
"def",
"to_dade_matrix",
"(",
"M",
",",
"annotations",
"=",
"\"\"",
",",
"filename",
"=",
"None",
")",
":",
"n",
",",
"m",
"=",
"M",
".",
"shape",
"A",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
"+",
"1",
",",
"m",
"+",
"1",
")",
")",
"A",
"[",
"1",
":",
",",
"1",
":",
"]",
"=",
"M",
"if",
"not",
"annotations",
":",
"annotations",
"=",
"np",
".",
"array",
"(",
"[",
"\"\"",
"for",
"_",
"in",
"n",
"]",
",",
"dtype",
"=",
"str",
")",
"A",
"[",
"0",
",",
":",
"]",
"=",
"annotations",
"A",
"[",
":",
",",
"0",
"]",
"=",
"annotations",
".",
"T",
"if",
"filename",
":",
"try",
":",
"np",
".",
"savetxt",
"(",
"filename",
",",
"A",
",",
"fmt",
"=",
"'%i'",
")",
"print",
"(",
"\"I saved input matrix in dade format as \"",
"+",
"str",
"(",
"filename",
")",
")",
"except",
"ValueError",
"as",
"e",
":",
"print",
"(",
"\"I couldn't save input matrix.\"",
")",
"print",
"(",
"str",
"(",
"e",
")",
")",
"finally",
":",
"return",
"A",
"return",
"A"
] | Returns a Dade matrix from input numpy matrix. Any annotations are added
as header. If filename is provided and valid, said matrix is also saved
as text. | [
"Returns",
"a",
"Dade",
"matrix",
"from",
"input",
"numpy",
"matrix",
".",
"Any",
"annotations",
"are",
"added",
"as",
"header",
".",
"If",
"filename",
"is",
"provided",
"and",
"valid",
"said",
"matrix",
"is",
"also",
"saved",
"as",
"text",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L678-L701 | train |
koszullab/metaTOR | metator/scripts/hicstuff.py | largest_connected_component | def largest_connected_component(matrix):
"""Compute the adjacency matrix of the largest connected component of the
    graph whose adjacency matrix is the input matrix.
"""
try:
import scipy.sparse
n, components = scipy.sparse.csgraph.connected_components(
matrix, directed=False)
print("I found " + str(n) + " connected components.")
component_dist = collections.Counter(components)
print("Distribution of components: " + str(component_dist))
most_common, _ = component_dist.most_common(1)[0]
ilcc = (components == most_common)
return matrix[:, ilcc][ilcc]
except ImportError as e:
print("I couldn't find scipy which is needed for graph routines.")
print(str(e))
print("Returning input matrix as fallback.")
return matrix | python | def largest_connected_component(matrix):
"""Compute the adjacency matrix of the largest connected component of the
    graph whose adjacency matrix is the input matrix.
"""
try:
import scipy.sparse
n, components = scipy.sparse.csgraph.connected_components(
matrix, directed=False)
print("I found " + str(n) + " connected components.")
component_dist = collections.Counter(components)
print("Distribution of components: " + str(component_dist))
most_common, _ = component_dist.most_common(1)[0]
ilcc = (components == most_common)
return matrix[:, ilcc][ilcc]
except ImportError as e:
print("I couldn't find scipy which is needed for graph routines.")
print(str(e))
print("Returning input matrix as fallback.")
return matrix | [
"def",
"largest_connected_component",
"(",
"matrix",
")",
":",
"try",
":",
"import",
"scipy",
".",
"sparse",
"n",
",",
"components",
"=",
"scipy",
".",
"sparse",
".",
"csgraph",
".",
"connected_components",
"(",
"matrix",
",",
"directed",
"=",
"False",
")",
"print",
"(",
"\"I found \"",
"+",
"str",
"(",
"n",
")",
"+",
"\" connected components.\"",
")",
"component_dist",
"=",
"collections",
".",
"Counter",
"(",
"components",
")",
"print",
"(",
"\"Distribution of components: \"",
"+",
"str",
"(",
"component_dist",
")",
")",
"most_common",
",",
"_",
"=",
"component_dist",
".",
"most_common",
"(",
"1",
")",
"[",
"0",
"]",
"ilcc",
"=",
"(",
"components",
"==",
"most_common",
")",
"return",
"matrix",
"[",
":",
",",
"ilcc",
"]",
"[",
"ilcc",
"]",
"except",
"ImportError",
"as",
"e",
":",
"print",
"(",
"\"I couldn't find scipy which is needed for graph routines.\"",
")",
"print",
"(",
"str",
"(",
"e",
")",
")",
"print",
"(",
"\"Returning input matrix as fallback.\"",
")",
"return",
"matrix"
] | Compute the adjacency matrix of the largest connected component of the
    graph whose adjacency matrix is the input matrix. | [
"Compute",
"the",
"adjacency",
"matrix",
"of",
"the",
"largest",
"connected",
"component",
"of",
"the",
"graph",
"whose",
"input",
"matrix",
"is",
"adjacent",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L736-L756 | train |
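The selection of the largest connected component can be demonstrated with the same scipy.sparse.csgraph machinery on a toy block-diagonal matrix (illustrative sketch; the matrix is invented):

```python
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components

# Bins {0, 1, 2} form one component, bins {3, 4} another.
M = np.zeros((5, 5))
M[:3, :3] = 1
M[3:, 3:] = 1

n, labels = connected_components(csr_matrix(M), directed=False)
largest = np.bincount(labels).argmax()
mask = labels == largest
print(n, M[mask][:, mask].shape)   # 2 components, largest is (3, 3)
```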
koszullab/metaTOR | metator/scripts/hicstuff.py | to_structure | def to_structure(matrix, alpha=1):
"""Compute best matching 3D genome structure from underlying input matrix
using ShRec3D-derived method from Lesne et al., 2014.
Link: https://www.ncbi.nlm.nih.gov/pubmed/25240436
The method performs two steps: first compute distance matrix by treating
contact data as an adjacency graph (of weights equal to a power law
function of the data), then embed the resulting distance matrix into
3D space.
The alpha parameter influences the weighting of contacts: if alpha < 1
long-range interactions are prioritized; if alpha >> 1 short-range
    interactions have more weight when computing the distance matrix.
"""
connected = largest_connected_component(matrix)
distances = to_distance(connected, alpha)
n, m = connected.shape
bary = np.sum(np.triu(distances, 1)) / (n**2) # barycenters
d = np.array(np.sum(distances**2, 0) / n - bary) # distances to origin
gram = np.array([(d[i] + d[j] - distances[i][j]**2) / 2 for i,
j in itertools.product(range(n), range(m))]).reshape(n, m)
normalized = gram / np.linalg.norm(gram, 'fro')
try:
symmetric = np.array((normalized + normalized.T) / 2,
dtype=np.longfloat) # just in case
except AttributeError:
symmetric = np.array((normalized + normalized.T) / 2)
from scipy import linalg
eigen_values, eigen_vectors = linalg.eigh(symmetric)
if not (eigen_values >= 0).all():
warnings.warn("Negative eigen values were found.")
idx = eigen_values.argsort()[-3:][::-1]
values = eigen_values[idx]
vectors = eigen_vectors[:, idx]
coordinates = vectors * np.sqrt(values)
return coordinates | python | def to_structure(matrix, alpha=1):
"""Compute best matching 3D genome structure from underlying input matrix
using ShRec3D-derived method from Lesne et al., 2014.
Link: https://www.ncbi.nlm.nih.gov/pubmed/25240436
The method performs two steps: first compute distance matrix by treating
contact data as an adjacency graph (of weights equal to a power law
function of the data), then embed the resulting distance matrix into
3D space.
The alpha parameter influences the weighting of contacts: if alpha < 1
long-range interactions are prioritized; if alpha >> 1 short-range
    interactions have more weight when computing the distance matrix.
"""
connected = largest_connected_component(matrix)
distances = to_distance(connected, alpha)
n, m = connected.shape
bary = np.sum(np.triu(distances, 1)) / (n**2) # barycenters
d = np.array(np.sum(distances**2, 0) / n - bary) # distances to origin
gram = np.array([(d[i] + d[j] - distances[i][j]**2) / 2 for i,
j in itertools.product(range(n), range(m))]).reshape(n, m)
normalized = gram / np.linalg.norm(gram, 'fro')
try:
symmetric = np.array((normalized + normalized.T) / 2,
dtype=np.longfloat) # just in case
except AttributeError:
symmetric = np.array((normalized + normalized.T) / 2)
from scipy import linalg
eigen_values, eigen_vectors = linalg.eigh(symmetric)
if not (eigen_values >= 0).all():
warnings.warn("Negative eigen values were found.")
idx = eigen_values.argsort()[-3:][::-1]
values = eigen_values[idx]
vectors = eigen_vectors[:, idx]
coordinates = vectors * np.sqrt(values)
return coordinates | [
"def",
"to_structure",
"(",
"matrix",
",",
"alpha",
"=",
"1",
")",
":",
"connected",
"=",
"largest_connected_component",
"(",
"matrix",
")",
"distances",
"=",
"to_distance",
"(",
"connected",
",",
"alpha",
")",
"n",
",",
"m",
"=",
"connected",
".",
"shape",
"bary",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"triu",
"(",
"distances",
",",
"1",
")",
")",
"/",
"(",
"n",
"**",
"2",
")",
"# barycenters",
"d",
"=",
"np",
".",
"array",
"(",
"np",
".",
"sum",
"(",
"distances",
"**",
"2",
",",
"0",
")",
"/",
"n",
"-",
"bary",
")",
"# distances to origin",
"gram",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"d",
"[",
"i",
"]",
"+",
"d",
"[",
"j",
"]",
"-",
"distances",
"[",
"i",
"]",
"[",
"j",
"]",
"**",
"2",
")",
"/",
"2",
"for",
"i",
",",
"j",
"in",
"itertools",
".",
"product",
"(",
"range",
"(",
"n",
")",
",",
"range",
"(",
"m",
")",
")",
"]",
")",
".",
"reshape",
"(",
"n",
",",
"m",
")",
"normalized",
"=",
"gram",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"gram",
",",
"'fro'",
")",
"try",
":",
"symmetric",
"=",
"np",
".",
"array",
"(",
"(",
"normalized",
"+",
"normalized",
".",
"T",
")",
"/",
"2",
",",
"dtype",
"=",
"np",
".",
"longfloat",
")",
"# just in case",
"except",
"AttributeError",
":",
"symmetric",
"=",
"np",
".",
"array",
"(",
"(",
"normalized",
"+",
"normalized",
".",
"T",
")",
"/",
"2",
")",
"from",
"scipy",
"import",
"linalg",
"eigen_values",
",",
"eigen_vectors",
"=",
"linalg",
".",
"eigh",
"(",
"symmetric",
")",
"if",
"not",
"(",
"eigen_values",
">=",
"0",
")",
".",
"all",
"(",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Negative eigen values were found.\"",
")",
"idx",
"=",
"eigen_values",
".",
"argsort",
"(",
")",
"[",
"-",
"3",
":",
"]",
"[",
":",
":",
"-",
"1",
"]",
"values",
"=",
"eigen_values",
"[",
"idx",
"]",
"vectors",
"=",
"eigen_vectors",
"[",
":",
",",
"idx",
"]",
"coordinates",
"=",
"vectors",
"*",
"np",
".",
"sqrt",
"(",
"values",
")",
"return",
"coordinates"
] | Compute best matching 3D genome structure from underlying input matrix
using ShRec3D-derived method from Lesne et al., 2014.
Link: https://www.ncbi.nlm.nih.gov/pubmed/25240436
The method performs two steps: first compute distance matrix by treating
contact data as an adjacency graph (of weights equal to a power law
function of the data), then embed the resulting distance matrix into
3D space.
The alpha parameter influences the weighting of contacts: if alpha < 1
long-range interactions are prioritized; if alpha >> 1 short-range
    interactions have more weight when computing the distance matrix. | [
"Compute",
"best",
"matching",
"3D",
"genome",
"structure",
"from",
"underlying",
"input",
"matrix",
"using",
"ShRec3D",
"-",
"derived",
"method",
"from",
"Lesne",
"et",
"al",
".",
"2014",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L759-L798 | train |
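The second step of the procedure described above (embedding a distance matrix into 3D space via the Gram matrix eigendecomposition, i.e. classical multidimensional scaling) can be sketched on synthetic coordinates; this is an illustration of the technique, not a reproduction of to_structure itself.

```python
import numpy as np
from scipy.spatial.distance import cdist

rng = np.random.default_rng(3)
true_coords = rng.normal(size=(20, 3))
D = cdist(true_coords, true_coords)          # pairwise distances

n = len(D)
J = np.eye(n) - np.ones((n, n)) / n          # centering matrix
G = -0.5 * J @ (D ** 2) @ J                  # Gram matrix from squared distances

vals, vecs = np.linalg.eigh(G)
idx = vals.argsort()[-3:][::-1]              # three largest eigenvalues
coords = vecs[:, idx] * np.sqrt(vals[idx])
print(coords.shape)                          # (20, 3), equal to true_coords up to
                                             # rotation/reflection
```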
koszullab/metaTOR | metator/scripts/hicstuff.py | get_missing_bins | def get_missing_bins(original, trimmed):
"""Retrieve indices of a trimmed matrix with respect to the original matrix.
Fairly fast but is only correct if diagonal values are different, which is
always the case in practice.
"""
original_diag = np.diag(original)
trimmed_diag = np.diag(trimmed)
index = []
m = min(original.shape)
for j in range(min(trimmed.shape)):
k = 0
while original_diag[j + k] != trimmed_diag[j] and k < 2 * m:
k += 1
index.append(k + j)
return np.array(index) | python | def get_missing_bins(original, trimmed):
"""Retrieve indices of a trimmed matrix with respect to the original matrix.
Fairly fast but is only correct if diagonal values are different, which is
always the case in practice.
"""
original_diag = np.diag(original)
trimmed_diag = np.diag(trimmed)
index = []
m = min(original.shape)
for j in range(min(trimmed.shape)):
k = 0
while original_diag[j + k] != trimmed_diag[j] and k < 2 * m:
k += 1
index.append(k + j)
return np.array(index) | [
"def",
"get_missing_bins",
"(",
"original",
",",
"trimmed",
")",
":",
"original_diag",
"=",
"np",
".",
"diag",
"(",
"original",
")",
"trimmed_diag",
"=",
"np",
".",
"diag",
"(",
"trimmed",
")",
"index",
"=",
"[",
"]",
"m",
"=",
"min",
"(",
"original",
".",
"shape",
")",
"for",
"j",
"in",
"range",
"(",
"min",
"(",
"trimmed",
".",
"shape",
")",
")",
":",
"k",
"=",
"0",
"while",
"original_diag",
"[",
"j",
"+",
"k",
"]",
"!=",
"trimmed_diag",
"[",
"j",
"]",
"and",
"k",
"<",
"2",
"*",
"m",
":",
"k",
"+=",
"1",
"index",
".",
"append",
"(",
"k",
"+",
"j",
")",
"return",
"np",
".",
"array",
"(",
"index",
")"
] | Retrieve indices of a trimmed matrix with respect to the original matrix.
Fairly fast but is only correct if diagonal values are different, which is
always the case in practice. | [
"Retrieve",
"indices",
"of",
"a",
"trimmed",
"matrix",
"with",
"respect",
"to",
"the",
"original",
"matrix",
".",
"Fairly",
"fast",
"but",
"is",
"only",
"correct",
"if",
"diagonal",
"values",
"are",
"different",
"which",
"is",
"always",
"the",
"case",
"in",
"practice",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L801-L816 | train |
koszullab/metaTOR | metator/scripts/hicstuff.py | distance_to_contact | def distance_to_contact(D, alpha=1):
"""Compute contact matrix from input distance matrix. Distance values of
zeroes are given the largest contact count otherwise inferred non-zero
distance values.
"""
if callable(alpha):
distance_function = alpha
else:
try:
a = np.float64(alpha)
def distance_function(x):
return 1 / (x ** (1 / a))
except TypeError:
print("Alpha parameter must be callable or an array-like")
raise
except ZeroDivisionError:
raise ValueError("Alpha parameter must be non-zero")
m = np.max(distance_function(D[D != 0]))
M = np.zeros(D.shape)
M[D != 0] = distance_function(D[D != 0])
M[D == 0] = m
return M | python | def distance_to_contact(D, alpha=1):
"""Compute contact matrix from input distance matrix. Distance values of
zeroes are given the largest contact count otherwise inferred non-zero
distance values.
"""
if callable(alpha):
distance_function = alpha
else:
try:
a = np.float64(alpha)
def distance_function(x):
return 1 / (x ** (1 / a))
except TypeError:
print("Alpha parameter must be callable or an array-like")
raise
except ZeroDivisionError:
raise ValueError("Alpha parameter must be non-zero")
m = np.max(distance_function(D[D != 0]))
M = np.zeros(D.shape)
M[D != 0] = distance_function(D[D != 0])
M[D == 0] = m
return M | [
"def",
"distance_to_contact",
"(",
"D",
",",
"alpha",
"=",
"1",
")",
":",
"if",
"callable",
"(",
"alpha",
")",
":",
"distance_function",
"=",
"alpha",
"else",
":",
"try",
":",
"a",
"=",
"np",
".",
"float64",
"(",
"alpha",
")",
"def",
"distance_function",
"(",
"x",
")",
":",
"return",
"1",
"/",
"(",
"x",
"**",
"(",
"1",
"/",
"a",
")",
")",
"except",
"TypeError",
":",
"print",
"(",
"\"Alpha parameter must be callable or an array-like\"",
")",
"raise",
"except",
"ZeroDivisionError",
":",
"raise",
"ValueError",
"(",
"\"Alpha parameter must be non-zero\"",
")",
"m",
"=",
"np",
".",
"max",
"(",
"distance_function",
"(",
"D",
"[",
"D",
"!=",
"0",
"]",
")",
")",
"M",
"=",
"np",
".",
"zeros",
"(",
"D",
".",
"shape",
")",
"M",
"[",
"D",
"!=",
"0",
"]",
"=",
"distance_function",
"(",
"D",
"[",
"D",
"!=",
"0",
"]",
")",
"M",
"[",
"D",
"==",
"0",
"]",
"=",
"m",
"return",
"M"
] | Compute contact matrix from input distance matrix. Distance values of
zeroes are given the largest contact count otherwise inferred non-zero
distance values. | [
"Compute",
"contact",
"matrix",
"from",
"input",
"distance",
"matrix",
".",
"Distance",
"values",
"of",
"zeroes",
"are",
"given",
"the",
"largest",
"contact",
"count",
"otherwise",
"inferred",
"non",
"-",
"zero",
"distance",
"values",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L918-L942 | train |
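The default power-law conversion is easy to check by hand on a tiny distance matrix (a sketch with invented values; zero distances get the largest inferred contact count, as in the function above):

```python
import numpy as np

D = np.array([[0., 1., 2.],
              [1., 0., 4.],
              [2., 4., 0.]])
alpha = 1.0
C = np.zeros_like(D)
C[D != 0] = 1.0 / (D[D != 0] ** (1.0 / alpha))
C[D == 0] = C[D != 0].max()
print(C)
```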
koszullab/metaTOR | metator/scripts/hicstuff.py | pdb_to_structure | def pdb_to_structure(filename):
"""Import a structure object from a PDB file.
"""
try:
        from Bio import PDB
except ImportError:
print("I can't import Biopython which is needed to handle PDB files.")
raise
p = PDB.PDBParser()
structure = p.get_structure('S', filename)
for _ in structure.get_chains():
atoms = [np.array(atom.get_coord()) for atom in structure.get_atoms()]
return atoms | python | def pdb_to_structure(filename):
"""Import a structure object from a PDB file.
"""
try:
        from Bio import PDB
except ImportError:
print("I can't import Biopython which is needed to handle PDB files.")
raise
p = PDB.PDBParser()
structure = p.get_structure('S', filename)
for _ in structure.get_chains():
atoms = [np.array(atom.get_coord()) for atom in structure.get_atoms()]
return atoms | [
"def",
"pdb_to_structure",
"(",
"filename",
")",
":",
"try",
":",
"from",
"Bio",
".",
"PDB",
"import",
"PDB",
"except",
"ImportError",
":",
"print",
"(",
"\"I can't import Biopython which is needed to handle PDB files.\"",
")",
"raise",
"p",
"=",
"PDB",
".",
"PDBParser",
"(",
")",
"structure",
"=",
"p",
".",
"get_structure",
"(",
"'S'",
",",
"filename",
")",
"for",
"_",
"in",
"structure",
".",
"get_chains",
"(",
")",
":",
"atoms",
"=",
"[",
"np",
".",
"array",
"(",
"atom",
".",
"get_coord",
"(",
")",
")",
"for",
"atom",
"in",
"structure",
".",
"get_atoms",
"(",
")",
"]",
"return",
"atoms"
] | Import a structure object from a PDB file. | [
"Import",
"a",
"structure",
"object",
"from",
"a",
"PDB",
"file",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L980-L993 | train |
koszullab/metaTOR | metator/scripts/hicstuff.py | positions_to_contigs | def positions_to_contigs(positions):
"""Flattens and converts a positions array to a contigs array, if applicable.
"""
if isinstance(positions, np.ndarray):
flattened_positions = positions.flatten()
else:
try:
flattened_positions = np.array(
[pos for contig in positions for pos in contig])
except TypeError:
flattened_positions = np.array(positions)
if (np.diff(positions) == 0).any() and not (0 in set(positions)):
warnings.warn("I detected identical consecutive nonzero values.")
return positions
n = len(flattened_positions)
contigs = np.ones(n)
counter = 0
for i in range(1, n):
if positions[i] == 0:
counter += 1
contigs[i] += counter
else:
contigs[i] = contigs[i - 1]
return contigs | python | def positions_to_contigs(positions):
"""Flattens and converts a positions array to a contigs array, if applicable.
"""
if isinstance(positions, np.ndarray):
flattened_positions = positions.flatten()
else:
try:
flattened_positions = np.array(
[pos for contig in positions for pos in contig])
except TypeError:
flattened_positions = np.array(positions)
if (np.diff(positions) == 0).any() and not (0 in set(positions)):
warnings.warn("I detected identical consecutive nonzero values.")
return positions
n = len(flattened_positions)
contigs = np.ones(n)
counter = 0
for i in range(1, n):
if positions[i] == 0:
counter += 1
contigs[i] += counter
else:
contigs[i] = contigs[i - 1]
return contigs | [
"def",
"positions_to_contigs",
"(",
"positions",
")",
":",
"if",
"isinstance",
"(",
"positions",
",",
"np",
".",
"ndarray",
")",
":",
"flattened_positions",
"=",
"positions",
".",
"flatten",
"(",
")",
"else",
":",
"try",
":",
"flattened_positions",
"=",
"np",
".",
"array",
"(",
"[",
"pos",
"for",
"contig",
"in",
"positions",
"for",
"pos",
"in",
"contig",
"]",
")",
"except",
"TypeError",
":",
"flattened_positions",
"=",
"np",
".",
"array",
"(",
"positions",
")",
"if",
"(",
"np",
".",
"diff",
"(",
"positions",
")",
"==",
"0",
")",
".",
"any",
"(",
")",
"and",
"not",
"(",
"0",
"in",
"set",
"(",
"positions",
")",
")",
":",
"warnings",
".",
"warn",
"(",
"\"I detected identical consecutive nonzero values.\"",
")",
"return",
"positions",
"n",
"=",
"len",
"(",
"flattened_positions",
")",
"contigs",
"=",
"np",
".",
"ones",
"(",
"n",
")",
"counter",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"n",
")",
":",
"if",
"positions",
"[",
"i",
"]",
"==",
"0",
":",
"counter",
"+=",
"1",
"contigs",
"[",
"i",
"]",
"+=",
"counter",
"else",
":",
"contigs",
"[",
"i",
"]",
"=",
"contigs",
"[",
"i",
"-",
"1",
"]",
"return",
"contigs"
] | Flattens and converts a positions array to a contigs array, if applicable. | [
"Flattens",
"and",
"converts",
"a",
"positions",
"array",
"to",
"a",
"contigs",
"array",
"if",
"applicable",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1005-L1031 | train |
koszullab/metaTOR | metator/scripts/hicstuff.py | distance_diagonal_law | def distance_diagonal_law(matrix, positions=None):
"""Compute a distance law trend using the contact averages of equal distances.
Specific positions can be supplied if needed.
"""
n = min(matrix.shape)
if positions is None:
return np.array([np.average(np.diagonal(matrix, j)) for j in range(n)])
else:
contigs = positions_to_contigs(positions)
def is_intra(i, j):
return contigs[i] == contigs[j]
max_intra_distance = max((len(contigs == u) for u in set(contigs)))
intra_contacts = []
inter_contacts = [np.average(np.diagonal(matrix, j))
for j in range(max_intra_distance, n)]
for j in range(max_intra_distance):
D = np.diagonal(matrix, j)
for i in range(len(D)):
diagonal_intra = []
if is_intra(i, j):
diagonal_intra.append(D[i])
# else:
# diagonal_inter.append(D[i])
# inter_contacts.append(np.average(np.array(diagonal_inter)))
intra_contacts.append(np.average(np.array(diagonal_intra)))
intra_contacts.extend(inter_contacts)
return [positions, np.array(intra_contacts)] | python | def distance_diagonal_law(matrix, positions=None):
"""Compute a distance law trend using the contact averages of equal distances.
Specific positions can be supplied if needed.
"""
n = min(matrix.shape)
if positions is None:
return np.array([np.average(np.diagonal(matrix, j)) for j in range(n)])
else:
contigs = positions_to_contigs(positions)
def is_intra(i, j):
return contigs[i] == contigs[j]
max_intra_distance = max((len(contigs == u) for u in set(contigs)))
intra_contacts = []
inter_contacts = [np.average(np.diagonal(matrix, j))
for j in range(max_intra_distance, n)]
for j in range(max_intra_distance):
D = np.diagonal(matrix, j)
for i in range(len(D)):
diagonal_intra = []
if is_intra(i, j):
diagonal_intra.append(D[i])
# else:
# diagonal_inter.append(D[i])
# inter_contacts.append(np.average(np.array(diagonal_inter)))
intra_contacts.append(np.average(np.array(diagonal_intra)))
intra_contacts.extend(inter_contacts)
return [positions, np.array(intra_contacts)] | [
"def",
"distance_diagonal_law",
"(",
"matrix",
",",
"positions",
"=",
"None",
")",
":",
"n",
"=",
"min",
"(",
"matrix",
".",
"shape",
")",
"if",
"positions",
"is",
"None",
":",
"return",
"np",
".",
"array",
"(",
"[",
"np",
".",
"average",
"(",
"np",
".",
"diagonal",
"(",
"matrix",
",",
"j",
")",
")",
"for",
"j",
"in",
"range",
"(",
"n",
")",
"]",
")",
"else",
":",
"contigs",
"=",
"positions_to_contigs",
"(",
"positions",
")",
"def",
"is_intra",
"(",
"i",
",",
"j",
")",
":",
"return",
"contigs",
"[",
"i",
"]",
"==",
"contigs",
"[",
"j",
"]",
"max_intra_distance",
"=",
"max",
"(",
"(",
"len",
"(",
"contigs",
"==",
"u",
")",
"for",
"u",
"in",
"set",
"(",
"contigs",
")",
")",
")",
"intra_contacts",
"=",
"[",
"]",
"inter_contacts",
"=",
"[",
"np",
".",
"average",
"(",
"np",
".",
"diagonal",
"(",
"matrix",
",",
"j",
")",
")",
"for",
"j",
"in",
"range",
"(",
"max_intra_distance",
",",
"n",
")",
"]",
"for",
"j",
"in",
"range",
"(",
"max_intra_distance",
")",
":",
"D",
"=",
"np",
".",
"diagonal",
"(",
"matrix",
",",
"j",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"D",
")",
")",
":",
"diagonal_intra",
"=",
"[",
"]",
"if",
"is_intra",
"(",
"i",
",",
"j",
")",
":",
"diagonal_intra",
".",
"append",
"(",
"D",
"[",
"i",
"]",
")",
"# else:",
"# diagonal_inter.append(D[i])",
"# inter_contacts.append(np.average(np.array(diagonal_inter)))",
"intra_contacts",
".",
"append",
"(",
"np",
".",
"average",
"(",
"np",
".",
"array",
"(",
"diagonal_intra",
")",
")",
")",
"intra_contacts",
".",
"extend",
"(",
"inter_contacts",
")",
"return",
"[",
"positions",
",",
"np",
".",
"array",
"(",
"intra_contacts",
")",
"]"
] | Compute a distance law trend using the contact averages of equal distances.
Specific positions can be supplied if needed. | [
"Compute",
"a",
"distance",
"law",
"trend",
"using",
"the",
"contact",
"averages",
"of",
"equal",
"distances",
".",
"Specific",
"positions",
"can",
"be",
"supplied",
"if",
"needed",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1034-L1066 | train |
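For the positions-free branch, the distance law is simply the average of each diagonal of the contact map; a self-contained sketch on a toy map whose contacts decay with genomic distance:

```python
import numpy as np

rng = np.random.default_rng(4)
n = 50
i, j = np.indices((n, n))
M = rng.poisson(100.0 / (1.0 + np.abs(i - j)))   # decaying toy contact map

law = np.array([np.average(np.diagonal(M, k)) for k in range(n)])
print(law[:5])   # decreasing trend with distance
```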
koszullab/metaTOR | metator/scripts/hicstuff.py | rippe_parameters | def rippe_parameters(matrix, positions, lengths=None, init=None, circ=False):
"""Estimate parameters from the model described in Rippe et al., 2001.
"""
n, _ = matrix.shape
if lengths is None:
lengths = np.abs(np.diff(positions))
measurements, bins = [], []
for i in range(n):
for j in range(1, i):
mean_length = (lengths[i] + lengths[j]) / 2.
if positions[i] < positions[j]:
d = (((positions[j] - positions[i] -
lengths[i]) + mean_length) /
1000.)
else:
d = (((positions[i] - positions[j] -
lengths[j]) + mean_length) /
1000.)
bins.append(np.abs(d))
measurements.append(matrix[i, j])
parameters = estimate_param_rippe(measurements, bins, init=init, circ=circ)
print(parameters)
return parameters[0] | python | def rippe_parameters(matrix, positions, lengths=None, init=None, circ=False):
"""Estimate parameters from the model described in Rippe et al., 2001.
"""
n, _ = matrix.shape
if lengths is None:
lengths = np.abs(np.diff(positions))
measurements, bins = [], []
for i in range(n):
for j in range(1, i):
mean_length = (lengths[i] + lengths[j]) / 2.
if positions[i] < positions[j]:
d = (((positions[j] - positions[i] -
lengths[i]) + mean_length) /
1000.)
else:
d = (((positions[i] - positions[j] -
lengths[j]) + mean_length) /
1000.)
bins.append(np.abs(d))
measurements.append(matrix[i, j])
parameters = estimate_param_rippe(measurements, bins, init=init, circ=circ)
print(parameters)
return parameters[0] | [
"def",
"rippe_parameters",
"(",
"matrix",
",",
"positions",
",",
"lengths",
"=",
"None",
",",
"init",
"=",
"None",
",",
"circ",
"=",
"False",
")",
":",
"n",
",",
"_",
"=",
"matrix",
".",
"shape",
"if",
"lengths",
"is",
"None",
":",
"lengths",
"=",
"np",
".",
"abs",
"(",
"np",
".",
"diff",
"(",
"positions",
")",
")",
"measurements",
",",
"bins",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"i",
")",
":",
"mean_length",
"=",
"(",
"lengths",
"[",
"i",
"]",
"+",
"lengths",
"[",
"j",
"]",
")",
"/",
"2.",
"if",
"positions",
"[",
"i",
"]",
"<",
"positions",
"[",
"j",
"]",
":",
"d",
"=",
"(",
"(",
"(",
"positions",
"[",
"j",
"]",
"-",
"positions",
"[",
"i",
"]",
"-",
"lengths",
"[",
"i",
"]",
")",
"+",
"mean_length",
")",
"/",
"1000.",
")",
"else",
":",
"d",
"=",
"(",
"(",
"(",
"positions",
"[",
"i",
"]",
"-",
"positions",
"[",
"j",
"]",
"-",
"lengths",
"[",
"j",
"]",
")",
"+",
"mean_length",
")",
"/",
"1000.",
")",
"bins",
".",
"append",
"(",
"np",
".",
"abs",
"(",
"d",
")",
")",
"measurements",
".",
"append",
"(",
"matrix",
"[",
"i",
",",
"j",
"]",
")",
"parameters",
"=",
"estimate_param_rippe",
"(",
"measurements",
",",
"bins",
",",
"init",
"=",
"init",
",",
"circ",
"=",
"circ",
")",
"print",
"(",
"parameters",
")",
"return",
"parameters",
"[",
"0",
"]"
] | Estimate parameters from the model described in Rippe et al., 2001. | [
"Estimate",
"parameters",
"from",
"the",
"model",
"described",
"in",
"Rippe",
"et",
"al",
".",
"2001",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1069-L1095 | train |
koszullab/metaTOR | metator/scripts/hicstuff.py | scalogram | def scalogram(M, circ=False):
"""Computes so-called 'scalograms' used to easily
visualize contacts at different distance scales.
Edge cases have been painstakingly taken
care of.
"""
# Sanity checks
if not type(M) is np.ndarray:
M = np.array(M)
if M.shape[0] != M.shape[1]:
raise ValueError("Matrix is not square.")
try:
n = min(M.shape)
except AttributeError:
n = M.size
N = np.zeros(M.shape)
for i in range(n):
for j in range(n):
if i + j < n and i >= j:
N[i, j] = M[i, i - j:i + j + 1].sum()
elif circ and i + j < n and i < j:
N[i, j] = M[i, i - j:].sum() + M[i, :i + j + 1].sum()
elif circ and i >= j and i + j >= n:
N[i, j] = M[i, i - j:].sum() + M[i, :i + j - n + 1].sum()
elif circ and i < j and i + j >= n:
N[i, j] = (M[i, i - j:].sum() +
M[i, :].sum() +
M[i, :i + j - n + 1].sum())
return N | python | def scalogram(M, circ=False):
"""Computes so-called 'scalograms' used to easily
visualize contacts at different distance scales.
Edge cases have been painstakingly taken
care of.
"""
# Sanity checks
if not type(M) is np.ndarray:
M = np.array(M)
if M.shape[0] != M.shape[1]:
raise ValueError("Matrix is not square.")
try:
n = min(M.shape)
except AttributeError:
n = M.size
N = np.zeros(M.shape)
for i in range(n):
for j in range(n):
if i + j < n and i >= j:
N[i, j] = M[i, i - j:i + j + 1].sum()
elif circ and i + j < n and i < j:
N[i, j] = M[i, i - j:].sum() + M[i, :i + j + 1].sum()
elif circ and i >= j and i + j >= n:
N[i, j] = M[i, i - j:].sum() + M[i, :i + j - n + 1].sum()
elif circ and i < j and i + j >= n:
N[i, j] = (M[i, i - j:].sum() +
M[i, :].sum() +
M[i, :i + j - n + 1].sum())
return N | [
"def",
"scalogram",
"(",
"M",
",",
"circ",
"=",
"False",
")",
":",
"# Sanity checks",
"if",
"not",
"type",
"(",
"M",
")",
"is",
"np",
".",
"ndarray",
":",
"M",
"=",
"np",
".",
"array",
"(",
"M",
")",
"if",
"M",
".",
"shape",
"[",
"0",
"]",
"!=",
"M",
".",
"shape",
"[",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"\"Matrix is not square.\"",
")",
"try",
":",
"n",
"=",
"min",
"(",
"M",
".",
"shape",
")",
"except",
"AttributeError",
":",
"n",
"=",
"M",
".",
"size",
"N",
"=",
"np",
".",
"zeros",
"(",
"M",
".",
"shape",
")",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"for",
"j",
"in",
"range",
"(",
"n",
")",
":",
"if",
"i",
"+",
"j",
"<",
"n",
"and",
"i",
">=",
"j",
":",
"N",
"[",
"i",
",",
"j",
"]",
"=",
"M",
"[",
"i",
",",
"i",
"-",
"j",
":",
"i",
"+",
"j",
"+",
"1",
"]",
".",
"sum",
"(",
")",
"elif",
"circ",
"and",
"i",
"+",
"j",
"<",
"n",
"and",
"i",
"<",
"j",
":",
"N",
"[",
"i",
",",
"j",
"]",
"=",
"M",
"[",
"i",
",",
"i",
"-",
"j",
":",
"]",
".",
"sum",
"(",
")",
"+",
"M",
"[",
"i",
",",
":",
"i",
"+",
"j",
"+",
"1",
"]",
".",
"sum",
"(",
")",
"elif",
"circ",
"and",
"i",
">=",
"j",
"and",
"i",
"+",
"j",
">=",
"n",
":",
"N",
"[",
"i",
",",
"j",
"]",
"=",
"M",
"[",
"i",
",",
"i",
"-",
"j",
":",
"]",
".",
"sum",
"(",
")",
"+",
"M",
"[",
"i",
",",
":",
"i",
"+",
"j",
"-",
"n",
"+",
"1",
"]",
".",
"sum",
"(",
")",
"elif",
"circ",
"and",
"i",
"<",
"j",
"and",
"i",
"+",
"j",
">=",
"n",
":",
"N",
"[",
"i",
",",
"j",
"]",
"=",
"(",
"M",
"[",
"i",
",",
"i",
"-",
"j",
":",
"]",
".",
"sum",
"(",
")",
"+",
"M",
"[",
"i",
",",
":",
"]",
".",
"sum",
"(",
")",
"+",
"M",
"[",
"i",
",",
":",
"i",
"+",
"j",
"-",
"n",
"+",
"1",
"]",
".",
"sum",
"(",
")",
")",
"return",
"N"
] | Computes so-called 'scalograms' used to easily
visualize contacts at different distance scales.
Edge cases have been painstakingly taken
care of. | [
"Computes",
"so",
"-",
"called",
"scalograms",
"used",
"to",
"easily",
"visualize",
"contacts",
"at",
"different",
"distance",
"scales",
".",
"Edge",
"cases",
"have",
"been",
"painstakingly",
"taken",
"care",
"of",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1288-L1321 | train |
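A compact sketch of the non-circular case: entry (i, j) of the scalogram is the sum of contacts of bin i within a window of half-width j. The sketch clips the window at the matrix edges instead of reproducing the wrap-around handling above.

```python
import numpy as np

rng = np.random.default_rng(5)
M = rng.poisson(3, size=(10, 10)).astype(float)
n = len(M)

S = np.zeros_like(M)
for i in range(n):
    for j in range(n):
        lo, hi = max(0, i - j), min(n, i + j + 1)
        S[i, j] = M[i, lo:hi].sum()
print(S[:, 0])       # equals the diagonal of M
print(S[:, -1])      # equals the full row sums
```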
koszullab/metaTOR | metator/scripts/hicstuff.py | asd | def asd(M1, M2):
"""Compute a Fourier transform based distance
between two matrices.
Inspired from Galiez et al., 2015
(https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4535829/)
"""
from scipy.fftpack import fft2
spectra1 = np.abs(fft2(M1))
spectra2 = np.abs(fft2(M2))
return np.linalg.norm(spectra2 - spectra1) | python | def asd(M1, M2):
"""Compute a Fourier transform based distance
between two matrices.
Inspired from Galiez et al., 2015
(https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4535829/)
"""
from scipy.fftpack import fft2
spectra1 = np.abs(fft2(M1))
spectra2 = np.abs(fft2(M2))
return np.linalg.norm(spectra2 - spectra1) | [
"def",
"asd",
"(",
"M1",
",",
"M2",
")",
":",
"from",
"scipy",
".",
"fftpack",
"import",
"fft2",
"spectra1",
"=",
"np",
".",
"abs",
"(",
"fft2",
"(",
"M1",
")",
")",
"spectra2",
"=",
"np",
".",
"abs",
"(",
"fft2",
"(",
"M2",
")",
")",
"return",
"np",
".",
"linalg",
".",
"norm",
"(",
"spectra2",
"-",
"spectra1",
")"
] | Compute a Fourier transform based distance
between two matrices.
Inspired from Galiez et al., 2015
(https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4535829/) | [
"Compute",
"a",
"Fourier",
"transform",
"based",
"distance",
"between",
"two",
"matrices",
"."
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1324-L1336 | train |
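The spectral distance above can be exercised directly: identical matrices give 0, unrelated ones do not (illustrative sketch with invented random matrices):

```python
import numpy as np
from scipy.fftpack import fft2

rng = np.random.default_rng(6)
A = rng.random((16, 16))
B = A.copy()
C = rng.random((16, 16))

def spectral_distance(M1, M2):
    return np.linalg.norm(np.abs(fft2(M2)) - np.abs(fft2(M1)))

print(spectral_distance(A, B))   # 0.0
print(spectral_distance(A, C))   # > 0
```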
koszullab/metaTOR | metator/scripts/hicstuff.py | remove_intra | def remove_intra(M, contigs):
"""Remove intrachromosomal contacts
Given a contact map and a list attributing each position
to a given chromosome, set all contacts within each
chromosome or contig to zero. Useful to perform
calculations on interchromosomal contacts only.
Parameters
----------
M : array_like
The initial contact map
contigs : list or array_like
A 1D array whose value at index i reflect the contig
label of the row i in the matrix M. The length of
the array must be equal to the (identical) shape
value of the matrix.
Returns
-------
N : numpy.ndarray
The output contact map with no intrachromosomal contacts
"""
N = np.copy(M)
n = len(N)
assert n == len(contigs)
    # Naive implementation for now
for (i, j) in itertools.product(range(n), range(n)):
if contigs[i] == contigs[j]:
N[i, j] = 0
return N | python | def remove_intra(M, contigs):
"""Remove intrachromosomal contacts
Given a contact map and a list attributing each position
to a given chromosome, set all contacts within each
chromosome or contig to zero. Useful to perform
calculations on interchromosomal contacts only.
Parameters
----------
M : array_like
The initial contact map
contigs : list or array_like
A 1D array whose value at index i reflect the contig
label of the row i in the matrix M. The length of
the array must be equal to the (identical) shape
value of the matrix.
Returns
-------
N : numpy.ndarray
The output contact map with no intrachromosomal contacts
"""
N = np.copy(M)
n = len(N)
assert n == len(contigs)
    # Naive implementation for now
for (i, j) in itertools.product(range(n), range(n)):
if contigs[i] == contigs[j]:
N[i, j] = 0
return N | [
"def",
"remove_intra",
"(",
"M",
",",
"contigs",
")",
":",
"N",
"=",
"np",
".",
"copy",
"(",
"M",
")",
"n",
"=",
"len",
"(",
"N",
")",
"assert",
"n",
"==",
"len",
"(",
"contigs",
")",
"# Naive implmentation for now",
"for",
"(",
"i",
",",
"j",
")",
"in",
"itertools",
".",
"product",
"(",
"range",
"(",
"n",
")",
",",
"range",
"(",
"n",
")",
")",
":",
"if",
"contigs",
"[",
"i",
"]",
"==",
"contigs",
"[",
"j",
"]",
":",
"N",
"[",
"i",
",",
"j",
"]",
"=",
"0",
"return",
"N"
] | Remove intrachromosomal contacts
Given a contact map and a list attributing each position
to a given chromosome, set all contacts within each
chromosome or contig to zero. Useful to perform
calculations on interchromosomal contacts only.
Parameters
----------
M : array_like
The initial contact map
contigs : list or array_like
A 1D array whose value at index i reflect the contig
label of the row i in the matrix M. The length of
the array must be equal to the (identical) shape
value of the matrix.
Returns
-------
N : numpy.ndarray
The output contact map with no intrachromosomal contacts | [
"Remove",
"intrachromosomal",
"contacts"
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1378-L1412 | train |
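With two contigs of 2 and 3 bins, only the 2x3 and 3x2 off-diagonal blocks survive; the sketch below uses a vectorized mask rather than the explicit double loop of the function above (toy data invented for the example):

```python
import numpy as np

M = np.arange(25).reshape(5, 5)
contigs = np.array([1, 1, 2, 2, 2])

N = M.copy()
same = contigs[:, None] == contigs[None, :]   # True where both bins share a contig
N[same] = 0
print(N)
```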
koszullab/metaTOR | metator/scripts/hicstuff.py | positions_to_contigs | def positions_to_contigs(positions):
"""Label contigs according to relative positions
Given a list of positions, return an ordered list
of labels reflecting where the positions array started
over (and presumably a new contig began).
Parameters
----------
positions : list or array_like
A piece-wise ordered list of integers representing
positions
Returns
-------
contig_labels : numpy.ndarray
The list of contig labels
"""
contig_labels = np.zeros_like(positions)
contig_index = 0
for i, p in enumerate(positions):
if p == 0:
contig_index += 1
contig_labels[i] = contig_index
return contig_labels | python | def positions_to_contigs(positions):
"""Label contigs according to relative positions
Given a list of positions, return an ordered list
of labels reflecting where the positions array started
over (and presumably a new contig began).
Parameters
----------
positions : list or array_like
A piece-wise ordered list of integers representing
positions
Returns
-------
contig_labels : numpy.ndarray
The list of contig labels
"""
contig_labels = np.zeros_like(positions)
contig_index = 0
for i, p in enumerate(positions):
if p == 0:
contig_index += 1
contig_labels[i] = contig_index
return contig_labels | [
"def",
"positions_to_contigs",
"(",
"positions",
")",
":",
"contig_labels",
"=",
"np",
".",
"zeros_like",
"(",
"positions",
")",
"contig_index",
"=",
"0",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"positions",
")",
":",
"if",
"p",
"==",
"0",
":",
"contig_index",
"+=",
"1",
"contig_labels",
"[",
"i",
"]",
"=",
"contig_index",
"return",
"contig_labels"
] | Label contigs according to relative positions
Given a list of positions, return an ordered list
of labels reflecting where the positions array started
over (and presumably a new contig began).
Parameters
----------
positions : list or array_like
A piece-wise ordered list of integers representing
positions
Returns
-------
contig_labels : numpy.ndarray
The list of contig labels | [
"Label",
"contigs",
"according",
"to",
"relative",
"positions"
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1452-L1480 | train |
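
A short worked example of the positions_to_contigs logic above (values invented): each time the position column restarts at 0, the contig label is incremented.

import numpy as np

def positions_to_contigs(positions):
    # Increment the label whenever a position resets to 0 (a new contig starts).
    contig_labels = np.zeros_like(positions)
    contig_index = 0
    for i, p in enumerate(positions):
        if p == 0:
            contig_index += 1
        contig_labels[i] = contig_index
    return contig_labels

print(positions_to_contigs(np.array([0, 1000, 2000, 0, 1000, 0])))  # -> [1 1 1 2 2 3]
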
koszullab/metaTOR | metator/scripts/hicstuff.py | contigs_to_positions | def contigs_to_positions(contigs, binning=10000):
"""Build positions from contig labels
From a list of contig labels and a binning parameter,
build a list of positions that's essentially a
concatenation of linspaces with step equal to the
binning.
Parameters
----------
contigs : list or array_like
The list of contig labels, must be sorted.
binning : int, optional
The step for the list of positions. Default is 10000.
Returns
-------
positions : numpy.ndarray
The piece-wise sorted list of positions
"""
positions = np.zeros_like(contigs)
index = 0
for _, chunk in itertools.groubpy(contigs):
l = len(chunk)
positions[index : index + l] = np.arange(list(chunk)) * binning
index += l
return positions | python | def contigs_to_positions(contigs, binning=10000):
"""Build positions from contig labels
From a list of contig labels and a binning parameter,
build a list of positions that's essentially a
concatenation of linspaces with step equal to the
binning.
Parameters
----------
contigs : list or array_like
The list of contig labels, must be sorted.
binning : int, optional
The step for the list of positions. Default is 10000.
Returns
-------
positions : numpy.ndarray
The piece-wise sorted list of positions
"""
positions = np.zeros_like(contigs)
index = 0
for _, chunk in itertools.groubpy(contigs):
l = len(chunk)
positions[index : index + l] = np.arange(list(chunk)) * binning
index += l
return positions | [
"def",
"contigs_to_positions",
"(",
"contigs",
",",
"binning",
"=",
"10000",
")",
":",
"positions",
"=",
"np",
".",
"zeros_like",
"(",
"contigs",
")",
"index",
"=",
"0",
"for",
"_",
",",
"chunk",
"in",
"itertools",
".",
"groubpy",
"(",
"contigs",
")",
":",
"l",
"=",
"len",
"(",
"chunk",
")",
"positions",
"[",
"index",
":",
"index",
"+",
"l",
"]",
"=",
"np",
".",
"arange",
"(",
"list",
"(",
"chunk",
")",
")",
"*",
"binning",
"index",
"+=",
"l",
"return",
"positions"
] | Build positions from contig labels
From a list of contig labels and a binning parameter,
build a list of positions that's essentially a
concatenation of linspaces with step equal to the
binning.
Parameters
----------
contigs : list or array_like
The list of contig labels, must be sorted.
binning : int, optional
The step for the list of positions. Default is 10000.
Returns
-------
positions : numpy.ndarray
The piece-wise sorted list of positions | [
"Build",
"positions",
"from",
"contig",
"labels"
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1482-L1511 | train |
koszullab/metaTOR | metator/scripts/hicstuff.py | split_matrix | def split_matrix(M, contigs):
"""Split multiple chromosome matrix
Split a labeled matrix with multiple chromosomes
into unlabeled single-chromosome matrices. Inter chromosomal
contacts are discarded.
Parameters
----------
M : array_like
The multiple chromosome matrix to be split
contigs : list or array_like
The list of contig labels
"""
index = 0
for _, chunk in itertools.groubpy(contigs):
l = len(chunk)
yield M[index : index + l, index : index + l]
index += l | python | def split_matrix(M, contigs):
"""Split multiple chromosome matrix
Split a labeled matrix with multiple chromosomes
into unlabeled single-chromosome matrices. Inter chromosomal
contacts are discarded.
Parameters
----------
M : array_like
The multiple chromosome matrix to be split
contigs : list or array_like
The list of contig labels
"""
index = 0
for _, chunk in itertools.groubpy(contigs):
l = len(chunk)
yield M[index : index + l, index : index + l]
index += l | [
"def",
"split_matrix",
"(",
"M",
",",
"contigs",
")",
":",
"index",
"=",
"0",
"for",
"_",
",",
"chunk",
"in",
"itertools",
".",
"groubpy",
"(",
"contigs",
")",
":",
"l",
"=",
"len",
"(",
"chunk",
")",
"yield",
"M",
"[",
"index",
":",
"index",
"+",
"l",
",",
"index",
":",
"index",
"+",
"l",
"]",
"index",
"+=",
"l"
] | Split multiple chromosome matrix
Split a labeled matrix with multiple chromosomes
into unlabeled single-chromosome matrices. Inter chromosomal
contacts are discarded.
Parameters
----------
M : array_like
The multiple chromosome matrix to be split
contigs : list or array_like
The list of contig labels | [
"Split",
"multiple",
"chromosome",
"matrix"
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1514-L1533 | train |
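
Note that the contigs_to_positions and split_matrix snippets above call itertools.groubpy, which does not exist in the standard library (presumably itertools.groupby was intended), and take len() of a groupby iterator directly. A hedged, corrected sketch of the split_matrix idea; the toy data is invented for illustration:

import itertools
import numpy as np

def split_matrix_sketch(M, contigs):
    # Yield one square sub-matrix per run of identical contig labels,
    # discarding inter-chromosomal contacts.
    index = 0
    for _, chunk in itertools.groupby(contigs):
        size = len(list(chunk))
        yield M[index:index + size, index:index + size]
        index += size

M = np.arange(25).reshape(5, 5)
for sub in split_matrix_sketch(M, ["A", "A", "A", "B", "B"]):
    print(sub.shape)   # (3, 3) then (2, 2)
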
MacHu-GWU/single_file_module-project | sfm/binarysearch.py | find_nearest | def find_nearest(sorted_list, x):
"""
Find the nearest item of x from sorted array.
:type array: list
    :param array: an iterable object that supports indexing
:param x: a comparable value
note: for finding the nearest item from a descending array, I recommend
find_nearest(sorted_list[::-1], x). Because the built-in list[::-1] method
is super fast.
Usage::
>>> find_nearest([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 5.1)
5
**中文文档**
在正序数组中, 返回最接近x的数。
"""
if x <= sorted_list[0]:
return sorted_list[0]
elif x >= sorted_list[-1]:
return sorted_list[-1]
else:
lower = find_le(sorted_list, x)
upper = find_ge(sorted_list, x)
if (x - lower) > (upper - x):
return upper
else:
return lower | python | def find_nearest(sorted_list, x):
"""
Find the nearest item of x from sorted array.
:type array: list
    :param array: an iterable object that supports indexing
:param x: a comparable value
note: for finding the nearest item from a descending array, I recommend
find_nearest(sorted_list[::-1], x). Because the built-in list[::-1] method
is super fast.
Usage::
>>> find_nearest([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 5.1)
5
**中文文档**
在正序数组中, 返回最接近x的数。
"""
if x <= sorted_list[0]:
return sorted_list[0]
elif x >= sorted_list[-1]:
return sorted_list[-1]
else:
lower = find_le(sorted_list, x)
upper = find_ge(sorted_list, x)
if (x - lower) > (upper - x):
return upper
else:
return lower | [
"def",
"find_nearest",
"(",
"sorted_list",
",",
"x",
")",
":",
"if",
"x",
"<=",
"sorted_list",
"[",
"0",
"]",
":",
"return",
"sorted_list",
"[",
"0",
"]",
"elif",
"x",
">=",
"sorted_list",
"[",
"-",
"1",
"]",
":",
"return",
"sorted_list",
"[",
"-",
"1",
"]",
"else",
":",
"lower",
"=",
"find_le",
"(",
"sorted_list",
",",
"x",
")",
"upper",
"=",
"find_ge",
"(",
"sorted_list",
",",
"x",
")",
"if",
"(",
"x",
"-",
"lower",
")",
">",
"(",
"upper",
"-",
"x",
")",
":",
"return",
"upper",
"else",
":",
"return",
"lower"
] | Find the nearest item of x from sorted array.
:type array: list
:param array: an iterable object that supports indexing
:param x: a comparable value
note: for finding the nearest item from a descending array, I recommend
find_nearest(sorted_list[::-1], x). Because the built-in list[::-1] method
is super fast.
Usage::
>>> find_nearest([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 5.1)
5
**中文文档**
在正序数组中, 返回最接近x的数。 | [
"Find",
"the",
"nearest",
"item",
"of",
"x",
"from",
"sorted",
"array",
"."
] | 01f7a6b250853bebfd73de275895bf274325cfc1 | https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/binarysearch.py#L146-L178 | train |
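
find_nearest above relies on find_le and find_ge helpers that are not included in this excerpt. A plausible sketch of those helpers, following the standard bisect module recipes (an assumption; the actual sfm implementation may differ):

from bisect import bisect_left, bisect_right

def find_le(a, x):
    # Rightmost value in the sorted list a that is <= x.
    i = bisect_right(a, x)
    if i:
        return a[i - 1]
    raise ValueError

def find_ge(a, x):
    # Leftmost value in the sorted list a that is >= x.
    i = bisect_left(a, x)
    if i != len(a):
        return a[i]
    raise ValueError

# With these helpers in scope, find_nearest([0, 1, 2, 3, 4, 5], 3.2) returns 3.
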
MacHu-GWU/single_file_module-project | sfm/matplot_mate.py | format_x_tick | def format_x_tick(axis,
major_locator=None,
major_formatter=None,
minor_locator=None,
minor_formatter=None):
"""Set x axis's format.
This method is designed for time axis.
**中文文档**
设置X轴格式。
"""
if major_locator:
axis.xaxis.set_major_locator(major_locator)
if major_formatter:
axis.xaxis.set_major_formatter(major_formatter)
if minor_locator:
axis.xaxis.set_minor_locator(minor_locator)
if minor_formatter:
axis.xaxis.set_minor_formatter(minor_formatter)
axis.autoscale_view()
plt.setp(axis.xaxis.get_majorticklabels(), rotation=90)
plt.setp(axis.xaxis.get_minorticklabels(), rotation=90)
axis.grid() | python | def format_x_tick(axis,
major_locator=None,
major_formatter=None,
minor_locator=None,
minor_formatter=None):
"""Set x axis's format.
This method is designed for time axis.
**中文文档**
设置X轴格式。
"""
if major_locator:
axis.xaxis.set_major_locator(major_locator)
if major_formatter:
axis.xaxis.set_major_formatter(major_formatter)
if minor_locator:
axis.xaxis.set_minor_locator(minor_locator)
if minor_formatter:
axis.xaxis.set_minor_formatter(minor_formatter)
axis.autoscale_view()
plt.setp(axis.xaxis.get_majorticklabels(), rotation=90)
plt.setp(axis.xaxis.get_minorticklabels(), rotation=90)
axis.grid() | [
"def",
"format_x_tick",
"(",
"axis",
",",
"major_locator",
"=",
"None",
",",
"major_formatter",
"=",
"None",
",",
"minor_locator",
"=",
"None",
",",
"minor_formatter",
"=",
"None",
")",
":",
"if",
"major_locator",
":",
"axis",
".",
"xaxis",
".",
"set_major_locator",
"(",
"major_locator",
")",
"if",
"major_formatter",
":",
"axis",
".",
"xaxis",
".",
"set_major_formatter",
"(",
"major_formatter",
")",
"if",
"minor_locator",
":",
"axis",
".",
"xaxis",
".",
"set_minor_locator",
"(",
"minor_locator",
")",
"if",
"minor_formatter",
":",
"axis",
".",
"xaxis",
".",
"set_minor_formatter",
"(",
"minor_formatter",
")",
"axis",
".",
"autoscale_view",
"(",
")",
"plt",
".",
"setp",
"(",
"axis",
".",
"xaxis",
".",
"get_majorticklabels",
"(",
")",
",",
"rotation",
"=",
"90",
")",
"plt",
".",
"setp",
"(",
"axis",
".",
"xaxis",
".",
"get_minorticklabels",
"(",
")",
",",
"rotation",
"=",
"90",
")",
"axis",
".",
"grid",
"(",
")"
] | Set x axis's format.
This method is designed for time axis.
**中文文档**
设置X轴格式。 | [
"Set",
"x",
"axis",
"s",
"format",
"."
] | 01f7a6b250853bebfd73de275895bf274325cfc1 | https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/matplot_mate.py#L60-L85 | train |
MacHu-GWU/single_file_module-project | sfm/matplot_mate.py | set_legend | def set_legend(axis, lines, legend):
"""Set line legend.
**中文文档**
设置图例。
"""
try:
if legend:
axis.legend(lines, legend)
except Exception as e:
raise ValueError("invalid 'legend', Error: %s" % e) | python | def set_legend(axis, lines, legend):
"""Set line legend.
**中文文档**
设置图例。
"""
try:
if legend:
axis.legend(lines, legend)
except Exception as e:
raise ValueError("invalid 'legend', Error: %s" % e) | [
"def",
"set_legend",
"(",
"axis",
",",
"lines",
",",
"legend",
")",
":",
"try",
":",
"if",
"legend",
":",
"axis",
".",
"legend",
"(",
"lines",
",",
"legend",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"\"invalid 'legend', Error: %s\"",
"%",
"e",
")"
] | Set line legend.
**中文文档**
设置图例。 | [
"Set",
"line",
"legend",
"."
] | 01f7a6b250853bebfd73de275895bf274325cfc1 | https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/matplot_mate.py#L119-L130 | train |
MacHu-GWU/single_file_module-project | sfm/matplot_mate.py | get_max | def get_max(array):
"""Get maximum value of an array. Automatically ignore invalid data.
**中文文档**
获得最大值。
"""
largest = -np.inf
for i in array:
try:
if i > largest:
largest = i
except:
pass
if np.isinf(largest):
raise ValueError("there's no numeric value in array!")
else:
return largest | python | def get_max(array):
"""Get maximum value of an array. Automatically ignore invalid data.
**中文文档**
获得最大值。
"""
largest = -np.inf
for i in array:
try:
if i > largest:
largest = i
except:
pass
if np.isinf(largest):
raise ValueError("there's no numeric value in array!")
else:
return largest | [
"def",
"get_max",
"(",
"array",
")",
":",
"largest",
"=",
"-",
"np",
".",
"inf",
"for",
"i",
"in",
"array",
":",
"try",
":",
"if",
"i",
">",
"largest",
":",
"largest",
"=",
"i",
"except",
":",
"pass",
"if",
"np",
".",
"isinf",
"(",
"largest",
")",
":",
"raise",
"ValueError",
"(",
"\"there's no numeric value in array!\"",
")",
"else",
":",
"return",
"largest"
] | Get maximum value of an array. Automatically ignore invalid data.
**中文文档**
获得最大值。 | [
"Get",
"maximum",
"value",
"of",
"an",
"array",
".",
"Automatically",
"ignore",
"invalid",
"data",
"."
] | 01f7a6b250853bebfd73de275895bf274325cfc1 | https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/matplot_mate.py#L133-L150 | train |
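
A small illustration of the get_max behaviour documented above; the mixed list below is invented. Entries that cannot be compared (None, strings) raise inside the try block and are simply skipped:

import numpy as np

def get_max(array):
    # Condensed copy of the record above: ignore entries that fail the comparison.
    largest = -np.inf
    for i in array:
        try:
            if i > largest:
                largest = i
        except:
            pass
    if np.isinf(largest):
        raise ValueError("there's no numeric value in array!")
    return largest

print(get_max([3, None, "x", 7.5, 1]))   # -> 7.5
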
MacHu-GWU/single_file_module-project | sfm/matplot_mate.py | get_min | def get_min(array):
"""Get minimum value of an array. Automatically ignore invalid data.
**中文文档**
获得最小值。
"""
smallest = np.inf
for i in array:
try:
if i < smallest:
smallest = i
except:
pass
if np.isinf(smallest):
raise ValueError("there's no numeric value in array!")
else:
return smallest | python | def get_min(array):
"""Get minimum value of an array. Automatically ignore invalid data.
**中文文档**
获得最小值。
"""
smallest = np.inf
for i in array:
try:
if i < smallest:
smallest = i
except:
pass
if np.isinf(smallest):
raise ValueError("there's no numeric value in array!")
else:
return smallest | [
"def",
"get_min",
"(",
"array",
")",
":",
"smallest",
"=",
"np",
".",
"inf",
"for",
"i",
"in",
"array",
":",
"try",
":",
"if",
"i",
"<",
"smallest",
":",
"smallest",
"=",
"i",
"except",
":",
"pass",
"if",
"np",
".",
"isinf",
"(",
"smallest",
")",
":",
"raise",
"ValueError",
"(",
"\"there's no numeric value in array!\"",
")",
"else",
":",
"return",
"smallest"
] | Get minimum value of an array. Automatically ignore invalid data.
**中文文档**
获得最小值。 | [
"Get",
"minimum",
"value",
"of",
"an",
"array",
".",
"Automatically",
"ignore",
"invalid",
"data",
"."
] | 01f7a6b250853bebfd73de275895bf274325cfc1 | https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/matplot_mate.py#L153-L170 | train |
MacHu-GWU/single_file_module-project | sfm/matplot_mate.py | get_yAxis_limit | def get_yAxis_limit(y, lower=0.05, upper=0.2):
"""Find optimal y_min and y_max that guarantee enough space for legend and
plot.
**中文文档**
计算y坐标轴的最小和最大坐标。
:params lower: ymin为 y的最小值再减去gap的一定倍率
:params upper: ymax为 y的最大值再加上gap的一定倍率
"""
smallest = get_min(y)
largest = get_max(y)
gap = largest - smallest
if gap >= 0.000001:
y_min = smallest - lower * gap
y_max = largest + upper * gap
else:
y_min = smallest - lower * abs(smallest)
y_max = largest + upper * abs(largest)
return y_min, y_max | python | def get_yAxis_limit(y, lower=0.05, upper=0.2):
"""Find optimal y_min and y_max that guarantee enough space for legend and
plot.
**中文文档**
计算y坐标轴的最小和最大坐标。
:params lower: ymin为 y的最小值再减去gap的一定倍率
:params upper: ymax为 y的最大值再加上gap的一定倍率
"""
smallest = get_min(y)
largest = get_max(y)
gap = largest - smallest
if gap >= 0.000001:
y_min = smallest - lower * gap
y_max = largest + upper * gap
else:
y_min = smallest - lower * abs(smallest)
y_max = largest + upper * abs(largest)
return y_min, y_max | [
"def",
"get_yAxis_limit",
"(",
"y",
",",
"lower",
"=",
"0.05",
",",
"upper",
"=",
"0.2",
")",
":",
"smallest",
"=",
"get_min",
"(",
"y",
")",
"largest",
"=",
"get_max",
"(",
"y",
")",
"gap",
"=",
"largest",
"-",
"smallest",
"if",
"gap",
">=",
"0.000001",
":",
"y_min",
"=",
"smallest",
"-",
"lower",
"*",
"gap",
"y_max",
"=",
"largest",
"+",
"upper",
"*",
"gap",
"else",
":",
"y_min",
"=",
"smallest",
"-",
"lower",
"*",
"abs",
"(",
"smallest",
")",
"y_max",
"=",
"largest",
"+",
"upper",
"*",
"abs",
"(",
"largest",
")",
"return",
"y_min",
",",
"y_max"
] | Find optimal y_min and y_max that guarantee enough space for legend and
plot.
**中文文档**
计算y坐标轴的最小和最大坐标。
:params lower: ymin为 y的最小值再减去gap的一定倍率
:params upper: ymax为 y的最大值再加上gap的一定倍率 | [
"Find",
"optimal",
"y_min",
"and",
"y_max",
"that",
"guarantee",
"enough",
"space",
"for",
"legend",
"and",
"plot",
"."
] | 01f7a6b250853bebfd73de275895bf274325cfc1 | https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/matplot_mate.py#L173-L193 | train |
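
A worked example of the padding arithmetic in get_yAxis_limit above (numbers invented): for data spanning 10 to 20 with the default lower=0.05 and upper=0.2, the gap is 10, so the limits become 9.5 and 22.0, leaving extra headroom above the curve for the legend.

smallest, largest = 10.0, 20.0
gap = largest - smallest          # 10.0
y_min = smallest - 0.05 * gap     # 9.5
y_max = largest + 0.2 * gap       # 22.0
print(y_min, y_max)
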
MacHu-GWU/single_file_module-project | sfm/matplot_mate.py | create_figure | def create_figure(width=20, height=10):
"""Create a figure instance.
:params width: figure width
:params height: figure height
"""
figure = plt.figure(figsize=(width, height))
axis = figure.add_subplot(1, 1, 1)
return figure, axis | python | def create_figure(width=20, height=10):
"""Create a figure instance.
:params width: figure width
:params height: figure height
"""
figure = plt.figure(figsize=(width, height))
axis = figure.add_subplot(1, 1, 1)
return figure, axis | [
"def",
"create_figure",
"(",
"width",
"=",
"20",
",",
"height",
"=",
"10",
")",
":",
"figure",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"width",
",",
"height",
")",
")",
"axis",
"=",
"figure",
".",
"add_subplot",
"(",
"1",
",",
"1",
",",
"1",
")",
"return",
"figure",
",",
"axis"
] | Create a figure instance.
:params width: figure width
:params height: figure height | [
"Create",
"a",
"figure",
"instance",
"."
] | 01f7a6b250853bebfd73de275895bf274325cfc1 | https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/matplot_mate.py#L196-L204 | train |
MacHu-GWU/single_file_module-project | sfm/matplot_mate.py | preprocess_x_y | def preprocess_x_y(x, y):
"""Preprocess x, y input data. Returns list of list style.
**中文文档**
预处理输入的x, y数据。
"""
def is_iterable_slicable(a):
if hasattr(a, "__iter__") and hasattr(a, "__getitem__"):
return True
else:
return False
if is_iterable_slicable(x):
if is_iterable_slicable(x[0]):
return x, y
else:
return (x,), (y,)
else:
raise ValueError("invalid input!") | python | def preprocess_x_y(x, y):
"""Preprocess x, y input data. Returns list of list style.
**中文文档**
预处理输入的x, y数据。
"""
def is_iterable_slicable(a):
if hasattr(a, "__iter__") and hasattr(a, "__getitem__"):
return True
else:
return False
if is_iterable_slicable(x):
if is_iterable_slicable(x[0]):
return x, y
else:
return (x,), (y,)
else:
raise ValueError("invalid input!") | [
"def",
"preprocess_x_y",
"(",
"x",
",",
"y",
")",
":",
"def",
"is_iterable_slicable",
"(",
"a",
")",
":",
"if",
"hasattr",
"(",
"a",
",",
"\"__iter__\"",
")",
"and",
"hasattr",
"(",
"a",
",",
"\"__getitem__\"",
")",
":",
"return",
"True",
"else",
":",
"return",
"False",
"if",
"is_iterable_slicable",
"(",
"x",
")",
":",
"if",
"is_iterable_slicable",
"(",
"x",
"[",
"0",
"]",
")",
":",
"return",
"x",
",",
"y",
"else",
":",
"return",
"(",
"x",
",",
")",
",",
"(",
"y",
",",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"invalid input!\"",
")"
] | Preprocess x, y input data. Returns list of list style.
**中文文档**
预处理输入的x, y数据。 | [
"Preprocess",
"x",
"y",
"input",
"data",
".",
"Returns",
"list",
"of",
"list",
"style",
"."
] | 01f7a6b250853bebfd73de275895bf274325cfc1 | https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/matplot_mate.py#L207-L226 | train |
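
A condensed, behaviour-equivalent sketch of the preprocess_x_y helper above, showing how a single series and a list of series are normalised to the same shape (inputs invented):

def preprocess_x_y(x, y):
    # Wrap a single series into a 1-element tuple; pass nested series through.
    def is_iterable_slicable(a):
        return hasattr(a, "__iter__") and hasattr(a, "__getitem__")
    if is_iterable_slicable(x):
        if is_iterable_slicable(x[0]):
            return x, y
        return (x,), (y,)
    raise ValueError("invalid input!")

print(preprocess_x_y([1, 2, 3], [4, 5, 6]))                 # -> (([1, 2, 3],), ([4, 5, 6],))
print(preprocess_x_y([[1, 2], [3, 4]], [[5, 6], [7, 8]]))   # nested input passes through unchanged
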
envi-idl/envipyengine | envipyengine/taskengine/taskengine.py | execute | def execute(input_params, engine, cwd=None):
"""
Execute a task with the provided input parameters
    :param input_params: Python dictionary containing all input parameters.
This will be converted to JSON before being passed
to the task engine.
:param engine: String specifying Task Engine type to run (ENVI, IDL, etc.)
:param cwd: Optionally specify the current working directory to be used
when spawning the task engine.
:return: A python dictionary representing the results JSON string generated
by the Task Engine.
"""
try:
taskengine_exe = config.get('engine')
except NoConfigOptionError:
raise TaskEngineNotFoundError(
"Task Engine config option not set." +
"\nPlease verify the 'engine' configuration setting.")
if not os.path.exists(taskengine_exe):
raise TaskEngineNotFoundError(
"Task Engine executable not found." +
"\nPlease verify the 'engine' configuration setting.")
# Get any arguments for the taskengine
engine_args = None
try:
engine_args = config.get('engine-args')
except NoConfigOptionError:
pass
# Get environment overrides if they exist
environment = None
config_environment = config.get_environment()
if config_environment:
environment = os.environ.copy()
environment.update(config_environment)
# Build up the args vector for popen
args = [taskengine_exe, engine]
if engine_args:
args.append(engine_args)
# Hide the Console Window on Windows OS
startupinfo = None
if sys.platform.startswith('win'):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
input_json = json.dumps(input_params)
process = Popen(args,
stdout=PIPE,
stdin=PIPE,
stderr=PIPE,
cwd=cwd,
env=environment,
startupinfo=startupinfo)
# taskengine output is in UTF8. Encode/Decode to UTF8
stdout, stderr = process.communicate(input=input_json.encode('utf-8'))
if process.returncode != 0:
if stderr != '':
raise TaskEngineExecutionError(stderr.decode('utf-8'))
else:
raise TaskEngineExecutionError(
'Task Engine exited with code: ' + str(process.returncode))
else:
return json.loads(stdout.decode('utf-8'), object_pairs_hook=OrderedDict) | python | def execute(input_params, engine, cwd=None):
"""
Execute a task with the provided input parameters
    :param input_params: Python dictionary containing all input parameters.
This will be converted to JSON before being passed
to the task engine.
:param engine: String specifying Task Engine type to run (ENVI, IDL, etc.)
:param cwd: Optionally specify the current working directory to be used
when spawning the task engine.
:return: A python dictionary representing the results JSON string generated
by the Task Engine.
"""
try:
taskengine_exe = config.get('engine')
except NoConfigOptionError:
raise TaskEngineNotFoundError(
"Task Engine config option not set." +
"\nPlease verify the 'engine' configuration setting.")
if not os.path.exists(taskengine_exe):
raise TaskEngineNotFoundError(
"Task Engine executable not found." +
"\nPlease verify the 'engine' configuration setting.")
# Get any arguments for the taskengine
engine_args = None
try:
engine_args = config.get('engine-args')
except NoConfigOptionError:
pass
# Get environment overrides if they exist
environment = None
config_environment = config.get_environment()
if config_environment:
environment = os.environ.copy()
environment.update(config_environment)
# Build up the args vector for popen
args = [taskengine_exe, engine]
if engine_args:
args.append(engine_args)
# Hide the Console Window on Windows OS
startupinfo = None
if sys.platform.startswith('win'):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
input_json = json.dumps(input_params)
process = Popen(args,
stdout=PIPE,
stdin=PIPE,
stderr=PIPE,
cwd=cwd,
env=environment,
startupinfo=startupinfo)
# taskengine output is in UTF8. Encode/Decode to UTF8
stdout, stderr = process.communicate(input=input_json.encode('utf-8'))
if process.returncode != 0:
if stderr != '':
raise TaskEngineExecutionError(stderr.decode('utf-8'))
else:
raise TaskEngineExecutionError(
'Task Engine exited with code: ' + str(process.returncode))
else:
return json.loads(stdout.decode('utf-8'), object_pairs_hook=OrderedDict) | [
"def",
"execute",
"(",
"input_params",
",",
"engine",
",",
"cwd",
"=",
"None",
")",
":",
"try",
":",
"taskengine_exe",
"=",
"config",
".",
"get",
"(",
"'engine'",
")",
"except",
"NoConfigOptionError",
":",
"raise",
"TaskEngineNotFoundError",
"(",
"\"Task Engine config option not set.\"",
"+",
"\"\\nPlease verify the 'engine' configuration setting.\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"taskengine_exe",
")",
":",
"raise",
"TaskEngineNotFoundError",
"(",
"\"Task Engine executable not found.\"",
"+",
"\"\\nPlease verify the 'engine' configuration setting.\"",
")",
"# Get any arguments for the taskengine",
"engine_args",
"=",
"None",
"try",
":",
"engine_args",
"=",
"config",
".",
"get",
"(",
"'engine-args'",
")",
"except",
"NoConfigOptionError",
":",
"pass",
"# Get environment overrides if they exist",
"environment",
"=",
"None",
"config_environment",
"=",
"config",
".",
"get_environment",
"(",
")",
"if",
"config_environment",
":",
"environment",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"environment",
".",
"update",
"(",
"config_environment",
")",
"# Build up the args vector for popen",
"args",
"=",
"[",
"taskengine_exe",
",",
"engine",
"]",
"if",
"engine_args",
":",
"args",
".",
"append",
"(",
"engine_args",
")",
"# Hide the Console Window on Windows OS",
"startupinfo",
"=",
"None",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'win'",
")",
":",
"startupinfo",
"=",
"subprocess",
".",
"STARTUPINFO",
"(",
")",
"startupinfo",
".",
"dwFlags",
"|=",
"subprocess",
".",
"STARTF_USESHOWWINDOW",
"input_json",
"=",
"json",
".",
"dumps",
"(",
"input_params",
")",
"process",
"=",
"Popen",
"(",
"args",
",",
"stdout",
"=",
"PIPE",
",",
"stdin",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
",",
"cwd",
"=",
"cwd",
",",
"env",
"=",
"environment",
",",
"startupinfo",
"=",
"startupinfo",
")",
"# taskengine output is in UTF8. Encode/Decode to UTF8 ",
"stdout",
",",
"stderr",
"=",
"process",
".",
"communicate",
"(",
"input",
"=",
"input_json",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"if",
"process",
".",
"returncode",
"!=",
"0",
":",
"if",
"stderr",
"!=",
"''",
":",
"raise",
"TaskEngineExecutionError",
"(",
"stderr",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"else",
":",
"raise",
"TaskEngineExecutionError",
"(",
"'Task Engine exited with code: '",
"+",
"str",
"(",
"process",
".",
"returncode",
")",
")",
"else",
":",
"return",
"json",
".",
"loads",
"(",
"stdout",
".",
"decode",
"(",
"'utf-8'",
")",
",",
"object_pairs_hook",
"=",
"OrderedDict",
")"
] | Execute a task with the provided input parameters
:param input_params: Python dictionary containing all input parameters.
This will be converted to JSON before being passed
to the task engine.
:param engine: String specifying Task Engine type to run (ENVI, IDL, etc.)
:param cwd: Optionally specify the current working directory to be used
when spawning the task engine.
:return: A python dictionary representing the results JSON string generated
by the Task Engine. | [
"Execute",
"a",
"task",
"with",
"the",
"provided",
"input",
"parameters"
] | 567b639d6592deec3289f6122a9e3d18f2f98432 | https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/taskengine/taskengine.py#L17-L84 | train |
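
A hedged call sketch for the execute function above. The import path is assumed from the file layout shown in the record, 'ENVI' is one of the engine strings named in the docstring, and the task name and parameter keys below are invented placeholders rather than real ENVI task parameters:

from envipyengine.taskengine.taskengine import execute   # path assumed from the record

input_params = {
    "taskName": "SomeHypotheticalTask",                       # placeholder task name
    "inputParameters": {"INPUT_RASTER": "/tmp/example.dat"},  # placeholder keys/values
}
try:
    result = execute(input_params, "ENVI")
    print(result)
except Exception as err:  # TaskEngineNotFoundError or TaskEngineExecutionError
    print("task engine unavailable:", err)
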
Godley/MuseParse | MuseParse/classes/Output/LilypondOutput.py | LilypondRenderer.run | def run(self, wrappers=["", ""]):
'''
run the lilypond script on the hierarchy class
:param wrappers: this is useful for testing: use wrappers to put something around the outputted "lilypond string" from the hierarchy class.
For example if you're testing a pitch, you might put \relative c {} around the note so that lilypond handles it properly without causing an error
:return: doesn't return anything, side effect that a PDF should be created.
'''
opened_file = open(self.lyfile, 'w')
lilystring = self.piece_obj.toLily()
opened_file.writelines(
wrappers[0] +
"\\version \"2.18.2\" \n" +
lilystring +
wrappers[1])
opened_file.close()
# subprocess.Popen(['sudo', self.lily_script," --output=" +
# self.folder, self.lyfile])
os.system(self.lily_script +
" --loglevel=WARNING --output=" +
self.folder + " " + self.lyfile
) | python | def run(self, wrappers=["", ""]):
'''
run the lilypond script on the hierarchy class
:param wrappers: this is useful for testing: use wrappers to put something around the outputted "lilypond string" from the hierarchy class.
For example if you're testing a pitch, you might put \relative c {} around the note so that lilypond handles it properly without causing an error
:return: doesn't return anything, side effect that a PDF should be created.
'''
opened_file = open(self.lyfile, 'w')
lilystring = self.piece_obj.toLily()
opened_file.writelines(
wrappers[0] +
"\\version \"2.18.2\" \n" +
lilystring +
wrappers[1])
opened_file.close()
# subprocess.Popen(['sudo', self.lily_script," --output=" +
# self.folder, self.lyfile])
os.system(self.lily_script +
" --loglevel=WARNING --output=" +
self.folder + " " + self.lyfile
) | [
"def",
"run",
"(",
"self",
",",
"wrappers",
"=",
"[",
"\"\"",
",",
"\"\"",
"]",
")",
":",
"opened_file",
"=",
"open",
"(",
"self",
".",
"lyfile",
",",
"'w'",
")",
"lilystring",
"=",
"self",
".",
"piece_obj",
".",
"toLily",
"(",
")",
"opened_file",
".",
"writelines",
"(",
"wrappers",
"[",
"0",
"]",
"+",
"\"\\\\version \\\"2.18.2\\\" \\n\"",
"+",
"lilystring",
"+",
"wrappers",
"[",
"1",
"]",
")",
"opened_file",
".",
"close",
"(",
")",
"# subprocess.Popen(['sudo', self.lily_script,\" --output=\" +",
"# self.folder, self.lyfile])",
"os",
".",
"system",
"(",
"self",
".",
"lily_script",
"+",
"\" --loglevel=WARNING --output=\"",
"+",
"self",
".",
"folder",
"+",
"\" \"",
"+",
"self",
".",
"lyfile",
")"
] | run the lilypond script on the hierarchy class
:param wrappers: this is useful for testing: use wrappers to put something around the outputted "lilypond string" from the hierarchy class.
For example if you're testing a pitch, you might put \relative c {} around the note so that lilypond handles it properly without causing an error
:return: doesn't return anything, side effect that a PDF should be created. | [
"run",
"the",
"lilypond",
"script",
"on",
"the",
"hierarchy",
"class"
] | 23cecafa1fdc0f2d6a87760553572b459f3c9904 | https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/Output/LilypondOutput.py#L42-L64 | train |
koszullab/metaTOR | metator/scripts/bins.py | extract_fasta | def extract_fasta(
partition_file,
fasta_file,
output_dir,
chunk_size=DEFAULT_CHUNK_SIZE,
max_cores=DEFAULT_MAX_CORES,
):
"""Extract sequences from bins
Identify bins, extract chunks belonging to each bins and gather them
in a single FASTA file.
Parameters
----------
partition_file : file, str or pathlib.Path
The file containing, for each chunk, the communities it was
assigned to at each iteration.
fasta_file : file, str or pathlib.Path
The initial assembly from which chunks were initialized.
output_dir : str or pathlib.Path
The output directory to write the FASTA chunks into.
chunk_size : int, optional
The size of the chunks (in bp) used in the pipeline. Default is 1000.
max_cores : int, optional
How many bins to extract FASTA sequences from. Default is 100.
"""
genome = {
record.id: record.seq for record in SeqIO.parse(fasta_file, "fasta")
}
data_chunks = list(
zip(*np.genfromtxt(partition_file, usecols=(0, 1), dtype=None))
)
chunk_names = np.array(data_chunks[0], dtype=object)
cores = np.array(data_chunks[1])
for core in set(cores):
if core > max_cores:
continue
chunks_to_keep = chunk_names[cores == core]
core_name = "core_{}.fa".format(core)
core_file = os.path.join(output_dir, core_name)
with open(core_file, "w") as core_handle:
for name in chunks_to_keep:
fields = name.split("_")
header_name = "_".join(fields[:-1])
chunk = int(fields[-1])
pos_start = chunk * chunk_size
pos_end = min(
(chunk + 1) * chunk_size, len(genome[header_name])
)
sequence = str(genome[header_name][pos_start:pos_end])
core_handle.write(">{}\n".format(name))
core_handle.write("{}\n".format(sequence)) | python | def extract_fasta(
partition_file,
fasta_file,
output_dir,
chunk_size=DEFAULT_CHUNK_SIZE,
max_cores=DEFAULT_MAX_CORES,
):
"""Extract sequences from bins
Identify bins, extract chunks belonging to each bins and gather them
in a single FASTA file.
Parameters
----------
partition_file : file, str or pathlib.Path
The file containing, for each chunk, the communities it was
assigned to at each iteration.
fasta_file : file, str or pathlib.Path
The initial assembly from which chunks were initialized.
output_dir : str or pathlib.Path
The output directory to write the FASTA chunks into.
chunk_size : int, optional
The size of the chunks (in bp) used in the pipeline. Default is 1000.
max_cores : int, optional
How many bins to extract FASTA sequences from. Default is 100.
"""
genome = {
record.id: record.seq for record in SeqIO.parse(fasta_file, "fasta")
}
data_chunks = list(
zip(*np.genfromtxt(partition_file, usecols=(0, 1), dtype=None))
)
chunk_names = np.array(data_chunks[0], dtype=object)
cores = np.array(data_chunks[1])
for core in set(cores):
if core > max_cores:
continue
chunks_to_keep = chunk_names[cores == core]
core_name = "core_{}.fa".format(core)
core_file = os.path.join(output_dir, core_name)
with open(core_file, "w") as core_handle:
for name in chunks_to_keep:
fields = name.split("_")
header_name = "_".join(fields[:-1])
chunk = int(fields[-1])
pos_start = chunk * chunk_size
pos_end = min(
(chunk + 1) * chunk_size, len(genome[header_name])
)
sequence = str(genome[header_name][pos_start:pos_end])
core_handle.write(">{}\n".format(name))
core_handle.write("{}\n".format(sequence)) | [
"def",
"extract_fasta",
"(",
"partition_file",
",",
"fasta_file",
",",
"output_dir",
",",
"chunk_size",
"=",
"DEFAULT_CHUNK_SIZE",
",",
"max_cores",
"=",
"DEFAULT_MAX_CORES",
",",
")",
":",
"genome",
"=",
"{",
"record",
".",
"id",
":",
"record",
".",
"seq",
"for",
"record",
"in",
"SeqIO",
".",
"parse",
"(",
"fasta_file",
",",
"\"fasta\"",
")",
"}",
"data_chunks",
"=",
"list",
"(",
"zip",
"(",
"*",
"np",
".",
"genfromtxt",
"(",
"partition_file",
",",
"usecols",
"=",
"(",
"0",
",",
"1",
")",
",",
"dtype",
"=",
"None",
")",
")",
")",
"chunk_names",
"=",
"np",
".",
"array",
"(",
"data_chunks",
"[",
"0",
"]",
",",
"dtype",
"=",
"object",
")",
"cores",
"=",
"np",
".",
"array",
"(",
"data_chunks",
"[",
"1",
"]",
")",
"for",
"core",
"in",
"set",
"(",
"cores",
")",
":",
"if",
"core",
">",
"max_cores",
":",
"continue",
"chunks_to_keep",
"=",
"chunk_names",
"[",
"cores",
"==",
"core",
"]",
"core_name",
"=",
"\"core_{}.fa\"",
".",
"format",
"(",
"core",
")",
"core_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"core_name",
")",
"with",
"open",
"(",
"core_file",
",",
"\"w\"",
")",
"as",
"core_handle",
":",
"for",
"name",
"in",
"chunks_to_keep",
":",
"fields",
"=",
"name",
".",
"split",
"(",
"\"_\"",
")",
"header_name",
"=",
"\"_\"",
".",
"join",
"(",
"fields",
"[",
":",
"-",
"1",
"]",
")",
"chunk",
"=",
"int",
"(",
"fields",
"[",
"-",
"1",
"]",
")",
"pos_start",
"=",
"chunk",
"*",
"chunk_size",
"pos_end",
"=",
"min",
"(",
"(",
"chunk",
"+",
"1",
")",
"*",
"chunk_size",
",",
"len",
"(",
"genome",
"[",
"header_name",
"]",
")",
")",
"sequence",
"=",
"str",
"(",
"genome",
"[",
"header_name",
"]",
"[",
"pos_start",
":",
"pos_end",
"]",
")",
"core_handle",
".",
"write",
"(",
"\">{}\\n\"",
".",
"format",
"(",
"name",
")",
")",
"core_handle",
".",
"write",
"(",
"\"{}\\n\"",
".",
"format",
"(",
"sequence",
")",
")"
] | Extract sequences from bins
Identify bins, extract chunks belonging to each bins and gather them
in a single FASTA file.
Parameters
----------
partition_file : file, str or pathlib.Path
The file containing, for each chunk, the communities it was
assigned to at each iteration.
fasta_file : file, str or pathlib.Path
The initial assembly from which chunks were initialized.
output_dir : str or pathlib.Path
The output directory to write the FASTA chunks into.
chunk_size : int, optional
The size of the chunks (in bp) used in the pipeline. Default is 1000.
max_cores : int, optional
How many bins to extract FASTA sequences from. Default is 100. | [
"Extract",
"sequences",
"from",
"bins"
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/bins.py#L183-L243 | train |
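
A usage sketch for extract_fasta above. File names are invented; the partition file is assumed to hold one "chunk_name community_id" pair per line (the two columns read by np.genfromtxt in the record), and the output directory is assumed to already exist:

from metator.scripts.bins import extract_fasta   # import path assumed from the record

extract_fasta(
    "partition.txt",   # chunk name + community id per line (assumed layout)
    "assembly.fa",     # initial assembly the chunks were derived from
    "out_bins",        # existing directory; one core_<n>.fa is written per bin
    chunk_size=1000,
    max_cores=100,
)
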
koszullab/metaTOR | metator/scripts/bins.py | merge_fasta | def merge_fasta(fasta_file, output_dir):
"""Merge chunks into complete FASTA bins
Merge bin chunks by appending consecutive chunks to one another.
Parameters
----------
fasta_file : file, str or pathlib.Path
The FASTA file containing the chunks to merge.
output_dir : str or pathlib.Path
The output directory to write the merged FASTA bin into.
"""
# First, define some functions for ordering chunks and detecting
# consecutive chunk sequences
def chunk_lexicographic_order(chunk):
"""A quick callback to sort chunk ids lexicographically
(first on original names alphabetically, then on relative
position on the original contig)
"""
chunk_fields = chunk.split("_")
chunk_name = chunk_fields[:-1]
chunk_id = chunk_fields[-1]
return (chunk_name, int(chunk_id))
def are_consecutive(chunk1, chunk2):
if None in {chunk1, chunk2}:
return False
else:
ord1 = chunk_lexicographic_order(chunk1)
ord2 = chunk_lexicographic_order(chunk2)
return (ord1[0] == ord2[0]) and (ord1[1] == ord2[1] + 1)
def consecutiveness(key_chunk_pair):
"""A callback for the groupby magic below
"""
key, chunk = key_chunk_pair
chunk_name, chunk_id = chunk_lexicographic_order(chunk)
return (chunk_name, chunk_id - key)
# Read chunks and sort them
genome = {
record.id: record.seq for record in SeqIO.parse(fasta_file, "fasta")
}
sorted_ids = sorted(genome, key=chunk_lexicographic_order)
# Identify consecutive ranges and merge them
new_genome = dict()
for _, g in itertools.groupby(enumerate(sorted_ids), consecutiveness):
chunk_range = map(operator.itemgetter(1), g)
first_chunk = next(chunk_range)
my_sequence = genome[first_chunk]
my_chunk = None
while "Reading chunk range":
try:
my_chunk = next(chunk_range)
my_sequence += genome[my_chunk]
except StopIteration:
break
try:
last_chunk_id = my_chunk.split("_")[-1]
except AttributeError:
last_chunk_id = ""
if last_chunk_id:
new_chunk_id = "{}_{}".format(first_chunk, last_chunk_id)
else:
new_chunk_id = first_chunk
new_genome[new_chunk_id] = my_sequence
# Write the result
base_name = ".".join(os.path.basename(fasta_file).split(".")[:-1])
output_name = "{}_merged.fa".format(base_name)
merged_core_file = os.path.join(output_dir, output_name)
with open(merged_core_file, "w") as output_handle:
for my_id in sorted(new_genome, key=chunk_lexicographic_order):
output_handle.write(">{}\n".format(my_id))
output_handle.write("{}\n".format(new_genome[my_id])) | python | def merge_fasta(fasta_file, output_dir):
"""Merge chunks into complete FASTA bins
Merge bin chunks by appending consecutive chunks to one another.
Parameters
----------
fasta_file : file, str or pathlib.Path
The FASTA file containing the chunks to merge.
output_dir : str or pathlib.Path
The output directory to write the merged FASTA bin into.
"""
# First, define some functions for ordering chunks and detecting
# consecutive chunk sequences
def chunk_lexicographic_order(chunk):
"""A quick callback to sort chunk ids lexicographically
(first on original names alphabetically, then on relative
position on the original contig)
"""
chunk_fields = chunk.split("_")
chunk_name = chunk_fields[:-1]
chunk_id = chunk_fields[-1]
return (chunk_name, int(chunk_id))
def are_consecutive(chunk1, chunk2):
if None in {chunk1, chunk2}:
return False
else:
ord1 = chunk_lexicographic_order(chunk1)
ord2 = chunk_lexicographic_order(chunk2)
return (ord1[0] == ord2[0]) and (ord1[1] == ord2[1] + 1)
def consecutiveness(key_chunk_pair):
"""A callback for the groupby magic below
"""
key, chunk = key_chunk_pair
chunk_name, chunk_id = chunk_lexicographic_order(chunk)
return (chunk_name, chunk_id - key)
# Read chunks and sort them
genome = {
record.id: record.seq for record in SeqIO.parse(fasta_file, "fasta")
}
sorted_ids = sorted(genome, key=chunk_lexicographic_order)
# Identify consecutive ranges and merge them
new_genome = dict()
for _, g in itertools.groupby(enumerate(sorted_ids), consecutiveness):
chunk_range = map(operator.itemgetter(1), g)
first_chunk = next(chunk_range)
my_sequence = genome[first_chunk]
my_chunk = None
while "Reading chunk range":
try:
my_chunk = next(chunk_range)
my_sequence += genome[my_chunk]
except StopIteration:
break
try:
last_chunk_id = my_chunk.split("_")[-1]
except AttributeError:
last_chunk_id = ""
if last_chunk_id:
new_chunk_id = "{}_{}".format(first_chunk, last_chunk_id)
else:
new_chunk_id = first_chunk
new_genome[new_chunk_id] = my_sequence
# Write the result
base_name = ".".join(os.path.basename(fasta_file).split(".")[:-1])
output_name = "{}_merged.fa".format(base_name)
merged_core_file = os.path.join(output_dir, output_name)
with open(merged_core_file, "w") as output_handle:
for my_id in sorted(new_genome, key=chunk_lexicographic_order):
output_handle.write(">{}\n".format(my_id))
output_handle.write("{}\n".format(new_genome[my_id])) | [
"def",
"merge_fasta",
"(",
"fasta_file",
",",
"output_dir",
")",
":",
"# First, define some functions for ordering chunks and detecting",
"# consecutive chunk sequences",
"def",
"chunk_lexicographic_order",
"(",
"chunk",
")",
":",
"\"\"\"A quick callback to sort chunk ids lexicographically\n (first on original names alphabetically, then on relative\n position on the original contig)\n \"\"\"",
"chunk_fields",
"=",
"chunk",
".",
"split",
"(",
"\"_\"",
")",
"chunk_name",
"=",
"chunk_fields",
"[",
":",
"-",
"1",
"]",
"chunk_id",
"=",
"chunk_fields",
"[",
"-",
"1",
"]",
"return",
"(",
"chunk_name",
",",
"int",
"(",
"chunk_id",
")",
")",
"def",
"are_consecutive",
"(",
"chunk1",
",",
"chunk2",
")",
":",
"if",
"None",
"in",
"{",
"chunk1",
",",
"chunk2",
"}",
":",
"return",
"False",
"else",
":",
"ord1",
"=",
"chunk_lexicographic_order",
"(",
"chunk1",
")",
"ord2",
"=",
"chunk_lexicographic_order",
"(",
"chunk2",
")",
"return",
"(",
"ord1",
"[",
"0",
"]",
"==",
"ord2",
"[",
"0",
"]",
")",
"and",
"(",
"ord1",
"[",
"1",
"]",
"==",
"ord2",
"[",
"1",
"]",
"+",
"1",
")",
"def",
"consecutiveness",
"(",
"key_chunk_pair",
")",
":",
"\"\"\"A callback for the groupby magic below\n \"\"\"",
"key",
",",
"chunk",
"=",
"key_chunk_pair",
"chunk_name",
",",
"chunk_id",
"=",
"chunk_lexicographic_order",
"(",
"chunk",
")",
"return",
"(",
"chunk_name",
",",
"chunk_id",
"-",
"key",
")",
"# Read chunks and sort them",
"genome",
"=",
"{",
"record",
".",
"id",
":",
"record",
".",
"seq",
"for",
"record",
"in",
"SeqIO",
".",
"parse",
"(",
"fasta_file",
",",
"\"fasta\"",
")",
"}",
"sorted_ids",
"=",
"sorted",
"(",
"genome",
",",
"key",
"=",
"chunk_lexicographic_order",
")",
"# Identify consecutive ranges and merge them",
"new_genome",
"=",
"dict",
"(",
")",
"for",
"_",
",",
"g",
"in",
"itertools",
".",
"groupby",
"(",
"enumerate",
"(",
"sorted_ids",
")",
",",
"consecutiveness",
")",
":",
"chunk_range",
"=",
"map",
"(",
"operator",
".",
"itemgetter",
"(",
"1",
")",
",",
"g",
")",
"first_chunk",
"=",
"next",
"(",
"chunk_range",
")",
"my_sequence",
"=",
"genome",
"[",
"first_chunk",
"]",
"my_chunk",
"=",
"None",
"while",
"\"Reading chunk range\"",
":",
"try",
":",
"my_chunk",
"=",
"next",
"(",
"chunk_range",
")",
"my_sequence",
"+=",
"genome",
"[",
"my_chunk",
"]",
"except",
"StopIteration",
":",
"break",
"try",
":",
"last_chunk_id",
"=",
"my_chunk",
".",
"split",
"(",
"\"_\"",
")",
"[",
"-",
"1",
"]",
"except",
"AttributeError",
":",
"last_chunk_id",
"=",
"\"\"",
"if",
"last_chunk_id",
":",
"new_chunk_id",
"=",
"\"{}_{}\"",
".",
"format",
"(",
"first_chunk",
",",
"last_chunk_id",
")",
"else",
":",
"new_chunk_id",
"=",
"first_chunk",
"new_genome",
"[",
"new_chunk_id",
"]",
"=",
"my_sequence",
"# Write the result",
"base_name",
"=",
"\".\"",
".",
"join",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"fasta_file",
")",
".",
"split",
"(",
"\".\"",
")",
"[",
":",
"-",
"1",
"]",
")",
"output_name",
"=",
"\"{}_merged.fa\"",
".",
"format",
"(",
"base_name",
")",
"merged_core_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"output_name",
")",
"with",
"open",
"(",
"merged_core_file",
",",
"\"w\"",
")",
"as",
"output_handle",
":",
"for",
"my_id",
"in",
"sorted",
"(",
"new_genome",
",",
"key",
"=",
"chunk_lexicographic_order",
")",
":",
"output_handle",
".",
"write",
"(",
"\">{}\\n\"",
".",
"format",
"(",
"my_id",
")",
")",
"output_handle",
".",
"write",
"(",
"\"{}\\n\"",
".",
"format",
"(",
"new_genome",
"[",
"my_id",
"]",
")",
")"
] | Merge chunks into complete FASTA bins
Merge bin chunks by appending consecutive chunks to one another.
Parameters
----------
fasta_file : file, str or pathlib.Path
The FASTA file containing the chunks to merge.
output_dir : str or pathlib.Path
The output directory to write the merged FASTA bin into. | [
"Merge",
"chunks",
"into",
"complete",
"FASTA",
"bins"
] | 0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/bins.py#L246-L327 | train |
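
merge_fasta above collapses consecutive chunk records: chunks named e.g. contig1_0, contig1_1, contig1_2 are concatenated into a single record named contig1_0_2, and the result is written next to the input as <basename>_merged.fa. A call sketch with invented paths:

from metator.scripts.bins import merge_fasta   # import path assumed from the record

# Reads out_bins/core_3.fa and writes out_bins/core_3_merged.fa.
merge_fasta("out_bins/core_3.fa", "out_bins")
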
nugget/python-anthemav | anthemav/tools.py | monitor | def monitor():
"""Wrapper to call console with a loop."""
log = logging.getLogger(__name__)
loop = asyncio.get_event_loop()
asyncio.ensure_future(console(loop, log))
loop.run_forever() | python | def monitor():
"""Wrapper to call console with a loop."""
log = logging.getLogger(__name__)
loop = asyncio.get_event_loop()
asyncio.ensure_future(console(loop, log))
loop.run_forever() | [
"def",
"monitor",
"(",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"asyncio",
".",
"ensure_future",
"(",
"console",
"(",
"loop",
",",
"log",
")",
")",
"loop",
".",
"run_forever",
"(",
")"
] | Wrapper to call console with a loop. | [
"Wrapper",
"to",
"call",
"console",
"with",
"a",
"loop",
"."
] | c3cee38f2d452c1ab1335d9885e0769ec24d5f90 | https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/tools.py#L60-L65 | train |
eclipse/unide.python | src/unide/schema.py | make_object | def make_object(cls, data):
"""Creates an API object of class `cls`, setting its `_data` to
data. Subclasses of `Object` are required to use this to build a
new, empty instance without using their constructor.
"""
if issubclass(cls, Object):
self = object.__new__(cls)
self._data = data
else:
self = data
return self | python | def make_object(cls, data):
"""Creates an API object of class `cls`, setting its `_data` to
data. Subclasses of `Object` are required to use this to build a
new, empty instance without using their constructor.
"""
if issubclass(cls, Object):
self = object.__new__(cls)
self._data = data
else:
self = data
return self | [
"def",
"make_object",
"(",
"cls",
",",
"data",
")",
":",
"if",
"issubclass",
"(",
"cls",
",",
"Object",
")",
":",
"self",
"=",
"object",
".",
"__new__",
"(",
"cls",
")",
"self",
".",
"_data",
"=",
"data",
"else",
":",
"self",
"=",
"data",
"return",
"self"
] | Creates an API object of class `cls`, setting its `_data` to
data. Subclasses of `Object` are required to use this to build a
new, empty instance without using their constructor. | [
"Creates",
"an",
"API",
"object",
"of",
"class",
"cls",
"setting",
"its",
"_data",
"to",
"data",
".",
"Subclasses",
"of",
"Object",
"are",
"required",
"to",
"use",
"this",
"to",
"build",
"a",
"new",
"empty",
"instance",
"without",
"using",
"their",
"constructor",
"."
] | b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493 | https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/schema.py#L296-L306 | train |
eclipse/unide.python | src/unide/schema.py | String | def String(length=None, **kwargs):
"""A string valued property with max. `length`."""
return Property(
length=length,
types=stringy_types,
convert=to_string,
**kwargs
) | python | def String(length=None, **kwargs):
"""A string valued property with max. `length`."""
return Property(
length=length,
types=stringy_types,
convert=to_string,
**kwargs
) | [
"def",
"String",
"(",
"length",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"Property",
"(",
"length",
"=",
"length",
",",
"types",
"=",
"stringy_types",
",",
"convert",
"=",
"to_string",
",",
"*",
"*",
"kwargs",
")"
] | A string valued property with max. `length`. | [
"A",
"string",
"valued",
"property",
"with",
"max",
".",
"length",
"."
] | b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493 | https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/schema.py#L365-L372 | train |
eclipse/unide.python | src/unide/schema.py | Datetime | def Datetime(null=True, **kwargs):
"""A datetime property."""
return Property(
types=datetime.datetime,
convert=util.local_timezone,
load=dateutil.parser.parse,
null=null,
**kwargs
) | python | def Datetime(null=True, **kwargs):
"""A datetime property."""
return Property(
types=datetime.datetime,
convert=util.local_timezone,
load=dateutil.parser.parse,
null=null,
**kwargs
) | [
"def",
"Datetime",
"(",
"null",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"Property",
"(",
"types",
"=",
"datetime",
".",
"datetime",
",",
"convert",
"=",
"util",
".",
"local_timezone",
",",
"load",
"=",
"dateutil",
".",
"parser",
".",
"parse",
",",
"null",
"=",
"null",
",",
"*",
"*",
"kwargs",
")"
] | A datetime property. | [
"A",
"datetime",
"property",
"."
] | b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493 | https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/schema.py#L395-L403 | train |
eclipse/unide.python | src/unide/schema.py | InstanceOf | def InstanceOf(cls, **kwargs):
"""A property that is an instance of `cls`."""
return Property(types=cls, load=cls.load, **kwargs) | python | def InstanceOf(cls, **kwargs):
"""A property that is an instance of `cls`."""
return Property(types=cls, load=cls.load, **kwargs) | [
"def",
"InstanceOf",
"(",
"cls",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"Property",
"(",
"types",
"=",
"cls",
",",
"load",
"=",
"cls",
".",
"load",
",",
"*",
"*",
"kwargs",
")"
] | A property that is an instance of `cls`. | [
"A",
"property",
"that",
"is",
"an",
"instance",
"of",
"cls",
"."
] | b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493 | https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/schema.py#L406-L408 | train |
eclipse/unide.python | src/unide/schema.py | ListOf | def ListOf(cls, **kwargs):
"""A property that is a list of `cls`."""
def _list_load(value):
return [cls.load(d) for d in value]
return Property(types=list, load=_list_load, default=list, **kwargs) | python | def ListOf(cls, **kwargs):
"""A property that is a list of `cls`."""
def _list_load(value):
return [cls.load(d) for d in value]
return Property(types=list, load=_list_load, default=list, **kwargs) | [
"def",
"ListOf",
"(",
"cls",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"_list_load",
"(",
"value",
")",
":",
"return",
"[",
"cls",
".",
"load",
"(",
"d",
")",
"for",
"d",
"in",
"value",
"]",
"return",
"Property",
"(",
"types",
"=",
"list",
",",
"load",
"=",
"_list_load",
",",
"default",
"=",
"list",
",",
"*",
"*",
"kwargs",
")"
] | A property that is a list of `cls`. | [
"A",
"property",
"that",
"is",
"a",
"list",
"of",
"cls",
"."
] | b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493 | https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/schema.py#L411-L417 | train |
eclipse/unide.python | src/unide/schema.py | HasDimensions.add_dimension | def add_dimension(self, name, data=None):
"""Add a named dimension to this entity."""
self.dimensions.add(name)
if data is None:
valobj = self.__dimtype__()
else:
valobj = make_object(self.__dimtype__, data)
self._data[name] = valobj
setattr(self, name, valobj)
return valobj | python | def add_dimension(self, name, data=None):
"""Add a named dimension to this entity."""
self.dimensions.add(name)
if data is None:
valobj = self.__dimtype__()
else:
valobj = make_object(self.__dimtype__, data)
self._data[name] = valobj
setattr(self, name, valobj)
return valobj | [
"def",
"add_dimension",
"(",
"self",
",",
"name",
",",
"data",
"=",
"None",
")",
":",
"self",
".",
"dimensions",
".",
"add",
"(",
"name",
")",
"if",
"data",
"is",
"None",
":",
"valobj",
"=",
"self",
".",
"__dimtype__",
"(",
")",
"else",
":",
"valobj",
"=",
"make_object",
"(",
"self",
".",
"__dimtype__",
",",
"data",
")",
"self",
".",
"_data",
"[",
"name",
"]",
"=",
"valobj",
"setattr",
"(",
"self",
",",
"name",
",",
"valobj",
")",
"return",
"valobj"
] | Add a named dimension to this entity. | [
"Add",
"a",
"named",
"dimension",
"to",
"this",
"entity",
"."
] | b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493 | https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/schema.py#L323-L332 | train |
MoseleyBioinformaticsLab/mwtab | mwtab/mwtab.py | MWTabFile.print_block | def print_block(self, section_key, f=sys.stdout, file_format="mwtab"):
"""Print `mwtab` section into a file or stdout.
:param str section_key: Section name.
:param io.StringIO f: writable file-like stream.
:param str file_format: Format to use: `mwtab` or `json`.
:return: None
:rtype: :py:obj:`None`
"""
if file_format == "mwtab":
for key, value in self[section_key].items():
if section_key == "METABOLOMICS WORKBENCH" and key not in ("VERSION", "CREATED_ON"):
continue
if key in ("VERSION", "CREATED_ON"):
cw = 20 - len(key)
elif key in ("SUBJECT_SAMPLE_FACTORS", ):
cw = 33 - len(key)
else:
cw = 30 - len(key)
if "\n" in value:
for line in value.split("\n"):
print("{}{}{}\t{}".format(self.prefixes.get(section_key, ""), key, cw * " ", line), file=f)
elif key == "SUBJECT_SAMPLE_FACTORS":
for factor in value:
print("{}{}\t{}".format(key, cw * " ", "\t".join(factor.values())), file=f)
elif key.endswith(":UNITS"):
print("{}\t{}".format(key, value), file=f)
elif key.endswith("_RESULTS_FILE"):
if isinstance(value, dict):
print("{}{} \t{}\t{}:{}".format(self.prefixes.get(section_key, ""),
*[i for pair in value.items() for i in pair]), file=f)
else:
print("{}{}{}\t{}".format(self.prefixes.get(section_key, ""), key, cw * " ", value), file=f)
elif key.endswith("_START"):
start_key = key
end_key = "{}{}".format(start_key[:-5], "END")
print(start_key, file=f)
for data_key in value:
if data_key in ("Samples", "Factors"):
print("{}\t{}".format(data_key, "\t".join(self[section_key][key][data_key])), file=f)
elif data_key in ("Fields", ):
print("{}".format("\t".join(self[section_key][key][data_key])), file=f)
elif data_key == "DATA":
for data in self[section_key][key][data_key]:
print("\t".join(data.values()), file=f)
print(end_key, file=f)
else:
print("{}{}{}\t{}".format(self.prefixes.get(section_key, ""), key, cw * " ", value), file=f)
elif file_format == "json":
print(json.dumps(self[section_key], sort_keys=False, indent=4), file=f) | python | def print_block(self, section_key, f=sys.stdout, file_format="mwtab"):
"""Print `mwtab` section into a file or stdout.
:param str section_key: Section name.
:param io.StringIO f: writable file-like stream.
:param str file_format: Format to use: `mwtab` or `json`.
:return: None
:rtype: :py:obj:`None`
"""
if file_format == "mwtab":
for key, value in self[section_key].items():
if section_key == "METABOLOMICS WORKBENCH" and key not in ("VERSION", "CREATED_ON"):
continue
if key in ("VERSION", "CREATED_ON"):
cw = 20 - len(key)
elif key in ("SUBJECT_SAMPLE_FACTORS", ):
cw = 33 - len(key)
else:
cw = 30 - len(key)
if "\n" in value:
for line in value.split("\n"):
print("{}{}{}\t{}".format(self.prefixes.get(section_key, ""), key, cw * " ", line), file=f)
elif key == "SUBJECT_SAMPLE_FACTORS":
for factor in value:
print("{}{}\t{}".format(key, cw * " ", "\t".join(factor.values())), file=f)
elif key.endswith(":UNITS"):
print("{}\t{}".format(key, value), file=f)
elif key.endswith("_RESULTS_FILE"):
if isinstance(value, dict):
print("{}{} \t{}\t{}:{}".format(self.prefixes.get(section_key, ""),
*[i for pair in value.items() for i in pair]), file=f)
else:
print("{}{}{}\t{}".format(self.prefixes.get(section_key, ""), key, cw * " ", value), file=f)
elif key.endswith("_START"):
start_key = key
end_key = "{}{}".format(start_key[:-5], "END")
print(start_key, file=f)
for data_key in value:
if data_key in ("Samples", "Factors"):
print("{}\t{}".format(data_key, "\t".join(self[section_key][key][data_key])), file=f)
elif data_key in ("Fields", ):
print("{}".format("\t".join(self[section_key][key][data_key])), file=f)
elif data_key == "DATA":
for data in self[section_key][key][data_key]:
print("\t".join(data.values()), file=f)
print(end_key, file=f)
else:
print("{}{}{}\t{}".format(self.prefixes.get(section_key, ""), key, cw * " ", value), file=f)
elif file_format == "json":
print(json.dumps(self[section_key], sort_keys=False, indent=4), file=f) | [
"def",
"print_block",
"(",
"self",
",",
"section_key",
",",
"f",
"=",
"sys",
".",
"stdout",
",",
"file_format",
"=",
"\"mwtab\"",
")",
":",
"if",
"file_format",
"==",
"\"mwtab\"",
":",
"for",
"key",
",",
"value",
"in",
"self",
"[",
"section_key",
"]",
".",
"items",
"(",
")",
":",
"if",
"section_key",
"==",
"\"METABOLOMICS WORKBENCH\"",
"and",
"key",
"not",
"in",
"(",
"\"VERSION\"",
",",
"\"CREATED_ON\"",
")",
":",
"continue",
"if",
"key",
"in",
"(",
"\"VERSION\"",
",",
"\"CREATED_ON\"",
")",
":",
"cw",
"=",
"20",
"-",
"len",
"(",
"key",
")",
"elif",
"key",
"in",
"(",
"\"SUBJECT_SAMPLE_FACTORS\"",
",",
")",
":",
"cw",
"=",
"33",
"-",
"len",
"(",
"key",
")",
"else",
":",
"cw",
"=",
"30",
"-",
"len",
"(",
"key",
")",
"if",
"\"\\n\"",
"in",
"value",
":",
"for",
"line",
"in",
"value",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"print",
"(",
"\"{}{}{}\\t{}\"",
".",
"format",
"(",
"self",
".",
"prefixes",
".",
"get",
"(",
"section_key",
",",
"\"\"",
")",
",",
"key",
",",
"cw",
"*",
"\" \"",
",",
"line",
")",
",",
"file",
"=",
"f",
")",
"elif",
"key",
"==",
"\"SUBJECT_SAMPLE_FACTORS\"",
":",
"for",
"factor",
"in",
"value",
":",
"print",
"(",
"\"{}{}\\t{}\"",
".",
"format",
"(",
"key",
",",
"cw",
"*",
"\" \"",
",",
"\"\\t\"",
".",
"join",
"(",
"factor",
".",
"values",
"(",
")",
")",
")",
",",
"file",
"=",
"f",
")",
"elif",
"key",
".",
"endswith",
"(",
"\":UNITS\"",
")",
":",
"print",
"(",
"\"{}\\t{}\"",
".",
"format",
"(",
"key",
",",
"value",
")",
",",
"file",
"=",
"f",
")",
"elif",
"key",
".",
"endswith",
"(",
"\"_RESULTS_FILE\"",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"print",
"(",
"\"{}{} \\t{}\\t{}:{}\"",
".",
"format",
"(",
"self",
".",
"prefixes",
".",
"get",
"(",
"section_key",
",",
"\"\"",
")",
",",
"*",
"[",
"i",
"for",
"pair",
"in",
"value",
".",
"items",
"(",
")",
"for",
"i",
"in",
"pair",
"]",
")",
",",
"file",
"=",
"f",
")",
"else",
":",
"print",
"(",
"\"{}{}{}\\t{}\"",
".",
"format",
"(",
"self",
".",
"prefixes",
".",
"get",
"(",
"section_key",
",",
"\"\"",
")",
",",
"key",
",",
"cw",
"*",
"\" \"",
",",
"value",
")",
",",
"file",
"=",
"f",
")",
"elif",
"key",
".",
"endswith",
"(",
"\"_START\"",
")",
":",
"start_key",
"=",
"key",
"end_key",
"=",
"\"{}{}\"",
".",
"format",
"(",
"start_key",
"[",
":",
"-",
"5",
"]",
",",
"\"END\"",
")",
"print",
"(",
"start_key",
",",
"file",
"=",
"f",
")",
"for",
"data_key",
"in",
"value",
":",
"if",
"data_key",
"in",
"(",
"\"Samples\"",
",",
"\"Factors\"",
")",
":",
"print",
"(",
"\"{}\\t{}\"",
".",
"format",
"(",
"data_key",
",",
"\"\\t\"",
".",
"join",
"(",
"self",
"[",
"section_key",
"]",
"[",
"key",
"]",
"[",
"data_key",
"]",
")",
")",
",",
"file",
"=",
"f",
")",
"elif",
"data_key",
"in",
"(",
"\"Fields\"",
",",
")",
":",
"print",
"(",
"\"{}\"",
".",
"format",
"(",
"\"\\t\"",
".",
"join",
"(",
"self",
"[",
"section_key",
"]",
"[",
"key",
"]",
"[",
"data_key",
"]",
")",
")",
",",
"file",
"=",
"f",
")",
"elif",
"data_key",
"==",
"\"DATA\"",
":",
"for",
"data",
"in",
"self",
"[",
"section_key",
"]",
"[",
"key",
"]",
"[",
"data_key",
"]",
":",
"print",
"(",
"\"\\t\"",
".",
"join",
"(",
"data",
".",
"values",
"(",
")",
")",
",",
"file",
"=",
"f",
")",
"print",
"(",
"end_key",
",",
"file",
"=",
"f",
")",
"else",
":",
"print",
"(",
"\"{}{}{}\\t{}\"",
".",
"format",
"(",
"self",
".",
"prefixes",
".",
"get",
"(",
"section_key",
",",
"\"\"",
")",
",",
"key",
",",
"cw",
"*",
"\" \"",
",",
"value",
")",
",",
"file",
"=",
"f",
")",
"elif",
"file_format",
"==",
"\"json\"",
":",
"print",
"(",
"json",
".",
"dumps",
"(",
"self",
"[",
"section_key",
"]",
",",
"sort_keys",
"=",
"False",
",",
"indent",
"=",
"4",
")",
",",
"file",
"=",
"f",
")"
] | Print `mwtab` section into a file or stdout.
:param str section_key: Section name.
:param io.StringIO f: writable file-like stream.
:param str file_format: Format to use: `mwtab` or `json`.
:return: None
:rtype: :py:obj:`None` | [
"Print",
"mwtab",
"section",
"into",
"a",
"file",
"or",
"stdout",
"."
] | 8c0ae8ab2aa621662f99589ed41e481cf8b7152b | https://github.com/MoseleyBioinformaticsLab/mwtab/blob/8c0ae8ab2aa621662f99589ed41e481cf8b7152b/mwtab/mwtab.py#L233-L293 | train |
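
A minimal usage sketch for the print_block method above. The import path, the input file name and the "PROJECT" section key are illustrative assumptions, not taken from the record; only the method signature and read() come from the code shown.

from mwtab.mwtab import MWTabFile   # assumed import path, mirroring mwtab/mwtab.py above

mwfile = MWTabFile("ST000001.txt")            # hypothetical mwTab source file
with open("ST000001.txt") as infile:
    mwfile.read(infile)
with open("project_section.txt", "w") as outfile:
    mwfile.print_block("PROJECT", f=outfile, file_format="mwtab")   # "PROJECT" is an assumed section key
mwfile.print_block("PROJECT", file_format="json")                   # f defaults to sys.stdout
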
MoseleyBioinformaticsLab/mwtab | mwtab/mwtab.py | MWTabFile._is_mwtab | def _is_mwtab(string):
"""Test if input string is in `mwtab` format.
:param string: Input string.
:type string: :py:class:`str` or :py:class:`bytes`
:return: Input string if in mwTab format or False otherwise.
:rtype: :py:class:`str` or :py:obj:`False`
"""
if isinstance(string, str):
lines = string.split("\n")
elif isinstance(string, bytes):
lines = string.decode("utf-8").split("\n")
else:
raise TypeError("Expecting <class 'str'> or <class 'bytes'>, but {} was passed".format(type(string)))
lines = [line for line in lines if line]
header = lines[0]
if header.startswith("#METABOLOMICS WORKBENCH"):
return "\n".join(lines)
return False | python | def _is_mwtab(string):
"""Test if input string is in `mwtab` format.
:param string: Input string.
:type string: :py:class:`str` or :py:class:`bytes`
:return: Input string if in mwTab format or False otherwise.
:rtype: :py:class:`str` or :py:obj:`False`
"""
if isinstance(string, str):
lines = string.split("\n")
elif isinstance(string, bytes):
lines = string.decode("utf-8").split("\n")
else:
raise TypeError("Expecting <class 'str'> or <class 'bytes'>, but {} was passed".format(type(string)))
lines = [line for line in lines if line]
header = lines[0]
if header.startswith("#METABOLOMICS WORKBENCH"):
return "\n".join(lines)
return False | [
"def",
"_is_mwtab",
"(",
"string",
")",
":",
"if",
"isinstance",
"(",
"string",
",",
"str",
")",
":",
"lines",
"=",
"string",
".",
"split",
"(",
"\"\\n\"",
")",
"elif",
"isinstance",
"(",
"string",
",",
"bytes",
")",
":",
"lines",
"=",
"string",
".",
"decode",
"(",
"\"utf-8\"",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Expecting <class 'str'> or <class 'bytes'>, but {} was passed\"",
".",
"format",
"(",
"type",
"(",
"string",
")",
")",
")",
"lines",
"=",
"[",
"line",
"for",
"line",
"in",
"lines",
"if",
"line",
"]",
"header",
"=",
"lines",
"[",
"0",
"]",
"if",
"header",
".",
"startswith",
"(",
"\"#METABOLOMICS WORKBENCH\"",
")",
":",
"return",
"\"\\n\"",
".",
"join",
"(",
"lines",
")",
"return",
"False"
] | Test if input string is in `mwtab` format.
:param string: Input string.
:type string: :py:class:`str` or :py:class:`bytes`
:return: Input string if in mwTab format or False otherwise.
:rtype: :py:class:`str` or :py:obj:`False` | [
"Test",
"if",
"input",
"string",
"is",
"in",
"mwtab",
"format",
"."
] | 8c0ae8ab2aa621662f99589ed41e481cf8b7152b | https://github.com/MoseleyBioinformaticsLab/mwtab/blob/8c0ae8ab2aa621662f99589ed41e481cf8b7152b/mwtab/mwtab.py#L314-L334 | train |
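
The same header-sniffing idea as _is_mwtab, restated as a standalone sketch so it can be run outside the class; the function name and sample input are made up.

def looks_like_mwtab(data):
    # Accept str or bytes and normalize to a list of non-empty lines.
    if isinstance(data, bytes):
        data = data.decode("utf-8")
    elif not isinstance(data, str):
        raise TypeError("Expecting str or bytes, but {} was passed".format(type(data)))
    lines = [line for line in data.split("\n") if line]
    # An mwTab file announces itself on its first line.
    if lines and lines[0].startswith("#METABOLOMICS WORKBENCH"):
        return "\n".join(lines)
    return False

print(looks_like_mwtab("#METABOLOMICS WORKBENCH STUDY_ID:ST000001\nVERSION 1\n"))
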
prezi/django-zipkin | django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py | Client.getTraceIdsBySpanName | def getTraceIdsBySpanName(self, service_name, span_name, end_ts, limit, order):
"""
Fetch trace ids by service and span name.
Gets "limit" number of entries from before the "end_ts".
Span name is optional.
Timestamps are in microseconds.
Parameters:
- service_name
- span_name
- end_ts
- limit
- order
"""
self.send_getTraceIdsBySpanName(service_name, span_name, end_ts, limit, order)
return self.recv_getTraceIdsBySpanName() | python | def getTraceIdsBySpanName(self, service_name, span_name, end_ts, limit, order):
"""
Fetch trace ids by service and span name.
Gets "limit" number of entries from before the "end_ts".
Span name is optional.
Timestamps are in microseconds.
Parameters:
- service_name
- span_name
- end_ts
- limit
- order
"""
self.send_getTraceIdsBySpanName(service_name, span_name, end_ts, limit, order)
return self.recv_getTraceIdsBySpanName() | [
"def",
"getTraceIdsBySpanName",
"(",
"self",
",",
"service_name",
",",
"span_name",
",",
"end_ts",
",",
"limit",
",",
"order",
")",
":",
"self",
".",
"send_getTraceIdsBySpanName",
"(",
"service_name",
",",
"span_name",
",",
"end_ts",
",",
"limit",
",",
"order",
")",
"return",
"self",
".",
"recv_getTraceIdsBySpanName",
"(",
")"
] | Fetch trace ids by service and span name.
Gets "limit" number of entries from before the "end_ts".
Span name is optional.
Timestamps are in microseconds.
Parameters:
- service_name
- span_name
- end_ts
- limit
- order | [
"Fetch",
"trace",
"ids",
"by",
"service",
"and",
"span",
"name",
".",
"Gets",
"limit",
"number",
"of",
"entries",
"from",
"before",
"the",
"end_ts",
"."
] | 158d04cf9c2fe0adcb4cda66a250d9e41eae33f3 | https://github.com/prezi/django-zipkin/blob/158d04cf9c2fe0adcb4cda66a250d9e41eae33f3/django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py#L286-L302 | train |
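
getTraceIdsBySpanName and the other ZipkinQuery client methods in the records that follow all share the same generated shape: a synchronous wrapper that sends the arguments and then blocks on the reply. A toy sketch of that pattern, with a stand-in transport instead of the real Thrift protocol objects:

class ToyQueryClient(object):
    def __init__(self, transport):
        self._transport = transport              # stand-in for the Thrift iprot/oprot pair

    def send_getTraceIdsBySpanName(self, service_name, span_name, end_ts, limit, order):
        self._transport.write(("getTraceIdsBySpanName", service_name, span_name, end_ts, limit, order))

    def recv_getTraceIdsBySpanName(self):
        return self._transport.read()            # blocks until the reply arrives

    def getTraceIdsBySpanName(self, service_name, span_name, end_ts, limit, order):
        # Public method = send the request, then wait for the matching response.
        self.send_getTraceIdsBySpanName(service_name, span_name, end_ts, limit, order)
        return self.recv_getTraceIdsBySpanName()
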
prezi/django-zipkin | django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py | Client.getTraceIdsByServiceName | def getTraceIdsByServiceName(self, service_name, end_ts, limit, order):
"""
Fetch trace ids by service name.
Gets "limit" number of entries from before the "end_ts".
Timestamps are in microseconds.
Parameters:
- service_name
- end_ts
- limit
- order
"""
self.send_getTraceIdsByServiceName(service_name, end_ts, limit, order)
return self.recv_getTraceIdsByServiceName() | python | def getTraceIdsByServiceName(self, service_name, end_ts, limit, order):
"""
Fetch trace ids by service name.
Gets "limit" number of entries from before the "end_ts".
Timestamps are in microseconds.
Parameters:
- service_name
- end_ts
- limit
- order
"""
self.send_getTraceIdsByServiceName(service_name, end_ts, limit, order)
return self.recv_getTraceIdsByServiceName() | [
"def",
"getTraceIdsByServiceName",
"(",
"self",
",",
"service_name",
",",
"end_ts",
",",
"limit",
",",
"order",
")",
":",
"self",
".",
"send_getTraceIdsByServiceName",
"(",
"service_name",
",",
"end_ts",
",",
"limit",
",",
"order",
")",
"return",
"self",
".",
"recv_getTraceIdsByServiceName",
"(",
")"
] | Fetch trace ids by service name.
Gets "limit" number of entries from before the "end_ts".
Timestamps are in microseconds.
Parameters:
- service_name
- end_ts
- limit
- order | [
"Fetch",
"trace",
"ids",
"by",
"service",
"name",
".",
"Gets",
"limit",
"number",
"of",
"entries",
"from",
"before",
"the",
"end_ts",
"."
] | 158d04cf9c2fe0adcb4cda66a250d9e41eae33f3 | https://github.com/prezi/django-zipkin/blob/158d04cf9c2fe0adcb4cda66a250d9e41eae33f3/django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py#L332-L346 | train |
prezi/django-zipkin | django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py | Client.getTraceIdsByAnnotation | def getTraceIdsByAnnotation(self, service_name, annotation, value, end_ts, limit, order):
"""
Fetch trace ids with a particular annotation.
Gets "limit" number of entries from before the "end_ts".
When requesting based on time based annotations only pass in the first parameter, "annotation" and leave out
the second "value". If looking for a key-value binary annotation provide both, "annotation" is then the
key in the key-value.
Timestamps are in microseconds.
Parameters:
- service_name
- annotation
- value
- end_ts
- limit
- order
"""
self.send_getTraceIdsByAnnotation(service_name, annotation, value, end_ts, limit, order)
return self.recv_getTraceIdsByAnnotation() | python | def getTraceIdsByAnnotation(self, service_name, annotation, value, end_ts, limit, order):
"""
Fetch trace ids with a particular annotation.
Gets "limit" number of entries from before the "end_ts".
When requesting based on time based annotations only pass in the first parameter, "annotation" and leave out
the second "value". If looking for a key-value binary annotation provide both, "annotation" is then the
key in the key-value.
Timestamps are in microseconds.
Parameters:
- service_name
- annotation
- value
- end_ts
- limit
- order
"""
self.send_getTraceIdsByAnnotation(service_name, annotation, value, end_ts, limit, order)
return self.recv_getTraceIdsByAnnotation() | [
"def",
"getTraceIdsByAnnotation",
"(",
"self",
",",
"service_name",
",",
"annotation",
",",
"value",
",",
"end_ts",
",",
"limit",
",",
"order",
")",
":",
"self",
".",
"send_getTraceIdsByAnnotation",
"(",
"service_name",
",",
"annotation",
",",
"value",
",",
"end_ts",
",",
"limit",
",",
"order",
")",
"return",
"self",
".",
"recv_getTraceIdsByAnnotation",
"(",
")"
] | Fetch trace ids with a particular annotation.
Gets "limit" number of entries from before the "end_ts".
When requesting based on time based annotations only pass in the first parameter, "annotation" and leave out
the second "value". If looking for a key-value binary annotation provide both, "annotation" is then the
key in the key-value.
Timestamps are in microseconds.
Parameters:
- service_name
- annotation
- value
- end_ts
- limit
- order | [
"Fetch",
"trace",
"ids",
"with",
"a",
"particular",
"annotation",
".",
"Gets",
"limit",
"number",
"of",
"entries",
"from",
"before",
"the",
"end_ts",
"."
] | 158d04cf9c2fe0adcb4cda66a250d9e41eae33f3 | https://github.com/prezi/django-zipkin/blob/158d04cf9c2fe0adcb4cda66a250d9e41eae33f3/django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py#L375-L395 | train |
prezi/django-zipkin | django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py | Client.getTracesByIds | def getTracesByIds(self, trace_ids, adjust):
"""
Get the full traces associated with the given trace ids.
Second argument is a list of methods of adjusting the trace
data before returning it. Can be empty.
Parameters:
- trace_ids
- adjust
"""
self.send_getTracesByIds(trace_ids, adjust)
return self.recv_getTracesByIds() | python | def getTracesByIds(self, trace_ids, adjust):
"""
Get the full traces associated with the given trace ids.
Second argument is a list of methods of adjusting the trace
data before returning it. Can be empty.
Parameters:
- trace_ids
- adjust
"""
self.send_getTracesByIds(trace_ids, adjust)
return self.recv_getTracesByIds() | [
"def",
"getTracesByIds",
"(",
"self",
",",
"trace_ids",
",",
"adjust",
")",
":",
"self",
".",
"send_getTracesByIds",
"(",
"trace_ids",
",",
"adjust",
")",
"return",
"self",
".",
"recv_getTracesByIds",
"(",
")"
] | Get the full traces associated with the given trace ids.
Second argument is a list of methods of adjusting the trace
data before returning it. Can be empty.
Parameters:
- trace_ids
- adjust | [
"Get",
"the",
"full",
"traces",
"associated",
"with",
"the",
"given",
"trace",
"ids",
"."
] | 158d04cf9c2fe0adcb4cda66a250d9e41eae33f3 | https://github.com/prezi/django-zipkin/blob/158d04cf9c2fe0adcb4cda66a250d9e41eae33f3/django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py#L460-L472 | train |
prezi/django-zipkin | django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py | Client.getTraceSummariesByIds | def getTraceSummariesByIds(self, trace_ids, adjust):
"""
Fetch trace summaries for the given trace ids.
Second argument is a list of methods of adjusting the trace
data before returning it. Can be empty.
Note that if one of the trace ids does not have any data associated with it, it will not be
represented in the output list.
Parameters:
- trace_ids
- adjust
"""
self.send_getTraceSummariesByIds(trace_ids, adjust)
return self.recv_getTraceSummariesByIds() | python | def getTraceSummariesByIds(self, trace_ids, adjust):
"""
Fetch trace summaries for the given trace ids.
Second argument is a list of methods of adjusting the trace
data before returning it. Can be empty.
Note that if one of the trace ids does not have any data associated with it, it will not be
represented in the output list.
Parameters:
- trace_ids
- adjust
"""
self.send_getTraceSummariesByIds(trace_ids, adjust)
return self.recv_getTraceSummariesByIds() | [
"def",
"getTraceSummariesByIds",
"(",
"self",
",",
"trace_ids",
",",
"adjust",
")",
":",
"self",
".",
"send_getTraceSummariesByIds",
"(",
"trace_ids",
",",
"adjust",
")",
"return",
"self",
".",
"recv_getTraceSummariesByIds",
"(",
")"
] | Fetch trace summaries for the given trace ids.
Second argument is a list of methods of adjusting the trace
data before returning it. Can be empty.
Note that if one of the trace ids does not have any data associated with it, it will not be
represented in the output list.
Parameters:
- trace_ids
- adjust | [
"Fetch",
"trace",
"summaries",
"for",
"the",
"given",
"trace",
"ids",
"."
] | 158d04cf9c2fe0adcb4cda66a250d9e41eae33f3 | https://github.com/prezi/django-zipkin/blob/158d04cf9c2fe0adcb4cda66a250d9e41eae33f3/django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py#L543-L558 | train |
prezi/django-zipkin | django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py | Client.getTraceCombosByIds | def getTraceCombosByIds(self, trace_ids, adjust):
"""
Not content with just one of traces, summaries or timelines? Want it all? This is the method for you.
Parameters:
- trace_ids
- adjust
"""
self.send_getTraceCombosByIds(trace_ids, adjust)
return self.recv_getTraceCombosByIds() | python | def getTraceCombosByIds(self, trace_ids, adjust):
"""
Not content with just one of traces, summaries or timelines? Want it all? This is the method for you.
Parameters:
- trace_ids
- adjust
"""
self.send_getTraceCombosByIds(trace_ids, adjust)
return self.recv_getTraceCombosByIds() | [
"def",
"getTraceCombosByIds",
"(",
"self",
",",
"trace_ids",
",",
"adjust",
")",
":",
"self",
".",
"send_getTraceCombosByIds",
"(",
"trace_ids",
",",
"adjust",
")",
"return",
"self",
".",
"recv_getTraceCombosByIds",
"(",
")"
] | Not content with just one of traces, summaries or timelines? Want it all? This is the method for you.
Parameters:
- trace_ids
- adjust | [
"Not",
"content",
"with",
"just",
"one",
"of",
"traces",
"summaries",
"or",
"timelines?",
"Want",
"it",
"all?",
"This",
"is",
"the",
"method",
"for",
"you",
"."
] | 158d04cf9c2fe0adcb4cda66a250d9e41eae33f3 | https://github.com/prezi/django-zipkin/blob/158d04cf9c2fe0adcb4cda66a250d9e41eae33f3/django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py#L585-L594 | train |
prezi/django-zipkin | django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py | Client.setTraceTimeToLive | def setTraceTimeToLive(self, trace_id, ttl_seconds):
"""
Change the TTL of a trace. If we find an interesting trace we want to keep around for further
investigation.
Parameters:
- trace_id
- ttl_seconds
"""
self.send_setTraceTimeToLive(trace_id, ttl_seconds)
self.recv_setTraceTimeToLive() | python | def setTraceTimeToLive(self, trace_id, ttl_seconds):
"""
Change the TTL of a trace. If we find an interesting trace we want to keep around for further
investigation.
Parameters:
- trace_id
- ttl_seconds
"""
self.send_setTraceTimeToLive(trace_id, ttl_seconds)
self.recv_setTraceTimeToLive() | [
"def",
"setTraceTimeToLive",
"(",
"self",
",",
"trace_id",
",",
"ttl_seconds",
")",
":",
"self",
".",
"send_setTraceTimeToLive",
"(",
"trace_id",
",",
"ttl_seconds",
")",
"self",
".",
"recv_setTraceTimeToLive",
"(",
")"
] | Change the TTL of a trace. If we find an interesting trace we want to keep around for further
investigation.
Parameters:
- trace_id
- ttl_seconds | [
"Change",
"the",
"TTL",
"of",
"a",
"trace",
".",
"If",
"we",
"find",
"an",
"interesting",
"trace",
"we",
"want",
"to",
"keep",
"around",
"for",
"further",
"investigation",
"."
] | 158d04cf9c2fe0adcb4cda66a250d9e41eae33f3 | https://github.com/prezi/django-zipkin/blob/158d04cf9c2fe0adcb4cda66a250d9e41eae33f3/django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py#L685-L695 | train |
camptocamp/Studio | studio/lib/datasource_discovery.py | discover_datasource_columns | def discover_datasource_columns(datastore_str, datasource_id):
""" Loop through the datastore's datasources to find
the datasource identified by datasource_id, return
the matching datasource's columns. """
datastore = DataStore(datastore_str)
datasource = datastore.get_datasource(datasource_id)
if datasource.type != "RASTER":
return datasource.list_columns()
else:
return [] | python | def discover_datasource_columns(datastore_str, datasource_id):
""" Loop through the datastore's datasources to find
the datasource identified by datasource_id, return
the matching datasource's columns. """
datastore = DataStore(datastore_str)
datasource = datastore.get_datasource(datasource_id)
if datasource.type != "RASTER":
return datasource.list_columns()
else:
return [] | [
"def",
"discover_datasource_columns",
"(",
"datastore_str",
",",
"datasource_id",
")",
":",
"datastore",
"=",
"DataStore",
"(",
"datastore_str",
")",
"datasource",
"=",
"datastore",
".",
"get_datasource",
"(",
"datasource_id",
")",
"if",
"datasource",
".",
"type",
"!=",
"\"RASTER\"",
":",
"return",
"datasource",
".",
"list_columns",
"(",
")",
"else",
":",
"return",
"[",
"]"
] | Loop through the datastore's datasources to find
the datasource identified by datasource_id, return
the matching datasource's columns. | [
"Loop",
"through",
"the",
"datastore",
"s",
"datasources",
"to",
"find",
"the",
"datasource",
"identified",
"by",
"datasource_id",
"return",
"the",
"matching",
"datasource",
"s",
"columns",
"."
] | 43cb7298434fb606b15136801b79b03571a2f27e | https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/datasource_discovery.py#L58-L67 | train |
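
A hypothetical call to discover_datasource_columns; the PostGIS connection string and datasource id are illustrative only and depend on how the datastore was configured.

datastore_str = "PG: host=localhost dbname=gis user=studio"    # made-up connection string
columns = discover_datasource_columns(datastore_str, "roads")  # "roads" is a made-up datasource id
print(columns)   # [] for RASTER datasources, otherwise whatever list_columns() returns
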
camptocamp/Studio | studio/lib/datasource_discovery.py | OgrDataSource._get_column_type | def _get_column_type(self,column):
""" Return 'numeric' if the column is of type integer or
real, otherwise return 'string'. """
ctype = column.GetType()
if ctype in [ogr.OFTInteger, ogr.OFTReal]:
return 'numeric'
else:
return 'string' | python | def _get_column_type(self,column):
""" Return 'numeric' if the column is of type integer or
real, otherwise return 'string'. """
ctype = column.GetType()
if ctype in [ogr.OFTInteger, ogr.OFTReal]:
return 'numeric'
else:
return 'string' | [
"def",
"_get_column_type",
"(",
"self",
",",
"column",
")",
":",
"ctype",
"=",
"column",
".",
"GetType",
"(",
")",
"if",
"ctype",
"in",
"[",
"ogr",
".",
"OFTInteger",
",",
"ogr",
".",
"OFTReal",
"]",
":",
"return",
"'numeric'",
"else",
":",
"return",
"'string'"
] | Return 'numeric' if the column is of type integer or
real, otherwise return 'string'. | [
"Return",
"numeric",
"if",
"the",
"column",
"is",
"of",
"type",
"integer",
"or",
"real",
"otherwise",
"return",
"string",
"."
] | 43cb7298434fb606b15136801b79b03571a2f27e | https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/datasource_discovery.py#L269-L276 | train |
camptocamp/Studio | studio/lib/datasource_discovery.py | OgrDataSource._get_default_mapfile_excerpt | def _get_default_mapfile_excerpt(self):
""" Given an OGR string, an OGR connection and an OGR layer, create and
return a representation of a MapFile LAYER block. """
layerobj = self._get_layer_stub()
classobj = mapscript.classObj()
layerobj.insertClass(classobj)
styleobj = self._get_default_style()
classobj.insertStyle(styleobj)
return mapserializer.layerobj_to_dict(layerobj,None) | python | def _get_default_mapfile_excerpt(self):
""" Given an OGR string, an OGR connection and an OGR layer, create and
return a representation of a MapFile LAYER block. """
layerobj = self._get_layer_stub()
classobj = mapscript.classObj()
layerobj.insertClass(classobj)
styleobj = self._get_default_style()
classobj.insertStyle(styleobj)
return mapserializer.layerobj_to_dict(layerobj,None) | [
"def",
"_get_default_mapfile_excerpt",
"(",
"self",
")",
":",
"layerobj",
"=",
"self",
".",
"_get_layer_stub",
"(",
")",
"classobj",
"=",
"mapscript",
".",
"classObj",
"(",
")",
"layerobj",
".",
"insertClass",
"(",
"classobj",
")",
"styleobj",
"=",
"self",
".",
"_get_default_style",
"(",
")",
"classobj",
".",
"insertStyle",
"(",
"styleobj",
")",
"return",
"mapserializer",
".",
"layerobj_to_dict",
"(",
"layerobj",
",",
"None",
")"
] | Given an OGR string, an OGR connection and an OGR layer, create and
return a representation of a MapFile LAYER block. | [
"Given",
"an",
"OGR",
"string",
"an",
"OGR",
"connection",
"and",
"an",
"OGR",
"layer",
"create",
"and",
"return",
"a",
"representation",
"of",
"a",
"MapFile",
"LAYER",
"block",
"."
] | 43cb7298434fb606b15136801b79b03571a2f27e | https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/datasource_discovery.py#L285-L295 | train |
camptocamp/Studio | studio/lib/datasource_discovery.py | OgrDataSource._get_layer_stub | def _get_layer_stub(self):
""" builds a minimal mapscript layerobj, with no styling """
layerobj = mapscript.layerObj()
layerobj.name = self.name
layerobj.status = mapscript.MS_ON
projection = self.ogr_layer.GetSpatialRef()
featureIdColumn = self._get_featureId_column()
if featureIdColumn is not None and featureIdColumn != '' :
layerobj.metadata.set('gml_featureid', featureIdColumn)
if projection is not None:
layerobj.setProjection(projection.ExportToProj4())
if self.datastore.connection_type == "directory":
#append the extension to the shapefile until mapserver bug 2895 is fixed
datastr = os.path.normpath(self.datastore.datastore_str + "/" + self.name)
if os.path.exists(datastr+'.shp'):
datastr = datastr+'.shp'
elif os.path.exists(datastr+'.SHP'):
datastr = datastr+'.SHP'
layerobj.data = datastr
elif self.datastore.connection_type == "postgis":
layerobj.connectiontype = mapscript.MS_POSTGIS
#remove the leading "PG:" from the connection string
layerobj.connection = self.datastore.datastore_str[3:].strip()
if featureIdColumn is not None and featureIdColumn != '' :
layerobj.data = "%s from %s using unique %s" %(
self.ogr_layer.GetGeometryColumn(),
self.name,
featureIdColumn)
else:
layerobj.data = "%s from %s"%(self.ogr_layer.GetGeometryColumn(),self.name)
else:
raise RuntimeError("unsupported connection type")
if self.type == 'POINT':
layerobj.type = mapscript.MS_LAYER_POINT
elif self.type == 'POLYGON':
layerobj.type = mapscript.MS_LAYER_POLYGON
else:
layerobj.type = mapscript.MS_LAYER_LINE
return layerobj | python | def _get_layer_stub(self):
""" builds a minimal mapscript layerobj, with no styling """
layerobj = mapscript.layerObj()
layerobj.name = self.name
layerobj.status = mapscript.MS_ON
projection = self.ogr_layer.GetSpatialRef()
featureIdColumn = self._get_featureId_column()
if featureIdColumn is not None and featureIdColumn != '' :
layerobj.metadata.set('gml_featureid', featureIdColumn)
if projection is not None:
layerobj.setProjection(projection.ExportToProj4())
if self.datastore.connection_type == "directory":
#append the extension to the shapefile until mapserver bug 2895 is fixed
datastr = os.path.normpath(self.datastore.datastore_str + "/" + self.name)
if os.path.exists(datastr+'.shp'):
datastr = datastr+'.shp'
elif os.path.exists(datastr+'.SHP'):
datastr = datastr+'.SHP'
layerobj.data = datastr
elif self.datastore.connection_type == "postgis":
layerobj.connectiontype = mapscript.MS_POSTGIS
#remove the leading "PG:" from the connection string
layerobj.connection = self.datastore.datastore_str[3:].strip()
if featureIdColumn is not None and featureIdColumn != '' :
layerobj.data = "%s from %s using unique %s" %(
self.ogr_layer.GetGeometryColumn(),
self.name,
featureIdColumn)
else:
layerobj.data = "%s from %s"%(self.ogr_layer.GetGeometryColumn(),self.name)
else:
raise RuntimeError("unsupported connection type")
if self.type == 'POINT':
layerobj.type = mapscript.MS_LAYER_POINT
elif self.type == 'POLYGON':
layerobj.type = mapscript.MS_LAYER_POLYGON
else:
layerobj.type = mapscript.MS_LAYER_LINE
return layerobj | [
"def",
"_get_layer_stub",
"(",
"self",
")",
":",
"layerobj",
"=",
"mapscript",
".",
"layerObj",
"(",
")",
"layerobj",
".",
"name",
"=",
"self",
".",
"name",
"layerobj",
".",
"status",
"=",
"mapscript",
".",
"MS_ON",
"projection",
"=",
"self",
".",
"ogr_layer",
".",
"GetSpatialRef",
"(",
")",
"featureIdColumn",
"=",
"self",
".",
"_get_featureId_column",
"(",
")",
"if",
"featureIdColumn",
"is",
"not",
"None",
"and",
"featureIdColumn",
"!=",
"''",
":",
"layerobj",
".",
"metadata",
".",
"set",
"(",
"'gml_featureid'",
",",
"featureIdColumn",
")",
"if",
"projection",
"is",
"not",
"None",
":",
"layerobj",
".",
"setProjection",
"(",
"projection",
".",
"ExportToProj4",
"(",
")",
")",
"if",
"self",
".",
"datastore",
".",
"connection_type",
"==",
"\"directory\"",
":",
"#append the extension to the shapefile until mapserver bug 2895 is fixed",
"datastr",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"self",
".",
"datastore",
".",
"datastore_str",
"+",
"\"/\"",
"+",
"self",
".",
"name",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"datastr",
"+",
"'.shp'",
")",
":",
"datastr",
"=",
"datastr",
"+",
"'.shp'",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"datastr",
"+",
"'.SHP'",
")",
":",
"datastr",
"=",
"datastr",
"+",
"'.SHP'",
"layerobj",
".",
"data",
"=",
"datastr",
"elif",
"self",
".",
"datastore",
".",
"connection_type",
"==",
"\"postgis\"",
":",
"layerobj",
".",
"connectiontype",
"=",
"mapscript",
".",
"MS_POSTGIS",
"#remove the leading \"PG:\" from the connection string",
"layerobj",
".",
"connection",
"=",
"self",
".",
"datastore",
".",
"datastore_str",
"[",
"3",
":",
"]",
".",
"strip",
"(",
")",
"if",
"featureIdColumn",
"is",
"not",
"None",
"and",
"featureIdColumn",
"!=",
"''",
":",
"layerobj",
".",
"data",
"=",
"\"%s from %s using unique %s\"",
"%",
"(",
"self",
".",
"ogr_layer",
".",
"GetGeometryColumn",
"(",
")",
",",
"self",
".",
"name",
",",
"featureIdColumn",
")",
"else",
":",
"layerobj",
".",
"data",
"=",
"\"%s from %s\"",
"%",
"(",
"self",
".",
"ogr_layer",
".",
"GetGeometryColumn",
"(",
")",
",",
"self",
".",
"name",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"unsupported connection type\"",
")",
"if",
"self",
".",
"type",
"==",
"'POINT'",
":",
"layerobj",
".",
"type",
"=",
"mapscript",
".",
"MS_LAYER_POINT",
"elif",
"self",
".",
"type",
"==",
"'POLYGON'",
":",
"layerobj",
".",
"type",
"=",
"mapscript",
".",
"MS_LAYER_POLYGON",
"else",
":",
"layerobj",
".",
"type",
"=",
"mapscript",
".",
"MS_LAYER_LINE",
"return",
"layerobj"
] | builds a minimal mapscript layerobj, with no styling | [
"builds",
"a",
"minimal",
"mapscript",
"layerobj",
"with",
"no",
"styling"
] | 43cb7298434fb606b15136801b79b03571a2f27e | https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/datasource_discovery.py#L303-L341 | train |
cocaine/cocaine-tools | cocaine/proxy/mds_direct.py | MDSDirect.reelect_app | def reelect_app(self, request, app):
"""tries to connect to the same app on differnet host from dist-info"""
# disconnect app explicitly to break possibly existing connection
app.disconnect()
endpoints_size = len(app.locator.endpoints)
# try x times, where x is the number of different endpoints in app locator.
for _ in xrange(0, endpoints_size + 1):
# last chance to take app from common pool
if len(app.locator.endpoints) == 0:
request.logger.info(
"giving up on connecting to dist-info hosts, falling back to common pool processing")
app = yield self.proxy.reelect_app(request, app)
raise gen.Return(app)
try:
# always create new locator to prevent locking as we do connect with timeout
# however lock can be still held during TCP timeout
locator = Locator(endpoints=app.locator.endpoints)
request.logger.info("connecting to locator %s", locator.endpoints[0])
# first try to connect to locator only on remote host with timeout
yield gen.with_timeout(self.service_connect_timeout, locator.connect())
request.logger.debug("connected to locator %s for %s", locator.endpoints[0], app.name)
app = Service(app.name, locator=locator, timeout=RESOLVE_TIMEOUT)
# try to resolve and connect to application itself
yield gen.with_timeout(self.service_connect_timeout, app.connect())
request.logger.debug("connected to application %s via %s", app.name, app.endpoints)
except gen.TimeoutError:
# on timeout try next endpoint first
request.logger.warning("timed out while connecting to application")
continue
except ServiceError as err:
request.logger.warning("got error while resolving app - %s", err)
if err.category in LOCATORCATEGORY and err.code == ESERVICENOTAVAILABLE:
# if the application is down - also try next endpoint
continue
else:
raise err
finally:
# drop first endpoint to start next connection from different endpoint
# we do this, as default logic of connection attempts in locator do not fit here
app.locator.endpoints = app.locator.endpoints[1:]
# return connected app
raise gen.Return(app)
raise PluginApplicationError(42, 42, "could not connect to application") | python | def reelect_app(self, request, app):
"""tries to connect to the same app on differnet host from dist-info"""
# disconnect app explicitly to break possibly existing connection
app.disconnect()
endpoints_size = len(app.locator.endpoints)
# try x times, where x is the number of different endpoints in app locator.
for _ in xrange(0, endpoints_size + 1):
# last chance to take app from common pool
if len(app.locator.endpoints) == 0:
request.logger.info(
"giving up on connecting to dist-info hosts, falling back to common pool processing")
app = yield self.proxy.reelect_app(request, app)
raise gen.Return(app)
try:
# always create new locator to prevent locking as we do connect with timeout
# however lock can be still held during TCP timeout
locator = Locator(endpoints=app.locator.endpoints)
request.logger.info("connecting to locator %s", locator.endpoints[0])
# first try to connect to locator only on remote host with timeout
yield gen.with_timeout(self.service_connect_timeout, locator.connect())
request.logger.debug("connected to locator %s for %s", locator.endpoints[0], app.name)
app = Service(app.name, locator=locator, timeout=RESOLVE_TIMEOUT)
# try to resolve and connect to application itself
yield gen.with_timeout(self.service_connect_timeout, app.connect())
request.logger.debug("connected to application %s via %s", app.name, app.endpoints)
except gen.TimeoutError:
# on timeout try next endpoint first
request.logger.warning("timed out while connecting to application")
continue
except ServiceError as err:
request.logger.warning("got error while resolving app - %s", err)
if err.category in LOCATORCATEGORY and err.code == ESERVICENOTAVAILABLE:
# if the application is down - also try next endpoint
continue
else:
raise err
finally:
# drop first endpoint to start next connection from different endpoint
# we do this, as default logic of connection attempts in locator do not fit here
app.locator.endpoints = app.locator.endpoints[1:]
# return connected app
raise gen.Return(app)
raise PluginApplicationError(42, 42, "could not connect to application") | [
"def",
"reelect_app",
"(",
"self",
",",
"request",
",",
"app",
")",
":",
"# disconnect app explicitly to break possibly existing connection",
"app",
".",
"disconnect",
"(",
")",
"endpoints_size",
"=",
"len",
"(",
"app",
".",
"locator",
".",
"endpoints",
")",
"# try x times, where x is the number of different endpoints in app locator.",
"for",
"_",
"in",
"xrange",
"(",
"0",
",",
"endpoints_size",
"+",
"1",
")",
":",
"# last chance to take app from common pool",
"if",
"len",
"(",
"app",
".",
"locator",
".",
"endpoints",
")",
"==",
"0",
":",
"request",
".",
"logger",
".",
"info",
"(",
"\"giving up on connecting to dist-info hosts, falling back to common pool processing\"",
")",
"app",
"=",
"yield",
"self",
".",
"proxy",
".",
"reelect_app",
"(",
"request",
",",
"app",
")",
"raise",
"gen",
".",
"Return",
"(",
"app",
")",
"try",
":",
"# always create new locator to prevent locking as we do connect with timeout",
"# however lock can be still held during TCP timeout",
"locator",
"=",
"Locator",
"(",
"endpoints",
"=",
"app",
".",
"locator",
".",
"endpoints",
")",
"request",
".",
"logger",
".",
"info",
"(",
"\"connecting to locator %s\"",
",",
"locator",
".",
"endpoints",
"[",
"0",
"]",
")",
"# first try to connect to locator only on remote host with timeout",
"yield",
"gen",
".",
"with_timeout",
"(",
"self",
".",
"service_connect_timeout",
",",
"locator",
".",
"connect",
"(",
")",
")",
"request",
".",
"logger",
".",
"debug",
"(",
"\"connected to locator %s for %s\"",
",",
"locator",
".",
"endpoints",
"[",
"0",
"]",
",",
"app",
".",
"name",
")",
"app",
"=",
"Service",
"(",
"app",
".",
"name",
",",
"locator",
"=",
"locator",
",",
"timeout",
"=",
"RESOLVE_TIMEOUT",
")",
"# try to resolve and connect to application itself",
"yield",
"gen",
".",
"with_timeout",
"(",
"self",
".",
"service_connect_timeout",
",",
"app",
".",
"connect",
"(",
")",
")",
"request",
".",
"logger",
".",
"debug",
"(",
"\"connected to application %s via %s\"",
",",
"app",
".",
"name",
",",
"app",
".",
"endpoints",
")",
"except",
"gen",
".",
"TimeoutError",
":",
"# on timeout try next endpoint first",
"request",
".",
"logger",
".",
"warning",
"(",
"\"timed out while connecting to application\"",
")",
"continue",
"except",
"ServiceError",
"as",
"err",
":",
"request",
".",
"logger",
".",
"warning",
"(",
"\"got error while resolving app - %s\"",
",",
"err",
")",
"if",
"err",
".",
"category",
"in",
"LOCATORCATEGORY",
"and",
"err",
".",
"code",
"==",
"ESERVICENOTAVAILABLE",
":",
"# if the application is down - also try next endpoint",
"continue",
"else",
":",
"raise",
"err",
"finally",
":",
"# drop first endpoint to start next connection from different endpoint",
"# we do this, as default logic of connection attempts in locator do not fit here",
"app",
".",
"locator",
".",
"endpoints",
"=",
"app",
".",
"locator",
".",
"endpoints",
"[",
"1",
":",
"]",
"# return connected app",
"raise",
"gen",
".",
"Return",
"(",
"app",
")",
"raise",
"PluginApplicationError",
"(",
"42",
",",
"42",
",",
"\"could not connect to application\"",
")"
] | tries to connect to the same app on different host from dist-info | [
"tries",
"to",
"connect",
"to",
"the",
"same",
"app",
"on",
"differnet",
"host",
"from",
"dist",
"-",
"info"
] | d8834f8e04ca42817d5f4e368d471484d4b3419f | https://github.com/cocaine/cocaine-tools/blob/d8834f8e04ca42817d5f4e368d471484d4b3419f/cocaine/proxy/mds_direct.py#L69-L116 | train |
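
The endpoint-rotation idea in reelect_app, reduced to a synchronous sketch without the tornado coroutines: try each endpoint once with a timeout, drop the head of the list after every attempt, and fall back to a common pool when the list is exhausted. All names here are made up.

import socket

def connect_with_failover(endpoints, port, timeout=0.5, fallback=None):
    remaining = list(endpoints)
    while remaining:
        host = remaining.pop(0)                  # drop the head, like endpoints[1:] above
        try:
            return socket.create_connection((host, port), timeout=timeout)
        except (socket.timeout, OSError):
            continue                             # timed out or refused: try the next endpoint
    if fallback is not None:
        return fallback()                        # "last chance to take app from common pool"
    raise RuntimeError("could not connect to any endpoint")
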
tropo/tropo-webapi-python | samples/appengine/main.py | RecordHelloWorld | def RecordHelloWorld(handler, t):
"""
Demonstration of recording a message.
"""
url = "%s/receive_recording.py" % THIS_URL
t.startRecording(url)
t.say ("Hello, World.")
t.stopRecording()
json = t.RenderJson()
logging.info ("RecordHelloWorld json: %s" % json)
handler.response.out.write(json) | python | def RecordHelloWorld(handler, t):
"""
Demonstration of recording a message.
"""
url = "%s/receive_recording.py" % THIS_URL
t.startRecording(url)
t.say ("Hello, World.")
t.stopRecording()
json = t.RenderJson()
logging.info ("RecordHelloWorld json: %s" % json)
handler.response.out.write(json) | [
"def",
"RecordHelloWorld",
"(",
"handler",
",",
"t",
")",
":",
"url",
"=",
"\"%s/receive_recording.py\"",
"%",
"THIS_URL",
"t",
".",
"startRecording",
"(",
"url",
")",
"t",
".",
"say",
"(",
"\"Hello, World.\"",
")",
"t",
".",
"stopRecording",
"(",
")",
"json",
"=",
"t",
".",
"RenderJson",
"(",
")",
"logging",
".",
"info",
"(",
"\"RecordHelloWorld json: %s\"",
"%",
"json",
")",
"handler",
".",
"response",
".",
"out",
".",
"write",
"(",
"json",
")"
] | Demonstration of recording a message. | [
"Demonstration",
"of",
"recording",
"a",
"message",
"."
] | f87772644a6b45066a4c5218f0c1f6467b64ab3c | https://github.com/tropo/tropo-webapi-python/blob/f87772644a6b45066a4c5218f0c1f6467b64ab3c/samples/appengine/main.py#L69-L79 | train |
tropo/tropo-webapi-python | samples/appengine/main.py | RedirectDemo | def RedirectDemo(handler, t):
"""
Demonstration of redirecting to another number.
"""
# t.say ("One moment please.")
t.redirect(SIP_PHONE)
json = t.RenderJson()
logging.info ("RedirectDemo json: %s" % json)
handler.response.out.write(json) | python | def RedirectDemo(handler, t):
"""
Demonstration of redirecting to another number.
"""
# t.say ("One moment please.")
t.redirect(SIP_PHONE)
json = t.RenderJson()
logging.info ("RedirectDemo json: %s" % json)
handler.response.out.write(json) | [
"def",
"RedirectDemo",
"(",
"handler",
",",
"t",
")",
":",
"# t.say (\"One moment please.\")",
"t",
".",
"redirect",
"(",
"SIP_PHONE",
")",
"json",
"=",
"t",
".",
"RenderJson",
"(",
")",
"logging",
".",
"info",
"(",
"\"RedirectDemo json: %s\"",
"%",
"json",
")",
"handler",
".",
"response",
".",
"out",
".",
"write",
"(",
"json",
")"
] | Demonstration of redirecting to another number. | [
"Demonstration",
"of",
"redirecting",
"to",
"another",
"number",
"."
] | f87772644a6b45066a4c5218f0c1f6467b64ab3c | https://github.com/tropo/tropo-webapi-python/blob/f87772644a6b45066a4c5218f0c1f6467b64ab3c/samples/appengine/main.py#L81-L89 | train |
tropo/tropo-webapi-python | samples/appengine/main.py | TransferDemo | def TransferDemo(handler, t):
"""
Demonstration of transferring to another number
"""
t.say ("One moment please.")
t.transfer(MY_PHONE)
t.say("Hi. I am a robot")
json = t.RenderJson()
logging.info ("TransferDemo json: %s" % json)
handler.response.out.write(json) | python | def TransferDemo(handler, t):
"""
Demonstration of transferring to another number
"""
t.say ("One moment please.")
t.transfer(MY_PHONE)
t.say("Hi. I am a robot")
json = t.RenderJson()
logging.info ("TransferDemo json: %s" % json)
handler.response.out.write(json) | [
"def",
"TransferDemo",
"(",
"handler",
",",
"t",
")",
":",
"t",
".",
"say",
"(",
"\"One moment please.\"",
")",
"t",
".",
"transfer",
"(",
"MY_PHONE",
")",
"t",
".",
"say",
"(",
"\"Hi. I am a robot\"",
")",
"json",
"=",
"t",
".",
"RenderJson",
"(",
")",
"logging",
".",
"info",
"(",
"\"TransferDemo json: %s\"",
"%",
"json",
")",
"handler",
".",
"response",
".",
"out",
".",
"write",
"(",
"json",
")"
] | Demonstration of transferring to another number | [
"Demonstration",
"of",
"transfering",
"to",
"another",
"number"
] | f87772644a6b45066a4c5218f0c1f6467b64ab3c | https://github.com/tropo/tropo-webapi-python/blob/f87772644a6b45066a4c5218f0c1f6467b64ab3c/samples/appengine/main.py#L91-L100 | train |
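
The three App Engine demos above share one shape: build a Tropo response object, add verbs to it, render it to JSON, and write the JSON back on the web handler. A minimal sketch of that shape; the Tropo() constructor and the greeting text are assumptions, only say() and RenderJson() are taken from the samples.

from tropo import Tropo   # assumed import, mirroring the sample app

def GreetingDemo(handler):
    t = Tropo()                                   # assumed no-argument constructor
    t.say("Thanks for calling. Goodbye.")
    json = t.RenderJson()
    handler.response.out.write(json)
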
uw-it-aca/uw-restclients-core | restclients_core/util/retry.py | retry | def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, status_codes=[],
logger=None):
"""
Decorator function for retrying the decorated function,
using an exponential or fixed backoff.
Original: https://wiki.python.org/moin/PythonDecoratorLibrary#Retry
ExceptionToCheck: the exception to check. Can be a tuple of
exceptions to check
tries: number of times to try (not retry) before giving up
delay: initial delay between tries in seconds
backoff: backoff multiplier
status_codes: list of http status codes to check for retrying, only applies
when ExceptionToCheck is a DataFailureException
logger: logging.Logger instance
"""
if backoff is None or backoff <= 0:
raise ValueError("backoff must be a number greater than 0")
tries = math.floor(tries)
if tries < 0:
raise ValueError("tries must be a number 0 or greater")
if delay is None or delay <= 0:
raise ValueError("delay must be a number greater than 0")
def deco_retry(f):
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck as err:
if (type(err) is DataFailureException and
len(status_codes) and
err.status not in status_codes):
raise
if logger:
logger.warning('%s: %s, Retrying in %s seconds.' % (
f.__name__, err, mdelay))
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry
return deco_retry | python | def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, status_codes=[],
logger=None):
"""
Decorator function for retrying the decorated function,
using an exponential or fixed backoff.
Original: https://wiki.python.org/moin/PythonDecoratorLibrary#Retry
ExceptionToCheck: the exception to check. Can be a tuple of
exceptions to check
tries: number of times to try (not retry) before giving up
delay: initial delay between tries in seconds
backoff: backoff multiplier
status_codes: list of http status codes to check for retrying, only applies
when ExceptionToCheck is a DataFailureException
logger: logging.Logger instance
"""
if backoff is None or backoff <= 0:
raise ValueError("backoff must be a number greater than 0")
tries = math.floor(tries)
if tries < 0:
raise ValueError("tries must be a number 0 or greater")
if delay is None or delay <= 0:
raise ValueError("delay must be a number greater than 0")
def deco_retry(f):
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck as err:
if (type(err) is DataFailureException and
len(status_codes) and
err.status not in status_codes):
raise
if logger:
logger.warning('%s: %s, Retrying in %s seconds.' % (
f.__name__, err, mdelay))
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry
return deco_retry | [
"def",
"retry",
"(",
"ExceptionToCheck",
",",
"tries",
"=",
"4",
",",
"delay",
"=",
"3",
",",
"backoff",
"=",
"2",
",",
"status_codes",
"=",
"[",
"]",
",",
"logger",
"=",
"None",
")",
":",
"if",
"backoff",
"is",
"None",
"or",
"backoff",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"backoff must be a number greater than 0\"",
")",
"tries",
"=",
"math",
".",
"floor",
"(",
"tries",
")",
"if",
"tries",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"tries must be a number 0 or greater\"",
")",
"if",
"delay",
"is",
"None",
"or",
"delay",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"delay must be a number greater than 0\"",
")",
"def",
"deco_retry",
"(",
"f",
")",
":",
"def",
"f_retry",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"mtries",
",",
"mdelay",
"=",
"tries",
",",
"delay",
"while",
"mtries",
">",
"1",
":",
"try",
":",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"ExceptionToCheck",
"as",
"err",
":",
"if",
"(",
"type",
"(",
"err",
")",
"is",
"DataFailureException",
"and",
"len",
"(",
"status_codes",
")",
"and",
"err",
".",
"status",
"not",
"in",
"status_codes",
")",
":",
"raise",
"if",
"logger",
":",
"logger",
".",
"warning",
"(",
"'%s: %s, Retrying in %s seconds.'",
"%",
"(",
"f",
".",
"__name__",
",",
"err",
",",
"mdelay",
")",
")",
"time",
".",
"sleep",
"(",
"mdelay",
")",
"mtries",
"-=",
"1",
"mdelay",
"*=",
"backoff",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"f_retry",
"return",
"deco_retry"
] | Decorator function for retrying the decorated function,
using an exponential or fixed backoff.
Original: https://wiki.python.org/moin/PythonDecoratorLibrary#Retry
ExceptionToCheck: the exception to check. Can be a tuple of
exceptions to check
tries: number of times to try (not retry) before giving up
delay: initial delay between tries in seconds
backoff: backoff multiplier
status_codes: list of http status codes to check for retrying, only applies
when ExceptionToCheck is a DataFailureException
logger: logging.Logger instance | [
"Decorator",
"function",
"for",
"retrying",
"the",
"decorated",
"function",
"using",
"an",
"exponential",
"or",
"fixed",
"backoff",
"."
] | fda9380dceb6355ec6a3123e88c9ec66ae992682 | https://github.com/uw-it-aca/uw-restclients-core/blob/fda9380dceb6355ec6a3123e88c9ec66ae992682/restclients_core/util/retry.py#L6-L58 | train |
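
A usage sketch for the retry decorator above. The exceptions import path, the retried function and its status codes are illustrative; any call that raises DataFailureException with one of the listed statuses is retried with exponential backoff.

import logging
from restclients_core.util.retry import retry                  # path taken from this record
from restclients_core.exceptions import DataFailureException   # assumed exceptions module

logger = logging.getLogger(__name__)

@retry(DataFailureException, tries=3, delay=1, backoff=2,
       status_codes=[500, 502, 503], logger=logger)
def fetch_enrollments():
    ...   # body elided; a DataFailureException with status 500/502/503 triggers a retry
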
uw-it-aca/uw-restclients-core | restclients_core/dao.py | DAO._custom_response_edit | def _custom_response_edit(self, method, url, headers, body, response):
"""
This method allows a service to edit a response.
If you want to do this, you probably really want to use
_edit_mock_response - this method will operate on Live resources.
"""
if self.get_implementation().is_mock():
delay = self.get_setting("MOCKDATA_DELAY", 0.0)
time.sleep(delay)
self._edit_mock_response(method, url, headers, body, response) | python | def _custom_response_edit(self, method, url, headers, body, response):
"""
This method allows a service to edit a response.
If you want to do this, you probably really want to use
_edit_mock_response - this method will operate on Live resources.
"""
if self.get_implementation().is_mock():
delay = self.get_setting("MOCKDATA_DELAY", 0.0)
time.sleep(delay)
self._edit_mock_response(method, url, headers, body, response) | [
"def",
"_custom_response_edit",
"(",
"self",
",",
"method",
",",
"url",
",",
"headers",
",",
"body",
",",
"response",
")",
":",
"if",
"self",
".",
"get_implementation",
"(",
")",
".",
"is_mock",
"(",
")",
":",
"delay",
"=",
"self",
".",
"get_setting",
"(",
"\"MOCKDATA_DELAY\"",
",",
"0.0",
")",
"time",
".",
"sleep",
"(",
"delay",
")",
"self",
".",
"_edit_mock_response",
"(",
"method",
",",
"url",
",",
"headers",
",",
"body",
",",
"response",
")"
] | This method allows a service to edit a response.
If you want to do this, you probably really want to use
_edit_mock_response - this method will operate on Live resources. | [
"This",
"method",
"allows",
"a",
"service",
"to",
"edit",
"a",
"response",
"."
] | fda9380dceb6355ec6a3123e88c9ec66ae992682 | https://github.com/uw-it-aca/uw-restclients-core/blob/fda9380dceb6355ec6a3123e88c9ec66ae992682/restclients_core/dao.py#L64-L74 | train |
uw-it-aca/uw-restclients-core | restclients_core/dao.py | DAO.postURL | def postURL(self, url, headers={}, body=None):
"""
Request a URL using the HTTP method POST.
"""
return self._load_resource("POST", url, headers, body) | python | def postURL(self, url, headers={}, body=None):
"""
Request a URL using the HTTP method POST.
"""
return self._load_resource("POST", url, headers, body) | [
"def",
"postURL",
"(",
"self",
",",
"url",
",",
"headers",
"=",
"{",
"}",
",",
"body",
"=",
"None",
")",
":",
"return",
"self",
".",
"_load_resource",
"(",
"\"POST\"",
",",
"url",
",",
"headers",
",",
"body",
")"
] | Request a URL using the HTTP method POST. | [
"Request",
"a",
"URL",
"using",
"the",
"HTTP",
"method",
"POST",
"."
] | fda9380dceb6355ec6a3123e88c9ec66ae992682 | https://github.com/uw-it-aca/uw-restclients-core/blob/fda9380dceb6355ec6a3123e88c9ec66ae992682/restclients_core/dao.py#L100-L104 | train |
uw-it-aca/uw-restclients-core | restclients_core/dao.py | DAO.putURL | def putURL(self, url, headers, body=None):
"""
Request a URL using the HTTP method PUT.
"""
return self._load_resource("PUT", url, headers, body) | python | def putURL(self, url, headers, body=None):
"""
Request a URL using the HTTP method PUT.
"""
return self._load_resource("PUT", url, headers, body) | [
"def",
"putURL",
"(",
"self",
",",
"url",
",",
"headers",
",",
"body",
"=",
"None",
")",
":",
"return",
"self",
".",
"_load_resource",
"(",
"\"PUT\"",
",",
"url",
",",
"headers",
",",
"body",
")"
] | Request a URL using the HTTP method PUT. | [
"Request",
"a",
"URL",
"using",
"the",
"HTTP",
"method",
"PUT",
"."
] | fda9380dceb6355ec6a3123e88c9ec66ae992682 | https://github.com/uw-it-aca/uw-restclients-core/blob/fda9380dceb6355ec6a3123e88c9ec66ae992682/restclients_core/dao.py#L106-L110 | train |
uw-it-aca/uw-restclients-core | restclients_core/dao.py | DAO.patchURL | def patchURL(self, url, headers, body):
"""
Request a URL using the HTTP method PATCH.
"""
return self._load_resource("PATCH", url, headers, body) | python | def patchURL(self, url, headers, body):
"""
Request a URL using the HTTP method PATCH.
"""
return self._load_resource("PATCH", url, headers, body) | [
"def",
"patchURL",
"(",
"self",
",",
"url",
",",
"headers",
",",
"body",
")",
":",
"return",
"self",
".",
"_load_resource",
"(",
"\"PATCH\"",
",",
"url",
",",
"headers",
",",
"body",
")"
] | Request a URL using the HTTP method PATCH. | [
"Request",
"a",
"URL",
"using",
"the",
"HTTP",
"method",
"PATCH",
"."
] | fda9380dceb6355ec6a3123e88c9ec66ae992682 | https://github.com/uw-it-aca/uw-restclients-core/blob/fda9380dceb6355ec6a3123e88c9ec66ae992682/restclients_core/dao.py#L112-L116 | train |
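
Hypothetical calls against a configured DAO subclass instance named dao; the paths, headers and payload are illustrative, and the .status attribute on the returned responses is an assumption.

import json

headers = {"Content-Type": "application/json", "Accept": "application/json"}
body = json.dumps({"name": "example"})

created = dao.postURL("/api/v1/things", headers, body)
updated = dao.putURL("/api/v1/things/1", headers, body)
patched = dao.patchURL("/api/v1/things/1", headers, json.dumps({"name": "renamed"}))
print(created.status, updated.status, patched.status)
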
mete0r/hypua2jamo | setup.py | setup_dir | def setup_dir(f):
''' Decorate f to run inside the directory where setup.py resides.
'''
setup_dir = os.path.dirname(os.path.abspath(__file__))
def wrapped(*args, **kwargs):
with chdir(setup_dir):
return f(*args, **kwargs)
return wrapped | python | def setup_dir(f):
''' Decorate f to run inside the directory where setup.py resides.
'''
setup_dir = os.path.dirname(os.path.abspath(__file__))
def wrapped(*args, **kwargs):
with chdir(setup_dir):
return f(*args, **kwargs)
return wrapped | [
"def",
"setup_dir",
"(",
"f",
")",
":",
"setup_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"chdir",
"(",
"setup_dir",
")",
":",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapped"
] | Decorate f to run inside the directory where setup.py resides. | [
"Decorate",
"f",
"to",
"run",
"inside",
"the",
"directory",
"where",
"setup",
".",
"py",
"resides",
"."
] | caceb33a26c27645703d659a82bb1152deef1469 | https://github.com/mete0r/hypua2jamo/blob/caceb33a26c27645703d659a82bb1152deef1469/setup.py#L33-L42 | train |
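
A usage sketch for the setup_dir decorator: read a file relative to setup.py no matter what the current working directory is. It assumes the chdir() context manager referenced above is defined earlier in the same setup.py; the README filename is illustrative.

@setup_dir
def read_long_description():
    with open("README.rst") as f:
        return f.read()

long_description = read_long_description()
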
bitlabstudio/django-feedback-form | feedback_form/templatetags/feedback_tags.py | feedback_form | def feedback_form(context):
"""Template tag to render a feedback form."""
user = None
url = None
if context.get('request'):
url = context['request'].path
if context['request'].user.is_authenticated():
user = context['request'].user
return {
'form': FeedbackForm(url=url, user=user),
'background_color': FEEDBACK_FORM_COLOR,
'text_color': FEEDBACK_FORM_TEXTCOLOR,
'text': FEEDBACK_FORM_TEXT,
} | python | def feedback_form(context):
"""Template tag to render a feedback form."""
user = None
url = None
if context.get('request'):
url = context['request'].path
if context['request'].user.is_authenticated():
user = context['request'].user
return {
'form': FeedbackForm(url=url, user=user),
'background_color': FEEDBACK_FORM_COLOR,
'text_color': FEEDBACK_FORM_TEXTCOLOR,
'text': FEEDBACK_FORM_TEXT,
} | [
"def",
"feedback_form",
"(",
"context",
")",
":",
"user",
"=",
"None",
"url",
"=",
"None",
"if",
"context",
".",
"get",
"(",
"'request'",
")",
":",
"url",
"=",
"context",
"[",
"'request'",
"]",
".",
"path",
"if",
"context",
"[",
"'request'",
"]",
".",
"user",
".",
"is_authenticated",
"(",
")",
":",
"user",
"=",
"context",
"[",
"'request'",
"]",
".",
"user",
"return",
"{",
"'form'",
":",
"FeedbackForm",
"(",
"url",
"=",
"url",
",",
"user",
"=",
"user",
")",
",",
"'background_color'",
":",
"FEEDBACK_FORM_COLOR",
",",
"'text_color'",
":",
"FEEDBACK_FORM_TEXTCOLOR",
",",
"'text'",
":",
"FEEDBACK_FORM_TEXT",
",",
"}"
] | Template tag to render a feedback form. | [
"Template",
"tag",
"to",
"render",
"a",
"feedback",
"form",
"."
] | e3b5acbbde37caddab2da65f0fd5d7f3a8c8c597 | https://github.com/bitlabstudio/django-feedback-form/blob/e3b5acbbde37caddab2da65f0fd5d7f3a8c8c597/feedback_form/templatetags/feedback_tags.py#L11-L24 | train |
nickelkr/yfi | yfi/yql.py | Yql.select | def select(self, *itms):
"""Joins the items to be selected and inserts the current table name"""
if not itms:
itms = ['*']
self.terms.append("select %s from %s" % (', '.join(itms), self.table))
return self | python | def select(self, *itms):
"""Joins the items to be selected and inserts the current table name"""
if not itms:
itms = ['*']
self.terms.append("select %s from %s" % (', '.join(itms), self.table))
return self | [
"def",
"select",
"(",
"self",
",",
"*",
"itms",
")",
":",
"if",
"not",
"itms",
":",
"itms",
"=",
"[",
"'*'",
"]",
"self",
".",
"terms",
".",
"append",
"(",
"\"select %s from %s\"",
"%",
"(",
"', '",
".",
"join",
"(",
"itms",
")",
",",
"self",
".",
"table",
")",
")",
"return",
"self"
] | Joins the items to be selected and inserts the current table name | [
"Joins",
"the",
"items",
"to",
"be",
"selected",
"and",
"inserts",
"the",
"current",
"table",
"name"
] | 720773ea311abe01be83982f26a61ef744f9f648 | https://github.com/nickelkr/yfi/blob/720773ea311abe01be83982f26a61ef744f9f648/yfi/yql.py#L45-L50 | train |
nickelkr/yfi | yfi/yql.py | Yql._in | def _in(self, *lst):
"""Build out the in clause. Using _in due to shadowing for in"""
self.terms.append('in (%s)' % ', '.join(['"%s"' % x for x in lst]))
return self | python | def _in(self, *lst):
"""Build out the in clause. Using _in due to shadowing for in"""
self.terms.append('in (%s)' % ', '.join(['"%s"' % x for x in lst]))
return self | [
"def",
"_in",
"(",
"self",
",",
"*",
"lst",
")",
":",
"self",
".",
"terms",
".",
"append",
"(",
"'in (%s)'",
"%",
"', '",
".",
"join",
"(",
"[",
"'\"%s\"'",
"%",
"x",
"for",
"x",
"in",
"lst",
"]",
")",
")",
"return",
"self"
] | Build out the in clause. Using _in due to shadowing for in | [
"Build",
"out",
"the",
"in",
"clause",
".",
"Using",
"_in",
"due",
"to",
"shadowing",
"for",
"in"
] | 720773ea311abe01be83982f26a61ef744f9f648 | https://github.com/nickelkr/yfi/blob/720773ea311abe01be83982f26a61ef744f9f648/yfi/yql.py#L57-L60 | train |
nickelkr/yfi | yfi/yql.py | Yql.compile | def compile(self):
"""Take all of the 'parts' components and build the complete query to be passed
to Yahoo YQL"""
cs = ""
for term in self.terms:
if cs:
cs += " "
cs += term
self.compiled_str = urllib.parse.quote(cs)
return self | python | def compile(self):
"""Take all of the 'parts' components and build the complete query to be passed
to Yahoo YQL"""
cs = ""
for term in self.terms:
if cs:
cs += " "
cs += term
self.compiled_str = urllib.parse.quote(cs)
return self | [
"def",
"compile",
"(",
"self",
")",
":",
"cs",
"=",
"\"\"",
"for",
"term",
"in",
"self",
".",
"terms",
":",
"if",
"cs",
":",
"cs",
"+=",
"\" \"",
"cs",
"+=",
"term",
"self",
".",
"compiled_str",
"=",
"urllib",
".",
"parse",
".",
"quote",
"(",
"cs",
")",
"return",
"self"
] | Take all of the 'parts' components and build the complete query to be passed
to Yahoo YQL | [
"Take",
"all",
"of",
"the",
"parts",
"components",
"and",
"build",
"the",
"complete",
"query",
"to",
"be",
"passed",
"to",
"Yahoo",
"YQL"
] | 720773ea311abe01be83982f26a61ef744f9f648 | https://github.com/nickelkr/yfi/blob/720773ea311abe01be83982f26a61ef744f9f648/yfi/yql.py#L69-L78 | train |
MoseleyBioinformaticsLab/mwtab | mwtab/fileio.py | read_files | def read_files(*sources, **kwds):
"""Construct a generator that yields file instances.
:param sources: One or more strings representing path to file(s).
"""
filenames = _generate_filenames(sources)
filehandles = _generate_handles(filenames)
for fh, source in filehandles:
try:
f = mwtab.MWTabFile(source)
f.read(fh)
if kwds.get('validate'):
validator.validate_file(mwtabfile=f,
section_schema_mapping=mwschema.section_schema_mapping,
validate_samples=True,
validate_factors=True)
yield f
if VERBOSE:
print("Processed file: {}".format(os.path.abspath(source)))
except Exception as e:
if VERBOSE:
print("Error processing file: ", os.path.abspath(source), "\nReason:", e)
pass | python | def read_files(*sources, **kwds):
"""Construct a generator that yields file instances.
:param sources: One or more strings representing path to file(s).
"""
filenames = _generate_filenames(sources)
filehandles = _generate_handles(filenames)
for fh, source in filehandles:
try:
f = mwtab.MWTabFile(source)
f.read(fh)
if kwds.get('validate'):
validator.validate_file(mwtabfile=f,
section_schema_mapping=mwschema.section_schema_mapping,
validate_samples=True,
validate_factors=True)
yield f
if VERBOSE:
print("Processed file: {}".format(os.path.abspath(source)))
except Exception as e:
if VERBOSE:
print("Error processing file: ", os.path.abspath(source), "\nReason:", e)
pass | [
"def",
"read_files",
"(",
"*",
"sources",
",",
"*",
"*",
"kwds",
")",
":",
"filenames",
"=",
"_generate_filenames",
"(",
"sources",
")",
"filehandles",
"=",
"_generate_handles",
"(",
"filenames",
")",
"for",
"fh",
",",
"source",
"in",
"filehandles",
":",
"try",
":",
"f",
"=",
"mwtab",
".",
"MWTabFile",
"(",
"source",
")",
"f",
".",
"read",
"(",
"fh",
")",
"if",
"kwds",
".",
"get",
"(",
"'validate'",
")",
":",
"validator",
".",
"validate_file",
"(",
"mwtabfile",
"=",
"f",
",",
"section_schema_mapping",
"=",
"mwschema",
".",
"section_schema_mapping",
",",
"validate_samples",
"=",
"True",
",",
"validate_factors",
"=",
"True",
")",
"yield",
"f",
"if",
"VERBOSE",
":",
"print",
"(",
"\"Processed file: {}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"source",
")",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"VERBOSE",
":",
"print",
"(",
"\"Error processing file: \"",
",",
"os",
".",
"path",
".",
"abspath",
"(",
"source",
")",
",",
"\"\\nReason:\"",
",",
"e",
")",
"pass"
] | Construct a generator that yields file instances.
:param sources: One or more strings representing path to file(s). | [
"Construct",
"a",
"generator",
"that",
"yields",
"file",
"instances",
"."
] | 8c0ae8ab2aa621662f99589ed41e481cf8b7152b | https://github.com/MoseleyBioinformaticsLab/mwtab/blob/8c0ae8ab2aa621662f99589ed41e481cf8b7152b/mwtab/fileio.py#L90-L115 | train |
MoseleyBioinformaticsLab/mwtab | mwtab/fileio.py | GenericFilePath.is_url | def is_url(path):
"""Test if path represents a valid URL.
:param str path: Path to file.
:return: True if path is valid url string, False otherwise.
:rtype: :py:obj:`True` or :py:obj:`False`
"""
try:
parse_result = urlparse(path)
return all((parse_result.scheme, parse_result.netloc, parse_result.path))
except ValueError:
return False | python | def is_url(path):
"""Test if path represents a valid URL.
:param str path: Path to file.
:return: True if path is valid url string, False otherwise.
:rtype: :py:obj:`True` or :py:obj:`False`
"""
try:
parse_result = urlparse(path)
return all((parse_result.scheme, parse_result.netloc, parse_result.path))
except ValueError:
return False | [
"def",
"is_url",
"(",
"path",
")",
":",
"try",
":",
"parse_result",
"=",
"urlparse",
"(",
"path",
")",
"return",
"all",
"(",
"(",
"parse_result",
".",
"scheme",
",",
"parse_result",
".",
"netloc",
",",
"parse_result",
".",
"path",
")",
")",
"except",
"ValueError",
":",
"return",
"False"
] | Test if path represents a valid URL.
:param str path: Path to file.
:return: True if path is valid url string, False otherwise.
:rtype: :py:obj:`True` or :py:obj:`False` | [
"Test",
"if",
"path",
"represents",
"a",
"valid",
"URL",
"."
] | 8c0ae8ab2aa621662f99589ed41e481cf8b7152b | https://github.com/MoseleyBioinformaticsLab/mwtab/blob/8c0ae8ab2aa621662f99589ed41e481cf8b7152b/mwtab/fileio.py#L208-L219 | train |
camptocamp/Studio | studio/lib/auth.py | AuthMiddleware | def AuthMiddleware(app):
"""
Add authentication and authorization middleware to the ``app``.
"""
# url_for mustn't be used here because AuthMiddleware is built once at startup,
# url path can be reconstructed only on http requests (based on environ)
basic_redirect_form = BasicRedirectFormPlugin(login_form_url="/signin",
login_handler_path="/login",
post_login_url="/",
logout_handler_path="/logout",
post_logout_url="/signin",
rememberer_name="cookie")
return setup_sql_auth(
app,
user_class = model.User,
group_class = model.Group,
permission_class = model.Permission,
dbsession = model.meta.Session,
form_plugin = basic_redirect_form,
cookie_secret = config['cookie_secret'],
translations = {
'user_name': 'login',
'users': 'users',
'group_name': 'name',
'groups': 'groups',
'permission_name': 'name',
'permissions': 'permissions',
'validate_password': 'validate_password'},
) | python | def AuthMiddleware(app):
"""
Add authentication and authorization middleware to the ``app``.
"""
# url_for mustn't be used here because AuthMiddleware is built once at startup,
# url path can be reconstructed only on http requests (based on environ)
basic_redirect_form = BasicRedirectFormPlugin(login_form_url="/signin",
login_handler_path="/login",
post_login_url="/",
logout_handler_path="/logout",
post_logout_url="/signin",
rememberer_name="cookie")
return setup_sql_auth(
app,
user_class = model.User,
group_class = model.Group,
permission_class = model.Permission,
dbsession = model.meta.Session,
form_plugin = basic_redirect_form,
cookie_secret = config['cookie_secret'],
translations = {
'user_name': 'login',
'users': 'users',
'group_name': 'name',
'groups': 'groups',
'permission_name': 'name',
'permissions': 'permissions',
'validate_password': 'validate_password'},
) | [
"def",
"AuthMiddleware",
"(",
"app",
")",
":",
"# url_for mustn't be used here because AuthMiddleware is built once at startup,",
"# url path can be reconstructed only on http requests (based on environ)",
"basic_redirect_form",
"=",
"BasicRedirectFormPlugin",
"(",
"login_form_url",
"=",
"\"/signin\"",
",",
"login_handler_path",
"=",
"\"/login\"",
",",
"post_login_url",
"=",
"\"/\"",
",",
"logout_handler_path",
"=",
"\"/logout\"",
",",
"post_logout_url",
"=",
"\"/signin\"",
",",
"rememberer_name",
"=",
"\"cookie\"",
")",
"return",
"setup_sql_auth",
"(",
"app",
",",
"user_class",
"=",
"model",
".",
"User",
",",
"group_class",
"=",
"model",
".",
"Group",
",",
"permission_class",
"=",
"model",
".",
"Permission",
",",
"dbsession",
"=",
"model",
".",
"meta",
".",
"Session",
",",
"form_plugin",
"=",
"basic_redirect_form",
",",
"cookie_secret",
"=",
"config",
"[",
"'cookie_secret'",
"]",
",",
"translations",
"=",
"{",
"'user_name'",
":",
"'login'",
",",
"'users'",
":",
"'users'",
",",
"'group_name'",
":",
"'name'",
",",
"'groups'",
":",
"'groups'",
",",
"'permission_name'",
":",
"'name'",
",",
"'permissions'",
":",
"'permissions'",
",",
"'validate_password'",
":",
"'validate_password'",
"}",
",",
")"
] | Add authentication and authorization middleware to the ``app``. | [
"Add",
"authentication",
"and",
"authorization",
"middleware",
"to",
"the",
"app",
"."
] | 43cb7298434fb606b15136801b79b03571a2f27e | https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/auth.py#L154-L183 | train |
camptocamp/Studio | studio/lib/auth.py | BasicRedirectFormPlugin._get_full_path | def _get_full_path(self, path, environ):
"""
Return the full path to ``path`` by prepending the SCRIPT_NAME.
If ``path`` is a URL, do nothing.
"""
if path.startswith('/'):
path = environ.get('SCRIPT_NAME', '') + path
return path | python | def _get_full_path(self, path, environ):
"""
Return the full path to ``path`` by prepending the SCRIPT_NAME.
If ``path`` is a URL, do nothing.
"""
if path.startswith('/'):
path = environ.get('SCRIPT_NAME', '') + path
return path | [
"def",
"_get_full_path",
"(",
"self",
",",
"path",
",",
"environ",
")",
":",
"if",
"path",
".",
"startswith",
"(",
"'/'",
")",
":",
"path",
"=",
"environ",
".",
"get",
"(",
"'SCRIPT_NAME'",
",",
"''",
")",
"+",
"path",
"return",
"path"
] | Return the full path to ``path`` by prepending the SCRIPT_NAME.
If ``path`` is a URL, do nothing. | [
"Return",
"the",
"full",
"path",
"to",
"path",
"by",
"prepending",
"the",
"SCRIPT_NAME",
".",
"If",
"path",
"is",
"a",
"URL",
"do",
"nothing",
"."
] | 43cb7298434fb606b15136801b79b03571a2f27e | https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/auth.py#L130-L139 | train |
camptocamp/Studio | studio/lib/auth.py | BasicRedirectFormPlugin._replace_qs | def _replace_qs(self, url, qs):
"""
Replace the query string of ``url`` with ``qs`` and return the new URL.
"""
url_parts = list(urlparse(url))
url_parts[4] = qs
return urlunparse(url_parts) | python | def _replace_qs(self, url, qs):
"""
Replace the query string of ``url`` with ``qs`` and return the new URL.
"""
url_parts = list(urlparse(url))
url_parts[4] = qs
return urlunparse(url_parts) | [
"def",
"_replace_qs",
"(",
"self",
",",
"url",
",",
"qs",
")",
":",
"url_parts",
"=",
"list",
"(",
"urlparse",
"(",
"url",
")",
")",
"url_parts",
"[",
"4",
"]",
"=",
"qs",
"return",
"urlunparse",
"(",
"url_parts",
")"
] | Replace the query string of ``url`` with ``qs`` and return the new URL. | [
"Replace",
"the",
"query",
"string",
"of",
"url",
"with",
"qs",
"and",
"return",
"the",
"new",
"URL",
"."
] | 43cb7298434fb606b15136801b79b03571a2f27e | https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/auth.py#L141-L148 | train |
pjamesjoyce/lcopt | lcopt/settings.py | LcoptSettings.write | def write(self):
"""write the current settings to the config file"""
with open(storage.config_file, 'w') as cfg:
yaml.dump(self.as_dict(), cfg, default_flow_style=False)
storage.refresh() | python | def write(self):
"""write the current settings to the config file"""
with open(storage.config_file, 'w') as cfg:
yaml.dump(self.as_dict(), cfg, default_flow_style=False)
storage.refresh() | [
"def",
"write",
"(",
"self",
")",
":",
"with",
"open",
"(",
"storage",
".",
"config_file",
",",
"'w'",
")",
"as",
"cfg",
":",
"yaml",
".",
"dump",
"(",
"self",
".",
"as_dict",
"(",
")",
",",
"cfg",
",",
"default_flow_style",
"=",
"False",
")",
"storage",
".",
"refresh",
"(",
")"
] | write the current settings to the config file | [
"write",
"the",
"current",
"settings",
"to",
"the",
"config",
"file"
] | 3f1caca31fece4a3068a384900707e6d21d04597 | https://github.com/pjamesjoyce/lcopt/blob/3f1caca31fece4a3068a384900707e6d21d04597/lcopt/settings.py#L98-L103 | train |
camptocamp/Studio | studio/lib/sa_types.py | JsonString.process_bind_param | def process_bind_param(self, value, dialect):
"""convert value from python object to json"""
if value is not None:
value = simplejson.dumps(value)
return value | python | def process_bind_param(self, value, dialect):
"""convert value from python object to json"""
if value is not None:
value = simplejson.dumps(value)
return value | [
"def",
"process_bind_param",
"(",
"self",
",",
"value",
",",
"dialect",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"value",
"=",
"simplejson",
".",
"dumps",
"(",
"value",
")",
"return",
"value"
] | convert value from python object to json | [
"convert",
"value",
"from",
"python",
"object",
"to",
"json"
] | 43cb7298434fb606b15136801b79b03571a2f27e | https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/sa_types.py#L26-L30 | train |
camptocamp/Studio | studio/lib/sa_types.py | JsonString.process_result_value | def process_result_value(self, value, dialect):
"""convert value from json to a python object"""
if value is not None:
value = simplejson.loads(value)
return value | python | def process_result_value(self, value, dialect):
"""convert value from json to a python object"""
if value is not None:
value = simplejson.loads(value)
return value | [
"def",
"process_result_value",
"(",
"self",
",",
"value",
",",
"dialect",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"value",
"=",
"simplejson",
".",
"loads",
"(",
"value",
")",
"return",
"value"
] | convert value from json to a python object | [
"convert",
"value",
"from",
"json",
"to",
"a",
"python",
"object"
] | 43cb7298434fb606b15136801b79b03571a2f27e | https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/sa_types.py#L32-L36 | train |
SergeySatskiy/cdm-pythonparser | cdmpyparser.py | getBriefModuleInfoFromFile | def getBriefModuleInfoFromFile(fileName):
"""Builds the brief module info from file"""
modInfo = BriefModuleInfo()
_cdmpyparser.getBriefModuleInfoFromFile(modInfo, fileName)
modInfo.flush()
return modInfo | python | def getBriefModuleInfoFromFile(fileName):
"""Builds the brief module info from file"""
modInfo = BriefModuleInfo()
_cdmpyparser.getBriefModuleInfoFromFile(modInfo, fileName)
modInfo.flush()
return modInfo | [
"def",
"getBriefModuleInfoFromFile",
"(",
"fileName",
")",
":",
"modInfo",
"=",
"BriefModuleInfo",
"(",
")",
"_cdmpyparser",
".",
"getBriefModuleInfoFromFile",
"(",
"modInfo",
",",
"fileName",
")",
"modInfo",
".",
"flush",
"(",
")",
"return",
"modInfo"
] | Builds the brief module info from file | [
"Builds",
"the",
"brief",
"module",
"info",
"from",
"file"
] | 7e933aca899b1853d744082313ffc3a8b1154505 | https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/cdmpyparser.py#L609-L614 | train |
SergeySatskiy/cdm-pythonparser | cdmpyparser.py | getBriefModuleInfoFromMemory | def getBriefModuleInfoFromMemory(content):
"""Builds the brief module info from memory"""
modInfo = BriefModuleInfo()
_cdmpyparser.getBriefModuleInfoFromMemory(modInfo, content)
modInfo.flush()
return modInfo | python | def getBriefModuleInfoFromMemory(content):
"""Builds the brief module info from memory"""
modInfo = BriefModuleInfo()
_cdmpyparser.getBriefModuleInfoFromMemory(modInfo, content)
modInfo.flush()
return modInfo | [
"def",
"getBriefModuleInfoFromMemory",
"(",
"content",
")",
":",
"modInfo",
"=",
"BriefModuleInfo",
"(",
")",
"_cdmpyparser",
".",
"getBriefModuleInfoFromMemory",
"(",
"modInfo",
",",
"content",
")",
"modInfo",
".",
"flush",
"(",
")",
"return",
"modInfo"
] | Builds the brief module info from memory | [
"Builds",
"the",
"brief",
"module",
"info",
"from",
"memory"
] | 7e933aca899b1853d744082313ffc3a8b1154505 | https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/cdmpyparser.py#L617-L622 | train |
SergeySatskiy/cdm-pythonparser | cdmpyparser.py | ImportWhat.getDisplayName | def getDisplayName(self):
"""Provides a name for display purpose respecting the alias"""
if self.alias == "":
return self.name
return self.name + " as " + self.alias | python | def getDisplayName(self):
"""Provides a name for display purpose respecting the alias"""
if self.alias == "":
return self.name
return self.name + " as " + self.alias | [
"def",
"getDisplayName",
"(",
"self",
")",
":",
"if",
"self",
".",
"alias",
"==",
"\"\"",
":",
"return",
"self",
".",
"name",
"return",
"self",
".",
"name",
"+",
"\" as \"",
"+",
"self",
".",
"alias"
] | Provides a name for display purpose respecting the alias | [
"Provides",
"a",
"name",
"for",
"display",
"purpose",
"respecting",
"the",
"alias"
] | 7e933aca899b1853d744082313ffc3a8b1154505 | https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/cdmpyparser.py#L124-L128 | train |
SergeySatskiy/cdm-pythonparser | cdmpyparser.py | BriefModuleInfo.flush | def flush(self):
"""Flushes the collected information"""
self.__flushLevel(0)
if self.__lastImport is not None:
self.imports.append(self.__lastImport) | python | def flush(self):
"""Flushes the collected information"""
self.__flushLevel(0)
if self.__lastImport is not None:
self.imports.append(self.__lastImport) | [
"def",
"flush",
"(",
"self",
")",
":",
"self",
".",
"__flushLevel",
"(",
"0",
")",
"if",
"self",
".",
"__lastImport",
"is",
"not",
"None",
":",
"self",
".",
"imports",
".",
"append",
"(",
"self",
".",
"__lastImport",
")"
] | Flushes the collected information | [
"Flushes",
"the",
"collected",
"information"
] | 7e933aca899b1853d744082313ffc3a8b1154505 | https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/cdmpyparser.py#L456-L460 | train |
SergeySatskiy/cdm-pythonparser | cdmpyparser.py | BriefModuleInfo.__flushLevel | def __flushLevel(self, level):
"""Merge the found objects to the required level"""
objectsCount = len(self.objectsStack)
while objectsCount > level:
lastIndex = objectsCount - 1
if lastIndex == 0:
# We have exactly one element in the stack
if self.objectsStack[0].__class__.__name__ == "Class":
self.classes.append(self.objectsStack[0])
else:
self.functions.append(self.objectsStack[0])
self.objectsStack = []
break
# Append to the previous level
if self.objectsStack[lastIndex].__class__.__name__ == "Class":
self.objectsStack[lastIndex - 1].classes. \
append(self.objectsStack[lastIndex])
else:
self.objectsStack[lastIndex - 1].functions. \
append(self.objectsStack[lastIndex])
del self.objectsStack[lastIndex]
objectsCount -= 1 | python | def __flushLevel(self, level):
"""Merge the found objects to the required level"""
objectsCount = len(self.objectsStack)
while objectsCount > level:
lastIndex = objectsCount - 1
if lastIndex == 0:
# We have exactly one element in the stack
if self.objectsStack[0].__class__.__name__ == "Class":
self.classes.append(self.objectsStack[0])
else:
self.functions.append(self.objectsStack[0])
self.objectsStack = []
break
# Append to the previous level
if self.objectsStack[lastIndex].__class__.__name__ == "Class":
self.objectsStack[lastIndex - 1].classes. \
append(self.objectsStack[lastIndex])
else:
self.objectsStack[lastIndex - 1].functions. \
append(self.objectsStack[lastIndex])
del self.objectsStack[lastIndex]
objectsCount -= 1 | [
"def",
"__flushLevel",
"(",
"self",
",",
"level",
")",
":",
"objectsCount",
"=",
"len",
"(",
"self",
".",
"objectsStack",
")",
"while",
"objectsCount",
">",
"level",
":",
"lastIndex",
"=",
"objectsCount",
"-",
"1",
"if",
"lastIndex",
"==",
"0",
":",
"# We have exactly one element in the stack",
"if",
"self",
".",
"objectsStack",
"[",
"0",
"]",
".",
"__class__",
".",
"__name__",
"==",
"\"Class\"",
":",
"self",
".",
"classes",
".",
"append",
"(",
"self",
".",
"objectsStack",
"[",
"0",
"]",
")",
"else",
":",
"self",
".",
"functions",
".",
"append",
"(",
"self",
".",
"objectsStack",
"[",
"0",
"]",
")",
"self",
".",
"objectsStack",
"=",
"[",
"]",
"break",
"# Append to the previous level",
"if",
"self",
".",
"objectsStack",
"[",
"lastIndex",
"]",
".",
"__class__",
".",
"__name__",
"==",
"\"Class\"",
":",
"self",
".",
"objectsStack",
"[",
"lastIndex",
"-",
"1",
"]",
".",
"classes",
".",
"append",
"(",
"self",
".",
"objectsStack",
"[",
"lastIndex",
"]",
")",
"else",
":",
"self",
".",
"objectsStack",
"[",
"lastIndex",
"-",
"1",
"]",
".",
"functions",
".",
"append",
"(",
"self",
".",
"objectsStack",
"[",
"lastIndex",
"]",
")",
"del",
"self",
".",
"objectsStack",
"[",
"lastIndex",
"]",
"objectsCount",
"-=",
"1"
] | Merge the found objects to the required level | [
"Merge",
"the",
"found",
"objects",
"to",
"the",
"required",
"level"
] | 7e933aca899b1853d744082313ffc3a8b1154505 | https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/cdmpyparser.py#L462-L486 | train |
SergeySatskiy/cdm-pythonparser | cdmpyparser.py | BriefModuleInfo._onEncoding | def _onEncoding(self, encString, line, pos, absPosition):
"""Memorizes module encoding"""
self.encoding = Encoding(encString, line, pos, absPosition) | python | def _onEncoding(self, encString, line, pos, absPosition):
"""Memorizes module encoding"""
self.encoding = Encoding(encString, line, pos, absPosition) | [
"def",
"_onEncoding",
"(",
"self",
",",
"encString",
",",
"line",
",",
"pos",
",",
"absPosition",
")",
":",
"self",
".",
"encoding",
"=",
"Encoding",
"(",
"encString",
",",
"line",
",",
"pos",
",",
"absPosition",
")"
] | Memorizes module encoding | [
"Memorizes",
"module",
"encoding"
] | 7e933aca899b1853d744082313ffc3a8b1154505 | https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/cdmpyparser.py#L488-L490 | train |