Dataset columns:

- repo: string (length 7-55)
- path: string (length 4-127)
- func_name: string (length 1-88)
- original_string: string (length 75-19.8k)
- language: 1 class (value)
- code: string (length 75-19.8k)
- code_tokens: sequence
- docstring: string (length 3-17.3k)
- docstring_tokens: sequence
- sha: string (length 40)
- url: string (length 87-242)
- partition: 1 class (value)

repo: tinybike/weightedstats
path: weightedstats/__init__.py
func_name: weighted_mean
language: python

def weighted_mean(data, weights=None):
    """Calculate the weighted mean of a list."""
    if weights is None:
        return mean(data)
    total_weight = float(sum(weights))
    weights = [weight / total_weight for weight in weights]
    w_mean = 0
    for i, weight in enumerate(weights):
        w_mean += weight * data[i]
    return w_mean
"""Calculate the weighted mean of a list."""
if weights is None:
return mean(data)
total_weight = float(sum(weights))
weights = [weight / total_weight for weight in weights]
w_mean = 0
for i, weight in enumerate(weights):
w_mean += weight * data[i]
return w_mean | [
"def",
"weighted_mean",
"(",
"data",
",",
"weights",
"=",
"None",
")",
":",
"if",
"weights",
"is",
"None",
":",
"return",
"mean",
"(",
"data",
")",
"total_weight",
"=",
"float",
"(",
"sum",
"(",
"weights",
")",
")",
"weights",
"=",
"[",
"weight",
"/",
"total_weight",
"for",
"weight",
"in",
"weights",
"]",
"w_mean",
"=",
"0",
"for",
"i",
",",
"weight",
"in",
"enumerate",
"(",
"weights",
")",
":",
"w_mean",
"+=",
"weight",
"*",
"data",
"[",
"i",
"]",
"return",
"w_mean"
] | Calculate the weighted mean of a list. | [
"Calculate",
"the",
"weighted",
"mean",
"of",
"a",
"list",
"."

sha: 0e2638099dba7f288a1553a83e957a95522229da
url: https://github.com/tinybike/weightedstats/blob/0e2638099dba7f288a1553a83e957a95522229da/weightedstats/__init__.py#L43-L52
partition: train
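
A quick usage sketch (assuming the package is importable as `weightedstats`, per the path above):

```python
from weightedstats import weighted_mean

# Equal weights reduce to the ordinary mean.
print(weighted_mean([10, 20, 30], weights=[1, 1, 1]))  # 20.0

# Normalized weights become [0.25, 0.25, 0.5], so the result is
# 0.25*10 + 0.25*20 + 0.5*30 = 22.5.
print(weighted_mean([10, 20, 30], weights=[1, 1, 2]))  # 22.5
```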

repo: tinybike/weightedstats
path: weightedstats/__init__.py
func_name: median
language: python

def median(data):
    """Calculate the median of a list."""
    data.sort()
    num_values = len(data)
    half = num_values // 2
    if num_values % 2:
        return data[half]
    return 0.5 * (data[half-1] + data[half])
"""Calculate the median of a list."""
data.sort()
num_values = len(data)
half = num_values // 2
if num_values % 2:
return data[half]
return 0.5 * (data[half-1] + data[half]) | [
"def",
"median",
"(",
"data",
")",
":",
"data",
".",
"sort",
"(",
")",
"num_values",
"=",
"len",
"(",
"data",
")",
"half",
"=",
"num_values",
"//",
"2",
"if",
"num_values",
"%",
"2",
":",
"return",
"data",
"[",
"half",
"]",
"return",
"0.5",
"*",
"(",
"data",
"[",
"half",
"-",
"1",
"]",
"+",
"data",
"[",
"half",
"]",
")"
] | Calculate the median of a list. | [
"Calculate",
"the",
"median",
"of",
"a",
"list",
"."

sha: 0e2638099dba7f288a1553a83e957a95522229da
url: https://github.com/tinybike/weightedstats/blob/0e2638099dba7f288a1553a83e957a95522229da/weightedstats/__init__.py#L60-L67
partition: train
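
A usage sketch; note that `data.sort()` sorts in place, so the caller's list is mutated as a side effect:

```python
from weightedstats import median

values = [3, 1, 2]
print(median(values))        # 2 (odd count: the middle element)
print(values)                # [1, 2, 3], the input list was sorted in place

print(median([4, 1, 3, 2]))  # 2.5 (even count: mean of the two middle values)
```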

repo: tinybike/weightedstats
path: weightedstats/__init__.py
func_name: weighted_median
language: python

def weighted_median(data, weights=None):
    """Calculate the weighted median of a list."""
    if weights is None:
        return median(data)
    midpoint = 0.5 * sum(weights)
    if any([j > midpoint for j in weights]):
        return data[weights.index(max(weights))]
    if any([j > 0 for j in weights]):
        sorted_data, sorted_weights = zip(*sorted(zip(data, weights)))
        cumulative_weight = 0
        below_midpoint_index = 0
        while cumulative_weight <= midpoint:
            below_midpoint_index += 1
            cumulative_weight += sorted_weights[below_midpoint_index-1]
        cumulative_weight -= sorted_weights[below_midpoint_index-1]
        if cumulative_weight - midpoint < sys.float_info.epsilon:
            bounds = sorted_data[below_midpoint_index-2:below_midpoint_index]
            return sum(bounds) / float(len(bounds))
        return sorted_data[below_midpoint_index-1]
"""Calculate the weighted median of a list."""
if weights is None:
return median(data)
midpoint = 0.5 * sum(weights)
if any([j > midpoint for j in weights]):
return data[weights.index(max(weights))]
if any([j > 0 for j in weights]):
sorted_data, sorted_weights = zip(*sorted(zip(data, weights)))
cumulative_weight = 0
below_midpoint_index = 0
while cumulative_weight <= midpoint:
below_midpoint_index += 1
cumulative_weight += sorted_weights[below_midpoint_index-1]
cumulative_weight -= sorted_weights[below_midpoint_index-1]
if cumulative_weight - midpoint < sys.float_info.epsilon:
bounds = sorted_data[below_midpoint_index-2:below_midpoint_index]
return sum(bounds) / float(len(bounds))
return sorted_data[below_midpoint_index-1] | [
"def",
"weighted_median",
"(",
"data",
",",
"weights",
"=",
"None",
")",
":",
"if",
"weights",
"is",
"None",
":",
"return",
"median",
"(",
"data",
")",
"midpoint",
"=",
"0.5",
"*",
"sum",
"(",
"weights",
")",
"if",
"any",
"(",
"[",
"j",
">",
"midpoint",
"for",
"j",
"in",
"weights",
"]",
")",
":",
"return",
"data",
"[",
"weights",
".",
"index",
"(",
"max",
"(",
"weights",
")",
")",
"]",
"if",
"any",
"(",
"[",
"j",
">",
"0",
"for",
"j",
"in",
"weights",
"]",
")",
":",
"sorted_data",
",",
"sorted_weights",
"=",
"zip",
"(",
"*",
"sorted",
"(",
"zip",
"(",
"data",
",",
"weights",
")",
")",
")",
"cumulative_weight",
"=",
"0",
"below_midpoint_index",
"=",
"0",
"while",
"cumulative_weight",
"<=",
"midpoint",
":",
"below_midpoint_index",
"+=",
"1",
"cumulative_weight",
"+=",
"sorted_weights",
"[",
"below_midpoint_index",
"-",
"1",
"]",
"cumulative_weight",
"-=",
"sorted_weights",
"[",
"below_midpoint_index",
"-",
"1",
"]",
"if",
"cumulative_weight",
"-",
"midpoint",
"<",
"sys",
".",
"float_info",
".",
"epsilon",
":",
"bounds",
"=",
"sorted_data",
"[",
"below_midpoint_index",
"-",
"2",
":",
"below_midpoint_index",
"]",
"return",
"sum",
"(",
"bounds",
")",
"/",
"float",
"(",
"len",
"(",
"bounds",
")",
")",
"return",
"sorted_data",
"[",
"below_midpoint_index",
"-",
"1",
"]"
] | Calculate the weighted median of a list. | [
"Calculate",
"the",
"weighted",
"median",
"of",
"a",
"list",
"."

sha: 0e2638099dba7f288a1553a83e957a95522229da
url: https://github.com/tinybike/weightedstats/blob/0e2638099dba7f288a1553a83e957a95522229da/weightedstats/__init__.py#L69-L87
partition: train
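
A usage sketch illustrating the two main branches (the values are chosen for illustration):

```python
from weightedstats import weighted_median

# One weight exceeds half the total (5 > 0.5 * 7), so its data point
# is returned outright.
print(weighted_median([10, 20, 30], weights=[1, 1, 5]))  # 30

# Cumulative weight reaches the midpoint (0.5 * 10 = 5.0) exactly on the
# boundary between 20 and 30, so the two bounds are averaged.
print(weighted_median([10, 20, 30], weights=[2, 3, 5]))  # 25.0
```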

repo: Robpol86/Flask-Redis-Helper
path: flask_redis.py
func_name: Redis.init_app
language: python

def init_app(self, app, config_prefix=None):
    """Actual method to read Redis settings from app configuration and initialize the StrictRedis instance.

    Positional arguments:
    app -- Flask application instance.
    config_prefix -- Prefix used in config key names in the Flask app's configuration. Useful for applications which
        interface with more than one Redis server. Default value is 'REDIS'. Will be converted to upper case (e.g.
        'REDIS_CACHE').

    Examples:
    REDIS_URL = 'redis://localhost/0'
    REDIS_CACHE_URL = 'redis://localhost/1'
    """
    # Normalize the prefix and add this instance to app.extensions.
    config_prefix = (config_prefix or 'REDIS').rstrip('_').upper()
    if not hasattr(app, 'extensions'):
        app.extensions = dict()
    if config_prefix.lower() in app.extensions:
        raise ValueError('Already registered config prefix {0!r}.'.format(config_prefix))
    app.extensions[config_prefix.lower()] = _RedisState(self, app)
    # Read config.
    args = read_config(app.config, config_prefix)
    # Instantiate StrictRedis.
    super(Redis, self).__init__(**args)
"""Actual method to read Redis settings from app configuration and initialize the StrictRedis instance.
Positional arguments:
app -- Flask application instance.
config_prefix -- Prefix used in config key names in the Flask app's configuration. Useful for applications which
interface with more than one Redis server. Default value is 'REDIS'. Will be converted to upper case (e.g.
'REDIS_CACHE').
Examples:
REDIS_URL = 'redis://localhost/0'
REDIS_CACHE_URL = 'redis://localhost/1'
"""
# Normalize the prefix and add this instance to app.extensions.
config_prefix = (config_prefix or 'REDIS').rstrip('_').upper()
if not hasattr(app, 'extensions'):
app.extensions = dict()
if config_prefix.lower() in app.extensions:
raise ValueError('Already registered config prefix {0!r}.'.format(config_prefix))
app.extensions[config_prefix.lower()] = _RedisState(self, app)
# Read config.
args = read_config(app.config, config_prefix)
# Instantiate StrictRedis.
super(Redis, self).__init__(**args) | [
"def",
"init_app",
"(",
"self",
",",
"app",
",",
"config_prefix",
"=",
"None",
")",
":",
"# Normalize the prefix and add this instance to app.extensions.",
"config_prefix",
"=",
"(",
"config_prefix",
"or",
"'REDIS'",
")",
".",
"rstrip",
"(",
"'_'",
")",
".",
"upper",
"(",
")",
"if",
"not",
"hasattr",
"(",
"app",
",",
"'extensions'",
")",
":",
"app",
".",
"extensions",
"=",
"dict",
"(",
")",
"if",
"config_prefix",
".",
"lower",
"(",
")",
"in",
"app",
".",
"extensions",
":",
"raise",
"ValueError",
"(",
"'Already registered config prefix {0!r}.'",
".",
"format",
"(",
"config_prefix",
")",
")",
"app",
".",
"extensions",
"[",
"config_prefix",
".",
"lower",
"(",
")",
"]",
"=",
"_RedisState",
"(",
"self",
",",
"app",
")",
"# Read config.",
"args",
"=",
"read_config",
"(",
"app",
".",
"config",
",",
"config_prefix",
")",
"# Instantiate StrictRedis.",
"super",
"(",
"Redis",
",",
"self",
")",
".",
"__init__",
"(",
"*",
"*",
"args",
")"
] | Actual method to read Redis settings from app configuration and initialize the StrictRedis instance.
Positional arguments:
app -- Flask application instance.
config_prefix -- Prefix used in config key names in the Flask app's configuration. Useful for applications which
interface with more than one Redis server. Default value is 'REDIS'. Will be converted to upper case (e.g.
'REDIS_CACHE').
Examples:
REDIS_URL = 'redis://localhost/0'
REDIS_CACHE_URL = 'redis://localhost/1' | [
"Actual",
"method",
"to",
"read",
"Redis",
"settings",
"from",
"app",
"configuration",
"and",
"initialize",
"the",
"StrictRedis",
"instance",
"."

sha: 5708b1287274ab5f09a57bba25b6f1e79cea9148
url: https://github.com/Robpol86/Flask-Redis-Helper/blob/5708b1287274ab5f09a57bba25b6f1e79cea9148/flask_redis.py#L187-L211
partition: train
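
A minimal sketch of the two-prefix pattern the docstring describes. It assumes the `Redis` class above is importable as `flask_redis.Redis` and that a bare `Redis()` can be constructed before `init_app` (the usual Flask extension pattern):

```python
from flask import Flask
from flask_redis import Redis  # the class this method belongs to

app = Flask(__name__)
app.config['REDIS_URL'] = 'redis://localhost/0'         # read by the default 'REDIS' prefix
app.config['REDIS_CACHE_URL'] = 'redis://localhost/1'   # read by the 'REDIS_CACHE' prefix

redis_main = Redis()
redis_main.init_app(app)                                 # registers under app.extensions['redis']

redis_cache = Redis()
redis_cache.init_app(app, config_prefix='REDIS_CACHE')   # registers under app.extensions['redis_cache']
```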

repo: Julian/Filesystems
path: filesystems/common.py
func_name: _recursive_remove
language: python

def _recursive_remove(fs, path):
    """
    A recursive, non-atomic directory removal.
    """
    if not fs.is_link(path=path) and fs.is_dir(path=path):
        for child in fs.children(path=path):
            _recursive_remove(fs=fs, path=child)
        fs.remove_empty_directory(path=path)
    else:
        fs.remove_file(path=path)
"""
A recursive, non-atomic directory removal.
"""
if not fs.is_link(path=path) and fs.is_dir(path=path):
for child in fs.children(path=path):
_recursive_remove(fs=fs, path=child)
fs.remove_empty_directory(path=path)
else:
fs.remove_file(path=path) | [
"def",
"_recursive_remove",
"(",
"fs",
",",
"path",
")",
":",
"if",
"not",
"fs",
".",
"is_link",
"(",
"path",
"=",
"path",
")",
"and",
"fs",
".",
"is_dir",
"(",
"path",
"=",
"path",
")",
":",
"for",
"child",
"in",
"fs",
".",
"children",
"(",
"path",
"=",
"path",
")",
":",
"_recursive_remove",
"(",
"fs",
"=",
"fs",
",",
"path",
"=",
"child",
")",
"fs",
".",
"remove_empty_directory",
"(",
"path",
"=",
"path",
")",
"else",
":",
"fs",
".",
"remove_file",
"(",
"path",
"=",
"path",
")"
] | A recursive, non-atomic directory removal. | [
"A",
"recursive",
"non",
"-",
"atomic",
"directory",
"removal",
"."

sha: f366e877d6970712bb91d47167209ee2d1e489c5
url: https://github.com/Julian/Filesystems/blob/f366e877d6970712bb91d47167209ee2d1e489c5/filesystems/common.py#L37-L46
partition: train

repo: Julian/Filesystems
path: filesystems/common.py
func_name: create
language: python

def create(
    name,
    create_file,
    open_file,
    remove_file,
    create_directory,
    list_directory,
    remove_empty_directory,
    temporary_directory,
    stat,
    lstat,
    link,
    readlink,
    realpath=_realpath,
    remove=_recursive_remove,
):
    """
    Create a new kind of filesystem.
    """
    methods = dict(
        create=create_file,
        open=lambda fs, path, mode="r": open_file(
            fs=fs, path=path, mode=mode,
        ),
        remove_file=remove_file,
        create_directory=create_directory,
        list_directory=list_directory,
        remove_empty_directory=remove_empty_directory,
        temporary_directory=temporary_directory,
        get_contents=_get_contents,
        set_contents=_set_contents,
        create_with_contents=_create_with_contents,
        remove=remove,
        removing=_removing,
        stat=stat,
        lstat=lstat,
        link=link,
        readlink=readlink,
        realpath=realpath,
        exists=_exists,
        is_dir=_is_dir,
        is_file=_is_file,
        is_link=_is_link,
        touch=_touch,
        children=_children,
        glob_children=_glob_children,
    )
    return attr.s(hash=True)(type(name, (object,), methods))

sha: f366e877d6970712bb91d47167209ee2d1e489c5
url: https://github.com/Julian/Filesystems/blob/f366e877d6970712bb91d47167209ee2d1e489c5/filesystems/common.py#L49-L110
partition: train

repo: Julian/Filesystems
path: filesystems/common.py
func_name: _exists
language: python

def _exists(fs, path):
    """
    Check that the given path exists on the filesystem.

    Note that unlike `os.path.exists`, we *do* propagate file system errors
    other than a non-existent path or non-existent directory component.

    E.g., should EPERM or ELOOP be raised, an exception will bubble up.
    """
    try:
        fs.stat(path)
    except (exceptions.FileNotFound, exceptions.NotADirectory):
        return False
    return True
"""
Check that the given path exists on the filesystem.
Note that unlike `os.path.exists`, we *do* propagate file system errors
other than a non-existent path or non-existent directory component.
E.g., should EPERM or ELOOP be raised, an exception will bubble up.
"""
try:
fs.stat(path)
except (exceptions.FileNotFound, exceptions.NotADirectory):
return False
return True | [
"def",
"_exists",
"(",
"fs",
",",
"path",
")",
":",
"try",
":",
"fs",
".",
"stat",
"(",
"path",
")",
"except",
"(",
"exceptions",
".",
"FileNotFound",
",",
"exceptions",
".",
"NotADirectory",
")",
":",
"return",
"False",
"return",
"True"
] | Check that the given path exists on the filesystem.
Note that unlike `os.path.exists`, we *do* propagate file system errors
other than a non-existent path or non-existent directory component.
E.g., should EPERM or ELOOP be raised, an exception will bubble up. | [
"Check",
"that",
"the",
"given",
"path",
"exists",
"on",
"the",
"filesystem",
"."

sha: f366e877d6970712bb91d47167209ee2d1e489c5
url: https://github.com/Julian/Filesystems/blob/f366e877d6970712bb91d47167209ee2d1e489c5/filesystems/common.py#L157-L170
partition: train
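
The same "swallow only the expected errors" pattern, sketched with the standard library for contrast with `os.path.exists` (which returns False on *any* `OSError`). This is an analogue of the idea, not the Filesystems API itself:

```python
import errno
import os

def exists_strict(path):
    """Treat only ENOENT/ENOTDIR as 'does not exist'; let EPERM, ELOOP, ... bubble up."""
    try:
        os.stat(path)
    except OSError as err:
        if err.errno in (errno.ENOENT, errno.ENOTDIR):
            return False
        raise  # os.path.exists would silently return False here
    return True
```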

repo: Julian/Filesystems
path: filesystems/common.py
func_name: _is_dir
language: python

def _is_dir(fs, path):
    """
    Check that the given path is a directory.

    Note that unlike `os.path.isdir`, we *do* propagate file system errors
    other than a non-existent path or non-existent directory component.

    E.g., should EPERM or ELOOP be raised, an exception will bubble up.
    """
    try:
        return stat.S_ISDIR(fs.stat(path).st_mode)
    except exceptions.FileNotFound:
        return False
"""
Check that the given path is a directory.
Note that unlike `os.path.isdir`, we *do* propagate file system errors
other than a non-existent path or non-existent directory component.
E.g., should EPERM or ELOOP be raised, an exception will bubble up.
"""
try:
return stat.S_ISDIR(fs.stat(path).st_mode)
except exceptions.FileNotFound:
return False | [
"def",
"_is_dir",
"(",
"fs",
",",
"path",
")",
":",
"try",
":",
"return",
"stat",
".",
"S_ISDIR",
"(",
"fs",
".",
"stat",
"(",
"path",
")",
".",
"st_mode",
")",
"except",
"exceptions",
".",
"FileNotFound",
":",
"return",
"False"
] | Check that the given path is a directory.
Note that unlike `os.path.isdir`, we *do* propagate file system errors
other than a non-existent path or non-existent directory component.
E.g., should EPERM or ELOOP be raised, an exception will bubble up. | [
"Check",
"that",
"the",
"given",
"path",
"is",
"a",
"directory",
"."

sha: f366e877d6970712bb91d47167209ee2d1e489c5
url: https://github.com/Julian/Filesystems/blob/f366e877d6970712bb91d47167209ee2d1e489c5/filesystems/common.py#L173-L186
partition: train

repo: Julian/Filesystems
path: filesystems/common.py
func_name: _is_file
language: python

def _is_file(fs, path):
    """
    Check that the given path is a file.

    Note that unlike `os.path.isfile`, we *do* propagate file system errors
    other than a non-existent path or non-existent directory component.

    E.g., should EPERM or ELOOP be raised, an exception will bubble up.
    """
    try:
        return stat.S_ISREG(fs.stat(path).st_mode)
    except exceptions.FileNotFound:
        return False
"""
Check that the given path is a file.
Note that unlike `os.path.isfile`, we *do* propagate file system errors
other than a non-existent path or non-existent directory component.
E.g., should EPERM or ELOOP be raised, an exception will bubble up.
"""
try:
return stat.S_ISREG(fs.stat(path).st_mode)
except exceptions.FileNotFound:
return False | [
"def",
"_is_file",
"(",
"fs",
",",
"path",
")",
":",
"try",
":",
"return",
"stat",
".",
"S_ISREG",
"(",
"fs",
".",
"stat",
"(",
"path",
")",
".",
"st_mode",
")",
"except",
"exceptions",
".",
"FileNotFound",
":",
"return",
"False"
] | Check that the given path is a file.
Note that unlike `os.path.isfile`, we *do* propagate file system errors
other than a non-existent path or non-existent directory component.
E.g., should EPERM or ELOOP be raised, an exception will bubble up. | [
"Check",
"that",
"the",
"given",
"path",
"is",
"a",
"file",
"."

sha: f366e877d6970712bb91d47167209ee2d1e489c5
url: https://github.com/Julian/Filesystems/blob/f366e877d6970712bb91d47167209ee2d1e489c5/filesystems/common.py#L189-L201
partition: train

repo: Julian/Filesystems
path: filesystems/common.py
func_name: _is_link
language: python

def _is_link(fs, path):
    """
    Check that the given path is a symbolic link.

    Note that unlike `os.path.islink`, we *do* propagate file system errors
    other than a non-existent path or non-existent directory component.

    E.g., should EPERM or ELOOP be raised, an exception will bubble up.
    """
    try:
        return stat.S_ISLNK(fs.lstat(path).st_mode)
    except exceptions.FileNotFound:
        return False
"""
Check that the given path is a symbolic link.
Note that unlike `os.path.islink`, we *do* propagate file system errors
other than a non-existent path or non-existent directory component.
E.g., should EPERM or ELOOP be raised, an exception will bubble up.
"""
try:
return stat.S_ISLNK(fs.lstat(path).st_mode)
except exceptions.FileNotFound:
return False | [
"def",
"_is_link",
"(",
"fs",
",",
"path",
")",
":",
"try",
":",
"return",
"stat",
".",
"S_ISLNK",
"(",
"fs",
".",
"lstat",
"(",
"path",
")",
".",
"st_mode",
")",
"except",
"exceptions",
".",
"FileNotFound",
":",
"return",
"False"
] | Check that the given path is a symbolic link.
Note that unlike `os.path.islink`, we *do* propagate file system errors
other than a non-existent path or non-existent directory component.
E.g., should EPERM or ELOOP be raised, an exception will bubble up. | [
"Check",
"that",
"the",
"given",
"path",
"is",
"a",
"symbolic",
"link",
"."

sha: f366e877d6970712bb91d47167209ee2d1e489c5
url: https://github.com/Julian/Filesystems/blob/f366e877d6970712bb91d47167209ee2d1e489c5/filesystems/common.py#L204-L217
partition: train

repo: rycus86/ghost-client
path: ghost_client/models.py
func_name: Controller.list
language: python

def list(self, **kwargs):
    """
    Fetch a list of resources from the API.

    :param kwargs: Parameters for the request
        (see from and below https://api.ghost.org/docs/limit)
    :return: The list of items returned by the API
        wrapped as `Model` objects with pagination by `ModelList`
    """
    return ModelList(
        self.ghost.execute_get('%s/' % self._type_name, **kwargs),
        self._type_name, self, kwargs, model_type=self._model_type
    )
"""
Fetch a list of resources from the API.
:param kwargs: Parameters for the request
(see from and below https://api.ghost.org/docs/limit)
:return: The list of items returned by the API
wrapped as `Model` objects with pagination by `ModelList`
"""
return ModelList(
self.ghost.execute_get('%s/' % self._type_name, **kwargs),
self._type_name, self, kwargs, model_type=self._model_type
) | [
"def",
"list",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"ModelList",
"(",
"self",
".",
"ghost",
".",
"execute_get",
"(",
"'%s/'",
"%",
"self",
".",
"_type_name",
",",
"*",
"*",
"kwargs",
")",
",",
"self",
".",
"_type_name",
",",
"self",
",",
"kwargs",
",",
"model_type",
"=",
"self",
".",
"_model_type",
")"
] | Fetch a list of resources from the API.
:param kwargs: Parameters for the request
(see from and below https://api.ghost.org/docs/limit)
:return: The list of items returned by the API
wrapped as `Model` objects with pagination by `ModelList` | [
"Fetch",
"a",
"list",
"of",
"resources",
"from",
"the",
"API",
"."

sha: 863d332801d2c1b8e7ad4573c7b16db78a7f8c8d
url: https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/models.py#L141-L154
partition: train
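
A hypothetical usage sketch. The `Ghost` client class, its constructor arguments, and the `posts` controller attribute are assumptions inferred from the controller and model names in this module, not confirmed by it:

```python
from ghost_client import Ghost   # hypothetical import path

ghost = Ghost('https://blog.example.com')  # hypothetical constructor
page = ghost.posts.list(limit=5)           # kwargs map to Ghost API query params
for post in page:
    print(post['title'])
```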

repo: rycus86/ghost-client
path: ghost_client/models.py
func_name: Controller.get
language: python

def get(self, id=None, slug=None, **kwargs):
    """
    Fetch a resource from the API.
    Either the `id` or the `slug` has to be present.

    :param id: The ID of the resource
    :param slug: The slug of the resource
    :param kwargs: Parameters for the request
        (see from and below https://api.ghost.org/docs/limit)
    :return: The item returned by the API
        wrapped as a `Model` object
    """
    if id:
        items = self.ghost.execute_get('%s/%s/' % (self._type_name, id), **kwargs)
    elif slug:
        items = self.ghost.execute_get('%s/slug/%s/' % (self._type_name, slug), **kwargs)
    else:
        raise GhostException(
            500, 'Either the ID or the Slug of the resource needs to be specified'
        )
    return self._model_type(items[self._type_name][0])
"""
Fetch a resource from the API.
Either the `id` or the `slug` has to be present.
:param id: The ID of the resource
:param slug: The slug of the resource
:param kwargs: Parameters for the request
(see from and below https://api.ghost.org/docs/limit)
:return: The item returned by the API
wrapped as a `Model` object
"""
if id:
items = self.ghost.execute_get('%s/%s/' % (self._type_name, id), **kwargs)
elif slug:
items = self.ghost.execute_get('%s/slug/%s/' % (self._type_name, slug), **kwargs)
else:
raise GhostException(
500, 'Either the ID or the Slug of the resource needs to be specified'
)
return self._model_type(items[self._type_name][0]) | [
"def",
"get",
"(",
"self",
",",
"id",
"=",
"None",
",",
"slug",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"id",
":",
"items",
"=",
"self",
".",
"ghost",
".",
"execute_get",
"(",
"'%s/%s/'",
"%",
"(",
"self",
".",
"_type_name",
",",
"id",
")",
",",
"*",
"*",
"kwargs",
")",
"elif",
"slug",
":",
"items",
"=",
"self",
".",
"ghost",
".",
"execute_get",
"(",
"'%s/slug/%s/'",
"%",
"(",
"self",
".",
"_type_name",
",",
"slug",
")",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"GhostException",
"(",
"500",
",",
"'Either the ID or the Slug of the resource needs to be specified'",
")",
"return",
"self",
".",
"_model_type",
"(",
"items",
"[",
"self",
".",
"_type_name",
"]",
"[",
"0",
"]",
")"
] | Fetch a resource from the API.
Either the `id` or the `slug` has to be present.
:param id: The ID of the resource
:param slug: The slug of the resource
:param kwargs: Parameters for the request
(see from and below https://api.ghost.org/docs/limit)
:return: The item returned by the API
wrapped as a `Model` object | [
"Fetch",
"a",
"resource",
"from",
"the",
"API",
".",
"Either",
"the",
"id",
"or",
"the",
"slug",
"has",
"to",
"be",
"present",
"."

sha: 863d332801d2c1b8e7ad4573c7b16db78a7f8c8d
url: https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/models.py#L156-L180
partition: train

repo: rycus86/ghost-client
path: ghost_client/models.py
func_name: Controller.create
language: python

def create(self, **kwargs):
    """
    Creates a new resource.

    :param kwargs: The properties of the resource
    :return: The created item returned by the API
        wrapped as a `Model` object
    """
    response = self.ghost.execute_post('%s/' % self._type_name, json={
        self._type_name: [
            kwargs
        ]
    })
    return self._model_type(response.get(self._type_name)[0])
"""
Creates a new resource.
:param kwargs: The properties of the resource
:return: The created item returned by the API
wrapped as a `Model` object
"""
response = self.ghost.execute_post('%s/' % self._type_name, json={
self._type_name: [
kwargs
]
})
return self._model_type(response.get(self._type_name)[0]) | [
"def",
"create",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"response",
"=",
"self",
".",
"ghost",
".",
"execute_post",
"(",
"'%s/'",
"%",
"self",
".",
"_type_name",
",",
"json",
"=",
"{",
"self",
".",
"_type_name",
":",
"[",
"kwargs",
"]",
"}",
")",
"return",
"self",
".",
"_model_type",
"(",
"response",
".",
"get",
"(",
"self",
".",
"_type_name",
")",
"[",
"0",
"]",
")"
] | Creates a new resource.
:param kwargs: The properties of the resource
:return: The created item returned by the API
wrapped as a `Model` object | [
"Creates",
"a",
"new",
"resource",
"."

sha: 863d332801d2c1b8e7ad4573c7b16db78a7f8c8d
url: https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/models.py#L182-L197
partition: train

repo: rycus86/ghost-client
path: ghost_client/models.py
func_name: PostController.create
language: python

def create(self, **kwargs):
    """
    Creates a new post.
    When the `markdown` property is present, it will be
    automatically converted to `mobiledoc` on v1.+ of the server.

    :param kwargs: The properties of the post
    :return: The created `Post` object
    """
    return super(PostController, self).create(**self._with_markdown(kwargs))
"""
Creates a new post.
When the `markdown` property is present, it will be
automatically converted to `mobiledoc` on v1.+ of the server.
:param kwargs: The properties of the post
:return: The created `Post` object
"""
return super(PostController, self).create(**self._with_markdown(kwargs)) | [
"def",
"create",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"super",
"(",
"PostController",
",",
"self",
")",
".",
"create",
"(",
"*",
"*",
"self",
".",
"_with_markdown",
"(",
"kwargs",
")",
")"
] | Creates a new post.
When the `markdown` property is present, it will be
automatically converted to `mobiledoc` on v1.+ of the server.
:param kwargs: The properties of the post
:return: The created `Post` object | [
"Creates",
"a",
"new",
"post",
".",
"When",
"the",
"markdown",
"property",
"is",
"present",
"it",
"will",
"be",
"automatically",
"converted",
"to",
"mobiledoc",
"on",
"v1",
".",
"+",
"of",
"the",
"server",
"."

sha: 863d332801d2c1b8e7ad4573c7b16db78a7f8c8d
url: https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/models.py#L242-L252
partition: train
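
Continuing the hypothetical client sketch above, creating a post from Markdown. The `markdown` keyword is handled by `_with_markdown` per the docstring; `title` and `status` are assumed here to be standard Ghost post fields:

```python
post = ghost.posts.create(
    title='Hello world',
    markdown='# Heading\n\nBody text.',  # converted to mobiledoc on Ghost v1+
    status='draft',
)
print(post['id'])
```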

repo: rycus86/ghost-client
path: ghost_client/models.py
func_name: PostController.update
language: python

def update(self, id, **kwargs):
    """
    Updates an existing post.
    When the `markdown` property is present, it will be
    automatically converted to `mobiledoc` on v1.+ of the server.

    :param id: The ID of the existing post
    :param kwargs: The properties of the post to change
    :return: The updated `Post` object
    """
    return super(PostController, self).update(id, **self._with_markdown(kwargs))
"""
Updates an existing post.
When the `markdown` property is present, it will be
automatically converted to `mobiledoc` on v1.+ of the server.
:param id: The ID of the existing post
:param kwargs: The properties of the post to change
:return: The updated `Post` object
"""
return super(PostController, self).update(id, **self._with_markdown(kwargs)) | [
"def",
"update",
"(",
"self",
",",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"super",
"(",
"PostController",
",",
"self",
")",
".",
"update",
"(",
"id",
",",
"*",
"*",
"self",
".",
"_with_markdown",
"(",
"kwargs",
")",
")"
] | Updates an existing post.
When the `markdown` property is present, it will be
automatically converted to `mobiledoc` on v1.+ of the server.
:param id: The ID of the existing post
:param kwargs: The properties of the post to change
:return: The updated `Post` object | [
"Updates",
"an",
"existing",
"post",
".",
"When",
"the",
"markdown",
"property",
"is",
"present",
"it",
"will",
"be",
"automatically",
"converted",
"to",
"mobiledoc",
"on",
"v1",
".",
"+",
"of",
"the",
"server",
"."

sha: 863d332801d2c1b8e7ad4573c7b16db78a7f8c8d
url: https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/models.py#L254-L265
partition: train

repo: ldomic/lintools
path: lintools/analysis/residence_time.py
func_name: Residence_time.define_residues_for_plotting_traj
language: python

def define_residues_for_plotting_traj(self, analysis_cutoff):
    """
    Since plotting all residues that have made contact with the ligand over a lengthy
    simulation is not always feasible or desirable, only the residues that
    have been in contact with the ligand for a long amount of time will be plotted in the
    final image.

    The function first determines the fraction of time each residue spends in the
    vicinity of the ligand for each trajectory. Once the data is processed, the analysis
    cutoff decides whether or not these residues are plotted based on the total
    frequency this residue has spent in the vicinity of the ligand. The analysis
    cutoff is supplied for a single trajectory and is therefore multiplied.

    Takes:
        * analysis_cutoff * - a fraction (of time) a residue has to spend in the
            vicinity of the ligand for a single traj
    Output:
        * self.frequency * - frequency per residue per trajectory
        * topol_data.dict_of_plotted_res * - the residues that should be plotted in
            the final image with the frequency for each trajectory (used for plotting)
    """
    self.residue_counts_fraction = {}
    # Calculate the fraction of time a residue spends in each simulation
    for traj in self.residue_counts:
        self.residue_counts_fraction[traj] = {residue: float(values)/len(self.contacts_per_timeframe[traj]) for residue, values in self.residue_counts[traj].items()}
    for traj in self.residue_counts_fraction:
        for residue in self.residue_counts_fraction[traj]:
            self.frequency[residue].append(self.residue_counts_fraction[traj][residue])
    self.topology_data.dict_of_plotted_res = {i: self.frequency[i] for i in self.frequency if sum(self.frequency[i]) > (int(len(self.trajectory))*analysis_cutoff)}
    assert len(self.topology_data.dict_of_plotted_res) != 0, "Nothing to draw for this ligand:(residue number: " + str(self.topology_data.universe.ligand.resids[0]) + " on the chain " + str(self.topology_data.universe.ligand.segids[0]) + ") - try reducing the analysis cutoff."
"""
Since plotting all residues that have made contact with the ligand over a lenghty
simulation is not always feasible or desirable. Therefore, only the residues that
have been in contact with ligand for a long amount of time will be plotted in the
final image.
The function first determines the fraction of time each residue spends in the
vicinity of the ligand for each trajectory. Once the data is processed, analysis
cutoff decides whether or not these residues are plotted based on the total
frequency this residue has spent in the vicinity of the ligand. The analysis
cutoff is supplied for a single trajectory and is therefore multiplied.
Takes:
* analysis_cutoff * - a fraction (of time) a residue has to spend in the
vicinity of the ligand for a single traj
Output:
* self.frequency * - frequency per residue per trajectory
* topol_data.dict_of_plotted_res * - the residues that should be plotted in
the final image with the frequency for each trajectory (used for plotting)
"""
self.residue_counts_fraction = {}
#Calculate the fraction of time a residue spends in each simulation
for traj in self.residue_counts:
self.residue_counts_fraction[traj] = {residue:float(values)/len(self.contacts_per_timeframe[traj]) for residue,values in self.residue_counts[traj].items()}
for traj in self.residue_counts_fraction:
for residue in self.residue_counts_fraction[traj]:
self.frequency[residue].append(self.residue_counts_fraction[traj][residue])
self.topology_data.dict_of_plotted_res = {i:self.frequency[i] for i in self.frequency if sum(self.frequency[i])>(int(len(self.trajectory))*analysis_cutoff)}
assert len(self.topology_data.dict_of_plotted_res)!=0,"Nothing to draw for this ligand:(residue number: "+ str(self.topology_data.universe.ligand.resids[0]) +" on the chain "+ str(self.topology_data.universe.ligand.segids[0]) +") - try reducing the analysis cutoff." | [
"def",
"define_residues_for_plotting_traj",
"(",
"self",
",",
"analysis_cutoff",
")",
":",
"self",
".",
"residue_counts_fraction",
"=",
"{",
"}",
"#Calculate the fraction of time a residue spends in each simulation",
"for",
"traj",
"in",
"self",
".",
"residue_counts",
":",
"self",
".",
"residue_counts_fraction",
"[",
"traj",
"]",
"=",
"{",
"residue",
":",
"float",
"(",
"values",
")",
"/",
"len",
"(",
"self",
".",
"contacts_per_timeframe",
"[",
"traj",
"]",
")",
"for",
"residue",
",",
"values",
"in",
"self",
".",
"residue_counts",
"[",
"traj",
"]",
".",
"items",
"(",
")",
"}",
"for",
"traj",
"in",
"self",
".",
"residue_counts_fraction",
":",
"for",
"residue",
"in",
"self",
".",
"residue_counts_fraction",
"[",
"traj",
"]",
":",
"self",
".",
"frequency",
"[",
"residue",
"]",
".",
"append",
"(",
"self",
".",
"residue_counts_fraction",
"[",
"traj",
"]",
"[",
"residue",
"]",
")",
"self",
".",
"topology_data",
".",
"dict_of_plotted_res",
"=",
"{",
"i",
":",
"self",
".",
"frequency",
"[",
"i",
"]",
"for",
"i",
"in",
"self",
".",
"frequency",
"if",
"sum",
"(",
"self",
".",
"frequency",
"[",
"i",
"]",
")",
">",
"(",
"int",
"(",
"len",
"(",
"self",
".",
"trajectory",
")",
")",
"*",
"analysis_cutoff",
")",
"}",
"assert",
"len",
"(",
"self",
".",
"topology_data",
".",
"dict_of_plotted_res",
")",
"!=",
"0",
",",
"\"Nothing to draw for this ligand:(residue number: \"",
"+",
"str",
"(",
"self",
".",
"topology_data",
".",
"universe",
".",
"ligand",
".",
"resids",
"[",
"0",
"]",
")",
"+",
"\" on the chain \"",
"+",
"str",
"(",
"self",
".",
"topology_data",
".",
"universe",
".",
"ligand",
".",
"segids",
"[",
"0",
"]",
")",
"+",
"\") - try reducing the analysis cutoff.\""
] | Since plotting all residues that have made contact with the ligand over a lenghty
simulation is not always feasible or desirable. Therefore, only the residues that
have been in contact with ligand for a long amount of time will be plotted in the
final image.
The function first determines the fraction of time each residue spends in the
vicinity of the ligand for each trajectory. Once the data is processed, analysis
cutoff decides whether or not these residues are plotted based on the total
frequency this residue has spent in the vicinity of the ligand. The analysis
cutoff is supplied for a single trajectory and is therefore multiplied.
Takes:
* analysis_cutoff * - a fraction (of time) a residue has to spend in the
vicinity of the ligand for a single traj
Output:
* self.frequency * - frequency per residue per trajectory
* topol_data.dict_of_plotted_res * - the residues that should be plotted in
the final image with the frequency for each trajectory (used for plotting) | [
"Since",
"plotting",
"all",
"residues",
"that",
"have",
"made",
"contact",
"with",
"the",
"ligand",
"over",
"a",
"lenghty",
"simulation",
"is",
"not",
"always",
"feasible",
"or",
"desirable",
".",
"Therefore",
"only",
"the",
"residues",
"that",
"have",
"been",
"in",
"contact",
"with",
"ligand",
"for",
"a",
"long",
"amount",
"of",
"time",
"will",
"be",
"plotted",
"in",
"the",
"final",
"image",
"."

sha: d825a4a7b35f3f857d3b81b46c9aee72b0ec697a
url: https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/analysis/residence_time.py#L93-L125
partition: train
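
A self-contained worked example of the cutoff rule: a residue is kept only if the sum of its per-trajectory occupancy fractions exceeds n_trajectories * analysis_cutoff. The residue keys and fractions below are illustrative:

```python
# Per-residue occupancy fraction in each of 3 trajectories (made-up values).
frequency = {
    ("LEU", "64", "A"): [0.80, 0.75, 0.90],
    ("SER", "12", "A"): [0.20, 0.05, 0.10],
}
n_traj = 3
analysis_cutoff = 0.3  # per-trajectory threshold

plotted = {res: f for res, f in frequency.items()
           if sum(f) > n_traj * analysis_cutoff}  # total threshold: 3 * 0.3 = 0.9
print(plotted)  # LEU 64 survives (2.45 > 0.9); SER 12 is dropped (0.35 <= 0.9)
```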

repo: ldomic/lintools
path: lintools/analysis/pistacking.py
func_name: PiStacking.detect_aromatic_rings_in_ligand
language: python

def detect_aromatic_rings_in_ligand(self):
    """Using rdkit to detect aromatic rings in ligand - size 4-6 atoms and all atoms are part of the ring. Saves this data in self.ligrings."""
    self.ligrings = {}
    try:
        ring_info = self.topology_data.mol.GetRingInfo()
        self.ligand_ring_num = ring_info.NumRings()
    except Exception as e:
        m = Chem.MolFromPDBFile("lig.pdb")
        ring_info = m.GetRingInfo()
        self.ligand_ring_num = ring_info.NumRings()
    i = 0
    for ring in range(self.ligand_ring_num):
        if 4 < len(ring_info.AtomRings()[ring]) <= 6 and False not in [self.topology_data.mol.GetAtomWithIdx(x).GetIsAromatic() for x in ring_info.AtomRings()[ring]]:  # narrow ring definition
            atom_ids_in_ring = []
            for atom in ring_info.AtomRings()[ring]:
                atom_ids_in_ring.append(self.topology_data.universe.ligand.atoms[atom].name)
            self.ligrings[i] = atom_ids_in_ring
            i += 1
"""Using rdkit to detect aromatic rings in ligand - size 4-6 atoms and all atoms are part of the ring. Saves this data in self.ligrings."""
self.ligrings = {}
try:
ring_info = self.topology_data.mol.GetRingInfo()
self.ligand_ring_num = ring_info.NumRings()
except Exception as e:
m = Chem.MolFromPDBFile("lig.pdb")
ring_info = m.GetRingInfo()
self.ligand_ring_num = ring_info.NumRings()
i=0
for ring in range(self.ligand_ring_num):
if 4 < len(ring_info.AtomRings()[ring]) <= 6 and False not in [self.topology_data.mol.GetAtomWithIdx(x).GetIsAromatic() for x in ring_info.AtomRings()[ring]]: #narrow ring definition
atom_ids_in_ring = []
for atom in ring_info.AtomRings()[ring]:
atom_ids_in_ring.append(self.topology_data.universe.ligand.atoms[atom].name)
self.ligrings[i]=atom_ids_in_ring
i+=1 | [
"def",
"detect_aromatic_rings_in_ligand",
"(",
"self",
")",
":",
"self",
".",
"ligrings",
"=",
"{",
"}",
"try",
":",
"ring_info",
"=",
"self",
".",
"topology_data",
".",
"mol",
".",
"GetRingInfo",
"(",
")",
"self",
".",
"ligand_ring_num",
"=",
"ring_info",
".",
"NumRings",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"m",
"=",
"Chem",
".",
"MolFromPDBFile",
"(",
"\"lig.pdb\"",
")",
"ring_info",
"=",
"m",
".",
"GetRingInfo",
"(",
")",
"self",
".",
"ligand_ring_num",
"=",
"ring_info",
".",
"NumRings",
"(",
")",
"i",
"=",
"0",
"for",
"ring",
"in",
"range",
"(",
"self",
".",
"ligand_ring_num",
")",
":",
"if",
"4",
"<",
"len",
"(",
"ring_info",
".",
"AtomRings",
"(",
")",
"[",
"ring",
"]",
")",
"<=",
"6",
"and",
"False",
"not",
"in",
"[",
"self",
".",
"topology_data",
".",
"mol",
".",
"GetAtomWithIdx",
"(",
"x",
")",
".",
"GetIsAromatic",
"(",
")",
"for",
"x",
"in",
"ring_info",
".",
"AtomRings",
"(",
")",
"[",
"ring",
"]",
"]",
":",
"#narrow ring definition",
"atom_ids_in_ring",
"=",
"[",
"]",
"for",
"atom",
"in",
"ring_info",
".",
"AtomRings",
"(",
")",
"[",
"ring",
"]",
":",
"atom_ids_in_ring",
".",
"append",
"(",
"self",
".",
"topology_data",
".",
"universe",
".",
"ligand",
".",
"atoms",
"[",
"atom",
"]",
".",
"name",
")",
"self",
".",
"ligrings",
"[",
"i",
"]",
"=",
"atom_ids_in_ring",
"i",
"+=",
"1"
] | Using rdkit to detect aromatic rings in ligand - size 4-6 atoms and all atoms are part of the ring. Saves this data in self.ligrings. | [
"Using",
"rdkit",
"to",
"detect",
"aromatic",
"rings",
"in",
"ligand",
"-",
"size",
"4",
"-",
"6",
"atoms",
"and",
"all",
"atoms",
"are",
"part",
"of",
"the",
"ring",
".",
"Saves",
"this",
"data",
"in",
"self",
".",
"ligrings",
"."

sha: d825a4a7b35f3f857d3b81b46c9aee72b0ec697a
url: https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/analysis/pistacking.py#L69-L86
partition: train
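
The RDKit calls used above can be exercised standalone. A sketch with an illustrative molecule containing one aromatic six-ring and one aliphatic five-ring:

```python
from rdkit import Chem

mol = Chem.MolFromSmiles('c1ccccc1CC1CCCC1')  # benzene linked to cyclopentane
ring_info = mol.GetRingInfo()
for ring in ring_info.AtomRings():
    aromatic = all(mol.GetAtomWithIdx(idx).GetIsAromatic() for idx in ring)
    keep = 4 < len(ring) <= 6 and aromatic  # the same narrow ring definition
    print(len(ring), aromatic, keep)
# The benzene ring (size 6, aromatic) passes; cyclopentane (size 5, non-aromatic) is rejected.
```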

repo: ldomic/lintools
path: lintools/analysis/pistacking.py
func_name: PiStacking.define_all_protein_rings
language: python

def define_all_protein_rings(self):
    """Make MDAnalysis atom selections for rings in protein residues that will be plotted in the final figure - since they are the only ones that
    should be analysed.
    Saves the rings in self.protein_rings dictionary.
    """
    self.protein_rings = {}
    i = 0
    for residue in self.topology_data.dict_of_plotted_res:
        for ring in self.rings:
            if ring[0] == residue[0]:
                atom_names = ""
                for atom in self.rings[ring]:
                    atom_names = atom_names + " " + atom
                self.protein_rings[i] = self.topology_data.universe.select_atoms("resname " + residue[0] + " and resid " + residue[1] + " and segid " + residue[2] + " and name " + atom_names)
                i += 1
"""Make MDAnalysis atom selections for rings in protein residues that will be plotted in the final figure - since they are the only ones that
should be analysed.
Saves the rings in self.protein_rings dictionary.
"""
self.protein_rings = {}
i=0
for residue in self.topology_data.dict_of_plotted_res:
for ring in self.rings:
if ring[0]==residue[0]:
atom_names =""
for atom in self.rings[ring]:
atom_names = atom_names+" "+atom
self.protein_rings[i]= self.topology_data.universe.select_atoms("resname "+residue[0]+" and resid "+residue[1]+" and segid "+ residue[2]+" and name "+atom_names)
i+=1 | [
"def",
"define_all_protein_rings",
"(",
"self",
")",
":",
"self",
".",
"protein_rings",
"=",
"{",
"}",
"i",
"=",
"0",
"for",
"residue",
"in",
"self",
".",
"topology_data",
".",
"dict_of_plotted_res",
":",
"for",
"ring",
"in",
"self",
".",
"rings",
":",
"if",
"ring",
"[",
"0",
"]",
"==",
"residue",
"[",
"0",
"]",
":",
"atom_names",
"=",
"\"\"",
"for",
"atom",
"in",
"self",
".",
"rings",
"[",
"ring",
"]",
":",
"atom_names",
"=",
"atom_names",
"+",
"\" \"",
"+",
"atom",
"self",
".",
"protein_rings",
"[",
"i",
"]",
"=",
"self",
".",
"topology_data",
".",
"universe",
".",
"select_atoms",
"(",
"\"resname \"",
"+",
"residue",
"[",
"0",
"]",
"+",
"\" and resid \"",
"+",
"residue",
"[",
"1",
"]",
"+",
"\" and segid \"",
"+",
"residue",
"[",
"2",
"]",
"+",
"\" and name \"",
"+",
"atom_names",
")",
"i",
"+=",
"1"
] | Make MDAnalysis atom selections for rings in protein residues that will be plotted in the final figure - since they are the only ones that
should be analysed.
Saves the rings in self.protein_rings dictionary. | [
"Make",
"MDAnalysis",
"atom",
"selections",
"for",
"rings",
"in",
"protein",
"residues",
"that",
"will",
"be",
"plotted",
"in",
"the",
"final",
"figure",
"-",
"since",
"they",
"are",
"the",
"only",
"ones",
"that",
"should",
"be",
"analysed",
".",
"Saves",
"the",
"rings",
"in",
"self",
".",
"protein_rings",
"dictionary",
"."

sha: d825a4a7b35f3f857d3b81b46c9aee72b0ec697a
url: https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/analysis/pistacking.py#L115-L129
partition: train

repo: ldomic/lintools
path: lintools/analysis/pistacking.py
func_name: PiStacking.count_by_type
language: python

def count_by_type(self):
    """Count how many times each individual pi-pi interaction occurred throughout the simulation.
    Returns numpy array."""
    pistack = defaultdict(int)
    for contact in self.timeseries:
        # count by residue name, not by protein ring
        pkey = (contact.ligandring, contact.type, contact.resid, contact.resname, contact.segid)
        pistack[pkey] += 1
    dtype = [("ligand_ring_ids", list), ("type", "|U4"), ("resid", int), ("resname", "|U4"), ("segid", "|U8"), ("frequency", float)]
    out = np.empty((len(pistack),), dtype=dtype)
    tsteps = float(len(self.timesteps))
    for cursor, (key, count) in enumerate(pistack.iteritems()):
        out[cursor] = key + (count / tsteps,)
    return out.view(np.recarray)
"""Count how many times each individual pi-pi interaction occured throughout the simulation.
Returns numpy array."""
pistack = defaultdict(int)
for contact in self.timeseries:
#count by residue name not by proteinring
pkey = (contact.ligandring,contact.type, contact.resid,contact.resname,contact.segid)
pistack[pkey]+=1
dtype = [("ligand_ring_ids",list),("type","|U4"),("resid",int),("resname","|U4"),("segid","|U8"),("frequency",float) ]
out = np.empty((len(pistack),),dtype=dtype)
tsteps = float(len(self.timesteps))
for cursor,(key,count) in enumerate(pistack.iteritems()):
out[cursor] = key + (count / tsteps,)
return out.view(np.recarray) | [
"def",
"count_by_type",
"(",
"self",
")",
":",
"pistack",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"contact",
"in",
"self",
".",
"timeseries",
":",
"#count by residue name not by proteinring",
"pkey",
"=",
"(",
"contact",
".",
"ligandring",
",",
"contact",
".",
"type",
",",
"contact",
".",
"resid",
",",
"contact",
".",
"resname",
",",
"contact",
".",
"segid",
")",
"pistack",
"[",
"pkey",
"]",
"+=",
"1",
"dtype",
"=",
"[",
"(",
"\"ligand_ring_ids\"",
",",
"list",
")",
",",
"(",
"\"type\"",
",",
"\"|U4\"",
")",
",",
"(",
"\"resid\"",
",",
"int",
")",
",",
"(",
"\"resname\"",
",",
"\"|U4\"",
")",
",",
"(",
"\"segid\"",
",",
"\"|U8\"",
")",
",",
"(",
"\"frequency\"",
",",
"float",
")",
"]",
"out",
"=",
"np",
".",
"empty",
"(",
"(",
"len",
"(",
"pistack",
")",
",",
")",
",",
"dtype",
"=",
"dtype",
")",
"tsteps",
"=",
"float",
"(",
"len",
"(",
"self",
".",
"timesteps",
")",
")",
"for",
"cursor",
",",
"(",
"key",
",",
"count",
")",
"in",
"enumerate",
"(",
"pistack",
".",
"iteritems",
"(",
")",
")",
":",
"out",
"[",
"cursor",
"]",
"=",
"key",
"+",
"(",
"count",
"/",
"tsteps",
",",
")",
"return",
"out",
".",
"view",
"(",
"np",
".",
"recarray",
")"
] | Count how many times each individual pi-pi interaction occured throughout the simulation.
Returns numpy array. | [
"Count",
"how",
"many",
"times",
"each",
"individual",
"pi",
"-",
"pi",
"interaction",
"occured",
"throughout",
"the",
"simulation",
".",
"Returns",
"numpy",
"array",
"."

sha: d825a4a7b35f3f857d3b81b46c9aee72b0ec697a
url: https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/analysis/pistacking.py#L209-L222
partition: train

repo: eleme/meepo
path: examples/repl_db/repl.py
func_name: main
language: python

def main(master_dsn, slave_dsn, tables, blocking=False):
    """DB Replication app.

    This script will replicate data from a mysql master to other databases
    (including mysql, postgres, sqlite).

    This script only supports a very limited replication:

    1. data only. The script only replicates data, so you have to make sure
       the tables already exist in the slave db.
    2. pk only. The script replicates data by pk: when a row_pk changes, it
       retrieves the row from the master and writes it to the slave.

    :param master_dsn: mysql dsn with row-based binlog enabled.
    :param slave_dsn: slave dsn, most databases supported including mysql,
        postgres, sqlite etc.
    :param tables: the tables that need to be replicated
    :param blocking: by default, the script only reads the existing binlog,
        replicates it and exits. If set to True, this script will run as a
        daemon, wait for more mysql binlog and keep replicating.
    """
    # currently only supports mysql master
    assert master_dsn.startswith("mysql")

    logger = logging.getLogger(__name__)
    logger.info("replicating tables: %s" % ", ".join(tables))

    repl_db_sub(master_dsn, slave_dsn, tables)
    mysql_pub(master_dsn, blocking=blocking)
"""DB Replication app.
This script will replicate data from mysql master to other databases(
including mysql, postgres, sqlite).
This script only support a very limited replication:
1. data only. The script only replicates data, so you have to make sure
the tables already exists in slave db.
2. pk only. The script replicate data by pk, when a row_pk changed, it
retrieve it from master and write in to slave.
:param master_dsn: mysql dsn with row-based binlog enabled.
:param slave_dsn: slave dsn, most databases supported including mysql,
postgres, sqlite etc.
:param tables: the tables need to be replicated
:param blocking: by default, the script only reads existing binlog,
replicate them and exit. if set to True, this script will run as a
daemon and wait for more mysql binlog and do replicates.
"""
# currently only supports mysql master
assert master_dsn.startswith("mysql")
logger = logging.getLogger(__name__)
logger.info("replicating tables: %s" % ", ".join(tables))
repl_db_sub(master_dsn, slave_dsn, tables)
mysql_pub(master_dsn, blocking=blocking) | [
"def",
"main",
"(",
"master_dsn",
",",
"slave_dsn",
",",
"tables",
",",
"blocking",
"=",
"False",
")",
":",
"# currently only supports mysql master",
"assert",
"master_dsn",
".",
"startswith",
"(",
"\"mysql\"",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"info",
"(",
"\"replicating tables: %s\"",
"%",
"\", \"",
".",
"join",
"(",
"tables",
")",
")",
"repl_db_sub",
"(",
"master_dsn",
",",
"slave_dsn",
",",
"tables",
")",
"mysql_pub",
"(",
"master_dsn",
",",
"blocking",
"=",
"blocking",
")"
] | DB Replication app.
This script will replicate data from mysql master to other databases(
including mysql, postgres, sqlite).
This script only support a very limited replication:
1. data only. The script only replicates data, so you have to make sure
the tables already exists in slave db.
2. pk only. The script replicate data by pk, when a row_pk changed, it
retrieve it from master and write in to slave.
:param master_dsn: mysql dsn with row-based binlog enabled.
:param slave_dsn: slave dsn, most databases supported including mysql,
postgres, sqlite etc.
:param tables: the tables need to be replicated
:param blocking: by default, the script only reads existing binlog,
replicate them and exit. if set to True, this script will run as a
daemon and wait for more mysql binlog and do replicates. | [
"DB",
"Replication",
"app",
"."

sha: 8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a
url: https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/examples/repl_db/repl.py#L131-L158
partition: train
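
A hypothetical invocation (the DSNs and table names are placeholders). Per the assert and docstring, the master DSN must point at a MySQL server with row-based binlog enabled:

```python
main(
    master_dsn="mysql+pymysql://repl:secret@127.0.0.1:3306/shop",
    slave_dsn="sqlite:///shop_replica.db",
    tables=["orders", "users"],
    blocking=True,  # keep following new binlog events instead of exiting
)
```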

repo: jam31118/vis
path: vis/layout.py
func_name: get_text_position_in_ax_coord
language: python

def get_text_position_in_ax_coord(ax, pos, scale=default_text_relative_padding):
    """Return text position corresponding to given 'pos'.

    The text alignment in the bounding box should be set accordingly
    in order to have a good-looking layout.
    This corresponding text alignment can be obtained by the 'get_text_alignment'
    or 'get_text_position_and_inner_alignment' function.
    """
    ratio = get_axes_ratio(ax)
    x, y = scale, scale
    if ratio > 1:  # vertical is longer
        y /= ratio
    elif 0 < ratio:  # 0 < ratio <= 1
        x *= ratio
    pos = pos.lower()
    if pos == 'nw': y = 1 - y
    elif pos == 'ne': x, y = 1 - x, 1 - y
    elif pos == 'sw': pass
    elif pos == 'se': x = 1 - x
    else: raise ValueError("Unknown value for 'pos': %s" % (str(pos)))
    return x, y
"""Return text position corresponding to given 'pos'.
The text alignment in the bounding box should be set accordingly
in order to have a good-looking layout.
This corresponding text alignment can be obtained by 'get_text_alignment'
or 'get_text_position_and_inner_alignment' function.
"""
ratio = get_axes_ratio(ax)
x, y = scale ,scale
if ratio > 1: # vertical is longer
y /= ratio
elif 0 < ratio: # 0 < ratio <= 1
x *= ratio
pos = pos.lower()
if pos == 'nw': y = 1 - y
elif pos == 'ne': x, y = 1 - x, 1 - y
elif pos == 'sw': pass
elif pos == 'se': x = 1 - x
else: raise ValueError("Unknown value for 'pos': %s" % (str(pos)))
return x, y | [
"def",
"get_text_position_in_ax_coord",
"(",
"ax",
",",
"pos",
",",
"scale",
"=",
"default_text_relative_padding",
")",
":",
"ratio",
"=",
"get_axes_ratio",
"(",
"ax",
")",
"x",
",",
"y",
"=",
"scale",
",",
"scale",
"if",
"ratio",
">",
"1",
":",
"# vertical is longer",
"y",
"/=",
"ratio",
"elif",
"0",
"<",
"ratio",
":",
"# 0 < ratio <= 1",
"x",
"*=",
"ratio",
"pos",
"=",
"pos",
".",
"lower",
"(",
")",
"if",
"pos",
"==",
"'nw'",
":",
"y",
"=",
"1",
"-",
"y",
"elif",
"pos",
"==",
"'ne'",
":",
"x",
",",
"y",
"=",
"1",
"-",
"x",
",",
"1",
"-",
"y",
"elif",
"pos",
"==",
"'sw'",
":",
"pass",
"elif",
"pos",
"==",
"'se'",
":",
"x",
"=",
"1",
"-",
"x",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown value for 'pos': %s\"",
"%",
"(",
"str",
"(",
"pos",
")",
")",
")",
"return",
"x",
",",
"y"
] | Return text position corresponding to given 'pos'.
The text alignment in the bounding box should be set accordingly
in order to have a good-looking layout.
This corresponding text alignment can be obtained by 'get_text_alignment'
or 'get_text_position_and_inner_alignment' function. | [
"Return",
"text",
"position",
"corresponding",
"to",
"given",
"pos",
".",
"The",
"text",
"alignment",
"in",
"the",
"bounding",
"box",
"should",
"be",
"set",
"accordingly",
"in",
"order",
"to",
"have",
"a",
"good",
"-",
"looking",
"layout",
".",
"This",
"corresponding",
"text",
"alignment",
"can",
"be",
"obtained",
"by",
"get_text_alignment",
"or",
"get_text_position_and_inner_alignment",
"function",
"."

sha: 965ebec102c539b323d5756fef04153ac71e50d9
url: https://github.com/jam31118/vis/blob/965ebec102c539b323d5756fef04153ac71e50d9/vis/layout.py#L24-L44
partition: train
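
A usage sketch. `get_text_alignment` is the companion helper named in the docstring, assumed here to return an `ha`/`va` mapping suitable for `Axes.text`:

```python
import matplotlib.pyplot as plt
from vis.layout import get_text_position_in_ax_coord, get_text_alignment

fig, ax = plt.subplots()
x, y = get_text_position_in_ax_coord(ax, 'ne')  # padded position near the top-right corner
ax.text(x, y, '(a)', transform=ax.transAxes, **get_text_alignment('ne'))
plt.show()
```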

repo: jam31118/vis
path: vis/layout.py
func_name: get_text_position_and_inner_alignment
language: python

def get_text_position_and_inner_alignment(ax, pos, scale=default_text_relative_padding, with_transAxes_kwargs=True):
    """Return text position and its alignment in its bounding box.

    The returned position is given in Axes coordinates,
    as defined in the matplotlib documentation on transformations.
    The returned alignment is given as a dictionary,
    which can be passed as a fontdict to text-relevant methods.
    """
    xy = get_text_position_in_ax_coord(ax, pos, scale=scale)
    alignment_fontdict = get_text_alignment(pos)
    if with_transAxes_kwargs: alignment_fontdict = {**alignment_fontdict, **{'transform': ax.transAxes}}
    return xy, alignment_fontdict
"""Return text position and its alignment in its bounding box.
The returned position is given in Axes coordinate,
as defined in matplotlib documentation on transformation.
The returned alignment is given in dictionary,
which can be put as a fontdict to text-relavent method.
"""
xy = get_text_position_in_ax_coord(ax,pos,scale=scale)
alignment_fontdict = get_text_alignment(pos)
if with_transAxes_kwargs: alignment_fontdict = {**alignment_fontdict, **{'transform':ax.transAxes}}
return xy, alignment_fontdict | [
"def",
"get_text_position_and_inner_alignment",
"(",
"ax",
",",
"pos",
",",
"scale",
"=",
"default_text_relative_padding",
",",
"with_transAxes_kwargs",
"=",
"True",
")",
":",
"xy",
"=",
"get_text_position_in_ax_coord",
"(",
"ax",
",",
"pos",
",",
"scale",
"=",
"scale",
")",
"alignment_fontdict",
"=",
"get_text_alignment",
"(",
"pos",
")",
"if",
"with_transAxes_kwargs",
":",
"alignment_fontdict",
"=",
"{",
"*",
"*",
"alignment_fontdict",
",",
"*",
"*",
"{",
"'transform'",
":",
"ax",
".",
"transAxes",
"}",
"}",
"return",
"xy",
",",
"alignment_fontdict"
] | Return text position and its alignment in its bounding box.
The returned position is given in Axes coordinate,
as defined in matplotlib documentation on transformation.
The returned alignment is given in dictionary,
which can be put as a fontdict to text-relavent method. | [
"Return",
"text",
"position",
"and",
"its",
"alignment",
"in",
"its",
"bounding",
"box",
".",
"The",
"returned",
"position",
"is",
"given",
"in",
"Axes",
"coordinate",
"as",
"defined",
"in",
"matplotlib",
"documentation",
"on",
"transformation",
"."
] | 965ebec102c539b323d5756fef04153ac71e50d9 | https://github.com/jam31118/vis/blob/965ebec102c539b323d5756fef04153ac71e50d9/vis/layout.py#L59-L71 | train |
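A hedged usage sketch of the record above. The exact keys inside the returned fontdict are assumed to be valid matplotlib Text keyword arguments (the docstring says it can be passed as-is), and the label text is arbitrary.
import matplotlib.pyplot as plt
from vis.layout import get_text_position_and_inner_alignment

fig, ax = plt.subplots()
xy, fontdict = get_text_position_and_inner_alignment(ax, 'ne')
# fontdict carries the alignment plus transform=ax.transAxes, so xy is
# interpreted in Axes (0..1) coordinates rather than data coordinates.
ax.text(xy[0], xy[1], 'panel label', **fontdict)
plt.show()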
jam31118/vis | vis/layout.py | get_text_position | def get_text_position(fig, ax, ha='left', va='top', pad_scale=1.0):
"""Return text position inside of the given axis"""
## Check and preprocess input arguments
try: pad_scale = float(pad_scale)
except: raise TypeError("'pad_scale' should be of type 'float'")
for arg in [va, ha]:
assert type(arg) is str
arg = arg.lower() # Make it lowercase to prevent case problems.
## Get axis size in inches
ax_height, ax_width = get_ax_size_in_inch(fig, ax)
## Construct inversion factor from inch to plot coordinate
length_x = ax.get_xlim()[1] - ax.get_xlim()[0]
length_y = ax.get_ylim()[1] - ax.get_ylim()[0]
inch2coord_x = length_x / ax_width
inch2coord_y = length_y / ax_height
## Set padding size relative to the text size
#pad_inch = text_bbox_inch.height * pad_scale
#pad_inch = fontsize_points * point2inch * pad_scale
ax_length_geom_average = (ax_height * ax_width) ** 0.5
pad_inch = ax_length_geom_average * 0.03 * pad_scale
pad_inch_x, pad_inch_y = pad_inch, pad_inch
pad_coord_x = pad_inch_x * inch2coord_x
pad_coord_y = pad_inch_y * inch2coord_y
if ha == 'left': pos_x = ax.get_xlim()[0] + pad_coord_x
elif ha == 'right': pos_x = ax.get_xlim()[1] - pad_coord_x
else: raise Exception("Unsupported value for 'ha'")
if va in ['top','up','upper']: pos_y = ax.get_ylim()[1] - pad_coord_y
elif va in ['bottom','down','lower']: pos_y = ax.get_ylim()[0] + pad_coord_y
else: raise Exception("Unsupported value for 'va'")
return pos_x, pos_y | python | def get_text_position(fig, ax, ha='left', va='top', pad_scale=1.0):
"""Return text position inside of the given axis"""
## Check and preprocess input arguments
try: pad_scale = float(pad_scale)
except: raise TypeError("'pad_scale' should be of type 'float'")
for arg in [va, ha]:
assert type(arg) is str
arg = arg.lower() # Make it lowercase to prevent case problems.
## Get axis size in inches
ax_height, ax_width = get_ax_size_in_inch(fig, ax)
## Construct inversion factor from inch to plot coordinate
length_x = ax.get_xlim()[1] - ax.get_xlim()[0]
length_y = ax.get_ylim()[1] - ax.get_ylim()[0]
inch2coord_x = length_x / ax_width
inch2coord_y = length_y / ax_height
## Set padding size relative to the text size
#pad_inch = text_bbox_inch.height * pad_scale
#pad_inch = fontsize_points * point2inch * pad_scale
ax_length_geom_average = (ax_height * ax_width) ** 0.5
pad_inch = ax_length_geom_average * 0.03 * pad_scale
pad_inch_x, pad_inch_y = pad_inch, pad_inch
pad_coord_x = pad_inch_x * inch2coord_x
pad_coord_y = pad_inch_y * inch2coord_y
if ha == 'left': pos_x = ax.get_xlim()[0] + pad_coord_x
elif ha == 'right': pos_x = ax.get_xlim()[1] - pad_coord_x
else: raise Exception("Unsupported value for 'ha'")
if va in ['top','up','upper']: pos_y = ax.get_ylim()[1] - pad_coord_y
elif va in ['bottom','down','lower']: pos_y = ax.get_ylim()[0] + pad_coord_y
else: raise Exception("Unsupported value for 'va'")
return pos_x, pos_y | [
"def",
"get_text_position",
"(",
"fig",
",",
"ax",
",",
"ha",
"=",
"'left'",
",",
"va",
"=",
"'top'",
",",
"pad_scale",
"=",
"1.0",
")",
":",
"## Check and preprocess input arguments",
"try",
":",
"pad_scale",
"=",
"float",
"(",
"pad_scale",
")",
"except",
":",
"raise",
"TypeError",
"(",
"\"'pad_scale should be of type 'float'\"",
")",
"for",
"arg",
"in",
"[",
"va",
",",
"ha",
"]",
":",
"assert",
"type",
"(",
"arg",
")",
"is",
"str",
"arg",
"=",
"arg",
".",
"lower",
"(",
")",
"# Make it lowercase to prevent case problem.",
"## Get axis size in inches",
"ax_height",
",",
"ax_width",
"=",
"get_ax_size_in_inch",
"(",
"fig",
",",
"ax",
")",
"## Construct inversion factor from inch to plot coordinate",
"length_x",
"=",
"ax",
".",
"get_xlim",
"(",
")",
"[",
"1",
"]",
"-",
"ax",
".",
"get_xlim",
"(",
")",
"[",
"0",
"]",
"length_y",
"=",
"ax",
".",
"get_ylim",
"(",
")",
"[",
"1",
"]",
"-",
"ax",
".",
"get_ylim",
"(",
")",
"[",
"0",
"]",
"inch2coord_x",
"=",
"length_x",
"/",
"ax_width",
"inch2coord_y",
"=",
"length_y",
"/",
"ax_height",
"## Set padding size relative to the text size",
"#pad_inch = text_bbox_inch.height * pad_scale",
"#pad_inch = fontsize_points * point2inch * pad_scale",
"ax_length_geom_average",
"=",
"(",
"ax_height",
"*",
"ax_width",
")",
"**",
"0.5",
"pad_inch",
"=",
"ax_length_geom_average",
"*",
"0.03",
"*",
"pad_scale",
"pad_inch_x",
",",
"pad_inch_y",
"=",
"pad_inch",
",",
"pad_inch",
"pad_coord_x",
"=",
"pad_inch_x",
"*",
"inch2coord_x",
"pad_coord_y",
"=",
"pad_inch_y",
"*",
"inch2coord_y",
"if",
"ha",
"==",
"'left'",
":",
"pos_x",
"=",
"ax",
".",
"get_xlim",
"(",
")",
"[",
"0",
"]",
"+",
"pad_coord_x",
"elif",
"ha",
"==",
"'right'",
":",
"pos_x",
"=",
"ax",
".",
"get_xlim",
"(",
")",
"[",
"1",
"]",
"-",
"pad_coord_x",
"else",
":",
"raise",
"Exception",
"(",
"\"Unsupported value for 'ha'\"",
")",
"if",
"va",
"in",
"[",
"'top'",
",",
"'up'",
",",
"'upper'",
"]",
":",
"pos_y",
"=",
"ax",
".",
"get_ylim",
"(",
")",
"[",
"1",
"]",
"-",
"pad_coord_y",
"elif",
"va",
"in",
"[",
"'bottom'",
",",
"'down'",
",",
"'lower'",
"]",
":",
"pos_y",
"=",
"ax",
".",
"get_ylim",
"(",
")",
"[",
"0",
"]",
"+",
"pad_coord_y",
"else",
":",
"raise",
"Exception",
"(",
"\"Unsupported value for 'va'\"",
")",
"return",
"pos_x",
",",
"pos_y"
] | Return text position inside of the given axis | [
"Return",
"text",
"position",
"inside",
"of",
"the",
"given",
"axis"
] | 965ebec102c539b323d5756fef04153ac71e50d9 | https://github.com/jam31118/vis/blob/965ebec102c539b323d5756fef04153ac71e50d9/vis/layout.py#L81-L118 | train |
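The padding arithmetic in the record above can be checked in isolation; a worked sketch on invented numbers (a 4x3 inch axis with x-limits 0..10):
ax_width, ax_height = 4.0, 3.0                   # hypothetical axis size in inches
xlim = (0.0, 10.0)                               # hypothetical data limits
inch2coord_x = (xlim[1] - xlim[0]) / ax_width    # 2.5 data units per inch
pad_inch = (ax_height * ax_width) ** 0.5 * 0.03  # 3% of the geometric mean, ~0.104 in
pad_coord_x = pad_inch * inch2coord_x
pos_x = xlim[0] + pad_coord_x                    # where ha='left' places the text
print(round(pos_x, 2))                           # 0.26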
robinandeer/puzzle | puzzle/server/factory.py | create_app | def create_app(config=None, config_obj=None):
"""Flask app factory function.
Args:
config (Optional[path]): path to a Python module config file
config_obj (Optional[class]): Python config object
"""
app = Flask(__name__)
# configure application from external configs
configure_app(app, config=config, config_obj=config_obj)
# register different parts of the application
register_blueprints(app)
# setup extensions
bind_extensions(app)
return app | python | def create_app(config=None, config_obj=None):
"""Flask app factory function.
Args:
config (Optional[path]): path to a Python module config file
config_obj (Optional[class]): Python config object
"""
app = Flask(__name__)
# configure application from external configs
configure_app(app, config=config, config_obj=config_obj)
# register different parts of the application
register_blueprints(app)
# setup extensions
bind_extensions(app)
return app | [
"def",
"create_app",
"(",
"config",
"=",
"None",
",",
"config_obj",
"=",
"None",
")",
":",
"app",
"=",
"Flask",
"(",
"__name__",
")",
"# configure application from external configs",
"configure_app",
"(",
"app",
",",
"config",
"=",
"config",
",",
"config_obj",
"=",
"config_obj",
")",
"# register different parts of the application",
"register_blueprints",
"(",
"app",
")",
"# setup extensions",
"bind_extensions",
"(",
"app",
")",
"return",
"app"
] | Flask app factory function.
Args:
config (Optional[path]): path to a Python module config file
config_obj (Optional[class]): Python config object | [
"Flask",
"app",
"factory",
"function",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/server/factory.py#L12-L26 | train |
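A usage sketch of the factory above. The config path is hypothetical, and the file would be expected to define the keys the app needs (for example PUZZLE_BACKEND, consumed by bind_extensions below).
from puzzle.server.factory import create_app

app = create_app(config='/path/to/puzzle_config.py')  # hypothetical path
if __name__ == '__main__':
    app.run(debug=True)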
robinandeer/puzzle | puzzle/server/factory.py | configure_app | def configure_app(app, config=None, config_obj=None):
"""Configure application instance.
Args:
app (Flask): initialized Flask app instance
config (Optional[path]): path to a Python module config file
config_obj (Optional[class]): Python config object
"""
app.config.from_object(config_obj or BaseConfig)
if config is not None:
app.config.from_pyfile(config) | python | def configure_app(app, config=None, config_obj=None):
"""Configure application instance.
Args:
app (Flask): initialized Flask app instance
config (Optional[path]): path to a Python module config file
config_obj (Optional[class]): Python config object
"""
app.config.from_object(config_obj or BaseConfig)
if config is not None:
app.config.from_pyfile(config) | [
"def",
"configure_app",
"(",
"app",
",",
"config",
"=",
"None",
",",
"config_obj",
"=",
"None",
")",
":",
"app",
".",
"config",
".",
"from_object",
"(",
"config_obj",
"or",
"BaseConfig",
")",
"if",
"config",
"is",
"not",
"None",
":",
"app",
".",
"config",
".",
"from_pyfile",
"(",
"config",
")"
] | Configure application instance.
Args:
app (Flask): initialized Flask app instance
config (Optional[path]): path to a Python module config file
config_obj (Optional[class]): Python config object | [
"Configure",
"application",
"instance",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/server/factory.py#L29-L39 | train |
robinandeer/puzzle | puzzle/server/factory.py | bind_extensions | def bind_extensions(app):
"""Configure extensions.
Args:
app (Flask): initialized Flask app instance
"""
# bind plugin to app object
app.db = app.config['PUZZLE_BACKEND']
app.db.init_app(app)
# bind bootstrap blueprints
bootstrap.init_app(app)
markdown(app)
@app.template_filter('islist')
def islist(object):
return isinstance(object, (tuple, list)) | python | def bind_extensions(app):
"""Configure extensions.
Args:
app (Flask): initialized Flask app instance
"""
# bind plugin to app object
app.db = app.config['PUZZLE_BACKEND']
app.db.init_app(app)
# bind bootstrap blueprints
bootstrap.init_app(app)
markdown(app)
@app.template_filter('islist')
def islist(object):
return isinstance(object, (tuple, list)) | [
"def",
"bind_extensions",
"(",
"app",
")",
":",
"# bind plugin to app object",
"app",
".",
"db",
"=",
"app",
".",
"config",
"[",
"'PUZZLE_BACKEND'",
"]",
"app",
".",
"db",
".",
"init_app",
"(",
"app",
")",
"# bind bootstrap blueprints",
"bootstrap",
".",
"init_app",
"(",
"app",
")",
"markdown",
"(",
"app",
")",
"@",
"app",
".",
"template_filter",
"(",
"'islist'",
")",
"def",
"islist",
"(",
"object",
")",
":",
"return",
"isinstance",
"(",
"object",
",",
"(",
"tuple",
",",
"list",
")",
")"
] | Configure extensions.
Args:
app (Flask): initialized Flask app instance | [
"Configure",
"extensions",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/server/factory.py#L52-L68 | train |
ldomic/lintools | lintools/analysis/hbonds.py | HBonds.find_donors_and_acceptors_in_ligand | def find_donors_and_acceptors_in_ligand(self):
"""
Since MDAnalysis has a pre-set list for acceptor and donor atoms for proteins and solvents
from specific forcefields, it is necessary to find donor and acceptor atoms for the
ligand molecule. This function uses RDKit and searches through ligand atoms to find
matches for a pre-set list of possible donor and acceptor atoms. The resulting list is then
parsed to MDAnalysis through the donors and acceptors arguments.
"""
atom_names=[x.name for x in self.topology_data.universe.ligand]
try:
for atom in self.topology_data.mol.GetSubstructMatches(self.HDonorSmarts, uniquify=1):
self.donors.append(atom_names[atom[0]])
for atom in self.topology_data.mol.GetSubstructMatches(self.HAcceptorSmarts, uniquify=1):
self.acceptors.append(atom_names[atom[0]])
except Exception as e:
m = Chem.MolFromPDBFile("lig.pdb")
self.donors = []
self.acceptors = []
for atom in m.GetSubstructMatches(self.HDonorSmarts, uniquify=1):
self.donors.append(atom_names[atom[0]])
haccep = "[$([O,S;H1;v2]-[!$(*=[O,N,P,S])]),$([O,S;H0;v2]),$([O,S;-]),$([N;v3;!$(N-*=!@[O,N,P,S])]),$([nH0,o,s;+0])]"
self.HAcceptorSmarts = Chem.MolFromSmarts(haccep)
for atom in m.GetSubstructMatches(self.HAcceptorSmarts, uniquify=1):
self.acceptors.append(atom_names[atom[0]]) | python | def find_donors_and_acceptors_in_ligand(self):
"""
Since MDAnalysis has a pre-set list for acceptor and donor atoms for proteins and solvents
from specific forcefields, it is necessary to find donor and acceptor atoms for the
ligand molecule. This function uses RDKit and searches through ligand atoms to find
matches for a pre-set list of possible donor and acceptor atoms. The resulting list is then
parsed to MDAnalysis through the donors and acceptors arguments.
"""
atom_names=[x.name for x in self.topology_data.universe.ligand]
try:
for atom in self.topology_data.mol.GetSubstructMatches(self.HDonorSmarts, uniquify=1):
self.donors.append(atom_names[atom[0]])
for atom in self.topology_data.mol.GetSubstructMatches(self.HAcceptorSmarts, uniquify=1):
self.acceptors.append(atom_names[atom[0]])
except Exception as e:
m = Chem.MolFromPDBFile("lig.pdb")
self.donors = []
self.acceptors = []
for atom in m.GetSubstructMatches(self.HDonorSmarts, uniquify=1):
self.donors.append(atom_names[atom[0]])
haccep = "[$([O,S;H1;v2]-[!$(*=[O,N,P,S])]),$([O,S;H0;v2]),$([O,S;-]),$([N;v3;!$(N-*=!@[O,N,P,S])]),$([nH0,o,s;+0])]"
self.HAcceptorSmarts = Chem.MolFromSmarts(haccep)
for atom in m.GetSubstructMatches(self.HAcceptorSmarts, uniquify=1):
self.acceptors.append(atom_names[atom[0]]) | [
"def",
"find_donors_and_acceptors_in_ligand",
"(",
"self",
")",
":",
"atom_names",
"=",
"[",
"x",
".",
"name",
"for",
"x",
"in",
"self",
".",
"topology_data",
".",
"universe",
".",
"ligand",
"]",
"try",
":",
"for",
"atom",
"in",
"self",
".",
"topology_data",
".",
"mol",
".",
"GetSubstructMatches",
"(",
"self",
".",
"HDonorSmarts",
",",
"uniquify",
"=",
"1",
")",
":",
"self",
".",
"donors",
".",
"append",
"(",
"atom_names",
"[",
"atom",
"[",
"0",
"]",
"]",
")",
"for",
"atom",
"in",
"self",
".",
"topology_data",
".",
"mol",
".",
"GetSubstructMatches",
"(",
"self",
".",
"HAcceptorSmarts",
",",
"uniquify",
"=",
"1",
")",
":",
"self",
".",
"acceptors",
".",
"append",
"(",
"atom_names",
"[",
"atom",
"[",
"0",
"]",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"m",
"=",
"Chem",
".",
"MolFromPDBFile",
"(",
"\"lig.pdb\"",
")",
"self",
".",
"donors",
"=",
"[",
"]",
"self",
".",
"acceptors",
"=",
"[",
"]",
"for",
"atom",
"in",
"m",
".",
"GetSubstructMatches",
"(",
"self",
".",
"HDonorSmarts",
",",
"uniquify",
"=",
"1",
")",
":",
"self",
".",
"donors",
".",
"append",
"(",
"atom_names",
"[",
"atom",
"[",
"0",
"]",
"]",
")",
"haccep",
"=",
"\"[$([O,S;H1;v2]-[!$(*=[O,N,P,S])]),$([O,S;H0;v2]),$([O,S;-]),$([N;v3;!$(N-*=!@[O,N,P,S])]),$([nH0,o,s;+0])]\"",
"self",
".",
"HAcceptorSmarts",
"=",
"Chem",
".",
"MolFromSmarts",
"(",
"haccep",
")",
"for",
"atom",
"in",
"m",
".",
"GetSubstructMatches",
"(",
"self",
".",
"HAcceptorSmarts",
",",
"uniquify",
"=",
"1",
")",
":",
"self",
".",
"acceptors",
".",
"append",
"(",
"atom_names",
"[",
"atom",
"[",
"0",
"]",
"]",
")"
] | Since MDAnalysis has a pre-set list for acceptor and donor atoms for proteins and solvents
from specific forcefields, it is necessary to find donor and acceptor atoms for the
ligand molecule. This function uses RDKit and searches through ligand atoms to find
matches for a pre-set list of possible donor and acceptor atoms. The resulting list is then
parsed to MDAnalysis through the donors and acceptors arguments. | [
"Since",
"MDAnalysis",
"a",
"pre",
"-",
"set",
"list",
"for",
"acceptor",
"and",
"donor",
"atoms",
"for",
"proteins",
"and",
"solvents",
"from",
"specific",
"forcefields",
"it",
"is",
"necessary",
"to",
"find",
"donor",
"and",
"acceptor",
"atoms",
"for",
"the",
"ligand",
"molecule",
".",
"This",
"function",
"uses",
"RDKit",
"and",
"searches",
"through",
"ligand",
"atoms",
"to",
"find",
"matches",
"for",
"pre",
"-",
"set",
"list",
"of",
"possible",
"donor",
"and",
"acceptor",
"atoms",
".",
"The",
"resulting",
"list",
"is",
"then",
"parsed",
"to",
"MDAnalysis",
"through",
"the",
"donors",
"and",
"acceptors",
"arguments",
"."
] | d825a4a7b35f3f857d3b81b46c9aee72b0ec697a | https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/analysis/hbonds.py#L64-L87 | train |
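A standalone sketch of the SMARTS matching idea used above (requires RDKit; ethanol is an arbitrary test molecule, and the pattern string is the same acceptor SMARTS quoted in the fallback branch of the record):
from rdkit import Chem

mol = Chem.MolFromSmiles('CCO')  # ethanol: C(0)-C(1)-O(2)
haccep = ("[$([O,S;H1;v2]-[!$(*=[O,N,P,S])]),$([O,S;H0;v2]),$([O,S;-]),"
          "$([N;v3;!$(N-*=!@[O,N,P,S])]),$([nH0,o,s;+0])]")
pattern = Chem.MolFromSmarts(haccep)
print(mol.GetSubstructMatches(pattern, uniquify=1))  # ((2,),) -- the hydroxyl oxygen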
ldomic/lintools | lintools/analysis/hbonds.py | HBonds.count_by_type | def count_by_type(self,table,timesteps):
"""Count how many times each individual hydrogen bonds occured throughout the simulation.
Returns numpy array."""
hbonds = defaultdict(int)
for contact in table:
#count by residue name not by proteinring
pkey = (contact.donor_idx,contact.acceptor_idx,contact.donor_atom, contact.acceptor_atom,contact.donor_resnm,contact.donor_resid, contact.acceptor_resnm,contact.acceptor_resid)
hbonds[pkey]+=1
dtype = [("donor_idx",int),("acceptor_idx",int),("donor_atom","|U4"),("acceptor_atom","|U4"),("donor_resnm","|U8"),("donor_resid","|U8"),("acceptor_resnm","|U8"),("acceptor_resid","|U8"),("frequency",float) ]
out = np.empty((len(hbonds),),dtype=dtype)
tsteps = float(len(timesteps))
for cursor,(key,count) in enumerate(hbonds.iteritems()):
out[cursor] = key + (count / tsteps,)
return out.view(np.recarray) | python | def count_by_type(self,table,timesteps):
"""Count how many times each individual hydrogen bonds occured throughout the simulation.
Returns numpy array."""
hbonds = defaultdict(int)
for contact in table:
#count by residue name not by proteinring
pkey = (contact.donor_idx,contact.acceptor_idx,contact.donor_atom, contact.acceptor_atom,contact.donor_resnm,contact.donor_resid, contact.acceptor_resnm,contact.acceptor_resid)
hbonds[pkey]+=1
dtype = [("donor_idx",int),("acceptor_idx",int),("donor_atom","|U4"),("acceptor_atom","|U4"),("donor_resnm","|U8"),("donor_resid","|U8"),("acceptor_resnm","|U8"),("acceptor_resid","|U8"),("frequency",float) ]
out = np.empty((len(hbonds),),dtype=dtype)
tsteps = float(len(timesteps))
for cursor,(key,count) in enumerate(hbonds.iteritems()):
out[cursor] = key + (count / tsteps,)
return out.view(np.recarray) | [
"def",
"count_by_type",
"(",
"self",
",",
"table",
",",
"timesteps",
")",
":",
"hbonds",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"contact",
"in",
"table",
":",
"#count by residue name not by proteinring",
"pkey",
"=",
"(",
"contact",
".",
"donor_idx",
",",
"contact",
".",
"acceptor_idx",
",",
"contact",
".",
"donor_atom",
",",
"contact",
".",
"acceptor_atom",
",",
"contact",
".",
"donor_resnm",
",",
"contact",
".",
"donor_resid",
",",
"contact",
".",
"acceptor_resnm",
",",
"contact",
".",
"acceptor_resid",
")",
"hbonds",
"[",
"pkey",
"]",
"+=",
"1",
"dtype",
"=",
"[",
"(",
"\"donor_idx\"",
",",
"int",
")",
",",
"(",
"\"acceptor_idx\"",
",",
"int",
")",
",",
"(",
"\"donor_atom\"",
",",
"\"|U4\"",
")",
",",
"(",
"\"acceptor_atom\"",
",",
"\"|U4\"",
")",
",",
"(",
"\"donor_resnm\"",
",",
"\"|U8\"",
")",
",",
"(",
"\"donor_resid\"",
",",
"\"|U8\"",
")",
",",
"(",
"\"acceptor_resnm\"",
",",
"\"|U8\"",
")",
",",
"(",
"\"acceptor_resid\"",
",",
"\"|U8\"",
")",
",",
"(",
"\"frequency\"",
",",
"float",
")",
"]",
"out",
"=",
"np",
".",
"empty",
"(",
"(",
"len",
"(",
"hbonds",
")",
",",
")",
",",
"dtype",
"=",
"dtype",
")",
"tsteps",
"=",
"float",
"(",
"len",
"(",
"timesteps",
")",
")",
"for",
"cursor",
",",
"(",
"key",
",",
"count",
")",
"in",
"enumerate",
"(",
"hbonds",
".",
"iteritems",
"(",
")",
")",
":",
"out",
"[",
"cursor",
"]",
"=",
"key",
"+",
"(",
"count",
"/",
"tsteps",
",",
")",
"return",
"out",
".",
"view",
"(",
"np",
".",
"recarray",
")"
] | Count how many times each individual hydrogen bond occurred throughout the simulation.
Returns numpy array. | [
"Count",
"how",
"many",
"times",
"each",
"individual",
"hydrogen",
"bonds",
"occured",
"throughout",
"the",
"simulation",
".",
"Returns",
"numpy",
"array",
"."
] | d825a4a7b35f3f857d3b81b46c9aee72b0ec697a | https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/analysis/hbonds.py#L118-L131 | train |
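A minimal sketch of the tally-then-normalise pattern above, on invented data (one bond present in 40 of 100 frames):
from collections import defaultdict

timesteps = range(100)  # pretend the trajectory has 100 frames
observations = ([('LIG', 'O1', 'ASP', '64')] * 40 +
                [('LIG', 'N2', 'SER', '83')] * 5)
counts = defaultdict(int)
for key in observations:
    counts[key] += 1
frequency = {k: n / float(len(timesteps)) for k, n in counts.items()}
print(frequency[('LIG', 'O1', 'ASP', '64')])  # 0.4 -- present in 40% of frames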
ldomic/lintools | lintools/analysis/hbonds.py | HBonds.determine_hbonds_for_drawing | def determine_hbonds_for_drawing(self, analysis_cutoff):
"""
Since plotting all hydrogen bonds could lead to a messy plot, a cutoff has to be
implemented. In this function the frequency of each hydrogen bond is summed and the total
compared against analysis cutoff - a fraction multiplied by trajectory count. Those
hydrogen bonds that are present for longer than analysis cutoff will be plotted in the
final plot.
Takes:
* analysis_cutoff * - (user-defined) fraction of time a hydrogen bond has to be
present in order to be plotted (default - 0.3). It is multiplied by the number of trajectories
Output:
* frequency * - dictionary of hydrogen bond donor-acceptor indices and frequencies
These hydrogen bonds will be plotted in the final image.
"""
self.frequency = defaultdict(int)
for traj in self.hbonds_by_type:
for bond in self.hbonds_by_type[traj]:
# frequency[(residue_atom_idx,ligand_atom_name,residue_atom_name)]=frequency
# residue atom name will be used to determine if hydrogen bond is interacting with a sidechain or backbone
if bond["donor_resnm"]!="LIG":
self.frequency[(bond["donor_idx"],bond["acceptor_atom"],bond["donor_atom"],bond["acceptor_idx"])] += bond["frequency"]
#check whether ligand is donor or acceptor
else:
self.frequency[(bond["acceptor_idx"],bond["donor_atom"],bond["acceptor_atom"],bond["donor_idx"])] += bond["frequency"]
#Add the frequency counts
self.frequency = {i:self.frequency[i] for i in self.frequency if self.frequency[i]>(int(len(self.trajectory))*analysis_cutoff)}
#change the ligand atomname to a heavy atom - required for plot since only heavy atoms shown in final image
self.hbonds_for_drawing = {}
for bond in self.frequency:
atomname = bond[1]
if atomname.startswith("O",0) or atomname.startswith("N",0):
lig_atom=atomname
else:
atomindex = [index for index,atom in enumerate(self.topology_data.universe.ligand.atoms) if atom.name==atomname][0]
rdkit_atom = self.topology_data.mol.GetAtomWithIdx(atomindex)
for neigh in rdkit_atom.GetNeighbors():
neigh_atom_id = neigh.GetIdx()
lig_atom = [atom.name for index,atom in enumerate(self.topology_data.universe.ligand.atoms) if index==neigh_atom_id][0]
self.hbonds_for_drawing[(bond[0],lig_atom,bond[2],bond[3])]=self.frequency[bond] | python | def determine_hbonds_for_drawing(self, analysis_cutoff):
"""
Since plotting all hydrogen bonds could lead to a messy plot, a cutoff has to be
implemented. In this function the frequency of each hydrogen bond is summed and the total
compared against analysis cutoff - a fraction multiplied by trajectory count. Those
hydrogen bonds that are present for longer than analysis cutoff will be plotted in the
final plot.
Takes:
* analysis_cutoff * - (user-defined) fraction of time a hydrogen bond has to be
present in order to be plotted (default - 0.3). It is multiplied by the number of trajectories
Output:
* frequency * - dictionary of hydrogen bond donor-acceptor indices and frequencies
These hydrogen bonds will be plotted in the final image.
"""
self.frequency = defaultdict(int)
for traj in self.hbonds_by_type:
for bond in self.hbonds_by_type[traj]:
# frequency[(residue_atom_idx,ligand_atom_name,residue_atom_name)]=frequency
# residue atom name will be used to determine if hydrogen bond is interacting with a sidechain or backbone
if bond["donor_resnm"]!="LIG":
self.frequency[(bond["donor_idx"],bond["acceptor_atom"],bond["donor_atom"],bond["acceptor_idx"])] += bond["frequency"]
#check whether ligand is donor or acceptor
else:
self.frequency[(bond["acceptor_idx"],bond["donor_atom"],bond["acceptor_atom"],bond["donor_idx"])] += bond["frequency"]
#Add the frequency counts
self.frequency = {i:self.frequency[i] for i in self.frequency if self.frequency[i]>(int(len(self.trajectory))*analysis_cutoff)}
#change the ligand atomname to a heavy atom - required for plot since only heavy atoms shown in final image
self.hbonds_for_drawing = {}
for bond in self.frequency:
atomname = bond[1]
if atomname.startswith("O",0) or atomname.startswith("N",0):
lig_atom=atomname
else:
atomindex = [index for index,atom in enumerate(self.topology_data.universe.ligand.atoms) if atom.name==atomname][0]
rdkit_atom = self.topology_data.mol.GetAtomWithIdx(atomindex)
for neigh in rdkit_atom.GetNeighbors():
neigh_atom_id = neigh.GetIdx()
lig_atom = [atom.name for index,atom in enumerate(self.topology_data.universe.ligand.atoms) if index==neigh_atom_id][0]
self.hbonds_for_drawing[(bond[0],lig_atom,bond[2],bond[3])]=self.frequency[bond] | [
"def",
"determine_hbonds_for_drawing",
"(",
"self",
",",
"analysis_cutoff",
")",
":",
"self",
".",
"frequency",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"traj",
"in",
"self",
".",
"hbonds_by_type",
":",
"for",
"bond",
"in",
"self",
".",
"hbonds_by_type",
"[",
"traj",
"]",
":",
"# frequency[(residue_atom_idx,ligand_atom_name,residue_atom_name)]=frequency",
"# residue atom name will be used to determine if hydrogen bond is interacting with a sidechain or bakcbone",
"if",
"bond",
"[",
"\"donor_resnm\"",
"]",
"!=",
"\"LIG\"",
":",
"self",
".",
"frequency",
"[",
"(",
"bond",
"[",
"\"donor_idx\"",
"]",
",",
"bond",
"[",
"\"acceptor_atom\"",
"]",
",",
"bond",
"[",
"\"donor_atom\"",
"]",
",",
"bond",
"[",
"\"acceptor_idx\"",
"]",
")",
"]",
"+=",
"bond",
"[",
"\"frequency\"",
"]",
"#check whether ligand is donor or acceptor",
"else",
":",
"self",
".",
"frequency",
"[",
"(",
"bond",
"[",
"\"acceptor_idx\"",
"]",
",",
"bond",
"[",
"\"donor_atom\"",
"]",
",",
"bond",
"[",
"\"acceptor_atom\"",
"]",
",",
"bond",
"[",
"\"donor_idx\"",
"]",
")",
"]",
"+=",
"bond",
"[",
"\"frequency\"",
"]",
"#Add the frequency counts",
"self",
".",
"frequency",
"=",
"{",
"i",
":",
"self",
".",
"frequency",
"[",
"i",
"]",
"for",
"i",
"in",
"self",
".",
"frequency",
"if",
"self",
".",
"frequency",
"[",
"i",
"]",
">",
"(",
"int",
"(",
"len",
"(",
"self",
".",
"trajectory",
")",
")",
"*",
"analysis_cutoff",
")",
"}",
"#change the ligand atomname to a heavy atom - required for plot since only heavy atoms shown in final image",
"self",
".",
"hbonds_for_drawing",
"=",
"{",
"}",
"for",
"bond",
"in",
"self",
".",
"frequency",
":",
"atomname",
"=",
"bond",
"[",
"1",
"]",
"if",
"atomname",
".",
"startswith",
"(",
"\"O\"",
",",
"0",
")",
"or",
"atomname",
".",
"startswith",
"(",
"\"N\"",
",",
"0",
")",
":",
"lig_atom",
"=",
"atomname",
"else",
":",
"atomindex",
"=",
"[",
"index",
"for",
"index",
",",
"atom",
"in",
"enumerate",
"(",
"self",
".",
"topology_data",
".",
"universe",
".",
"ligand",
".",
"atoms",
")",
"if",
"atom",
".",
"name",
"==",
"atomname",
"]",
"[",
"0",
"]",
"rdkit_atom",
"=",
"self",
".",
"topology_data",
".",
"mol",
".",
"GetAtomWithIdx",
"(",
"atomindex",
")",
"for",
"neigh",
"in",
"rdkit_atom",
".",
"GetNeighbors",
"(",
")",
":",
"neigh_atom_id",
"=",
"neigh",
".",
"GetIdx",
"(",
")",
"lig_atom",
"=",
"[",
"atom",
".",
"name",
"for",
"index",
",",
"atom",
"in",
"enumerate",
"(",
"self",
".",
"topology_data",
".",
"universe",
".",
"ligand",
".",
"atoms",
")",
"if",
"index",
"==",
"neigh_atom_id",
"]",
"[",
"0",
"]",
"self",
".",
"hbonds_for_drawing",
"[",
"(",
"bond",
"[",
"0",
"]",
",",
"lig_atom",
",",
"bond",
"[",
"2",
"]",
",",
"bond",
"[",
"3",
"]",
")",
"]",
"=",
"self",
".",
"frequency",
"[",
"bond",
"]"
] | Since plotting all hydrogen bonds could lead to a messy plot, a cutoff has to be
implemented. In this function the frequency of each hydrogen bond is summed and the total
compared against analysis cutoff - a fraction multiplied by trajectory count. Those
hydrogen bonds that are present for longer than analysis cutoff will be plotted in the
final plot.
Takes:
* analysis_cutoff * - (user-defined) fraction of time a hydrogen bond has to be
present in order to be plotted (default - 0.3). It is multiplied by the number of trajectories
Output:
* frequency * - dictionary of hydrogen bond donor-acceptor indices and frequencies
These hydrogen bonds will be plotted in the final image. | [
"Since",
"plotting",
"all",
"hydrogen",
"bonds",
"could",
"lead",
"to",
"a",
"messy",
"plot",
"a",
"cutoff",
"has",
"to",
"be",
"imple",
"-",
"mented",
".",
"In",
"this",
"function",
"the",
"frequency",
"of",
"each",
"hydrogen",
"bond",
"is",
"summated",
"and",
"the",
"total",
"compared",
"against",
"analysis",
"cutoff",
"-",
"a",
"fraction",
"multiplied",
"by",
"trajectory",
"count",
".",
"Those",
"hydrogen",
"bonds",
"that",
"are",
"present",
"for",
"longer",
"than",
"analysis",
"cutoff",
"will",
"be",
"plotted",
"in",
"the",
"final",
"plot",
"."
] | d825a4a7b35f3f857d3b81b46c9aee72b0ec697a | https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/analysis/hbonds.py#L153-L195 | train |
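A sketch of the analysis-cutoff filter above with invented frequencies: a bond survives only if its summed frequency exceeds analysis_cutoff times the number of trajectories.
frequency = {('1042', 'O1', 'OD1', '2301'): 1.7,   # hypothetical bond keys
             ('1055', 'N2', 'O', '2388'): 0.2}
n_trajectories = 3
analysis_cutoff = 0.3
kept = {bond: f for bond, f in frequency.items()
        if f > n_trajectories * analysis_cutoff}
print(kept)  # only the first bond remains (1.7 > 0.9)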
wistful/pympris | pympris/common.py | convert2dbus | def convert2dbus(value, signature):
"""Converts `value` type from python to dbus according signature.
:param value: value to convert to dbus object
:param str signature: dbus type signature.
:returns: value in dbus type.
"""
if len(signature) == 2 and signature.startswith('a'):
return dbus.Array(value, signature=signature[-1])
dbus_string_type = dbus.String if PY3 else dbus.UTF8String
type_map = {
'b': dbus.Boolean, 'y': dbus.Byte, 'n': dbus.Int16,
'i': dbus.Int32, 'x': dbus.Int64, 'q': dbus.UInt16, 'u': dbus.UInt32,
't': dbus.UInt64, 'd': dbus.Double, 'o': dbus.ObjectPath,
'g': dbus.Signature, 's': dbus_string_type}
return type_map[signature](value) | python | def convert2dbus(value, signature):
"""Converts `value` type from python to dbus according signature.
:param value: value to convert to dbus object
:param str signature: dbus type signature.
:returns: value in dbus type.
"""
if len(signature) == 2 and signature.startswith('a'):
return dbus.Array(value, signature=signature[-1])
dbus_string_type = dbus.String if PY3 else dbus.UTF8String
type_map = {
'b': dbus.Boolean, 'y': dbus.Byte, 'n': dbus.Int16,
'i': dbus.Int32, 'x': dbus.Int64, 'q': dbus.UInt16, 'u': dbus.UInt32,
't': dbus.UInt64, 'd': dbus.Double, 'o': dbus.ObjectPath,
'g': dbus.Signature, 's': dbus_string_type}
return type_map[signature](value) | [
"def",
"convert2dbus",
"(",
"value",
",",
"signature",
")",
":",
"if",
"len",
"(",
"signature",
")",
"==",
"2",
"and",
"signature",
".",
"startswith",
"(",
"'a'",
")",
":",
"return",
"dbus",
".",
"Array",
"(",
"value",
",",
"signature",
"=",
"signature",
"[",
"-",
"1",
"]",
")",
"dbus_string_type",
"=",
"dbus",
".",
"String",
"if",
"PY3",
"else",
"dbus",
".",
"UTF8String",
"type_map",
"=",
"{",
"'b'",
":",
"dbus",
".",
"Boolean",
",",
"'y'",
":",
"dbus",
".",
"Byte",
",",
"'n'",
":",
"dbus",
".",
"Int16",
",",
"'i'",
":",
"dbus",
".",
"Int32",
",",
"'x'",
":",
"dbus",
".",
"Int64",
",",
"'q'",
":",
"dbus",
".",
"UInt16",
",",
"'u'",
":",
"dbus",
".",
"UInt32",
",",
"'t'",
":",
"dbus",
".",
"UInt64",
",",
"'d'",
":",
"dbus",
".",
"Double",
",",
"'o'",
":",
"dbus",
".",
"ObjectPath",
",",
"'g'",
":",
"dbus",
".",
"Signature",
",",
"'s'",
":",
"dbus_string_type",
"}",
"return",
"type_map",
"[",
"signature",
"]",
"(",
"value",
")"
] | Converts `value` type from python to dbus according to signature.
:param value: value to convert to dbus object
:param str signature: dbus type signature.
:returns: value in dbus type. | [
"Converts",
"value",
"type",
"from",
"python",
"to",
"dbus",
"according",
"signature",
"."
] | 4bd64a1f0d151f2adfc392ab34fd9b38894786cb | https://github.com/wistful/pympris/blob/4bd64a1f0d151f2adfc392ab34fd9b38894786cb/pympris/common.py#L25-L40 | train |
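A usage sketch of the converter above (requires the dbus-python bindings; the expected outputs follow directly from the type map in the record):
import dbus
from pympris.common import convert2dbus

print(convert2dbus(42, 'i'))       # dbus.Int32(42)
print(convert2dbus(1.5, 'd'))      # dbus.Double(1.5)
print(convert2dbus([1, 2], 'au'))  # dbus.Array([1, 2], signature='u')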
wistful/pympris | pympris/common.py | convert | def convert(dbus_obj):
"""Converts dbus_obj from dbus type to python type.
:param dbus_obj: dbus object.
:returns: dbus_obj in python type.
"""
_isinstance = partial(isinstance, dbus_obj)
ConvertType = namedtuple('ConvertType', 'pytype dbustypes')
pyint = ConvertType(int, (dbus.Byte, dbus.Int16, dbus.Int32, dbus.Int64,
dbus.UInt16, dbus.UInt32, dbus.UInt64))
pybool = ConvertType(bool, (dbus.Boolean, ))
pyfloat = ConvertType(float, (dbus.Double, ))
pylist = ConvertType(lambda _obj: list(map(convert, dbus_obj)),
(dbus.Array, ))
pytuple = ConvertType(lambda _obj: tuple(map(convert, dbus_obj)),
(dbus.Struct, ))
types_str = (dbus.ObjectPath, dbus.Signature, dbus.String)
if not PY3:
types_str += (dbus.UTF8String,)
pystr = ConvertType(str if PY3 else unicode, types_str)
pydict = ConvertType(
lambda _obj: dict(zip(map(convert, dbus_obj.keys()),
map(convert, dbus_obj.values())
)
),
(dbus.Dictionary, )
)
for conv in (pyint, pybool, pyfloat, pylist, pytuple, pystr, pydict):
if any(map(_isinstance, conv.dbustypes)):
return conv.pytype(dbus_obj)
else:
return dbus_obj | python | def convert(dbus_obj):
"""Converts dbus_obj from dbus type to python type.
:param dbus_obj: dbus object.
:returns: dbus_obj in python type.
"""
_isinstance = partial(isinstance, dbus_obj)
ConvertType = namedtuple('ConvertType', 'pytype dbustypes')
pyint = ConvertType(int, (dbus.Byte, dbus.Int16, dbus.Int32, dbus.Int64,
dbus.UInt16, dbus.UInt32, dbus.UInt64))
pybool = ConvertType(bool, (dbus.Boolean, ))
pyfloat = ConvertType(float, (dbus.Double, ))
pylist = ConvertType(lambda _obj: list(map(convert, dbus_obj)),
(dbus.Array, ))
pytuple = ConvertType(lambda _obj: tuple(map(convert, dbus_obj)),
(dbus.Struct, ))
types_str = (dbus.ObjectPath, dbus.Signature, dbus.String)
if not PY3:
types_str += (dbus.UTF8String,)
pystr = ConvertType(str if PY3 else unicode, types_str)
pydict = ConvertType(
lambda _obj: dict(zip(map(convert, dbus_obj.keys()),
map(convert, dbus_obj.values())
)
),
(dbus.Dictionary, )
)
for conv in (pyint, pybool, pyfloat, pylist, pytuple, pystr, pydict):
if any(map(_isinstance, conv.dbustypes)):
return conv.pytype(dbus_obj)
else:
return dbus_obj | [
"def",
"convert",
"(",
"dbus_obj",
")",
":",
"_isinstance",
"=",
"partial",
"(",
"isinstance",
",",
"dbus_obj",
")",
"ConvertType",
"=",
"namedtuple",
"(",
"'ConvertType'",
",",
"'pytype dbustypes'",
")",
"pyint",
"=",
"ConvertType",
"(",
"int",
",",
"(",
"dbus",
".",
"Byte",
",",
"dbus",
".",
"Int16",
",",
"dbus",
".",
"Int32",
",",
"dbus",
".",
"Int64",
",",
"dbus",
".",
"UInt16",
",",
"dbus",
".",
"UInt32",
",",
"dbus",
".",
"UInt64",
")",
")",
"pybool",
"=",
"ConvertType",
"(",
"bool",
",",
"(",
"dbus",
".",
"Boolean",
",",
")",
")",
"pyfloat",
"=",
"ConvertType",
"(",
"float",
",",
"(",
"dbus",
".",
"Double",
",",
")",
")",
"pylist",
"=",
"ConvertType",
"(",
"lambda",
"_obj",
":",
"list",
"(",
"map",
"(",
"convert",
",",
"dbus_obj",
")",
")",
",",
"(",
"dbus",
".",
"Array",
",",
")",
")",
"pytuple",
"=",
"ConvertType",
"(",
"lambda",
"_obj",
":",
"tuple",
"(",
"map",
"(",
"convert",
",",
"dbus_obj",
")",
")",
",",
"(",
"dbus",
".",
"Struct",
",",
")",
")",
"types_str",
"=",
"(",
"dbus",
".",
"ObjectPath",
",",
"dbus",
".",
"Signature",
",",
"dbus",
".",
"String",
")",
"if",
"not",
"PY3",
":",
"types_str",
"+=",
"(",
"dbus",
".",
"UTF8String",
",",
")",
"pystr",
"=",
"ConvertType",
"(",
"str",
"if",
"PY3",
"else",
"unicode",
",",
"types_str",
")",
"pydict",
"=",
"ConvertType",
"(",
"lambda",
"_obj",
":",
"dict",
"(",
"zip",
"(",
"map",
"(",
"convert",
",",
"dbus_obj",
".",
"keys",
"(",
")",
")",
",",
"map",
"(",
"convert",
",",
"dbus_obj",
".",
"values",
"(",
")",
")",
")",
")",
",",
"(",
"dbus",
".",
"Dictionary",
",",
")",
")",
"for",
"conv",
"in",
"(",
"pyint",
",",
"pybool",
",",
"pyfloat",
",",
"pylist",
",",
"pytuple",
",",
"pystr",
",",
"pydict",
")",
":",
"if",
"any",
"(",
"map",
"(",
"_isinstance",
",",
"conv",
".",
"dbustypes",
")",
")",
":",
"return",
"conv",
".",
"pytype",
"(",
"dbus_obj",
")",
"else",
":",
"return",
"dbus_obj"
] | Converts dbus_obj from dbus type to python type.
:param dbus_obj: dbus object.
:returns: dbus_obj in python type. | [
"Converts",
"dbus_obj",
"from",
"dbus",
"type",
"to",
"python",
"type",
"."
] | 4bd64a1f0d151f2adfc392ab34fd9b38894786cb | https://github.com/wistful/pympris/blob/4bd64a1f0d151f2adfc392ab34fd9b38894786cb/pympris/common.py#L43-L77 | train |
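A round-trip sketch for the function above (requires dbus-python; the property name 'Rate' is just an example). Container keys and values are converted recursively.
import dbus
from pympris.common import convert

d = dbus.Dictionary({dbus.String('Rate'): dbus.Double(1.0)})
print(convert(d))  # {'Rate': 1.0}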
wistful/pympris | pympris/common.py | converter | def converter(f):
"""Decorator to convert value from dbus type to python type."""
@wraps(f)
def wrapper(*args, **kwds):
return convert(f(*args, **kwds))
return wrapper | python | def converter(f):
"""Decorator to convert value from dbus type to python type."""
@wraps(f)
def wrapper(*args, **kwds):
return convert(f(*args, **kwds))
return wrapper | [
"def",
"converter",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"return",
"convert",
"(",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
")",
"return",
"wrapper"
] | Decorator to convert value from dbus type to python type. | [
"Decorator",
"to",
"convert",
"value",
"from",
"dbus",
"type",
"to",
"python",
"type",
"."
] | 4bd64a1f0d151f2adfc392ab34fd9b38894786cb | https://github.com/wistful/pympris/blob/4bd64a1f0d151f2adfc392ab34fd9b38894786cb/pympris/common.py#L80-L85 | train |
wistful/pympris | pympris/common.py | exception_wrapper | def exception_wrapper(f):
"""Decorator to convert dbus exception to pympris exception."""
@wraps(f)
def wrapper(*args, **kwds):
try:
return f(*args, **kwds)
except dbus.exceptions.DBusException as err:
_args = err.args
raise PyMPRISException(*_args)
return wrapper | python | def exception_wrapper(f):
"""Decorator to convert dbus exception to pympris exception."""
@wraps(f)
def wrapper(*args, **kwds):
try:
return f(*args, **kwds)
except dbus.exceptions.DBusException as err:
_args = err.args
raise PyMPRISException(*_args)
return wrapper | [
"def",
"exception_wrapper",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"try",
":",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
"except",
"dbus",
".",
"exceptions",
".",
"DBusException",
"as",
"err",
":",
"_args",
"=",
"err",
".",
"args",
"raise",
"PyMPRISException",
"(",
"*",
"_args",
")",
"return",
"wrapper"
] | Decorator to convert dbus exception to pympris exception. | [
"Decorator",
"to",
"convert",
"dbus",
"exception",
"to",
"pympris",
"exception",
"."
] | 4bd64a1f0d151f2adfc392ab34fd9b38894786cb | https://github.com/wistful/pympris/blob/4bd64a1f0d151f2adfc392ab34fd9b38894786cb/pympris/common.py#L88-L97 | train |
wistful/pympris | pympris/common.py | available_players | def available_players():
"""Searchs and returns set of unique names of objects
which implements MPRIS2 interfaces.
:returns: set of unique names.
:type: set
"""
bus = dbus.SessionBus()
players = set()
for name in filter(lambda item: item.startswith(MPRIS_NAME_PREFIX),
bus.list_names()):
owner_name = bus.get_name_owner(name)
players.add(convert(owner_name))
return players | python | def available_players():
"""Searchs and returns set of unique names of objects
which implements MPRIS2 interfaces.
:returns: set of unique names.
:type: set
"""
bus = dbus.SessionBus()
players = set()
for name in filter(lambda item: item.startswith(MPRIS_NAME_PREFIX),
bus.list_names()):
owner_name = bus.get_name_owner(name)
players.add(convert(owner_name))
return players | [
"def",
"available_players",
"(",
")",
":",
"bus",
"=",
"dbus",
".",
"SessionBus",
"(",
")",
"players",
"=",
"set",
"(",
")",
"for",
"name",
"in",
"filter",
"(",
"lambda",
"item",
":",
"item",
".",
"startswith",
"(",
"MPRIS_NAME_PREFIX",
")",
",",
"bus",
".",
"list_names",
"(",
")",
")",
":",
"owner_name",
"=",
"bus",
".",
"get_name_owner",
"(",
"name",
")",
"players",
".",
"add",
"(",
"convert",
"(",
"owner_name",
")",
")",
"return",
"players"
] | Searches and returns a set of unique names of objects
which implement MPRIS2 interfaces.
:returns: set of unique names.
:type: set | [
"Searchs",
"and",
"returns",
"set",
"of",
"unique",
"names",
"of",
"objects",
"which",
"implements",
"MPRIS2",
"interfaces",
"."
] | 4bd64a1f0d151f2adfc392ab34fd9b38894786cb | https://github.com/wistful/pympris/blob/4bd64a1f0d151f2adfc392ab34fd9b38894786cb/pympris/common.py#L100-L113 | train |
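A usage sketch for the function above; it needs a running D-Bus session bus and prints nothing unless at least one MPRIS2-capable player is up.
from pympris.common import available_players

for unique_name in available_players():
    print(unique_name)  # e.g. ':1.42' -- unique bus names vary per session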
wistful/pympris | pympris/common.py | signal_wrapper | def signal_wrapper(f):
"""Decorator converts function's arguments from dbus types to python."""
@wraps(f)
def wrapper(*args, **kwds):
args = map(convert, args)
kwds = {convert(k): convert(v) for k, v in kwds.items()}
return f(*args, **kwds)
return wrapper | python | def signal_wrapper(f):
"""Decorator converts function's arguments from dbus types to python."""
@wraps(f)
def wrapper(*args, **kwds):
args = map(convert, args)
kwds = {convert(k): convert(v) for k, v in kwds.items()}
return f(*args, **kwds)
return wrapper | [
"def",
"signal_wrapper",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"args",
"=",
"map",
"(",
"convert",
",",
"args",
")",
"kwds",
"=",
"{",
"convert",
"(",
"k",
")",
":",
"convert",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"kwds",
".",
"items",
"(",
")",
"}",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
"return",
"wrapper"
] | Decorator converts function's arguments from dbus types to python. | [
"Decorator",
"converts",
"function",
"s",
"arguments",
"from",
"dbus",
"types",
"to",
"python",
"."
] | 4bd64a1f0d151f2adfc392ab34fd9b38894786cb | https://github.com/wistful/pympris/blob/4bd64a1f0d151f2adfc392ab34fd9b38894786cb/pympris/common.py#L124-L131 | train |
wistful/pympris | pympris/common.py | filter_properties_signals | def filter_properties_signals(f, signal_iface_name):
"""Filters signals by iface name.
:param function f: function to wrap.
:param str signal_iface_name: interface name.
"""
@wraps(f)
def wrapper(iface, changed_props, invalidated_props, *args, **kwargs):
if iface == signal_iface_name:
f(changed_props, invalidated_props)
return wrapper | python | def filter_properties_signals(f, signal_iface_name):
"""Filters signals by iface name.
:param function f: function to wrap.
:param str signal_iface_name: interface name.
"""
@wraps(f)
def wrapper(iface, changed_props, invalidated_props, *args, **kwargs):
if iface == signal_iface_name:
f(changed_props, invalidated_props)
return wrapper | [
"def",
"filter_properties_signals",
"(",
"f",
",",
"signal_iface_name",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"iface",
",",
"changed_props",
",",
"invalidated_props",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"iface",
"==",
"signal_iface_name",
":",
"f",
"(",
"changed_props",
",",
"invalidated_props",
")",
"return",
"wrapper"
] | Filters signals by iface name.
:param function f: function to wrap.
:param str signal_iface_name: interface name. | [
"Filters",
"signals",
"by",
"iface",
"name",
"."
] | 4bd64a1f0d151f2adfc392ab34fd9b38894786cb | https://github.com/wistful/pympris/blob/4bd64a1f0d151f2adfc392ab34fd9b38894786cb/pympris/common.py#L134-L145 | train |
inveniosoftware-contrib/json-merger | json_merger/contrib/inspirehep/match.py | distance_function_match | def distance_function_match(l1, l2, thresh, dist_fn, norm_funcs=[]):
"""Returns pairs of matching indices from l1 and l2."""
common = []
# We will keep track of the global index in the source list as we
# will successively reduce their sizes.
l1 = list(enumerate(l1))
l2 = list(enumerate(l2))
# Use the distance function and threshold on hints given by normalization.
# See _match_by_norm_func for implementation details.
# Also wrap the list element functions to ignore the global list
# index computed above.
for norm_fn in norm_funcs:
new_common, l1, l2 = _match_by_norm_func(
l1, l2,
lambda a: norm_fn(a[1]),
lambda a1, a2: dist_fn(a1[1], a2[1]),
thresh)
# Keep only the global list index in the end result.
common.extend((c1[0], c2[0]) for c1, c2 in new_common)
# Take any remaining unmatched entries and try to match them using the
# Munkres algorithm.
dist_matrix = [[dist_fn(e1, e2) for i2, e2 in l2] for i1, e1 in l1]
# Call Munkres on connected components on the remaining bipartite graph.
# An edge links an element from l1 with an element from l2 only if
# the distance between the elements is less than (or equal to) the threshold.
components = BipartiteConnectedComponents()
for l1_i in range(len(l1)):
for l2_i in range(len(l2)):
if dist_matrix[l1_i][l2_i] > thresh:
continue
components.add_edge(l1_i, l2_i)
for l1_indices, l2_indices in components.get_connected_components():
# Build a partial distance matrix for each connected component.
part_l1 = [l1[i] for i in l1_indices]
part_l2 = [l2[i] for i in l2_indices]
part_dist_matrix = [[dist_matrix[l1_i][l2_i] for l2_i in l2_indices]
for l1_i in l1_indices]
part_cmn = _match_munkres(part_l1, part_l2, part_dist_matrix, thresh)
common.extend((c1[0], c2[0]) for c1, c2 in part_cmn)
return common | python | def distance_function_match(l1, l2, thresh, dist_fn, norm_funcs=[]):
"""Returns pairs of matching indices from l1 and l2."""
common = []
# We will keep track of the global index in the source list as we
# will successively reduce their sizes.
l1 = list(enumerate(l1))
l2 = list(enumerate(l2))
# Use the distance function and threshold on hints given by normalization.
# See _match_by_norm_func for implementation details.
# Also wrap the list element functions to ignore the global list
# index computed above.
for norm_fn in norm_funcs:
new_common, l1, l2 = _match_by_norm_func(
l1, l2,
lambda a: norm_fn(a[1]),
lambda a1, a2: dist_fn(a1[1], a2[1]),
thresh)
# Keep only the global list index in the end result.
common.extend((c1[0], c2[0]) for c1, c2 in new_common)
# Take any remaining unmatched entries and try to match them using the
# Munkres algorithm.
dist_matrix = [[dist_fn(e1, e2) for i2, e2 in l2] for i1, e1 in l1]
# Call Munkres on connected components on the remaining bipartite graph.
# An edge links an element from l1 with an element from l2 only if
# the distance between the elements is less than (or equal to) the threshold.
components = BipartiteConnectedComponents()
for l1_i in range(len(l1)):
for l2_i in range(len(l2)):
if dist_matrix[l1_i][l2_i] > thresh:
continue
components.add_edge(l1_i, l2_i)
for l1_indices, l2_indices in components.get_connected_components():
# Build a partial distance matrix for each connected component.
part_l1 = [l1[i] for i in l1_indices]
part_l2 = [l2[i] for i in l2_indices]
part_dist_matrix = [[dist_matrix[l1_i][l2_i] for l2_i in l2_indices]
for l1_i in l1_indices]
part_cmn = _match_munkres(part_l1, part_l2, part_dist_matrix, thresh)
common.extend((c1[0], c2[0]) for c1, c2 in part_cmn)
return common | [
"def",
"distance_function_match",
"(",
"l1",
",",
"l2",
",",
"thresh",
",",
"dist_fn",
",",
"norm_funcs",
"=",
"[",
"]",
")",
":",
"common",
"=",
"[",
"]",
"# We will keep track of the global index in the source list as we",
"# will successively reduce their sizes.",
"l1",
"=",
"list",
"(",
"enumerate",
"(",
"l1",
")",
")",
"l2",
"=",
"list",
"(",
"enumerate",
"(",
"l2",
")",
")",
"# Use the distance function and threshold on hints given by normalization.",
"# See _match_by_norm_func for implementation details.",
"# Also wrap the list element function function to ignore the global list",
"# index computed above.",
"for",
"norm_fn",
"in",
"norm_funcs",
":",
"new_common",
",",
"l1",
",",
"l2",
"=",
"_match_by_norm_func",
"(",
"l1",
",",
"l2",
",",
"lambda",
"a",
":",
"norm_fn",
"(",
"a",
"[",
"1",
"]",
")",
",",
"lambda",
"a1",
",",
"a2",
":",
"dist_fn",
"(",
"a1",
"[",
"1",
"]",
",",
"a2",
"[",
"1",
"]",
")",
",",
"thresh",
")",
"# Keep only the global list index in the end result.",
"common",
".",
"extend",
"(",
"(",
"c1",
"[",
"0",
"]",
",",
"c2",
"[",
"0",
"]",
")",
"for",
"c1",
",",
"c2",
"in",
"new_common",
")",
"# Take any remaining umatched entries and try to match them using the",
"# Munkres algorithm.",
"dist_matrix",
"=",
"[",
"[",
"dist_fn",
"(",
"e1",
",",
"e2",
")",
"for",
"i2",
",",
"e2",
"in",
"l2",
"]",
"for",
"i1",
",",
"e1",
"in",
"l1",
"]",
"# Call Munkres on connected components on the remaining bipartite graph.",
"# An edge links an element from l1 with an element from l2 only if",
"# the distance between the elements is less (or equal) than the theshold.",
"components",
"=",
"BipartiteConnectedComponents",
"(",
")",
"for",
"l1_i",
"in",
"range",
"(",
"len",
"(",
"l1",
")",
")",
":",
"for",
"l2_i",
"in",
"range",
"(",
"len",
"(",
"l2",
")",
")",
":",
"if",
"dist_matrix",
"[",
"l1_i",
"]",
"[",
"l2_i",
"]",
">",
"thresh",
":",
"continue",
"components",
".",
"add_edge",
"(",
"l1_i",
",",
"l2_i",
")",
"for",
"l1_indices",
",",
"l2_indices",
"in",
"components",
".",
"get_connected_components",
"(",
")",
":",
"# Build a partial distance matrix for each connected component.",
"part_l1",
"=",
"[",
"l1",
"[",
"i",
"]",
"for",
"i",
"in",
"l1_indices",
"]",
"part_l2",
"=",
"[",
"l2",
"[",
"i",
"]",
"for",
"i",
"in",
"l2_indices",
"]",
"part_dist_matrix",
"=",
"[",
"[",
"dist_matrix",
"[",
"l1_i",
"]",
"[",
"l2_i",
"]",
"for",
"l2_i",
"in",
"l2_indices",
"]",
"for",
"l1_i",
"in",
"l1_indices",
"]",
"part_cmn",
"=",
"_match_munkres",
"(",
"part_l1",
",",
"part_l2",
",",
"part_dist_matrix",
",",
"thresh",
")",
"common",
".",
"extend",
"(",
"(",
"c1",
"[",
"0",
"]",
",",
"c2",
"[",
"0",
"]",
")",
"for",
"c1",
",",
"c2",
"in",
"part_cmn",
")",
"return",
"common"
] | Returns pairs of matching indices from l1 and l2. | [
"Returns",
"pairs",
"of",
"matching",
"indices",
"from",
"l1",
"and",
"l2",
"."
] | adc6d372da018427e1db7b92424d3471e01a4118 | https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/contrib/inspirehep/match.py#L30-L76 | train |
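A worked sketch of the matcher above on the toy data from the next record's docstring, using a 0/1 distance and first-character normalization:
from json_merger.contrib.inspirehep.match import distance_function_match

l1 = ['X1', 'Y1', 'Y2', 'Z5']
l2 = ['X1', 'Y3', 'Z1']
dist = lambda a, b: 0 if a == b else 1
pairs = distance_function_match(l1, l2, 0, dist, norm_funcs=[lambda x: x[0]])
print(pairs)  # [(0, 0)] -- only 'X1'/'X1' falls within the threshold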
inveniosoftware-contrib/json-merger | json_merger/contrib/inspirehep/match.py | _match_by_norm_func | def _match_by_norm_func(l1, l2, norm_fn, dist_fn, thresh):
"""Matches elements in l1 and l2 using normalization functions.
Splits the elements in each list into buckets given by the normalization
function. If the same normalization value points to a bucket from the
first list and a bucket from the second list, both with a single element
we consider the elements in the list as matching if the distance between
them is less than (or equal to) the threshold.
e.g. l1 = ['X1', 'Y1', 'Y2', 'Z5'], l2 = ['X1', 'Y3', 'Z1']
norm_fn = lambda x: x[0]
dist_fn = lambda e1, e2: 0 if e1 == e2 else 1
thresh = 0
The buckets will then be:
l1_bucket = {'X': ['X1'], 'Y': ['Y1', 'Y2'], 'Z': ['Z5']}
l2_bucket = {'X': ['X1'], 'Y': ['Y3'], 'Z': ['Z1']}
For each normalized value:
'X' -> consider 'X1' equal to 'X1' since the distance is equal to
the threshold
'Y' -> skip the lists since we have multiple possible matches
'Z' -> consider 'Z1' and 'Z5' as different since the distance is
greater than the threshold.
Return:
[('X1', 'X1')]
"""
common = []
l1_only_idx = set(range(len(l1)))
l2_only_idx = set(range(len(l2)))
buckets_l1 = _group_by_fn(enumerate(l1), lambda x: norm_fn(x[1]))
buckets_l2 = _group_by_fn(enumerate(l2), lambda x: norm_fn(x[1]))
for normed, l1_elements in buckets_l1.items():
l2_elements = buckets_l2.get(normed, [])
if not l1_elements or not l2_elements:
continue
_, (_, e1_first) = l1_elements[0]
_, (_, e2_first) = l2_elements[0]
match_is_ambiguous = not (
len(l1_elements) == len(l2_elements) and (
all(e2 == e2_first for (_, (_, e2)) in l2_elements) or
all(e1 == e1_first for (_, (_, e1)) in l1_elements)
)
)
if match_is_ambiguous:
continue
for (e1_idx, e1), (e2_idx, e2) in zip(l1_elements, l2_elements):
if dist_fn(e1, e2) > thresh:
continue
l1_only_idx.remove(e1_idx)
l2_only_idx.remove(e2_idx)
common.append((e1, e2))
l1_only = [l1[i] for i in l1_only_idx]
l2_only = [l2[i] for i in l2_only_idx]
return common, l1_only, l2_only | python | def _match_by_norm_func(l1, l2, norm_fn, dist_fn, thresh):
"""Matches elements in l1 and l2 using normalization functions.
Splits the elements in each list into buckets given by the normalization
function. If the same normalization value points to a bucket from the
first list and a bucket from the second list, both with a single element
we consider the elements in the list as matching if the distance between
them is less than (or equal to) the threshold.
e.g. l1 = ['X1', 'Y1', 'Y2', 'Z5'], l2 = ['X1', 'Y3', 'Z1']
norm_fn = lambda x: x[0]
dist_fn = lambda e1, e2: 0 if e1 == e2 else 1
thresh = 0
The buckets will then be:
l1_bucket = {'X': ['X1'], 'Y': ['Y1', 'Y2'], 'Z': ['Z5']}
l2_bucket = {'X': ['X1'], 'Y': ['Y3'], 'Z': ['Z1']}
For each normalized value:
'X' -> consider 'X1' equal to 'X1' since the distance is equal to
the threshold
'Y' -> skip the lists since we have multiple possible matches
'Z' -> consider 'Z1' and 'Z5' as different since the distance is
greater than the threshold.
Return:
[('X1', 'X1')]
"""
common = []
l1_only_idx = set(range(len(l1)))
l2_only_idx = set(range(len(l2)))
buckets_l1 = _group_by_fn(enumerate(l1), lambda x: norm_fn(x[1]))
buckets_l2 = _group_by_fn(enumerate(l2), lambda x: norm_fn(x[1]))
for normed, l1_elements in buckets_l1.items():
l2_elements = buckets_l2.get(normed, [])
if not l1_elements or not l2_elements:
continue
_, (_, e1_first) = l1_elements[0]
_, (_, e2_first) = l2_elements[0]
match_is_ambiguous = not (
len(l1_elements) == len(l2_elements) and (
all(e2 == e2_first for (_, (_, e2)) in l2_elements) or
all(e1 == e1_first for (_, (_, e1)) in l1_elements)
)
)
if match_is_ambiguous:
continue
for (e1_idx, e1), (e2_idx, e2) in zip(l1_elements, l2_elements):
if dist_fn(e1, e2) > thresh:
continue
l1_only_idx.remove(e1_idx)
l2_only_idx.remove(e2_idx)
common.append((e1, e2))
l1_only = [l1[i] for i in l1_only_idx]
l2_only = [l2[i] for i in l2_only_idx]
return common, l1_only, l2_only | [
"def",
"_match_by_norm_func",
"(",
"l1",
",",
"l2",
",",
"norm_fn",
",",
"dist_fn",
",",
"thresh",
")",
":",
"common",
"=",
"[",
"]",
"l1_only_idx",
"=",
"set",
"(",
"range",
"(",
"len",
"(",
"l1",
")",
")",
")",
"l2_only_idx",
"=",
"set",
"(",
"range",
"(",
"len",
"(",
"l2",
")",
")",
")",
"buckets_l1",
"=",
"_group_by_fn",
"(",
"enumerate",
"(",
"l1",
")",
",",
"lambda",
"x",
":",
"norm_fn",
"(",
"x",
"[",
"1",
"]",
")",
")",
"buckets_l2",
"=",
"_group_by_fn",
"(",
"enumerate",
"(",
"l2",
")",
",",
"lambda",
"x",
":",
"norm_fn",
"(",
"x",
"[",
"1",
"]",
")",
")",
"for",
"normed",
",",
"l1_elements",
"in",
"buckets_l1",
".",
"items",
"(",
")",
":",
"l2_elements",
"=",
"buckets_l2",
".",
"get",
"(",
"normed",
",",
"[",
"]",
")",
"if",
"not",
"l1_elements",
"or",
"not",
"l2_elements",
":",
"continue",
"_",
",",
"(",
"_",
",",
"e1_first",
")",
"=",
"l1_elements",
"[",
"0",
"]",
"_",
",",
"(",
"_",
",",
"e2_first",
")",
"=",
"l2_elements",
"[",
"0",
"]",
"match_is_ambiguous",
"=",
"not",
"(",
"len",
"(",
"l1_elements",
")",
"==",
"len",
"(",
"l2_elements",
")",
"and",
"(",
"all",
"(",
"e2",
"==",
"e2_first",
"for",
"(",
"_",
",",
"(",
"_",
",",
"e2",
")",
")",
"in",
"l2_elements",
")",
"or",
"all",
"(",
"e1",
"==",
"e1_first",
"for",
"(",
"_",
",",
"(",
"_",
",",
"e1",
")",
")",
"in",
"l1_elements",
")",
")",
")",
"if",
"match_is_ambiguous",
":",
"continue",
"for",
"(",
"e1_idx",
",",
"e1",
")",
",",
"(",
"e2_idx",
",",
"e2",
")",
"in",
"zip",
"(",
"l1_elements",
",",
"l2_elements",
")",
":",
"if",
"dist_fn",
"(",
"e1",
",",
"e2",
")",
">",
"thresh",
":",
"continue",
"l1_only_idx",
".",
"remove",
"(",
"e1_idx",
")",
"l2_only_idx",
".",
"remove",
"(",
"e2_idx",
")",
"common",
".",
"append",
"(",
"(",
"e1",
",",
"e2",
")",
")",
"l1_only",
"=",
"[",
"l1",
"[",
"i",
"]",
"for",
"i",
"in",
"l1_only_idx",
"]",
"l2_only",
"=",
"[",
"l2",
"[",
"i",
"]",
"for",
"i",
"in",
"l2_only_idx",
"]",
"return",
"common",
",",
"l1_only",
",",
"l2_only"
] | Matches elements in l1 and l2 using normalization functions.
Splits the elements in each list into buckets given by the normalization
function. If the same normalization value points to a bucket from the
first list and a bucket from the second list, both with a single element
we consider the elements in the list as matching if the distance between
them is less than (or equal to) the threshold.
e.g. l1 = ['X1', 'Y1', 'Y2', 'Z5'], l2 = ['X1', 'Y3', 'Z1']
norm_fn = lambda x: x[0]
dist_fn = lambda e1, e2: 0 if e1 == e2 else 1
thresh = 0
The buckets will then be:
l1_bucket = {'X': ['X1'], 'Y': ['Y1', 'Y2'], 'Z': ['Z5']}
l2_bucket = {'X': ['X1'], 'Y': ['Y3'], 'Z': ['Z1']}
For each normalized value:
'X' -> consider 'X1' equal to 'X1' since the distance is equal to
the threshold
'Y' -> skip the lists since we have multiple possible matches
'Z' -> consider 'Z1' and 'Z5' as different since the distance is
greater than the threshold.
Return:
[('X1', 'X1')] | [
"Matches",
"elements",
"in",
"l1",
"and",
"l2",
"using",
"normalization",
"functions",
"."
] | adc6d372da018427e1db7b92424d3471e01a4118 | https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/contrib/inspirehep/match.py#L79-L138 | train |
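A runnable sketch of the bucketing step described in the docstring above; the private _group_by_fn helper is not shown in this record, so an assumed equivalent is reproduced inline rather than imported:
from collections import defaultdict

def group_by_first_char(items):
    buckets = defaultdict(list)
    for idx, item in enumerate(items):
        buckets[item[0]].append((idx, item))
    return buckets

print(dict(group_by_first_char(['X1', 'Y1', 'Y2', 'Z5'])))
# {'X': [(0, 'X1')], 'Y': [(1, 'Y1'), (2, 'Y2')], 'Z': [(3, 'Z5')]}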
inveniosoftware-contrib/json-merger | json_merger/contrib/inspirehep/match.py | _match_munkres | def _match_munkres(l1, l2, dist_matrix, thresh):
"""Matches two lists using the Munkres algorithm.
Returns pairs of matching elements from the two lists by minimizing the
sum of the distances between the linked elements and keeping only the
pairs whose distance is less than (or equal to) the threshold.
"""
equal_dist_matches = set()
m = Munkres()
indices = m.compute(dist_matrix)
for l1_idx, l2_idx in indices:
dst = dist_matrix[l1_idx][l2_idx]
if dst > thresh:
continue
for eq_l2_idx, eq_val in enumerate(dist_matrix[l1_idx]):
if abs(dst - eq_val) < 1e-9:
equal_dist_matches.add((l1_idx, eq_l2_idx))
for eq_l1_idx, eq_row in enumerate(dist_matrix):
if abs(dst - eq_row[l2_idx]) < 1e-9:
equal_dist_matches.add((eq_l1_idx, l2_idx))
return [(l1[l1_idx], l2[l2_idx]) for l1_idx, l2_idx in equal_dist_matches] | python | def _match_munkres(l1, l2, dist_matrix, thresh):
"""Matches two lists using the Munkres algorithm.
Returns pairs of matching elements from the two lists by minimizing the
sum of the distances between the linked elements and keeping only the
pairs whose distance is less than (or equal to) the threshold.
"""
equal_dist_matches = set()
m = Munkres()
indices = m.compute(dist_matrix)
for l1_idx, l2_idx in indices:
dst = dist_matrix[l1_idx][l2_idx]
if dst > thresh:
continue
for eq_l2_idx, eq_val in enumerate(dist_matrix[l1_idx]):
if abs(dst - eq_val) < 1e-9:
equal_dist_matches.add((l1_idx, eq_l2_idx))
for eq_l1_idx, eq_row in enumerate(dist_matrix):
if abs(dst - eq_row[l2_idx]) < 1e-9:
equal_dist_matches.add((eq_l1_idx, l2_idx))
return [(l1[l1_idx], l2[l2_idx]) for l1_idx, l2_idx in equal_dist_matches] | [
"def",
"_match_munkres",
"(",
"l1",
",",
"l2",
",",
"dist_matrix",
",",
"thresh",
")",
":",
"equal_dist_matches",
"=",
"set",
"(",
")",
"m",
"=",
"Munkres",
"(",
")",
"indices",
"=",
"m",
".",
"compute",
"(",
"dist_matrix",
")",
"for",
"l1_idx",
",",
"l2_idx",
"in",
"indices",
":",
"dst",
"=",
"dist_matrix",
"[",
"l1_idx",
"]",
"[",
"l2_idx",
"]",
"if",
"dst",
">",
"thresh",
":",
"continue",
"for",
"eq_l2_idx",
",",
"eq_val",
"in",
"enumerate",
"(",
"dist_matrix",
"[",
"l1_idx",
"]",
")",
":",
"if",
"abs",
"(",
"dst",
"-",
"eq_val",
")",
"<",
"1e-9",
":",
"equal_dist_matches",
".",
"add",
"(",
"(",
"l1_idx",
",",
"eq_l2_idx",
")",
")",
"for",
"eq_l1_idx",
",",
"eq_row",
"in",
"enumerate",
"(",
"dist_matrix",
")",
":",
"if",
"abs",
"(",
"dst",
"-",
"eq_row",
"[",
"l2_idx",
"]",
")",
"<",
"1e-9",
":",
"equal_dist_matches",
".",
"add",
"(",
"(",
"eq_l1_idx",
",",
"l2_idx",
")",
")",
"return",
"[",
"(",
"l1",
"[",
"l1_idx",
"]",
",",
"l2",
"[",
"l2_idx",
"]",
")",
"for",
"l1_idx",
",",
"l2_idx",
"in",
"equal_dist_matches",
"]"
] | Matches two lists using the Munkres algorithm.
Returns pairs of matching elements from the two lists by minimizing the
sum of the distances between the linked elements and keeping only the
pairs whose distance is less than (or equal to) the threshold. | [
"Matches",
"two",
"lists",
"using",
"the",
"Munkres",
"algorithm",
"."
] | adc6d372da018427e1db7b92424d3471e01a4118 | https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/contrib/inspirehep/match.py#L141-L163 | train |
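An illustrative in-module call of _match_munkres with a 2x2 cost matrix, assuming the munkres package that provides Munkres().compute(); the optimal assignment is the diagonal.

l1 = ['a', 'b']
l2 = ['a2', 'b2']
dist_matrix = [[0, 5],
               [5, 1]]
# Munkres minimizes total cost -> assignment (0, 0) and (1, 1);
# with thresh=1 both pairs pass the distance cutoff.
_match_munkres(l1, l2, dist_matrix, thresh=1)
# -> [('a', 'a2'), ('b', 'b2')]  (built from a set, so pair order may vary)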
robinandeer/puzzle | puzzle/plugins/sql/mixins/actions/suspect.py | SuspectActions.add_suspect | def add_suspect(self, case_obj, variant_obj):
"""Link a suspect to a case."""
new_suspect = Suspect(case=case_obj, variant_id=variant_obj.variant_id,
name=variant_obj.display_name)
self.session.add(new_suspect)
self.save()
return new_suspect | python | def add_suspect(self, case_obj, variant_obj):
"""Link a suspect to a case."""
new_suspect = Suspect(case=case_obj, variant_id=variant_obj.variant_id,
name=variant_obj.display_name)
self.session.add(new_suspect)
self.save()
return new_suspect | [
"def",
"add_suspect",
"(",
"self",
",",
"case_obj",
",",
"variant_obj",
")",
":",
"new_suspect",
"=",
"Suspect",
"(",
"case",
"=",
"case_obj",
",",
"variant_id",
"=",
"variant_obj",
".",
"variant_id",
",",
"name",
"=",
"variant_obj",
".",
"display_name",
")",
"self",
".",
"session",
".",
"add",
"(",
"new_suspect",
")",
"self",
".",
"save",
"(",
")",
"return",
"new_suspect"
] | Link a suspect to a case. | [
"Link",
"a",
"suspect",
"to",
"a",
"case",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/actions/suspect.py#L10-L16 | train |
robinandeer/puzzle | puzzle/plugins/sql/mixins/actions/suspect.py | SuspectActions.delete_suspect | def delete_suspect(self, suspect_id):
"""De-link a suspect from a case."""
suspect_obj = self.suspect(suspect_id)
logger.debug("Deleting suspect {0}".format(suspect_obj.name))
self.session.delete(suspect_obj)
self.save() | python | def delete_suspect(self, suspect_id):
"""De-link a suspect from a case."""
suspect_obj = self.suspect(suspect_id)
logger.debug("Deleting suspect {0}".format(suspect_obj.name))
self.session.delete(suspect_obj)
self.save() | [
"def",
"delete_suspect",
"(",
"self",
",",
"suspect_id",
")",
":",
"suspect_obj",
"=",
"self",
".",
"suspect",
"(",
"suspect_id",
")",
"logger",
".",
"debug",
"(",
"\"Deleting suspect {0}\"",
".",
"format",
"(",
"suspect_obj",
".",
"name",
")",
")",
"self",
".",
"session",
".",
"delete",
"(",
"suspect_obj",
")",
"self",
".",
"save",
"(",
")"
] | De-link a suspect from a case. | [
"De",
"-",
"link",
"a",
"suspect",
"from",
"a",
"case",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/actions/suspect.py#L22-L27 | train |
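A hedged round-trip over the two suspect actions above, assuming a configured SqlStore instance named store; the case id, variant id, and the suspect.id attribute are placeholders.

case_obj = store.case(case_id='case-1')                      # placeholder case id
variant_obj = store.variant('case-1', variant_id='1_880086_T_C')
suspect = store.add_suspect(case_obj, variant_obj)           # link to the case
store.delete_suspect(suspect.id)                             # unlink again ('id' attribute assumed)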
robinandeer/puzzle | puzzle/log.py | configure_stream | def configure_stream(level='WARNING'):
"""Configure root logger using a standard stream handler.
Args:
level (string, optional): lowest level to log to the console
Returns:
logging.RootLogger: root logger instance with attached handler
"""
# get the root logger
root_logger = logging.getLogger()
# set the logger level to the same as will be used by the handler
root_logger.setLevel(level)
# customize formatter, align each column
template = "[%(asctime)s] %(name)-25s %(levelname)-8s %(message)s"
formatter = logging.Formatter(template)
# add a basic STDERR handler to the logger
console = logging.StreamHandler()
console.setLevel(level)
console.setFormatter(formatter)
root_logger.addHandler(console)
return root_logger | python | def configure_stream(level='WARNING'):
"""Configure root logger using a standard stream handler.
Args:
level (string, optional): lowest level to log to the console
Returns:
logging.RootLogger: root logger instance with attached handler
"""
# get the root logger
root_logger = logging.getLogger()
# set the logger level to the same as will be used by the handler
root_logger.setLevel(level)
# customize formatter, align each column
template = "[%(asctime)s] %(name)-25s %(levelname)-8s %(message)s"
formatter = logging.Formatter(template)
# add a basic STDERR handler to the logger
console = logging.StreamHandler()
console.setLevel(level)
console.setFormatter(formatter)
root_logger.addHandler(console)
return root_logger | [
"def",
"configure_stream",
"(",
"level",
"=",
"'WARNING'",
")",
":",
"# get the root logger",
"root_logger",
"=",
"logging",
".",
"getLogger",
"(",
")",
"# set the logger level to the same as will be used by the handler",
"root_logger",
".",
"setLevel",
"(",
"level",
")",
"# customize formatter, align each column",
"template",
"=",
"\"[%(asctime)s] %(name)-25s %(levelname)-8s %(message)s\"",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"template",
")",
"# add a basic STDERR handler to the logger",
"console",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"console",
".",
"setLevel",
"(",
"level",
")",
"console",
".",
"setFormatter",
"(",
"formatter",
")",
"root_logger",
".",
"addHandler",
"(",
"console",
")",
"return",
"root_logger"
] | Configure root logger using a standard stream handler.
Args:
level (string, optional): lowest level to log to the console
Returns:
logging.RootLogger: root logger instance with attached handler | [
"Configure",
"root",
"logger",
"using",
"a",
"standard",
"stream",
"handler",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/log.py#L7-L31 | train |
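Typical usage of configure_stream: call it once at program start, then log through module-level loggers; the timestamp shown is illustrative.

import logging

configure_stream(level='INFO')
logging.getLogger('puzzle.cli').info('puzzle started')
# [2016-01-01 12:00:00,000] puzzle.cli               INFO     puzzle started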
iwoca/django-deep-collector | deep_collector/core.py | DeepCollector._is_same_type_as_root | def _is_same_type_as_root(self, obj):
"""
Testing if we try to collect an object of the same type as root.
This is not really a good sign, because it means that we are going to collect a whole new tree, that will
maybe collect a new tree, that will...
"""
if not self.ALLOWS_SAME_TYPE_AS_ROOT_COLLECT:
obj_model = get_model_from_instance(obj)
obj_key = get_key_from_instance(obj)
is_same_type_as_root = obj_model == self.root_obj_model and obj_key != self.root_obj_key
if is_same_type_as_root:
self.emit_event(type='same_type_as_root', obj=obj)
return is_same_type_as_root
else:
return False | python | def _is_same_type_as_root(self, obj):
"""
Testing if we try to collect an object of the same type as root.
This is not really a good sign, because it means that we are going to collect a whole new tree, that will
maybe collect a new tree, that will...
"""
if not self.ALLOWS_SAME_TYPE_AS_ROOT_COLLECT:
obj_model = get_model_from_instance(obj)
obj_key = get_key_from_instance(obj)
is_same_type_as_root = obj_model == self.root_obj_model and obj_key != self.root_obj_key
if is_same_type_as_root:
self.emit_event(type='same_type_as_root', obj=obj)
return is_same_type_as_root
else:
return False | [
"def",
"_is_same_type_as_root",
"(",
"self",
",",
"obj",
")",
":",
"if",
"not",
"self",
".",
"ALLOWS_SAME_TYPE_AS_ROOT_COLLECT",
":",
"obj_model",
"=",
"get_model_from_instance",
"(",
"obj",
")",
"obj_key",
"=",
"get_key_from_instance",
"(",
"obj",
")",
"is_same_type_as_root",
"=",
"obj_model",
"==",
"self",
".",
"root_obj_model",
"and",
"obj_key",
"!=",
"self",
".",
"root_obj_key",
"if",
"is_same_type_as_root",
":",
"self",
".",
"emit_event",
"(",
"type",
"=",
"'same_type_as_root'",
",",
"obj",
"=",
"obj",
")",
"return",
"is_same_type_as_root",
"else",
":",
"return",
"False"
] | Testing if we try to collect an object of the same type as root.
This is not really a good sign, because it means that we are going to collect a whole new tree, that will
maybe collect a new tree, that will... | [
"Testing",
"if",
"we",
"try",
"to",
"collect",
"an",
"object",
"of",
"the",
"same",
"type",
"as",
"root",
".",
"This",
"is",
"not",
"really",
"a",
"good",
"sign",
"because",
"it",
"means",
"that",
"we",
"are",
"going",
"to",
"collect",
"a",
"whole",
"new",
"tree",
"that",
"will",
"maybe",
"collect",
"a",
"new",
"tree",
"that",
"will",
"..."
] | 1bd599d5362ade525cb51d6ee70713a3f58af219 | https://github.com/iwoca/django-deep-collector/blob/1bd599d5362ade525cb51d6ee70713a3f58af219/deep_collector/core.py#L203-L219 | train |
jalmeroth/pymusiccast | pymusiccast/media_status.py | MediaStatus.initialize | def initialize(self, data):
""" initialize variable from loaded data """
for item in data:
if hasattr(self, item):
setattr(self, item, data[item]) | python | def initialize(self, data):
""" initialize variable from loaded data """
for item in data:
if hasattr(self, item):
setattr(self, item, data[item]) | [
"def",
"initialize",
"(",
"self",
",",
"data",
")",
":",
"for",
"item",
"in",
"data",
":",
"if",
"hasattr",
"(",
"self",
",",
"item",
")",
":",
"setattr",
"(",
"self",
",",
"item",
",",
"data",
"[",
"item",
"]",
")"
] | initialize attributes from loaded data | [
"initialize",
"variable",
"from",
"loaded",
"data"
] | 616379ae22d6b518c61042d58be6d18a46242168 | https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/media_status.py#L57-L61 | train |
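A minimal sketch of the hasattr guard in initialize: only keys that already exist as attributes are copied; the no-argument constructor is an assumption.

status = MediaStatus()                            # constructor signature assumed
status.initialize({'artist': 'Foo', 'bogus': 1})
# status.artist == 'Foo'; 'bogus' is silently ignored by the hasattr check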
robinandeer/puzzle | puzzle/plugins/vcf/mixins/variant_extras/transcripts.py | TranscriptExtras._add_transcripts | def _add_transcripts(self, variant_obj, info_dict):
"""Return all transcripts sound in the vcf file"""
vep_string = info_dict.get('CSQ')
#Check if snpeff annotation:
snpeff_string = info_dict.get('ANN')
# We check one of these.
# VEP has precedence over snpeff
if vep_string:
#Get the vep annotations
vep_info = get_vep_info(
vep_string = vep_string,
vep_header = self.vep_header
)
for transcript_info in vep_info:
transcript = self._get_vep_transcript(transcript_info)
variant_obj.add_transcript(transcript)
elif snpeff_string:
#Get the vep annotations
snpeff_info = get_snpeff_info(
snpeff_string = snpeff_string,
snpeff_header = self.snpeff_header
)
for transcript_info in snpeff_info:
transcript = self._get_snpeff_transcript(transcript_info)
variant_obj.add_transcript(transcript) | python | def _add_transcripts(self, variant_obj, info_dict):
"""Return all transcripts sound in the vcf file"""
vep_string = info_dict.get('CSQ')
#Check if snpeff annotation:
snpeff_string = info_dict.get('ANN')
# We check one of these.
# VEP has precedence over snpeff
if vep_string:
#Get the vep annotations
vep_info = get_vep_info(
vep_string = vep_string,
vep_header = self.vep_header
)
for transcript_info in vep_info:
transcript = self._get_vep_transcript(transcript_info)
variant_obj.add_transcript(transcript)
elif snpeff_string:
#Get the snpeff annotations
snpeff_info = get_snpeff_info(
snpeff_string = snpeff_string,
snpeff_header = self.snpeff_header
)
for transcript_info in snpeff_info:
transcript = self._get_snpeff_transcript(transcript_info)
variant_obj.add_transcript(transcript) | [
"def",
"_add_transcripts",
"(",
"self",
",",
"variant_obj",
",",
"info_dict",
")",
":",
"vep_string",
"=",
"info_dict",
".",
"get",
"(",
"'CSQ'",
")",
"#Check if snpeff annotation:",
"snpeff_string",
"=",
"info_dict",
".",
"get",
"(",
"'ANN'",
")",
"# We check one of these.",
"# VEP has presedence over snpeff",
"if",
"vep_string",
":",
"#Get the vep annotations",
"vep_info",
"=",
"get_vep_info",
"(",
"vep_string",
"=",
"vep_string",
",",
"vep_header",
"=",
"self",
".",
"vep_header",
")",
"for",
"transcript_info",
"in",
"vep_info",
":",
"transcript",
"=",
"self",
".",
"_get_vep_transcript",
"(",
"transcript_info",
")",
"variant_obj",
".",
"add_transcript",
"(",
"transcript",
")",
"elif",
"snpeff_string",
":",
"#Get the vep annotations",
"snpeff_info",
"=",
"get_snpeff_info",
"(",
"snpeff_string",
"=",
"snpeff_string",
",",
"snpeff_header",
"=",
"self",
".",
"snpeff_header",
")",
"for",
"transcript_info",
"in",
"snpeff_info",
":",
"transcript",
"=",
"self",
".",
"_get_snpeff_transcript",
"(",
"transcript_info",
")",
"variant_obj",
".",
"add_transcript",
"(",
"transcript",
")"
] | Return all transcripts found in the vcf file | [
"Return",
"all",
"transcripts",
"sound",
"in",
"the",
"vcf",
"file"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/vcf/mixins/variant_extras/transcripts.py#L9-L36 | train |
robinandeer/puzzle | puzzle/plugins/vcf/mixins/variant_extras/transcripts.py | TranscriptExtras._get_vep_transcript | def _get_vep_transcript(self, transcript_info):
"""Create a Transcript based on the vep annotation
Args:
transcript_info (dict): A dict with vep info
Returns:
transcript (puzzle.models.Transcript): A Transcript
"""
transcript = Transcript(
hgnc_symbol = transcript_info.get('SYMBOL'),
transcript_id = transcript_info.get('Feature'),
ensembl_id = transcript_info.get('Gene'),
biotype = transcript_info.get('BIOTYPE'),
consequence = transcript_info.get('Consequence'),
strand = transcript_info.get('STRAND'),
sift = transcript_info.get('SIFT'),
polyphen = transcript_info.get('PolyPhen'),
exon = transcript_info.get('EXON'),
HGVSc = transcript_info.get('HGVSc'),
HGVSp = transcript_info.get('HGVSp'),
GMAF = transcript_info.get('GMAF'),
ExAC_MAF = transcript_info.get('ExAC_MAF')
)
return transcript | python | def _get_vep_transcript(self, transcript_info):
"""Create a Transcript based on the vep annotation
Args:
transcript_info (dict): A dict with vep info
Returns:
transcript (puzzle.models.Transcript): A Transcript
"""
transcript = Transcript(
hgnc_symbol = transcript_info.get('SYMBOL'),
transcript_id = transcript_info.get('Feature'),
ensembl_id = transcript_info.get('Gene'),
biotype = transcript_info.get('BIOTYPE'),
consequence = transcript_info.get('Consequence'),
strand = transcript_info.get('STRAND'),
sift = transcript_info.get('SIFT'),
polyphen = transcript_info.get('PolyPhen'),
exon = transcript_info.get('EXON'),
HGVSc = transcript_info.get('HGVSc'),
HGVSp = transcript_info.get('HGVSp'),
GMAF = transcript_info.get('GMAF'),
ExAC_MAF = transcript_info.get('ExAC_MAF')
)
return transcript | [
"def",
"_get_vep_transcript",
"(",
"self",
",",
"transcript_info",
")",
":",
"transcript",
"=",
"Transcript",
"(",
"hgnc_symbol",
"=",
"transcript_info",
".",
"get",
"(",
"'SYMBOL'",
")",
",",
"transcript_id",
"=",
"transcript_info",
".",
"get",
"(",
"'Feature'",
")",
",",
"ensembl_id",
"=",
"transcript_info",
".",
"get",
"(",
"'Gene'",
")",
",",
"biotype",
"=",
"transcript_info",
".",
"get",
"(",
"'BIOTYPE'",
")",
",",
"consequence",
"=",
"transcript_info",
".",
"get",
"(",
"'Consequence'",
")",
",",
"strand",
"=",
"transcript_info",
".",
"get",
"(",
"'STRAND'",
")",
",",
"sift",
"=",
"transcript_info",
".",
"get",
"(",
"'SIFT'",
")",
",",
"polyphen",
"=",
"transcript_info",
".",
"get",
"(",
"'PolyPhen'",
")",
",",
"exon",
"=",
"transcript_info",
".",
"get",
"(",
"'EXON'",
")",
",",
"HGVSc",
"=",
"transcript_info",
".",
"get",
"(",
"'HGVSc'",
")",
",",
"HGVSp",
"=",
"transcript_info",
".",
"get",
"(",
"'HGVSp'",
")",
",",
"GMAF",
"=",
"transcript_info",
".",
"get",
"(",
"'GMAF'",
")",
",",
"ExAC_MAF",
"=",
"transcript_info",
".",
"get",
"(",
"'ExAC_MAF'",
")",
")",
"return",
"transcript"
] | Create a Transcript based on the vep annotation
Args:
transcript_info (dict): A dict with vep info
Returns:
transcript (puzzle.models.Transcript): A Transcript | [
"Create",
"a",
"Transcript",
"based",
"on",
"the",
"vep",
"annotation"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/vcf/mixins/variant_extras/transcripts.py#L39-L63 | train |
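Illustrative input and output for _get_vep_transcript; the keys mirror VEP CSQ header fields as parsed by get_vep_info, plugin stands in for the VCF plugin instance, and the Transcript attribute names are assumed to mirror the keyword arguments.

transcript_info = {
    'SYMBOL': 'BRCA1',
    'Feature': 'ENST00000357654',
    'Gene': 'ENSG00000012048',
    'Consequence': 'missense_variant',
}
tx = plugin._get_vep_transcript(transcript_info)
# tx.hgnc_symbol == 'BRCA1'; absent keys (SIFT, PolyPhen, ...) fall back to None via dict.get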
robinandeer/puzzle | puzzle/plugins/vcf/mixins/variant_extras/transcripts.py | TranscriptExtras._get_snpeff_transcript | def _get_snpeff_transcript(self, transcript_info):
"""Create a transcript based on the snpeff annotation
Args:
transcript_info (dict): A dict with snpeff info
Returns:
transcript (puzzle.models.Transcript): A Transcript
"""
transcript = Transcript(
hgnc_symbol = transcript_info.get('Gene_Name'),
transcript_id = transcript_info.get('Feature'),
ensembl_id = transcript_info.get('Gene_ID'),
biotype = transcript_info.get('Transcript_BioType'),
consequence = transcript_info.get('Annotation'),
exon = transcript_info.get('Rank'),
HGVSc = transcript_info.get('HGVS.c'),
HGVSp = transcript_info.get('HGVS.p')
)
return transcript | python | def _get_snpeff_transcript(self, transcript_info):
"""Create a transcript based on the snpeff annotation
Args:
transcript_info (dict): A dict with snpeff info
Returns:
transcript (puzzle.models.Transcript): A Transcript
"""
transcript = Transcript(
hgnc_symbol = transcript_info.get('Gene_Name'),
transcript_id = transcript_info.get('Feature'),
ensembl_id = transcript_info.get('Gene_ID'),
biotype = transcript_info.get('Transcript_BioType'),
consequence = transcript_info.get('Annotation'),
exon = transcript_info.get('Rank'),
HGVSc = transcript_info.get('HGVS.c'),
HGVSp = transcript_info.get('HGVS.p')
)
return transcript | [
"def",
"_get_snpeff_transcript",
"(",
"self",
",",
"transcript_info",
")",
":",
"transcript",
"=",
"Transcript",
"(",
"hgnc_symbol",
"=",
"transcript_info",
".",
"get",
"(",
"'Gene_Name'",
")",
",",
"transcript_id",
"=",
"transcript_info",
".",
"get",
"(",
"'Feature'",
")",
",",
"ensembl_id",
"=",
"transcript_info",
".",
"get",
"(",
"'Gene_ID'",
")",
",",
"biotype",
"=",
"transcript_info",
".",
"get",
"(",
"'Transcript_BioType'",
")",
",",
"consequence",
"=",
"transcript_info",
".",
"get",
"(",
"'Annotation'",
")",
",",
"exon",
"=",
"transcript_info",
".",
"get",
"(",
"'Rank'",
")",
",",
"HGVSc",
"=",
"transcript_info",
".",
"get",
"(",
"'HGVS.c'",
")",
",",
"HGVSp",
"=",
"transcript_info",
".",
"get",
"(",
"'HGVS.p'",
")",
")",
"return",
"transcript"
] | Create a transcript based on the snpeff annotation
Args:
transcript_info (dict): A dict with snpeff info
Returns:
transcript (puzzle.models.Transcript): A Transcript | [
"Create",
"a",
"transcript",
"based",
"on",
"the",
"snpeff",
"annotation"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/vcf/mixins/variant_extras/transcripts.py#L65-L84 | train |
tswicegood/Dolt | dolt/__init__.py | _makes_clone | def _makes_clone(_func, *args, **kw):
"""
A decorator that returns a clone of the current object so that
we can re-use the object for similar requests.
"""
self = args[0]._clone()
_func(self, *args[1:], **kw)
return self | python | def _makes_clone(_func, *args, **kw):
"""
A decorator that returns a clone of the current object so that
we can re-use the object for similar requests.
"""
self = args[0]._clone()
_func(self, *args[1:], **kw)
return self | [
"def",
"_makes_clone",
"(",
"_func",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"self",
"=",
"args",
"[",
"0",
"]",
".",
"_clone",
"(",
")",
"_func",
"(",
"self",
",",
"*",
"args",
"[",
"1",
":",
"]",
",",
"*",
"*",
"kw",
")",
"return",
"self"
] | A decorator that returns a clone of the current object so that
we can re-use the object for similar requests. | [
"A",
"decorator",
"that",
"returns",
"a",
"clone",
"of",
"the",
"current",
"object",
"so",
"that",
"we",
"can",
"re",
"-",
"use",
"the",
"object",
"for",
"similar",
"requests",
"."
] | e0da1918b7db18f885734a89f824b9e173cc30a5 | https://github.com/tswicegood/Dolt/blob/e0da1918b7db18f885734a89f824b9e173cc30a5/dolt/__init__.py#L22-L29 | train |
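A sketch of how _makes_clone is applied inside Dolt. Its (_func, *args, **kw) signature matches the decorator package's convention, so the sketch assumes something like _makes_clone = decorator(_makes_clone) has already happened; the method and no-arg construction below are hypothetical.

class Dolt(object):
    @_makes_clone
    def with_params(self, **params):   # hypothetical chainable method
        self._params.update(params)    # mutates the clone; the original stays frozen

api = Dolt()                           # no-arg construction assumed
api2 = api.with_params(page=2)         # api is unmodified, api2 carries page=2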
tswicegood/Dolt | dolt/__init__.py | Dolt._handle_response | def _handle_response(self, response, data):
"""
Deserializes JSON if the content-type matches, otherwise returns the response
body as is.
"""
# Content-Type headers can include additional parameters (RFC 1521), so
# we split on ; to match against only the type/subtype
if data and response.get('content-type', '').split(';')[0] in (
'application/json',
'application/x-javascript',
'text/javascript',
'text/x-javascript',
'text/x-json'
):
return json.loads(data)
else:
return data | python | def _handle_response(self, response, data):
"""
Deserializes JSON if the content-type matches, otherwise returns the response
body as is.
"""
# Content-Type headers can include additional parameters (RFC 1521), so
# we split on ; to match against only the type/subtype
if data and response.get('content-type', '').split(';')[0] in (
'application/json',
'application/x-javascript',
'text/javascript',
'text/x-javascript',
'text/x-json'
):
return json.loads(data)
else:
return data | [
"def",
"_handle_response",
"(",
"self",
",",
"response",
",",
"data",
")",
":",
"# Content-Type headers can include additional parameters(RFC 1521), so",
"# we split on ; to match against only the type/subtype",
"if",
"data",
"and",
"response",
".",
"get",
"(",
"'content-type'",
",",
"''",
")",
".",
"split",
"(",
"';'",
")",
"[",
"0",
"]",
"in",
"(",
"'application/json'",
",",
"'application/x-javascript'",
",",
"'text/javascript'",
",",
"'text/x-javascript'",
",",
"'text/x-json'",
")",
":",
"return",
"json",
".",
"loads",
"(",
"data",
")",
"else",
":",
"return",
"data"
] | Deserializes JSON if the content-type matches, otherwise returns the response
body as is. | [
"Deserializes",
"JSON",
"if",
"the",
"content",
"-",
"type",
"matches",
"otherwise",
"returns",
"the",
"response",
"body",
"as",
"is",
"."
] | e0da1918b7db18f885734a89f824b9e173cc30a5 | https://github.com/tswicegood/Dolt/blob/e0da1918b7db18f885734a89f824b9e173cc30a5/dolt/__init__.py#L80-L96 | train |
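Behavior sketch for _handle_response: JSON-typed bodies are decoded (parameters after ';' are ignored), everything else passes through unchanged; dolt is a placeholder instance.

dolt._handle_response({'content-type': 'application/json; charset=utf-8'},
                      '{"ok": true}')
# -> {'ok': True}
dolt._handle_response({'content-type': 'text/plain'}, 'hello')
# -> 'hello'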
tswicegood/Dolt | dolt/__init__.py | Dolt.get_url | def get_url(self, *paths, **params):
"""
Returns the URL for this request.
:param paths: Additional URL path parts to add to the request
:param params: Additional query parameters to add to the request
"""
path_stack = self._attribute_stack[:]
if paths:
path_stack.extend(paths)
u = self._stack_collapser(path_stack)
url = self._url_template % {
"domain": self._api_url,
"generated_url" : u,
}
if self._params or params:
internal_params = self._params.copy()
internal_params.update(params)
url += self._generate_params(internal_params)
return url | python | def get_url(self, *paths, **params):
"""
Returns the URL for this request.
:param paths: Additional URL path parts to add to the request
:param params: Additional query parameters to add to the request
"""
path_stack = self._attribute_stack[:]
if paths:
path_stack.extend(paths)
u = self._stack_collapser(path_stack)
url = self._url_template % {
"domain": self._api_url,
"generated_url" : u,
}
if self._params or params:
internal_params = self._params.copy()
internal_params.update(params)
url += self._generate_params(internal_params)
return url | [
"def",
"get_url",
"(",
"self",
",",
"*",
"paths",
",",
"*",
"*",
"params",
")",
":",
"path_stack",
"=",
"self",
".",
"_attribute_stack",
"[",
":",
"]",
"if",
"paths",
":",
"path_stack",
".",
"extend",
"(",
"paths",
")",
"u",
"=",
"self",
".",
"_stack_collapser",
"(",
"path_stack",
")",
"url",
"=",
"self",
".",
"_url_template",
"%",
"{",
"\"domain\"",
":",
"self",
".",
"_api_url",
",",
"\"generated_url\"",
":",
"u",
",",
"}",
"if",
"self",
".",
"_params",
"or",
"params",
":",
"internal_params",
"=",
"self",
".",
"_params",
".",
"copy",
"(",
")",
"internal_params",
".",
"update",
"(",
"params",
")",
"url",
"+=",
"self",
".",
"_generate_params",
"(",
"internal_params",
")",
"return",
"url"
] | Returns the URL for this request.
:param paths: Additional URL path parts to add to the request
:param params: Additional query parameters to add to the request | [
"Returns",
"the",
"URL",
"for",
"this",
"request",
"."
] | e0da1918b7db18f885734a89f824b9e173cc30a5 | https://github.com/tswicegood/Dolt/blob/e0da1918b7db18f885734a89f824b9e173cc30a5/dolt/__init__.py#L222-L244 | train |
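Continuing the '/cat' example from the _clone docstring in the record below, get_url appends explicit path parts and query parameters; the exact query-string rendering depends on _generate_params and is assumed here.

dolt.cat.get_url()                      # -> '/cat'  (per the _clone docstring)
dolt.cat.get_url('food', brand='acme')  # -> roughly '/cat/food?brand=acme'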
tswicegood/Dolt | dolt/__init__.py | Dolt._clone | def _clone(self):
"""
Clones the state of the current operation.
The state is cloned so that you can freeze the state at a certain point for re-use.
::
>>> cat = dolt.cat
>>> cat.get_url()
'/cat'
>>> o = cat.foo
>>> o.get_url()
'/cat/foo'
>>> cat.get_url()
'/cat'
"""
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
q._params = self._params.copy()
q._headers = self._headers.copy()
q._attribute_stack = self._attribute_stack[:]
return q | python | def _clone(self):
"""
Clones the state of the current operation.
The state is cloned so that you can freeze the state at a certain point for re-use.
::
>>> cat = dolt.cat
>>> cat.get_url()
'/cat'
>>> o = cat.foo
>>> o.get_url()
'/cat/foo'
>>> cat.get_url()
'/cat'
"""
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
q._params = self._params.copy()
q._headers = self._headers.copy()
q._attribute_stack = self._attribute_stack[:]
return q | [
"def",
"_clone",
"(",
"self",
")",
":",
"cls",
"=",
"self",
".",
"__class__",
"q",
"=",
"cls",
".",
"__new__",
"(",
"cls",
")",
"q",
".",
"__dict__",
"=",
"self",
".",
"__dict__",
".",
"copy",
"(",
")",
"q",
".",
"_params",
"=",
"self",
".",
"_params",
".",
"copy",
"(",
")",
"q",
".",
"_headers",
"=",
"self",
".",
"_headers",
".",
"copy",
"(",
")",
"q",
".",
"_attribute_stack",
"=",
"self",
".",
"_attribute_stack",
"[",
":",
"]",
"return",
"q"
] | Clones the state of the current operation.
The state is cloned so that you can freeze the state at a certain point for re-use.
::
>>> cat = dolt.cat
>>> cat.get_url()
'/cat'
>>> o = cat.foo
>>> o.get_url()
'/cat/foo'
>>> cat.get_url()
'/cat' | [
"Clones",
"the",
"state",
"of",
"the",
"current",
"operation",
"."
] | e0da1918b7db18f885734a89f824b9e173cc30a5 | https://github.com/tswicegood/Dolt/blob/e0da1918b7db18f885734a89f824b9e173cc30a5/dolt/__init__.py#L246-L271 | train |
robinandeer/puzzle | puzzle/cli/delete.py | delete | def delete(ctx, family_id, individual_id, root):
"""
Delete a case or individual from the database.
If no database is found, run 'puzzle init' first.
"""
root = root or ctx.obj.get('root') or os.path.expanduser("~/.puzzle")
if os.path.isfile(root):
logger.error("'root' can't be a file")
ctx.abort()
logger.info("Root directory is: {}".format(root))
db_path = os.path.join(root, 'puzzle_db.sqlite3')
logger.info("db path is: {}".format(db_path))
if not os.path.exists(db_path):
logger.warn("database not initialized, run 'puzzle init'")
ctx.abort()
store = SqlStore(db_path)
if family_id:
case_obj = store.case(case_id=family_id)
if case_obj is None:
logger.warning("Family {0} does not exist in database"
.format(family_id))
ctx.abort()
store.delete_case(case_obj)
elif individual_id:
ind_obj = store.individual(ind_id=individual_id)
if ind_obj.ind_id != individual_id:
logger.warning("Individual {0} does not exist in database"
.format(individual_id))
ctx.abort()
store.delete_individual(ind_obj)
else:
logger.warning("Please provide a family or individual id")
ctx.abort() | python | def delete(ctx, family_id, individual_id, root):
"""
Delete a case or individual from the database.
If no database is found, run 'puzzle init' first.
"""
root = root or ctx.obj.get('root') or os.path.expanduser("~/.puzzle")
if os.path.isfile(root):
logger.error("'root' can't be a file")
ctx.abort()
logger.info("Root directory is: {}".format(root))
db_path = os.path.join(root, 'puzzle_db.sqlite3')
logger.info("db path is: {}".format(db_path))
if not os.path.exists(db_path):
logger.warn("database not initialized, run 'puzzle init'")
ctx.abort()
store = SqlStore(db_path)
if family_id:
case_obj = store.case(case_id=family_id)
if case_obj is None:
logger.warning("Family {0} does not exist in database"
.format(family_id))
ctx.abort()
store.delete_case(case_obj)
elif individual_id:
ind_obj = store.individual(ind_id=individual_id)
if ind_obj.ind_id != individual_id:
logger.warning("Individual {0} does not exist in database"
.format(individual_id))
ctx.abort()
store.delete_individual(ind_obj)
else:
logger.warning("Please provide a family or individual id")
ctx.abort() | [
"def",
"delete",
"(",
"ctx",
",",
"family_id",
",",
"individual_id",
",",
"root",
")",
":",
"root",
"=",
"root",
"or",
"ctx",
".",
"obj",
".",
"get",
"(",
"'root'",
")",
"or",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~/.puzzle\"",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"root",
")",
":",
"logger",
".",
"error",
"(",
"\"'root' can't be a file\"",
")",
"ctx",
".",
"abort",
"(",
")",
"logger",
".",
"info",
"(",
"\"Root directory is: {}\"",
".",
"format",
"(",
"root",
")",
")",
"db_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"'puzzle_db.sqlite3'",
")",
"logger",
".",
"info",
"(",
"\"db path is: {}\"",
".",
"format",
"(",
"db_path",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"db_path",
")",
":",
"logger",
".",
"warn",
"(",
"\"database not initialized, run 'puzzle init'\"",
")",
"ctx",
".",
"abort",
"(",
")",
"store",
"=",
"SqlStore",
"(",
"db_path",
")",
"if",
"family_id",
":",
"case_obj",
"=",
"store",
".",
"case",
"(",
"case_id",
"=",
"family_id",
")",
"if",
"case_obj",
"is",
"None",
":",
"logger",
".",
"warning",
"(",
"\"Family {0} does not exist in database\"",
".",
"format",
"(",
"family_id",
")",
")",
"ctx",
".",
"abort",
"(",
")",
"store",
".",
"delete_case",
"(",
"case_obj",
")",
"elif",
"individual_id",
":",
"ind_obj",
"=",
"store",
".",
"individual",
"(",
"ind_id",
"=",
"individual_id",
")",
"if",
"ind_obj",
".",
"ind_id",
"!=",
"individual_id",
":",
"logger",
".",
"warning",
"(",
"\"Individual {0} does not exist in database\"",
".",
"format",
"(",
"individual_id",
")",
")",
"ctx",
".",
"abort",
"(",
")",
"store",
".",
"delete_individual",
"(",
"ind_obj",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Please provide a family or individual id\"",
")",
"ctx",
".",
"abort",
"(",
")"
] | Delete a case or individual from the database.
If no database is found, run 'puzzle init' first. | [
"Delete",
"a",
"case",
"or",
"individual",
"from",
"the",
"database",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/cli/delete.py#L18-L57 | train |
robinandeer/puzzle | puzzle/server/blueprints/variants/views.py | variants | def variants(case_id):
"""Show all variants for a case."""
filters = parse_filters()
values = [value for key, value in iteritems(filters)
if not isinstance(value, dict) and key != 'skip']
is_active = any(values)
variants, nr_of_variants = app.db.variants(
case_id,
skip=filters['skip'],
filters={
'gene_ids': filters['gene_symbols'],
'frequency': filters.get('frequency'),
'cadd': filters.get('cadd'),
'sv_len': filters.get('sv_len'),
'consequence': filters['selected_consequences'],
'genetic_models': filters['selected_models'],
'sv_types': filters['selected_sv_types'],
'gene_lists': filters['gene_lists'],
'impact_severities': filters['impact_severities'],
'gemini_query': filters['gemini_query'],
'range': filters['range'],
}
)
gene_lists = ([gene_list.list_id for gene_list in app.db.gene_lists()]
if app.config['STORE_ENABLED'] else [])
queries = ([(query.name or query.query, query.query) for query
in app.db.gemini_queries()]
if app.config['STORE_ENABLED'] else [])
kwargs = dict(variants=variants, case_id=case_id, db=app.db,
filters=filters, consequences=SO_TERMS,
inheritance_models=INHERITANCE_MODELS_SHORT,
gene_lists=gene_lists, impact_severities=IMPACT_LEVELS,
is_active=is_active, nr_of_variants=nr_of_variants,
queries=queries)
if app.db.variant_type == 'sv':
return render_template('sv_variants.html', sv_types=SV_TYPES, **kwargs)
else:
return render_template('variants.html', **kwargs) | python | def variants(case_id):
"""Show all variants for a case."""
filters = parse_filters()
values = [value for key, value in iteritems(filters)
if not isinstance(value, dict) and key != 'skip']
is_active = any(values)
variants, nr_of_variants = app.db.variants(
case_id,
skip=filters['skip'],
filters={
'gene_ids': filters['gene_symbols'],
'frequency': filters.get('frequency'),
'cadd': filters.get('cadd'),
'sv_len': filters.get('sv_len'),
'consequence': filters['selected_consequences'],
'genetic_models': filters['selected_models'],
'sv_types': filters['selected_sv_types'],
'gene_lists': filters['gene_lists'],
'impact_severities': filters['impact_severities'],
'gemini_query': filters['gemini_query'],
'range': filters['range'],
}
)
gene_lists = ([gene_list.list_id for gene_list in app.db.gene_lists()]
if app.config['STORE_ENABLED'] else [])
queries = ([(query.name or query.query, query.query) for query
in app.db.gemini_queries()]
if app.config['STORE_ENABLED'] else [])
kwargs = dict(variants=variants, case_id=case_id, db=app.db,
filters=filters, consequences=SO_TERMS,
inheritance_models=INHERITANCE_MODELS_SHORT,
gene_lists=gene_lists, impact_severities=IMPACT_LEVELS,
is_active=is_active, nr_of_variants=nr_of_variants,
queries=queries)
if app.db.variant_type == 'sv':
return render_template('sv_variants.html', sv_types=SV_TYPES, **kwargs)
else:
return render_template('variants.html', **kwargs) | [
"def",
"variants",
"(",
"case_id",
")",
":",
"filters",
"=",
"parse_filters",
"(",
")",
"values",
"=",
"[",
"value",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"filters",
")",
"if",
"not",
"isinstance",
"(",
"value",
",",
"dict",
")",
"and",
"key",
"!=",
"'skip'",
"]",
"is_active",
"=",
"any",
"(",
"values",
")",
"variants",
",",
"nr_of_variants",
"=",
"app",
".",
"db",
".",
"variants",
"(",
"case_id",
",",
"skip",
"=",
"filters",
"[",
"'skip'",
"]",
",",
"filters",
"=",
"{",
"'gene_ids'",
":",
"filters",
"[",
"'gene_symbols'",
"]",
",",
"'frequency'",
":",
"filters",
".",
"get",
"(",
"'frequency'",
")",
",",
"'cadd'",
":",
"filters",
".",
"get",
"(",
"'cadd'",
")",
",",
"'sv_len'",
":",
"filters",
".",
"get",
"(",
"'sv_len'",
")",
",",
"'consequence'",
":",
"filters",
"[",
"'selected_consequences'",
"]",
",",
"'genetic_models'",
":",
"filters",
"[",
"'selected_models'",
"]",
",",
"'sv_types'",
":",
"filters",
"[",
"'selected_sv_types'",
"]",
",",
"'gene_lists'",
":",
"filters",
"[",
"'gene_lists'",
"]",
",",
"'impact_severities'",
":",
"filters",
"[",
"'impact_severities'",
"]",
",",
"'gemini_query'",
":",
"filters",
"[",
"'gemini_query'",
"]",
",",
"'range'",
":",
"filters",
"[",
"'range'",
"]",
",",
"}",
")",
"gene_lists",
"=",
"(",
"[",
"gene_list",
".",
"list_id",
"for",
"gene_list",
"in",
"app",
".",
"db",
".",
"gene_lists",
"(",
")",
"]",
"if",
"app",
".",
"config",
"[",
"'STORE_ENABLED'",
"]",
"else",
"[",
"]",
")",
"queries",
"=",
"(",
"[",
"(",
"query",
".",
"name",
"or",
"query",
".",
"query",
",",
"query",
".",
"query",
")",
"for",
"query",
"in",
"app",
".",
"db",
".",
"gemini_queries",
"(",
")",
"]",
"if",
"app",
".",
"config",
"[",
"'STORE_ENABLED'",
"]",
"else",
"[",
"]",
")",
"kwargs",
"=",
"dict",
"(",
"variants",
"=",
"variants",
",",
"case_id",
"=",
"case_id",
",",
"db",
"=",
"app",
".",
"db",
",",
"filters",
"=",
"filters",
",",
"consequences",
"=",
"SO_TERMS",
",",
"inheritance_models",
"=",
"INHERITANCE_MODELS_SHORT",
",",
"gene_lists",
"=",
"gene_lists",
",",
"impact_severities",
"=",
"IMPACT_LEVELS",
",",
"is_active",
"=",
"is_active",
",",
"nr_of_variants",
"=",
"nr_of_variants",
",",
"queries",
"=",
"queries",
")",
"if",
"app",
".",
"db",
".",
"variant_type",
"==",
"'sv'",
":",
"return",
"render_template",
"(",
"'sv_variants.html'",
",",
"sv_types",
"=",
"SV_TYPES",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"render_template",
"(",
"'variants.html'",
",",
"*",
"*",
"kwargs",
")"
] | Show all variants for a case. | [
"Show",
"all",
"variants",
"for",
"a",
"case",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/server/blueprints/variants/views.py#L16-L54 | train |
robinandeer/puzzle | puzzle/server/blueprints/variants/views.py | variant | def variant(case_id, variant_id):
"""Show a single variant."""
case_obj = app.db.case(case_id)
variant = app.db.variant(case_id, variant_id)
if variant is None:
return abort(404, "variant not found")
comments = app.db.comments(variant_id=variant.md5)
template = 'sv_variant.html' if app.db.variant_type == 'sv' else 'variant.html'
return render_template(template, variant=variant, case_id=case_id,
comments=comments, case=case_obj) | python | def variant(case_id, variant_id):
"""Show a single variant."""
case_obj = app.db.case(case_id)
variant = app.db.variant(case_id, variant_id)
if variant is None:
return abort(404, "variant not found")
comments = app.db.comments(variant_id=variant.md5)
template = 'sv_variant.html' if app.db.variant_type == 'sv' else 'variant.html'
return render_template(template, variant=variant, case_id=case_id,
comments=comments, case=case_obj) | [
"def",
"variant",
"(",
"case_id",
",",
"variant_id",
")",
":",
"case_obj",
"=",
"app",
".",
"db",
".",
"case",
"(",
"case_id",
")",
"variant",
"=",
"app",
".",
"db",
".",
"variant",
"(",
"case_id",
",",
"variant_id",
")",
"if",
"variant",
"is",
"None",
":",
"return",
"abort",
"(",
"404",
",",
"\"variant not found\"",
")",
"comments",
"=",
"app",
".",
"db",
".",
"comments",
"(",
"variant_id",
"=",
"variant",
".",
"md5",
")",
"template",
"=",
"'sv_variant.html'",
"if",
"app",
".",
"db",
".",
"variant_type",
"==",
"'sv'",
"else",
"'variant.html'",
"return",
"render_template",
"(",
"template",
",",
"variant",
"=",
"variant",
",",
"case_id",
"=",
"case_id",
",",
"comments",
"=",
"comments",
",",
"case",
"=",
"case_obj",
")"
] | Show a single variant. | [
"Show",
"a",
"single",
"variant",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/server/blueprints/variants/views.py#L58-L68 | train |
robinandeer/puzzle | puzzle/server/blueprints/variants/views.py | parse_filters | def parse_filters():
"""Parse variant filters from the request object."""
genes_str = request.args.get('gene_symbol')
filters = {}
for key in ('frequency', 'cadd', 'sv_len'):
try:
filters[key] = float(request.args.get(key))
except (ValueError, TypeError):
pass
filters['gene_symbols'] = genes_str.split(',') if genes_str else None
filters['selected_models'] = request.args.getlist('inheritance_models')
filters['selected_consequences'] = request.args.getlist('consequences')
filters['selected_sv_types'] = request.args.getlist('sv_types')
filters['skip'] = int(request.args.get('skip', 0))
filters['gene_lists'] = request.args.getlist('gene_lists')
filters['gemini_query'] = (request.args.get('gemini_query') or
request.args.get('preset_gemini_query'))
filters['impact_severities'] = request.args.getlist('impact_severities')
filters['range'] = None
if request.args.get('range'):
chromosome, raw_pos = request.args.get('range').split(':')
start, end = map(int, raw_pos.split('-'))
filters['range'] = {'chromosome': chromosome, 'start': start,
'end': end}
filters['query_dict'] = {key: request.args.getlist(key) for key
in request.args.keys()}
filters['query_dict'].update({'skip': (filters['skip'] + 30)})
return filters | python | def parse_filters():
"""Parse variant filters from the request object."""
genes_str = request.args.get('gene_symbol')
filters = {}
for key in ('frequency', 'cadd', 'sv_len'):
try:
filters[key] = float(request.args.get(key))
except (ValueError, TypeError):
pass
filters['gene_symbols'] = genes_str.split(',') if genes_str else None
filters['selected_models'] = request.args.getlist('inheritance_models')
filters['selected_consequences'] = request.args.getlist('consequences')
filters['selected_sv_types'] = request.args.getlist('sv_types')
filters['skip'] = int(request.args.get('skip', 0))
filters['gene_lists'] = request.args.getlist('gene_lists')
filters['gemini_query'] = (request.args.get('gemini_query') or
request.args.get('preset_gemini_query'))
filters['impact_severities'] = request.args.getlist('impact_severities')
filters['range'] = None
if request.args.get('range'):
chromosome, raw_pos = request.args.get('range').split(':')
start, end = map(int, raw_pos.split('-'))
filters['range'] = {'chromosome': chromosome, 'start': start,
'end': end}
filters['query_dict'] = {key: request.args.getlist(key) for key
in request.args.keys()}
filters['query_dict'].update({'skip': (filters['skip'] + 30)})
return filters | [
"def",
"parse_filters",
"(",
")",
":",
"genes_str",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'gene_symbol'",
")",
"filters",
"=",
"{",
"}",
"for",
"key",
"in",
"(",
"'frequency'",
",",
"'cadd'",
",",
"'sv_len'",
")",
":",
"try",
":",
"filters",
"[",
"key",
"]",
"=",
"float",
"(",
"request",
".",
"args",
".",
"get",
"(",
"key",
")",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"pass",
"filters",
"[",
"'gene_symbols'",
"]",
"=",
"genes_str",
".",
"split",
"(",
"','",
")",
"if",
"genes_str",
"else",
"None",
"filters",
"[",
"'selected_models'",
"]",
"=",
"request",
".",
"args",
".",
"getlist",
"(",
"'inheritance_models'",
")",
"filters",
"[",
"'selected_consequences'",
"]",
"=",
"request",
".",
"args",
".",
"getlist",
"(",
"'consequences'",
")",
"filters",
"[",
"'selected_sv_types'",
"]",
"=",
"request",
".",
"args",
".",
"getlist",
"(",
"'sv_types'",
")",
"filters",
"[",
"'skip'",
"]",
"=",
"int",
"(",
"request",
".",
"args",
".",
"get",
"(",
"'skip'",
",",
"0",
")",
")",
"filters",
"[",
"'gene_lists'",
"]",
"=",
"request",
".",
"args",
".",
"getlist",
"(",
"'gene_lists'",
")",
"filters",
"[",
"'gemini_query'",
"]",
"=",
"(",
"request",
".",
"args",
".",
"get",
"(",
"'gemini_query'",
")",
"or",
"request",
".",
"args",
".",
"get",
"(",
"'preset_gemini_query'",
")",
")",
"filters",
"[",
"'impact_severities'",
"]",
"=",
"request",
".",
"args",
".",
"getlist",
"(",
"'impact_severities'",
")",
"filters",
"[",
"'range'",
"]",
"=",
"None",
"if",
"request",
".",
"args",
".",
"get",
"(",
"'range'",
")",
":",
"chromosome",
",",
"raw_pos",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'range'",
")",
".",
"split",
"(",
"':'",
")",
"start",
",",
"end",
"=",
"map",
"(",
"int",
",",
"raw_pos",
".",
"split",
"(",
"'-'",
")",
")",
"filters",
"[",
"'range'",
"]",
"=",
"{",
"'chromosome'",
":",
"chromosome",
",",
"'start'",
":",
"start",
",",
"'end'",
":",
"end",
"}",
"filters",
"[",
"'query_dict'",
"]",
"=",
"{",
"key",
":",
"request",
".",
"args",
".",
"getlist",
"(",
"key",
")",
"for",
"key",
"in",
"request",
".",
"args",
".",
"keys",
"(",
")",
"}",
"filters",
"[",
"'query_dict'",
"]",
".",
"update",
"(",
"{",
"'skip'",
":",
"(",
"filters",
"[",
"'skip'",
"]",
"+",
"30",
")",
"}",
")",
"return",
"filters"
] | Parse variant filters from the request object. | [
"Parse",
"variant",
"filters",
"from",
"the",
"request",
"object",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/server/blueprints/variants/views.py#L71-L102 | train |
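The range-parsing step from parse_filters, run in isolation: a 'chromosome:start-end' string becomes a dict with integer bounds.

chromosome, raw_pos = '1:100-2000'.split(':')
start, end = map(int, raw_pos.split('-'))
{'chromosome': chromosome, 'start': start, 'end': end}
# -> {'chromosome': '1', 'start': 100, 'end': 2000}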
robinandeer/puzzle | puzzle/server/blueprints/variants/views.py | suspects | def suspects(case_id, variant_id):
"""Pin a variant as a suspect for a given case."""
case_obj = app.db.case(case_id)
variant_obj = app.db.variant(case_id, variant_id)
app.db.add_suspect(case_obj, variant_obj)
return redirect(request.referrer) | python | def suspects(case_id, variant_id):
"""Pin a variant as a suspect for a given case."""
case_obj = app.db.case(case_id)
variant_obj = app.db.variant(case_id, variant_id)
app.db.add_suspect(case_obj, variant_obj)
return redirect(request.referrer) | [
"def",
"suspects",
"(",
"case_id",
",",
"variant_id",
")",
":",
"case_obj",
"=",
"app",
".",
"db",
".",
"case",
"(",
"case_id",
")",
"variant_obj",
"=",
"app",
".",
"db",
".",
"variant",
"(",
"case_id",
",",
"variant_id",
")",
"app",
".",
"db",
".",
"add_suspect",
"(",
"case_obj",
",",
"variant_obj",
")",
"return",
"redirect",
"(",
"request",
".",
"referrer",
")"
] | Pin a variant as a suspect for a given case. | [
"Pin",
"a",
"variant",
"as",
"a",
"suspect",
"for",
"a",
"given",
"case",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/server/blueprints/variants/views.py#L106-L111 | train |
robinandeer/puzzle | puzzle/server/blueprints/variants/views.py | queries | def queries():
"""Store a new GEMINI query."""
query = request.form['query']
name = request.form.get('name')
app.db.add_gemini_query(name, query)
return redirect(request.referrer) | python | def queries():
"""Store a new GEMINI query."""
query = request.form['query']
name = request.form.get('name')
app.db.add_gemini_query(name, query)
return redirect(request.referrer) | [
"def",
"queries",
"(",
")",
":",
"query",
"=",
"request",
".",
"form",
"[",
"'query'",
"]",
"name",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'name'",
")",
"app",
".",
"db",
".",
"add_gemini_query",
"(",
"name",
",",
"query",
")",
"return",
"redirect",
"(",
"request",
".",
"referrer",
")"
] | Store a new GEMINI query. | [
"Store",
"a",
"new",
"GEMINI",
"query",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/server/blueprints/variants/views.py#L122-L127 | train |
jwodder/javaproperties | javaproperties/reading.py | load | def load(fp, object_pairs_hook=dict):
"""
Parse the contents of the `~io.IOBase.readline`-supporting file-like object
``fp`` as a simple line-oriented ``.properties`` file and return a `dict`
of the key-value pairs.
``fp`` may be either a text or binary filehandle, with or without universal
newlines enabled. If it is a binary filehandle, its contents are decoded
as Latin-1.
By default, the key-value pairs extracted from ``fp`` are combined into a
`dict` with later occurrences of a key overriding previous occurrences of
the same key. To change this behavior, pass a callable as the
``object_pairs_hook`` argument; it will be called with one argument, a
generator of ``(key, value)`` pairs representing the key-value entries in
``fp`` (including duplicates) in order of occurrence. `load` will then
return the value returned by ``object_pairs_hook``.
.. versionchanged:: 0.5.0
Invalid ``\\uXXXX`` escape sequences will now cause an
`InvalidUEscapeError` to be raised
:param fp: the file from which to read the ``.properties`` document
:type fp: file-like object
:param callable object_pairs_hook: class or function for combining the
key-value pairs
:rtype: `dict` of text strings or the return value of ``object_pairs_hook``
:raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence
occurs in the input
"""
return object_pairs_hook((k,v) for k,v,_ in parse(fp) if k is not None) | python | def load(fp, object_pairs_hook=dict):
"""
Parse the contents of the `~io.IOBase.readline`-supporting file-like object
``fp`` as a simple line-oriented ``.properties`` file and return a `dict`
of the key-value pairs.
``fp`` may be either a text or binary filehandle, with or without universal
newlines enabled. If it is a binary filehandle, its contents are decoded
as Latin-1.
By default, the key-value pairs extracted from ``fp`` are combined into a
`dict` with later occurrences of a key overriding previous occurrences of
the same key. To change this behavior, pass a callable as the
``object_pairs_hook`` argument; it will be called with one argument, a
generator of ``(key, value)`` pairs representing the key-value entries in
``fp`` (including duplicates) in order of occurrence. `load` will then
return the value returned by ``object_pairs_hook``.
.. versionchanged:: 0.5.0
Invalid ``\\uXXXX`` escape sequences will now cause an
`InvalidUEscapeError` to be raised
:param fp: the file from which to read the ``.properties`` document
:type fp: file-like object
:param callable object_pairs_hook: class or function for combining the
key-value pairs
:rtype: `dict` of text strings or the return value of ``object_pairs_hook``
:raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence
occurs in the input
"""
return object_pairs_hook((k,v) for k,v,_ in parse(fp) if k is not None) | [
"def",
"load",
"(",
"fp",
",",
"object_pairs_hook",
"=",
"dict",
")",
":",
"return",
"object_pairs_hook",
"(",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
",",
"_",
"in",
"parse",
"(",
"fp",
")",
"if",
"k",
"is",
"not",
"None",
")"
] | Parse the contents of the `~io.IOBase.readline`-supporting file-like object
``fp`` as a simple line-oriented ``.properties`` file and return a `dict`
of the key-value pairs.
``fp`` may be either a text or binary filehandle, with or without universal
newlines enabled. If it is a binary filehandle, its contents are decoded
as Latin-1.
By default, the key-value pairs extracted from ``fp`` are combined into a
`dict` with later occurrences of a key overriding previous occurrences of
the same key. To change this behavior, pass a callable as the
``object_pairs_hook`` argument; it will be called with one argument, a
generator of ``(key, value)`` pairs representing the key-value entries in
``fp`` (including duplicates) in order of occurrence. `load` will then
return the value returned by ``object_pairs_hook``.
.. versionchanged:: 0.5.0
Invalid ``\\uXXXX`` escape sequences will now cause an
`InvalidUEscapeError` to be raised
:param fp: the file from which to read the ``.properties`` document
:type fp: file-like object
:param callable object_pairs_hook: class or function for combining the
key-value pairs
:rtype: `dict` of text strings or the return value of ``object_pairs_hook``
:raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence
occurs in the input | [
"Parse",
"the",
"contents",
"of",
"the",
"~io",
".",
"IOBase",
".",
"readline",
"-",
"supporting",
"file",
"-",
"like",
"object",
"fp",
"as",
"a",
"simple",
"line",
"-",
"oriented",
".",
"properties",
"file",
"and",
"return",
"a",
"dict",
"of",
"the",
"key",
"-",
"value",
"pairs",
"."
] | 8b48f040305217ebeb80c98c4354691bbb01429b | https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/reading.py#L6-L36 | train |
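A small demonstration of the object_pairs_hook contract documented above: by default later duplicates win, while passing list preserves every pair in order.

from io import StringIO

load(StringIO(u"key = one\nkey = two\n"))
# -> {'key': 'two'}   (later occurrence overrides)
load(StringIO(u"key = one\nkey = two\n"), object_pairs_hook=list)
# -> [('key', 'one'), ('key', 'two')]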
jwodder/javaproperties | javaproperties/reading.py | loads | def loads(s, object_pairs_hook=dict):
"""
Parse the contents of the string ``s`` as a simple line-oriented
``.properties`` file and return a `dict` of the key-value pairs.
``s`` may be either a text string or bytes string. If it is a bytes
string, its contents are decoded as Latin-1.
By default, the key-value pairs extracted from ``s`` are combined into a
`dict` with later occurrences of a key overriding previous occurrences of
the same key. To change this behavior, pass a callable as the
``object_pairs_hook`` argument; it will be called with one argument, a
generator of ``(key, value)`` pairs representing the key-value entries in
``s`` (including duplicates) in order of occurrence. `loads` will then
return the value returned by ``object_pairs_hook``.
.. versionchanged:: 0.5.0
Invalid ``\\uXXXX`` escape sequences will now cause an
`InvalidUEscapeError` to be raised
:param string s: the string from which to read the ``.properties`` document
:param callable object_pairs_hook: class or function for combining the
key-value pairs
:rtype: `dict` of text strings or the return value of ``object_pairs_hook``
:raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence
occurs in the input
"""
fp = BytesIO(s) if isinstance(s, binary_type) else StringIO(s)
return load(fp, object_pairs_hook=object_pairs_hook) | python | def loads(s, object_pairs_hook=dict):
"""
Parse the contents of the string ``s`` as a simple line-oriented
``.properties`` file and return a `dict` of the key-value pairs.
``s`` may be either a text string or bytes string. If it is a bytes
string, its contents are decoded as Latin-1.
By default, the key-value pairs extracted from ``s`` are combined into a
`dict` with later occurrences of a key overriding previous occurrences of
the same key. To change this behavior, pass a callable as the
``object_pairs_hook`` argument; it will be called with one argument, a
generator of ``(key, value)`` pairs representing the key-value entries in
``s`` (including duplicates) in order of occurrence. `loads` will then
return the value returned by ``object_pairs_hook``.
.. versionchanged:: 0.5.0
Invalid ``\\uXXXX`` escape sequences will now cause an
`InvalidUEscapeError` to be raised
:param string s: the string from which to read the ``.properties`` document
:param callable object_pairs_hook: class or function for combining the
key-value pairs
:rtype: `dict` of text strings or the return value of ``object_pairs_hook``
:raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence
occurs in the input
"""
fp = BytesIO(s) if isinstance(s, binary_type) else StringIO(s)
return load(fp, object_pairs_hook=object_pairs_hook) | [
"def",
"loads",
"(",
"s",
",",
"object_pairs_hook",
"=",
"dict",
")",
":",
"fp",
"=",
"BytesIO",
"(",
"s",
")",
"if",
"isinstance",
"(",
"s",
",",
"binary_type",
")",
"else",
"StringIO",
"(",
"s",
")",
"return",
"load",
"(",
"fp",
",",
"object_pairs_hook",
"=",
"object_pairs_hook",
")"
] | Parse the contents of the string ``s`` as a simple line-oriented
``.properties`` file and return a `dict` of the key-value pairs.
``s`` may be either a text string or bytes string. If it is a bytes
string, its contents are decoded as Latin-1.
By default, the key-value pairs extracted from ``s`` are combined into a
`dict` with later occurrences of a key overriding previous occurrences of
the same key. To change this behavior, pass a callable as the
``object_pairs_hook`` argument; it will be called with one argument, a
generator of ``(key, value)`` pairs representing the key-value entries in
``s`` (including duplicates) in order of occurrence. `loads` will then
return the value returned by ``object_pairs_hook``.
.. versionchanged:: 0.5.0
Invalid ``\\uXXXX`` escape sequences will now cause an
`InvalidUEscapeError` to be raised
:param string s: the string from which to read the ``.properties`` document
:param callable object_pairs_hook: class or function for combining the
key-value pairs
:rtype: `dict` of text strings or the return value of ``object_pairs_hook``
:raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence
occurs in the input | [
"Parse",
"the",
"contents",
"of",
"the",
"string",
"s",
"as",
"a",
"simple",
"line",
"-",
"oriented",
".",
"properties",
"file",
"and",
"return",
"a",
"dict",
"of",
"the",
"key",
"-",
"value",
"pairs",
"."
] | 8b48f040305217ebeb80c98c4354691bbb01429b | https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/reading.py#L38-L66 | train |
TiagoBras/audio-clip-extractor | audioclipextractor/core.py | AudioClipExtractor._extractClipData | def _extractClipData(self, audioClipSpec, showLogs=False):
"""Extracts a single clip according to audioClipSpec.
Arguments:
audioClipSpec (AudioClipSpec): Clip specification
showLogs (bool): Show ffmpeg output
"""
command = [self._ffmpegPath]
if not showLogs:
command += ['-nostats', '-loglevel', '0']
command += [
'-i', self._audioFilePath,
'-ss', '%.3f' % audioClipSpec.start,
'-t', '%.3f' % audioClipSpec.duration(),
'-c', 'copy',
'-map', '0',
'-acodec', 'libmp3lame',
'-ab', '128k',
'-f', 'mp3'
]
# Add clip TEXT as metadata and set a few more to default
metadata = { self._textMetadataName: audioClipSpec.text }
for k, v in metadata.items():
command.append('-metadata')
command.append("{}='{}'".format(k, v))
command.append('pipe:1')
return subprocess.check_output(command) | python | def _extractClipData(self, audioClipSpec, showLogs=False):
"""Extracts a single clip according to audioClipSpec.
Arguments:
audioClipSpec (AudioClipSpec): Clip specification
showLogs (bool): Show ffmpeg output
"""
command = [self._ffmpegPath]
if not showLogs:
command += ['-nostats', '-loglevel', '0']
command += [
'-i', self._audioFilePath,
'-ss', '%.3f' % audioClipSpec.start,
'-t', '%.3f' % audioClipSpec.duration(),
'-c', 'copy',
'-map', '0',
'-acodec', 'libmp3lame',
'-ab', '128k',
'-f', 'mp3'
]
# Add clip TEXT as metadata and set a few more to default
metadata = { self._textMetadataName: audioClipSpec.text }
for k, v in metadata.items():
command.append('-metadata')
command.append("{}='{}'".format(k, v))
command.append('pipe:1')
return subprocess.check_output(command) | [
"def",
"_extractClipData",
"(",
"self",
",",
"audioClipSpec",
",",
"showLogs",
"=",
"False",
")",
":",
"command",
"=",
"[",
"self",
".",
"_ffmpegPath",
"]",
"if",
"not",
"showLogs",
":",
"command",
"+=",
"[",
"'-nostats'",
",",
"'-loglevel'",
",",
"'0'",
"]",
"command",
"+=",
"[",
"'-i'",
",",
"self",
".",
"_audioFilePath",
",",
"'-ss'",
",",
"'%.3f'",
"%",
"audioClipSpec",
".",
"start",
",",
"'-t'",
",",
"'%.3f'",
"%",
"audioClipSpec",
".",
"duration",
"(",
")",
",",
"'-c'",
",",
"'copy'",
",",
"'-map'",
",",
"'0'",
",",
"'-acodec'",
",",
"'libmp3lame'",
",",
"'-ab'",
",",
"'128k'",
",",
"'-f'",
",",
"'mp3'",
"]",
"# Add clip TEXT as metadata and set a few more to default",
"metadata",
"=",
"{",
"self",
".",
"_textMetadataName",
":",
"audioClipSpec",
".",
"text",
"}",
"for",
"k",
",",
"v",
"in",
"metadata",
".",
"items",
"(",
")",
":",
"command",
".",
"append",
"(",
"'-metadata'",
")",
"command",
".",
"append",
"(",
"\"{}='{}'\"",
".",
"format",
"(",
"k",
",",
"v",
")",
")",
"command",
".",
"append",
"(",
"'pipe:1'",
")",
"return",
"subprocess",
".",
"check_output",
"(",
"command",
")"
] | Extracts a single clip according to audioClipSpec.
Arguments:
audioClipSpec (AudioClipSpec): Clip specification
showLogs (bool): Show ffmpeg output | [
"Extracts",
"a",
"single",
"clip",
"according",
"to",
"audioClipSpec",
"."
] | b0dd90266656dcbf7e663b3e174dce4d09e74c32 | https://github.com/TiagoBras/audio-clip-extractor/blob/b0dd90266656dcbf7e663b3e174dce4d09e74c32/audioclipextractor/core.py#L82-L114 | train |
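For reference, the command assembled by `_extractClipData` above is roughly the standalone invocation sketched below; the input path, clip boundaries, and metadata text are made-up values, and ffmpeg must be on PATH:

import subprocess

command = [
    "ffmpeg", "-nostats", "-loglevel", "0",
    "-i", "speech.mp3",            # hypothetical input file
    "-ss", "%.3f" % 1.5,           # clip start in seconds
    "-t", "%.3f" % 2.25,           # clip duration in seconds
    "-c", "copy", "-map", "0",
    "-acodec", "libmp3lame", "-ab", "128k",
    "-f", "mp3",
    "-metadata", "TEXT='hello'",   # clip text stored as metadata
    "pipe:1",                      # raw MP3 bytes arrive on stdout
]
clip_bytes = subprocess.check_output(command)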
robinandeer/puzzle | puzzle/plugins/sql/mixins/actions/phenotype.py | PhenotypeActions.add_phenotype | def add_phenotype(self, ind_obj, phenotype_id):
"""Add a phenotype term to the case."""
if phenotype_id.startswith('HP:') or len(phenotype_id) == 7:
logger.debug('querying on HPO term')
hpo_results = phizz.query_hpo([phenotype_id])
else:
logger.debug('querying on OMIM term')
hpo_results = phizz.query_disease([phenotype_id])
added_terms = [] if hpo_results else None
existing_ids = set(term.phenotype_id for term in ind_obj.phenotypes)
for result in hpo_results:
if result['hpo_term'] not in existing_ids:
term = PhenotypeTerm(phenotype_id=result['hpo_term'],
description=result['description'])
logger.info('adding new HPO term: %s', term.phenotype_id)
ind_obj.phenotypes.append(term)
added_terms.append(term)
logger.debug('storing new HPO terms')
self.save()
if added_terms is not None and len(added_terms) > 0:
for case_obj in ind_obj.cases:
self.update_hpolist(case_obj)
return added_terms | python | def add_phenotype(self, ind_obj, phenotype_id):
"""Add a phenotype term to the case."""
if phenotype_id.startswith('HP:') or len(phenotype_id) == 7:
logger.debug('querying on HPO term')
hpo_results = phizz.query_hpo([phenotype_id])
else:
logger.debug('querying on OMIM term')
hpo_results = phizz.query_disease([phenotype_id])
added_terms = [] if hpo_results else None
existing_ids = set(term.phenotype_id for term in ind_obj.phenotypes)
for result in hpo_results:
if result['hpo_term'] not in existing_ids:
term = PhenotypeTerm(phenotype_id=result['hpo_term'],
description=result['description'])
logger.info('adding new HPO term: %s', term.phenotype_id)
ind_obj.phenotypes.append(term)
added_terms.append(term)
logger.debug('storing new HPO terms')
self.save()
if added_terms is not None and len(added_terms) > 0:
for case_obj in ind_obj.cases:
self.update_hpolist(case_obj)
return added_terms | [
"def",
"add_phenotype",
"(",
"self",
",",
"ind_obj",
",",
"phenotype_id",
")",
":",
"if",
"phenotype_id",
".",
"startswith",
"(",
"'HP:'",
")",
"or",
"len",
"(",
"phenotype_id",
")",
"==",
"7",
":",
"logger",
".",
"debug",
"(",
"'querying on HPO term'",
")",
"hpo_results",
"=",
"phizz",
".",
"query_hpo",
"(",
"[",
"phenotype_id",
"]",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"'querying on OMIM term'",
")",
"hpo_results",
"=",
"phizz",
".",
"query_disease",
"(",
"[",
"phenotype_id",
"]",
")",
"added_terms",
"=",
"[",
"]",
"if",
"hpo_results",
"else",
"None",
"existing_ids",
"=",
"set",
"(",
"term",
".",
"phenotype_id",
"for",
"term",
"in",
"ind_obj",
".",
"phenotypes",
")",
"for",
"result",
"in",
"hpo_results",
":",
"if",
"result",
"[",
"'hpo_term'",
"]",
"not",
"in",
"existing_ids",
":",
"term",
"=",
"PhenotypeTerm",
"(",
"phenotype_id",
"=",
"result",
"[",
"'hpo_term'",
"]",
",",
"description",
"=",
"result",
"[",
"'description'",
"]",
")",
"logger",
".",
"info",
"(",
"'adding new HPO term: %s'",
",",
"term",
".",
"phenotype_id",
")",
"ind_obj",
".",
"phenotypes",
".",
"append",
"(",
"term",
")",
"added_terms",
".",
"append",
"(",
"term",
")",
"logger",
".",
"debug",
"(",
"'storing new HPO terms'",
")",
"self",
".",
"save",
"(",
")",
"if",
"added_terms",
"is",
"not",
"None",
"and",
"len",
"(",
"added_terms",
")",
">",
"0",
":",
"for",
"case_obj",
"in",
"ind_obj",
".",
"cases",
":",
"self",
".",
"update_hpolist",
"(",
"case_obj",
")",
"return",
"added_terms"
] | Add a phenotype term to the case. | [
"Add",
"a",
"phenotype",
"term",
"to",
"the",
"case",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/actions/phenotype.py#L13-L39 | train |
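A hypothetical call pattern for `add_phenotype` above; `store` and `ind_obj` stand in for a configured SQL store and a loaded individual and are not constructed here:

added = store.add_phenotype(ind_obj, "HP:0001250")  # hypothetical HPO id
if added is None:
    print("phenotype lookup returned nothing")      # bad or unknown id
else:
    for term in added:  # empty list if every term already existed
        print(term.phenotype_id, term.description)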
robinandeer/puzzle | puzzle/plugins/sql/mixins/actions/phenotype.py | PhenotypeActions.update_hpolist | def update_hpolist(self, case_obj):
"""Update the HPO gene list for a case based on current terms."""
hpo_list = self.case_genelist(case_obj)
hpo_results = hpo_genes(case_obj.phenotype_ids(),
*self.phenomizer_auth)
if hpo_results is None:
pass
# Why raise here?
# raise RuntimeError("couldn't link to genes, try again")
else:
gene_ids = [result['gene_id'] for result in hpo_results
if result['gene_id']]
hpo_list.gene_ids = gene_ids
self.save() | python | def update_hpolist(self, case_obj):
"""Update the HPO gene list for a case based on current terms."""
hpo_list = self.case_genelist(case_obj)
hpo_results = hpo_genes(case_obj.phenotype_ids(),
*self.phenomizer_auth)
if hpo_results is None:
pass
# Why raise here?
# raise RuntimeError("couldn't link to genes, try again")
else:
gene_ids = [result['gene_id'] for result in hpo_results
if result['gene_id']]
hpo_list.gene_ids = gene_ids
self.save() | [
"def",
"update_hpolist",
"(",
"self",
",",
"case_obj",
")",
":",
"hpo_list",
"=",
"self",
".",
"case_genelist",
"(",
"case_obj",
")",
"hpo_results",
"=",
"hpo_genes",
"(",
"case_obj",
".",
"phenotype_ids",
"(",
")",
",",
"*",
"self",
".",
"phenomizer_auth",
")",
"if",
"hpo_results",
"is",
"None",
":",
"pass",
"# Why raise here?",
"# raise RuntimeError(\"couldn't link to genes, try again\")",
"else",
":",
"gene_ids",
"=",
"[",
"result",
"[",
"'gene_id'",
"]",
"for",
"result",
"in",
"hpo_results",
"if",
"result",
"[",
"'gene_id'",
"]",
"]",
"hpo_list",
".",
"gene_ids",
"=",
"gene_ids",
"self",
".",
"save",
"(",
")"
] | Update the HPO gene list for a case based on current terms. | [
"Update",
"the",
"HPO",
"gene",
"list",
"for",
"a",
"case",
"based",
"on",
"current",
"terms",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/actions/phenotype.py#L41-L55 | train |
robinandeer/puzzle | puzzle/plugins/sql/mixins/actions/phenotype.py | PhenotypeActions.remove_phenotype | def remove_phenotype(self, ind_obj, phenotypes=None):
"""Remove multiple phenotypes from an individual."""
if phenotypes is None:
logger.info("delete all phenotypes related to %s", ind_obj.ind_id)
self.query(PhenotypeTerm).filter_by(ind_id=ind_obj.id).delete()
else:
for term in ind_obj.phenotypes:
if term.phenotype_id in phenotypes:
logger.info("delete phenotype: %s from %s",
term.phenotype_id, ind_obj.ind_id)
self.session.delete(term)
logger.debug('persist removals')
self.save()
for case_obj in ind_obj.cases:
self.update_hpolist(case_obj) | python | def remove_phenotype(self, ind_obj, phenotypes=None):
"""Remove multiple phenotypes from an individual."""
if phenotypes is None:
logger.info("delete all phenotypes related to %s", ind_obj.ind_id)
self.query(PhenotypeTerm).filter_by(ind_id=ind_obj.id).delete()
else:
for term in ind_obj.phenotypes:
if term.phenotype_id in phenotypes:
logger.info("delete phenotype: %s from %s",
term.phenotype_id, ind_obj.ind_id)
self.session.delete(term)
logger.debug('persist removals')
self.save()
for case_obj in ind_obj.cases:
self.update_hpolist(case_obj) | [
"def",
"remove_phenotype",
"(",
"self",
",",
"ind_obj",
",",
"phenotypes",
"=",
"None",
")",
":",
"if",
"phenotypes",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"\"delete all phenotypes related to %s\"",
",",
"ind_obj",
".",
"ind_id",
")",
"self",
".",
"query",
"(",
"PhenotypeTerm",
")",
".",
"filter_by",
"(",
"ind_id",
"=",
"ind_obj",
".",
"id",
")",
".",
"delete",
"(",
")",
"else",
":",
"for",
"term",
"in",
"ind_obj",
".",
"phenotypes",
":",
"if",
"term",
".",
"phenotype_id",
"in",
"phenotypes",
":",
"logger",
".",
"info",
"(",
"\"delete phenotype: %s from %s\"",
",",
"term",
".",
"phenotype_id",
",",
"ind_obj",
".",
"ind_id",
")",
"self",
".",
"session",
".",
"delete",
"(",
"term",
")",
"logger",
".",
"debug",
"(",
"'persist removals'",
")",
"self",
".",
"save",
"(",
")",
"for",
"case_obj",
"in",
"ind_obj",
".",
"cases",
":",
"self",
".",
"update_hpolist",
"(",
"case_obj",
")"
] | Remove multiple phenotypes from an individual. | [
"Remove",
"multiple",
"phenotypes",
"from",
"an",
"individual",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/actions/phenotype.py#L57-L71 | train |
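Continuing the hypothetical `store`/`ind_obj` pair from the sketch above, `remove_phenotype` mirrors the add path and refreshes the HPO gene list via `update_hpolist` on each related case:

# Drop one specific term...
store.remove_phenotype(ind_obj, phenotypes=["HP:0001250"])

# ...or omit `phenotypes` to delete every term on the individual.
store.remove_phenotype(ind_obj)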
thautwarm/Redy | Redy/ADT/Core.py | match | def match(mode_lst: list, obj: 'object that has __destruct__ method'):
"""
>>> from Redy.ADT.Core import match, data, P
>>> from Redy.ADT.traits import ConsInd, Discrete
>>> @data
>>> class List(ConsInd, Discrete):
>>> # ConsInd(index following constructing)
>>> # |-> Ind;
>>> # Discrete
>>> # |-> Im(Immutable), Eq
>>> Nil : ...
>>> Cons: lambda head, tail: ...
>>> lst = List.Cons(2, List.Cons(1, List.Nil))
>>> mode_lst = P[List.Cons, P, P[List.Cons, 1]]
>>> if match(mode_lst, lst):
>>> assert mode_lst == [List.Cons, 2, [List.Cons, 1]]
"""
# noinspection PyUnresolvedReferences
try:
# noinspection PyUnresolvedReferences
structure = obj.__destruct__()
except AttributeError:
return False
n = len(mode_lst)
if n > len(structure):
return False
for i in range(n):
mode = mode_lst[i]
# noinspection PyUnresolvedReferences
elem = obj[i]
if isinstance(mode, PatternList):
if not match(mode, elem):
return False
elif mode is P:
# noinspection PyUnresolvedReferences
mode_lst[i] = elem
elif mode is any:
pass
elif mode != elem:
return False
return True | python | def match(mode_lst: list, obj: 'object that has __destruct__ method'):
"""
>>> from Redy.ADT.Core import match, data, P
>>> from Redy.ADT.traits import ConsInd, Discrete
>>> @data
>>> class List(ConsInd, Discrete):
>>> # ConsInd(index following constructing)
>>> # |-> Ind;
>>> # Discrete
>>> # |-> Im(Immutable), Eq
>>> Nil : ...
>>> Cons: lambda head, tail: ...
>>> lst = List.Cons(2, List.Cons(1, List.Nil))
>>> mode_lst = P[List.Cons, P, P[List.Cons, 1]]
>>> if match(mode_lst, lst):
>>> assert mode_lst == [List.Cons, 2, [List.Cons, 1]]
"""
# noinspection PyUnresolvedReferences
try:
# noinspection PyUnresolvedReferences
structure = obj.__destruct__()
except AttributeError:
return False
n = len(mode_lst)
if n > len(structure):
return False
for i in range(n):
mode = mode_lst[i]
# noinspection PyUnresolvedReferences
elem = obj[i]
if isinstance(mode, PatternList):
if not match(mode, elem):
return False
elif mode is P:
# noinspection PyUnresolvedReferences
mode_lst[i] = elem
elif mode is any:
pass
elif mode != elem:
return False
return True | [
"def",
"match",
"(",
"mode_lst",
":",
"list",
",",
"obj",
":",
"'object that has __destruct__ method'",
")",
":",
"# noinspection PyUnresolvedReferences",
"try",
":",
"# noinspection PyUnresolvedReferences",
"structure",
"=",
"obj",
".",
"__destruct__",
"(",
")",
"except",
"AttributeError",
":",
"return",
"False",
"n",
"=",
"len",
"(",
"mode_lst",
")",
"if",
"n",
">",
"len",
"(",
"structure",
")",
":",
"return",
"False",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"mode",
"=",
"mode_lst",
"[",
"i",
"]",
"# noinspection PyUnresolvedReferences",
"elem",
"=",
"obj",
"[",
"i",
"]",
"if",
"isinstance",
"(",
"mode",
",",
"PatternList",
")",
":",
"if",
"not",
"match",
"(",
"mode",
",",
"elem",
")",
":",
"return",
"False",
"elif",
"mode",
"is",
"P",
":",
"# noinspection PyUnresolvedReferences",
"mode_lst",
"[",
"i",
"]",
"=",
"elem",
"elif",
"mode",
"is",
"any",
":",
"pass",
"elif",
"mode",
"!=",
"elem",
":",
"return",
"False",
"return",
"True"
] | >>> from Redy.ADT.Core import match, data, P
>>> from Redy.ADT.traits import ConsInd, Discrete
>>> @data
>>> class List(ConsInd, Discrete):
>>> # ConsInd(index following constructing)
>>> # |-> Ind;
>>> # Discrete
>>> # |-> Im(Immutable), Eq
>>> Nil : ...
>>> Cons: lambda head, tail: ...
>>> lst = List.Cons(2, List.Cons(1, List.Nil))
>>> mode_lst = P[List.Cons, P, P[List.Cons, 1]]
>>> if match(mode_lst, lst):
>>> assert mode_lst == [List.Cons, 2, [List.Cons, 1]] | [
">>>",
"from",
"Redy",
".",
"ADT",
".",
"Core",
"import",
"match",
"data",
"P",
">>>",
"from",
"Redy",
".",
"ADT",
".",
"traits",
"import",
"ConsInd",
"Discrete",
">>>"
] | 8beee5c5f752edfd2754bb1e6b5f4acb016a7770 | https://github.com/thautwarm/Redy/blob/8beee5c5f752edfd2754bb1e6b5f4acb016a7770/Redy/ADT/Core.py#L185-L227 | train |
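The doctest in the `match` record above keeps every line on a `>>>` prompt; spelled out as a plain script (same names, same expected result):

from Redy.ADT.Core import match, data, P
from Redy.ADT.traits import ConsInd, Discrete

@data
class List(ConsInd, Discrete):
    Nil: ...
    Cons: lambda head, tail: ...

lst = List.Cons(2, List.Cons(1, List.Nil))
mode_lst = P[List.Cons, P, P[List.Cons, 1]]
if match(mode_lst, lst):
    # the bare P slot was filled in with the matched element
    assert mode_lst == [List.Cons, 2, [List.Cons, 1]]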
robinandeer/puzzle | puzzle/plugins/sql/mixins/actions/gemini.py | GeminiActions.gemini_query | def gemini_query(self, query_id):
"""Return a gemini query
Args:
            query_id (int)
"""
logger.debug("Looking for query with id {0}".format(query_id))
return self.query(GeminiQuery).filter_by(id=query_id).first() | python | def gemini_query(self, query_id):
"""Return a gemini query
Args:
            query_id (int)
"""
logger.debug("Looking for query with id {0}".format(query_id))
return self.query(GeminiQuery).filter_by(id=query_id).first() | [
"def",
"gemini_query",
"(",
"self",
",",
"query_id",
")",
":",
"logger",
".",
"debug",
"(",
"\"Looking for query with id {0}\"",
".",
"format",
"(",
"query_id",
")",
")",
"return",
"self",
".",
"query",
"(",
"GeminiQuery",
")",
".",
"filter_by",
"(",
"id",
"=",
"query_id",
")",
".",
"first",
"(",
")"
] | Return a gemini query
Args:
        query_id (int) | [
"Return",
"a",
"gemini",
"query"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/actions/gemini.py#L10-L17 | train |
robinandeer/puzzle | puzzle/plugins/sql/mixins/actions/gemini.py | GeminiActions.add_gemini_query | def add_gemini_query(self, name, query):
"""Add a user defined gemini query
Args:
name (str)
query (str)
"""
logger.info("Adding query {0} with text {1}".format(name, query))
new_query = GeminiQuery(name=name, query=query)
self.session.add(new_query)
self.save()
return new_query | python | def add_gemini_query(self, name, query):
"""Add a user defined gemini query
Args:
name (str)
query (str)
"""
logger.info("Adding query {0} with text {1}".format(name, query))
new_query = GeminiQuery(name=name, query=query)
self.session.add(new_query)
self.save()
return new_query | [
"def",
"add_gemini_query",
"(",
"self",
",",
"name",
",",
"query",
")",
":",
"logger",
".",
"info",
"(",
"\"Adding query {0} with text {1}\"",
".",
"format",
"(",
"name",
",",
"query",
")",
")",
"new_query",
"=",
"GeminiQuery",
"(",
"name",
"=",
"name",
",",
"query",
"=",
"query",
")",
"self",
".",
"session",
".",
"add",
"(",
"new_query",
")",
"self",
".",
"save",
"(",
")",
"return",
"new_query"
] | Add a user defined gemini query
Args:
name (str)
query (str) | [
"Add",
"a",
"user",
"defined",
"gemini",
"query"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/actions/gemini.py#L23-L34 | train |
robinandeer/puzzle | puzzle/plugins/sql/mixins/actions/gemini.py | GeminiActions.delete_gemini_query | def delete_gemini_query(self, query_id):
"""Delete a gemini query
Args:
            query_id (int)
"""
query_obj = self.gemini_query(query_id)
logger.debug("Delete query: {0}".format(query_obj.name_query))
self.session.delete(query_obj)
self.save() | python | def delete_gemini_query(self, query_id):
"""Delete a gemini query
Args:
            query_id (int)
"""
query_obj = self.gemini_query(query_id)
logger.debug("Delete query: {0}".format(query_obj.name_query))
self.session.delete(query_obj)
self.save() | [
"def",
"delete_gemini_query",
"(",
"self",
",",
"query_id",
")",
":",
"query_obj",
"=",
"self",
".",
"gemini_query",
"(",
"query_id",
")",
"logger",
".",
"debug",
"(",
"\"Delete query: {0}\"",
".",
"format",
"(",
"query_obj",
".",
"name_query",
")",
")",
"self",
".",
"session",
".",
"delete",
"(",
"query_obj",
")",
"self",
".",
"save",
"(",
")"
] | Delete a gemini query
Args:
        query_id (int) | [
"Delete",
"a",
"gemini",
"query"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/actions/gemini.py#L36-L45 | train |
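Taken together, the three gemini-query records above form a small CRUD surface; a hypothetical round trip against a configured `store` (the `.id` attribute mirrors the `filter_by(id=...)` lookup):

query = store.add_gemini_query(
    "high_impact",
    "SELECT * FROM variants WHERE impact_severity = 'HIGH'",
)
fetched = store.gemini_query(query.id)  # look it up by primary key
store.delete_gemini_query(query.id)     # and remove it again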
gusdan/geoindex | geoindex/geo_point.py | GeoPoint.distance_to | def distance_to(self, point, unit='km'):
"""
Calculate distance in miles or kilometers between current and other
passed point.
"""
assert isinstance(point, GeoPoint), (
'Other point should also be a Point instance.'
)
if self == point:
return 0.0
coefficient = 69.09
theta = self.longitude - point.longitude
unit = unit.lower() if unit else None
distance = math.degrees(math.acos(
math.sin(self.rad_latitude) * math.sin(point.rad_latitude) +
math.cos(self.rad_latitude) * math.cos(point.rad_latitude) *
math.cos(math.radians(theta))
)) * coefficient
if unit == 'km':
return utils.mi_to_km(distance)
return distance | python | def distance_to(self, point, unit='km'):
"""
Calculate distance in miles or kilometers between current and other
passed point.
"""
assert isinstance(point, GeoPoint), (
'Other point should also be a Point instance.'
)
if self == point:
return 0.0
coefficient = 69.09
theta = self.longitude - point.longitude
unit = unit.lower() if unit else None
distance = math.degrees(math.acos(
math.sin(self.rad_latitude) * math.sin(point.rad_latitude) +
math.cos(self.rad_latitude) * math.cos(point.rad_latitude) *
math.cos(math.radians(theta))
)) * coefficient
if unit == 'km':
return utils.mi_to_km(distance)
return distance | [
"def",
"distance_to",
"(",
"self",
",",
"point",
",",
"unit",
"=",
"'km'",
")",
":",
"assert",
"isinstance",
"(",
"point",
",",
"GeoPoint",
")",
",",
"(",
"'Other point should also be a Point instance.'",
")",
"if",
"self",
"==",
"point",
":",
"return",
"0.0",
"coefficient",
"=",
"69.09",
"theta",
"=",
"self",
".",
"longitude",
"-",
"point",
".",
"longitude",
"unit",
"=",
"unit",
".",
"lower",
"(",
")",
"if",
"unit",
"else",
"None",
"distance",
"=",
"math",
".",
"degrees",
"(",
"math",
".",
"acos",
"(",
"math",
".",
"sin",
"(",
"self",
".",
"rad_latitude",
")",
"*",
"math",
".",
"sin",
"(",
"point",
".",
"rad_latitude",
")",
"+",
"math",
".",
"cos",
"(",
"self",
".",
"rad_latitude",
")",
"*",
"math",
".",
"cos",
"(",
"point",
".",
"rad_latitude",
")",
"*",
"math",
".",
"cos",
"(",
"math",
".",
"radians",
"(",
"theta",
")",
")",
")",
")",
"*",
"coefficient",
"if",
"unit",
"==",
"'km'",
":",
"return",
"utils",
".",
"mi_to_km",
"(",
"distance",
")",
"return",
"distance"
] | Calculate distance in miles or kilometers between current and other
passed point. | [
"Calculate",
"distance",
"in",
"miles",
"or",
"kilometers",
"between",
"current",
"and",
"other",
"passed",
"point",
"."
] | d1b3b5a52271200713a64041576caa1f2d588f55 | https://github.com/gusdan/geoindex/blob/d1b3b5a52271200713a64041576caa1f2d588f55/geoindex/geo_point.py#L49-L72 | train |
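`distance_to` above is the spherical law of cosines scaled by 69.09 statute miles per degree, converted to kilometres on request; a sketch assuming a `GeoPoint(latitude, longitude)` constructor (the argument order is not shown in the record):

from geoindex.geo_point import GeoPoint

london = GeoPoint(51.5074, -0.1278)  # assumed (latitude, longitude) order
paris = GeoPoint(48.8566, 2.3522)

print(london.distance_to(paris))             # kilometres (the default unit)
print(london.distance_to(paris, unit="mi"))  # anything but 'km' stays in miles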
gusdan/geoindex | geoindex/geo_point.py | GeoPoint.rad_latitude | def rad_latitude(self):
"""
Lazy conversion degrees latitude to radians.
"""
if self._rad_latitude is None:
self._rad_latitude = math.radians(self.latitude)
return self._rad_latitude | python | def rad_latitude(self):
"""
Lazy conversion degrees latitude to radians.
"""
if self._rad_latitude is None:
self._rad_latitude = math.radians(self.latitude)
return self._rad_latitude | [
"def",
"rad_latitude",
"(",
"self",
")",
":",
"if",
"self",
".",
"_rad_latitude",
"is",
"None",
":",
"self",
".",
"_rad_latitude",
"=",
"math",
".",
"radians",
"(",
"self",
".",
"latitude",
")",
"return",
"self",
".",
"_rad_latitude"
] | Lazy conversion degrees latitude to radians. | [
"Lazy",
"conversion",
"degrees",
"latitude",
"to",
"radians",
"."
] | d1b3b5a52271200713a64041576caa1f2d588f55 | https://github.com/gusdan/geoindex/blob/d1b3b5a52271200713a64041576caa1f2d588f55/geoindex/geo_point.py#L75-L81 | train |
gusdan/geoindex | geoindex/geo_point.py | GeoPoint.rad_longitude | def rad_longitude(self):
"""
Lazy conversion degrees longitude to radians.
"""
if self._rad_longitude is None:
self._rad_longitude = math.radians(self.longitude)
return self._rad_longitude | python | def rad_longitude(self):
"""
Lazy conversion degrees longitude to radians.
"""
if self._rad_longitude is None:
self._rad_longitude = math.radians(self.longitude)
return self._rad_longitude | [
"def",
"rad_longitude",
"(",
"self",
")",
":",
"if",
"self",
".",
"_rad_longitude",
"is",
"None",
":",
"self",
".",
"_rad_longitude",
"=",
"math",
".",
"radians",
"(",
"self",
".",
"longitude",
")",
"return",
"self",
".",
"_rad_longitude"
] | Lazy conversion degrees longitude to radians. | [
"Lazy",
"conversion",
"degrees",
"longitude",
"to",
"radians",
"."
] | d1b3b5a52271200713a64041576caa1f2d588f55 | https://github.com/gusdan/geoindex/blob/d1b3b5a52271200713a64041576caa1f2d588f55/geoindex/geo_point.py#L84-L90 | train |
okeuday/erlang_py | examples/port.py | send | def send(term, stream):
"""Write an Erlang term to an output stream."""
payload = erlang.term_to_binary(term)
header = struct.pack('!I', len(payload))
stream.write(header)
stream.write(payload)
stream.flush() | python | def send(term, stream):
"""Write an Erlang term to an output stream."""
payload = erlang.term_to_binary(term)
header = struct.pack('!I', len(payload))
stream.write(header)
stream.write(payload)
stream.flush() | [
"def",
"send",
"(",
"term",
",",
"stream",
")",
":",
"payload",
"=",
"erlang",
".",
"term_to_binary",
"(",
"term",
")",
"header",
"=",
"struct",
".",
"pack",
"(",
"'!I'",
",",
"len",
"(",
"payload",
")",
")",
"stream",
".",
"write",
"(",
"header",
")",
"stream",
".",
"write",
"(",
"payload",
")",
"stream",
".",
"flush",
"(",
")"
] | Write an Erlang term to an output stream. | [
"Write",
"an",
"Erlang",
"term",
"to",
"an",
"output",
"stream",
"."
] | 81b7c2ace66b6bdee23602a6802efff541223fa3 | https://github.com/okeuday/erlang_py/blob/81b7c2ace66b6bdee23602a6802efff541223fa3/examples/port.py#L6-L12 | train |
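The helper above implements Erlang's `{packet, 4}` framing: a four-byte big-endian length header followed by the external-term-format payload. Built by hand (the term is a made-up example):

import struct
import erlang

payload = erlang.term_to_binary([1, 2, 3])         # made-up term
frame = struct.pack("!I", len(payload)) + payload  # what send() writes
assert struct.unpack("!I", frame[:4]) == (len(payload),)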
okeuday/erlang_py | examples/port.py | recv | def recv(stream):
"""Read an Erlang term from an input stream."""
header = stream.read(4)
if len(header) != 4:
return None # EOF
(length,) = struct.unpack('!I', header)
payload = stream.read(length)
if len(payload) != length:
return None
term = erlang.binary_to_term(payload)
return term | python | def recv(stream):
"""Read an Erlang term from an input stream."""
header = stream.read(4)
if len(header) != 4:
return None # EOF
(length,) = struct.unpack('!I', header)
payload = stream.read(length)
if len(payload) != length:
return None
term = erlang.binary_to_term(payload)
return term | [
"def",
"recv",
"(",
"stream",
")",
":",
"header",
"=",
"stream",
".",
"read",
"(",
"4",
")",
"if",
"len",
"(",
"header",
")",
"!=",
"4",
":",
"return",
"None",
"# EOF",
"(",
"length",
",",
")",
"=",
"struct",
".",
"unpack",
"(",
"'!I'",
",",
"header",
")",
"payload",
"=",
"stream",
".",
"read",
"(",
"length",
")",
"if",
"len",
"(",
"payload",
")",
"!=",
"length",
":",
"return",
"None",
"term",
"=",
"erlang",
".",
"binary_to_term",
"(",
"payload",
")",
"return",
"term"
] | Read an Erlang term from an input stream. | [
"Read",
"an",
"Erlang",
"term",
"from",
"an",
"input",
"stream",
"."
] | 81b7c2ace66b6bdee23602a6802efff541223fa3 | https://github.com/okeuday/erlang_py/blob/81b7c2ace66b6bdee23602a6802efff541223fa3/examples/port.py#L14-L24 | train |
okeuday/erlang_py | examples/port.py | recv_loop | def recv_loop(stream):
"""Yield Erlang terms from an input stream."""
message = recv(stream)
while message:
yield message
message = recv(stream) | python | def recv_loop(stream):
"""Yield Erlang terms from an input stream."""
message = recv(stream)
while message:
yield message
message = recv(stream) | [
"def",
"recv_loop",
"(",
"stream",
")",
":",
"message",
"=",
"recv",
"(",
"stream",
")",
"while",
"message",
":",
"yield",
"message",
"message",
"=",
"recv",
"(",
"stream",
")"
] | Yield Erlang terms from an input stream. | [
"Yield",
"Erlang",
"terms",
"from",
"an",
"input",
"stream",
"."
] | 81b7c2ace66b6bdee23602a6802efff541223fa3 | https://github.com/okeuday/erlang_py/blob/81b7c2ace66b6bdee23602a6802efff541223fa3/examples/port.py#L26-L31 | train |
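Combining `send` and `recv_loop` from the records above gives a minimal echo port; using `sys.stdin.buffer`/`sys.stdout.buffer` for binary I/O is a Python 3 assumption:

import sys

# Decode each framed term from Erlang and write it straight back.
for term in recv_loop(sys.stdin.buffer):
    send(term, sys.stdout.buffer)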
robinandeer/puzzle | puzzle/plugins/vcf/mixins/variant_extras/genotype.py | GenotypeExtras._add_genotype_calls | def _add_genotype_calls(self, variant_obj, variant_line, case_obj):
"""Add the genotype calls for the variant
Args:
variant_obj (puzzle.models.Variant)
            variant_line (str): A raw variant line from the VCF
case_obj (puzzle.models.Case)
"""
variant_line = variant_line.split('\t')
#if there is gt calls we have no individuals to add
if len(variant_line) > 8:
gt_format = variant_line[8].split(':')
for individual in case_obj.individuals:
sample_id = individual.ind_id
index = individual.ind_index
gt_call = variant_line[9+index].split(':')
raw_call = dict(zip(gt_format, gt_call))
genotype = Genotype(**raw_call)
variant_obj.add_individual(puzzle_genotype(
sample_id = sample_id,
genotype = genotype.genotype,
case_id = case_obj.name,
phenotype = individual.phenotype,
ref_depth = genotype.ref_depth,
alt_depth = genotype.alt_depth,
genotype_quality = genotype.genotype_quality,
depth = genotype.depth_of_coverage,
supporting_evidence = genotype.supporting_evidence,
pe_support = genotype.pe_support,
sr_support = genotype.sr_support,
)) | python | def _add_genotype_calls(self, variant_obj, variant_line, case_obj):
"""Add the genotype calls for the variant
Args:
variant_obj (puzzle.models.Variant)
            variant_line (str): A raw variant line from the VCF
case_obj (puzzle.models.Case)
"""
variant_line = variant_line.split('\t')
        # if there are no gt calls, there are no individuals to add
if len(variant_line) > 8:
gt_format = variant_line[8].split(':')
for individual in case_obj.individuals:
sample_id = individual.ind_id
index = individual.ind_index
gt_call = variant_line[9+index].split(':')
raw_call = dict(zip(gt_format, gt_call))
genotype = Genotype(**raw_call)
variant_obj.add_individual(puzzle_genotype(
sample_id = sample_id,
genotype = genotype.genotype,
case_id = case_obj.name,
phenotype = individual.phenotype,
ref_depth = genotype.ref_depth,
alt_depth = genotype.alt_depth,
genotype_quality = genotype.genotype_quality,
depth = genotype.depth_of_coverage,
supporting_evidence = genotype.supporting_evidence,
pe_support = genotype.pe_support,
sr_support = genotype.sr_support,
)) | [
"def",
"_add_genotype_calls",
"(",
"self",
",",
"variant_obj",
",",
"variant_line",
",",
"case_obj",
")",
":",
"variant_line",
"=",
"variant_line",
".",
"split",
"(",
"'\\t'",
")",
"#if there is gt calls we have no individuals to add",
"if",
"len",
"(",
"variant_line",
")",
">",
"8",
":",
"gt_format",
"=",
"variant_line",
"[",
"8",
"]",
".",
"split",
"(",
"':'",
")",
"for",
"individual",
"in",
"case_obj",
".",
"individuals",
":",
"sample_id",
"=",
"individual",
".",
"ind_id",
"index",
"=",
"individual",
".",
"ind_index",
"gt_call",
"=",
"variant_line",
"[",
"9",
"+",
"index",
"]",
".",
"split",
"(",
"':'",
")",
"raw_call",
"=",
"dict",
"(",
"zip",
"(",
"gt_format",
",",
"gt_call",
")",
")",
"genotype",
"=",
"Genotype",
"(",
"*",
"*",
"raw_call",
")",
"variant_obj",
".",
"add_individual",
"(",
"puzzle_genotype",
"(",
"sample_id",
"=",
"sample_id",
",",
"genotype",
"=",
"genotype",
".",
"genotype",
",",
"case_id",
"=",
"case_obj",
".",
"name",
",",
"phenotype",
"=",
"individual",
".",
"phenotype",
",",
"ref_depth",
"=",
"genotype",
".",
"ref_depth",
",",
"alt_depth",
"=",
"genotype",
".",
"alt_depth",
",",
"genotype_quality",
"=",
"genotype",
".",
"genotype_quality",
",",
"depth",
"=",
"genotype",
".",
"depth_of_coverage",
",",
"supporting_evidence",
"=",
"genotype",
".",
"supporting_evidence",
",",
"pe_support",
"=",
"genotype",
".",
"pe_support",
",",
"sr_support",
"=",
"genotype",
".",
"sr_support",
",",
")",
")"
] | Add the genotype calls for the variant
Args:
variant_obj (puzzle.models.Variant)
            variant_line (str): A raw variant line from the VCF
case_obj (puzzle.models.Case) | [
"Add",
"the",
"genotype",
"calls",
"for",
"the",
"variant"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/vcf/mixins/variant_extras/genotype.py#L12-L47 | train |
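The column handling in `_add_genotype_calls` above is the standard VCF split; in isolation, with a made-up single-sample line:

line = "1\t100\t.\tA\tT\t60\tPASS\t.\tGT:DP:GQ\t0/1:35:99"
cols = line.split("\t")
gt_format = cols[8].split(":")            # ['GT', 'DP', 'GQ']
gt_call = cols[9].split(":")              # first sample, ind_index == 0
raw_call = dict(zip(gt_format, gt_call))  # {'GT': '0/1', 'DP': '35', 'GQ': '99'}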
inveniosoftware-contrib/json-merger | json_merger/conflict.py | Conflict.with_prefix | def with_prefix(self, root_path):
"""Returns a new conflict with a prepended prefix as a path."""
return Conflict(self.conflict_type, root_path + self.path, self.body) | python | def with_prefix(self, root_path):
"""Returns a new conflict with a prepended prefix as a path."""
return Conflict(self.conflict_type, root_path + self.path, self.body) | [
"def",
"with_prefix",
"(",
"self",
",",
"root_path",
")",
":",
"return",
"Conflict",
"(",
"self",
".",
"conflict_type",
",",
"root_path",
"+",
"self",
".",
"path",
",",
"self",
".",
"body",
")"
] | Returns a new conflict with a prepended prefix as a path. | [
"Returns",
"a",
"new",
"conflict",
"with",
"a",
"prepended",
"prefix",
"as",
"a",
"path",
"."
] | adc6d372da018427e1db7b92424d3471e01a4118 | https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/conflict.py#L95-L97 | train |
inveniosoftware-contrib/json-merger | json_merger/conflict.py | Conflict.to_json | def to_json(self):
"""Deserializes conflict to a JSON object.
It returns list of:
`json-patch <https://tools.ietf.org/html/rfc6902>`_ format.
- REORDER, SET_FIELD become "op": "replace"
- MANUAL_MERGE, ADD_BACK_TO_HEAD become "op": "add"
- Path becomes `json-pointer <https://tools.ietf.org/html/rfc6901>`_
- Original conflict type is added to "$type"
"""
# map ConflictType to json-patch operator
path = self.path
if self.conflict_type in ('REORDER', 'SET_FIELD'):
op = 'replace'
elif self.conflict_type in ('MANUAL_MERGE', 'ADD_BACK_TO_HEAD'):
op = 'add'
path += ('-',)
elif self.conflict_type == 'REMOVE_FIELD':
op = 'remove'
else:
raise ValueError(
'Conflict Type %s can not be mapped to a json-patch operation'
                % self.conflict_type
)
# stringify path array
json_pointer = '/' + '/'.join(str(el) for el in path)
conflict_values = force_list(self.body)
conflicts = []
for value in conflict_values:
if value is not None or self.conflict_type == 'REMOVE_FIELD':
conflicts.append({
'path': json_pointer,
'op': op,
'value': value,
'$type': self.conflict_type
})
return json.dumps(conflicts) | python | def to_json(self):
"""Deserializes conflict to a JSON object.
It returns list of:
`json-patch <https://tools.ietf.org/html/rfc6902>`_ format.
- REORDER, SET_FIELD become "op": "replace"
- MANUAL_MERGE, ADD_BACK_TO_HEAD become "op": "add"
- Path becomes `json-pointer <https://tools.ietf.org/html/rfc6901>`_
- Original conflict type is added to "$type"
"""
# map ConflictType to json-patch operator
path = self.path
if self.conflict_type in ('REORDER', 'SET_FIELD'):
op = 'replace'
elif self.conflict_type in ('MANUAL_MERGE', 'ADD_BACK_TO_HEAD'):
op = 'add'
path += ('-',)
elif self.conflict_type == 'REMOVE_FIELD':
op = 'remove'
else:
raise ValueError(
'Conflict Type %s can not be mapped to a json-patch operation'
                % self.conflict_type
)
# stringify path array
json_pointer = '/' + '/'.join(str(el) for el in path)
conflict_values = force_list(self.body)
conflicts = []
for value in conflict_values:
if value is not None or self.conflict_type == 'REMOVE_FIELD':
conflicts.append({
'path': json_pointer,
'op': op,
'value': value,
'$type': self.conflict_type
})
return json.dumps(conflicts) | [
"def",
"to_json",
"(",
"self",
")",
":",
"# map ConflictType to json-patch operator",
"path",
"=",
"self",
".",
"path",
"if",
"self",
".",
"conflict_type",
"in",
"(",
"'REORDER'",
",",
"'SET_FIELD'",
")",
":",
"op",
"=",
"'replace'",
"elif",
"self",
".",
"conflict_type",
"in",
"(",
"'MANUAL_MERGE'",
",",
"'ADD_BACK_TO_HEAD'",
")",
":",
"op",
"=",
"'add'",
"path",
"+=",
"(",
"'-'",
",",
")",
"elif",
"self",
".",
"conflict_type",
"==",
"'REMOVE_FIELD'",
":",
"op",
"=",
"'remove'",
"else",
":",
"raise",
"ValueError",
"(",
"'Conflict Type %s can not be mapped to a json-patch operation'",
"%",
"conflict_type",
")",
"# stringify path array",
"json_pointer",
"=",
"'/'",
"+",
"'/'",
".",
"join",
"(",
"str",
"(",
"el",
")",
"for",
"el",
"in",
"path",
")",
"conflict_values",
"=",
"force_list",
"(",
"self",
".",
"body",
")",
"conflicts",
"=",
"[",
"]",
"for",
"value",
"in",
"conflict_values",
":",
"if",
"value",
"is",
"not",
"None",
"or",
"self",
".",
"conflict_type",
"==",
"'REMOVE_FIELD'",
":",
"conflicts",
".",
"append",
"(",
"{",
"'path'",
":",
"json_pointer",
",",
"'op'",
":",
"op",
",",
"'value'",
":",
"value",
",",
"'$type'",
":",
"self",
".",
"conflict_type",
"}",
")",
"return",
"json",
".",
"dumps",
"(",
"conflicts",
")"
    ] | Serializes the conflict to a JSON string.
    It returns a list of operations in
    `json-patch <https://tools.ietf.org/html/rfc6902>`_ format.
- REORDER, SET_FIELD become "op": "replace"
- MANUAL_MERGE, ADD_BACK_TO_HEAD become "op": "add"
- Path becomes `json-pointer <https://tools.ietf.org/html/rfc6901>`_
- Original conflict type is added to "$type" | [
"Deserializes",
"conflict",
"to",
"a",
"JSON",
"object",
"."
] | adc6d372da018427e1db7b92424d3471e01a4118 | https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/conflict.py#L99-L139 | train |
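A worked example of the mapping documented in `to_json` above, using a SET_FIELD conflict with a made-up path and body (the positional constructor order matches the `with_prefix` record):

from json_merger.conflict import Conflict

c = Conflict("SET_FIELD", ("authors", 0, "name"), "J. Smith")
print(c.to_json())
# [{"path": "/authors/0/name", "op": "replace",
#   "value": "J. Smith", "$type": "SET_FIELD"}]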
jwodder/javaproperties | javaproperties/writing.py | dump | def dump(props, fp, separator='=', comments=None, timestamp=True,
sort_keys=False):
"""
Write a series of key-value pairs to a file in simple line-oriented
``.properties`` format.
:param props: A mapping or iterable of ``(key, value)`` pairs to write to
``fp``. All keys and values in ``props`` must be text strings. If
``sort_keys`` is `False`, the entries are output in iteration order.
:param fp: A file-like object to write the values of ``props`` to. It must
have been opened as a text file with a Latin-1-compatible encoding.
:param separator: The string to use for separating keys & values. Only
``" "``, ``"="``, and ``":"`` (possibly with added whitespace) should
ever be used as the separator.
:type separator: text string
:param comments: if non-`None`, ``comments`` will be written to ``fp`` as a
comment before any other content
:type comments: text string or `None`
:param timestamp: If neither `None` nor `False`, a timestamp in the form of
``Mon Sep 02 14:00:54 EDT 2016`` is written as a comment to ``fp``
after ``comments`` (if any) and before the key-value pairs. If
``timestamp`` is `True`, the current date & time is used. If it is a
number, it is converted from seconds since the epoch to local time. If
it is a `datetime.datetime` object, its value is used directly, with
naïve objects assumed to be in the local timezone.
:type timestamp: `None`, `bool`, number, or `datetime.datetime`
:param bool sort_keys: if true, the elements of ``props`` are sorted
lexicographically by key in the output
:return: `None`
"""
if comments is not None:
print(to_comment(comments), file=fp)
if timestamp is not None and timestamp is not False:
print(to_comment(java_timestamp(timestamp)), file=fp)
for k,v in itemize(props, sort_keys=sort_keys):
print(join_key_value(k, v, separator), file=fp) | python | def dump(props, fp, separator='=', comments=None, timestamp=True,
sort_keys=False):
"""
Write a series of key-value pairs to a file in simple line-oriented
``.properties`` format.
:param props: A mapping or iterable of ``(key, value)`` pairs to write to
``fp``. All keys and values in ``props`` must be text strings. If
``sort_keys`` is `False`, the entries are output in iteration order.
:param fp: A file-like object to write the values of ``props`` to. It must
have been opened as a text file with a Latin-1-compatible encoding.
:param separator: The string to use for separating keys & values. Only
``" "``, ``"="``, and ``":"`` (possibly with added whitespace) should
ever be used as the separator.
:type separator: text string
:param comments: if non-`None`, ``comments`` will be written to ``fp`` as a
comment before any other content
:type comments: text string or `None`
:param timestamp: If neither `None` nor `False`, a timestamp in the form of
``Mon Sep 02 14:00:54 EDT 2016`` is written as a comment to ``fp``
after ``comments`` (if any) and before the key-value pairs. If
``timestamp`` is `True`, the current date & time is used. If it is a
number, it is converted from seconds since the epoch to local time. If
it is a `datetime.datetime` object, its value is used directly, with
naïve objects assumed to be in the local timezone.
:type timestamp: `None`, `bool`, number, or `datetime.datetime`
:param bool sort_keys: if true, the elements of ``props`` are sorted
lexicographically by key in the output
:return: `None`
"""
if comments is not None:
print(to_comment(comments), file=fp)
if timestamp is not None and timestamp is not False:
print(to_comment(java_timestamp(timestamp)), file=fp)
for k,v in itemize(props, sort_keys=sort_keys):
print(join_key_value(k, v, separator), file=fp) | [
"def",
"dump",
"(",
"props",
",",
"fp",
",",
"separator",
"=",
"'='",
",",
"comments",
"=",
"None",
",",
"timestamp",
"=",
"True",
",",
"sort_keys",
"=",
"False",
")",
":",
"if",
"comments",
"is",
"not",
"None",
":",
"print",
"(",
"to_comment",
"(",
"comments",
")",
",",
"file",
"=",
"fp",
")",
"if",
"timestamp",
"is",
"not",
"None",
"and",
"timestamp",
"is",
"not",
"False",
":",
"print",
"(",
"to_comment",
"(",
"java_timestamp",
"(",
"timestamp",
")",
")",
",",
"file",
"=",
"fp",
")",
"for",
"k",
",",
"v",
"in",
"itemize",
"(",
"props",
",",
"sort_keys",
"=",
"sort_keys",
")",
":",
"print",
"(",
"join_key_value",
"(",
"k",
",",
"v",
",",
"separator",
")",
",",
"file",
"=",
"fp",
")"
] | Write a series of key-value pairs to a file in simple line-oriented
``.properties`` format.
:param props: A mapping or iterable of ``(key, value)`` pairs to write to
``fp``. All keys and values in ``props`` must be text strings. If
``sort_keys`` is `False`, the entries are output in iteration order.
:param fp: A file-like object to write the values of ``props`` to. It must
have been opened as a text file with a Latin-1-compatible encoding.
:param separator: The string to use for separating keys & values. Only
``" "``, ``"="``, and ``":"`` (possibly with added whitespace) should
ever be used as the separator.
:type separator: text string
:param comments: if non-`None`, ``comments`` will be written to ``fp`` as a
comment before any other content
:type comments: text string or `None`
:param timestamp: If neither `None` nor `False`, a timestamp in the form of
``Mon Sep 02 14:00:54 EDT 2016`` is written as a comment to ``fp``
after ``comments`` (if any) and before the key-value pairs. If
``timestamp`` is `True`, the current date & time is used. If it is a
number, it is converted from seconds since the epoch to local time. If
it is a `datetime.datetime` object, its value is used directly, with
naïve objects assumed to be in the local timezone.
:type timestamp: `None`, `bool`, number, or `datetime.datetime`
:param bool sort_keys: if true, the elements of ``props`` are sorted
lexicographically by key in the output
:return: `None` | [
"Write",
"a",
"series",
"of",
"key",
"-",
"value",
"pairs",
"to",
"a",
"file",
"in",
"simple",
"line",
"-",
"oriented",
".",
"properties",
"format",
"."
] | 8b48f040305217ebeb80c98c4354691bbb01429b | https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/writing.py#L10-L45 | train |
jwodder/javaproperties | javaproperties/writing.py | dumps | def dumps(props, separator='=', comments=None, timestamp=True, sort_keys=False):
"""
Convert a series of key-value pairs to a text string in simple
line-oriented ``.properties`` format.
:param props: A mapping or iterable of ``(key, value)`` pairs to serialize.
All keys and values in ``props`` must be text strings. If
``sort_keys`` is `False`, the entries are output in iteration order.
:param separator: The string to use for separating keys & values. Only
``" "``, ``"="``, and ``":"`` (possibly with added whitespace) should
ever be used as the separator.
:type separator: text string
:param comments: if non-`None`, ``comments`` will be output as a comment
before any other content
:type comments: text string or `None`
:param timestamp: If neither `None` nor `False`, a timestamp in the form of
``Mon Sep 02 14:00:54 EDT 2016`` is output as a comment after
``comments`` (if any) and before the key-value pairs. If ``timestamp``
is `True`, the current date & time is used. If it is a number, it is
converted from seconds since the epoch to local time. If it is a
`datetime.datetime` object, its value is used directly, with naïve
objects assumed to be in the local timezone.
:type timestamp: `None`, `bool`, number, or `datetime.datetime`
:param bool sort_keys: if true, the elements of ``props`` are sorted
lexicographically by key in the output
:rtype: text string
"""
s = StringIO()
dump(props, s, separator=separator, comments=comments, timestamp=timestamp,
sort_keys=sort_keys)
return s.getvalue() | python | def dumps(props, separator='=', comments=None, timestamp=True, sort_keys=False):
"""
Convert a series of key-value pairs to a text string in simple
line-oriented ``.properties`` format.
:param props: A mapping or iterable of ``(key, value)`` pairs to serialize.
All keys and values in ``props`` must be text strings. If
``sort_keys`` is `False`, the entries are output in iteration order.
:param separator: The string to use for separating keys & values. Only
``" "``, ``"="``, and ``":"`` (possibly with added whitespace) should
ever be used as the separator.
:type separator: text string
:param comments: if non-`None`, ``comments`` will be output as a comment
before any other content
:type comments: text string or `None`
:param timestamp: If neither `None` nor `False`, a timestamp in the form of
``Mon Sep 02 14:00:54 EDT 2016`` is output as a comment after
``comments`` (if any) and before the key-value pairs. If ``timestamp``
is `True`, the current date & time is used. If it is a number, it is
converted from seconds since the epoch to local time. If it is a
`datetime.datetime` object, its value is used directly, with naïve
objects assumed to be in the local timezone.
:type timestamp: `None`, `bool`, number, or `datetime.datetime`
:param bool sort_keys: if true, the elements of ``props`` are sorted
lexicographically by key in the output
:rtype: text string
"""
s = StringIO()
dump(props, s, separator=separator, comments=comments, timestamp=timestamp,
sort_keys=sort_keys)
return s.getvalue() | [
"def",
"dumps",
"(",
"props",
",",
"separator",
"=",
"'='",
",",
"comments",
"=",
"None",
",",
"timestamp",
"=",
"True",
",",
"sort_keys",
"=",
"False",
")",
":",
"s",
"=",
"StringIO",
"(",
")",
"dump",
"(",
"props",
",",
"s",
",",
"separator",
"=",
"separator",
",",
"comments",
"=",
"comments",
",",
"timestamp",
"=",
"timestamp",
",",
"sort_keys",
"=",
"sort_keys",
")",
"return",
"s",
".",
"getvalue",
"(",
")"
] | Convert a series of key-value pairs to a text string in simple
line-oriented ``.properties`` format.
:param props: A mapping or iterable of ``(key, value)`` pairs to serialize.
All keys and values in ``props`` must be text strings. If
``sort_keys`` is `False`, the entries are output in iteration order.
:param separator: The string to use for separating keys & values. Only
``" "``, ``"="``, and ``":"`` (possibly with added whitespace) should
ever be used as the separator.
:type separator: text string
:param comments: if non-`None`, ``comments`` will be output as a comment
before any other content
:type comments: text string or `None`
:param timestamp: If neither `None` nor `False`, a timestamp in the form of
``Mon Sep 02 14:00:54 EDT 2016`` is output as a comment after
``comments`` (if any) and before the key-value pairs. If ``timestamp``
is `True`, the current date & time is used. If it is a number, it is
converted from seconds since the epoch to local time. If it is a
`datetime.datetime` object, its value is used directly, with naïve
objects assumed to be in the local timezone.
:type timestamp: `None`, `bool`, number, or `datetime.datetime`
:param bool sort_keys: if true, the elements of ``props`` are sorted
lexicographically by key in the output
:rtype: text string | [
"Convert",
"a",
"series",
"of",
"key",
"-",
"value",
"pairs",
"to",
"a",
"text",
"string",
"in",
"simple",
"line",
"-",
"oriented",
".",
"properties",
"format",
"."
] | 8b48f040305217ebeb80c98c4354691bbb01429b | https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/writing.py#L47-L77 | train |
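`dumps` above simply routes through `dump` with a StringIO, so one sketch covers both; the timestamp comment is suppressed to keep the output deterministic:

import javaproperties

text = javaproperties.dumps(
    {"port": "8080", "host": "localhost"},
    comments="server settings",
    timestamp=False,   # suppress the date comment
    sort_keys=True,
)
print(text)
# #server settings
# host=localhost
# port=8080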
jwodder/javaproperties | javaproperties/writing.py | join_key_value | def join_key_value(key, value, separator='='):
r"""
Join a key and value together into a single line suitable for adding to a
simple line-oriented ``.properties`` file. No trailing newline is added.
>>> join_key_value('possible separators', '= : space')
'possible\\ separators=\\= \\: space'
:param key: the key
:type key: text string
:param value: the value
:type value: text string
:param separator: the string to use for separating the key & value. Only
``" "``, ``"="``, and ``":"`` (possibly with added whitespace) should
ever be used as the separator.
:type separator: text string
:rtype: text string
"""
# Escapes `key` and `value` the same way as java.util.Properties.store()
return escape(key) \
+ separator \
+ re.sub(r'^ +', lambda m: r'\ ' * m.end(), _base_escape(value)) | python | def join_key_value(key, value, separator='='):
r"""
Join a key and value together into a single line suitable for adding to a
simple line-oriented ``.properties`` file. No trailing newline is added.
>>> join_key_value('possible separators', '= : space')
'possible\\ separators=\\= \\: space'
:param key: the key
:type key: text string
:param value: the value
:type value: text string
:param separator: the string to use for separating the key & value. Only
``" "``, ``"="``, and ``":"`` (possibly with added whitespace) should
ever be used as the separator.
:type separator: text string
:rtype: text string
"""
# Escapes `key` and `value` the same way as java.util.Properties.store()
return escape(key) \
+ separator \
+ re.sub(r'^ +', lambda m: r'\ ' * m.end(), _base_escape(value)) | [
"def",
"join_key_value",
"(",
"key",
",",
"value",
",",
"separator",
"=",
"'='",
")",
":",
"# Escapes `key` and `value` the same way as java.util.Properties.store()",
"return",
"escape",
"(",
"key",
")",
"+",
"separator",
"+",
"re",
".",
"sub",
"(",
"r'^ +'",
",",
"lambda",
"m",
":",
"r'\\ '",
"*",
"m",
".",
"end",
"(",
")",
",",
"_base_escape",
"(",
"value",
")",
")"
] | r"""
Join a key and value together into a single line suitable for adding to a
simple line-oriented ``.properties`` file. No trailing newline is added.
>>> join_key_value('possible separators', '= : space')
'possible\\ separators=\\= \\: space'
:param key: the key
:type key: text string
:param value: the value
:type value: text string
:param separator: the string to use for separating the key & value. Only
``" "``, ``"="``, and ``":"`` (possibly with added whitespace) should
ever be used as the separator.
:type separator: text string
:rtype: text string | [
"r",
"Join",
"a",
"key",
"and",
"value",
"together",
"into",
"a",
"single",
"line",
"suitable",
"for",
"adding",
"to",
"a",
"simple",
"line",
"-",
"oriented",
".",
"properties",
"file",
".",
"No",
"trailing",
"newline",
"is",
"added",
"."
] | 8b48f040305217ebeb80c98c4354691bbb01429b | https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/writing.py#L99-L120 | train |
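`join_key_value` on its own shows the escaping rules; the first call reproduces the record's own doctest, the second follows from the leading-space regex (package-level import is assumed):

from javaproperties import join_key_value

print(join_key_value("possible separators", "= : space"))
# possible\ separators=\= \: space

print(join_key_value("key", "  two leading spaces"))
# key=\ \ two leading spaces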
wistful/pympris | pympris/PlayLists.py | PlayLists.GetPlaylists | def GetPlaylists(self, start, max_count, order, reversed):
"""Gets a set of playlists.
:param int start: The index of the first playlist to be fetched
(according to the ordering).
:param int max_count: The maximum number of playlists to fetch.
:param str order: The ordering that should be used.
:param bool reversed: Whether the order should be reversed.
"""
cv = convert2dbus
return self.iface.GetPlaylists(cv(start, 'u'),
cv(max_count, 'u'),
cv(order, 's'),
cv(reversed, 'b')) | python | def GetPlaylists(self, start, max_count, order, reversed):
"""Gets a set of playlists.
:param int start: The index of the first playlist to be fetched
(according to the ordering).
:param int max_count: The maximum number of playlists to fetch.
:param str order: The ordering that should be used.
:param bool reversed: Whether the order should be reversed.
"""
cv = convert2dbus
return self.iface.GetPlaylists(cv(start, 'u'),
cv(max_count, 'u'),
cv(order, 's'),
cv(reversed, 'b')) | [
"def",
"GetPlaylists",
"(",
"self",
",",
"start",
",",
"max_count",
",",
"order",
",",
"reversed",
")",
":",
"cv",
"=",
"convert2dbus",
"return",
"self",
".",
"iface",
".",
"GetPlaylists",
"(",
"cv",
"(",
"start",
",",
"'u'",
")",
",",
"cv",
"(",
"max_count",
",",
"'u'",
")",
",",
"cv",
"(",
"order",
",",
"'s'",
")",
",",
"cv",
"(",
"reversed",
",",
"'b'",
")",
")"
] | Gets a set of playlists.
:param int start: The index of the first playlist to be fetched
(according to the ordering).
:param int max_count: The maximum number of playlists to fetch.
:param str order: The ordering that should be used.
:param bool reversed: Whether the order should be reversed. | [
"Gets",
"a",
"set",
"of",
"playlists",
"."
] | 4bd64a1f0d151f2adfc392ab34fd9b38894786cb | https://github.com/wistful/pympris/blob/4bd64a1f0d151f2adfc392ab34fd9b38894786cb/pympris/PlayLists.py#L58-L71 | train |
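A hypothetical session against `GetPlaylists` above; `pl` stands for a constructed PlayLists wrapper, and "Alphabetical" follows the MPRIS2 PlaylistOrdering values (both are assumptions, not shown in the record):

# First ten playlists, alphabetical order, not reversed.
for playlist_id, name, icon in pl.GetPlaylists(0, 10, "Alphabetical", False):
    print(playlist_id, name)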
ThaWeatherman/flask-hashing | flask_hashing.py | Hashing.init_app | def init_app(self, app):
'''Initializes the Flask application with this extension. It grabs
the necessary configuration values from ``app.config``, those being
HASHING_METHOD and HASHING_ROUNDS. HASHING_METHOD defaults to ``sha256``
but can be any one of ``hashlib.algorithms``. HASHING_ROUNDS specifies
the number of times to hash the input with the specified algorithm.
This defaults to 1.
:param app: Flask application object
'''
self.algorithm = app.config.get('HASHING_METHOD', 'sha256')
if self.algorithm not in algs:
raise ValueError('{} not one of {}'.format(self.algorithm, algs))
self.rounds = app.config.get('HASHING_ROUNDS', 1)
if not isinstance(self.rounds, int):
raise TypeError('HASHING_ROUNDS must be type int') | python | def init_app(self, app):
'''Initializes the Flask application with this extension. It grabs
the necessary configuration values from ``app.config``, those being
HASHING_METHOD and HASHING_ROUNDS. HASHING_METHOD defaults to ``sha256``
but can be any one of ``hashlib.algorithms``. HASHING_ROUNDS specifies
the number of times to hash the input with the specified algorithm.
This defaults to 1.
:param app: Flask application object
'''
self.algorithm = app.config.get('HASHING_METHOD', 'sha256')
if self.algorithm not in algs:
raise ValueError('{} not one of {}'.format(self.algorithm, algs))
self.rounds = app.config.get('HASHING_ROUNDS', 1)
if not isinstance(self.rounds, int):
raise TypeError('HASHING_ROUNDS must be type int') | [
"def",
"init_app",
"(",
"self",
",",
"app",
")",
":",
"self",
".",
"algorithm",
"=",
"app",
".",
"config",
".",
"get",
"(",
"'HASHING_METHOD'",
",",
"'sha256'",
")",
"if",
"self",
".",
"algorithm",
"not",
"in",
"algs",
":",
"raise",
"ValueError",
"(",
"'{} not one of {}'",
".",
"format",
"(",
"self",
".",
"algorithm",
",",
"algs",
")",
")",
"self",
".",
"rounds",
"=",
"app",
".",
"config",
".",
"get",
"(",
"'HASHING_ROUNDS'",
",",
"1",
")",
"if",
"not",
"isinstance",
"(",
"self",
".",
"rounds",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"'HASHING_ROUNDS must be type int'",
")"
] | Initializes the Flask application with this extension. It grabs
the necessary configuration values from ``app.config``, those being
HASHING_METHOD and HASHING_ROUNDS. HASHING_METHOD defaults to ``sha256``
but can be any one of ``hashlib.algorithms``. HASHING_ROUNDS specifies
the number of times to hash the input with the specified algorithm.
This defaults to 1.
:param app: Flask application object | [
"Initializes",
"the",
"Flask",
"application",
"with",
"this",
"extension",
".",
"It",
"grabs",
"the",
"necessary",
"configuration",
"values",
"from",
"app",
".",
"config",
"those",
"being",
"HASHING_METHOD",
"and",
"HASHING_ROUNDS",
".",
"HASHING_METHOD",
"defaults",
"to",
"sha256",
"but",
"can",
"be",
"any",
"one",
"of",
"hashlib",
".",
"algorithms",
".",
"HASHING_ROUNDS",
"specifies",
"the",
"number",
"of",
"times",
"to",
"hash",
"the",
"input",
"with",
"the",
"specified",
"algorithm",
".",
"This",
"defaults",
"to",
"1",
"."
] | e2cc8526569f63362e2d79bea49c4809d4416c8a | https://github.com/ThaWeatherman/flask-hashing/blob/e2cc8526569f63362e2d79bea49c4809d4416c8a/flask_hashing.py#L62-L77 | train |
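The two configuration keys read by `init_app` above are plain `app.config` entries; a minimal sketch, assuming the usual extension pattern where constructing `Hashing(app)` triggers `init_app`:

from flask import Flask
from flask_hashing import Hashing

app = Flask(__name__)
app.config["HASHING_METHOD"] = "sha512"  # any name in hashlib.algorithms
app.config["HASHING_ROUNDS"] = 5         # must be an int
hashing = Hashing(app)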
ThaWeatherman/flask-hashing | flask_hashing.py | Hashing.hash_value | def hash_value(self, value, salt=''):
'''Hashes the specified value combined with the specified salt.
The hash is done HASHING_ROUNDS times as specified by the application
configuration.
An example usage of :class:``hash_value`` would be::
val_hash = hashing.hash_value('mysecretdata', salt='abcd')
# save to a db or check against known hash
:param value: The value we want hashed
:param salt: The salt to use when generating the hash of ``value``. Default is ''.
:return: The resulting hash as a string
:rtype: str
'''
def hashit(value, salt):
h = hashlib.new(self.algorithm)
tgt = salt+value
h.update(tgt)
return h.hexdigest()
def fix_unicode(value):
if VER < 3 and isinstance(value, unicode):
value = str(value)
elif VER >= 3 and isinstance(value, str):
value = str.encode(value)
return value
salt = fix_unicode(salt)
for i in range(self.rounds):
value = fix_unicode(value)
value = hashit(value, salt)
return value | python | def hash_value(self, value, salt=''):
'''Hashes the specified value combined with the specified salt.
The hash is done HASHING_ROUNDS times as specified by the application
configuration.
An example usage of :class:``hash_value`` would be::
val_hash = hashing.hash_value('mysecretdata', salt='abcd')
# save to a db or check against known hash
:param value: The value we want hashed
:param salt: The salt to use when generating the hash of ``value``. Default is ''.
:return: The resulting hash as a string
:rtype: str
'''
def hashit(value, salt):
h = hashlib.new(self.algorithm)
tgt = salt+value
h.update(tgt)
return h.hexdigest()
def fix_unicode(value):
if VER < 3 and isinstance(value, unicode):
value = str(value)
elif VER >= 3 and isinstance(value, str):
value = str.encode(value)
return value
salt = fix_unicode(salt)
for i in range(self.rounds):
value = fix_unicode(value)
value = hashit(value, salt)
return value | [
"def",
"hash_value",
"(",
"self",
",",
"value",
",",
"salt",
"=",
"''",
")",
":",
"def",
"hashit",
"(",
"value",
",",
"salt",
")",
":",
"h",
"=",
"hashlib",
".",
"new",
"(",
"self",
".",
"algorithm",
")",
"tgt",
"=",
"salt",
"+",
"value",
"h",
".",
"update",
"(",
"tgt",
")",
"return",
"h",
".",
"hexdigest",
"(",
")",
"def",
"fix_unicode",
"(",
"value",
")",
":",
"if",
"VER",
"<",
"3",
"and",
"isinstance",
"(",
"value",
",",
"unicode",
")",
":",
"value",
"=",
"str",
"(",
"value",
")",
"elif",
"VER",
">=",
"3",
"and",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"value",
"=",
"str",
".",
"encode",
"(",
"value",
")",
"return",
"value",
"salt",
"=",
"fix_unicode",
"(",
"salt",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"rounds",
")",
":",
"value",
"=",
"fix_unicode",
"(",
"value",
")",
"value",
"=",
"hashit",
"(",
"value",
",",
"salt",
")",
"return",
"value"
] | Hashes the specified value combined with the specified salt.
The hash is done HASHING_ROUNDS times as specified by the application
configuration.
An example usage of :class:``hash_value`` would be::
val_hash = hashing.hash_value('mysecretdata', salt='abcd')
# save to a db or check against known hash
:param value: The value we want hashed
:param salt: The salt to use when generating the hash of ``value``. Default is ''.
:return: The resulting hash as a string
:rtype: str | [
"Hashes",
"the",
"specified",
"value",
"combined",
"with",
"the",
"specified",
"salt",
".",
"The",
"hash",
"is",
"done",
"HASHING_ROUNDS",
"times",
"as",
"specified",
"by",
"the",
"application",
"configuration",
"."
] | e2cc8526569f63362e2d79bea49c4809d4416c8a | https://github.com/ThaWeatherman/flask-hashing/blob/e2cc8526569f63362e2d79bea49c4809d4416c8a/flask_hashing.py#L79-L111 | train |
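Each round of hash_value feeds the previous hex digest back in as the new value, always prepending the same salt. A standalone sketch of the equivalent computation with plain hashlib, assuming Python 3 text inputs:

import hashlib

def manual_hash(value, salt='', algorithm='sha256', rounds=1):
    # Mirrors hash_value: every round hashes salt + current value and
    # replaces value with the resulting hex digest string.
    for _ in range(rounds):
        digest = hashlib.new(algorithm)
        digest.update((salt + value).encode('utf-8'))
        value = digest.hexdigest()
    return value

assert manual_hash('mysecretdata', salt='abcd') == \
    hashlib.sha256(b'abcdmysecretdata').hexdigest()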
ThaWeatherman/flask-hashing | flask_hashing.py | Hashing.check_value | def check_value(self, value_hash, value, salt=''):
'''Checks the specified hash value against the hash of the provided
salt and value.
An example usage of :class:`check_value` would be::
val_hash = hashing.hash_value('mysecretdata', salt='abcd')
if hashing.check_value(val_hash, 'mysecretdata', salt='abcd'):
# do something special
:param value_hash: The hash value to check against
:param value: The value we want hashed to compare
:param salt: The salt to use when generating the hash of ``value``. Default is ''.
:return: True if equal, False otherwise
:rtype: bool
'''
h = self.hash_value(value, salt=salt)
return h == value_hash | python | def check_value(self, value_hash, value, salt=''):
'''Checks the specified hash value against the hash of the provided
salt and value.
An example usage of :class:`check_value` would be::
val_hash = hashing.hash_value('mysecretdata', salt='abcd')
if hashing.check_value(val_hash, 'mysecretdata', salt='abcd'):
# do something special
:param value_hash: The hash value to check against
:param value: The value we want hashed to compare
:param salt: The salt to use when generating the hash of ``value``. Default is ''.
:return: True if equal, False otherwise
:rtype: bool
'''
h = self.hash_value(value, salt=salt)
return h == value_hash | [
"def",
"check_value",
"(",
"self",
",",
"value_hash",
",",
"value",
",",
"salt",
"=",
"''",
")",
":",
"h",
"=",
"self",
".",
"hash_value",
"(",
"value",
",",
"salt",
"=",
"salt",
")",
"return",
"h",
"==",
"value_hash"
] | Checks the specified hash value against the hash of the provided
salt and value.
An example usage of :class:`check_value` would be::
val_hash = hashing.hash_value('mysecretdata', salt='abcd')
if hashing.check_value(val_hash, 'mysecretdata', salt='abcd'):
# do something special
:param value_hash: The hash value to check against
:param value: The value we want hashed to compare
:param salt: The salt to use when generating the hash of ``value``. Default is ''.
:return: True if equal, False otherwise
:rtype: bool | [
"Checks",
"the",
"specified",
"hash",
"value",
"against",
"the",
"hash",
"of",
"the",
"provided",
"salt",
"and",
"value",
"."
] | e2cc8526569f63362e2d79bea49c4809d4416c8a | https://github.com/ThaWeatherman/flask-hashing/blob/e2cc8526569f63362e2d79bea49c4809d4416c8a/flask_hashing.py#L113-L130 | train |
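check_value recomputes the hash and compares with ==, which can leak timing information when the hash being checked comes from an attacker. A hardened variant is a one-line change; hashing here is assumed to be an initialized Hashing instance:

import hmac

def check_value_constant_time(hashing, value_hash, value, salt=''):
    # hmac.compare_digest compares in constant time for equal-length
    # ASCII strings such as hex digests.
    computed = hashing.hash_value(value, salt=salt)
    return hmac.compare_digest(computed, value_hash)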
wistful/pympris | pympris/TrackList.py | TrackList.AddTrack | def AddTrack(self, uri, after_track, set_as_current):
"""Adds a URI in the TrackList.
:param str uri: The uri of the item to add.
:param str after_track: The identifier of the track
after which the new item should be inserted.
:param bool set_as_current: Whether the newly inserted track
should be considered as the current track.
"""
self.iface.AddTrack(uri,
convert2dbus(after_track, 'o'),
convert2dbus(set_as_current, 'b')) | python | def AddTrack(self, uri, after_track, set_as_current):
"""Adds a URI in the TrackList.
:param str uri: The uri of the item to add.
:param str after_track: The identifier of the track
after which the new item should be inserted.
:param bool set_as_current: Whether the newly inserted track
should be considered as the current track.
"""
self.iface.AddTrack(uri,
convert2dbus(after_track, 'o'),
convert2dbus(set_as_current, 'b')) | [
"def",
"AddTrack",
"(",
"self",
",",
"uri",
",",
"after_track",
",",
"set_as_current",
")",
":",
"self",
".",
"iface",
".",
"AddTrack",
"(",
"uri",
",",
"convert2dbus",
"(",
"after_track",
",",
"'o'",
")",
",",
"convert2dbus",
"(",
"set_as_current",
",",
"'b'",
")",
")"
] | Adds a URI in the TrackList.
:param str uri: The uri of the item to add.
:param str after_track: The identifier of the track
after which the new item should be inserted.
:param bool set_as_current: Whether the newly inserted track
should be considered as the current track. | [
"Adds",
"a",
"URI",
"in",
"the",
"TrackList",
"."
] | 4bd64a1f0d151f2adfc392ab34fd9b38894786cb | https://github.com/wistful/pympris/blob/4bd64a1f0d151f2adfc392ab34fd9b38894786cb/pympris/TrackList.py#L48-L59 | train |
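AddTrack converts its arguments to D-Bus types before calling the MPRIS interface. A usage sketch; how the TrackList is reached through pympris (available_players, MediaPlayer, the track_list attribute) is an assumption here, while the NoTrack sentinel path comes from the MPRIS2 specification:

import dbus
from dbus.mainloop.glib import DBusGMainLoop

import pympris

DBusGMainLoop(set_as_default=True)
bus = dbus.SessionBus()

player_id = list(pympris.available_players())[0]  # assumed discovery helper
mp = pympris.MediaPlayer(player_id, bus)          # assumed entry point

# Insert at the front of the list and make it the current track.
mp.track_list.AddTrack('file:///home/user/song.mp3',
                       '/org/mpris/MediaPlayer2/TrackList/NoTrack',
                       True)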
robinandeer/puzzle | puzzle/models/sql/genelist.py | GeneList.delete_gene | def delete_gene(self, *gene_ids):
"""Delete one or more gene ids form the list."""
self.gene_ids = [gene_id for gene_id in self.gene_ids
if gene_id not in gene_ids] | python | def delete_gene(self, *gene_ids):
"""Delete one or more gene ids form the list."""
self.gene_ids = [gene_id for gene_id in self.gene_ids
if gene_id not in gene_ids] | [
"def",
"delete_gene",
"(",
"self",
",",
"*",
"gene_ids",
")",
":",
"self",
".",
"gene_ids",
"=",
"[",
"gene_id",
"for",
"gene_id",
"in",
"self",
".",
"gene_ids",
"if",
"gene_id",
"not",
"in",
"gene_ids",
"]"
] | Delete one or more gene ids from the list. | [
"Delete",
"one",
"or",
"more",
"gene",
"ids",
"form",
"the",
"list",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/sql/genelist.py#L44-L47 | train |
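Because delete_gene takes *gene_ids, several ids can be dropped in one call and the remaining order is preserved. A standalone mimic of the behaviour (the real GeneList lives under models/sql, so changes presumably persist through a database session):

class GeneListSketch:
    def __init__(self, gene_ids):
        self.gene_ids = list(gene_ids)

    def delete_gene(self, *gene_ids):
        # Same comprehension as the model method above.
        self.gene_ids = [g for g in self.gene_ids if g not in gene_ids]

gl = GeneListSketch(['ADK', 'SKI', 'TTN'])
gl.delete_gene('ADK', 'TTN')
assert gl.gene_ids == ['SKI']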
jalmeroth/pymusiccast | pymusiccast/__init__.py | McDevice.healthy_update_timer | def healthy_update_timer(self):
"""Check state of update timer."""
state = None
if self.update_status_timer and self.update_status_timer.is_alive():
_LOGGER.debug("Timer: healthy")
state = True
else:
_LOGGER.debug("Timer: not healthy")
state = False
return state | python | def healthy_update_timer(self):
"""Check state of update timer."""
state = None
if self.update_status_timer and self.update_status_timer.is_alive():
_LOGGER.debug("Timer: healthy")
state = True
else:
_LOGGER.debug("Timer: not healthy")
state = False
return state | [
"def",
"healthy_update_timer",
"(",
"self",
")",
":",
"state",
"=",
"None",
"if",
"self",
".",
"update_status_timer",
"and",
"self",
".",
"update_status_timer",
".",
"is_alive",
"(",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Timer: healthy\"",
")",
"state",
"=",
"True",
"else",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Timer: not healthy\"",
")",
"state",
"=",
"False",
"return",
"state"
] | Check state of update timer. | [
"Check",
"state",
"of",
"update",
"timer",
"."
] | 616379ae22d6b518c61042d58be6d18a46242168 | https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/__init__.py#L71-L82 | train |
jalmeroth/pymusiccast | pymusiccast/__init__.py | McDevice.initialize | def initialize(self):
"""initialize the object"""
self.network_status = self.get_network_status()
self.name = self.network_status.get('network_name', 'Unknown')
self.location_info = self.get_location_info()
self.device_info = self.get_device_info()
self.device_id = (
self.device_info.get('device_id')
if self.device_info else "Unknown")
self.initialize_socket()
self.initialize_worker()
self.initialize_zones() | python | def initialize(self):
"""initialize the object"""
self.network_status = self.get_network_status()
self.name = self.network_status.get('network_name', 'Unknown')
self.location_info = self.get_location_info()
self.device_info = self.get_device_info()
self.device_id = (
self.device_info.get('device_id')
if self.device_info else "Unknown")
self.initialize_socket()
self.initialize_worker()
self.initialize_zones() | [
"def",
"initialize",
"(",
"self",
")",
":",
"self",
".",
"network_status",
"=",
"self",
".",
"get_network_status",
"(",
")",
"self",
".",
"name",
"=",
"self",
".",
"network_status",
".",
"get",
"(",
"'network_name'",
",",
"'Unknown'",
")",
"self",
".",
"location_info",
"=",
"self",
".",
"get_location_info",
"(",
")",
"self",
".",
"device_info",
"=",
"self",
".",
"get_device_info",
"(",
")",
"self",
".",
"device_id",
"=",
"(",
"self",
".",
"device_info",
".",
"get",
"(",
"'device_id'",
")",
"if",
"self",
".",
"device_info",
"else",
"\"Unknown\"",
")",
"self",
".",
"initialize_socket",
"(",
")",
"self",
".",
"initialize_worker",
"(",
")",
"self",
".",
"initialize_zones",
"(",
")"
] | initialize the object | [
"initialize",
"the",
"object"
] | 616379ae22d6b518c61042d58be6d18a46242168 | https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/__init__.py#L84-L95 | train |
jalmeroth/pymusiccast | pymusiccast/__init__.py | McDevice.initialize_socket | def initialize_socket(self):
"""initialize the socket"""
try:
_LOGGER.debug("Trying to open socket.")
self._socket = socket.socket(
socket.AF_INET, # IPv4
socket.SOCK_DGRAM # UDP
)
self._socket.bind(('', self._udp_port))
except socket.error as err:
raise err
else:
_LOGGER.debug("Socket open.")
socket_thread = threading.Thread(
name="SocketThread", target=socket_worker,
args=(self._socket, self.messages,))
socket_thread.setDaemon(True)
socket_thread.start() | python | def initialize_socket(self):
"""initialize the socket"""
try:
_LOGGER.debug("Trying to open socket.")
self._socket = socket.socket(
socket.AF_INET, # IPv4
socket.SOCK_DGRAM # UDP
)
self._socket.bind(('', self._udp_port))
except socket.error as err:
raise err
else:
_LOGGER.debug("Socket open.")
socket_thread = threading.Thread(
name="SocketThread", target=socket_worker,
args=(self._socket, self.messages,))
socket_thread.setDaemon(True)
socket_thread.start() | [
"def",
"initialize_socket",
"(",
"self",
")",
":",
"try",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Trying to open socket.\"",
")",
"self",
".",
"_socket",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"# IPv4",
"socket",
".",
"SOCK_DGRAM",
"# UDP",
")",
"self",
".",
"_socket",
".",
"bind",
"(",
"(",
"''",
",",
"self",
".",
"_udp_port",
")",
")",
"except",
"socket",
".",
"error",
"as",
"err",
":",
"raise",
"err",
"else",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Socket open.\"",
")",
"socket_thread",
"=",
"threading",
".",
"Thread",
"(",
"name",
"=",
"\"SocketThread\"",
",",
"target",
"=",
"socket_worker",
",",
"args",
"=",
"(",
"self",
".",
"_socket",
",",
"self",
".",
"messages",
",",
")",
")",
"socket_thread",
".",
"setDaemon",
"(",
"True",
")",
"socket_thread",
".",
"start",
"(",
")"
] | initialize the socket | [
"initialize",
"the",
"socket"
] | 616379ae22d6b518c61042d58be6d18a46242168 | https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/__init__.py#L97-L114 | train |
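initialize_socket binds a UDP socket and hands it, together with the shared message queue, to a daemon thread running socket_worker, which is not part of this record. A hypothetical sketch of what such a worker could look like; the JSON payload format is an assumption:

import json

def socket_worker_sketch(sock, messages):
    # Hypothetical stand-in for pymusiccast's socket_worker: block on the
    # UDP socket and push each decoded event onto the shared queue.
    while True:
        data, _addr = sock.recvfrom(4096)
        try:
            messages.put(json.loads(data.decode('utf-8')))
        except ValueError:
            pass  # skip frames that are not valid JSON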
jalmeroth/pymusiccast | pymusiccast/__init__.py | McDevice.initialize_worker | def initialize_worker(self):
"""initialize the worker thread"""
worker_thread = threading.Thread(
name="WorkerThread", target=message_worker, args=(self,))
worker_thread.setDaemon(True)
worker_thread.start() | python | def initialize_worker(self):
"""initialize the worker thread"""
worker_thread = threading.Thread(
name="WorkerThread", target=message_worker, args=(self,))
worker_thread.setDaemon(True)
worker_thread.start() | [
"def",
"initialize_worker",
"(",
"self",
")",
":",
"worker_thread",
"=",
"threading",
".",
"Thread",
"(",
"name",
"=",
"\"WorkerThread\"",
",",
"target",
"=",
"message_worker",
",",
"args",
"=",
"(",
"self",
",",
")",
")",
"worker_thread",
".",
"setDaemon",
"(",
"True",
")",
"worker_thread",
".",
"start",
"(",
")"
] | initialize the worker thread | [
"initialize",
"the",
"worker",
"thread"
] | 616379ae22d6b518c61042d58be6d18a46242168 | https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/__init__.py#L116-L121 | train |
jalmeroth/pymusiccast | pymusiccast/__init__.py | McDevice.initialize_zones | def initialize_zones(self):
"""initialize receiver zones"""
zone_list = self.location_info.get('zone_list', {'main': True})
for zone_id in zone_list:
if zone_list[zone_id]: # Location setup is valid
self.zones[zone_id] = Zone(self, zone_id=zone_id)
else: # Location setup is not valid
_LOGGER.debug("Ignoring zone: %s", zone_id) | python | def initialize_zones(self):
"""initialize receiver zones"""
zone_list = self.location_info.get('zone_list', {'main': True})
for zone_id in zone_list:
if zone_list[zone_id]: # Location setup is valid
self.zones[zone_id] = Zone(self, zone_id=zone_id)
else: # Location setup is not valid
_LOGGER.debug("Ignoring zone: %s", zone_id) | [
"def",
"initialize_zones",
"(",
"self",
")",
":",
"zone_list",
"=",
"self",
".",
"location_info",
".",
"get",
"(",
"'zone_list'",
",",
"{",
"'main'",
":",
"True",
"}",
")",
"for",
"zone_id",
"in",
"zone_list",
":",
"if",
"zone_list",
"[",
"zone_id",
"]",
":",
"# Location setup is valid",
"self",
".",
"zones",
"[",
"zone_id",
"]",
"=",
"Zone",
"(",
"self",
",",
"zone_id",
"=",
"zone_id",
")",
"else",
":",
"# Location setup is not valid",
"_LOGGER",
".",
"debug",
"(",
"\"Ignoring zone: %s\"",
",",
"zone_id",
")"
] | initialize receiver zones | [
"initialize",
"receiver",
"zones"
] | 616379ae22d6b518c61042d58be6d18a46242168 | https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/__init__.py#L123-L131 | train |
jalmeroth/pymusiccast | pymusiccast/__init__.py | McDevice.handle_status | def handle_status(self):
"""Handle status from device"""
status = self.get_status()
if status:
# Update main-zone
self.zones['main'].update_status(status) | python | def handle_status(self):
"""Handle status from device"""
status = self.get_status()
if status:
# Update main-zone
self.zones['main'].update_status(status) | [
"def",
"handle_status",
"(",
"self",
")",
":",
"status",
"=",
"self",
".",
"get_status",
"(",
")",
"if",
"status",
":",
"# Update main-zone",
"self",
".",
"zones",
"[",
"'main'",
"]",
".",
"update_status",
"(",
"status",
")"
] | Handle status from device | [
"Handle",
"status",
"from",
"device"
] | 616379ae22d6b518c61042d58be6d18a46242168 | https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/__init__.py#L162-L168 | train |
jalmeroth/pymusiccast | pymusiccast/__init__.py | McDevice.handle_netusb | def handle_netusb(self, message):
"""Handles 'netusb' in message"""
# _LOGGER.debug("message: {}".format(message))
needs_update = 0
if self._yamaha:
if 'play_info_updated' in message:
play_info = self.get_play_info()
# _LOGGER.debug(play_info)
if play_info:
new_media_status = MediaStatus(play_info, self._ip_address)
if self._yamaha.media_status != new_media_status:
# we need to send an update upwards
self._yamaha.new_media_status(new_media_status)
needs_update += 1
playback = play_info.get('playback')
# _LOGGER.debug("Playback: {}".format(playback))
if playback == "play":
new_status = STATE_PLAYING
elif playback == "stop":
new_status = STATE_IDLE
elif playback == "pause":
new_status = STATE_PAUSED
else:
new_status = STATE_UNKNOWN
if self._yamaha.status is not new_status:
_LOGGER.debug("playback: %s", new_status)
self._yamaha.status = new_status
needs_update += 1
return needs_update | python | def handle_netusb(self, message):
"""Handles 'netusb' in message"""
# _LOGGER.debug("message: {}".format(message))
needs_update = 0
if self._yamaha:
if 'play_info_updated' in message:
play_info = self.get_play_info()
# _LOGGER.debug(play_info)
if play_info:
new_media_status = MediaStatus(play_info, self._ip_address)
if self._yamaha.media_status != new_media_status:
# we need to send an update upwards
self._yamaha.new_media_status(new_media_status)
needs_update += 1
playback = play_info.get('playback')
# _LOGGER.debug("Playback: {}".format(playback))
if playback == "play":
new_status = STATE_PLAYING
elif playback == "stop":
new_status = STATE_IDLE
elif playback == "pause":
new_status = STATE_PAUSED
else:
new_status = STATE_UNKNOWN
if self._yamaha.status is not new_status:
_LOGGER.debug("playback: %s", new_status)
self._yamaha.status = new_status
needs_update += 1
return needs_update | [
"def",
"handle_netusb",
"(",
"self",
",",
"message",
")",
":",
"# _LOGGER.debug(\"message: {}\".format(message))",
"needs_update",
"=",
"0",
"if",
"self",
".",
"_yamaha",
":",
"if",
"'play_info_updated'",
"in",
"message",
":",
"play_info",
"=",
"self",
".",
"get_play_info",
"(",
")",
"# _LOGGER.debug(play_info)",
"if",
"play_info",
":",
"new_media_status",
"=",
"MediaStatus",
"(",
"play_info",
",",
"self",
".",
"_ip_address",
")",
"if",
"self",
".",
"_yamaha",
".",
"media_status",
"!=",
"new_media_status",
":",
"# we need to send an update upwards",
"self",
".",
"_yamaha",
".",
"new_media_status",
"(",
"new_media_status",
")",
"needs_update",
"+=",
"1",
"playback",
"=",
"play_info",
".",
"get",
"(",
"'playback'",
")",
"# _LOGGER.debug(\"Playback: {}\".format(playback))",
"if",
"playback",
"==",
"\"play\"",
":",
"new_status",
"=",
"STATE_PLAYING",
"elif",
"playback",
"==",
"\"stop\"",
":",
"new_status",
"=",
"STATE_IDLE",
"elif",
"playback",
"==",
"\"pause\"",
":",
"new_status",
"=",
"STATE_PAUSED",
"else",
":",
"new_status",
"=",
"STATE_UNKNOWN",
"if",
"self",
".",
"_yamaha",
".",
"status",
"is",
"not",
"new_status",
":",
"_LOGGER",
".",
"debug",
"(",
"\"playback: %s\"",
",",
"new_status",
")",
"self",
".",
"_yamaha",
".",
"status",
"=",
"new_status",
"needs_update",
"+=",
"1",
"return",
"needs_update"
] | Handles 'netusb' in message | [
"Handles",
"netusb",
"in",
"message"
] | 616379ae22d6b518c61042d58be6d18a46242168 | https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/__init__.py#L170-L203 | train |
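The playback branch in handle_netusb is a fixed string-to-constant mapping, which a dictionary expresses in one lookup. A sketch with placeholder constants (the real STATE_* values are presumably imported elsewhere in the module):

STATE_PLAYING, STATE_IDLE, STATE_PAUSED, STATE_UNKNOWN = (
    'playing', 'idle', 'paused', 'unknown')  # placeholder values

PLAYBACK_STATES = {'play': STATE_PLAYING,
                   'stop': STATE_IDLE,
                   'pause': STATE_PAUSED}

def playback_to_state(playback):
    # Same result as the if/elif chain in handle_netusb above.
    return PLAYBACK_STATES.get(playback, STATE_UNKNOWN)

assert playback_to_state('pause') == STATE_PAUSED

Worth noting that the original compares states with "is not", which only works while the constants are interned strings; != is the safer equality test.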
jalmeroth/pymusiccast | pymusiccast/__init__.py | McDevice.handle_features | def handle_features(self, device_features):
"""Handles features of the device"""
self.device_features = device_features
if device_features and 'zone' in device_features:
for zone in device_features['zone']:
zone_id = zone.get('id')
if zone_id in self.zones:
_LOGGER.debug("handle_features: %s", zone_id)
input_list = zone.get('input_list', [])
input_list.sort()
self.zones[zone_id].source_list = input_list | python | def handle_features(self, device_features):
"""Handles features of the device"""
self.device_features = device_features
if device_features and 'zone' in device_features:
for zone in device_features['zone']:
zone_id = zone.get('id')
if zone_id in self.zones:
_LOGGER.debug("handle_features: %s", zone_id)
input_list = zone.get('input_list', [])
input_list.sort()
self.zones[zone_id].source_list = input_list | [
"def",
"handle_features",
"(",
"self",
",",
"device_features",
")",
":",
"self",
".",
"device_features",
"=",
"device_features",
"if",
"device_features",
"and",
"'zone'",
"in",
"device_features",
":",
"for",
"zone",
"in",
"device_features",
"[",
"'zone'",
"]",
":",
"zone_id",
"=",
"zone",
".",
"get",
"(",
"'id'",
")",
"if",
"zone_id",
"in",
"self",
".",
"zones",
":",
"_LOGGER",
".",
"debug",
"(",
"\"handle_features: %s\"",
",",
"zone_id",
")",
"input_list",
"=",
"zone",
".",
"get",
"(",
"'input_list'",
",",
"[",
"]",
")",
"input_list",
".",
"sort",
"(",
")",
"self",
".",
"zones",
"[",
"zone_id",
"]",
".",
"source_list",
"=",
"input_list"
] | Handles features of the device | [
"Handles",
"features",
"of",
"the",
"device"
] | 616379ae22d6b518c61042d58be6d18a46242168 | https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/__init__.py#L205-L217 | train |
jalmeroth/pymusiccast | pymusiccast/__init__.py | McDevice.handle_event | def handle_event(self, message):
"""Dispatch all event messages"""
# _LOGGER.debug(message)
needs_update = 0
for zone in self.zones:
if zone in message:
_LOGGER.debug("Received message for zone: %s", zone)
self.zones[zone].update_status(message[zone])
if 'netusb' in message:
needs_update += self.handle_netusb(message['netusb'])
if needs_update > 0:
_LOGGER.debug("needs_update: %d", needs_update)
self.update_hass() | python | def handle_event(self, message):
"""Dispatch all event messages"""
# _LOGGER.debug(message)
needs_update = 0
for zone in self.zones:
if zone in message:
_LOGGER.debug("Received message for zone: %s", zone)
self.zones[zone].update_status(message[zone])
if 'netusb' in message:
needs_update += self.handle_netusb(message['netusb'])
if needs_update > 0:
_LOGGER.debug("needs_update: %d", needs_update)
self.update_hass() | [
"def",
"handle_event",
"(",
"self",
",",
"message",
")",
":",
"# _LOGGER.debug(message)",
"needs_update",
"=",
"0",
"for",
"zone",
"in",
"self",
".",
"zones",
":",
"if",
"zone",
"in",
"message",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Received message for zone: %s\"",
",",
"zone",
")",
"self",
".",
"zones",
"[",
"zone",
"]",
".",
"update_status",
"(",
"message",
"[",
"zone",
"]",
")",
"if",
"'netusb'",
"in",
"message",
":",
"needs_update",
"+=",
"self",
".",
"handle_netusb",
"(",
"message",
"[",
"'netusb'",
"]",
")",
"if",
"needs_update",
">",
"0",
":",
"_LOGGER",
".",
"debug",
"(",
"\"needs_update: %d\"",
",",
"needs_update",
")",
"self",
".",
"update_hass",
"(",
")"
] | Dispatch all event messages | [
"Dispatch",
"all",
"event",
"messages"
] | 616379ae22d6b518c61042d58be6d18a46242168 | https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/__init__.py#L219-L233 | train |
jalmeroth/pymusiccast | pymusiccast/__init__.py | McDevice.update_status | def update_status(self, reset=False):
"""Update device status."""
if self.healthy_update_timer and not reset:
return
# get device features only once
if not self.device_features:
self.handle_features(self.get_features())
# Get status from device to register/keep alive UDP
self.handle_status()
# Schedule next execution
self.setup_update_timer() | python | def update_status(self, reset=False):
"""Update device status."""
if self.healthy_update_timer and not reset:
return
# get device features only once
if not self.device_features:
self.handle_features(self.get_features())
# Get status from device to register/keep alive UDP
self.handle_status()
# Schedule next execution
self.setup_update_timer() | [
"def",
"update_status",
"(",
"self",
",",
"reset",
"=",
"False",
")",
":",
"if",
"self",
".",
"healthy_update_timer",
"and",
"not",
"reset",
":",
"return",
"# get device features only once",
"if",
"not",
"self",
".",
"device_features",
":",
"self",
".",
"handle_features",
"(",
"self",
".",
"get_features",
"(",
")",
")",
"# Get status from device to register/keep alive UDP",
"self",
".",
"handle_status",
"(",
")",
"# Schedule next execution",
"self",
".",
"setup_update_timer",
"(",
")"
] | Update device status. | [
"Update",
"device",
"status",
"."
] | 616379ae22d6b518c61042d58be6d18a46242168 | https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/__init__.py#L239-L252 | train |
jalmeroth/pymusiccast | pymusiccast/__init__.py | McDevice.setup_update_timer | def setup_update_timer(self, reset=False):
"""Schedule a Timer Thread."""
_LOGGER.debug("Timer: firing again in %d seconds", self._interval)
self.update_status_timer = threading.Timer(
self._interval, self.update_status, [True])
self.update_status_timer.setDaemon(True)
self.update_status_timer.start() | python | def setup_update_timer(self, reset=False):
"""Schedule a Timer Thread."""
_LOGGER.debug("Timer: firing again in %d seconds", self._interval)
self.update_status_timer = threading.Timer(
self._interval, self.update_status, [True])
self.update_status_timer.setDaemon(True)
self.update_status_timer.start() | [
"def",
"setup_update_timer",
"(",
"self",
",",
"reset",
"=",
"False",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Timer: firing again in %d seconds\"",
",",
"self",
".",
"_interval",
")",
"self",
".",
"update_status_timer",
"=",
"threading",
".",
"Timer",
"(",
"self",
".",
"_interval",
",",
"self",
".",
"update_status",
",",
"[",
"True",
"]",
")",
"self",
".",
"update_status_timer",
".",
"setDaemon",
"(",
"True",
")",
"self",
".",
"update_status_timer",
".",
"start",
"(",
")"
] | Schedule a Timer Thread. | [
"Schedule",
"a",
"Timer",
"Thread",
"."
] | 616379ae22d6b518c61042d58be6d18a46242168 | https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/__init__.py#L254-L260 | train |
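update_status and setup_update_timer together form a self-rescheduling keep-alive: the timer fires update_status(True), which does its work and re-arms the timer. A minimal standalone sketch of the pattern; the interval and task are illustrative:

import threading

class KeepAlive:
    def __init__(self, interval, task):
        self._interval = interval
        self._task = task
        self._timer = None

    def _run(self):
        self._task()
        self.start()  # re-arm, mirroring setup_update_timer()

    def start(self):
        self._timer = threading.Timer(self._interval, self._run)
        self._timer.daemon = True  # modern spelling of setDaemon(True)
        self._timer.start()

keepalive = KeepAlive(60, lambda: print('refreshing UDP registration'))
keepalive.start()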
jalmeroth/pymusiccast | pymusiccast/__init__.py | McDevice.set_playback | def set_playback(self, playback):
"""Send Playback command."""
req_url = ENDPOINTS["setPlayback"].format(self._ip_address)
params = {"playback": playback}
return request(req_url, params=params) | python | def set_playback(self, playback):
"""Send Playback command."""
req_url = ENDPOINTS["setPlayback"].format(self._ip_address)
params = {"playback": playback}
return request(req_url, params=params) | [
"def",
"set_playback",
"(",
"self",
",",
"playback",
")",
":",
"req_url",
"=",
"ENDPOINTS",
"[",
"\"setPlayback\"",
"]",
".",
"format",
"(",
"self",
".",
"_ip_address",
")",
"params",
"=",
"{",
"\"playback\"",
":",
"playback",
"}",
"return",
"request",
"(",
"req_url",
",",
"params",
"=",
"params",
")"
] | Send Playback command. | [
"Send",
"Playback",
"command",
"."
] | 616379ae22d6b518c61042d58be6d18a46242168 | https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/__init__.py#L272-L276 | train |
robinandeer/puzzle | puzzle/plugins/gemini/mixins/variant.py | VariantMixin.build_gemini_query | def build_gemini_query(self, query, extra_info):
"""Append sql to a gemini query
Args:
query(str): The gemini query
extra_info(str): The text that should be added
Return:
extended_query(str)
"""
if 'WHERE' in query:
return "{0} AND {1}".format(query, extra_info)
else:
return "{0} WHERE {1}".format(query, extra_info) | python | def build_gemini_query(self, query, extra_info):
"""Append sql to a gemini query
Args:
query(str): The gemini query
extra_info(str): The text that should be added
Return:
extended_query(str)
"""
if 'WHERE' in query:
return "{0} AND {1}".format(query, extra_info)
else:
return "{0} WHERE {1}".format(query, extra_info) | [
"def",
"build_gemini_query",
"(",
"self",
",",
"query",
",",
"extra_info",
")",
":",
"if",
"'WHERE'",
"in",
"query",
":",
"return",
"\"{0} AND {1}\"",
".",
"format",
"(",
"query",
",",
"extra_info",
")",
"else",
":",
"return",
"\"{0} WHERE {1}\"",
".",
"format",
"(",
"query",
",",
"extra_info",
")"
] | Append sql to a gemini query
Args:
query(str): The gemini query
extra_info(str): The text that should be added
Return:
extended_query(str) | [
"Append",
"sql",
"to",
"a",
"gemini",
"query"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/gemini/mixins/variant.py#L20-L33 | train |
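build_gemini_query lets callers stack filter clauses without tracking whether a WHERE already exists: the first clause gets WHERE, every later one gets AND. A standalone sketch of the same logic plus a chained call:

def build_gemini_query(query, extra_info):
    # First clause gets WHERE, subsequent clauses get AND.
    joiner = 'AND' if 'WHERE' in query else 'WHERE'
    return "{0} {1} {2}".format(query, joiner, extra_info)

q = "SELECT * from variants v"
q = build_gemini_query(q, "(v.cadd_scaled > 20)")
q = build_gemini_query(q, "(v.max_aaf_all < 0.01 or v.max_aaf_all is Null)")
# -> SELECT * from variants v WHERE (v.cadd_scaled > 20)
#    AND (v.max_aaf_all < 0.01 or v.max_aaf_all is Null)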
robinandeer/puzzle | puzzle/plugins/gemini/mixins/variant.py | VariantMixin.variants | def variants(self, case_id, skip=0, count=1000, filters=None):
"""Return count variants for a case.
This function needs to have different behaviours based on what is asked
for. It should always try to give minimal information back to improve
on speed. For example, if consequences are not asked for we will not
build all transcripts. If not sv variants we will not build sv
coordinates.
So the minimal case is to just show what is asked for in the variants
interface.
Args:
case_id (str): A gemini db
skip (int): Skip first variants
count (int): The number of variants to return
filters (dict): A dictionary with filters. Currently this will
look like: {
gene_list: [] (list of hgnc ids),
frequency: None (float),
cadd: None (float),
consequence: [] (list of consequences),
impact_severities: [] (list of severities),
genetic_models: [] (list of genetic models)
}
Returns:
puzzle.constants.Results : Named tuple with variants and
nr_of_variants
"""
filters = filters or {}
logger.debug("Looking for variants in {0}".format(case_id))
limit = count + skip
gemini_query = filters.get('gemini_query') or "SELECT * from variants v"
any_filter = False
if filters.get('frequency'):
frequency = filters['frequency']
extra_info = "(v.max_aaf_all < {0} or v.max_aaf_all is"\
" Null)".format(frequency)
gemini_query = self.build_gemini_query(gemini_query, extra_info)
if filters.get('cadd'):
cadd_score = filters['cadd']
extra_info = "(v.cadd_scaled > {0})".format(cadd_score)
gemini_query = self.build_gemini_query(gemini_query, extra_info)
if filters.get('gene_ids'):
gene_list = [gene_id.strip() for gene_id in filters['gene_ids']]
gene_string = "v.gene in ("
for index, gene_id in enumerate(gene_list):
if index == 0:
gene_string += "'{0}'".format(gene_id)
else:
gene_string += ", '{0}'".format(gene_id)
gene_string += ")"
gemini_query = self.build_gemini_query(gemini_query, gene_string)
if filters.get('range'):
chrom = filters['range']['chromosome']
if not chrom.startswith('chr'):
chrom = "chr{0}".format(chrom)
range_string = "v.chrom = '{0}' AND "\
"((v.start BETWEEN {1} AND {2}) OR "\
"(v.end BETWEEN {1} AND {2}))".format(
chrom,
filters['range']['start'],
filters['range']['end']
)
gemini_query = self.build_gemini_query(gemini_query, range_string)
filtered_variants = self._variants(
case_id=case_id,
gemini_query=gemini_query,
)
if filters.get('consequence'):
consequences = set(filters['consequence'])
filtered_variants = (variant for variant in filtered_variants if
set(variant.consequences).intersection(consequences))
if filters.get('impact_severities'):
severities = set([severity.strip()
for severity in filters['impact_severities']])
new_filtered_variants = []
filtered_variants = (variant for variant in filtered_variants if
set([variant.impact_severity]).intersection(severities))
if filters.get('sv_len'):
sv_len = int(filters['sv_len'])
filtered_variants = (variant for variant in filtered_variants if
variant.sv_len >= sv_len)
variants = []
for index, variant_obj in enumerate(filtered_variants):
if index >= skip:
if index < limit:
variants.append(variant_obj)
else:
break
return Results(variants, len(variants)) | python | def variants(self, case_id, skip=0, count=1000, filters=None):
"""Return count variants for a case.
This function needs to have different behaviours based on what is asked
for. It should always try to give minimal information back to improve
on speed. For example, if consequences are not asked for we will not
build all transcripts. If not sv variants we will not build sv
coordinates.
So the minimal case is to just show what is asked for in the variants
interface.
Args:
case_id (str): A gemini db
skip (int): Skip first variants
count (int): The number of variants to return
filters (dict): A dictionary with filters. Currently this will
look like: {
gene_list: [] (list of hgnc ids),
frequency: None (float),
cadd: None (float),
consequence: [] (list of consequences),
impact_severities: [] (list of severities),
genetic_models: [] (list of genetic models)
}
Returns:
puzzle.constants.Results : Named tuple with variants and
nr_of_variants
"""
filters = filters or {}
logger.debug("Looking for variants in {0}".format(case_id))
limit = count + skip
gemini_query = filters.get('gemini_query') or "SELECT * from variants v"
any_filter = False
if filters.get('frequency'):
frequency = filters['frequency']
extra_info = "(v.max_aaf_all < {0} or v.max_aaf_all is"\
" Null)".format(frequency)
gemini_query = self.build_gemini_query(gemini_query, extra_info)
if filters.get('cadd'):
cadd_score = filters['cadd']
extra_info = "(v.cadd_scaled > {0})".format(cadd_score)
gemini_query = self.build_gemini_query(gemini_query, extra_info)
if filters.get('gene_ids'):
gene_list = [gene_id.strip() for gene_id in filters['gene_ids']]
gene_string = "v.gene in ("
for index, gene_id in enumerate(gene_list):
if index == 0:
gene_string += "'{0}'".format(gene_id)
else:
gene_string += ", '{0}'".format(gene_id)
gene_string += ")"
gemini_query = self.build_gemini_query(gemini_query, gene_string)
if filters.get('range'):
chrom = filters['range']['chromosome']
if not chrom.startswith('chr'):
chrom = "chr{0}".format(chrom)
range_string = "v.chrom = '{0}' AND "\
"((v.start BETWEEN {1} AND {2}) OR "\
"(v.end BETWEEN {1} AND {2}))".format(
chrom,
filters['range']['start'],
filters['range']['end']
)
gemini_query = self.build_gemini_query(gemini_query, range_string)
filtered_variants = self._variants(
case_id=case_id,
gemini_query=gemini_query,
)
if filters.get('consequence'):
consequences = set(filters['consequence'])
filtered_variants = (variant for variant in filtered_variants if
set(variant.consequences).intersection(consequences))
if filters.get('impact_severities'):
severities = set([severity.strip()
for severity in filters['impact_severities']])
new_filtered_variants = []
filtered_variants = (variant for variant in filtered_variants if
set([variant.impact_severity]).intersection(severities))
if filters.get('sv_len'):
sv_len = int(filters['sv_len'])
filtered_variants = (variant for variant in filtered_variants if
variant.sv_len >= sv_len)
variants = []
for index, variant_obj in enumerate(filtered_variants):
if index >= skip:
if index < limit:
variants.append(variant_obj)
else:
break
return Results(variants, len(variants)) | [
"def",
"variants",
"(",
"self",
",",
"case_id",
",",
"skip",
"=",
"0",
",",
"count",
"=",
"1000",
",",
"filters",
"=",
"None",
")",
":",
"filters",
"=",
"filters",
"or",
"{",
"}",
"logger",
".",
"debug",
"(",
"\"Looking for variants in {0}\"",
".",
"format",
"(",
"case_id",
")",
")",
"limit",
"=",
"count",
"+",
"skip",
"gemini_query",
"=",
"filters",
".",
"get",
"(",
"'gemini_query'",
")",
"or",
"\"SELECT * from variants v\"",
"any_filter",
"=",
"False",
"if",
"filters",
".",
"get",
"(",
"'frequency'",
")",
":",
"frequency",
"=",
"filters",
"[",
"'frequency'",
"]",
"extra_info",
"=",
"\"(v.max_aaf_all < {0} or v.max_aaf_all is\"",
"\" Null)\"",
".",
"format",
"(",
"frequency",
")",
"gemini_query",
"=",
"self",
".",
"build_gemini_query",
"(",
"gemini_query",
",",
"extra_info",
")",
"if",
"filters",
".",
"get",
"(",
"'cadd'",
")",
":",
"cadd_score",
"=",
"filters",
"[",
"'cadd'",
"]",
"extra_info",
"=",
"\"(v.cadd_scaled > {0})\"",
".",
"format",
"(",
"cadd_score",
")",
"gemini_query",
"=",
"self",
".",
"build_gemini_query",
"(",
"gemini_query",
",",
"extra_info",
")",
"if",
"filters",
".",
"get",
"(",
"'gene_ids'",
")",
":",
"gene_list",
"=",
"[",
"gene_id",
".",
"strip",
"(",
")",
"for",
"gene_id",
"in",
"filters",
"[",
"'gene_ids'",
"]",
"]",
"gene_string",
"=",
"\"v.gene in (\"",
"for",
"index",
",",
"gene_id",
"in",
"enumerate",
"(",
"gene_list",
")",
":",
"if",
"index",
"==",
"0",
":",
"gene_string",
"+=",
"\"'{0}'\"",
".",
"format",
"(",
"gene_id",
")",
"else",
":",
"gene_string",
"+=",
"\", '{0}'\"",
".",
"format",
"(",
"gene_id",
")",
"gene_string",
"+=",
"\")\"",
"gemini_query",
"=",
"self",
".",
"build_gemini_query",
"(",
"gemini_query",
",",
"gene_string",
")",
"if",
"filters",
".",
"get",
"(",
"'range'",
")",
":",
"chrom",
"=",
"filters",
"[",
"'range'",
"]",
"[",
"'chromosome'",
"]",
"if",
"not",
"chrom",
".",
"startswith",
"(",
"'chr'",
")",
":",
"chrom",
"=",
"\"chr{0}\"",
".",
"format",
"(",
"chrom",
")",
"range_string",
"=",
"\"v.chrom = '{0}' AND \"",
"\"((v.start BETWEEN {1} AND {2}) OR \"",
"\"(v.end BETWEEN {1} AND {2}))\"",
".",
"format",
"(",
"chrom",
",",
"filters",
"[",
"'range'",
"]",
"[",
"'start'",
"]",
",",
"filters",
"[",
"'range'",
"]",
"[",
"'end'",
"]",
")",
"gemini_query",
"=",
"self",
".",
"build_gemini_query",
"(",
"gemini_query",
",",
"range_string",
")",
"filtered_variants",
"=",
"self",
".",
"_variants",
"(",
"case_id",
"=",
"case_id",
",",
"gemini_query",
"=",
"gemini_query",
",",
")",
"if",
"filters",
".",
"get",
"(",
"'consequence'",
")",
":",
"consequences",
"=",
"set",
"(",
"filters",
"[",
"'consequence'",
"]",
")",
"filtered_variants",
"=",
"(",
"variant",
"for",
"variant",
"in",
"filtered_variants",
"if",
"set",
"(",
"variant",
".",
"consequences",
")",
".",
"intersection",
"(",
"consequences",
")",
")",
"if",
"filters",
".",
"get",
"(",
"'impact_severities'",
")",
":",
"severities",
"=",
"set",
"(",
"[",
"severity",
".",
"strip",
"(",
")",
"for",
"severity",
"in",
"filters",
"[",
"'impact_severities'",
"]",
"]",
")",
"new_filtered_variants",
"=",
"[",
"]",
"filtered_variants",
"=",
"(",
"variant",
"for",
"variant",
"in",
"filtered_variants",
"if",
"set",
"(",
"[",
"variant",
".",
"impact_severity",
"]",
")",
".",
"intersection",
"(",
"severities",
")",
")",
"if",
"filters",
".",
"get",
"(",
"'sv_len'",
")",
":",
"sv_len",
"=",
"int",
"(",
"filters",
"[",
"'sv_len'",
"]",
")",
"filtered_variants",
"=",
"(",
"variant",
"for",
"variant",
"in",
"filtered_variants",
"if",
"variant",
".",
"sv_len",
">=",
"sv_len",
")",
"variants",
"=",
"[",
"]",
"for",
"index",
",",
"variant_obj",
"in",
"enumerate",
"(",
"filtered_variants",
")",
":",
"if",
"index",
">=",
"skip",
":",
"if",
"index",
"<",
"limit",
":",
"variants",
".",
"append",
"(",
"variant_obj",
")",
"else",
":",
"break",
"return",
"Results",
"(",
"variants",
",",
"len",
"(",
"variants",
")",
")"
] | Return count variants for a case.
This function needs to have different behaviours based on what is asked
for. It should always try to give minimal information back to improve
on speed. For example, if consequences are not asked for we will not
build all transcripts. If not sv variants we will not build sv
coordinates.
So the minimal case is to just show what is asked for in the variants
interface.
Args:
case_id (str): A gemini db
skip (int): Skip first variants
count (int): The number of variants to return
filters (dict): A dictionary with filters. Currently this will
look like: {
gene_list: [] (list of hgnc ids),
frequency: None (float),
cadd: None (float),
consequence: [] (list of consequences),
impact_severities: [] (list of severities),
genetic_models: [] (list of genetic models)
}
Returns:
puzzle.constants.Results : Named tuple with variants and
nr_of_variants | [
"Return",
"count",
"variants",
"for",
"a",
"case",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/gemini/mixins/variant.py#L35-L144 | train |
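The gene filter in variants() builds its IN clause with an index-tracking loop; str.join produces the same string more directly. A sketch, with the caveat that both versions interpolate values straight into SQL and so trust their inputs:

def gene_in_clause(gene_ids):
    # Equivalent to the enumerate-based loop in variants():
    # produces "v.gene in ('ADK', 'SKI')" from a list of symbols.
    quoted = ", ".join("'{0}'".format(g.strip()) for g in gene_ids)
    return "v.gene in ({0})".format(quoted)

assert gene_in_clause(['ADK ', 'SKI']) == "v.gene in ('ADK', 'SKI')"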
robinandeer/puzzle | puzzle/plugins/gemini/mixins/variant.py | VariantMixin._variants | def _variants(self, case_id, gemini_query):
"""Return variants found in the gemini database
Args:
case_id (str): The case for which we want to see information
gemini_query (str): What variants should be chosen
filters (dict): A dictionary with filters
Yields:
variant_obj (dict): A Variant formatted dictionary
"""
individuals = []
# Get the individuals for the case
case_obj = self.case(case_id)
for individual in case_obj.individuals:
individuals.append(individual)
self.db = case_obj.variant_source
self.variant_type = case_obj.variant_type
gq = GeminiQuery(self.db)
gq.run(gemini_query)
index = 0
for gemini_variant in gq:
variant = None
# Check if variant is non ref in the individuals
is_variant = self._is_variant(gemini_variant, individuals)
if self.variant_type == 'snv' and not is_variant:
variant = None
else:
index += 1
logger.debug("Updating index to: {0}".format(index))
variant = self._format_variant(
case_id=case_id,
gemini_variant=gemini_variant,
individual_objs=individuals,
index=index
)
if variant:
yield variant | python | def _variants(self, case_id, gemini_query):
"""Return variants found in the gemini database
Args:
case_id (str): The case for which we want to see information
gemini_query (str): What variants should be chosen
filters (dict): A dictionary with filters
Yields:
variant_obj (dict): A Variant formatted dictionary
"""
individuals = []
# Get the individuals for the case
case_obj = self.case(case_id)
for individual in case_obj.individuals:
individuals.append(individual)
self.db = case_obj.variant_source
self.variant_type = case_obj.variant_type
gq = GeminiQuery(self.db)
gq.run(gemini_query)
index = 0
for gemini_variant in gq:
variant = None
# Check if variant is non ref in the individuals
is_variant = self._is_variant(gemini_variant, individuals)
if self.variant_type == 'snv' and not is_variant:
variant = None
else:
index += 1
logger.debug("Updating index to: {0}".format(index))
variant = self._format_variant(
case_id=case_id,
gemini_variant=gemini_variant,
individual_objs=individuals,
index=index
)
if variant:
yield variant | [
"def",
"_variants",
"(",
"self",
",",
"case_id",
",",
"gemini_query",
")",
":",
"individuals",
"=",
"[",
"]",
"# Get the individuals for the case",
"case_obj",
"=",
"self",
".",
"case",
"(",
"case_id",
")",
"for",
"individual",
"in",
"case_obj",
".",
"individuals",
":",
"individuals",
".",
"append",
"(",
"individual",
")",
"self",
".",
"db",
"=",
"case_obj",
".",
"variant_source",
"self",
".",
"variant_type",
"=",
"case_obj",
".",
"variant_type",
"gq",
"=",
"GeminiQuery",
"(",
"self",
".",
"db",
")",
"gq",
".",
"run",
"(",
"gemini_query",
")",
"index",
"=",
"0",
"for",
"gemini_variant",
"in",
"gq",
":",
"variant",
"=",
"None",
"# Check if variant is non ref in the individuals",
"is_variant",
"=",
"self",
".",
"_is_variant",
"(",
"gemini_variant",
",",
"individuals",
")",
"if",
"self",
".",
"variant_type",
"==",
"'snv'",
"and",
"not",
"is_variant",
":",
"variant",
"=",
"None",
"else",
":",
"index",
"+=",
"1",
"logger",
".",
"debug",
"(",
"\"Updating index to: {0}\"",
".",
"format",
"(",
"index",
")",
")",
"variant",
"=",
"self",
".",
"_format_variant",
"(",
"case_id",
"=",
"case_id",
",",
"gemini_variant",
"=",
"gemini_variant",
",",
"individual_objs",
"=",
"individuals",
",",
"index",
"=",
"index",
")",
"if",
"variant",
":",
"yield",
"variant"
] | Return variants found in the gemini database
Args:
case_id (str): The case for which we want to see information
gemini_query (str): What variants should be chosen
filters (dict): A dictionary with filters
Yields:
variant_obj (dict): A Variant formatted dictionary | [
"Return",
"variants",
"found",
"in",
"the",
"gemini",
"database"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/gemini/mixins/variant.py#L189-L235 | train |
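_variants yields formatted variants lazily, and variants() layers generator expressions on top before windowing the result with its skip/limit loop. itertools.islice captures that windowing in one call; a standalone sketch of the stacked-generator pattern:

from itertools import islice

def paginate(iterable, skip, count):
    # Same window as the enumerate/skip/limit loop in variants().
    return list(islice(iterable, skip, skip + count))

records = (n for n in range(100))             # stand-in for _variants(...)
records = (n for n in records if n % 2 == 0)  # stacked filter, as for severities
assert paginate(records, skip=5, count=3) == [10, 12, 14]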