repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition
---|---|---|---|---|---|---|---|---|---|---|---
stringlengths 7-55 | stringlengths 4-127 | stringlengths 1-88 | stringlengths 75-19.8k | stringclasses 1 value | stringlengths 75-19.8k | list | stringlengths 3-17.3k | list | stringlengths 40-40 | stringlengths 87-242 | stringclasses 1 value
loganasherjones/yapconf | yapconf/items.py | YapconfBoolItem.convert_config_value | def convert_config_value(self, value, label):
"""Converts all 'Truthy' values to True and 'Falsy' values to False.
Args:
value: Value to convert
label: Label of the config in which this item was found.
Returns:
bool: True for truthy values, False for falsy values.
"""
if isinstance(value, six.string_types):
value = value.lower()
if value in self.TRUTHY_VALUES:
return True
elif value in self.FALSY_VALUES:
return False
else:
raise YapconfValueError("Cowardly refusing to interpret "
"config value as a boolean. Name: "
"{0}, Value: {1}"
.format(self.name, value)) | python | def convert_config_value(self, value, label):
"""Converts all 'Truthy' values to True and 'Falsy' values to False.
Args:
value: Value to convert
label: Label of the config in which this item was found.
Returns:
bool: True for truthy values, False for falsy values.
"""
if isinstance(value, six.string_types):
value = value.lower()
if value in self.TRUTHY_VALUES:
return True
elif value in self.FALSY_VALUES:
return False
else:
raise YapconfValueError("Cowardly refusing to interpret "
"config value as a boolean. Name: "
"{0}, Value: {1}"
.format(self.name, value)) | [
"def",
"convert_config_value",
"(",
"self",
",",
"value",
",",
"label",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"value",
"=",
"value",
".",
"lower",
"(",
")",
"if",
"value",
"in",
"self",
".",
"TRUTHY_VALUES",
":",
"return",
"True",
"elif",
"value",
"in",
"self",
".",
"FALSY_VALUES",
":",
"return",
"False",
"else",
":",
"raise",
"YapconfValueError",
"(",
"\"Cowardly refusing to interpret \"",
"\"config value as a boolean. Name: \"",
"\"{0}, Value: {1}\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"value",
")",
")"
]
| Converts all 'Truthy' values to True and 'Falsy' values to False.
Args:
value: Value to convert
label: Label of the config in which this item was found.
Returns: bool: True for truthy values, False for falsy values. | [
"Converts",
"all",
"Truthy",
"values",
"to",
"True",
"and",
"Falsy",
"values",
"to",
"False",
"."
]
| d2970e6e7e3334615d4d978d8b0ca33006d79d16 | https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/items.py#L622-L643 | train |
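The truthy/falsy conversion above is easy to exercise standalone. The following is a minimal sketch assuming common spellings for the value sets; the actual `TRUTHY_VALUES`/`FALSY_VALUES` attributes on `YapconfBoolItem` are not shown in this record and may differ:

```python
# Minimal standalone sketch of the conversion above; the value sets here
# are assumptions, not yapconf's actual class attributes.
TRUTHY_VALUES = ("y", "yes", "t", "true", "on", "1")
FALSY_VALUES = ("n", "no", "f", "false", "off", "0")

def to_bool(value, name="my_flag"):
    if isinstance(value, str):
        value = value.lower()  # case-insensitive comparison, as in the method above
    if value in TRUTHY_VALUES:
        return True
    if value in FALSY_VALUES:
        return False
    raise ValueError(
        "Cowardly refusing to interpret config value as a boolean. "
        "Name: {0}, Value: {1}".format(name, value)
    )

assert to_bool("TRUE") is True
assert to_bool("off") is False
```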
loganasherjones/yapconf | yapconf/items.py | YapconfListItem.add_argument | def add_argument(self, parser, bootstrap=False):
"""Add list-style item as an argument to the given parser.
Generally speaking, this works mostly like the normal append
action, but there are special rules for boolean cases. See the
AppendReplace action for more details.
Examples:
A non-nested list value with the name 'values' and a child name of
'value' will result in a command-line argument that will correctly
handle arguments like the following:
['--value', 'VALUE1', '--value', 'VALUE2']
Args:
parser (argparse.ArgumentParser): The parser to add this item to.
bootstrap (bool): Flag to indicate whether you only want to mark
this item as required or not.
"""
if self.cli_expose:
if isinstance(self.child, YapconfBoolItem):
original_default = self.child.default
self.child.default = True
args = self.child._get_argparse_names(parser.prefix_chars)
kwargs = self._get_argparse_kwargs(bootstrap)
parser.add_argument(*args, **kwargs)
self.child.default = False
args = self.child._get_argparse_names(parser.prefix_chars)
kwargs = self._get_argparse_kwargs(bootstrap)
parser.add_argument(*args, **kwargs)
self.child.default = original_default
else:
super(YapconfListItem, self).add_argument(parser, bootstrap) | python | def add_argument(self, parser, bootstrap=False):
"""Add list-style item as an argument to the given parser.
Generally speaking, this works mostly like the normal append
action, but there are special rules for boolean cases. See the
AppendReplace action for more details.
Examples:
A non-nested list value with the name 'values' and a child name of
'value' will result in a command-line argument that will correctly
handle arguments like the following:
['--value', 'VALUE1', '--value', 'VALUE2']
Args:
parser (argparse.ArgumentParser): The parser to add this item to.
bootstrap (bool): Flag to indicate whether you only want to mark
this item as required or not.
"""
if self.cli_expose:
if isinstance(self.child, YapconfBoolItem):
original_default = self.child.default
self.child.default = True
args = self.child._get_argparse_names(parser.prefix_chars)
kwargs = self._get_argparse_kwargs(bootstrap)
parser.add_argument(*args, **kwargs)
self.child.default = False
args = self.child._get_argparse_names(parser.prefix_chars)
kwargs = self._get_argparse_kwargs(bootstrap)
parser.add_argument(*args, **kwargs)
self.child.default = original_default
else:
super(YapconfListItem, self).add_argument(parser, bootstrap) | [
"def",
"add_argument",
"(",
"self",
",",
"parser",
",",
"bootstrap",
"=",
"False",
")",
":",
"if",
"self",
".",
"cli_expose",
":",
"if",
"isinstance",
"(",
"self",
".",
"child",
",",
"YapconfBoolItem",
")",
":",
"original_default",
"=",
"self",
".",
"child",
".",
"default",
"self",
".",
"child",
".",
"default",
"=",
"True",
"args",
"=",
"self",
".",
"child",
".",
"_get_argparse_names",
"(",
"parser",
".",
"prefix_chars",
")",
"kwargs",
"=",
"self",
".",
"_get_argparse_kwargs",
"(",
"bootstrap",
")",
"parser",
".",
"add_argument",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"child",
".",
"default",
"=",
"False",
"args",
"=",
"self",
".",
"child",
".",
"_get_argparse_names",
"(",
"parser",
".",
"prefix_chars",
")",
"kwargs",
"=",
"self",
".",
"_get_argparse_kwargs",
"(",
"bootstrap",
")",
"parser",
".",
"add_argument",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"child",
".",
"default",
"=",
"original_default",
"else",
":",
"super",
"(",
"YapconfListItem",
",",
"self",
")",
".",
"add_argument",
"(",
"parser",
",",
"bootstrap",
")"
]
| Add list-style item as an argument to the given parser.
Generally speaking, this works mostly like the normal append
action, but there are special rules for boolean cases. See the
AppendReplace action for more details.
Examples:
A non-nested list value with the name 'values' and a child name of
'value' will result in a command-line argument that will correctly
handle arguments like the following:
['--value', 'VALUE1', '--value', 'VALUE2']
Args:
parser (argparse.ArgumentParser): The parser to add this item to.
bootstrap (bool): Flag to indicate whether you only want to mark
this item as required or not. | [
"Add",
"list",
"-",
"style",
"item",
"as",
"an",
"argument",
"to",
"the",
"given",
"parser",
"."
]
| d2970e6e7e3334615d4d978d8b0ca33006d79d16 | https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/items.py#L729-L764 | train |
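The repeated-flag behavior in the docstring's example can be sketched with plain argparse; yapconf's custom `AppendReplace` action (referenced above but not shown in this record) layers the special boolean handling on top:

```python
# Sketch of the '--value VALUE1 --value VALUE2' behavior using only the
# standard library; the real implementation uses a custom argparse action.
import argparse

parser = argparse.ArgumentParser()
# action="append" collects each occurrence of --value into a list.
parser.add_argument("--value", action="append", dest="values")

args = parser.parse_args(["--value", "VALUE1", "--value", "VALUE2"])
assert args.values == ["VALUE1", "VALUE2"]
```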
loganasherjones/yapconf | yapconf/items.py | YapconfDictItem.add_argument | def add_argument(self, parser, bootstrap=False):
"""Add dict-style item as an argument to the given parser.
The dict item will take all the nested items in the dictionary and
namespace them with the dict name, adding each child item as
their own CLI argument.
Examples:
A non-nested dict item with the name 'db' and children named
'port' and 'host' will result in the following being valid
CLI args:
['--db-host', 'localhost', '--db-port', '1234']
Args:
parser (argparse.ArgumentParser): The parser to add this item to.
bootstrap (bool): Flag to indicate whether you only want to mark
this item as required or not.
"""
if self.cli_expose:
for child in self.children.values():
child.add_argument(parser, bootstrap) | python | def add_argument(self, parser, bootstrap=False):
"""Add dict-style item as an argument to the given parser.
The dict item will take all the nested items in the dictionary and
namespace them with the dict name, adding each child item as
their own CLI argument.
Examples:
A non-nested dict item with the name 'db' and children named
'port' and 'host' will result in the following being valid
CLI args:
['--db-host', 'localhost', '--db-port', '1234']
Args:
parser (argparse.ArgumentParser): The parser to add this item to.
bootstrap (bool): Flag to indicate whether you only want to mark
this item as required or not.
"""
if self.cli_expose:
for child in self.children.values():
child.add_argument(parser, bootstrap) | [
"def",
"add_argument",
"(",
"self",
",",
"parser",
",",
"bootstrap",
"=",
"False",
")",
":",
"if",
"self",
".",
"cli_expose",
":",
"for",
"child",
"in",
"self",
".",
"children",
".",
"values",
"(",
")",
":",
"child",
".",
"add_argument",
"(",
"parser",
",",
"bootstrap",
")"
]
| Add dict-style item as an argument to the given parser.
The dict item will take all the nested items in the dictionary and
namespace them with the dict name, adding each child item as
their own CLI argument.
Examples:
A non-nested dict item with the name 'db' and children named
'port' and 'host' will result in the following being valid
CLI args:
['--db-host', 'localhost', '--db-port', '1234']
Args:
parser (argparse.ArgumentParser): The parser to add this item to.
bootstrap (bool): Flag to indicate whether you only want to mark
this item as required or not. | [
"Add",
"dict",
"-",
"style",
"item",
"as",
"an",
"argument",
"to",
"the",
"given",
"parser",
"."
]
| d2970e6e7e3334615d4d978d8b0ca33006d79d16 | https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/items.py#L817-L838 | train |
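Likewise, the namespacing described above can be sketched directly; here plain argparse stands in for the child items' own `add_argument` calls:

```python
# Hypothetical flags for a dict item named 'db' with children 'host' and 'port'.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--db-host", dest="db_host")
parser.add_argument("--db-port", dest="db_port", type=int)

args = parser.parse_args(["--db-host", "localhost", "--db-port", "1234"])
assert args.db_host == "localhost"
assert args.db_port == 1234
```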
sirfoga/pyhal | hal/wrappers/profile.py | log_time | def log_time(func):
"""Executes function and logs time
:param func: function to call
:return: the decorated function, which logs execution time when called
"""
@functools.wraps(func)
def _execute(*args, **kwargs):
"""Executes function and logs time
:param args: args of function
:param kwargs: extra args of function
:param *args: args
:param **kwargs: extra args
:return: function result
"""
func_name = get_method_name(func)
timer = Timer()
log_message(func_name, "has started")
with timer:
result = func(*args, **kwargs)
seconds = "{:.3f}".format(timer.elapsed_time())
log_message(func_name, "has finished. Execution time:", seconds, "s")
return result
return _execute | python | def log_time(func):
"""Executes function and logs time
:param func: function to call
:return: the decorated function, which logs execution time when called
"""
@functools.wraps(func)
def _execute(*args, **kwargs):
"""Executes function and logs time
:param args: args of function
:param kwargs: extra args of function
:param *args: args
:param **kwargs: extra args
:return: function result
"""
func_name = get_method_name(func)
timer = Timer()
log_message(func_name, "has started")
with timer:
result = func(*args, **kwargs)
seconds = "{:.3f}".format(timer.elapsed_time())
log_message(func_name, "has finished. Execution time:", seconds, "s")
return result
return _execute | [
"def",
"log_time",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"_execute",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Executes function and logs time\n\n :param args: args of function\n :param kwargs: extra args of function\n :param *args: args\n :param **kwargs: extra args\n :return: function result\n \"\"\"",
"func_name",
"=",
"get_method_name",
"(",
"func",
")",
"timer",
"=",
"Timer",
"(",
")",
"log_message",
"(",
"func_name",
",",
"\"has started\"",
")",
"with",
"timer",
":",
"result",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"seconds",
"=",
"\"{:.3f}\"",
".",
"format",
"(",
"timer",
".",
"elapsed_time",
"(",
")",
")",
"log_message",
"(",
"func_name",
",",
"\"has finished. Execution time:\"",
",",
"seconds",
",",
"\"s\"",
")",
"return",
"result",
"return",
"_execute"
]
| Executes function and logs time
:param func: function to call
:return: the decorated function, which logs execution time when called | [
"Executes",
"function",
"and",
"logs",
"time"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/wrappers/profile.py#L12-L42 | train |
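A hypothetical use of the decorator follows; the import path is inferred from the record's `hal/wrappers/profile.py`, and the log text paraphrases the `log_message` calls above:

```python
import time

from hal.wrappers.profile import log_time  # import path assumed from the record

@log_time
def slow_add(a, b):
    time.sleep(0.1)
    return a + b

result = slow_add(1, 2)
# Logs roughly: "slow_add has started" followed by
# "slow_add has finished. Execution time: 0.100 s"
assert result == 3
```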
raymondEhlers/pachyderm | pachyderm/generic_config.py | load_configuration | def load_configuration(yaml: yaml.ruamel.yaml.YAML, filename: str) -> DictLike:
""" Load an analysis configuration from a file.
Args:
yaml: YAML object to use in loading the configuration.
filename: Filename of the YAML configuration file.
Returns:
dict-like object containing the loaded configuration
"""
with open(filename, "r") as f:
config = yaml.load(f)
return config | python | def load_configuration(yaml: yaml.ruamel.yaml.YAML, filename: str) -> DictLike:
""" Load an analysis configuration from a file.
Args:
yaml: YAML object to use in loading the configuration.
filename: Filename of the YAML configuration file.
Returns:
dict-like object containing the loaded configuration
"""
with open(filename, "r") as f:
config = yaml.load(f)
return config | [
"def",
"load_configuration",
"(",
"yaml",
":",
"yaml",
".",
"ruamel",
".",
"yaml",
".",
"YAML",
",",
"filename",
":",
"str",
")",
"->",
"DictLike",
":",
"with",
"open",
"(",
"filename",
",",
"\"r\"",
")",
"as",
"f",
":",
"config",
"=",
"yaml",
".",
"load",
"(",
"f",
")",
"return",
"config"
]
| Load an analysis configuration from a file.
Args:
yaml: YAML object to use in loading the configuration.
filename: Filename of the YAML configuration file.
Returns:
dict-like object containing the loaded configuration | [
"Load",
"an",
"analysis",
"configuration",
"from",
"a",
"file",
"."
]
| aaa1d8374fd871246290ce76f1796f2f7582b01d | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/generic_config.py#L23-L35 | train |
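A minimal sketch of calling it; the filename and keys are illustrative, and a round-trip `ruamel.yaml.YAML` object is assumed, since the override helpers below rely on anchors being preserved:

```python
from ruamel.yaml import YAML

from pachyderm.generic_config import load_configuration  # module path from the record

yaml = YAML(typ="rt")  # round-trip mode preserves anchors and comments
config = load_configuration(yaml=yaml, filename="analysis_config.yaml")  # hypothetical file
print(config["myOption"])  # keys depend on the YAML file's contents
```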
raymondEhlers/pachyderm | pachyderm/generic_config.py | override_options | def override_options(config: DictLike, selected_options: Tuple[Any, ...], set_of_possible_options: Tuple[enum.Enum, ...], config_containing_override: DictLike = None) -> DictLike:
""" Determine override options for a particular configuration.
The options are determined by searching in the order specified in selected_options.
For the example config,
.. code-block:: yaml
config:
value: 3
override:
2.76:
track:
value: 5
value will be assigned the value 5 if we are at 2.76 TeV with a track bias, regardless of the event
activity or leading hadron bias. The order of this configuration is specified by the order of the
selected_options passed. The above example configuration is from the jet-hadron analysis.
Since anchors aren't kept for scalar values, if you want to override an anchored value, you need to
specify it as a single value in a list (or dict, but list is easier). After the anchor values propagate,
single element lists can be converted into scalar values using ``simplify_data_representations()``.
Args:
config: The dict-like configuration from ruamel.yaml which should be overridden.
selected_options: The selected analysis options. They will be checked in the order with which
they are passed, so make certain that it matches the order in the configuration file!
set_of_possible_options (tuple of enums): Possible options for the override value categories.
config_containing_override: The dict-like config containing the override options in a map called
"override". If it is not specified, it will look for it in the main config.
Returns:
dict-like object: The updated configuration
"""
if config_containing_override is None:
config_containing_override = config
override_opts = config_containing_override.pop("override")
override_dict = determine_override_options(selected_options, override_opts, set_of_possible_options)
logger.debug(f"override_dict: {override_dict}")
# Set the configuration values to those specified in the override options
# Cannot just use update() on config because we need to maintain the anchors.
for k, v in override_dict.items():
# Check if key is there and if it is not None! (The second part is important)
if k in config:
try:
# If it has an anchor, we know that we want to preserve the type. So we check for the anchor
# by trying to access it (Note that we don't actually care what the value is - just that it
# exists). If it fails with an AttributeError, then we know we can just assign the value. If it
# has an anchor, then we want to preserve the anchor information.
config[k].anchor
logger.debug(f"type: {type(config[k])}, k: {k}")
if isinstance(config[k], list):
# Clear out the existing list entries
del config[k][:]
if isinstance(override_dict[k], (str, int, float, bool)):
# We have to treat str carefully because it is an iterable, but it will be expanded as
# individual characters if it's treated the same as a list, which is not the desired
# behavior! If we wrap it in [], then it will be treated as the only entry in the list
# NOTE: We also treat the basic types this way because they will be passed this way if
# overriding indirectly with anchors (since the basic scalar types don't yet support
# reassignment while maintaining their anchors).
config[k].append(override_dict[k])
else:
# Here we just assign all entries of the list to all entries of override_dict[k]
config[k].extend(override_dict[k])
elif isinstance(config[k], dict):
# Clear out the existing entries because we are trying to replace everything
# Then we can simply update the dict with our new values
config[k].clear()
config[k].update(override_dict[k])
elif isinstance(config[k], (int, float, bool)):
# This isn't really very good (since we lose information), but there's nothing that can be done
# about it at the moment (Dec 2018)
logger.debug("Overwriting YAML anchor object. It is currently unclear how to reassign this value.")
config[k] = v
else:
# Raise a value error on all of the cases that we aren't already aware of.
raise ValueError(f"Object {k} (type {type(config[k])}) somehow has an anchor, but is something other than a list or dict. Attempting to assign directly to it.")
except AttributeError:
# If no anchor, just overwrite the value at this key
config[k] = v
else:
raise KeyError(k, f"Trying to override key \"{k}\" that it is not in the config.")
return config | python | def override_options(config: DictLike, selected_options: Tuple[Any, ...], set_of_possible_options: Tuple[enum.Enum, ...], config_containing_override: DictLike = None) -> DictLike:
""" Determine override options for a particular configuration.
The options are determined by searching in the order specified in selected_options.
For the example config,
.. code-block:: yaml
config:
value: 3
override:
2.76:
track:
value: 5
value will be assigned the value 5 if we are at 2.76 TeV with a track bias, regardless of the event
activity or leading hadron bias. The order of this configuration is specified by the order of the
selected_options passed. The above example configuration is from the jet-hadron analysis.
Since anchors aren't kept for scalar values, if you want to override an anchored value, you need to
specify it as a single value in a list (or dict, but list is easier). After the anchor values propagate,
single element lists can be converted into scalar values using ``simplify_data_representations()``.
Args:
config: The dict-like configuration from ruamel.yaml which should be overridden.
selected_options: The selected analysis options. They will be checked in the order with which
they are passed, so make certain that it matches the order in the configuration file!
set_of_possible_options (tuple of enums): Possible options for the override value categories.
config_containing_override: The dict-like config containing the override options in a map called
"override". If it is not specified, it will look for it in the main config.
Returns:
dict-like object: The updated configuration
"""
if config_containing_override is None:
config_containing_override = config
override_opts = config_containing_override.pop("override")
override_dict = determine_override_options(selected_options, override_opts, set_of_possible_options)
logger.debug(f"override_dict: {override_dict}")
# Set the configuration values to those specified in the override options
# Cannot just use update() on config because we need to maintain the anchors.
for k, v in override_dict.items():
# Check if key is there and if it is not None! (The second part is important)
if k in config:
try:
# If it has an anchor, we know that we want to preserve the type. So we check for the anchor
# by trying to access it (Note that we don't actually care what the value is - just that it
# exists). If it fails with an AttributeError, then we know we can just assign the value. If it
# has an anchor, then we want to preserve the anchor information.
config[k].anchor
logger.debug(f"type: {type(config[k])}, k: {k}")
if isinstance(config[k], list):
# Clear out the existing list entries
del config[k][:]
if isinstance(override_dict[k], (str, int, float, bool)):
# We have to treat str carefully because it is an iterable, but it will be expanded as
# individual characters if it's treated the same as a list, which is not the desired
# behavior! If we wrap it in [], then it will be treated as the only entry in the list
# NOTE: We also treat the basic types this way because they will be passed this way if
# overriding indirectly with anchors (since the basic scalar types don't yet support
# reassignment while maintaining their anchors).
config[k].append(override_dict[k])
else:
# Here we just assign all entries of the list to all entries of override_dict[k]
config[k].extend(override_dict[k])
elif isinstance(config[k], dict):
# Clear out the existing entries because we are trying to replace everything
# Then we can simply update the dict with our new values
config[k].clear()
config[k].update(override_dict[k])
elif isinstance(config[k], (int, float, bool)):
# This isn't really very good (since we lose information), but there's nothing that can be done
# about it at the moment (Dec 2018)
logger.debug("Overwriting YAML anchor object. It is currently unclear how to reassign this value.")
config[k] = v
else:
# Raise a value error on all of the cases that we aren't already aware of.
raise ValueError(f"Object {k} (type {type(config[k])}) somehow has an anchor, but is something other than a list or dict. Attempting to assign directly to it.")
except AttributeError:
# If no anchor, just overwrite the value at this key
config[k] = v
else:
raise KeyError(k, f"Trying to override key \"{k}\" that is not in the config.")
return config | [
"def",
"override_options",
"(",
"config",
":",
"DictLike",
",",
"selected_options",
":",
"Tuple",
"[",
"Any",
",",
"...",
"]",
",",
"set_of_possible_options",
":",
"Tuple",
"[",
"enum",
".",
"Enum",
",",
"...",
"]",
",",
"config_containing_override",
":",
"DictLike",
"=",
"None",
")",
"->",
"DictLike",
":",
"if",
"config_containing_override",
"is",
"None",
":",
"config_containing_override",
"=",
"config",
"override_opts",
"=",
"config_containing_override",
".",
"pop",
"(",
"\"override\"",
")",
"override_dict",
"=",
"determine_override_options",
"(",
"selected_options",
",",
"override_opts",
",",
"set_of_possible_options",
")",
"logger",
".",
"debug",
"(",
"f\"override_dict: {override_dict}\"",
")",
"# Set the configuration values to those specified in the override options",
"# Cannot just use update() on config because we need to maintain the anchors.",
"for",
"k",
",",
"v",
"in",
"override_dict",
".",
"items",
"(",
")",
":",
"# Check if key is there and if it is not None! (The second part is important)",
"if",
"k",
"in",
"config",
":",
"try",
":",
"# If it has an anchor, we know that we want to preserve the type. So we check for the anchor",
"# by trying to access it (Note that we don't actually care what the value is - just that it",
"# exists). If it fails with an AttributeError, then we know we can just assign the value. If it",
"# has an anchor, then we want to preserve the anchor information.",
"config",
"[",
"k",
"]",
".",
"anchor",
"logger",
".",
"debug",
"(",
"f\"type: {type(config[k])}, k: {k}\"",
")",
"if",
"isinstance",
"(",
"config",
"[",
"k",
"]",
",",
"list",
")",
":",
"# Clear out the existing list entries",
"del",
"config",
"[",
"k",
"]",
"[",
":",
"]",
"if",
"isinstance",
"(",
"override_dict",
"[",
"k",
"]",
",",
"(",
"str",
",",
"int",
",",
"float",
",",
"bool",
")",
")",
":",
"# We have to treat str carefully because it is an iterable, but it will be expanded as",
"# individual characters if it's treated the same as a list, which is not the desired",
"# behavior! If we wrap it in [], then it will be treated as the only entry in the list",
"# NOTE: We also treat the basic types this way because they will be passed this way if",
"# overriding indirectly with anchors (since the basic scalar types don't yet support",
"# reassignment while maintaining their anchors).",
"config",
"[",
"k",
"]",
".",
"append",
"(",
"override_dict",
"[",
"k",
"]",
")",
"else",
":",
"# Here we just assign all entries of the list to all entries of override_dict[k]",
"config",
"[",
"k",
"]",
".",
"extend",
"(",
"override_dict",
"[",
"k",
"]",
")",
"elif",
"isinstance",
"(",
"config",
"[",
"k",
"]",
",",
"dict",
")",
":",
"# Clear out the existing entries because we are trying to replace everything",
"# Then we can simply update the dict with our new values",
"config",
"[",
"k",
"]",
".",
"clear",
"(",
")",
"config",
"[",
"k",
"]",
".",
"update",
"(",
"override_dict",
"[",
"k",
"]",
")",
"elif",
"isinstance",
"(",
"config",
"[",
"k",
"]",
",",
"(",
"int",
",",
"float",
",",
"bool",
")",
")",
":",
"# This isn't really very good (since we lose information), but there's nothing that can be done",
"# about it at the moment (Dec 2018)",
"logger",
".",
"debug",
"(",
"\"Overwriting YAML anchor object. It is currently unclear how to reassign this value.\"",
")",
"config",
"[",
"k",
"]",
"=",
"v",
"else",
":",
"# Raise a value error on all of the cases that we aren't already aware of.",
"raise",
"ValueError",
"(",
"f\"Object {k} (type {type(config[k])}) somehow has an anchor, but is something other than a list or dict. Attempting to assign directly to it.\"",
")",
"except",
"AttributeError",
":",
"# If no anchor, just overwrite the value at this key",
"config",
"[",
"k",
"]",
"=",
"v",
"else",
":",
"raise",
"KeyError",
"(",
"k",
",",
"f\"Trying to override key \\\"{k}\\\" that it is not in the config.\"",
")",
"return",
"config"
]
| Determine override options for a particular configuration.
The options are determined by searching in the order specified in selected_options.
For the example config,
.. code-block:: yaml
config:
value: 3
override:
2.76:
track:
value: 5
value will be assigned the value 5 if we are at 2.76 TeV with a track bias, regardless of the event
activity or leading hadron bias. The order of this configuration is specified by the order of the
selected_options passed. The above example configuration is from the jet-hadron analysis.
Since anchors aren't kept for scalar values, if you want to override an anchored value, you need to
specify it as a single value in a list (or dict, but list is easier). After the anchor values propagate,
single element lists can be converted into scalar values using ``simplify_data_representations()``.
Args:
config: The dict-like configuration from ruamel.yaml which should be overridden.
selected_options: The selected analysis options. They will be checked in the order with which
they are passed, so make certain that it matches the order in the configuration file!
set_of_possible_options (tuple of enums): Possible options for the override value categories.
config_containing_override: The dict-like config containing the override options in a map called
"override". If it is not specified, it will look for it in the main config.
Returns:
dict-like object: The updated configuration | [
"Determine",
"override",
"options",
"for",
"a",
"particular",
"configuration",
"."
]
| aaa1d8374fd871246290ce76f1796f2f7582b01d | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/generic_config.py#L37-L122 | train |
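To make the selection order concrete, here is a hedged sketch built on the docstring's example config. The enums are illustrative stand-ins for the jet-hadron analysis options, and the actual key matching happens inside `determine_override_options`, which is not shown in this record:

```python
import enum
import io

from ruamel.yaml import YAML

from pachyderm import generic_config

class CollisionEnergy(enum.Enum):  # hypothetical stand-in
    two_seven_six = 2.76

class LeadingHadronBias(enum.Enum):  # hypothetical stand-in
    track = "track"

yaml_text = """
value: 3
override:
    2.76:
        track:
            value: 5
"""
config = YAML(typ="rt").load(io.StringIO(yaml_text))
config = generic_config.override_options(
    config,
    selected_options=(CollisionEnergy.two_seven_six, LeadingHadronBias.track),
    set_of_possible_options=(CollisionEnergy, LeadingHadronBias),
)
# With the docstring's example, "value" should end up as 5 after the override.
```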
raymondEhlers/pachyderm | pachyderm/generic_config.py | simplify_data_representations | def simplify_data_representations(config: DictLike) -> DictLike:
""" Convert one entry lists to the scalar value
This step is necessary because anchors are not kept for scalar values - just for lists and dictionaries.
Now that we are done with all of our anchor references, we can convert these single entry lists to
just the scalar entry, which is more usable.
Some notes on anchors in ruamel.yaml are here: https://stackoverflow.com/a/48559644
Args:
config: The dict-like configuration from ruamel.yaml which should be simplified.
Returns:
The updated configuration.
"""
for k, v in config.items():
if v and isinstance(v, list) and len(v) == 1:
logger.debug("v: {}".format(v))
config[k] = v[0]
return config | python | def simplify_data_representations(config: DictLike) -> DictLike:
""" Convert one entry lists to the scalar value
This step is necessary because anchors are not kept for scalar values - just for lists and dictionaries.
Now that we are done with all of our anchor references, we can convert these single entry lists to
just the scalar entry, which is more usable.
Some notes on anchors in ruamel.yaml are here: https://stackoverflow.com/a/48559644
Args:
config: The dict-like configuration from ruamel.yaml which should be simplified.
Returns:
The updated configuration.
"""
for k, v in config.items():
if v and isinstance(v, list) and len(v) == 1:
logger.debug("v: {}".format(v))
config[k] = v[0]
return config | [
"def",
"simplify_data_representations",
"(",
"config",
":",
"DictLike",
")",
"->",
"DictLike",
":",
"for",
"k",
",",
"v",
"in",
"config",
".",
"items",
"(",
")",
":",
"if",
"v",
"and",
"isinstance",
"(",
"v",
",",
"list",
")",
"and",
"len",
"(",
"v",
")",
"==",
"1",
":",
"logger",
".",
"debug",
"(",
"\"v: {}\"",
".",
"format",
"(",
"v",
")",
")",
"config",
"[",
"k",
"]",
"=",
"v",
"[",
"0",
"]",
"return",
"config"
]
| Convert one entry lists to the scalar value
This step is necessary because anchors are not kept for scalar values - just for lists and dictionaries.
Now that we are done with all of our anchor references, we can convert these single entry lists to
just the scalar entry, which is more usable.
Some notes on anchors in ruamel.yaml are here: https://stackoverflow.com/a/48559644
Args:
config: The dict-like configuration from ruamel.yaml which should be simplified.
Returns:
The updated configuration. | [
"Convert",
"one",
"entry",
"lists",
"to",
"the",
"scalar",
"value"
]
| aaa1d8374fd871246290ce76f1796f2f7582b01d | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/generic_config.py#L124-L143 | train |
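The collapse itself is tiny; this standalone sketch mirrors the loop above on a plain dict:

```python
# Single-element lists collapse to their scalar; everything else is untouched.
config = {"a": [3], "b": [1, 2], "c": "unchanged", "d": []}
for k, v in config.items():
    if v and isinstance(v, list) and len(v) == 1:
        config[k] = v[0]
assert config == {"a": 3, "b": [1, 2], "c": "unchanged", "d": []}
```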
raymondEhlers/pachyderm | pachyderm/generic_config.py | determine_selection_of_iterable_values_from_config | def determine_selection_of_iterable_values_from_config(config: DictLike, possible_iterables: Mapping[str, Type[enum.Enum]]) -> Dict[str, List[Any]]:
""" Determine iterable values to use to create objects for a given configuration.
All values of an iterable can be included by setting the value to ``True`` (as the bare value,
not a single-value list). Alternatively, an iterable can be disabled by setting the value to ``False``.
Args:
config: The dict-like configuration from ruamel.yaml which should be overridden.
possible_iterables: Key value pairs of names of enumerations and their values.
Returns:
dict: Iterable values that were requested in the config.
"""
iterables = {}
requested_iterables = config["iterables"]
for k, v in requested_iterables.items():
if k not in possible_iterables:
raise KeyError(k, f"Cannot find requested iterable in possible_iterables: {possible_iterables}")
logger.debug(f"k: {k}, v: {v}")
additional_iterable: List[Any] = []
enum_values = possible_iterables[k]
# Check for a string. This is wrong, and the user should be notified.
if isinstance(v, str):
raise TypeError(type(v), f"Passed string {v} when must be either bool or list")
# Allow the possibility to skip
if v is False:
continue
# Allow the possibility of including all possible values in the enum.
elif v is True:
additional_iterable = list(enum_values)
else:
if enum_values is None:
# The enumeration values are none, which means that we want to take
# all of the values defined in the config.
additional_iterable = list(v)
else:
# Otherwise, only take the requested values.
for el in v:
additional_iterable.append(enum_values[el])
# Store for later
iterables[k] = additional_iterable
return iterables | python | def determine_selection_of_iterable_values_from_config(config: DictLike, possible_iterables: Mapping[str, Type[enum.Enum]]) -> Dict[str, List[Any]]:
""" Determine iterable values to use to create objects for a given configuration.
All values of an iterable can be included by setting the value to ``True`` (as the bare value,
not a single-value list). Alternatively, an iterable can be disabled by setting the value to ``False``.
Args:
config: The dict-like configuration from ruamel.yaml which should be overridden.
possible_iterables: Key value pairs of names of enumerations and their values.
Returns:
dict: Iterable values that were requested in the config.
"""
iterables = {}
requested_iterables = config["iterables"]
for k, v in requested_iterables.items():
if k not in possible_iterables:
raise KeyError(k, f"Cannot find requested iterable in possible_iterables: {possible_iterables}")
logger.debug(f"k: {k}, v: {v}")
additional_iterable: List[Any] = []
enum_values = possible_iterables[k]
# Check for a string. This is wrong, and the user should be notified.
if isinstance(v, str):
raise TypeError(type(v), f"Passed string {v} when must be either bool or list")
# Allow the possibility to skip
if v is False:
continue
# Allow the possibility of including all possible values in the enum.
elif v is True:
additional_iterable = list(enum_values)
else:
if enum_values is None:
# The enumeration values are none, which means that we want to take
# all of the values defined in the config.
additional_iterable = list(v)
else:
# Otherwise, only take the requested values.
for el in v:
additional_iterable.append(enum_values[el])
# Store for later
iterables[k] = additional_iterable
return iterables | [
"def",
"determine_selection_of_iterable_values_from_config",
"(",
"config",
":",
"DictLike",
",",
"possible_iterables",
":",
"Mapping",
"[",
"str",
",",
"Type",
"[",
"enum",
".",
"Enum",
"]",
"]",
")",
"->",
"Dict",
"[",
"str",
",",
"List",
"[",
"Any",
"]",
"]",
":",
"iterables",
"=",
"{",
"}",
"requested_iterables",
"=",
"config",
"[",
"\"iterables\"",
"]",
"for",
"k",
",",
"v",
"in",
"requested_iterables",
".",
"items",
"(",
")",
":",
"if",
"k",
"not",
"in",
"possible_iterables",
":",
"raise",
"KeyError",
"(",
"k",
",",
"f\"Cannot find requested iterable in possible_iterables: {possible_iterables}\"",
")",
"logger",
".",
"debug",
"(",
"f\"k: {k}, v: {v}\"",
")",
"additional_iterable",
":",
"List",
"[",
"Any",
"]",
"=",
"[",
"]",
"enum_values",
"=",
"possible_iterables",
"[",
"k",
"]",
"# Check for a string. This is wrong, and the user should be notified.",
"if",
"isinstance",
"(",
"v",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"type",
"(",
"v",
")",
",",
"f\"Passed string {v} when must be either bool or list\"",
")",
"# Allow the possibility to skip",
"if",
"v",
"is",
"False",
":",
"continue",
"# Allow the possibility to including all possible values in the enum.",
"elif",
"v",
"is",
"True",
":",
"additional_iterable",
"=",
"list",
"(",
"enum_values",
")",
"else",
":",
"if",
"enum_values",
"is",
"None",
":",
"# The enumeration values are none, which means that we want to take",
"# all of the values defined in the config.",
"additional_iterable",
"=",
"list",
"(",
"v",
")",
"else",
":",
"# Otherwise, only take the requested values.",
"for",
"el",
"in",
"v",
":",
"additional_iterable",
".",
"append",
"(",
"enum_values",
"[",
"el",
"]",
")",
"# Store for later",
"iterables",
"[",
"k",
"]",
"=",
"additional_iterable",
"return",
"iterables"
]
| Determine iterable values to use to create objects for a given configuration.
All values of an iterable can be included by setting the value to ``True`` (as the bare value,
not a single-value list). Alternatively, an iterable can be disabled by setting the value to ``False``.
Args:
config: The dict-like configuration from ruamel.yaml which should be overridden.
possible_iterables: Key value pairs of names of enumerations and their values.
Returns:
dict: Iterable values that were requested in the config. | [
"Determine",
"iterable",
"values",
"to",
"use",
"to",
"create",
"objects",
"for",
"a",
"given",
"configuration",
"."
]
| aaa1d8374fd871246290ce76f1796f2f7582b01d | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/generic_config.py#L193-L234 | train |
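A sketch of the three request forms the loop above accepts (``True``, ``False``, or an explicit list), using a hypothetical enum:

```python
import enum

from pachyderm.generic_config import determine_selection_of_iterable_values_from_config

class EventActivity(enum.Enum):  # hypothetical iterable category
    central = 0
    peripheral = 1

config = {
    "iterables": {
        "event_activity": True,     # True -> take every value of the enum
        "collision_energy": False,  # False -> skip this iterable entirely
        "pt_bins": [1, 2, 3],       # mapped to None below, so values pass through as-is
    }
}
possible_iterables = {
    "event_activity": EventActivity,
    "collision_energy": EventActivity,  # stand-in; skipped anyway
    "pt_bins": None,
}
iterables = determine_selection_of_iterable_values_from_config(config, possible_iterables)
# -> {"event_activity": [EventActivity.central, EventActivity.peripheral],
#     "pt_bins": [1, 2, 3]}
```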
raymondEhlers/pachyderm | pachyderm/generic_config.py | _key_index_iter | def _key_index_iter(self) -> Iterator[Tuple[str, Any]]:
""" Allows for iteration over the ``KeyIndex`` values.
This function is intended to be assigned to a newly created KeyIndex class. It enables iteration
over the ``KeyIndex`` names and values. We don't use a mixin to avoid issues with YAML.
Note:
This isn't recursive like ``dataclasses.asdict(...)``. Generally, we don't want those recursive
conversion properties. Plus, this approach is much faster.
"""
for k, v in vars(self).items():
yield k, v | python | def _key_index_iter(self) -> Iterator[Tuple[str, Any]]:
""" Allows for iteration over the ``KeyIndex`` values.
This function is intended to be assigned to a newly created KeyIndex class. It enables iteration
over the ``KeyIndex`` names and values. We don't use a mixin to avoid issues with YAML.
Note:
This isn't recursive like ``dataclasses.asdict(...)``. Generally, we don't want those recursive
conversion properties. Plus, this approach is much faster.
"""
for k, v in vars(self).items():
yield k, v | [
"def",
"_key_index_iter",
"(",
"self",
")",
"->",
"Iterator",
"[",
"Tuple",
"[",
"str",
",",
"Any",
"]",
"]",
":",
"for",
"k",
",",
"v",
"in",
"vars",
"(",
"self",
")",
".",
"items",
"(",
")",
":",
"yield",
"k",
",",
"v"
]
| Allows for iteration over the ``KeyIndex`` values.
This function is intended to be assigned to a newly created KeyIndex class. It enables iteration
over the ``KeyIndex`` names and values. We don't use a mixin to avoid issues with YAML.
Note:
This isn't recursive like ``dataclasses.asdict(...)``. Generally, we don't want those recursive
conversion properties. Plus, this approach is much faster. | [
"Allows",
"for",
"iteration",
"over",
"the",
"KeyIndex",
"values",
"."
]
| aaa1d8374fd871246290ce76f1796f2f7582b01d | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/generic_config.py#L236-L247 | train |
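What assigning this iterator buys you, in a standalone sketch where an ordinary frozen dataclass stands in for a generated ``KeyIndex``:

```python
import dataclasses

def _key_index_iter(self):
    # Same body as the function above: yield (field name, value) pairs.
    for k, v in vars(self).items():
        yield k, v

Point = dataclasses.make_dataclass("Point", [("x", int), ("y", int)], frozen=True)
Point.__iter__ = _key_index_iter  # the assignment create_key_index_object performs below

p = Point(x=1, y=2)
assert dict(p) == {"x": 1, "y": 2}  # the pairs feed straight into dict()
```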
raymondEhlers/pachyderm | pachyderm/generic_config.py | create_key_index_object | def create_key_index_object(key_index_name: str, iterables: Dict[str, Any]) -> Any:
""" Create a ``KeyIndex`` class based on the passed attributes.
This is wrapped into a helper function to allow for the ``__iter__`` to be specified for the object.
Further, this allows it to be called outside the package when it is needed in analysis tasks.
Args:
key_index_name: Name of the iterable key index.
iterables: Iterables which will be specified by this ``KeyIndex``. The keys should be the names of
the values, while the values should be the iterables themselves.
Returns:
A ``KeyIndex`` class which can be used to specify an object. The keys and values will be iterable.
Raises:
TypeError: If one of the iterables which is passed is an iterator that can be exhausted. The iterables
must all be passed within containers which can recreate the iterator each time it is called to
iterate.
"""
# Validation
# We are going to use the iterators when determining the fields, so we need to notify if an iterator was
# passed, as this will cause a problem later. Instead of passing an iterator, an iterable should be passed,
# which can recreate the iterator.
# See: https://effectivepython.com/2015/01/03/be-defensive-when-iterating-over-arguments/
for name, iterable in iterables.items():
if iter(iterable) == iter(iterable):
raise TypeError(
f"Iterable {name} is in iterator which can be exhausted. Please pass the iterable"
f" in a container that can recreate the iterable. See the comments here for more info."
)
# We need the types of the fields to create the dataclass. However, we are provided with iterables
# in the values of the iterables dict. Thus, we need to look at one value of each iterable, and use
# that to determine the type of that particular iterable. This is safe to do because the iterables
# must always have at least one entry (or else they wouldn't be one of the iterables).
# NOTE: The order here matters when we create the ``KeyIndex`` later, so we cannot just take all
# objects from the iterables and blindly use set because set won't preserve the order.
fields = [(name, type(next(iter(iterable)))) for name, iterable in iterables.items()]
KeyIndex = dataclasses.make_dataclass(
key_index_name,
fields,
frozen = True
)
# Allow for iteration over the key index values
KeyIndex.__iter__ = _key_index_iter
return KeyIndex | python | def create_key_index_object(key_index_name: str, iterables: Dict[str, Any]) -> Any:
""" Create a ``KeyIndex`` class based on the passed attributes.
This is wrapped into a helper function to allow for the ``__iter__`` to be specified for the object.
Further, this allows it to be called outside the package when it is needed in analysis tasks.
Args:
key_index_name: Name of the iterable key index.
iterables: Iterables which will be specified by this ``KeyIndex``. The keys should be the names of
the values, while the values should be the iterables themselves.
Returns:
A ``KeyIndex`` class which can be used to specify an object. The keys and values will be iterable.
Raises:
TypeError: If one of the iterables which is passed is an iterator that can be exhausted. The iterables
must all be passed within containers which can recreate the iterator each time it is called to
iterate.
"""
# Validation
# We are going to use the iterators when determining the fields, so we need to notify if an iterator was
# passed, as this will cause a problem later. Instead of passing an iterator, an iterable should be passed,
# which can recreate the iterator.
# See: https://effectivepython.com/2015/01/03/be-defensive-when-iterating-over-arguments/
for name, iterable in iterables.items():
if iter(iterable) == iter(iterable):
raise TypeError(
f"Iterable {name} is in iterator which can be exhausted. Please pass the iterable"
f" in a container that can recreate the iterable. See the comments here for more info."
)
# We need the types of the fields to create the dataclass. However, we are provided with iterables
# in the values of the iterables dict. Thus, we need to look at one value of each iterable, and use
# that to determine the type of that particular iterable. This is safe to do because the iterables
# must always have at least one entry (or else they wouldn't be one of the iterables).
# NOTE: The order here matters when we create the ``KeyIndex`` later, so we cannot just take all
# objects from the iterables and blindly use set because set won't preserve the order.
fields = [(name, type(next(iter(iterable)))) for name, iterable in iterables.items()]
KeyIndex = dataclasses.make_dataclass(
key_index_name,
fields,
frozen = True
)
# Allow for iteration over the key index values
KeyIndex.__iter__ = _key_index_iter
return KeyIndex | [
"def",
"create_key_index_object",
"(",
"key_index_name",
":",
"str",
",",
"iterables",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"Any",
":",
"# Validation",
"# We are going to use the iterators when determining the fields, so we need to notify if an iterator was",
"# passed, as this will cause a problem later. Instead of passing an iterator, a iterable should be passed,",
"# which can recreate the iter.",
"# See: https://effectivepython.com/2015/01/03/be-defensive-when-iterating-over-arguments/",
"for",
"name",
",",
"iterable",
"in",
"iterables",
".",
"items",
"(",
")",
":",
"if",
"iter",
"(",
"iterable",
")",
"==",
"iter",
"(",
"iterable",
")",
":",
"raise",
"TypeError",
"(",
"f\"Iterable {name} is in iterator which can be exhausted. Please pass the iterable\"",
"f\" in a container that can recreate the iterable. See the comments here for more info.\"",
")",
"# We need the types of the fields to create the dataclass. However, we are provided with iterables",
"# in the values of the iterables dict. Thus, we need to look at one value of each iterable, and use",
"# that to determine the type of that particular iterable. This is safe to do because the iterables",
"# must always have at least one entry (or else they wouldn't be one of the iterables).",
"# NOTE: The order here matters when we create the ``KeyIndex`` later, so we cannot just take all",
"# objects from the iterables and blindly use set because set won't preserve the order.",
"fields",
"=",
"[",
"(",
"name",
",",
"type",
"(",
"next",
"(",
"iter",
"(",
"iterable",
")",
")",
")",
")",
"for",
"name",
",",
"iterable",
"in",
"iterables",
".",
"items",
"(",
")",
"]",
"KeyIndex",
"=",
"dataclasses",
".",
"make_dataclass",
"(",
"key_index_name",
",",
"fields",
",",
"frozen",
"=",
"True",
")",
"# Allow for iteration over the key index values",
"KeyIndex",
".",
"__iter__",
"=",
"_key_index_iter",
"return",
"KeyIndex"
]
| Create a ``KeyIndex`` class based on the passed attributes.
This is wrapped into a helper function to allow for the ``__iter__`` to be specified for the object.
Further, this allows it to be called outside the package when it is needed in analysis tasks.
Args:
key_index_name: Name of the iterable key index.
iterables: Iterables which will be specified by this ``KeyIndex``. The keys should be the names of
the values, while the values should be the iterables themselves.
Returns:
A ``KeyIndex`` class which can be used to specify an object. The keys and values will be iterable.
Raises:
TypeError: If one of the iterables which is passed is an iterator that can be exhausted. The iterables
must all be passed within containers which can recreate the iterator each time it is called to
iterate. | [
"Create",
"a",
"KeyIndex",
"class",
"based",
"on",
"the",
"passed",
"attributes",
"."
]
| aaa1d8374fd871246290ce76f1796f2f7582b01d | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/generic_config.py#L249-L293 | train |
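Hypothetical usage, echoing the docstring of ``create_objects_from_iterables`` below; note the iterables must be re-iterable containers, not one-shot iterators:

```python
from pachyderm.generic_config import create_key_index_object

KeyIndex = create_key_index_object(
    key_index_name="KeyIndex",
    iterables={"a": ["a1", "a2"], "b": ["b1", "b2"]},
)
key = KeyIndex(a="a1", b="b2")
assert dict(key) == {"a": "a1", "b": "b2"}  # frozen, hashable, and iterable

# A bare iterator is rejected up front, since inspecting the fields would
# exhaust it:
# create_key_index_object("K", {"a": iter(["a1"])})  # raises TypeError
```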
raymondEhlers/pachyderm | pachyderm/generic_config.py | create_objects_from_iterables | def create_objects_from_iterables(obj, args: dict, iterables: Dict[str, Any], formatting_options: Dict[str, Any], key_index_name: str = "KeyIndex") -> Tuple[Any, Dict[str, Any], dict]:
""" Create objects for each set of values based on the given arguments.
The iterable values are available under a key index ``dataclass`` which is used to index the returned
dictionary. The names of the fields are determined by the keys of iterables dictionary. The values are
the newly created object. Note that the iterable values must be convertible to a str() so they can be
included in the formatting dictionary.
Each set of values is also included in the object args.
As a basic example,
.. code-block:: python
>>> create_objects_from_iterables(
... obj = obj,
... args = {},
... iterables = {"a" : ["a1","a2"], "b" : ["b1", "b2"]},
... formatting_options = {}
... )
(
KeyIndex,
{"a": ["a1", "a2"], "b": ["b1", "b2"]}
{
KeyIndex(a = "a1", b = "b1"): obj(a = "a1", b = "b1"),
KeyIndex(a = "a1", b = "b2"): obj(a = "a1", b = "b2"),
KeyIndex(a = "a2", b = "b1"): obj(a = "a2", b = "b1"),
KeyIndex(a = "a2", b = "b2"): obj(a = "a2", b = "b2"),
}
)
Args:
obj (object): The object to be constructed.
args: Arguments to be passed to the object to create it.
iterables: Iterables to be used to create the objects, with entries of the form
``"name_of_iterable": iterable``.
formatting_options: Values to be used in formatting strings in the arguments.
key_index_name: Name of the iterable key index.
Returns:
(object, dict, dict): Roughly, (KeyIndex, iterables, objects). Specifically, the
key_index is a new dataclass which defines the parameters used to create the object, iterables
are the iterables used to create the objects, with names as keys and the iterables as values.
The objects dictionary keys are KeyIndex objects which describe the iterable arguments passed to the
object, while the values are the newly constructed arguments. See the example above.
"""
# Setup
objects = {}
names = list(iterables)
logger.debug(f"iterables: {iterables}")
# Create the key index object, where the name of each field is the name of each iterable.
KeyIndex = create_key_index_object(
key_index_name = key_index_name,
iterables = iterables,
)
# ``itertools.product`` produces the Cartesian product of the iterables' values.
# NOTE: Product preserves the order of the iterables' values, which is important for properly
# assigning the values to the ``KeyIndex``.
for values in itertools.product(*iterables.values()):
logger.debug(f"Values: {values}")
# Skip if we don't have a sufficient set of values to create an object.
if not values:
continue
# Add in the values into the arguments and formatting options.
# NOTE: We don't need a deep copy for the iterable values in the args and formatting options
# because the values will be overwritten for each object.
for name, val in zip(names, values):
# We want to keep the original value for the arguments.
args[name] = val
# Here, we convert the value, regardless of type, into a string that can be displayed.
formatting_options[name] = str(val)
# Apply formatting options
# If we formatted in place, we would need to deepcopy the args to ensure that the iterable dependent
# values in the formatted values are properly set for each iterable object individually.
# However, by formatting into new variables, we can avoid a deepcopy, which greatly improves performance!
# NOTE: We don't need to do this for the iterable value names themselves because they will be overwritten
# for each object. They are set in the block above.
object_args = copy.copy(args)
logger.debug(f"object_args pre format: {object_args}")
object_args = apply_formatting_dict(object_args, formatting_options)
# Print our results for debugging purposes. However, we skip printing the full
# config because it is quite long
print_args = {k: v for k, v in object_args.items() if k != "config"}
print_args["config"] = "..."
logger.debug(f"Constructing obj \"{obj}\" with args: \"{print_args}\"")
# Finally create the object.
objects[KeyIndex(*values)] = obj(**object_args)
# If nothing has been created at this point, then we didn't iterate over anything and something
# has gone wrong.
if not objects:
raise ValueError(iterables, "There appear to be no iterables to use in creating objects.")
return (KeyIndex, iterables, objects) | python | def create_objects_from_iterables(obj, args: dict, iterables: Dict[str, Any], formatting_options: Dict[str, Any], key_index_name: str = "KeyIndex") -> Tuple[Any, Dict[str, Any], dict]:
""" Create objects for each set of values based on the given arguments.
The iterable values are available under a key index ``dataclass`` which is used to index the returned
dictionary. The names of the fields are determined by the keys of iterables dictionary. The values are
the newly created object. Note that the iterable values must be convertible to a str() so they can be
included in the formatting dictionary.
Each set of values is also included in the object args.
As a basic example,
.. code-block:: python
>>> create_objects_from_iterables(
... obj = obj,
... args = {},
... iterables = {"a" : ["a1","a2"], "b" : ["b1", "b2"]},
... formatting_options = {}
... )
(
KeyIndex,
{"a": ["a1", "a2"], "b": ["b1", "b2"]}
{
KeyIndex(a = "a1", b = "b1"): obj(a = "a1", b = "b1"),
KeyIndex(a = "a1", b = "b2"): obj(a = "a1", b = "b2"),
KeyIndex(a = "a2", b = "b1"): obj(a = "a2", b = "b1"),
KeyIndex(a = "a2", b = "b2"): obj(a = "a2", b = "b2"),
}
)
Args:
obj (object): The object to be constructed.
args: Arguments to be passed to the object to create it.
iterables: Iterables to be used to create the objects, with entries of the form
``"name_of_iterable": iterable``.
formatting_options: Values to be used in formatting strings in the arguments.
key_index_name: Name of the iterable key index.
Returns:
(object, dict, dict): Roughly, (KeyIndex, iterables, objects). Specifically, the
key_index is a new dataclass which defines the parameters used to create the object, iterables
are the iterables used to create the objects, with names as keys and the iterables as values.
The objects dictionary keys are KeyIndex objects which describe the iterable arguments passed to the
object, while the values are the newly constructed arguments. See the example above.
"""
# Setup
objects = {}
names = list(iterables)
logger.debug(f"iterables: {iterables}")
# Create the key index object, where the name of each field is the name of each iterable.
KeyIndex = create_key_index_object(
key_index_name = key_index_name,
iterables = iterables,
)
# ``itertools.product`` produces the Cartesian product of the iterables' values.
# NOTE: Product preserves the order of the iterables' values, which is important for properly
# assigning the values to the ``KeyIndex``.
for values in itertools.product(*iterables.values()):
logger.debug(f"Values: {values}")
# Skip if we don't have a sufficient set of values to create an object.
if not values:
continue
# Add in the values into the arguments and formatting options.
# NOTE: We don't need a deep copy for the iterable values in the args and formatting options
# because the values will be overwritten for each object.
for name, val in zip(names, values):
# We want to keep the original value for the arguments.
args[name] = val
# Here, we convert the value, regardless of type, into a string that can be displayed.
formatting_options[name] = str(val)
# Apply formatting options
# If we formatted in place, we would need to deepcopy the args to ensure that the iterable dependent
# values in the formatted values are properly set for each iterable object individually.
# However, by formatting into new variables, we can avoid a deepcopy, which greatly improves performance!
# NOTE: We don't need to do this for the iterable value names themselves because they will be overwritten
# for each object. They are set in the block above.
object_args = copy.copy(args)
logger.debug(f"object_args pre format: {object_args}")
object_args = apply_formatting_dict(object_args, formatting_options)
# Print our results for debugging purposes. However, we skip printing the full
# config because it is quite long
print_args = {k: v for k, v in object_args.items() if k != "config"}
print_args["config"] = "..."
logger.debug(f"Constructing obj \"{obj}\" with args: \"{print_args}\"")
# Finally create the object.
objects[KeyIndex(*values)] = obj(**object_args)
# If nothing has been created at this point, then we didn't iterate over anything and something
# has gone wrong.
if not objects:
raise ValueError(iterables, "There appear to be no iterables to use in creating objects.")
return (KeyIndex, iterables, objects) | [
"def",
"create_objects_from_iterables",
"(",
"obj",
",",
"args",
":",
"dict",
",",
"iterables",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
",",
"formatting_options",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
",",
"key_index_name",
":",
"str",
"=",
"\"KeyIndex\"",
")",
"->",
"Tuple",
"[",
"Any",
",",
"Dict",
"[",
"str",
",",
"Any",
"]",
",",
"dict",
"]",
":",
"# Setup",
"objects",
"=",
"{",
"}",
"names",
"=",
"list",
"(",
"iterables",
")",
"logger",
".",
"debug",
"(",
"f\"iterables: {iterables}\"",
")",
"# Create the key index object, where the name of each field is the name of each iterable.",
"KeyIndex",
"=",
"create_key_index_object",
"(",
"key_index_name",
"=",
"key_index_name",
",",
"iterables",
"=",
"iterables",
",",
")",
"# ``itertools.product`` produces all possible permutations of the iterables values.",
"# NOTE: Product preserves the order of the iterables values, which is important for properly",
"# assigning the values to the ``KeyIndex``.",
"for",
"values",
"in",
"itertools",
".",
"product",
"(",
"*",
"iterables",
".",
"values",
"(",
")",
")",
":",
"logger",
".",
"debug",
"(",
"f\"Values: {values}\"",
")",
"# Skip if we don't have a sufficient set of values to create an object.",
"if",
"not",
"values",
":",
"continue",
"# Add in the values into the arguments and formatting options.",
"# NOTE: We don't need a deep copy for the iterable values in the args and formatting options",
"# because the values will be overwritten for each object.",
"for",
"name",
",",
"val",
"in",
"zip",
"(",
"names",
",",
"values",
")",
":",
"# We want to keep the original value for the arguments.",
"args",
"[",
"name",
"]",
"=",
"val",
"# Here, we convert the value, regardless of type, into a string that can be displayed.",
"formatting_options",
"[",
"name",
"]",
"=",
"str",
"(",
"val",
")",
"# Apply formatting options",
"# If we formatted in place, we would need to deepcopy the args to ensure that the iterable dependent",
"# values in the formatted values are properly set for each iterable object individually.",
"# However, by formatting into new variables, we can avoid a deepcopy, which greatly improves performance!",
"# NOTE: We don't need a deep copy do this for iterable value names themselves because they will be overwritten",
"# for each object. They are set in the block above.",
"object_args",
"=",
"copy",
".",
"copy",
"(",
"args",
")",
"logger",
".",
"debug",
"(",
"f\"object_args pre format: {object_args}\"",
")",
"object_args",
"=",
"apply_formatting_dict",
"(",
"object_args",
",",
"formatting_options",
")",
"# Print our results for debugging purposes. However, we skip printing the full",
"# config because it is quite long",
"print_args",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"object_args",
".",
"items",
"(",
")",
"if",
"k",
"!=",
"\"config\"",
"}",
"print_args",
"[",
"\"config\"",
"]",
"=",
"\"...\"",
"logger",
".",
"debug",
"(",
"f\"Constructing obj \\\"{obj}\\\" with args: \\\"{print_args}\\\"\"",
")",
"# Finally create the object.",
"objects",
"[",
"KeyIndex",
"(",
"*",
"values",
")",
"]",
"=",
"obj",
"(",
"*",
"*",
"object_args",
")",
"# If nothing has been created at this point, then we are didn't iterating over anything and something",
"# has gone wrong.",
"if",
"not",
"objects",
":",
"raise",
"ValueError",
"(",
"iterables",
",",
"\"There appear to be no iterables to use in creating objects.\"",
")",
"return",
"(",
"KeyIndex",
",",
"iterables",
",",
"objects",
")"
]
| Create objects for each set of values based on the given arguments.
The iterable values are available under a key index ``dataclass`` which is used to index the returned
dictionary. The names of the fields are determined by the keys of iterables dictionary. The values are
the newly created object. Note that the iterable values must be convertible to a str() so they can be
included in the formatting dictionary.
Each set of values is also included in the object args.
As a basic example,
.. code-block:: python
>>> create_objects_from_iterables(
... obj = obj,
... args = {},
... iterables = {"a" : ["a1","a2"], "b" : ["b1", "b2"]},
... formatting_options = {}
... )
(
KeyIndex,
{"a": ["a1", "a2"], "b": ["b1", "b2"]}
{
KeyIndex(a = "a1", b = "b1"): obj(a = "a1", b = "b1"),
KeyIndex(a = "a1", b = "b2"): obj(a = "a1", b = "b2"),
KeyIndex(a = "a2", b = "b1"): obj(a = "a2", b = "b1"),
KeyIndex(a = "a2", b = "b2"): obj(a = "a2", b = "b2"),
}
)
Args:
obj (object): The object to be constructed.
args: Arguments to be passed to the object to create it.
iterables: Iterables to be used to create the objects, with entries of the form
``"name_of_iterable": iterable``.
formatting_options: Values to be used in formatting strings in the arguments.
key_index_name: Name of the iterable key index.
Returns:
(object, dict, dict): Roughly, (KeyIndex, iterables, objects). Specifically, the
key_index is a new dataclass which defines the parameters used to create the object, iterables
are the iterables used to create the objects, with names as keys and the iterables as values.
The objects dictionary keys are KeyIndex objects which describe the iterable arguments passed to the
object, while the values are the newly constructed objects. See the example above. | [
"Create",
"objects",
"for",
"each",
"set",
"of",
"values",
"based",
"on",
"the",
"given",
"arguments",
"."
]
| aaa1d8374fd871246290ce76f1796f2f7582b01d | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/generic_config.py#L295-L390 | train |
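A minimal usage sketch of ``create_objects_from_iterables``. The ``Analysis`` dataclass is a hypothetical stand-in for a real analysis class, and the final lookup assumes (as its use as a dictionary key implies) that the generated key index is hashable with value equality.
.. code-block:: python
from dataclasses import dataclass
@dataclass
class Analysis:  # hypothetical stand-in for a real analysis class
    a: str
    b: str
# One Analysis per (a, b) combination; args and formatting start empty here.
KeyIndex, iterables, objects = create_objects_from_iterables(
    obj=Analysis,
    args={},
    iterables={"a": ["a1", "a2"], "b": ["b1", "b2"]},
    formatting_options={},
)
assert objects[KeyIndex(a="a1", b="b2")] == Analysis(a="a1", b="b2")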
raymondEhlers/pachyderm | pachyderm/generic_config.py | apply_formatting_dict | def apply_formatting_dict(obj: Any, formatting: Dict[str, Any]) -> Any:
""" Recursively apply a formatting dict to all strings in a configuration.
Note that it skips applying the formatting if the string appears to contain latex (specifically,
if it contains an "$"), since the formatting fails on nested brackets.
Args:
obj: Some configuration object to recursively apply the formatting to.
formatting (dict): String formatting options to apply to each configuration field.
Returns:
dict: Configuration with formatting applied to every field.
"""
#logger.debug("Processing object of type {}".format(type(obj)))
new_obj = obj
if isinstance(obj, str):
# Apply the formatting options to the string.
# We explicitly allow for missing keys. They will be kept so they can be filled later.
# see: https://stackoverflow.com/a/17215533
# If a more sophisticated solution is needed,
# see: https://ashwch.github.io/handling-missing-keys-in-str-format-map.html
# Note that we can't use format_map because it is python 3.2+ only.
# The solution below works in py 2/3
if "$" not in obj:
new_obj = string.Formatter().vformat(obj, (), formatting_dict(**formatting))
#else:
# logger.debug("Skipping str {} since it appears to be a latex string, which may break the formatting.".format(obj))
elif isinstance(obj, dict):
new_obj = {}
for k, v in obj.items():
# Using indirect access to ensure that the original object is updated.
new_obj[k] = apply_formatting_dict(v, formatting)
elif isinstance(obj, list):
new_obj = []
for i, el in enumerate(obj):
# Using indirect access to ensure that the original object is updated.
new_obj.append(apply_formatting_dict(el, formatting))
elif isinstance(obj, int) or isinstance(obj, float) or obj is None:
# Skip over this, as there is nothing to be done - we just keep the value.
pass
elif isinstance(obj, enum.Enum):
# Skip over this, as there is nothing to be done - we just keep the value.
# This only occurs when a formatting value has already been transformed
# into an enumeration.
pass
else:
# This may or may not be expected, depending on the particular value.
logger.debug(f"Unrecognized obj '{obj}' of type '{type(obj)}'")
return new_obj | python | def apply_formatting_dict(obj: Any, formatting: Dict[str, Any]) -> Any:
""" Recursively apply a formatting dict to all strings in a configuration.
Note that it skips applying the formatting if the string appears to contain latex (specifically,
if it contains an "$"), since the formatting fails on nested brackets.
Args:
obj: Some configuration object to recursively apply the formatting to.
formatting (dict): String formatting options to apply to each configuration field.
Returns:
dict: Configuration with formatting applied to every field.
"""
#logger.debug("Processing object of type {}".format(type(obj)))
new_obj = obj
if isinstance(obj, str):
# Apply the formatting options to the string.
# We explicitly allow for missing keys. They will be kept so they can be filled later.
# see: https://stackoverflow.com/a/17215533
# If a more sophisticated solution is needed,
# see: https://ashwch.github.io/handling-missing-keys-in-str-format-map.html
# Note that we can't use format_map because it is python 3.2+ only.
# The solution below works in py 2/3
if "$" not in obj:
new_obj = string.Formatter().vformat(obj, (), formatting_dict(**formatting))
#else:
# logger.debug("Skipping str {} since it appears to be a latex string, which may break the formatting.".format(obj))
elif isinstance(obj, dict):
new_obj = {}
for k, v in obj.items():
# Using indirect access to ensure that the original object is updated.
new_obj[k] = apply_formatting_dict(v, formatting)
elif isinstance(obj, list):
new_obj = []
for i, el in enumerate(obj):
# Using indirect access to ensure that the original object is updated.
new_obj.append(apply_formatting_dict(el, formatting))
elif isinstance(obj, int) or isinstance(obj, float) or obj is None:
# Skip over this, as there is nothing to be done - we just keep the value.
pass
elif isinstance(obj, enum.Enum):
# Skip over this, as there is nothing to be done - we just keep the value.
# This only occurs when a formatting value has already been transformed
# into an enumeration.
pass
else:
# This may or may not be expected, depending on the particular value.
logger.debug(f"Unrecognized obj '{obj}' of type '{type(obj)}'")
return new_obj | [
"def",
"apply_formatting_dict",
"(",
"obj",
":",
"Any",
",",
"formatting",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"Any",
":",
"#logger.debug(\"Processing object of type {}\".format(type(obj)))",
"new_obj",
"=",
"obj",
"if",
"isinstance",
"(",
"obj",
",",
"str",
")",
":",
"# Apply the formatting options to the string.",
"# We explicitly allow for missing keys. They will be kept so they can be filled later.",
"# see: https://stackoverflow.com/a/17215533",
"# If a more sophisticated solution is needed,",
"# see: https://ashwch.github.io/handling-missing-keys-in-str-format-map.html",
"# Note that we can't use format_map because it is python 3.2+ only.",
"# The solution below works in py 2/3",
"if",
"\"$\"",
"not",
"in",
"obj",
":",
"new_obj",
"=",
"string",
".",
"Formatter",
"(",
")",
".",
"vformat",
"(",
"obj",
",",
"(",
")",
",",
"formatting_dict",
"(",
"*",
"*",
"formatting",
")",
")",
"#else:",
"# logger.debug(\"Skipping str {} since it appears to be a latex string, which may break the formatting.\".format(obj))",
"elif",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"new_obj",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"obj",
".",
"items",
"(",
")",
":",
"# Using indirect access to ensure that the original object is updated.",
"new_obj",
"[",
"k",
"]",
"=",
"apply_formatting_dict",
"(",
"v",
",",
"formatting",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"list",
")",
":",
"new_obj",
"=",
"[",
"]",
"for",
"i",
",",
"el",
"in",
"enumerate",
"(",
"obj",
")",
":",
"# Using indirect access to ensure that the original object is updated.",
"new_obj",
".",
"append",
"(",
"apply_formatting_dict",
"(",
"el",
",",
"formatting",
")",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"int",
")",
"or",
"isinstance",
"(",
"obj",
",",
"float",
")",
"or",
"obj",
"is",
"None",
":",
"# Skip over this, as there is nothing to be done - we just keep the value.",
"pass",
"elif",
"isinstance",
"(",
"obj",
",",
"enum",
".",
"Enum",
")",
":",
"# Skip over this, as there is nothing to be done - we just keep the value.",
"# This only occurs when a formatting value has already been transformed",
"# into an enumeration.",
"pass",
"else",
":",
"# This may or may not be expected, depending on the particular value.",
"logger",
".",
"debug",
"(",
"f\"Unrecognized obj '{obj}' of type '{type(obj)}'\"",
")",
"return",
"new_obj"
]
| Recursively apply a formatting dict to all strings in a configuration.
Note that it skips applying the formatting if the string appears to contain latex (specifically,
if it contains an "$"), since the formatting fails on nested brackets.
Args:
obj: Some configuration object to recursively apply the formatting to.
formatting (dict): String formatting options to apply to each configuration field.
Returns:
dict: Configuration with formatting applied to every field. | [
"Recursively",
"apply",
"a",
"formatting",
"dict",
"to",
"all",
"strings",
"in",
"a",
"configuration",
"."
]
| aaa1d8374fd871246290ce76f1796f2f7582b01d | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/generic_config.py#L400-L449 | train |
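An illustrative call of ``apply_formatting_dict``; it assumes the module-level ``formatting_dict`` helper keeps unknown placeholders intact, as the comments above describe, and the config values are invented.
.. code-block:: python
config = {
    "title": "Jet {jet_pt} GeV",
    "label": r"$p_{T}$ of {jet_pt}",  # contains "$": left untouched
    "cuts": ["{jet_pt}", 5.0, None],
    "later": "{unknown_key}",  # missing keys survive for a later pass
}
result = apply_formatting_dict(config, {"jet_pt": 20})
assert result["title"] == "Jet 20 GeV"
assert result["label"] == r"$p_{T}$ of {jet_pt}"
assert result["cuts"] == ["20", 5.0, None]
assert result["later"] == "{unknown_key}"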
raymondEhlers/pachyderm | pachyderm/generic_config.py | iterate_with_selected_objects | def iterate_with_selected_objects(analysis_objects: Mapping[Any, Any], **selections: Mapping[str, Any]) -> Iterator[Tuple[Any, Any]]:
""" Iterate over an analysis dictionary with selected attributes.
Args:
analysis_objects: Analysis objects dictionary.
selections: Keyword arguments used to select attributes from the analysis dictionary.
Yields:
object: Matching analysis object.
"""
for key_index, obj in analysis_objects.items():
# If selections is empty, we return every object. If it's not empty, then we only want to return
# objects which are selected through the selections.
selected_obj = not selections or all([getattr(key_index, selector) == selected_value for selector, selected_value in selections.items()])
if selected_obj:
yield key_index, obj | python | def iterate_with_selected_objects(analysis_objects: Mapping[Any, Any], **selections: Mapping[str, Any]) -> Iterator[Tuple[Any, Any]]:
""" Iterate over an analysis dictionary with selected attributes.
Args:
analysis_objects: Analysis objects dictionary.
selections: Keyword arguments used to select attributes from the analysis dictionary.
Yields:
object: Matching analysis object.
"""
for key_index, obj in analysis_objects.items():
# If selections is empty, we return every object. If it's not empty, then we only want to return
# objects which are selected through the selections.
selected_obj = not selections or all([getattr(key_index, selector) == selected_value for selector, selected_value in selections.items()])
if selected_obj:
yield key_index, obj | [
"def",
"iterate_with_selected_objects",
"(",
"analysis_objects",
":",
"Mapping",
"[",
"Any",
",",
"Any",
"]",
",",
"*",
"*",
"selections",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"Iterator",
"[",
"Tuple",
"[",
"Any",
",",
"Any",
"]",
"]",
":",
"for",
"key_index",
",",
"obj",
"in",
"analysis_objects",
".",
"items",
"(",
")",
":",
"# If selections is empty, we return every object. If it's not empty, then we only want to return",
"# objects which are selected in through the selections.",
"selected_obj",
"=",
"not",
"selections",
"or",
"all",
"(",
"[",
"getattr",
"(",
"key_index",
",",
"selector",
")",
"==",
"selected_value",
"for",
"selector",
",",
"selected_value",
"in",
"selections",
".",
"items",
"(",
")",
"]",
")",
"if",
"selected_obj",
":",
"yield",
"key_index",
",",
"obj"
]
| Iterate over an analysis dictionary with selected attributes.
Args:
analysis_objects: Analysis objects dictionary.
selections: Keyword arguments used to select attributes from the analysis dictionary.
Yields:
object: Matching analysis object. | [
"Iterate",
"over",
"an",
"analysis",
"dictionary",
"with",
"selected",
"attributes",
"."
]
| aaa1d8374fd871246290ce76f1796f2f7582b01d | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/generic_config.py#L451-L466 | train |
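A small self-contained example of ``iterate_with_selected_objects``; the frozen ``KeyIndex`` dataclass is hypothetical, mimicking the key index produced by ``create_objects_from_iterables``.
.. code-block:: python
from dataclasses import dataclass
@dataclass(frozen=True)
class KeyIndex:  # hypothetical key index; frozen so it is hashable
    a: str
    b: str
analysis_objects = {
    KeyIndex("a1", "b1"): "obj_11",
    KeyIndex("a1", "b2"): "obj_12",
    KeyIndex("a2", "b1"): "obj_21",
}
# Only objects whose key index matches a == "a1" are yielded.
selected = list(iterate_with_selected_objects(analysis_objects, a="a1"))
assert [obj for _, obj in selected] == ["obj_11", "obj_12"]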
raymondEhlers/pachyderm | pachyderm/generic_config.py | iterate_with_selected_objects_in_order | def iterate_with_selected_objects_in_order(analysis_objects: Mapping[Any, Any],
analysis_iterables: Dict[str, Sequence[Any]],
selection: Union[str, Sequence[str]]) -> Iterator[List[Tuple[Any, Any]]]:
""" Iterate over an analysis dictionary, yielding the selected attributes in order.
So if there are three iterables, a, b, and c, and we select c, then we iterate over a and b,
and return c in the same order each time for each set of values of a and b. As an example, consider
the set of iterables:
.. code-block:: python
>>> a = ["a1", "a2"]
>>> b = ["b1", "b2"]
>>> c = ["c1", "c2"]
then it will effectively return:
.. code-block:: python
>>> for a_val in a:
... for b_val in b:
... for c_val in c:
... obj(a_val, b_val, c_val)
This will yield:
.. code-block:: python
>>> output = list(iterate_with_selected_objects_in_order(..., selection = ["a"]))
[[("a1", "b1", "c1"), ("a2", "b1", "c1")], [("a1", "b2", "c1"), ("a2", "b2", "c1")], ...]
This is particularly nice because we can then select on a set of iterables to be returned without
having to specify the rest of the iterables that we don't really care about.
Args:
analysis_objects: Analysis objects dictionary.
analysis_iterables: Iterables used in constructing the analysis objects.
selection: Selection of analysis selections to return. Can be either a string or a sequence of
selections.
Yields:
object: Matching analysis object.
"""
# Validation
if isinstance(selection, str):
selection = [selection]
# Help out mypy. We don't check if it is a list to allow for other sequences.
assert not isinstance(selection, str)
# We don't want to impact the original analysis iterables when we pop some values below.
analysis_iterables = copy.copy(analysis_iterables)
# Extract the selected iterators from the possible iterators so we can select on them later.
# First, we want each set of iterators to be of the form:
# {"selection1": [value1, value2, ...], "selection2": [value3, value4, ...]}
selected_iterators = {}
for s in selection:
selected_iterators[s] = analysis_iterables.pop(s)
logger.debug(f"Initial analysis_iterables: {analysis_iterables}")
logger.debug(f"Initial selected_iterators: {selected_iterators}")
# Now, we convert them to the form:
# [[("selection1", value1), ("selection1", value2)], [("selection2", value3), ("selection2", value4)]]
# This allows them to be iterated over conveniently via itertools.product(...)
selected_iterators = [[(k, v) for v in values] for k, values in selected_iterators.items()] # type: ignore
analysis_iterables = [[(k, v) for v in values] for k, values in analysis_iterables.items()] # type: ignore
logger.debug(f"Final analysis_iterables: {analysis_iterables}")
logger.debug(f"Final selected_iterators: {selected_iterators}")
# Useful debug information, but too verbose for standard usage.
#logger.debug(f"analysis_iterables product: {list(itertools.product(*analysis_iterables))}")
#logger.debug(f"selected_iterators product: {list(itertools.product(*selected_iterators))}")
for values in itertools.product(*analysis_iterables):
selected_analysis_objects = []
for selected_values in itertools.product(*selected_iterators):
for key_index, obj in analysis_objects.items():
selected_via_analysis_iterables = all(
getattr(key_index, k) == v for k, v in values
)
selected_via_selected_iterators = all(
getattr(key_index, k) == v for k, v in selected_values
)
selected_obj = selected_via_analysis_iterables and selected_via_selected_iterators
if selected_obj:
selected_analysis_objects.append((key_index, obj))
logger.debug(f"Yielding: {selected_analysis_objects}")
yield selected_analysis_objects | python | def iterate_with_selected_objects_in_order(analysis_objects: Mapping[Any, Any],
analysis_iterables: Dict[str, Sequence[Any]],
selection: Union[str, Sequence[str]]) -> Iterator[List[Tuple[Any, Any]]]:
""" Iterate over an analysis dictionary, yielding the selected attributes in order.
So if there are three iterables, a, b, and c, and we select c, then we iterate over a and b,
and return c in the same order each time for each set of values of a and b. As an example, consider
the set of iterables:
.. code-block:: python
>>> a = ["a1", "a2"]
>>> b = ["b1", "b2"]
>>> c = ["c1", "c2"]
then it will effectively return:
.. code-block:: python
>>> for a_val in a:
... for b_val in b:
... for c_val in c:
... obj(a_val, b_val, c_val)
This will yield:
.. code-block:: python
>>> output = list(iterate_with_selected_objects_in_order(..., selection = ["a"]))
[[("a1", "b1", "c1"), ("a2", "b1", "c1")], [("a1", "b2", "c1"), ("a2", "b2", "c1")], ...]
This is particularly nice because we can then select on a set of iterables to be returned without
having to specify the rest of the iterables that we don't really care about.
Args:
analysis_objects: Analysis objects dictionary.
analysis_iterables: Iterables used in constructing the analysis objects.
selection: Selection of analysis selections to return. Can be either a string or a sequence of
selections.
Yields:
object: Matching analysis object.
"""
# Validation
if isinstance(selection, str):
selection = [selection]
# Help out mypy. We don't check if it is a list to allow for other sequences.
assert not isinstance(selection, str)
# We don't want to impact the original analysis iterables when we pop some values below.
analysis_iterables = copy.copy(analysis_iterables)
# Extract the selected iterators from the possible iterators so we can select on them later.
# First, we want each set of iterators to be of the form:
# {"selection1": [value1, value2, ...], "selection2": [value3, value4, ...]}
selected_iterators = {}
for s in selection:
selected_iterators[s] = analysis_iterables.pop(s)
logger.debug(f"Initial analysis_iterables: {analysis_iterables}")
logger.debug(f"Initial selected_iterators: {selected_iterators}")
# Now, we convert them to the form:
# [[("selection1", value1), ("selection1", value2)], [("selection2", value3), ("selection2", value4)]]
# This allows them to be iterated over conveniently via itertools.product(...)
selected_iterators = [[(k, v) for v in values] for k, values in selected_iterators.items()] # type: ignore
analysis_iterables = [[(k, v) for v in values] for k, values in analysis_iterables.items()] # type: ignore
logger.debug(f"Final analysis_iterables: {analysis_iterables}")
logger.debug(f"Final selected_iterators: {selected_iterators}")
# Useful debug information, but too verbose for standard usage.
#logger.debug(f"analysis_iterables product: {list(itertools.product(*analysis_iterables))}")
#logger.debug(f"selected_iterators product: {list(itertools.product(*selected_iterators))}")
for values in itertools.product(*analysis_iterables):
selected_analysis_objects = []
for selected_values in itertools.product(*selected_iterators):
for key_index, obj in analysis_objects.items():
selected_via_analysis_iterables = all(
getattr(key_index, k) == v for k, v in values
)
selected_via_selected_iterators = all(
getattr(key_index, k) == v for k, v in selected_values
)
selected_obj = selected_via_analysis_iterables and selected_via_selected_iterators
if selected_obj:
selected_analysis_objects.append((key_index, obj))
logger.debug(f"Yielding: {selected_analysis_objects}")
yield selected_analysis_objects | [
"def",
"iterate_with_selected_objects_in_order",
"(",
"analysis_objects",
":",
"Mapping",
"[",
"Any",
",",
"Any",
"]",
",",
"analysis_iterables",
":",
"Dict",
"[",
"str",
",",
"Sequence",
"[",
"Any",
"]",
"]",
",",
"selection",
":",
"Union",
"[",
"str",
",",
"Sequence",
"[",
"str",
"]",
"]",
")",
"->",
"Iterator",
"[",
"List",
"[",
"Tuple",
"[",
"Any",
",",
"Any",
"]",
"]",
"]",
":",
"# Validation",
"if",
"isinstance",
"(",
"selection",
",",
"str",
")",
":",
"selection",
"=",
"[",
"selection",
"]",
"# Help out mypy. We don't check if it is a list to allow for other sequences.",
"assert",
"not",
"isinstance",
"(",
"selection",
",",
"str",
")",
"# We don't want to impact the original analysis iterables when we pop some values below.",
"analysis_iterables",
"=",
"copy",
".",
"copy",
"(",
"analysis_iterables",
")",
"# Extract the selected iterators from the possible iterators so we can select on them later.",
"# First, we want want each set of iterators to be of the form:",
"# {\"selection1\": [value1, value2, ...], \"selection2\": [value3, value4, ...]}",
"selected_iterators",
"=",
"{",
"}",
"for",
"s",
"in",
"selection",
":",
"selected_iterators",
"[",
"s",
"]",
"=",
"analysis_iterables",
".",
"pop",
"(",
"s",
")",
"logger",
".",
"debug",
"(",
"f\"Initial analysis_iterables: {analysis_iterables}\"",
")",
"logger",
".",
"debug",
"(",
"f\"Initial selected_iterators: {selected_iterators}\"",
")",
"# Now, we convert them to the form:",
"# [[(\"selection1\", value1), (\"selection1\", value2)], [(\"selection2\", value3), (\"selection2\", value4)]]",
"# This allows them to iterated over conveniently via itertools.product(...)",
"selected_iterators",
"=",
"[",
"[",
"(",
"k",
",",
"v",
")",
"for",
"v",
"in",
"values",
"]",
"for",
"k",
",",
"values",
"in",
"selected_iterators",
".",
"items",
"(",
")",
"]",
"# type: ignore",
"analysis_iterables",
"=",
"[",
"[",
"(",
"k",
",",
"v",
")",
"for",
"v",
"in",
"values",
"]",
"for",
"k",
",",
"values",
"in",
"analysis_iterables",
".",
"items",
"(",
")",
"]",
"# type: ignore",
"logger",
".",
"debug",
"(",
"f\"Final analysis_iterables: {analysis_iterables}\"",
")",
"logger",
".",
"debug",
"(",
"f\"Final selected_iterators: {selected_iterators}\"",
")",
"# Useful debug information, but too verbose for standard usage.",
"#logger.debug(f\"analysis_iterables product: {list(itertools.product(*analysis_iterables))}\")",
"#logger.debug(f\"selected_iterators product: {list(itertools.product(*selected_iterators))}\")",
"for",
"values",
"in",
"itertools",
".",
"product",
"(",
"*",
"analysis_iterables",
")",
":",
"selected_analysis_objects",
"=",
"[",
"]",
"for",
"selected_values",
"in",
"itertools",
".",
"product",
"(",
"*",
"selected_iterators",
")",
":",
"for",
"key_index",
",",
"obj",
"in",
"analysis_objects",
".",
"items",
"(",
")",
":",
"selected_via_analysis_iterables",
"=",
"all",
"(",
"getattr",
"(",
"key_index",
",",
"k",
")",
"==",
"v",
"for",
"k",
",",
"v",
"in",
"values",
")",
"selected_via_selected_iterators",
"=",
"all",
"(",
"getattr",
"(",
"key_index",
",",
"k",
")",
"==",
"v",
"for",
"k",
",",
"v",
"in",
"selected_values",
")",
"selected_obj",
"=",
"selected_via_analysis_iterables",
"and",
"selected_via_selected_iterators",
"if",
"selected_obj",
":",
"selected_analysis_objects",
".",
"append",
"(",
"(",
"key_index",
",",
"obj",
")",
")",
"logger",
".",
"debug",
"(",
"f\"Yielding: {selected_analysis_objects}\"",
")",
"yield",
"selected_analysis_objects"
]
| Iterate over an analysis dictionary, yielding the selected attributes in order.
So if there are three iterables, a, b, and c, and we select c, then we iterate over a and b,
and return c in the same order each time for each set of values of a and b. As an example, consider
the set of iterables:
.. code-block:: python
>>> a = ["a1", "a2"]
>>> b = ["b1", "b2"]
>>> c = ["c1", "c2"]
then it will effectively return:
.. code-block:: python
>>> for a_val in a:
... for b_val in b:
... for c_val in c:
... obj(a_val, b_val, c_val)
This will yield:
.. code-block:: python
>>> output = list(iterate_with_selected_objects_in_order(..., selection = ["a"]))
[[("a1", "b1", "c1"), ("a2", "b1", "c1")], [("a1", "b2", "c1"), ("a2", "b2", "c1")], ...]
This is particularly nice because we can then select on a set of iterables to be returned without
having to specify the rest of the iterables that we don't really care about.
Args:
analysis_objects: Analysis objects dictionary.
analysis_iterables: Iterables used in constructing the analysis objects.
selection: Selection of analysis selections to return. Can be either a string or a sequence of
selections.
Yields:
object: Matching analysis object. | [
"Iterate",
"over",
"an",
"analysis",
"dictionary",
"yielding",
"the",
"selected",
"attributes",
"in",
"order",
"."
]
| aaa1d8374fd871246290ce76f1796f2f7582b01d | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/generic_config.py#L468-L556 | train |
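And the ordered variant, using the same hypothetical ``KeyIndex`` setup: selecting ``"a"`` yields one group per value of ``b``, with ``a`` varying inside each group.
.. code-block:: python
from dataclasses import dataclass
@dataclass(frozen=True)
class KeyIndex:  # hypothetical key index, as above
    a: str
    b: str
analysis_objects = {
    KeyIndex(a, b): f"obj_{a}_{b}"
    for a in ("a1", "a2") for b in ("b1", "b2")
}
analysis_iterables = {"a": ["a1", "a2"], "b": ["b1", "b2"]}
for group in iterate_with_selected_objects_in_order(
        analysis_objects, analysis_iterables, selection="a"):
    # First group: [(KeyIndex("a1", "b1"), "obj_a1_b1"),
    #               (KeyIndex("a2", "b1"), "obj_a2_b1")], and so on.
    print(group)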
dpa-newslab/livebridge | livebridge/base/sources.py | BaseSource._db | def _db(self):
"""Database client for accessing storage.
:returns: :class:`livebridge.storages.base.BaseStorage` """
if not hasattr(self, "_db_client") or getattr(self, "_db_client") is None:
self._db_client = get_db_client()
return self._db_client | python | def _db(self):
"""Database client for accessing storage.
:returns: :class:`livebridge.storages.base.BaseStorage` """
if not hasattr(self, "_db_client") or getattr(self, "_db_client") is None:
self._db_client = get_db_client()
return self._db_client | [
"def",
"_db",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_db_client\"",
")",
"or",
"getattr",
"(",
"self",
",",
"\"_db_client\"",
")",
"is",
"None",
":",
"self",
".",
"_db_client",
"=",
"get_db_client",
"(",
")",
"return",
"self",
".",
"_db_client"
]
| Database client for accessing storage.
:returns: :class:`livebridge.storages.base.BaseStorage` | [
"Database",
"client",
"for",
"accessing",
"storage",
"."
]
| d930e887faa2f882d15b574f0f1fe4a580d7c5fa | https://github.com/dpa-newslab/livebridge/blob/d930e887faa2f882d15b574f0f1fe4a580d7c5fa/livebridge/base/sources.py#L38-L44 | train |
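The property is a lazy-initialization cache; the ``hasattr``/``getattr`` pair can be collapsed using ``getattr``'s default argument. An equivalent sketch:
.. code-block:: python
@property
def _db(self):
    # Create the storage client on first access, then reuse it.
    if getattr(self, "_db_client", None) is None:
        self._db_client = get_db_client()
    return self._db_client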
dpa-newslab/livebridge | livebridge/base/sources.py | BaseSource.filter_new_posts | async def filter_new_posts(self, source_id, post_ids):
"""Filters ist of post_id for new ones.
:param source_id: id of the source
:type string:
:param post_ids: list of post ids
:type list:
:returns: list of unknown post ids."""
new_ids = []
try:
db_client = self._db
posts_in_db = await db_client.get_known_posts(source_id, post_ids)
new_ids = [p for p in post_ids if p not in posts_in_db]
except Exception as exc:
logger.error("Error when filtering for new posts {} {}".format(source_id, post_ids))
logger.exception(exc)
return new_ids | python | async def filter_new_posts(self, source_id, post_ids):
"""Filters ist of post_id for new ones.
:param source_id: id of the source
:type string:
:param post_ids: list of post ids
:type list:
:returns: list of unknown post ids."""
new_ids = []
try:
db_client = self._db
posts_in_db = await db_client.get_known_posts(source_id, post_ids)
new_ids = [p for p in post_ids if p not in posts_in_db]
except Exception as exc:
logger.error("Error when filtering for new posts {} {}".format(source_id, post_ids))
logger.exception(exc)
return new_ids | [
"async",
"def",
"filter_new_posts",
"(",
"self",
",",
"source_id",
",",
"post_ids",
")",
":",
"new_ids",
"=",
"[",
"]",
"try",
":",
"db_client",
"=",
"self",
".",
"_db",
"posts_in_db",
"=",
"await",
"db_client",
".",
"get_known_posts",
"(",
"source_id",
",",
"post_ids",
")",
"new_ids",
"=",
"[",
"p",
"for",
"p",
"in",
"post_ids",
"if",
"p",
"not",
"in",
"posts_in_db",
"]",
"except",
"Exception",
"as",
"exc",
":",
"logger",
".",
"error",
"(",
"\"Error when filtering for new posts {} {}\"",
".",
"format",
"(",
"source_id",
",",
"post_ids",
")",
")",
"logger",
".",
"exception",
"(",
"exc",
")",
"return",
"new_ids"
]
| Filters list of post_ids for new ones.
:param source_id: id of the source
:type string:
:param post_ids: list of post ids
:type list:
:returns: list of unknown post ids. | [
"Filters",
"ist",
"of",
"post_id",
"for",
"new",
"ones",
"."
]
| d930e887faa2f882d15b574f0f1fe4a580d7c5fa | https://github.com/dpa-newslab/livebridge/blob/d930e887faa2f882d15b574f0f1fe4a580d7c5fa/livebridge/base/sources.py#L46-L62 | train |
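A sketch of how a polling loop might use this filter; the source id, post ids, and the fetch step are invented for illustration, and ``my_source`` stands for any concrete ``BaseSource`` subclass with a configured storage client.
.. code-block:: python
import asyncio
async def poll_once(source):
    candidate_ids = ["post-1", "post-2", "post-3"]  # hypothetical ids
    new_ids = await source.filter_new_posts("liveblog-42", candidate_ids)
    for post_id in new_ids:
        ...  # fetch and bridge only the posts not yet seen in storage
# asyncio.run(poll_once(my_source))  # with a concrete BaseSource subclass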
dpa-newslab/livebridge | livebridge/base/sources.py | BaseSource.get_last_updated | async def get_last_updated(self, source_id):
"""Returns latest update-timestamp from storage for source.
:param source_id: id of the source (source_id, ticker_id, blog_id etc.)
:type string:
:returns: :py:class:`datetime.datetime` object of latest update datetime in db."""
last_updated = await self._db.get_last_updated(source_id)
logger.info("LAST UPDATED: {} {}".format(last_updated, self))
return last_updated | python | async def get_last_updated(self, source_id):
"""Returns latest update-timestamp from storage for source.
:param source_id: id of the source (source_id, ticker_id, blog_id etc.)
:type string:
:returns: :py:class:`datetime.datetime` object of latest update datetime in db."""
last_updated = await self._db.get_last_updated(source_id)
logger.info("LAST UPDATED: {} {}".format(last_updated, self))
return last_updated | [
"async",
"def",
"get_last_updated",
"(",
"self",
",",
"source_id",
")",
":",
"last_updated",
"=",
"await",
"self",
".",
"_db",
".",
"get_last_updated",
"(",
"source_id",
")",
"logger",
".",
"info",
"(",
"\"LAST UPDATED: {} {}\"",
".",
"format",
"(",
"last_updated",
",",
"self",
")",
")",
"return",
"last_updated"
]
| Returns latest update-timestamp from storage for source.
:param source_id: id of the source (source_id, ticker_id, blog_id etc.)
:type string:
:returns: :py:class:`datetime.datetime` object of latest update datetime in db. | [
"Returns",
"latest",
"update",
"-",
"timestamp",
"from",
"storage",
"for",
"source",
"."
]
| d930e887faa2f882d15b574f0f1fe4a580d7c5fa | https://github.com/dpa-newslab/livebridge/blob/d930e887faa2f882d15b574f0f1fe4a580d7c5fa/livebridge/base/sources.py#L64-L72 | train |
portfors-lab/sparkle | sparkle/gui/stim/qauto_parameter_model.py | QAutoParameterModel.clearParameters | def clearParameters(self):
"""Removes all parameters from model"""
self.beginRemoveRows(QtCore.QModelIndex(), 0, self.rowCount())
self.model.clear_parameters()
self.endRemoveRows() | python | def clearParameters(self):
"""Removes all parameters from model"""
self.beginRemoveRows(QtCore.QModelIndex(), 0, self.rowCount())
self.model.clear_parameters()
self.endRemoveRows() | [
"def",
"clearParameters",
"(",
"self",
")",
":",
"self",
".",
"beginRemoveRows",
"(",
"QtCore",
".",
"QModelIndex",
"(",
")",
",",
"0",
",",
"self",
".",
"rowCount",
"(",
")",
")",
"self",
".",
"model",
".",
"clear_parameters",
"(",
")",
"self",
".",
"endRemoveRows",
"(",
")"
]
| Removes all parameters from model | [
"Removes",
"all",
"parameters",
"from",
"model"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/qauto_parameter_model.py#L51-L55 | train |
portfors-lab/sparkle | sparkle/gui/stim/qauto_parameter_model.py | QAutoParameterModel.insertRows | def insertRows(self, position, rows, parent = QtCore.QModelIndex()):
"""Inserts new parameters and emits an emptied False signal
:param position: row location to insert new parameter
:type position: int
:param rows: number of new parameters to insert
:type rows: int
:param parent: Required by QAbstractItemModel, can be safely ignored
"""
self.beginInsertRows(parent, position, position + rows - 1)
for i in range(rows):
self.model.insertRow(position)
# self._selectionmap[self._paramid].hintRequested.connect(self.hintRequested)
self.endInsertRows()
if self.rowCount() == 1:
self.emptied.emit(False)
return True | python | def insertRows(self, position, rows, parent = QtCore.QModelIndex()):
"""Inserts new parameters and emits an emptied False signal
:param position: row location to insert new parameter
:type position: int
:param rows: number of new parameters to insert
:type rows: int
:param parent: Required by QAbstractItemModel, can be safely ignored
"""
self.beginInsertRows(parent, position, position + rows - 1)
for i in range(rows):
self.model.insertRow(position)
# self._selectionmap[self._paramid].hintRequested.connect(self.hintRequested)
self.endInsertRows()
if self.rowCount() == 1:
self.emptied.emit(False)
return True | [
"def",
"insertRows",
"(",
"self",
",",
"position",
",",
"rows",
",",
"parent",
"=",
"QtCore",
".",
"QModelIndex",
"(",
")",
")",
":",
"self",
".",
"beginInsertRows",
"(",
"parent",
",",
"position",
",",
"position",
"+",
"rows",
"-",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"rows",
")",
":",
"self",
".",
"model",
".",
"insertRow",
"(",
"position",
")",
"# self._selectionmap[self._paramid].hintRequested.connect(self.hintRequested)",
"self",
".",
"endInsertRows",
"(",
")",
"if",
"self",
".",
"rowCount",
"(",
")",
"==",
"1",
":",
"self",
".",
"emptied",
".",
"emit",
"(",
"False",
")",
"return",
"True"
]
| Inserts new parameters and emits an emptied False signal
:param position: row location to insert new parameter
:type position: int
:param rows: number of new parameters to insert
:type rows: int
:param parent: Required by QAbstractItemModel, can be safely ignored | [
"Inserts",
"new",
"parameters",
"and",
"emits",
"an",
"emptied",
"False",
"signal"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/qauto_parameter_model.py#L152-L168 | train |
portfors-lab/sparkle | sparkle/gui/stim/qauto_parameter_model.py | QAutoParameterModel.removeRows | def removeRows(self, position, rows, parent = QtCore.QModelIndex()):
"""Removes parameters from the model. Emits and emptied True signal, if there are no parameters left.
:param position: row location of parameters to remove
:type position: int
:param rows: number of parameters to remove
:type rows: int
:param parent: Required by QAbstractItemModel, can be safely ignored
"""
self.beginRemoveRows(parent, position, position + rows - 1)
for i in range(rows):
self.model.removeRow(position)
# cannot purge selection model, or else we have no way of
# recovering it when reordering
self.endRemoveRows()
if self.rowCount() == 0:
self.emptied.emit(True)
return True | python | def removeRows(self, position, rows, parent = QtCore.QModelIndex()):
"""Removes parameters from the model. Emits and emptied True signal, if there are no parameters left.
:param position: row location of parameters to remove
:type position: int
:param rows: number of parameters to remove
:type rows: int
:param parent: Required by QAbstractItemModel, can be safely ignored
"""
self.beginRemoveRows(parent, position, position + rows - 1)
for i in range(rows):
self.model.removeRow(position)
# cannot purge selection model, or else we have no way of
# recovering it when reordering
self.endRemoveRows()
if self.rowCount() == 0:
self.emptied.emit(True)
return True | [
"def",
"removeRows",
"(",
"self",
",",
"position",
",",
"rows",
",",
"parent",
"=",
"QtCore",
".",
"QModelIndex",
"(",
")",
")",
":",
"self",
".",
"beginRemoveRows",
"(",
"parent",
",",
"position",
",",
"position",
"+",
"rows",
"-",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"rows",
")",
":",
"self",
".",
"model",
".",
"removeRow",
"(",
"position",
")",
"# cannot purge selection model, or else we have no way of ",
"# recovering it when reordering",
"self",
".",
"endRemoveRows",
"(",
")",
"if",
"self",
".",
"rowCount",
"(",
")",
"==",
"0",
":",
"self",
".",
"emptied",
".",
"emit",
"(",
"True",
")",
"return",
"True"
]
| Removes parameters from the model. Emits an emptied True signal if there are no parameters left.
:param position: row location of parameters to remove
:type position: int
:param rows: number of parameters to remove
:type rows: int
:param parent: Required by QAbstractItemModel, can be safely ignored | [
"Removes",
"parameters",
"from",
"the",
"model",
".",
"Emits",
"and",
"emptied",
"True",
"signal",
"if",
"there",
"are",
"no",
"parameters",
"left",
"."
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/qauto_parameter_model.py#L170-L187 | train |
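Both methods follow the standard ``QAbstractItemModel`` begin/end bracketing, so attached views repaint correctly. A schematic driver, assuming ``model`` is an instantiated ``QAutoParameterModel``:
.. code-block:: python
# Illustrative only; `model` is an instantiated QAutoParameterModel.
model.emptied.connect(lambda empty: print("model empty?", empty))
model.insertRows(0, 1)  # first parameter added -> emits emptied(False)
model.removeRows(0, 1)  # last parameter removed -> emits emptied(True)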
portfors-lab/sparkle | sparkle/gui/stim/qauto_parameter_model.py | QAutoParameterModel.toggleSelection | def toggleSelection(self, index, comp):
"""Toggles a component in or out of the currently
selected parameter's components list"""
self.model.toggleSelection(index.row(), comp) | python | def toggleSelection(self, index, comp):
"""Toggles a component in or out of the currently
selected parameter's components list"""
self.model.toggleSelection(index.row(), comp) | [
"def",
"toggleSelection",
"(",
"self",
",",
"index",
",",
"comp",
")",
":",
"self",
".",
"model",
".",
"toggleSelection",
"(",
"index",
".",
"row",
"(",
")",
",",
"comp",
")"
]
| Toggles a component in or out of the currently
selected parameter's components list | [
"Toggles",
"a",
"component",
"in",
"or",
"out",
"of",
"the",
"currently",
"selected",
"parameter",
"s",
"compnents",
"list"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/qauto_parameter_model.py#L220-L223 | train |
lowandrew/OLCTools | spadespipeline/runMetadata.py | Metadata.parseruninfo | def parseruninfo(self):
"""Extracts the flowcell ID, as well as the instrument name from RunInfo.xml. If this file is not provided,
NA values are substituted"""
# Check if the RunInfo.xml file is provided, otherwise, yield N/A
try:
runinfo = ElementTree.ElementTree(file=self.runinfo)
# Get the run id and number from the run attributes
for elem in runinfo.iter():
for run in elem:
try:
self.runid = run.attrib['Id']
self.runnumber = run.attrib['Number']
except KeyError:
break
# pull the text from flowcell and instrument values using the .iter(tag="X") function
for elem in runinfo.iter(tag="Flowcell"):
self.flowcell = elem.text
for elem in runinfo.iter(tag="Instrument"):
self.instrument = elem.text
except IOError:
pass
# Extract run statistics from either GenerateRunStatistics.xml or indexingQC.txt
self.parserunstats() | python | def parseruninfo(self):
"""Extracts the flowcell ID, as well as the instrument name from RunInfo.xml. If this file is not provided,
NA values are substituted"""
# Check if the RunInfo.xml file is provided, otherwise, yield N/A
try:
runinfo = ElementTree.ElementTree(file=self.runinfo)
# Get the run id and number from the run attributes
for elem in runinfo.iter():
for run in elem:
try:
self.runid = run.attrib['Id']
self.runnumber = run.attrib['Number']
except KeyError:
break
# pull the text from flowcell and instrument values using the .iter(tag="X") function
for elem in runinfo.iter(tag="Flowcell"):
self.flowcell = elem.text
for elem in runinfo.iter(tag="Instrument"):
self.instrument = elem.text
except IOError:
pass
# Extract run statistics from either GenerateRunStatistics.xml or indexingQC.txt
self.parserunstats() | [
"def",
"parseruninfo",
"(",
"self",
")",
":",
"# Check if the RunInfo.xml file is provided, otherwise, yield N/A",
"try",
":",
"runinfo",
"=",
"ElementTree",
".",
"ElementTree",
"(",
"file",
"=",
"self",
".",
"runinfo",
")",
"# Get the run id from the",
"for",
"elem",
"in",
"runinfo",
".",
"iter",
"(",
")",
":",
"for",
"run",
"in",
"elem",
":",
"try",
":",
"self",
".",
"runid",
"=",
"run",
".",
"attrib",
"[",
"'Id'",
"]",
"self",
".",
"runnumber",
"=",
"run",
".",
"attrib",
"[",
"'Number'",
"]",
"except",
"KeyError",
":",
"break",
"# pull the text from flowcell and instrument values using the .iter(tag=\"X\") function",
"for",
"elem",
"in",
"runinfo",
".",
"iter",
"(",
"tag",
"=",
"\"Flowcell\"",
")",
":",
"self",
".",
"flowcell",
"=",
"elem",
".",
"text",
"for",
"elem",
"in",
"runinfo",
".",
"iter",
"(",
"tag",
"=",
"\"Instrument\"",
")",
":",
"self",
".",
"instrument",
"=",
"elem",
".",
"text",
"except",
"IOError",
":",
"pass",
"# Extract run statistics from either GenerateRunStatistics.xml or indexingQC.txt",
"self",
".",
"parserunstats",
"(",
")"
]
| Extracts the flowcell ID, as well as the instrument name from RunInfo.xml. If this file is not provided,
NA values are substituted | [
"Extracts",
"the",
"flowcell",
"ID",
"as",
"well",
"as",
"the",
"instrument",
"name",
"from",
"RunInfo",
".",
"xml",
".",
"If",
"this",
"file",
"is",
"not",
"provided",
"NA",
"values",
"are",
"substituted"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/runMetadata.py#L16-L38 | train |
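The extraction pattern in isolation, against a trimmed, hand-written RunInfo.xml; the ids are made up, and real Illumina files carry more fields.
.. code-block:: python
import io
from xml.etree import ElementTree
xml = """<RunInfo>
  <Run Id="190101_M01234_0001_000000000-ABCDE" Number="1">
    <Flowcell>000000000-ABCDE</Flowcell>
    <Instrument>M01234</Instrument>
  </Run>
</RunInfo>"""
tree = ElementTree.ElementTree(file=io.StringIO(xml))
run = next(tree.iter(tag="Run"))
print(run.attrib["Id"], run.attrib["Number"])  # run id and number
print(next(tree.iter(tag="Flowcell")).text)    # flowcell id
print(next(tree.iter(tag="Instrument")).text)  # instrument name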
sirfoga/pyhal | hal/files/parsers.py | Parser.get_lines | def get_lines(self):
"""Gets lines in file
:return: Lines in file
"""
with open(self.path, "r") as data:
self.lines = data.readlines() # store data in arrays
return self.lines | python | def get_lines(self):
"""Gets lines in file
:return: Lines in file
"""
with open(self.path, "r") as data:
self.lines = data.readlines() # store data in arrays
return self.lines | [
"def",
"get_lines",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"path",
",",
"\"r\"",
")",
"as",
"data",
":",
"self",
".",
"lines",
"=",
"data",
".",
"readlines",
"(",
")",
"# store data in arrays",
"return",
"self",
".",
"lines"
]
| Gets lines in file
:return: Lines in file | [
"Gets",
"lines",
"in",
"file"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/parsers.py#L19-L27 | train |
sirfoga/pyhal | hal/files/parsers.py | CSVParser.get_matrix | def get_matrix(self):
"""Stores values in array, store lines in array
:return: 2D matrix
"""
data = []
with open(self.path, encoding=self.encoding) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",", quotechar="\"")
for row in csv_reader:
data.append(row)
return data | python | def get_matrix(self):
"""Stores values in array, store lines in array
:return: 2D matrix
"""
data = []
with open(self.path, encoding=self.encoding) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",", quotechar="\"")
for row in csv_reader:
data.append(row)
return data | [
"def",
"get_matrix",
"(",
"self",
")",
":",
"data",
"=",
"[",
"]",
"with",
"open",
"(",
"self",
".",
"path",
",",
"encoding",
"=",
"self",
".",
"encoding",
")",
"as",
"csv_file",
":",
"csv_reader",
"=",
"csv",
".",
"reader",
"(",
"csv_file",
",",
"delimiter",
"=",
"\",\"",
",",
"quotechar",
"=",
"\"\\\"\"",
")",
"for",
"row",
"in",
"csv_reader",
":",
"data",
".",
"append",
"(",
"row",
")",
"return",
"data"
]
| Stores values in array, stores lines in array
:return: 2D matrix | [
"Stores",
"values",
"in",
"array",
"store",
"lines",
"in",
"array"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/parsers.py#L41-L52 | train |
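The same read without the class wrapper; ``example.csv`` is a hypothetical file name.
.. code-block:: python
import csv
with open("example.csv", encoding="utf-8") as csv_file:
    matrix = list(csv.reader(csv_file, delimiter=",", quotechar='"'))
# matrix[i][j] is the j-th comma-separated (possibly quoted) field on line i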
sirfoga/pyhal | hal/files/parsers.py | CSVParser.get_dicts | def get_dicts(self):
"""Gets dicts in file
:return: (generator of) of dicts with data from .csv file
"""
reader = csv.DictReader(open(self.path, "r", encoding=self.encoding))
for row in reader:
if row:
yield row | python | def get_dicts(self):
"""Gets dicts in file
:return: (generator of) of dicts with data from .csv file
"""
reader = csv.DictReader(open(self.path, "r", encoding=self.encoding))
for row in reader:
if row:
yield row | [
"def",
"get_dicts",
"(",
"self",
")",
":",
"reader",
"=",
"csv",
".",
"DictReader",
"(",
"open",
"(",
"self",
".",
"path",
",",
"\"r\"",
",",
"encoding",
"=",
"self",
".",
"encoding",
")",
")",
"for",
"row",
"in",
"reader",
":",
"if",
"row",
":",
"yield",
"row"
]
| Gets dicts in file
:return: (generator of) of dicts with data from .csv file | [
"Gets",
"dicts",
"in",
"file"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/parsers.py#L62-L70 | train |
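``csv.DictReader`` takes its keys from the file's first row. A standalone equivalent, with a hypothetical ``people.csv`` holding a ``name,age`` header; note that, unlike the generator above, the ``with`` block also closes the file.
.. code-block:: python
import csv
with open("people.csv", "r", encoding="utf-8") as f:
    for row in csv.DictReader(f):
        if row:
            print(row["name"], row["age"])  # keys come from the header row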
Egregors/cbrf | cbrf/models.py | CurrenciesInfo.get_by_id | def get_by_id(self, id_code: str) -> Currency or None:
""" Get currency by ID
:param id_code: str, like "R01305"
:return: currency or None.
"""
try:
return [_ for _ in self.currencies if _.id == id_code][0]
except IndexError:
return None | python | def get_by_id(self, id_code: str) -> Currency or None:
""" Get currency by ID
:param id_code: str, like "R01305"
:return: currency or None.
"""
try:
return [_ for _ in self.currencies if _.id == id_code][0]
except IndexError:
return None | [
"def",
"get_by_id",
"(",
"self",
",",
"id_code",
":",
"str",
")",
"->",
"Currency",
"or",
"None",
":",
"try",
":",
"return",
"[",
"_",
"for",
"_",
"in",
"self",
".",
"currencies",
"if",
"_",
".",
"id",
"==",
"id_code",
"]",
"[",
"0",
"]",
"except",
"IndexError",
":",
"return",
"None"
]
| Get currency by ID
:param id_code: set, like "R01305"
:return: currency or None. | [
"Get",
"currency",
"by",
"ID"
]
| e4ce332fcead83c75966337c97c0ae070fb7e576 | https://github.com/Egregors/cbrf/blob/e4ce332fcead83c75966337c97c0ae070fb7e576/cbrf/models.py#L125-L134 | train |
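The lookup above is a linear scan per call; when many lookups are expected, an id-keyed dict gives constant-time access. A hedged alternative sketch:
.. code-block:: python
class CurrencyIndex:
    """Illustrative wrapper: constant-time currency lookups."""
    def __init__(self, currencies):
        self._by_id = {c.id: c for c in currencies}
    def get_by_id(self, id_code):
        # None if absent, matching the method above.
        return self._by_id.get(id_code)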
portfors-lab/sparkle | sparkle/stim/types/__init__.py | get_stimuli_models | def get_stimuli_models():
"""
Returns all subclasses of AbstractStimulusComponent in python files,
in this package
"""
package_path = os.path.dirname(__file__)
mod = '.'.join(get_stimuli_models.__module__.split('.'))
if mod == '__main__':
mod = ''
else:
mod = mod + '.'
module_files = glob.glob(package_path+os.sep+'[a-zA-Z]*.py')
module_names = [os.path.splitext(os.path.basename(x))[0] for x in module_files]
module_paths = [mod+x for x in module_names]
modules = [__import__(x, fromlist=['*']) for x in module_paths]
stimuli = []
for module in modules:
for name, attr in module.__dict__.iteritems():
# Test if attr is a subclass of AbstractStimulusComponent
if type(attr) == type and issubclass(attr, AbstractStimulusComponent):
# print 'found subclass', name, '!!!'
stimuli.append(attr)
# print stimuli
return stimuli | python | def get_stimuli_models():
"""
Returns all subclasses of AbstractStimulusComponent in python files,
in this package
"""
package_path = os.path.dirname(__file__)
mod = '.'.join(get_stimuli_models.__module__.split('.'))
if mod == '__main__':
mod = ''
else:
mod = mod + '.'
module_files = glob.glob(package_path+os.sep+'[a-zA-Z]*.py')
module_names = [os.path.splitext(os.path.basename(x))[0] for x in module_files]
module_paths = [mod+x for x in module_names]
modules = [__import__(x, fromlist=['*']) for x in module_paths]
stimuli = []
for module in modules:
for name, attr in module.__dict__.iteritems():
# Test if attr is a subclass of AbstractStimulusComponent
if type(attr) == type and issubclass(attr, AbstractStimulusComponent):
# print 'found subclass', name, '!!!'
stimuli.append(attr)
# print stimuli
return stimuli | [
"def",
"get_stimuli_models",
"(",
")",
":",
"package_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
"mod",
"=",
"'.'",
".",
"join",
"(",
"get_stimuli_models",
".",
"__module__",
".",
"split",
"(",
"'.'",
")",
")",
"if",
"mod",
"==",
"'__main__'",
":",
"mod",
"=",
"''",
"else",
":",
"mod",
"=",
"mod",
"+",
"'.'",
"module_files",
"=",
"glob",
".",
"glob",
"(",
"package_path",
"+",
"os",
".",
"sep",
"+",
"'[a-zA-Z]*.py'",
")",
"module_names",
"=",
"[",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"x",
")",
")",
"[",
"0",
"]",
"for",
"x",
"in",
"module_files",
"]",
"module_paths",
"=",
"[",
"mod",
"+",
"x",
"for",
"x",
"in",
"module_names",
"]",
"modules",
"=",
"[",
"__import__",
"(",
"x",
",",
"fromlist",
"=",
"[",
"'*'",
"]",
")",
"for",
"x",
"in",
"module_paths",
"]",
"stimuli",
"=",
"[",
"]",
"for",
"module",
"in",
"modules",
":",
"for",
"name",
",",
"attr",
"in",
"module",
".",
"__dict__",
".",
"iteritems",
"(",
")",
":",
"#test if attr is subclass of AbstractStim",
"if",
"type",
"(",
"attr",
")",
"==",
"type",
"and",
"issubclass",
"(",
"attr",
",",
"AbstractStimulusComponent",
")",
":",
"# print 'found subclass', name, '!!!'",
"stimuli",
".",
"append",
"(",
"attr",
")",
"# print stimuli",
"return",
"stimuli"
]
| Returns all subclasses of AbstractStimulusComponent in python files,
in this package | [
"Returns",
"all",
"subclasses",
"of",
"AbstractStimulusComponent",
"in",
"python",
"files",
"in",
"this",
"package"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/types/__init__.py#L5-L33 | train |
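The discovery idiom above is Python 2 (``dict.iteritems``). Under Python 3 the same plugin scan is usually written with ``pkgutil`` and ``inspect``; a sketch, not the project's actual code:
.. code-block:: python
import importlib
import inspect
import pkgutil
def find_subclasses(package, base_class):
    """Collect subclasses of base_class defined in a package's modules."""
    found = []
    for info in pkgutil.iter_modules(package.__path__):
        module = importlib.import_module(f"{package.__name__}.{info.name}")
        for _, attr in inspect.getmembers(module, inspect.isclass):
            if issubclass(attr, base_class) and attr is not base_class:
                found.append(attr)
    return found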
steven-lang/bottr | bottr/util.py | parse_wait_time | def parse_wait_time(text: str) -> int:
"""Parse the waiting time from the exception"""
val = RATELIMIT.findall(text)
if len(val) > 0:
try:
res = val[0]
if res[1] == 'minutes':
return int(res[0]) * 60
if res[1] == 'seconds':
return int(res[0])
except Exception as e:
util_logger.warning('Could not parse ratelimit: ' + str(e))
return 1 * 60 | python | def parse_wait_time(text: str) -> int:
"""Parse the waiting time from the exception"""
val = RATELIMIT.findall(text)
if len(val) > 0:
try:
res = val[0]
if res[1] == 'minutes':
return int(res[0]) * 60
if res[1] == 'seconds':
return int(res[0])
except Exception as e:
util_logger.warning('Could not parse ratelimit: ' + str(e))
return 1 * 60 | [
"def",
"parse_wait_time",
"(",
"text",
":",
"str",
")",
"->",
"int",
":",
"val",
"=",
"RATELIMIT",
".",
"findall",
"(",
"text",
")",
"if",
"len",
"(",
"val",
")",
">",
"0",
":",
"try",
":",
"res",
"=",
"val",
"[",
"0",
"]",
"if",
"res",
"[",
"1",
"]",
"==",
"'minutes'",
":",
"return",
"int",
"(",
"res",
"[",
"0",
"]",
")",
"*",
"60",
"if",
"res",
"[",
"1",
"]",
"==",
"'seconds'",
":",
"return",
"int",
"(",
"res",
"[",
"0",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"util_logger",
".",
"warning",
"(",
"'Could not parse ratelimit: '",
"+",
"str",
"(",
"e",
")",
")",
"return",
"1",
"*",
"60"
]
| Parse the waiting time from the exception | [
"Parse",
"the",
"waiting",
"time",
"from",
"the",
"exception"
]
| c1b92becc31adfbd5a7b77179b852a51da70b193 | https://github.com/steven-lang/bottr/blob/c1b92becc31adfbd5a7b77179b852a51da70b193/bottr/util.py#L13-L26 | train |
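``RATELIMIT`` is a module-level constant defined elsewhere; judging from the tuples the code unpacks, it captures a count and a unit. An assumed shape (the real regex may differ):
.. code-block:: python
import re
# Assumed shape of the module-level pattern; the real one may differ.
RATELIMIT = re.compile(r"(\d+) (minutes|seconds)")
hits = RATELIMIT.findall("you are doing that too much. try again in 9 minutes.")
# -> [("9", "minutes")]; parse_wait_time would return 9 * 60 = 540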
steven-lang/bottr | bottr/util.py | check_comment_depth | def check_comment_depth(comment: praw.models.Comment, max_depth=3) -> bool:
"""
Check if comment is in an allowed depth range
:param comment: :class:`praw.models.Comment` to count the depth of
:param max_depth: Maximum allowed depth
:return: True if comment is in depth range between 0 and max_depth
"""
count = 0
while not comment.is_root:
count += 1
if count > max_depth:
return False
comment = comment.parent()
return True | python | def check_comment_depth(comment: praw.models.Comment, max_depth=3) -> bool:
"""
Check if comment is in an allowed depth range
:param comment: :class:`praw.models.Comment` to count the depth of
:param max_depth: Maximum allowed depth
:return: True if comment is in depth range between 0 and max_depth
"""
count = 0
while not comment.is_root:
count += 1
if count > max_depth:
return False
comment = comment.parent()
return True | [
"def",
"check_comment_depth",
"(",
"comment",
":",
"praw",
".",
"models",
".",
"Comment",
",",
"max_depth",
"=",
"3",
")",
"->",
"bool",
":",
"count",
"=",
"0",
"while",
"not",
"comment",
".",
"is_root",
":",
"count",
"+=",
"1",
"if",
"count",
">",
"max_depth",
":",
"return",
"False",
"comment",
"=",
"comment",
".",
"parent",
"(",
")",
"return",
"True"
]
| Check if comment is in an allowed depth range
:param comment: :class:`praw.models.Comment` to count the depth of
:param max_depth: Maximum allowed depth
:return: True if comment is in depth range between 0 and max_depth | [
"Check",
"if",
"comment",
"is",
"in",
"a",
"allowed",
"depth",
"range"
]
| c1b92becc31adfbd5a7b77179b852a51da70b193 | https://github.com/steven-lang/bottr/blob/c1b92becc31adfbd5a7b77179b852a51da70b193/bottr/util.py#L59-L75 | train |
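Because the function only touches ``is_root`` and ``parent()``, it can be exercised with a tiny test double instead of a live ``praw`` comment:
.. code-block:: python
class FakeComment:
    """Minimal stand-in for praw.models.Comment (illustrative)."""
    def __init__(self, parent=None):
        self._parent = parent
    @property
    def is_root(self):
        return self._parent is None
    def parent(self):
        return self._parent
root = FakeComment()
nested = FakeComment(parent=FakeComment(parent=root))  # depth 2
assert check_comment_depth(root) is True
assert check_comment_depth(nested, max_depth=1) is False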
steven-lang/bottr | bottr/util.py | get_subs | def get_subs(subs_file='subreddits.txt', blacklist_file='blacklist.txt') -> List[str]:
"""
Get subs based on a file of subreddits and a file of blacklisted subreddits.
:param subs_file: List of subreddits. Each sub on a new line.
:param blacklist_file: List of blacklisted subreddits. Each sub on a new line.
:return: List of subreddits filtered with the blacklisted subs.
**Example files**::
sub0
sub1
sub2
...
"""
# Get subs and blacklisted subs
subsf = open(subs_file)
blacklf = open(blacklist_file)
subs = [b.lower().replace('\n','') for b in subsf.readlines()]
blacklisted = [b.lower().replace('\n','') for b in blacklf.readlines()]
subsf.close()
blacklf.close()
# Filter blacklisted
subs_filtered = list(sorted(set(subs).difference(set(blacklisted))))
return subs_filtered | python | def get_subs(subs_file='subreddits.txt', blacklist_file='blacklist.txt') -> List[str]:
"""
Get subs based on a file of subreddits and a file of blacklisted subreddits.
:param subs_file: List of subreddits. Each sub on a new line.
:param blacklist_file: List of blacklisted subreddits. Each sub on a new line.
:return: List of subreddits filtered with the blacklisted subs.
**Example files**::
sub0
sub1
sub2
...
"""
# Get subs and blacklisted subs
subsf = open(subs_file)
blacklf = open(blacklist_file)
subs = [b.lower().replace('\n','') for b in subsf.readlines()]
blacklisted = [b.lower().replace('\n','') for b in blacklf.readlines()]
subsf.close()
blacklf.close()
# Filter blacklisted
subs_filtered = list(sorted(set(subs).difference(set(blacklisted))))
return subs_filtered | [
"def",
"get_subs",
"(",
"subs_file",
"=",
"'subreddits.txt'",
",",
"blacklist_file",
"=",
"'blacklist.txt'",
")",
"->",
"List",
"[",
"str",
"]",
":",
"# Get subs and blacklisted subs",
"subsf",
"=",
"open",
"(",
"subs_file",
")",
"blacklf",
"=",
"open",
"(",
"blacklist_file",
")",
"subs",
"=",
"[",
"b",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
"for",
"b",
"in",
"subsf",
".",
"readlines",
"(",
")",
"]",
"blacklisted",
"=",
"[",
"b",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
"for",
"b",
"in",
"blacklf",
".",
"readlines",
"(",
")",
"]",
"subsf",
".",
"close",
"(",
")",
"blacklf",
".",
"close",
"(",
")",
"# Filter blacklisted",
"subs_filtered",
"=",
"list",
"(",
"sorted",
"(",
"set",
"(",
"subs",
")",
".",
"difference",
"(",
"set",
"(",
"blacklisted",
")",
")",
")",
")",
"return",
"subs_filtered"
]
| Get subs based on a file of subreddits and a file of blacklisted subreddits.
:param subs_file: List of subreddits. Each sub in a new line.
:param blacklist_file: List of blacklisted subreddits. Each sub in a new line.
:return: List of subreddits filtered with the blacklisted subs.
**Example files**::
sub0
sub1
sub2
... | [
"Get",
"subs",
"based",
"on",
"a",
"file",
"of",
"subreddits",
"and",
"a",
"file",
"of",
"blacklisted",
"subreddits",
"."
]
| c1b92becc31adfbd5a7b77179b852a51da70b193 | https://github.com/steven-lang/bottr/blob/c1b92becc31adfbd5a7b77179b852a51da70b193/bottr/util.py#L98-L123 | train |
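The filtering itself is a plain set difference, at the cost of the original file order. In isolation:
.. code-block:: python
subs = ["python", "learnpython", "askreddit", "python"]
blacklisted = ["askreddit"]
filtered = sorted(set(subs) - set(blacklisted))
# -> ["learnpython", "python"]  (deduplicated, blacklist removed, sorted)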
yamcs/yamcs-python | yamcs-client/examples/data_links.py | enable_all_links | def enable_all_links():
"""Enable all links."""
for link in client.list_data_links(instance='simulator'):
client.enable_data_link(instance=link.instance, link=link.name) | python | def enable_all_links():
"""Enable all links."""
for link in client.list_data_links(instance='simulator'):
client.enable_data_link(instance=link.instance, link=link.name) | [
"def",
"enable_all_links",
"(",
")",
":",
"for",
"link",
"in",
"client",
".",
"list_data_links",
"(",
"instance",
"=",
"'simulator'",
")",
":",
"client",
".",
"enable_data_link",
"(",
"instance",
"=",
"link",
".",
"instance",
",",
"link",
"=",
"link",
".",
"name",
")"
]
| Enable all links. | [
"Enable",
"all",
"links",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/examples/data_links.py#L8-L11 | train |
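The snippet assumes a module-level `client`; a plausible setup, hedged on the yamcs-client constructor and a placeholder host/port, might look like this.

from yamcs.client import YamcsClient  # assumed import path for yamcs-client

client = YamcsClient('localhost:8090')  # placeholder address
enable_all_links()  # re-enables every data link on the 'simulator' instance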
ElofssonLab/pyGaussDCA | src/gaussdca/_load_data.py | load_a3m | def load_a3m(fasta, max_gap_fraction=0.9):
""" load alignment with the alphabet used in GaussDCA """
mapping = {'-': 21, 'A': 1, 'B': 21, 'C': 2, 'D': 3, 'E': 4, 'F': 5,
'G': 6, 'H': 7, 'I': 8, 'K': 9, 'L': 10, 'M': 11,
'N': 12, 'O': 21, 'P': 13, 'Q': 14, 'R': 15, 'S': 16, 'T': 17,
'V': 18, 'W': 19, 'Y': 20,
'U': 21, 'Z': 21, 'X': 21, 'J': 21}
# We want to exclude the lowercase, not ignore the uppercase because of gaps.
lowercase = set('abcdefghijklmnopqrstuvwxyz')
# Figure out the length of the sequence
f = open(fasta)
for line in f:
if line.startswith('>'):
continue
seq_length = len(line.strip())
break
else:
raise RuntimeError('I cannot find the first sequence')
f.seek(0)
parsed = []
for line in f:
if line.startswith('>'):
continue
line = line.strip()
gap_fraction = line.count('-') / seq_length
if gap_fraction <= max_gap_fraction:
parsed.append([mapping.get(ch, 22) for ch in line
if ch not in lowercase])
return np.array(parsed, dtype=np.int8).T | python | def load_a3m(fasta, max_gap_fraction=0.9):
""" load alignment with the alphabet used in GaussDCA """
mapping = {'-': 21, 'A': 1, 'B': 21, 'C': 2, 'D': 3, 'E': 4, 'F': 5,
'G': 6, 'H': 7, 'I': 8, 'K': 9, 'L': 10, 'M': 11,
'N': 12, 'O': 21, 'P': 13, 'Q': 14, 'R': 15, 'S': 16, 'T': 17,
'V': 18, 'W': 19, 'Y': 20,
'U': 21, 'Z': 21, 'X': 21, 'J': 21}
# We want to exclude the lowercase, not ignore the uppercase because of gaps.
lowercase = set('abcdefghijklmnopqrstuvwxyz')
# Figure out the length of the sequence
f = open(fasta)
for line in f:
if line.startswith('>'):
continue
seq_length = len(line.strip())
break
else:
raise RuntimeError('I cannot find the first sequence')
f.seek(0)
parsed = []
for line in f:
if line.startswith('>'):
continue
line = line.strip()
gap_fraction = line.count('-') / seq_length
if gap_fraction <= max_gap_fraction:
parsed.append([mapping.get(ch, 22) for ch in line
if ch not in lowercase])
return np.array(parsed, dtype=np.int8).T | [
"def",
"load_a3m",
"(",
"fasta",
",",
"max_gap_fraction",
"=",
"0.9",
")",
":",
"mapping",
"=",
"{",
"'-'",
":",
"21",
",",
"'A'",
":",
"1",
",",
"'B'",
":",
"21",
",",
"'C'",
":",
"2",
",",
"'D'",
":",
"3",
",",
"'E'",
":",
"4",
",",
"'F'",
":",
"5",
",",
"'G'",
":",
"6",
",",
"'H'",
":",
"7",
",",
"'I'",
":",
"8",
",",
"'K'",
":",
"9",
",",
"'L'",
":",
"10",
",",
"'M'",
":",
"11",
",",
"'N'",
":",
"12",
",",
"'O'",
":",
"21",
",",
"'P'",
":",
"13",
",",
"'Q'",
":",
"14",
",",
"'R'",
":",
"15",
",",
"'S'",
":",
"16",
",",
"'T'",
":",
"17",
",",
"'V'",
":",
"18",
",",
"'W'",
":",
"19",
",",
"'Y'",
":",
"20",
",",
"'U'",
":",
"21",
",",
"'Z'",
":",
"21",
",",
"'X'",
":",
"21",
",",
"'J'",
":",
"21",
"}",
"# We want to exclude the lowercase, not ignore the uppercase because of gaps.",
"lowercase",
"=",
"set",
"(",
"'abcdefghijklmnopqrstuvwxyz'",
")",
"# Figure out the length of the sequence",
"f",
"=",
"open",
"(",
"fasta",
")",
"for",
"line",
"in",
"f",
":",
"if",
"line",
".",
"startswith",
"(",
"'>'",
")",
":",
"continue",
"seq_length",
"=",
"len",
"(",
"line",
".",
"strip",
"(",
")",
")",
"break",
"else",
":",
"raise",
"RuntimeError",
"(",
"'I cannot find the first sequence'",
")",
"f",
".",
"seek",
"(",
"0",
")",
"parsed",
"=",
"[",
"]",
"for",
"line",
"in",
"f",
":",
"if",
"line",
".",
"startswith",
"(",
"'>'",
")",
":",
"continue",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"gap_fraction",
"=",
"line",
".",
"count",
"(",
"'-'",
")",
"/",
"seq_length",
"if",
"gap_fraction",
"<=",
"max_gap_fraction",
":",
"parsed",
".",
"append",
"(",
"[",
"mapping",
".",
"get",
"(",
"ch",
",",
"22",
")",
"for",
"ch",
"in",
"line",
"if",
"ch",
"not",
"in",
"lowercase",
"]",
")",
"return",
"np",
".",
"array",
"(",
"parsed",
",",
"dtype",
"=",
"np",
".",
"int8",
")",
".",
"T"
]
| load alignment with the alphabet used in GaussDCA | [
"load",
"alignment",
"with",
"the",
"alphabet",
"used",
"in",
"GaussDCA"
]
| 0c1a16dbbb2f4fbe039b36f37f9c7c3989e2e84c | https://github.com/ElofssonLab/pyGaussDCA/blob/0c1a16dbbb2f4fbe039b36f37f9c7c3989e2e84c/src/gaussdca/_load_data.py#L7-L39 | train |
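A brief sketch of what load_a3m returns; the file path is a placeholder. Lowercase insert-state residues are dropped, gaps and ambiguous residues map to 21, and the matrix is transposed to (alignment_length, n_sequences).

aln = load_a3m('example.a3m', max_gap_fraction=0.9)  # hypothetical file
print(aln.dtype)   # int8
print(aln.shape)   # (L, N): one column per retained sequence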
lowandrew/OLCTools | spadespipeline/typingclasses.py | PlasmidExtractor.run_plasmid_extractor | def run_plasmid_extractor(self):
"""
Create and run the plasmid extractor system call
"""
logging.info('Extracting plasmids')
# Define the system call
extract_command = 'PlasmidExtractor.py -i {inf} -o {outf} -p {plasdb} -d {db} -t {cpus} -nc' \
.format(inf=self.path,
outf=self.plasmid_output,
plasdb=os.path.join(self.plasmid_db, 'plasmid_db.fasta'),
db=self.plasmid_db,
cpus=self.cpus)
# Only attempt to extract plasmids if the report doesn't already exist
if not os.path.isfile(self.plasmid_report):
# Run the system calls
out, err = run_subprocess(extract_command)
# Acquire thread lock, and write the logs to file
self.threadlock.acquire()
write_to_logfile(extract_command, extract_command, self.logfile)
write_to_logfile(out, err, self.logfile)
self.threadlock.release() | python | def run_plasmid_extractor(self):
"""
Create and run the plasmid extractor system call
"""
logging.info('Extracting plasmids')
# Define the system call
extract_command = 'PlasmidExtractor.py -i {inf} -o {outf} -p {plasdb} -d {db} -t {cpus} -nc' \
.format(inf=self.path,
outf=self.plasmid_output,
plasdb=os.path.join(self.plasmid_db, 'plasmid_db.fasta'),
db=self.plasmid_db,
cpus=self.cpus)
# Only attempt to extract plasmids if the report doesn't already exist
if not os.path.isfile(self.plasmid_report):
# Run the system calls
out, err = run_subprocess(extract_command)
# Acquire thread lock, and write the logs to file
self.threadlock.acquire()
write_to_logfile(extract_command, extract_command, self.logfile)
write_to_logfile(out, err, self.logfile)
self.threadlock.release() | [
"def",
"run_plasmid_extractor",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Extracting plasmids'",
")",
"# Define the system call",
"extract_command",
"=",
"'PlasmidExtractor.py -i {inf} -o {outf} -p {plasdb} -d {db} -t {cpus} -nc'",
".",
"format",
"(",
"inf",
"=",
"self",
".",
"path",
",",
"outf",
"=",
"self",
".",
"plasmid_output",
",",
"plasdb",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"plasmid_db",
",",
"'plasmid_db.fasta'",
")",
",",
"db",
"=",
"self",
".",
"plasmid_db",
",",
"cpus",
"=",
"self",
".",
"cpus",
")",
"# Only attempt to extract plasmids if the report doesn't already exist",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"plasmid_report",
")",
":",
"# Run the system calls",
"out",
",",
"err",
"=",
"run_subprocess",
"(",
"extract_command",
")",
"# Acquire thread lock, and write the logs to file",
"self",
".",
"threadlock",
".",
"acquire",
"(",
")",
"write_to_logfile",
"(",
"extract_command",
",",
"extract_command",
",",
"self",
".",
"logfile",
")",
"write_to_logfile",
"(",
"out",
",",
"err",
",",
"self",
".",
"logfile",
")",
"self",
".",
"threadlock",
".",
"release",
"(",
")"
]
| Create and run the plasmid extractor system call | [
"Create",
"and",
"run",
"the",
"plasmid",
"extractor",
"system",
"call"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/typingclasses.py#L224-L244 | train |
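run_subprocess and write_to_logfile come from the pipeline's accessory helpers; their exact signatures are not shown here, so this standard-library stand-in is an assumption, kept only to make the pattern concrete.

import subprocess

def run_subprocess_sketch(command):
    # shell=True mirrors passing the pipeline's full command string
    proc = subprocess.run(command, shell=True, capture_output=True, text=True)
    return proc.stdout, proc.stderr

out, err = run_subprocess_sketch('echo plasmid')  # placeholder command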
lowandrew/OLCTools | spadespipeline/typingclasses.py | PlasmidExtractor.parse_report | def parse_report(self):
"""
Parse the plasmid extractor report, and populate metadata objects
"""
logging.info('Parsing Plasmid Extractor outputs')
# A dictionary to store the parsed excel file in a more readable format
nesteddictionary = dict()
# Use pandas to read in the CSV file, and convert the pandas data frame to a dictionary (.to_dict())
dictionary = pandas.read_csv(self.plasmid_report).to_dict()
# Iterate through the dictionary - each header from the CSV file
for header in dictionary:
# Sample is the primary key, and value is the value of the cell for that primary key + header combination
for sample, value in dictionary[header].items():
# Update the dictionary with the new data
try:
nesteddictionary[sample].update({header: value})
# Create the nested dictionary if it hasn't been created yet
except KeyError:
nesteddictionary[sample] = dict()
nesteddictionary[sample].update({header: value})
# Get the results into the metadata object
for sample in self.metadata:
# Initialise the plasmid extractor genobject
setattr(sample, self.analysistype, GenObject())
# Initialise the list of all plasmids
sample[self.analysistype].plasmids = list()
# Iterate through the dictionary of results
for line in nesteddictionary:
# Extract the sample name from the dictionary in a manner consistent with the rest of the COWBAT
# pipeline e.g. 2014-SEQ-0276_S2_L001 becomes 2014-SEQ-0276
sample_name = nesteddictionary[line]['Sample']
# Use the filer method to extract the name
name = list(filer([sample_name]))[0]
# Ensure that the names match
if name == sample.name:
# Append the plasmid name extracted from the dictionary to the list of plasmids
sample[self.analysistype].plasmids.append(nesteddictionary[line]['Plasmid'])
# Copy the report to the folder containing all reports for the pipeline
try:
shutil.copyfile(self.plasmid_report, os.path.join(self.reportpath, 'plasmidReport.csv'))
except IOError:
pass | python | def parse_report(self):
"""
Parse the plasmid extractor report, and populate metadata objects
"""
logging.info('Parsing Plasmid Extractor outputs')
# A dictionary to store the parsed excel file in a more readable format
nesteddictionary = dict()
# Use pandas to read in the CSV file, and convert the pandas data frame to a dictionary (.to_dict())
dictionary = pandas.read_csv(self.plasmid_report).to_dict()
# Iterate through the dictionary - each header from the CSV file
for header in dictionary:
# Sample is the primary key, and value is the value of the cell for that primary key + header combination
for sample, value in dictionary[header].items():
# Update the dictionary with the new data
try:
nesteddictionary[sample].update({header: value})
# Create the nested dictionary if it hasn't been created yet
except KeyError:
nesteddictionary[sample] = dict()
nesteddictionary[sample].update({header: value})
# Get the results into the metadata object
for sample in self.metadata:
# Initialise the plasmid extractor genobject
setattr(sample, self.analysistype, GenObject())
# Initialise the list of all plasmids
sample[self.analysistype].plasmids = list()
# Iterate through the dictionary of results
for line in nesteddictionary:
# Extract the sample name from the dictionary in a manner consistent with the rest of the COWBAT
# pipeline e.g. 2014-SEQ-0276_S2_L001 becomes 2014-SEQ-0276
sample_name = nesteddictionary[line]['Sample']
# Use the filer method to extract the name
name = list(filer([sample_name]))[0]
# Ensure that the names match
if name == sample.name:
# Append the plasmid name extracted from the dictionary to the list of plasmids
sample[self.analysistype].plasmids.append(nesteddictionary[line]['Plasmid'])
# Copy the report to the folder containing all reports for the pipeline
try:
shutil.copyfile(self.plasmid_report, os.path.join(self.reportpath, 'plasmidReport.csv'))
except IOError:
pass | [
"def",
"parse_report",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Parsing Plasmid Extractor outputs'",
")",
"# A dictionary to store the parsed excel file in a more readable format",
"nesteddictionary",
"=",
"dict",
"(",
")",
"# Use pandas to read in the CSV file, and convert the pandas data frame to a dictionary (.to_dict())",
"dictionary",
"=",
"pandas",
".",
"read_csv",
"(",
"self",
".",
"plasmid_report",
")",
".",
"to_dict",
"(",
")",
"# Iterate through the dictionary - each header from the CSV file",
"for",
"header",
"in",
"dictionary",
":",
"# Sample is the primary key, and value is the value of the cell for that primary key + header combination",
"for",
"sample",
",",
"value",
"in",
"dictionary",
"[",
"header",
"]",
".",
"items",
"(",
")",
":",
"# Update the dictionary with the new data",
"try",
":",
"nesteddictionary",
"[",
"sample",
"]",
".",
"update",
"(",
"{",
"header",
":",
"value",
"}",
")",
"# Create the nested dictionary if it hasn't been created yet",
"except",
"KeyError",
":",
"nesteddictionary",
"[",
"sample",
"]",
"=",
"dict",
"(",
")",
"nesteddictionary",
"[",
"sample",
"]",
".",
"update",
"(",
"{",
"header",
":",
"value",
"}",
")",
"# Get the results into the metadata object",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"# Initialise the plasmid extractor genobject",
"setattr",
"(",
"sample",
",",
"self",
".",
"analysistype",
",",
"GenObject",
"(",
")",
")",
"# Initialise the list of all plasmids",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"plasmids",
"=",
"list",
"(",
")",
"# Iterate through the dictionary of results",
"for",
"line",
"in",
"nesteddictionary",
":",
"# Extract the sample name from the dictionary in a manner consistent with the rest of the COWBAT",
"# pipeline e.g. 2014-SEQ-0276_S2_L001 becomes 2014-SEQ-0276",
"sample_name",
"=",
"nesteddictionary",
"[",
"line",
"]",
"[",
"'Sample'",
"]",
"# Use the filer method to extract the name",
"name",
"=",
"list",
"(",
"filer",
"(",
"[",
"sample_name",
"]",
")",
")",
"[",
"0",
"]",
"# Ensure that the names match",
"if",
"name",
"==",
"sample",
".",
"name",
":",
"# Append the plasmid name extracted from the dictionary to the list of plasmids",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"plasmids",
".",
"append",
"(",
"nesteddictionary",
"[",
"line",
"]",
"[",
"'Plasmid'",
"]",
")",
"# Copy the report to the folder containing all reports for the pipeline",
"try",
":",
"shutil",
".",
"copyfile",
"(",
"self",
".",
"plasmid_report",
",",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"'plasmidReport.csv'",
")",
")",
"except",
"IOError",
":",
"pass"
]
| Parse the plasmid extractor report, and populate metadata objects | [
"Parse",
"the",
"plasmid",
"extractor",
"report",
"and",
"populate",
"metadata",
"objects"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/typingclasses.py#L246-L287 | train |
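The nested-dictionary loop in parse_report inverts pandas' column-major .to_dict() output into row-major records. A minimal standalone illustration with made-up data:

import pandas

df = pandas.DataFrame({'Sample': ['a_S1_L001'], 'Plasmid': ['pX']})
col_major = df.to_dict()        # {'Sample': {0: 'a_S1_L001'}, 'Plasmid': {0: 'pX'}}
row_major = {}
for header, cells in col_major.items():
    for row, value in cells.items():
        row_major.setdefault(row, {})[header] = value
print(row_major)                # {0: {'Sample': 'a_S1_L001', 'Plasmid': 'pX'}}
# pandas can produce the same shape directly with df.to_dict('index').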
lowandrew/OLCTools | spadespipeline/typingclasses.py | ResFinder.object_clean | def object_clean(self):
"""
Remove large attributes from the metadata objects
"""
for sample in self.metadata:
try:
delattr(sample[self.analysistype], 'aaidentity')
delattr(sample[self.analysistype], 'aaalign')
delattr(sample[self.analysistype], 'aaindex')
delattr(sample[self.analysistype], 'ntalign')
delattr(sample[self.analysistype], 'ntindex')
delattr(sample[self.analysistype], 'dnaseq')
delattr(sample[self.analysistype], 'blastresults')
except AttributeError:
pass | python | def object_clean(self):
"""
Remove large attributes from the metadata objects
"""
for sample in self.metadata:
try:
delattr(sample[self.analysistype], 'aaidentity')
delattr(sample[self.analysistype], 'aaalign')
delattr(sample[self.analysistype], 'aaindex')
delattr(sample[self.analysistype], 'ntalign')
delattr(sample[self.analysistype], 'ntindex')
delattr(sample[self.analysistype], 'dnaseq')
delattr(sample[self.analysistype], 'blastresults')
except AttributeError:
pass | [
"def",
"object_clean",
"(",
"self",
")",
":",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"try",
":",
"delattr",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
",",
"'aaidentity'",
")",
"delattr",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
",",
"'aaalign'",
")",
"delattr",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
",",
"'aaindex'",
")",
"delattr",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
",",
"'ntalign'",
")",
"delattr",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
",",
"'ntindex'",
")",
"delattr",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
",",
"'dnaseq'",
")",
"delattr",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
",",
"'blastresults'",
")",
"except",
"AttributeError",
":",
"pass"
]
| Remove large attributes from the metadata objects | [
"Remove",
"large",
"attributes",
"from",
"the",
"metadata",
"objects"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/typingclasses.py#L803-L817 | train |
portfors-lab/sparkle | sparkle/gui/dialogs/scale_dlg.py | ScaleDialog.values | def values(self):
"""Gets the scales that the user chose
| For frequency: 1 = Hz, 1000 = kHz
| For time: 1 = seconds, 0.001 = ms
:returns: float, float -- frequency scaling, time scaling
"""
if self.ui.hzBtn.isChecked():
fscale = SmartSpinBox.Hz
else:
fscale = SmartSpinBox.kHz
if self.ui.msBtn.isChecked():
tscale = SmartSpinBox.MilliSeconds
else:
tscale = SmartSpinBox.Seconds
return fscale, tscale | python | def values(self):
"""Gets the scales that the user chose
| For frequency: 1 = Hz, 1000 = kHz
| For time: 1 = seconds, 0.001 = ms
:returns: float, float -- frequency scaling, time scaling
"""
if self.ui.hzBtn.isChecked():
fscale = SmartSpinBox.Hz
else:
fscale = SmartSpinBox.kHz
if self.ui.msBtn.isChecked():
tscale = SmartSpinBox.MilliSeconds
else:
tscale = SmartSpinBox.Seconds
return fscale, tscale | [
"def",
"values",
"(",
"self",
")",
":",
"if",
"self",
".",
"ui",
".",
"hzBtn",
".",
"isChecked",
"(",
")",
":",
"fscale",
"=",
"SmartSpinBox",
".",
"Hz",
"else",
":",
"fscale",
"=",
"SmartSpinBox",
".",
"kHz",
"if",
"self",
".",
"ui",
".",
"msBtn",
".",
"isChecked",
"(",
")",
":",
"tscale",
"=",
"SmartSpinBox",
".",
"MilliSeconds",
"else",
":",
"tscale",
"=",
"SmartSpinBox",
".",
"Seconds",
"return",
"fscale",
",",
"tscale"
]
| Gets the scales that the user chose
| For frequency: 1 = Hz, 1000 = kHz
| For time: 1 = seconds, 0.001 = ms
:returns: float, float -- frequency scaling, time scaling | [
"Gets",
"the",
"scales",
"that",
"the",
"user",
"chose"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/dialogs/scale_dlg.py#L28-L46 | train |
tgbugs/ontquery | ontquery/trie.py | insert_trie | def insert_trie(trie, value): # aka get_subtrie_or_insert
""" Insert a value into the trie if it is not already contained in the trie.
Return the subtree for the value regardless of whether it is a new value
or not. """
if value in trie:
return trie[value]
multi_check = False
for key in tuple(trie.keys()):
if len(value) > len(key) and value.startswith(key):
return insert_trie(trie[key], value)
elif key.startswith(value): # we know the value is not in the trie
if not multi_check:
trie[value] = {}
multi_check = True # there can be multiple longer existing prefixes
            dict_ = trie.pop(key) # does not break the trie since key<->dict_ remains unchanged
trie[value][key] = dict_
if value not in trie:
trie[value] = {}
return trie[value] | python | def insert_trie(trie, value): # aka get_subtrie_or_insert
""" Insert a value into the trie if it is not already contained in the trie.
Return the subtree for the value regardless of whether it is a new value
or not. """
if value in trie:
return trie[value]
multi_check = False
for key in tuple(trie.keys()):
if len(value) > len(key) and value.startswith(key):
return insert_trie(trie[key], value)
elif key.startswith(value): # we know the value is not in the trie
if not multi_check:
trie[value] = {}
multi_check = True # there can be multiple longer existing prefixes
            dict_ = trie.pop(key) # does not break the trie since key<->dict_ remains unchanged
trie[value][key] = dict_
if value not in trie:
trie[value] = {}
return trie[value] | [
"def",
"insert_trie",
"(",
"trie",
",",
"value",
")",
":",
"# aka get_subtrie_or_insert",
"if",
"value",
"in",
"trie",
":",
"return",
"trie",
"[",
"value",
"]",
"multi_check",
"=",
"False",
"for",
"key",
"in",
"tuple",
"(",
"trie",
".",
"keys",
"(",
")",
")",
":",
"if",
"len",
"(",
"value",
")",
">",
"len",
"(",
"key",
")",
"and",
"value",
".",
"startswith",
"(",
"key",
")",
":",
"return",
"insert_trie",
"(",
"trie",
"[",
"key",
"]",
",",
"value",
")",
"elif",
"key",
".",
"startswith",
"(",
"value",
")",
":",
"# we know the value is not in the trie",
"if",
"not",
"multi_check",
":",
"trie",
"[",
"value",
"]",
"=",
"{",
"}",
"multi_check",
"=",
"True",
"# there can be multiple longer existing prefixes",
"dict_",
"=",
"trie",
".",
"pop",
"(",
"key",
")",
"# does not break strie since key<->dict_ remains unchanged",
"trie",
"[",
"value",
"]",
"[",
"key",
"]",
"=",
"dict_",
"if",
"value",
"not",
"in",
"trie",
":",
"trie",
"[",
"value",
"]",
"=",
"{",
"}",
"return",
"trie",
"[",
"value",
"]"
]
| Insert a value into the trie if it is not already contained in the trie.
Return the subtree for the value regardless of whether it is a new value
or not. | [
"Insert",
"a",
"value",
"into",
"the",
"trie",
"if",
"it",
"is",
"not",
"already",
"contained",
"in",
"the",
"trie",
".",
"Return",
"the",
"subtree",
"for",
"the",
"value",
"regardless",
"of",
"whether",
"it",
"is",
"a",
"new",
"value",
"or",
"not",
"."
]
| bcf4863cb2bf221afe2b093c5dc7da1377300041 | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/trie.py#L31-L49 | train |
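A quick trace of how insert_trie nests prefixes; the URIs are arbitrary. Longer values recurse into an existing prefix, and a shorter value adopts any existing longer keys as its subtree.

trie = {}
insert_trie(trie, 'http://a.org/')
insert_trie(trie, 'http://a.org/ns/')  # recurses under the existing prefix
insert_trie(trie, 'http://')           # shorter prefix adopts the old subtree
print(trie)
# {'http://': {'http://a.org/': {'http://a.org/ns/': {}}}}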
jason-weirather/pythologist | pythologist/__init__.py | CellDataFrame.get_valid_cell_indecies | def get_valid_cell_indecies(self):
"""
Return a dataframe of images present with 'valid' being a list of cell indices that can be included
"""
return pd.DataFrame(self).groupby(self.frame_columns).apply(lambda x: list(x['cell_index'])).\
reset_index().rename(columns={0:'valid'}) | python | def get_valid_cell_indecies(self):
"""
Return a dataframe of images present with 'valid' being a list of cell indices that can be included
"""
return pd.DataFrame(self).groupby(self.frame_columns).apply(lambda x: list(x['cell_index'])).\
reset_index().rename(columns={0:'valid'}) | [
"def",
"get_valid_cell_indecies",
"(",
"self",
")",
":",
"return",
"pd",
".",
"DataFrame",
"(",
"self",
")",
".",
"groupby",
"(",
"self",
".",
"frame_columns",
")",
".",
"apply",
"(",
"lambda",
"x",
":",
"list",
"(",
"x",
"[",
"'cell_index'",
"]",
")",
")",
".",
"reset_index",
"(",
")",
".",
"rename",
"(",
"columns",
"=",
"{",
"0",
":",
"'valid'",
"}",
")"
]
| Return a dataframe of images present with 'valid' being a list of cell indices that can be included | [
"Return",
"a",
"dataframe",
"of",
"images",
"present",
"with",
"valid",
"being",
"a",
"list",
"of",
"cell",
"indecies",
"that",
"can",
"be",
"included"
]
| 6eb4082be9dffa9570e4ceaa06d97845eac4c006 | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L49-L54 | train |
jason-weirather/pythologist | pythologist/__init__.py | CellDataFrame.prune_neighbors | def prune_neighbors(self):
"""
If the CellDataFrame has been subsetted, some of the cell-cell contacts may no longer be part of the dataset. This prunes those no-longer existent connections.
Returns:
CellDataFrame: A CellDataFrame with only valid cell-cell contacts
"""
def _neighbor_check(neighbors,valid):
if not neighbors==neighbors: return np.nan
valid_keys = set(valid)&set(neighbors.keys())
d = dict([(k,v) for k,v in neighbors.items() if k in valid_keys])
return d
fixed = self.copy()
valid = self.get_valid_cell_indecies()
valid = pd.DataFrame(self).merge(valid,on=self.frame_columns).set_index(self.frame_columns+['cell_index'])
valid = valid.apply(lambda x: _neighbor_check(x['neighbors'],x['valid']),1).reset_index().\
rename(columns={0:'new_neighbors'})
fixed = fixed.merge(valid,on=self.frame_columns+['cell_index']).drop(columns='neighbors').\
rename(columns={'new_neighbors':'neighbors'})
fixed.microns_per_pixel = self.microns_per_pixel
fixed.db = self.db
#fixed.loc[:,'neighbors'] = list(new_neighbors)
return fixed | python | def prune_neighbors(self):
"""
If the CellDataFrame has been subsetted, some of the cell-cell contacts may no longer be part of the dataset. This prunes those no-longer existent connections.
Returns:
CellDataFrame: A CellDataFrame with only valid cell-cell contacts
"""
def _neighbor_check(neighbors,valid):
if not neighbors==neighbors: return np.nan
valid_keys = set(valid)&set(neighbors.keys())
d = dict([(k,v) for k,v in neighbors.items() if k in valid_keys])
return d
fixed = self.copy()
valid = self.get_valid_cell_indecies()
valid = pd.DataFrame(self).merge(valid,on=self.frame_columns).set_index(self.frame_columns+['cell_index'])
valid = valid.apply(lambda x: _neighbor_check(x['neighbors'],x['valid']),1).reset_index().\
rename(columns={0:'new_neighbors'})
fixed = fixed.merge(valid,on=self.frame_columns+['cell_index']).drop(columns='neighbors').\
rename(columns={'new_neighbors':'neighbors'})
fixed.microns_per_pixel = self.microns_per_pixel
fixed.db = self.db
#fixed.loc[:,'neighbors'] = list(new_neighbors)
return fixed | [
"def",
"prune_neighbors",
"(",
"self",
")",
":",
"def",
"_neighbor_check",
"(",
"neighbors",
",",
"valid",
")",
":",
"if",
"not",
"neighbors",
"==",
"neighbors",
":",
"return",
"np",
".",
"nan",
"valid_keys",
"=",
"set",
"(",
"valid",
")",
"&",
"set",
"(",
"neighbors",
".",
"keys",
"(",
")",
")",
"d",
"=",
"dict",
"(",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"neighbors",
".",
"items",
"(",
")",
"if",
"k",
"in",
"valid_keys",
"]",
")",
"return",
"d",
"fixed",
"=",
"self",
".",
"copy",
"(",
")",
"valid",
"=",
"self",
".",
"get_valid_cell_indecies",
"(",
")",
"valid",
"=",
"pd",
".",
"DataFrame",
"(",
"self",
")",
".",
"merge",
"(",
"valid",
",",
"on",
"=",
"self",
".",
"frame_columns",
")",
".",
"set_index",
"(",
"self",
".",
"frame_columns",
"+",
"[",
"'cell_index'",
"]",
")",
"valid",
"=",
"valid",
".",
"apply",
"(",
"lambda",
"x",
":",
"_neighbor_check",
"(",
"x",
"[",
"'neighbors'",
"]",
",",
"x",
"[",
"'valid'",
"]",
")",
",",
"1",
")",
".",
"reset_index",
"(",
")",
".",
"rename",
"(",
"columns",
"=",
"{",
"0",
":",
"'new_neighbors'",
"}",
")",
"fixed",
"=",
"fixed",
".",
"merge",
"(",
"valid",
",",
"on",
"=",
"self",
".",
"frame_columns",
"+",
"[",
"'cell_index'",
"]",
")",
".",
"drop",
"(",
"columns",
"=",
"'neighbors'",
")",
".",
"rename",
"(",
"columns",
"=",
"{",
"'new_neighbors'",
":",
"'neighbors'",
"}",
")",
"fixed",
".",
"microns_per_pixel",
"=",
"self",
".",
"microns_per_pixel",
"fixed",
".",
"db",
"=",
"self",
".",
"db",
"#fixed.loc[:,'neighbors'] = list(new_neighbors)",
"return",
"fixed"
]
| If the CellDataFrame has been subsetted, some of the cell-cell contacts may no longer be part of the dataset. This prunes those no-longer existent connections.
Returns:
CellDataFrame: A CellDataFrame with only valid cell-cell contacts | [
"If",
"the",
"CellDataFrame",
"has",
"been",
"subsetted",
"some",
"of",
"the",
"cell",
"-",
"cell",
"contacts",
"may",
"no",
"longer",
"be",
"part",
"of",
"the",
"the",
"dataset",
".",
"This",
"prunes",
"those",
"no",
"-",
"longer",
"existant",
"connections",
"."
]
| 6eb4082be9dffa9570e4ceaa06d97845eac4c006 | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L56-L78 | train |
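The `neighbors==neighbors` guard inside _neighbor_check is the standard self-inequality test for NaN, since NaN is the one common value unequal to itself.

import numpy as np

missing = np.nan
print(missing == missing)  # False -> treated as "no neighbor data"
real = {1: 2.0}
print(real == real)        # True  -> a genuine neighbor dict passes through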
jason-weirather/pythologist | pythologist/__init__.py | CellDataFrame.to_hdf | def to_hdf(self,path,key,mode='a'):
"""
Save the CellDataFrame to an hdf5 file.
Args:
path (str): the path to save to
key (str): the name of the location to save it to
mode (str): write mode
"""
pd.DataFrame(self.serialize()).to_hdf(path,key,mode=mode,format='table',complib='zlib',complevel=9)
f = h5py.File(path,'r+')
f[key].attrs["microns_per_pixel"] = float(self.microns_per_pixel) if self.microns_per_pixel is not None else np.nan
f.close() | python | def to_hdf(self,path,key,mode='a'):
"""
Save the CellDataFrame to an hdf5 file.
Args:
path (str): the path to save to
key (str): the name of the location to save it to
mode (str): write mode
"""
pd.DataFrame(self.serialize()).to_hdf(path,key,mode=mode,format='table',complib='zlib',complevel=9)
f = h5py.File(path,'r+')
f[key].attrs["microns_per_pixel"] = float(self.microns_per_pixel) if self.microns_per_pixel is not None else np.nan
f.close() | [
"def",
"to_hdf",
"(",
"self",
",",
"path",
",",
"key",
",",
"mode",
"=",
"'a'",
")",
":",
"pd",
".",
"DataFrame",
"(",
"self",
".",
"serialize",
"(",
")",
")",
".",
"to_hdf",
"(",
"path",
",",
"key",
",",
"mode",
"=",
"mode",
",",
"format",
"=",
"'table'",
",",
"complib",
"=",
"'zlib'",
",",
"complevel",
"=",
"9",
")",
"f",
"=",
"h5py",
".",
"File",
"(",
"path",
",",
"'r+'",
")",
"f",
"[",
"key",
"]",
".",
"attrs",
"[",
"\"microns_per_pixel\"",
"]",
"=",
"float",
"(",
"self",
".",
"microns_per_pixel",
")",
"if",
"self",
".",
"microns_per_pixel",
"is",
"not",
"None",
"else",
"np",
".",
"nan",
"f",
".",
"close",
"(",
")"
]
| Save the CellDataFrame to an hdf5 file.
Args:
path (str): the path to save to
key (str): the name of the location to save it to
mode (str): write mode | [
"Save",
"the",
"CellDataFrame",
"to",
"an",
"hdf5",
"file",
"."
]
| 6eb4082be9dffa9570e4ceaa06d97845eac4c006 | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L102-L114 | train |
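The h5py step that follows the pandas write in to_hdf is the usual pattern for attaching scalar metadata to an existing HDF5 node; a stripped-down illustration with placeholder file name, key, and value:

import h5py

with h5py.File('cells.h5', 'r+') as f:          # reopen the file pandas wrote
    f['cells'].attrs['microns_per_pixel'] = 0.496  # placeholder scale factor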
jason-weirather/pythologist | pythologist/__init__.py | CellDataFrame.phenotypes_to_scored | def phenotypes_to_scored(self,phenotypes=None,overwrite=False):
"""
Add mutually exclusive phenotypes to the scored calls
Args:
phenotypes (list): a list of phenotypes to add to scored calls. if none or not set, add them all
overwrite (bool): if True allow the overwrite of a phenotype, if False, the phenotype must not exist in the scored calls
Returns:
CellDataFrame
"""
if not self.is_uniform(): raise ValueError("inconsistent phenotypes")
if phenotypes is None:
phenotypes = self.phenotypes
elif isinstance(phenotypes,str):
phenotypes = [phenotypes]
def _post(binary,phenotype_label,phenotypes,overwrite):
d = binary.copy()
if len(set(phenotypes)&set(list(binary.keys()))) > 0 and overwrite==False:
raise ValueError("Error, phenotype already exists as a scored type")
for label in phenotypes: d[label] = 0
if phenotype_label == phenotype_label and phenotype_label in phenotypes:
d[phenotype_label] = 1
return d
output = self.copy()
output['scored_calls'] = output.apply(lambda x:
_post(x['scored_calls'],x['phenotype_label'],phenotypes,overwrite)
,1)
return output | python | def phenotypes_to_scored(self,phenotypes=None,overwrite=False):
"""
Add mutually exclusive phenotypes to the scored calls
Args:
phenotypes (list): a list of phenotypes to add to scored calls. if none or not set, add them all
overwrite (bool): if True allow the overwrite of a phenotype, if False, the phenotype must not exist in the scored calls
Returns:
CellDataFrame
"""
if not self.is_uniform(): raise ValueError("inconsistent phenotypes")
if phenotypes is None:
phenotypes = self.phenotypes
elif isinstance(phenotypes,str):
phenotypes = [phenotypes]
def _post(binary,phenotype_label,phenotypes,overwrite):
d = binary.copy()
if len(set(phenotypes)&set(list(binary.keys()))) > 0 and overwrite==False:
raise ValueError("Error, phenotype already exists as a scored type")
for label in phenotypes: d[label] = 0
if phenotype_label == phenotype_label and phenotype_label in phenotypes:
d[phenotype_label] = 1
return d
output = self.copy()
output['scored_calls'] = output.apply(lambda x:
_post(x['scored_calls'],x['phenotype_label'],phenotypes,overwrite)
,1)
return output | [
"def",
"phenotypes_to_scored",
"(",
"self",
",",
"phenotypes",
"=",
"None",
",",
"overwrite",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"is_uniform",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"inconsistent phenotypes\"",
")",
"if",
"phenotypes",
"is",
"None",
":",
"phenotypes",
"=",
"self",
".",
"phenotypes",
"elif",
"isinstance",
"(",
"phenotypes",
",",
"str",
")",
":",
"phenotypes",
"=",
"[",
"phenotypes",
"]",
"def",
"_post",
"(",
"binary",
",",
"phenotype_label",
",",
"phenotypes",
",",
"overwrite",
")",
":",
"d",
"=",
"binary",
".",
"copy",
"(",
")",
"if",
"len",
"(",
"set",
"(",
"phenotypes",
")",
"&",
"set",
"(",
"list",
"(",
"binary",
".",
"keys",
"(",
")",
")",
")",
")",
">",
"0",
"and",
"overwrite",
"==",
"False",
":",
"raise",
"ValueError",
"(",
"\"Error, phenotype already exists as a scored type\"",
")",
"for",
"label",
"in",
"phenotypes",
":",
"d",
"[",
"label",
"]",
"=",
"0",
"if",
"phenotype_label",
"==",
"phenotype_label",
"and",
"phenotype_label",
"in",
"phenotypes",
":",
"d",
"[",
"phenotype_label",
"]",
"=",
"1",
"return",
"d",
"output",
"=",
"self",
".",
"copy",
"(",
")",
"output",
"[",
"'scored_calls'",
"]",
"=",
"output",
".",
"apply",
"(",
"lambda",
"x",
":",
"_post",
"(",
"x",
"[",
"'scored_calls'",
"]",
",",
"x",
"[",
"'phenotype_label'",
"]",
",",
"phenotypes",
",",
"overwrite",
")",
",",
"1",
")",
"return",
"output"
]
| Add mutually exclusive phenotypes to the scored calls
Args:
phenotypes (list): a list of phenotypes to add to scored calls. if none or not set, add them all
overwrite (bool): if True allow the overwrite of a phenotype, if False, the phenotype must not exist in the scored calls
Returns:
CellDataFrame | [
"Add",
"mutually",
"exclusive",
"phenotypes",
"to",
"the",
"scored",
"calls"
]
| 6eb4082be9dffa9570e4ceaa06d97845eac4c006 | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L116-L143 | train |
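A standalone replication of the nested _post transform, with invented marker and phenotype names, to show the one-hot columns it appends to each cell's scored calls.

def one_hot_scored(binary, phenotype_label, phenotypes):
    d = dict(binary)
    for label in phenotypes:
        d[label] = 0
    # phenotype_label == phenotype_label is the NaN-safe existence check
    if phenotype_label == phenotype_label and phenotype_label in phenotypes:
        d[phenotype_label] = 1
    return d

print(one_hot_scored({'PD1': 1}, 'T cell', ['T cell', 'B cell']))
# {'PD1': 1, 'T cell': 1, 'B cell': 0}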
jason-weirather/pythologist | pythologist/__init__.py | CellDataFrame.concat | def concat(self,array_like):
"""
Concatenate multiple CellDataFrames
throws an error if the microns_per_pixel is not uniform across the frames
Args:
array_like (list): a list of CellDataFrames with 1 or more CellDataFrames
Returns:
CellDataFrame
"""
arr = list(array_like)
if len(set([x.microns_per_pixel for x in arr])) != 1:
raise ValueError("Multiple microns per pixel set")
cdf = CellDataFrame(pd.concat([pd.DataFrame(x) for x in arr]))
cdf.microns_per_pixel = arr[0].microns_per_pixel
return cdf | python | def concat(self,array_like):
"""
Concatenate multiple CellDataFrames
throws an error if the microns_per_pixel is not uniform across the frames
Args:
array_like (list): a list of CellDataFrames with 1 or more CellDataFrames
Returns:
CellDataFrame
"""
arr = list(array_like)
if len(set([x.microns_per_pixel for x in arr])) != 1:
raise ValueError("Multiple microns per pixel set")
cdf = CellDataFrame(pd.concat([pd.DataFrame(x) for x in arr]))
cdf.microns_per_pixel = arr[0].microns_per_pixel
return cdf | [
"def",
"concat",
"(",
"self",
",",
"array_like",
")",
":",
"arr",
"=",
"list",
"(",
"array_like",
")",
"if",
"len",
"(",
"set",
"(",
"[",
"x",
".",
"microns_per_pixel",
"for",
"x",
"in",
"arr",
"]",
")",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Multiple microns per pixel set\"",
")",
"cdf",
"=",
"CellDataFrame",
"(",
"pd",
".",
"concat",
"(",
"[",
"pd",
".",
"DataFrame",
"(",
"x",
")",
"for",
"x",
"in",
"arr",
"]",
")",
")",
"cdf",
".",
"microns_per_pixel",
"=",
"arr",
"[",
"0",
"]",
".",
"microns_per_pixel",
"return",
"cdf"
]
| Concatenate multiple CellDataFrames
throws an error if the microns_per_pixel is not uniform across the frames
Args:
array_like (list): a list of CellDataFrames with 1 or more CellDataFrames
Returns:
CellDataFrame | [
"Concatonate",
"multiple",
"CellDataFrames"
]
| 6eb4082be9dffa9570e4ceaa06d97845eac4c006 | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L147-L164 | train |
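A usage sketch; cdf1 and cdf2 stand in for real CellDataFrames that share a microns_per_pixel, which concat enforces before stitching them together.

combined = cdf1.concat([cdf1, cdf2])  # any instance can dispatch the concat
print(combined.microns_per_pixel)     # inherited from the first input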
jason-weirather/pythologist | pythologist/__init__.py | CellDataFrame.read_hdf | def read_hdf(cls,path,key=None):
"""
Read a CellDataFrame from an hdf5 file.
Args:
path (str): the path to read from
key (str): the name of the location to read from
Returns:
CellDataFrame
"""
df = pd.read_hdf(path,key)
df['scored_calls'] = df['scored_calls'].apply(lambda x: json.loads(x))
df['channel_values'] = df['channel_values'].apply(lambda x: json.loads(x))
df['regions'] = df['regions'].apply(lambda x: json.loads(x))
df['phenotype_calls'] = df['phenotype_calls'].apply(lambda x: json.loads(x))
df['neighbors'] = df['neighbors'].apply(lambda x: json.loads(x))
df['neighbors'] = df['neighbors'].apply(lambda x:
np.nan if not isinstance(x,dict) else dict(zip([int(y) for y in x.keys()],x.values()))
)
df['frame_shape'] = df['frame_shape'].apply(lambda x: tuple(json.loads(x)))
df = cls(df)
f = h5py.File(path,'r')
mpp = f[key].attrs["microns_per_pixel"]
if not np.isnan(mpp): df.microns_per_pixel = mpp
f.close()
return df | python | def read_hdf(cls,path,key=None):
"""
Read a CellDataFrame from an hdf5 file.
Args:
path (str): the path to read from
key (str): the name of the location to read from
Returns:
CellDataFrame
"""
df = pd.read_hdf(path,key)
df['scored_calls'] = df['scored_calls'].apply(lambda x: json.loads(x))
df['channel_values'] = df['channel_values'].apply(lambda x: json.loads(x))
df['regions'] = df['regions'].apply(lambda x: json.loads(x))
df['phenotype_calls'] = df['phenotype_calls'].apply(lambda x: json.loads(x))
df['neighbors'] = df['neighbors'].apply(lambda x: json.loads(x))
df['neighbors'] = df['neighbors'].apply(lambda x:
np.nan if not isinstance(x,dict) else dict(zip([int(y) for y in x.keys()],x.values()))
)
df['frame_shape'] = df['frame_shape'].apply(lambda x: tuple(json.loads(x)))
df = cls(df)
f = h5py.File(path,'r')
mpp = f[key].attrs["microns_per_pixel"]
if not np.isnan(mpp): df.microns_per_pixel = mpp
f.close()
return df | [
"def",
"read_hdf",
"(",
"cls",
",",
"path",
",",
"key",
"=",
"None",
")",
":",
"df",
"=",
"pd",
".",
"read_hdf",
"(",
"path",
",",
"key",
")",
"df",
"[",
"'scored_calls'",
"]",
"=",
"df",
"[",
"'scored_calls'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"json",
".",
"loads",
"(",
"x",
")",
")",
"df",
"[",
"'channel_values'",
"]",
"=",
"df",
"[",
"'channel_values'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"json",
".",
"loads",
"(",
"x",
")",
")",
"df",
"[",
"'regions'",
"]",
"=",
"df",
"[",
"'regions'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"json",
".",
"loads",
"(",
"x",
")",
")",
"df",
"[",
"'phenotype_calls'",
"]",
"=",
"df",
"[",
"'phenotype_calls'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"json",
".",
"loads",
"(",
"x",
")",
")",
"df",
"[",
"'neighbors'",
"]",
"=",
"df",
"[",
"'neighbors'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"json",
".",
"loads",
"(",
"x",
")",
")",
"df",
"[",
"'neighbors'",
"]",
"=",
"df",
"[",
"'neighbors'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"np",
".",
"nan",
"if",
"not",
"isinstance",
"(",
"x",
",",
"dict",
")",
"else",
"dict",
"(",
"zip",
"(",
"[",
"int",
"(",
"y",
")",
"for",
"y",
"in",
"x",
".",
"keys",
"(",
")",
"]",
",",
"x",
".",
"values",
"(",
")",
")",
")",
")",
"df",
"[",
"'frame_shape'",
"]",
"=",
"df",
"[",
"'frame_shape'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"tuple",
"(",
"json",
".",
"loads",
"(",
"x",
")",
")",
")",
"df",
"=",
"cls",
"(",
"df",
")",
"f",
"=",
"h5py",
".",
"File",
"(",
"path",
",",
"'r'",
")",
"mpp",
"=",
"f",
"[",
"key",
"]",
".",
"attrs",
"[",
"\"microns_per_pixel\"",
"]",
"if",
"not",
"np",
".",
"isnan",
"(",
"mpp",
")",
":",
"df",
".",
"microns_per_pixel",
"=",
"mpp",
"f",
".",
"close",
"(",
")",
"return",
"df"
]
| Read a CellDataFrame from an hdf5 file.
Args:
path (str): the path to read from
key (str): the name of the location to read from
Returns:
CellDataFrame | [
"Read",
"a",
"CellDataFrame",
"from",
"an",
"hdf5",
"file",
"."
]
| 6eb4082be9dffa9570e4ceaa06d97845eac4c006 | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L168-L194 | train |
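A round-trip sketch pairing read_hdf with the to_hdf method above; the path and key are placeholders, and the key is passed explicitly since the attrs lookup needs one.

cdf.to_hdf('cells.h5', key='cells', mode='w')
restored = CellDataFrame.read_hdf('cells.h5', key='cells')
assert restored.microns_per_pixel == cdf.microns_per_pixel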
jason-weirather/pythologist | pythologist/__init__.py | CellDataFrame.serialize | def serialize(self):
"""
Convert the data to one that can be saved in h5 structures
Returns:
pandas.DataFrame: like a cell data frame but serialized. columns
"""
df = self.copy()
df['scored_calls'] = df['scored_calls'].apply(lambda x: json.dumps(x))
df['channel_values'] = df['channel_values'].apply(lambda x: json.dumps(x))
df['regions'] = df['regions'].apply(lambda x: json.dumps(x))
df['phenotype_calls'] = df['phenotype_calls'].apply(lambda x: json.dumps(x))
df['neighbors'] = df['neighbors'].apply(lambda x: json.dumps(x))
df['frame_shape'] = df['frame_shape'].apply(lambda x: json.dumps(x))
return df | python | def serialize(self):
"""
Convert the data to one that can be saved in h5 structures
Returns:
pandas.DataFrame: like a cell data frame but serialized. columns
"""
df = self.copy()
df['scored_calls'] = df['scored_calls'].apply(lambda x: json.dumps(x))
df['channel_values'] = df['channel_values'].apply(lambda x: json.dumps(x))
df['regions'] = df['regions'].apply(lambda x: json.dumps(x))
df['phenotype_calls'] = df['phenotype_calls'].apply(lambda x: json.dumps(x))
df['neighbors'] = df['neighbors'].apply(lambda x: json.dumps(x))
df['frame_shape'] = df['frame_shape'].apply(lambda x: json.dumps(x))
return df | [
"def",
"serialize",
"(",
"self",
")",
":",
"df",
"=",
"self",
".",
"copy",
"(",
")",
"df",
"[",
"'scored_calls'",
"]",
"=",
"df",
"[",
"'scored_calls'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"json",
".",
"dumps",
"(",
"x",
")",
")",
"df",
"[",
"'channel_values'",
"]",
"=",
"df",
"[",
"'channel_values'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"json",
".",
"dumps",
"(",
"x",
")",
")",
"df",
"[",
"'regions'",
"]",
"=",
"df",
"[",
"'regions'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"json",
".",
"dumps",
"(",
"x",
")",
")",
"df",
"[",
"'phenotype_calls'",
"]",
"=",
"df",
"[",
"'phenotype_calls'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"json",
".",
"dumps",
"(",
"x",
")",
")",
"df",
"[",
"'neighbors'",
"]",
"=",
"df",
"[",
"'neighbors'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"json",
".",
"dumps",
"(",
"x",
")",
")",
"df",
"[",
"'frame_shape'",
"]",
"=",
"df",
"[",
"'frame_shape'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"json",
".",
"dumps",
"(",
"x",
")",
")",
"return",
"df"
]
| Convert the data to one that can be saved in h5 structures
Returns:
pandas.DataFrame: like a cell data frame but serialized. columns | [
"Convert",
"the",
"data",
"to",
"one",
"that",
"can",
"be",
"saved",
"in",
"h5",
"structures"
]
| 6eb4082be9dffa9570e4ceaa06d97845eac4c006 | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L196-L210 | train |
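serialize and read_hdf form a JSON-in-cells pattern: dict and tuple columns become strings so pandas can write them into an HDF5 table. A minimal illustration with invented values:

import json

cell = {'scored_calls': {'PD1': 1}, 'frame_shape': (1040, 1392)}
stored = {k: json.dumps(v) for k, v in cell.items()}
print(stored['frame_shape'])                      # "[1040, 1392]"
print(tuple(json.loads(stored['frame_shape'])))   # (1040, 1392) on the way back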
jason-weirather/pythologist | pythologist/__init__.py | CellDataFrame.contacts | def contacts(self,*args,**kwargs):
"""
Assess the cell-to-cell contacts recorded in the CellDataFrame
Returns:
Contacts: returns a class that holds cell-to-cell contact information for whatever phenotypes were in the CellDataFrame before execution.
"""
n = Contacts.read_cellframe(self,prune_neighbors=True)
if 'measured_regions' in kwargs: n.measured_regions = kwargs['measured_regions']
else: n.measured_regions = self.get_measured_regions()
if 'measured_phenotypes' in kwargs: n.measured_phenotypes = kwargs['measured_phenotypes']
else: n.measured_phenotypes = self.phenotypes
n.microns_per_pixel = self.microns_per_pixel
return n | python | def contacts(self,*args,**kwargs):
"""
Assess the cell-to-cell contacts recorded in the CellDataFrame
Returns:
Contacts: returns a class that holds cell-to-cell contact information for whatever phenotypes were in the CellDataFrame before execution.
"""
n = Contacts.read_cellframe(self,prune_neighbors=True)
if 'measured_regions' in kwargs: n.measured_regions = kwargs['measured_regions']
else: n.measured_regions = self.get_measured_regions()
if 'measured_phenotypes' in kwargs: n.measured_phenotypes = kwargs['measured_phenotypes']
else: n.measured_phenotypes = self.phenotypes
n.microns_per_pixel = self.microns_per_pixel
return n | [
"def",
"contacts",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"n",
"=",
"Contacts",
".",
"read_cellframe",
"(",
"self",
",",
"prune_neighbors",
"=",
"True",
")",
"if",
"'measured_regions'",
"in",
"kwargs",
":",
"n",
".",
"measured_regions",
"=",
"kwargs",
"[",
"'measured_regions'",
"]",
"else",
":",
"n",
".",
"measured_regions",
"=",
"self",
".",
"get_measured_regions",
"(",
")",
"if",
"'measured_phenotypes'",
"in",
"kwargs",
":",
"n",
".",
"measured_phenotypes",
"=",
"kwargs",
"[",
"'measured_phenotypes'",
"]",
"else",
":",
"n",
".",
"measured_phenotypes",
"=",
"self",
".",
"phenotypes",
"n",
".",
"microns_per_pixel",
"=",
"self",
".",
"microns_per_pixel",
"return",
"n"
]
| Assess the cell-to-cell contacts recorded in the CellDataFrame
Returns:
Contacts: returns a class that holds cell-to-cell contact information for whatever phenotypes were in the CellDataFrame before execution. | [
"Use",
"assess",
"the",
"cell",
"-",
"to",
"-",
"cell",
"contacts",
"recorded",
"in",
"the",
"celldataframe"
]
| 6eb4082be9dffa9570e4ceaa06d97845eac4c006 | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L332-L345 | train |
jason-weirather/pythologist | pythologist/__init__.py | CellDataFrame.cartesian | def cartesian(self,subsets=None,step_pixels=100,max_distance_pixels=150,*args,**kwargs):
"""
Return a class that can be used to create honeycomb plots
Args:
subsets (list): list of SubsetLogic objects
step_pixels (int): distance between hexagons
max_distance_pixels (int): the distance from each point by which to calculate the quantity of the phenotype for that area
Returns:
Cartesian: returns a class that holds the layout of the points to plot.
"""
n = Cartesian.read_cellframe(self,subsets=subsets,step_pixels=step_pixels,max_distance_pixels=max_distance_pixels,prune_neighbors=False,*args,**kwargs)
if 'measured_regions' in kwargs: n.measured_regions = kwargs['measured_regions']
else: n.measured_regions = self.get_measured_regions()
if 'measured_phenotypes' in kwargs: n.measured_phenotypes = kwargs['measured_phenotypes']
else: n.measured_phenotypes = self.phenotypes
n.microns_per_pixel = self.microns_per_pixel
return n | python | def cartesian(self,subsets=None,step_pixels=100,max_distance_pixels=150,*args,**kwargs):
"""
Return a class that can be used to create honeycomb plots
Args:
subsets (list): list of SubsetLogic objects
step_pixels (int): distance between hexagons
max_distance_pixels (int): the distance from each point by which to calculate the quantity of the phenotype for that area
Returns:
Cartesian: returns a class that holds the layout of the points to plot.
"""
n = Cartesian.read_cellframe(self,subsets=subsets,step_pixels=step_pixels,max_distance_pixels=max_distance_pixels,prune_neighbors=False,*args,**kwargs)
if 'measured_regions' in kwargs: n.measured_regions = kwargs['measured_regions']
else: n.measured_regions = self.get_measured_regions()
if 'measured_phenotypes' in kwargs: n.measured_phenotypes = kwargs['measured_phenotypes']
else: n.measured_phenotypes = self.phenotypes
n.microns_per_pixel = self.microns_per_pixel
return n | [
"def",
"cartesian",
"(",
"self",
",",
"subsets",
"=",
"None",
",",
"step_pixels",
"=",
"100",
",",
"max_distance_pixels",
"=",
"150",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"n",
"=",
"Cartesian",
".",
"read_cellframe",
"(",
"self",
",",
"subsets",
"=",
"subsets",
",",
"step_pixels",
"=",
"step_pixels",
",",
"max_distance_pixels",
"=",
"max_distance_pixels",
",",
"prune_neighbors",
"=",
"False",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"'measured_regions'",
"in",
"kwargs",
":",
"n",
".",
"measured_regions",
"=",
"kwargs",
"[",
"'measured_regions'",
"]",
"else",
":",
"n",
".",
"measured_regions",
"=",
"self",
".",
"get_measured_regions",
"(",
")",
"if",
"'measured_phenotypes'",
"in",
"kwargs",
":",
"n",
".",
"measured_phenotypes",
"=",
"kwargs",
"[",
"'measured_phenotypes'",
"]",
"else",
":",
"n",
".",
"measured_phenotypes",
"=",
"self",
".",
"phenotypes",
"n",
".",
"microns_per_pixel",
"=",
"self",
".",
"microns_per_pixel",
"return",
"n"
]
| Return a class that can be used to create honeycomb plots
Args:
subsets (list): list of SubsetLogic objects
step_pixels (int): distance between hexagons
max_distance_pixels (int): the distance from each point by which to calculate the quantity of the phenotype for that area
Returns:
Cartesian: returns a class that holds the layout of the points to plot. | [
"Return",
"a",
"class",
"that",
"can",
"be",
"used",
"to",
"create",
"honeycomb",
"plots"
]
| 6eb4082be9dffa9570e4ceaa06d97845eac4c006 | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L347-L365 | train |
jason-weirather/pythologist | pythologist/__init__.py | CellDataFrame.counts | def counts(self,*args,**kwargs):
"""
Return a class that can be used to access count densities
Args:
measured_regions (pandas.DataFrame): Dataframe of regions that are being measured (defaults to all the regions)
measured_phenotypes (list): List of phenotypes present (defaults to all the phenotypes)
minimum_region_size_pixels (int): Minimum region size to calculate counts on in pixels (Default: 1)
Returns:
Counts: returns a class that holds the counts.
"""
n = Counts.read_cellframe(self,prune_neighbors=False)
if 'measured_regions' in kwargs: n.measured_regions = kwargs['measured_regions']
else: n.measured_regions = self.get_measured_regions()
if 'measured_phenotypes' in kwargs: n.measured_phenotypes = kwargs['measured_phenotypes']
else: n.measured_phenotypes = self.phenotypes
n.microns_per_pixel = self.microns_per_pixel
if 'minimum_region_size_pixels' in kwargs: n.minimum_region_size_pixels = kwargs['minimum_region_size_pixels']
else: n.minimum_region_size_pixels = 1
return n | python | def counts(self,*args,**kwargs):
"""
Return a class that can be used to access count densities
Args:
measured_regions (pandas.DataFrame): Dataframe of regions that are being measured (defaults to all the regions)
measured_phenotypes (list): List of phenotypes present (defaults to all the phenotypes)
minimum_region_size_pixels (int): Minimum region size to calculate counts on in pixels (Default: 1)
Returns:
Counts: returns a class that holds the counts.
"""
n = Counts.read_cellframe(self,prune_neighbors=False)
if 'measured_regions' in kwargs: n.measured_regions = kwargs['measured_regions']
else: n.measured_regions = self.get_measured_regions()
if 'measured_phenotypes' in kwargs: n.measured_phenotypes = kwargs['measured_phenotypes']
else: n.measured_phenotypes = self.phenotypes
n.microns_per_pixel = self.microns_per_pixel
if 'minimum_region_size_pixels' in kwargs: n.minimum_region_size_pixels = kwargs['minimum_region_size_pixels']
else: n.minimum_region_size_pixels = 1
return n | [
"def",
"counts",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"n",
"=",
"Counts",
".",
"read_cellframe",
"(",
"self",
",",
"prune_neighbors",
"=",
"False",
")",
"if",
"'measured_regions'",
"in",
"kwargs",
":",
"n",
".",
"measured_regions",
"=",
"kwargs",
"[",
"'measured_regions'",
"]",
"else",
":",
"n",
".",
"measured_regions",
"=",
"self",
".",
"get_measured_regions",
"(",
")",
"if",
"'measured_phenotypes'",
"in",
"kwargs",
":",
"n",
".",
"measured_phenotypes",
"=",
"kwargs",
"[",
"'measured_phenotypes'",
"]",
"else",
":",
"n",
".",
"measured_phenotypes",
"=",
"self",
".",
"phenotypes",
"n",
".",
"microns_per_pixel",
"=",
"self",
".",
"microns_per_pixel",
"if",
"'minimum_region_size_pixels'",
"in",
"kwargs",
":",
"n",
".",
"minimum_region_size_pixels",
"=",
"kwargs",
"[",
"'minimum_region_size_pixels'",
"]",
"else",
":",
"n",
".",
"minimum_region_size_pixels",
"=",
"1",
"return",
"n"
]
| Return a class that can be used to access count densities
Args:
measured_regions (pandas.DataFrame): Dataframe of regions that are being measured (defaults to all the regions)
measured_phenotypes (list): List of phenotypes present (defaults to all the phenotypes)
minimum_region_size_pixels (int): Minimum region size to calculate counts on in pixels (Default: 1)
Returns:
Counts: returns a class that holds the counts. | [
"Return",
"a",
"class",
"that",
"can",
"be",
"used",
"to",
"access",
"count",
"densities"
]
| 6eb4082be9dffa9570e4ceaa06d97845eac4c006 | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L367-L387 | train |
jason-weirather/pythologist | pythologist/__init__.py | CellDataFrame.merge_scores | def merge_scores(self,df_addition,reference_markers='all',
addition_markers='all',on=['project_name','sample_name','frame_name','cell_index']):
"""
Combine CellDataFrames that differ by score composition
Args:
df_addition (CellDataFrame): The CellDataFrame to merge scores in from
reference_markers (list): which scored call names to keep in this object (default: all)
addition_markers (list): which scored call names to merge in (default: all)
on (list): the features to merge cells on
Returns:
CellDataFrame,CellDataFrame: returns a passing CellDataFrame where merge criteria were met and a failing CellDataFrame where merge criteria were not met.
"""
if isinstance(reference_markers, str):
reference_markers = self.scored_names
elif reference_markers is None: reference_markers = []
if isinstance(addition_markers, str):
addition_markers = df_addition.scored_names
elif addition_markers is None: addition_markers = []
df_addition = df_addition.copy()
df_addition['_key'] = 1
df = self.merge(df_addition[['scored_calls','_key']+on].rename(columns={'scored_calls':'_addition'}),
on = on,
how = 'left'
)
df['_sub1'] = df['scored_calls'].apply(lambda x:
dict((k,x[k]) for k in reference_markers)
)
df['_sub2'] = df['_addition'].apply(lambda x:
dict({}) if x!=x else dict((k,x[k]) for k in addition_markers) # handle NaN where we fail to match; properly treat as empty
)
# combine the two dictionaries
df['scored_calls'] = df.apply(lambda x:
{**x['_sub1'],**x['_sub2']}
,1)
df = df.drop(columns=['_sub1','_sub2','_addition'])
df = df.drop(columns='_key').copy(),df[df['_key'].isna()].drop(columns='_key').copy()
if self.microns_per_pixel: df[0].microns_per_pixel = self.microns_per_pixel
if self.microns_per_pixel: df[1].microns_per_pixel = self.microns_per_pixel
return df | python | def merge_scores(self,df_addition,reference_markers='all',
addition_markers='all',on=['project_name','sample_name','frame_name','cell_index']):
"""
Combine CellDataFrames that differ by score composition
Args:
df_addition (CellDataFrame): The CellDataFrame to merge scores in from
reference_markers (list): which scored call names to keep in this object (default: all)
addition_markers (list): which scored call names to merge in (default: all)
on (list): the features to merge cells on
Returns:
CellDataFrame,CellDataFrame: returns a passing CellDataFrame where merge criteria were met and a failing CellDataFrame where merge criteria were not met.
"""
if isinstance(reference_markers, str):
reference_markers = self.scored_names
elif reference_markers is None: reference_markers = []
if isinstance(addition_markers, str):
addition_markers = df_addition.scored_names
elif addition_markers is None: addition_markers = []
df_addition = df_addition.copy()
df_addition['_key'] = 1
df = self.merge(df_addition[['scored_calls','_key']+on].rename(columns={'scored_calls':'_addition'}),
on = on,
how = 'left'
)
df['_sub1'] = df['scored_calls'].apply(lambda x:
dict((k,x[k]) for k in reference_markers)
)
df['_sub2'] = df['_addition'].apply(lambda x:
dict({}) if x!=x else dict((k,x[k]) for k in addition_markers) # handle NaN where we fail to match; properly treat as empty
)
# combine the two dictionaries
df['scored_calls'] = df.apply(lambda x:
{**x['_sub1'],**x['_sub2']}
,1)
df = df.drop(columns=['_sub1','_sub2','_addition'])
df = df.drop(columns='_key').copy(),df[df['_key'].isna()].drop(columns='_key').copy()
if self.microns_per_pixel: df[0].microns_per_pixel = self.microns_per_pixel
if self.microns_per_pixel: df[1].microns_per_pixel = self.microns_per_pixel
return df | [
"def",
"merge_scores",
"(",
"self",
",",
"df_addition",
",",
"reference_markers",
"=",
"'all'",
",",
"addition_markers",
"=",
"'all'",
",",
"on",
"=",
"[",
"'project_name'",
",",
"'sample_name'",
",",
"'frame_name'",
",",
"'cell_index'",
"]",
")",
":",
"if",
"isinstance",
"(",
"reference_markers",
",",
"str",
")",
":",
"reference_markers",
"=",
"self",
".",
"scored_names",
"elif",
"reference_markers",
"is",
"None",
":",
"reference_markers",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"addition_markers",
",",
"str",
")",
":",
"addition_markers",
"=",
"df_addition",
".",
"scored_names",
"elif",
"addition_markers",
"is",
"None",
":",
"addition_markers",
"=",
"[",
"]",
"df_addition",
"=",
"df_addition",
".",
"copy",
"(",
")",
"df_addition",
"[",
"'_key'",
"]",
"=",
"1",
"df",
"=",
"self",
".",
"merge",
"(",
"df_addition",
"[",
"[",
"'scored_calls'",
",",
"'_key'",
"]",
"+",
"on",
"]",
".",
"rename",
"(",
"columns",
"=",
"{",
"'scored_calls'",
":",
"'_addition'",
"}",
")",
",",
"on",
"=",
"on",
",",
"how",
"=",
"'left'",
")",
"df",
"[",
"'_sub1'",
"]",
"=",
"df",
"[",
"'scored_calls'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"dict",
"(",
"(",
"k",
",",
"x",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"reference_markers",
")",
")",
"df",
"[",
"'_sub2'",
"]",
"=",
"df",
"[",
"'_addition'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"dict",
"(",
"{",
"}",
")",
"if",
"x",
"!=",
"x",
"else",
"dict",
"(",
"(",
"k",
",",
"x",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"addition_markers",
")",
"# handle NaN where we fail to match properly treat as empty",
")",
"# combine the two dictionaries",
"df",
"[",
"'scored_calls'",
"]",
"=",
"df",
".",
"apply",
"(",
"lambda",
"x",
":",
"{",
"*",
"*",
"x",
"[",
"'_sub1'",
"]",
",",
"*",
"*",
"x",
"[",
"'_sub2'",
"]",
"}",
",",
"1",
")",
"df",
"=",
"df",
".",
"drop",
"(",
"columns",
"=",
"[",
"'_sub1'",
",",
"'_sub2'",
",",
"'_addition'",
"]",
")",
"df",
"=",
"df",
".",
"drop",
"(",
"columns",
"=",
"'_key'",
")",
".",
"copy",
"(",
")",
",",
"df",
"[",
"df",
"[",
"'_key'",
"]",
".",
"isna",
"(",
")",
"]",
".",
"drop",
"(",
"columns",
"=",
"'_key'",
")",
".",
"copy",
"(",
")",
"if",
"self",
".",
"microns_per_pixel",
":",
"df",
"[",
"0",
"]",
".",
"microns_per_pixel",
"=",
"self",
".",
"microns_per_pixel",
"if",
"self",
".",
"microns_per_pixel",
":",
"df",
"[",
"1",
"]",
".",
"microns_per_pixel",
"=",
"self",
".",
"microns_per_pixel",
"return",
"df"
]
| Combine CellDataFrames that differ by score composition
Args:
df_addition (CellDataFrame): The CellDataFrame from which to merge scores
reference_markers (list): which scored call names to keep in this object (default: all)
addition_markers (list): which scored call names to merge in (default: all)
on (list): the features to merge cells on
Returns:
CellDataFrame,CellDataFrame: returns a passing CellDataFrame where merge criteria were met and a failing CellDataFrame where merge criteria were not met. | [
"Combine",
"CellDataFrames",
"that",
"differ",
"by",
"score",
"composition"
]
| 6eb4082be9dffa9570e4ceaa06d97845eac4c006 | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L409-L451 | train |
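A minimal usage sketch for merge_scores, assuming `cdf` and `cdf_new` are CellDataFrames that share the default merge keys; the variable names and the 'PD1' marker are hypothetical:

# merge_scores returns a (passing, failing) pair of CellDataFrames
passed, failed = cdf.merge_scores(cdf_new, reference_markers='all',
                                  addition_markers=['PD1'])
print(passed.shape[0], 'cells merged,', failed.shape[0], 'cells unmatched')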
jason-weirather/pythologist | pythologist/__init__.py | CellDataFrame.zero_fill_missing_phenotypes | def zero_fill_missing_phenotypes(self):
"""
Fill in missing phenotypes and scored types by listing any missing data as negative
Returns:
CellDataFrame: The CellDataFrame modified.
"""
if self.is_uniform(verbose=False): return self.copy()
output = self.copy()
def _do_fill(d,names):
old_names = list(d.keys())
old_values = list(d.values())
missing = set(names)-set(old_names)
return dict(zip(old_names+list(missing),old_values+([0]*len(missing))))
## Need to make these uniform
pnames = self.phenotypes
output['phenotype_calls']= output.apply(lambda x:
_do_fill(x['phenotype_calls'],pnames)
,1)
return output | python | def zero_fill_missing_phenotypes(self):
"""
Fill in missing phenotypes and scored types by listing any missing data as negative
Returns:
CellDataFrame: The CellDataFrame modified.
"""
if self.is_uniform(verbose=False): return self.copy()
output = self.copy()
def _do_fill(d,names):
old_names = list(d.keys())
old_values = list(d.values())
missing = set(names)-set(old_names)
return dict(zip(old_names+list(missing),old_values+([0]*len(missing))))
## Need to make these uniform
pnames = self.phenotypes
output['phenotype_calls']= output.apply(lambda x:
_do_fill(x['phenotype_calls'],pnames)
,1)
return output | [
"def",
"zero_fill_missing_phenotypes",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_uniform",
"(",
"verbose",
"=",
"False",
")",
":",
"return",
"self",
".",
"copy",
"(",
")",
"output",
"=",
"self",
".",
"copy",
"(",
")",
"def",
"_do_fill",
"(",
"d",
",",
"names",
")",
":",
"old_names",
"=",
"list",
"(",
"d",
".",
"keys",
"(",
")",
")",
"old_values",
"=",
"list",
"(",
"d",
".",
"values",
"(",
")",
")",
"missing",
"=",
"set",
"(",
"names",
")",
"-",
"set",
"(",
"old_names",
")",
"return",
"dict",
"(",
"zip",
"(",
"old_names",
"+",
"list",
"(",
"missing",
")",
",",
"old_values",
"+",
"(",
"[",
"0",
"]",
"*",
"len",
"(",
"missing",
")",
")",
")",
")",
"## Need to make these uniform",
"pnames",
"=",
"self",
".",
"phenotypes",
"output",
"[",
"'phenotype_calls'",
"]",
"=",
"output",
".",
"apply",
"(",
"lambda",
"x",
":",
"_do_fill",
"(",
"x",
"[",
"'phenotype_calls'",
"]",
",",
"pnames",
")",
",",
"1",
")",
"return",
"output"
]
| Fill in missing phenotypes and scored types by listing any missing data as negative
Returns:
CellDataFrame: The CellDataFrame modified. | [
"Fill",
"in",
"missing",
"phenotypes",
"and",
"scored",
"types",
"by",
"listing",
"any",
"missing",
"data",
"as",
"negative"
]
| 6eb4082be9dffa9570e4ceaa06d97845eac4c006 | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L469-L488 | train |
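A short sketch of zero_fill_missing_phenotypes, again assuming `cdf` is a CellDataFrame whose frames were phenotyped with different panels:

# absent phenotype names are added to each cell's calls dictionary as 0 (negative)
uniform = cdf.zero_fill_missing_phenotypes()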
jason-weirather/pythologist | pythologist/__init__.py | CellDataFrame.drop_scored_calls | def drop_scored_calls(self,names):
"""
Take a name or list of scored call names and drop those from the scored calls
Args:
names (list): list of names to drop or a single string name to drop
Returns:
CellDataFrame: The CellDataFrame modified.
"""
def _remove(calls,names):
d = dict([(k,v) for k,v in calls.items() if k not in names])
return d
if isinstance(names, str):
names = [names]
output = self.copy()
output['scored_calls'] = output['scored_calls'].\
apply(lambda x: _remove(x,names))
return output | python | def drop_scored_calls(self,names):
"""
Take a name or list of scored call names and drop those from the scored calls
Args:
names (list): list of names to drop or a single string name to drop
Returns:
CellDataFrame: The CellDataFrame modified.
"""
def _remove(calls,names):
d = dict([(k,v) for k,v in calls.items() if k not in names])
return d
if isinstance(names, str):
names = [names]
output = self.copy()
output['scored_calls'] = output['scored_calls'].\
apply(lambda x: _remove(x,names))
return output | [
"def",
"drop_scored_calls",
"(",
"self",
",",
"names",
")",
":",
"def",
"_remove",
"(",
"calls",
",",
"names",
")",
":",
"d",
"=",
"dict",
"(",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"calls",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"names",
"]",
")",
"return",
"d",
"if",
"isinstance",
"(",
"names",
",",
"str",
")",
":",
"names",
"=",
"[",
"names",
"]",
"output",
"=",
"self",
".",
"copy",
"(",
")",
"output",
"[",
"'scored_calls'",
"]",
"=",
"output",
"[",
"'scored_calls'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"_remove",
"(",
"x",
",",
"names",
")",
")",
"return",
"output"
]
| Take a name or list of scored call names and drop those from the scored calls
Args:
names (list): list of names to drop or a single string name to drop
Returns:
CellDataFrame: The CellDataFrame modified. | [
"Take",
"a",
"name",
"or",
"list",
"of",
"scored",
"call",
"names",
"and",
"drop",
"those",
"from",
"the",
"scored",
"calls"
]
| 6eb4082be9dffa9570e4ceaa06d97845eac4c006 | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L490-L508 | train |
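A usage sketch for drop_scored_calls; 'PDL1' and 'PD1' are hypothetical marker names:

# a single name or a list of names is accepted
trimmed = cdf.drop_scored_calls('PDL1')
trimmed = cdf.drop_scored_calls(['PDL1', 'PD1'])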
jason-weirather/pythologist | pythologist/__init__.py | CellDataFrame.subset | def subset(self,logic,update=False):
"""
        Subset to create a specific phenotype based on a logic;
        logic is a 'SubsetLogic' class.
        Takes the union of all the phenotypes listed. If none are listed, uses all phenotypes.
        Takes the intersection of all the scored calls.
Args:
logic (SubsetLogic): A subsetlogic object to slice on
update (bool): (default False) change the name of the phenotype according to the label in the subset logic
Returns:
CellDataFrame: The CellDataFrame modified.
"""
pnames = self.phenotypes
snames = self.scored_names
data = self.copy()
values = []
phenotypes = logic.phenotypes
if len(phenotypes)==0: phenotypes = pnames
removing = set(self.phenotypes)-set(phenotypes)
for k in phenotypes:
if k not in pnames: raise ValueError("phenotype must exist in defined")
temp = data.loc[data['phenotype_calls'].apply(lambda x: x[k]==1)].copy()
if len(removing) > 0 and temp.shape[0] > 0:
temp['phenotype_calls'] = temp.apply(lambda x:
dict([(k,v) for k,v in x['phenotype_calls'].items() if k not in removing])
,1)
values.append(temp)
data = pd.concat(values)
for k,v in logic.scored_calls.items():
if k not in snames: raise ValueError("Scored name must exist in defined")
myfilter = 0 if v == '-' else 1
data = data.loc[data['scored_calls'].apply(lambda x: x[k]==myfilter)]
data.microns_per_pixel = self.microns_per_pixel
if update:
data['phenotype_calls'] = data['phenotype_calls'].apply(lambda x: {logic.label:1})
data.fill_phenotype_label(inplace=True)
data.db = self.db
return data | python | def subset(self,logic,update=False):
"""
        Subset to create a specific phenotype based on a logic;
        logic is a 'SubsetLogic' class.
        Takes the union of all the phenotypes listed. If none are listed, uses all phenotypes.
        Takes the intersection of all the scored calls.
Args:
logic (SubsetLogic): A subsetlogic object to slice on
update (bool): (default False) change the name of the phenotype according to the label in the subset logic
Returns:
CellDataFrame: The CellDataFrame modified.
"""
pnames = self.phenotypes
snames = self.scored_names
data = self.copy()
values = []
phenotypes = logic.phenotypes
if len(phenotypes)==0: phenotypes = pnames
removing = set(self.phenotypes)-set(phenotypes)
for k in phenotypes:
if k not in pnames: raise ValueError("phenotype must exist in defined")
temp = data.loc[data['phenotype_calls'].apply(lambda x: x[k]==1)].copy()
if len(removing) > 0 and temp.shape[0] > 0:
temp['phenotype_calls'] = temp.apply(lambda x:
dict([(k,v) for k,v in x['phenotype_calls'].items() if k not in removing])
,1)
values.append(temp)
data = pd.concat(values)
for k,v in logic.scored_calls.items():
if k not in snames: raise ValueError("Scored name must exist in defined")
myfilter = 0 if v == '-' else 1
data = data.loc[data['scored_calls'].apply(lambda x: x[k]==myfilter)]
data.microns_per_pixel = self.microns_per_pixel
if update:
data['phenotype_calls'] = data['phenotype_calls'].apply(lambda x: {logic.label:1})
data.fill_phenotype_label(inplace=True)
data.db = self.db
return data | [
"def",
"subset",
"(",
"self",
",",
"logic",
",",
"update",
"=",
"False",
")",
":",
"pnames",
"=",
"self",
".",
"phenotypes",
"snames",
"=",
"self",
".",
"scored_names",
"data",
"=",
"self",
".",
"copy",
"(",
")",
"values",
"=",
"[",
"]",
"phenotypes",
"=",
"logic",
".",
"phenotypes",
"if",
"len",
"(",
"phenotypes",
")",
"==",
"0",
":",
"phenotypes",
"=",
"pnames",
"removing",
"=",
"set",
"(",
"self",
".",
"phenotypes",
")",
"-",
"set",
"(",
"phenotypes",
")",
"for",
"k",
"in",
"phenotypes",
":",
"if",
"k",
"not",
"in",
"pnames",
":",
"raise",
"ValueError",
"(",
"\"phenotype must exist in defined\"",
")",
"temp",
"=",
"data",
".",
"loc",
"[",
"data",
"[",
"'phenotype_calls'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
"[",
"k",
"]",
"==",
"1",
")",
"]",
".",
"copy",
"(",
")",
"if",
"len",
"(",
"removing",
")",
">",
"0",
"and",
"temp",
".",
"shape",
"[",
"0",
"]",
">",
"0",
":",
"temp",
"[",
"'phenotype_calls'",
"]",
"=",
"temp",
".",
"apply",
"(",
"lambda",
"x",
":",
"dict",
"(",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"x",
"[",
"'phenotype_calls'",
"]",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"removing",
"]",
")",
",",
"1",
")",
"values",
".",
"append",
"(",
"temp",
")",
"data",
"=",
"pd",
".",
"concat",
"(",
"values",
")",
"for",
"k",
",",
"v",
"in",
"logic",
".",
"scored_calls",
".",
"items",
"(",
")",
":",
"if",
"k",
"not",
"in",
"snames",
":",
"raise",
"ValueError",
"(",
"\"Scored name must exist in defined\"",
")",
"myfilter",
"=",
"0",
"if",
"v",
"==",
"'-'",
"else",
"1",
"data",
"=",
"data",
".",
"loc",
"[",
"data",
"[",
"'scored_calls'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
"[",
"k",
"]",
"==",
"myfilter",
")",
"]",
"data",
".",
"microns_per_pixel",
"=",
"self",
".",
"microns_per_pixel",
"if",
"update",
":",
"data",
"[",
"'phenotype_calls'",
"]",
"=",
"data",
"[",
"'phenotype_calls'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"{",
"logic",
".",
"label",
":",
"1",
"}",
")",
"data",
".",
"fill_phenotype_label",
"(",
"inplace",
"=",
"True",
")",
"data",
".",
"db",
"=",
"self",
".",
"db",
"return",
"data"
]
| Subset to create a specific phenotype based on a logic;
logic is a 'SubsetLogic' class.
Takes the union of all the phenotypes listed. If none are listed, uses all phenotypes.
Takes the intersection of all the scored calls.
Args:
logic (SubsetLogic): A subsetlogic object to slice on
update (bool): (default False) change the name of the phenotype according to the label in the subset logic
Returns:
CellDataFrame: The CellDataFrame modified. | [
"subset",
"create",
"a",
"specific",
"phenotype",
"based",
"on",
"a",
"logic",
"logic",
"is",
"a",
"SubsetLogic",
"class",
"take",
"union",
"of",
"all",
"the",
"phenotypes",
"listed",
".",
"If",
"none",
"are",
"listed",
"use",
"all",
"phenotypes",
".",
"take",
"the",
"intersection",
"of",
"all",
"the",
"scored",
"calls",
"."
]
| 6eb4082be9dffa9570e4ceaa06d97845eac4c006 | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L511-L550 | train |
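A sketch of subset, assuming SubsetLogic is importable from the pythologist package (the import path, marker names, and label are assumptions):

from pythologist import SubsetLogic as SL

# keep CD8+ cells among 'T cell' phenotypes and relabel them
cd8_t = cdf.subset(SL(phenotypes=['T cell'],
                      scored_calls={'CD8': '+'},
                      label='CD8+ T cell'),
                   update=True)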
jason-weirather/pythologist | pythologist/__init__.py | CellDataFrame.collapse_phenotypes | def collapse_phenotypes(self,input_phenotype_labels,output_phenotype_label,verbose=True):
"""
Rename one or more input phenotypes to a single output phenotype
Args:
input_phenotype_labels (list): A str name or list of names to combine
        output_phenotype_label (str): A str name to change the phenotype names to
verbose (bool): output more details
Returns:
CellDataFrame: The CellDataFrame modified.
"""
if isinstance(input_phenotype_labels,str): input_phenotype_labels = [input_phenotype_labels]
bad_phenotypes = set(input_phenotype_labels)-set(self.phenotypes)
if len(bad_phenotypes) > 0: raise ValueError("Error phenotype(s) "+str(bad_phenotypes)+" are not in the data.")
data = self.copy()
if len(input_phenotype_labels) == 0: return data
def _swap_in(d,inputs,output):
# Get the keys we need to merge together
overlap = set(d.keys()).intersection(inputs)
# if there are none to merge we're done already
if len(overlap) == 0: return d
keepers = [(k,v) for k,v in d.items() if k not in inputs]
# combine anything thats not a keeper
return dict(keepers+\
[(output_phenotype_label,max([d[x] for x in overlap]))])
data['phenotype_calls'] = data.apply(lambda x:
_swap_in(x['phenotype_calls'],input_phenotype_labels,output_phenotype_label)
,1)
def _set_label(d):
vals = [k for k,v in d.items() if v==1]
return np.nan if len(vals) == 0 else vals[0]
data['phenotype_label'] = data.apply(lambda x:
_set_label(x['phenotype_calls']),1)
return data | python | def collapse_phenotypes(self,input_phenotype_labels,output_phenotype_label,verbose=True):
"""
Rename one or more input phenotypes to a single output phenotype
Args:
input_phenotype_labels (list): A str name or list of names to combine
        output_phenotype_label (str): A str name to change the phenotype names to
verbose (bool): output more details
Returns:
CellDataFrame: The CellDataFrame modified.
"""
if isinstance(input_phenotype_labels,str): input_phenotype_labels = [input_phenotype_labels]
bad_phenotypes = set(input_phenotype_labels)-set(self.phenotypes)
if len(bad_phenotypes) > 0: raise ValueError("Error phenotype(s) "+str(bad_phenotypes)+" are not in the data.")
data = self.copy()
if len(input_phenotype_labels) == 0: return data
def _swap_in(d,inputs,output):
# Get the keys we need to merge together
overlap = set(d.keys()).intersection(inputs)
# if there are none to merge we're done already
if len(overlap) == 0: return d
keepers = [(k,v) for k,v in d.items() if k not in inputs]
# combine anything thats not a keeper
return dict(keepers+\
[(output_phenotype_label,max([d[x] for x in overlap]))])
data['phenotype_calls'] = data.apply(lambda x:
_swap_in(x['phenotype_calls'],input_phenotype_labels,output_phenotype_label)
,1)
def _set_label(d):
vals = [k for k,v in d.items() if v==1]
return np.nan if len(vals) == 0 else vals[0]
data['phenotype_label'] = data.apply(lambda x:
_set_label(x['phenotype_calls']),1)
return data | [
"def",
"collapse_phenotypes",
"(",
"self",
",",
"input_phenotype_labels",
",",
"output_phenotype_label",
",",
"verbose",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"input_phenotype_labels",
",",
"str",
")",
":",
"input_phenotype_labels",
"=",
"[",
"input_phenotype_labels",
"]",
"bad_phenotypes",
"=",
"set",
"(",
"input_phenotype_labels",
")",
"-",
"set",
"(",
"self",
".",
"phenotypes",
")",
"if",
"len",
"(",
"bad_phenotypes",
")",
">",
"0",
":",
"raise",
"ValueError",
"(",
"\"Error phenotype(s) \"",
"+",
"str",
"(",
"bad_phenotypes",
")",
"+",
"\" are not in the data.\"",
")",
"data",
"=",
"self",
".",
"copy",
"(",
")",
"if",
"len",
"(",
"input_phenotype_labels",
")",
"==",
"0",
":",
"return",
"data",
"def",
"_swap_in",
"(",
"d",
",",
"inputs",
",",
"output",
")",
":",
"# Get the keys we need to merge together",
"overlap",
"=",
"set",
"(",
"d",
".",
"keys",
"(",
")",
")",
".",
"intersection",
"(",
"inputs",
")",
"# if there are none to merge we're done already",
"if",
"len",
"(",
"overlap",
")",
"==",
"0",
":",
"return",
"d",
"keepers",
"=",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"inputs",
"]",
"# combine anything thats not a keeper",
"return",
"dict",
"(",
"keepers",
"+",
"[",
"(",
"output_phenotype_label",
",",
"max",
"(",
"[",
"d",
"[",
"x",
"]",
"for",
"x",
"in",
"overlap",
"]",
")",
")",
"]",
")",
"data",
"[",
"'phenotype_calls'",
"]",
"=",
"data",
".",
"apply",
"(",
"lambda",
"x",
":",
"_swap_in",
"(",
"x",
"[",
"'phenotype_calls'",
"]",
",",
"input_phenotype_labels",
",",
"output_phenotype_label",
")",
",",
"1",
")",
"def",
"_set_label",
"(",
"d",
")",
":",
"vals",
"=",
"[",
"k",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
"if",
"v",
"==",
"1",
"]",
"return",
"np",
".",
"nan",
"if",
"len",
"(",
"vals",
")",
"==",
"0",
"else",
"vals",
"[",
"0",
"]",
"data",
"[",
"'phenotype_label'",
"]",
"=",
"data",
".",
"apply",
"(",
"lambda",
"x",
":",
"_set_label",
"(",
"x",
"[",
"'phenotype_calls'",
"]",
")",
",",
"1",
")",
"return",
"data"
]
| Rename one or more input phenotypes to a single output phenotype
Args:
input_phenotype_labels (list): A str name or list of names to combine
output_phenotype_label (str): A str name to change the phenotype names to
verbose (bool): output more details
Returns:
CellDataFrame: The CellDataFrame modified. | [
"Rename",
"one",
"or",
"more",
"input",
"phenotypes",
"to",
"a",
"single",
"output",
"phenotype"
]
| 6eb4082be9dffa9570e4ceaa06d97845eac4c006 | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L601-L635 | train |
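A usage sketch for collapse_phenotypes; the input labels are hypothetical:

# both macrophage subtypes become a single 'Macrophage' phenotype
merged = cdf.collapse_phenotypes(['CD68 PDL1+', 'CD68 PDL1-'], 'Macrophage')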
jason-weirather/pythologist | pythologist/__init__.py | CellDataFrame.fill_phenotype_label | def fill_phenotype_label(self,inplace=False):
"""
Set the phenotype_label column according to our rules for mutual exclusion
"""
def _get_phenotype(d):
vals = [k for k,v in d.items() if v == 1]
return np.nan if len(vals) == 0 else vals[0]
if inplace:
if self.shape[0] == 0: return self
self['phenotype_label'] = self.apply(lambda x: _get_phenotype(x['phenotype_calls']),1)
return
fixed = self.copy()
if fixed.shape[0] == 0: return fixed
fixed['phenotype_label'] = fixed.apply(lambda x: _get_phenotype(x['phenotype_calls']),1)
return fixed | python | def fill_phenotype_label(self,inplace=False):
"""
Set the phenotype_label column according to our rules for mutual exclusion
"""
def _get_phenotype(d):
vals = [k for k,v in d.items() if v == 1]
return np.nan if len(vals) == 0 else vals[0]
if inplace:
if self.shape[0] == 0: return self
self['phenotype_label'] = self.apply(lambda x: _get_phenotype(x['phenotype_calls']),1)
return
fixed = self.copy()
if fixed.shape[0] == 0: return fixed
fixed['phenotype_label'] = fixed.apply(lambda x: _get_phenotype(x['phenotype_calls']),1)
return fixed | [
"def",
"fill_phenotype_label",
"(",
"self",
",",
"inplace",
"=",
"False",
")",
":",
"def",
"_get_phenotype",
"(",
"d",
")",
":",
"vals",
"=",
"[",
"k",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
"if",
"v",
"==",
"1",
"]",
"return",
"np",
".",
"nan",
"if",
"len",
"(",
"vals",
")",
"==",
"0",
"else",
"vals",
"[",
"0",
"]",
"if",
"inplace",
":",
"if",
"self",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
":",
"return",
"self",
"self",
"[",
"'phenotype_label'",
"]",
"=",
"self",
".",
"apply",
"(",
"lambda",
"x",
":",
"_get_phenotype",
"(",
"x",
"[",
"'phenotype_calls'",
"]",
")",
",",
"1",
")",
"return",
"fixed",
"=",
"self",
".",
"copy",
"(",
")",
"if",
"fixed",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
":",
"return",
"fixed",
"fixed",
"[",
"'phenotype_label'",
"]",
"=",
"fixed",
".",
"apply",
"(",
"lambda",
"x",
":",
"_get_phenotype",
"(",
"x",
"[",
"'phenotype_calls'",
"]",
")",
",",
"1",
")",
"return",
"fixed"
]
| Set the phenotype_label column according to our rules for mutual exclusion | [
"Set",
"the",
"phenotype_label",
"column",
"according",
"to",
"our",
"rules",
"for",
"mutual",
"exclusion"
]
| 6eb4082be9dffa9570e4ceaa06d97845eac4c006 | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L676-L690 | train |
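A sketch of fill_phenotype_label, assuming `cdf` is a CellDataFrame with populated phenotype_calls:

labeled = cdf.fill_phenotype_label()      # returns a modified copy
cdf.fill_phenotype_label(inplace=True)    # or edits the frame in place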
jason-weirather/pythologist | pythologist/__init__.py | CellDataFrame.fill_phenotype_calls | def fill_phenotype_calls(self,phenotypes=None,inplace=False):
"""
Set the phenotype_calls according to the phenotype names
"""
if phenotypes is None: phenotypes = list(self['phenotype_label'].unique())
def _get_calls(label,phenos):
d = dict([(x,0) for x in phenos])
if label!=label: return d # np.nan case
d[label] = 1
return d
if inplace:
self['phenotype_calls'] = self.apply(lambda x: _get_calls(x['phenotype_label'],phenotypes),1)
return
fixed = self.copy()
fixed['phenotype_calls'] = fixed.apply(lambda x: _get_calls(x['phenotype_label'],phenotypes),1)
return fixed | python | def fill_phenotype_calls(self,phenotypes=None,inplace=False):
"""
Set the phenotype_calls according to the phenotype names
"""
if phenotypes is None: phenotypes = list(self['phenotype_label'].unique())
def _get_calls(label,phenos):
d = dict([(x,0) for x in phenos])
if label!=label: return d # np.nan case
d[label] = 1
return d
if inplace:
self['phenotype_calls'] = self.apply(lambda x: _get_calls(x['phenotype_label'],phenotypes),1)
return
fixed = self.copy()
fixed['phenotype_calls'] = fixed.apply(lambda x: _get_calls(x['phenotype_label'],phenotypes),1)
return fixed | [
"def",
"fill_phenotype_calls",
"(",
"self",
",",
"phenotypes",
"=",
"None",
",",
"inplace",
"=",
"False",
")",
":",
"if",
"phenotypes",
"is",
"None",
":",
"phenotypes",
"=",
"list",
"(",
"self",
"[",
"'phenotype_label'",
"]",
".",
"unique",
"(",
")",
")",
"def",
"_get_calls",
"(",
"label",
",",
"phenos",
")",
":",
"d",
"=",
"dict",
"(",
"[",
"(",
"x",
",",
"0",
")",
"for",
"x",
"in",
"phenos",
"]",
")",
"if",
"label",
"!=",
"label",
":",
"return",
"d",
"# np.nan case",
"d",
"[",
"label",
"]",
"=",
"1",
"return",
"d",
"if",
"inplace",
":",
"self",
"[",
"'phenotype_calls'",
"]",
"=",
"self",
".",
"apply",
"(",
"lambda",
"x",
":",
"_get_calls",
"(",
"x",
"[",
"'phenotype_label'",
"]",
",",
"phenotypes",
")",
",",
"1",
")",
"return",
"fixed",
"=",
"self",
".",
"copy",
"(",
")",
"fixed",
"[",
"'phenotype_calls'",
"]",
"=",
"fixed",
".",
"apply",
"(",
"lambda",
"x",
":",
"_get_calls",
"(",
"x",
"[",
"'phenotype_label'",
"]",
",",
"phenotypes",
")",
",",
"1",
")",
"return",
"fixed"
]
| Set the phenotype_calls according to the phenotype names | [
"Set",
"the",
"phenotype_calls",
"according",
"to",
"the",
"phenotype",
"names"
]
| 6eb4082be9dffa9570e4ceaa06d97845eac4c006 | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L691-L706 | train |
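A sketch of fill_phenotype_calls, assuming `cdf` has a populated phenotype_label column; the class names are hypothetical:

# rebuild the calls dictionaries from the labels, padding with an extra class
refreshed = cdf.fill_phenotype_calls(phenotypes=['T cell', 'B cell', 'OTHER'])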
jason-weirather/pythologist | pythologist/__init__.py | CellDataFrame.scored_to_phenotype | def scored_to_phenotype(self,phenotypes):
"""
        Convert binary phenotypes to mutually exclusive phenotypes.
If none of the phenotypes are set, then phenotype_label becomes nan
If any of the phenotypes are multiply set then it throws a fatal error.
Args:
phenotypes (list): a list of scored_names to convert to phenotypes
Returns:
CellDataFrame
"""
def _apply_score(scored_calls,phenotypes):
present = sorted(list(set(phenotypes)&set(scored_calls.keys())))
total = sum([scored_calls[x] for x in present])
if total > 1:
raise ValueError("You cant extract phenotypes from scores if they are not mutually exclusive")
if total == 0: return np.nan
for label in present:
if scored_calls[label] == 1: return label
raise ValueError("Should have hit an exit criteria already")
output = self.copy()
output['phenotype_label'] = output.apply(lambda x: _apply_score(x['scored_calls'],phenotypes),1)
# now update the phenotypes with these
output['phenotype_calls'] = output.apply(lambda x:
dict([(y,1 if x['phenotype_label']==y else 0) for y in phenotypes])
,1)
return output | python | def scored_to_phenotype(self,phenotypes):
"""
        Convert binary phenotypes to mutually exclusive phenotypes.
If none of the phenotypes are set, then phenotype_label becomes nan
If any of the phenotypes are multiply set then it throws a fatal error.
Args:
phenotypes (list): a list of scored_names to convert to phenotypes
Returns:
CellDataFrame
"""
def _apply_score(scored_calls,phenotypes):
present = sorted(list(set(phenotypes)&set(scored_calls.keys())))
total = sum([scored_calls[x] for x in present])
if total > 1:
raise ValueError("You cant extract phenotypes from scores if they are not mutually exclusive")
if total == 0: return np.nan
for label in present:
if scored_calls[label] == 1: return label
raise ValueError("Should have hit an exit criteria already")
output = self.copy()
output['phenotype_label'] = output.apply(lambda x: _apply_score(x['scored_calls'],phenotypes),1)
# now update the phenotypes with these
output['phenotype_calls'] = output.apply(lambda x:
dict([(y,1 if x['phenotype_label']==y else 0) for y in phenotypes])
,1)
return output | [
"def",
"scored_to_phenotype",
"(",
"self",
",",
"phenotypes",
")",
":",
"def",
"_apply_score",
"(",
"scored_calls",
",",
"phenotypes",
")",
":",
"present",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"phenotypes",
")",
"&",
"set",
"(",
"scored_calls",
".",
"keys",
"(",
")",
")",
")",
")",
"total",
"=",
"sum",
"(",
"[",
"scored_calls",
"[",
"x",
"]",
"for",
"x",
"in",
"present",
"]",
")",
"if",
"total",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"You cant extract phenotypes from scores if they are not mutually exclusive\"",
")",
"if",
"total",
"==",
"0",
":",
"return",
"np",
".",
"nan",
"for",
"label",
"in",
"present",
":",
"if",
"scored_calls",
"[",
"label",
"]",
"==",
"1",
":",
"return",
"label",
"raise",
"ValueError",
"(",
"\"Should have hit an exit criteria already\"",
")",
"output",
"=",
"self",
".",
"copy",
"(",
")",
"output",
"[",
"'phenotype_label'",
"]",
"=",
"output",
".",
"apply",
"(",
"lambda",
"x",
":",
"_apply_score",
"(",
"x",
"[",
"'scored_calls'",
"]",
",",
"phenotypes",
")",
",",
"1",
")",
"# now update the phenotypes with these",
"output",
"[",
"'phenotype_calls'",
"]",
"=",
"output",
".",
"apply",
"(",
"lambda",
"x",
":",
"dict",
"(",
"[",
"(",
"y",
",",
"1",
"if",
"x",
"[",
"'phenotype_label'",
"]",
"==",
"y",
"else",
"0",
")",
"for",
"y",
"in",
"phenotypes",
"]",
")",
",",
"1",
")",
"return",
"output"
]
| Convert binary phenotypes to mutually exclusive phenotypes.
If none of the phenotypes are set, then phenotype_label becomes nan
If any of the phenotypes are multiply set then it throws a fatal error.
Args:
phenotypes (list): a list of scored_names to convert to phenotypes
Returns:
CellDataFrame | [
"Convert",
"binary",
"pehnotypes",
"to",
"mutually",
"exclusive",
"phenotypes",
".",
"If",
"none",
"of",
"the",
"phenotypes",
"are",
"set",
"then",
"phenotype_label",
"becomes",
"nan",
"If",
"any",
"of",
"the",
"phenotypes",
"are",
"multiply",
"set",
"then",
"it",
"throws",
"a",
"fatal",
"error",
"."
]
| 6eb4082be9dffa9570e4ceaa06d97845eac4c006 | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L724-L751 | train |
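A sketch of scored_to_phenotype; 'CD3' and 'CD19' are hypothetical, mutually exclusive scored names (a ValueError is raised if both are positive on any cell):

promoted = cdf.scored_to_phenotype(['CD3', 'CD19'])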
yamcs/yamcs-python | yamcs-client/examples/commanding.py | issue_and_listen_to_command_history | def issue_and_listen_to_command_history():
"""Listen to command history updates of a single issued command."""
def tc_callback(rec):
print('TC:', rec)
command = processor.issue_command('/YSS/SIMULATOR/SWITCH_VOLTAGE_OFF', args={
'voltage_num': 1,
}, comment='im a comment')
command.create_command_history_subscription(on_data=tc_callback) | python | def issue_and_listen_to_command_history():
"""Listen to command history updates of a single issued command."""
def tc_callback(rec):
print('TC:', rec)
command = processor.issue_command('/YSS/SIMULATOR/SWITCH_VOLTAGE_OFF', args={
'voltage_num': 1,
}, comment='im a comment')
command.create_command_history_subscription(on_data=tc_callback) | [
"def",
"issue_and_listen_to_command_history",
"(",
")",
":",
"def",
"tc_callback",
"(",
"rec",
")",
":",
"print",
"(",
"'TC:'",
",",
"rec",
")",
"command",
"=",
"processor",
".",
"issue_command",
"(",
"'/YSS/SIMULATOR/SWITCH_VOLTAGE_OFF'",
",",
"args",
"=",
"{",
"'voltage_num'",
":",
"1",
",",
"}",
",",
"comment",
"=",
"'im a comment'",
")",
"command",
".",
"create_command_history_subscription",
"(",
"on_data",
"=",
"tc_callback",
")"
]
| Listen to command history updates of a single issued command. | [
"Listen",
"to",
"command",
"history",
"updates",
"of",
"a",
"single",
"issued",
"command",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/examples/commanding.py#L24-L32 | train |
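A sketch of how the module-level `processor` used above might be obtained, assuming a local Yamcs server with the 'simulator' instance from the yamcs-client examples:

from yamcs.client import YamcsClient

client = YamcsClient('localhost:8090')
processor = client.get_processor(instance='simulator', processor='realtime')
issue_and_listen_to_command_history()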
LeKono/pyhgnc | src/pyhgnc/manager/models.py | get_many2many_table | def get_many2many_table(table1, table2):
"""Creates a many-to-many table that links the given tables table1 and table2.
:param str table1: Tablename of left hand table without TABLE_PREFIX.
:param str table2: Tablename of right hand table without TABLE_PREFIX.
:return:
"""
table_name = ('{}{}__{}'.format(TABLE_PREFIX, table1, table2))
return Table(table_name, Base.metadata,
Column('{}_id'.format(table1), Integer, ForeignKey('{}{}.id'.format(TABLE_PREFIX, table1))),
Column('{}_id'.format(table2), Integer, ForeignKey('{}{}.id'.format(TABLE_PREFIX, table2)))
) | python | def get_many2many_table(table1, table2):
"""Creates a many-to-many table that links the given tables table1 and table2.
:param str table1: Tablename of left hand table without TABLE_PREFIX.
:param str table2: Tablename of right hand table without TABLE_PREFIX.
:return:
"""
table_name = ('{}{}__{}'.format(TABLE_PREFIX, table1, table2))
return Table(table_name, Base.metadata,
Column('{}_id'.format(table1), Integer, ForeignKey('{}{}.id'.format(TABLE_PREFIX, table1))),
Column('{}_id'.format(table2), Integer, ForeignKey('{}{}.id'.format(TABLE_PREFIX, table2)))
) | [
"def",
"get_many2many_table",
"(",
"table1",
",",
"table2",
")",
":",
"table_name",
"=",
"(",
"'{}{}__{}'",
".",
"format",
"(",
"TABLE_PREFIX",
",",
"table1",
",",
"table2",
")",
")",
"return",
"Table",
"(",
"table_name",
",",
"Base",
".",
"metadata",
",",
"Column",
"(",
"'{}_id'",
".",
"format",
"(",
"table1",
")",
",",
"Integer",
",",
"ForeignKey",
"(",
"'{}{}.id'",
".",
"format",
"(",
"TABLE_PREFIX",
",",
"table1",
")",
")",
")",
",",
"Column",
"(",
"'{}_id'",
".",
"format",
"(",
"table2",
")",
",",
"Integer",
",",
"ForeignKey",
"(",
"'{}{}.id'",
".",
"format",
"(",
"TABLE_PREFIX",
",",
"table2",
")",
")",
")",
")"
]
| Creates a many-to-many table that links the given tables table1 and table2.
:param str table1: Tablename of left hand table without TABLE_PREFIX.
:param str table2: Tablename of right hand table without TABLE_PREFIX.
:return: | [
"Creates",
"a",
"many",
"-",
"to",
"-",
"many",
"table",
"that",
"links",
"the",
"given",
"tables",
"table1",
"and",
"table2",
"."
]
| 1cae20c40874bfb51581b7c5c1481707e942b5d0 | https://github.com/LeKono/pyhgnc/blob/1cae20c40874bfb51581b7c5c1481707e942b5d0/src/pyhgnc/manager/models.py#L25-L36 | train |
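A usage sketch for get_many2many_table; the 'gene' and 'pubmed' table names are hypothetical:

# produces a table named '<TABLE_PREFIX>gene__pubmed' with two foreign-key columns
gene_pubmed = get_many2many_table('gene', 'pubmed')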
Frzk/Ellis | ellis/search_matches.py | SearchMatches.search | async def search(self, regex):
"""
        Wraps the search for a match in an `executor`_ and awaits it.
.. _executor: https://docs.python.org/3/library/asyncio-eventloop.html#executor
"""
coro = self._loop.run_in_executor(None, self._search, regex)
match = await coro
return match | python | async def search(self, regex):
"""
        Wraps the search for a match in an `executor`_ and awaits it.
.. _executor: https://docs.python.org/3/library/asyncio-eventloop.html#executor
"""
coro = self._loop.run_in_executor(None, self._search, regex)
match = await coro
return match | [
"async",
"def",
"search",
"(",
"self",
",",
"regex",
")",
":",
"coro",
"=",
"self",
".",
"_loop",
".",
"run_in_executor",
"(",
"None",
",",
"self",
".",
"_search",
",",
"regex",
")",
"match",
"=",
"await",
"coro",
"return",
"match"
]
| Wraps the search for a match in an `executor`_ and awaits it.
.. _executor: https://docs.python.org/3/library/asyncio-eventloop.html#executor | [
"Wraps",
"the",
"search",
"for",
"a",
"match",
"in",
"an",
"executor",
"_",
"and",
"awaits",
"for",
"it",
"."
]
| 39ce8987cbc503354cf1f45927344186a8b18363 | https://github.com/Frzk/Ellis/blob/39ce8987cbc503354cf1f45927344186a8b18363/ellis/search_matches.py#L45-L54 | train |
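A minimal sketch of driving SearchMatches.search from a coroutine; `matcher` is assumed to be an already-constructed SearchMatches instance, and the pattern (and whether it must be pre-compiled) is an assumption:

import re

async def find_failed_logins(matcher):
    # the pattern is only an example
    match = await matcher.search(re.compile(r'Failed password for (?P<user>\S+)'))
    return match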
sirfoga/pyhal | hal/streams/user.py | UserInput.show_help | def show_help(self):
"""Prints to stdout help on how to answer properly"""
print("Sorry, not well understood.")
print("- use", str(self.yes_input), "to answer 'YES'")
print("- use", str(self.no_input), "to answer 'NO'") | python | def show_help(self):
"""Prints to stdout help on how to answer properly"""
print("Sorry, not well understood.")
print("- use", str(self.yes_input), "to answer 'YES'")
print("- use", str(self.no_input), "to answer 'NO'") | [
"def",
"show_help",
"(",
"self",
")",
":",
"print",
"(",
"\"Sorry, not well understood.\"",
")",
"print",
"(",
"\"- use\"",
",",
"str",
"(",
"self",
".",
"yes_input",
")",
",",
"\"to answer 'YES'\"",
")",
"print",
"(",
"\"- use\"",
",",
"str",
"(",
"self",
".",
"no_input",
")",
",",
"\"to answer 'NO'\"",
")"
]
| Prints to stdout help on how to answer properly | [
"Prints",
"to",
"stdout",
"help",
"on",
"how",
"to",
"answer",
"properly"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/streams/user.py#L61-L65 | train |
sirfoga/pyhal | hal/streams/user.py | UserInput.re_ask | def re_ask(self, with_help=True):
"""Re-asks user the last question
:param with_help: True iff you want to show help on how to answer
questions
:return: user answer
"""
if with_help:
self.show_help()
return self.get_answer(self.last_question) | python | def re_ask(self, with_help=True):
"""Re-asks user the last question
:param with_help: True iff you want to show help on how to answer
questions
:return: user answer
"""
if with_help:
self.show_help()
return self.get_answer(self.last_question) | [
"def",
"re_ask",
"(",
"self",
",",
"with_help",
"=",
"True",
")",
":",
"if",
"with_help",
":",
"self",
".",
"show_help",
"(",
")",
"return",
"self",
".",
"get_answer",
"(",
"self",
".",
"last_question",
")"
]
| Re-asks user the last question
:param with_help: True iff you want to show help on how to answer
questions
:return: user answer | [
"Re",
"-",
"asks",
"user",
"the",
"last",
"question"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/streams/user.py#L67-L77 | train |
sirfoga/pyhal | hal/streams/user.py | UserInput.get_answer | def get_answer(self, question):
"""Asks user a question, then gets user answer
        :param question: Question to ask user
:return: User answer
"""
self.last_question = str(question).strip()
user_answer = input(self.last_question)
return user_answer.strip() | python | def get_answer(self, question):
"""Asks user a question, then gets user answer
        :param question: Question to ask user
:return: User answer
"""
self.last_question = str(question).strip()
user_answer = input(self.last_question)
return user_answer.strip() | [
"def",
"get_answer",
"(",
"self",
",",
"question",
")",
":",
"self",
".",
"last_question",
"=",
"str",
"(",
"question",
")",
".",
"strip",
"(",
")",
"user_answer",
"=",
"input",
"(",
"self",
".",
"last_question",
")",
"return",
"user_answer",
".",
"strip",
"(",
")"
]
| Asks user a question, then gets user answer
:param question: Question to ask user
:return: User answer | [
"Asks",
"user",
"a",
"question",
"then",
"gets",
"user",
"answer"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/streams/user.py#L79-L87 | train |
sirfoga/pyhal | hal/streams/user.py | UserInput.get_number | def get_number(self, question, min_i=float("-inf"), max_i=float("inf"),
just_these=None):
"""Parses answer and gets number
        :param question: Question to ask user
:param min_i: min acceptable number
:param max_i: max acceptable number
:param just_these: Accept only these numbers
:return: User answer
"""
try:
user_answer = self.get_answer(question)
user_answer = float(user_answer)
if min_i < user_answer < max_i:
if just_these:
if user_answer in just_these:
return user_answer
exc = "Number cannot be accepted. Just these: "
exc += str(just_these)
raise Exception(exc)
return user_answer
exc = "Number is not within limits. "
exc += "Min is " + str(min_i) + ". Max is " + str(max_i) + ""
raise Exception(exc)
except Exception as exc:
print(str(exc))
return self.get_number(
self.last_question,
min_i=min_i,
max_i=max_i,
just_these=just_these
) | python | def get_number(self, question, min_i=float("-inf"), max_i=float("inf"),
just_these=None):
"""Parses answer and gets number
        :param question: Question to ask user
:param min_i: min acceptable number
:param max_i: max acceptable number
:param just_these: Accept only these numbers
:return: User answer
"""
try:
user_answer = self.get_answer(question)
user_answer = float(user_answer)
if min_i < user_answer < max_i:
if just_these:
if user_answer in just_these:
return user_answer
exc = "Number cannot be accepted. Just these: "
exc += str(just_these)
raise Exception(exc)
return user_answer
exc = "Number is not within limits. "
exc += "Min is " + str(min_i) + ". Max is " + str(max_i) + ""
raise Exception(exc)
except Exception as exc:
print(str(exc))
return self.get_number(
self.last_question,
min_i=min_i,
max_i=max_i,
just_these=just_these
) | [
"def",
"get_number",
"(",
"self",
",",
"question",
",",
"min_i",
"=",
"float",
"(",
"\"-inf\"",
")",
",",
"max_i",
"=",
"float",
"(",
"\"inf\"",
")",
",",
"just_these",
"=",
"None",
")",
":",
"try",
":",
"user_answer",
"=",
"self",
".",
"get_answer",
"(",
"question",
")",
"user_answer",
"=",
"float",
"(",
"user_answer",
")",
"if",
"min_i",
"<",
"user_answer",
"<",
"max_i",
":",
"if",
"just_these",
":",
"if",
"user_answer",
"in",
"just_these",
":",
"return",
"user_answer",
"exc",
"=",
"\"Number cannot be accepted. Just these: \"",
"exc",
"+=",
"str",
"(",
"just_these",
")",
"raise",
"Exception",
"(",
"exc",
")",
"return",
"user_answer",
"exc",
"=",
"\"Number is not within limits. \"",
"exc",
"+=",
"\"Min is \"",
"+",
"str",
"(",
"min_i",
")",
"+",
"\". Max is \"",
"+",
"str",
"(",
"max_i",
")",
"+",
"\"\"",
"raise",
"Exception",
"(",
"exc",
")",
"except",
"Exception",
"as",
"exc",
":",
"print",
"(",
"str",
"(",
"exc",
")",
")",
"return",
"self",
".",
"get_number",
"(",
"self",
".",
"last_question",
",",
"min_i",
"=",
"min_i",
",",
"max_i",
"=",
"max_i",
",",
"just_these",
"=",
"just_these",
")"
]
| Parses answer and gets number
:param question: Question to ask user
:param min_i: min acceptable number
:param max_i: max acceptable number
:param just_these: Accept only these numbers
:return: User answer | [
"Parses",
"answer",
"and",
"gets",
"number"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/streams/user.py#L116-L151 | train |
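A usage sketch for get_number; note that min_i and max_i are exclusive bounds, so accepting 1-5 requires 0 and 6:

ui = UserInput()  # assumes the constructor works with its defaults
rating = ui.get_number('Rate 1-5: ', min_i=0, max_i=6,
                       just_these=[1, 2, 3, 4, 5])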
sirfoga/pyhal | hal/streams/user.py | UserInput.get_list | def get_list(self, question,
splitter=",", at_least=0, at_most=float("inf")):
"""Parses answer and gets list
        :param question: Question to ask user
:param splitter: Split list elements with this char
:param at_least: List must have at least this amount of elements
:param at_most: List must have at most this amount of elements
:return: User answer
"""
try:
user_answer = self.get_answer(question) # ask question
user_answer = user_answer.split(splitter) # split items
user_answer = [str(item).strip() for item in user_answer] # strip
if at_least < len(user_answer) < at_most:
return user_answer
exc = "List is not correct. "
exc += "There must be at least " + str(at_least) + " items, "
exc += "and at most " + str(at_most) + ". "
exc += "Use '" + str(splitter) + "' to separate items"
raise Exception(exc)
except Exception as exc:
print(str(exc))
return self.get_list(
self.last_question,
at_least=at_least,
at_most=at_most
) | python | def get_list(self, question,
splitter=",", at_least=0, at_most=float("inf")):
"""Parses answer and gets list
        :param question: Question to ask user
:param splitter: Split list elements with this char
:param at_least: List must have at least this amount of elements
:param at_most: List must have at most this amount of elements
:return: User answer
"""
try:
user_answer = self.get_answer(question) # ask question
user_answer = user_answer.split(splitter) # split items
user_answer = [str(item).strip() for item in user_answer] # strip
if at_least < len(user_answer) < at_most:
return user_answer
exc = "List is not correct. "
exc += "There must be at least " + str(at_least) + " items, "
exc += "and at most " + str(at_most) + ". "
exc += "Use '" + str(splitter) + "' to separate items"
raise Exception(exc)
except Exception as exc:
print(str(exc))
return self.get_list(
self.last_question,
at_least=at_least,
at_most=at_most
) | [
"def",
"get_list",
"(",
"self",
",",
"question",
",",
"splitter",
"=",
"\",\"",
",",
"at_least",
"=",
"0",
",",
"at_most",
"=",
"float",
"(",
"\"inf\"",
")",
")",
":",
"try",
":",
"user_answer",
"=",
"self",
".",
"get_answer",
"(",
"question",
")",
"# ask question",
"user_answer",
"=",
"user_answer",
".",
"split",
"(",
"splitter",
")",
"# split items",
"user_answer",
"=",
"[",
"str",
"(",
"item",
")",
".",
"strip",
"(",
")",
"for",
"item",
"in",
"user_answer",
"]",
"# strip",
"if",
"at_least",
"<",
"len",
"(",
"user_answer",
")",
"<",
"at_most",
":",
"return",
"user_answer",
"exc",
"=",
"\"List is not correct. \"",
"exc",
"+=",
"\"There must be at least \"",
"+",
"str",
"(",
"at_least",
")",
"+",
"\" items, \"",
"exc",
"+=",
"\"and at most \"",
"+",
"str",
"(",
"at_most",
")",
"+",
"\". \"",
"exc",
"+=",
"\"Use '\"",
"+",
"str",
"(",
"splitter",
")",
"+",
"\"' to separate items\"",
"raise",
"Exception",
"(",
"exc",
")",
"except",
"Exception",
"as",
"exc",
":",
"print",
"(",
"str",
"(",
"exc",
")",
")",
"return",
"self",
".",
"get_list",
"(",
"self",
".",
"last_question",
",",
"at_least",
"=",
"at_least",
",",
"at_most",
"=",
"at_most",
")"
]
| Parses answer and gets list
:param question: Question to ask user
:param splitter: Split list elements with this char
:param at_least: List must have at least this amount of elements
:param at_most: List must have at most this amount of elements
:return: User answer | [
"Parses",
"answer",
"and",
"gets",
"list"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/streams/user.py#L153-L182 | train |
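A usage sketch for get_list; like get_number, the bounds are exclusive, so at_least=0 and at_most=6 accept one to five items:

ui = UserInput()  # assumes the constructor works with its defaults
tags = ui.get_list('Enter tags, comma separated: ', splitter=',',
                   at_least=0, at_most=6)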
portfors-lab/sparkle | sparkle/data/batlabdata.py | batlab2sparkle | def batlab2sparkle(experiment_data):
"""Sparkle expects meta data to have a certain heirarchial organization,
reformat batlab experiment data to fit.
"""
# This is mostly for convention.. attribute that matters most is samplerate,
# since it is used in the GUI to calculate things like duration
nsdata = {}
for attr in ['computername', 'pst_filename', 'title', 'who', 'date', 'program_date']:
nsdata[attr] = experiment_data[attr]
for itest, test in enumerate(experiment_data['test']):
setname = 'test_{}'.format(itest+1)
nsdata[setname] = {}
nsdata[setname]['samplerate_ad'] = test['trace'][0]['samplerate_ad']
nsdata[setname]['comment'] = test['comment']
nsdata[setname]['start'] = test['time']
nsdata[setname]['mode'] = 'finite'
nsdata[setname]['user_tag'] = ''
if test['full_testtype'] == 'General Auto Test' and test['testtype'] == 'tone':
nsdata[setname]['testtype'] = 'Tuning Curve'
else:
nsdata[setname]['testtype'] = test['full_testtype']
stims = []
for itrace, trace in enumerate(test['trace']):
try:
stim = {'samplerate_da': trace['samplerate_da'],
'overloaded_attenuation': 0,}
components = []
for icomp, component in enumerate(trace['stimulus']):
# always add in silence component to match batlab's delay parameter
delay_comp = {'index': [icomp, 0], 'stim_type': 'silence',
'intensity': 0, 'duration': component['delay']/1000.,
'start_s': 0, 'risefall': 0}
components.append(delay_comp)
# FIXME need to pull in speaker calibration to get real intensity
comp = {'risefall' : component['rise_fall']/1000.,
'index': [icomp, 1],
'duration': component['duration']/1000.,
'start_s': component['delay']/1000.,
'intensity': 100 - component['attenuation']}
if component['soundtype_name'] == 'vocalization':
# print component
comp['stim_type'] = 'Vocalization'
comp['filename'] = component['vocal_call_file']
comp['browsedir'] = ''
elif component['soundtype_name'] == 'fmsweep':
comp['stim_type'] = 'FM Sweep'
usweep = 1 if component['usweep'] else -1
comp['start_f'] = component['frequency'] - (component['bandwidth']/2)*usweep
comp['stop_f'] = component['frequency'] + (component['bandwidth']/2)*usweep
elif component['soundtype_name'] == 'tone':
comp['stim_type'] = 'Pure Tone'
comp['frequency'] = component['frequency']
else:
# print 'FOUND UNKNOWN STIM', component['soundtype_name']
# raise ValueError
comp['stim_type'] = component['soundtype_name']
components.append(comp)
stim['components'] = components
stims.append(stim)
except TypeError:
                print('PROBLEM with', itest, itrace)
                print('component', component)
continue
nsdata[setname]['stim'] = stims
return nsdata | python | def batlab2sparkle(experiment_data):
"""Sparkle expects meta data to have a certain heirarchial organization,
reformat batlab experiment data to fit.
"""
# This is mostly for convention.. attribute that matters most is samplerate,
# since it is used in the GUI to calculate things like duration
nsdata = {}
for attr in ['computername', 'pst_filename', 'title', 'who', 'date', 'program_date']:
nsdata[attr] = experiment_data[attr]
for itest, test in enumerate(experiment_data['test']):
setname = 'test_{}'.format(itest+1)
nsdata[setname] = {}
nsdata[setname]['samplerate_ad'] = test['trace'][0]['samplerate_ad']
nsdata[setname]['comment'] = test['comment']
nsdata[setname]['start'] = test['time']
nsdata[setname]['mode'] = 'finite'
nsdata[setname]['user_tag'] = ''
if test['full_testtype'] == 'General Auto Test' and test['testtype'] == 'tone':
nsdata[setname]['testtype'] = 'Tuning Curve'
else:
nsdata[setname]['testtype'] = test['full_testtype']
stims = []
for itrace, trace in enumerate(test['trace']):
try:
stim = {'samplerate_da': trace['samplerate_da'],
'overloaded_attenuation': 0,}
components = []
for icomp, component in enumerate(trace['stimulus']):
# always add in silence component to match batlab's delay parameter
delay_comp = {'index': [icomp, 0], 'stim_type': 'silence',
'intensity': 0, 'duration': component['delay']/1000.,
'start_s': 0, 'risefall': 0}
components.append(delay_comp)
# FIXME need to pull in speaker calibration to get real intensity
comp = {'risefall' : component['rise_fall']/1000.,
'index': [icomp, 1],
'duration': component['duration']/1000.,
'start_s': component['delay']/1000.,
'intensity': 100 - component['attenuation']}
if component['soundtype_name'] == 'vocalization':
# print component
comp['stim_type'] = 'Vocalization'
comp['filename'] = component['vocal_call_file']
comp['browsedir'] = ''
elif component['soundtype_name'] == 'fmsweep':
comp['stim_type'] = 'FM Sweep'
usweep = 1 if component['usweep'] else -1
comp['start_f'] = component['frequency'] - (component['bandwidth']/2)*usweep
comp['stop_f'] = component['frequency'] + (component['bandwidth']/2)*usweep
elif component['soundtype_name'] == 'tone':
comp['stim_type'] = 'Pure Tone'
comp['frequency'] = component['frequency']
else:
# print 'FOUND UNKNOWN STIM', component['soundtype_name']
# raise ValueError
comp['stim_type'] = component['soundtype_name']
components.append(comp)
stim['components'] = components
stims.append(stim)
except TypeError:
                print('PROBLEM with', itest, itrace)
                print('component', component)
continue
nsdata[setname]['stim'] = stims
return nsdata | [
"def",
"batlab2sparkle",
"(",
"experiment_data",
")",
":",
"# This is mostly for convention.. attribute that matters most is samplerate, ",
"# since it is used in the GUI to calculate things like duration",
"nsdata",
"=",
"{",
"}",
"for",
"attr",
"in",
"[",
"'computername'",
",",
"'pst_filename'",
",",
"'title'",
",",
"'who'",
",",
"'date'",
",",
"'program_date'",
"]",
":",
"nsdata",
"[",
"attr",
"]",
"=",
"experiment_data",
"[",
"attr",
"]",
"for",
"itest",
",",
"test",
"in",
"enumerate",
"(",
"experiment_data",
"[",
"'test'",
"]",
")",
":",
"setname",
"=",
"'test_{}'",
".",
"format",
"(",
"itest",
"+",
"1",
")",
"nsdata",
"[",
"setname",
"]",
"=",
"{",
"}",
"nsdata",
"[",
"setname",
"]",
"[",
"'samplerate_ad'",
"]",
"=",
"test",
"[",
"'trace'",
"]",
"[",
"0",
"]",
"[",
"'samplerate_ad'",
"]",
"nsdata",
"[",
"setname",
"]",
"[",
"'comment'",
"]",
"=",
"test",
"[",
"'comment'",
"]",
"nsdata",
"[",
"setname",
"]",
"[",
"'start'",
"]",
"=",
"test",
"[",
"'time'",
"]",
"nsdata",
"[",
"setname",
"]",
"[",
"'mode'",
"]",
"=",
"'finite'",
"nsdata",
"[",
"setname",
"]",
"[",
"'user_tag'",
"]",
"=",
"''",
"if",
"test",
"[",
"'full_testtype'",
"]",
"==",
"'General Auto Test'",
"and",
"test",
"[",
"'testtype'",
"]",
"==",
"'tone'",
":",
"nsdata",
"[",
"setname",
"]",
"[",
"'testtype'",
"]",
"=",
"'Tuning Curve'",
"else",
":",
"nsdata",
"[",
"setname",
"]",
"[",
"'testtype'",
"]",
"=",
"test",
"[",
"'full_testtype'",
"]",
"stims",
"=",
"[",
"]",
"for",
"itrace",
",",
"trace",
"in",
"enumerate",
"(",
"test",
"[",
"'trace'",
"]",
")",
":",
"try",
":",
"stim",
"=",
"{",
"'samplerate_da'",
":",
"trace",
"[",
"'samplerate_da'",
"]",
",",
"'overloaded_attenuation'",
":",
"0",
",",
"}",
"components",
"=",
"[",
"]",
"for",
"icomp",
",",
"component",
"in",
"enumerate",
"(",
"trace",
"[",
"'stimulus'",
"]",
")",
":",
"# always add in silence component to match batlab's delay parameter",
"delay_comp",
"=",
"{",
"'index'",
":",
"[",
"icomp",
",",
"0",
"]",
",",
"'stim_type'",
":",
"'silence'",
",",
"'intensity'",
":",
"0",
",",
"'duration'",
":",
"component",
"[",
"'delay'",
"]",
"/",
"1000.",
",",
"'start_s'",
":",
"0",
",",
"'risefall'",
":",
"0",
"}",
"components",
".",
"append",
"(",
"delay_comp",
")",
"# FIXME need to pull in speaker calibration to get real intensity",
"comp",
"=",
"{",
"'risefall'",
":",
"component",
"[",
"'rise_fall'",
"]",
"/",
"1000.",
",",
"'index'",
":",
"[",
"icomp",
",",
"1",
"]",
",",
"'duration'",
":",
"component",
"[",
"'duration'",
"]",
"/",
"1000.",
",",
"'start_s'",
":",
"component",
"[",
"'delay'",
"]",
"/",
"1000.",
",",
"'intensity'",
":",
"100",
"-",
"component",
"[",
"'attenuation'",
"]",
"}",
"if",
"component",
"[",
"'soundtype_name'",
"]",
"==",
"'vocalization'",
":",
"# print component",
"comp",
"[",
"'stim_type'",
"]",
"=",
"'Vocalization'",
"comp",
"[",
"'filename'",
"]",
"=",
"component",
"[",
"'vocal_call_file'",
"]",
"comp",
"[",
"'browsedir'",
"]",
"=",
"''",
"elif",
"component",
"[",
"'soundtype_name'",
"]",
"==",
"'fmsweep'",
":",
"comp",
"[",
"'stim_type'",
"]",
"=",
"'FM Sweep'",
"usweep",
"=",
"1",
"if",
"component",
"[",
"'usweep'",
"]",
"else",
"-",
"1",
"comp",
"[",
"'start_f'",
"]",
"=",
"component",
"[",
"'frequency'",
"]",
"-",
"(",
"component",
"[",
"'bandwidth'",
"]",
"/",
"2",
")",
"*",
"usweep",
"comp",
"[",
"'stop_f'",
"]",
"=",
"component",
"[",
"'frequency'",
"]",
"+",
"(",
"component",
"[",
"'bandwidth'",
"]",
"/",
"2",
")",
"*",
"usweep",
"elif",
"component",
"[",
"'soundtype_name'",
"]",
"==",
"'tone'",
":",
"comp",
"[",
"'stim_type'",
"]",
"=",
"'Pure Tone'",
"comp",
"[",
"'frequency'",
"]",
"=",
"component",
"[",
"'frequency'",
"]",
"else",
":",
"# print 'FOUND UNKNOWN STIM', component['soundtype_name']",
"# raise ValueError",
"comp",
"[",
"'stim_type'",
"]",
"=",
"component",
"[",
"'soundtype_name'",
"]",
"components",
".",
"append",
"(",
"comp",
")",
"stim",
"[",
"'components'",
"]",
"=",
"components",
"stims",
".",
"append",
"(",
"stim",
")",
"except",
"TypeError",
":",
"print",
"'PROBLEM with'",
",",
"itest",
",",
"itrace",
"print",
"'component'",
",",
"component",
"continue",
"nsdata",
"[",
"setname",
"]",
"[",
"'stim'",
"]",
"=",
"stims",
"return",
"nsdata"
]
| Sparkle expects metadata to have a certain hierarchical organization,
reformat batlab experiment data to fit. | [
"Sparkle",
"expects",
"meta",
"data",
"to",
"have",
"a",
"certain",
"heirarchial",
"organization",
"reformat",
"batlab",
"experiment",
"data",
"to",
"fit",
"."
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/data/batlabdata.py#L118-L187 | train |
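A sketch of batlab2sparkle, assuming `experiment_data` was produced by a Batlab .pst loader elsewhere in sparkle:

nsdata = batlab2sparkle(experiment_data)
# each test becomes a 'test_<n>' entry with samplerate and stimulus metadata
print(nsdata['test_1']['testtype'], nsdata['test_1']['samplerate_ad'])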
NoviceLive/intellicoder | intellicoder/intellisense/sanitizers.py | sanitize_type | def sanitize_type(raw_type):
"""Sanitize the raw type string."""
cleaned = get_printable(raw_type).strip()
for bad in [
r'__drv_aliasesMem', r'__drv_freesMem',
r'__drv_strictTypeMatch\(\w+\)',
r'__out_data_source\(\w+\)',
r'_In_NLS_string_\(\w+\)',
r'_Frees_ptr_', r'_Frees_ptr_opt_', r'opt_',
r'\(Mem\) '
]:
cleaned = re.sub(bad, '', cleaned).strip()
if cleaned in ['_EXCEPTION_RECORD *', '_EXCEPTION_POINTERS *']:
cleaned = cleaned.strip('_')
cleaned = cleaned.replace('[]', '*')
return cleaned | python | def sanitize_type(raw_type):
"""Sanitize the raw type string."""
cleaned = get_printable(raw_type).strip()
for bad in [
r'__drv_aliasesMem', r'__drv_freesMem',
r'__drv_strictTypeMatch\(\w+\)',
r'__out_data_source\(\w+\)',
r'_In_NLS_string_\(\w+\)',
r'_Frees_ptr_', r'_Frees_ptr_opt_', r'opt_',
r'\(Mem\) '
]:
cleaned = re.sub(bad, '', cleaned).strip()
if cleaned in ['_EXCEPTION_RECORD *', '_EXCEPTION_POINTERS *']:
cleaned = cleaned.strip('_')
cleaned = cleaned.replace('[]', '*')
return cleaned | [
"def",
"sanitize_type",
"(",
"raw_type",
")",
":",
"cleaned",
"=",
"get_printable",
"(",
"raw_type",
")",
".",
"strip",
"(",
")",
"for",
"bad",
"in",
"[",
"r'__drv_aliasesMem'",
",",
"r'__drv_freesMem'",
",",
"r'__drv_strictTypeMatch\\(\\w+\\)'",
",",
"r'__out_data_source\\(\\w+\\)'",
",",
"r'_In_NLS_string_\\(\\w+\\)'",
",",
"r'_Frees_ptr_'",
",",
"r'_Frees_ptr_opt_'",
",",
"r'opt_'",
",",
"r'\\(Mem\\) '",
"]",
":",
"cleaned",
"=",
"re",
".",
"sub",
"(",
"bad",
",",
"''",
",",
"cleaned",
")",
".",
"strip",
"(",
")",
"if",
"cleaned",
"in",
"[",
"'_EXCEPTION_RECORD *'",
",",
"'_EXCEPTION_POINTERS *'",
"]",
":",
"cleaned",
"=",
"cleaned",
".",
"strip",
"(",
"'_'",
")",
"cleaned",
"=",
"cleaned",
".",
"replace",
"(",
"'[]'",
",",
"'*'",
")",
"return",
"cleaned"
]
| Sanitize the raw type string. | [
"Sanitize",
"the",
"raw",
"type",
"string",
"."
]
| 6cac5ebfce65c370dbebe47756a1789b120ef982 | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/sanitizers.py#L32-L47 | train |
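Expected behavior of sanitize_type on two simple inputs, assuming get_printable passes plain strings through unchanged:

assert sanitize_type('__drv_aliasesMem LPVOID') == 'LPVOID'
assert sanitize_type('BYTE []') == 'BYTE *'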
NoviceLive/intellicoder | intellicoder/intellisense/sanitizers.py | clean_ret_type | def clean_ret_type(ret_type):
"""Clean the erraneous parsed return type."""
ret_type = get_printable(ret_type).strip()
if ret_type == 'LRESULT LRESULT':
ret_type = 'LRESULT'
for bad in [
'DECLSPEC_NORETURN', 'NTSYSCALLAPI', '__kernel_entry',
'__analysis_noreturn', '_Post_equals_last_error_',
'_Maybe_raises_SEH_exception_',
'_CRT_STDIO_INLINE', '_ACRTIMP'
]:
if bad in ret_type:
ret_type = ret_type.replace(bad, '').strip()
logging.debug(_('cleaned %s'), bad)
return ret_type | python | def clean_ret_type(ret_type):
"""Clean the erraneous parsed return type."""
ret_type = get_printable(ret_type).strip()
if ret_type == 'LRESULT LRESULT':
ret_type = 'LRESULT'
for bad in [
'DECLSPEC_NORETURN', 'NTSYSCALLAPI', '__kernel_entry',
'__analysis_noreturn', '_Post_equals_last_error_',
'_Maybe_raises_SEH_exception_',
'_CRT_STDIO_INLINE', '_ACRTIMP'
]:
if bad in ret_type:
ret_type = ret_type.replace(bad, '').strip()
logging.debug(_('cleaned %s'), bad)
return ret_type | [
"def",
"clean_ret_type",
"(",
"ret_type",
")",
":",
"ret_type",
"=",
"get_printable",
"(",
"ret_type",
")",
".",
"strip",
"(",
")",
"if",
"ret_type",
"==",
"'LRESULT LRESULT'",
":",
"ret_type",
"=",
"'LRESULT'",
"for",
"bad",
"in",
"[",
"'DECLSPEC_NORETURN'",
",",
"'NTSYSCALLAPI'",
",",
"'__kernel_entry'",
",",
"'__analysis_noreturn'",
",",
"'_Post_equals_last_error_'",
",",
"'_Maybe_raises_SEH_exception_'",
",",
"'_CRT_STDIO_INLINE'",
",",
"'_ACRTIMP'",
"]",
":",
"if",
"bad",
"in",
"ret_type",
":",
"ret_type",
"=",
"ret_type",
".",
"replace",
"(",
"bad",
",",
"''",
")",
".",
"strip",
"(",
")",
"logging",
".",
"debug",
"(",
"_",
"(",
"'cleaned %s'",
")",
",",
"bad",
")",
"return",
"ret_type"
]
| Clean the erroneous parsed return type. | [
"Clean",
"the",
"erraneous",
"parsed",
"return",
"type",
"."
]
| 6cac5ebfce65c370dbebe47756a1789b120ef982 | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/sanitizers.py#L50-L64 | train |
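A companion sketch for clean_ret_type (assuming the module's logging and gettext `_` setup has run, as in the original file):

from intellicoder.intellisense.sanitizers import clean_ret_type

print(clean_ret_type('LRESULT LRESULT'))                 # -> 'LRESULT'
print(clean_ret_type('_Post_equals_last_error_ DWORD'))  # -> 'DWORD'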
dpa-newslab/livebridge | livebridge/storages/dynamo.py | DynamoClient.setup | async def setup(self):
"""Setting up DynamoDB table, if it not exists."""
try:
client = await self.db
response = await client.list_tables()
created = False
# create table if not already created.
if self.table_name not in response["TableNames"]:
logger.info("Creating DynamoDB table [{}]".format(self.table_name))
resp = await client.create_table(**self.table_schema)
if resp.get("ResponseMetadata", {}).get("HTTPStatusCode") == 200:
logger.info("DynamoDB table [{}] successfully created!".format(self.table_name))
created = True
# create control table if not already created.
if self.control_table_name and self.control_table_name not in response["TableNames"]:
logger.info("Creating DynamoDB control_table [{}]".format(self.control_table_name))
resp = await client.create_table(**self.control_table_schema)
if resp.get("ResponseMetadata", {}).get("HTTPStatusCode") == 200:
logger.info("DynamoDB control table [{}] successfully created!".format(self.control_table_name))
created = True
return created
except Exception as exc:
logger.error("[DB] Error when setting up DynamoDB.")
logger.error(exc)
return False | python | async def setup(self):
"""Setting up DynamoDB table, if it not exists."""
try:
client = await self.db
response = await client.list_tables()
created = False
# create table if not already created.
if self.table_name not in response["TableNames"]:
logger.info("Creating DynamoDB table [{}]".format(self.table_name))
resp = await client.create_table(**self.table_schema)
if resp.get("ResponseMetadata", {}).get("HTTPStatusCode") == 200:
logger.info("DynamoDB table [{}] successfully created!".format(self.table_name))
created = True
# create control table if not already created.
if self.control_table_name and self.control_table_name not in response["TableNames"]:
logger.info("Creating DynamoDB control_table [{}]".format(self.control_table_name))
resp = await client.create_table(**self.control_table_schema)
if resp.get("ResponseMetadata", {}).get("HTTPStatusCode") == 200:
logger.info("DynamoDB control table [{}] successfully created!".format(self.control_table_name))
created = True
return created
except Exception as exc:
logger.error("[DB] Error when setting up DynamoDB.")
logger.error(exc)
return False | [
"async",
"def",
"setup",
"(",
"self",
")",
":",
"try",
":",
"client",
"=",
"await",
"self",
".",
"db",
"response",
"=",
"await",
"client",
".",
"list_tables",
"(",
")",
"created",
"=",
"False",
"# create table if not already created.",
"if",
"self",
".",
"table_name",
"not",
"in",
"response",
"[",
"\"TableNames\"",
"]",
":",
"logger",
".",
"info",
"(",
"\"Creating DynamoDB table [{}]\"",
".",
"format",
"(",
"self",
".",
"table_name",
")",
")",
"resp",
"=",
"await",
"client",
".",
"create_table",
"(",
"*",
"*",
"self",
".",
"table_schema",
")",
"if",
"resp",
".",
"get",
"(",
"\"ResponseMetadata\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"HTTPStatusCode\"",
")",
"==",
"200",
":",
"logger",
".",
"info",
"(",
"\"DynamoDB table [{}] successfully created!\"",
".",
"format",
"(",
"self",
".",
"table_name",
")",
")",
"created",
"=",
"True",
"# create control table if not already created.",
"if",
"self",
".",
"control_table_name",
"and",
"self",
".",
"control_table_name",
"not",
"in",
"response",
"[",
"\"TableNames\"",
"]",
":",
"logger",
".",
"info",
"(",
"\"Creating DynamoDB control_table [{}]\"",
".",
"format",
"(",
"self",
".",
"control_table_name",
")",
")",
"resp",
"=",
"await",
"client",
".",
"create_table",
"(",
"*",
"*",
"self",
".",
"control_table_schema",
")",
"if",
"resp",
".",
"get",
"(",
"\"ResponseMetadata\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"HTTPStatusCode\"",
")",
"==",
"200",
":",
"logger",
".",
"info",
"(",
"\"DynamoDB control table [{}] successfully created!\"",
".",
"format",
"(",
"self",
".",
"control_table_name",
")",
")",
"created",
"=",
"True",
"return",
"created",
"except",
"Exception",
"as",
"exc",
":",
"logger",
".",
"error",
"(",
"\"[DB] Error when setting up DynamoDB.\"",
")",
"logger",
".",
"error",
"(",
"exc",
")",
"return",
"False"
]
| Setting up DynamoDB table, if it does not exist. | [
"Setting",
"up",
"DynamoDB",
"table",
"if",
"it",
"not",
"exists",
"."
]
| d930e887faa2f882d15b574f0f1fe4a580d7c5fa | https://github.com/dpa-newslab/livebridge/blob/d930e887faa2f882d15b574f0f1fe4a580d7c5fa/livebridge/storages/dynamo.py#L106-L130 | train |
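Because setup() is a coroutine, it must be driven by an event loop. A hedged sketch (the DynamoClient constructor arguments are an assumption here — they depend on livebridge's AWS configuration):

import asyncio
from livebridge.storages.dynamo import DynamoClient

async def bootstrap():
    client = DynamoClient()  # constructor args omitted; depends on livebridge config
    created = await client.setup()
    print("tables created:", created)

asyncio.get_event_loop().run_until_complete(bootstrap())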
portfors-lab/sparkle | sparkle/gui/dialogs/calibration_dlg.py | CalibrationDialog.maxRange | def maxRange(self):
"""Sets the maximum range for the currently selection calibration,
determined from its range of values store on file
"""
try:
x, freqs = self.datafile.get_calibration(str(self.ui.calChoiceCmbbx.currentText()), self.calf)
self.ui.frangeLowSpnbx.setValue(freqs[0])
self.ui.frangeHighSpnbx.setValue(freqs[-1])
            print 'set freq range', freqs[0], freqs[-1]
except IOError:
QtGui.QMessageBox.warning(self, "File Read Error", "Unable to read calibration file")
except KeyError:
QtGui.QMessageBox.warning(self, "File Data Error", "Unable to find data in file") | python | def maxRange(self):
"""Sets the maximum range for the currently selection calibration,
determined from its range of values store on file
"""
try:
x, freqs = self.datafile.get_calibration(str(self.ui.calChoiceCmbbx.currentText()), self.calf)
self.ui.frangeLowSpnbx.setValue(freqs[0])
self.ui.frangeHighSpnbx.setValue(freqs[-1])
            print 'set freq range', freqs[0], freqs[-1]
except IOError:
QtGui.QMessageBox.warning(self, "File Read Error", "Unable to read calibration file")
except KeyError:
QtGui.QMessageBox.warning(self, "File Data Error", "Unable to find data in file") | [
"def",
"maxRange",
"(",
"self",
")",
":",
"try",
":",
"x",
",",
"freqs",
"=",
"self",
".",
"datafile",
".",
"get_calibration",
"(",
"str",
"(",
"self",
".",
"ui",
".",
"calChoiceCmbbx",
".",
"currentText",
"(",
")",
")",
",",
"self",
".",
"calf",
")",
"self",
".",
"ui",
".",
"frangeLowSpnbx",
".",
"setValue",
"(",
"freqs",
"[",
"0",
"]",
")",
"self",
".",
"ui",
".",
"frangeHighSpnbx",
".",
"setValue",
"(",
"freqs",
"[",
"-",
"1",
"]",
")",
"print",
"'set freq range'",
",",
"freqs",
"[",
"0",
"]",
",",
"freqs",
"[",
"-",
"1",
"]",
",",
"freqs",
"[",
"0",
"]",
",",
"freqs",
"[",
"-",
"1",
"]",
"except",
"IOError",
":",
"QtGui",
".",
"QMessageBox",
".",
"warning",
"(",
"self",
",",
"\"File Read Error\"",
",",
"\"Unable to read calibration file\"",
")",
"except",
"KeyError",
":",
"QtGui",
".",
"QMessageBox",
".",
"warning",
"(",
"self",
",",
"\"File Data Error\"",
",",
"\"Unable to find data in file\"",
")"
]
| Sets the maximum range for the currently selected calibration,
determined from its range of values stored on file | [
"Sets",
"the",
"maximum",
"range",
"for",
"the",
"currently",
"selection",
"calibration",
"determined",
"from",
"its",
"range",
"of",
"values",
"store",
"on",
"file"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/dialogs/calibration_dlg.py#L34-L46 | train |
portfors-lab/sparkle | sparkle/gui/dialogs/calibration_dlg.py | CalibrationDialog.plotCurve | def plotCurve(self):
"""Shows a calibration curve, in a separate window, of the currently selected calibration"""
try:
attenuations, freqs = self.datafile.get_calibration(str(self.ui.calChoiceCmbbx.currentText()), self.calf)
self.pw = SimplePlotWidget(freqs, attenuations, parent=self)
self.pw.setWindowFlags(QtCore.Qt.Window)
self.pw.setLabels('Frequency', 'Attenuation', 'Calibration Curve')
self.pw.show()
except IOError:
QtGui.QMessageBox.warning(self, "File Read Error", "Unable to read calibration file")
except KeyError:
QtGui.QMessageBox.warning(self, "File Data Error", "Unable to find data in file") | python | def plotCurve(self):
"""Shows a calibration curve, in a separate window, of the currently selected calibration"""
try:
attenuations, freqs = self.datafile.get_calibration(str(self.ui.calChoiceCmbbx.currentText()), self.calf)
self.pw = SimplePlotWidget(freqs, attenuations, parent=self)
self.pw.setWindowFlags(QtCore.Qt.Window)
self.pw.setLabels('Frequency', 'Attenuation', 'Calibration Curve')
self.pw.show()
except IOError:
QtGui.QMessageBox.warning(self, "File Read Error", "Unable to read calibration file")
except KeyError:
QtGui.QMessageBox.warning(self, "File Data Error", "Unable to find data in file") | [
"def",
"plotCurve",
"(",
"self",
")",
":",
"try",
":",
"attenuations",
",",
"freqs",
"=",
"self",
".",
"datafile",
".",
"get_calibration",
"(",
"str",
"(",
"self",
".",
"ui",
".",
"calChoiceCmbbx",
".",
"currentText",
"(",
")",
")",
",",
"self",
".",
"calf",
")",
"self",
".",
"pw",
"=",
"SimplePlotWidget",
"(",
"freqs",
",",
"attenuations",
",",
"parent",
"=",
"self",
")",
"self",
".",
"pw",
".",
"setWindowFlags",
"(",
"QtCore",
".",
"Qt",
".",
"Window",
")",
"self",
".",
"pw",
".",
"setLabels",
"(",
"'Frequency'",
",",
"'Attenuation'",
",",
"'Calibration Curve'",
")",
"self",
".",
"pw",
".",
"show",
"(",
")",
"except",
"IOError",
":",
"QtGui",
".",
"QMessageBox",
".",
"warning",
"(",
"self",
",",
"\"File Read Error\"",
",",
"\"Unable to read calibration file\"",
")",
"except",
"KeyError",
":",
"QtGui",
".",
"QMessageBox",
".",
"warning",
"(",
"self",
",",
"\"File Data Error\"",
",",
"\"Unable to find data in file\"",
")"
]
| Shows a calibration curve, in a separate window, of the currently selected calibration | [
"Shows",
"a",
"calibration",
"curve",
"in",
"a",
"separate",
"window",
"of",
"the",
"currently",
"selected",
"calibration"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/dialogs/calibration_dlg.py#L48-L59 | train |
portfors-lab/sparkle | sparkle/gui/dialogs/calibration_dlg.py | CalibrationDialog.values | def values(self):
"""Gets the values the user input to this dialog
:returns: dict of inputs:
| *'use_calfile'*: bool, -- whether to apply calibration at all
| *'calname'*: str, -- the name of the calibration dataset to use
| *'frange'*: (int, int), -- (min, max) of the frequency range to apply calibration to
"""
results = {}
results['use_calfile'] = self.ui.calfileRadio.isChecked()
results['calname'] = str(self.ui.calChoiceCmbbx.currentText())
results['frange'] = (self.ui.frangeLowSpnbx.value(), self.ui.frangeHighSpnbx.value())
return results | python | def values(self):
"""Gets the values the user input to this dialog
:returns: dict of inputs:
| *'use_calfile'*: bool, -- whether to apply calibration at all
| *'calname'*: str, -- the name of the calibration dataset to use
| *'frange'*: (int, int), -- (min, max) of the frequency range to apply calibration to
"""
results = {}
results['use_calfile'] = self.ui.calfileRadio.isChecked()
results['calname'] = str(self.ui.calChoiceCmbbx.currentText())
results['frange'] = (self.ui.frangeLowSpnbx.value(), self.ui.frangeHighSpnbx.value())
return results | [
"def",
"values",
"(",
"self",
")",
":",
"results",
"=",
"{",
"}",
"results",
"[",
"'use_calfile'",
"]",
"=",
"self",
".",
"ui",
".",
"calfileRadio",
".",
"isChecked",
"(",
")",
"results",
"[",
"'calname'",
"]",
"=",
"str",
"(",
"self",
".",
"ui",
".",
"calChoiceCmbbx",
".",
"currentText",
"(",
")",
")",
"results",
"[",
"'frange'",
"]",
"=",
"(",
"self",
".",
"ui",
".",
"frangeLowSpnbx",
".",
"value",
"(",
")",
",",
"self",
".",
"ui",
".",
"frangeHighSpnbx",
".",
"value",
"(",
")",
")",
"return",
"results"
]
| Gets the values the user input to this dialog
:returns: dict of inputs:
| *'use_calfile'*: bool, -- whether to apply calibration at all
| *'calname'*: str, -- the name of the calibration dataset to use
| *'frange'*: (int, int), -- (min, max) of the frequency range to apply calibration to | [
"Gets",
"the",
"values",
"the",
"user",
"input",
"to",
"this",
"dialog"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/dialogs/calibration_dlg.py#L61-L73 | train |
portfors-lab/sparkle | sparkle/gui/dialogs/calibration_dlg.py | CalibrationDialog.conditional_accept | def conditional_accept(self):
"""Accepts the inputs if all values are valid and congruent.
i.e. Valid datafile and frequency range within the given calibration dataset."""
if self.ui.calfileRadio.isChecked() and str(self.ui.calChoiceCmbbx.currentText()) == '':
self.ui.noneRadio.setChecked(True)
if self.ui.calfileRadio.isChecked():
try:
x, freqs = self.datafile.get_calibration(str(self.ui.calChoiceCmbbx.currentText()), self.calf)
except IOError:
QtGui.QMessageBox.warning(self, "File Read Error", "Unable to read calibration file")
return
except KeyError:
QtGui.QMessageBox.warning(self, "File Data Error", "Unable to find data in file")
return
if self.ui.frangeLowSpnbx.value() < freqs[0] or \
self.ui.frangeHighSpnbx.value() > freqs[-1]:
QtGui.QMessageBox.warning(self, "Invalid Frequency Range",
"Provided frequencys outside of calibration file range of {} - {} Hz".format(freqs[0], freqs[-1]))
return
self.accept() | python | def conditional_accept(self):
"""Accepts the inputs if all values are valid and congruent.
i.e. Valid datafile and frequency range within the given calibration dataset."""
if self.ui.calfileRadio.isChecked() and str(self.ui.calChoiceCmbbx.currentText()) == '':
self.ui.noneRadio.setChecked(True)
if self.ui.calfileRadio.isChecked():
try:
x, freqs = self.datafile.get_calibration(str(self.ui.calChoiceCmbbx.currentText()), self.calf)
except IOError:
QtGui.QMessageBox.warning(self, "File Read Error", "Unable to read calibration file")
return
except KeyError:
QtGui.QMessageBox.warning(self, "File Data Error", "Unable to find data in file")
return
if self.ui.frangeLowSpnbx.value() < freqs[0] or \
self.ui.frangeHighSpnbx.value() > freqs[-1]:
QtGui.QMessageBox.warning(self, "Invalid Frequency Range",
"Provided frequencys outside of calibration file range of {} - {} Hz".format(freqs[0], freqs[-1]))
return
self.accept() | [
"def",
"conditional_accept",
"(",
"self",
")",
":",
"if",
"self",
".",
"ui",
".",
"calfileRadio",
".",
"isChecked",
"(",
")",
"and",
"str",
"(",
"self",
".",
"ui",
".",
"calChoiceCmbbx",
".",
"currentText",
"(",
")",
")",
"==",
"''",
":",
"self",
".",
"ui",
".",
"noneRadio",
".",
"setChecked",
"(",
"True",
")",
"if",
"self",
".",
"ui",
".",
"calfileRadio",
".",
"isChecked",
"(",
")",
":",
"try",
":",
"x",
",",
"freqs",
"=",
"self",
".",
"datafile",
".",
"get_calibration",
"(",
"str",
"(",
"self",
".",
"ui",
".",
"calChoiceCmbbx",
".",
"currentText",
"(",
")",
")",
",",
"self",
".",
"calf",
")",
"except",
"IOError",
":",
"QtGui",
".",
"QMessageBox",
".",
"warning",
"(",
"self",
",",
"\"File Read Error\"",
",",
"\"Unable to read calibration file\"",
")",
"return",
"except",
"KeyError",
":",
"QtGui",
".",
"QMessageBox",
".",
"warning",
"(",
"self",
",",
"\"File Data Error\"",
",",
"\"Unable to find data in file\"",
")",
"return",
"if",
"self",
".",
"ui",
".",
"frangeLowSpnbx",
".",
"value",
"(",
")",
"<",
"freqs",
"[",
"0",
"]",
"or",
"self",
".",
"ui",
".",
"frangeHighSpnbx",
".",
"value",
"(",
")",
">",
"freqs",
"[",
"-",
"1",
"]",
":",
"QtGui",
".",
"QMessageBox",
".",
"warning",
"(",
"self",
",",
"\"Invalid Frequency Range\"",
",",
"\"Provided frequencys outside of calibration file range of {} - {} Hz\"",
".",
"format",
"(",
"freqs",
"[",
"0",
"]",
",",
"freqs",
"[",
"-",
"1",
"]",
")",
")",
"return",
"self",
".",
"accept",
"(",
")"
]
| Accepts the inputs if all values are valid and congruent.
i.e. Valid datafile and frequency range within the given calibration dataset. | [
"Accepts",
"the",
"inputs",
"if",
"all",
"values",
"are",
"valid",
"and",
"congruent",
".",
"i",
".",
"e",
".",
"Valid",
"datafile",
"and",
"frequency",
"range",
"within",
"the",
"given",
"calibration",
"dataset",
"."
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/dialogs/calibration_dlg.py#L75-L95 | train |
Frzk/Ellis | ellis/main.py | customized_warning | def customized_warning(message, category=UserWarning,
filename='', lineno=-1, file=None, line=None):
"""
Customized function to display warnings.
Monkey patch for `warnings.showwarning`.
"""
print("WARNING: {0}".format(message)) | python | def customized_warning(message, category=UserWarning,
filename='', lineno=-1, file=None, line=None):
"""
Customized function to display warnings.
Monkey patch for `warnings.showwarning`.
"""
print("WARNING: {0}".format(message)) | [
"def",
"customized_warning",
"(",
"message",
",",
"category",
"=",
"UserWarning",
",",
"filename",
"=",
"''",
",",
"lineno",
"=",
"-",
"1",
",",
"file",
"=",
"None",
",",
"line",
"=",
"None",
")",
":",
"print",
"(",
"\"WARNING: {0}\"",
".",
"format",
"(",
"message",
")",
")"
]
| Customized function to display warnings.
Monkey patch for `warnings.showwarning`. | [
"Customized",
"function",
"to",
"display",
"warnings",
".",
"Monkey",
"patch",
"for",
"warnings",
".",
"showwarning",
"."
]
| 39ce8987cbc503354cf1f45927344186a8b18363 | https://github.com/Frzk/Ellis/blob/39ce8987cbc503354cf1f45927344186a8b18363/ellis/main.py#L20-L26 | train |
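A self-contained sketch of the monkey patch in action (the function is redefined here so the snippet runs on its own):

import warnings

def customized_warning(message, category=UserWarning,
                       filename='', lineno=-1, file=None, line=None):
    print("WARNING: {0}".format(message))

warnings.showwarning = customized_warning
warnings.warn("config file missing, falling back to defaults")
# prints: WARNING: config file missing, falling back to defaults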
Frzk/Ellis | ellis/main.py | read_cmdline | def read_cmdline():
"""
Parses optional command line arguments.
"""
info = {
"prog": "Ellis",
"description": "%(prog)s version {0}".format(__version__),
"epilog": "For further help please head over to {0}"
.format(__url__),
"usage": argparse.SUPPRESS,
}
argp = argparse.ArgumentParser(**info)
# Add an optional string argument 'config':
argp.add_argument("-c", "--config",
dest='config_file',
metavar='FILE',
help="read configuration from FILE",
type=str)
# Parse command line:
args = argp.parse_args()
return vars(args) | python | def read_cmdline():
"""
Parses optional command line arguments.
"""
info = {
"prog": "Ellis",
"description": "%(prog)s version {0}".format(__version__),
"epilog": "For further help please head over to {0}"
.format(__url__),
"usage": argparse.SUPPRESS,
}
argp = argparse.ArgumentParser(**info)
# Add an optional string argument 'config':
argp.add_argument("-c", "--config",
dest='config_file',
metavar='FILE',
help="read configuration from FILE",
type=str)
# Parse command line:
args = argp.parse_args()
return vars(args) | [
"def",
"read_cmdline",
"(",
")",
":",
"info",
"=",
"{",
"\"prog\"",
":",
"\"Ellis\"",
",",
"\"description\"",
":",
"\"%(prog)s version {0}\"",
".",
"format",
"(",
"__version__",
")",
",",
"\"epilog\"",
":",
"\"For further help please head over to {0}\"",
".",
"format",
"(",
"__url__",
")",
",",
"\"usage\"",
":",
"argparse",
".",
"SUPPRESS",
",",
"}",
"argp",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"*",
"*",
"info",
")",
"# Add an optional string argument 'config':",
"argp",
".",
"add_argument",
"(",
"\"-c\"",
",",
"\"--config\"",
",",
"dest",
"=",
"'config_file'",
",",
"metavar",
"=",
"'FILE'",
",",
"help",
"=",
"\"read configuration from FILE\"",
",",
"type",
"=",
"str",
")",
"# Parse command line:",
"args",
"=",
"argp",
".",
"parse_args",
"(",
")",
"return",
"vars",
"(",
"args",
")"
]
| Parses optional command line arguments. | [
"Parses",
"optional",
"command",
"line",
"arguments",
"."
]
| 39ce8987cbc503354cf1f45927344186a8b18363 | https://github.com/Frzk/Ellis/blob/39ce8987cbc503354cf1f45927344186a8b18363/ellis/main.py#L36-L60 | train |
Frzk/Ellis | ellis/main.py | main | def main():
"""
Entry point for Ellis.
"""
# Monkey patch warnings.showwarning:
warnings.showwarning = customized_warning
# Read command line args, if any:
args = read_cmdline()
# Configuration file, if given on the command line:
config_file = args['config_file']
try:
ellis = Ellis(config_file)
except NoRuleError:
msg = ("There are no valid rules in the config file. "
"Ellis can not run without rules.")
print_err(msg)
else:
ellis.start() | python | def main():
"""
Entry point for Ellis.
"""
# Monkey patch warnings.showwarning:
warnings.showwarning = customized_warning
# Read command line args, if any:
args = read_cmdline()
# Configuration file, if given on the command line:
config_file = args['config_file']
try:
ellis = Ellis(config_file)
except NoRuleError:
msg = ("There are no valid rules in the config file. "
"Ellis can not run without rules.")
print_err(msg)
else:
ellis.start() | [
"def",
"main",
"(",
")",
":",
"# Monkey patch warnings.showwarning:",
"warnings",
".",
"showwarning",
"=",
"customized_warning",
"# Read command line args, if any:",
"args",
"=",
"read_cmdline",
"(",
")",
"# Configuration file, if given on the command line:",
"config_file",
"=",
"args",
"[",
"'config_file'",
"]",
"try",
":",
"ellis",
"=",
"Ellis",
"(",
"config_file",
")",
"except",
"NoRuleError",
":",
"msg",
"=",
"(",
"\"There are no valid rules in the config file. \"",
"\"Ellis can not run without rules.\"",
")",
"print_err",
"(",
"msg",
")",
"else",
":",
"ellis",
".",
"start",
"(",
")"
]
| Entry point for Ellis. | [
"Entry",
"point",
"for",
"Ellis",
"."
]
| 39ce8987cbc503354cf1f45927344186a8b18363 | https://github.com/Frzk/Ellis/blob/39ce8987cbc503354cf1f45927344186a8b18363/ellis/main.py#L63-L83 | train |
AllTheWayDown/turgles | turgles/turgle.py | Turgle.shape | def shape(self, shape=None):
"""We need to shift buffers in order to change shape"""
if shape is None:
return self._shape
data, color = self.renderer.manager.set_shape(self.model.id, shape)
self.model.data = data
self.color = color
self._shape = shape | python | def shape(self, shape=None):
"""We need to shift buffers in order to change shape"""
if shape is None:
return self._shape
data, color = self.renderer.manager.set_shape(self.model.id, shape)
self.model.data = data
self.color = color
self._shape = shape | [
"def",
"shape",
"(",
"self",
",",
"shape",
"=",
"None",
")",
":",
"if",
"shape",
"is",
"None",
":",
"return",
"self",
".",
"_shape",
"data",
",",
"color",
"=",
"self",
".",
"renderer",
".",
"manager",
".",
"set_shape",
"(",
"self",
".",
"model",
".",
"id",
",",
"shape",
")",
"self",
".",
"model",
".",
"data",
"=",
"data",
"self",
".",
"color",
"=",
"color",
"self",
".",
"_shape",
"=",
"shape"
]
| We need to shift buffers in order to change shape | [
"We",
"need",
"to",
"shift",
"buffers",
"in",
"order",
"to",
"change",
"shape"
]
| 1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852 | https://github.com/AllTheWayDown/turgles/blob/1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852/turgles/turgle.py#L18-L25 | train |
yamcs/yamcs-python | yamcs-client/yamcs/core/futures.py | WebSocketSubscriptionFuture.reply | def reply(self, timeout=None):
"""
Returns the initial reply. This is emitted before any subscription
data is emitted. This function raises an exception if the subscription
attempt failed.
"""
self._wait_on_signal(self._response_received)
if self._response_exception is not None:
msg = self._response_exception.message
raise YamcsError(msg)
return self._response_reply | python | def reply(self, timeout=None):
"""
Returns the initial reply. This is emitted before any subscription
data is emitted. This function raises an exception if the subscription
attempt failed.
"""
self._wait_on_signal(self._response_received)
if self._response_exception is not None:
msg = self._response_exception.message
raise YamcsError(msg)
return self._response_reply | [
"def",
"reply",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"self",
".",
"_wait_on_signal",
"(",
"self",
".",
"_response_received",
")",
"if",
"self",
".",
"_response_exception",
"is",
"not",
"None",
":",
"msg",
"=",
"self",
".",
"_response_exception",
".",
"message",
"raise",
"YamcsError",
"(",
"msg",
")",
"return",
"self",
".",
"_response_reply"
]
| Returns the initial reply. This is emitted before any subscription
data is emitted. This function raises an exception if the subscription
attempt failed. | [
"Returns",
"the",
"initial",
"reply",
".",
"This",
"is",
"emitted",
"before",
"any",
"subscription",
"data",
"is",
"emitted",
".",
"This",
"function",
"raises",
"an",
"exception",
"if",
"the",
"subscription",
"attempt",
"failed",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/core/futures.py#L166-L176 | train |
dpa-newslab/livebridge | livebridge/controller.py | Controller.stop_bridges | async def stop_bridges(self):
"""Stop all sleep tasks to allow bridges to end."""
for task in self.sleep_tasks:
task.cancel()
for bridge in self.bridges:
bridge.stop() | python | async def stop_bridges(self):
"""Stop all sleep tasks to allow bridges to end."""
for task in self.sleep_tasks:
task.cancel()
for bridge in self.bridges:
bridge.stop() | [
"async",
"def",
"stop_bridges",
"(",
"self",
")",
":",
"for",
"task",
"in",
"self",
".",
"sleep_tasks",
":",
"task",
".",
"cancel",
"(",
")",
"for",
"bridge",
"in",
"self",
".",
"bridges",
":",
"bridge",
".",
"stop",
"(",
")"
]
| Stop all sleep tasks to allow bridges to end. | [
"Stop",
"all",
"sleep",
"tasks",
"to",
"allow",
"bridges",
"to",
"end",
"."
]
| d930e887faa2f882d15b574f0f1fe4a580d7c5fa | https://github.com/dpa-newslab/livebridge/blob/d930e887faa2f882d15b574f0f1fe4a580d7c5fa/livebridge/controller.py#L50-L55 | train |
Egregors/cbrf | cbrf/utils.py | str_to_date | def str_to_date(date: str) -> datetime.datetime:
""" Convert cbr.ru API date ste to python datetime
:param date: date from API response
:return: date like datetime
:rtype: datetime
"""
date = date.split('.')
date.reverse()
y, m, d = date
return datetime.datetime(int(y), int(m), int(d)) | python | def str_to_date(date: str) -> datetime.datetime:
""" Convert cbr.ru API date ste to python datetime
:param date: date from API response
:return: date like datetime
:rtype: datetime
"""
date = date.split('.')
date.reverse()
y, m, d = date
return datetime.datetime(int(y), int(m), int(d)) | [
"def",
"str_to_date",
"(",
"date",
":",
"str",
")",
"->",
"datetime",
".",
"datetime",
":",
"date",
"=",
"date",
".",
"split",
"(",
"'.'",
")",
"date",
".",
"reverse",
"(",
")",
"y",
",",
"m",
",",
"d",
"=",
"date",
"return",
"datetime",
".",
"datetime",
"(",
"int",
"(",
"y",
")",
",",
"int",
"(",
"m",
")",
",",
"int",
"(",
"d",
")",
")"
]
| Convert cbr.ru API date str to python datetime
:param date: date from API response
:return: date like datetime
:rtype: datetime | [
"Convert",
"cbr",
".",
"ru",
"API",
"date",
"ste",
"to",
"python",
"datetime"
]
| e4ce332fcead83c75966337c97c0ae070fb7e576 | https://github.com/Egregors/cbrf/blob/e4ce332fcead83c75966337c97c0ae070fb7e576/cbrf/utils.py#L24-L35 | train |
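The cbr.ru API uses day-first dd.mm.yyyy strings, so for example:

>>> str_to_date('02.03.2017')
datetime.datetime(2017, 3, 2, 0, 0)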
AllTheWayDown/turgles | turgles/gl/buffer.py | Buffer.load | def load(self, data, size=None):
"""Data is cffi array"""
self.bind()
if size is None:
# ffi's sizeof understands arrays
size = sizeof(data)
if size == self.buffer_size:
# same size - no need to allocate new buffer, just copy
glBufferSubData(
self.array_type,
0,
size,
to_raw_pointer(data)
)
else:
# buffer size has changed - need to allocate new buffer in the GPU
glBufferData(
self.array_type,
size,
to_raw_pointer(data),
self.draw_type
)
self.buffer_size = size
self.unbind() | python | def load(self, data, size=None):
"""Data is cffi array"""
self.bind()
if size is None:
# ffi's sizeof understands arrays
size = sizeof(data)
if size == self.buffer_size:
# same size - no need to allocate new buffer, just copy
glBufferSubData(
self.array_type,
0,
size,
to_raw_pointer(data)
)
else:
# buffer size has changed - need to allocate new buffer in the GPU
glBufferData(
self.array_type,
size,
to_raw_pointer(data),
self.draw_type
)
self.buffer_size = size
self.unbind() | [
"def",
"load",
"(",
"self",
",",
"data",
",",
"size",
"=",
"None",
")",
":",
"self",
".",
"bind",
"(",
")",
"if",
"size",
"is",
"None",
":",
"# ffi's sizeof understands arrays",
"size",
"=",
"sizeof",
"(",
"data",
")",
"if",
"size",
"==",
"self",
".",
"buffer_size",
":",
"# same size - no need to allocate new buffer, just copy",
"glBufferSubData",
"(",
"self",
".",
"array_type",
",",
"0",
",",
"size",
",",
"to_raw_pointer",
"(",
"data",
")",
")",
"else",
":",
"# buffer size has changed - need to allocate new buffer in the GPU",
"glBufferData",
"(",
"self",
".",
"array_type",
",",
"size",
",",
"to_raw_pointer",
"(",
"data",
")",
",",
"self",
".",
"draw_type",
")",
"self",
".",
"buffer_size",
"=",
"size",
"self",
".",
"unbind",
"(",
")"
]
| Data is cffi array | [
"Data",
"is",
"cffi",
"array"
]
| 1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852 | https://github.com/AllTheWayDown/turgles/blob/1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852/turgles/gl/buffer.py#L43-L66 | train |
JukeboxPipeline/jukeboxmaya | src/jukeboxmaya/main.py | init | def init():
"""Initialize the pipeline in maya so everything works
Init environment and load plugins.
This also creates the initial Jukebox Menu entry.
:returns: None
:rtype: None
:raises: None
"""
main.init_environment()
pluginpath = os.pathsep.join((os.environ.get('JUKEBOX_PLUGIN_PATH', ''), BUILTIN_PLUGIN_PATH))
os.environ['JUKEBOX_PLUGIN_PATH'] = pluginpath
try:
maya.standalone.initialize()
jukeboxmaya.STANDALONE_INITIALIZED = True
except RuntimeError as e:
jukeboxmaya.STANDALONE_INITIALIZED = False
if str(e) == "maya.standalone may only be used from an external Python interpreter":
mm = MenuManager.get()
mainmenu = mm.create_menu("Jukebox", tearOff=True)
mm.create_menu("Help", parent=mainmenu, command=show_help)
# load plugins
pmanager = MayaPluginManager.get()
pmanager.load_plugins()
load_mayaplugins() | python | def init():
"""Initialize the pipeline in maya so everything works
Init environment and load plugins.
This also creates the initial Jukebox Menu entry.
:returns: None
:rtype: None
:raises: None
"""
main.init_environment()
pluginpath = os.pathsep.join((os.environ.get('JUKEBOX_PLUGIN_PATH', ''), BUILTIN_PLUGIN_PATH))
os.environ['JUKEBOX_PLUGIN_PATH'] = pluginpath
try:
maya.standalone.initialize()
jukeboxmaya.STANDALONE_INITIALIZED = True
except RuntimeError as e:
jukeboxmaya.STANDALONE_INITIALIZED = False
if str(e) == "maya.standalone may only be used from an external Python interpreter":
mm = MenuManager.get()
mainmenu = mm.create_menu("Jukebox", tearOff=True)
mm.create_menu("Help", parent=mainmenu, command=show_help)
# load plugins
pmanager = MayaPluginManager.get()
pmanager.load_plugins()
load_mayaplugins() | [
"def",
"init",
"(",
")",
":",
"main",
".",
"init_environment",
"(",
")",
"pluginpath",
"=",
"os",
".",
"pathsep",
".",
"join",
"(",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'JUKEBOX_PLUGIN_PATH'",
",",
"''",
")",
",",
"BUILTIN_PLUGIN_PATH",
")",
")",
"os",
".",
"environ",
"[",
"'JUKEBOX_PLUGIN_PATH'",
"]",
"=",
"pluginpath",
"try",
":",
"maya",
".",
"standalone",
".",
"initialize",
"(",
")",
"jukeboxmaya",
".",
"STANDALONE_INITIALIZED",
"=",
"True",
"except",
"RuntimeError",
"as",
"e",
":",
"jukeboxmaya",
".",
"STANDALONE_INITIALIZED",
"=",
"False",
"if",
"str",
"(",
"e",
")",
"==",
"\"maya.standalone may only be used from an external Python interpreter\"",
":",
"mm",
"=",
"MenuManager",
".",
"get",
"(",
")",
"mainmenu",
"=",
"mm",
".",
"create_menu",
"(",
"\"Jukebox\"",
",",
"tearOff",
"=",
"True",
")",
"mm",
".",
"create_menu",
"(",
"\"Help\"",
",",
"parent",
"=",
"mainmenu",
",",
"command",
"=",
"show_help",
")",
"# load plugins",
"pmanager",
"=",
"MayaPluginManager",
".",
"get",
"(",
")",
"pmanager",
".",
"load_plugins",
"(",
")",
"load_mayaplugins",
"(",
")"
]
| Initialize the pipeline in maya so everything works
Init environment and load plugins.
This also creates the initial Jukebox Menu entry.
:returns: None
:rtype: None
:raises: None | [
"Initialize",
"the",
"pipeline",
"in",
"maya",
"so",
"everything",
"works"
]
| c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c | https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/main.py#L43-L68 | train |
TorkamaniLab/metapipe | metapipe/models/queue.py | BaseQueue.all_jobs | def all_jobs(self):
""" Returns a list of all jobs submitted to the queue, complete,
        in-progress or failed.
"""
return list(set(self.complete + self.failed + self.queue + self.running)) | python | def all_jobs(self):
""" Returns a list of all jobs submitted to the queue, complete,
        in-progress or failed.
"""
return list(set(self.complete + self.failed + self.queue + self.running)) | [
"def",
"all_jobs",
"(",
"self",
")",
":",
"return",
"list",
"(",
"set",
"(",
"self",
".",
"complete",
"+",
"self",
".",
"failed",
"+",
"self",
".",
"queue",
"+",
"self",
".",
"running",
")",
")"
]
| Returns a list of all jobs submitted to the queue, complete,
in-progress or failed. | [
"Returns",
"a",
"list",
"of",
"all",
"jobs",
"submitted",
"to",
"the",
"queue",
"complete",
"in",
"-",
"progess",
"or",
"failed",
"."
]
| 15592e5b0c217afb00ac03503f8d0d7453d4baf4 | https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/queue.py#L40-L44 | train |
TorkamaniLab/metapipe | metapipe/models/queue.py | BaseQueue.progress | def progress(self):
""" Returns the percentage, current and total number of
jobs in the queue.
"""
total = len(self.all_jobs)
remaining = total - len(self.active_jobs) if total > 0 else 0
percent = int(100 * (float(remaining) / total)) if total > 0 else 0
return percent | python | def progress(self):
""" Returns the percentage, current and total number of
jobs in the queue.
"""
total = len(self.all_jobs)
remaining = total - len(self.active_jobs) if total > 0 else 0
percent = int(100 * (float(remaining) / total)) if total > 0 else 0
return percent | [
"def",
"progress",
"(",
"self",
")",
":",
"total",
"=",
"len",
"(",
"self",
".",
"all_jobs",
")",
"remaining",
"=",
"total",
"-",
"len",
"(",
"self",
".",
"active_jobs",
")",
"if",
"total",
">",
"0",
"else",
"0",
"percent",
"=",
"int",
"(",
"100",
"*",
"(",
"float",
"(",
"remaining",
")",
"/",
"total",
")",
")",
"if",
"total",
">",
"0",
"else",
"0",
"return",
"percent"
]
| Returns the percentage of jobs in the queue that have
finished. | [
"Returns",
"the",
"percentage",
"current",
"and",
"total",
"number",
"of",
"jobs",
"in",
"the",
"queue",
"."
]
| 15592e5b0c217afb00ac03503f8d0d7453d4baf4 | https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/queue.py#L47-L54 | train |
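A worked instance of the arithmetic, assuming 10 submitted jobs of which 4 are still active:

total, active = 10, 4
remaining = total - active                       # 6 jobs are done
percent = int(100 * (float(remaining) / total))  # -> 60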
TorkamaniLab/metapipe | metapipe/models/queue.py | BaseQueue.ready | def ready(self, job):
""" Determines if the job is ready to be sumitted to the
queue. It checks if the job depends on any currently
running or queued operations.
"""
no_deps = len(job.depends_on) == 0
all_complete = all(j.is_complete() for j in self.active_jobs
if j.alias in job.depends_on)
none_failed = not any(True for j in self.failed
if j.alias in job.depends_on)
queue_is_open = len(self.running) < self.MAX_CONCURRENT_JOBS
return queue_is_open and (no_deps or (all_complete and none_failed)) | python | def ready(self, job):
""" Determines if the job is ready to be sumitted to the
queue. It checks if the job depends on any currently
running or queued operations.
"""
no_deps = len(job.depends_on) == 0
all_complete = all(j.is_complete() for j in self.active_jobs
if j.alias in job.depends_on)
none_failed = not any(True for j in self.failed
if j.alias in job.depends_on)
queue_is_open = len(self.running) < self.MAX_CONCURRENT_JOBS
return queue_is_open and (no_deps or (all_complete and none_failed)) | [
"def",
"ready",
"(",
"self",
",",
"job",
")",
":",
"no_deps",
"=",
"len",
"(",
"job",
".",
"depends_on",
")",
"==",
"0",
"all_complete",
"=",
"all",
"(",
"j",
".",
"is_complete",
"(",
")",
"for",
"j",
"in",
"self",
".",
"active_jobs",
"if",
"j",
".",
"alias",
"in",
"job",
".",
"depends_on",
")",
"none_failed",
"=",
"not",
"any",
"(",
"True",
"for",
"j",
"in",
"self",
".",
"failed",
"if",
"j",
".",
"alias",
"in",
"job",
".",
"depends_on",
")",
"queue_is_open",
"=",
"len",
"(",
"self",
".",
"running",
")",
"<",
"self",
".",
"MAX_CONCURRENT_JOBS",
"return",
"queue_is_open",
"and",
"(",
"no_deps",
"or",
"(",
"all_complete",
"and",
"none_failed",
")",
")"
]
| Determines if the job is ready to be submitted to the
queue. It checks if the job depends on any currently
running or queued operations. | [
"Determines",
"if",
"the",
"job",
"is",
"ready",
"to",
"be",
"sumitted",
"to",
"the",
"queue",
".",
"It",
"checks",
"if",
"the",
"job",
"depends",
"on",
"any",
"currently",
"running",
"or",
"queued",
"operations",
"."
]
| 15592e5b0c217afb00ac03503f8d0d7453d4baf4 | https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/queue.py#L56-L67 | train |
TorkamaniLab/metapipe | metapipe/models/queue.py | BaseQueue.locked | def locked(self):
""" Determines if the queue is locked. """
if len(self.failed) == 0:
return False
for fail in self.failed:
for job in self.active_jobs:
if fail.alias in job.depends_on:
return True | python | def locked(self):
""" Determines if the queue is locked. """
if len(self.failed) == 0:
return False
for fail in self.failed:
for job in self.active_jobs:
if fail.alias in job.depends_on:
return True | [
"def",
"locked",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"failed",
")",
"==",
"0",
":",
"return",
"False",
"for",
"fail",
"in",
"self",
".",
"failed",
":",
"for",
"job",
"in",
"self",
".",
"active_jobs",
":",
"if",
"fail",
".",
"alias",
"in",
"job",
".",
"depends_on",
":",
"return",
"True"
]
| Determines if the queue is locked. | [
"Determines",
"if",
"the",
"queue",
"is",
"locked",
"."
]
| 15592e5b0c217afb00ac03503f8d0d7453d4baf4 | https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/queue.py#L69-L76 | train |
dpa-newslab/livebridge | livebridge/run.py | read_args | def read_args(**kwargs):
"""Read controlfile parameter."""
if kwargs.get("control"):
args = Namespace(control=kwargs["control"])
elif config.CONTROLFILE:
args = Namespace(control=config.CONTROLFILE)
elif config.DB.get("control_table_name"):
args = Namespace(control="sql")
elif config.AWS.get("control_table_name"):
args = Namespace(control="dynamodb")
else:
# read cli args
parser = argparse.ArgumentParser()
parser.add_argument("--control", required=True, help="Control file, can be path.")
args = parser.parse_args()
return args | python | def read_args(**kwargs):
"""Read controlfile parameter."""
if kwargs.get("control"):
args = Namespace(control=kwargs["control"])
elif config.CONTROLFILE:
args = Namespace(control=config.CONTROLFILE)
elif config.DB.get("control_table_name"):
args = Namespace(control="sql")
elif config.AWS.get("control_table_name"):
args = Namespace(control="dynamodb")
else:
# read cli args
parser = argparse.ArgumentParser()
parser.add_argument("--control", required=True, help="Control file, can be path.")
args = parser.parse_args()
return args | [
"def",
"read_args",
"(",
"*",
"*",
"kwargs",
")",
":",
"if",
"kwargs",
".",
"get",
"(",
"\"control\"",
")",
":",
"args",
"=",
"Namespace",
"(",
"control",
"=",
"kwargs",
"[",
"\"control\"",
"]",
")",
"elif",
"config",
".",
"CONTROLFILE",
":",
"args",
"=",
"Namespace",
"(",
"control",
"=",
"config",
".",
"CONTROLFILE",
")",
"elif",
"config",
".",
"DB",
".",
"get",
"(",
"\"control_table_name\"",
")",
":",
"args",
"=",
"Namespace",
"(",
"control",
"=",
"\"sql\"",
")",
"elif",
"config",
".",
"AWS",
".",
"get",
"(",
"\"control_table_name\"",
")",
":",
"args",
"=",
"Namespace",
"(",
"control",
"=",
"\"dynamodb\"",
")",
"else",
":",
"# read cli args",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"\"--control\"",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Control file, can be path.\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"return",
"args"
]
| Read controlfile parameter. | [
"Read",
"controlfile",
"parameter",
"."
]
| d930e887faa2f882d15b574f0f1fe4a580d7c5fa | https://github.com/dpa-newslab/livebridge/blob/d930e887faa2f882d15b574f0f1fe4a580d7c5fa/livebridge/run.py#L30-L45 | train |
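The precedence order means an explicit keyword argument short-circuits the config and CLI fallbacks; a sketch (the path is hypothetical):

from livebridge.run import read_args

args = read_args(control="/etc/livebridge/control.yaml")  # hypothetical path
print(args.control)  # -> /etc/livebridge/control.yaml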
lowandrew/OLCTools | spadespipeline/spadesRun.py | Spades.best_assemblyfile | def best_assemblyfile(self):
"""
Determine whether the contigs.fasta output file from SPAdes is present. If not, set the .bestassembly
attribute to 'NA'
"""
for sample in self.metadata:
# Set the name of the unfiltered spades assembly output file
assembly_file = os.path.join(sample.general.spadesoutput, 'contigs.fasta')
if os.path.isfile(assembly_file):
sample.general.bestassemblyfile = assembly_file
else:
sample.general.bestassemblyfile = 'NA'
# Set the name of the filtered assembly file
filteredfile = os.path.join(sample.general.outputdirectory, '{}.fasta'.format(sample.name))
# Add the name and path of the filtered file to the metadata
sample.general.filteredfile = filteredfile | python | def best_assemblyfile(self):
"""
Determine whether the contigs.fasta output file from SPAdes is present. If not, set the .bestassembly
attribute to 'NA'
"""
for sample in self.metadata:
# Set the name of the unfiltered spades assembly output file
assembly_file = os.path.join(sample.general.spadesoutput, 'contigs.fasta')
if os.path.isfile(assembly_file):
sample.general.bestassemblyfile = assembly_file
else:
sample.general.bestassemblyfile = 'NA'
# Set the name of the filtered assembly file
filteredfile = os.path.join(sample.general.outputdirectory, '{}.fasta'.format(sample.name))
# Add the name and path of the filtered file to the metadata
sample.general.filteredfile = filteredfile | [
"def",
"best_assemblyfile",
"(",
"self",
")",
":",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"# Set the name of the unfiltered spades assembly output file",
"assembly_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sample",
".",
"general",
".",
"spadesoutput",
",",
"'contigs.fasta'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"assembly_file",
")",
":",
"sample",
".",
"general",
".",
"bestassemblyfile",
"=",
"assembly_file",
"else",
":",
"sample",
".",
"general",
".",
"bestassemblyfile",
"=",
"'NA'",
"# Set the name of the filtered assembly file",
"filteredfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sample",
".",
"general",
".",
"outputdirectory",
",",
"'{}.fasta'",
".",
"format",
"(",
"sample",
".",
"name",
")",
")",
"# Add the name and path of the filtered file to the metadata",
"sample",
".",
"general",
".",
"filteredfile",
"=",
"filteredfile"
]
| Determine whether the contigs.fasta output file from SPAdes is present. If not, set the .bestassembly
attribute to 'NA' | [
"Determine",
"whether",
"the",
"contigs",
".",
"fasta",
"output",
"file",
"from",
"SPAdes",
"is",
"present",
".",
"If",
"not",
"set",
"the",
".",
"bestassembly",
"attribute",
"to",
"NA"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/spadesRun.py#L124-L139 | train |
lowandrew/OLCTools | spadespipeline/spadesRun.py | Spades.assemble | def assemble(self):
"""Run the assembly command in a multi-threaded fashion"""
threadlock = threading.Lock()
while True:
(sample, command) = self.assemblequeue.get()
if command and not os.path.isfile(os.path.join(sample.general.spadesoutput, 'contigs.fasta')):
# execute(command)
out, err = run_subprocess(command)
threadlock.acquire()
write_to_logfile(command, command, self.logfile, sample.general.logout, sample.general.logerr,
None, None)
write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None)
threadlock.release()
#
call(command, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
dotter()
# Signal to the queue that the job is done
self.assemblequeue.task_done() | python | def assemble(self):
"""Run the assembly command in a multi-threaded fashion"""
threadlock = threading.Lock()
while True:
(sample, command) = self.assemblequeue.get()
if command and not os.path.isfile(os.path.join(sample.general.spadesoutput, 'contigs.fasta')):
# execute(command)
out, err = run_subprocess(command)
threadlock.acquire()
write_to_logfile(command, command, self.logfile, sample.general.logout, sample.general.logerr,
None, None)
write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None)
threadlock.release()
#
call(command, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
dotter()
# Signal to the queue that the job is done
self.assemblequeue.task_done() | [
"def",
"assemble",
"(",
"self",
")",
":",
"threadlock",
"=",
"threading",
".",
"Lock",
"(",
")",
"while",
"True",
":",
"(",
"sample",
",",
"command",
")",
"=",
"self",
".",
"assemblequeue",
".",
"get",
"(",
")",
"if",
"command",
"and",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"sample",
".",
"general",
".",
"spadesoutput",
",",
"'contigs.fasta'",
")",
")",
":",
"# execute(command)",
"out",
",",
"err",
"=",
"run_subprocess",
"(",
"command",
")",
"threadlock",
".",
"acquire",
"(",
")",
"write_to_logfile",
"(",
"command",
",",
"command",
",",
"self",
".",
"logfile",
",",
"sample",
".",
"general",
".",
"logout",
",",
"sample",
".",
"general",
".",
"logerr",
",",
"None",
",",
"None",
")",
"write_to_logfile",
"(",
"out",
",",
"err",
",",
"self",
".",
"logfile",
",",
"sample",
".",
"general",
".",
"logout",
",",
"sample",
".",
"general",
".",
"logerr",
",",
"None",
",",
"None",
")",
"threadlock",
".",
"release",
"(",
")",
"#",
"call",
"(",
"command",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"open",
"(",
"os",
".",
"devnull",
",",
"'wb'",
")",
",",
"stderr",
"=",
"open",
"(",
"os",
".",
"devnull",
",",
"'wb'",
")",
")",
"dotter",
"(",
")",
"# Signal to the queue that the job is done",
"self",
".",
"assemblequeue",
".",
"task_done",
"(",
")"
]
| Run the assembly command in a multi-threaded fashion | [
"Run",
"the",
"assembly",
"command",
"in",
"a",
"multi",
"-",
"threaded",
"fashion"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/spadesRun.py#L141-L158 | train |
sirfoga/pyhal | hal/cvs/gits.py | Repository.get_diff_amounts | def get_diff_amounts(self):
"""Gets list of total diff
:return: List of total diff between 2 consecutive commits since start
"""
diffs = []
last_commit = None
for commit in self.repo.iter_commits():
if last_commit is not None:
diff = self.get_diff(commit.hexsha, last_commit.hexsha)
total_changed = diff[Diff.ADD] + diff[Diff.DEL]
diffs.append(total_changed)
last_commit = commit
return diffs | python | def get_diff_amounts(self):
"""Gets list of total diff
:return: List of total diff between 2 consecutive commits since start
"""
diffs = []
last_commit = None
for commit in self.repo.iter_commits():
if last_commit is not None:
diff = self.get_diff(commit.hexsha, last_commit.hexsha)
total_changed = diff[Diff.ADD] + diff[Diff.DEL]
diffs.append(total_changed)
last_commit = commit
return diffs | [
"def",
"get_diff_amounts",
"(",
"self",
")",
":",
"diffs",
"=",
"[",
"]",
"last_commit",
"=",
"None",
"for",
"commit",
"in",
"self",
".",
"repo",
".",
"iter_commits",
"(",
")",
":",
"if",
"last_commit",
"is",
"not",
"None",
":",
"diff",
"=",
"self",
".",
"get_diff",
"(",
"commit",
".",
"hexsha",
",",
"last_commit",
".",
"hexsha",
")",
"total_changed",
"=",
"diff",
"[",
"Diff",
".",
"ADD",
"]",
"+",
"diff",
"[",
"Diff",
".",
"DEL",
"]",
"diffs",
".",
"append",
"(",
"total_changed",
")",
"last_commit",
"=",
"commit",
"return",
"diffs"
]
| Gets list of total diff
:return: List of total diff between 2 consecutive commits since start | [
"Gets",
"list",
"of",
"total",
"diff"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/cvs/gits.py#L106-L122 | train |
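Assuming Repository wraps a GitPython repo and takes a checkout path (the constructor signature is an assumption, not shown here), per-commit churn could be inspected like this:

from hal.cvs.gits import Repository

repo = Repository("/path/to/checkout")  # constructor args assumed
print(repo.get_diff_amounts())          # e.g. [12, 304, 7, ...] lines changed per commit pair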
sirfoga/pyhal | hal/cvs/gits.py | Repository.get_new_version | def get_new_version(self, last_version, last_commit,
diff_to_increase_ratio):
"""Gets new version
:param last_version: last version known
:param last_commit: hash of commit of last version
        :param diff_to_increase_ratio: Ratio to convert number of changes into a version increase
:return: new version
"""
version = Version(last_version)
diff = self.get_diff(last_commit, self.get_last_commit_hash())
total_changed = diff[Diff.ADD] + diff[Diff.DEL]
version.increase_by_changes(total_changed, diff_to_increase_ratio)
return version | python | def get_new_version(self, last_version, last_commit,
diff_to_increase_ratio):
"""Gets new version
:param last_version: last version known
:param last_commit: hash of commit of last version
        :param diff_to_increase_ratio: Ratio to convert number of changes into a version increase
:return: new version
"""
version = Version(last_version)
diff = self.get_diff(last_commit, self.get_last_commit_hash())
total_changed = diff[Diff.ADD] + diff[Diff.DEL]
version.increase_by_changes(total_changed, diff_to_increase_ratio)
return version | [
"def",
"get_new_version",
"(",
"self",
",",
"last_version",
",",
"last_commit",
",",
"diff_to_increase_ratio",
")",
":",
"version",
"=",
"Version",
"(",
"last_version",
")",
"diff",
"=",
"self",
".",
"get_diff",
"(",
"last_commit",
",",
"self",
".",
"get_last_commit_hash",
"(",
")",
")",
"total_changed",
"=",
"diff",
"[",
"Diff",
".",
"ADD",
"]",
"+",
"diff",
"[",
"Diff",
".",
"DEL",
"]",
"version",
".",
"increase_by_changes",
"(",
"total_changed",
",",
"diff_to_increase_ratio",
")",
"return",
"version"
]
| Gets new version
:param last_version: last version known
:param last_commit: hash of commit of last version
:param diff_to_increase_ratio: Ratio to convert number of changes into a version increase
:return: new version | [
"Gets",
"new",
"version"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/cvs/gits.py#L149-L164 | train |
sirfoga/pyhal | hal/internet/email/gmail.py | get_mime_message | def get_mime_message(subject, text):
"""Creates MIME message
:param subject: Subject of email
:param text: Email content
:return: Email formatted as HTML ready to be sent
"""
message = MIMEText(
"<html>" +
str(text).replace("\n", "<br>") +
"</html>", "html"
)
message["subject"] = str(subject)
return message | python | def get_mime_message(subject, text):
"""Creates MIME message
:param subject: Subject of email
:param text: Email content
:return: Email formatted as HTML ready to be sent
"""
message = MIMEText(
"<html>" +
str(text).replace("\n", "<br>") +
"</html>", "html"
)
message["subject"] = str(subject)
return message | [
"def",
"get_mime_message",
"(",
"subject",
",",
"text",
")",
":",
"message",
"=",
"MIMEText",
"(",
"\"<html>\"",
"+",
"str",
"(",
"text",
")",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"<br>\"",
")",
"+",
"\"</html>\"",
",",
"\"html\"",
")",
"message",
"[",
"\"subject\"",
"]",
"=",
"str",
"(",
"subject",
")",
"return",
"message"
]
| Creates MIME message
:param subject: Subject of email
:param text: Email content
:return: Email formatted as HTML ready to be sent | [
"Creates",
"MIME",
"message"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/email/gmail.py#L36-L49 | train |
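A usage sketch, reusing get_mime_message as defined above (only documented email.mime.text behaviour is relied on):

msg = get_mime_message("Build finished", "All tests passed.\nSee attached log.")
print(msg["subject"])     # Build finished
print(msg.get_payload())  # <html>All tests passed.<br>See attached log.</html>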
sirfoga/pyhal | hal/internet/email/gmail.py | send_email | def send_email(sender, msg, driver):
"""Sends email to me with this message
:param sender: Sender of email
:param msg: Message to send to me
:param driver: GMail authenticator
"""
driver.users().messages().send(
userId=sender,
body=msg
).execute() | python | def send_email(sender, msg, driver):
"""Sends email to me with this message
:param sender: Sender of email
:param msg: Message to send to me
:param driver: GMail authenticator
"""
driver.users().messages().send(
userId=sender,
body=msg
).execute() | [
"def",
"send_email",
"(",
"sender",
",",
"msg",
",",
"driver",
")",
":",
"driver",
".",
"users",
"(",
")",
".",
"messages",
"(",
")",
".",
"send",
"(",
"userId",
"=",
"sender",
",",
"body",
"=",
"msg",
")",
".",
"execute",
"(",
")"
]
| Sends email to me with this message
:param sender: Sender of email
:param msg: Message to send to me
:param driver: GMail authenticator | [
"Sends",
"email",
"to",
"me",
"with",
"this",
"message"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/email/gmail.py#L52-L62 | train |
kgritesh/pip-save | setup.py | get_readme | def get_readme():
"""Get the contents of the ``README.rst`` file as a Unicode string."""
try:
import pypandoc
description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
description = open('README.md').read()
return description | python | def get_readme():
"""Get the contents of the ``README.rst`` file as a Unicode string."""
try:
import pypandoc
description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
description = open('README.md').read()
return description | [
"def",
"get_readme",
"(",
")",
":",
"try",
":",
"import",
"pypandoc",
"description",
"=",
"pypandoc",
".",
"convert",
"(",
"'README.md'",
",",
"'rst'",
")",
"except",
"(",
"IOError",
",",
"ImportError",
")",
":",
"description",
"=",
"open",
"(",
"'README.md'",
")",
".",
"read",
"(",
")",
"return",
"description"
]
| Get the contents of the ``README.md`` file as a Unicode string. | [
"Get",
"the",
"contents",
"of",
"the",
"README",
".",
"rst",
"file",
"as",
"a",
"Unicode",
"string",
"."
]
| 70a1269db5db05bb850c2caa00222ebe40b2f2fd | https://github.com/kgritesh/pip-save/blob/70a1269db5db05bb850c2caa00222ebe40b2f2fd/setup.py#L9-L17 | train |
kgritesh/pip-save | setup.py | get_absolute_path | def get_absolute_path(*args):
"""Transform relative pathnames into absolute pathnames."""
directory = os.path.dirname(os.path.abspath(__file__))
return os.path.join(directory, *args) | python | def get_absolute_path(*args):
"""Transform relative pathnames into absolute pathnames."""
directory = os.path.dirname(os.path.abspath(__file__))
return os.path.join(directory, *args) | [
"def",
"get_absolute_path",
"(",
"*",
"args",
")",
":",
"directory",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"*",
"args",
")"
]
| Transform relative pathnames into absolute pathnames. | [
"Transform",
"relative",
"pathnames",
"into",
"absolute",
"pathnames",
"."
]
| 70a1269db5db05bb850c2caa00222ebe40b2f2fd | https://github.com/kgritesh/pip-save/blob/70a1269db5db05bb850c2caa00222ebe40b2f2fd/setup.py#L19-L22 | train |
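A usage sketch: paths resolve relative to the directory containing setup.py, regardless of the current working directory:

print(get_absolute_path("README.md"))
# e.g. /home/user/pip-save/README.md (always absolute)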
portfors-lab/sparkle | sparkle/data/hdf5data.py | HDF5Data.trim | def trim(self, key):
"""
Removes empty rows from dataset... I am still wanting to use this???
:param key: the dataset to trim
:type key: str
"""
current_index = self.meta[key]['cursor']
self.hdf5[key].resize(current_index, axis=0) | python | def trim(self, key):
"""
Removes empty rows from dataset... I am still wanting to use this???
:param key: the dataset to trim
:type key: str
"""
current_index = self.meta[key]['cursor']
self.hdf5[key].resize(current_index, axis=0) | [
"def",
"trim",
"(",
"self",
",",
"key",
")",
":",
"current_index",
"=",
"self",
".",
"meta",
"[",
"key",
"]",
"[",
"'cursor'",
"]",
"self",
".",
"hdf5",
"[",
"key",
"]",
".",
"resize",
"(",
"current_index",
",",
"axis",
"=",
"0",
")"
]
| Removes empty rows from dataset... I am still wanting to use this???
:param key: the dataset to trim
:type key: str | [
"Removes",
"empty",
"rows",
"from",
"dataset",
"...",
"I",
"am",
"still",
"wanting",
"to",
"use",
"this???"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/data/hdf5data.py#L278-L286 | train |
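The method wraps h5py's resize on a preallocated, growable dataset. A self-contained sketch of the same trimming pattern, with made-up file names and shapes:

```python
import h5py
import numpy as np

with h5py.File("acquisition.hdf5", "w") as f:  # hypothetical file
    traces = f.create_dataset("traces", shape=(100, 1000), maxshape=(None, 1000))
    cursor = 42                        # rows actually written so far
    traces[:cursor] = np.random.rand(cursor, 1000)
    traces.resize(cursor, axis=0)      # drop the 58 still-empty preallocated rows
```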
sirfoga/pyhal | hal/internet/services/youtube.py | YoutubeChannel.get_channel_page | def get_channel_page(self):
"""Fetches source page
:return: source page of youtube channel
"""
channel_url = YOUTUBE_USER_BASE_URL + self.channel_name # url
source_page = Webpage(
channel_url).get_html_source() # get source page of channel
return source_page | python | def get_channel_page(self):
"""Fetches source page
:return: source page of youtube channel
"""
channel_url = YOUTUBE_USER_BASE_URL + self.channel_name # url
source_page = Webpage(
channel_url).get_html_source() # get source page of channel
return source_page | [
"def",
"get_channel_page",
"(",
"self",
")",
":",
"channel_url",
"=",
"YOUTUBE_USER_BASE_URL",
"+",
"self",
".",
"channel_name",
"# url",
"source_page",
"=",
"Webpage",
"(",
"channel_url",
")",
".",
"get_html_source",
"(",
")",
"# get source page of channel",
"return",
"source_page"
]
| Fetches source page
:return: source page of youtube channel | [
"Fetches",
"source",
"page"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/services/youtube.py#L19-L27 | train |
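A usage sketch, assuming the class is constructed with a channel name (inferred from the `channel_name` attribute; the name itself is made up):

```python
channel = YoutubeChannel("somechannel")  # hypothetical channel name
html = channel.get_channel_page()        # HTML source of YOUTUBE_USER_BASE_URL + "somechannel"
```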
sirfoga/pyhal | hal/internet/services/youtube.py | YoutubeChannel.get_feed_url_from_video | def get_feed_url_from_video(video_url):
"""Gets channel id and then creates feed url
:param video_url: Url of video
:return: feed url
"""
web_page = Webpage(video_url)
web_page.get_html_source()
channel_id = \
web_page.soup.find_all("div", {"class": "yt-user-info"})[0].a[
"href"]
channel_id = str(channel_id).strip().replace("/channel/",
"") # get channel id
return YoutubeChannel.get_feed_url_from_id(channel_id) | python | def get_feed_url_from_video(video_url):
"""Gets channel id and then creates feed url
:param video_url: Url of video
:return: feed url
"""
web_page = Webpage(video_url)
web_page.get_html_source()
channel_id = \
web_page.soup.find_all("div", {"class": "yt-user-info"})[0].a[
"href"]
channel_id = str(channel_id).strip().replace("/channel/",
"") # get channel id
return YoutubeChannel.get_feed_url_from_id(channel_id) | [
"def",
"get_feed_url_from_video",
"(",
"video_url",
")",
":",
"web_page",
"=",
"Webpage",
"(",
"video_url",
")",
"web_page",
".",
"get_html_source",
"(",
")",
"channel_id",
"=",
"web_page",
".",
"soup",
".",
"find_all",
"(",
"\"div\"",
",",
"{",
"\"class\"",
":",
"\"yt-user-info\"",
"}",
")",
"[",
"0",
"]",
".",
"a",
"[",
"\"href\"",
"]",
"channel_id",
"=",
"str",
"(",
"channel_id",
")",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"\"/channel/\"",
",",
"\"\"",
")",
"# get channel id",
"return",
"YoutubeChannel",
".",
"get_feed_url_from_id",
"(",
"channel_id",
")"
]
| Gets channel id and then creates feed url
:param video_url: Url of video
:return: feed url | [
"Gets",
"channel",
"id",
"and",
"then",
"creates",
"feed",
"url"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/services/youtube.py#L66-L79 | train |
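Being a static method, the feed-URL helper can be called without constructing a channel; the video URL below is a placeholder:

```python
feed_url = YoutubeChannel.get_feed_url_from_video(
    "https://www.youtube.com/watch?v=VIDEO_ID"  # placeholder video id
)
```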
jmbhughes/suvi-trainer | scripts/update_database.py | process_file | def process_file(path):
""" Open a single labeled image at path and get needed information, return as a dictionary"""
info = dict()
with fits.open(path) as hdu:
head = hdu[0].header
data = hdu[0].data
labels = {theme: value for value, theme in list(hdu[1].data)}
info['filename'] = os.path.basename(path)
info['trainer'] = head['expert']
info['date-label'] = dateparser.parse(head['date-lab'])
info['date-observation'] = dateparser.parse(head['date-end'])
for theme in themes:
info[theme + "_count"] = np.sum(data == labels[theme])
return info | python | def process_file(path):
""" Open a single labeled image at path and get needed information, return as a dictionary"""
info = dict()
with fits.open(path) as hdu:
head = hdu[0].header
data = hdu[0].data
labels = {theme: value for value, theme in list(hdu[1].data)}
info['filename'] = os.path.basename(path)
info['trainer'] = head['expert']
info['date-label'] = dateparser.parse(head['date-lab'])
info['date-observation'] = dateparser.parse(head['date-end'])
for theme in themes:
info[theme + "_count"] = np.sum(data == labels[theme])
return info | [
"def",
"process_file",
"(",
"path",
")",
":",
"info",
"=",
"dict",
"(",
")",
"with",
"fits",
".",
"open",
"(",
"path",
")",
"as",
"hdu",
":",
"head",
"=",
"hdu",
"[",
"0",
"]",
".",
"header",
"data",
"=",
"hdu",
"[",
"0",
"]",
".",
"data",
"labels",
"=",
"{",
"theme",
":",
"value",
"for",
"value",
",",
"theme",
"in",
"list",
"(",
"hdu",
"[",
"1",
"]",
".",
"data",
")",
"}",
"info",
"[",
"'filename'",
"]",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
"info",
"[",
"'trainer'",
"]",
"=",
"head",
"[",
"'expert'",
"]",
"info",
"[",
"'date-label'",
"]",
"=",
"dateparser",
".",
"parse",
"(",
"head",
"[",
"'date-lab'",
"]",
")",
"info",
"[",
"'date-observation'",
"]",
"=",
"dateparser",
".",
"parse",
"(",
"head",
"[",
"'date-end'",
"]",
")",
"for",
"theme",
"in",
"themes",
":",
"info",
"[",
"theme",
"+",
"\"_count\"",
"]",
"=",
"np",
".",
"sum",
"(",
"data",
"==",
"labels",
"[",
"theme",
"]",
")",
"return",
"info"
]
| Open a single labeled image at path and get needed information, return as a dictionary | [
"Open",
"a",
"single",
"labeled",
"image",
"at",
"path",
"and",
"get",
"needed",
"information",
"return",
"as",
"a",
"dictionary"
]
| 3d89894a4a037286221974c7eb5634d229b4f5d4 | https://github.com/jmbhughes/suvi-trainer/blob/3d89894a4a037286221974c7eb5634d229b4f5d4/scripts/update_database.py#L29-L42 | train |
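A sketch of how such per-file records are typically collected into a table for analysis — the directory layout is assumed, and pandas is implied by the `df` argument of `plot_counts` below:

```python
import glob
import pandas as pd

paths = glob.glob("labeled/*.fits")  # hypothetical directory of labeled FITS images
df = pd.DataFrame([process_file(p) for p in paths])
df = df.sort_values("date-observation")
```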
jmbhughes/suvi-trainer | scripts/update_database.py | plot_counts | def plot_counts(df, theme):
""" plot the counts of a given theme from a created database over time"""
dates, counts = df['date-observation'], df[theme + "_count"]
fig, ax = plt.subplots()
ax.set_ylabel("{} pixel counts".format(" ".join(theme.split("_"))))
ax.set_xlabel("observation date")
ax.plot(dates, counts, '.')
fig.autofmt_xdate()
plt.show() | python | def plot_counts(df, theme):
""" plot the counts of a given theme from a created database over time"""
dates, counts = df['date-observation'], df[theme + "_count"]
fig, ax = plt.subplots()
ax.set_ylabel("{} pixel counts".format(" ".join(theme.split("_"))))
ax.set_xlabel("observation date")
ax.plot(dates, counts, '.')
fig.autofmt_xdate()
plt.show() | [
"def",
"plot_counts",
"(",
"df",
",",
"theme",
")",
":",
"dates",
",",
"counts",
"=",
"df",
"[",
"'date-observation'",
"]",
",",
"df",
"[",
"theme",
"+",
"\"_count\"",
"]",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
")",
"ax",
".",
"set_ylabel",
"(",
"\"{} pixel counts\"",
".",
"format",
"(",
"\" \"",
".",
"join",
"(",
"theme",
".",
"split",
"(",
"\"_\"",
")",
")",
")",
")",
"ax",
".",
"set_xlabel",
"(",
"\"observation date\"",
")",
"ax",
".",
"plot",
"(",
"dates",
",",
"counts",
",",
"'.'",
")",
"fig",
".",
"autofmt_xdate",
"(",
")",
"plt",
".",
"show",
"(",
")"
]
| plot the counts of a given theme from a created database over time | [
"plot",
"the",
"counts",
"of",
"a",
"given",
"theme",
"from",
"a",
"created",
"database",
"over",
"time"
]
| 3d89894a4a037286221974c7eb5634d229b4f5d4 | https://github.com/jmbhughes/suvi-trainer/blob/3d89894a4a037286221974c7eb5634d229b4f5d4/scripts/update_database.py#L45-L53 | train |
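Continuing the sketch above, plotting one theme's pixel counts over time; the theme name is a guess at an entry of the module-level `themes` list:

```python
plot_counts(df, "filament")  # "filament" is hypothetical; any entry of `themes` works
```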
klahnakoski/mo-logs | mo_logs/strings.py | deformat | def deformat(value):
"""
REMOVE NON-ALPHANUMERIC CHARACTERS
FOR SOME REASON translate CAN NOT BE CALLED:
ERROR: translate() takes exactly one argument (2 given)
File "C:\Python27\lib\string.py", line 493, in translate
"""
output = []
for c in value:
if c in delchars:
continue
output.append(c)
return "".join(output) | python | def deformat(value):
"""
REMOVE NON-ALPHANUMERIC CHARACTERS
FOR SOME REASON translate CAN NOT BE CALLED:
ERROR: translate() takes exactly one argument (2 given)
File "C:\Python27\lib\string.py", line 493, in translate
"""
output = []
for c in value:
if c in delchars:
continue
output.append(c)
return "".join(output) | [
"def",
"deformat",
"(",
"value",
")",
":",
"output",
"=",
"[",
"]",
"for",
"c",
"in",
"value",
":",
"if",
"c",
"in",
"delchars",
":",
"continue",
"output",
".",
"append",
"(",
"c",
")",
"return",
"\"\"",
".",
"join",
"(",
"output",
")"
]
| REMOVE NON-ALPHANUMERIC CHARACTERS
FOR SOME REASON translate CAN NOT BE CALLED:
ERROR: translate() takes exactly one argument (2 given)
File "C:\Python27\lib\string.py", line 493, in translate | [
"REMOVE",
"NON",
"-",
"ALPHANUMERIC",
"CHARACTERS"
]
| 0971277ac9caf28a755b766b70621916957d4fea | https://github.com/klahnakoski/mo-logs/blob/0971277ac9caf28a755b766b70621916957d4fea/mo_logs/strings.py#L567-L580 | train |
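The docstring records a Python 2 quirk: `str.translate` there would not accept a deletion set for unicode input, hence the manual loop. On Python 3 the same cleanup can use a translation mapping, assuming `delchars` is the module's set of characters to drop:

```python
# Python 3 equivalent of deformat(), assuming `delchars` as defined in the module
table = {ord(c): None for c in delchars}  # mapping an ordinal to None deletes it
clean = "some-raw_value!".translate(table)
```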
klahnakoski/mo-logs | mo_logs/strings.py | _expand | def _expand(template, seq):
"""
seq IS TUPLE OF OBJECTS IN PATH ORDER INTO THE DATA TREE
"""
if is_text(template):
return _simple_expand(template, seq)
elif is_data(template):
# EXPAND LISTS OF ITEMS USING THIS FORM
# {"from":from, "template":template, "separator":separator}
template = wrap(template)
assert template["from"], "Expecting template to have 'from' attribute"
assert template.template, "Expecting template to have 'template' attribute"
data = seq[-1][template["from"]]
output = []
for d in data:
s = seq + (d,)
output.append(_expand(template.template, s))
return coalesce(template.separator, "").join(output)
elif is_list(template):
return "".join(_expand(t, seq) for t in template)
else:
if not _Log:
_late_import()
_Log.error("can not handle") | python | def _expand(template, seq):
"""
seq IS TUPLE OF OBJECTS IN PATH ORDER INTO THE DATA TREE
"""
if is_text(template):
return _simple_expand(template, seq)
elif is_data(template):
# EXPAND LISTS OF ITEMS USING THIS FORM
# {"from":from, "template":template, "separator":separator}
template = wrap(template)
assert template["from"], "Expecting template to have 'from' attribute"
assert template.template, "Expecting template to have 'template' attribute"
data = seq[-1][template["from"]]
output = []
for d in data:
s = seq + (d,)
output.append(_expand(template.template, s))
return coalesce(template.separator, "").join(output)
elif is_list(template):
return "".join(_expand(t, seq) for t in template)
else:
if not _Log:
_late_import()
_Log.error("can not handle") | [
"def",
"_expand",
"(",
"template",
",",
"seq",
")",
":",
"if",
"is_text",
"(",
"template",
")",
":",
"return",
"_simple_expand",
"(",
"template",
",",
"seq",
")",
"elif",
"is_data",
"(",
"template",
")",
":",
"# EXPAND LISTS OF ITEMS USING THIS FORM",
"# {\"from\":from, \"template\":template, \"separator\":separator}",
"template",
"=",
"wrap",
"(",
"template",
")",
"assert",
"template",
"[",
"\"from\"",
"]",
",",
"\"Expecting template to have 'from' attribute\"",
"assert",
"template",
".",
"template",
",",
"\"Expecting template to have 'template' attribute\"",
"data",
"=",
"seq",
"[",
"-",
"1",
"]",
"[",
"template",
"[",
"\"from\"",
"]",
"]",
"output",
"=",
"[",
"]",
"for",
"d",
"in",
"data",
":",
"s",
"=",
"seq",
"+",
"(",
"d",
",",
")",
"output",
".",
"append",
"(",
"_expand",
"(",
"template",
".",
"template",
",",
"s",
")",
")",
"return",
"coalesce",
"(",
"template",
".",
"separator",
",",
"\"\"",
")",
".",
"join",
"(",
"output",
")",
"elif",
"is_list",
"(",
"template",
")",
":",
"return",
"\"\"",
".",
"join",
"(",
"_expand",
"(",
"t",
",",
"seq",
")",
"for",
"t",
"in",
"template",
")",
"else",
":",
"if",
"not",
"_Log",
":",
"_late_import",
"(",
")",
"_Log",
".",
"error",
"(",
"\"can not handle\"",
")"
]
| seq IS TUPLE OF OBJECTS IN PATH ORDER INTO THE DATA TREE | [
"seq",
"IS",
"TUPLE",
"OF",
"OBJECTS",
"IN",
"PATH",
"ORDER",
"INTO",
"THE",
"DATA",
"TREE"
]
| 0971277ac9caf28a755b766b70621916957d4fea | https://github.com/klahnakoski/mo-logs/blob/0971277ac9caf28a755b766b70621916957d4fea/mo_logs/strings.py#L586-L611 | train |
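A worked example of the dict-template branch, assuming mo-logs' usual `{{name}}` moustache-style placeholders for `_simple_expand` and that the data is wrapped before the call:

```python
template = {"from": "hits", "template": "{{name}}", "separator": ", "}
data = wrap({"hits": [{"name": "alpha"}, {"name": "beta"}]})
# _expand(template, (data,)) would yield "alpha, beta" under these assumptions
```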