repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition
---|---|---|---|---|---|---|---|---|---|---|---|
threeML/astromodels
|
astromodels/core/model.py
|
Model.get_extended_source_fluxes
|
def get_extended_source_fluxes(self, id, j2000_ra, j2000_dec, energies):
"""
Get the flux of the id-th extended sources at the given position at the given energies
:param id: id of the source
:param j2000_ra: R.A. where the flux is desired
:param j2000_dec: Dec. where the flux is desired
:param energies: energies at which the flux is desired
:return: flux array
"""
return self._extended_sources.values()[id](j2000_ra, j2000_dec, energies)
|
python
|
def get_extended_source_fluxes(self, id, j2000_ra, j2000_dec, energies):
"""
Get the flux of the id-th extended sources at the given position at the given energies
:param id: id of the source
:param j2000_ra: R.A. where the flux is desired
:param j2000_dec: Dec. where the flux is desired
:param energies: energies at which the flux is desired
:return: flux array
"""
return self._extended_sources.values()[id](j2000_ra, j2000_dec, energies)
|
[
"def",
"get_extended_source_fluxes",
"(",
"self",
",",
"id",
",",
"j2000_ra",
",",
"j2000_dec",
",",
"energies",
")",
":",
"return",
"self",
".",
"_extended_sources",
".",
"values",
"(",
")",
"[",
"id",
"]",
"(",
"j2000_ra",
",",
"j2000_dec",
",",
"energies",
")"
] |
Get the flux of the id-th extended sources at the given position at the given energies
:param id: id of the source
:param j2000_ra: R.A. where the flux is desired
:param j2000_dec: Dec. where the flux is desired
:param energies: energies at which the flux is desired
:return: flux array
|
[
"Get",
"the",
"flux",
"of",
"the",
"id",
"-",
"th",
"extended",
"sources",
"at",
"the",
"given",
"position",
"at",
"the",
"given",
"energies"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/model.py#L996-L1007
|
train
|
threeML/astromodels
|
astromodels/utils/long_path_formatter.py
|
long_path_formatter
|
def long_path_formatter(line, max_width=pd.get_option('max_colwidth')):
"""
If a path is longer than max_width, it substitute it with the first and last element,
joined by "...". For example 'this.is.a.long.path.which.we.want.to.shorten' becomes
'this...shorten'
:param line:
:param max_width:
:return:
"""
if len(line) > max_width:
tokens = line.split(".")
trial1 = "%s...%s" % (tokens[0], tokens[-1])
if len(trial1) > max_width:
return "...%s" %(tokens[-1][-1:-(max_width-3)])
else:
return trial1
else:
return line
|
python
|
def long_path_formatter(line, max_width=pd.get_option('max_colwidth')):
"""
If a path is longer than max_width, it substitute it with the first and last element,
joined by "...". For example 'this.is.a.long.path.which.we.want.to.shorten' becomes
'this...shorten'
:param line:
:param max_width:
:return:
"""
if len(line) > max_width:
tokens = line.split(".")
trial1 = "%s...%s" % (tokens[0], tokens[-1])
if len(trial1) > max_width:
return "...%s" %(tokens[-1][-1:-(max_width-3)])
else:
return trial1
else:
return line
|
[
"def",
"long_path_formatter",
"(",
"line",
",",
"max_width",
"=",
"pd",
".",
"get_option",
"(",
"'max_colwidth'",
")",
")",
":",
"if",
"len",
"(",
"line",
")",
">",
"max_width",
":",
"tokens",
"=",
"line",
".",
"split",
"(",
"\".\"",
")",
"trial1",
"=",
"\"%s...%s\"",
"%",
"(",
"tokens",
"[",
"0",
"]",
",",
"tokens",
"[",
"-",
"1",
"]",
")",
"if",
"len",
"(",
"trial1",
")",
">",
"max_width",
":",
"return",
"\"...%s\"",
"%",
"(",
"tokens",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
":",
"-",
"(",
"max_width",
"-",
"3",
")",
"]",
")",
"else",
":",
"return",
"trial1",
"else",
":",
"return",
"line"
] |
If a path is longer than max_width, it substitute it with the first and last element,
joined by "...". For example 'this.is.a.long.path.which.we.want.to.shorten' becomes
'this...shorten'
:param line:
:param max_width:
:return:
|
[
"If",
"a",
"path",
"is",
"longer",
"than",
"max_width",
"it",
"substitute",
"it",
"with",
"the",
"first",
"and",
"last",
"element",
"joined",
"by",
"...",
".",
"For",
"example",
"this",
".",
"is",
".",
"a",
".",
"long",
".",
"path",
".",
"which",
".",
"we",
".",
"want",
".",
"to",
".",
"shorten",
"becomes",
"this",
"...",
"shorten"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/utils/long_path_formatter.py#L4-L30
|
train
|
threeML/astromodels
|
astromodels/sources/point_source.py
|
PointSource.has_free_parameters
|
def has_free_parameters(self):
"""
Returns True or False whether there is any parameter in this source
:return:
"""
for component in self._components.values():
for par in component.shape.parameters.values():
if par.free:
return True
for par in self.position.parameters.values():
if par.free:
return True
return False
|
python
|
def has_free_parameters(self):
"""
Returns True or False whether there is any parameter in this source
:return:
"""
for component in self._components.values():
for par in component.shape.parameters.values():
if par.free:
return True
for par in self.position.parameters.values():
if par.free:
return True
return False
|
[
"def",
"has_free_parameters",
"(",
"self",
")",
":",
"for",
"component",
"in",
"self",
".",
"_components",
".",
"values",
"(",
")",
":",
"for",
"par",
"in",
"component",
".",
"shape",
".",
"parameters",
".",
"values",
"(",
")",
":",
"if",
"par",
".",
"free",
":",
"return",
"True",
"for",
"par",
"in",
"self",
".",
"position",
".",
"parameters",
".",
"values",
"(",
")",
":",
"if",
"par",
".",
"free",
":",
"return",
"True",
"return",
"False"
] |
Returns True or False whether there is any parameter in this source
:return:
|
[
"Returns",
"True",
"or",
"False",
"whether",
"there",
"is",
"any",
"parameter",
"in",
"this",
"source"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/sources/point_source.py#L214-L235
|
train
|
threeML/astromodels
|
astromodels/sources/point_source.py
|
PointSource._repr__base
|
def _repr__base(self, rich_output=False):
"""
Representation of the object
:param rich_output: if True, generates HTML, otherwise text
:return: the representation
"""
# Make a dictionary which will then be transformed in a list
repr_dict = collections.OrderedDict()
key = '%s (point source)' % self.name
repr_dict[key] = collections.OrderedDict()
repr_dict[key]['position'] = self._sky_position.to_dict(minimal=True)
repr_dict[key]['spectrum'] = collections.OrderedDict()
for component_name, component in self.components.iteritems():
repr_dict[key]['spectrum'][component_name] = component.to_dict(minimal=True)
return dict_to_list(repr_dict, rich_output)
|
python
|
def _repr__base(self, rich_output=False):
"""
Representation of the object
:param rich_output: if True, generates HTML, otherwise text
:return: the representation
"""
# Make a dictionary which will then be transformed in a list
repr_dict = collections.OrderedDict()
key = '%s (point source)' % self.name
repr_dict[key] = collections.OrderedDict()
repr_dict[key]['position'] = self._sky_position.to_dict(minimal=True)
repr_dict[key]['spectrum'] = collections.OrderedDict()
for component_name, component in self.components.iteritems():
repr_dict[key]['spectrum'][component_name] = component.to_dict(minimal=True)
return dict_to_list(repr_dict, rich_output)
|
[
"def",
"_repr__base",
"(",
"self",
",",
"rich_output",
"=",
"False",
")",
":",
"# Make a dictionary which will then be transformed in a list",
"repr_dict",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"key",
"=",
"'%s (point source)'",
"%",
"self",
".",
"name",
"repr_dict",
"[",
"key",
"]",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"repr_dict",
"[",
"key",
"]",
"[",
"'position'",
"]",
"=",
"self",
".",
"_sky_position",
".",
"to_dict",
"(",
"minimal",
"=",
"True",
")",
"repr_dict",
"[",
"key",
"]",
"[",
"'spectrum'",
"]",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"for",
"component_name",
",",
"component",
"in",
"self",
".",
"components",
".",
"iteritems",
"(",
")",
":",
"repr_dict",
"[",
"key",
"]",
"[",
"'spectrum'",
"]",
"[",
"component_name",
"]",
"=",
"component",
".",
"to_dict",
"(",
"minimal",
"=",
"True",
")",
"return",
"dict_to_list",
"(",
"repr_dict",
",",
"rich_output",
")"
] |
Representation of the object
:param rich_output: if True, generates HTML, otherwise text
:return: the representation
|
[
"Representation",
"of",
"the",
"object"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/sources/point_source.py#L287-L309
|
train
|
threeML/astromodels
|
astromodels/functions/function.py
|
get_function
|
def get_function(function_name, composite_function_expression=None):
"""
Returns the function "name", which must be among the known functions or a composite function.
:param function_name: the name of the function (use 'composite' if the function is a composite function)
:param composite_function_expression: composite function specification such as
((((powerlaw{1} + (sin{2} * 3)) + (sin{2} * 25)) - (powerlaw{1} * 16)) + (sin{2} ** 3.0))
:return: the an instance of the requested class
"""
# Check whether this is a composite function or a simple function
if composite_function_expression is not None:
# Composite function
return _parse_function_expression(composite_function_expression)
else:
if function_name in _known_functions:
return _known_functions[function_name]()
else:
# Maybe this is a template
# NOTE: import here to avoid circular import
from astromodels.functions.template_model import TemplateModel, MissingDataFile
try:
instance = TemplateModel(function_name)
except MissingDataFile:
raise UnknownFunction("Function %s is not known. Known functions are: %s" %
(function_name, ",".join(_known_functions.keys())))
else:
return instance
|
python
|
def get_function(function_name, composite_function_expression=None):
"""
Returns the function "name", which must be among the known functions or a composite function.
:param function_name: the name of the function (use 'composite' if the function is a composite function)
:param composite_function_expression: composite function specification such as
((((powerlaw{1} + (sin{2} * 3)) + (sin{2} * 25)) - (powerlaw{1} * 16)) + (sin{2} ** 3.0))
:return: the an instance of the requested class
"""
# Check whether this is a composite function or a simple function
if composite_function_expression is not None:
# Composite function
return _parse_function_expression(composite_function_expression)
else:
if function_name in _known_functions:
return _known_functions[function_name]()
else:
# Maybe this is a template
# NOTE: import here to avoid circular import
from astromodels.functions.template_model import TemplateModel, MissingDataFile
try:
instance = TemplateModel(function_name)
except MissingDataFile:
raise UnknownFunction("Function %s is not known. Known functions are: %s" %
(function_name, ",".join(_known_functions.keys())))
else:
return instance
|
[
"def",
"get_function",
"(",
"function_name",
",",
"composite_function_expression",
"=",
"None",
")",
":",
"# Check whether this is a composite function or a simple function",
"if",
"composite_function_expression",
"is",
"not",
"None",
":",
"# Composite function",
"return",
"_parse_function_expression",
"(",
"composite_function_expression",
")",
"else",
":",
"if",
"function_name",
"in",
"_known_functions",
":",
"return",
"_known_functions",
"[",
"function_name",
"]",
"(",
")",
"else",
":",
"# Maybe this is a template",
"# NOTE: import here to avoid circular import",
"from",
"astromodels",
".",
"functions",
".",
"template_model",
"import",
"TemplateModel",
",",
"MissingDataFile",
"try",
":",
"instance",
"=",
"TemplateModel",
"(",
"function_name",
")",
"except",
"MissingDataFile",
":",
"raise",
"UnknownFunction",
"(",
"\"Function %s is not known. Known functions are: %s\"",
"%",
"(",
"function_name",
",",
"\",\"",
".",
"join",
"(",
"_known_functions",
".",
"keys",
"(",
")",
")",
")",
")",
"else",
":",
"return",
"instance"
] |
Returns the function "name", which must be among the known functions or a composite function.
:param function_name: the name of the function (use 'composite' if the function is a composite function)
:param composite_function_expression: composite function specification such as
((((powerlaw{1} + (sin{2} * 3)) + (sin{2} * 25)) - (powerlaw{1} * 16)) + (sin{2} ** 3.0))
:return: the an instance of the requested class
|
[
"Returns",
"the",
"function",
"name",
"which",
"must",
"be",
"among",
"the",
"known",
"functions",
"or",
"a",
"composite",
"function",
"."
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/functions/function.py#L1566-L1609
|
train
|
threeML/astromodels
|
astromodels/functions/function.py
|
get_function_class
|
def get_function_class(function_name):
"""
Return the type for the requested function
:param function_name: the function to return
:return: the type for that function (i.e., this is a class, not an instance)
"""
if function_name in _known_functions:
return _known_functions[function_name]
else:
raise UnknownFunction("Function %s is not known. Known functions are: %s" %
(function_name, ",".join(_known_functions.keys())))
|
python
|
def get_function_class(function_name):
"""
Return the type for the requested function
:param function_name: the function to return
:return: the type for that function (i.e., this is a class, not an instance)
"""
if function_name in _known_functions:
return _known_functions[function_name]
else:
raise UnknownFunction("Function %s is not known. Known functions are: %s" %
(function_name, ",".join(_known_functions.keys())))
|
[
"def",
"get_function_class",
"(",
"function_name",
")",
":",
"if",
"function_name",
"in",
"_known_functions",
":",
"return",
"_known_functions",
"[",
"function_name",
"]",
"else",
":",
"raise",
"UnknownFunction",
"(",
"\"Function %s is not known. Known functions are: %s\"",
"%",
"(",
"function_name",
",",
"\",\"",
".",
"join",
"(",
"_known_functions",
".",
"keys",
"(",
")",
")",
")",
")"
] |
Return the type for the requested function
:param function_name: the function to return
:return: the type for that function (i.e., this is a class, not an instance)
|
[
"Return",
"the",
"type",
"for",
"the",
"requested",
"function"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/functions/function.py#L1612-L1627
|
train
|
threeML/astromodels
|
astromodels/functions/function.py
|
FunctionMeta.check_calling_sequence
|
def check_calling_sequence(name, function_name, function, possible_variables):
"""
Check the calling sequence for the function looking for the variables specified.
One or more of the variables can be in the calling sequence. Note that the
order of the variables will be enforced.
It will also enforce that the first parameter in the calling sequence is called 'self'.
:param function: the function to check
:param possible_variables: a list of variables to check, The order is important, and will be enforced
:return: a tuple containing the list of found variables, and the name of the other parameters in the calling
sequence
"""
# Get calling sequence
# If the function has been memoized, it will have a "input_object" member
try:
calling_sequence = inspect.getargspec(function.input_object).args
except AttributeError:
# This might happen if the function is with memoization
calling_sequence = inspect.getargspec(function).args
assert calling_sequence[0] == 'self', "Wrong syntax for 'evaluate' in %s. The first argument " \
"should be called 'self'." % name
# Figure out how many variables are used
variables = filter(lambda var: var in possible_variables, calling_sequence)
# Check that they actually make sense. They must be used in the same order
# as specified in possible_variables
assert len(variables) > 0, "The name of the variables for 'evaluate' in %s must be one or more " \
"among %s, instead of %s" % (name, ','.join(possible_variables), ",".join(variables))
if variables != possible_variables[:len(variables)]:
raise AssertionError("The variables %s are out of order in '%s' of %s. Should be %s."
% (",".join(variables), function_name, name, possible_variables[:len(variables)]))
other_parameters = filter(lambda var: var not in variables and var != 'self', calling_sequence)
return variables, other_parameters
|
python
|
def check_calling_sequence(name, function_name, function, possible_variables):
"""
Check the calling sequence for the function looking for the variables specified.
One or more of the variables can be in the calling sequence. Note that the
order of the variables will be enforced.
It will also enforce that the first parameter in the calling sequence is called 'self'.
:param function: the function to check
:param possible_variables: a list of variables to check, The order is important, and will be enforced
:return: a tuple containing the list of found variables, and the name of the other parameters in the calling
sequence
"""
# Get calling sequence
# If the function has been memoized, it will have a "input_object" member
try:
calling_sequence = inspect.getargspec(function.input_object).args
except AttributeError:
# This might happen if the function is with memoization
calling_sequence = inspect.getargspec(function).args
assert calling_sequence[0] == 'self', "Wrong syntax for 'evaluate' in %s. The first argument " \
"should be called 'self'." % name
# Figure out how many variables are used
variables = filter(lambda var: var in possible_variables, calling_sequence)
# Check that they actually make sense. They must be used in the same order
# as specified in possible_variables
assert len(variables) > 0, "The name of the variables for 'evaluate' in %s must be one or more " \
"among %s, instead of %s" % (name, ','.join(possible_variables), ",".join(variables))
if variables != possible_variables[:len(variables)]:
raise AssertionError("The variables %s are out of order in '%s' of %s. Should be %s."
% (",".join(variables), function_name, name, possible_variables[:len(variables)]))
other_parameters = filter(lambda var: var not in variables and var != 'self', calling_sequence)
return variables, other_parameters
|
[
"def",
"check_calling_sequence",
"(",
"name",
",",
"function_name",
",",
"function",
",",
"possible_variables",
")",
":",
"# Get calling sequence",
"# If the function has been memoized, it will have a \"input_object\" member",
"try",
":",
"calling_sequence",
"=",
"inspect",
".",
"getargspec",
"(",
"function",
".",
"input_object",
")",
".",
"args",
"except",
"AttributeError",
":",
"# This might happen if the function is with memoization",
"calling_sequence",
"=",
"inspect",
".",
"getargspec",
"(",
"function",
")",
".",
"args",
"assert",
"calling_sequence",
"[",
"0",
"]",
"==",
"'self'",
",",
"\"Wrong syntax for 'evaluate' in %s. The first argument \"",
"\"should be called 'self'.\"",
"%",
"name",
"# Figure out how many variables are used",
"variables",
"=",
"filter",
"(",
"lambda",
"var",
":",
"var",
"in",
"possible_variables",
",",
"calling_sequence",
")",
"# Check that they actually make sense. They must be used in the same order",
"# as specified in possible_variables",
"assert",
"len",
"(",
"variables",
")",
">",
"0",
",",
"\"The name of the variables for 'evaluate' in %s must be one or more \"",
"\"among %s, instead of %s\"",
"%",
"(",
"name",
",",
"','",
".",
"join",
"(",
"possible_variables",
")",
",",
"\",\"",
".",
"join",
"(",
"variables",
")",
")",
"if",
"variables",
"!=",
"possible_variables",
"[",
":",
"len",
"(",
"variables",
")",
"]",
":",
"raise",
"AssertionError",
"(",
"\"The variables %s are out of order in '%s' of %s. Should be %s.\"",
"%",
"(",
"\",\"",
".",
"join",
"(",
"variables",
")",
",",
"function_name",
",",
"name",
",",
"possible_variables",
"[",
":",
"len",
"(",
"variables",
")",
"]",
")",
")",
"other_parameters",
"=",
"filter",
"(",
"lambda",
"var",
":",
"var",
"not",
"in",
"variables",
"and",
"var",
"!=",
"'self'",
",",
"calling_sequence",
")",
"return",
"variables",
",",
"other_parameters"
] |
Check the calling sequence for the function looking for the variables specified.
One or more of the variables can be in the calling sequence. Note that the
order of the variables will be enforced.
It will also enforce that the first parameter in the calling sequence is called 'self'.
:param function: the function to check
:param possible_variables: a list of variables to check, The order is important, and will be enforced
:return: a tuple containing the list of found variables, and the name of the other parameters in the calling
sequence
|
[
"Check",
"the",
"calling",
"sequence",
"for",
"the",
"function",
"looking",
"for",
"the",
"variables",
"specified",
".",
"One",
"or",
"more",
"of",
"the",
"variables",
"can",
"be",
"in",
"the",
"calling",
"sequence",
".",
"Note",
"that",
"the",
"order",
"of",
"the",
"variables",
"will",
"be",
"enforced",
".",
"It",
"will",
"also",
"enforce",
"that",
"the",
"first",
"parameter",
"in",
"the",
"calling",
"sequence",
"is",
"called",
"self",
"."
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/functions/function.py#L339-L385
|
train
|
threeML/astromodels
|
astromodels/functions/function.py
|
Function.free_parameters
|
def free_parameters(self):
"""
Returns a dictionary of free parameters for this function
:return: dictionary of free parameters
"""
free_parameters = collections.OrderedDict([(k,v) for k, v in self.parameters.iteritems() if v.free])
return free_parameters
|
python
|
def free_parameters(self):
"""
Returns a dictionary of free parameters for this function
:return: dictionary of free parameters
"""
free_parameters = collections.OrderedDict([(k,v) for k, v in self.parameters.iteritems() if v.free])
return free_parameters
|
[
"def",
"free_parameters",
"(",
"self",
")",
":",
"free_parameters",
"=",
"collections",
".",
"OrderedDict",
"(",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"parameters",
".",
"iteritems",
"(",
")",
"if",
"v",
".",
"free",
"]",
")",
"return",
"free_parameters"
] |
Returns a dictionary of free parameters for this function
:return: dictionary of free parameters
|
[
"Returns",
"a",
"dictionary",
"of",
"free",
"parameters",
"for",
"this",
"function"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/functions/function.py#L518-L527
|
train
|
threeML/astromodels
|
astromodels/utils/data_files.py
|
_get_data_file_path
|
def _get_data_file_path(data_file):
"""
Returns the absolute path to the required data files.
:param data_file: relative path to the data file, relative to the astromodels/data path.
So to get the path to data/dark_matter/gammamc_dif.dat you need to use data_file="dark_matter/gammamc_dif.dat"
:return: absolute path of the data file
"""
try:
file_path = pkg_resources.resource_filename("astromodels", 'data/%s' % data_file)
except KeyError:
raise IOError("Could not read or find data file %s. Try reinstalling astromodels. If this does not fix your "
"problem, open an issue on github." % (data_file))
else:
return os.path.abspath(file_path)
|
python
|
def _get_data_file_path(data_file):
"""
Returns the absolute path to the required data files.
:param data_file: relative path to the data file, relative to the astromodels/data path.
So to get the path to data/dark_matter/gammamc_dif.dat you need to use data_file="dark_matter/gammamc_dif.dat"
:return: absolute path of the data file
"""
try:
file_path = pkg_resources.resource_filename("astromodels", 'data/%s' % data_file)
except KeyError:
raise IOError("Could not read or find data file %s. Try reinstalling astromodels. If this does not fix your "
"problem, open an issue on github." % (data_file))
else:
return os.path.abspath(file_path)
|
[
"def",
"_get_data_file_path",
"(",
"data_file",
")",
":",
"try",
":",
"file_path",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"\"astromodels\"",
",",
"'data/%s'",
"%",
"data_file",
")",
"except",
"KeyError",
":",
"raise",
"IOError",
"(",
"\"Could not read or find data file %s. Try reinstalling astromodels. If this does not fix your \"",
"\"problem, open an issue on github.\"",
"%",
"(",
"data_file",
")",
")",
"else",
":",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"file_path",
")"
] |
Returns the absolute path to the required data files.
:param data_file: relative path to the data file, relative to the astromodels/data path.
So to get the path to data/dark_matter/gammamc_dif.dat you need to use data_file="dark_matter/gammamc_dif.dat"
:return: absolute path of the data file
|
[
"Returns",
"the",
"absolute",
"path",
"to",
"the",
"required",
"data",
"files",
"."
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/utils/data_files.py#L5-L25
|
train
|
threeML/astromodels
|
astromodels/functions/dark_matter/dm_models.py
|
DMFitFunction._setup
|
def _setup(self):
tablepath = _get_data_file_path("dark_matter/gammamc_dif.dat")
self._data = np.loadtxt(tablepath)
"""
Mapping between the channel codes and the rows in the gammamc file
1 : 8, # ee
2 : 6, # mumu
3 : 3, # tautau
4 : 1, # bb
5 : 2, # tt
6 : 7, # gg
7 : 4, # ww
8 : 5, # zz
9 : 0, # cc
10 : 10, # uu
11 : 11, # dd
12 : 9, # ss
"""
channel_index_mapping = {
1: 8, # ee
2: 6, # mumu
3: 3, # tautau
4: 1, # bb
5: 2, # tt
6: 7, # gg
7: 4, # ww
8: 5, # zz
9: 0, # cc
10: 10, # uu
11: 11, # dd
12: 9, # ss
}
# Number of decades in x = log10(E/M)
ndec = 10.0
xedge = np.linspace(0, 1.0, 251)
self._x = 0.5 * (xedge[1:] + xedge[:-1]) * ndec - ndec
ichan = channel_index_mapping[int(self.channel.value)]
# These are the mass points
self._mass = np.array([2.0, 4.0, 6.0, 8.0, 10.0,
25.0, 50.0, 80.3, 91.2, 100.0,
150.0, 176.0, 200.0, 250.0, 350.0, 500.0, 750.0,
1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 1E4])
self._dn = self._data.reshape((12, 24, 250))
self._dn_interp = RegularGridInterpolator([self._mass, self._x],
self._dn[ichan, :, :],
bounds_error=False,
fill_value=None)
if self.mass.value > 10000:
print "Warning: DMFitFunction only appropriate for masses <= 10 TeV"
print "To model DM from 2 GeV < mass < 1 PeV use DMSpectra"
|
python
|
def _setup(self):
tablepath = _get_data_file_path("dark_matter/gammamc_dif.dat")
self._data = np.loadtxt(tablepath)
"""
Mapping between the channel codes and the rows in the gammamc file
1 : 8, # ee
2 : 6, # mumu
3 : 3, # tautau
4 : 1, # bb
5 : 2, # tt
6 : 7, # gg
7 : 4, # ww
8 : 5, # zz
9 : 0, # cc
10 : 10, # uu
11 : 11, # dd
12 : 9, # ss
"""
channel_index_mapping = {
1: 8, # ee
2: 6, # mumu
3: 3, # tautau
4: 1, # bb
5: 2, # tt
6: 7, # gg
7: 4, # ww
8: 5, # zz
9: 0, # cc
10: 10, # uu
11: 11, # dd
12: 9, # ss
}
# Number of decades in x = log10(E/M)
ndec = 10.0
xedge = np.linspace(0, 1.0, 251)
self._x = 0.5 * (xedge[1:] + xedge[:-1]) * ndec - ndec
ichan = channel_index_mapping[int(self.channel.value)]
# These are the mass points
self._mass = np.array([2.0, 4.0, 6.0, 8.0, 10.0,
25.0, 50.0, 80.3, 91.2, 100.0,
150.0, 176.0, 200.0, 250.0, 350.0, 500.0, 750.0,
1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 1E4])
self._dn = self._data.reshape((12, 24, 250))
self._dn_interp = RegularGridInterpolator([self._mass, self._x],
self._dn[ichan, :, :],
bounds_error=False,
fill_value=None)
if self.mass.value > 10000:
print "Warning: DMFitFunction only appropriate for masses <= 10 TeV"
print "To model DM from 2 GeV < mass < 1 PeV use DMSpectra"
|
[
"def",
"_setup",
"(",
"self",
")",
":",
"tablepath",
"=",
"_get_data_file_path",
"(",
"\"dark_matter/gammamc_dif.dat\"",
")",
"self",
".",
"_data",
"=",
"np",
".",
"loadtxt",
"(",
"tablepath",
")",
"channel_index_mapping",
"=",
"{",
"1",
":",
"8",
",",
"# ee",
"2",
":",
"6",
",",
"# mumu",
"3",
":",
"3",
",",
"# tautau",
"4",
":",
"1",
",",
"# bb",
"5",
":",
"2",
",",
"# tt",
"6",
":",
"7",
",",
"# gg",
"7",
":",
"4",
",",
"# ww",
"8",
":",
"5",
",",
"# zz",
"9",
":",
"0",
",",
"# cc",
"10",
":",
"10",
",",
"# uu",
"11",
":",
"11",
",",
"# dd",
"12",
":",
"9",
",",
"# ss",
"}",
"# Number of decades in x = log10(E/M)",
"ndec",
"=",
"10.0",
"xedge",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"1.0",
",",
"251",
")",
"self",
".",
"_x",
"=",
"0.5",
"*",
"(",
"xedge",
"[",
"1",
":",
"]",
"+",
"xedge",
"[",
":",
"-",
"1",
"]",
")",
"*",
"ndec",
"-",
"ndec",
"ichan",
"=",
"channel_index_mapping",
"[",
"int",
"(",
"self",
".",
"channel",
".",
"value",
")",
"]",
"# These are the mass points",
"self",
".",
"_mass",
"=",
"np",
".",
"array",
"(",
"[",
"2.0",
",",
"4.0",
",",
"6.0",
",",
"8.0",
",",
"10.0",
",",
"25.0",
",",
"50.0",
",",
"80.3",
",",
"91.2",
",",
"100.0",
",",
"150.0",
",",
"176.0",
",",
"200.0",
",",
"250.0",
",",
"350.0",
",",
"500.0",
",",
"750.0",
",",
"1000.0",
",",
"1500.0",
",",
"2000.0",
",",
"3000.0",
",",
"5000.0",
",",
"7000.0",
",",
"1E4",
"]",
")",
"self",
".",
"_dn",
"=",
"self",
".",
"_data",
".",
"reshape",
"(",
"(",
"12",
",",
"24",
",",
"250",
")",
")",
"self",
".",
"_dn_interp",
"=",
"RegularGridInterpolator",
"(",
"[",
"self",
".",
"_mass",
",",
"self",
".",
"_x",
"]",
",",
"self",
".",
"_dn",
"[",
"ichan",
",",
":",
",",
":",
"]",
",",
"bounds_error",
"=",
"False",
",",
"fill_value",
"=",
"None",
")",
"if",
"self",
".",
"mass",
".",
"value",
">",
"10000",
":",
"print",
"\"Warning: DMFitFunction only appropriate for masses <= 10 TeV\"",
"print",
"\"To model DM from 2 GeV < mass < 1 PeV use DMSpectra\""
] |
Mapping between the channel codes and the rows in the gammamc file
1 : 8, # ee
2 : 6, # mumu
3 : 3, # tautau
4 : 1, # bb
5 : 2, # tt
6 : 7, # gg
7 : 4, # ww
8 : 5, # zz
9 : 0, # cc
10 : 10, # uu
11 : 11, # dd
12 : 9, # ss
|
[
"Mapping",
"between",
"the",
"channel",
"codes",
"and",
"the",
"rows",
"in",
"the",
"gammamc",
"file"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/functions/dark_matter/dm_models.py#L48-L108
|
train
|
threeML/astromodels
|
astromodels/functions/dark_matter/dm_models.py
|
DMSpectra._setup
|
def _setup(self):
# Get and open the two data files
tablepath_h = _get_data_file_path("dark_matter/dmSpecTab.npy")
self._data_h = np.load(tablepath_h)
tablepath_f = _get_data_file_path("dark_matter/gammamc_dif.dat")
self._data_f = np.loadtxt(tablepath_f)
"""
Mapping between the channel codes and the rows in the gammamc file
dmSpecTab.npy created to match this mapping too
1 : 8, # ee
2 : 6, # mumu
3 : 3, # tautau
4 : 1, # bb
5 : 2, # tt
6 : 7, # gg
7 : 4, # ww
8 : 5, # zz
9 : 0, # cc
10 : 10, # uu
11 : 11, # dd
12 : 9, # ss
"""
channel_index_mapping = {
1: 8, # ee
2: 6, # mumu
3: 3, # tautau
4: 1, # bb
5: 2, # tt
6: 7, # gg
7: 4, # ww
8: 5, # zz
9: 0, # cc
10: 10, # uu
11: 11, # dd
12: 9, # ss
}
# Number of decades in x = log10(E/M)
ndec = 10.0
xedge = np.linspace(0, 1.0, 251)
self._x = 0.5 * (xedge[1:] + xedge[:-1]) * ndec - ndec
ichan = channel_index_mapping[int(self.channel.value)]
# These are the mass points in GeV
self._mass_h = np.array([50., 61.2, 74.91, 91.69, 112.22, 137.36, 168.12, 205.78, 251.87, 308.29,
377.34, 461.86, 565.31, 691.93, 846.91, 1036.6, 1268.78, 1552.97, 1900.82,
2326.57, 2847.69, 3485.53, 4266.23, 5221.81, 6391.41, 7823.0, 9575.23,
11719.94, 14345.03, 17558.1, 21490.85, 26304.48, 32196.3, 39407.79, 48234.54,
59038.36, 72262.07, 88447.7, 108258.66, 132506.99, 162186.57, 198513.95,
242978.11, 297401.58, 364015.09, 445549.04, 545345.37, 667494.6, 817003.43, 1000000.])
# These are the mass points in GeV
self._mass_f = np.array([2.0, 4.0, 6.0, 8.0, 10.0,
25.0, 50.0, 80.3, 91.2, 100.0,
150.0, 176.0, 200.0, 250.0, 350.0, 500.0, 750.0,
1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 1E4])
self._mass = np.append(self._mass_f, self._mass_h[27:])
self._dn_f = self._data_f.reshape((12, 24, 250))
# Is this really used?
self._dn_h = self._data_h
self._dn = np.zeros((12, len(self._mass), 250))
self._dn[:, 0:24, :] = self._dn_f
self._dn[:, 24:, :] = self._dn_h[:, 27:, :]
self._dn_interp = RegularGridInterpolator([self._mass, self._x],
self._dn[ichan, :, :],
bounds_error=False,
fill_value=None)
if self.channel.value in [1, 6, 7] and self.mass.value > 10000.:
print "ERROR: currently spectra for selected channel and mass not implemented."
print "Spectra for channels ['ee','gg','WW'] currently not available for mass > 10 TeV"
|
python
|
def _setup(self):
# Get and open the two data files
tablepath_h = _get_data_file_path("dark_matter/dmSpecTab.npy")
self._data_h = np.load(tablepath_h)
tablepath_f = _get_data_file_path("dark_matter/gammamc_dif.dat")
self._data_f = np.loadtxt(tablepath_f)
"""
Mapping between the channel codes and the rows in the gammamc file
dmSpecTab.npy created to match this mapping too
1 : 8, # ee
2 : 6, # mumu
3 : 3, # tautau
4 : 1, # bb
5 : 2, # tt
6 : 7, # gg
7 : 4, # ww
8 : 5, # zz
9 : 0, # cc
10 : 10, # uu
11 : 11, # dd
12 : 9, # ss
"""
channel_index_mapping = {
1: 8, # ee
2: 6, # mumu
3: 3, # tautau
4: 1, # bb
5: 2, # tt
6: 7, # gg
7: 4, # ww
8: 5, # zz
9: 0, # cc
10: 10, # uu
11: 11, # dd
12: 9, # ss
}
# Number of decades in x = log10(E/M)
ndec = 10.0
xedge = np.linspace(0, 1.0, 251)
self._x = 0.5 * (xedge[1:] + xedge[:-1]) * ndec - ndec
ichan = channel_index_mapping[int(self.channel.value)]
# These are the mass points in GeV
self._mass_h = np.array([50., 61.2, 74.91, 91.69, 112.22, 137.36, 168.12, 205.78, 251.87, 308.29,
377.34, 461.86, 565.31, 691.93, 846.91, 1036.6, 1268.78, 1552.97, 1900.82,
2326.57, 2847.69, 3485.53, 4266.23, 5221.81, 6391.41, 7823.0, 9575.23,
11719.94, 14345.03, 17558.1, 21490.85, 26304.48, 32196.3, 39407.79, 48234.54,
59038.36, 72262.07, 88447.7, 108258.66, 132506.99, 162186.57, 198513.95,
242978.11, 297401.58, 364015.09, 445549.04, 545345.37, 667494.6, 817003.43, 1000000.])
# These are the mass points in GeV
self._mass_f = np.array([2.0, 4.0, 6.0, 8.0, 10.0,
25.0, 50.0, 80.3, 91.2, 100.0,
150.0, 176.0, 200.0, 250.0, 350.0, 500.0, 750.0,
1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 1E4])
self._mass = np.append(self._mass_f, self._mass_h[27:])
self._dn_f = self._data_f.reshape((12, 24, 250))
# Is this really used?
self._dn_h = self._data_h
self._dn = np.zeros((12, len(self._mass), 250))
self._dn[:, 0:24, :] = self._dn_f
self._dn[:, 24:, :] = self._dn_h[:, 27:, :]
self._dn_interp = RegularGridInterpolator([self._mass, self._x],
self._dn[ichan, :, :],
bounds_error=False,
fill_value=None)
if self.channel.value in [1, 6, 7] and self.mass.value > 10000.:
print "ERROR: currently spectra for selected channel and mass not implemented."
print "Spectra for channels ['ee','gg','WW'] currently not available for mass > 10 TeV"
|
[
"def",
"_setup",
"(",
"self",
")",
":",
"# Get and open the two data files",
"tablepath_h",
"=",
"_get_data_file_path",
"(",
"\"dark_matter/dmSpecTab.npy\"",
")",
"self",
".",
"_data_h",
"=",
"np",
".",
"load",
"(",
"tablepath_h",
")",
"tablepath_f",
"=",
"_get_data_file_path",
"(",
"\"dark_matter/gammamc_dif.dat\"",
")",
"self",
".",
"_data_f",
"=",
"np",
".",
"loadtxt",
"(",
"tablepath_f",
")",
"channel_index_mapping",
"=",
"{",
"1",
":",
"8",
",",
"# ee",
"2",
":",
"6",
",",
"# mumu",
"3",
":",
"3",
",",
"# tautau",
"4",
":",
"1",
",",
"# bb",
"5",
":",
"2",
",",
"# tt",
"6",
":",
"7",
",",
"# gg",
"7",
":",
"4",
",",
"# ww",
"8",
":",
"5",
",",
"# zz",
"9",
":",
"0",
",",
"# cc",
"10",
":",
"10",
",",
"# uu",
"11",
":",
"11",
",",
"# dd",
"12",
":",
"9",
",",
"# ss",
"}",
"# Number of decades in x = log10(E/M)",
"ndec",
"=",
"10.0",
"xedge",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"1.0",
",",
"251",
")",
"self",
".",
"_x",
"=",
"0.5",
"*",
"(",
"xedge",
"[",
"1",
":",
"]",
"+",
"xedge",
"[",
":",
"-",
"1",
"]",
")",
"*",
"ndec",
"-",
"ndec",
"ichan",
"=",
"channel_index_mapping",
"[",
"int",
"(",
"self",
".",
"channel",
".",
"value",
")",
"]",
"# These are the mass points in GeV",
"self",
".",
"_mass_h",
"=",
"np",
".",
"array",
"(",
"[",
"50.",
",",
"61.2",
",",
"74.91",
",",
"91.69",
",",
"112.22",
",",
"137.36",
",",
"168.12",
",",
"205.78",
",",
"251.87",
",",
"308.29",
",",
"377.34",
",",
"461.86",
",",
"565.31",
",",
"691.93",
",",
"846.91",
",",
"1036.6",
",",
"1268.78",
",",
"1552.97",
",",
"1900.82",
",",
"2326.57",
",",
"2847.69",
",",
"3485.53",
",",
"4266.23",
",",
"5221.81",
",",
"6391.41",
",",
"7823.0",
",",
"9575.23",
",",
"11719.94",
",",
"14345.03",
",",
"17558.1",
",",
"21490.85",
",",
"26304.48",
",",
"32196.3",
",",
"39407.79",
",",
"48234.54",
",",
"59038.36",
",",
"72262.07",
",",
"88447.7",
",",
"108258.66",
",",
"132506.99",
",",
"162186.57",
",",
"198513.95",
",",
"242978.11",
",",
"297401.58",
",",
"364015.09",
",",
"445549.04",
",",
"545345.37",
",",
"667494.6",
",",
"817003.43",
",",
"1000000.",
"]",
")",
"# These are the mass points in GeV",
"self",
".",
"_mass_f",
"=",
"np",
".",
"array",
"(",
"[",
"2.0",
",",
"4.0",
",",
"6.0",
",",
"8.0",
",",
"10.0",
",",
"25.0",
",",
"50.0",
",",
"80.3",
",",
"91.2",
",",
"100.0",
",",
"150.0",
",",
"176.0",
",",
"200.0",
",",
"250.0",
",",
"350.0",
",",
"500.0",
",",
"750.0",
",",
"1000.0",
",",
"1500.0",
",",
"2000.0",
",",
"3000.0",
",",
"5000.0",
",",
"7000.0",
",",
"1E4",
"]",
")",
"self",
".",
"_mass",
"=",
"np",
".",
"append",
"(",
"self",
".",
"_mass_f",
",",
"self",
".",
"_mass_h",
"[",
"27",
":",
"]",
")",
"self",
".",
"_dn_f",
"=",
"self",
".",
"_data_f",
".",
"reshape",
"(",
"(",
"12",
",",
"24",
",",
"250",
")",
")",
"# Is this really used?",
"self",
".",
"_dn_h",
"=",
"self",
".",
"_data_h",
"self",
".",
"_dn",
"=",
"np",
".",
"zeros",
"(",
"(",
"12",
",",
"len",
"(",
"self",
".",
"_mass",
")",
",",
"250",
")",
")",
"self",
".",
"_dn",
"[",
":",
",",
"0",
":",
"24",
",",
":",
"]",
"=",
"self",
".",
"_dn_f",
"self",
".",
"_dn",
"[",
":",
",",
"24",
":",
",",
":",
"]",
"=",
"self",
".",
"_dn_h",
"[",
":",
",",
"27",
":",
",",
":",
"]",
"self",
".",
"_dn_interp",
"=",
"RegularGridInterpolator",
"(",
"[",
"self",
".",
"_mass",
",",
"self",
".",
"_x",
"]",
",",
"self",
".",
"_dn",
"[",
"ichan",
",",
":",
",",
":",
"]",
",",
"bounds_error",
"=",
"False",
",",
"fill_value",
"=",
"None",
")",
"if",
"self",
".",
"channel",
".",
"value",
"in",
"[",
"1",
",",
"6",
",",
"7",
"]",
"and",
"self",
".",
"mass",
".",
"value",
">",
"10000.",
":",
"print",
"\"ERROR: currently spectra for selected channel and mass not implemented.\"",
"print",
"\"Spectra for channels ['ee','gg','WW'] currently not available for mass > 10 TeV\""
] |
Mapping between the channel codes and the rows in the gammamc file
dmSpecTab.npy created to match this mapping too
1 : 8, # ee
2 : 6, # mumu
3 : 3, # tautau
4 : 1, # bb
5 : 2, # tt
6 : 7, # gg
7 : 4, # ww
8 : 5, # zz
9 : 0, # cc
10 : 10, # uu
11 : 11, # dd
12 : 9, # ss
|
[
"Mapping",
"between",
"the",
"channel",
"codes",
"and",
"the",
"rows",
"in",
"the",
"gammamc",
"file",
"dmSpecTab",
".",
"npy",
"created",
"to",
"match",
"this",
"mapping",
"too"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/functions/dark_matter/dm_models.py#L209-L291
|
train
|
threeML/astromodels
|
astromodels/utils/valid_variable.py
|
is_valid_variable_name
|
def is_valid_variable_name(string_to_check):
"""
Returns whether the provided name is a valid variable name in Python
:param string_to_check: the string to be checked
:return: True or False
"""
try:
parse('{} = None'.format(string_to_check))
return True
except (SyntaxError, ValueError, TypeError):
return False
|
python
|
def is_valid_variable_name(string_to_check):
"""
Returns whether the provided name is a valid variable name in Python
:param string_to_check: the string to be checked
:return: True or False
"""
try:
parse('{} = None'.format(string_to_check))
return True
except (SyntaxError, ValueError, TypeError):
return False
|
[
"def",
"is_valid_variable_name",
"(",
"string_to_check",
")",
":",
"try",
":",
"parse",
"(",
"'{} = None'",
".",
"format",
"(",
"string_to_check",
")",
")",
"return",
"True",
"except",
"(",
"SyntaxError",
",",
"ValueError",
",",
"TypeError",
")",
":",
"return",
"False"
] |
Returns whether the provided name is a valid variable name in Python
:param string_to_check: the string to be checked
:return: True or False
|
[
"Returns",
"whether",
"the",
"provided",
"name",
"is",
"a",
"valid",
"variable",
"name",
"in",
"Python"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/utils/valid_variable.py#L4-L19
|
train
|
threeML/astromodels
|
astromodels/core/units.py
|
_check_unit
|
def _check_unit(new_unit, old_unit):
"""
Check that the new unit is compatible with the old unit for the quantity described by variable_name
:param new_unit: instance of astropy.units.Unit
:param old_unit: instance of astropy.units.Unit
:return: nothin
"""
try:
new_unit.physical_type
except AttributeError:
raise UnitMismatch("The provided unit (%s) has no physical type. Was expecting a unit for %s"
% (new_unit, old_unit.physical_type))
if new_unit.physical_type != old_unit.physical_type:
raise UnitMismatch("Physical type mismatch: you provided a unit for %s instead of a unit for %s"
% (new_unit.physical_type, old_unit.physical_type))
|
python
|
def _check_unit(new_unit, old_unit):
"""
Check that the new unit is compatible with the old unit for the quantity described by variable_name
:param new_unit: instance of astropy.units.Unit
:param old_unit: instance of astropy.units.Unit
:return: nothin
"""
try:
new_unit.physical_type
except AttributeError:
raise UnitMismatch("The provided unit (%s) has no physical type. Was expecting a unit for %s"
% (new_unit, old_unit.physical_type))
if new_unit.physical_type != old_unit.physical_type:
raise UnitMismatch("Physical type mismatch: you provided a unit for %s instead of a unit for %s"
% (new_unit.physical_type, old_unit.physical_type))
|
[
"def",
"_check_unit",
"(",
"new_unit",
",",
"old_unit",
")",
":",
"try",
":",
"new_unit",
".",
"physical_type",
"except",
"AttributeError",
":",
"raise",
"UnitMismatch",
"(",
"\"The provided unit (%s) has no physical type. Was expecting a unit for %s\"",
"%",
"(",
"new_unit",
",",
"old_unit",
".",
"physical_type",
")",
")",
"if",
"new_unit",
".",
"physical_type",
"!=",
"old_unit",
".",
"physical_type",
":",
"raise",
"UnitMismatch",
"(",
"\"Physical type mismatch: you provided a unit for %s instead of a unit for %s\"",
"%",
"(",
"new_unit",
".",
"physical_type",
",",
"old_unit",
".",
"physical_type",
")",
")"
] |
Check that the new unit is compatible with the old unit for the quantity described by variable_name
:param new_unit: instance of astropy.units.Unit
:param old_unit: instance of astropy.units.Unit
:return: nothin
|
[
"Check",
"that",
"the",
"new",
"unit",
"is",
"compatible",
"with",
"the",
"old",
"unit",
"for",
"the",
"quantity",
"described",
"by",
"variable_name"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/units.py#L29-L50
|
train
|
threeML/astromodels
|
astromodels/functions/functions.py
|
Log_parabola.peak_energy
|
def peak_energy(self):
"""
Returns the peak energy in the nuFnu spectrum
:return: peak energy in keV
"""
# Eq. 6 in Massaro et al. 2004
# (http://adsabs.harvard.edu/abs/2004A%26A...413..489M)
return self.piv.value * pow(10, ((2 + self.alpha.value) * np.log(10)) / (2 * self.beta.value))
|
python
|
def peak_energy(self):
"""
Returns the peak energy in the nuFnu spectrum
:return: peak energy in keV
"""
# Eq. 6 in Massaro et al. 2004
# (http://adsabs.harvard.edu/abs/2004A%26A...413..489M)
return self.piv.value * pow(10, ((2 + self.alpha.value) * np.log(10)) / (2 * self.beta.value))
|
[
"def",
"peak_energy",
"(",
"self",
")",
":",
"# Eq. 6 in Massaro et al. 2004",
"# (http://adsabs.harvard.edu/abs/2004A%26A...413..489M)",
"return",
"self",
".",
"piv",
".",
"value",
"*",
"pow",
"(",
"10",
",",
"(",
"(",
"2",
"+",
"self",
".",
"alpha",
".",
"value",
")",
"*",
"np",
".",
"log",
"(",
"10",
")",
")",
"/",
"(",
"2",
"*",
"self",
".",
"beta",
".",
"value",
")",
")"
] |
Returns the peak energy in the nuFnu spectrum
:return: peak energy in keV
|
[
"Returns",
"the",
"peak",
"energy",
"in",
"the",
"nuFnu",
"spectrum"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/functions/functions.py#L1562-L1572
|
train
|
threeML/astromodels
|
astromodels/core/parameter.py
|
ParameterBase.in_unit_of
|
def in_unit_of(self, unit, as_quantity=False):
"""
Return the current value transformed to the new units
:param unit: either an astropy.Unit instance, or a string which can be converted to an astropy.Unit
instance, like "1 / (erg cm**2 s)"
:param as_quantity: if True, the method return an astropy.Quantity, if False just a floating point number.
Default is False
:return: either a floating point or a astropy.Quantity depending on the value of "as_quantity"
"""
new_unit = u.Unit(unit)
new_quantity = self.as_quantity.to(new_unit)
if as_quantity:
return new_quantity
else:
return new_quantity.value
|
python
|
def in_unit_of(self, unit, as_quantity=False):
"""
Return the current value transformed to the new units
:param unit: either an astropy.Unit instance, or a string which can be converted to an astropy.Unit
instance, like "1 / (erg cm**2 s)"
:param as_quantity: if True, the method return an astropy.Quantity, if False just a floating point number.
Default is False
:return: either a floating point or a astropy.Quantity depending on the value of "as_quantity"
"""
new_unit = u.Unit(unit)
new_quantity = self.as_quantity.to(new_unit)
if as_quantity:
return new_quantity
else:
return new_quantity.value
|
[
"def",
"in_unit_of",
"(",
"self",
",",
"unit",
",",
"as_quantity",
"=",
"False",
")",
":",
"new_unit",
"=",
"u",
".",
"Unit",
"(",
"unit",
")",
"new_quantity",
"=",
"self",
".",
"as_quantity",
".",
"to",
"(",
"new_unit",
")",
"if",
"as_quantity",
":",
"return",
"new_quantity",
"else",
":",
"return",
"new_quantity",
".",
"value"
] |
Return the current value transformed to the new units
:param unit: either an astropy.Unit instance, or a string which can be converted to an astropy.Unit
instance, like "1 / (erg cm**2 s)"
:param as_quantity: if True, the method return an astropy.Quantity, if False just a floating point number.
Default is False
:return: either a floating point or a astropy.Quantity depending on the value of "as_quantity"
|
[
"Return",
"the",
"current",
"value",
"transformed",
"to",
"the",
"new",
"units"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/parameter.py#L325-L346
|
train
|
threeML/astromodels
|
astromodels/core/parameter.py
|
ParameterBase._get_value
|
def _get_value(self):
"""Return current parameter value"""
# This is going to be true (possibly) only for derived classes. It is here to make the code cleaner
# and also to avoid infinite recursion
if self._aux_variable:
return self._aux_variable['law'](self._aux_variable['variable'].value)
if self._transformation is None:
return self._internal_value
else:
# A transformation is set. Transform back from internal value to true value
#
# print("Interval value is %s" % self._internal_value)
# print("Returning %s" % self._transformation.backward(self._internal_value))
return self._transformation.backward(self._internal_value)
|
python
|
def _get_value(self):
"""Return current parameter value"""
# This is going to be true (possibly) only for derived classes. It is here to make the code cleaner
# and also to avoid infinite recursion
if self._aux_variable:
return self._aux_variable['law'](self._aux_variable['variable'].value)
if self._transformation is None:
return self._internal_value
else:
# A transformation is set. Transform back from internal value to true value
#
# print("Interval value is %s" % self._internal_value)
# print("Returning %s" % self._transformation.backward(self._internal_value))
return self._transformation.backward(self._internal_value)
|
[
"def",
"_get_value",
"(",
"self",
")",
":",
"# This is going to be true (possibly) only for derived classes. It is here to make the code cleaner",
"# and also to avoid infinite recursion",
"if",
"self",
".",
"_aux_variable",
":",
"return",
"self",
".",
"_aux_variable",
"[",
"'law'",
"]",
"(",
"self",
".",
"_aux_variable",
"[",
"'variable'",
"]",
".",
"value",
")",
"if",
"self",
".",
"_transformation",
"is",
"None",
":",
"return",
"self",
".",
"_internal_value",
"else",
":",
"# A transformation is set. Transform back from internal value to true value",
"#",
"# print(\"Interval value is %s\" % self._internal_value)",
"# print(\"Returning %s\" % self._transformation.backward(self._internal_value))",
"return",
"self",
".",
"_transformation",
".",
"backward",
"(",
"self",
".",
"_internal_value",
")"
] |
Return current parameter value
|
[
"Return",
"current",
"parameter",
"value"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/parameter.py#L394-L415
|
train
|
threeML/astromodels
|
astromodels/core/parameter.py
|
ParameterBase._set_value
|
def _set_value(self, new_value):
"""Sets the current value of the parameter, ensuring that it is within the allowed range."""
if self.min_value is not None and new_value < self.min_value:
raise SettingOutOfBounds(
"Trying to set parameter {0} = {1}, which is less than the minimum allowed {2}".format(
self.name, new_value, self.min_value))
if self.max_value is not None and new_value > self.max_value:
raise SettingOutOfBounds(
"Trying to set parameter {0} = {1}, which is more than the maximum allowed {2}".format(
self.name, new_value, self.max_value))
# Issue a warning if there is an auxiliary variable, as the setting does not have any effect
if self.has_auxiliary_variable():
with warnings.catch_warnings():
warnings.simplefilter("always", RuntimeWarning)
warnings.warn("You are trying to assign to a parameter which is either linked or "
"has auxiliary variables. The assignment has no effect.", RuntimeWarning)
# Save the value as a pure floating point to avoid the overhead of the astropy.units machinery when
# not needed
if self._transformation is None:
new_internal_value = new_value
else:
new_internal_value = self._transformation.forward(new_value)
# If the parameter has changed, update its value and call the callbacks if needed
if new_internal_value != self._internal_value:
# Update
self._internal_value = new_internal_value
# Call the callbacks (if any)
for callback in self._callbacks:
try:
callback(self)
except:
raise NotCallableOrErrorInCall("Could not call callback for parameter %s" % self.name)
|
python
|
def _set_value(self, new_value):
"""Sets the current value of the parameter, ensuring that it is within the allowed range."""
if self.min_value is not None and new_value < self.min_value:
raise SettingOutOfBounds(
"Trying to set parameter {0} = {1}, which is less than the minimum allowed {2}".format(
self.name, new_value, self.min_value))
if self.max_value is not None and new_value > self.max_value:
raise SettingOutOfBounds(
"Trying to set parameter {0} = {1}, which is more than the maximum allowed {2}".format(
self.name, new_value, self.max_value))
# Issue a warning if there is an auxiliary variable, as the setting does not have any effect
if self.has_auxiliary_variable():
with warnings.catch_warnings():
warnings.simplefilter("always", RuntimeWarning)
warnings.warn("You are trying to assign to a parameter which is either linked or "
"has auxiliary variables. The assignment has no effect.", RuntimeWarning)
# Save the value as a pure floating point to avoid the overhead of the astropy.units machinery when
# not needed
if self._transformation is None:
new_internal_value = new_value
else:
new_internal_value = self._transformation.forward(new_value)
# If the parameter has changed, update its value and call the callbacks if needed
if new_internal_value != self._internal_value:
# Update
self._internal_value = new_internal_value
# Call the callbacks (if any)
for callback in self._callbacks:
try:
callback(self)
except:
raise NotCallableOrErrorInCall("Could not call callback for parameter %s" % self.name)
|
[
"def",
"_set_value",
"(",
"self",
",",
"new_value",
")",
":",
"if",
"self",
".",
"min_value",
"is",
"not",
"None",
"and",
"new_value",
"<",
"self",
".",
"min_value",
":",
"raise",
"SettingOutOfBounds",
"(",
"\"Trying to set parameter {0} = {1}, which is less than the minimum allowed {2}\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"new_value",
",",
"self",
".",
"min_value",
")",
")",
"if",
"self",
".",
"max_value",
"is",
"not",
"None",
"and",
"new_value",
">",
"self",
".",
"max_value",
":",
"raise",
"SettingOutOfBounds",
"(",
"\"Trying to set parameter {0} = {1}, which is more than the maximum allowed {2}\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"new_value",
",",
"self",
".",
"max_value",
")",
")",
"# Issue a warning if there is an auxiliary variable, as the setting does not have any effect",
"if",
"self",
".",
"has_auxiliary_variable",
"(",
")",
":",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"\"always\"",
",",
"RuntimeWarning",
")",
"warnings",
".",
"warn",
"(",
"\"You are trying to assign to a parameter which is either linked or \"",
"\"has auxiliary variables. The assignment has no effect.\"",
",",
"RuntimeWarning",
")",
"# Save the value as a pure floating point to avoid the overhead of the astropy.units machinery when",
"# not needed",
"if",
"self",
".",
"_transformation",
"is",
"None",
":",
"new_internal_value",
"=",
"new_value",
"else",
":",
"new_internal_value",
"=",
"self",
".",
"_transformation",
".",
"forward",
"(",
"new_value",
")",
"# If the parameter has changed, update its value and call the callbacks if needed",
"if",
"new_internal_value",
"!=",
"self",
".",
"_internal_value",
":",
"# Update",
"self",
".",
"_internal_value",
"=",
"new_internal_value",
"# Call the callbacks (if any)",
"for",
"callback",
"in",
"self",
".",
"_callbacks",
":",
"try",
":",
"callback",
"(",
"self",
")",
"except",
":",
"raise",
"NotCallableOrErrorInCall",
"(",
"\"Could not call callback for parameter %s\"",
"%",
"self",
".",
"name",
")"
] |
Sets the current value of the parameter, ensuring that it is within the allowed range.
|
[
"Sets",
"the",
"current",
"value",
"of",
"the",
"parameter",
"ensuring",
"that",
"it",
"is",
"within",
"the",
"allowed",
"range",
"."
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/parameter.py#L420-L471
|
train
|
threeML/astromodels
|
astromodels/core/parameter.py
|
ParameterBase._set_internal_value
|
def _set_internal_value(self, new_internal_value):
"""
This is supposed to be only used by fitting engines
:param new_internal_value: new value in internal representation
:return: none
"""
if new_internal_value != self._internal_value:
self._internal_value = new_internal_value
# Call callbacks if any
for callback in self._callbacks:
callback(self)
|
python
|
def _set_internal_value(self, new_internal_value):
"""
This is supposed to be only used by fitting engines
:param new_internal_value: new value in internal representation
:return: none
"""
if new_internal_value != self._internal_value:
self._internal_value = new_internal_value
# Call callbacks if any
for callback in self._callbacks:
callback(self)
|
[
"def",
"_set_internal_value",
"(",
"self",
",",
"new_internal_value",
")",
":",
"if",
"new_internal_value",
"!=",
"self",
".",
"_internal_value",
":",
"self",
".",
"_internal_value",
"=",
"new_internal_value",
"# Call callbacks if any",
"for",
"callback",
"in",
"self",
".",
"_callbacks",
":",
"callback",
"(",
"self",
")"
] |
This is supposed to be only used by fitting engines
:param new_internal_value: new value in internal representation
:return: none
|
[
"This",
"is",
"supposed",
"to",
"be",
"only",
"used",
"by",
"fitting",
"engines"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/parameter.py#L491-L507
|
train
|
threeML/astromodels
|
astromodels/core/parameter.py
|
ParameterBase._set_min_value
|
def _set_min_value(self, min_value):
"""Sets current minimum allowed value"""
# Check that the min value can be transformed if a transformation is present
if self._transformation is not None:
if min_value is not None:
try:
_ = self._transformation.forward(min_value)
except FloatingPointError:
raise ValueError("The provided minimum %s cannot be transformed with the transformation %s which "
"is defined for the parameter %s" % (min_value,
type(self._transformation),
self.path))
# Store the minimum as a pure float
self._external_min_value = min_value
# Check that the current value of the parameter is still within the boundaries. If not, issue a warning
if self._external_min_value is not None and self.value < self._external_min_value:
warnings.warn("The current value of the parameter %s (%s) "
"was below the new minimum %s." % (self.name, self.value, self._external_min_value),
exceptions.RuntimeWarning)
self.value = self._external_min_value
|
python
|
def _set_min_value(self, min_value):
"""Sets current minimum allowed value"""
# Check that the min value can be transformed if a transformation is present
if self._transformation is not None:
if min_value is not None:
try:
_ = self._transformation.forward(min_value)
except FloatingPointError:
raise ValueError("The provided minimum %s cannot be transformed with the transformation %s which "
"is defined for the parameter %s" % (min_value,
type(self._transformation),
self.path))
# Store the minimum as a pure float
self._external_min_value = min_value
# Check that the current value of the parameter is still within the boundaries. If not, issue a warning
if self._external_min_value is not None and self.value < self._external_min_value:
warnings.warn("The current value of the parameter %s (%s) "
"was below the new minimum %s." % (self.name, self.value, self._external_min_value),
exceptions.RuntimeWarning)
self.value = self._external_min_value
|
[
"def",
"_set_min_value",
"(",
"self",
",",
"min_value",
")",
":",
"# Check that the min value can be transformed if a transformation is present",
"if",
"self",
".",
"_transformation",
"is",
"not",
"None",
":",
"if",
"min_value",
"is",
"not",
"None",
":",
"try",
":",
"_",
"=",
"self",
".",
"_transformation",
".",
"forward",
"(",
"min_value",
")",
"except",
"FloatingPointError",
":",
"raise",
"ValueError",
"(",
"\"The provided minimum %s cannot be transformed with the transformation %s which \"",
"\"is defined for the parameter %s\"",
"%",
"(",
"min_value",
",",
"type",
"(",
"self",
".",
"_transformation",
")",
",",
"self",
".",
"path",
")",
")",
"# Store the minimum as a pure float",
"self",
".",
"_external_min_value",
"=",
"min_value",
"# Check that the current value of the parameter is still within the boundaries. If not, issue a warning",
"if",
"self",
".",
"_external_min_value",
"is",
"not",
"None",
"and",
"self",
".",
"value",
"<",
"self",
".",
"_external_min_value",
":",
"warnings",
".",
"warn",
"(",
"\"The current value of the parameter %s (%s) \"",
"\"was below the new minimum %s.\"",
"%",
"(",
"self",
".",
"name",
",",
"self",
".",
"value",
",",
"self",
".",
"_external_min_value",
")",
",",
"exceptions",
".",
"RuntimeWarning",
")",
"self",
".",
"value",
"=",
"self",
".",
"_external_min_value"
] |
Sets current minimum allowed value
|
[
"Sets",
"current",
"minimum",
"allowed",
"value"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/parameter.py#L518-L550
|
train
|
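A sketch of the clamping behaviour implemented by _set_min_value: raising the minimum above the current value issues a RuntimeWarning and moves the value onto the new minimum (Parameter construction is hypothetical):

import warnings
from astromodels.core.parameter import Parameter

p = Parameter('norm', 1.0)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    p.min_value = 2.0   # new minimum lies above the current value

print(p.value)       # 2.0 -> the value was clamped to the new minimum
print(len(caught))   # at least one RuntimeWarning was recorded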
threeML/astromodels
|
astromodels/core/parameter.py
|
ParameterBase._set_max_value
|
def _set_max_value(self, max_value):
"""Sets current maximum allowed value"""
self._external_max_value = max_value
# Check that the current value of the parameter is still within the boundaries. If not, issue a warning
if self._external_max_value is not None and self.value > self._external_max_value:
warnings.warn("The current value of the parameter %s (%s) "
"was above the new maximum %s." % (self.name, self.value, self._external_max_value),
exceptions.RuntimeWarning)
self.value = self._external_max_value
|
python
|
def _set_max_value(self, max_value):
"""Sets current maximum allowed value"""
self._external_max_value = max_value
# Check that the current value of the parameter is still within the boundaries. If not, issue a warning
if self._external_max_value is not None and self.value > self._external_max_value:
warnings.warn("The current value of the parameter %s (%s) "
"was above the new maximum %s." % (self.name, self.value, self._external_max_value),
exceptions.RuntimeWarning)
self.value = self._external_max_value
|
[
"def",
"_set_max_value",
"(",
"self",
",",
"max_value",
")",
":",
"self",
".",
"_external_max_value",
"=",
"max_value",
"# Check that the current value of the parameter is still within the boundaries. If not, issue a warning",
"if",
"self",
".",
"_external_max_value",
"is",
"not",
"None",
"and",
"self",
".",
"value",
">",
"self",
".",
"_external_max_value",
":",
"warnings",
".",
"warn",
"(",
"\"The current value of the parameter %s (%s) \"",
"\"was above the new maximum %s.\"",
"%",
"(",
"self",
".",
"name",
",",
"self",
".",
"value",
",",
"self",
".",
"_external_max_value",
")",
",",
"exceptions",
".",
"RuntimeWarning",
")",
"self",
".",
"value",
"=",
"self",
".",
"_external_max_value"
] |
Sets current maximum allowed value
|
[
"Sets",
"current",
"maximum",
"allowed",
"value"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/parameter.py#L600-L612
|
train
|
threeML/astromodels
|
astromodels/core/parameter.py
|
ParameterBase._set_bounds
|
def _set_bounds(self, bounds):
"""Sets the boundaries for this parameter to min_value and max_value"""
# Use the properties so that the checks and the handling of units are made automatically
min_value, max_value = bounds
# Remove old boundaries to avoid problems with the new one, if the current value was within the old boundaries
# but is not within the new ones (it will then be adjusted automatically later)
self.min_value = None
self.max_value = None
self.min_value = min_value
self.max_value = max_value
|
python
|
def _set_bounds(self, bounds):
"""Sets the boundaries for this parameter to min_value and max_value"""
# Use the properties so that the checks and the handling of units are made automatically
min_value, max_value = bounds
# Remove old boundaries to avoid problems with the new one, if the current value was within the old boundaries
# but is not within the new ones (it will then be adjusted automatically later)
self.min_value = None
self.max_value = None
self.min_value = min_value
self.max_value = max_value
|
[
"def",
"_set_bounds",
"(",
"self",
",",
"bounds",
")",
":",
"# Use the properties so that the checks and the handling of units are made automatically",
"min_value",
",",
"max_value",
"=",
"bounds",
"# Remove old boundaries to avoid problems with the new one, if the current value was within the old boundaries",
"# but is not within the new ones (it will then be adjusted automatically later)",
"self",
".",
"min_value",
"=",
"None",
"self",
".",
"max_value",
"=",
"None",
"self",
".",
"min_value",
"=",
"min_value",
"self",
".",
"max_value",
"=",
"max_value"
] |
Sets the boundaries for this parameter to min_value and max_value
|
[
"Sets",
"the",
"boundaries",
"for",
"this",
"parameter",
"to",
"min_value",
"and",
"max_value"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/parameter.py#L653-L667
|
train
|
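The two bound setters are usually driven through the bounds property backed by _set_bounds; a short sketch (hypothetical parameter, property name assumed from the setter above):

from astromodels.core.parameter import Parameter

p = Parameter('photon_index', -2.0)

# Both edges in one assignment; the old bounds are cleared first so the
# current value cannot be caught between inconsistent old and new limits
p.bounds = (-5.0, 5.0)

print(p.min_value, p.max_value)   # -5.0 5.0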
threeML/astromodels
|
astromodels/core/parameter.py
|
Parameter._set_prior
|
def _set_prior(self, prior):
"""Set prior for this parameter. The prior must be a function accepting the current value of the parameter
as input and giving the probability density as output."""
if prior is None:
# Removing prior
self._prior = None
else:
# Try and call the prior with the current value of the parameter
try:
_ = prior(self.value)
except:
raise NotCallableOrErrorInCall("Could not call the provided prior. " +
"Is it a function accepting the current value of the parameter?")
try:
prior.set_units(self.unit, u.dimensionless_unscaled)
except AttributeError:
                raise NotCallableOrErrorInCall("It looks like the provided prior is not an astromodels function.")
self._prior = prior
|
python
|
def _set_prior(self, prior):
"""Set prior for this parameter. The prior must be a function accepting the current value of the parameter
as input and giving the probability density as output."""
if prior is None:
# Removing prior
self._prior = None
else:
# Try and call the prior with the current value of the parameter
try:
_ = prior(self.value)
except:
raise NotCallableOrErrorInCall("Could not call the provided prior. " +
"Is it a function accepting the current value of the parameter?")
try:
prior.set_units(self.unit, u.dimensionless_unscaled)
except AttributeError:
                raise NotCallableOrErrorInCall("It looks like the provided prior is not an astromodels function.")
self._prior = prior
|
[
"def",
"_set_prior",
"(",
"self",
",",
"prior",
")",
":",
"if",
"prior",
"is",
"None",
":",
"# Removing prior",
"self",
".",
"_prior",
"=",
"None",
"else",
":",
"# Try and call the prior with the current value of the parameter",
"try",
":",
"_",
"=",
"prior",
"(",
"self",
".",
"value",
")",
"except",
":",
"raise",
"NotCallableOrErrorInCall",
"(",
"\"Could not call the provided prior. \"",
"+",
"\"Is it a function accepting the current value of the parameter?\"",
")",
"try",
":",
"prior",
".",
"set_units",
"(",
"self",
".",
"unit",
",",
"u",
".",
"dimensionless_unscaled",
")",
"except",
"AttributeError",
":",
"raise",
"NotCallableOrErrorInCall",
"(",
"\"It looks like the provided prior is not a astromodels function.\"",
")",
"self",
".",
"_prior",
"=",
"prior"
] |
Set prior for this parameter. The prior must be a function accepting the current value of the parameter
as input and giving the probability density as output.
|
[
"Set",
"prior",
"for",
"this",
"parameter",
".",
"The",
"prior",
"must",
"be",
"a",
"function",
"accepting",
"the",
"current",
"value",
"of",
"the",
"parameter",
"as",
"input",
"and",
"giving",
"the",
"probability",
"density",
"as",
"output",
"."
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/parameter.py#L916-L946
|
train
|
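A sketch of assigning a prior through the property backed by _set_prior (assuming Parameter and Uniform_prior are exposed by the top-level astromodels package; the setter itself performs the test call and the unit setup shown above):

from astromodels import Parameter, Uniform_prior

p = Parameter('K', 1.0, min_value=0.0, max_value=10.0)

p.prior = Uniform_prior(lower_bound=0.0, upper_bound=10.0)
print(p.prior(p.value))   # probability density evaluated at the current value

p.prior = None            # removes the prior again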
threeML/astromodels
|
astromodels/core/parameter.py
|
Parameter.set_uninformative_prior
|
def set_uninformative_prior(self, prior_class):
"""
Sets the prior for the parameter to a uniform prior between the current minimum and maximum, or a
log-uniform prior between the current minimum and maximum.
NOTE: if the current minimum and maximum are not defined, the default bounds for the prior class will be used.
:param prior_class : the class to be used as prior (either Log_uniform_prior or Uniform_prior, or a class which
                             provides lower_bound and upper_bound properties)
:return: (none)
"""
prior_instance = prior_class()
if self.min_value is None:
raise ParameterMustHaveBounds("Parameter %s does not have a defined minimum. Set one first, then re-run "
"set_uninformative_prior" % self.path)
else:
try:
prior_instance.lower_bound = self.min_value
except SettingOutOfBounds:
raise SettingOutOfBounds("Cannot use minimum of %s for prior %s" % (self.min_value,
prior_instance.name))
if self.max_value is None:
raise ParameterMustHaveBounds("Parameter %s does not have a defined maximum. Set one first, then re-run "
"set_uninformative_prior" % self.path)
else: # pragma: no cover
try:
prior_instance.upper_bound = self.max_value
except SettingOutOfBounds:
raise SettingOutOfBounds("Cannot use maximum of %s for prior %s" % (self.max_value,
prior_instance.name))
assert np.isfinite(prior_instance.upper_bound.value),"The parameter %s must have a finite maximum" % self.name
assert np.isfinite(prior_instance.lower_bound.value),"The parameter %s must have a finite minimum" % self.name
self._set_prior(prior_instance)
|
python
|
def set_uninformative_prior(self, prior_class):
"""
Sets the prior for the parameter to a uniform prior between the current minimum and maximum, or a
log-uniform prior between the current minimum and maximum.
NOTE: if the current minimum and maximum are not defined, the default bounds for the prior class will be used.
:param prior_class : the class to be used as prior (either Log_uniform_prior or Uniform_prior, or a class which
                             provides lower_bound and upper_bound properties)
:return: (none)
"""
prior_instance = prior_class()
if self.min_value is None:
raise ParameterMustHaveBounds("Parameter %s does not have a defined minimum. Set one first, then re-run "
"set_uninformative_prior" % self.path)
else:
try:
prior_instance.lower_bound = self.min_value
except SettingOutOfBounds:
raise SettingOutOfBounds("Cannot use minimum of %s for prior %s" % (self.min_value,
prior_instance.name))
if self.max_value is None:
raise ParameterMustHaveBounds("Parameter %s does not have a defined maximum. Set one first, then re-run "
"set_uninformative_prior" % self.path)
else: # pragma: no cover
try:
prior_instance.upper_bound = self.max_value
except SettingOutOfBounds:
raise SettingOutOfBounds("Cannot use maximum of %s for prior %s" % (self.max_value,
prior_instance.name))
assert np.isfinite(prior_instance.upper_bound.value),"The parameter %s must have a finite maximum" % self.name
assert np.isfinite(prior_instance.lower_bound.value),"The parameter %s must have a finite minimum" % self.name
self._set_prior(prior_instance)
|
[
"def",
"set_uninformative_prior",
"(",
"self",
",",
"prior_class",
")",
":",
"prior_instance",
"=",
"prior_class",
"(",
")",
"if",
"self",
".",
"min_value",
"is",
"None",
":",
"raise",
"ParameterMustHaveBounds",
"(",
"\"Parameter %s does not have a defined minimum. Set one first, then re-run \"",
"\"set_uninformative_prior\"",
"%",
"self",
".",
"path",
")",
"else",
":",
"try",
":",
"prior_instance",
".",
"lower_bound",
"=",
"self",
".",
"min_value",
"except",
"SettingOutOfBounds",
":",
"raise",
"SettingOutOfBounds",
"(",
"\"Cannot use minimum of %s for prior %s\"",
"%",
"(",
"self",
".",
"min_value",
",",
"prior_instance",
".",
"name",
")",
")",
"if",
"self",
".",
"max_value",
"is",
"None",
":",
"raise",
"ParameterMustHaveBounds",
"(",
"\"Parameter %s does not have a defined maximum. Set one first, then re-run \"",
"\"set_uninformative_prior\"",
"%",
"self",
".",
"path",
")",
"else",
":",
"# pragma: no cover",
"try",
":",
"prior_instance",
".",
"upper_bound",
"=",
"self",
".",
"max_value",
"except",
"SettingOutOfBounds",
":",
"raise",
"SettingOutOfBounds",
"(",
"\"Cannot use maximum of %s for prior %s\"",
"%",
"(",
"self",
".",
"max_value",
",",
"prior_instance",
".",
"name",
")",
")",
"assert",
"np",
".",
"isfinite",
"(",
"prior_instance",
".",
"upper_bound",
".",
"value",
")",
",",
"\"The parameter %s must have a finite maximum\"",
"%",
"self",
".",
"name",
"assert",
"np",
".",
"isfinite",
"(",
"prior_instance",
".",
"lower_bound",
".",
"value",
")",
",",
"\"The parameter %s must have a finite minimum\"",
"%",
"self",
".",
"name",
"self",
".",
"_set_prior",
"(",
"prior_instance",
")"
] |
Sets the prior for the parameter to a uniform prior between the current minimum and maximum, or a
log-uniform prior between the current minimum and maximum.
NOTE: if the current minimum and maximum are not defined, the default bounds for the prior class will be used.
:param prior_class : the class to be used as prior (either Log_uniform_prior or Uniform_prior, or a class which
                     provides lower_bound and upper_bound properties)
:return: (none)
|
[
"Sets",
"the",
"prior",
"for",
"the",
"parameter",
"to",
"a",
"uniform",
"prior",
"between",
"the",
"current",
"minimum",
"and",
"maximum",
"or",
"a",
"log",
"-",
"uniform",
"prior",
"between",
"the",
"current",
"minimum",
"and",
"maximum",
"."
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/parameter.py#L962-L1011
|
train
|
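Typical use of set_uninformative_prior: finite bounds must be in place first, then the prior instance is built internally and its bounds are copied from the parameter (Log_uniform_prior is one of the two prior classes named in the docstring; the parameter is hypothetical):

from astromodels import Parameter, Log_uniform_prior

K = Parameter('K', 1e-3, min_value=1e-6, max_value=1.0)
K.set_uninformative_prior(Log_uniform_prior)

print(K.prior.lower_bound.value, K.prior.upper_bound.value)   # 1e-06 1.0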
threeML/astromodels
|
astromodels/core/parameter.py
|
Parameter.remove_auxiliary_variable
|
def remove_auxiliary_variable(self):
"""
Remove an existing auxiliary variable
:return:
"""
if not self.has_auxiliary_variable():
# do nothing, but print a warning
warnings.warn("Cannot remove a non-existing auxiliary variable", RuntimeWarning)
else:
# Remove the law from the children
self._remove_child(self._aux_variable['law'].name)
# Clean up the dictionary
self._aux_variable = {}
# Set the parameter to the status it has before the auxiliary variable was created
self.free = self._old_free
|
python
|
def remove_auxiliary_variable(self):
"""
Remove an existing auxiliary variable
:return:
"""
if not self.has_auxiliary_variable():
# do nothing, but print a warning
warnings.warn("Cannot remove a non-existing auxiliary variable", RuntimeWarning)
else:
# Remove the law from the children
self._remove_child(self._aux_variable['law'].name)
# Clean up the dictionary
self._aux_variable = {}
# Set the parameter to the status it has before the auxiliary variable was created
self.free = self._old_free
|
[
"def",
"remove_auxiliary_variable",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"has_auxiliary_variable",
"(",
")",
":",
"# do nothing, but print a warning",
"warnings",
".",
"warn",
"(",
"\"Cannot remove a non-existing auxiliary variable\"",
",",
"RuntimeWarning",
")",
"else",
":",
"# Remove the law from the children",
"self",
".",
"_remove_child",
"(",
"self",
".",
"_aux_variable",
"[",
"'law'",
"]",
".",
"name",
")",
"# Clean up the dictionary",
"self",
".",
"_aux_variable",
"=",
"{",
"}",
"# Set the parameter to the status it has before the auxiliary variable was created",
"self",
".",
"free",
"=",
"self",
".",
"_old_free"
] |
Remove an existing auxiliary variable
:return:
|
[
"Remove",
"an",
"existing",
"auxiliary",
"variable"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/parameter.py#L1077-L1102
|
train
|
threeML/astromodels
|
astromodels/core/tree.py
|
OldNode._get_child_from_path
|
def _get_child_from_path(self, path):
"""
        Return a child below this level, starting from a path of the kind "this_level.something.something.name"
:param path: the key
:return: the child
"""
keys = path.split(".")
this_child = self
for key in keys:
try:
this_child = this_child._get_child(key)
except KeyError:
raise KeyError("Child %s not found" % path)
return this_child
|
python
|
def _get_child_from_path(self, path):
"""
        Return a child below this level, starting from a path of the kind "this_level.something.something.name"
:param path: the key
:return: the child
"""
keys = path.split(".")
this_child = self
for key in keys:
try:
this_child = this_child._get_child(key)
except KeyError:
raise KeyError("Child %s not found" % path)
return this_child
|
[
"def",
"_get_child_from_path",
"(",
"self",
",",
"path",
")",
":",
"keys",
"=",
"path",
".",
"split",
"(",
"\".\"",
")",
"this_child",
"=",
"self",
"for",
"key",
"in",
"keys",
":",
"try",
":",
"this_child",
"=",
"this_child",
".",
"_get_child",
"(",
"key",
")",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"\"Child %s not found\"",
"%",
"path",
")",
"return",
"this_child"
] |
Return a child below this level, starting from a path of the kind "this_level.something.something.name"
:param path: the key
:return: the child
|
[
"Return",
"a",
"children",
"below",
"this",
"level",
"starting",
"from",
"a",
"path",
"of",
"the",
"kind",
"this_level",
".",
"something",
".",
"something",
".",
"name"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/tree.py#L210-L232
|
train
|
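A sketch of resolving a dotted path on a model tree with this helper (the exact path segments depend on how the model was assembled; the source and function names below are illustrative):

from astromodels import Model, PointSource, Powerlaw

src = PointSource('src', ra=10.0, dec=-30.0, spectral_shape=Powerlaw())
model = Model(src)

# Walks the tree one dotted segment at a time
index = model._get_child_from_path('src.spectrum.main.Powerlaw.index')
print(index.value)

# A missing segment raises KeyError("Child ... not found")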
threeML/astromodels
|
astromodels/core/tree.py
|
OldNode._find_instances
|
def _find_instances(self, cls):
"""
Find all the instances of cls below this node.
:return: a dictionary of instances of cls
"""
instances = collections.OrderedDict()
for child_name, child in self._children.iteritems():
if isinstance(child, cls):
key_name = ".".join(child._get_path())
instances[key_name] = child
# Now check if the instance has children,
# and if it does go deeper in the tree
# NOTE: an empty dictionary evaluate as False
if child._children:
instances.update(child._find_instances(cls))
else:
instances.update(child._find_instances(cls))
return instances
|
python
|
def _find_instances(self, cls):
"""
Find all the instances of cls below this node.
:return: a dictionary of instances of cls
"""
instances = collections.OrderedDict()
for child_name, child in self._children.iteritems():
if isinstance(child, cls):
key_name = ".".join(child._get_path())
instances[key_name] = child
# Now check if the instance has children,
# and if it does go deeper in the tree
# NOTE: an empty dictionary evaluate as False
if child._children:
instances.update(child._find_instances(cls))
else:
instances.update(child._find_instances(cls))
return instances
|
[
"def",
"_find_instances",
"(",
"self",
",",
"cls",
")",
":",
"instances",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"for",
"child_name",
",",
"child",
"in",
"self",
".",
"_children",
".",
"iteritems",
"(",
")",
":",
"if",
"isinstance",
"(",
"child",
",",
"cls",
")",
":",
"key_name",
"=",
"\".\"",
".",
"join",
"(",
"child",
".",
"_get_path",
"(",
")",
")",
"instances",
"[",
"key_name",
"]",
"=",
"child",
"# Now check if the instance has children,",
"# and if it does go deeper in the tree",
"# NOTE: an empty dictionary evaluate as False",
"if",
"child",
".",
"_children",
":",
"instances",
".",
"update",
"(",
"child",
".",
"_find_instances",
"(",
"cls",
")",
")",
"else",
":",
"instances",
".",
"update",
"(",
"child",
".",
"_find_instances",
"(",
"cls",
")",
")",
"return",
"instances"
] |
Find all the instances of cls below this node.
:return: a dictionary of instances of cls
|
[
"Find",
"all",
"the",
"instances",
"of",
"cls",
"below",
"this",
"node",
"."
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/tree.py#L299-L329
|
train
|
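_find_instances walks the whole tree and returns an OrderedDict keyed by dotted path; a sketch that collects every Parameter in a model (model construction is illustrative, assuming the model tree is built on the node class shown here):

from astromodels import Model, PointSource, Powerlaw
from astromodels.core.parameter import Parameter

model = Model(PointSource('src', ra=10.0, dec=-30.0, spectral_shape=Powerlaw()))

# e.g. "src.spectrum.main.Powerlaw.K" -> Parameter instance
for path, parameter in model._find_instances(Parameter).items():
    print(path, parameter.value)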
threeML/astromodels
|
setup.py
|
find_library
|
def find_library(library_root, additional_places=None):
"""
Returns the name of the library without extension
:param library_root: root of the library to search, for example "cfitsio_" will match libcfitsio_1.2.3.4.so
:return: the name of the library found (NOTE: this is *not* the path), and a directory path if the library is not
in the system paths (and None otherwise). The name of libcfitsio_1.2.3.4.so will be cfitsio_1.2.3.4, in other words,
it will be what is needed to be passed to the linker during a c/c++ compilation, in the -l option
"""
# find_library searches for all system paths in a system independent way (but NOT those defined in
# LD_LIBRARY_PATH or DYLD_LIBRARY_PATH)
first_guess = ctypes.util.find_library(library_root)
if first_guess is not None:
# Found in one of the system paths
if sys.platform.lower().find("linux") >= 0:
# On linux the linker already knows about these paths, so we
# can return None as path
return sanitize_lib_name(first_guess), None
elif sys.platform.lower().find("darwin") >= 0:
# On Mac we still need to return the path, because the linker sometimes
# does not look into it
return sanitize_lib_name(first_guess), os.path.dirname(first_guess)
else:
# Windows is not supported
raise NotImplementedError("Platform %s is not supported" % sys.platform)
else:
# could not find it. Let's examine LD_LIBRARY_PATH or DYLD_LIBRARY_PATH
        # (if they are not defined, possible_locations will become [""] which will
# be handled by the next loop)
if sys.platform.lower().find("linux") >= 0:
# Unix / linux
possible_locations = os.environ.get("LD_LIBRARY_PATH", "").split(":")
elif sys.platform.lower().find("darwin") >= 0:
# Mac
possible_locations = os.environ.get("DYLD_LIBRARY_PATH", "").split(":")
else:
raise NotImplementedError("Platform %s is not supported" % sys.platform)
if additional_places is not None:
possible_locations.extend(additional_places)
# Now look into the search paths
library_name = None
library_dir = None
for search_path in possible_locations:
if search_path == "":
# This can happen if there are more than one :, or if nor LD_LIBRARY_PATH
# nor DYLD_LIBRARY_PATH are defined (because of the default use above for os.environ.get)
continue
results = glob.glob(os.path.join(search_path, "lib%s*" % library_root))
if len(results) >= 1:
# Results contain things like libXS.so, libXSPlot.so, libXSpippo.so
# If we are looking for libXS.so, we need to make sure that we get the right one!
for result in results:
if re.match("lib%s[\-_\.]" % library_root, os.path.basename(result)) is None:
continue
else:
# FOUND IT
# This is the full path of the library, like /usr/lib/libcfitsio_1.2.3.4
library_name = result
library_dir = search_path
break
else:
continue
if library_name is not None:
break
if library_name is None:
return None, None
else:
# Sanitize the library name to get from the fully-qualified path to just the library name
# (/usr/lib/libgfortran.so.3.0 becomes gfortran)
return sanitize_lib_name(library_name), library_dir
|
python
|
def find_library(library_root, additional_places=None):
"""
Returns the name of the library without extension
:param library_root: root of the library to search, for example "cfitsio_" will match libcfitsio_1.2.3.4.so
:return: the name of the library found (NOTE: this is *not* the path), and a directory path if the library is not
in the system paths (and None otherwise). The name of libcfitsio_1.2.3.4.so will be cfitsio_1.2.3.4, in other words,
it will be what is needed to be passed to the linker during a c/c++ compilation, in the -l option
"""
# find_library searches for all system paths in a system independent way (but NOT those defined in
# LD_LIBRARY_PATH or DYLD_LIBRARY_PATH)
first_guess = ctypes.util.find_library(library_root)
if first_guess is not None:
# Found in one of the system paths
if sys.platform.lower().find("linux") >= 0:
# On linux the linker already knows about these paths, so we
# can return None as path
return sanitize_lib_name(first_guess), None
elif sys.platform.lower().find("darwin") >= 0:
# On Mac we still need to return the path, because the linker sometimes
# does not look into it
return sanitize_lib_name(first_guess), os.path.dirname(first_guess)
else:
# Windows is not supported
raise NotImplementedError("Platform %s is not supported" % sys.platform)
else:
# could not find it. Let's examine LD_LIBRARY_PATH or DYLD_LIBRARY_PATH
        # (if they are not defined, possible_locations will become [""] which will
# be handled by the next loop)
if sys.platform.lower().find("linux") >= 0:
# Unix / linux
possible_locations = os.environ.get("LD_LIBRARY_PATH", "").split(":")
elif sys.platform.lower().find("darwin") >= 0:
# Mac
possible_locations = os.environ.get("DYLD_LIBRARY_PATH", "").split(":")
else:
raise NotImplementedError("Platform %s is not supported" % sys.platform)
if additional_places is not None:
possible_locations.extend(additional_places)
# Now look into the search paths
library_name = None
library_dir = None
for search_path in possible_locations:
if search_path == "":
# This can happen if there are more than one :, or if nor LD_LIBRARY_PATH
# nor DYLD_LIBRARY_PATH are defined (because of the default use above for os.environ.get)
continue
results = glob.glob(os.path.join(search_path, "lib%s*" % library_root))
if len(results) >= 1:
# Results contain things like libXS.so, libXSPlot.so, libXSpippo.so
# If we are looking for libXS.so, we need to make sure that we get the right one!
for result in results:
if re.match("lib%s[\-_\.]" % library_root, os.path.basename(result)) is None:
continue
else:
# FOUND IT
# This is the full path of the library, like /usr/lib/libcfitsio_1.2.3.4
library_name = result
library_dir = search_path
break
else:
continue
if library_name is not None:
break
if library_name is None:
return None, None
else:
# Sanitize the library name to get from the fully-qualified path to just the library name
# (/usr/lib/libgfortran.so.3.0 becomes gfortran)
return sanitize_lib_name(library_name), library_dir
|
[
"def",
"find_library",
"(",
"library_root",
",",
"additional_places",
"=",
"None",
")",
":",
"# find_library searches for all system paths in a system independent way (but NOT those defined in",
"# LD_LIBRARY_PATH or DYLD_LIBRARY_PATH)",
"first_guess",
"=",
"ctypes",
".",
"util",
".",
"find_library",
"(",
"library_root",
")",
"if",
"first_guess",
"is",
"not",
"None",
":",
"# Found in one of the system paths",
"if",
"sys",
".",
"platform",
".",
"lower",
"(",
")",
".",
"find",
"(",
"\"linux\"",
")",
">=",
"0",
":",
"# On linux the linker already knows about these paths, so we",
"# can return None as path",
"return",
"sanitize_lib_name",
"(",
"first_guess",
")",
",",
"None",
"elif",
"sys",
".",
"platform",
".",
"lower",
"(",
")",
".",
"find",
"(",
"\"darwin\"",
")",
">=",
"0",
":",
"# On Mac we still need to return the path, because the linker sometimes",
"# does not look into it",
"return",
"sanitize_lib_name",
"(",
"first_guess",
")",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"first_guess",
")",
"else",
":",
"# Windows is not supported",
"raise",
"NotImplementedError",
"(",
"\"Platform %s is not supported\"",
"%",
"sys",
".",
"platform",
")",
"else",
":",
"# could not find it. Let's examine LD_LIBRARY_PATH or DYLD_LIBRARY_PATH",
"# (if they sanitize_lib_name(first_guess), are not defined, possible_locations will become [\"\"] which will",
"# be handled by the next loop)",
"if",
"sys",
".",
"platform",
".",
"lower",
"(",
")",
".",
"find",
"(",
"\"linux\"",
")",
">=",
"0",
":",
"# Unix / linux",
"possible_locations",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"LD_LIBRARY_PATH\"",
",",
"\"\"",
")",
".",
"split",
"(",
"\":\"",
")",
"elif",
"sys",
".",
"platform",
".",
"lower",
"(",
")",
".",
"find",
"(",
"\"darwin\"",
")",
">=",
"0",
":",
"# Mac",
"possible_locations",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"DYLD_LIBRARY_PATH\"",
",",
"\"\"",
")",
".",
"split",
"(",
"\":\"",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"Platform %s is not supported\"",
"%",
"sys",
".",
"platform",
")",
"if",
"additional_places",
"is",
"not",
"None",
":",
"possible_locations",
".",
"extend",
"(",
"additional_places",
")",
"# Now look into the search paths",
"library_name",
"=",
"None",
"library_dir",
"=",
"None",
"for",
"search_path",
"in",
"possible_locations",
":",
"if",
"search_path",
"==",
"\"\"",
":",
"# This can happen if there are more than one :, or if nor LD_LIBRARY_PATH",
"# nor DYLD_LIBRARY_PATH are defined (because of the default use above for os.environ.get)",
"continue",
"results",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"search_path",
",",
"\"lib%s*\"",
"%",
"library_root",
")",
")",
"if",
"len",
"(",
"results",
")",
">=",
"1",
":",
"# Results contain things like libXS.so, libXSPlot.so, libXSpippo.so",
"# If we are looking for libXS.so, we need to make sure that we get the right one!",
"for",
"result",
"in",
"results",
":",
"if",
"re",
".",
"match",
"(",
"\"lib%s[\\-_\\.]\"",
"%",
"library_root",
",",
"os",
".",
"path",
".",
"basename",
"(",
"result",
")",
")",
"is",
"None",
":",
"continue",
"else",
":",
"# FOUND IT",
"# This is the full path of the library, like /usr/lib/libcfitsio_1.2.3.4",
"library_name",
"=",
"result",
"library_dir",
"=",
"search_path",
"break",
"else",
":",
"continue",
"if",
"library_name",
"is",
"not",
"None",
":",
"break",
"if",
"library_name",
"is",
"None",
":",
"return",
"None",
",",
"None",
"else",
":",
"# Sanitize the library name to get from the fully-qualified path to just the library name",
"# (/usr/lib/libgfortran.so.3.0 becomes gfortran)",
"return",
"sanitize_lib_name",
"(",
"library_name",
")",
",",
"library_dir"
] |
Returns the name of the library without extension
:param library_root: root of the library to search, for example "cfitsio_" will match libcfitsio_1.2.3.4.so
:return: the name of the library found (NOTE: this is *not* the path), and a directory path if the library is not
in the system paths (and None otherwise). The name of libcfitsio_1.2.3.4.so will be cfitsio_1.2.3.4, in other words,
it will be what is needed to be passed to the linker during a c/c++ compilation, in the -l option
|
[
"Returns",
"the",
"name",
"of",
"the",
"library",
"without",
"extension"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/setup.py#L54-L172
|
train
|
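find_library returns a linker-ready name plus, when needed, the directory holding the library; a sketch of how the result could feed a setuptools Extension (values are illustrative, and the call assumes it runs inside the same setup.py where find_library is defined):

import os

extra = [os.path.join(os.environ.get('CONDA_PREFIX', ''), 'lib')]
lib_name, lib_dir = find_library('cfitsio', additional_places=extra)

if lib_name is None:
    raise RuntimeError('cfitsio not found')

libraries = [lib_name]                        # becomes -l<lib_name> at link time
library_dirs = [lib_dir] if lib_dir else []   # only needed outside the system paths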
threeML/astromodels
|
astromodels/utils/table.py
|
dict_to_table
|
def dict_to_table(dictionary, list_of_keys=None):
"""
Return a table representing the dictionary.
:param dictionary: the dictionary to represent
:param list_of_keys: optionally, only the keys in this list will be inserted in the table
:return: a Table instance
"""
# assert len(dictionary.values()) > 0, "Dictionary cannot be empty"
# Create an empty table
table = Table()
# If the dictionary is not empty, fill the table
if len(dictionary) > 0:
# Add the names as first column
table['name'] = dictionary.keys()
# Now add all other properties
# Use the first parameter as prototype
prototype = dictionary.values()[0]
column_names = prototype.keys()
# If we have a white list for the columns, use it
if list_of_keys is not None:
column_names = filter(lambda key: key in list_of_keys, column_names)
# Fill the table
for column_name in column_names:
table[column_name] = map(lambda x: x[column_name], dictionary.values())
return table
|
python
|
def dict_to_table(dictionary, list_of_keys=None):
"""
Return a table representing the dictionary.
:param dictionary: the dictionary to represent
:param list_of_keys: optionally, only the keys in this list will be inserted in the table
:return: a Table instance
"""
# assert len(dictionary.values()) > 0, "Dictionary cannot be empty"
# Create an empty table
table = Table()
# If the dictionary is not empty, fill the table
if len(dictionary) > 0:
# Add the names as first column
table['name'] = dictionary.keys()
# Now add all other properties
# Use the first parameter as prototype
prototype = dictionary.values()[0]
column_names = prototype.keys()
# If we have a white list for the columns, use it
if list_of_keys is not None:
column_names = filter(lambda key: key in list_of_keys, column_names)
# Fill the table
for column_name in column_names:
table[column_name] = map(lambda x: x[column_name], dictionary.values())
return table
|
[
"def",
"dict_to_table",
"(",
"dictionary",
",",
"list_of_keys",
"=",
"None",
")",
":",
"# assert len(dictionary.values()) > 0, \"Dictionary cannot be empty\"",
"# Create an empty table",
"table",
"=",
"Table",
"(",
")",
"# If the dictionary is not empty, fill the table",
"if",
"len",
"(",
"dictionary",
")",
">",
"0",
":",
"# Add the names as first column",
"table",
"[",
"'name'",
"]",
"=",
"dictionary",
".",
"keys",
"(",
")",
"# Now add all other properties",
"# Use the first parameter as prototype",
"prototype",
"=",
"dictionary",
".",
"values",
"(",
")",
"[",
"0",
"]",
"column_names",
"=",
"prototype",
".",
"keys",
"(",
")",
"# If we have a white list for the columns, use it",
"if",
"list_of_keys",
"is",
"not",
"None",
":",
"column_names",
"=",
"filter",
"(",
"lambda",
"key",
":",
"key",
"in",
"list_of_keys",
",",
"column_names",
")",
"# Fill the table",
"for",
"column_name",
"in",
"column_names",
":",
"table",
"[",
"column_name",
"]",
"=",
"map",
"(",
"lambda",
"x",
":",
"x",
"[",
"column_name",
"]",
",",
"dictionary",
".",
"values",
"(",
")",
")",
"return",
"table"
] |
Return a table representing the dictionary.
:param dictionary: the dictionary to represent
:param list_of_keys: optionally, only the keys in this list will be inserted in the table
:return: a Table instance
|
[
"Return",
"a",
"table",
"representing",
"the",
"dictionary",
"."
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/utils/table.py#L6-L49
|
train
|
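dict_to_table expects a mapping of names to dict-like records and is written for Python 2 (dictionary.values()[0] indexing, map() returning a list); a small sketch of its input and output under that assumption:

from collections import OrderedDict
from astromodels.utils.table import dict_to_table

records = OrderedDict()
records['K'] = OrderedDict([('value', 1.0), ('min_value', 0.0), ('max_value', 10.0)])
records['index'] = OrderedDict([('value', -2.0), ('min_value', -5.0), ('max_value', 0.0)])

# One row per key; columns come from the first record, optionally whitelisted
table = dict_to_table(records, list_of_keys=['value', 'min_value'])
print(table)   # columns: name, value, min_value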
threeML/astromodels
|
astromodels/utils/table.py
|
Table._base_repr_
|
def _base_repr_(self, html=False, show_name=True, **kwargs):
"""
Override the method in the astropy.Table class
to avoid displaying the description, and the format
of the columns
"""
table_id = 'table{id}'.format(id=id(self))
data_lines, outs = self.formatter._pformat_table(self,
tableid=table_id, html=html, max_width=(-1 if html else None),
show_name=show_name, show_unit=None, show_dtype=False)
out = '\n'.join(data_lines)
# if astropy.table.six.PY2 and isinstance(out, astropy.table.six.text_type):
# out = out.encode('utf-8')
return out
|
python
|
def _base_repr_(self, html=False, show_name=True, **kwargs):
"""
Override the method in the astropy.Table class
to avoid displaying the description, and the format
of the columns
"""
table_id = 'table{id}'.format(id=id(self))
data_lines, outs = self.formatter._pformat_table(self,
tableid=table_id, html=html, max_width=(-1 if html else None),
show_name=show_name, show_unit=None, show_dtype=False)
out = '\n'.join(data_lines)
# if astropy.table.six.PY2 and isinstance(out, astropy.table.six.text_type):
# out = out.encode('utf-8')
return out
|
[
"def",
"_base_repr_",
"(",
"self",
",",
"html",
"=",
"False",
",",
"show_name",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"table_id",
"=",
"'table{id}'",
".",
"format",
"(",
"id",
"=",
"id",
"(",
"self",
")",
")",
"data_lines",
",",
"outs",
"=",
"self",
".",
"formatter",
".",
"_pformat_table",
"(",
"self",
",",
"tableid",
"=",
"table_id",
",",
"html",
"=",
"html",
",",
"max_width",
"=",
"(",
"-",
"1",
"if",
"html",
"else",
"None",
")",
",",
"show_name",
"=",
"show_name",
",",
"show_unit",
"=",
"None",
",",
"show_dtype",
"=",
"False",
")",
"out",
"=",
"'\\n'",
".",
"join",
"(",
"data_lines",
")",
"# if astropy.table.six.PY2 and isinstance(out, astropy.table.six.text_type):",
"# out = out.encode('utf-8')",
"return",
"out"
] |
Override the method in the astropy.Table class
to avoid displaying the description, and the format
of the columns
|
[
"Override",
"the",
"method",
"in",
"the",
"astropy",
".",
"Table",
"class",
"to",
"avoid",
"displaying",
"the",
"description",
"and",
"the",
"format",
"of",
"the",
"columns"
] |
9aac365a372f77603039533df9a6b694c1e360d5
|
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/utils/table.py#L60-L78
|
train
|
eamigo86/graphene-django-extras
|
graphene_django_extras/views.py
|
ExtraGraphQLView.fetch_cache_key
|
def fetch_cache_key(request):
""" Returns a hashed cache key. """
m = hashlib.md5()
m.update(request.body)
return m.hexdigest()
|
python
|
def fetch_cache_key(request):
""" Returns a hashed cache key. """
m = hashlib.md5()
m.update(request.body)
return m.hexdigest()
|
[
"def",
"fetch_cache_key",
"(",
"request",
")",
":",
"m",
"=",
"hashlib",
".",
"md5",
"(",
")",
"m",
".",
"update",
"(",
"request",
".",
"body",
")",
"return",
"m",
".",
"hexdigest",
"(",
")"
] |
Returns a hashed cache key.
|
[
"Returns",
"a",
"hashed",
"cache",
"key",
"."
] |
b27fd6b5128f6b6a500a8b7a497d76be72d6a232
|
https://github.com/eamigo86/graphene-django-extras/blob/b27fd6b5128f6b6a500a8b7a497d76be72d6a232/graphene_django_extras/views.py#L42-L47
|
train
|
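fetch_cache_key hashes the raw request body, so two requests with byte-identical GraphQL payloads map to the same cache entry; a minimal sketch that reproduces the key outside the view (the "_graplql_" prefix comes from dispatch below):

import hashlib

def graphql_cache_key(body_bytes):
    # Mirrors fetch_cache_key: md5 over the raw request body
    return hashlib.md5(body_bytes).hexdigest()

key = '_graplql_{}'.format(graphql_cache_key(b'{"query": "{ users { id } }"}'))
print(key)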
eamigo86/graphene-django-extras
|
graphene_django_extras/views.py
|
ExtraGraphQLView.dispatch
|
def dispatch(self, request, *args, **kwargs):
""" Fetches queried data from graphql and returns cached & hashed key. """
if not graphql_api_settings.CACHE_ACTIVE:
return self.super_call(request, *args, **kwargs)
cache = caches["default"]
operation_ast = self.get_operation_ast(request)
if operation_ast and operation_ast.operation == "mutation":
cache.clear()
return self.super_call(request, *args, **kwargs)
cache_key = "_graplql_{}".format(self.fetch_cache_key(request))
response = cache.get(cache_key)
if not response:
response = self.super_call(request, *args, **kwargs)
# cache key and value
cache.set(cache_key, response, timeout=graphql_api_settings.CACHE_TIMEOUT)
return response
|
python
|
def dispatch(self, request, *args, **kwargs):
""" Fetches queried data from graphql and returns cached & hashed key. """
if not graphql_api_settings.CACHE_ACTIVE:
return self.super_call(request, *args, **kwargs)
cache = caches["default"]
operation_ast = self.get_operation_ast(request)
if operation_ast and operation_ast.operation == "mutation":
cache.clear()
return self.super_call(request, *args, **kwargs)
cache_key = "_graplql_{}".format(self.fetch_cache_key(request))
response = cache.get(cache_key)
if not response:
response = self.super_call(request, *args, **kwargs)
# cache key and value
cache.set(cache_key, response, timeout=graphql_api_settings.CACHE_TIMEOUT)
return response
|
[
"def",
"dispatch",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"graphql_api_settings",
".",
"CACHE_ACTIVE",
":",
"return",
"self",
".",
"super_call",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"cache",
"=",
"caches",
"[",
"\"default\"",
"]",
"operation_ast",
"=",
"self",
".",
"get_operation_ast",
"(",
"request",
")",
"if",
"operation_ast",
"and",
"operation_ast",
".",
"operation",
"==",
"\"mutation\"",
":",
"cache",
".",
"clear",
"(",
")",
"return",
"self",
".",
"super_call",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"cache_key",
"=",
"\"_graplql_{}\"",
".",
"format",
"(",
"self",
".",
"fetch_cache_key",
"(",
"request",
")",
")",
"response",
"=",
"cache",
".",
"get",
"(",
"cache_key",
")",
"if",
"not",
"response",
":",
"response",
"=",
"self",
".",
"super_call",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# cache key and value",
"cache",
".",
"set",
"(",
"cache_key",
",",
"response",
",",
"timeout",
"=",
"graphql_api_settings",
".",
"CACHE_TIMEOUT",
")",
"return",
"response"
] |
Fetches queried data from graphql and returns cached & hashed key.
|
[
"Fetches",
"queried",
"data",
"from",
"graphql",
"and",
"returns",
"cached",
"&",
"hashed",
"key",
"."
] |
b27fd6b5128f6b6a500a8b7a497d76be72d6a232
|
https://github.com/eamigo86/graphene-django-extras/blob/b27fd6b5128f6b6a500a8b7a497d76be72d6a232/graphene_django_extras/views.py#L54-L74
|
train
|
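dispatch caches only when CACHE_ACTIVE is true, clears the whole cache on any mutation, and keeps query responses for CACHE_TIMEOUT seconds in the "default" Django cache. A hedged settings sketch, assuming these flags live in the GRAPHENE_DJANGO_EXTRAS settings dict this package conventionally reads:

# settings.py (illustrative)
GRAPHENE_DJANGO_EXTRAS = {
    'CACHE_ACTIVE': True,    # enable the per-query response cache
    'CACHE_TIMEOUT': 300,    # seconds a cached GraphQL response stays valid
}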
eamigo86/graphene-django-extras
|
graphene_django_extras/directives/date.py
|
_parse
|
def _parse(partial_dt):
"""
parse a partial datetime object to a complete datetime object
"""
dt = None
try:
if isinstance(partial_dt, datetime):
dt = partial_dt
if isinstance(partial_dt, date):
dt = _combine_date_time(partial_dt, time(0, 0, 0))
if isinstance(partial_dt, time):
dt = _combine_date_time(date.today(), partial_dt)
if isinstance(partial_dt, (int, float)):
dt = datetime.fromtimestamp(partial_dt)
if isinstance(partial_dt, (str, bytes)):
dt = parser.parse(partial_dt, default=timezone.now())
if dt is not None and timezone.is_naive(dt):
dt = timezone.make_aware(dt)
return dt
except ValueError:
return None
|
python
|
def _parse(partial_dt):
"""
parse a partial datetime object to a complete datetime object
"""
dt = None
try:
if isinstance(partial_dt, datetime):
dt = partial_dt
if isinstance(partial_dt, date):
dt = _combine_date_time(partial_dt, time(0, 0, 0))
if isinstance(partial_dt, time):
dt = _combine_date_time(date.today(), partial_dt)
if isinstance(partial_dt, (int, float)):
dt = datetime.fromtimestamp(partial_dt)
if isinstance(partial_dt, (str, bytes)):
dt = parser.parse(partial_dt, default=timezone.now())
if dt is not None and timezone.is_naive(dt):
dt = timezone.make_aware(dt)
return dt
except ValueError:
return None
|
[
"def",
"_parse",
"(",
"partial_dt",
")",
":",
"dt",
"=",
"None",
"try",
":",
"if",
"isinstance",
"(",
"partial_dt",
",",
"datetime",
")",
":",
"dt",
"=",
"partial_dt",
"if",
"isinstance",
"(",
"partial_dt",
",",
"date",
")",
":",
"dt",
"=",
"_combine_date_time",
"(",
"partial_dt",
",",
"time",
"(",
"0",
",",
"0",
",",
"0",
")",
")",
"if",
"isinstance",
"(",
"partial_dt",
",",
"time",
")",
":",
"dt",
"=",
"_combine_date_time",
"(",
"date",
".",
"today",
"(",
")",
",",
"partial_dt",
")",
"if",
"isinstance",
"(",
"partial_dt",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"dt",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"partial_dt",
")",
"if",
"isinstance",
"(",
"partial_dt",
",",
"(",
"str",
",",
"bytes",
")",
")",
":",
"dt",
"=",
"parser",
".",
"parse",
"(",
"partial_dt",
",",
"default",
"=",
"timezone",
".",
"now",
"(",
")",
")",
"if",
"dt",
"is",
"not",
"None",
"and",
"timezone",
".",
"is_naive",
"(",
"dt",
")",
":",
"dt",
"=",
"timezone",
".",
"make_aware",
"(",
"dt",
")",
"return",
"dt",
"except",
"ValueError",
":",
"return",
"None"
] |
parse a partial datetime object to a complete datetime object
|
[
"parse",
"a",
"partial",
"datetime",
"object",
"to",
"a",
"complete",
"datetime",
"object"
] |
b27fd6b5128f6b6a500a8b7a497d76be72d6a232
|
https://github.com/eamigo86/graphene-django-extras/blob/b27fd6b5128f6b6a500a8b7a497d76be72d6a232/graphene_django_extras/directives/date.py#L73-L94
|
train
|
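_parse normalises several partial representations into an aware datetime and turns a ValueError into None; note that a datetime input also matches the isinstance(partial_dt, date) branch, since datetime subclasses date. A sketch of the clearer cases (assuming Django is configured so timezone.now()/make_aware work, and that _parse is in scope as the module-level helper above):

from datetime import date, time

print(_parse(date(2020, 1, 1)))        # date  -> combined with time(0, 0, 0)
print(_parse(time(8, 15)))             # time  -> combined with today's date
print(_parse(1577836800))              # int   -> datetime.fromtimestamp(...)
print(_parse('2020-01-01T12:30:00'))   # str   -> dateutil parser, default=now()
print(_parse('not a date'))            # ValueError inside -> None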
eamigo86/graphene-django-extras
|
graphene_django_extras/utils.py
|
clean_dict
|
def clean_dict(d):
"""
Remove all empty fields in a nested dict
"""
if not isinstance(d, (dict, list)):
return d
if isinstance(d, list):
return [v for v in (clean_dict(v) for v in d) if v]
return OrderedDict(
[(k, v) for k, v in ((k, clean_dict(v)) for k, v in list(d.items())) if v]
)
|
python
|
def clean_dict(d):
"""
Remove all empty fields in a nested dict
"""
if not isinstance(d, (dict, list)):
return d
if isinstance(d, list):
return [v for v in (clean_dict(v) for v in d) if v]
return OrderedDict(
[(k, v) for k, v in ((k, clean_dict(v)) for k, v in list(d.items())) if v]
)
|
[
"def",
"clean_dict",
"(",
"d",
")",
":",
"if",
"not",
"isinstance",
"(",
"d",
",",
"(",
"dict",
",",
"list",
")",
")",
":",
"return",
"d",
"if",
"isinstance",
"(",
"d",
",",
"list",
")",
":",
"return",
"[",
"v",
"for",
"v",
"in",
"(",
"clean_dict",
"(",
"v",
")",
"for",
"v",
"in",
"d",
")",
"if",
"v",
"]",
"return",
"OrderedDict",
"(",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"(",
"(",
"k",
",",
"clean_dict",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"d",
".",
"items",
"(",
")",
")",
")",
"if",
"v",
"]",
")"
] |
Remove all empty fields in a nested dict
|
[
"Remove",
"all",
"empty",
"fields",
"in",
"a",
"nested",
"dict"
] |
b27fd6b5128f6b6a500a8b7a497d76be72d6a232
|
https://github.com/eamigo86/graphene-django-extras/blob/b27fd6b5128f6b6a500a8b7a497d76be72d6a232/graphene_django_extras/utils.py#L168-L179
|
train
|
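clean_dict drops every falsy value, so 0, False and empty strings disappear along with empty dicts and lists; a quick sketch:

from graphene_django_extras.utils import clean_dict

nested = {
    'a': {'x': 1, 'y': {}, 'z': None},
    'b': [],
    'c': [0, '', {'k': 'v'}, {}],
    'd': 0,   # also removed, because it is falsy
}

print(clean_dict(nested))   # only 'a' (with 'x') and 'c' (with the one non-empty dict) survive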
eamigo86/graphene-django-extras
|
graphene_django_extras/utils.py
|
_get_queryset
|
def _get_queryset(klass):
"""
Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
get_object_or_404 and get_list_or_404 more DRY.
Raises a ValueError if klass is not a Model, Manager, or QuerySet.
"""
if isinstance(klass, QuerySet):
return klass
elif isinstance(klass, Manager):
manager = klass
elif isinstance(klass, ModelBase):
manager = klass._default_manager
else:
if isinstance(klass, type):
klass__name = klass.__name__
else:
klass__name = klass.__class__.__name__
raise ValueError(
"Object is of type '{}', but must be a Django Model, "
"Manager, or QuerySet".format(klass__name)
)
return manager.all()
|
python
|
def _get_queryset(klass):
"""
Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
get_object_or_404 and get_list_or_404 more DRY.
Raises a ValueError if klass is not a Model, Manager, or QuerySet.
"""
if isinstance(klass, QuerySet):
return klass
elif isinstance(klass, Manager):
manager = klass
elif isinstance(klass, ModelBase):
manager = klass._default_manager
else:
if isinstance(klass, type):
klass__name = klass.__name__
else:
klass__name = klass.__class__.__name__
raise ValueError(
"Object is of type '{}', but must be a Django Model, "
"Manager, or QuerySet".format(klass__name)
)
return manager.all()
|
[
"def",
"_get_queryset",
"(",
"klass",
")",
":",
"if",
"isinstance",
"(",
"klass",
",",
"QuerySet",
")",
":",
"return",
"klass",
"elif",
"isinstance",
"(",
"klass",
",",
"Manager",
")",
":",
"manager",
"=",
"klass",
"elif",
"isinstance",
"(",
"klass",
",",
"ModelBase",
")",
":",
"manager",
"=",
"klass",
".",
"_default_manager",
"else",
":",
"if",
"isinstance",
"(",
"klass",
",",
"type",
")",
":",
"klass__name",
"=",
"klass",
".",
"__name__",
"else",
":",
"klass__name",
"=",
"klass",
".",
"__class__",
".",
"__name__",
"raise",
"ValueError",
"(",
"\"Object is of type '{}', but must be a Django Model, \"",
"\"Manager, or QuerySet\"",
".",
"format",
"(",
"klass__name",
")",
")",
"return",
"manager",
".",
"all",
"(",
")"
] |
Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
get_object_or_404 and get_list_or_404 more DRY.
Raises a ValueError if klass is not a Model, Manager, or QuerySet.
|
[
"Returns",
"a",
"QuerySet",
"from",
"a",
"Model",
"Manager",
"or",
"QuerySet",
".",
"Created",
"to",
"make",
"get_object_or_404",
"and",
"get_list_or_404",
"more",
"DRY",
"."
] |
b27fd6b5128f6b6a500a8b7a497d76be72d6a232
|
https://github.com/eamigo86/graphene-django-extras/blob/b27fd6b5128f6b6a500a8b7a497d76be72d6a232/graphene_django_extras/utils.py#L224-L246
|
train
|
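_get_queryset accepts a Model class, a Manager or a QuerySet and always hands back a QuerySet; a sketch with a hypothetical Django model:

from myapp.models import Book   # hypothetical model

qs1 = _get_queryset(Book)                 # ModelBase -> Book._default_manager.all()
qs2 = _get_queryset(Book.objects)         # Manager   -> .all()
qs3 = _get_queryset(Book.objects.all())   # QuerySet  -> returned unchanged

try:
    _get_queryset('not a model')
except ValueError as e:
    print(e)   # "Object is of type 'str', but must be a Django Model, Manager, or QuerySet"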
Proteus-tech/tormor
|
tormor/schema.py
|
find_schema_paths
|
def find_schema_paths(schema_files_path=DEFAULT_SCHEMA_FILES_PATH):
"""Searches the locations in the `SCHEMA_FILES_PATH` to
try to find where the schema SQL files are located.
"""
paths = []
for path in schema_files_path:
if os.path.isdir(path):
paths.append(path)
if paths:
return paths
raise SchemaFilesNotFound("Searched " + os.pathsep.join(schema_files_path))
|
python
|
def find_schema_paths(schema_files_path=DEFAULT_SCHEMA_FILES_PATH):
"""Searches the locations in the `SCHEMA_FILES_PATH` to
try to find where the schema SQL files are located.
"""
paths = []
for path in schema_files_path:
if os.path.isdir(path):
paths.append(path)
if paths:
return paths
raise SchemaFilesNotFound("Searched " + os.pathsep.join(schema_files_path))
|
[
"def",
"find_schema_paths",
"(",
"schema_files_path",
"=",
"DEFAULT_SCHEMA_FILES_PATH",
")",
":",
"paths",
"=",
"[",
"]",
"for",
"path",
"in",
"schema_files_path",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"paths",
".",
"append",
"(",
"path",
")",
"if",
"paths",
":",
"return",
"paths",
"raise",
"SchemaFilesNotFound",
"(",
"\"Searched \"",
"+",
"os",
".",
"pathsep",
".",
"join",
"(",
"schema_files_path",
")",
")"
] |
Searches the locations in the `SCHEMA_FILES_PATH` to
try to find where the schema SQL files are located.
|
[
"Searches",
"the",
"locations",
"in",
"the",
"SCHEMA_FILES_PATH",
"to",
"try",
"to",
"find",
"where",
"the",
"schema",
"SQL",
"files",
"are",
"located",
"."
] |
3083b0cd2b9a4d21b20dfd5c27678b23660548d7
|
https://github.com/Proteus-tech/tormor/blob/3083b0cd2b9a4d21b20dfd5c27678b23660548d7/tormor/schema.py#L41-L51
|
train
|
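find_schema_paths keeps only the candidate directories that exist on disk and raises SchemaFilesNotFound when none do; a sketch with an explicit list of candidates (paths are illustrative, and both names are assumed to be in scope from this module):

import os

candidates = ['/usr/local/share/tormor/schema', os.path.expanduser('~/schema')]

try:
    print(find_schema_paths(candidates))   # only the directories that actually exist
except SchemaFilesNotFound as e:
    print(e)   # "Searched /usr/local/share/tormor/schema:..."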
plivo/sharq-server
|
runner.py
|
run
|
def run():
"""Exposes a CLI to configure the SharQ Server and runs the server."""
# create a arg parser and configure it.
parser = argparse.ArgumentParser(description='SharQ Server.')
parser.add_argument('-c', '--config', action='store', required=True,
help='Absolute path of the SharQ configuration file.',
dest='sharq_config')
parser.add_argument('-gc', '--gunicorn-config', action='store', required=False,
help='Gunicorn configuration file.',
dest='gunicorn_config')
parser.add_argument('--version', action='version', version='SharQ Server %s' % __version__)
args = parser.parse_args()
# read the configuration file and set gunicorn options.
config_parser = ConfigParser.SafeConfigParser()
# get the full path of the config file.
sharq_config = os.path.abspath(args.sharq_config)
config_parser.read(sharq_config)
host = config_parser.get('sharq-server', 'host')
port = config_parser.get('sharq-server', 'port')
bind = '%s:%s' % (host, port)
try:
workers = config_parser.get('sharq-server', 'workers')
except ConfigParser.NoOptionError:
workers = number_of_workers()
try:
accesslog = config_parser.get('sharq-server', 'accesslog')
except ConfigParser.NoOptionError:
accesslog = None
options = {
'bind': bind,
'workers': workers,
'worker_class': 'gevent' # required for sharq to function.
}
if accesslog:
options.update({
'accesslog': accesslog
})
if args.gunicorn_config:
gunicorn_config = os.path.abspath(args.gunicorn_config)
options.update({
'config': gunicorn_config
})
print """
___ _ ___ ___
/ __| |_ __ _ _ _ / _ \ / __| ___ _ ___ _____ _ _
\__ \ ' \/ _` | '_| (_) | \__ \/ -_) '_\ V / -_) '_|
|___/_||_\__,_|_| \__\_\ |___/\___|_| \_/\___|_|
Version: %s
Listening on: %s
""" % (__version__, bind)
server = setup_server(sharq_config)
SharQServerApplicationRunner(server.app, options).run()
|
python
|
def run():
"""Exposes a CLI to configure the SharQ Server and runs the server."""
# create a arg parser and configure it.
parser = argparse.ArgumentParser(description='SharQ Server.')
parser.add_argument('-c', '--config', action='store', required=True,
help='Absolute path of the SharQ configuration file.',
dest='sharq_config')
parser.add_argument('-gc', '--gunicorn-config', action='store', required=False,
help='Gunicorn configuration file.',
dest='gunicorn_config')
parser.add_argument('--version', action='version', version='SharQ Server %s' % __version__)
args = parser.parse_args()
# read the configuration file and set gunicorn options.
config_parser = ConfigParser.SafeConfigParser()
# get the full path of the config file.
sharq_config = os.path.abspath(args.sharq_config)
config_parser.read(sharq_config)
host = config_parser.get('sharq-server', 'host')
port = config_parser.get('sharq-server', 'port')
bind = '%s:%s' % (host, port)
try:
workers = config_parser.get('sharq-server', 'workers')
except ConfigParser.NoOptionError:
workers = number_of_workers()
try:
accesslog = config_parser.get('sharq-server', 'accesslog')
except ConfigParser.NoOptionError:
accesslog = None
options = {
'bind': bind,
'workers': workers,
'worker_class': 'gevent' # required for sharq to function.
}
if accesslog:
options.update({
'accesslog': accesslog
})
if args.gunicorn_config:
gunicorn_config = os.path.abspath(args.gunicorn_config)
options.update({
'config': gunicorn_config
})
print """
___ _ ___ ___
/ __| |_ __ _ _ _ / _ \ / __| ___ _ ___ _____ _ _
\__ \ ' \/ _` | '_| (_) | \__ \/ -_) '_\ V / -_) '_|
|___/_||_\__,_|_| \__\_\ |___/\___|_| \_/\___|_|
Version: %s
Listening on: %s
""" % (__version__, bind)
server = setup_server(sharq_config)
SharQServerApplicationRunner(server.app, options).run()
|
[
"def",
"run",
"(",
")",
":",
"# create a arg parser and configure it.",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'SharQ Server.'",
")",
"parser",
".",
"add_argument",
"(",
"'-c'",
",",
"'--config'",
",",
"action",
"=",
"'store'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Absolute path of the SharQ configuration file.'",
",",
"dest",
"=",
"'sharq_config'",
")",
"parser",
".",
"add_argument",
"(",
"'-gc'",
",",
"'--gunicorn-config'",
",",
"action",
"=",
"'store'",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Gunicorn configuration file.'",
",",
"dest",
"=",
"'gunicorn_config'",
")",
"parser",
".",
"add_argument",
"(",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"version",
"=",
"'SharQ Server %s'",
"%",
"__version__",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"# read the configuration file and set gunicorn options.",
"config_parser",
"=",
"ConfigParser",
".",
"SafeConfigParser",
"(",
")",
"# get the full path of the config file.",
"sharq_config",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"args",
".",
"sharq_config",
")",
"config_parser",
".",
"read",
"(",
"sharq_config",
")",
"host",
"=",
"config_parser",
".",
"get",
"(",
"'sharq-server'",
",",
"'host'",
")",
"port",
"=",
"config_parser",
".",
"get",
"(",
"'sharq-server'",
",",
"'port'",
")",
"bind",
"=",
"'%s:%s'",
"%",
"(",
"host",
",",
"port",
")",
"try",
":",
"workers",
"=",
"config_parser",
".",
"get",
"(",
"'sharq-server'",
",",
"'workers'",
")",
"except",
"ConfigParser",
".",
"NoOptionError",
":",
"workers",
"=",
"number_of_workers",
"(",
")",
"try",
":",
"accesslog",
"=",
"config_parser",
".",
"get",
"(",
"'sharq-server'",
",",
"'accesslog'",
")",
"except",
"ConfigParser",
".",
"NoOptionError",
":",
"accesslog",
"=",
"None",
"options",
"=",
"{",
"'bind'",
":",
"bind",
",",
"'workers'",
":",
"workers",
",",
"'worker_class'",
":",
"'gevent'",
"# required for sharq to function.",
"}",
"if",
"accesslog",
":",
"options",
".",
"update",
"(",
"{",
"'accesslog'",
":",
"accesslog",
"}",
")",
"if",
"args",
".",
"gunicorn_config",
":",
"gunicorn_config",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"args",
".",
"gunicorn_config",
")",
"options",
".",
"update",
"(",
"{",
"'config'",
":",
"gunicorn_config",
"}",
")",
"print",
"\"\"\"\n ___ _ ___ ___\n / __| |_ __ _ _ _ / _ \\ / __| ___ _ ___ _____ _ _\n \\__ \\ ' \\/ _` | '_| (_) | \\__ \\/ -_) '_\\ V / -_) '_|\n |___/_||_\\__,_|_| \\__\\_\\ |___/\\___|_| \\_/\\___|_|\n\n Version: %s\n\n Listening on: %s\n \"\"\"",
"%",
"(",
"__version__",
",",
"bind",
")",
"server",
"=",
"setup_server",
"(",
"sharq_config",
")",
"SharQServerApplicationRunner",
"(",
"server",
".",
"app",
",",
"options",
")",
".",
"run",
"(",
")"
] |
Exposes a CLI to configure the SharQ Server and runs the server.
|
[
"Exposes",
"a",
"CLI",
"to",
"configure",
"the",
"SharQ",
"Server",
"and",
"runs",
"the",
"server",
"."
] |
9f4c50eb5ee28d1084591febc4a3a34d7ffd0556
|
https://github.com/plivo/sharq-server/blob/9f4c50eb5ee28d1084591febc4a3a34d7ffd0556/runner.py#L38-L97
|
train
|
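Illustrative note (not part of the record above): run() expects an INI-style file with a [sharq-server] section. The option names below are exactly the ones read in the recorded code, and the [sharq] section's job_requeue_interval is the value consumed by SharQServer.requeue shown further down; any additional options SharQ itself needs (for example its Redis settings) are not covered by these records. A minimal sketch:

import ConfigParser
import StringIO

SAMPLE_CONFIG = """
[sharq-server]
host = 127.0.0.1
port = 8080
; the next two options are optional in run()
workers = 2
accesslog = /var/log/sharq/access.log

[sharq]
job_requeue_interval = 1000
"""

parser = ConfigParser.SafeConfigParser()
parser.readfp(StringIO.StringIO(SAMPLE_CONFIG))
assert parser.get('sharq-server', 'host') == '127.0.0.1'
assert parser.get('sharq', 'job_requeue_interval') == '1000'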
plivo/sharq-server
|
sharq_server/server.py
|
setup_server
|
def setup_server(config_path):
"""Configure SharQ server, start the requeue loop
and return the server."""
# configure the SharQ server
server = SharQServer(config_path)
# start the requeue loop
gevent.spawn(server.requeue)
return server
|
python
|
def setup_server(config_path):
"""Configure SharQ server, start the requeue loop
and return the server."""
# configure the SharQ server
server = SharQServer(config_path)
# start the requeue loop
gevent.spawn(server.requeue)
return server
|
[
"def",
"setup_server",
"(",
"config_path",
")",
":",
"# configure the SharQ server",
"server",
"=",
"SharQServer",
"(",
"config_path",
")",
"# start the requeue loop",
"gevent",
".",
"spawn",
"(",
"server",
".",
"requeue",
")",
"return",
"server"
] |
Configure SharQ server, start the requeue loop
and return the server.
|
[
"Configure",
"SharQ",
"server",
"start",
"the",
"requeue",
"loop",
"and",
"return",
"the",
"server",
"."
] |
9f4c50eb5ee28d1084591febc4a3a34d7ffd0556
|
https://github.com/plivo/sharq-server/blob/9f4c50eb5ee28d1084591febc4a3a34d7ffd0556/sharq_server/server.py#L204-L212
|
train
|
plivo/sharq-server
|
sharq_server/server.py
|
SharQServer.requeue
|
def requeue(self):
"""Loop endlessly and requeue expired jobs."""
job_requeue_interval = float(
self.config.get('sharq', 'job_requeue_interval'))
while True:
self.sq.requeue()
gevent.sleep(job_requeue_interval / 1000.00)
|
python
|
def requeue(self):
"""Loop endlessly and requeue expired jobs."""
job_requeue_interval = float(
self.config.get('sharq', 'job_requeue_interval'))
while True:
self.sq.requeue()
gevent.sleep(job_requeue_interval / 1000.00)
|
[
"def",
"requeue",
"(",
"self",
")",
":",
"job_requeue_interval",
"=",
"float",
"(",
"self",
".",
"config",
".",
"get",
"(",
"'sharq'",
",",
"'job_requeue_interval'",
")",
")",
"while",
"True",
":",
"self",
".",
"sq",
".",
"requeue",
"(",
")",
"gevent",
".",
"sleep",
"(",
"job_requeue_interval",
"/",
"1000.00",
")"
] |
Loop endlessly and requeue expired jobs.
|
[
"Loop",
"endlessly",
"and",
"requeue",
"expired",
"jobs",
"."
] |
9f4c50eb5ee28d1084591febc4a3a34d7ffd0556
|
https://github.com/plivo/sharq-server/blob/9f4c50eb5ee28d1084591febc4a3a34d7ffd0556/sharq_server/server.py#L57-L63
|
train
|
plivo/sharq-server
|
sharq_server/server.py
|
SharQServer._view_enqueue
|
def _view_enqueue(self, queue_type, queue_id):
"""Enqueues a job into SharQ."""
response = {
'status': 'failure'
}
try:
request_data = json.loads(request.data)
except Exception, e:
response['message'] = e.message
return jsonify(**response), 400
request_data.update({
'queue_type': queue_type,
'queue_id': queue_id
})
try:
response = self.sq.enqueue(**request_data)
except Exception, e:
response['message'] = e.message
return jsonify(**response), 400
return jsonify(**response), 201
|
python
|
def _view_enqueue(self, queue_type, queue_id):
"""Enqueues a job into SharQ."""
response = {
'status': 'failure'
}
try:
request_data = json.loads(request.data)
except Exception, e:
response['message'] = e.message
return jsonify(**response), 400
request_data.update({
'queue_type': queue_type,
'queue_id': queue_id
})
try:
response = self.sq.enqueue(**request_data)
except Exception, e:
response['message'] = e.message
return jsonify(**response), 400
return jsonify(**response), 201
|
[
"def",
"_view_enqueue",
"(",
"self",
",",
"queue_type",
",",
"queue_id",
")",
":",
"response",
"=",
"{",
"'status'",
":",
"'failure'",
"}",
"try",
":",
"request_data",
"=",
"json",
".",
"loads",
"(",
"request",
".",
"data",
")",
"except",
"Exception",
",",
"e",
":",
"response",
"[",
"'message'",
"]",
"=",
"e",
".",
"message",
"return",
"jsonify",
"(",
"*",
"*",
"response",
")",
",",
"400",
"request_data",
".",
"update",
"(",
"{",
"'queue_type'",
":",
"queue_type",
",",
"'queue_id'",
":",
"queue_id",
"}",
")",
"try",
":",
"response",
"=",
"self",
".",
"sq",
".",
"enqueue",
"(",
"*",
"*",
"request_data",
")",
"except",
"Exception",
",",
"e",
":",
"response",
"[",
"'message'",
"]",
"=",
"e",
".",
"message",
"return",
"jsonify",
"(",
"*",
"*",
"response",
")",
",",
"400",
"return",
"jsonify",
"(",
"*",
"*",
"response",
")",
",",
"201"
] |
Enqueues a job into SharQ.
|
[
"Enqueues",
"a",
"job",
"into",
"SharQ",
"."
] |
9f4c50eb5ee28d1084591febc4a3a34d7ffd0556
|
https://github.com/plivo/sharq-server/blob/9f4c50eb5ee28d1084591febc4a3a34d7ffd0556/sharq_server/server.py#L69-L91
|
train
|
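Illustrative sketch (not part of the record above) of how a client could call the enqueue view. The URL rule bound to _view_enqueue and the exact fields SharQ expects in the body are not shown in these records, so the path and payload keys below are assumptions; the status codes (201 on success, 400 on bad input) come from the view itself.

import json
import requests  # third-party HTTP client

payload = {
    # hypothetical job fields; queue_type/queue_id are merged in by the view
    'job_id': 'cea20900-0b83-4e24-8e93-ab0e59b41ff4',
    'payload': {'message': 'hello'},
    'interval': 1000,
}
resp = requests.post(
    'http://127.0.0.1:8080/enqueue/sms/user001/',  # assumed route shape
    data=json.dumps(payload))
print resp.status_code  # 201 if self.sq.enqueue() accepted the job, else 400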
plivo/sharq-server
|
sharq_server/server.py
|
SharQServer._view_dequeue
|
def _view_dequeue(self, queue_type):
"""Dequeues a job from SharQ."""
response = {
'status': 'failure'
}
request_data = {
'queue_type': queue_type
}
try:
response = self.sq.dequeue(**request_data)
if response['status'] == 'failure':
return jsonify(**response), 404
except Exception, e:
response['message'] = e.message
return jsonify(**response), 400
return jsonify(**response)
|
python
|
def _view_dequeue(self, queue_type):
"""Dequeues a job from SharQ."""
response = {
'status': 'failure'
}
request_data = {
'queue_type': queue_type
}
try:
response = self.sq.dequeue(**request_data)
if response['status'] == 'failure':
return jsonify(**response), 404
except Exception, e:
response['message'] = e.message
return jsonify(**response), 400
return jsonify(**response)
|
[
"def",
"_view_dequeue",
"(",
"self",
",",
"queue_type",
")",
":",
"response",
"=",
"{",
"'status'",
":",
"'failure'",
"}",
"request_data",
"=",
"{",
"'queue_type'",
":",
"queue_type",
"}",
"try",
":",
"response",
"=",
"self",
".",
"sq",
".",
"dequeue",
"(",
"*",
"*",
"request_data",
")",
"if",
"response",
"[",
"'status'",
"]",
"==",
"'failure'",
":",
"return",
"jsonify",
"(",
"*",
"*",
"response",
")",
",",
"404",
"except",
"Exception",
",",
"e",
":",
"response",
"[",
"'message'",
"]",
"=",
"e",
".",
"message",
"return",
"jsonify",
"(",
"*",
"*",
"response",
")",
",",
"400",
"return",
"jsonify",
"(",
"*",
"*",
"response",
")"
] |
Dequeues a job from SharQ.
|
[
"Dequeues",
"a",
"job",
"from",
"SharQ",
"."
] |
9f4c50eb5ee28d1084591febc4a3a34d7ffd0556
|
https://github.com/plivo/sharq-server/blob/9f4c50eb5ee28d1084591febc4a3a34d7ffd0556/sharq_server/server.py#L93-L110
|
train
|
plivo/sharq-server
|
sharq_server/server.py
|
SharQServer._view_finish
|
def _view_finish(self, queue_type, queue_id, job_id):
"""Marks a job as finished in SharQ."""
response = {
'status': 'failure'
}
request_data = {
'queue_type': queue_type,
'queue_id': queue_id,
'job_id': job_id
}
try:
response = self.sq.finish(**request_data)
if response['status'] == 'failure':
return jsonify(**response), 404
except Exception, e:
response['message'] = e.message
return jsonify(**response), 400
return jsonify(**response)
|
python
|
def _view_finish(self, queue_type, queue_id, job_id):
"""Marks a job as finished in SharQ."""
response = {
'status': 'failure'
}
request_data = {
'queue_type': queue_type,
'queue_id': queue_id,
'job_id': job_id
}
try:
response = self.sq.finish(**request_data)
if response['status'] == 'failure':
return jsonify(**response), 404
except Exception, e:
response['message'] = e.message
return jsonify(**response), 400
return jsonify(**response)
|
[
"def",
"_view_finish",
"(",
"self",
",",
"queue_type",
",",
"queue_id",
",",
"job_id",
")",
":",
"response",
"=",
"{",
"'status'",
":",
"'failure'",
"}",
"request_data",
"=",
"{",
"'queue_type'",
":",
"queue_type",
",",
"'queue_id'",
":",
"queue_id",
",",
"'job_id'",
":",
"job_id",
"}",
"try",
":",
"response",
"=",
"self",
".",
"sq",
".",
"finish",
"(",
"*",
"*",
"request_data",
")",
"if",
"response",
"[",
"'status'",
"]",
"==",
"'failure'",
":",
"return",
"jsonify",
"(",
"*",
"*",
"response",
")",
",",
"404",
"except",
"Exception",
",",
"e",
":",
"response",
"[",
"'message'",
"]",
"=",
"e",
".",
"message",
"return",
"jsonify",
"(",
"*",
"*",
"response",
")",
",",
"400",
"return",
"jsonify",
"(",
"*",
"*",
"response",
")"
] |
Marks a job as finished in SharQ.
|
[
"Marks",
"a",
"job",
"as",
"finished",
"in",
"SharQ",
"."
] |
9f4c50eb5ee28d1084591febc4a3a34d7ffd0556
|
https://github.com/plivo/sharq-server/blob/9f4c50eb5ee28d1084591febc4a3a34d7ffd0556/sharq_server/server.py#L112-L131
|
train
|
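Companion sketch to the enqueue example above, again illustrative only: a worker pulls a job with the dequeue view and acknowledges it with the finish view. The HTTP methods, URL rules and response keys are assumptions not contained in these records; the 404-on-empty behaviour is what _view_dequeue returns when SharQ reports failure.

import requests

resp = requests.get('http://127.0.0.1:8080/dequeue/sms/')  # assumed route
if resp.status_code == 200:
    job = resp.json()  # assumed keys: queue_id, job_id, payload
    # ... process job['payload'] here ...
    requests.post('http://127.0.0.1:8080/finish/sms/%s/%s/'
                  % (job['queue_id'], job['job_id']))       # assumed route
elif resp.status_code == 404:
    pass  # nothing ready to dequeue right now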
plivo/sharq-server
|
sharq_server/server.py
|
SharQServer._view_interval
|
def _view_interval(self, queue_type, queue_id):
"""Updates the queue interval in SharQ."""
response = {
'status': 'failure'
}
try:
request_data = json.loads(request.data)
interval = request_data['interval']
except Exception, e:
response['message'] = e.message
return jsonify(**response), 400
request_data = {
'queue_type': queue_type,
'queue_id': queue_id,
'interval': interval
}
try:
response = self.sq.interval(**request_data)
if response['status'] == 'failure':
return jsonify(**response), 404
except Exception, e:
response['message'] = e.message
return jsonify(**response), 400
return jsonify(**response)
|
python
|
def _view_interval(self, queue_type, queue_id):
"""Updates the queue interval in SharQ."""
response = {
'status': 'failure'
}
try:
request_data = json.loads(request.data)
interval = request_data['interval']
except Exception, e:
response['message'] = e.message
return jsonify(**response), 400
request_data = {
'queue_type': queue_type,
'queue_id': queue_id,
'interval': interval
}
try:
response = self.sq.interval(**request_data)
if response['status'] == 'failure':
return jsonify(**response), 404
except Exception, e:
response['message'] = e.message
return jsonify(**response), 400
return jsonify(**response)
|
[
"def",
"_view_interval",
"(",
"self",
",",
"queue_type",
",",
"queue_id",
")",
":",
"response",
"=",
"{",
"'status'",
":",
"'failure'",
"}",
"try",
":",
"request_data",
"=",
"json",
".",
"loads",
"(",
"request",
".",
"data",
")",
"interval",
"=",
"request_data",
"[",
"'interval'",
"]",
"except",
"Exception",
",",
"e",
":",
"response",
"[",
"'message'",
"]",
"=",
"e",
".",
"message",
"return",
"jsonify",
"(",
"*",
"*",
"response",
")",
",",
"400",
"request_data",
"=",
"{",
"'queue_type'",
":",
"queue_type",
",",
"'queue_id'",
":",
"queue_id",
",",
"'interval'",
":",
"interval",
"}",
"try",
":",
"response",
"=",
"self",
".",
"sq",
".",
"interval",
"(",
"*",
"*",
"request_data",
")",
"if",
"response",
"[",
"'status'",
"]",
"==",
"'failure'",
":",
"return",
"jsonify",
"(",
"*",
"*",
"response",
")",
",",
"404",
"except",
"Exception",
",",
"e",
":",
"response",
"[",
"'message'",
"]",
"=",
"e",
".",
"message",
"return",
"jsonify",
"(",
"*",
"*",
"response",
")",
",",
"400",
"return",
"jsonify",
"(",
"*",
"*",
"response",
")"
] |
Updates the queue interval in SharQ.
|
[
"Updates",
"the",
"queue",
"interval",
"in",
"SharQ",
"."
] |
9f4c50eb5ee28d1084591febc4a3a34d7ffd0556
|
https://github.com/plivo/sharq-server/blob/9f4c50eb5ee28d1084591febc4a3a34d7ffd0556/sharq_server/server.py#L133-L159
|
train
|
plivo/sharq-server
|
sharq_server/server.py
|
SharQServer._view_metrics
|
def _view_metrics(self, queue_type, queue_id):
"""Gets SharQ metrics based on the params."""
response = {
'status': 'failure'
}
request_data = {}
if queue_type:
request_data['queue_type'] = queue_type
if queue_id:
request_data['queue_id'] = queue_id
try:
response = self.sq.metrics(**request_data)
except Exception, e:
response['message'] = e.message
return jsonify(**response), 400
return jsonify(**response)
|
python
|
def _view_metrics(self, queue_type, queue_id):
"""Gets SharQ metrics based on the params."""
response = {
'status': 'failure'
}
request_data = {}
if queue_type:
request_data['queue_type'] = queue_type
if queue_id:
request_data['queue_id'] = queue_id
try:
response = self.sq.metrics(**request_data)
except Exception, e:
response['message'] = e.message
return jsonify(**response), 400
return jsonify(**response)
|
[
"def",
"_view_metrics",
"(",
"self",
",",
"queue_type",
",",
"queue_id",
")",
":",
"response",
"=",
"{",
"'status'",
":",
"'failure'",
"}",
"request_data",
"=",
"{",
"}",
"if",
"queue_type",
":",
"request_data",
"[",
"'queue_type'",
"]",
"=",
"queue_type",
"if",
"queue_id",
":",
"request_data",
"[",
"'queue_id'",
"]",
"=",
"queue_id",
"try",
":",
"response",
"=",
"self",
".",
"sq",
".",
"metrics",
"(",
"*",
"*",
"request_data",
")",
"except",
"Exception",
",",
"e",
":",
"response",
"[",
"'message'",
"]",
"=",
"e",
".",
"message",
"return",
"jsonify",
"(",
"*",
"*",
"response",
")",
",",
"400",
"return",
"jsonify",
"(",
"*",
"*",
"response",
")"
] |
Gets SharQ metrics based on the params.
|
[
"Gets",
"SharQ",
"metrics",
"based",
"on",
"the",
"params",
"."
] |
9f4c50eb5ee28d1084591febc4a3a34d7ffd0556
|
https://github.com/plivo/sharq-server/blob/9f4c50eb5ee28d1084591febc4a3a34d7ffd0556/sharq_server/server.py#L161-L178
|
train
|
plivo/sharq-server
|
sharq_server/server.py
|
SharQServer._view_clear_queue
|
def _view_clear_queue(self, queue_type, queue_id):
"""remove queueu from SharQ based on the queue_type and queue_id."""
response = {
'status': 'failure'
}
try:
request_data = json.loads(request.data)
except Exception, e:
response['message'] = e.message
return jsonify(**response), 400
request_data.update({
'queue_type': queue_type,
'queue_id': queue_id
})
try:
response = self.sq.clear_queue(**request_data)
except Exception, e:
response['message'] = e.message
return jsonify(**response), 400
return jsonify(**response)
|
python
|
def _view_clear_queue(self, queue_type, queue_id):
"""remove queueu from SharQ based on the queue_type and queue_id."""
response = {
'status': 'failure'
}
try:
request_data = json.loads(request.data)
except Exception, e:
response['message'] = e.message
return jsonify(**response), 400
request_data.update({
'queue_type': queue_type,
'queue_id': queue_id
})
try:
response = self.sq.clear_queue(**request_data)
except Exception, e:
response['message'] = e.message
return jsonify(**response), 400
return jsonify(**response)
|
[
"def",
"_view_clear_queue",
"(",
"self",
",",
"queue_type",
",",
"queue_id",
")",
":",
"response",
"=",
"{",
"'status'",
":",
"'failure'",
"}",
"try",
":",
"request_data",
"=",
"json",
".",
"loads",
"(",
"request",
".",
"data",
")",
"except",
"Exception",
",",
"e",
":",
"response",
"[",
"'message'",
"]",
"=",
"e",
".",
"message",
"return",
"jsonify",
"(",
"*",
"*",
"response",
")",
",",
"400",
"request_data",
".",
"update",
"(",
"{",
"'queue_type'",
":",
"queue_type",
",",
"'queue_id'",
":",
"queue_id",
"}",
")",
"try",
":",
"response",
"=",
"self",
".",
"sq",
".",
"clear_queue",
"(",
"*",
"*",
"request_data",
")",
"except",
"Exception",
",",
"e",
":",
"response",
"[",
"'message'",
"]",
"=",
"e",
".",
"message",
"return",
"jsonify",
"(",
"*",
"*",
"response",
")",
",",
"400",
"return",
"jsonify",
"(",
"*",
"*",
"response",
")"
] |
remove queue from SharQ based on the queue_type and queue_id.
|
[
"remove",
"queueu",
"from",
"SharQ",
"based",
"on",
"the",
"queue_type",
"and",
"queue_id",
"."
] |
9f4c50eb5ee28d1084591febc4a3a34d7ffd0556
|
https://github.com/plivo/sharq-server/blob/9f4c50eb5ee28d1084591febc4a3a34d7ffd0556/sharq_server/server.py#L180-L201
|
train
|
depop/python-automock
|
automock/base.py
|
start_patching
|
def start_patching(name=None):
# type: (Optional[str]) -> None
"""
Initiate mocking of the functions listed in `_factory_map`.
For this to work reliably all mocked helper functions should be imported
and used like this:
import dp_paypal.client as paypal
res = paypal.do_paypal_express_checkout(...)
(i.e. don't use `from dp_paypal.client import x` import style)
Kwargs:
name (Optional[str]): if given, only patch the specified path, else all
defined default mocks
"""
global _factory_map, _patchers, _mocks
if _patchers and name is None:
warnings.warn('start_patching() called again, already patched')
_pre_import()
if name is not None:
factory = _factory_map[name]
items = [(name, factory)]
else:
items = _factory_map.items()
for name, factory in items:
patcher = mock.patch(name, new=factory())
mocked = patcher.start()
_patchers[name] = patcher
_mocks[name] = mocked
|
python
|
def start_patching(name=None):
# type: (Optional[str]) -> None
"""
Initiate mocking of the functions listed in `_factory_map`.
For this to work reliably all mocked helper functions should be imported
and used like this:
import dp_paypal.client as paypal
res = paypal.do_paypal_express_checkout(...)
(i.e. don't use `from dp_paypal.client import x` import style)
Kwargs:
name (Optional[str]): if given, only patch the specified path, else all
defined default mocks
"""
global _factory_map, _patchers, _mocks
if _patchers and name is None:
warnings.warn('start_patching() called again, already patched')
_pre_import()
if name is not None:
factory = _factory_map[name]
items = [(name, factory)]
else:
items = _factory_map.items()
for name, factory in items:
patcher = mock.patch(name, new=factory())
mocked = patcher.start()
_patchers[name] = patcher
_mocks[name] = mocked
|
[
"def",
"start_patching",
"(",
"name",
"=",
"None",
")",
":",
"# type: (Optional[str]) -> None",
"global",
"_factory_map",
",",
"_patchers",
",",
"_mocks",
"if",
"_patchers",
"and",
"name",
"is",
"None",
":",
"warnings",
".",
"warn",
"(",
"'start_patching() called again, already patched'",
")",
"_pre_import",
"(",
")",
"if",
"name",
"is",
"not",
"None",
":",
"factory",
"=",
"_factory_map",
"[",
"name",
"]",
"items",
"=",
"[",
"(",
"name",
",",
"factory",
")",
"]",
"else",
":",
"items",
"=",
"_factory_map",
".",
"items",
"(",
")",
"for",
"name",
",",
"factory",
"in",
"items",
":",
"patcher",
"=",
"mock",
".",
"patch",
"(",
"name",
",",
"new",
"=",
"factory",
"(",
")",
")",
"mocked",
"=",
"patcher",
".",
"start",
"(",
")",
"_patchers",
"[",
"name",
"]",
"=",
"patcher",
"_mocks",
"[",
"name",
"]",
"=",
"mocked"
] |
Initiate mocking of the functions listed in `_factory_map`.
For this to work reliably all mocked helper functions should be imported
and used like this:
import dp_paypal.client as paypal
res = paypal.do_paypal_express_checkout(...)
(i.e. don't use `from dp_paypal.client import x` import style)
Kwargs:
name (Optional[str]): if given, only patch the specified path, else all
defined default mocks
|
[
"Initiate",
"mocking",
"of",
"the",
"functions",
"listed",
"in",
"_factory_map",
"."
] |
8a02acecd9265c8f9a00d7b8e097cae87cdf28bd
|
https://github.com/depop/python-automock/blob/8a02acecd9265c8f9a00d7b8e097cae87cdf28bd/automock/base.py#L88-L121
|
train
|
depop/python-automock
|
automock/base.py
|
stop_patching
|
def stop_patching(name=None):
# type: (Optional[str]) -> None
"""
Finish the mocking initiated by `start_patching`
Kwargs:
name (Optional[str]): if given, only unpatch the specified path, else all
defined default mocks
"""
global _patchers, _mocks
if not _patchers:
warnings.warn('stop_patching() called again, already stopped')
if name is not None:
items = [(name, _patchers[name])]
else:
items = list(_patchers.items())
for name, patcher in items:
patcher.stop()
del _patchers[name]
del _mocks[name]
|
python
|
def stop_patching(name=None):
# type: (Optional[str]) -> None
"""
Finish the mocking initiated by `start_patching`
Kwargs:
name (Optional[str]): if given, only unpatch the specified path, else all
defined default mocks
"""
global _patchers, _mocks
if not _patchers:
warnings.warn('stop_patching() called again, already stopped')
if name is not None:
items = [(name, _patchers[name])]
else:
items = list(_patchers.items())
for name, patcher in items:
patcher.stop()
del _patchers[name]
del _mocks[name]
|
[
"def",
"stop_patching",
"(",
"name",
"=",
"None",
")",
":",
"# type: (Optional[str]) -> None",
"global",
"_patchers",
",",
"_mocks",
"if",
"not",
"_patchers",
":",
"warnings",
".",
"warn",
"(",
"'stop_patching() called again, already stopped'",
")",
"if",
"name",
"is",
"not",
"None",
":",
"items",
"=",
"[",
"(",
"name",
",",
"_patchers",
"[",
"name",
"]",
")",
"]",
"else",
":",
"items",
"=",
"list",
"(",
"_patchers",
".",
"items",
"(",
")",
")",
"for",
"name",
",",
"patcher",
"in",
"items",
":",
"patcher",
".",
"stop",
"(",
")",
"del",
"_patchers",
"[",
"name",
"]",
"del",
"_mocks",
"[",
"name",
"]"
] |
Finish the mocking initiated by `start_patching`
Kwargs:
name (Optional[str]): if given, only unpatch the specified path, else all
defined default mocks
|
[
"Finish",
"the",
"mocking",
"initiated",
"by",
"start_patching"
] |
8a02acecd9265c8f9a00d7b8e097cae87cdf28bd
|
https://github.com/depop/python-automock/blob/8a02acecd9265c8f9a00d7b8e097cae87cdf28bd/automock/base.py#L124-L145
|
train
|
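Usage sketch for the two helpers above (not part of the records). It assumes mocks have already been registered so that automock._factory_map is populated, and that start_patching/stop_patching are importable from the automock package; the dp_paypal path is simply the example used in the docstring.

import unittest
import automock

class CheckoutTestCase(unittest.TestCase):
    def setUp(self):
        automock.start_patching()    # patch every registered path

    def tearDown(self):
        automock.stop_patching()     # remove all patches started in setUp

    def test_single_path(self):
        # patch and unpatch one registered path by name
        name = 'dp_paypal.client.do_paypal_express_checkout'
        automock.start_patching(name)
        automock.stop_patching(name)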
matousc89/padasip
|
padasip/preprocess/standardize_back.py
|
standardize_back
|
def standardize_back(xs, offset, scale):
"""
    This is function for de-standardization of input series.
**Args:**
* `xs` : standardized input (1 dimensional array)
* `offset` : offset to add (float).
* `scale` : scale (float).
**Returns:**
* `x` : original (destandardised) series
"""
try:
offset = float(offset)
except:
raise ValueError('The argument offset is not None or float.')
try:
scale = float(scale)
except:
raise ValueError('The argument scale is not None or float.')
try:
xs = np.array(xs, dtype="float64")
except:
raise ValueError('The argument xs is not numpy array or similar.')
return xs*scale + offset
|
python
|
def standardize_back(xs, offset, scale):
"""
    This is function for de-standardization of input series.
**Args:**
* `xs` : standardized input (1 dimensional array)
* `offset` : offset to add (float).
* `scale` : scale (float).
**Returns:**
* `x` : original (destandardised) series
"""
try:
offset = float(offset)
except:
raise ValueError('The argument offset is not None or float.')
try:
scale = float(scale)
except:
raise ValueError('The argument scale is not None or float.')
try:
xs = np.array(xs, dtype="float64")
except:
raise ValueError('The argument xs is not numpy array or similar.')
return xs*scale + offset
|
[
"def",
"standardize_back",
"(",
"xs",
",",
"offset",
",",
"scale",
")",
":",
"try",
":",
"offset",
"=",
"float",
"(",
"offset",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'The argument offset is not None or float.'",
")",
"try",
":",
"scale",
"=",
"float",
"(",
"scale",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'The argument scale is not None or float.'",
")",
"try",
":",
"xs",
"=",
"np",
".",
"array",
"(",
"xs",
",",
"dtype",
"=",
"\"float64\"",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'The argument xs is not numpy array or similar.'",
")",
"return",
"xs",
"*",
"scale",
"+",
"offset"
] |
This is function for de-standardization of input series.
**Args:**
* `xs` : standardized input (1 dimensional array)
* `offset` : offset to add (float).
* `scale` : scale (float).
**Returns:**
* `x` : original (destandardised) series
|
[
"This",
"is",
"function",
"for",
"de",
"-",
"standarization",
"of",
"input",
"series",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/preprocess/standardize_back.py#L33-L62
|
train
|
matousc89/padasip
|
padasip/preprocess/standardize.py
|
standardize
|
def standardize(x, offset=None, scale=None):
"""
    This is function for standardization of input series.
**Args:**
* `x` : series (1 dimensional array)
**Kwargs:**
* `offset` : offset to remove (float). If not given, \
the mean value of `x` is used.
* `scale` : scale (float). If not given, \
the standard deviation of `x` is used.
**Returns:**
* `xs` : standardized series
"""
if offset == None:
offset = np.array(x).mean()
else:
try:
offset = float(offset)
except:
raise ValueError('The argument offset is not None or float')
if scale == None:
scale = np.array(x).std()
else:
try:
scale = float(scale)
except:
raise ValueError('The argument scale is not None or float')
try:
x = np.array(x, dtype="float64")
except:
raise ValueError('The argument x is not numpy array or similar.')
return (x - offset) / scale
|
python
|
def standardize(x, offset=None, scale=None):
"""
    This is function for standardization of input series.
**Args:**
* `x` : series (1 dimensional array)
**Kwargs:**
* `offset` : offset to remove (float). If not given, \
the mean value of `x` is used.
* `scale` : scale (float). If not given, \
the standard deviation of `x` is used.
**Returns:**
* `xs` : standardized series
"""
if offset == None:
offset = np.array(x).mean()
else:
try:
offset = float(offset)
except:
raise ValueError('The argument offset is not None or float')
if scale == None:
scale = np.array(x).std()
else:
try:
scale = float(scale)
except:
raise ValueError('The argument scale is not None or float')
try:
x = np.array(x, dtype="float64")
except:
raise ValueError('The argument x is not numpy array or similar.')
return (x - offset) / scale
|
[
"def",
"standardize",
"(",
"x",
",",
"offset",
"=",
"None",
",",
"scale",
"=",
"None",
")",
":",
"if",
"offset",
"==",
"None",
":",
"offset",
"=",
"np",
".",
"array",
"(",
"x",
")",
".",
"mean",
"(",
")",
"else",
":",
"try",
":",
"offset",
"=",
"float",
"(",
"offset",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'The argument offset is not None or float'",
")",
"if",
"scale",
"==",
"None",
":",
"scale",
"=",
"np",
".",
"array",
"(",
"x",
")",
".",
"std",
"(",
")",
"else",
":",
"try",
":",
"scale",
"=",
"float",
"(",
"scale",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'The argument scale is not None or float'",
")",
"try",
":",
"x",
"=",
"np",
".",
"array",
"(",
"x",
",",
"dtype",
"=",
"\"float64\"",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'The argument x is not numpy array or similar.'",
")",
"return",
"(",
"x",
"-",
"offset",
")",
"/",
"scale"
] |
This is function for standardization of input series.
**Args:**
* `x` : series (1 dimensional array)
**Kwargs:**
* `offset` : offset to remove (float). If not given, \
the mean value of `x` is used.
* `scale` : scale (float). If not given, \
the standard deviation of `x` is used.
**Returns:**
* `xs` : standardized series
|
[
"This",
"is",
"function",
"for",
"standarization",
"of",
"input",
"series",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/preprocess/standardize.py#L62-L100
|
train
|
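A small round-trip sketch for the two preprocessing helpers above, using only behaviour visible in the recorded code (standardize computes (x - offset) / scale, standardize_back inverts it).

import numpy as np
from padasip.preprocess.standardize import standardize
from padasip.preprocess.standardize_back import standardize_back

x = np.array([1., 2., 3., 4., 5.])
offset = x.mean()                 # 3.0
scale = x.std()                   # sqrt(2) ~ 1.414
xs = standardize(x)               # defaults use the mean and std of x
x_back = standardize_back(xs, offset, scale)
assert np.allclose(x, x_back)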
matousc89/padasip
|
padasip/preprocess/input_from_history.py
|
input_from_history
|
def input_from_history(a, n, bias=False):
"""
This is function for creation of input matrix.
**Args:**
* `a` : series (1 dimensional array)
* `n` : size of input matrix row (int). It means how many samples \
of previous history you want to use \
as the filter input. It also represents the filter length.
**Kwargs:**
* `bias` : decides if the bias is used (Boolean). If True, \
array of all ones is appended as a last column to matrix `x`. \
So matrix `x` has `n`+1 columns.
**Returns:**
* `x` : input matrix (2 dimensional array) \
constructed from an array `a`. The length of `x` \
is calculated as length of `a` - `n` + 1. \
    If the `bias` is used, then the amount of columns is `n`+1, \
    if not then the amount of columns is `n`.
"""
if not type(n) == int:
raise ValueError('The argument n must be int.')
if not n > 0:
raise ValueError('The argument n must be greater than 0')
try:
a = np.array(a, dtype="float64")
except:
raise ValueError('The argument a is not numpy array or similar.')
x = np.array([a[i:i+n] for i in range(len(a)-n+1)])
if bias:
x = np.vstack((x.T, np.ones(len(x)))).T
return x
|
python
|
def input_from_history(a, n, bias=False):
"""
This is function for creation of input matrix.
**Args:**
* `a` : series (1 dimensional array)
* `n` : size of input matrix row (int). It means how many samples \
of previous history you want to use \
as the filter input. It also represents the filter length.
**Kwargs:**
* `bias` : decides if the bias is used (Boolean). If True, \
array of all ones is appended as a last column to matrix `x`. \
So matrix `x` has `n`+1 columns.
**Returns:**
* `x` : input matrix (2 dimensional array) \
constructed from an array `a`. The length of `x` \
is calculated as length of `a` - `n` + 1. \
    If the `bias` is used, then the amount of columns is `n`+1, \
    if not then the amount of columns is `n`.
"""
if not type(n) == int:
raise ValueError('The argument n must be int.')
if not n > 0:
raise ValueError('The argument n must be greater than 0')
try:
a = np.array(a, dtype="float64")
except:
raise ValueError('The argument a is not numpy array or similar.')
x = np.array([a[i:i+n] for i in range(len(a)-n+1)])
if bias:
x = np.vstack((x.T, np.ones(len(x)))).T
return x
|
[
"def",
"input_from_history",
"(",
"a",
",",
"n",
",",
"bias",
"=",
"False",
")",
":",
"if",
"not",
"type",
"(",
"n",
")",
"==",
"int",
":",
"raise",
"ValueError",
"(",
"'The argument n must be int.'",
")",
"if",
"not",
"n",
">",
"0",
":",
"raise",
"ValueError",
"(",
"'The argument n must be greater than 0'",
")",
"try",
":",
"a",
"=",
"np",
".",
"array",
"(",
"a",
",",
"dtype",
"=",
"\"float64\"",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'The argument a is not numpy array or similar.'",
")",
"x",
"=",
"np",
".",
"array",
"(",
"[",
"a",
"[",
"i",
":",
"i",
"+",
"n",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"a",
")",
"-",
"n",
"+",
"1",
")",
"]",
")",
"if",
"bias",
":",
"x",
"=",
"np",
".",
"vstack",
"(",
"(",
"x",
".",
"T",
",",
"np",
".",
"ones",
"(",
"len",
"(",
"x",
")",
")",
")",
")",
".",
"T",
"return",
"x"
] |
This is function for creation of input matrix.
**Args:**
* `a` : series (1 dimensional array)
* `n` : size of input matrix row (int). It means how many samples \
of previous history you want to use \
as the filter input. It also represents the filter length.
**Kwargs:**
* `bias` : decides if the bias is used (Boolean). If True, \
array of all ones is appended as a last column to matrix `x`. \
So matrix `x` has `n`+1 columns.
**Returns:**
* `x` : input matrix (2 dimensional array) \
constructed from an array `a`. The length of `x` \
is calculated as length of `a` - `n` + 1. \
If the `bias` is used, then the amount of columns is `n`+1, \
if not then the amount of columns is `n`.
|
[
"This",
"is",
"function",
"for",
"creation",
"of",
"input",
"matrix",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/preprocess/input_from_history.py#L34-L72
|
train
|
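Worked example for input_from_history, matching the recorded code: with n = 3 the 5-sample series yields 5 - 3 + 1 = 3 rows of sliding history, and bias=True appends a column of ones.

import numpy as np
from padasip.preprocess.input_from_history import input_from_history

a = np.array([1., 2., 3., 4., 5.])
x = input_from_history(a, 3)
# x -> [[1. 2. 3.]
#       [2. 3. 4.]
#       [3. 4. 5.]]
xb = input_from_history(a, 3, bias=True)
# xb has shape (3, 4): the same rows with a trailing column of ones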
matousc89/padasip
|
padasip/filters/base_filter.py
|
AdaptiveFilter.init_weights
|
def init_weights(self, w, n=-1):
"""
This function initialises the adaptive weights of the filter.
**Args:**
* `w` : initial weights of filter. Possible values are:
* array with initial weights (1 dimensional array) of filter size
* "random" : create random weights
* "zeros" : create zero value weights
**Kwargs:**
* `n` : size of filter (int) - number of filter coefficients.
**Returns:**
* `y` : output value (float) calculated from input array.
"""
if n == -1:
n = self.n
if type(w) == str:
if w == "random":
w = np.random.normal(0, 0.5, n)
elif w == "zeros":
w = np.zeros(n)
else:
raise ValueError('Impossible to understand the w')
elif len(w) == n:
try:
w = np.array(w, dtype="float64")
except:
raise ValueError('Impossible to understand the w')
else:
raise ValueError('Impossible to understand the w')
self.w = w
|
python
|
def init_weights(self, w, n=-1):
"""
This function initialises the adaptive weights of the filter.
**Args:**
* `w` : initial weights of filter. Possible values are:
* array with initial weights (1 dimensional array) of filter size
* "random" : create random weights
* "zeros" : create zero value weights
**Kwargs:**
* `n` : size of filter (int) - number of filter coefficients.
**Returns:**
* `y` : output value (float) calculated from input array.
"""
if n == -1:
n = self.n
if type(w) == str:
if w == "random":
w = np.random.normal(0, 0.5, n)
elif w == "zeros":
w = np.zeros(n)
else:
raise ValueError('Impossible to understand the w')
elif len(w) == n:
try:
w = np.array(w, dtype="float64")
except:
raise ValueError('Impossible to understand the w')
else:
raise ValueError('Impossible to understand the w')
self.w = w
|
[
"def",
"init_weights",
"(",
"self",
",",
"w",
",",
"n",
"=",
"-",
"1",
")",
":",
"if",
"n",
"==",
"-",
"1",
":",
"n",
"=",
"self",
".",
"n",
"if",
"type",
"(",
"w",
")",
"==",
"str",
":",
"if",
"w",
"==",
"\"random\"",
":",
"w",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"0",
",",
"0.5",
",",
"n",
")",
"elif",
"w",
"==",
"\"zeros\"",
":",
"w",
"=",
"np",
".",
"zeros",
"(",
"n",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Impossible to understand the w'",
")",
"elif",
"len",
"(",
"w",
")",
"==",
"n",
":",
"try",
":",
"w",
"=",
"np",
".",
"array",
"(",
"w",
",",
"dtype",
"=",
"\"float64\"",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'Impossible to understand the w'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Impossible to understand the w'",
")",
"self",
".",
"w",
"=",
"w"
] |
This function initialises the adaptive weights of the filter.
**Args:**
* `w` : initial weights of filter. Possible values are:
* array with initial weights (1 dimensional array) of filter size
* "random" : create random weights
* "zeros" : create zero value weights
**Kwargs:**
* `n` : size of filter (int) - number of filter coefficients.
**Returns:**
* `y` : output value (float) calculated from input array.
|
[
"This",
"function",
"initialises",
"the",
"adaptive",
"weights",
"of",
"the",
"filter",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/base_filter.py#L16-L56
|
train
|
matousc89/padasip
|
padasip/filters/base_filter.py
|
AdaptiveFilter.predict
|
def predict(self, x):
"""
This function calculates the new output value `y` from input array `x`.
**Args:**
* `x` : input vector (1 dimension array) in length of filter.
**Returns:**
* `y` : output value (float) calculated from input array.
"""
y = np.dot(self.w, x)
return y
|
python
|
def predict(self, x):
"""
This function calculates the new output value `y` from input array `x`.
**Args:**
* `x` : input vector (1 dimension array) in length of filter.
**Returns:**
* `y` : output value (float) calculated from input array.
"""
y = np.dot(self.w, x)
return y
|
[
"def",
"predict",
"(",
"self",
",",
"x",
")",
":",
"y",
"=",
"np",
".",
"dot",
"(",
"self",
".",
"w",
",",
"x",
")",
"return",
"y"
] |
This function calculates the new output value `y` from input array `x`.
**Args:**
* `x` : input vector (1 dimension array) in length of filter.
**Returns:**
* `y` : output value (float) calculated from input array.
|
[
"This",
"function",
"calculates",
"the",
"new",
"output",
"value",
"y",
"from",
"input",
"array",
"x",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/base_filter.py#L58-L72
|
train
|
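Usage sketch for init_weights() and predict(). AdaptiveFilter is a base class, so a concrete subclass is needed; FilterLMS is assumed to be available in the padasip package with the constructor shown (any subclass that sets self.n behaves the same for these two methods).

import numpy as np
import padasip as pa

f = pa.filters.FilterLMS(n=4, mu=0.1, w="zeros")  # assumed constructor
f.init_weights("random")           # random weights, length self.n = 4
x = np.array([1., 0.5, -0.2, 0.1])
y = f.predict(x)                   # simply np.dot(f.w, x)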
matousc89/padasip
|
padasip/filters/base_filter.py
|
AdaptiveFilter.explore_learning
|
def explore_learning(self, d, x, mu_start=0, mu_end=1., steps=100,
ntrain=0.5, epochs=1, criteria="MSE", target_w=False):
"""
Test what learning rate is the best.
**Args:**
* `d` : desired value (1 dimensional array)
* `x` : input matrix (2-dimensional array). Rows are samples,
columns are input arrays.
**Kwargs:**
* `mu_start` : starting learning rate (float)
* `mu_end` : final learning rate (float)
* `steps` : how many learning rates should be tested between `mu_start`
and `mu_end`.
* `ntrain` : train to test ratio (float), default value is 0.5
(that means 50% of data is used for training)
* `epochs` : number of training epochs (int), default value is 1.
This number describes how many times the training will be repeated
on dedicated part of data.
* `criteria` : how should be measured the mean error (str),
default value is "MSE".
* `target_w` : target weights (str or 1d array), default value is False.
If False, the mean error is estimated from prediction error.
If an array is provided, the error between weights and `target_w`
is used.
**Returns:**
* `errors` : mean error for tested learning rates (1 dimensional array).
* `mu_range` : range of used learning rates (1d array). Every value
corresponds with one value from `errors`
"""
mu_range = np.linspace(mu_start, mu_end, steps)
errors = np.zeros(len(mu_range))
for i, mu in enumerate(mu_range):
# init
self.init_weights("zeros")
self.mu = mu
# run
y, e, w = self.pretrained_run(d, x, ntrain=ntrain, epochs=epochs)
if type(target_w) != bool:
errors[i] = get_mean_error(w[-1]-target_w, function=criteria)
else:
errors[i] = get_mean_error(e, function=criteria)
return errors, mu_range
|
python
|
def explore_learning(self, d, x, mu_start=0, mu_end=1., steps=100,
ntrain=0.5, epochs=1, criteria="MSE", target_w=False):
"""
Test what learning rate is the best.
**Args:**
* `d` : desired value (1 dimensional array)
* `x` : input matrix (2-dimensional array). Rows are samples,
columns are input arrays.
**Kwargs:**
* `mu_start` : starting learning rate (float)
* `mu_end` : final learning rate (float)
* `steps` : how many learning rates should be tested between `mu_start`
and `mu_end`.
* `ntrain` : train to test ratio (float), default value is 0.5
(that means 50% of data is used for training)
* `epochs` : number of training epochs (int), default value is 1.
This number describes how many times the training will be repeated
on dedicated part of data.
* `criteria` : how should be measured the mean error (str),
default value is "MSE".
* `target_w` : target weights (str or 1d array), default value is False.
If False, the mean error is estimated from prediction error.
If an array is provided, the error between weights and `target_w`
is used.
**Returns:**
* `errors` : mean error for tested learning rates (1 dimensional array).
* `mu_range` : range of used learning rates (1d array). Every value
corresponds with one value from `errors`
"""
mu_range = np.linspace(mu_start, mu_end, steps)
errors = np.zeros(len(mu_range))
for i, mu in enumerate(mu_range):
# init
self.init_weights("zeros")
self.mu = mu
# run
y, e, w = self.pretrained_run(d, x, ntrain=ntrain, epochs=epochs)
if type(target_w) != bool:
errors[i] = get_mean_error(w[-1]-target_w, function=criteria)
else:
errors[i] = get_mean_error(e, function=criteria)
return errors, mu_range
|
[
"def",
"explore_learning",
"(",
"self",
",",
"d",
",",
"x",
",",
"mu_start",
"=",
"0",
",",
"mu_end",
"=",
"1.",
",",
"steps",
"=",
"100",
",",
"ntrain",
"=",
"0.5",
",",
"epochs",
"=",
"1",
",",
"criteria",
"=",
"\"MSE\"",
",",
"target_w",
"=",
"False",
")",
":",
"mu_range",
"=",
"np",
".",
"linspace",
"(",
"mu_start",
",",
"mu_end",
",",
"steps",
")",
"errors",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"mu_range",
")",
")",
"for",
"i",
",",
"mu",
"in",
"enumerate",
"(",
"mu_range",
")",
":",
"# init",
"self",
".",
"init_weights",
"(",
"\"zeros\"",
")",
"self",
".",
"mu",
"=",
"mu",
"# run",
"y",
",",
"e",
",",
"w",
"=",
"self",
".",
"pretrained_run",
"(",
"d",
",",
"x",
",",
"ntrain",
"=",
"ntrain",
",",
"epochs",
"=",
"epochs",
")",
"if",
"type",
"(",
"target_w",
")",
"!=",
"bool",
":",
"errors",
"[",
"i",
"]",
"=",
"get_mean_error",
"(",
"w",
"[",
"-",
"1",
"]",
"-",
"target_w",
",",
"function",
"=",
"criteria",
")",
"else",
":",
"errors",
"[",
"i",
"]",
"=",
"get_mean_error",
"(",
"e",
",",
"function",
"=",
"criteria",
")",
"return",
"errors",
",",
"mu_range"
] |
Test what learning rate is the best.
**Args:**
* `d` : desired value (1 dimensional array)
* `x` : input matrix (2-dimensional array). Rows are samples,
columns are input arrays.
**Kwargs:**
* `mu_start` : starting learning rate (float)
* `mu_end` : final learning rate (float)
* `steps` : how many learning rates should be tested between `mu_start`
and `mu_end`.
* `ntrain` : train to test ratio (float), default value is 0.5
(that means 50% of data is used for training)
* `epochs` : number of training epochs (int), default value is 1.
This number describes how many times the training will be repeated
on dedicated part of data.
* `criteria` : how should be measured the mean error (str),
default value is "MSE".
* `target_w` : target weights (str or 1d array), default value is False.
If False, the mean error is estimated from prediction error.
If an array is provided, the error between weights and `target_w`
is used.
**Returns:**
* `errors` : mean error for tested learning rates (1 dimensional array).
* `mu_range` : range of used learning rates (1d array). Every value
corresponds with one value from `errors`
|
[
"Test",
"what",
"learning",
"rate",
"is",
"the",
"best",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/base_filter.py#L112-L168
|
train
|
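Hedged sketch of explore_learning() on synthetic data. It assumes FilterLMS from the same package and the pretrained_run() helper referenced inside the method; the data generation here is arbitrary.

import numpy as np
import padasip as pa

N, n = 200, 4
x = np.random.normal(0, 1, (N, n))
d = 2.*x[:, 0] + 0.5*x[:, 1] - x[:, 2] + np.random.normal(0, 0.1, N)

f = pa.filters.FilterLMS(n=n, mu=0.1, w="zeros")
errors, mu_range = f.explore_learning(d, x, mu_start=0.001, mu_end=1.,
                                      steps=50, ntrain=0.5, epochs=1,
                                      criteria="MSE")
best_mu = mu_range[np.argmin(errors)]  # learning rate with the lowest MSE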
matousc89/padasip
|
padasip/filters/base_filter.py
|
AdaptiveFilter.check_float_param
|
def check_float_param(self, param, low, high, name):
"""
Check if the value of the given parameter is in the given range
and a float.
Designed for testing parameters like `mu` and `eps`.
To pass this function the variable `param` must be able to be converted
into a float with a value between `low` and `high`.
**Args:**
* `param` : parameter to check (float or similar)
* `low` : lowest allowed value (float), or None
* `high` : highest allowed value (float), or None
* `name` : name of the parameter (string), it is used for an error message
**Returns:**
* `param` : checked parameter converted to float
"""
try:
param = float(param)
except:
raise ValueError(
'Parameter {} is not float or similar'.format(name)
)
if low != None or high != None:
if not low <= param <= high:
raise ValueError('Parameter {} is not in range <{}, {}>'
.format(name, low, high))
return param
|
python
|
def check_float_param(self, param, low, high, name):
"""
Check if the value of the given parameter is in the given range
and a float.
Designed for testing parameters like `mu` and `eps`.
To pass this function the variable `param` must be able to be converted
into a float with a value between `low` and `high`.
**Args:**
* `param` : parameter to check (float or similar)
* `low` : lowest allowed value (float), or None
* `high` : highest allowed value (float), or None
* `name` : name of the parameter (string), it is used for an error message
**Returns:**
* `param` : checked parameter converted to float
"""
try:
param = float(param)
except:
raise ValueError(
'Parameter {} is not float or similar'.format(name)
)
if low != None or high != None:
if not low <= param <= high:
raise ValueError('Parameter {} is not in range <{}, {}>'
.format(name, low, high))
return param
|
[
"def",
"check_float_param",
"(",
"self",
",",
"param",
",",
"low",
",",
"high",
",",
"name",
")",
":",
"try",
":",
"param",
"=",
"float",
"(",
"param",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'Parameter {} is not float or similar'",
".",
"format",
"(",
"name",
")",
")",
"if",
"low",
"!=",
"None",
"or",
"high",
"!=",
"None",
":",
"if",
"not",
"low",
"<=",
"param",
"<=",
"high",
":",
"raise",
"ValueError",
"(",
"'Parameter {} is not in range <{}, {}>'",
".",
"format",
"(",
"name",
",",
"low",
",",
"high",
")",
")",
"return",
"param"
] |
Check if the value of the given parameter is in the given range
and a float.
Designed for testing parameters like `mu` and `eps`.
To pass this function the variable `param` must be able to be converted
into a float with a value between `low` and `high`.
**Args:**
* `param` : parameter to check (float or similar)
* `low` : lowest allowed value (float), or None
* `high` : highest allowed value (float), or None
* `name` : name of the parameter (string), it is used for an error message
**Returns:**
* `param` : checked parameter converted to float
|
[
"Check",
"if",
"the",
"value",
"of",
"the",
"given",
"parameter",
"is",
"in",
"the",
"given",
"range",
"and",
"a",
"float",
".",
"Designed",
"for",
"testing",
"parameters",
"like",
"mu",
"and",
"eps",
".",
"To",
"pass",
"this",
"function",
"the",
"variable",
"param",
"must",
"be",
"able",
"to",
"be",
"converted",
"into",
"a",
"float",
"with",
"a",
"value",
"between",
"low",
"and",
"high",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/base_filter.py#L170-L203
|
train
|
matousc89/padasip
|
padasip/filters/base_filter.py
|
AdaptiveFilter.check_int_param
|
def check_int_param(self, param, low, high, name):
"""
Check if the value of the given parameter is in the given range
and an int.
Designed for testing parameters like `mu` and `eps`.
To pass this function the variable `param` must be able to be converted
    into an int with a value between `low` and `high`.
**Args:**
* `param` : parameter to check (int or similar)
* `low` : lowest allowed value (int), or None
* `high` : highest allowed value (int), or None
* `name` : name of the parameter (string), it is used for an error message
**Returns:**
    * `param` : checked parameter converted to int
"""
try:
param = int(param)
except:
raise ValueError(
'Parameter {} is not int or similar'.format(name)
)
if low != None or high != None:
if not low <= param <= high:
raise ValueError('Parameter {} is not in range <{}, {}>'
.format(name, low, high))
return param
|
python
|
def check_int_param(self, param, low, high, name):
"""
Check if the value of the given parameter is in the given range
and an int.
Designed for testing parameters like `mu` and `eps`.
To pass this function the variable `param` must be able to be converted
    into an int with a value between `low` and `high`.
**Args:**
* `param` : parameter to check (int or similar)
* `low` : lowest allowed value (int), or None
* `high` : highest allowed value (int), or None
* `name` : name of the parameter (string), it is used for an error message
**Returns:**
    * `param` : checked parameter converted to int
"""
try:
param = int(param)
except:
raise ValueError(
'Parameter {} is not int or similar'.format(name)
)
if low != None or high != None:
if not low <= param <= high:
raise ValueError('Parameter {} is not in range <{}, {}>'
.format(name, low, high))
return param
|
[
"def",
"check_int_param",
"(",
"self",
",",
"param",
",",
"low",
",",
"high",
",",
"name",
")",
":",
"try",
":",
"param",
"=",
"int",
"(",
"param",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'Parameter {} is not int or similar'",
".",
"format",
"(",
"name",
")",
")",
"if",
"low",
"!=",
"None",
"or",
"high",
"!=",
"None",
":",
"if",
"not",
"low",
"<=",
"param",
"<=",
"high",
":",
"raise",
"ValueError",
"(",
"'Parameter {} is not in range <{}, {}>'",
".",
"format",
"(",
"name",
",",
"low",
",",
"high",
")",
")",
"return",
"param"
] |
Check if the value of the given parameter is in the given range
and an int.
Designed for testing parameters like `mu` and `eps`.
To pass this function the variable `param` must be able to be converted
into an int with a value between `low` and `high`.
**Args:**
* `param` : parameter to check (int or similar)
* `low` : lowest allowed value (int), or None
* `high` : highest allowed value (int), or None
* `name` : name of the parameter (string), it is used for an error message
**Returns:**
* `param` : checked parameter converted to int
|
[
"Check",
"if",
"the",
"value",
"of",
"the",
"given",
"parameter",
"is",
"in",
"the",
"given",
"range",
"and",
"an",
"int",
".",
"Designed",
"for",
"testing",
"parameters",
"like",
"mu",
"and",
"eps",
".",
"To",
"pass",
"this",
"function",
"the",
"variable",
"param",
"must",
"be",
"able",
"to",
"be",
"converted",
"into",
"a",
"float",
"with",
"a",
"value",
"between",
"low",
"and",
"high",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/base_filter.py#L226-L259
|
train
|
matousc89/padasip
|
padasip/misc/error_evaluation.py
|
MAE
|
def MAE(x1, x2=-1):
"""
Mean absolute error - this function accepts two series of data or directly
one series with error.
**Args:**
* `x1` - first data series or error (1d array)
**Kwargs:**
* `x2` - second series (1d array) if first series was not error directly,\\
then this should be the second series
**Returns:**
* `e` - MAE of error (float) obtained directly from `x1`, \\
or as a difference of `x1` and `x2`
"""
e = get_valid_error(x1, x2)
return np.sum(np.abs(e)) / float(len(e))
|
python
|
def MAE(x1, x2=-1):
"""
Mean absolute error - this function accepts two series of data or directly
one series with error.
**Args:**
* `x1` - first data series or error (1d array)
**Kwargs:**
* `x2` - second series (1d array) if first series was not error directly,\\
then this should be the second series
**Returns:**
* `e` - MAE of error (float) obtained directly from `x1`, \\
or as a difference of `x1` and `x2`
"""
e = get_valid_error(x1, x2)
return np.sum(np.abs(e)) / float(len(e))
|
[
"def",
"MAE",
"(",
"x1",
",",
"x2",
"=",
"-",
"1",
")",
":",
"e",
"=",
"get_valid_error",
"(",
"x1",
",",
"x2",
")",
"return",
"np",
".",
"sum",
"(",
"np",
".",
"abs",
"(",
"e",
")",
")",
"/",
"float",
"(",
"len",
"(",
"e",
")",
")"
] |
Mean absolute error - this function accepts two series of data or directly
one series with error.
**Args:**
* `x1` - first data series or error (1d array)
**Kwargs:**
* `x2` - second series (1d array) if first series was not error directly,\\
then this should be the second series
**Returns:**
* `e` - MAE of error (float) obtained directly from `x1`, \\
or as a difference of `x1` and `x2`
|
[
"Mean",
"absolute",
"error",
"-",
"this",
"function",
"accepts",
"two",
"series",
"of",
"data",
"or",
"directly",
"one",
"series",
"with",
"error",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/misc/error_evaluation.py#L152-L173
|
train
|
matousc89/padasip
|
padasip/misc/error_evaluation.py
|
MSE
|
def MSE(x1, x2=-1):
"""
Mean squared error - this function accepts two series of data or directly
one series with error.
**Args:**
* `x1` - first data series or error (1d array)
**Kwargs:**
* `x2` - second series (1d array) if first series was not error directly,\\
then this should be the second series
**Returns:**
* `e` - MSE of error (float) obtained directly from `x1`, \\
or as a difference of `x1` and `x2`
"""
e = get_valid_error(x1, x2)
return np.dot(e, e) / float(len(e))
|
python
|
def MSE(x1, x2=-1):
"""
Mean squared error - this function accepts two series of data or directly
one series with error.
**Args:**
* `x1` - first data series or error (1d array)
**Kwargs:**
* `x2` - second series (1d array) if first series was not error directly,\\
then this should be the second series
**Returns:**
* `e` - MSE of error (float) obtained directly from `x1`, \\
or as a difference of `x1` and `x2`
"""
e = get_valid_error(x1, x2)
return np.dot(e, e) / float(len(e))
|
[
"def",
"MSE",
"(",
"x1",
",",
"x2",
"=",
"-",
"1",
")",
":",
"e",
"=",
"get_valid_error",
"(",
"x1",
",",
"x2",
")",
"return",
"np",
".",
"dot",
"(",
"e",
",",
"e",
")",
"/",
"float",
"(",
"len",
"(",
"e",
")",
")"
] |
Mean squared error - this function accepts two series of data or directly
one series with error.
**Args:**
* `x1` - first data series or error (1d array)
**Kwargs:**
* `x2` - second series (1d array) if first series was not error directly,\\
then this should be the second series
**Returns:**
* `e` - MSE of error (float) obtained directly from `x1`, \\
or as a difference of `x1` and `x2`
|
[
"Mean",
"squared",
"error",
"-",
"this",
"function",
"accepts",
"two",
"series",
"of",
"data",
"or",
"directly",
"one",
"series",
"with",
"error",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/misc/error_evaluation.py#L175-L196
|
train
|
matousc89/padasip
|
padasip/misc/error_evaluation.py
|
RMSE
|
def RMSE(x1, x2=-1):
"""
Root-mean-square error - this function accepts two series of data
or directly one series with error.
**Args:**
* `x1` - first data series or error (1d array)
**Kwargs:**
* `x2` - second series (1d array) if first series was not error directly,\\
then this should be the second series
**Returns:**
* `e` - RMSE of error (float) obtained directly from `x1`, \\
or as a difference of `x1` and `x2`
"""
e = get_valid_error(x1, x2)
return np.sqrt(np.dot(e, e) / float(len(e)))
|
python
|
def RMSE(x1, x2=-1):
"""
Root-mean-square error - this function accepts two series of data
or directly one series with error.
**Args:**
* `x1` - first data series or error (1d array)
**Kwargs:**
* `x2` - second series (1d array) if first series was not error directly,\\
then this should be the second series
**Returns:**
* `e` - RMSE of error (float) obtained directly from `x1`, \\
or as a difference of `x1` and `x2`
"""
e = get_valid_error(x1, x2)
return np.sqrt(np.dot(e, e) / float(len(e)))
|
[
"def",
"RMSE",
"(",
"x1",
",",
"x2",
"=",
"-",
"1",
")",
":",
"e",
"=",
"get_valid_error",
"(",
"x1",
",",
"x2",
")",
"return",
"np",
".",
"sqrt",
"(",
"np",
".",
"dot",
"(",
"e",
",",
"e",
")",
"/",
"float",
"(",
"len",
"(",
"e",
")",
")",
")"
] |
Root-mean-square error - this function accepts two series of data
or directly one series with error.
**Args:**
* `x1` - first data series or error (1d array)
**Kwargs:**
* `x2` - second series (1d array) if first series was not error directly,\\
then this should be the second series
**Returns:**
* `e` - RMSE of error (float) obtained directly from `x1`, \\
or as a difference of `x1` and `x2`
|
[
"Root",
"-",
"mean",
"-",
"square",
"error",
"-",
"this",
"function",
"accepts",
"two",
"series",
"of",
"data",
"or",
"directly",
"one",
"series",
"with",
"error",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/misc/error_evaluation.py#L198-L219
|
train
|
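Worked numbers for the three error measures above. With the default x2=-1 the first argument is treated as the error series directly (see get_valid_error in the same module).

import numpy as np
from padasip.misc.error_evaluation import MAE, MSE, RMSE

e = np.array([1., -2., 3.])
MAE(e)    # (1 + 2 + 3) / 3 = 2.0
MSE(e)    # (1 + 4 + 9) / 3 = 4.666...
RMSE(e)   # sqrt(14 / 3) = 2.160...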
matousc89/padasip
|
padasip/detection/elbnd.py
|
ELBND
|
def ELBND(w, e, function="max"):
"""
This function estimates Error and Learning Based Novelty Detection measure
from given data.
**Args:**
* `w` : history of adaptive parameters of an adaptive model (2d array),
every row represents parameters in given time index.
* `e` : error of adaptive model (1d array)
**Kwargs:**
* `functions` : output function (str). The way how to produce single
value for every sample (from all parameters)
* `max` - maximal value
* `sum` - sum of values
**Returns:**
    * ELBND values (1d array). This vector has same length as `w`.
"""
# check if the function is known
if not function in ["max", "sum"]:
raise ValueError('Unknown output function')
# get length of data and number of parameters
N = w.shape[0]
n = w.shape[1]
# get abs dw from w
dw = np.zeros(w.shape)
dw[:-1] = np.abs(np.diff(w, axis=0))
# absolute values of product of increments and error
a = np.random.random((5,2))
b = a.T*np.array([1,2,3,4,5])
elbnd = np.abs((dw.T*e).T)
# apply output function
if function == "max":
elbnd = np.max(elbnd, axis=1)
elif function == "sum":
elbnd = np.sum(elbnd, axis=1)
# return output
return elbnd
|
python
|
def ELBND(w, e, function="max"):
"""
This function estimates Error and Learning Based Novelty Detection measure
from given data.
**Args:**
* `w` : history of adaptive parameters of an adaptive model (2d array),
every row represents parameters in given time index.
* `e` : error of adaptive model (1d array)
**Kwargs:**
* `functions` : output function (str). The way how to produce single
value for every sample (from all parameters)
* `max` - maximal value
* `sum` - sum of values
**Returns:**
    * ELBND values (1d array). This vector has same length as `w`.
"""
# check if the function is known
if not function in ["max", "sum"]:
raise ValueError('Unknown output function')
# get length of data and number of parameters
N = w.shape[0]
n = w.shape[1]
# get abs dw from w
dw = np.zeros(w.shape)
dw[:-1] = np.abs(np.diff(w, axis=0))
# absolute values of product of increments and error
a = np.random.random((5,2))
b = a.T*np.array([1,2,3,4,5])
elbnd = np.abs((dw.T*e).T)
# apply output function
if function == "max":
elbnd = np.max(elbnd, axis=1)
elif function == "sum":
elbnd = np.sum(elbnd, axis=1)
# return output
return elbnd
|
[
"def",
"ELBND",
"(",
"w",
",",
"e",
",",
"function",
"=",
"\"max\"",
")",
":",
"# check if the function is known",
"if",
"not",
"function",
"in",
"[",
"\"max\"",
",",
"\"sum\"",
"]",
":",
"raise",
"ValueError",
"(",
"'Unknown output function'",
")",
"# get length of data and number of parameters",
"N",
"=",
"w",
".",
"shape",
"[",
"0",
"]",
"n",
"=",
"w",
".",
"shape",
"[",
"1",
"]",
"# get abs dw from w",
"dw",
"=",
"np",
".",
"zeros",
"(",
"w",
".",
"shape",
")",
"dw",
"[",
":",
"-",
"1",
"]",
"=",
"np",
".",
"abs",
"(",
"np",
".",
"diff",
"(",
"w",
",",
"axis",
"=",
"0",
")",
")",
"# absolute values of product of increments and error",
"a",
"=",
"np",
".",
"random",
".",
"random",
"(",
"(",
"5",
",",
"2",
")",
")",
"b",
"=",
"a",
".",
"T",
"*",
"np",
".",
"array",
"(",
"[",
"1",
",",
"2",
",",
"3",
",",
"4",
",",
"5",
"]",
")",
"elbnd",
"=",
"np",
".",
"abs",
"(",
"(",
"dw",
".",
"T",
"*",
"e",
")",
".",
"T",
")",
"# apply output function",
"if",
"function",
"==",
"\"max\"",
":",
"elbnd",
"=",
"np",
".",
"max",
"(",
"elbnd",
",",
"axis",
"=",
"1",
")",
"elif",
"function",
"==",
"\"sum\"",
":",
"elbnd",
"=",
"np",
".",
"sum",
"(",
"elbnd",
",",
"axis",
"=",
"1",
")",
"# return output",
"return",
"elbnd"
] |
This function estimates Error and Learning Based Novelty Detection measure
from given data.
**Args:**
* `w` : history of adaptive parameters of an adaptive model (2d array),
every row represents parameters in given time index.
* `e` : error of adaptive model (1d array)
**Kwargs:**
* `function` : output function (str). How to produce a single
value for every sample (from all parameters)
* `max` - maximal value
* `sum` - sum of values
**Returns:**
* ELBND values (1d array). This vector has the same length as `w`.
|
[
"This",
"function",
"estimates",
"Error",
"and",
"Learning",
"Based",
"Novelty",
"Detection",
"measure",
"from",
"given",
"data",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/detection/elbnd.py#L93-L138
|
train
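A minimal usage sketch for the ELBND function in this record (assumptions: the definition above is in scope, e.g. pasted into a script, and numpy is imported at module level; the arrays below are synthetic):

import numpy as np

# synthetic history of 4 adaptive weights over 200 samples (a slow random walk)
rng = np.random.default_rng(0)
w = np.cumsum(rng.normal(0.0, 0.05, (200, 4)), axis=0)
# synthetic error of the adaptive model, one value per sample
e = rng.normal(0.0, 1.0, 200)

# one novelty value per sample; "max" keeps the largest per-weight contribution
novelty = ELBND(w, e, function="max")
print(novelty.shape)  # (200,)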
|
matousc89/padasip
|
padasip/preprocess/lda.py
|
LDA_base
|
def LDA_base(x, labels):
"""
Base function used for Linear Discriminant Analysis.
**Args:**
* `x` : input matrix (2d array), every row represents new sample
* `labels` : list of labels (iterable), every item should be label for \
sample with corresponding index
**Returns:**
* `eigenvalues`, `eigenvectors` : eigenvalues and eigenvectors \
from LDA analysis
"""
classes = np.array(tuple(set(labels)))
cols = x.shape[1]
# mean values for every class
means = np.zeros((len(classes), cols))
for i, cl in enumerate(classes):
means[i] = np.mean(x[labels==cl], axis=0)
# scatter matrices
scatter_within = np.zeros((cols, cols))
for cl, mean in zip(classes, means):
scatter_class = np.zeros((cols, cols))
for row in x[labels == cl]:
dif = row - mean
scatter_class += np.dot(dif.reshape(cols, 1), dif.reshape(1, cols))
scatter_within += scatter_class
total_mean = np.mean(x, axis=0)
scatter_between = np.zeros((cols, cols))
for cl, mean in zip(classes, means):
dif = mean - total_mean
dif_product = np.dot(dif.reshape(cols, 1), dif.reshape(1, cols))
scatter_between += x[labels == cl, :].shape[0] * dif_product
# eigenvalues and eigenvectors from scatter matrices
scatter_product = np.dot(np.linalg.inv(scatter_within), scatter_between)
eigen_values, eigen_vectors = np.linalg.eig(scatter_product)
return eigen_values, eigen_vectors
|
python
|
def LDA_base(x, labels):
"""
Base function used for Linear Discriminant Analysis.
**Args:**
* `x` : input matrix (2d array), every row represents new sample
* `labels` : list of labels (iterable), every item should be label for \
sample with corresponding index
**Returns:**
* `eigenvalues`, `eigenvectors` : eigenvalues and eigenvectors \
from LDA analysis
"""
classes = np.array(tuple(set(labels)))
cols = x.shape[1]
# mean values for every class
means = np.zeros((len(classes), cols))
for i, cl in enumerate(classes):
means[i] = np.mean(x[labels==cl], axis=0)
# scatter matrices
scatter_within = np.zeros((cols, cols))
for cl, mean in zip(classes, means):
scatter_class = np.zeros((cols, cols))
for row in x[labels == cl]:
dif = row - mean
scatter_class += np.dot(dif.reshape(cols, 1), dif.reshape(1, cols))
scatter_within += scatter_class
total_mean = np.mean(x, axis=0)
scatter_between = np.zeros((cols, cols))
for cl, mean in zip(classes, means):
dif = mean - total_mean
dif_product = np.dot(dif.reshape(cols, 1), dif.reshape(1, cols))
scatter_between += x[labels == cl, :].shape[0] * dif_product
# eigenvalues and eigenvectors from scatter matrices
scatter_product = np.dot(np.linalg.inv(scatter_within), scatter_between)
eigen_values, eigen_vectors = np.linalg.eig(scatter_product)
return eigen_values, eigen_vectors
|
[
"def",
"LDA_base",
"(",
"x",
",",
"labels",
")",
":",
"classes",
"=",
"np",
".",
"array",
"(",
"tuple",
"(",
"set",
"(",
"labels",
")",
")",
")",
"cols",
"=",
"x",
".",
"shape",
"[",
"1",
"]",
"# mean values for every class",
"means",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"classes",
")",
",",
"cols",
")",
")",
"for",
"i",
",",
"cl",
"in",
"enumerate",
"(",
"classes",
")",
":",
"means",
"[",
"i",
"]",
"=",
"np",
".",
"mean",
"(",
"x",
"[",
"labels",
"==",
"cl",
"]",
",",
"axis",
"=",
"0",
")",
"# scatter matrices",
"scatter_within",
"=",
"np",
".",
"zeros",
"(",
"(",
"cols",
",",
"cols",
")",
")",
"for",
"cl",
",",
"mean",
"in",
"zip",
"(",
"classes",
",",
"means",
")",
":",
"scatter_class",
"=",
"np",
".",
"zeros",
"(",
"(",
"cols",
",",
"cols",
")",
")",
"for",
"row",
"in",
"x",
"[",
"labels",
"==",
"cl",
"]",
":",
"dif",
"=",
"row",
"-",
"mean",
"scatter_class",
"+=",
"np",
".",
"dot",
"(",
"dif",
".",
"reshape",
"(",
"cols",
",",
"1",
")",
",",
"dif",
".",
"reshape",
"(",
"1",
",",
"cols",
")",
")",
"scatter_within",
"+=",
"scatter_class",
"total_mean",
"=",
"np",
".",
"mean",
"(",
"x",
",",
"axis",
"=",
"0",
")",
"scatter_between",
"=",
"np",
".",
"zeros",
"(",
"(",
"cols",
",",
"cols",
")",
")",
"for",
"cl",
",",
"mean",
"in",
"zip",
"(",
"classes",
",",
"means",
")",
":",
"dif",
"=",
"mean",
"-",
"total_mean",
"dif_product",
"=",
"np",
".",
"dot",
"(",
"dif",
".",
"reshape",
"(",
"cols",
",",
"1",
")",
",",
"dif",
".",
"reshape",
"(",
"1",
",",
"cols",
")",
")",
"scatter_between",
"+=",
"x",
"[",
"labels",
"==",
"cl",
",",
":",
"]",
".",
"shape",
"[",
"0",
"]",
"*",
"dif_product",
"# eigenvalues and eigenvectors from scatter matrices",
"scatter_product",
"=",
"np",
".",
"dot",
"(",
"np",
".",
"linalg",
".",
"inv",
"(",
"scatter_within",
")",
",",
"scatter_between",
")",
"eigen_values",
",",
"eigen_vectors",
"=",
"np",
".",
"linalg",
".",
"eig",
"(",
"scatter_product",
")",
"return",
"eigen_values",
",",
"eigen_vectors"
] |
Base function used for Linear Discriminant Analysis.
**Args:**
* `x` : input matrix (2d array), every row represents new sample
* `labels` : list of labels (iterable), every item should be label for \
sample with corresponding index
**Returns:**
* `eigenvalues`, `eigenvectors` : eigenvalues and eigenvectors \
from LDA analysis
|
[
"Base",
"function",
"used",
"for",
"Linear",
"Discriminant",
"Analysis",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/preprocess/lda.py#L104-L144
|
train
|
matousc89/padasip
|
padasip/preprocess/lda.py
|
LDA
|
def LDA(x, labels, n=False):
"""
Linear Discriminant Analysis function.
**Args:**
* `x` : input matrix (2d array), every row represents new sample
* `labels` : list of labels (iterable), every item should be label for \
sample with corresponding index
**Kwargs:**
* `n` : number of features returned (integer) - how many columns
should the output keep
**Returns:**
* new_x : matrix with reduced size (number of columns equals `n`)
"""
# select n if not provided
if not n:
n = x.shape[1] - 1
# validate inputs
try:
x = np.array(x)
except:
raise ValueError('Impossible to convert x to a numpy array.')
assert type(n) == int, "Provided n is not an integer."
assert x.shape[1] > n, "The requested n is bigger than \
number of features in x."
# make the LDA
eigen_values, eigen_vectors = LDA_base(x, labels)
# sort the eigen vectors according to eigen values
eigen_order = eigen_vectors.T[(-eigen_values).argsort()]
return eigen_order[:n].dot(x.T).T
|
python
|
def LDA(x, labels, n=False):
"""
Linear Discriminant Analysis function.
**Args:**
* `x` : input matrix (2d array), every row represents new sample
* `labels` : list of labels (iterable), every item should be label for \
sample with corresponding index
**Kwargs:**
* `n` : number of features returned (integer) - how many columns
should the output keep
**Returns:**
* new_x : matrix with reduced size (number of columns equals `n`)
"""
# select n if not provided
if not n:
n = x.shape[1] - 1
# validate inputs
try:
x = np.array(x)
except:
raise ValueError('Impossible to convert x to a numpy array.')
assert type(n) == int, "Provided n is not an integer."
assert x.shape[1] > n, "The requested n is bigger than \
number of features in x."
# make the LDA
eigen_values, eigen_vectors = LDA_base(x, labels)
# sort the eigen vectors according to eigen values
eigen_order = eigen_vectors.T[(-eigen_values).argsort()]
return eigen_order[:n].dot(x.T).T
|
[
"def",
"LDA",
"(",
"x",
",",
"labels",
",",
"n",
"=",
"False",
")",
":",
"# select n if not provided",
"if",
"not",
"n",
":",
"n",
"=",
"x",
".",
"shape",
"[",
"1",
"]",
"-",
"1",
"# validate inputs",
"try",
":",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'Impossible to convert x to a numpy array.'",
")",
"assert",
"type",
"(",
"n",
")",
"==",
"int",
",",
"\"Provided n is not an integer.\"",
"assert",
"x",
".",
"shape",
"[",
"1",
"]",
">",
"n",
",",
"\"The requested n is bigger than \\\n number of features in x.\"",
"# make the LDA",
"eigen_values",
",",
"eigen_vectors",
"=",
"LDA_base",
"(",
"x",
",",
"labels",
")",
"# sort the eigen vectors according to eigen values",
"eigen_order",
"=",
"eigen_vectors",
".",
"T",
"[",
"(",
"-",
"eigen_values",
")",
".",
"argsort",
"(",
")",
"]",
"return",
"eigen_order",
"[",
":",
"n",
"]",
".",
"dot",
"(",
"x",
".",
"T",
")",
".",
"T"
] |
Linear Discriminant Analysis function.
**Args:**
* `x` : input matrix (2d array), every row represents new sample
* `labels` : list of labels (iterable), every item should be label for \
sample with corresponding index
**Kwargs:**
* `n` : number of features returned (integer) - how many columns
should the output keep
**Returns:**
* new_x : matrix with reduced size (number of columns equals `n`)
|
[
"Linear",
"Discriminant",
"Analysis",
"function",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/preprocess/lda.py#L146-L181
|
train
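A minimal usage sketch for the LDA function in this record (assumptions: the LDA and LDA_base definitions above are in scope and numpy is imported at module level; the data and labels below are synthetic):

import numpy as np

rng = np.random.default_rng(1)
# 150 samples, 4 features, three classes of 50 samples each
x = rng.normal(0.0, 1.0, (150, 4))
x[50:100] += 2.0  # shift class 1 so the classes are separable
x[100:] -= 2.0    # shift class 2
labels = np.repeat(np.array([0, 1, 2]), 50)

# keep the two most discriminative directions
new_x = LDA(x, labels, n=2)
print(new_x.shape)  # (150, 2)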
|
matousc89/padasip
|
padasip/preprocess/lda.py
|
LDA_discriminants
|
def LDA_discriminants(x, labels):
"""
Linear Discriminant Analysis helper for determining how many columns of
data should be reduced.
**Args:**
* `x` : input matrix (2d array), every row represents new sample
* `labels` : list of labels (iterable), every item should be label for \
sample with corresponding index
**Returns:**
* `discriminants` : array of eigenvalues sorted in descending order
"""
# validate inputs
try:
x = np.array(x)
except:
raise ValueError('Impossible to convert x to a numpy array.')
# make the LDA
eigen_values, eigen_vectors = LDA_base(x, labels)
return eigen_values[(-eigen_values).argsort()]
|
python
|
def LDA_discriminants(x, labels):
"""
Linear Discriminant Analysis helper for determining how many columns of
data should be reduced.
**Args:**
* `x` : input matrix (2d array), every row represents new sample
* `labels` : list of labels (iterable), every item should be label for \
sample with corresponding index
**Returns:**
* `discriminants` : array of eigenvalues sorted in descending order
"""
# validate inputs
try:
x = np.array(x)
except:
raise ValueError('Impossible to convert x to a numpy array.')
# make the LDA
eigen_values, eigen_vectors = LDA_base(x, labels)
return eigen_values[(-eigen_values).argsort()]
|
[
"def",
"LDA_discriminants",
"(",
"x",
",",
"labels",
")",
":",
"# validate inputs",
"try",
":",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'Impossible to convert x to a numpy array.'",
")",
"# make the LDA",
"eigen_values",
",",
"eigen_vectors",
"=",
"LDA_base",
"(",
"x",
",",
"labels",
")",
"return",
"eigen_values",
"[",
"(",
"-",
"eigen_values",
")",
".",
"argsort",
"(",
")",
"]"
] |
Linear Discriminant Analysis helper for determining how many columns of
data should be reduced.
**Args:**
* `x` : input matrix (2d array), every row represents new sample
* `labels` : list of labels (iterable), every item should be label for \
sample with corresponding index
**Returns:**
* `discriminants` : array of eigenvalues sorted in descending order
|
[
"Linear",
"Discriminant",
"Analysis",
"helper",
"for",
"determination",
"how",
"many",
"columns",
"of",
"data",
"should",
"be",
"reduced",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/preprocess/lda.py#L184-L208
|
train
|
matousc89/padasip
|
padasip/filters/ocnlms.py
|
FilterOCNLMS.read_memory
|
def read_memory(self):
"""
This function reads the mean value of target `d`
and input vector `x` from history
"""
if self.mem_empty == True:
if self.mem_idx == 0:
m_x = np.zeros(self.n)
m_d = 0
else:
m_x = np.mean(self.mem_x[:self.mem_idx+1], axis=0)
m_d = np.mean(self.mem_d[:self.mem_idx])
else:
m_x = np.mean(self.mem_x, axis=0)
m_d = np.mean(np.delete(self.mem_d, self.mem_idx))
self.mem_idx += 1
if self.mem_idx > len(self.mem_x)-1:
self.mem_idx = 0
self.mem_empty = False
return m_d, m_x
|
python
|
def read_memory(self):
"""
This function reads the mean value of target `d`
and input vector `x` from history
"""
if self.mem_empty == True:
if self.mem_idx == 0:
m_x = np.zeros(self.n)
m_d = 0
else:
m_x = np.mean(self.mem_x[:self.mem_idx+1], axis=0)
m_d = np.mean(self.mem_d[:self.mem_idx])
else:
m_x = np.mean(self.mem_x, axis=0)
m_d = np.mean(np.delete(self.mem_d, self.mem_idx))
self.mem_idx += 1
if self.mem_idx > len(self.mem_x)-1:
self.mem_idx = 0
self.mem_empty = False
return m_d, m_x
|
[
"def",
"read_memory",
"(",
"self",
")",
":",
"if",
"self",
".",
"mem_empty",
"==",
"True",
":",
"if",
"self",
".",
"mem_idx",
"==",
"0",
":",
"m_x",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"n",
")",
"m_d",
"=",
"0",
"else",
":",
"m_x",
"=",
"np",
".",
"mean",
"(",
"self",
".",
"mem_x",
"[",
":",
"self",
".",
"mem_idx",
"+",
"1",
"]",
",",
"axis",
"=",
"0",
")",
"m_d",
"=",
"np",
".",
"mean",
"(",
"self",
".",
"mem_d",
"[",
":",
"self",
".",
"mem_idx",
"]",
")",
"else",
":",
"m_x",
"=",
"np",
".",
"mean",
"(",
"self",
".",
"mem_x",
",",
"axis",
"=",
"0",
")",
"m_d",
"=",
"np",
".",
"mean",
"(",
"np",
".",
"delete",
"(",
"self",
".",
"mem_d",
",",
"self",
".",
"mem_idx",
")",
")",
"self",
".",
"mem_idx",
"+=",
"1",
"if",
"self",
".",
"mem_idx",
">",
"len",
"(",
"self",
".",
"mem_x",
")",
"-",
"1",
":",
"self",
".",
"mem_idx",
"=",
"0",
"self",
".",
"mem_empty",
"=",
"False",
"return",
"m_d",
",",
"m_x"
] |
This function reads the mean value of target `d`
and input vector `x` from history
|
[
"This",
"function",
"read",
"mean",
"value",
"of",
"target",
"d",
"and",
"input",
"vector",
"x",
"from",
"history"
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/ocnlms.py#L86-L105
|
train
|
matousc89/padasip
|
padasip/detection/le.py
|
learning_entropy
|
def learning_entropy(w, m=10, order=1, alpha=False):
"""
This function estimates Learning Entropy.
**Args:**
* `w` : history of adaptive parameters of an adaptive model (2d array),
every row represents parameters in given time index.
**Kwargs:**
* `m` : window size (int) - how many last samples are used for
evaluation of every sample.
* `order` : order of the LE (int) - order of weights differentiation
* `alpha` : list of sensitivities (1d array). If not provided, the LE
direct approach is used.
**Returns:**
* Learning Entropy of data (1d array) - one value for every sample
"""
w = np.array(w)
# get length of data and number of parameters
N = w.shape[0]
n = w.shape[1]
# get abs dw from w
dw = np.copy(w)
dw[order:] = np.abs(np.diff(dw, n=order, axis=0))
# average floating window - window is k-m ... k-1
awd = np.zeros(w.shape)
if not alpha:
# estimate the ALPHA with multiscale approach
swd = np.zeros(w.shape)
for k in range(m, N):
awd[k] = np.mean(dw[k-m:k], axis=0)
swd[k] = np.std(dw[k-m:k], axis=0)
# estimate the points of entropy
eps = 1e-10 # regularization term
le = (dw - awd) / (swd+eps)
else:
# estimate the ALPHA with direct approach
for k in range(m, N):
awd[k] = np.mean(dw[k-m:k], axis=0)
# estimate the points of entropy
alphas = np.array(alpha)
fh = np.zeros(N)
for alpha in alphas:
fh += np.sum(awd*alpha < dw, axis=1)
le = fh / float(n*len(alphas))
# clear unknown zone at the beginning
le[:m] = 0
# return output
return le
|
python
|
def learning_entropy(w, m=10, order=1, alpha=False):
"""
This function estimates Learning Entropy.
**Args:**
* `w` : history of adaptive parameters of an adaptive model (2d array),
every row represents parameters in given time index.
**Kwargs:**
* `m` : window size (int) - how many last samples are used for
evaluation of every sample.
* `order` : order of the LE (int) - order of weights differentiation
* `alpha` : list of sensitivities (1d array). If not provided, the LE
direct approach is used.
**Returns:**
* Learning Entropy of data (1d array) - one value for every sample
"""
w = np.array(w)
# get length of data and number of parameters
N = w.shape[0]
n = w.shape[1]
# get abs dw from w
dw = np.copy(w)
dw[order:] = np.abs(np.diff(dw, n=order, axis=0))
# average floating window - window is k-m ... k-1
awd = np.zeros(w.shape)
if not alpha:
# estimate the ALPHA with multiscale approach
swd = np.zeros(w.shape)
for k in range(m, N):
awd[k] = np.mean(dw[k-m:k], axis=0)
swd[k] = np.std(dw[k-m:k], axis=0)
# estimate the points of entropy
eps = 1e-10 # regularization term
le = (dw - awd) / (swd+eps)
else:
# estimate the ALPHA with direct approach
for k in range(m, N):
awd[k] = np.mean(dw[k-m:k], axis=0)
# estimate the points of entropy
alphas = np.array(alpha)
fh = np.zeros(N)
for alpha in alphas:
fh += np.sum(awd*alpha < dw, axis=1)
le = fh / float(n*len(alphas))
# clear unknown zone at the beginning
le[:m] = 0
# return output
return le
|
[
"def",
"learning_entropy",
"(",
"w",
",",
"m",
"=",
"10",
",",
"order",
"=",
"1",
",",
"alpha",
"=",
"False",
")",
":",
"w",
"=",
"np",
".",
"array",
"(",
"w",
")",
"# get length of data and number of parameters",
"N",
"=",
"w",
".",
"shape",
"[",
"0",
"]",
"n",
"=",
"w",
".",
"shape",
"[",
"1",
"]",
"# get abs dw from w",
"dw",
"=",
"np",
".",
"copy",
"(",
"w",
")",
"dw",
"[",
"order",
":",
"]",
"=",
"np",
".",
"abs",
"(",
"np",
".",
"diff",
"(",
"dw",
",",
"n",
"=",
"order",
",",
"axis",
"=",
"0",
")",
")",
"# average floting window - window is k-m ... k-1",
"awd",
"=",
"np",
".",
"zeros",
"(",
"w",
".",
"shape",
")",
"if",
"not",
"alpha",
":",
"# estimate the ALPHA with multiscale approach",
"swd",
"=",
"np",
".",
"zeros",
"(",
"w",
".",
"shape",
")",
"for",
"k",
"in",
"range",
"(",
"m",
",",
"N",
")",
":",
"awd",
"[",
"k",
"]",
"=",
"np",
".",
"mean",
"(",
"dw",
"[",
"k",
"-",
"m",
":",
"k",
"]",
",",
"axis",
"=",
"0",
")",
"swd",
"[",
"k",
"]",
"=",
"np",
".",
"std",
"(",
"dw",
"[",
"k",
"-",
"m",
":",
"k",
"]",
",",
"axis",
"=",
"0",
")",
"# estimate the points of entropy",
"eps",
"=",
"1e-10",
"# regularization term",
"le",
"=",
"(",
"dw",
"-",
"awd",
")",
"/",
"(",
"swd",
"+",
"eps",
")",
"else",
":",
"# estimate the ALPHA with direct approach",
"for",
"k",
"in",
"range",
"(",
"m",
",",
"N",
")",
":",
"awd",
"[",
"k",
"]",
"=",
"np",
".",
"mean",
"(",
"dw",
"[",
"k",
"-",
"m",
":",
"k",
"]",
",",
"axis",
"=",
"0",
")",
"# estimate the points of entropy",
"alphas",
"=",
"np",
".",
"array",
"(",
"alpha",
")",
"fh",
"=",
"np",
".",
"zeros",
"(",
"N",
")",
"for",
"alpha",
"in",
"alphas",
":",
"fh",
"+=",
"np",
".",
"sum",
"(",
"awd",
"*",
"alpha",
"<",
"dw",
",",
"axis",
"=",
"1",
")",
"le",
"=",
"fh",
"/",
"float",
"(",
"n",
"*",
"len",
"(",
"alphas",
")",
")",
"# clear unknown zone on begining",
"le",
"[",
":",
"m",
"]",
"=",
"0",
"# return output",
"return",
"le"
] |
This function estimates Learning Entropy.
**Args:**
* `w` : history of adaptive parameters of an adaptive model (2d array),
every row represents parameters in given time index.
**Kwargs:**
* `m` : window size (int) - how many last samples are used for
evaluation of every sample.
* `order` : order of the LE (int) - order of weights differentiation
* `alpha` : list of sensitivities (1d array). If not provided, the LE
direct approach is used.
**Returns:**
* Learning Entropy of data (1d array) - one value for every sample
|
[
"This",
"function",
"estimates",
"Learning",
"Entropy",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/detection/le.py#L145-L200
|
train
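A minimal usage sketch for the learning_entropy function in this record (assumptions: the definition above is in scope and numpy is imported at module level; the weight history is synthetic and the sensitivity list is an arbitrary example):

import numpy as np

rng = np.random.default_rng(2)
# history of 3 adaptive weights over 300 samples (a slow random walk)
w = np.cumsum(rng.normal(0.0, 0.01, (300, 3)), axis=0)
w[150:] += rng.normal(0.0, 0.2, (150, 3))  # disturb the second half of the history

# direct LE evaluation with a list of sensitivities - one value per sample
le = learning_entropy(w, m=30, order=1, alpha=[8., 9., 10., 11., 12., 13.])
print(le.shape)  # (300,)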
|
matousc89/padasip
|
padasip/ann/mlp.py
|
Layer.activation
|
def activation(self, x, f="sigmoid", der=False):
"""
This function processes values of layer outputs with activation function.
**Args:**
* `x` : array to process (1-dimensional array)
**Kwargs:**
* `f` : activation function
* `der` : normal output, or its derivative (bool)
**Returns:**
* values processed with activation function (1-dimensional array)
"""
if f == "sigmoid":
if der:
return x * (1 - x)
return 1. / (1 + np.exp(-x))
elif f == "tanh":
if der:
return 1 - x**2
return (2. / (1 + np.exp(-2*x))) - 1
|
python
|
def activation(self, x, f="sigmoid", der=False):
"""
This function processes values of layer outputs with activation function.
**Args:**
* `x` : array to process (1-dimensional array)
**Kwargs:**
* `f` : activation function
* `der` : normal output, or its derivative (bool)
**Returns:**
* values processed with activation function (1-dimensional array)
"""
if f == "sigmoid":
if der:
return x * (1 - x)
return 1. / (1 + np.exp(-x))
elif f == "tanh":
if der:
return 1 - x**2
return (2. / (1 + np.exp(-2*x))) - 1
|
[
"def",
"activation",
"(",
"self",
",",
"x",
",",
"f",
"=",
"\"sigmoid\"",
",",
"der",
"=",
"False",
")",
":",
"if",
"f",
"==",
"\"sigmoid\"",
":",
"if",
"der",
":",
"return",
"x",
"*",
"(",
"1",
"-",
"x",
")",
"return",
"1.",
"/",
"(",
"1",
"+",
"np",
".",
"exp",
"(",
"-",
"x",
")",
")",
"elif",
"f",
"==",
"\"tanh\"",
":",
"if",
"der",
":",
"return",
"1",
"-",
"x",
"**",
"2",
"return",
"(",
"2.",
"/",
"(",
"1",
"+",
"np",
".",
"exp",
"(",
"-",
"2",
"*",
"x",
")",
")",
")",
"-",
"1"
] |
This function processes values of layer outputs with activation function.
**Args:**
* `x` : array to process (1-dimensional array)
**Kwargs:**
* `f` : activation function
* `der` : normal output, or its derivative (bool)
**Returns:**
* values processed with activation function (1-dimensional array)
|
[
"This",
"function",
"process",
"values",
"of",
"layer",
"outputs",
"with",
"activation",
"function",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/ann/mlp.py#L126-L152
|
train
|
matousc89/padasip
|
padasip/ann/mlp.py
|
NetworkMLP.train
|
def train(self, x, d, epochs=10, shuffle=False):
"""
Function for batch training of MLP.
**Args:**
* `x` : input array (2-dimensional array).
Every row represents one input vector (features).
* `d` : input array (n-dimensional array).
Every row represents target for one input vector.
Target can be one or more values (in case of multiple outputs).
**Kwargs:**
* `epochs` : amount of epochs (int). That means how many times
the MLP will iterate over the passed set of data (`x`, `d`).
* `shuffle` : if True, the order of inputs and outputs is shuffled (bool).
That means the pairs input-output are in different order in every epoch.
**Returns:**
* `e`: output vector (m-dimensional array). Every row represents
error (or errors) for an input and output in given epoch.
The size of this array is length of provided data times
amount of epochs (`N*epochs`).
* `MSE` : mean squared error (1-dimensional array). Every value
stands for MSE of one epoch.
"""
# measure the data and check if the dimensions agree
N = len(x)
if not len(d) == N:
raise ValueError('The length of vector d and matrix x must agree.')
if not len(x[0]) == self.n_input:
raise ValueError('The number of network inputs is not correct.')
if self.outputs == 1:
if not len(d.shape) == 1:
raise ValueError('For one output MLP the d must have one dimension')
else:
if not d.shape[1] == self.outputs:
raise ValueError('The number of outputs must agree with number of columns in d')
try:
x = np.array(x)
d = np.array(d)
except:
raise ValueError('Impossible to convert x or d to a numpy array')
# create empty arrays
if self.outputs == 1:
e = np.zeros(epochs*N)
else:
e = np.zeros((epochs*N, self.outputs))
MSE = np.zeros(epochs)
# shuffle data if demanded
if shuffle:
randomize = np.arange(len(x))
np.random.shuffle(randomize)
x = x[randomize]
d = d[randomize]
# adaptation loop
for epoch in range(epochs):
for k in range(N):
self.predict(x[k])
e[(epoch*N)+k] = self.update(d[k])
MSE[epoch] = np.sum(e[epoch*N:(epoch+1)*N-1]**2) / N
return e, MSE
|
python
|
def train(self, x, d, epochs=10, shuffle=False):
"""
Function for batch training of MLP.
**Args:**
* `x` : input array (2-dimensional array).
Every row represents one input vector (features).
* `d` : input array (n-dimensional array).
Every row represents target for one input vector.
Target can be one or more values (in case of multiple outputs).
**Kwargs:**
* `epochs` : amount of epochs (int). That means how many times
the MLP will iterate over the passed set of data (`x`, `d`).
* `shuffle` : if True, the order of inputs and outputs is shuffled (bool).
That means the pairs input-output are in different order in every epoch.
**Returns:**
* `e`: output vector (m-dimensional array). Every row represents
error (or errors) for an input and output in given epoch.
The size of this array is length of provided data times
amount of epochs (`N*epochs`).
* `MSE` : mean squared error (1-dimensional array). Every value
stands for MSE of one epoch.
"""
# measure the data and check if the dimensions agree
N = len(x)
if not len(d) == N:
raise ValueError('The length of vector d and matrix x must agree.')
if not len(x[0]) == self.n_input:
raise ValueError('The number of network inputs is not correct.')
if self.outputs == 1:
if not len(d.shape) == 1:
raise ValueError('For one output MLP the d must have one dimension')
else:
if not d.shape[1] == self.outputs:
raise ValueError('The number of outputs must agree with number of columns in d')
try:
x = np.array(x)
d = np.array(d)
except:
raise ValueError('Impossible to convert x or d to a numpy array')
# create empty arrays
if self.outputs == 1:
e = np.zeros(epochs*N)
else:
e = np.zeros((epochs*N, self.outputs))
MSE = np.zeros(epochs)
# shuffle data if demanded
if shuffle:
randomize = np.arange(len(x))
np.random.shuffle(randomize)
x = x[randomize]
d = d[randomize]
# adaptation loop
for epoch in range(epochs):
for k in range(N):
self.predict(x[k])
e[(epoch*N)+k] = self.update(d[k])
MSE[epoch] = np.sum(e[epoch*N:(epoch+1)*N-1]**2) / N
return e, MSE
|
[
"def",
"train",
"(",
"self",
",",
"x",
",",
"d",
",",
"epochs",
"=",
"10",
",",
"shuffle",
"=",
"False",
")",
":",
"# measure the data and check if the dimmension agree",
"N",
"=",
"len",
"(",
"x",
")",
"if",
"not",
"len",
"(",
"d",
")",
"==",
"N",
":",
"raise",
"ValueError",
"(",
"'The length of vector d and matrix x must agree.'",
")",
"if",
"not",
"len",
"(",
"x",
"[",
"0",
"]",
")",
"==",
"self",
".",
"n_input",
":",
"raise",
"ValueError",
"(",
"'The number of network inputs is not correct.'",
")",
"if",
"self",
".",
"outputs",
"==",
"1",
":",
"if",
"not",
"len",
"(",
"d",
".",
"shape",
")",
"==",
"1",
":",
"raise",
"ValueError",
"(",
"'For one output MLP the d must have one dimension'",
")",
"else",
":",
"if",
"not",
"d",
".",
"shape",
"[",
"1",
"]",
"==",
"self",
".",
"outputs",
":",
"raise",
"ValueError",
"(",
"'The number of outputs must agree with number of columns in d'",
")",
"try",
":",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"d",
"=",
"np",
".",
"array",
"(",
"d",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'Impossible to convert x or d to a numpy array'",
")",
"# create empty arrays",
"if",
"self",
".",
"outputs",
"==",
"1",
":",
"e",
"=",
"np",
".",
"zeros",
"(",
"epochs",
"*",
"N",
")",
"else",
":",
"e",
"=",
"np",
".",
"zeros",
"(",
"(",
"epochs",
"*",
"N",
",",
"self",
".",
"outputs",
")",
")",
"MSE",
"=",
"np",
".",
"zeros",
"(",
"epochs",
")",
"# shuffle data if demanded",
"if",
"shuffle",
":",
"randomize",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"x",
")",
")",
"np",
".",
"random",
".",
"shuffle",
"(",
"randomize",
")",
"x",
"=",
"x",
"[",
"randomize",
"]",
"d",
"=",
"d",
"[",
"randomize",
"]",
"# adaptation loop",
"for",
"epoch",
"in",
"range",
"(",
"epochs",
")",
":",
"for",
"k",
"in",
"range",
"(",
"N",
")",
":",
"self",
".",
"predict",
"(",
"x",
"[",
"k",
"]",
")",
"e",
"[",
"(",
"epoch",
"*",
"N",
")",
"+",
"k",
"]",
"=",
"self",
".",
"update",
"(",
"d",
"[",
"k",
"]",
")",
"MSE",
"[",
"epoch",
"]",
"=",
"np",
".",
"sum",
"(",
"e",
"[",
"epoch",
"*",
"N",
":",
"(",
"epoch",
"+",
"1",
")",
"*",
"N",
"-",
"1",
"]",
"**",
"2",
")",
"/",
"N",
"return",
"e",
",",
"MSE"
] |
Function for batch training of MLP.
**Args:**
* `x` : input array (2-dimensional array).
Every row represents one input vector (features).
* `d` : input array (n-dimensional array).
Every row represents target for one input vector.
Target can be one or more values (in case of multiple outputs).
**Kwargs:**
* `epochs` : amount of epochs (int). That means how many times
the MLP will iterate over the passed set of data (`x`, `d`).
* `shuffle` : if True, the order of inputs and outputs is shuffled (bool).
That means the pairs input-output are in different order in every epoch.
**Returns:**
* `e`: output vector (m-dimensional array). Every row represents
error (or errors) for an input and output in given epoch.
The size of this array is length of provided data times
amount of epochs (`N*epochs`).
* `MSE` : mean squared error (1-dimensional array). Every value
stands for MSE of one epoch.
|
[
"Function",
"for",
"batch",
"training",
"of",
"MLP",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/ann/mlp.py#L267-L334
|
train
|
matousc89/padasip
|
padasip/ann/mlp.py
|
NetworkMLP.run
|
def run(self, x):
"""
Function for batch usage of already trained and tested MLP.
**Args:**
* `x` : input array (2-dimensional array).
Every row represents one input vector (features).
**Returns:**
* `y`: output vector (n-dimensional array). Every row represents
output (outputs) for an input vector.
"""
# measure the data and check if the dimensions agree
try:
x = np.array(x)
except:
raise ValueError('Impossible to convert x to a numpy array')
N = len(x)
# create empty arrays
if self.outputs == 1:
y = np.zeros(N)
else:
y = np.zeros((N, self.outputs))
# predict data in loop
for k in range(N):
y[k] = self.predict(x[k])
return y
|
python
|
def run(self, x):
"""
Function for batch usage of already trained and tested MLP.
**Args:**
* `x` : input array (2-dimensional array).
Every row represents one input vector (features).
**Returns:**
* `y`: output vector (n-dimensional array). Every row represents
output (outputs) for an input vector.
"""
# measure the data and check if the dimensions agree
try:
x = np.array(x)
except:
raise ValueError('Impossible to convert x to a numpy array')
N = len(x)
# create empty arrays
if self.outputs == 1:
y = np.zeros(N)
else:
y = np.zeros((N, self.outputs))
# predict data in loop
for k in range(N):
y[k] = self.predict(x[k])
return y
|
[
"def",
"run",
"(",
"self",
",",
"x",
")",
":",
"# measure the data and check if the dimmension agree",
"try",
":",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'Impossible to convert x to a numpy array'",
")",
"N",
"=",
"len",
"(",
"x",
")",
"# create empty arrays ",
"if",
"self",
".",
"outputs",
"==",
"1",
":",
"y",
"=",
"np",
".",
"zeros",
"(",
"N",
")",
"else",
":",
"y",
"=",
"np",
".",
"zeros",
"(",
"(",
"N",
",",
"self",
".",
"outputs",
")",
")",
"# predict data in loop ",
"for",
"k",
"in",
"range",
"(",
"N",
")",
":",
"y",
"[",
"k",
"]",
"=",
"self",
".",
"predict",
"(",
"x",
"[",
"k",
"]",
")",
"return",
"y"
] |
Function for batch usage of already trained and tested MLP.
**Args:**
* `x` : input array (2-dimensional array).
Every row represents one input vector (features).
**Returns:**
* `y`: output vector (n-dimensional array). Every row represents
output (outputs) for an input vector.
|
[
"Function",
"for",
"batch",
"usage",
"of",
"already",
"trained",
"and",
"tested",
"MLP",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/ann/mlp.py#L336-L365
|
train
|
matousc89/padasip
|
padasip/preprocess/pca.py
|
PCA_components
|
def PCA_components(x):
"""
Principal Component Analysis helper to check out eigenvalues of components.
**Args:**
* `x` : input matrix (2d array), every row represents new sample
**Returns:**
* `components`: sorted array of principal components eigenvalues
"""
# validate inputs
try:
x = np.array(x)
except:
raise ValueError('Impossible to convert x to a numpy array.')
# eigen values and eigen vectors of data covariance matrix
eigen_values, eigen_vectors = np.linalg.eig(np.cov(x.T))
# sort eigen vectors according biggest eigen value
eigen_order = eigen_vectors.T[(-eigen_values).argsort()]
# form output - order the eigenvalues
return eigen_values[(-eigen_values).argsort()]
|
python
|
def PCA_components(x):
"""
Principal Component Analysis helper to check out eigenvalues of components.
**Args:**
* `x` : input matrix (2d array), every row represents new sample
**Returns:**
* `components`: sorted array of principal components eigenvalues
"""
# validate inputs
try:
x = np.array(x)
except:
raise ValueError('Impossible to convert x to a numpy array.')
# eigen values and eigen vectors of data covariance matrix
eigen_values, eigen_vectors = np.linalg.eig(np.cov(x.T))
# sort eigen vectors according biggest eigen value
eigen_order = eigen_vectors.T[(-eigen_values).argsort()]
# form output - order the eigenvalues
return eigen_values[(-eigen_values).argsort()]
|
[
"def",
"PCA_components",
"(",
"x",
")",
":",
"# validate inputs",
"try",
":",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'Impossible to convert x to a numpy array.'",
")",
"# eigen values and eigen vectors of data covariance matrix",
"eigen_values",
",",
"eigen_vectors",
"=",
"np",
".",
"linalg",
".",
"eig",
"(",
"np",
".",
"cov",
"(",
"x",
".",
"T",
")",
")",
"# sort eigen vectors according biggest eigen value",
"eigen_order",
"=",
"eigen_vectors",
".",
"T",
"[",
"(",
"-",
"eigen_values",
")",
".",
"argsort",
"(",
")",
"]",
"# form output - order the eigenvalues",
"return",
"eigen_values",
"[",
"(",
"-",
"eigen_values",
")",
".",
"argsort",
"(",
")",
"]"
] |
Principal Component Analysis helper to check out eigenvalues of components.
**Args:**
* `x` : input matrix (2d array), every row represents new sample
**Returns:**
* `components`: sorted array of principal components eigenvalues
|
[
"Principal",
"Component",
"Analysis",
"helper",
"to",
"check",
"out",
"eigenvalues",
"of",
"components",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/preprocess/pca.py#L68-L91
|
train
|
matousc89/padasip
|
padasip/preprocess/pca.py
|
PCA
|
def PCA(x, n=False):
"""
Principal component analysis function.
**Args:**
* `x` : input matrix (2d array), every row represents new sample
**Kwargs:**
* `n` : number of features returned (integer) - how many columns
should the output keep
**Returns:**
* `new_x` : matrix with reduced size (lower number of columns)
"""
# select n if not provided
if not n:
n = x.shape[1] - 1
# validate inputs
try:
x = np.array(x)
except:
raise ValueError('Impossible to convert x to a numpy array.')
assert type(n) == int, "Provided n is not an integer."
assert x.shape[1] > n, "The requested n is bigger than \
number of features in x."
# eigen values and eigen vectors of data covariance matrix
eigen_values, eigen_vectors = np.linalg.eig(np.cov(x.T))
# sort eigen vectors according biggest eigen value
eigen_order = eigen_vectors.T[(-eigen_values).argsort()]
# form output - reduced x matrix
return eigen_order[:n].dot(x.T).T
|
python
|
def PCA(x, n=False):
"""
Principal component analysis function.
**Args:**
* `x` : input matrix (2d array), every row represents new sample
**Kwargs:**
* `n` : number of features returned (integer) - how many columns
should the output keep
**Returns:**
* `new_x` : matrix with reduced size (lower number of columns)
"""
# select n if not provided
if not n:
n = x.shape[1] - 1
# validate inputs
try:
x = np.array(x)
except:
raise ValueError('Impossible to convert x to a numpy array.')
assert type(n) == int, "Provided n is not an integer."
assert x.shape[1] > n, "The requested n is bigger than \
number of features in x."
# eigen values and eigen vectors of data covariance matrix
eigen_values, eigen_vectors = np.linalg.eig(np.cov(x.T))
# sort eigen vectors according biggest eigen value
eigen_order = eigen_vectors.T[(-eigen_values).argsort()]
# form output - reduced x matrix
return eigen_order[:n].dot(x.T).T
|
[
"def",
"PCA",
"(",
"x",
",",
"n",
"=",
"False",
")",
":",
"# select n if not provided",
"if",
"not",
"n",
":",
"n",
"=",
"x",
".",
"shape",
"[",
"1",
"]",
"-",
"1",
"# validate inputs",
"try",
":",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'Impossible to convert x to a numpy array.'",
")",
"assert",
"type",
"(",
"n",
")",
"==",
"int",
",",
"\"Provided n is not an integer.\"",
"assert",
"x",
".",
"shape",
"[",
"1",
"]",
">",
"n",
",",
"\"The requested n is bigger than \\\n number of features in x.\"",
"# eigen values and eigen vectors of data covariance matrix",
"eigen_values",
",",
"eigen_vectors",
"=",
"np",
".",
"linalg",
".",
"eig",
"(",
"np",
".",
"cov",
"(",
"x",
".",
"T",
")",
")",
"# sort eigen vectors according biggest eigen value",
"eigen_order",
"=",
"eigen_vectors",
".",
"T",
"[",
"(",
"-",
"eigen_values",
")",
".",
"argsort",
"(",
")",
"]",
"# form output - reduced x matrix",
"return",
"eigen_order",
"[",
":",
"n",
"]",
".",
"dot",
"(",
"x",
".",
"T",
")",
".",
"T"
] |
Principal component analysis function.
**Args:**
* `x` : input matrix (2d array), every row represents new sample
**Kwargs:**
* `n` : number of features returned (integer) - how many columns
should the output keep
**Returns:**
* `new_x` : matrix with reduced size (lower number of columns)
|
[
"Principal",
"component",
"analysis",
"function",
"."
] |
c969eadd7fa181a84da0554d737fc13c6450d16f
|
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/preprocess/pca.py#L94-L127
|
train
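A minimal usage sketch for the PCA function in this record (assumptions: the definition above is in scope and numpy is imported at module level; the data below is synthetic):

import numpy as np

rng = np.random.default_rng(3)
# 200 samples with 5 features; the last 3 are noisy mixtures of the first 2
base = rng.normal(0.0, 1.0, (200, 2))
mixed = base @ rng.normal(0.0, 1.0, (2, 3)) + 0.1 * rng.normal(0.0, 1.0, (200, 3))
x = np.hstack([base, mixed])

# keep the two strongest principal components
reduced = PCA(x, n=2)
print(reduced.shape)  # (200, 2)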
|
widdowquinn/pyani
|
pyani/pyani_graphics.py
|
clean_axis
|
def clean_axis(axis):
"""Remove ticks, tick labels, and frame from axis"""
axis.get_xaxis().set_ticks([])
axis.get_yaxis().set_ticks([])
for spine in list(axis.spines.values()):
spine.set_visible(False)
|
python
|
def clean_axis(axis):
"""Remove ticks, tick labels, and frame from axis"""
axis.get_xaxis().set_ticks([])
axis.get_yaxis().set_ticks([])
for spine in list(axis.spines.values()):
spine.set_visible(False)
|
[
"def",
"clean_axis",
"(",
"axis",
")",
":",
"axis",
".",
"get_xaxis",
"(",
")",
".",
"set_ticks",
"(",
"[",
"]",
")",
"axis",
".",
"get_yaxis",
"(",
")",
".",
"set_ticks",
"(",
"[",
"]",
")",
"for",
"spine",
"in",
"list",
"(",
"axis",
".",
"spines",
".",
"values",
"(",
")",
")",
":",
"spine",
".",
"set_visible",
"(",
"False",
")"
] |
Remove ticks, tick labels, and frame from axis
|
[
"Remove",
"ticks",
"tick",
"labels",
"and",
"frame",
"from",
"axis"
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L63-L68
|
train
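A minimal usage sketch for the clean_axis helper in this record (assumptions: the definition above is in scope and matplotlib is installed; the Agg backend is used so no display is needed):

import matplotlib
matplotlib.use("Agg")  # headless backend
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
clean_axis(ax)  # strip ticks, tick labels, and the frame from the axis
fig.savefig("blank_axis.png")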
|
widdowquinn/pyani
|
pyani/pyani_graphics.py
|
get_seaborn_colorbar
|
def get_seaborn_colorbar(dfr, classes):
"""Return a colorbar representing classes, for a Seaborn plot.
The aim is to get a pd.Series for the passed dataframe columns,
in the form:
0 colour for class in col 0
1 colour for class in col 1
... colour for class in col ...
n colour for class in col n
"""
levels = sorted(list(set(classes.values())))
paldict = {
lvl: pal
for (lvl, pal) in zip(
levels,
sns.cubehelix_palette(
len(levels), light=0.9, dark=0.1, reverse=True, start=1, rot=-2
),
)
}
lvl_pal = {cls: paldict[lvl] for (cls, lvl) in list(classes.items())}
col_cb = pd.Series(dfr.index).map(lvl_pal)
# The col_cb Series index now has to match the dfr.index, but
# we don't create the Series with this (and if we try, it
# fails) - so change it with this line
col_cb.index = dfr.index
return col_cb
|
python
|
def get_seaborn_colorbar(dfr, classes):
"""Return a colorbar representing classes, for a Seaborn plot.
The aim is to get a pd.Series for the passed dataframe columns,
in the form:
0 colour for class in col 0
1 colour for class in col 1
... colour for class in col ...
n colour for class in col n
"""
levels = sorted(list(set(classes.values())))
paldict = {
lvl: pal
for (lvl, pal) in zip(
levels,
sns.cubehelix_palette(
len(levels), light=0.9, dark=0.1, reverse=True, start=1, rot=-2
),
)
}
lvl_pal = {cls: paldict[lvl] for (cls, lvl) in list(classes.items())}
col_cb = pd.Series(dfr.index).map(lvl_pal)
# The col_cb Series index now has to match the dfr.index, but
# we don't create the Series with this (and if we try, it
# fails) - so change it with this line
col_cb.index = dfr.index
return col_cb
|
[
"def",
"get_seaborn_colorbar",
"(",
"dfr",
",",
"classes",
")",
":",
"levels",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"classes",
".",
"values",
"(",
")",
")",
")",
")",
"paldict",
"=",
"{",
"lvl",
":",
"pal",
"for",
"(",
"lvl",
",",
"pal",
")",
"in",
"zip",
"(",
"levels",
",",
"sns",
".",
"cubehelix_palette",
"(",
"len",
"(",
"levels",
")",
",",
"light",
"=",
"0.9",
",",
"dark",
"=",
"0.1",
",",
"reverse",
"=",
"True",
",",
"start",
"=",
"1",
",",
"rot",
"=",
"-",
"2",
")",
",",
")",
"}",
"lvl_pal",
"=",
"{",
"cls",
":",
"paldict",
"[",
"lvl",
"]",
"for",
"(",
"cls",
",",
"lvl",
")",
"in",
"list",
"(",
"classes",
".",
"items",
"(",
")",
")",
"}",
"col_cb",
"=",
"pd",
".",
"Series",
"(",
"dfr",
".",
"index",
")",
".",
"map",
"(",
"lvl_pal",
")",
"# The col_cb Series index now has to match the dfr.index, but",
"# we don't create the Series with this (and if we try, it",
"# fails) - so change it with this line",
"col_cb",
".",
"index",
"=",
"dfr",
".",
"index",
"return",
"col_cb"
] |
Return a colorbar representing classes, for a Seaborn plot.
The aim is to get a pd.Series for the passed dataframe columns,
in the form:
0 colour for class in col 0
1 colour for class in col 1
... colour for class in col ...
n colour for class in col n
|
[
"Return",
"a",
"colorbar",
"representing",
"classes",
"for",
"a",
"Seaborn",
"plot",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L72-L98
|
train
|
widdowquinn/pyani
|
pyani/pyani_graphics.py
|
get_safe_seaborn_labels
|
def get_safe_seaborn_labels(dfr, labels):
"""Returns labels guaranteed to correspond to the dataframe."""
if labels is not None:
return [labels.get(i, i) for i in dfr.index]
return [i for i in dfr.index]
|
python
|
def get_safe_seaborn_labels(dfr, labels):
"""Returns labels guaranteed to correspond to the dataframe."""
if labels is not None:
return [labels.get(i, i) for i in dfr.index]
return [i for i in dfr.index]
|
[
"def",
"get_safe_seaborn_labels",
"(",
"dfr",
",",
"labels",
")",
":",
"if",
"labels",
"is",
"not",
"None",
":",
"return",
"[",
"labels",
".",
"get",
"(",
"i",
",",
"i",
")",
"for",
"i",
"in",
"dfr",
".",
"index",
"]",
"return",
"[",
"i",
"for",
"i",
"in",
"dfr",
".",
"index",
"]"
] |
Returns labels guaranteed to correspond to the dataframe.
|
[
"Returns",
"labels",
"guaranteed",
"to",
"correspond",
"to",
"the",
"dataframe",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L102-L106
|
train
|
widdowquinn/pyani
|
pyani/pyani_graphics.py
|
get_seaborn_clustermap
|
def get_seaborn_clustermap(dfr, params, title=None, annot=True):
"""Returns a Seaborn clustermap."""
fig = sns.clustermap(
dfr,
cmap=params.cmap,
vmin=params.vmin,
vmax=params.vmax,
col_colors=params.colorbar,
row_colors=params.colorbar,
figsize=(params.figsize, params.figsize),
linewidths=params.linewidths,
xticklabels=params.labels,
yticklabels=params.labels,
annot=annot,
)
fig.cax.yaxis.set_label_position("left")
if title:
fig.cax.set_ylabel(title)
# Rotate ticklabels
fig.ax_heatmap.set_xticklabels(fig.ax_heatmap.get_xticklabels(), rotation=90)
fig.ax_heatmap.set_yticklabels(fig.ax_heatmap.get_yticklabels(), rotation=0)
# Return clustermap
return fig
|
python
|
def get_seaborn_clustermap(dfr, params, title=None, annot=True):
"""Returns a Seaborn clustermap."""
fig = sns.clustermap(
dfr,
cmap=params.cmap,
vmin=params.vmin,
vmax=params.vmax,
col_colors=params.colorbar,
row_colors=params.colorbar,
figsize=(params.figsize, params.figsize),
linewidths=params.linewidths,
xticklabels=params.labels,
yticklabels=params.labels,
annot=annot,
)
fig.cax.yaxis.set_label_position("left")
if title:
fig.cax.set_ylabel(title)
# Rotate ticklabels
fig.ax_heatmap.set_xticklabels(fig.ax_heatmap.get_xticklabels(), rotation=90)
fig.ax_heatmap.set_yticklabels(fig.ax_heatmap.get_yticklabels(), rotation=0)
# Return clustermap
return fig
|
[
"def",
"get_seaborn_clustermap",
"(",
"dfr",
",",
"params",
",",
"title",
"=",
"None",
",",
"annot",
"=",
"True",
")",
":",
"fig",
"=",
"sns",
".",
"clustermap",
"(",
"dfr",
",",
"cmap",
"=",
"params",
".",
"cmap",
",",
"vmin",
"=",
"params",
".",
"vmin",
",",
"vmax",
"=",
"params",
".",
"vmax",
",",
"col_colors",
"=",
"params",
".",
"colorbar",
",",
"row_colors",
"=",
"params",
".",
"colorbar",
",",
"figsize",
"=",
"(",
"params",
".",
"figsize",
",",
"params",
".",
"figsize",
")",
",",
"linewidths",
"=",
"params",
".",
"linewidths",
",",
"xticklabels",
"=",
"params",
".",
"labels",
",",
"yticklabels",
"=",
"params",
".",
"labels",
",",
"annot",
"=",
"annot",
",",
")",
"fig",
".",
"cax",
".",
"yaxis",
".",
"set_label_position",
"(",
"\"left\"",
")",
"if",
"title",
":",
"fig",
".",
"cax",
".",
"set_ylabel",
"(",
"title",
")",
"# Rotate ticklabels",
"fig",
".",
"ax_heatmap",
".",
"set_xticklabels",
"(",
"fig",
".",
"ax_heatmap",
".",
"get_xticklabels",
"(",
")",
",",
"rotation",
"=",
"90",
")",
"fig",
".",
"ax_heatmap",
".",
"set_yticklabels",
"(",
"fig",
".",
"ax_heatmap",
".",
"get_yticklabels",
"(",
")",
",",
"rotation",
"=",
"0",
")",
"# Return clustermap",
"return",
"fig"
] |
Returns a Seaborn clustermap.
|
[
"Returns",
"a",
"Seaborn",
"clustermap",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L110-L134
|
train
|
widdowquinn/pyani
|
pyani/pyani_graphics.py
|
heatmap_seaborn
|
def heatmap_seaborn(dfr, outfilename=None, title=None, params=None):
"""Returns seaborn heatmap with cluster dendrograms.
- dfr - pandas DataFrame with relevant data
- outfilename - path to output file (indicates output format)
"""
# Decide on figure layout size: a minimum size is required for
# aesthetics, and a maximum to avoid core dumps on rendering.
# If we hit the maximum size, we should modify font size.
maxfigsize = 120
calcfigsize = dfr.shape[0] * 1.1
figsize = min(max(8, calcfigsize), maxfigsize)
if figsize == maxfigsize:
scale = maxfigsize / calcfigsize
sns.set_context("notebook", font_scale=scale)
# Add a colorbar?
if params.classes is None:
col_cb = None
else:
col_cb = get_seaborn_colorbar(dfr, params.classes)
# Labels are defined before we build the clustering
# If a label mapping is missing, use the key text as fall back
params.labels = get_safe_seaborn_labels(dfr, params.labels)
# Add attributes to parameter object, and draw heatmap
params.colorbar = col_cb
params.figsize = figsize
params.linewidths = 0.25
fig = get_seaborn_clustermap(dfr, params, title=title)
# Save to file
if outfilename:
fig.savefig(outfilename)
# Return clustermap
return fig
|
python
|
def heatmap_seaborn(dfr, outfilename=None, title=None, params=None):
"""Returns seaborn heatmap with cluster dendrograms.
- dfr - pandas DataFrame with relevant data
- outfilename - path to output file (indicates output format)
"""
# Decide on figure layout size: a minimum size is required for
# aesthetics, and a maximum to avoid core dumps on rendering.
# If we hit the maximum size, we should modify font size.
maxfigsize = 120
calcfigsize = dfr.shape[0] * 1.1
figsize = min(max(8, calcfigsize), maxfigsize)
if figsize == maxfigsize:
scale = maxfigsize / calcfigsize
sns.set_context("notebook", font_scale=scale)
# Add a colorbar?
if params.classes is None:
col_cb = None
else:
col_cb = get_seaborn_colorbar(dfr, params.classes)
# Labels are defined before we build the clustering
# If a label mapping is missing, use the key text as fall back
params.labels = get_safe_seaborn_labels(dfr, params.labels)
# Add attributes to parameter object, and draw heatmap
params.colorbar = col_cb
params.figsize = figsize
params.linewidths = 0.25
fig = get_seaborn_clustermap(dfr, params, title=title)
# Save to file
if outfilename:
fig.savefig(outfilename)
# Return clustermap
return fig
|
[
"def",
"heatmap_seaborn",
"(",
"dfr",
",",
"outfilename",
"=",
"None",
",",
"title",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"# Decide on figure layout size: a minimum size is required for",
"# aesthetics, and a maximum to avoid core dumps on rendering.",
"# If we hit the maximum size, we should modify font size.",
"maxfigsize",
"=",
"120",
"calcfigsize",
"=",
"dfr",
".",
"shape",
"[",
"0",
"]",
"*",
"1.1",
"figsize",
"=",
"min",
"(",
"max",
"(",
"8",
",",
"calcfigsize",
")",
",",
"maxfigsize",
")",
"if",
"figsize",
"==",
"maxfigsize",
":",
"scale",
"=",
"maxfigsize",
"/",
"calcfigsize",
"sns",
".",
"set_context",
"(",
"\"notebook\"",
",",
"font_scale",
"=",
"scale",
")",
"# Add a colorbar?",
"if",
"params",
".",
"classes",
"is",
"None",
":",
"col_cb",
"=",
"None",
"else",
":",
"col_cb",
"=",
"get_seaborn_colorbar",
"(",
"dfr",
",",
"params",
".",
"classes",
")",
"# Labels are defined before we build the clustering",
"# If a label mapping is missing, use the key text as fall back",
"params",
".",
"labels",
"=",
"get_safe_seaborn_labels",
"(",
"dfr",
",",
"params",
".",
"labels",
")",
"# Add attributes to parameter object, and draw heatmap",
"params",
".",
"colorbar",
"=",
"col_cb",
"params",
".",
"figsize",
"=",
"figsize",
"params",
".",
"linewidths",
"=",
"0.25",
"fig",
"=",
"get_seaborn_clustermap",
"(",
"dfr",
",",
"params",
",",
"title",
"=",
"title",
")",
"# Save to file",
"if",
"outfilename",
":",
"fig",
".",
"savefig",
"(",
"outfilename",
")",
"# Return clustermap",
"return",
"fig"
] |
Returns seaborn heatmap with cluster dendrograms.
- dfr - pandas DataFrame with relevant data
- outfilename - path to output file (indicates output format)
|
[
"Returns",
"seaborn",
"heatmap",
"with",
"cluster",
"dendrograms",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L138-L175
|
train
|
widdowquinn/pyani
|
pyani/pyani_graphics.py
|
add_mpl_dendrogram
|
def add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="col"):
"""Return a dendrogram and corresponding gridspec, attached to the fig
Modifies the fig in-place. Orientation is either 'row' or 'col' and
determines location and orientation of the rendered dendrogram.
"""
# Row or column axes?
if orientation == "row":
dists = distance.squareform(distance.pdist(dfr))
spec = heatmap_gs[1, 0]
orient = "left"
nrows, ncols = 1, 2
height_ratios = [1]
else: # Column dendrogram
dists = distance.squareform(distance.pdist(dfr.T))
spec = heatmap_gs[0, 1]
orient = "top"
nrows, ncols = 2, 1
height_ratios = [1, 0.15]
# Create row dendrogram axis
gspec = gridspec.GridSpecFromSubplotSpec(
nrows,
ncols,
subplot_spec=spec,
wspace=0.0,
hspace=0.1,
height_ratios=height_ratios,
)
dend_axes = fig.add_subplot(gspec[0, 0])
dend = sch.dendrogram(
sch.linkage(distance.squareform(dists), method="complete"),
color_threshold=np.inf,
orientation=orient,
)
clean_axis(dend_axes)
return {"dendrogram": dend, "gridspec": gspec}
|
python
|
def add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="col"):
"""Return a dendrogram and corresponding gridspec, attached to the fig
Modifies the fig in-place. Orientation is either 'row' or 'col' and
determines location and orientation of the rendered dendrogram.
"""
# Row or column axes?
if orientation == "row":
dists = distance.squareform(distance.pdist(dfr))
spec = heatmap_gs[1, 0]
orient = "left"
nrows, ncols = 1, 2
height_ratios = [1]
else: # Column dendrogram
dists = distance.squareform(distance.pdist(dfr.T))
spec = heatmap_gs[0, 1]
orient = "top"
nrows, ncols = 2, 1
height_ratios = [1, 0.15]
# Create row dendrogram axis
gspec = gridspec.GridSpecFromSubplotSpec(
nrows,
ncols,
subplot_spec=spec,
wspace=0.0,
hspace=0.1,
height_ratios=height_ratios,
)
dend_axes = fig.add_subplot(gspec[0, 0])
dend = sch.dendrogram(
sch.linkage(distance.squareform(dists), method="complete"),
color_threshold=np.inf,
orientation=orient,
)
clean_axis(dend_axes)
return {"dendrogram": dend, "gridspec": gspec}
|
[
"def",
"add_mpl_dendrogram",
"(",
"dfr",
",",
"fig",
",",
"heatmap_gs",
",",
"orientation",
"=",
"\"col\"",
")",
":",
"# Row or column axes?",
"if",
"orientation",
"==",
"\"row\"",
":",
"dists",
"=",
"distance",
".",
"squareform",
"(",
"distance",
".",
"pdist",
"(",
"dfr",
")",
")",
"spec",
"=",
"heatmap_gs",
"[",
"1",
",",
"0",
"]",
"orient",
"=",
"\"left\"",
"nrows",
",",
"ncols",
"=",
"1",
",",
"2",
"height_ratios",
"=",
"[",
"1",
"]",
"else",
":",
"# Column dendrogram",
"dists",
"=",
"distance",
".",
"squareform",
"(",
"distance",
".",
"pdist",
"(",
"dfr",
".",
"T",
")",
")",
"spec",
"=",
"heatmap_gs",
"[",
"0",
",",
"1",
"]",
"orient",
"=",
"\"top\"",
"nrows",
",",
"ncols",
"=",
"2",
",",
"1",
"height_ratios",
"=",
"[",
"1",
",",
"0.15",
"]",
"# Create row dendrogram axis",
"gspec",
"=",
"gridspec",
".",
"GridSpecFromSubplotSpec",
"(",
"nrows",
",",
"ncols",
",",
"subplot_spec",
"=",
"spec",
",",
"wspace",
"=",
"0.0",
",",
"hspace",
"=",
"0.1",
",",
"height_ratios",
"=",
"height_ratios",
",",
")",
"dend_axes",
"=",
"fig",
".",
"add_subplot",
"(",
"gspec",
"[",
"0",
",",
"0",
"]",
")",
"dend",
"=",
"sch",
".",
"dendrogram",
"(",
"sch",
".",
"linkage",
"(",
"distance",
".",
"squareform",
"(",
"dists",
")",
",",
"method",
"=",
"\"complete\"",
")",
",",
"color_threshold",
"=",
"np",
".",
"inf",
",",
"orientation",
"=",
"orient",
",",
")",
"clean_axis",
"(",
"dend_axes",
")",
"return",
"{",
"\"dendrogram\"",
":",
"dend",
",",
"\"gridspec\"",
":",
"gspec",
"}"
] |
Return a dendrogram and corresponding gridspec, attached to the fig
Modifies the fig in-place. Orientation is either 'row' or 'col' and
determines location and orientation of the rendered dendrogram.
|
[
"Return",
"a",
"dendrogram",
"and",
"corresponding",
"gridspec",
"attached",
"to",
"the",
"fig"
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L179-L215
|
train
|
widdowquinn/pyani
|
pyani/pyani_graphics.py
|
get_mpl_heatmap_axes
|
def get_mpl_heatmap_axes(dfr, fig, heatmap_gs):
"""Return axis for Matplotlib heatmap."""
# Create heatmap axis
heatmap_axes = fig.add_subplot(heatmap_gs[1, 1])
heatmap_axes.set_xticks(np.linspace(0, dfr.shape[0] - 1, dfr.shape[0]))
heatmap_axes.set_yticks(np.linspace(0, dfr.shape[0] - 1, dfr.shape[0]))
heatmap_axes.grid(False)
heatmap_axes.xaxis.tick_bottom()
heatmap_axes.yaxis.tick_right()
return heatmap_axes
|
python
|
def get_mpl_heatmap_axes(dfr, fig, heatmap_gs):
"""Return axis for Matplotlib heatmap."""
# Create heatmap axis
heatmap_axes = fig.add_subplot(heatmap_gs[1, 1])
heatmap_axes.set_xticks(np.linspace(0, dfr.shape[0] - 1, dfr.shape[0]))
heatmap_axes.set_yticks(np.linspace(0, dfr.shape[0] - 1, dfr.shape[0]))
heatmap_axes.grid(False)
heatmap_axes.xaxis.tick_bottom()
heatmap_axes.yaxis.tick_right()
return heatmap_axes
|
[
"def",
"get_mpl_heatmap_axes",
"(",
"dfr",
",",
"fig",
",",
"heatmap_gs",
")",
":",
"# Create heatmap axis",
"heatmap_axes",
"=",
"fig",
".",
"add_subplot",
"(",
"heatmap_gs",
"[",
"1",
",",
"1",
"]",
")",
"heatmap_axes",
".",
"set_xticks",
"(",
"np",
".",
"linspace",
"(",
"0",
",",
"dfr",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
",",
"dfr",
".",
"shape",
"[",
"0",
"]",
")",
")",
"heatmap_axes",
".",
"set_yticks",
"(",
"np",
".",
"linspace",
"(",
"0",
",",
"dfr",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
",",
"dfr",
".",
"shape",
"[",
"0",
"]",
")",
")",
"heatmap_axes",
".",
"grid",
"(",
"False",
")",
"heatmap_axes",
".",
"xaxis",
".",
"tick_bottom",
"(",
")",
"heatmap_axes",
".",
"yaxis",
".",
"tick_right",
"(",
")",
"return",
"heatmap_axes"
] |
Return axis for Matplotlib heatmap.
|
[
"Return",
"axis",
"for",
"Matplotlib",
"heatmap",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L219-L228
|
train
|
widdowquinn/pyani
|
pyani/pyani_graphics.py
|
add_mpl_colorbar
|
def add_mpl_colorbar(dfr, fig, dend, params, orientation="row"):
"""Add class colorbars to Matplotlib heatmap."""
for name in dfr.index[dend["dendrogram"]["leaves"]]:
if name not in params.classes:
params.classes[name] = name
# Assign a numerical value to each class, for mpl
classdict = {cls: idx for (idx, cls) in enumerate(params.classes.values())}
# colourbar
cblist = []
for name in dfr.index[dend["dendrogram"]["leaves"]]:
try:
cblist.append(classdict[params.classes[name]])
except KeyError:
cblist.append(classdict[name])
colbar = pd.Series(cblist)
# Create colourbar axis - could capture if needed
if orientation == "row":
cbaxes = fig.add_subplot(dend["gridspec"][0, 1])
cbaxes.imshow(
[[cbar] for cbar in colbar.values],
cmap=plt.get_cmap(pyani_config.MPL_CBAR),
interpolation="nearest",
aspect="auto",
origin="lower",
)
else:
cbaxes = fig.add_subplot(dend["gridspec"][1, 0])
cbaxes.imshow(
[colbar],
cmap=plt.get_cmap(pyani_config.MPL_CBAR),
interpolation="nearest",
aspect="auto",
origin="lower",
)
clean_axis(cbaxes)
return colbar
|
python
|
def add_mpl_colorbar(dfr, fig, dend, params, orientation="row"):
"""Add class colorbars to Matplotlib heatmap."""
for name in dfr.index[dend["dendrogram"]["leaves"]]:
if name not in params.classes:
params.classes[name] = name
# Assign a numerical value to each class, for mpl
classdict = {cls: idx for (idx, cls) in enumerate(params.classes.values())}
# colourbar
cblist = []
for name in dfr.index[dend["dendrogram"]["leaves"]]:
try:
cblist.append(classdict[params.classes[name]])
except KeyError:
cblist.append(classdict[name])
colbar = pd.Series(cblist)
# Create colourbar axis - could capture if needed
if orientation == "row":
cbaxes = fig.add_subplot(dend["gridspec"][0, 1])
cbaxes.imshow(
[[cbar] for cbar in colbar.values],
cmap=plt.get_cmap(pyani_config.MPL_CBAR),
interpolation="nearest",
aspect="auto",
origin="lower",
)
else:
cbaxes = fig.add_subplot(dend["gridspec"][1, 0])
cbaxes.imshow(
[colbar],
cmap=plt.get_cmap(pyani_config.MPL_CBAR),
interpolation="nearest",
aspect="auto",
origin="lower",
)
clean_axis(cbaxes)
return colbar
|
[
"def",
"add_mpl_colorbar",
"(",
"dfr",
",",
"fig",
",",
"dend",
",",
"params",
",",
"orientation",
"=",
"\"row\"",
")",
":",
"for",
"name",
"in",
"dfr",
".",
"index",
"[",
"dend",
"[",
"\"dendrogram\"",
"]",
"[",
"\"leaves\"",
"]",
"]",
":",
"if",
"name",
"not",
"in",
"params",
".",
"classes",
":",
"params",
".",
"classes",
"[",
"name",
"]",
"=",
"name",
"# Assign a numerical value to each class, for mpl",
"classdict",
"=",
"{",
"cls",
":",
"idx",
"for",
"(",
"idx",
",",
"cls",
")",
"in",
"enumerate",
"(",
"params",
".",
"classes",
".",
"values",
"(",
")",
")",
"}",
"# colourbar",
"cblist",
"=",
"[",
"]",
"for",
"name",
"in",
"dfr",
".",
"index",
"[",
"dend",
"[",
"\"dendrogram\"",
"]",
"[",
"\"leaves\"",
"]",
"]",
":",
"try",
":",
"cblist",
".",
"append",
"(",
"classdict",
"[",
"params",
".",
"classes",
"[",
"name",
"]",
"]",
")",
"except",
"KeyError",
":",
"cblist",
".",
"append",
"(",
"classdict",
"[",
"name",
"]",
")",
"colbar",
"=",
"pd",
".",
"Series",
"(",
"cblist",
")",
"# Create colourbar axis - could capture if needed",
"if",
"orientation",
"==",
"\"row\"",
":",
"cbaxes",
"=",
"fig",
".",
"add_subplot",
"(",
"dend",
"[",
"\"gridspec\"",
"]",
"[",
"0",
",",
"1",
"]",
")",
"cbaxes",
".",
"imshow",
"(",
"[",
"[",
"cbar",
"]",
"for",
"cbar",
"in",
"colbar",
".",
"values",
"]",
",",
"cmap",
"=",
"plt",
".",
"get_cmap",
"(",
"pyani_config",
".",
"MPL_CBAR",
")",
",",
"interpolation",
"=",
"\"nearest\"",
",",
"aspect",
"=",
"\"auto\"",
",",
"origin",
"=",
"\"lower\"",
",",
")",
"else",
":",
"cbaxes",
"=",
"fig",
".",
"add_subplot",
"(",
"dend",
"[",
"\"gridspec\"",
"]",
"[",
"1",
",",
"0",
"]",
")",
"cbaxes",
".",
"imshow",
"(",
"[",
"colbar",
"]",
",",
"cmap",
"=",
"plt",
".",
"get_cmap",
"(",
"pyani_config",
".",
"MPL_CBAR",
")",
",",
"interpolation",
"=",
"\"nearest\"",
",",
"aspect",
"=",
"\"auto\"",
",",
"origin",
"=",
"\"lower\"",
",",
")",
"clean_axis",
"(",
"cbaxes",
")",
"return",
"colbar"
] |
Add class colorbars to Matplotlib heatmap.
|
[
"Add",
"class",
"colorbars",
"to",
"Matplotlib",
"heatmap",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L231-L269
|
train
|
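A small worked illustration of the class-to-integer mapping that add_mpl_colorbar builds before drawing the class colourbar. The genome and clade names below are made up, and leaves_order stands in for the dfr.index[dend["dendrogram"]["leaves"]] lookup in the record above:

# Toy inputs: params.classes maps sequence names to class names.
classes = {"genome_A": "clade_1", "genome_B": "clade_2", "genome_C": "clade_1"}

# Same dict comprehension as in add_mpl_colorbar; repeated class names mean
# later indices overwrite earlier ones, which is harmless because each class
# still ends up with a single, distinct integer.
classdict = {cls: idx for (idx, cls) in enumerate(classes.values())}
# -> {'clade_1': 2, 'clade_2': 1}

# One integer per dendrogram leaf (stand-in for the dfr.index lookup).
leaves_order = ["genome_C", "genome_A", "genome_B"]
cblist = [classdict[classes[name]] for name in leaves_order]
# -> [2, 2, 1]; these are the values imshow() renders as colourbar cells.
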
widdowquinn/pyani
|
pyani/pyani_graphics.py
|
add_mpl_labels
|
def add_mpl_labels(heatmap_axes, rowlabels, collabels, params):
"""Add labels to Matplotlib heatmap axes, in-place."""
if params.labels:
# If a label mapping is missing, use the key text as fall back
rowlabels = [params.labels.get(lab, lab) for lab in rowlabels]
collabels = [params.labels.get(lab, lab) for lab in collabels]
xlabs = heatmap_axes.set_xticklabels(collabels)
ylabs = heatmap_axes.set_yticklabels(rowlabels)
for label in xlabs: # Rotate column labels
label.set_rotation(90)
for labset in (xlabs, ylabs): # Smaller font
for label in labset:
label.set_fontsize(8)
|
python
|
def add_mpl_labels(heatmap_axes, rowlabels, collabels, params):
"""Add labels to Matplotlib heatmap axes, in-place."""
if params.labels:
# If a label mapping is missing, use the key text as fall back
rowlabels = [params.labels.get(lab, lab) for lab in rowlabels]
collabels = [params.labels.get(lab, lab) for lab in collabels]
xlabs = heatmap_axes.set_xticklabels(collabels)
ylabs = heatmap_axes.set_yticklabels(rowlabels)
for label in xlabs: # Rotate column labels
label.set_rotation(90)
for labset in (xlabs, ylabs): # Smaller font
for label in labset:
label.set_fontsize(8)
|
[
"def",
"add_mpl_labels",
"(",
"heatmap_axes",
",",
"rowlabels",
",",
"collabels",
",",
"params",
")",
":",
"if",
"params",
".",
"labels",
":",
"# If a label mapping is missing, use the key text as fall back",
"rowlabels",
"=",
"[",
"params",
".",
"labels",
".",
"get",
"(",
"lab",
",",
"lab",
")",
"for",
"lab",
"in",
"rowlabels",
"]",
"collabels",
"=",
"[",
"params",
".",
"labels",
".",
"get",
"(",
"lab",
",",
"lab",
")",
"for",
"lab",
"in",
"collabels",
"]",
"xlabs",
"=",
"heatmap_axes",
".",
"set_xticklabels",
"(",
"collabels",
")",
"ylabs",
"=",
"heatmap_axes",
".",
"set_yticklabels",
"(",
"rowlabels",
")",
"for",
"label",
"in",
"xlabs",
":",
"# Rotate column labels",
"label",
".",
"set_rotation",
"(",
"90",
")",
"for",
"labset",
"in",
"(",
"xlabs",
",",
"ylabs",
")",
":",
"# Smaller font",
"for",
"label",
"in",
"labset",
":",
"label",
".",
"set_fontsize",
"(",
"8",
")"
] |
Add labels to Matplotlib heatmap axes, in-place.
|
[
"Add",
"labels",
"to",
"Matplotlib",
"heatmap",
"axes",
"in",
"-",
"place",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L273-L285
|
train
|
widdowquinn/pyani
|
pyani/pyani_graphics.py
|
add_mpl_colorscale
|
def add_mpl_colorscale(fig, heatmap_gs, ax_map, params, title=None):
"""Add colour scale to heatmap."""
# Set tick intervals
cbticks = [params.vmin + e * params.vdiff for e in (0, 0.25, 0.5, 0.75, 1)]
if params.vmax > 10:
exponent = int(floor(log10(params.vmax))) - 1
cbticks = [int(round(e, -exponent)) for e in cbticks]
scale_subplot = gridspec.GridSpecFromSubplotSpec(
1, 3, subplot_spec=heatmap_gs[0, 0], wspace=0.0, hspace=0.0
)
scale_ax = fig.add_subplot(scale_subplot[0, 1])
cbar = fig.colorbar(ax_map, scale_ax, ticks=cbticks)
if title:
cbar.set_label(title, fontsize=6)
cbar.ax.yaxis.set_ticks_position("left")
cbar.ax.yaxis.set_label_position("left")
cbar.ax.tick_params(labelsize=6)
cbar.outline.set_linewidth(0)
return cbar
|
python
|
def add_mpl_colorscale(fig, heatmap_gs, ax_map, params, title=None):
"""Add colour scale to heatmap."""
# Set tick intervals
cbticks = [params.vmin + e * params.vdiff for e in (0, 0.25, 0.5, 0.75, 1)]
if params.vmax > 10:
exponent = int(floor(log10(params.vmax))) - 1
cbticks = [int(round(e, -exponent)) for e in cbticks]
scale_subplot = gridspec.GridSpecFromSubplotSpec(
1, 3, subplot_spec=heatmap_gs[0, 0], wspace=0.0, hspace=0.0
)
scale_ax = fig.add_subplot(scale_subplot[0, 1])
cbar = fig.colorbar(ax_map, scale_ax, ticks=cbticks)
if title:
cbar.set_label(title, fontsize=6)
cbar.ax.yaxis.set_ticks_position("left")
cbar.ax.yaxis.set_label_position("left")
cbar.ax.tick_params(labelsize=6)
cbar.outline.set_linewidth(0)
return cbar
|
[
"def",
"add_mpl_colorscale",
"(",
"fig",
",",
"heatmap_gs",
",",
"ax_map",
",",
"params",
",",
"title",
"=",
"None",
")",
":",
"# Set tick intervals",
"cbticks",
"=",
"[",
"params",
".",
"vmin",
"+",
"e",
"*",
"params",
".",
"vdiff",
"for",
"e",
"in",
"(",
"0",
",",
"0.25",
",",
"0.5",
",",
"0.75",
",",
"1",
")",
"]",
"if",
"params",
".",
"vmax",
">",
"10",
":",
"exponent",
"=",
"int",
"(",
"floor",
"(",
"log10",
"(",
"params",
".",
"vmax",
")",
")",
")",
"-",
"1",
"cbticks",
"=",
"[",
"int",
"(",
"round",
"(",
"e",
",",
"-",
"exponent",
")",
")",
"for",
"e",
"in",
"cbticks",
"]",
"scale_subplot",
"=",
"gridspec",
".",
"GridSpecFromSubplotSpec",
"(",
"1",
",",
"3",
",",
"subplot_spec",
"=",
"heatmap_gs",
"[",
"0",
",",
"0",
"]",
",",
"wspace",
"=",
"0.0",
",",
"hspace",
"=",
"0.0",
")",
"scale_ax",
"=",
"fig",
".",
"add_subplot",
"(",
"scale_subplot",
"[",
"0",
",",
"1",
"]",
")",
"cbar",
"=",
"fig",
".",
"colorbar",
"(",
"ax_map",
",",
"scale_ax",
",",
"ticks",
"=",
"cbticks",
")",
"if",
"title",
":",
"cbar",
".",
"set_label",
"(",
"title",
",",
"fontsize",
"=",
"6",
")",
"cbar",
".",
"ax",
".",
"yaxis",
".",
"set_ticks_position",
"(",
"\"left\"",
")",
"cbar",
".",
"ax",
".",
"yaxis",
".",
"set_label_position",
"(",
"\"left\"",
")",
"cbar",
".",
"ax",
".",
"tick_params",
"(",
"labelsize",
"=",
"6",
")",
"cbar",
".",
"outline",
".",
"set_linewidth",
"(",
"0",
")",
"return",
"cbar"
] |
Add colour scale to heatmap.
|
[
"Add",
"colour",
"scale",
"to",
"heatmap",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L289-L308
|
train
|
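To make the tick arithmetic in add_mpl_colorscale concrete, here is a sketch with made-up vmin/vmax values, assuming (as the code above implies) that params.vdiff is the vmax - vmin span:

from math import floor, log10

vmin, vmax = 20.0, 100.0      # illustrative percentage-identity limits
vdiff = vmax - vmin           # assumed definition of params.vdiff

cbticks = [vmin + e * vdiff for e in (0, 0.25, 0.5, 0.75, 1)]
# -> [20.0, 40.0, 60.0, 80.0, 100.0]

if vmax > 10:
    # One order of magnitude below vmax: log10(100) = 2, so exponent = 1,
    # i.e. round each tick to the nearest ten.
    exponent = int(floor(log10(vmax))) - 1
    cbticks = [int(round(e, -exponent)) for e in cbticks]
# -> [20, 40, 60, 80, 100], the integer ticks handed to fig.colorbar()
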
widdowquinn/pyani
|
pyani/pyani_graphics.py
|
heatmap_mpl
|
def heatmap_mpl(dfr, outfilename=None, title=None, params=None):
"""Returns matplotlib heatmap with cluster dendrograms.
- dfr - pandas DataFrame with relevant data
- outfilename - path to output file (indicates output format)
- params - a list of parameters for plotting: [colormap, vmin, vmax]
- labels - dictionary of alternative labels, keyed by default sequence
labels
- classes - dictionary of sequence classes, keyed by default sequence
labels
"""
# Layout figure grid and add title
# Set figure size by the number of rows in the dataframe
figsize = max(8, dfr.shape[0] * 0.175)
fig = plt.figure(figsize=(figsize, figsize))
# if title:
# fig.suptitle(title)
heatmap_gs = gridspec.GridSpec(
2, 2, wspace=0.0, hspace=0.0, width_ratios=[0.3, 1], height_ratios=[0.3, 1]
)
# Add column and row dendrograms/axes to figure
coldend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="col")
rowdend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="row")
# Add heatmap axes to figure, with rows/columns as in the dendrograms
heatmap_axes = get_mpl_heatmap_axes(dfr, fig, heatmap_gs)
ax_map = heatmap_axes.imshow(
dfr.iloc[rowdend["dendrogram"]["leaves"], coldend["dendrogram"]["leaves"]],
interpolation="nearest",
cmap=params.cmap,
origin="lower",
vmin=params.vmin,
vmax=params.vmax,
aspect="auto",
)
# Are there class colourbars to add?
if params.classes is not None:
add_mpl_colorbar(dfr, fig, coldend, params, orientation="col")
add_mpl_colorbar(dfr, fig, rowdend, params, orientation="row")
# Add heatmap labels
add_mpl_labels(
heatmap_axes,
dfr.index[rowdend["dendrogram"]["leaves"]],
dfr.index[coldend["dendrogram"]["leaves"]],
params,
)
# Add colour scale
add_mpl_colorscale(fig, heatmap_gs, ax_map, params, title)
# Return figure output, and write, if required
plt.subplots_adjust(top=0.85) # Leave room for title
# fig.set_tight_layout(True)
# We know that there is a UserWarning here about tight_layout and
# using the Agg renderer on OSX, so catch and ignore it, for cleanliness.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
heatmap_gs.tight_layout(fig, h_pad=0.1, w_pad=0.5)
if outfilename:
fig.savefig(outfilename)
return fig
|
python
|
def heatmap_mpl(dfr, outfilename=None, title=None, params=None):
"""Returns matplotlib heatmap with cluster dendrograms.
- dfr - pandas DataFrame with relevant data
- outfilename - path to output file (indicates output format)
- params - a list of parameters for plotting: [colormap, vmin, vmax]
- labels - dictionary of alternative labels, keyed by default sequence
labels
- classes - dictionary of sequence classes, keyed by default sequence
labels
"""
# Layout figure grid and add title
# Set figure size by the number of rows in the dataframe
figsize = max(8, dfr.shape[0] * 0.175)
fig = plt.figure(figsize=(figsize, figsize))
# if title:
# fig.suptitle(title)
heatmap_gs = gridspec.GridSpec(
2, 2, wspace=0.0, hspace=0.0, width_ratios=[0.3, 1], height_ratios=[0.3, 1]
)
# Add column and row dendrograms/axes to figure
coldend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="col")
rowdend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="row")
# Add heatmap axes to figure, with rows/columns as in the dendrograms
heatmap_axes = get_mpl_heatmap_axes(dfr, fig, heatmap_gs)
ax_map = heatmap_axes.imshow(
dfr.iloc[rowdend["dendrogram"]["leaves"], coldend["dendrogram"]["leaves"]],
interpolation="nearest",
cmap=params.cmap,
origin="lower",
vmin=params.vmin,
vmax=params.vmax,
aspect="auto",
)
# Are there class colourbars to add?
if params.classes is not None:
add_mpl_colorbar(dfr, fig, coldend, params, orientation="col")
add_mpl_colorbar(dfr, fig, rowdend, params, orientation="row")
# Add heatmap labels
add_mpl_labels(
heatmap_axes,
dfr.index[rowdend["dendrogram"]["leaves"]],
dfr.index[coldend["dendrogram"]["leaves"]],
params,
)
# Add colour scale
add_mpl_colorscale(fig, heatmap_gs, ax_map, params, title)
# Return figure output, and write, if required
plt.subplots_adjust(top=0.85) # Leave room for title
# fig.set_tight_layout(True)
# We know that there is a UserWarning here about tight_layout and
# using the Agg renderer on OSX, so catch and ignore it, for cleanliness.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
heatmap_gs.tight_layout(fig, h_pad=0.1, w_pad=0.5)
if outfilename:
fig.savefig(outfilename)
return fig
|
[
"def",
"heatmap_mpl",
"(",
"dfr",
",",
"outfilename",
"=",
"None",
",",
"title",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"# Layout figure grid and add title",
"# Set figure size by the number of rows in the dataframe",
"figsize",
"=",
"max",
"(",
"8",
",",
"dfr",
".",
"shape",
"[",
"0",
"]",
"*",
"0.175",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"figsize",
",",
"figsize",
")",
")",
"# if title:",
"# fig.suptitle(title)",
"heatmap_gs",
"=",
"gridspec",
".",
"GridSpec",
"(",
"2",
",",
"2",
",",
"wspace",
"=",
"0.0",
",",
"hspace",
"=",
"0.0",
",",
"width_ratios",
"=",
"[",
"0.3",
",",
"1",
"]",
",",
"height_ratios",
"=",
"[",
"0.3",
",",
"1",
"]",
")",
"# Add column and row dendrograms/axes to figure",
"coldend",
"=",
"add_mpl_dendrogram",
"(",
"dfr",
",",
"fig",
",",
"heatmap_gs",
",",
"orientation",
"=",
"\"col\"",
")",
"rowdend",
"=",
"add_mpl_dendrogram",
"(",
"dfr",
",",
"fig",
",",
"heatmap_gs",
",",
"orientation",
"=",
"\"row\"",
")",
"# Add heatmap axes to figure, with rows/columns as in the dendrograms",
"heatmap_axes",
"=",
"get_mpl_heatmap_axes",
"(",
"dfr",
",",
"fig",
",",
"heatmap_gs",
")",
"ax_map",
"=",
"heatmap_axes",
".",
"imshow",
"(",
"dfr",
".",
"iloc",
"[",
"rowdend",
"[",
"\"dendrogram\"",
"]",
"[",
"\"leaves\"",
"]",
",",
"coldend",
"[",
"\"dendrogram\"",
"]",
"[",
"\"leaves\"",
"]",
"]",
",",
"interpolation",
"=",
"\"nearest\"",
",",
"cmap",
"=",
"params",
".",
"cmap",
",",
"origin",
"=",
"\"lower\"",
",",
"vmin",
"=",
"params",
".",
"vmin",
",",
"vmax",
"=",
"params",
".",
"vmax",
",",
"aspect",
"=",
"\"auto\"",
",",
")",
"# Are there class colourbars to add?",
"if",
"params",
".",
"classes",
"is",
"not",
"None",
":",
"add_mpl_colorbar",
"(",
"dfr",
",",
"fig",
",",
"coldend",
",",
"params",
",",
"orientation",
"=",
"\"col\"",
")",
"add_mpl_colorbar",
"(",
"dfr",
",",
"fig",
",",
"rowdend",
",",
"params",
",",
"orientation",
"=",
"\"row\"",
")",
"# Add heatmap labels",
"add_mpl_labels",
"(",
"heatmap_axes",
",",
"dfr",
".",
"index",
"[",
"rowdend",
"[",
"\"dendrogram\"",
"]",
"[",
"\"leaves\"",
"]",
"]",
",",
"dfr",
".",
"index",
"[",
"coldend",
"[",
"\"dendrogram\"",
"]",
"[",
"\"leaves\"",
"]",
"]",
",",
"params",
",",
")",
"# Add colour scale",
"add_mpl_colorscale",
"(",
"fig",
",",
"heatmap_gs",
",",
"ax_map",
",",
"params",
",",
"title",
")",
"# Return figure output, and write, if required",
"plt",
".",
"subplots_adjust",
"(",
"top",
"=",
"0.85",
")",
"# Leave room for title",
"# fig.set_tight_layout(True)",
"# We know that there is a UserWarning here about tight_layout and",
"# using the Agg renderer on OSX, so catch and ignore it, for cleanliness.",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"\"ignore\"",
")",
"heatmap_gs",
".",
"tight_layout",
"(",
"fig",
",",
"h_pad",
"=",
"0.1",
",",
"w_pad",
"=",
"0.5",
")",
"if",
"outfilename",
":",
"fig",
".",
"savefig",
"(",
"outfilename",
")",
"return",
"fig"
] |
Returns matplotlib heatmap with cluster dendrograms.
- dfr - pandas DataFrame with relevant data
- outfilename - path to output file (indicates output format)
- params - a list of parameters for plotting: [colormap, vmin, vmax]
- labels - dictionary of alternative labels, keyed by default sequence
labels
- classes - dictionary of sequence classes, keyed by default sequence
labels
|
[
"Returns",
"matplotlib",
"heatmap",
"with",
"cluster",
"dendrograms",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L312-L375
|
train
|
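The heatmap_mpl docstring above describes params as "a list of parameters", but the body reads attributes (params.cmap, params.vmin, params.vmax, params.classes, params.labels, and params.vdiff inside add_mpl_colorscale), so in practice a small object is expected. A minimal calling sketch with a hypothetical stand-in namespace and toy data; the import path follows the record's pyani/pyani_graphics.py location, and every value below is illustrative:

import types
import pandas as pd
from pyani.pyani_graphics import heatmap_mpl

# Hypothetical stand-in for pyani's plotting-parameters object.
params = types.SimpleNamespace(
    cmap="Spectral",   # any Matplotlib colormap name
    vmin=80.0,
    vmax=100.0,
    vdiff=20.0,        # used by add_mpl_colorscale for tick spacing
    labels=None,       # or {sequence_name: display_label}
    classes=None,      # or {sequence_name: class_name}; enables class colourbars
)

# Toy symmetric percentage-identity matrix, shaped like ANIm/ANIb output.
names = ["genome_A", "genome_B", "genome_C"]
dfr = pd.DataFrame(
    [[100.0, 95.0, 88.0],
     [95.0, 100.0, 90.0],
     [88.0, 90.0, 100.0]],
    index=names,
    columns=names,
)

fig = heatmap_mpl(dfr, outfilename="toy_heatmap.pdf",
                  title="percentage identity", params=params)
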
widdowquinn/pyani
|
pyani/run_multiprocessing.py
|
run_dependency_graph
|
def run_dependency_graph(jobgraph, workers=None, logger=None):
"""Creates and runs pools of jobs based on the passed jobgraph.
- jobgraph - list of jobs, which may have dependencies.
    - workers - (optional) number of worker processes to use for each pool
- logger - a logger module logger (optional)
The strategy here is to loop over each job in the list of jobs (jobgraph),
and create/populate a series of Sets of commands, to be run in
reverse order with multiprocessing_run as asynchronous pools.
"""
cmdsets = []
for job in jobgraph:
cmdsets = populate_cmdsets(job, cmdsets, depth=1)
# Put command sets in reverse order, and submit to multiprocessing_run
cmdsets.reverse()
cumretval = 0
for cmdset in cmdsets:
if logger: # Try to be informative, if the logger module is being used
logger.info("Command pool now running:")
for cmd in cmdset:
logger.info(cmd)
cumretval += multiprocessing_run(cmdset, workers)
if logger: # Try to be informative, if the logger module is being used
logger.info("Command pool done.")
return cumretval
|
python
|
def run_dependency_graph(jobgraph, workers=None, logger=None):
"""Creates and runs pools of jobs based on the passed jobgraph.
- jobgraph - list of jobs, which may have dependencies.
    - workers - (optional) number of worker processes to use for each pool
- logger - a logger module logger (optional)
The strategy here is to loop over each job in the list of jobs (jobgraph),
and create/populate a series of Sets of commands, to be run in
reverse order with multiprocessing_run as asynchronous pools.
"""
cmdsets = []
for job in jobgraph:
cmdsets = populate_cmdsets(job, cmdsets, depth=1)
# Put command sets in reverse order, and submit to multiprocessing_run
cmdsets.reverse()
cumretval = 0
for cmdset in cmdsets:
if logger: # Try to be informative, if the logger module is being used
logger.info("Command pool now running:")
for cmd in cmdset:
logger.info(cmd)
cumretval += multiprocessing_run(cmdset, workers)
if logger: # Try to be informative, if the logger module is being used
logger.info("Command pool done.")
return cumretval
|
[
"def",
"run_dependency_graph",
"(",
"jobgraph",
",",
"workers",
"=",
"None",
",",
"logger",
"=",
"None",
")",
":",
"cmdsets",
"=",
"[",
"]",
"for",
"job",
"in",
"jobgraph",
":",
"cmdsets",
"=",
"populate_cmdsets",
"(",
"job",
",",
"cmdsets",
",",
"depth",
"=",
"1",
")",
"# Put command sets in reverse order, and submit to multiprocessing_run",
"cmdsets",
".",
"reverse",
"(",
")",
"cumretval",
"=",
"0",
"for",
"cmdset",
"in",
"cmdsets",
":",
"if",
"logger",
":",
"# Try to be informative, if the logger module is being used",
"logger",
".",
"info",
"(",
"\"Command pool now running:\"",
")",
"for",
"cmd",
"in",
"cmdset",
":",
"logger",
".",
"info",
"(",
"cmd",
")",
"cumretval",
"+=",
"multiprocessing_run",
"(",
"cmdset",
",",
"workers",
")",
"if",
"logger",
":",
"# Try to be informative, if the logger module is being used",
"logger",
".",
"info",
"(",
"\"Command pool done.\"",
")",
"return",
"cumretval"
] |
Creates and runs pools of jobs based on the passed jobgraph.
- jobgraph - list of jobs, which may have dependencies.
- workers - (optional) number of worker processes to use for each pool
- logger - a logger module logger (optional)
The strategy here is to loop over each job in the list of jobs (jobgraph),
and create/populate a series of Sets of commands, to be run in
reverse order with multiprocessing_run as asynchronous pools.
|
[
"Creates",
"and",
"runs",
"pools",
"of",
"jobs",
"based",
"on",
"the",
"passed",
"jobgraph",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_multiprocessing.py#L22-L48
|
train
|
widdowquinn/pyani
|
pyani/run_multiprocessing.py
|
populate_cmdsets
|
def populate_cmdsets(job, cmdsets, depth):
"""Creates a list of sets containing jobs at different depths of the
dependency tree.
This is a recursive function (is there something quicker in the itertools
module?) that descends each 'root' job in turn, populating each
"""
if len(cmdsets) < depth:
cmdsets.append(set())
cmdsets[depth-1].add(job.command)
if len(job.dependencies) == 0:
return cmdsets
for j in job.dependencies:
cmdsets = populate_cmdsets(j, cmdsets, depth+1)
return cmdsets
|
python
|
def populate_cmdsets(job, cmdsets, depth):
"""Creates a list of sets containing jobs at different depths of the
dependency tree.
This is a recursive function (is there something quicker in the itertools
    module?) that descends each 'root' job in turn, populating each depth's
    command set with the commands found at that level.
"""
if len(cmdsets) < depth:
cmdsets.append(set())
cmdsets[depth-1].add(job.command)
if len(job.dependencies) == 0:
return cmdsets
for j in job.dependencies:
cmdsets = populate_cmdsets(j, cmdsets, depth+1)
return cmdsets
|
[
"def",
"populate_cmdsets",
"(",
"job",
",",
"cmdsets",
",",
"depth",
")",
":",
"if",
"len",
"(",
"cmdsets",
")",
"<",
"depth",
":",
"cmdsets",
".",
"append",
"(",
"set",
"(",
")",
")",
"cmdsets",
"[",
"depth",
"-",
"1",
"]",
".",
"add",
"(",
"job",
".",
"command",
")",
"if",
"len",
"(",
"job",
".",
"dependencies",
")",
"==",
"0",
":",
"return",
"cmdsets",
"for",
"j",
"in",
"job",
".",
"dependencies",
":",
"cmdsets",
"=",
"populate_cmdsets",
"(",
"j",
",",
"cmdsets",
",",
"depth",
"+",
"1",
")",
"return",
"cmdsets"
] |
Creates a list of sets containing jobs at different depths of the
dependency tree.
This is a recursive function (is there something quicker in the itertools
module?) that descends each 'root' job in turn, populating each depth's
command set with the commands found at that level.
|
[
"Creates",
"a",
"list",
"of",
"sets",
"containing",
"jobs",
"at",
"different",
"depths",
"of",
"the",
"dependency",
"tree",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_multiprocessing.py#L51-L65
|
train
|
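To make the recursion in populate_cmdsets and the reverse-order execution in run_dependency_graph concrete, here is a toy sketch. ToyJob is a hypothetical stand-in exposing only the two attributes these functions touch (.command and .dependencies); pyani's real job objects come from its own jobs module, and the command strings are illustrative.

from pyani.run_multiprocessing import populate_cmdsets

class ToyJob:
    """Hypothetical stand-in: just .command and .dependencies."""
    def __init__(self, command, dependencies=None):
        self.command = command
        self.dependencies = dependencies or []

# Two filter jobs, each depending on its own nucmer job; the jobgraph
# passed to run_dependency_graph holds only the root (filter) jobs.
nucmer_a = ToyJob("nucmer genome_A")
nucmer_b = ToyJob("nucmer genome_B")
filter_a = ToyJob("delta-filter genome_A", [nucmer_a])
filter_b = ToyJob("delta-filter genome_B", [nucmer_b])

cmdsets = []
for job in (filter_a, filter_b):
    cmdsets = populate_cmdsets(job, cmdsets, depth=1)

# cmdsets[0] now holds the root commands, cmdsets[1] their dependencies:
#   [{'delta-filter genome_A', 'delta-filter genome_B'},
#    {'nucmer genome_A', 'nucmer genome_B'}]
# run_dependency_graph reverses the list, so the nucmer set is executed
# (as one multiprocessing pool) before the delta-filter set.
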
widdowquinn/pyani
|
pyani/run_multiprocessing.py
|
multiprocessing_run
|
def multiprocessing_run(cmdlines, workers=None):
"""Distributes passed command-line jobs using multiprocessing.
- cmdlines - an iterable of command line strings
Returns the sum of exit codes from each job that was run. If
all goes well, this should be 0. Anything else and the calling
function should act accordingly.
"""
# Run jobs
# If workers is None or greater than the number of cores available,
# it will be set to the maximum number of cores
pool = multiprocessing.Pool(processes=workers)
results = [pool.apply_async(subprocess.run, (str(cline), ),
{'shell': sys.platform != "win32",
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE})
for cline in cmdlines]
pool.close()
pool.join()
return sum([r.get().returncode for r in results])
|
python
|
def multiprocessing_run(cmdlines, workers=None):
"""Distributes passed command-line jobs using multiprocessing.
- cmdlines - an iterable of command line strings
Returns the sum of exit codes from each job that was run. If
all goes well, this should be 0. Anything else and the calling
function should act accordingly.
"""
# Run jobs
# If workers is None or greater than the number of cores available,
# it will be set to the maximum number of cores
pool = multiprocessing.Pool(processes=workers)
results = [pool.apply_async(subprocess.run, (str(cline), ),
{'shell': sys.platform != "win32",
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE})
for cline in cmdlines]
pool.close()
pool.join()
return sum([r.get().returncode for r in results])
|
[
"def",
"multiprocessing_run",
"(",
"cmdlines",
",",
"workers",
"=",
"None",
")",
":",
"# Run jobs",
"# If workers is None or greater than the number of cores available,",
"# it will be set to the maximum number of cores",
"pool",
"=",
"multiprocessing",
".",
"Pool",
"(",
"processes",
"=",
"workers",
")",
"results",
"=",
"[",
"pool",
".",
"apply_async",
"(",
"subprocess",
".",
"run",
",",
"(",
"str",
"(",
"cline",
")",
",",
")",
",",
"{",
"'shell'",
":",
"sys",
".",
"platform",
"!=",
"\"win32\"",
",",
"'stdout'",
":",
"subprocess",
".",
"PIPE",
",",
"'stderr'",
":",
"subprocess",
".",
"PIPE",
"}",
")",
"for",
"cline",
"in",
"cmdlines",
"]",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")",
"return",
"sum",
"(",
"[",
"r",
".",
"get",
"(",
")",
".",
"returncode",
"for",
"r",
"in",
"results",
"]",
")"
] |
Distributes passed command-line jobs using multiprocessing.
- cmdlines - an iterable of command line strings
Returns the sum of exit codes from each job that was run. If
all goes well, this should be 0. Anything else and the calling
function should act accordingly.
|
[
"Distributes",
"passed",
"command",
"-",
"line",
"jobs",
"using",
"multiprocessing",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_multiprocessing.py#L69-L89
|
train
|
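A minimal usage sketch for multiprocessing_run, assuming a POSIX platform (the shell=True branch) and the import path from the record above; the echo commands are stand-ins for real nucmer/BLAST command lines. The __main__ guard matters because multiprocessing may re-import the calling module under some start methods.

from pyani.run_multiprocessing import multiprocessing_run

if __name__ == "__main__":
    # Each string is handed to subprocess.run(..., shell=True) on POSIX.
    cmdlines = [
        "echo comparison_1",
        "echo comparison_2",
        "echo comparison_3",
    ]
    # workers=None would let multiprocessing.Pool pick one process per core.
    cumulative_returncode = multiprocessing_run(cmdlines, workers=2)
    assert cumulative_returncode == 0   # every toy job exited cleanly
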
widdowquinn/pyani
|
pyani/pyani_files.py
|
get_input_files
|
def get_input_files(dirname, *ext):
"""Returns files in passed directory, filtered by extension.
- dirname - path to input directory
- *ext - list of arguments describing permitted file extensions
"""
filelist = [f for f in os.listdir(dirname) if
os.path.splitext(f)[-1] in ext]
return [os.path.join(dirname, f) for f in filelist]
|
python
|
def get_input_files(dirname, *ext):
"""Returns files in passed directory, filtered by extension.
- dirname - path to input directory
- *ext - list of arguments describing permitted file extensions
"""
filelist = [f for f in os.listdir(dirname) if
os.path.splitext(f)[-1] in ext]
return [os.path.join(dirname, f) for f in filelist]
|
[
"def",
"get_input_files",
"(",
"dirname",
",",
"*",
"ext",
")",
":",
"filelist",
"=",
"[",
"f",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"dirname",
")",
"if",
"os",
".",
"path",
".",
"splitext",
"(",
"f",
")",
"[",
"-",
"1",
"]",
"in",
"ext",
"]",
"return",
"[",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"f",
")",
"for",
"f",
"in",
"filelist",
"]"
] |
Returns files in passed directory, filtered by extension.
- dirname - path to input directory
- *ext - list of arguments describing permitted file extensions
|
[
"Returns",
"files",
"in",
"passed",
"directory",
"filtered",
"by",
"extension",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_files.py#L27-L35
|
train
|
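A short usage sketch for get_input_files; the directory name is made up. Because the filter compares against os.path.splitext()[-1], each extension must include its leading dot.

from pyani.pyani_files import get_input_files

# Hypothetical input directory; extensions need the leading dot.
infiles = get_input_files("my_genomes", ".fna", ".fa", ".fasta")
# e.g. ['my_genomes/genome_A.fna', 'my_genomes/genome_B.fasta']
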
widdowquinn/pyani
|
pyani/pyani_files.py
|
get_sequence_lengths
|
def get_sequence_lengths(fastafilenames):
"""Returns dictionary of sequence lengths, keyed by organism.
Biopython's SeqIO module is used to parse all sequences in the FASTA
file corresponding to each organism, and the total base count in each
is obtained.
NOTE: ambiguity symbols are not discounted.
"""
tot_lengths = {}
for fn in fastafilenames:
tot_lengths[os.path.splitext(os.path.split(fn)[-1])[0]] = \
sum([len(s) for s in SeqIO.parse(fn, 'fasta')])
return tot_lengths
|
python
|
def get_sequence_lengths(fastafilenames):
"""Returns dictionary of sequence lengths, keyed by organism.
Biopython's SeqIO module is used to parse all sequences in the FASTA
file corresponding to each organism, and the total base count in each
is obtained.
NOTE: ambiguity symbols are not discounted.
"""
tot_lengths = {}
for fn in fastafilenames:
tot_lengths[os.path.splitext(os.path.split(fn)[-1])[0]] = \
sum([len(s) for s in SeqIO.parse(fn, 'fasta')])
return tot_lengths
|
[
"def",
"get_sequence_lengths",
"(",
"fastafilenames",
")",
":",
"tot_lengths",
"=",
"{",
"}",
"for",
"fn",
"in",
"fastafilenames",
":",
"tot_lengths",
"[",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"split",
"(",
"fn",
")",
"[",
"-",
"1",
"]",
")",
"[",
"0",
"]",
"]",
"=",
"sum",
"(",
"[",
"len",
"(",
"s",
")",
"for",
"s",
"in",
"SeqIO",
".",
"parse",
"(",
"fn",
",",
"'fasta'",
")",
"]",
")",
"return",
"tot_lengths"
] |
Returns dictionary of sequence lengths, keyed by organism.
Biopython's SeqIO module is used to parse all sequences in the FASTA
file corresponding to each organism, and the total base count in each
is obtained.
NOTE: ambiguity symbols are not discounted.
|
[
"Returns",
"dictionary",
"of",
"sequence",
"lengths",
"keyed",
"by",
"organism",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_files.py#L39-L52
|
train
|
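get_sequence_lengths chains naturally onto get_input_files, and its keys (the filename stems) are exactly the keying that calculate_anim and unified_anib later expect for org_lengths. A sketch with the same hypothetical directory, assuming Biopython is installed; the lengths in the comment are illustrative only.

from pyani.pyani_files import get_input_files, get_sequence_lengths

infiles = get_input_files("my_genomes", ".fna", ".fa", ".fasta")
org_lengths = get_sequence_lengths(infiles)

# Keys are filename stems, values total base counts, e.g.
# {'genome_A': 4641652, 'genome_B': 4215606}
for org, length in sorted(org_lengths.items()):
    print(org, length)
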
widdowquinn/pyani
|
bin/average_nucleotide_identity.py
|
last_exception
|
def last_exception():
""" Returns last exception as a string, or use in logging.
"""
exc_type, exc_value, exc_traceback = sys.exc_info()
return "".join(traceback.format_exception(exc_type, exc_value, exc_traceback))
|
python
|
def last_exception():
""" Returns last exception as a string, or use in logging.
"""
exc_type, exc_value, exc_traceback = sys.exc_info()
return "".join(traceback.format_exception(exc_type, exc_value, exc_traceback))
|
[
"def",
"last_exception",
"(",
")",
":",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
"=",
"sys",
".",
"exc_info",
"(",
")",
"return",
"\"\"",
".",
"join",
"(",
"traceback",
".",
"format_exception",
"(",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
")",
")"
] |
Returns last exception as a string, or use in logging.
|
[
"Returns",
"last",
"exception",
"as",
"a",
"string",
"or",
"use",
"in",
"logging",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/average_nucleotide_identity.py#L439-L443
|
train
|
widdowquinn/pyani
|
bin/average_nucleotide_identity.py
|
make_outdir
|
def make_outdir():
"""Make the output directory, if required.
This is a little involved. If the output directory already exists,
we take the safe option by default, and stop with an error. We can,
however, choose to force the program to go on, in which case we can
either clobber the existing directory, or not. The options turn out
as the following, if the directory exists:
DEFAULT: stop and report the collision
FORCE: continue, and remove the existing output directory
NOCLOBBER+FORCE: continue, but do not remove the existing output
"""
if os.path.exists(args.outdirname):
if not args.force:
logger.error(
"Output directory %s would overwrite existing " + "files (exiting)",
args.outdirname,
)
sys.exit(1)
elif args.noclobber:
logger.warning(
"NOCLOBBER: not actually deleting directory %s", args.outdirname
)
else:
logger.info(
"Removing directory %s and everything below it", args.outdirname
)
shutil.rmtree(args.outdirname)
logger.info("Creating directory %s", args.outdirname)
try:
os.makedirs(args.outdirname) # We make the directory recursively
# Depending on the choice of method, a subdirectory will be made for
# alignment output files
if args.method != "TETRA":
os.makedirs(os.path.join(args.outdirname, ALIGNDIR[args.method]))
except OSError:
# This gets thrown if the directory exists. If we've forced overwrite/
# delete and we're not clobbering, we let things slide
if args.noclobber and args.force:
logger.info("NOCLOBBER+FORCE: not creating directory")
else:
            logger.error(last_exception())
sys.exit(1)
|
python
|
def make_outdir():
"""Make the output directory, if required.
This is a little involved. If the output directory already exists,
we take the safe option by default, and stop with an error. We can,
however, choose to force the program to go on, in which case we can
either clobber the existing directory, or not. The options turn out
as the following, if the directory exists:
DEFAULT: stop and report the collision
FORCE: continue, and remove the existing output directory
NOCLOBBER+FORCE: continue, but do not remove the existing output
"""
if os.path.exists(args.outdirname):
if not args.force:
logger.error(
"Output directory %s would overwrite existing " + "files (exiting)",
args.outdirname,
)
sys.exit(1)
elif args.noclobber:
logger.warning(
"NOCLOBBER: not actually deleting directory %s", args.outdirname
)
else:
logger.info(
"Removing directory %s and everything below it", args.outdirname
)
shutil.rmtree(args.outdirname)
logger.info("Creating directory %s", args.outdirname)
try:
os.makedirs(args.outdirname) # We make the directory recursively
# Depending on the choice of method, a subdirectory will be made for
# alignment output files
if args.method != "TETRA":
os.makedirs(os.path.join(args.outdirname, ALIGNDIR[args.method]))
except OSError:
# This gets thrown if the directory exists. If we've forced overwrite/
# delete and we're not clobbering, we let things slide
if args.noclobber and args.force:
logger.info("NOCLOBBER+FORCE: not creating directory")
else:
            logger.error(last_exception())
sys.exit(1)
|
[
"def",
"make_outdir",
"(",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"args",
".",
"outdirname",
")",
":",
"if",
"not",
"args",
".",
"force",
":",
"logger",
".",
"error",
"(",
"\"Output directory %s would overwrite existing \"",
"+",
"\"files (exiting)\"",
",",
"args",
".",
"outdirname",
",",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"elif",
"args",
".",
"noclobber",
":",
"logger",
".",
"warning",
"(",
"\"NOCLOBBER: not actually deleting directory %s\"",
",",
"args",
".",
"outdirname",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Removing directory %s and everything below it\"",
",",
"args",
".",
"outdirname",
")",
"shutil",
".",
"rmtree",
"(",
"args",
".",
"outdirname",
")",
"logger",
".",
"info",
"(",
"\"Creating directory %s\"",
",",
"args",
".",
"outdirname",
")",
"try",
":",
"os",
".",
"makedirs",
"(",
"args",
".",
"outdirname",
")",
"# We make the directory recursively",
"# Depending on the choice of method, a subdirectory will be made for",
"# alignment output files",
"if",
"args",
".",
"method",
"!=",
"\"TETRA\"",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"outdirname",
",",
"ALIGNDIR",
"[",
"args",
".",
"method",
"]",
")",
")",
"except",
"OSError",
":",
"# This gets thrown if the directory exists. If we've forced overwrite/",
"# delete and we're not clobbering, we let things slide",
"if",
"args",
".",
"noclobber",
"and",
"args",
".",
"force",
":",
"logger",
".",
"info",
"(",
"\"NOCLOBBER+FORCE: not creating directory\"",
")",
"else",
":",
"logger",
".",
"error",
"(",
"last_exception",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Make the output directory, if required.
This is a little involved. If the output directory already exists,
we take the safe option by default, and stop with an error. We can,
however, choose to force the program to go on, in which case we can
either clobber the existing directory, or not. The options turn out
as the following, if the directory exists:
DEFAULT: stop and report the collision
FORCE: continue, and remove the existing output directory
NOCLOBBER+FORCE: continue, but do not remove the existing output
|
[
"Make",
"the",
"output",
"directory",
"if",
"required",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/average_nucleotide_identity.py#L447-L490
|
train
|
widdowquinn/pyani
|
bin/average_nucleotide_identity.py
|
compress_delete_outdir
|
def compress_delete_outdir(outdir):
"""Compress the contents of the passed directory to .tar.gz and delete."""
# Compress output in .tar.gz file and remove raw output
tarfn = outdir + ".tar.gz"
logger.info("\tCompressing output from %s to %s", outdir, tarfn)
with tarfile.open(tarfn, "w:gz") as fh:
fh.add(outdir)
logger.info("\tRemoving output directory %s", outdir)
shutil.rmtree(outdir)
|
python
|
def compress_delete_outdir(outdir):
"""Compress the contents of the passed directory to .tar.gz and delete."""
# Compress output in .tar.gz file and remove raw output
tarfn = outdir + ".tar.gz"
logger.info("\tCompressing output from %s to %s", outdir, tarfn)
with tarfile.open(tarfn, "w:gz") as fh:
fh.add(outdir)
logger.info("\tRemoving output directory %s", outdir)
shutil.rmtree(outdir)
|
[
"def",
"compress_delete_outdir",
"(",
"outdir",
")",
":",
"# Compress output in .tar.gz file and remove raw output",
"tarfn",
"=",
"outdir",
"+",
"\".tar.gz\"",
"logger",
".",
"info",
"(",
"\"\\tCompressing output from %s to %s\"",
",",
"outdir",
",",
"tarfn",
")",
"with",
"tarfile",
".",
"open",
"(",
"tarfn",
",",
"\"w:gz\"",
")",
"as",
"fh",
":",
"fh",
".",
"add",
"(",
"outdir",
")",
"logger",
".",
"info",
"(",
"\"\\tRemoving output directory %s\"",
",",
"outdir",
")",
"shutil",
".",
"rmtree",
"(",
"outdir",
")"
] |
Compress the contents of the passed directory to .tar.gz and delete.
|
[
"Compress",
"the",
"contents",
"of",
"the",
"passed",
"directory",
"to",
".",
"tar",
".",
"gz",
"and",
"delete",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/average_nucleotide_identity.py#L494-L502
|
train
|
widdowquinn/pyani
|
bin/average_nucleotide_identity.py
|
calculate_anim
|
def calculate_anim(infiles, org_lengths):
"""Returns ANIm result dataframes for files in input directory.
- infiles - paths to each input file
- org_lengths - dictionary of input sequence lengths, keyed by sequence
Finds ANI by the ANIm method, as described in Richter et al (2009)
Proc Natl Acad Sci USA 106: 19126-19131 doi:10.1073/pnas.0906412106.
All FASTA format files (selected by suffix) in the input directory
are compared against each other, pairwise, using NUCmer (which must
be in the path). NUCmer output is stored in the output directory.
The NUCmer .delta file output is parsed to obtain an alignment length
and similarity error count for every unique region alignment between
the two organisms, as represented by the sequences in the FASTA files.
These are processed to give matrices of aligned sequence lengths,
average nucleotide identity (ANI) percentages, coverage (aligned
    percentage of whole genome), and similarity error count for each pairwise
comparison.
"""
logger.info("Running ANIm")
logger.info("Generating NUCmer command-lines")
deltadir = os.path.join(args.outdirname, ALIGNDIR["ANIm"])
logger.info("Writing nucmer output to %s", deltadir)
# Schedule NUCmer runs
if not args.skip_nucmer:
joblist = anim.generate_nucmer_jobs(
infiles,
args.outdirname,
nucmer_exe=args.nucmer_exe,
filter_exe=args.filter_exe,
maxmatch=args.maxmatch,
jobprefix=args.jobprefix,
)
if args.scheduler == "multiprocessing":
logger.info("Running jobs with multiprocessing")
if args.workers is None:
logger.info("(using maximum number of available " + "worker threads)")
else:
logger.info("(using %d worker threads, if available)", args.workers)
cumval = run_mp.run_dependency_graph(
joblist, workers=args.workers, logger=logger
)
logger.info("Cumulative return value: %d", cumval)
if 0 < cumval:
logger.warning(
"At least one NUCmer comparison failed. " + "ANIm may fail."
)
else:
logger.info("All multiprocessing jobs complete.")
else:
logger.info("Running jobs with SGE")
logger.info("Jobarray group size set to %d", args.sgegroupsize)
run_sge.run_dependency_graph(
joblist,
logger=logger,
jgprefix=args.jobprefix,
sgegroupsize=args.sgegroupsize,
sgeargs=args.sgeargs,
)
else:
logger.warning("Skipping NUCmer run (as instructed)!")
# Process resulting .delta files
logger.info("Processing NUCmer .delta files.")
results = anim.process_deltadir(deltadir, org_lengths, logger=logger)
if results.zero_error: # zero percentage identity error
if not args.skip_nucmer and args.scheduler == "multiprocessing":
if 0 < cumval:
logger.error(
"This has possibly been a NUCmer run failure, "
+ "please investigate"
)
logger.error(last_exception())
sys.exit(1)
else:
logger.error(
"This is possibly due to a NUCmer comparison "
+ "being too distant for use. Please consider "
+ "using the --maxmatch option."
)
logger.error(
"This is alternatively due to NUCmer run "
+ "failure, analysis will continue, but please "
+ "investigate."
)
if not args.nocompress:
logger.info("Compressing/deleting %s", deltadir)
compress_delete_outdir(deltadir)
# Return processed data from .delta files
return results
|
python
|
def calculate_anim(infiles, org_lengths):
"""Returns ANIm result dataframes for files in input directory.
- infiles - paths to each input file
- org_lengths - dictionary of input sequence lengths, keyed by sequence
Finds ANI by the ANIm method, as described in Richter et al (2009)
Proc Natl Acad Sci USA 106: 19126-19131 doi:10.1073/pnas.0906412106.
All FASTA format files (selected by suffix) in the input directory
are compared against each other, pairwise, using NUCmer (which must
be in the path). NUCmer output is stored in the output directory.
The NUCmer .delta file output is parsed to obtain an alignment length
and similarity error count for every unique region alignment between
the two organisms, as represented by the sequences in the FASTA files.
These are processed to give matrices of aligned sequence lengths,
average nucleotide identity (ANI) percentages, coverage (aligned
    percentage of whole genome), and similarity error count for each pairwise
comparison.
"""
logger.info("Running ANIm")
logger.info("Generating NUCmer command-lines")
deltadir = os.path.join(args.outdirname, ALIGNDIR["ANIm"])
logger.info("Writing nucmer output to %s", deltadir)
# Schedule NUCmer runs
if not args.skip_nucmer:
joblist = anim.generate_nucmer_jobs(
infiles,
args.outdirname,
nucmer_exe=args.nucmer_exe,
filter_exe=args.filter_exe,
maxmatch=args.maxmatch,
jobprefix=args.jobprefix,
)
if args.scheduler == "multiprocessing":
logger.info("Running jobs with multiprocessing")
if args.workers is None:
logger.info("(using maximum number of available " + "worker threads)")
else:
logger.info("(using %d worker threads, if available)", args.workers)
cumval = run_mp.run_dependency_graph(
joblist, workers=args.workers, logger=logger
)
logger.info("Cumulative return value: %d", cumval)
if 0 < cumval:
logger.warning(
"At least one NUCmer comparison failed. " + "ANIm may fail."
)
else:
logger.info("All multiprocessing jobs complete.")
else:
logger.info("Running jobs with SGE")
logger.info("Jobarray group size set to %d", args.sgegroupsize)
run_sge.run_dependency_graph(
joblist,
logger=logger,
jgprefix=args.jobprefix,
sgegroupsize=args.sgegroupsize,
sgeargs=args.sgeargs,
)
else:
logger.warning("Skipping NUCmer run (as instructed)!")
# Process resulting .delta files
logger.info("Processing NUCmer .delta files.")
results = anim.process_deltadir(deltadir, org_lengths, logger=logger)
if results.zero_error: # zero percentage identity error
if not args.skip_nucmer and args.scheduler == "multiprocessing":
if 0 < cumval:
logger.error(
"This has possibly been a NUCmer run failure, "
+ "please investigate"
)
logger.error(last_exception())
sys.exit(1)
else:
logger.error(
"This is possibly due to a NUCmer comparison "
+ "being too distant for use. Please consider "
+ "using the --maxmatch option."
)
logger.error(
"This is alternatively due to NUCmer run "
+ "failure, analysis will continue, but please "
+ "investigate."
)
if not args.nocompress:
logger.info("Compressing/deleting %s", deltadir)
compress_delete_outdir(deltadir)
# Return processed data from .delta files
return results
|
[
"def",
"calculate_anim",
"(",
"infiles",
",",
"org_lengths",
")",
":",
"logger",
".",
"info",
"(",
"\"Running ANIm\"",
")",
"logger",
".",
"info",
"(",
"\"Generating NUCmer command-lines\"",
")",
"deltadir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"outdirname",
",",
"ALIGNDIR",
"[",
"\"ANIm\"",
"]",
")",
"logger",
".",
"info",
"(",
"\"Writing nucmer output to %s\"",
",",
"deltadir",
")",
"# Schedule NUCmer runs",
"if",
"not",
"args",
".",
"skip_nucmer",
":",
"joblist",
"=",
"anim",
".",
"generate_nucmer_jobs",
"(",
"infiles",
",",
"args",
".",
"outdirname",
",",
"nucmer_exe",
"=",
"args",
".",
"nucmer_exe",
",",
"filter_exe",
"=",
"args",
".",
"filter_exe",
",",
"maxmatch",
"=",
"args",
".",
"maxmatch",
",",
"jobprefix",
"=",
"args",
".",
"jobprefix",
",",
")",
"if",
"args",
".",
"scheduler",
"==",
"\"multiprocessing\"",
":",
"logger",
".",
"info",
"(",
"\"Running jobs with multiprocessing\"",
")",
"if",
"args",
".",
"workers",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"\"(using maximum number of available \"",
"+",
"\"worker threads)\"",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"(using %d worker threads, if available)\"",
",",
"args",
".",
"workers",
")",
"cumval",
"=",
"run_mp",
".",
"run_dependency_graph",
"(",
"joblist",
",",
"workers",
"=",
"args",
".",
"workers",
",",
"logger",
"=",
"logger",
")",
"logger",
".",
"info",
"(",
"\"Cumulative return value: %d\"",
",",
"cumval",
")",
"if",
"0",
"<",
"cumval",
":",
"logger",
".",
"warning",
"(",
"\"At least one NUCmer comparison failed. \"",
"+",
"\"ANIm may fail.\"",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"All multiprocessing jobs complete.\"",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Running jobs with SGE\"",
")",
"logger",
".",
"info",
"(",
"\"Jobarray group size set to %d\"",
",",
"args",
".",
"sgegroupsize",
")",
"run_sge",
".",
"run_dependency_graph",
"(",
"joblist",
",",
"logger",
"=",
"logger",
",",
"jgprefix",
"=",
"args",
".",
"jobprefix",
",",
"sgegroupsize",
"=",
"args",
".",
"sgegroupsize",
",",
"sgeargs",
"=",
"args",
".",
"sgeargs",
",",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Skipping NUCmer run (as instructed)!\"",
")",
"# Process resulting .delta files",
"logger",
".",
"info",
"(",
"\"Processing NUCmer .delta files.\"",
")",
"results",
"=",
"anim",
".",
"process_deltadir",
"(",
"deltadir",
",",
"org_lengths",
",",
"logger",
"=",
"logger",
")",
"if",
"results",
".",
"zero_error",
":",
"# zero percentage identity error",
"if",
"not",
"args",
".",
"skip_nucmer",
"and",
"args",
".",
"scheduler",
"==",
"\"multiprocessing\"",
":",
"if",
"0",
"<",
"cumval",
":",
"logger",
".",
"error",
"(",
"\"This has possibly been a NUCmer run failure, \"",
"+",
"\"please investigate\"",
")",
"logger",
".",
"error",
"(",
"last_exception",
"(",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"else",
":",
"logger",
".",
"error",
"(",
"\"This is possibly due to a NUCmer comparison \"",
"+",
"\"being too distant for use. Please consider \"",
"+",
"\"using the --maxmatch option.\"",
")",
"logger",
".",
"error",
"(",
"\"This is alternatively due to NUCmer run \"",
"+",
"\"failure, analysis will continue, but please \"",
"+",
"\"investigate.\"",
")",
"if",
"not",
"args",
".",
"nocompress",
":",
"logger",
".",
"info",
"(",
"\"Compressing/deleting %s\"",
",",
"deltadir",
")",
"compress_delete_outdir",
"(",
"deltadir",
")",
"# Return processed data from .delta files",
"return",
"results"
] |
Returns ANIm result dataframes for files in input directory.
- infiles - paths to each input file
- org_lengths - dictionary of input sequence lengths, keyed by sequence
Finds ANI by the ANIm method, as described in Richter et al (2009)
Proc Natl Acad Sci USA 106: 19126-19131 doi:10.1073/pnas.0906412106.
All FASTA format files (selected by suffix) in the input directory
are compared against each other, pairwise, using NUCmer (which must
be in the path). NUCmer output is stored in the output directory.
The NUCmer .delta file output is parsed to obtain an alignment length
and similarity error count for every unique region alignment between
the two organisms, as represented by the sequences in the FASTA files.
These are processed to give matrices of aligned sequence lengths,
average nucleotide identity (ANI) percentages, coverage (aligned
percentage of whole genome), and similarity error count for each pairwise
comparison.
|
[
"Returns",
"ANIm",
"result",
"dataframes",
"for",
"files",
"in",
"input",
"directory",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/average_nucleotide_identity.py#L506-L599
|
train
|
widdowquinn/pyani
|
bin/average_nucleotide_identity.py
|
calculate_tetra
|
def calculate_tetra(infiles):
"""Calculate TETRA for files in input directory.
- infiles - paths to each input file
Calculates TETRA correlation scores, as described in:
Richter M, Rossello-Mora R (2009) Shifting the genomic gold standard for
the prokaryotic species definition. Proc Natl Acad Sci USA 106:
19126-19131. doi:10.1073/pnas.0906412106.
and
Teeling et al. (2004) Application of tetranucleotide frequencies for the
assignment of genomic fragments. Env. Microbiol. 6(9): 938-947.
doi:10.1111/j.1462-2920.2004.00624.x
"""
logger.info("Running TETRA.")
# First, find Z-scores
logger.info("Calculating TETRA Z-scores for each sequence.")
tetra_zscores = {}
for filename in infiles:
logger.info("Calculating TETRA Z-scores for %s", filename)
org = os.path.splitext(os.path.split(filename)[-1])[0]
tetra_zscores[org] = tetra.calculate_tetra_zscore(filename)
# Then calculate Pearson correlation between Z-scores for each sequence
logger.info("Calculating TETRA correlation scores.")
tetra_correlations = tetra.calculate_correlations(tetra_zscores)
return tetra_correlations
|
python
|
def calculate_tetra(infiles):
"""Calculate TETRA for files in input directory.
- infiles - paths to each input file
Calculates TETRA correlation scores, as described in:
Richter M, Rossello-Mora R (2009) Shifting the genomic gold standard for
the prokaryotic species definition. Proc Natl Acad Sci USA 106:
19126-19131. doi:10.1073/pnas.0906412106.
and
Teeling et al. (2004) Application of tetranucleotide frequencies for the
assignment of genomic fragments. Env. Microbiol. 6(9): 938-947.
doi:10.1111/j.1462-2920.2004.00624.x
"""
logger.info("Running TETRA.")
# First, find Z-scores
logger.info("Calculating TETRA Z-scores for each sequence.")
tetra_zscores = {}
for filename in infiles:
logger.info("Calculating TETRA Z-scores for %s", filename)
org = os.path.splitext(os.path.split(filename)[-1])[0]
tetra_zscores[org] = tetra.calculate_tetra_zscore(filename)
# Then calculate Pearson correlation between Z-scores for each sequence
logger.info("Calculating TETRA correlation scores.")
tetra_correlations = tetra.calculate_correlations(tetra_zscores)
return tetra_correlations
|
[
"def",
"calculate_tetra",
"(",
"infiles",
")",
":",
"logger",
".",
"info",
"(",
"\"Running TETRA.\"",
")",
"# First, find Z-scores",
"logger",
".",
"info",
"(",
"\"Calculating TETRA Z-scores for each sequence.\"",
")",
"tetra_zscores",
"=",
"{",
"}",
"for",
"filename",
"in",
"infiles",
":",
"logger",
".",
"info",
"(",
"\"Calculating TETRA Z-scores for %s\"",
",",
"filename",
")",
"org",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"split",
"(",
"filename",
")",
"[",
"-",
"1",
"]",
")",
"[",
"0",
"]",
"tetra_zscores",
"[",
"org",
"]",
"=",
"tetra",
".",
"calculate_tetra_zscore",
"(",
"filename",
")",
"# Then calculate Pearson correlation between Z-scores for each sequence",
"logger",
".",
"info",
"(",
"\"Calculating TETRA correlation scores.\"",
")",
"tetra_correlations",
"=",
"tetra",
".",
"calculate_correlations",
"(",
"tetra_zscores",
")",
"return",
"tetra_correlations"
] |
Calculate TETRA for files in input directory.
- infiles - paths to each input file
Calculates TETRA correlation scores, as described in:
Richter M, Rossello-Mora R (2009) Shifting the genomic gold standard for
the prokaryotic species definition. Proc Natl Acad Sci USA 106:
19126-19131. doi:10.1073/pnas.0906412106.
and
Teeling et al. (2004) Application of tetranucleotide frequencies for the
assignment of genomic fragments. Env. Microbiol. 6(9): 938-947.
doi:10.1111/j.1462-2920.2004.00624.x
|
[
"Calculate",
"TETRA",
"for",
"files",
"in",
"input",
"directory",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/average_nucleotide_identity.py#L603-L632
|
train
|
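calculate_tetra is a thin CLI wrapper; stripped of its logging it reduces to the two pyani.tetra calls visible in the body above. A sketch of that core, with made-up file paths and assuming pyani.tetra keeps the signatures used there (calculate_tetra_zscore(filename) and calculate_correlations(zscore_dict)):

import os
from pyani import tetra

infiles = ["my_genomes/genome_A.fna", "my_genomes/genome_B.fna"]  # illustrative

tetra_zscores = {}
for filename in infiles:
    # Same organism naming as calculate_tetra: the filename stem.
    org = os.path.splitext(os.path.split(filename)[-1])[0]
    tetra_zscores[org] = tetra.calculate_tetra_zscore(filename)

# Pairwise Pearson correlations between the Z-score profiles.
tetra_correlations = tetra.calculate_correlations(tetra_zscores)
print(tetra_correlations)
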
widdowquinn/pyani
|
bin/average_nucleotide_identity.py
|
unified_anib
|
def unified_anib(infiles, org_lengths):
"""Calculate ANIb for files in input directory.
- infiles - paths to each input file
- org_lengths - dictionary of input sequence lengths, keyed by sequence
Calculates ANI by the ANIb method, as described in Goris et al. (2007)
Int J Syst Evol Micr 57: 81-91. doi:10.1099/ijs.0.64483-0. There are
some minor differences depending on whether BLAST+ or legacy BLAST
(BLASTALL) methods are used.
All FASTA format files (selected by suffix) in the input directory are
used to construct BLAST databases, placed in the output directory.
Each file's contents are also split into sequence fragments of length
    options.fragsize, and the multiple FASTA file that results is written to
the output directory. These are BLASTNed, pairwise, against the
databases.
The BLAST output is interrogated for all fragment matches that cover
at least 70% of the query sequence, with at least 30% nucleotide
identity over the full length of the query sequence. This is an odd
choice and doesn't correspond to the twilight zone limit as implied by
Goris et al. We persist with their definition, however. Only these
qualifying matches contribute to the total aligned length, and total
aligned sequence identity used to calculate ANI.
The results are processed to give matrices of aligned sequence length
(aln_lengths.tab), similarity error counts (sim_errors.tab), ANIs
(perc_ids.tab), and minimum aligned percentage (perc_aln.tab) of
each genome, for each pairwise comparison. These are written to the
output directory in plain text tab-separated format.
"""
logger.info("Running %s", args.method)
blastdir = os.path.join(args.outdirname, ALIGNDIR[args.method])
logger.info("Writing BLAST output to %s", blastdir)
# Build BLAST databases and run pairwise BLASTN
if not args.skip_blastn:
# Make sequence fragments
logger.info("Fragmenting input files, and writing to %s", args.outdirname)
# Fraglengths does not get reused with BLASTN
fragfiles, fraglengths = anib.fragment_fasta_files(
infiles, blastdir, args.fragsize
)
# Export fragment lengths as JSON, in case we re-run with --skip_blastn
with open(os.path.join(blastdir, "fraglengths.json"), "w") as outfile:
json.dump(fraglengths, outfile)
# Which executables are we using?
# if args.method == "ANIblastall":
# format_exe = args.formatdb_exe
# blast_exe = args.blastall_exe
# else:
# format_exe = args.makeblastdb_exe
# blast_exe = args.blastn_exe
# Run BLAST database-building and executables from a jobgraph
logger.info("Creating job dependency graph")
jobgraph = anib.make_job_graph(
infiles, fragfiles, anib.make_blastcmd_builder(args.method, blastdir)
)
# jobgraph = anib.make_job_graph(infiles, fragfiles, blastdir,
# format_exe, blast_exe, args.method,
# jobprefix=args.jobprefix)
if args.scheduler == "multiprocessing":
logger.info("Running jobs with multiprocessing")
logger.info("Running job dependency graph")
if args.workers is None:
logger.info("(using maximum number of available " + "worker threads)")
else:
logger.info("(using %d worker threads, if available)", args.workers)
cumval = run_mp.run_dependency_graph(
jobgraph, workers=args.workers, logger=logger
)
if 0 < cumval:
logger.warning(
"At least one BLAST run failed. " + "%s may fail.", args.method
)
else:
logger.info("All multiprocessing jobs complete.")
else:
run_sge.run_dependency_graph(jobgraph, logger=logger)
logger.info("Running jobs with SGE")
else:
# Import fragment lengths from JSON
if args.method == "ANIblastall":
with open(os.path.join(blastdir, "fraglengths.json"), "rU") as infile:
fraglengths = json.load(infile)
else:
fraglengths = None
logger.warning("Skipping BLASTN runs (as instructed)!")
# Process pairwise BLASTN output
logger.info("Processing pairwise %s BLAST output.", args.method)
try:
data = anib.process_blast(
blastdir, org_lengths, fraglengths=fraglengths, mode=args.method
)
except ZeroDivisionError:
logger.error("One or more BLAST output files has a problem.")
if not args.skip_blastn:
if 0 < cumval:
logger.error(
"This is possibly due to BLASTN run failure, "
+ "please investigate"
)
else:
logger.error(
"This is possibly due to a BLASTN comparison "
+ "being too distant for use."
)
logger.error(last_exception())
if not args.nocompress:
logger.info("Compressing/deleting %s", blastdir)
compress_delete_outdir(blastdir)
# Return processed BLAST data
return data
|
python
|
def unified_anib(infiles, org_lengths):
"""Calculate ANIb for files in input directory.
- infiles - paths to each input file
- org_lengths - dictionary of input sequence lengths, keyed by sequence
Calculates ANI by the ANIb method, as described in Goris et al. (2007)
Int J Syst Evol Micr 57: 81-91. doi:10.1099/ijs.0.64483-0. There are
some minor differences depending on whether BLAST+ or legacy BLAST
(BLASTALL) methods are used.
All FASTA format files (selected by suffix) in the input directory are
used to construct BLAST databases, placed in the output directory.
Each file's contents are also split into sequence fragments of length
options.fragsize, and the multiple FASTA file that results written to
the output directory. These are BLASTNed, pairwise, against the
databases.
The BLAST output is interrogated for all fragment matches that cover
at least 70% of the query sequence, with at least 30% nucleotide
identity over the full length of the query sequence. This is an odd
choice and doesn't correspond to the twilight zone limit as implied by
Goris et al. We persist with their definition, however. Only these
qualifying matches contribute to the total aligned length, and total
aligned sequence identity used to calculate ANI.
The results are processed to give matrices of aligned sequence length
(aln_lengths.tab), similarity error counts (sim_errors.tab), ANIs
(perc_ids.tab), and minimum aligned percentage (perc_aln.tab) of
each genome, for each pairwise comparison. These are written to the
output directory in plain text tab-separated format.
"""
logger.info("Running %s", args.method)
blastdir = os.path.join(args.outdirname, ALIGNDIR[args.method])
logger.info("Writing BLAST output to %s", blastdir)
# Build BLAST databases and run pairwise BLASTN
if not args.skip_blastn:
# Make sequence fragments
logger.info("Fragmenting input files, and writing to %s", args.outdirname)
# Fraglengths does not get reused with BLASTN
fragfiles, fraglengths = anib.fragment_fasta_files(
infiles, blastdir, args.fragsize
)
# Export fragment lengths as JSON, in case we re-run with --skip_blastn
with open(os.path.join(blastdir, "fraglengths.json"), "w") as outfile:
json.dump(fraglengths, outfile)
# Which executables are we using?
# if args.method == "ANIblastall":
# format_exe = args.formatdb_exe
# blast_exe = args.blastall_exe
# else:
# format_exe = args.makeblastdb_exe
# blast_exe = args.blastn_exe
# Run BLAST database-building and executables from a jobgraph
logger.info("Creating job dependency graph")
jobgraph = anib.make_job_graph(
infiles, fragfiles, anib.make_blastcmd_builder(args.method, blastdir)
)
# jobgraph = anib.make_job_graph(infiles, fragfiles, blastdir,
# format_exe, blast_exe, args.method,
# jobprefix=args.jobprefix)
if args.scheduler == "multiprocessing":
logger.info("Running jobs with multiprocessing")
logger.info("Running job dependency graph")
if args.workers is None:
logger.info("(using maximum number of available " + "worker threads)")
else:
logger.info("(using %d worker threads, if available)", args.workers)
cumval = run_mp.run_dependency_graph(
jobgraph, workers=args.workers, logger=logger
)
if 0 < cumval:
logger.warning(
"At least one BLAST run failed. " + "%s may fail.", args.method
)
else:
logger.info("All multiprocessing jobs complete.")
else:
run_sge.run_dependency_graph(jobgraph, logger=logger)
logger.info("Running jobs with SGE")
else:
# Import fragment lengths from JSON
if args.method == "ANIblastall":
with open(os.path.join(blastdir, "fraglengths.json"), "rU") as infile:
fraglengths = json.load(infile)
else:
fraglengths = None
logger.warning("Skipping BLASTN runs (as instructed)!")
# Process pairwise BLASTN output
logger.info("Processing pairwise %s BLAST output.", args.method)
try:
data = anib.process_blast(
blastdir, org_lengths, fraglengths=fraglengths, mode=args.method
)
except ZeroDivisionError:
logger.error("One or more BLAST output files has a problem.")
if not args.skip_blastn:
if 0 < cumval:
logger.error(
"This is possibly due to BLASTN run failure, "
+ "please investigate"
)
else:
logger.error(
"This is possibly due to a BLASTN comparison "
+ "being too distant for use."
)
logger.error(last_exception())
if not args.nocompress:
logger.info("Compressing/deleting %s", blastdir)
compress_delete_outdir(blastdir)
# Return processed BLAST data
return data
|
[
"def",
"unified_anib",
"(",
"infiles",
",",
"org_lengths",
")",
":",
"logger",
".",
"info",
"(",
"\"Running %s\"",
",",
"args",
".",
"method",
")",
"blastdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"outdirname",
",",
"ALIGNDIR",
"[",
"args",
".",
"method",
"]",
")",
"logger",
".",
"info",
"(",
"\"Writing BLAST output to %s\"",
",",
"blastdir",
")",
"# Build BLAST databases and run pairwise BLASTN",
"if",
"not",
"args",
".",
"skip_blastn",
":",
"# Make sequence fragments",
"logger",
".",
"info",
"(",
"\"Fragmenting input files, and writing to %s\"",
",",
"args",
".",
"outdirname",
")",
"# Fraglengths does not get reused with BLASTN",
"fragfiles",
",",
"fraglengths",
"=",
"anib",
".",
"fragment_fasta_files",
"(",
"infiles",
",",
"blastdir",
",",
"args",
".",
"fragsize",
")",
"# Export fragment lengths as JSON, in case we re-run with --skip_blastn",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"blastdir",
",",
"\"fraglengths.json\"",
")",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"json",
".",
"dump",
"(",
"fraglengths",
",",
"outfile",
")",
"# Which executables are we using?",
"# if args.method == \"ANIblastall\":",
"# format_exe = args.formatdb_exe",
"# blast_exe = args.blastall_exe",
"# else:",
"# format_exe = args.makeblastdb_exe",
"# blast_exe = args.blastn_exe",
"# Run BLAST database-building and executables from a jobgraph",
"logger",
".",
"info",
"(",
"\"Creating job dependency graph\"",
")",
"jobgraph",
"=",
"anib",
".",
"make_job_graph",
"(",
"infiles",
",",
"fragfiles",
",",
"anib",
".",
"make_blastcmd_builder",
"(",
"args",
".",
"method",
",",
"blastdir",
")",
")",
"# jobgraph = anib.make_job_graph(infiles, fragfiles, blastdir,",
"# format_exe, blast_exe, args.method,",
"# jobprefix=args.jobprefix)",
"if",
"args",
".",
"scheduler",
"==",
"\"multiprocessing\"",
":",
"logger",
".",
"info",
"(",
"\"Running jobs with multiprocessing\"",
")",
"logger",
".",
"info",
"(",
"\"Running job dependency graph\"",
")",
"if",
"args",
".",
"workers",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"\"(using maximum number of available \"",
"+",
"\"worker threads)\"",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"(using %d worker threads, if available)\"",
",",
"args",
".",
"workers",
")",
"cumval",
"=",
"run_mp",
".",
"run_dependency_graph",
"(",
"jobgraph",
",",
"workers",
"=",
"args",
".",
"workers",
",",
"logger",
"=",
"logger",
")",
"if",
"0",
"<",
"cumval",
":",
"logger",
".",
"warning",
"(",
"\"At least one BLAST run failed. \"",
"+",
"\"%s may fail.\"",
",",
"args",
".",
"method",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"All multiprocessing jobs complete.\"",
")",
"else",
":",
"run_sge",
".",
"run_dependency_graph",
"(",
"jobgraph",
",",
"logger",
"=",
"logger",
")",
"logger",
".",
"info",
"(",
"\"Running jobs with SGE\"",
")",
"else",
":",
"# Import fragment lengths from JSON",
"if",
"args",
".",
"method",
"==",
"\"ANIblastall\"",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"blastdir",
",",
"\"fraglengths.json\"",
")",
",",
"\"rU\"",
")",
"as",
"infile",
":",
"fraglengths",
"=",
"json",
".",
"load",
"(",
"infile",
")",
"else",
":",
"fraglengths",
"=",
"None",
"logger",
".",
"warning",
"(",
"\"Skipping BLASTN runs (as instructed)!\"",
")",
"# Process pairwise BLASTN output",
"logger",
".",
"info",
"(",
"\"Processing pairwise %s BLAST output.\"",
",",
"args",
".",
"method",
")",
"try",
":",
"data",
"=",
"anib",
".",
"process_blast",
"(",
"blastdir",
",",
"org_lengths",
",",
"fraglengths",
"=",
"fraglengths",
",",
"mode",
"=",
"args",
".",
"method",
")",
"except",
"ZeroDivisionError",
":",
"logger",
".",
"error",
"(",
"\"One or more BLAST output files has a problem.\"",
")",
"if",
"not",
"args",
".",
"skip_blastn",
":",
"if",
"0",
"<",
"cumval",
":",
"logger",
".",
"error",
"(",
"\"This is possibly due to BLASTN run failure, \"",
"+",
"\"please investigate\"",
")",
"else",
":",
"logger",
".",
"error",
"(",
"\"This is possibly due to a BLASTN comparison \"",
"+",
"\"being too distant for use.\"",
")",
"logger",
".",
"error",
"(",
"last_exception",
"(",
")",
")",
"if",
"not",
"args",
".",
"nocompress",
":",
"logger",
".",
"info",
"(",
"\"Compressing/deleting %s\"",
",",
"blastdir",
")",
"compress_delete_outdir",
"(",
"blastdir",
")",
"# Return processed BLAST data",
"return",
"data"
] |
Calculate ANIb for files in input directory.
- infiles - paths to each input file
- org_lengths - dictionary of input sequence lengths, keyed by sequence
Calculates ANI by the ANIb method, as described in Goris et al. (2007)
Int J Syst Evol Micr 57: 81-91. doi:10.1099/ijs.0.64483-0. There are
some minor differences depending on whether BLAST+ or legacy BLAST
(BLASTALL) methods are used.
All FASTA format files (selected by suffix) in the input directory are
used to construct BLAST databases, placed in the output directory.
Each file's contents are also split into sequence fragments of length
options.fragsize, and the multiple FASTA file that results written to
the output directory. These are BLASTNed, pairwise, against the
databases.
The BLAST output is interrogated for all fragment matches that cover
at least 70% of the query sequence, with at least 30% nucleotide
identity over the full length of the query sequence. This is an odd
choice and doesn't correspond to the twilight zone limit as implied by
Goris et al. We persist with their definition, however. Only these
qualifying matches contribute to the total aligned length, and total
aligned sequence identity used to calculate ANI.
The results are processed to give matrices of aligned sequence length
(aln_lengths.tab), similarity error counts (sim_errors.tab), ANIs
(perc_ids.tab), and minimum aligned percentage (perc_aln.tab) of
each genome, for each pairwise comparison. These are written to the
output directory in plain text tab-separated format.
|
[
"Calculate",
"ANIb",
"for",
"files",
"in",
"input",
"directory",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/average_nucleotide_identity.py#L636-L752
|
train
|
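The ANIb docstring in the record above filters BLAST fragment matches by coverage and identity before they count toward ANI. A self-contained sketch of that qualifying test only; the 70% coverage / 30% identity thresholds are taken from the docstring, the match tuples are hypothetical, and the real BLAST column handling lives in pyani.anib.

# Each hypothetical match: (aligned_length, identical_bases, query_fragment_length)
matches = [(980, 900, 1020), (400, 150, 1020), (750, 300, 1020)]

MIN_COVERAGE = 0.70   # fragment must be covered to at least 70%
MIN_IDENTITY = 0.30   # at least 30% identity over the full fragment length

qualifying = []
for aln_len, identical, frag_len in matches:
    coverage = aln_len / frag_len
    identity = identical / frag_len
    if coverage >= MIN_COVERAGE and identity >= MIN_IDENTITY:
        qualifying.append((aln_len, identical))

# Only qualifying matches contribute to total aligned length and identity.
total_aln = sum(a for a, _ in qualifying)
total_id = sum(i for _, i in qualifying)
print(total_aln, total_id / total_aln if total_aln else 0.0)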
widdowquinn/pyani
|
bin/average_nucleotide_identity.py
|
subsample_input
|
def subsample_input(infiles):
"""Returns a random subsample of the input files.
- infiles: a list of input files for analysis
"""
logger.info("--subsample: %s", args.subsample)
try:
samplesize = float(args.subsample)
except TypeError: # Not a number
logger.error(
"--subsample must be int or float, got %s (exiting)", type(args.subsample)
)
sys.exit(1)
if samplesize <= 0: # Not a positive value
logger.error("--subsample must be positive value, got %s", str(args.subsample))
sys.exit(1)
if int(samplesize) > 1:
logger.info("Sample size integer > 1: %d", samplesize)
k = min(int(samplesize), len(infiles))
else:
logger.info("Sample size proportion in (0, 1]: %.3f", samplesize)
k = int(min(samplesize, 1.0) * len(infiles))
logger.info("Randomly subsampling %d sequences for analysis", k)
if args.seed:
logger.info("Setting random seed with: %s", args.seed)
random.seed(args.seed)
else:
logger.warning("Subsampling without specified random seed!")
logger.warning("Subsampling may NOT be easily reproducible!")
return random.sample(infiles, k)
|
python
|
def subsample_input(infiles):
"""Returns a random subsample of the input files.
- infiles: a list of input files for analysis
"""
logger.info("--subsample: %s", args.subsample)
try:
samplesize = float(args.subsample)
except TypeError: # Not a number
logger.error(
"--subsample must be int or float, got %s (exiting)", type(args.subsample)
)
sys.exit(1)
if samplesize <= 0: # Not a positive value
logger.error("--subsample must be positive value, got %s", str(args.subsample))
sys.exit(1)
if int(samplesize) > 1:
logger.info("Sample size integer > 1: %d", samplesize)
k = min(int(samplesize), len(infiles))
else:
logger.info("Sample size proportion in (0, 1]: %.3f", samplesize)
k = int(min(samplesize, 1.0) * len(infiles))
logger.info("Randomly subsampling %d sequences for analysis", k)
if args.seed:
logger.info("Setting random seed with: %s", args.seed)
random.seed(args.seed)
else:
logger.warning("Subsampling without specified random seed!")
logger.warning("Subsampling may NOT be easily reproducible!")
return random.sample(infiles, k)
|
[
"def",
"subsample_input",
"(",
"infiles",
")",
":",
"logger",
".",
"info",
"(",
"\"--subsample: %s\"",
",",
"args",
".",
"subsample",
")",
"try",
":",
"samplesize",
"=",
"float",
"(",
"args",
".",
"subsample",
")",
"except",
"TypeError",
":",
"# Not a number",
"logger",
".",
"error",
"(",
"\"--subsample must be int or float, got %s (exiting)\"",
",",
"type",
"(",
"args",
".",
"subsample",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"samplesize",
"<=",
"0",
":",
"# Not a positive value",
"logger",
".",
"error",
"(",
"\"--subsample must be positive value, got %s\"",
",",
"str",
"(",
"args",
".",
"subsample",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"int",
"(",
"samplesize",
")",
">",
"1",
":",
"logger",
".",
"info",
"(",
"\"Sample size integer > 1: %d\"",
",",
"samplesize",
")",
"k",
"=",
"min",
"(",
"int",
"(",
"samplesize",
")",
",",
"len",
"(",
"infiles",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Sample size proportion in (0, 1]: %.3f\"",
",",
"samplesize",
")",
"k",
"=",
"int",
"(",
"min",
"(",
"samplesize",
",",
"1.0",
")",
"*",
"len",
"(",
"infiles",
")",
")",
"logger",
".",
"info",
"(",
"\"Randomly subsampling %d sequences for analysis\"",
",",
"k",
")",
"if",
"args",
".",
"seed",
":",
"logger",
".",
"info",
"(",
"\"Setting random seed with: %s\"",
",",
"args",
".",
"seed",
")",
"random",
".",
"seed",
"(",
"args",
".",
"seed",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Subsampling without specified random seed!\"",
")",
"logger",
".",
"warning",
"(",
"\"Subsampling may NOT be easily reproducible!\"",
")",
"return",
"random",
".",
"sample",
"(",
"infiles",
",",
"k",
")"
] |
Returns a random subsample of the input files.
- infiles: a list of input files for analysis
|
[
"Returns",
"a",
"random",
"subsample",
"of",
"the",
"input",
"files",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/average_nucleotide_identity.py#L813-L842
|
train
|
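subsample_input above treats --subsample either as an absolute count (values greater than 1, capped at the number of inputs) or as a proportion of the input set (values in (0, 1]). A small sketch of just that branch, with hypothetical values.

import random

def pick_sample_size(samplesize, n_inputs):
    """Return how many inputs to keep, following the rule in the record above."""
    if int(samplesize) > 1:        # absolute count, capped at the number of inputs
        return min(int(samplesize), n_inputs)
    return int(min(samplesize, 1.0) * n_inputs)   # proportion of the inputs

infiles = ["a.fna", "b.fna", "c.fna", "d.fna"]    # hypothetical input list
random.seed(42)                                   # fixed seed for reproducibility
k = pick_sample_size(0.5, len(infiles))           # -> 2
print(random.sample(infiles, k))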
widdowquinn/pyani
|
pyani/pyani_jobs.py
|
Job.wait
|
def wait(self, interval=SGE_WAIT):
"""Wait until the job finishes, and poll SGE on its status."""
finished = False
while not finished:
time.sleep(interval)
interval = min(2 * interval, 60)
finished = os.system("qstat -j %s > /dev/null" % (self.name))
|
python
|
def wait(self, interval=SGE_WAIT):
"""Wait until the job finishes, and poll SGE on its status."""
finished = False
while not finished:
time.sleep(interval)
interval = min(2 * interval, 60)
finished = os.system("qstat -j %s > /dev/null" % (self.name))
|
[
"def",
"wait",
"(",
"self",
",",
"interval",
"=",
"SGE_WAIT",
")",
":",
"finished",
"=",
"False",
"while",
"not",
"finished",
":",
"time",
".",
"sleep",
"(",
"interval",
")",
"interval",
"=",
"min",
"(",
"2",
"*",
"interval",
",",
"60",
")",
"finished",
"=",
"os",
".",
"system",
"(",
"\"qstat -j %s > /dev/null\"",
"%",
"(",
"self",
".",
"name",
")",
")"
] |
Wait until the job finishes, and poll SGE on its status.
|
[
"Wait",
"until",
"the",
"job",
"finishes",
"and",
"poll",
"SGE",
"on",
"its",
"status",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_jobs.py#L77-L83
|
train
|
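Job.wait above polls SGE via qstat, doubling the sleep interval after each poll and capping it at 60 seconds; os.system returns non-zero once the job is no longer known to qstat. A standalone sketch of the same back-off pattern, with a stubbed check so it runs without SGE installed.

import time

def wait_with_backoff(is_finished, interval=0.5, cap=60):
    """Poll is_finished(), doubling the sleep interval up to a cap."""
    while not is_finished():
        time.sleep(interval)
        interval = min(2 * interval, cap)

# Hypothetical stand-in for "qstat -j <name>": reports finished on the third poll.
calls = {"n": 0}
def fake_qstat():
    calls["n"] += 1
    return calls["n"] >= 3

wait_with_backoff(fake_qstat, interval=0.01)
print("polled", calls["n"], "times")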
widdowquinn/pyani
|
pyani/anim.py
|
generate_nucmer_jobs
|
def generate_nucmer_jobs(
filenames,
outdir=".",
nucmer_exe=pyani_config.NUCMER_DEFAULT,
filter_exe=pyani_config.FILTER_DEFAULT,
maxmatch=False,
jobprefix="ANINUCmer",
):
"""Return a list of Jobs describing NUCmer command-lines for ANIm
- filenames - a list of paths to input FASTA files
- outdir - path to output directory
- nucmer_exe - location of the nucmer binary
- maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option
Loop over all FASTA files, generating Jobs describing NUCmer command lines
for each pairwise comparison.
"""
ncmds, fcmds = generate_nucmer_commands(
filenames, outdir, nucmer_exe, filter_exe, maxmatch
)
joblist = []
for idx, ncmd in enumerate(ncmds):
njob = pyani_jobs.Job("%s_%06d-n" % (jobprefix, idx), ncmd)
fjob = pyani_jobs.Job("%s_%06d-f" % (jobprefix, idx), fcmds[idx])
fjob.add_dependency(njob)
# joblist.append(njob) # not required: dependency in fjob
joblist.append(fjob)
return joblist
|
python
|
def generate_nucmer_jobs(
filenames,
outdir=".",
nucmer_exe=pyani_config.NUCMER_DEFAULT,
filter_exe=pyani_config.FILTER_DEFAULT,
maxmatch=False,
jobprefix="ANINUCmer",
):
"""Return a list of Jobs describing NUCmer command-lines for ANIm
- filenames - a list of paths to input FASTA files
- outdir - path to output directory
- nucmer_exe - location of the nucmer binary
- maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option
Loop over all FASTA files, generating Jobs describing NUCmer command lines
for each pairwise comparison.
"""
ncmds, fcmds = generate_nucmer_commands(
filenames, outdir, nucmer_exe, filter_exe, maxmatch
)
joblist = []
for idx, ncmd in enumerate(ncmds):
njob = pyani_jobs.Job("%s_%06d-n" % (jobprefix, idx), ncmd)
fjob = pyani_jobs.Job("%s_%06d-f" % (jobprefix, idx), fcmds[idx])
fjob.add_dependency(njob)
# joblist.append(njob) # not required: dependency in fjob
joblist.append(fjob)
return joblist
|
[
"def",
"generate_nucmer_jobs",
"(",
"filenames",
",",
"outdir",
"=",
"\".\"",
",",
"nucmer_exe",
"=",
"pyani_config",
".",
"NUCMER_DEFAULT",
",",
"filter_exe",
"=",
"pyani_config",
".",
"FILTER_DEFAULT",
",",
"maxmatch",
"=",
"False",
",",
"jobprefix",
"=",
"\"ANINUCmer\"",
",",
")",
":",
"ncmds",
",",
"fcmds",
"=",
"generate_nucmer_commands",
"(",
"filenames",
",",
"outdir",
",",
"nucmer_exe",
",",
"filter_exe",
",",
"maxmatch",
")",
"joblist",
"=",
"[",
"]",
"for",
"idx",
",",
"ncmd",
"in",
"enumerate",
"(",
"ncmds",
")",
":",
"njob",
"=",
"pyani_jobs",
".",
"Job",
"(",
"\"%s_%06d-n\"",
"%",
"(",
"jobprefix",
",",
"idx",
")",
",",
"ncmd",
")",
"fjob",
"=",
"pyani_jobs",
".",
"Job",
"(",
"\"%s_%06d-f\"",
"%",
"(",
"jobprefix",
",",
"idx",
")",
",",
"fcmds",
"[",
"idx",
"]",
")",
"fjob",
".",
"add_dependency",
"(",
"njob",
")",
"# joblist.append(njob) # not required: dependency in fjob",
"joblist",
".",
"append",
"(",
"fjob",
")",
"return",
"joblist"
] |
Return a list of Jobs describing NUCmer command-lines for ANIm
- filenames - a list of paths to input FASTA files
- outdir - path to output directory
- nucmer_exe - location of the nucmer binary
- maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option
Loop over all FASTA files, generating Jobs describing NUCmer command lines
for each pairwise comparison.
|
[
"Return",
"a",
"list",
"of",
"Jobs",
"describing",
"NUCmer",
"command",
"-",
"lines",
"for",
"ANIm"
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anim.py#L33-L61
|
train
|
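generate_nucmer_jobs above pairs each NUCmer command with its delta-filter command and records the dependency on the filter job, so only the filter jobs need to be appended to the job list. A sketch of that pairing using plain dictionaries instead of pyani_jobs.Job; the command strings are hypothetical placeholders.

ncmds = ["nucmer ... org1_vs_org2", "nucmer ... org1_vs_org3"]           # hypothetical
fcmds = ["delta_filter_wrapper.py ... org1_vs_org2",
         "delta_filter_wrapper.py ... org1_vs_org3"]

joblist = []
for idx, ncmd in enumerate(ncmds):
    njob = {"name": "ANINUCmer_%06d-n" % idx, "cmd": ncmd, "deps": []}
    fjob = {"name": "ANINUCmer_%06d-f" % idx, "cmd": fcmds[idx], "deps": [njob]}
    joblist.append(fjob)   # the NUCmer job stays reachable through the dependency

for job in joblist:
    print(job["name"], "<-", job["deps"][0]["name"])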
widdowquinn/pyani
|
pyani/anim.py
|
generate_nucmer_commands
|
def generate_nucmer_commands(
filenames,
outdir=".",
nucmer_exe=pyani_config.NUCMER_DEFAULT,
filter_exe=pyani_config.FILTER_DEFAULT,
maxmatch=False,
):
"""Return a tuple of lists of NUCmer command-lines for ANIm
The first element is a list of NUCmer commands, the second a list
of delta_filter_wrapper.py commands. These are ordered such that
commands are paired. The NUCmer commands should be run before
the delta-filter commands.
- filenames - a list of paths to input FASTA files
- outdir - path to output directory
- nucmer_exe - location of the nucmer binary
- maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option
Loop over all FASTA files generating NUCmer command lines for each
pairwise comparison.
"""
nucmer_cmdlines, delta_filter_cmdlines = [], []
for idx, fname1 in enumerate(filenames[:-1]):
for fname2 in filenames[idx + 1 :]:
ncmd, dcmd = construct_nucmer_cmdline(
fname1, fname2, outdir, nucmer_exe, filter_exe, maxmatch
)
nucmer_cmdlines.append(ncmd)
delta_filter_cmdlines.append(dcmd)
return (nucmer_cmdlines, delta_filter_cmdlines)
|
python
|
def generate_nucmer_commands(
filenames,
outdir=".",
nucmer_exe=pyani_config.NUCMER_DEFAULT,
filter_exe=pyani_config.FILTER_DEFAULT,
maxmatch=False,
):
"""Return a tuple of lists of NUCmer command-lines for ANIm
The first element is a list of NUCmer commands, the second a list
of delta_filter_wrapper.py commands. These are ordered such that
commands are paired. The NUCmer commands should be run before
the delta-filter commands.
- filenames - a list of paths to input FASTA files
- outdir - path to output directory
- nucmer_exe - location of the nucmer binary
- maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option
Loop over all FASTA files generating NUCmer command lines for each
pairwise comparison.
"""
nucmer_cmdlines, delta_filter_cmdlines = [], []
for idx, fname1 in enumerate(filenames[:-1]):
for fname2 in filenames[idx + 1 :]:
ncmd, dcmd = construct_nucmer_cmdline(
fname1, fname2, outdir, nucmer_exe, filter_exe, maxmatch
)
nucmer_cmdlines.append(ncmd)
delta_filter_cmdlines.append(dcmd)
return (nucmer_cmdlines, delta_filter_cmdlines)
|
[
"def",
"generate_nucmer_commands",
"(",
"filenames",
",",
"outdir",
"=",
"\".\"",
",",
"nucmer_exe",
"=",
"pyani_config",
".",
"NUCMER_DEFAULT",
",",
"filter_exe",
"=",
"pyani_config",
".",
"FILTER_DEFAULT",
",",
"maxmatch",
"=",
"False",
",",
")",
":",
"nucmer_cmdlines",
",",
"delta_filter_cmdlines",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"idx",
",",
"fname1",
"in",
"enumerate",
"(",
"filenames",
"[",
":",
"-",
"1",
"]",
")",
":",
"for",
"fname2",
"in",
"filenames",
"[",
"idx",
"+",
"1",
":",
"]",
":",
"ncmd",
",",
"dcmd",
"=",
"construct_nucmer_cmdline",
"(",
"fname1",
",",
"fname2",
",",
"outdir",
",",
"nucmer_exe",
",",
"filter_exe",
",",
"maxmatch",
")",
"nucmer_cmdlines",
".",
"append",
"(",
"ncmd",
")",
"delta_filter_cmdlines",
".",
"append",
"(",
"dcmd",
")",
"return",
"(",
"nucmer_cmdlines",
",",
"delta_filter_cmdlines",
")"
] |
Return a tuple of lists of NUCmer command-lines for ANIm
The first element is a list of NUCmer commands, the second a list
of delta_filter_wrapper.py commands. These are ordered such that
commands are paired. The NUCmer commands should be run before
the delta-filter commands.
- filenames - a list of paths to input FASTA files
- outdir - path to output directory
- nucmer_exe - location of the nucmer binary
- maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option
Loop over all FASTA files generating NUCmer command lines for each
pairwise comparison.
|
[
"Return",
"a",
"tuple",
"of",
"lists",
"of",
"NUCmer",
"command",
"-",
"lines",
"for",
"ANIm"
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anim.py#L66-L96
|
train
|
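The nested loop in generate_nucmer_commands above enumerates each unordered pair of input files exactly once; itertools.combinations expresses the same traversal. A sketch with hypothetical filenames.

from itertools import combinations

filenames = ["org1.fna", "org2.fna", "org3.fna"]   # hypothetical inputs

# Equivalent to: for idx, f1 in enumerate(filenames[:-1]): for f2 in filenames[idx + 1:]
pairs = list(combinations(filenames, 2))
print(pairs)
# [('org1.fna', 'org2.fna'), ('org1.fna', 'org3.fna'), ('org2.fna', 'org3.fna')]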
widdowquinn/pyani
|
pyani/anim.py
|
construct_nucmer_cmdline
|
def construct_nucmer_cmdline(
fname1,
fname2,
outdir=".",
nucmer_exe=pyani_config.NUCMER_DEFAULT,
filter_exe=pyani_config.FILTER_DEFAULT,
maxmatch=False,
):
"""Returns a tuple of NUCmer and delta-filter commands
The split into a tuple was made necessary by changes to SGE/OGE. The
delta-filter command must now be run as a dependency of the NUCmer
command, and be wrapped in a Python script to capture STDOUT.
NOTE: This command-line writes output data to a subdirectory of the passed
outdir, called "nucmer_output".
- fname1 - query FASTA filepath
- fname2 - subject FASTA filepath
- outdir - path to output directory
- maxmatch - Boolean flag indicating whether to use NUCmer's -maxmatch
option. If not, the -mum option is used instead
"""
outsubdir = os.path.join(outdir, pyani_config.ALIGNDIR["ANIm"])
outprefix = os.path.join(
outsubdir,
"%s_vs_%s"
% (
os.path.splitext(os.path.split(fname1)[-1])[0],
os.path.splitext(os.path.split(fname2)[-1])[0],
),
)
if maxmatch:
mode = "--maxmatch"
else:
mode = "--mum"
nucmercmd = "{0} {1} -p {2} {3} {4}".format(
nucmer_exe, mode, outprefix, fname1, fname2
)
filtercmd = "delta_filter_wrapper.py " + "{0} -1 {1} {2}".format(
filter_exe, outprefix + ".delta", outprefix + ".filter"
)
return (nucmercmd, filtercmd)
|
python
|
def construct_nucmer_cmdline(
fname1,
fname2,
outdir=".",
nucmer_exe=pyani_config.NUCMER_DEFAULT,
filter_exe=pyani_config.FILTER_DEFAULT,
maxmatch=False,
):
"""Returns a tuple of NUCmer and delta-filter commands
The split into a tuple was made necessary by changes to SGE/OGE. The
delta-filter command must now be run as a dependency of the NUCmer
command, and be wrapped in a Python script to capture STDOUT.
NOTE: This command-line writes output data to a subdirectory of the passed
outdir, called "nucmer_output".
- fname1 - query FASTA filepath
- fname2 - subject FASTA filepath
- outdir - path to output directory
- maxmatch - Boolean flag indicating whether to use NUCmer's -maxmatch
option. If not, the -mum option is used instead
"""
outsubdir = os.path.join(outdir, pyani_config.ALIGNDIR["ANIm"])
outprefix = os.path.join(
outsubdir,
"%s_vs_%s"
% (
os.path.splitext(os.path.split(fname1)[-1])[0],
os.path.splitext(os.path.split(fname2)[-1])[0],
),
)
if maxmatch:
mode = "--maxmatch"
else:
mode = "--mum"
nucmercmd = "{0} {1} -p {2} {3} {4}".format(
nucmer_exe, mode, outprefix, fname1, fname2
)
filtercmd = "delta_filter_wrapper.py " + "{0} -1 {1} {2}".format(
filter_exe, outprefix + ".delta", outprefix + ".filter"
)
return (nucmercmd, filtercmd)
|
[
"def",
"construct_nucmer_cmdline",
"(",
"fname1",
",",
"fname2",
",",
"outdir",
"=",
"\".\"",
",",
"nucmer_exe",
"=",
"pyani_config",
".",
"NUCMER_DEFAULT",
",",
"filter_exe",
"=",
"pyani_config",
".",
"FILTER_DEFAULT",
",",
"maxmatch",
"=",
"False",
",",
")",
":",
"outsubdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"pyani_config",
".",
"ALIGNDIR",
"[",
"\"ANIm\"",
"]",
")",
"outprefix",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outsubdir",
",",
"\"%s_vs_%s\"",
"%",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"split",
"(",
"fname1",
")",
"[",
"-",
"1",
"]",
")",
"[",
"0",
"]",
",",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"split",
"(",
"fname2",
")",
"[",
"-",
"1",
"]",
")",
"[",
"0",
"]",
",",
")",
",",
")",
"if",
"maxmatch",
":",
"mode",
"=",
"\"--maxmatch\"",
"else",
":",
"mode",
"=",
"\"--mum\"",
"nucmercmd",
"=",
"\"{0} {1} -p {2} {3} {4}\"",
".",
"format",
"(",
"nucmer_exe",
",",
"mode",
",",
"outprefix",
",",
"fname1",
",",
"fname2",
")",
"filtercmd",
"=",
"\"delta_filter_wrapper.py \"",
"+",
"\"{0} -1 {1} {2}\"",
".",
"format",
"(",
"filter_exe",
",",
"outprefix",
"+",
"\".delta\"",
",",
"outprefix",
"+",
"\".filter\"",
")",
"return",
"(",
"nucmercmd",
",",
"filtercmd",
")"
] |
Returns a tuple of NUCmer and delta-filter commands
The split into a tuple was made necessary by changes to SGE/OGE. The
delta-filter command must now be run as a dependency of the NUCmer
command, and be wrapped in a Python script to capture STDOUT.
NOTE: This command-line writes output data to a subdirectory of the passed
outdir, called "nucmer_output".
- fname1 - query FASTA filepath
- fname2 - subject FASTA filepath
- outdir - path to output directory
- maxmatch - Boolean flag indicating whether to use NUCmer's -maxmatch
option. If not, the -mum option is used instead
|
[
"Returns",
"a",
"tuple",
"of",
"NUCmer",
"and",
"delta",
"-",
"filter",
"commands"
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anim.py#L101-L143
|
train
|
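construct_nucmer_cmdline above derives the output prefix "<query>_vs_<subject>" from the two FASTA basenames and switches between NUCmer's --maxmatch and --mum modes. A sketch of just the prefix and mode logic; the paths are hypothetical and the "nucmer_output" subdirectory name is taken from the docstring's NOTE.

import os

def nucmer_outprefix(fname1, fname2, outdir="output", aligndir="nucmer_output"):
    stem1 = os.path.splitext(os.path.basename(fname1))[0]
    stem2 = os.path.splitext(os.path.basename(fname2))[0]
    return os.path.join(outdir, aligndir, "%s_vs_%s" % (stem1, stem2))

maxmatch = False
mode = "--maxmatch" if maxmatch else "--mum"
prefix = nucmer_outprefix("genomes/org1.fna", "genomes/org2.fna")
print(mode, prefix)   # --mum output/nucmer_output/org1_vs_org2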
widdowquinn/pyani
|
pyani/anim.py
|
process_deltadir
|
def process_deltadir(delta_dir, org_lengths, logger=None):
"""Returns a tuple of ANIm results for .deltas in passed directory.
- delta_dir - path to the directory containing .delta files
- org_lengths - dictionary of total sequence lengths, keyed by sequence
Returns the following pandas dataframes in an ANIResults object;
query sequences are rows, subject sequences are columns:
- alignment_lengths - symmetrical: total length of alignment
- percentage_identity - symmetrical: percentage identity of alignment
- alignment_coverage - non-symmetrical: coverage of query and subject
- similarity_errors - symmetrical: count of similarity errors
May throw a ZeroDivisionError if one or more NUCmer runs failed, or a
very distant sequence was included in the analysis.
"""
# Process directory to identify input files - as of v0.2.4 we use the
# .filter files that result from delta-filter (1:1 alignments)
deltafiles = pyani_files.get_input_files(delta_dir, ".filter")
# Hold data in ANIResults object
results = ANIResults(list(org_lengths.keys()), "ANIm")
# Fill diagonal NA values for alignment_length with org_lengths
for org, length in list(org_lengths.items()):
results.alignment_lengths[org][org] = length
# Process .delta files assuming that the filename format holds:
# org1_vs_org2.delta
for deltafile in deltafiles:
qname, sname = os.path.splitext(os.path.split(deltafile)[-1])[0].split("_vs_")
# We may have .delta files from other analyses in the same directory
# If this occurs, we raise a warning, and skip the .delta file
if qname not in list(org_lengths.keys()):
if logger:
logger.warning(
"Query name %s not in input " % qname
+ "sequence list, skipping %s" % deltafile
)
continue
if sname not in list(org_lengths.keys()):
if logger:
logger.warning(
"Subject name %s not in input " % sname
+ "sequence list, skipping %s" % deltafile
)
continue
tot_length, tot_sim_error = parse_delta(deltafile)
if tot_length == 0 and logger is not None:
if logger:
logger.warning(
"Total alignment length reported in " + "%s is zero!" % deltafile
)
query_cover = float(tot_length) / org_lengths[qname]
sbjct_cover = float(tot_length) / org_lengths[sname]
# Calculate percentage ID of aligned length. This may fail if
# total length is zero.
# The ZeroDivisionError that would arise should be handled
# Common causes are that a NUCmer run failed, or that a very
# distant sequence was included in the analysis.
try:
perc_id = 1 - float(tot_sim_error) / tot_length
except ZeroDivisionError:
perc_id = 0 # set arbitrary value of zero identity
results.zero_error = True
# Populate dataframes: when assigning data from symmetrical MUMmer
# output, both upper and lower triangles will be populated
results.add_tot_length(qname, sname, tot_length)
results.add_sim_errors(qname, sname, tot_sim_error)
results.add_pid(qname, sname, perc_id)
results.add_coverage(qname, sname, query_cover, sbjct_cover)
return results
|
python
|
def process_deltadir(delta_dir, org_lengths, logger=None):
"""Returns a tuple of ANIm results for .deltas in passed directory.
- delta_dir - path to the directory containing .delta files
- org_lengths - dictionary of total sequence lengths, keyed by sequence
Returns the following pandas dataframes in an ANIResults object;
query sequences are rows, subject sequences are columns:
- alignment_lengths - symmetrical: total length of alignment
- percentage_identity - symmetrical: percentage identity of alignment
- alignment_coverage - non-symmetrical: coverage of query and subject
- similarity_errors - symmetrical: count of similarity errors
May throw a ZeroDivisionError if one or more NUCmer runs failed, or a
very distant sequence was included in the analysis.
"""
# Process directory to identify input files - as of v0.2.4 we use the
# .filter files that result from delta-filter (1:1 alignments)
deltafiles = pyani_files.get_input_files(delta_dir, ".filter")
# Hold data in ANIResults object
results = ANIResults(list(org_lengths.keys()), "ANIm")
# Fill diagonal NA values for alignment_length with org_lengths
for org, length in list(org_lengths.items()):
results.alignment_lengths[org][org] = length
# Process .delta files assuming that the filename format holds:
# org1_vs_org2.delta
for deltafile in deltafiles:
qname, sname = os.path.splitext(os.path.split(deltafile)[-1])[0].split("_vs_")
# We may have .delta files from other analyses in the same directory
# If this occurs, we raise a warning, and skip the .delta file
if qname not in list(org_lengths.keys()):
if logger:
logger.warning(
"Query name %s not in input " % qname
+ "sequence list, skipping %s" % deltafile
)
continue
if sname not in list(org_lengths.keys()):
if logger:
logger.warning(
"Subject name %s not in input " % sname
+ "sequence list, skipping %s" % deltafile
)
continue
tot_length, tot_sim_error = parse_delta(deltafile)
if tot_length == 0 and logger is not None:
if logger:
logger.warning(
"Total alignment length reported in " + "%s is zero!" % deltafile
)
query_cover = float(tot_length) / org_lengths[qname]
sbjct_cover = float(tot_length) / org_lengths[sname]
# Calculate percentage ID of aligned length. This may fail if
# total length is zero.
# The ZeroDivisionError that would arise should be handled
# Common causes are that a NUCmer run failed, or that a very
# distant sequence was included in the analysis.
try:
perc_id = 1 - float(tot_sim_error) / tot_length
except ZeroDivisionError:
perc_id = 0 # set arbitrary value of zero identity
results.zero_error = True
# Populate dataframes: when assigning data from symmetrical MUMmer
# output, both upper and lower triangles will be populated
results.add_tot_length(qname, sname, tot_length)
results.add_sim_errors(qname, sname, tot_sim_error)
results.add_pid(qname, sname, perc_id)
results.add_coverage(qname, sname, query_cover, sbjct_cover)
return results
|
[
"def",
"process_deltadir",
"(",
"delta_dir",
",",
"org_lengths",
",",
"logger",
"=",
"None",
")",
":",
"# Process directory to identify input files - as of v0.2.4 we use the",
"# .filter files that result from delta-filter (1:1 alignments)",
"deltafiles",
"=",
"pyani_files",
".",
"get_input_files",
"(",
"delta_dir",
",",
"\".filter\"",
")",
"# Hold data in ANIResults object",
"results",
"=",
"ANIResults",
"(",
"list",
"(",
"org_lengths",
".",
"keys",
"(",
")",
")",
",",
"\"ANIm\"",
")",
"# Fill diagonal NA values for alignment_length with org_lengths",
"for",
"org",
",",
"length",
"in",
"list",
"(",
"org_lengths",
".",
"items",
"(",
")",
")",
":",
"results",
".",
"alignment_lengths",
"[",
"org",
"]",
"[",
"org",
"]",
"=",
"length",
"# Process .delta files assuming that the filename format holds:",
"# org1_vs_org2.delta",
"for",
"deltafile",
"in",
"deltafiles",
":",
"qname",
",",
"sname",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"split",
"(",
"deltafile",
")",
"[",
"-",
"1",
"]",
")",
"[",
"0",
"]",
".",
"split",
"(",
"\"_vs_\"",
")",
"# We may have .delta files from other analyses in the same directory",
"# If this occurs, we raise a warning, and skip the .delta file",
"if",
"qname",
"not",
"in",
"list",
"(",
"org_lengths",
".",
"keys",
"(",
")",
")",
":",
"if",
"logger",
":",
"logger",
".",
"warning",
"(",
"\"Query name %s not in input \"",
"%",
"qname",
"+",
"\"sequence list, skipping %s\"",
"%",
"deltafile",
")",
"continue",
"if",
"sname",
"not",
"in",
"list",
"(",
"org_lengths",
".",
"keys",
"(",
")",
")",
":",
"if",
"logger",
":",
"logger",
".",
"warning",
"(",
"\"Subject name %s not in input \"",
"%",
"sname",
"+",
"\"sequence list, skipping %s\"",
"%",
"deltafile",
")",
"continue",
"tot_length",
",",
"tot_sim_error",
"=",
"parse_delta",
"(",
"deltafile",
")",
"if",
"tot_length",
"==",
"0",
"and",
"logger",
"is",
"not",
"None",
":",
"if",
"logger",
":",
"logger",
".",
"warning",
"(",
"\"Total alignment length reported in \"",
"+",
"\"%s is zero!\"",
"%",
"deltafile",
")",
"query_cover",
"=",
"float",
"(",
"tot_length",
")",
"/",
"org_lengths",
"[",
"qname",
"]",
"sbjct_cover",
"=",
"float",
"(",
"tot_length",
")",
"/",
"org_lengths",
"[",
"sname",
"]",
"# Calculate percentage ID of aligned length. This may fail if",
"# total length is zero.",
"# The ZeroDivisionError that would arise should be handled",
"# Common causes are that a NUCmer run failed, or that a very",
"# distant sequence was included in the analysis.",
"try",
":",
"perc_id",
"=",
"1",
"-",
"float",
"(",
"tot_sim_error",
")",
"/",
"tot_length",
"except",
"ZeroDivisionError",
":",
"perc_id",
"=",
"0",
"# set arbitrary value of zero identity",
"results",
".",
"zero_error",
"=",
"True",
"# Populate dataframes: when assigning data from symmetrical MUMmer",
"# output, both upper and lower triangles will be populated",
"results",
".",
"add_tot_length",
"(",
"qname",
",",
"sname",
",",
"tot_length",
")",
"results",
".",
"add_sim_errors",
"(",
"qname",
",",
"sname",
",",
"tot_sim_error",
")",
"results",
".",
"add_pid",
"(",
"qname",
",",
"sname",
",",
"perc_id",
")",
"results",
".",
"add_coverage",
"(",
"qname",
",",
"sname",
",",
"query_cover",
",",
"sbjct_cover",
")",
"return",
"results"
] |
Returns a tuple of ANIm results for .deltas in passed directory.
- delta_dir - path to the directory containing .delta files
- org_lengths - dictionary of total sequence lengths, keyed by sequence
Returns the following pandas dataframes in an ANIResults object;
query sequences are rows, subject sequences are columns:
- alignment_lengths - symmetrical: total length of alignment
- percentage_identity - symmetrical: percentage identity of alignment
- alignment_coverage - non-symmetrical: coverage of query and subject
- similarity_errors - symmetrical: count of similarity errors
May throw a ZeroDivisionError if one or more NUCmer runs failed, or a
very distant sequence was included in the analysis.
|
[
"Returns",
"a",
"tuple",
"of",
"ANIm",
"results",
"for",
".",
"deltas",
"in",
"passed",
"directory",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anim.py#L169-L244
|
train
|
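process_deltadir above turns each parsed .filter file into ANIm statistics: percentage identity is 1 minus similarity errors over aligned length, and coverage is aligned length over each genome's total length, with a ZeroDivisionError guard for failed or very distant comparisons. A worked numeric sketch with hypothetical values in place of parse_delta output.

org_lengths = {"org1": 5_000_000, "org2": 4_800_000}   # hypothetical genome sizes
tot_length, tot_sim_error = 4_200_000, 42_000           # hypothetical parse_delta() output

try:
    perc_id = 1 - float(tot_sim_error) / tot_length
except ZeroDivisionError:                                # failed run or very distant pair
    perc_id = 0

query_cover = float(tot_length) / org_lengths["org1"]
sbjct_cover = float(tot_length) / org_lengths["org2"]
print(round(perc_id, 3), round(query_cover, 3), round(sbjct_cover, 3))
# 0.99 0.84 0.875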
widdowquinn/pyani
|
bin/genbank_get_genomes_by_taxon.py
|
set_ncbi_email
|
def set_ncbi_email():
"""Set contact email for NCBI."""
Entrez.email = args.email
logger.info("Set NCBI contact email to %s", args.email)
Entrez.tool = "genbank_get_genomes_by_taxon.py"
|
python
|
def set_ncbi_email():
"""Set contact email for NCBI."""
Entrez.email = args.email
logger.info("Set NCBI contact email to %s", args.email)
Entrez.tool = "genbank_get_genomes_by_taxon.py"
|
[
"def",
"set_ncbi_email",
"(",
")",
":",
"Entrez",
".",
"email",
"=",
"args",
".",
"email",
"logger",
".",
"info",
"(",
"\"Set NCBI contact email to %s\"",
",",
"args",
".",
"email",
")",
"Entrez",
".",
"tool",
"=",
"\"genbank_get_genomes_by_taxon.py\""
] |
Set contact email for NCBI.
|
[
"Set",
"contact",
"email",
"for",
"NCBI",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/genbank_get_genomes_by_taxon.py#L139-L143
|
train
|
widdowquinn/pyani
|
bin/genbank_get_genomes_by_taxon.py
|
entrez_retry
|
def entrez_retry(func, *fnargs, **fnkwargs):
"""Retries the passed function up to the number of times specified
by args.retries
"""
tries, success = 0, False
while not success and tries < args.retries:
try:
output = func(*fnargs, **fnkwargs)
success = True
except (HTTPError, URLError):
tries += 1
logger.warning("Entrez query %s(%s, %s) failed (%d/%d)", func,
fnargs, fnkwargs, tries + 1, args.retries)
logger.warning(last_exception())
if not success:
logger.error("Too many Entrez failures (exiting)")
sys.exit(1)
return output
|
python
|
def entrez_retry(func, *fnargs, **fnkwargs):
"""Retries the passed function up to the number of times specified
by args.retries
"""
tries, success = 0, False
while not success and tries < args.retries:
try:
output = func(*fnargs, **fnkwargs)
success = True
except (HTTPError, URLError):
tries += 1
logger.warning("Entrez query %s(%s, %s) failed (%d/%d)", func,
fnargs, fnkwargs, tries + 1, args.retries)
logger.warning(last_exception())
if not success:
logger.error("Too many Entrez failures (exiting)")
sys.exit(1)
return output
|
[
"def",
"entrez_retry",
"(",
"func",
",",
"*",
"fnargs",
",",
"*",
"*",
"fnkwargs",
")",
":",
"tries",
",",
"success",
"=",
"0",
",",
"False",
"while",
"not",
"success",
"and",
"tries",
"<",
"args",
".",
"retries",
":",
"try",
":",
"output",
"=",
"func",
"(",
"*",
"fnargs",
",",
"*",
"*",
"fnkwargs",
")",
"success",
"=",
"True",
"except",
"(",
"HTTPError",
",",
"URLError",
")",
":",
"tries",
"+=",
"1",
"logger",
".",
"warning",
"(",
"\"Entrez query %s(%s, %s) failed (%d/%d)\"",
",",
"func",
",",
"fnargs",
",",
"fnkwargs",
",",
"tries",
"+",
"1",
",",
"args",
".",
"retries",
")",
"logger",
".",
"warning",
"(",
"last_exception",
"(",
")",
")",
"if",
"not",
"success",
":",
"logger",
".",
"error",
"(",
"\"Too many Entrez failures (exiting)\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"return",
"output"
] |
Retries the passed function up to the number of times specified
by args.retries
|
[
"Retries",
"the",
"passed",
"function",
"up",
"to",
"the",
"number",
"of",
"times",
"specified",
"by",
"args",
".",
"retries"
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/genbank_get_genomes_by_taxon.py#L187-L204
|
train
|
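entrez_retry above wraps any Entrez call and retries on HTTPError/URLError up to args.retries times before giving up. A generic, self-contained sketch of the same pattern; the flaky function is hypothetical, and this version re-raises after exhausting the retries rather than calling sys.exit as the record does.

def retry(func, retries, exceptions, *fnargs, **fnkwargs):
    """Call func, retrying on the given exceptions up to `retries` times."""
    tries = 0
    while True:
        try:
            return func(*fnargs, **fnkwargs)
        except exceptions:
            tries += 1
            if tries >= retries:
                raise

state = {"calls": 0}
def flaky():
    state["calls"] += 1
    if state["calls"] < 3:
        raise IOError("transient failure")
    return "ok"

print(retry(flaky, retries=5, exceptions=(IOError,)))   # "ok" after two failures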
widdowquinn/pyani
|
bin/genbank_get_genomes_by_taxon.py
|
entrez_batch_webhistory
|
def entrez_batch_webhistory(record, expected, batchsize, *fnargs, **fnkwargs):
"""Recovers the Entrez data from a prior NCBI webhistory search, in
batches of defined size, using Efetch. Returns all results as a list.
- record: Entrez webhistory record
- expected: number of expected search returns
- batchsize: how many search returns to retrieve in a batch
- *fnargs: arguments to Efetch
- **fnkwargs: keyword arguments to Efetch
"""
results = []
for start in range(0, expected, batchsize):
batch_handle = entrez_retry(
Entrez.efetch,
retstart=start,
retmax=batchsize,
webenv=record["WebEnv"],
query_key=record["QueryKey"],
*fnargs,
**fnkwargs)
batch_record = Entrez.read(batch_handle, validate=False)
results.extend(batch_record)
return results
|
python
|
def entrez_batch_webhistory(record, expected, batchsize, *fnargs, **fnkwargs):
"""Recovers the Entrez data from a prior NCBI webhistory search, in
batches of defined size, using Efetch. Returns all results as a list.
- record: Entrez webhistory record
- expected: number of expected search returns
- batchsize: how many search returns to retrieve in a batch
- *fnargs: arguments to Efetch
- **fnkwargs: keyword arguments to Efetch
"""
results = []
for start in range(0, expected, batchsize):
batch_handle = entrez_retry(
Entrez.efetch,
retstart=start,
retmax=batchsize,
webenv=record["WebEnv"],
query_key=record["QueryKey"],
*fnargs,
**fnkwargs)
batch_record = Entrez.read(batch_handle, validate=False)
results.extend(batch_record)
return results
|
[
"def",
"entrez_batch_webhistory",
"(",
"record",
",",
"expected",
",",
"batchsize",
",",
"*",
"fnargs",
",",
"*",
"*",
"fnkwargs",
")",
":",
"results",
"=",
"[",
"]",
"for",
"start",
"in",
"range",
"(",
"0",
",",
"expected",
",",
"batchsize",
")",
":",
"batch_handle",
"=",
"entrez_retry",
"(",
"Entrez",
".",
"efetch",
",",
"retstart",
"=",
"start",
",",
"retmax",
"=",
"batchsize",
",",
"webenv",
"=",
"record",
"[",
"\"WebEnv\"",
"]",
",",
"query_key",
"=",
"record",
"[",
"\"QueryKey\"",
"]",
",",
"*",
"fnargs",
",",
"*",
"*",
"fnkwargs",
")",
"batch_record",
"=",
"Entrez",
".",
"read",
"(",
"batch_handle",
",",
"validate",
"=",
"False",
")",
"results",
".",
"extend",
"(",
"batch_record",
")",
"return",
"results"
] |
Recovers the Entrez data from a prior NCBI webhistory search, in
batches of defined size, using Efetch. Returns all results as a list.
- record: Entrez webhistory record
- expected: number of expected search returns
- batchsize: how many search returns to retrieve in a batch
- *fnargs: arguments to Efetch
- **fnkwargs: keyword arguments to Efetch
|
[
"Recovers",
"the",
"Entrez",
"data",
"from",
"a",
"prior",
"NCBI",
"webhistory",
"search",
"in",
"batches",
"of",
"defined",
"size",
"using",
"Efetch",
".",
"Returns",
"all",
"results",
"as",
"a",
"list",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/genbank_get_genomes_by_taxon.py#L208-L230
|
train
|
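entrez_batch_webhistory above walks a web-history result set in fixed-size windows via Efetch's retstart/retmax. The batching arithmetic on its own, with hypothetical counts.

expected, batchsize = 1033, 250    # hypothetical result count and batch size

windows = [(start, min(batchsize, expected - start))
           for start in range(0, expected, batchsize)]
print(windows)
# [(0, 250), (250, 250), (500, 250), (750, 250), (1000, 33)]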
widdowquinn/pyani
|
bin/genbank_get_genomes_by_taxon.py
|
get_asm_uids
|
def get_asm_uids(taxon_uid):
"""Returns a set of NCBI UIDs associated with the passed taxon.
This query at NCBI returns all assemblies for the taxon subtree
rooted at the passed taxon_uid.
"""
query = "txid%s[Organism:exp]" % taxon_uid
logger.info("Entrez ESearch with query: %s", query)
# Perform initial search for assembly UIDs with taxon ID as query.
# Use NCBI history for the search.
handle = entrez_retry(
Entrez.esearch,
db="assembly",
term=query,
format="xml",
usehistory="y")
record = Entrez.read(handle, validate=False)
result_count = int(record['Count'])
logger.info("Entrez ESearch returns %d assembly IDs", result_count)
# Recover assembly UIDs from the web history
asm_ids = entrez_batch_webhistory(
record, result_count, 250, db="assembly", retmode="xml")
logger.info("Identified %d unique assemblies", len(asm_ids))
return asm_ids
|
python
|
def get_asm_uids(taxon_uid):
"""Returns a set of NCBI UIDs associated with the passed taxon.
This query at NCBI returns all assemblies for the taxon subtree
rooted at the passed taxon_uid.
"""
query = "txid%s[Organism:exp]" % taxon_uid
logger.info("Entrez ESearch with query: %s", query)
# Perform initial search for assembly UIDs with taxon ID as query.
# Use NCBI history for the search.
handle = entrez_retry(
Entrez.esearch,
db="assembly",
term=query,
format="xml",
usehistory="y")
record = Entrez.read(handle, validate=False)
result_count = int(record['Count'])
logger.info("Entrez ESearch returns %d assembly IDs", result_count)
# Recover assembly UIDs from the web history
asm_ids = entrez_batch_webhistory(
record, result_count, 250, db="assembly", retmode="xml")
logger.info("Identified %d unique assemblies", len(asm_ids))
return asm_ids
|
[
"def",
"get_asm_uids",
"(",
"taxon_uid",
")",
":",
"query",
"=",
"\"txid%s[Organism:exp]\"",
"%",
"taxon_uid",
"logger",
".",
"info",
"(",
"\"Entrez ESearch with query: %s\"",
",",
"query",
")",
"# Perform initial search for assembly UIDs with taxon ID as query.",
"# Use NCBI history for the search.",
"handle",
"=",
"entrez_retry",
"(",
"Entrez",
".",
"esearch",
",",
"db",
"=",
"\"assembly\"",
",",
"term",
"=",
"query",
",",
"format",
"=",
"\"xml\"",
",",
"usehistory",
"=",
"\"y\"",
")",
"record",
"=",
"Entrez",
".",
"read",
"(",
"handle",
",",
"validate",
"=",
"False",
")",
"result_count",
"=",
"int",
"(",
"record",
"[",
"'Count'",
"]",
")",
"logger",
".",
"info",
"(",
"\"Entrez ESearch returns %d assembly IDs\"",
",",
"result_count",
")",
"# Recover assembly UIDs from the web history",
"asm_ids",
"=",
"entrez_batch_webhistory",
"(",
"record",
",",
"result_count",
",",
"250",
",",
"db",
"=",
"\"assembly\"",
",",
"retmode",
"=",
"\"xml\"",
")",
"logger",
".",
"info",
"(",
"\"Identified %d unique assemblies\"",
",",
"len",
"(",
"asm_ids",
")",
")",
"return",
"asm_ids"
] |
Returns a set of NCBI UIDs associated with the passed taxon.
This query at NCBI returns all assemblies for the taxon subtree
rooted at the passed taxon_uid.
|
[
"Returns",
"a",
"set",
"of",
"NCBI",
"UIDs",
"associated",
"with",
"the",
"passed",
"taxon",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/genbank_get_genomes_by_taxon.py#L234-L259
|
train
|