code (string, lengths 75 – 104k) | docstring (string, lengths 1 – 46.9k) | text (string, lengths 164 – 112k)
---|---|---|
def _adjusted_script_code(self, script):
'''
Checks if the script code passed in to the sighash function is already
length-prepended
This will break if there's a redeem script that's just a pushdata
That won't happen in practice
Args:
script (bytes): the spend script
Returns:
(bytes): the length-prepended script (if necessary)
'''
script_code = ByteData()
if script[0] == len(script) - 1:
return script
script_code += VarInt(len(script))
script_code += script
return script_code | Checks if the script code passed in to the sighash function is already
length-prepended
This will break if there's a redeem script that's just a pushdata
That won't happen in practice
Args:
script (bytes): the spend script
Returns:
(bytes): the length-prepended script (if necessary) | Below is the instruction that describes the task:
### Input:
Checks if the script code passed in to the sighash function is already
length-prepended
This will break if there's a redeem script that's just a pushdata
That won't happen in practice
Args:
script (bytes): the spend script
Returns:
(bytes): the length-prepended script (if necessary)
### Response:
def _adjusted_script_code(self, script):
'''
Checks if the script code passed in to the sighash function is already
length-prepended
This will break if there's a redeem script that's just a pushdata
That won't happen in practice
Args:
script (bytes): the spend script
Returns:
(bytes): the length-prepended script (if necessary)
'''
script_code = ByteData()
if script[0] == len(script) - 1:
return script
script_code += VarInt(len(script))
script_code += script
return script_code |
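As an aside on the row above: the check relies on the convention that a length-prepended script starts with a byte equal to the remaining length. The following is a minimal, dependency-free sketch of the same idea, an illustration only; it assumes scripts shorter than 0xfd bytes (so the varint prefix is a single byte) and does not use the row's ByteData/VarInt helpers.

```python
def adjusted_script_code(script: bytes) -> bytes:
    """Length-prepend a script unless its first byte already encodes the length."""
    if script[0] == len(script) - 1:      # first byte already holds the remaining length
        return script
    return bytes([len(script)]) + script  # single-byte length prefix (scripts < 0xfd bytes)

assert adjusted_script_code(b"\x03abc") == b"\x03abc"  # already prepended, returned unchanged
assert adjusted_script_code(b"abc") == b"\x03abc"      # prefix gets added
```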
def median_approx(self, expression, percentage=50., binby=[], limits=None, shape=default_shape, percentile_shape=256, percentile_limits="minmax", selection=False, delay=False):
"""Calculate the median , possibly on a grid defined by binby.
NOTE: this value is approximated by calculating the cumulative distribution on a grid defined by
percentile_shape and percentile_limits
:param expression: {expression}
:param binby: {binby}
:param limits: {limits}
:param shape: {shape}
:param percentile_limits: {percentile_limits}
:param percentile_shape: {percentile_shape}
:param selection: {selection}
:param delay: {delay}
:return: {return_stat_scalar}
"""
return self.percentile_approx(expression, 50, binby=binby, limits=limits, shape=shape, percentile_shape=percentile_shape, percentile_limits=percentile_limits, selection=selection, delay=delay) | Calculate the median, possibly on a grid defined by binby.
NOTE: this value is approximated by calculating the cumulative distribution on a grid defined by
percentile_shape and percentile_limits
:param expression: {expression}
:param binby: {binby}
:param limits: {limits}
:param shape: {shape}
:param percentile_limits: {percentile_limits}
:param percentile_shape: {percentile_shape}
:param selection: {selection}
:param delay: {delay}
:return: {return_stat_scalar} | Below is the instruction that describes the task:
### Input:
Calculate the median, possibly on a grid defined by binby.
NOTE: this value is approximated by calculating the cumulative distribution on a grid defined by
percentile_shape and percentile_limits
:param expression: {expression}
:param binby: {binby}
:param limits: {limits}
:param shape: {shape}
:param percentile_limits: {percentile_limits}
:param percentile_shape: {percentile_shape}
:param selection: {selection}
:param delay: {delay}
:return: {return_stat_scalar}
### Response:
def median_approx(self, expression, percentage=50., binby=[], limits=None, shape=default_shape, percentile_shape=256, percentile_limits="minmax", selection=False, delay=False):
"""Calculate the median , possibly on a grid defined by binby.
NOTE: this value is approximated by calculating the cumulative distribution on a grid defined by
percentile_shape and percentile_limits
:param expression: {expression}
:param binby: {binby}
:param limits: {limits}
:param shape: {shape}
:param percentile_limits: {percentile_limits}
:param percentile_shape: {percentile_shape}
:param selection: {selection}
:param delay: {delay}
:return: {return_stat_scalar}
"""
return self.percentile_approx(expression, 50, binby=binby, limits=limits, shape=shape, percentile_shape=percentile_shape, percentile_limits=percentile_limits, selection=selection, delay=delay) |
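A possible call pattern for the row above, assuming vaex is installed and that vaex.example() still ships its usual sample DataFrame with x and y columns; this is an illustration, not part of the dataset.

```python
import vaex

df = vaex.example()
print(df.median_approx("x"))                        # single approximate median
print(df.median_approx("x", binby=["y"], shape=8))  # approximate medians of x on an 8-bin grid over y
```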
async def unignore_all(self, ctx):
"""Unignores all channels in this server from being processed.
To use this command you must have the Manage Channels permission or have the
Bot Admin role.
"""
channels = [c for c in ctx.message.server.channels if c.type is discord.ChannelType.text]
await ctx.invoke(self.unignore, *channels) | Unignores all channels in this server from being processed.
To use this command you must have the Manage Channels permission or have the
Bot Admin role. | Below is the instruction that describes the task:
### Input:
Unignores all channels in this server from being processed.
To use this command you must have the Manage Channels permission or have the
Bot Admin role.
### Response:
async def unignore_all(self, ctx):
"""Unignores all channels in this server from being processed.
To use this command you must have the Manage Channels permission or have the
Bot Admin role.
"""
channels = [c for c in ctx.message.server.channels if c.type is discord.ChannelType.text]
await ctx.invoke(self.unignore, *channels) |
def user_has_reviewed(obj, user):
"""Returns True if the user has already reviewed the object."""
ctype = ContentType.objects.get_for_model(obj)
try:
models.Review.objects.get(user=user, content_type=ctype,
object_id=obj.id)
except models.Review.DoesNotExist:
return False
return True | Returns True if the user has already reviewed the object. | Below is the instruction that describes the task:
### Input:
Returns True if the user has already reviewed the object.
### Response:
def user_has_reviewed(obj, user):
"""Returns True if the user has already reviewed the object."""
ctype = ContentType.objects.get_for_model(obj)
try:
models.Review.objects.get(user=user, content_type=ctype,
object_id=obj.id)
except models.Review.DoesNotExist:
return False
return True |
def get_high_level_mean_pars(self, high_level_type, high_level_name, calculation_type, elements_type, mean_fit, dirtype):
"""
Gets the Parameters of a mean of lower level data such as a Site
level Fisher mean of Specimen interpretations
Parameters
----------
high_level_type : 'samples','sites','locations','study'
high_level_name : sample, site, location, or study whose
data to which to apply the mean
calculation_type : 'Bingham','Fisher','Fisher by polarity'
elements_type : what to average: 'specimens', 'samples',
'sites' (Ron. ToDo allow VGP and maybe locations?)
mean_fit : name of interpretation to average if All uses all
figure out what level to average, and what elements to average
(specimen, samples, sites, vgp)
"""
elements_list = self.Data_hierarchy[high_level_type][high_level_name][elements_type]
pars_for_mean = {}
pars_for_mean["All"] = []
for element in elements_list:
if elements_type == 'specimens' and element in self.pmag_results_data['specimens']:
for fit in self.pmag_results_data['specimens'][element]:
if fit in self.bad_fits:
continue
if fit.name not in list(pars_for_mean.keys()):
pars_for_mean[fit.name] = []
try:
# is this fit to be included in mean
if mean_fit == 'All' or mean_fit == fit.name:
pars = fit.get(dirtype)
if pars == {} or pars == None:
pars = self.get_PCA_parameters(
element, fit, fit.tmin, fit.tmax, dirtype, fit.PCA_type)
if pars == {} or pars == None:
print(("cannot calculate parameters for element %s and fit %s in calculate_high_level_mean leaving out of fisher mean, please check this value." % (
element, fit.name)))
continue
fit.put(element, dirtype, pars)
else:
continue
if "calculation_type" in list(pars.keys()) and pars["calculation_type"] == 'DE-BFP':
dec, inc, direction_type = pars["specimen_dec"], pars["specimen_inc"], 'p'
elif "specimen_dec" in list(pars.keys()) and "specimen_inc" in list(pars.keys()):
dec, inc, direction_type = pars["specimen_dec"], pars["specimen_inc"], 'l'
elif "dec" in list(pars.keys()) and "inc" in list(pars.keys()):
dec, inc, direction_type = pars["dec"], pars["inc"], 'l'
else:
print(
("-E- ERROR: can't find mean for specimen interpertation: %s , %s" % (element, fit.name)))
print(pars)
continue
# add for calculation
pars_for_mean[fit.name].append({'dec': float(dec), 'inc': float(
inc), 'direction_type': direction_type, 'element_name': element})
pars_for_mean["All"].append({'dec': float(dec), 'inc': float(
inc), 'direction_type': direction_type, 'element_name': element})
except KeyError:
print(
("KeyError in calculate_high_level_mean for element: " + str(element)))
continue
else:
try:
pars = self.high_level_means[elements_type][element][mean_fit][dirtype]
if "dec" in list(pars.keys()) and "inc" in list(pars.keys()):
dec, inc, direction_type = pars["dec"], pars["inc"], 'l'
else:
# print "-E- ERROR: can't find mean for element %s"%element
continue
except KeyError:
# print("KeyError in calculate_high_level_mean for element: " + str(element) + " please report to a dev")
continue
return pars_for_mean | Gets the Parameters of a mean of lower level data such as a Site
level Fisher mean of Specimen interpretations
Parameters
----------
high_level_type : 'samples','sites','locations','study'
high_level_name : sample, site, location, or study whose
data to which to apply the mean
calculation_type : 'Bingham','Fisher','Fisher by polarity'
elements_type : what to average: 'specimens', 'samples',
'sites' (Ron. ToDo allow VGP and maybe locations?)
mean_fit : name of interpretation to average if All uses all
figure out what level to average, and what elements to average
(specimen, samples, sites, vgp) | Below is the instruction that describes the task:
### Input:
Gets the Parameters of a mean of lower level data such as a Site
level Fisher mean of Specimen interpretations
Parameters
----------
high_level_type : 'samples','sites','locations','study'
high_level_name : sample, site, location, or study whose
data to which to apply the mean
calculation_type : 'Bingham','Fisher','Fisher by polarity'
elements_type : what to average: 'specimens', 'samples',
'sites' (Ron. ToDo allow VGP and maybe locations?)
mean_fit : name of interpretation to average if All uses all
figure out what level to average, and what elements to average
(specimen, samples, sites, vgp)
### Response:
def get_high_level_mean_pars(self, high_level_type, high_level_name, calculation_type, elements_type, mean_fit, dirtype):
"""
Gets the Parameters of a mean of lower level data such as a Site
level Fisher mean of Specimen interpretations
Parameters
----------
high_level_type : 'samples','sites','locations','study'
high_level_name : sample, site, location, or study whose
data to which to apply the mean
calculation_type : 'Bingham','Fisher','Fisher by polarity'
elements_type : what to average: 'specimens', 'samples',
'sites' (Ron. ToDo allow VGP and maybe locations?)
mean_fit : name of interpretation to average if All uses all
figure out what level to average, and what elements to average
(specimen, samples, sites, vgp)
"""
elements_list = self.Data_hierarchy[high_level_type][high_level_name][elements_type]
pars_for_mean = {}
pars_for_mean["All"] = []
for element in elements_list:
if elements_type == 'specimens' and element in self.pmag_results_data['specimens']:
for fit in self.pmag_results_data['specimens'][element]:
if fit in self.bad_fits:
continue
if fit.name not in list(pars_for_mean.keys()):
pars_for_mean[fit.name] = []
try:
# is this fit to be included in mean
if mean_fit == 'All' or mean_fit == fit.name:
pars = fit.get(dirtype)
if pars == {} or pars == None:
pars = self.get_PCA_parameters(
element, fit, fit.tmin, fit.tmax, dirtype, fit.PCA_type)
if pars == {} or pars == None:
print(("cannot calculate parameters for element %s and fit %s in calculate_high_level_mean leaving out of fisher mean, please check this value." % (
element, fit.name)))
continue
fit.put(element, dirtype, pars)
else:
continue
if "calculation_type" in list(pars.keys()) and pars["calculation_type"] == 'DE-BFP':
dec, inc, direction_type = pars["specimen_dec"], pars["specimen_inc"], 'p'
elif "specimen_dec" in list(pars.keys()) and "specimen_inc" in list(pars.keys()):
dec, inc, direction_type = pars["specimen_dec"], pars["specimen_inc"], 'l'
elif "dec" in list(pars.keys()) and "inc" in list(pars.keys()):
dec, inc, direction_type = pars["dec"], pars["inc"], 'l'
else:
print(
("-E- ERROR: can't find mean for specimen interpertation: %s , %s" % (element, fit.name)))
print(pars)
continue
# add for calculation
pars_for_mean[fit.name].append({'dec': float(dec), 'inc': float(
inc), 'direction_type': direction_type, 'element_name': element})
pars_for_mean["All"].append({'dec': float(dec), 'inc': float(
inc), 'direction_type': direction_type, 'element_name': element})
except KeyError:
print(
("KeyError in calculate_high_level_mean for element: " + str(element)))
continue
else:
try:
pars = self.high_level_means[elements_type][element][mean_fit][dirtype]
if "dec" in list(pars.keys()) and "inc" in list(pars.keys()):
dec, inc, direction_type = pars["dec"], pars["inc"], 'l'
else:
# print "-E- ERROR: can't find mean for element %s"%element
continue
except KeyError:
# print("KeyError in calculate_high_level_mean for element: " + str(element) + " please report to a dev")
continue
return pars_for_mean |
def set_header(self, name: str, value: _HeaderTypes) -> None:
"""Sets the given response header name and value.
All header values are converted to strings (`datetime` objects
are formatted according to the HTTP specification for the
``Date`` header).
"""
self._headers[name] = self._convert_header_value(value) | Sets the given response header name and value.
All header values are converted to strings (`datetime` objects
are formatted according to the HTTP specification for the
``Date`` header). | Below is the instruction that describes the task:
### Input:
Sets the given response header name and value.
All header values are converted to strings (`datetime` objects
are formatted according to the HTTP specification for the
``Date`` header).
### Response:
def set_header(self, name: str, value: _HeaderTypes) -> None:
"""Sets the given response header name and value.
All header values are converted to strings (`datetime` objects
are formatted according to the HTTP specification for the
``Date`` header).
"""
self._headers[name] = self._convert_header_value(value) |
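A brief usage sketch for the set_header row above, assuming Tornado is installed; the handler name is hypothetical and only illustrates that non-string values (ints, datetimes) are accepted and converted.

```python
import datetime
import tornado.web

class PingHandler(tornado.web.RequestHandler):  # hypothetical handler, for illustration only
    def get(self):
        self.set_header("X-Request-Count", 42)               # int is converted to "42"
        self.set_header("Date", datetime.datetime.utcnow())  # datetime formatted as an HTTP date
        self.write("pong")
```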
def path(self):
"""
Return the path to this directory.
"""
p = ''
if self._parent and self._parent.path:
p = os.path.join(p, self._parent.path)
if self._base:
p = os.path.join(p, self._base)
if self._path:
p = os.path.join(p, self._path)
return p | Return the path to this directory. | Below is the instruction that describes the task:
### Input:
Return the path to this directory.
### Response:
def path(self):
"""
Return the path to this directory.
"""
p = ''
if self._parent and self._parent.path:
p = os.path.join(p, self._parent.path)
if self._base:
p = os.path.join(p, self._base)
if self._path:
p = os.path.join(p, self._path)
return p |
def _to_topology(self, atom_list, chains=None, residues=None):
"""Create a mdtraj.Topology from a Compound.
Parameters
----------
atom_list : list of mb.Compound
Atoms to include in the topology
chains : mb.Compound or list of mb.Compound
Chain types to add to the topology
residues : str or list of str
Labels of residues in the Compound. Residues are assigned by
checking against Compound.name.
Returns
-------
top : mdtraj.Topology
See Also
--------
mdtraj.Topology : Details on the mdtraj Topology object
"""
from mdtraj.core.topology import Topology
if isinstance(chains, string_types):
chains = [chains]
if isinstance(chains, (list, set)):
chains = tuple(chains)
if isinstance(residues, string_types):
residues = [residues]
if isinstance(residues, (list, set)):
residues = tuple(residues)
top = Topology()
atom_mapping = {}
default_chain = top.add_chain()
default_residue = top.add_residue('RES', default_chain)
compound_residue_map = dict()
atom_residue_map = dict()
compound_chain_map = dict()
atom_chain_map = dict()
for atom in atom_list:
# Chains
if chains:
if atom.name in chains:
current_chain = top.add_chain()
compound_chain_map[atom] = current_chain
else:
for parent in atom.ancestors():
if chains and parent.name in chains:
if parent not in compound_chain_map:
current_chain = top.add_chain()
compound_chain_map[parent] = current_chain
current_residue = top.add_residue(
'RES', current_chain)
break
else:
current_chain = default_chain
else:
current_chain = default_chain
atom_chain_map[atom] = current_chain
# Residues
if residues:
if atom.name in residues:
current_residue = top.add_residue(atom.name, current_chain)
compound_residue_map[atom] = current_residue
else:
for parent in atom.ancestors():
if residues and parent.name in residues:
if parent not in compound_residue_map:
current_residue = top.add_residue(
parent.name, current_chain)
compound_residue_map[parent] = current_residue
break
else:
current_residue = default_residue
else:
if chains:
try: # Grab the default residue from the custom chain.
current_residue = next(current_chain.residues)
except StopIteration: # Add the residue to the current chain
current_residue = top.add_residue('RES', current_chain)
else: # Grab the default chain's default residue
current_residue = default_residue
atom_residue_map[atom] = current_residue
# Add the actual atoms
try:
elem = get_by_symbol(atom.name)
except KeyError:
elem = get_by_symbol("VS")
at = top.add_atom(atom.name, elem, atom_residue_map[atom])
at.charge = atom.charge
atom_mapping[atom] = at
# Remove empty default residues.
chains_to_remove = [
chain for chain in top.chains if chain.n_atoms == 0]
residues_to_remove = [res for res in top.residues if res.n_atoms == 0]
for chain in chains_to_remove:
top._chains.remove(chain)
for res in residues_to_remove:
for chain in top.chains:
try:
chain._residues.remove(res)
except ValueError: # Already gone.
pass
for atom1, atom2 in self.bonds():
# Ensure that both atoms are part of the compound. This becomes an
# issue if you try to convert a sub-compound to a topology which is
# bonded to a different subcompound.
if all(a in atom_mapping.keys() for a in [atom1, atom2]):
top.add_bond(atom_mapping[atom1], atom_mapping[atom2])
return top | Create a mdtraj.Topology from a Compound.
Parameters
----------
atom_list : list of mb.Compound
Atoms to include in the topology
chains : mb.Compound or list of mb.Compound
Chain types to add to the topology
residues : str or list of str
Labels of residues in the Compound. Residues are assigned by
checking against Compound.name.
Returns
-------
top : mdtraj.Topology
See Also
--------
mdtraj.Topology : Details on the mdtraj Topology object | Below is the instruction that describes the task:
### Input:
Create a mdtraj.Topology from a Compound.
Parameters
----------
atom_list : list of mb.Compound
Atoms to include in the topology
chains : mb.Compound or list of mb.Compound
Chain types to add to the topology
residues : str or list of str
Labels of residues in the Compound. Residues are assigned by
checking against Compound.name.
Returns
-------
top : mdtraj.Topology
See Also
--------
mdtraj.Topology : Details on the mdtraj Topology object
### Response:
def _to_topology(self, atom_list, chains=None, residues=None):
"""Create a mdtraj.Topology from a Compound.
Parameters
----------
atom_list : list of mb.Compound
Atoms to include in the topology
chains : mb.Compound or list of mb.Compound
Chain types to add to the topology
residues : str or list of str
Labels of residues in the Compound. Residues are assigned by
checking against Compound.name.
Returns
-------
top : mdtraj.Topology
See Also
--------
mdtraj.Topology : Details on the mdtraj Topology object
"""
from mdtraj.core.topology import Topology
if isinstance(chains, string_types):
chains = [chains]
if isinstance(chains, (list, set)):
chains = tuple(chains)
if isinstance(residues, string_types):
residues = [residues]
if isinstance(residues, (list, set)):
residues = tuple(residues)
top = Topology()
atom_mapping = {}
default_chain = top.add_chain()
default_residue = top.add_residue('RES', default_chain)
compound_residue_map = dict()
atom_residue_map = dict()
compound_chain_map = dict()
atom_chain_map = dict()
for atom in atom_list:
# Chains
if chains:
if atom.name in chains:
current_chain = top.add_chain()
compound_chain_map[atom] = current_chain
else:
for parent in atom.ancestors():
if chains and parent.name in chains:
if parent not in compound_chain_map:
current_chain = top.add_chain()
compound_chain_map[parent] = current_chain
current_residue = top.add_residue(
'RES', current_chain)
break
else:
current_chain = default_chain
else:
current_chain = default_chain
atom_chain_map[atom] = current_chain
# Residues
if residues:
if atom.name in residues:
current_residue = top.add_residue(atom.name, current_chain)
compound_residue_map[atom] = current_residue
else:
for parent in atom.ancestors():
if residues and parent.name in residues:
if parent not in compound_residue_map:
current_residue = top.add_residue(
parent.name, current_chain)
compound_residue_map[parent] = current_residue
break
else:
current_residue = default_residue
else:
if chains:
try: # Grab the default residue from the custom chain.
current_residue = next(current_chain.residues)
except StopIteration: # Add the residue to the current chain
current_residue = top.add_residue('RES', current_chain)
else: # Grab the default chain's default residue
current_residue = default_residue
atom_residue_map[atom] = current_residue
# Add the actual atoms
try:
elem = get_by_symbol(atom.name)
except KeyError:
elem = get_by_symbol("VS")
at = top.add_atom(atom.name, elem, atom_residue_map[atom])
at.charge = atom.charge
atom_mapping[atom] = at
# Remove empty default residues.
chains_to_remove = [
chain for chain in top.chains if chain.n_atoms == 0]
residues_to_remove = [res for res in top.residues if res.n_atoms == 0]
for chain in chains_to_remove:
top._chains.remove(chain)
for res in residues_to_remove:
for chain in top.chains:
try:
chain._residues.remove(res)
except ValueError: # Already gone.
pass
for atom1, atom2 in self.bonds():
# Ensure that both atoms are part of the compound. This becomes an
# issue if you try to convert a sub-compound to a topology which is
# bonded to a different subcompound.
if all(a in atom_mapping.keys() for a in [atom1, atom2]):
top.add_bond(atom_mapping[atom1], atom_mapping[atom2])
return top |
def is_smooth_from(self, previous, warning_on=True):
"""[Warning: The name of this method is somewhat misleading (yet kept
for compatibility with scripts created using svg.path 2.0). This
method is meant only for d string creation and should not be used to
check for kinks. To check a segment for differentiability, use the
joins_smoothly_with() method instead.]"""
if warning_on:
warn(_is_smooth_from_warning)
if isinstance(previous, QuadraticBezier):
return (self.start == previous.end and
(self.control - self.start) == (
previous.end - previous.control))
else:
return self.control == self.start | [Warning: The name of this method is somewhat misleading (yet kept
for compatibility with scripts created using svg.path 2.0). This
method is meant only for d string creation and should not be used to
check for kinks. To check a segment for differentiability, use the
joins_smoothly_with() method instead.] | Below is the instruction that describes the task:
### Input:
[Warning: The name of this method is somewhat misleading (yet kept
for compatibility with scripts created using svg.path 2.0). This
method is meant only for d string creation and should not be used to
check for kinks. To check a segment for differentiability, use the
joins_smoothly_with() method instead.]
### Response:
def is_smooth_from(self, previous, warning_on=True):
"""[Warning: The name of this method is somewhat misleading (yet kept
for compatibility with scripts created using svg.path 2.0). This
method is meant only for d string creation and should not be used to
check for kinks. To check a segment for differentiability, use the
joins_smoothly_with() method instead.]"""
if warning_on:
warn(_is_smooth_from_warning)
if isinstance(previous, QuadraticBezier):
return (self.start == previous.end and
(self.control - self.start) == (
previous.end - previous.control))
else:
return self.control == self.start |
def list_gemeenten(self, sort=1):
'''
List all `gemeenten` in Vlaanderen.
:param integer sort: What field to sort on.
:rtype: A :class:`list` of :class:`Gemeente`.
'''
def creator():
url = self.base_url + '/municipality'
h = self.base_headers
p = {
'orderbyCode': sort == 1
}
res = capakey_rest_gateway_request(url, h, p).json()
return [
Gemeente(r['municipalityCode'], r['municipalityName'])
for r in res['municipalities']
]
if self.caches['permanent'].is_configured:
key = 'list_gemeenten_rest#%s' % sort
gemeente = self.caches['permanent'].get_or_create(key, creator)
else:
gemeente = creator()
for g in gemeente:
g.set_gateway(self)
return gemeente | List all `gemeenten` in Vlaanderen.
:param integer sort: What field to sort on.
:rtype: A :class:`list` of :class:`Gemeente`. | Below is the instruction that describes the task:
### Input:
List all `gemeenten` in Vlaanderen.
:param integer sort: What field to sort on.
:rtype: A :class:`list` of :class:`Gemeente`.
### Response:
def list_gemeenten(self, sort=1):
'''
List all `gemeenten` in Vlaanderen.
:param integer sort: What field to sort on.
:rtype: A :class:`list` of :class:`Gemeente`.
'''
def creator():
url = self.base_url + '/municipality'
h = self.base_headers
p = {
'orderbyCode': sort == 1
}
res = capakey_rest_gateway_request(url, h, p).json()
return [
Gemeente(r['municipalityCode'], r['municipalityName'])
for r in res['municipalities']
]
if self.caches['permanent'].is_configured:
key = 'list_gemeenten_rest#%s' % sort
gemeente = self.caches['permanent'].get_or_create(key, creator)
else:
gemeente = creator()
for g in gemeente:
g.set_gateway(self)
return gemeente |
def create(cls, title, conn=None, google_user=None,
google_password=None):
""" Create a new spreadsheet with the given ``title``. """
conn = Connection.connect(conn=conn, google_user=google_user,
google_password=google_password)
res = Resource(type='spreadsheet', title=title)
res = conn.docs_client.CreateResource(res)
id = res.id.text.rsplit('%3A', 1)[-1]
return cls(id, conn, resource=res) | Create a new spreadsheet with the given ``title``. | Below is the instruction that describes the task:
### Input:
Create a new spreadsheet with the given ``title``.
### Response:
def create(cls, title, conn=None, google_user=None,
google_password=None):
""" Create a new spreadsheet with the given ``title``. """
conn = Connection.connect(conn=conn, google_user=google_user,
google_password=google_password)
res = Resource(type='spreadsheet', title=title)
res = conn.docs_client.CreateResource(res)
id = res.id.text.rsplit('%3A', 1)[-1]
return cls(id, conn, resource=res) |
def getLogicalInterfaces(self, draft=False, name=None, schemaId=None):
"""
Get all logical interfaces for an org.
Parameters: draft (boolean).
Returns:
- list of ids
- response object
Throws APIException on failure.
"""
if draft:
req = ApiClient.allLogicalInterfacesUrl % (self.host, "/draft")
else:
req = ApiClient.allLogicalInterfacesUrl % (self.host, "")
if name or schemaId:
req += "?"
if name:
req += "name="+name
if schemaId:
if name:
req += "&"
req += "schemaId="+schemaId
resp = requests.get(req, auth=self.credentials, verify=self.verify)
if resp.status_code == 200:
self.logger.debug("All logical interfaces retrieved")
else:
raise ibmiotf.APIException(resp.status_code, "HTTP error getting all logical interfaces", resp)
return [x["id"] for x in resp.json()["results"]], resp.json() | Get all logical interfaces for an org.
Parameters: draft (boolean).
Returns:
- list of ids
- response object
Throws APIException on failure. | Below is the instruction that describes the task:
### Input:
Get all logical interfaces for an org.
Parameters: draft (boolean).
Returns:
- list of ids
- response object
Throws APIException on failure.
### Response:
def getLogicalInterfaces(self, draft=False, name=None, schemaId=None):
"""
Get all logical interfaces for an org.
Parameters: draft (boolean).
Returns:
- list of ids
- response object
Throws APIException on failure.
"""
if draft:
req = ApiClient.allLogicalInterfacesUrl % (self.host, "/draft")
else:
req = ApiClient.allLogicalInterfacesUrl % (self.host, "")
if name or schemaId:
req += "?"
if name:
req += "name="+name
if schemaId:
if name:
req += "&"
req += "schemaId="+schemaId
resp = requests.get(req, auth=self.credentials, verify=self.verify)
if resp.status_code == 200:
self.logger.debug("All logical interfaces retrieved")
else:
raise ibmiotf.APIException(resp.status_code, "HTTP error getting all logical interfaces", resp)
return [x["id"] for x in resp.json()["results"]], resp.json() |
def interval_cover(I):
"""Minimum interval cover
:param I: list of closed intervals
:returns: minimum list of points covering all intervals
:complexity: O(n log n)
"""
S = []
for start, end in sorted(I, key=lambda v: v[1]):
if not S or S[-1] < start:
S.append(end)
return S | Minimum interval cover
:param I: list of closed intervals
:returns: minimum list of points covering all intervals
:complexity: O(n log n) | Below is the instruction that describes the task:
### Input:
Minimum interval cover
:param I: list of closed intervals
:returns: minimum list of points covering all intervals
:complexity: O(n log n)
### Response:
def interval_cover(I):
"""Minimum interval cover
:param I: list of closed intervals
:returns: minimum list of points covering all intervals
:complexity: O(n log n)
"""
S = []
for start, end in sorted(I, key=lambda v: v[1]):
if not S or S[-1] < start:
S.append(end)
return S |
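A quick check of the greedy stabbing idea in the row above, assuming interval_cover is importable as defined there; the sample intervals are illustrative.

```python
intervals = [(1, 3), (2, 5), (6, 8), (7, 9)]
print(interval_cover(intervals))  # [3, 8]: point 3 stabs (1,3) and (2,5); point 8 stabs (6,8) and (7,9)
```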
def clean(self):
"""
Check format name uniqueness for sites
:return: cleaned_data
"""
data = self.cleaned_data
formats = Format.objects.filter(name=data['name'])
if self.instance:
formats = formats.exclude(pk=self.instance.pk)
exists_sites = []
for f in formats:
for s in f.sites.all():
if s in data['sites']:
exists_sites.append(s.__unicode__())
if len(exists_sites):
raise ValidationError(ugettext("Format with this name exists for site(s): %s" % ", ".join(exists_sites)))
return data | Check format name uniqueness for sites
:return: cleaned_data | Below is the instruction that describes the task:
### Input:
Check format name uniqueness for sites
:return: cleaned_data
### Response:
def clean(self):
"""
Check format name uniqueness for sites
:return: cleaned_data
"""
data = self.cleaned_data
formats = Format.objects.filter(name=data['name'])
if self.instance:
formats = formats.exclude(pk=self.instance.pk)
exists_sites = []
for f in formats:
for s in f.sites.all():
if s in data['sites']:
exists_sites.append(s.__unicode__())
if len(exists_sites):
raise ValidationError(ugettext("Format with this name exists for site(s): %s" % ", ".join(exists_sites)))
return data |
def less(a, b, *args):
"""Implements the '<' operator with JS-style type coertion."""
types = set([type(a), type(b)])
if float in types or int in types:
try:
a, b = float(a), float(b)
except TypeError:
# NaN
return False
return a < b and (not args or less(b, *args)) | Implements the '<' operator with JS-style type coercion. | Below is the instruction that describes the task:
### Input:
Implements the '<' operator with JS-style type coercion.
### Response:
def less(a, b, *args):
"""Implements the '<' operator with JS-style type coertion."""
types = set([type(a), type(b)])
if float in types or int in types:
try:
a, b = float(a), float(b)
except TypeError:
# NaN
return False
return a < b and (not args or less(b, *args)) |
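A few illustrative calls for the row above, assuming less is in scope as defined there; numeric strings are coerced to float when an int or float is involved, everything else falls back to Python's own comparison.

```python
print(less(1, 2))        # True
print(less("3", 10))     # True  -- "3" is coerced to 3.0 because an int is involved
print(less(1, 2, 3, 4))  # True  -- chained comparison: 1 < 2 < 3 < 4
print(less("b", "a"))    # False -- no numeric type present, plain string comparison
```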
def _classes_default(self):
"""This has to be in a method, for TerminalIPythonApp to be available."""
return [
InteractiveShellApp, # ShellApp comes before TerminalApp, because
self.__class__, # it will also affect subclasses (e.g. QtConsole)
TerminalInteractiveShell,
PromptManager,
HistoryManager,
ProfileDir,
PlainTextFormatter,
IPCompleter,
ScriptMagics,
] | This has to be in a method, for TerminalIPythonApp to be available. | Below is the instruction that describes the task:
### Input:
This has to be in a method, for TerminalIPythonApp to be available.
### Response:
def _classes_default(self):
"""This has to be in a method, for TerminalIPythonApp to be available."""
return [
InteractiveShellApp, # ShellApp comes before TerminalApp, because
self.__class__, # it will also affect subclasses (e.g. QtConsole)
TerminalInteractiveShell,
PromptManager,
HistoryManager,
ProfileDir,
PlainTextFormatter,
IPCompleter,
ScriptMagics,
] |
def data_directory(self):
"""
The absolute pathname of the directory where pip-accel's data files are stored (a string).
- Environment variable: ``$PIP_ACCEL_CACHE``
- Configuration option: ``data-directory``
- Default: ``/var/cache/pip-accel`` if running as ``root``, ``~/.pip-accel`` otherwise
"""
return expand_path(self.get(property_name='data_directory',
environment_variable='PIP_ACCEL_CACHE',
configuration_option='data-directory',
default='/var/cache/pip-accel' if is_root() else '~/.pip-accel')) | The absolute pathname of the directory where pip-accel's data files are stored (a string).
- Environment variable: ``$PIP_ACCEL_CACHE``
- Configuration option: ``data-directory``
- Default: ``/var/cache/pip-accel`` if running as ``root``, ``~/.pip-accel`` otherwise | Below is the instruction that describes the task:
### Input:
The absolute pathname of the directory where pip-accel's data files are stored (a string).
- Environment variable: ``$PIP_ACCEL_CACHE``
- Configuration option: ``data-directory``
- Default: ``/var/cache/pip-accel`` if running as ``root``, ``~/.pip-accel`` otherwise
### Response:
def data_directory(self):
"""
The absolute pathname of the directory where pip-accel's data files are stored (a string).
- Environment variable: ``$PIP_ACCEL_CACHE``
- Configuration option: ``data-directory``
- Default: ``/var/cache/pip-accel`` if running as ``root``, ``~/.pip-accel`` otherwise
"""
return expand_path(self.get(property_name='data_directory',
environment_variable='PIP_ACCEL_CACHE',
configuration_option='data-directory',
default='/var/cache/pip-accel' if is_root() else '~/.pip-accel')) |
def translations_generator_to_dataframe(translations_generator):
"""
Given a generator of (Variant, [Translation]) pairs,
returns a DataFrame of translated protein fragments with columns
for each field of a Translation object (and chr/pos/ref/alt per variant).
"""
return dataframe_from_generator(
element_class=Translation,
variant_and_elements_generator=translations_generator,
exclude=[],
converters={
"untrimmed_variant_sequence": lambda vs: vs.sequence,
"variant_sequence_in_reading_frame": (
lambda vs: vs.in_frame_cdna_sequence),
"reference_context": (
lambda rc: ";".join([
transcript.name for
transcript in rc.transcripts]))
},
extra_column_fns={
"untrimmed_variant_sequence_read_count": (
lambda _, t: len(t.untrimmed_variant_sequence.reads)),
}) | Given a generator of (Variant, [Translation]) pairs,
returns a DataFrame of translated protein fragments with columns
for each field of a Translation object (and chr/pos/ref/alt per variant). | Below is the instruction that describes the task:
### Input:
Given a generator of (Variant, [Translation]) pairs,
returns a DataFrame of translated protein fragments with columns
for each field of a Translation object (and chr/pos/ref/alt per variant).
### Response:
def translations_generator_to_dataframe(translations_generator):
"""
Given a generator of (Variant, [Translation]) pairs,
returns a DataFrame of translated protein fragments with columns
for each field of a Translation object (and chr/pos/ref/alt per variant).
"""
return dataframe_from_generator(
element_class=Translation,
variant_and_elements_generator=translations_generator,
exclude=[],
converters={
"untrimmed_variant_sequence": lambda vs: vs.sequence,
"variant_sequence_in_reading_frame": (
lambda vs: vs.in_frame_cdna_sequence),
"reference_context": (
lambda rc: ";".join([
transcript.name for
transcript in rc.transcripts]))
},
extra_column_fns={
"untrimmed_variant_sequence_read_count": (
lambda _, t: len(t.untrimmed_variant_sequence.reads)),
}) |
def render_pulp_tag(self):
"""
Configure the pulp_tag plugin.
"""
if not self.dj.dock_json_has_plugin_conf('postbuild_plugins',
'pulp_tag'):
return
pulp_registry = self.spec.pulp_registry.value
if pulp_registry:
self.dj.dock_json_set_arg('postbuild_plugins', 'pulp_tag',
'pulp_registry_name', pulp_registry)
# Verify we have either a secret or username/password
if self.spec.pulp_secret.value is None:
conf = self.dj.dock_json_get_plugin_conf('postbuild_plugins',
'pulp_tag')
args = conf.get('args', {})
if 'username' not in args:
raise OsbsValidationException("Pulp registry specified "
"but no auth config")
else:
# If no pulp registry is specified, don't run the pulp plugin
logger.info("removing pulp_tag from request, "
"requires pulp_registry")
self.dj.remove_plugin("postbuild_plugins", "pulp_tag") | Configure the pulp_tag plugin. | Below is the the instruction that describes the task:
### Input:
Configure the pulp_tag plugin.
### Response:
def render_pulp_tag(self):
"""
Configure the pulp_tag plugin.
"""
if not self.dj.dock_json_has_plugin_conf('postbuild_plugins',
'pulp_tag'):
return
pulp_registry = self.spec.pulp_registry.value
if pulp_registry:
self.dj.dock_json_set_arg('postbuild_plugins', 'pulp_tag',
'pulp_registry_name', pulp_registry)
# Verify we have either a secret or username/password
if self.spec.pulp_secret.value is None:
conf = self.dj.dock_json_get_plugin_conf('postbuild_plugins',
'pulp_tag')
args = conf.get('args', {})
if 'username' not in args:
raise OsbsValidationException("Pulp registry specified "
"but no auth config")
else:
# If no pulp registry is specified, don't run the pulp plugin
logger.info("removing pulp_tag from request, "
"requires pulp_registry")
self.dj.remove_plugin("postbuild_plugins", "pulp_tag") |
def remove(name_or_path):
'''Remove an environment or module
:param name_or_path: name or path to environment or module
'''
r = resolve(name_or_path)
r.resolved[0].remove()
EnvironmentCache.discard(r.resolved[0])
EnvironmentCache.save() | Remove an environment or module
:param name_or_path: name or path to environment or module | Below is the instruction that describes the task:
### Input:
Remove an environment or module
:param name_or_path: name or path to environment or module
### Response:
def remove(name_or_path):
'''Remove an environment or module
:param name_or_path: name or path to environment or module
'''
r = resolve(name_or_path)
r.resolved[0].remove()
EnvironmentCache.discard(r.resolved[0])
EnvironmentCache.save() |
def reduce_stock(self, product_id, sku_info, quantity):
"""
Reduce stock.
:param product_id: product ID
:param sku_info: SKU info in the format "id1:vid1;id2:vid2"; if the product has a single uniform specification, pass an empty string here
:param quantity: amount of stock to deduct
:return: the returned JSON payload
"""
return self._post(
'merchant/stock/reduce',
data={
"product_id": product_id,
"sku_info": sku_info,
"quantity": quantity
}
) | Reduce stock.
:param product_id: product ID
:param sku_info: SKU info in the format "id1:vid1;id2:vid2"; if the product has a single uniform specification, pass an empty string here
:param quantity: amount of stock to deduct
:return: the returned JSON payload | Below is the instruction that describes the task:
### Input:
Reduce stock.
:param product_id: product ID
:param sku_info: SKU info in the format "id1:vid1;id2:vid2"; if the product has a single uniform specification, pass an empty string here
:param quantity: amount of stock to deduct
:return: the returned JSON payload
### Response:
def reduce_stock(self, product_id, sku_info, quantity):
"""
Reduce stock.
:param product_id: product ID
:param sku_info: SKU info in the format "id1:vid1;id2:vid2"; if the product has a single uniform specification, pass an empty string here
:param quantity: amount of stock to deduct
:return: the returned JSON payload
"""
return self._post(
'merchant/stock/reduce',
data={
"product_id": product_id,
"sku_info": sku_info,
"quantity": quantity
}
) |
def slice_to(self, s):
'''
Copy the slice into the supplied StringBuffer
@type s: string
'''
result = ''
if self.slice_check():
result = self.current[self.bra:self.ket]
return result | Copy the slice into the supplied StringBuffer
@type s: string | Below is the instruction that describes the task:
### Input:
Copy the slice into the supplied StringBuffer
@type s: string
### Response:
def slice_to(self, s):
'''
Copy the slice into the supplied StringBuffer
@type s: string
'''
result = ''
if self.slice_check():
result = self.current[self.bra:self.ket]
return result |
def add_time(data):
"""And a friendly update time to the supplied data.
Arguments:
data (:py:class:`dict`): The response data and its update time.
Returns:
:py:class:`dict`: The data with a friendly update time.
"""
payload = data['data']
updated = data['updated'].date()
if updated == date.today():
payload['last_updated'] = data['updated'].strftime('today at %H:%M:%S')
elif updated >= (date.today() - timedelta(days=1)):
payload['last_updated'] = 'yesterday'
elif updated >= (date.today() - timedelta(days=7)):
payload['last_updated'] = updated.strftime('on %A')
else:
payload['last_updated'] = updated.strftime('%Y-%m-%d')
return payload | Add a friendly update time to the supplied data.
Arguments:
data (:py:class:`dict`): The response data and its update time.
Returns:
:py:class:`dict`: The data with a friendly update time. | Below is the the instruction that describes the task:
### Input:
Add a friendly update time to the supplied data.
Arguments:
data (:py:class:`dict`): The response data and its update time.
Returns:
:py:class:`dict`: The data with a friendly update time.
### Response:
def add_time(data):
"""And a friendly update time to the supplied data.
Arguments:
data (:py:class:`dict`): The response data and its update time.
Returns:
:py:class:`dict`: The data with a friendly update time.
"""
payload = data['data']
updated = data['updated'].date()
if updated == date.today():
payload['last_updated'] = data['updated'].strftime('today at %H:%M:%S')
elif updated >= (date.today() - timedelta(days=1)):
payload['last_updated'] = 'yesterday'
elif updated >= (date.today() - timedelta(days=7)):
payload['last_updated'] = updated.strftime('on %A')
else:
payload['last_updated'] = updated.strftime('%Y-%m-%d')
return payload |
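A small usage sketch for the row above, assuming add_time and its date/timedelta imports are in scope; the payload keys are illustrative.

```python
from datetime import datetime

data = {"data": {"name": "example"}, "updated": datetime.now()}
print(add_time(data)["last_updated"])  # e.g. "today at 14:03:21"
```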
def get_job(self, job_id):
"""GetJob
https://apidocs.joyent.com/manta/api.html#GetJob
with the added sugar that it will retrieve the archived job if it has
been archived, per:
https://apidocs.joyent.com/manta/jobs-reference.html#job-completion-and-archival
"""
try:
return RawMantaClient.get_job(self, job_id)
except errors.MantaAPIError as ex:
if ex.res.status != 404:
raise
# Job was archived, try to retrieve the archived data.
mpath = "/%s/jobs/%s/job.json" % (self.account, job_id)
content = self.get_object(mpath, accept='application/json')
try:
return json.loads(content)
except ValueError:
raise errors.MantaError('invalid job data: %r' % content) | GetJob
https://apidocs.joyent.com/manta/api.html#GetJob
with the added sugar that it will retrieve the archived job if it has
been archived, per:
https://apidocs.joyent.com/manta/jobs-reference.html#job-completion-and-archival | Below is the instruction that describes the task:
### Input:
GetJob
https://apidocs.joyent.com/manta/api.html#GetJob
with the added sugar that it will retrieve the archived job if it has
been archived, per:
https://apidocs.joyent.com/manta/jobs-reference.html#job-completion-and-archival
### Response:
def get_job(self, job_id):
"""GetJob
https://apidocs.joyent.com/manta/api.html#GetJob
with the added sugar that it will retrieve the archived job if it has
been archived, per:
https://apidocs.joyent.com/manta/jobs-reference.html#job-completion-and-archival
"""
try:
return RawMantaClient.get_job(self, job_id)
except errors.MantaAPIError as ex:
if ex.res.status != 404:
raise
# Job was archived, try to retrieve the archived data.
mpath = "/%s/jobs/%s/job.json" % (self.account, job_id)
content = self.get_object(mpath, accept='application/json')
try:
return json.loads(content)
except ValueError:
raise errors.MantaError('invalid job data: %r' % content) |
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
"""
self.action_type = UBInt16(enum_ref=ActionType)
self.action_type.unpack(buff, offset)
for cls in ActionHeader.__subclasses__():
if self.action_type.value in cls.get_allowed_types():
self.__class__ = cls
break
super().unpack(buff, offset) | Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error. | Below is the instruction that describes the task:
### Input:
Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
### Response:
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
"""
self.action_type = UBInt16(enum_ref=ActionType)
self.action_type.unpack(buff, offset)
for cls in ActionHeader.__subclasses__():
if self.action_type.value in cls.get_allowed_types():
self.__class__ = cls
break
super().unpack(buff, offset) |
def prepare_env(app, env, docname):
"""
Prepares the sphinx environment to store sphinx-needs internal data.
"""
if not hasattr(env, 'needs_all_needs'):
# Used to store all needed information about all needs in document
env.needs_all_needs = {}
if not hasattr(env, 'needs_functions'):
# Used to store all registered functions for supporting dynamic need values.
env.needs_functions = {}
# needs_functions = getattr(app.config, 'needs_functions', [])
needs_functions = app.needs_functions
if needs_functions is None:
needs_functions = []
if not isinstance(needs_functions, list):
raise SphinxError('Config parameter needs_functions must be a list!')
# Register built-in functions
for need_common_func in needs_common_functions:
register_func(env, need_common_func)
# Register functions configured by user
for needs_func in needs_functions:
register_func(env, needs_func)
app.config.needs_hide_options += ['hidden']
app.config.needs_extra_options['hidden'] = directives.unchanged
if not hasattr(env, 'needs_workflow'):
# Used to store workflow status information for already executed tasks.
# Some tasks like backlink_creation need to be performed only once.
# But most sphinx-events get called several times (for each single document file), which would also
# execute our code several times...
env.needs_workflow = {
'backlink_creation': False,
'dynamic_values_resolved': False
} | Prepares the sphinx environment to store sphinx-needs internal data. | Below is the instruction that describes the task:
### Input:
Prepares the sphinx environment to store sphinx-needs internal data.
### Response:
def prepare_env(app, env, docname):
"""
Prepares the sphinx environment to store sphinx-needs internal data.
"""
if not hasattr(env, 'needs_all_needs'):
# Used to store all needed information about all needs in document
env.needs_all_needs = {}
if not hasattr(env, 'needs_functions'):
# Used to store all registered functions for supporting dynamic need values.
env.needs_functions = {}
# needs_functions = getattr(app.config, 'needs_functions', [])
needs_functions = app.needs_functions
if needs_functions is None:
needs_functions = []
if not isinstance(needs_functions, list):
raise SphinxError('Config parameter needs_functions must be a list!')
# Register built-in functions
for need_common_func in needs_common_functions:
register_func(env, need_common_func)
# Register functions configured by user
for needs_func in needs_functions:
register_func(env, needs_func)
app.config.needs_hide_options += ['hidden']
app.config.needs_extra_options['hidden'] = directives.unchanged
if not hasattr(env, 'needs_workflow'):
# Used to store workflow status information for already executed tasks.
# Some tasks like backlink_creation need to be performed only once.
# But most sphinx-events get called several times (for each single document file), which would also
# execute our code several times...
env.needs_workflow = {
'backlink_creation': False,
'dynamic_values_resolved': False
} |
def to_mapping(self,**values):
"""
Create a JSON-serializable representation of the plane that is usable with the
javascript frontend
"""
strike, dip, rake = self.strike_dip_rake()
min, max = self.angular_errors()
try:
disabled = self.disabled
except AttributeError:
disabled = False
mapping = dict(
uid=self.hash,
axes=self.axes.tolist(),
hyperbolic_axes=self.hyperbolic_axes.tolist(),
max_angular_error=max,
min_angular_error=min,
strike=strike,
dip=dip,
rake=rake,
disabled=disabled)
# Add in user-provided-values, overwriting if
# necessary
for k,v in values.items():
mapping[k] = v
return mapping | Create a JSON-serializable representation of the plane that is usable with the
javascript frontend | Below is the instruction that describes the task:
### Input:
Create a JSON-serializable representation of the plane that is usable with the
javascript frontend
### Response:
def to_mapping(self,**values):
"""
Create a JSON-serializable representation of the plane that is usable with the
javascript frontend
"""
strike, dip, rake = self.strike_dip_rake()
min, max = self.angular_errors()
try:
disabled = self.disabled
except AttributeError:
disabled = False
mapping = dict(
uid=self.hash,
axes=self.axes.tolist(),
hyperbolic_axes=self.hyperbolic_axes.tolist(),
max_angular_error=max,
min_angular_error=min,
strike=strike,
dip=dip,
rake=rake,
disabled=disabled)
# Add in user-provided-values, overwriting if
# necessary
for k,v in values.items():
mapping[k] = v
return mapping |
def guard_submit(analysis_request):
"""Return whether the transition "submit" can be performed or not.
Returns True if there is at least one analysis in a non-detached state and
all analyses in a non-detached analyses have been submitted.
"""
analyses_ready = False
for analysis in analysis_request.getAnalyses():
analysis = api.get_object(analysis)
analysis_status = api.get_workflow_status_of(analysis)
if analysis_status in ANALYSIS_DETACHED_STATES:
continue
if analysis_status in ['assigned', 'unassigned', 'registered']:
return False
analyses_ready = True
return analyses_ready | Return whether the transition "submit" can be performed or not.
Returns True if there is at least one analysis in a non-detached state and
all analyses in non-detached states have been submitted. | Below is the instruction that describes the task:
### Input:
Return whether the transition "submit" can be performed or not.
Returns True if there is at least one analysis in a non-detached state and
all analyses in non-detached states have been submitted.
### Response:
def guard_submit(analysis_request):
"""Return whether the transition "submit" can be performed or not.
Returns True if there is at least one analysis in a non-detached state and
all analyses in non-detached states have been submitted.
"""
analyses_ready = False
for analysis in analysis_request.getAnalyses():
analysis = api.get_object(analysis)
analysis_status = api.get_workflow_status_of(analysis)
if analysis_status in ANALYSIS_DETACHED_STATES:
continue
if analysis_status in ['assigned', 'unassigned', 'registered']:
return False
analyses_ready = True
return analyses_ready |
def from_array(array):
"""
Deserialize a new Sticker from a given dictionary.
:return: new Sticker instance.
:rtype: Sticker
"""
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
from pytgbot.api_types.receivable.media import PhotoSize
from pytgbot.api_types.receivable.stickers import MaskPosition
data = {}
data['file_id'] = u(array.get('file_id'))
data['width'] = int(array.get('width'))
data['height'] = int(array.get('height'))
data['thumb'] = PhotoSize.from_array(array.get('thumb')) if array.get('thumb') is not None else None
data['emoji'] = u(array.get('emoji')) if array.get('emoji') is not None else None
data['set_name'] = u(array.get('set_name')) if array.get('set_name') is not None else None
data['mask_position'] = MaskPosition.from_array(array.get('mask_position')) if array.get('mask_position') is not None else None
data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None
data['_raw'] = array
return Sticker(**data) | Deserialize a new Sticker from a given dictionary.
:return: new Sticker instance.
:rtype: Sticker | Below is the instruction that describes the task:
### Input:
Deserialize a new Sticker from a given dictionary.
:return: new Sticker instance.
:rtype: Sticker
### Response:
def from_array(array):
"""
Deserialize a new Sticker from a given dictionary.
:return: new Sticker instance.
:rtype: Sticker
"""
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
from pytgbot.api_types.receivable.media import PhotoSize
from pytgbot.api_types.receivable.stickers import MaskPosition
data = {}
data['file_id'] = u(array.get('file_id'))
data['width'] = int(array.get('width'))
data['height'] = int(array.get('height'))
data['thumb'] = PhotoSize.from_array(array.get('thumb')) if array.get('thumb') is not None else None
data['emoji'] = u(array.get('emoji')) if array.get('emoji') is not None else None
data['set_name'] = u(array.get('set_name')) if array.get('set_name') is not None else None
data['mask_position'] = MaskPosition.from_array(array.get('mask_position')) if array.get('mask_position') is not None else None
data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None
data['_raw'] = array
return Sticker(**data) |
def start_packet_groups(self, clear_time_stamps=True, *ports):
""" Start packet groups on ports.
:param clear_time_stamps: True - clear time stamps, False - don't.
:param ports: list of ports to start traffic on, if empty start on all ports.
"""
port_list = self.set_ports_list(*ports)
if clear_time_stamps:
self.api.call_rc('ixClearTimeStamp {}'.format(port_list))
self.api.call_rc('ixStartPacketGroups {}'.format(port_list)) | Start packet groups on ports.
:param clear_time_stamps: True - clear time stamps, False - don't.
:param ports: list of ports to start traffic on, if empty start on all ports. | Below is the instruction that describes the task:
### Input:
Start packet groups on ports.
:param clear_time_stamps: True - clear time stamps, False - don't.
:param ports: list of ports to start traffic on, if empty start on all ports.
### Response:
def start_packet_groups(self, clear_time_stamps=True, *ports):
""" Start packet groups on ports.
:param clear_time_stamps: True - clear time stamps, False - don't.
:param ports: list of ports to start traffic on, if empty start on all ports.
"""
port_list = self.set_ports_list(*ports)
if clear_time_stamps:
self.api.call_rc('ixClearTimeStamp {}'.format(port_list))
self.api.call_rc('ixStartPacketGroups {}'.format(port_list)) |
def get_leaves(self):
"""Get the deepest entries as a flat set."""
ret_set = set()
for val in self.values():
if isinstance(val, self.__class__):
ret_set |= val.get_leaves()
elif isinstance(val, dict):
ret_set |= set(val.values())
elif isinstance(val, list):
ret_set |= set(val)
elif isinstance(val, set):
ret_set |= val
else:
ret_set.add(val)
return ret_set | Get the deepest entries as a flat set. | Below is the the instruction that describes the task:
### Input:
Get the deepest entries as a flat set.
### Response:
def get_leaves(self):
"""Get the deepest entries as a flat set."""
ret_set = set()
for val in self.values():
if isinstance(val, self.__class__):
ret_set |= val.get_leaves()
elif isinstance(val, dict):
ret_set |= set(val.values())
elif isinstance(val, list):
ret_set |= set(val)
elif isinstance(val, set):
ret_set |= val
else:
ret_set.add(val)
return ret_set |
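To make the recursion in the row above concrete, here is a self-contained sketch; NestedDict is a hypothetical stand-in for the dict subclass the method actually lives on.

```python
class NestedDict(dict):
    def get_leaves(self):
        """Flatten nested dicts, lists and sets into one set of leaf values."""
        ret_set = set()
        for val in self.values():
            if isinstance(val, self.__class__):
                ret_set |= val.get_leaves()   # recurse into nested NestedDicts
            elif isinstance(val, dict):
                ret_set |= set(val.values())
            elif isinstance(val, (list, set)):
                ret_set |= set(val)
            else:
                ret_set.add(val)
        return ret_set

d = NestedDict(a=NestedDict(b=1), c=[2, 3], d={"e": 4}, f=5)
print(d.get_leaves())  # {1, 2, 3, 4, 5} (set order may vary)
```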
def define_passive_branch_flows_with_kirchhoff(network,snapshots,skip_vars=False):
""" define passive branch flows with the kirchoff method """
for sub_network in network.sub_networks.obj:
find_tree(sub_network)
find_cycles(sub_network)
#following is necessary to calculate angles post-facto
find_bus_controls(sub_network)
if len(sub_network.branches_i()) > 0:
calculate_B_H(sub_network)
passive_branches = network.passive_branches()
if not skip_vars:
network.model.passive_branch_p = Var(list(passive_branches.index), snapshots)
cycle_index = []
cycle_constraints = {}
for subnetwork in network.sub_networks.obj:
branches = subnetwork.branches()
attribute = "r_pu_eff" if network.sub_networks.at[subnetwork.name,"carrier"] == "DC" else "x_pu_eff"
sub_network_cycle_index, sub_network_cycle_constraints = define_sub_network_cycle_constraints( subnetwork,
snapshots,
network.model.passive_branch_p, attribute)
cycle_index.extend( sub_network_cycle_index)
cycle_constraints.update( sub_network_cycle_constraints)
l_constraint(network.model, "cycle_constraints", cycle_constraints,
cycle_index, snapshots) | define passive branch flows with the kirchoff method | Below is the the instruction that describes the task:
### Input:
define passive branch flows with the kirchoff method
### Response:
def define_passive_branch_flows_with_kirchhoff(network,snapshots,skip_vars=False):
""" define passive branch flows with the kirchoff method """
for sub_network in network.sub_networks.obj:
find_tree(sub_network)
find_cycles(sub_network)
#following is necessary to calculate angles post-facto
find_bus_controls(sub_network)
if len(sub_network.branches_i()) > 0:
calculate_B_H(sub_network)
passive_branches = network.passive_branches()
if not skip_vars:
network.model.passive_branch_p = Var(list(passive_branches.index), snapshots)
cycle_index = []
cycle_constraints = {}
for subnetwork in network.sub_networks.obj:
branches = subnetwork.branches()
attribute = "r_pu_eff" if network.sub_networks.at[subnetwork.name,"carrier"] == "DC" else "x_pu_eff"
sub_network_cycle_index, sub_network_cycle_constraints = define_sub_network_cycle_constraints( subnetwork,
snapshots,
network.model.passive_branch_p, attribute)
cycle_index.extend( sub_network_cycle_index)
cycle_constraints.update( sub_network_cycle_constraints)
l_constraint(network.model, "cycle_constraints", cycle_constraints,
cycle_index, snapshots) |
def rectify_ajax_form_data(self, data):
"""
If a widget was converted and the Form data was submitted through an Ajax request,
then these data fields must be converted to suit the Django Form validation
"""
for name, field in self.base_fields.items():
try:
data[name] = field.convert_ajax_data(data.get(name, {}))
except AttributeError:
pass
return data | If a widget was converted and the Form data was submitted through an Ajax request,
then these data fields must be converted to suit the Django Form validation | Below is the the instruction that describes the task:
### Input:
If a widget was converted and the Form data was submitted through an Ajax request,
then these data fields must be converted to suit the Django Form validation
### Response:
def rectify_ajax_form_data(self, data):
"""
If a widget was converted and the Form data was submitted through an Ajax request,
then these data fields must be converted to suit the Django Form validation
"""
for name, field in self.base_fields.items():
try:
data[name] = field.convert_ajax_data(data.get(name, {}))
except AttributeError:
pass
return data |
def ParseOptions(cls, options, analysis_plugin):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
analysis_plugin (AnalysisPlugin): analysis plugin to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation.
"""
if not isinstance(analysis_plugin, tagging.TaggingAnalysisPlugin):
raise errors.BadConfigObject(
'Analysis plugin is not an instance of TaggingAnalysisPlugin')
tagging_file = cls._ParseStringOption(options, 'tagging_file')
if not tagging_file:
raise errors.BadConfigOption(
'Tagging analysis plugin requires a tagging file.')
tagging_file_path = tagging_file
if not os.path.isfile(tagging_file_path):
# Check if the file exists in the data location path.
data_location = getattr(options, 'data_location', None)
if data_location:
tagging_file_path = os.path.join(data_location, tagging_file)
if not os.path.isfile(tagging_file_path):
raise errors.BadConfigOption(
'No such tagging file: {0:s}.'.format(tagging_file))
try:
analysis_plugin.SetAndLoadTagFile(tagging_file_path)
except UnicodeDecodeError:
raise errors.BadConfigOption(
'Invalid tagging file: {0:s} encoding must be UTF-8.'.format(
tagging_file))
except errors.TaggingFileError as exception:
raise errors.BadConfigOption(
'Unable to read tagging file: {0:s} with error: {1!s}'.format(
tagging_file, exception)) | Parses and validates options.
Args:
options (argparse.Namespace): parser options.
analysis_plugin (AnalysisPlugin): analysis plugin to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation. | Below is the the instruction that describes the task:
### Input:
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
analysis_plugin (AnalysisPlugin): analysis plugin to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation.
### Response:
def ParseOptions(cls, options, analysis_plugin):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
analysis_plugin (AnalysisPlugin): analysis plugin to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation.
"""
if not isinstance(analysis_plugin, tagging.TaggingAnalysisPlugin):
raise errors.BadConfigObject(
'Analysis plugin is not an instance of TaggingAnalysisPlugin')
tagging_file = cls._ParseStringOption(options, 'tagging_file')
if not tagging_file:
raise errors.BadConfigOption(
'Tagging analysis plugin requires a tagging file.')
tagging_file_path = tagging_file
if not os.path.isfile(tagging_file_path):
# Check if the file exists in the data location path.
data_location = getattr(options, 'data_location', None)
if data_location:
tagging_file_path = os.path.join(data_location, tagging_file)
if not os.path.isfile(tagging_file_path):
raise errors.BadConfigOption(
'No such tagging file: {0:s}.'.format(tagging_file))
try:
analysis_plugin.SetAndLoadTagFile(tagging_file_path)
except UnicodeDecodeError:
raise errors.BadConfigOption(
'Invalid tagging file: {0:s} encoding must be UTF-8.'.format(
tagging_file))
except errors.TaggingFileError as exception:
raise errors.BadConfigOption(
'Unable to read tagging file: {0:s} with error: {1!s}'.format(
tagging_file, exception)) |
def _computeOverlaps(data, selfOverlaps=False, dtype="int16"):
"""
Calculates all pairwise overlaps between the rows of the input. Returns an
array of all n(n-1)/2 values in the upper triangular portion of the
pairwise overlap matrix. Values are returned in row-major order.
@param data (scipy.sparse.csr_matrix) A CSR sparse matrix with one vector
per row. Any non-zero value is considered an active bit.
@param selfOverlaps (boolean) If true, include diagonal (density) values
from the pairwise similarity matrix. Then the returned vector has
n(n+1)/2 elements. Optional, defaults to False.
@param dtype (string) Data type of returned array in numpy dtype format.
Optional, defaults to 'int16'.
@returns (numpy.ndarray) A vector of pairwise overlaps as described above.
"""
nVectors = data.shape[0]
nDims = data.shape[1]
nPairs = (nVectors+1)*nVectors/2 if selfOverlaps else (
nVectors*(nVectors-1)/2)
overlaps = numpy.ndarray(nPairs, dtype=dtype)
pos = 0
for i in xrange(nVectors):
start = i if selfOverlaps else i+1
a = data[i]
b = data[start:]
newOverlaps = a.multiply(b).getnnz(1)
run = newOverlaps.shape[0]
overlaps[pos:pos+run] = newOverlaps
pos += run
return overlaps | Calculates all pairwise overlaps between the rows of the input. Returns an
array of all n(n-1)/2 values in the upper triangular portion of the
pairwise overlap matrix. Values are returned in row-major order.
@param data (scipy.sparse.csr_matrix) A CSR sparse matrix with one vector
per row. Any non-zero value is considered an active bit.
@param selfOverlaps (boolean) If true, include diagonal (density) values
from the pairwise similarity matrix. Then the returned vector has
n(n+1)/2 elements. Optional, defaults to False.
@param dtype (string) Data type of returned array in numpy dtype format.
Optional, defaults to 'int16'.
@returns (numpy.ndarray) A vector of pairwise overlaps as described above. | Below is the the instruction that describes the task:
### Input:
Calculates all pairwise overlaps between the rows of the input. Returns an
array of all n(n-1)/2 values in the upper triangular portion of the
pairwise overlap matrix. Values are returned in row-major order.
@param data (scipy.sparse.csr_matrix) A CSR sparse matrix with one vector
per row. Any non-zero value is considered an active bit.
@param selfOverlaps (boolean) If true, include diagonal (density) values
from the pairwise similarity matrix. Then the returned vector has
n(n+1)/2 elements. Optional, defaults to False.
@param dtype (string) Data type of returned array in numpy dtype format.
Optional, defaults to 'int16'.
@returns (numpy.ndarray) A vector of pairwise overlaps as described above.
### Response:
def _computeOverlaps(data, selfOverlaps=False, dtype="int16"):
"""
Calculates all pairwise overlaps between the rows of the input. Returns an
array of all n(n-1)/2 values in the upper triangular portion of the
pairwise overlap matrix. Values are returned in row-major order.
@param data (scipy.sparse.csr_matrix) A CSR sparse matrix with one vector
per row. Any non-zero value is considered an active bit.
@param selfOverlaps (boolean) If true, include diagonal (density) values
from the pairwise similarity matrix. Then the returned vector has
n(n+1)/2 elements. Optional, defaults to False.
@param dtype (string) Data type of returned array in numpy dtype format.
Optional, defaults to 'int16'.
@returns (numpy.ndarray) A vector of pairwise overlaps as described above.
"""
nVectors = data.shape[0]
nDims = data.shape[1]
nPairs = (nVectors+1)*nVectors/2 if selfOverlaps else (
nVectors*(nVectors-1)/2)
overlaps = numpy.ndarray(nPairs, dtype=dtype)
pos = 0
for i in xrange(nVectors):
start = i if selfOverlaps else i+1
a = data[i]
b = data[start:]
newOverlaps = a.multiply(b).getnnz(1)
run = newOverlaps.shape[0]
overlaps[pos:pos+run] = newOverlaps
pos += run
return overlaps |
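The loop above is essentially a sparse row-versus-remaining-rows elementwise multiply followed by a nonzero count per row. A small SciPy sketch of the same computation on a made-up binary matrix, written for Python 3 (range and // instead of the xrange and / used above):

import numpy as np
from scipy.sparse import csr_matrix

# three binary "SDR" rows; any nonzero entry counts as an active bit
data = csr_matrix(np.array([[1, 1, 0, 0],
                            [0, 1, 1, 0],
                            [1, 1, 1, 0]]))

n = data.shape[0]
overlaps = np.empty(n * (n - 1) // 2, dtype="int16")
pos = 0
for i in range(n - 1):
    block = data[i].multiply(data[i + 1:])   # elementwise AND with all later rows
    counts = block.getnnz(axis=1)            # shared active bits per pair
    overlaps[pos:pos + counts.shape[0]] = counts
    pos += counts.shape[0]

# pairs are (0,1), (0,2), (1,2) in row-major order
assert overlaps.tolist() == [1, 2, 2]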
def get_ip(self, use_cached=True):
"""Get the last known IP of this device"""
device_json = self.get_device_json(use_cached)
return device_json.get("dpLastKnownIp") | Get the last known IP of this device | Below is the the instruction that describes the task:
### Input:
Get the last known IP of this device
### Response:
def get_ip(self, use_cached=True):
"""Get the last known IP of this device"""
device_json = self.get_device_json(use_cached)
return device_json.get("dpLastKnownIp") |
def get(self, url, name, params=None, headers=None, connection=None):
"""
Synchronous GET request.
"""
if name is None: name = ''
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, name)
self._authenticate(params, headers)
return make_get_request(endpoint, params, headers, connection=connection) | Synchronous GET request. | Below is the the instruction that describes the task:
### Input:
Synchronous GET request.
### Response:
def get(self, url, name, params=None, headers=None, connection=None):
"""
Synchronous GET request.
"""
if name is None: name = ''
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, name)
self._authenticate(params, headers)
return make_get_request(endpoint, params, headers, connection=connection) |
def contains(bank, key):
'''
Checks if the specified bank contains the specified key.
'''
_init_client()
etcd_key = '{0}/{1}/{2}'.format(path_prefix, bank, key)
try:
r = client.read(etcd_key)
# return True for keys, not dirs
return r.dir is False
except etcd.EtcdKeyNotFound:
return False
except Exception as exc:
raise SaltCacheError(
'There was an error getting the key, {0}: {1}'.format(
etcd_key, exc
)
) | Checks if the specified bank contains the specified key. | Below is the the instruction that describes the task:
### Input:
Checks if the specified bank contains the specified key.
### Response:
def contains(bank, key):
'''
Checks if the specified bank contains the specified key.
'''
_init_client()
etcd_key = '{0}/{1}/{2}'.format(path_prefix, bank, key)
try:
r = client.read(etcd_key)
# return True for keys, not dirs
return r.dir is False
except etcd.EtcdKeyNotFound:
return False
except Exception as exc:
raise SaltCacheError(
'There was an error getting the key, {0}: {1}'.format(
etcd_key, exc
)
) |
def expand_star(mod_name):
"""Expand something like 'unuk.tasks.*' into a list of all the modules
there.
"""
expanded = []
mod_dir = os.path.dirname(
__import__(mod_name[:-2], {}, {}, ['']).__file__)
for f in glob.glob1(mod_dir, "[!_]*.py"):
expanded.append('%s.%s' % (mod_name[:-2], f[:-3]))
return expanded | Expand something like 'unuk.tasks.*' into a list of all the modules
there. | Below is the the instruction that describes the task:
### Input:
Expand something like 'unuk.tasks.*' into a list of all the modules
there.
### Response:
def expand_star(mod_name):
"""Expand something like 'unuk.tasks.*' into a list of all the modules
there.
"""
expanded = []
mod_dir = os.path.dirname(
__import__(mod_name[:-2], {}, {}, ['']).__file__)
for f in glob.glob1(mod_dir, "[!_]*.py"):
expanded.append('%s.%s' % (mod_name[:-2], f[:-3]))
return expanded |
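glob.glob1 is an undocumented helper, so a sketch with the public glob and importlib APIs may be clearer. This version takes a bare package name rather than the 'pkg.*' form, and the package used in the example is arbitrary:

import glob
import importlib
import os

def expand_star_modules(package_name):
    """List 'pkg.mod' names for every public module file in a package directory."""
    pkg = importlib.import_module(package_name)
    pkg_dir = os.path.dirname(pkg.__file__)
    modules = []
    for path in glob.glob(os.path.join(pkg_dir, "[!_]*.py")):
        modules.append("%s.%s" % (package_name, os.path.basename(path)[:-3]))
    return sorted(modules)

# e.g. expand_star_modules("json") -> ['json.decoder', 'json.encoder', ...]
print(expand_star_modules("json"))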
def split_func(string):
"""
Take a string like 'requiredIf("arg_name")'
return the function name and the argument:
(requiredIf, arg_name)
"""
ind = string.index("(")
return string[:ind], string[ind+1:-1].strip('"') | Take a string like 'requiredIf("arg_name")'
return the function name and the argument:
(requiredIf, arg_name) | Below is the the instruction that describes the task:
### Input:
Take a string like 'requiredIf("arg_name")'
return the function name and the argument:
(requiredIf, arg_name)
### Response:
def split_func(string):
"""
Take a string like 'requiredIf("arg_name")'
return the function name and the argument:
(requiredIf, arg_name)
"""
ind = string.index("(")
return string[:ind], string[ind+1:-1].strip('"') |
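A quick usage check of split_func as defined above. It assumes exactly one '(' and a trailing ')', and strip('"') only removes double quotes, so bare arguments pass through unchanged:

assert split_func('requiredIf("arg_name")') == ('requiredIf', 'arg_name')
# an unquoted argument is returned as-is
assert split_func('lengthBetween(5)') == ('lengthBetween', '5')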
def bar_chart(data, bar_char='=', width=80):
"""Return an horizontal bar chart
>>> print bar_chart({
... 'one': '1',
... 'two': '2',
... 'three': '3',
... 'four': '4',
... 'five': '5',
... })
five =====
four ====
one =
three ===
two ==
>>> print bar_chart({
... '1/1': 1/1.0,
... '1/2': 1/2.0,
... '1/3': 1/3.0,
... '1/4': 1/4.0,
... '1/5': 1/5.0,
... '2': 2,
... '3': 3,
... '4': 4,
... '5': 5,
... })
1/1 ===============
1/2 =======
1/3 =====
1/4 ===
1/5 ===
2 ==============================
3 =============================================
4 ============================================================
5 ===========================================================================
>>> print bar_chart({
... '1': 2**1,
... '2': 2**2,
... '3': 2**3,
... '4': 2**4,
... '5': 2**5,
... '6': 2**6,
... '7': 2**7,
... })
1 =
2 ==
3 ====
4 =========
5 ===================
6 ======================================
7 =============================================================================
"""
if type(data) is dict:
output = []
max_len = len(max(data, key=len))
float_values = map(float, data.values())
max_value = max(float_values)
min_value = min(float_values)
all_integer = all(f.is_integer() for f in float_values)
for key in sorted(data):
output.append('%s %s'%(key.rjust(max_len, ' '), draw_bar(bar_char, float(data[key]), all_integer, min_value, max_value, width-max_len-2)))
return '\n'.join(output) | Return an horizontal bar chart
>>> print bar_chart({
... 'one': '1',
... 'two': '2',
... 'three': '3',
... 'four': '4',
... 'five': '5',
... })
five =====
four ====
one =
three ===
two ==
>>> print bar_chart({
... '1/1': 1/1.0,
... '1/2': 1/2.0,
... '1/3': 1/3.0,
... '1/4': 1/4.0,
... '1/5': 1/5.0,
... '2': 2,
... '3': 3,
... '4': 4,
... '5': 5,
... })
1/1 ===============
1/2 =======
1/3 =====
1/4 ===
1/5 ===
2 ==============================
3 =============================================
4 ============================================================
5 ===========================================================================
>>> print bar_chart({
... '1': 2**1,
... '2': 2**2,
... '3': 2**3,
... '4': 2**4,
... '5': 2**5,
... '6': 2**6,
... '7': 2**7,
... })
1 =
2 ==
3 ====
4 =========
5 ===================
6 ======================================
7 ============================================================================= | Below is the the instruction that describes the task:
### Input:
Return an horizontal bar chart
>>> print bar_chart({
... 'one': '1',
... 'two': '2',
... 'three': '3',
... 'four': '4',
... 'five': '5',
... })
five =====
four ====
one =
three ===
two ==
>>> print bar_chart({
... '1/1': 1/1.0,
... '1/2': 1/2.0,
... '1/3': 1/3.0,
... '1/4': 1/4.0,
... '1/5': 1/5.0,
... '2': 2,
... '3': 3,
... '4': 4,
... '5': 5,
... })
1/1 ===============
1/2 =======
1/3 =====
1/4 ===
1/5 ===
2 ==============================
3 =============================================
4 ============================================================
5 ===========================================================================
>>> print bar_chart({
... '1': 2**1,
... '2': 2**2,
... '3': 2**3,
... '4': 2**4,
... '5': 2**5,
... '6': 2**6,
... '7': 2**7,
... })
1 =
2 ==
3 ====
4 =========
5 ===================
6 ======================================
7 =============================================================================
### Response:
def bar_chart(data, bar_char='=', width=80):
"""Return an horizontal bar chart
>>> print bar_chart({
... 'one': '1',
... 'two': '2',
... 'three': '3',
... 'four': '4',
... 'five': '5',
... })
five =====
four ====
one =
three ===
two ==
>>> print bar_chart({
... '1/1': 1/1.0,
... '1/2': 1/2.0,
... '1/3': 1/3.0,
... '1/4': 1/4.0,
... '1/5': 1/5.0,
... '2': 2,
... '3': 3,
... '4': 4,
... '5': 5,
... })
1/1 ===============
1/2 =======
1/3 =====
1/4 ===
1/5 ===
2 ==============================
3 =============================================
4 ============================================================
5 ===========================================================================
>>> print bar_chart({
... '1': 2**1,
... '2': 2**2,
... '3': 2**3,
... '4': 2**4,
... '5': 2**5,
... '6': 2**6,
... '7': 2**7,
... })
1 =
2 ==
3 ====
4 =========
5 ===================
6 ======================================
7 =============================================================================
"""
if type(data) is dict:
output = []
max_len = len(max(data, key=len))
float_values = map(float, data.values())
max_value = max(float_values)
min_value = min(float_values)
all_integer = all(f.is_integer() for f in float_values)
for key in sorted(data):
output.append('%s %s'%(key.rjust(max_len, ' '), draw_bar(bar_char, float(data[key]), all_integer, min_value, max_value, width-max_len-2)))
return '\n'.join(output) |
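bar_chart delegates the per-row rendering to a draw_bar helper that is not included in this record. The sketch below is only a guess at such a helper's shape (simple linear scaling to the available width); the real implementation may scale or normalize differently:

def draw_bar_sketch(bar_char, value, all_integer, min_value, max_value, width):
    """Scale value linearly into at most `width` bar characters."""
    if max_value <= 0:
        return ""
    length = int(round(width * value / max_value))
    if not all_integer:
        length = max(1, length)   # keep even the smallest float value visible
    return bar_char * length

print(draw_bar_sketch("=", 3.0, True, 1.0, 5.0, 75))   # 45 characters of '='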
def handle(self):
"""
Handle a single message. SocketServer takes care of splitting out the messages.
Messages are JSON-encoded logging module records.
"""
# Unpack the data from the request
data, socket = self.request
try:
# Parse it as JSON
message_attrs = json.loads(data.decode('utf-8'))
# Fluff it up into a proper logging record
record = logging.makeLogRecord(message_attrs)
except:
# Complain someone is sending us bad logging data
logging.error("Malformed log message from {}".format(self.client_address[0]))
else:
# Log level filtering should have been done on the remote end. The handle() method
# skips it on this end.
log.handle(record) | Handle a single message. SocketServer takes care of splitting out the messages.
Messages are JSON-encoded logging module records. | Below is the the instruction that describes the task:
### Input:
Handle a single message. SocketServer takes care of splitting out the messages.
Messages are JSON-encoded logging module records.
### Response:
def handle(self):
"""
Handle a single message. SocketServer takes care of splitting out the messages.
Messages are JSON-encoded logging module records.
"""
# Unpack the data from the request
data, socket = self.request
try:
# Parse it as JSON
message_attrs = json.loads(data.decode('utf-8'))
# Fluff it up into a proper logging record
record = logging.makeLogRecord(message_attrs)
except:
# Complain someone is sending us bad logging data
logging.error("Malformed log message from {}".format(self.client_address[0]))
else:
# Log level filtering should have been done on the remote end. The handle() method
# skips it on this end.
log.handle(record) |
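The handler above expects each datagram to be a JSON dict of logging-record attributes, rebuilt with logging.makeLogRecord. A hedged sketch of the matching sender side; the host and port are placeholders, and only JSON-serializable record attributes are shipped:

import json
import logging
import socket

def send_log_record(host, port, message, level=logging.INFO):
    """Ship one logging record as a JSON datagram to the collector."""
    record = logging.LogRecord(
        name="remote", level=level, pathname=__file__, lineno=0,
        msg=message, args=(), exc_info=None)
    # keep only attributes that survive a JSON round trip
    payload = {k: v for k, v in record.__dict__.items()
               if isinstance(v, (str, int, float, bool, type(None)))}
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.sendto(json.dumps(payload).encode("utf-8"), (host, port))
    sock.close()

# send_log_record("127.0.0.1", 9021, "worker started")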
def getConstants(ast):
'''
RAM: implemented magic method __lt__ for ASTNode to fix issues
#88 and #209. The following test code works now, as does the test suite.
import numexpr as ne
a = 1 + 3j; b = 5.0
ne.evaluate( 'a*2 + 15j - b' )
'''
constants_order = sorted( ast.allOf('constant') )
constants = [convertConstantToKind(a.value, a.astKind)
for a in constants_order]
return constants_order, constants | RAM: implemented magic method __lt__ for ASTNode to fix issues
#88 and #209. The following test code works now, as does the test suite.
import numexpr as ne
a = 1 + 3j; b = 5.0
ne.evaluate( 'a*2 + 15j - b' ) | Below is the the instruction that describes the task:
### Input:
RAM: implemented magic method __lt__ for ASTNode to fix issues
#88 and #209. The following test code works now, as does the test suite.
import numexpr as ne
a = 1 + 3j; b = 5.0
ne.evaluate( 'a*2 + 15j - b' )
### Response:
def getConstants(ast):
'''
RAM: implemented magic method __lt__ for ASTNode to fix issues
#88 and #209. The following test code works now, as does the test suite.
import numexpr as ne
a = 1 + 3j; b = 5.0
ne.evaluate( 'a*2 + 15j - b' )
'''
constants_order = sorted( ast.allOf('constant') )
constants = [convertConstantToKind(a.value, a.astKind)
for a in constants_order]
return constants_order, constants |
def volumes(self):
"""Returns all available volumes"""
if self._data is not None:
volumes = []
for volume in self._data["volumes"]:
volumes.append(volume["id"])
return volumes | Returns all available volumes | Below is the the instruction that describes the task:
### Input:
Returns all available volumes
### Response:
def volumes(self):
"""Returns all available volumes"""
if self._data is not None:
volumes = []
for volume in self._data["volumes"]:
volumes.append(volume["id"])
return volumes |
def visit_ImportFrom(self, node):
"""callback for 'import from' statement"""
self.imports.extend((node.module, n.name, n.asname, node.level)
for n in node.names)
ast.NodeVisitor.generic_visit(self, node) | callback for 'import from' statement | Below is the the instruction that describes the task:
### Input:
callback for 'import from' statement
### Response:
def visit_ImportFrom(self, node):
"""callback for 'import from' statement"""
self.imports.extend((node.module, n.name, n.asname, node.level)
for n in node.names)
ast.NodeVisitor.generic_visit(self, node) |
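A self-contained demonstration of the same ast.NodeVisitor pattern, collecting (module, name, asname, level) tuples from a small source snippet:

import ast

class FromImportCollector(ast.NodeVisitor):
    """Record every 'from X import Y [as Z]' seen while walking a module."""
    def __init__(self):
        self.imports = []

    def visit_ImportFrom(self, node):
        self.imports.extend((node.module, n.name, n.asname, node.level)
                            for n in node.names)
        self.generic_visit(node)

source = "from os import path as p\nfrom . import utils\n"
collector = FromImportCollector()
collector.visit(ast.parse(source))
# relative imports show up with module=None and a nonzero level
assert collector.imports == [('os', 'path', 'p', 0), (None, 'utils', None, 1)]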
def segment_file(self, value):
""" Setter for _segment_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._segment_file = value | Setter for _segment_file attribute | Below is the the instruction that describes the task:
### Input:
Setter for _segment_file attribute
### Response:
def segment_file(self, value):
""" Setter for _segment_file attribute """
assert os.path.isfile(value), "%s is not a valid file" % value
self._segment_file = value |
def transform(self, Z):
"""Transform an ArrayRDD (or DictRDD with column 'X') containing
sequence of documents to a document-term matrix.
Parameters
----------
Z : ArrayRDD or DictRDD with raw text documents
Samples. Each sample must be a text document (either bytes or
unicode strings) which will be tokenized and hashed.
Returns
-------
Z : SparseRDD/DictRDD containing scipy.sparse matrix
Document-term matrix.
"""
mapper = super(SparkHashingVectorizer, self).transform
return Z.transform(mapper, column='X', dtype=sp.spmatrix) | Transform an ArrayRDD (or DictRDD with column 'X') containing
sequence of documents to a document-term matrix.
Parameters
----------
Z : ArrayRDD or DictRDD with raw text documents
Samples. Each sample must be a text document (either bytes or
unicode strings) which will be tokenized and hashed.
Returns
-------
Z : SparseRDD/DictRDD containing scipy.sparse matrix
Document-term matrix. | Below is the the instruction that describes the task:
### Input:
Transform an ArrayRDD (or DictRDD with column 'X') containing
sequence of documents to a document-term matrix.
Parameters
----------
Z : ArrayRDD or DictRDD with raw text documents
Samples. Each sample must be a text document (either bytes or
unicode strings) which will be tokenized and hashed.
Returns
-------
Z : SparseRDD/DictRDD containing scipy.sparse matrix
Document-term matrix.
### Response:
def transform(self, Z):
"""Transform an ArrayRDD (or DictRDD with column 'X') containing
sequence of documents to a document-term matrix.
Parameters
----------
Z : ArrayRDD or DictRDD with raw text documents
Samples. Each sample must be a text document (either bytes or
unicode strings) which will be tokenized and hashed.
Returns
-------
Z : SparseRDD/DictRDD containing scipy.sparse matrix
Document-term matrix.
"""
mapper = super(SparkHashingVectorizer, self).transform
return Z.transform(mapper, column='X', dtype=sp.spmatrix) |
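Under the hood each block of documents is handed to scikit-learn's HashingVectorizer.transform. A plain local sketch of that per-block step, assuming scikit-learn is installed; the toy corpus is made up:

from sklearn.feature_extraction.text import HashingVectorizer

corpus = ["the quick brown fox", "jumps over the lazy dog"]

# hashing is stateless, so no fit step is needed before transform
vectorizer = HashingVectorizer(n_features=2 ** 10)
X = vectorizer.transform(corpus)

print(X.shape)        # (2, 1024) scipy.sparse matrix, one row per document
print(X.getnnz())     # number of hashed, non-zero term slots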
def _encode_telegram_base64(string):
"""
Inverse for `_decode_telegram_base64`.
"""
try:
return base64.urlsafe_b64encode(string).rstrip(b'=').decode('ascii')
except (binascii.Error, ValueError, TypeError):
return None | Inverse for `_decode_telegram_base64`. | Below is the the instruction that describes the task:
### Input:
Inverse for `_decode_telegram_base64`.
### Response:
def _encode_telegram_base64(string):
"""
Inverse for `_decode_telegram_base64`.
"""
try:
return base64.urlsafe_b64encode(string).rstrip(b'=').decode('ascii')
except (binascii.Error, ValueError, TypeError):
return None |
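A quick round-trip check of the unpadded URL-safe base64 convention used above. The decoder here is a guess at what the referenced _decode_telegram_base64 does (restore the stripped '=' padding, then urlsafe-decode):

import base64

def encode_unpadded(raw: bytes) -> str:
    return base64.urlsafe_b64encode(raw).rstrip(b'=').decode('ascii')

def decode_unpadded(text: str) -> bytes:
    # base64 input length must be a multiple of 4, so restore the padding
    padding = '=' * (-len(text) % 4)
    return base64.urlsafe_b64decode(text + padding)

token = encode_unpadded(b'\x01\x02\x03\x04\x05')
assert token == 'AQIDBAU'
assert decode_unpadded(token) == b'\x01\x02\x03\x04\x05'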
def log_pdf(self, y, mu, weights=None):
"""
computes the log of the pdf or pmf of the values under the current distribution
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
weights : array-like shape (n,) or None, default: None
sample weights
if None, defaults to array of ones
Returns
-------
pdf/pmf : np.array of length n
"""
if weights is None:
weights = np.ones_like(mu)
n = self.levels
p = mu / self.levels
return sp.stats.binom.logpmf(y, n, p) | computes the log of the pdf or pmf of the values under the current distribution
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
weights : array-like shape (n,) or None, default: None
sample weights
if None, defaults to array of ones
Returns
-------
pdf/pmf : np.array of length n | Below is the the instruction that describes the task:
### Input:
computes the log of the pdf or pmf of the values under the current distribution
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
weights : array-like shape (n,) or None, default: None
sample weights
if None, defaults to array of ones
Returns
-------
pdf/pmf : np.array of length n
### Response:
def log_pdf(self, y, mu, weights=None):
"""
computes the log of the pdf or pmf of the values under the current distribution
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
weights : array-like shape (n,) or None, default: None
sample weights
if None, defaults to array of ones
Returns
-------
pdf/pmf : np.array of length n
"""
if weights is None:
weights = np.ones_like(mu)
n = self.levels
p = mu / self.levels
return sp.stats.binom.logpmf(y, n, p) |
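The distribution above is just scipy.stats.binom with n fixed to self.levels and p = mu / levels. A quick numeric check with made-up values, compared against the closed-form log-PMF:

import numpy as np
import scipy.stats as st
from scipy.special import comb

levels = 10                      # number of binomial trials n
mu = np.array([2.0, 5.0, 8.0])   # expected counts
y = np.array([1, 5, 9])          # observed counts

p = mu / levels                  # success probability per trial
logpmf = st.binom.logpmf(y, levels, p)

# sanity check: log C(n,y) + y log p + (n-y) log(1-p)
manual = np.log(comb(levels, y)) + y * np.log(p) + (levels - y) * np.log(1 - p)
assert np.allclose(logpmf, manual)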
def add_trial(self, trial):
"""Add trial to bracket assuming bracket is not filled.
At a later iteration, a newly added trial will be given equal
opportunity to catch up."""
assert not self.filled(), "Cannot add trial to filled bracket!"
self._live_trials[trial] = None
self._all_trials.append(trial) | Add trial to bracket assuming bracket is not filled.
At a later iteration, a newly added trial will be given equal
opportunity to catch up. | Below is the the instruction that describes the task:
### Input:
Add trial to bracket assuming bracket is not filled.
At a later iteration, a newly added trial will be given equal
opportunity to catch up.
### Response:
def add_trial(self, trial):
"""Add trial to bracket assuming bracket is not filled.
At a later iteration, a newly added trial will be given equal
opportunity to catch up."""
assert not self.filled(), "Cannot add trial to filled bracket!"
self._live_trials[trial] = None
self._all_trials.append(trial) |
def timefrequency(data, method='morlet', time_skip=1, **options):
"""Compute the power spectrum over time.
Parameters
----------
data : instance of ChanTime
data to analyze
method : str
the method to compute the time-frequency representation, such as
'morlet' (wavelet using complex morlet window),
'spectrogram' (corresponds to 'spectraldensity' in frequency()),
'stft' (short-time fourier transform, corresponds to 'complex' in
frequency())
options : dict
options depend on the method used, see below.
Returns
-------
instance of ChanTimeFreq
data in time-frequency representation. The exact output depends on
the method. Using 'morlet', you get a complex output at each frequency
where the wavelet was computed.
Examples
--------
The data in ChanTimeFreq are complex and they should stay that way. You
can also get the magnitude or power the easy way using Math.
>>> from wonambi.trans import math, timefreq
>>> tf = timefreq(data, foi=(8, 10))
>>> tf_abs = math(tf, operator_name='abs')
>>> tf_abs.data[0][0, 0, 0]
1737.4662329214384)
Notes
-----
It uses sampling frequency as specified in s_freq, it does not
recompute the sampling frequency based on the time axis.
For method 'morlet', the following options should be specified:
foi : ndarray or list or tuple
vector with frequency of interest
ratio : float
ratio for a wavelet family ( = freq / sigma_f)
sigma_f : float
standard deviation of the wavelet in frequency domain
dur_in_sd : float
duration of the wavelet, given as number of the standard deviation
in the time domain, in one side.
dur_in_s : float
total duration of the wavelet, two-sided (i.e. from start to
finish)
time_skip : int, in samples
number of time points to skip (it runs convolution on all the
data points, but you don't need to store them all)
normalization : str
'area' means that energy is normalized to 1, 'peak' means that the
peak of the wavelet is set at 1, 'max' is a normalization used by
nitime where the max value of the output of the convolution remains
the same even if you change the sigma_f.
zero_mean : bool
make sure that the wavelet has zero mean (only relevant if ratio
< 5)
For method 'spectrogram' or 'stft', the following options should be specified:
duration : int
duration of the window to compute the power spectrum, in s
overlap : int
amount of overlap (0 -> no overlap, 1 -> full overlap)
"""
implemented_methods = ('morlet',
'spectrogram', # this is output spectraldensity
'stft') # this is output complex
if method not in implemented_methods:
raise ValueError('Method ' + method + ' is not implemented yet.\n'
'Currently implemented methods are ' +
', '.join(implemented_methods))
if method == 'morlet':
default_options = {'foi': None,
'ratio': 5,
'sigma_f': None,
'dur_in_sd': 4,
'dur_in_s': None,
'normalization': 'area',
'zero_mean': False,
}
elif method in ('spectrogram', 'stft'):
default_options = {'duration': 1,
'overlap': 0.5,
'step': None,
'detrend': 'linear',
'taper': 'hann',
'sides': 'one',
'scaling': 'power',
'halfbandwidth': 2,
'NW': None,
}
default_options.update(options)
options = default_options
timefreq = ChanTimeFreq()
timefreq.attr = deepcopy(data.attr)
timefreq.s_freq = data.s_freq
timefreq.start_time = data.start_time
timefreq.axis['chan'] = data.axis['chan']
timefreq.axis['time'] = empty(data.number_of('trial'), dtype='O')
timefreq.axis['freq'] = empty(data.number_of('trial'), dtype='O')
if method == 'stft':
timefreq.axis['taper'] = empty(data.number_of('trial'), dtype='O')
timefreq.data = empty(data.number_of('trial'), dtype='O')
if method == 'morlet':
wavelets = _create_morlet(deepcopy(options), data.s_freq)
for i in range(data.number_of('trial')):
lg.info('Processing trial # {0: 6}'.format(i))
timefreq.axis['freq'][i] = array(options['foi'])
timefreq.axis['time'][i] = data.axis['time'][i][::time_skip]
timefreq.data[i] = empty((data.number_of('chan')[i],
data.number_of('time')[i] // time_skip,
len(options['foi'])),
dtype='complex')
for i_c, chan in enumerate(data.axis['chan'][i]):
dat = data(trial=i, chan=chan)
for i_f, wavelet in enumerate(wavelets):
tf = fftconvolve(dat, wavelet, 'same')
timefreq.data[i][i_c, :, i_f] = tf[::time_skip]
if time_skip != 1:
warn('sampling frequency in s_freq refers to the input data, '
'not to the timefrequency output')
elif method in ('spectrogram', 'stft'): # TODO: add timeskip
nperseg = int(options['duration'] * data.s_freq)
if options['step'] is not None:
nstep = int(options['step'] * data.s_freq)
else:
nstep = nperseg - int(options['overlap'] * nperseg)
if method == 'spectrogram':
output = 'spectraldensity'
elif method == 'stft':
output = 'complex'
for i in range(data.number_of('trial')):
t = _create_subepochs(data.time[i], nperseg, nstep).mean(axis=1)
x = _create_subepochs(data(trial=i), nperseg, nstep)
f, Sxx = _fft(x,
s_freq=data.s_freq,
detrend=options['detrend'],
taper=options['taper'],
output=output,
sides=options['sides'],
scaling=options['scaling'],
halfbandwidth=options['halfbandwidth'],
NW=options['NW'])
timefreq.axis['time'][i] = t
timefreq.axis['freq'][i] = f
if method == 'stft':
timefreq.axis['taper'][i] = arange(Sxx.shape[-1])
timefreq.data[i] = Sxx
return timefreq | Compute the power spectrum over time.
Parameters
----------
data : instance of ChanTime
data to analyze
method : str
the method to compute the time-frequency representation, such as
'morlet' (wavelet using complex morlet window),
'spectrogram' (corresponds to 'spectraldensity' in frequency()),
'stft' (short-time fourier transform, corresponds to 'complex' in
frequency())
options : dict
options depend on the method used, see below.
Returns
-------
instance of ChanTimeFreq
data in time-frequency representation. The exact output depends on
the method. Using 'morlet', you get a complex output at each frequency
where the wavelet was computed.
Examples
--------
The data in ChanTimeFreq are complex and they should stay that way. You
can also get the magnitude or power the easy way using Math.
>>> from wonambi.trans import math, timefreq
>>> tf = timefreq(data, foi=(8, 10))
>>> tf_abs = math(tf, operator_name='abs')
>>> tf_abs.data[0][0, 0, 0]
1737.4662329214384)
Notes
-----
It uses sampling frequency as specified in s_freq, it does not
recompute the sampling frequency based on the time axis.
For method 'morlet', the following options should be specified:
foi : ndarray or list or tuple
vector with frequency of interest
ratio : float
ratio for a wavelet family ( = freq / sigma_f)
sigma_f : float
standard deviation of the wavelet in frequency domain
dur_in_sd : float
duration of the wavelet, given as number of the standard deviation
in the time domain, in one side.
dur_in_s : float
total duration of the wavelet, two-sided (i.e. from start to
finish)
time_skip : int, in samples
number of time points to skip (it runs convolution on all the
data points, but you don't need to store them all)
normalization : str
'area' means that energy is normalized to 1, 'peak' means that the
peak of the wavelet is set at 1, 'max' is a normalization used by
nitime where the max value of the output of the convolution remains
the same even if you change the sigma_f.
zero_mean : bool
make sure that the wavelet has zero mean (only relevant if ratio
< 5)
For method 'spectrogram' or 'stft', the following options should be specified:
duration : int
duration of the window to compute the power spectrum, in s
overlap : int
amount of overlap (0 -> no overlap, 1 -> full overlap) | Below is the the instruction that describes the task:
### Input:
Compute the power spectrum over time.
Parameters
----------
data : instance of ChanTime
data to analyze
method : str
the method to compute the time-frequency representation, such as
'morlet' (wavelet using complex morlet window),
'spectrogram' (corresponds to 'spectraldensity' in frequency()),
'stft' (short-time fourier transform, corresponds to 'complex' in
frequency())
options : dict
options depend on the method used, see below.
Returns
-------
instance of ChanTimeFreq
data in time-frequency representation. The exact output depends on
the method. Using 'morlet', you get a complex output at each frequency
where the wavelet was computed.
Examples
--------
The data in ChanTimeFreq are complex and they should stay that way. You
can also get the magnitude or power the easy way using Math.
>>> from wonambi.trans import math, timefreq
>>> tf = timefreq(data, foi=(8, 10))
>>> tf_abs = math(tf, operator_name='abs')
>>> tf_abs.data[0][0, 0, 0]
1737.4662329214384)
Notes
-----
It uses sampling frequency as specified in s_freq, it does not
recompute the sampling frequency based on the time axis.
For method 'morlet', the following options should be specified:
foi : ndarray or list or tuple
vector with frequency of interest
ratio : float
ratio for a wavelet family ( = freq / sigma_f)
sigma_f : float
standard deviation of the wavelet in frequency domain
dur_in_sd : float
duration of the wavelet, given as number of the standard deviation
in the time domain, in one side.
dur_in_s : float
total duration of the wavelet, two-sided (i.e. from start to
finish)
time_skip : int, in samples
number of time points to skip (it runs convolution on all the
data points, but you don't need to store them all)
normalization : str
'area' means that energy is normalized to 1, 'peak' means that the
peak of the wavelet is set at 1, 'max' is a normalization used by
nitime where the max value of the output of the convolution remains
the same even if you change the sigma_f.
zero_mean : bool
make sure that the wavelet has zero mean (only relevant if ratio
< 5)
For method 'spectrogram' or 'stft', the following options should be specified:
duration : int
duration of the window to compute the power spectrum, in s
overlap : int
amount of overlap (0 -> no overlap, 1 -> full overlap)
### Response:
def timefrequency(data, method='morlet', time_skip=1, **options):
"""Compute the power spectrum over time.
Parameters
----------
data : instance of ChanTime
data to analyze
method : str
the method to compute the time-frequency representation, such as
'morlet' (wavelet using complex morlet window),
'spectrogram' (corresponds to 'spectraldensity' in frequency()),
'stft' (short-time fourier transform, corresponds to 'complex' in
frequency())
options : dict
options depend on the method used, see below.
Returns
-------
instance of ChanTimeFreq
data in time-frequency representation. The exact output depends on
the method. Using 'morlet', you get a complex output at each frequency
where the wavelet was computed.
Examples
--------
The data in ChanTimeFreq are complex and they should stay that way. You
can also get the magnitude or power the easy way using Math.
>>> from wonambi.trans import math, timefreq
>>> tf = timefreq(data, foi=(8, 10))
>>> tf_abs = math(tf, operator_name='abs')
>>> tf_abs.data[0][0, 0, 0]
1737.4662329214384)
Notes
-----
It uses sampling frequency as specified in s_freq, it does not
recompute the sampling frequency based on the time axis.
For method 'morlet', the following options should be specified:
foi : ndarray or list or tuple
vector with frequency of interest
ratio : float
ratio for a wavelet family ( = freq / sigma_f)
sigma_f : float
standard deviation of the wavelet in frequency domain
dur_in_sd : float
duration of the wavelet, given as number of the standard deviation
in the time domain, in one side.
dur_in_s : float
total duration of the wavelet, two-sided (i.e. from start to
finish)
time_skip : int, in samples
number of time points to skip (it runs convolution on all the
data points, but you don't need to store them all)
normalization : str
'area' means that energy is normalized to 1, 'peak' means that the
peak of the wavelet is set at 1, 'max' is a normalization used by
nitime where the max value of the output of the convolution remains
the same even if you change the sigma_f.
zero_mean : bool
make sure that the wavelet has zero mean (only relevant if ratio
< 5)
For method 'spectrogram' or 'stft', the following options should be specified:
duration : int
duration of the window to compute the power spectrum, in s
overlap : int
amount of overlap (0 -> no overlap, 1 -> full overlap)
"""
implemented_methods = ('morlet',
'spectrogram', # this is output spectraldensity
'stft') # this is output complex
if method not in implemented_methods:
raise ValueError('Method ' + method + ' is not implemented yet.\n'
'Currently implemented methods are ' +
', '.join(implemented_methods))
if method == 'morlet':
default_options = {'foi': None,
'ratio': 5,
'sigma_f': None,
'dur_in_sd': 4,
'dur_in_s': None,
'normalization': 'area',
'zero_mean': False,
}
elif method in ('spectrogram', 'stft'):
default_options = {'duration': 1,
'overlap': 0.5,
'step': None,
'detrend': 'linear',
'taper': 'hann',
'sides': 'one',
'scaling': 'power',
'halfbandwidth': 2,
'NW': None,
}
default_options.update(options)
options = default_options
timefreq = ChanTimeFreq()
timefreq.attr = deepcopy(data.attr)
timefreq.s_freq = data.s_freq
timefreq.start_time = data.start_time
timefreq.axis['chan'] = data.axis['chan']
timefreq.axis['time'] = empty(data.number_of('trial'), dtype='O')
timefreq.axis['freq'] = empty(data.number_of('trial'), dtype='O')
if method == 'stft':
timefreq.axis['taper'] = empty(data.number_of('trial'), dtype='O')
timefreq.data = empty(data.number_of('trial'), dtype='O')
if method == 'morlet':
wavelets = _create_morlet(deepcopy(options), data.s_freq)
for i in range(data.number_of('trial')):
lg.info('Processing trial # {0: 6}'.format(i))
timefreq.axis['freq'][i] = array(options['foi'])
timefreq.axis['time'][i] = data.axis['time'][i][::time_skip]
timefreq.data[i] = empty((data.number_of('chan')[i],
data.number_of('time')[i] // time_skip,
len(options['foi'])),
dtype='complex')
for i_c, chan in enumerate(data.axis['chan'][i]):
dat = data(trial=i, chan=chan)
for i_f, wavelet in enumerate(wavelets):
tf = fftconvolve(dat, wavelet, 'same')
timefreq.data[i][i_c, :, i_f] = tf[::time_skip]
if time_skip != 1:
warn('sampling frequency in s_freq refers to the input data, '
'not to the timefrequency output')
elif method in ('spectrogram', 'stft'): # TODO: add timeskip
nperseg = int(options['duration'] * data.s_freq)
if options['step'] is not None:
nstep = int(options['step'] * data.s_freq)
else:
nstep = nperseg - int(options['overlap'] * nperseg)
if method == 'spectrogram':
output = 'spectraldensity'
elif method == 'stft':
output = 'complex'
for i in range(data.number_of('trial')):
t = _create_subepochs(data.time[i], nperseg, nstep).mean(axis=1)
x = _create_subepochs(data(trial=i), nperseg, nstep)
f, Sxx = _fft(x,
s_freq=data.s_freq,
detrend=options['detrend'],
taper=options['taper'],
output=output,
sides=options['sides'],
scaling=options['scaling'],
halfbandwidth=options['halfbandwidth'],
NW=options['NW'])
timefreq.axis['time'][i] = t
timefreq.axis['freq'][i] = f
if method == 'stft':
timefreq.axis['taper'][i] = arange(Sxx.shape[-1])
timefreq.data[i] = Sxx
return timefreq |
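For the 'morlet' branch, the core operation is convolving each channel with a bank of complex Morlet wavelets via fftconvolve. A stripped-down, single-channel, single-frequency sketch with a hand-built wavelet; the normalization here is deliberately crude and does not match the package's 'area'/'peak'/'max' options:

import numpy as np
from scipy.signal import fftconvolve

s_freq = 256.0
t = np.arange(0, 2, 1 / s_freq)
signal = np.sin(2 * np.pi * 10 * t)               # a 10 Hz test oscillation

freq = 10.0                                        # frequency of interest
cycles = 5.0                                       # plays the role of 'ratio' above
sigma_t = cycles / (2 * np.pi * freq)              # wavelet SD in seconds
tw = np.arange(-4 * sigma_t, 4 * sigma_t, 1 / s_freq)
wavelet = np.exp(2j * np.pi * freq * tw) * np.exp(-tw ** 2 / (2 * sigma_t ** 2))
wavelet /= np.sqrt(np.pi) * sigma_t * s_freq       # crude scaling only

tf = fftconvolve(signal, wavelet, mode="same")     # complex time-frequency trace
power = np.abs(tf) ** 2
print(power.shape, power[len(power) // 2])         # strongest response mid-signal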
def simulate_dynamic(self, order = 0.998, solution = solve_type.FAST, collect_dynamic = False, step = 0.1, int_step = 0.01, threshold_changes = 0.0000001):
"""!
@brief Performs dynamic simulation of the network until stop condition is not reached. Stop condition is defined by input argument 'order'.
@param[in] order (double): Order of process synchronization, distributed 0..1.
@param[in] solution (solve_type): Type of solution.
@param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics.
@param[in] step (double): Time step of one iteration of simulation.
@param[in] int_step (double): Integration step, should be less than step.
@param[in] threshold_changes (double): Additional stop condition that helps prevent infinite simulation, defines limit of changes of oscillators between current and previous steps.
@return (list) Dynamic of oscillatory network. If argument 'collect_dynamic' = True, than return dynamic for the whole simulation time,
otherwise returns only last values (last step of simulation) of dynamic.
@see simulate()
@see simulate_static()
"""
if (self._ccore_network_pointer is not None):
ccore_instance_dynamic = wrapper.sync_simulate_dynamic(self._ccore_network_pointer, order, solution, collect_dynamic, step, int_step, threshold_changes);
return sync_dynamic(None, None, ccore_instance_dynamic);
# For statistics and integration
time_counter = 0;
# Prevent infinite loop. It's possible when required state cannot be reached.
previous_order = 0;
current_order = self.sync_local_order();
# If requested input dynamics
dyn_phase = [];
dyn_time = [];
if (collect_dynamic == True):
dyn_phase.append(self._phases);
dyn_time.append(0);
# Execute until sync state will be reached
while (current_order < order):
# update states of oscillators
self._phases = self._calculate_phases(solution, time_counter, step, int_step);
# update time
time_counter += step;
# if requested input dynamic
if (collect_dynamic == True):
dyn_phase.append(self._phases);
dyn_time.append(time_counter);
# update orders
previous_order = current_order;
current_order = self.sync_local_order();
# hang prevention
if (abs(current_order - previous_order) < threshold_changes):
# print("Warning: sync_network::simulate_dynamic - simulation is aborted due to low level of convergence rate (order = " + str(current_order) + ").");
break;
if (collect_dynamic != True):
dyn_phase.append(self._phases);
dyn_time.append(time_counter);
output_sync_dynamic = sync_dynamic(dyn_phase, dyn_time, None);
return output_sync_dynamic; | !
@brief Performs dynamic simulation of the network until stop condition is not reached. Stop condition is defined by input argument 'order'.
@param[in] order (double): Order of process synchronization, distributed 0..1.
@param[in] solution (solve_type): Type of solution.
@param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics.
@param[in] step (double): Time step of one iteration of simulation.
@param[in] int_step (double): Integration step, should be less than step.
@param[in] threshold_changes (double): Additional stop condition that helps prevent infinite simulation, defines limit of changes of oscillators between current and previous steps.
@return (list) Dynamic of oscillatory network. If argument 'collect_dynamic' = True, than return dynamic for the whole simulation time,
otherwise returns only last values (last step of simulation) of dynamic.
@see simulate()
@see simulate_static() | Below is the the instruction that describes the task:
### Input:
!
@brief Performs dynamic simulation of the network until stop condition is not reached. Stop condition is defined by input argument 'order'.
@param[in] order (double): Order of process synchronization, distributed 0..1.
@param[in] solution (solve_type): Type of solution.
@param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics.
@param[in] step (double): Time step of one iteration of simulation.
@param[in] int_step (double): Integration step, should be less than step.
@param[in] threshold_changes (double): Additional stop condition that helps prevent infinite simulation, defines limit of changes of oscillators between current and previous steps.
@return (list) Dynamic of oscillatory network. If argument 'collect_dynamic' = True, than return dynamic for the whole simulation time,
otherwise returns only last values (last step of simulation) of dynamic.
@see simulate()
@see simulate_static()
### Response:
def simulate_dynamic(self, order = 0.998, solution = solve_type.FAST, collect_dynamic = False, step = 0.1, int_step = 0.01, threshold_changes = 0.0000001):
"""!
@brief Performs dynamic simulation of the network until stop condition is not reached. Stop condition is defined by input argument 'order'.
@param[in] order (double): Order of process synchronization, distributed 0..1.
@param[in] solution (solve_type): Type of solution.
@param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics.
@param[in] step (double): Time step of one iteration of simulation.
@param[in] int_step (double): Integration step, should be less than step.
@param[in] threshold_changes (double): Additional stop condition that helps prevent infinite simulation, defines limit of changes of oscillators between current and previous steps.
@return (list) Dynamic of oscillatory network. If argument 'collect_dynamic' = True, than return dynamic for the whole simulation time,
otherwise returns only last values (last step of simulation) of dynamic.
@see simulate()
@see simulate_static()
"""
if (self._ccore_network_pointer is not None):
ccore_instance_dynamic = wrapper.sync_simulate_dynamic(self._ccore_network_pointer, order, solution, collect_dynamic, step, int_step, threshold_changes);
return sync_dynamic(None, None, ccore_instance_dynamic);
# For statistics and integration
time_counter = 0;
# Prevent infinite loop. It's possible when required state cannot be reached.
previous_order = 0;
current_order = self.sync_local_order();
# If requested input dynamics
dyn_phase = [];
dyn_time = [];
if (collect_dynamic == True):
dyn_phase.append(self._phases);
dyn_time.append(0);
# Execute until sync state will be reached
while (current_order < order):
# update states of oscillators
self._phases = self._calculate_phases(solution, time_counter, step, int_step);
# update time
time_counter += step;
# if requested input dynamic
if (collect_dynamic == True):
dyn_phase.append(self._phases);
dyn_time.append(time_counter);
# update orders
previous_order = current_order;
current_order = self.sync_local_order();
# hang prevention
if (abs(current_order - previous_order) < threshold_changes):
# print("Warning: sync_network::simulate_dynamic - simulation is aborted due to low level of convergence rate (order = " + str(current_order) + ").");
break;
if (collect_dynamic != True):
dyn_phase.append(self._phases);
dyn_time.append(time_counter);
output_sync_dynamic = sync_dynamic(dyn_phase, dyn_time, None);
return output_sync_dynamic; |
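The stop condition is an order parameter recomputed after every integration step. A generic standalone illustration of that loop for an all-to-all Kuramoto model, using the classic global order parameter r = |mean(exp(i*theta))| rather than pyclustering's sync_local_order:

import numpy as np

rng = np.random.default_rng(0)
n = 20
phases = rng.uniform(0, 2 * np.pi, n)     # random initial phases
omega = np.full(n, 1.0)                   # identical natural frequencies
coupling, dt, target_order = 2.0, 0.05, 0.998

def order(phases):
    return np.abs(np.mean(np.exp(1j * phases)))

steps = 0
while order(phases) < target_order and steps < 10000:
    # all-to-all Kuramoto update: dtheta_i = omega_i + (K/N) sum_j sin(theta_j - theta_i)
    diffs = phases[None, :] - phases[:, None]
    phases = phases + dt * (omega + coupling / n * np.sin(diffs).sum(axis=1))
    steps += 1

print(steps, order(phases))               # converges well before the step cap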
def from_deformation(cls, deformation):
"""
Factory method that returns a Strain object from a deformation
gradient
Args:
deformation (3x3 array-like):
"""
dfm = Deformation(deformation)
return cls(0.5 * (np.dot(dfm.trans, dfm) - np.eye(3))) | Factory method that returns a Strain object from a deformation
gradient
Args:
deformation (3x3 array-like): | Below is the the instruction that describes the task:
### Input:
Factory method that returns a Strain object from a deformation
gradient
Args:
deformation (3x3 array-like):
### Response:
def from_deformation(cls, deformation):
"""
Factory method that returns a Strain object from a deformation
gradient
Args:
deformation (3x3 array-like):
"""
dfm = Deformation(deformation)
return cls(0.5 * (np.dot(dfm.trans, dfm) - np.eye(3))) |
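The expression 0.5 * (F^T F - I) is the Green-Lagrange strain. A plain NumPy check with a made-up deformation gradient, independent of pymatgen's Deformation and Strain classes:

import numpy as np

F = np.array([[1.02, 0.01, 0.0],    # a small, made-up deformation gradient
              [0.0,  0.99, 0.0],
              [0.0,  0.0,  1.0]])

# Green-Lagrange strain: E = 1/2 (F^T F - I)
E = 0.5 * (F.T @ F - np.eye(3))

print(np.round(E, 5))
# the strain tensor is symmetric by construction
assert np.allclose(E, E.T)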
def context(name):
'''A decorator for theme context processors'''
def wrapper(func):
g.theme.context_processors[name] = func
return func
return wrapper | A decorator for theme context processors | Below is the the instruction that describes the task:
### Input:
A decorator for theme context processors
### Response:
def context(name):
'''A decorator for theme context processors'''
def wrapper(func):
g.theme.context_processors[name] = func
return func
return wrapper |
def add(self, other):
"""Return the QuantumChannel self + other.
Args:
other (QuantumChannel): a quantum channel.
Returns:
Chi: the linear addition self + other as a Chi object.
Raises:
QiskitError: if other is not a QuantumChannel subclass, or
has incompatible dimensions.
"""
if not isinstance(other, Chi):
other = Chi(other)
if self.dim != other.dim:
raise QiskitError("other QuantumChannel dimensions are not equal")
return Chi(self._data + other.data, self._input_dims,
self._output_dims) | Return the QuantumChannel self + other.
Args:
other (QuantumChannel): a quantum channel.
Returns:
Chi: the linear addition self + other as a Chi object.
Raises:
QiskitError: if other is not a QuantumChannel subclass, or
has incompatible dimensions. | Below is the the instruction that describes the task:
### Input:
Return the QuantumChannel self + other.
Args:
other (QuantumChannel): a quantum channel.
Returns:
Chi: the linear addition self + other as a Chi object.
Raises:
QiskitError: if other is not a QuantumChannel subclass, or
has incompatible dimensions.
### Response:
def add(self, other):
"""Return the QuantumChannel self + other.
Args:
other (QuantumChannel): a quantum channel.
Returns:
Chi: the linear addition self + other as a Chi object.
Raises:
QiskitError: if other is not a QuantumChannel subclass, or
has incompatible dimensions.
"""
if not isinstance(other, Chi):
other = Chi(other)
if self.dim != other.dim:
raise QiskitError("other QuantumChannel dimensions are not equal")
return Chi(self._data + other.data, self._input_dims,
self._output_dims) |
def CreateNewZipWithSignedLibs(z_in,
z_out,
ignore_files=None,
signer=None,
skip_signing_files=None):
"""Copies files from one zip to another, signing all qualifying files."""
ignore_files = ignore_files or []
skip_signing_files = skip_signing_files or []
extensions_to_sign = [".sys", ".exe", ".dll", ".pyd"]
to_sign = []
for template_file in z_in.namelist():
if template_file not in ignore_files:
extension = os.path.splitext(template_file)[1].lower()
if (signer and template_file not in skip_signing_files and
extension in extensions_to_sign):
to_sign.append(template_file)
else:
CopyFileInZip(z_in, template_file, z_out)
temp_files = {}
for filename in to_sign:
fd, path = tempfile.mkstemp()
with os.fdopen(fd, "wb") as temp_fd:
temp_fd.write(z_in.read(filename))
temp_files[filename] = path
try:
signer.SignFiles(itervalues(temp_files))
except AttributeError:
for f in itervalues(temp_files):
signer.SignFile(f)
for filename, tempfile_path in iteritems(temp_files):
z_out.writestr(filename, open(tempfile_path, "rb").read()) | Copies files from one zip to another, signing all qualifying files. | Below is the the instruction that describes the task:
### Input:
Copies files from one zip to another, signing all qualifying files.
### Response:
def CreateNewZipWithSignedLibs(z_in,
z_out,
ignore_files=None,
signer=None,
skip_signing_files=None):
"""Copies files from one zip to another, signing all qualifying files."""
ignore_files = ignore_files or []
skip_signing_files = skip_signing_files or []
extensions_to_sign = [".sys", ".exe", ".dll", ".pyd"]
to_sign = []
for template_file in z_in.namelist():
if template_file not in ignore_files:
extension = os.path.splitext(template_file)[1].lower()
if (signer and template_file not in skip_signing_files and
extension in extensions_to_sign):
to_sign.append(template_file)
else:
CopyFileInZip(z_in, template_file, z_out)
temp_files = {}
for filename in to_sign:
fd, path = tempfile.mkstemp()
with os.fdopen(fd, "wb") as temp_fd:
temp_fd.write(z_in.read(filename))
temp_files[filename] = path
try:
signer.SignFiles(itervalues(temp_files))
except AttributeError:
for f in itervalues(temp_files):
signer.SignFile(f)
for filename, tempfile_path in iteritems(temp_files):
z_out.writestr(filename, open(tempfile_path, "rb").read()) |
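Stripped of the signing step, the function above is a selective copy between two open ZipFile objects. A self-contained zipfile sketch of that copy-with-filter pattern; the file names in the commented call are placeholders:

import zipfile

def copy_zip_excluding(src_path, dst_path, ignore_names=()):
    """Copy every member of src_path into dst_path except the ignored names."""
    with zipfile.ZipFile(src_path) as z_in, \
         zipfile.ZipFile(dst_path, "w", zipfile.ZIP_DEFLATED) as z_out:
        for name in z_in.namelist():
            if name in ignore_names:
                continue
            z_out.writestr(name, z_in.read(name))

# copy_zip_excluding("template.zip", "output.zip", ignore_names=("config.yaml",))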
def start_web_rtc(self, ersip, ersport, roomId):
"""
Starts a WebRTC signalling client to an ERS (Evostream Rendezvous
Server).
:param ersip: IP address (xx.yy.zz.xx) of ERS.
:type ersip: str
:param ersport: IP port of ERS.
:type ersport: int
:param roomId: Unique room Identifier within ERS that will be used by
client browsers to connect to this EMS.
:type roomId: str
:link: http://docs.evostream.com/ems_api_definition/startwebrtc
"""
return self.protocol.execute('startwebrtc', ersip=ersip, ersport=ersport,
roomId=roomId) | Starts a WebRTC signalling client to an ERS (Evostream Rendezvous
Server).
:param ersip: IP address (xx.yy.zz.xx) of ERS.
:type ersip: str
:param ersport: IP port of ERS.
:type ersport: int
:param roomId: Unique room Identifier within ERS that will be used by
client browsers to connect to this EMS.
:type roomId: str
:link: http://docs.evostream.com/ems_api_definition/startwebrtc | Below is the the instruction that describes the task:
### Input:
Starts a WebRTC signalling client to an ERS (Evostream Rendezvous
Server).
:param ersip: IP address (xx.yy.zz.xx) of ERS.
:type ersip: str
:param ersport: IP port of ERS.
:type ersport: int
:param roomId: Unique room Identifier within ERS that will be used by
client browsers to connect to this EMS.
:type roomId: str
:link: http://docs.evostream.com/ems_api_definition/startwebrtc
### Response:
def start_web_rtc(self, ersip, ersport, roomId):
"""
Starts a WebRTC signalling client to an ERS (Evostream Rendezvous
Server).
:param ersip: IP address (xx.yy.zz.xx) of ERS.
:type ersip: str
:param ersport: IP port of ERS.
:type ersport: int
:param roomId: Unique room Identifier within ERS that will be used by
client browsers to connect to this EMS.
:type roomId: str
:link: http://docs.evostream.com/ems_api_definition/startwebrtc
"""
return self.protocol.execute('startwebrtc', ersip=ersip, ersport=ersport,
roomId=roomId) |
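A short call sketch for start_web_rtc; the api object stands in for an already-constructed EMS client exposing this method, and the address, port and room id are placeholders:
result = api.start_web_rtc(ersip="192.168.1.10", ersport=3535, roomId="demo-room")
print(result)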
def _start_member(self):
"""Starts writing a new member if required.
"""
if self._new_member:
self._init_write(self.name)
self._write_gzip_header()
self._new_member = False | Starts writing a new member if required. | Below is the the instruction that describes the task:
### Input:
Starts writing a new member if required.
### Response:
def _start_member(self):
"""Starts writing a new member if required.
"""
if self._new_member:
self._init_write(self.name)
self._write_gzip_header()
self._new_member = False |
def _put_output(self, output: Optional[str]) -> None:
"""Put output string to ffmpeg command."""
if output is None:
self._argv.extend(["-f", "null", "-"])
return
output_cmd = shlex.split(str(output))
if len(output_cmd) > 1:
self._argv.extend(output_cmd)
else:
self._argv.append(output) | Put output string to ffmpeg command. | Below is the the instruction that describes the task:
### Input:
Put output string to ffmpeg command.
### Response:
def _put_output(self, output: Optional[str]) -> None:
"""Put output string to ffmpeg command."""
if output is None:
self._argv.extend(["-f", "null", "-"])
return
output_cmd = shlex.split(str(output))
if len(output_cmd) > 1:
self._argv.extend(output_cmd)
else:
self._argv.append(output) |
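An illustration of the three branches of _put_output above; ff stands in for an instance of the wrapper class that owns the _argv list, so the calls are only a sketch:
ff._put_output(None)              # null sink: extends _argv with ["-f", "null", "-"]
ff._put_output("-f mp4 out.mp4")  # multi-token string is shlex-split and extended
ff._put_output("out.mkv")         # single token is appended unchanged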
def matchi(string, templ, wstr, wchr):
"""
Determine whether a string is matched by a template containing wild cards.
The pattern comparison is case-insensitive.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/matchi_c.html
:param string: String to be tested.
:type string: str
:param templ: Template (with wild cards) to test against string.
:type templ: str
:param wstr: Wild string token.
:type wstr: str of length 1
:param wchr: Wild character token.
:type wchr: str of length 1
:return: The function returns True if string matches templ, else False
:rtype: bool
"""
string = stypes.stringToCharP(string)
templ = stypes.stringToCharP(templ)
wstr = ctypes.c_char(wstr.encode(encoding='UTF-8'))
wchr = ctypes.c_char(wchr.encode(encoding='UTF-8'))
return bool(libspice.matchi_c(string, templ, wstr, wchr)) | Determine whether a string is matched by a template containing wild cards.
The pattern comparison is case-insensitive.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/matchi_c.html
:param string: String to be tested.
:type string: str
:param templ: Template (with wild cards) to test against string.
:type templ: str
:param wstr: Wild string token.
:type wstr: str of length 1
:param wchr: Wild character token.
:type wchr: str of length 1
:return: The function returns True if string matches templ, else False
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Determine whether a string is matched by a template containing wild cards.
The pattern comparison is case-insensitive.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/matchi_c.html
:param string: String to be tested.
:type string: str
:param templ: Template (with wild cards) to test against string.
:type templ: str
:param wstr: Wild string token.
:type wstr: str of length 1
:param wchr: Wild character token.
:type wchr: str of length 1
:return: The function returns True if string matches templ, else False
:rtype: bool
### Response:
def matchi(string, templ, wstr, wchr):
"""
Determine whether a string is matched by a template containing wild cards.
The pattern comparison is case-insensitive.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/matchi_c.html
:param string: String to be tested.
:type string: str
:param templ: Template (with wild cards) to test against string.
:type templ: str
:param wstr: Wild string token.
:type wstr: str of length 1
:param wchr: Wild character token.
:type wchr: str of length 1
:return: The function returns True if string matches templ, else False
:rtype: bool
"""
string = stypes.stringToCharP(string)
templ = stypes.stringToCharP(templ)
wstr = ctypes.c_char(wstr.encode(encoding='UTF-8'))
wchr = ctypes.c_char(wchr.encode(encoding='UTF-8'))
return bool(libspice.matchi_c(string, templ, wstr, wchr)) |
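A small usage sketch for matchi, mirroring the CSPICE examples; it assumes the SPICE shared library wrapped by this module is already loaded:
print(matchi("ABCDEFGH", "*def*", "*", "%"))  # True: wildcard match, case-insensitive
print(matchi("ABCDEFGH", "*z%", "*", "%"))    # False: no 'z' in the string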
def format_relation_name(value, format_type=None):
"""
.. warning::
The 'format_relation_name' function has been renamed 'format_resource_type' and the
settings are now 'JSON_API_FORMAT_TYPES' and 'JSON_API_PLURALIZE_TYPES' instead of
'JSON_API_FORMAT_RELATION_KEYS' and 'JSON_API_PLURALIZE_RELATION_TYPE'
"""
warnings.warn(
"The 'format_relation_name' function has been renamed 'format_resource_type' and the "
"settings are now 'JSON_API_FORMAT_TYPES' and 'JSON_API_PLURALIZE_TYPES' instead of "
"'JSON_API_FORMAT_RELATION_KEYS' and 'JSON_API_PLURALIZE_RELATION_TYPE'",
DeprecationWarning
)
if format_type is None:
format_type = json_api_settings.FORMAT_RELATION_KEYS
pluralize = json_api_settings.PLURALIZE_RELATION_TYPE
return format_resource_type(value, format_type, pluralize) | .. warning::
The 'format_relation_name' function has been renamed 'format_resource_type' and the
settings are now 'JSON_API_FORMAT_TYPES' and 'JSON_API_PLURALIZE_TYPES' instead of
'JSON_API_FORMAT_RELATION_KEYS' and 'JSON_API_PLURALIZE_RELATION_TYPE' | Below is the the instruction that describes the task:
### Input:
.. warning::
The 'format_relation_name' function has been renamed 'format_resource_type' and the
settings are now 'JSON_API_FORMAT_TYPES' and 'JSON_API_PLURALIZE_TYPES' instead of
'JSON_API_FORMAT_RELATION_KEYS' and 'JSON_API_PLURALIZE_RELATION_TYPE'
### Response:
def format_relation_name(value, format_type=None):
"""
.. warning::
The 'format_relation_name' function has been renamed 'format_resource_type' and the
settings are now 'JSON_API_FORMAT_TYPES' and 'JSON_API_PLURALIZE_TYPES' instead of
'JSON_API_FORMAT_RELATION_KEYS' and 'JSON_API_PLURALIZE_RELATION_TYPE'
"""
warnings.warn(
"The 'format_relation_name' function has been renamed 'format_resource_type' and the "
"settings are now 'JSON_API_FORMAT_TYPES' and 'JSON_API_PLURALIZE_TYPES' instead of "
"'JSON_API_FORMAT_RELATION_KEYS' and 'JSON_API_PLURALIZE_RELATION_TYPE'",
DeprecationWarning
)
if format_type is None:
format_type = json_api_settings.FORMAT_RELATION_KEYS
pluralize = json_api_settings.PLURALIZE_RELATION_TYPE
return format_resource_type(value, format_type, pluralize) |
def minimise_tables(routing_tables, target_lengths,
methods=(remove_default_entries, ordered_covering)):
"""Utility function which attempts to minimises routing tables for multiple
chips.
For each routing table supplied, this function will attempt to use the
minimisation algorithms given (or some sensible default algorithms), trying
each sequentially until a target number of routing entries has been
reached.
Parameters
----------
routing_tables : {(x, y): [\
:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Dictionary mapping chip co-ordinates to the routing tables associated
with that chip. NOTE: This is the data structure as returned by
:py:meth:`~rig.routing_table.routing_tree_to_tables`.
target_lengths : int or {(x, y): int or None, ...} or None
Maximum length of routing tables. If an integer this is assumed to be
the maximum length for any table; if a dictionary then it is assumed to
be a mapping from co-ordinate to maximum length (or None); if None then
tables will be minimised as far as possible.
methods :
Each method is tried in the order presented and the first to meet the
required target length for a given chip is used. Consequently less
computationally costly algorithms should be nearer the start of the
list. The defaults will try to remove default routes
(:py:meth:`rig.routing_table.remove_default_routes.minimise`) and then
fall back on the ordered covering algorithm
(:py:meth:`rig.routing_table.ordered_covering.minimise`).
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Minimised routing tables, guaranteed to be at least as small as the
table sizes specified by `target_lengths`.
Raises
------
MinimisationFailedError
If no method can sufficiently minimise a table.
"""
# Coerce the target lengths into the correct forms
if not isinstance(target_lengths, dict):
lengths = collections.defaultdict(lambda: target_lengths)
else:
lengths = target_lengths
# Minimise the routing tables
new_tables = dict()
for chip, table in iteritems(routing_tables):
# Try to minimise the table
try:
new_table = minimise_table(table, lengths[chip], methods)
except MinimisationFailedError as exc:
exc.chip = chip
raise
# Store the table if it isn't empty
if new_table:
new_tables[chip] = new_table
return new_tables | Utility function which attempts to minimise routing tables for multiple
chips.
For each routing table supplied, this function will attempt to use the
minimisation algorithms given (or some sensible default algorithms), trying
each sequentially until a target number of routing entries has been
reached.
Parameters
----------
routing_tables : {(x, y): [\
:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Dictionary mapping chip co-ordinates to the routing tables associated
with that chip. NOTE: This is the data structure as returned by
:py:meth:`~rig.routing_table.routing_tree_to_tables`.
target_lengths : int or {(x, y): int or None, ...} or None
Maximum length of routing tables. If an integer this is assumed to be
the maximum length for any table; if a dictionary then it is assumed to
be a mapping from co-ordinate to maximum length (or None); if None then
tables will be minimised as far as possible.
methods :
Each method is tried in the order presented and the first to meet the
required target length for a given chip is used. Consequently less
computationally costly algorithms should be nearer the start of the
list. The defaults will try to remove default routes
(:py:meth:`rig.routing_table.remove_default_routes.minimise`) and then
fall back on the ordered covering algorithm
(:py:meth:`rig.routing_table.ordered_covering.minimise`).
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Minimised routing tables, guaranteed to be at least as small as the
table sizes specified by `target_lengths`.
Raises
------
MinimisationFailedError
If no method can sufficiently minimise a table. | Below is the the instruction that describes the task:
### Input:
Utility function which attempts to minimise routing tables for multiple
chips.
For each routing table supplied, this function will attempt to use the
minimisation algorithms given (or some sensible default algorithms), trying
each sequentially until a target number of routing entries has been
reached.
Parameters
----------
routing_tables : {(x, y): [\
:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Dictionary mapping chip co-ordinates to the routing tables associated
with that chip. NOTE: This is the data structure as returned by
:py:meth:`~rig.routing_table.routing_tree_to_tables`.
target_lengths : int or {(x, y): int or None, ...} or None
Maximum length of routing tables. If an integer this is assumed to be
the maximum length for any table; if a dictionary then it is assumed to
be a mapping from co-ordinate to maximum length (or None); if None then
tables will be minimised as far as possible.
methods :
Each method is tried in the order presented and the first to meet the
required target length for a given chip is used. Consequently less
computationally costly algorithms should be nearer the start of the
list. The defaults will try to remove default routes
(:py:meth:`rig.routing_table.remove_default_routes.minimise`) and then
fall back on the ordered covering algorithm
(:py:meth:`rig.routing_table.ordered_covering.minimise`).
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Minimised routing tables, guaranteed to be at least as small as the
table sizes specified by `target_lengths`.
Raises
------
MinimisationFailedError
If no method can sufficiently minimise a table.
### Response:
def minimise_tables(routing_tables, target_lengths,
methods=(remove_default_entries, ordered_covering)):
"""Utility function which attempts to minimises routing tables for multiple
chips.
For each routing table supplied, this function will attempt to use the
minimisation algorithms given (or some sensible default algorithms), trying
each sequentially until a target number of routing entries has been
reached.
Parameters
----------
routing_tables : {(x, y): [\
:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Dictionary mapping chip co-ordinates to the routing tables associated
with that chip. NOTE: This is the data structure as returned by
:py:meth:`~rig.routing_table.routing_tree_to_tables`.
target_lengths : int or {(x, y): int or None, ...} or None
Maximum length of routing tables. If an integer this is assumed to be
the maximum length for any table; if a dictionary then it is assumed to
be a mapping from co-ordinate to maximum length (or None); if None then
tables will be minimised as far as possible.
methods :
Each method is tried in the order presented and the first to meet the
required target length for a given chip is used. Consequently less
computationally costly algorithms should be nearer the start of the
list. The defaults will try to remove default routes
(:py:meth:`rig.routing_table.remove_default_routes.minimise`) and then
fall back on the ordered covering algorithm
(:py:meth:`rig.routing_table.ordered_covering.minimise`).
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Minimised routing tables, guaranteed to be at least as small as the
table sizes specified by `target_lengths`.
Raises
------
MinimisationFailedError
If no method can sufficiently minimise a table.
"""
# Coerce the target lengths into the correct forms
if not isinstance(target_lengths, dict):
lengths = collections.defaultdict(lambda: target_lengths)
else:
lengths = target_lengths
# Minimise the routing tables
new_tables = dict()
for chip, table in iteritems(routing_tables):
# Try to minimise the table
try:
new_table = minimise_table(table, lengths[chip], methods)
except MinimisationFailedError as exc:
exc.chip = chip
raise
# Store the table if it isn't empty
if new_table:
new_tables[chip] = new_table
return new_tables |
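A hedged sketch of calling minimise_tables on a prebuilt set of tables; tables is assumed to be the {(x, y): [RoutingTableEntry, ...]} mapping described in the docstring, e.g. as produced by routing_tree_to_tables:
minimised = minimise_tables(tables, target_lengths=1024)
for (x, y), entries in minimised.items():
    assert len(entries) <= 1024  # every surviving table fits the target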
def forward_complex(self, log_sigma):
"""Compute a model response, i.e. complex impedances
Parameters
----------
log_sigma : 1xN or 2xN numpy.ndarray
Model parameters log sigma, N the number of cells. If first
dimension is of length one, assume phase values to be zero
Returns
-------
measurements : Nx2 numpy nd array
Return log_e sigma values of computed forward response
"""
m = 1.0 / np.exp(log_sigma)
tdm = self._get_tdm(m)
measurements = tdm.measurements()
# import IPython
# IPython.embed()
# convert R to log sigma
measurements[:, 0] = np.log(1.0 / measurements[:, 0])
return measurements | Compute a model response, i.e. complex impedances
Parameters
----------
log_sigma : 1xN or 2xN numpy.ndarray
Model parameters log sigma, N the number of cells. If first
dimension is of length one, assume phase values to be zero
Returns
-------
measurements : Nx2 numpy nd array
Return log_e sigma values of computed forward response | Below is the the instruction that describes the task:
### Input:
Compute a model response, i.e. complex impedances
Parameters
----------
log_sigma : 1xN or 2xN numpy.ndarray
Model parameters log sigma, N the number of cells. If first
dimension is of length one, assume phase values to be zero
Returns
-------
measurements : Nx2 numpy nd array
Return log_e sigma values of computed forward response
### Response:
def forward_complex(self, log_sigma):
"""Compute a model response, i.e. complex impedances
Parameters
----------
log_sigma : 1xN or 2xN numpy.ndarray
Model parameters log sigma, N the number of cells. If first
dimension is of length one, assume phase values to be zero
Returns
-------
measurements : Nx2 numpy nd array
Return log_e sigma values of computed forward response
"""
m = 1.0 / np.exp(log_sigma)
tdm = self._get_tdm(m)
measurements = tdm.measurements()
# import IPython
# IPython.embed()
# convert R to log sigma
measurements[:, 0] = np.log(1.0 / measurements[:, 0])
return measurements |
def handler(self, environ, start_response):
"""Proxy for requests to the actual http server"""
logger = logging.getLogger(__name__ + '.WSGIProxyApplication.handler')
url = urlparse(reconstruct_url(environ, self.port))
# Create connection object
try:
connection = self.connection_class(url.netloc)
# Build path
path = url.geturl().replace('%s://%s' % (url.scheme, url.netloc),
'')
except Exception:
start_response('501 Gateway Error', [('Content-Type', 'text/html')])
logger.exception('Could not Connect')
yield '<H1>Could not connect</H1>'
return
# Read in request body if it exists
body = length = None
try:
length = int(environ['CONTENT_LENGTH'])
except (KeyError, ValueError):
# This is a situation where client HTTP POST is missing content-length.
# This is also a situation where (WebOb?) may screw up encoding and insert extraneous = in the body.
# https://github.com/ipython/ipython/issues/8416
if environ["REQUEST_METHOD"] == "POST":
if environ.get("CONTENT_TYPE") == 'application/x-www-form-urlencoded; charset=UTF-8':
body = environ['wsgi.input'].read()
try:
body = unquote_plus(body.decode("utf-8"))
# Fix extra = at end of JSON payload
if body.startswith("{") and body.endswith("}="):
body = body[0:len(body) - 1]
except Exception as e:
logger.exception(e)
logger.error("Could not decode body: %s", body)
length = len(body)
else:
body = environ['wsgi.input'].read(length)
# Build headers
logger.debug('environ = %r', environ)
headers = dict(
(key, value)
for key, value in (
# This is a hacky way of getting the header names right
(key[5:].lower().replace('_', '-'), value)
for key, value in environ.items()
# Keys that start with HTTP_ are all headers
if key.startswith('HTTP_')
)
if not is_hop_by_hop(key)
)
# Handle headers that aren't HTTP_ in environ
try:
headers['content-type'] = environ['CONTENT_TYPE']
except KeyError:
pass
# Add our host if one isn't defined
if 'host' not in headers:
headers['host'] = environ['SERVER_NAME']
# Make the remote request
try:
logger.debug('%s %s %r',
environ['REQUEST_METHOD'], path, headers)
connection.request(environ['REQUEST_METHOD'], path,
body=body, headers=headers)
except Exception as e:
# We need extra exception handling in the case the server fails
# in mid connection, it's an edge case but I've seen it
if isinstance(e, ConnectionRefusedError):
# The notebook was shutdown by the user
pass
else:
# This might be a genuine error
logger.exception(e)
start_response('501 Gateway Error', [('Content-Type', 'text/html')])
yield '<H1>Could not proxy IPython Notebook running localhost:{}</H1>'.format(self.port).encode("utf-8")
return
try:
response = connection.getresponse()
except ConnectionResetError:
# Notebook shutdown
start_response('501 Gateway Error', [('Content-Type', 'text/html')])
yield '<H1>Could not proxy IPython Notebook running localhost:{}</H1>'.format(self.port).encode("utf-8")
return
hopped_headers = response.getheaders()
headers = [(key, value)
for key, value in hopped_headers
if not is_hop_by_hop(key)]
start_response('{0.status} {0.reason}'.format(response), headers)
while True:
chunk = response.read(4096)
if chunk:
yield chunk
else:
break | Proxy for requests to the actual http server | Below is the the instruction that describes the task:
### Input:
Proxy for requests to the actual http server
### Response:
def handler(self, environ, start_response):
"""Proxy for requests to the actual http server"""
logger = logging.getLogger(__name__ + '.WSGIProxyApplication.handler')
url = urlparse(reconstruct_url(environ, self.port))
# Create connection object
try:
connection = self.connection_class(url.netloc)
# Build path
path = url.geturl().replace('%s://%s' % (url.scheme, url.netloc),
'')
except Exception:
start_response('501 Gateway Error', [('Content-Type', 'text/html')])
logger.exception('Could not Connect')
yield '<H1>Could not connect</H1>'
return
# Read in request body if it exists
body = length = None
try:
length = int(environ['CONTENT_LENGTH'])
except (KeyError, ValueError):
# This is a situation where client HTTP POST is missing content-length.
# This is also a situation where (WebOb?) may screw up encoding and insert extraneous = in the body.
# https://github.com/ipython/ipython/issues/8416
if environ["REQUEST_METHOD"] == "POST":
if environ.get("CONTENT_TYPE") == 'application/x-www-form-urlencoded; charset=UTF-8':
body = environ['wsgi.input'].read()
try:
body = unquote_plus(body.decode("utf-8"))
# Fix extra = at end of JSON payload
if body.startswith("{") and body.endswith("}="):
body = body[0:len(body) - 1]
except Exception as e:
logger.exception(e)
logger.error("Could not decode body: %s", body)
length = len(body)
else:
body = environ['wsgi.input'].read(length)
# Build headers
logger.debug('environ = %r', environ)
headers = dict(
(key, value)
for key, value in (
# This is a hacky way of getting the header names right
(key[5:].lower().replace('_', '-'), value)
for key, value in environ.items()
# Keys that start with HTTP_ are all headers
if key.startswith('HTTP_')
)
if not is_hop_by_hop(key)
)
# Handle headers that aren't HTTP_ in environ
try:
headers['content-type'] = environ['CONTENT_TYPE']
except KeyError:
pass
# Add our host if one isn't defined
if 'host' not in headers:
headers['host'] = environ['SERVER_NAME']
# Make the remote request
try:
logger.debug('%s %s %r',
environ['REQUEST_METHOD'], path, headers)
connection.request(environ['REQUEST_METHOD'], path,
body=body, headers=headers)
except Exception as e:
# We need extra exception handling in the case the server fails
# in mid connection, it's an edge case but I've seen it
if isinstance(e, ConnectionRefusedError):
# The notebook was shutdown by the user
pass
else:
# This might be a genuine error
logger.exception(e)
start_response('501 Gateway Error', [('Content-Type', 'text/html')])
yield '<H1>Could not proxy IPython Notebook running localhost:{}</H1>'.format(self.port).encode("utf-8")
return
try:
response = connection.getresponse()
except ConnectionResetError:
# Notebook shutdown
start_response('501 Gateway Error', [('Content-Type', 'text/html')])
yield '<H1>Could not proxy IPython Notebook running localhost:{}</H1>'.format(self.port).encode("utf-8")
return
hopped_headers = response.getheaders()
headers = [(key, value)
for key, value in hopped_headers
if not is_hop_by_hop(key)]
start_response('{0.status} {0.reason}'.format(response), headers)
while True:
chunk = response.read(4096)
if chunk:
yield chunk
else:
break |
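A speculative wiring of the proxy above behind wsgiref; the WSGIProxyApplication constructor shown here (taking only the upstream notebook port) is an assumption, but the bound handler method is a valid WSGI callable as written:
from wsgiref.simple_server import make_server
proxy = WSGIProxyApplication(8888)   # hypothetical: port of the local notebook server
make_server("localhost", 9999, proxy.handler).serve_forever()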
def sizeof(self, context=None) -> int:
"""
Return the size of the construct in bytes.
:param context: Optional context dictionary.
"""
if context is None:
context = Context()
if not isinstance(context, Context):
context = Context(context)
try:
return self._sizeof(context)
except Error:
raise
except Exception as exc:
raise SizeofError(str(exc)) | Return the size of the construct in bytes.
:param context: Optional context dictionary. | Below is the the instruction that describes the task:
### Input:
Return the size of the construct in bytes.
:param context: Optional context dictionary.
### Response:
def sizeof(self, context=None) -> int:
"""
Return the size of the construct in bytes.
:param context: Optional context dictionary.
"""
if context is None:
context = Context()
if not isinstance(context, Context):
context = Context(context)
try:
return self._sizeof(context)
except Error:
raise
except Exception as exc:
raise SizeofError(str(exc)) |
def create(python, env_dir, system, prompt, bare, virtualenv_py=None):
"""Main entry point to use this as a module.
"""
if not python or python == sys.executable:
_create_with_this(
env_dir=env_dir, system=system, prompt=prompt,
bare=bare, virtualenv_py=virtualenv_py,
)
else:
_create_with_python(
python=python,
env_dir=env_dir, system=system, prompt=prompt,
bare=bare, virtualenv_py=virtualenv_py,
) | Main entry point to use this as a module. | Below is the the instruction that describes the task:
### Input:
Main entry point to use this as a module.
### Response:
def create(python, env_dir, system, prompt, bare, virtualenv_py=None):
"""Main entry point to use this as a module.
"""
if not python or python == sys.executable:
_create_with_this(
env_dir=env_dir, system=system, prompt=prompt,
bare=bare, virtualenv_py=virtualenv_py,
)
else:
_create_with_python(
python=python,
env_dir=env_dir, system=system, prompt=prompt,
bare=bare, virtualenv_py=virtualenv_py,
) |
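A direct call sketch for create; the interpreter path and target directory are placeholders:
create(
    python="/usr/bin/python3.8",  # None or sys.executable would use the running interpreter
    env_dir="/tmp/demo-venv",
    system=False,                 # no access to system site-packages
    prompt="(demo)",
    bare=True,
)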
def json_worker(self, mask, cache_id=None, cache_method="string",
cache_section="www"):
"""A function annotation that adds a worker request. A worker request
is a POST request that is computed asynchronously. That is, the
actual task is performed in a different thread and the network
request returns immediately. The client side uses polling to fetch
the result and can also cancel the task. The worker javascript
client side must be linked and used for accessing the request.
Parameters
----------
mask : string
The URL that must be matched to perform this request.
cache_id : function(args) or None
Optional function for caching the result. If set the worker must be
idempotent. Requires a `cache` object for the server. The function
needs to return an object constructed from the function arguments
to uniquely identify the result. Results are cached verbatim.
cache_method : string or None
Optional cache method string. Gets passed to get_hnd() of the
cache. Defaults to "string" which requires a JSON serializable
cache_id.
cache_section : string or None
Optional cache section string. Gets passed to get_hnd() of the
cache. Defaults to "www".
fun : function(args); (The annotated function)
A function returning a (JSON-able) object. The function takes one
argument which is the dictionary containing the payload from the
client side. If the result is None a 404 error is sent.
"""
use_cache = cache_id is not None
def wrapper(fun):
lock = threading.RLock()
tasks = {}
cargo = {}
cargo_cleaner = [None]
def is_done(cur_key):
with lock:
if cur_key not in tasks:
return True
if "running" not in tasks[cur_key]:
return False
return not tasks[cur_key]["running"]
def start_cargo_cleaner():
def get_next_cargo():
with lock:
next_ttl = None
for value in cargo.values():
ttl, _ = value
if next_ttl is None or ttl < next_ttl:
next_ttl = ttl
return next_ttl
def clean_for(timestamp):
with lock:
keys = []
for (key, value) in cargo.items():
ttl, _ = value
if ttl > timestamp:
continue
keys.append(key)
for k in keys:
cargo.pop(k)
msg("purged cargo that was never read ({0})", k)
def remove_cleaner():
with lock:
if get_next_cargo() is not None:
return False
cargo_cleaner[0] = None
return True
def clean():
while True:
next_ttl = get_next_cargo()
if next_ttl is None:
if remove_cleaner():
break
else:
continue
time_until = next_ttl - time.time()
if time_until > 0:
time.sleep(time_until)
clean_for(time.time())
with lock:
if cargo_cleaner[0] is not None:
return
cleaner = self._thread_factory(
target=clean,
name="{0}-Cargo-Cleaner".format(self.__class__))
cleaner.daemon = True
cargo_cleaner[0] = cleaner
cleaner.start()
def add_cargo(content):
with lock:
mcs = self.max_chunk_size
if mcs < 1:
raise ValueError("invalid chunk size: {0}".format(mcs))
ttl = time.time() + 10 * 60 # 10 minutes
chunks = []
while len(content) > 0:
chunk = content[:mcs]
content = content[mcs:]
cur_key = get_key()
cargo[cur_key] = (ttl, chunk)
chunks.append(cur_key)
start_cargo_cleaner()
return chunks
def remove_cargo(cur_key):
with lock:
_, result = cargo.pop(cur_key)
return result
def remove_worker(cur_key):
with lock:
task = tasks.pop(cur_key, None)
if task is None:
err_msg = "Task {0} not found!".format(cur_key)
return None, (ValueError(err_msg), None)
if task["running"]:
th = task["thread"]
if th.is_alive():
# kill the thread
tid = None
for tk, tobj in threading._active.items():
if tobj is th:
tid = tk
break
if tid is not None:
papi = ctypes.pythonapi
pts_sae = papi.PyThreadState_SetAsyncExc
res = pts_sae(ctypes.c_long(tid),
ctypes.py_object(WorkerDeath))
if res == 0:
# invalid thread id -- the thread might
# be done already
msg("invalid thread id for " +
"killing worker {0}", cur_key)
elif res != 1:
# roll back
pts_sae(ctypes.c_long(tid), None)
msg("killed too many ({0}) workers? {1}",
res, cur_key)
else:
if self.verbose_workers:
msg("killed worker {0}", cur_key)
err_msg = "Task {0} is still running!".format(cur_key)
return None, (ValueError(err_msg), None)
return task["result"], task["exception"]
def start_worker(args, cur_key, get_thread):
try:
with lock:
task = {
"running": True,
"result": None,
"exception": None,
"thread": get_thread(),
}
tasks[cur_key] = task
if use_cache:
cache_obj = cache_id(args)
if cache_obj is not None and self.cache is not None:
with self.cache.get_hnd(
cache_obj,
section=cache_section,
method=cache_method) as hnd:
if hnd.has():
result = hnd.read()
else:
result = hnd.write(json_dumps(fun(args)))
else:
result = json_dumps(fun(args))
else:
result = json_dumps(fun(args))
with lock:
task["running"] = False
task["result"] = result
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
with lock:
task["running"] = False
task["exception"] = (e, traceback.format_exc())
return
# make sure the result does not get stored forever
try:
# remove 2 minutes after not reading the result
time.sleep(120)
finally:
_result, err = remove_worker(cur_key)
if err is not None:
e, tb = err
if tb is not None:
msg("Error in purged worker for {0}: {1}\n{2}",
cur_key, e, tb)
return
msg("purged result that was never read ({0})", cur_key)
def get_key():
with lock:
crc32 = zlib.crc32(repr(get_time()).encode('utf8'))
cur_key = int(crc32 & 0xFFFFFFFF)
while cur_key in tasks or cur_key in cargo:
key = int(cur_key + 1)
if key == cur_key:
key = 0
cur_key = key
return cur_key
def reserve_worker():
with lock:
cur_key = get_key()
tasks[cur_key] = {} # put marker
return cur_key
def run_worker(req, args):
post = args["post"]
try:
action = post["action"]
cur_key = None
if action == "stop":
cur_key = post["token"]
remove_worker(cur_key) # throw away the result
return {
"token": cur_key,
"done": True,
"result": None,
"continue": False,
}
if action == "start":
cur_key = reserve_worker()
inner_post = post.get("payload", {})
th = []
wname = "{0}-Worker-{1}".format(self.__class__,
cur_key)
worker = self._thread_factory(
target=start_worker,
name=wname,
args=(inner_post, cur_key, lambda: th[0]))
th.append(worker)
worker.start()
# give fast tasks a way to immediately return results
time.sleep(0.1)
if action == "cargo":
cur_key = post["token"]
result = remove_cargo(cur_key)
return {
"token": cur_key,
"result": result,
}
if action == "get":
cur_key = post["token"]
if cur_key is None:
raise ValueError("invalid action: {0}".format(action))
if is_done(cur_key):
result, exception = remove_worker(cur_key)
if exception is not None:
e, tb = exception
if tb is None:
# token does not exist anymore
return {
"token": cur_key,
"done": False,
"result": None,
"continue": False,
}
if isinstance(e, PreventDefaultResponse):
raise e
msg("Error in worker for {0}: {1}\n{2}",
cur_key, e, tb)
raise PreventDefaultResponse(500, "worker error")
if len(result) > self.max_chunk_size:
cargo_keys = add_cargo(result)
return {
"token": cur_key,
"done": True,
"result": cargo_keys,
"continue": True,
}
return {
"token": cur_key,
"done": True,
"result": result,
"continue": False,
}
return {
"token": cur_key,
"done": False,
"result": None,
"continue": True,
}
except: # nopep8
msg("Error processing worker command: {0}", post)
raise
self.add_json_post_mask(mask, run_worker)
self.set_file_argc(mask, 0)
return fun
return wrapper | A function annotation that adds a worker request. A worker request
is a POST request that is computed asynchronously. That is, the
actual task is performed in a different thread and the network
request returns immediately. The client side uses polling to fetch
the result and can also cancel the task. The worker javascript
client side must be linked and used for accessing the request.
Parameters
----------
mask : string
The URL that must be matched to perform this request.
cache_id : function(args) or None
Optional function for caching the result. If set the worker must be
idempotent. Requires a `cache` object for the server. The function
needs to return an object constructed from the function arguments
to uniquely identify the result. Results are cached verbatim.
cache_method : string or None
Optional cache method string. Gets passed to get_hnd() of the
cache. Defaults to "string" which requires a JSON serializable
cache_id.
cache_section : string or None
Optional cache section string. Gets passed to get_hnd() of the
cache. Defaults to "www".
fun : function(args); (The annotated function)
A function returning a (JSON-able) object. The function takes one
argument which is the dictionary containing the payload from the
client side. If the result is None a 404 error is sent. | Below is the the instruction that describes the task:
### Input:
A function annotation that adds a worker request. A worker request
is a POST request that is computed asynchronously. That is, the
actual task is performed in a different thread and the network
request returns immediately. The client side uses polling to fetch
the result and can also cancel the task. The worker javascript
client side must be linked and used for accessing the request.
Parameters
----------
mask : string
The URL that must be matched to perform this request.
cache_id : function(args) or None
Optional function for caching the result. If set the worker must be
idempotent. Requires a `cache` object for the server. The function
needs to return an object constructed from the function arguments
to uniquely identify the result. Results are cached verbatim.
cache_method : string or None
Optional cache method string. Gets passed to get_hnd() of the
cache. Defaults to "string" which requires a JSON serializable
cache_id.
cache_section : string or None
Optional cache section string. Gets passed to get_hnd() of the
cache. Defaults to "www".
fun : function(args); (The annotated function)
A function returning a (JSON-able) object. The function takes one
argument which is the dictionary containing the payload from the
client side. If the result is None a 404 error is sent.
### Response:
def json_worker(self, mask, cache_id=None, cache_method="string",
cache_section="www"):
"""A function annotation that adds a worker request. A worker request
is a POST request that is computed asynchronously. That is, the
actual task is performed in a different thread and the network
request returns immediately. The client side uses polling to fetch
the result and can also cancel the task. The worker javascript
client side must be linked and used for accessing the request.
Parameters
----------
mask : string
The URL that must be matched to perform this request.
cache_id : function(args) or None
Optional function for caching the result. If set the worker must be
idempotent. Requires a `cache` object for the server. The function
needs to return an object constructed from the function arguments
to uniquely identify the result. Results are cached verbatim.
cache_method : string or None
Optional cache method string. Gets passed to get_hnd() of the
cache. Defaults to "string" which requires a JSON serializable
cache_id.
cache_section : string or None
Optional cache section string. Gets passed to get_hnd() of the
cache. Defaults to "www".
fun : function(args); (The annotated function)
A function returning a (JSON-able) object. The function takes one
argument which is the dictionary containing the payload from the
client side. If the result is None a 404 error is sent.
"""
use_cache = cache_id is not None
def wrapper(fun):
lock = threading.RLock()
tasks = {}
cargo = {}
cargo_cleaner = [None]
def is_done(cur_key):
with lock:
if cur_key not in tasks:
return True
if "running" not in tasks[cur_key]:
return False
return not tasks[cur_key]["running"]
def start_cargo_cleaner():
def get_next_cargo():
with lock:
next_ttl = None
for value in cargo.values():
ttl, _ = value
if next_ttl is None or ttl < next_ttl:
next_ttl = ttl
return next_ttl
def clean_for(timestamp):
with lock:
keys = []
for (key, value) in cargo.items():
ttl, _ = value
if ttl > timestamp:
continue
keys.append(key)
for k in keys:
cargo.pop(k)
msg("purged cargo that was never read ({0})", k)
def remove_cleaner():
with lock:
if get_next_cargo() is not None:
return False
cargo_cleaner[0] = None
return True
def clean():
while True:
next_ttl = get_next_cargo()
if next_ttl is None:
if remove_cleaner():
break
else:
continue
time_until = next_ttl - time.time()
if time_until > 0:
time.sleep(time_until)
clean_for(time.time())
with lock:
if cargo_cleaner[0] is not None:
return
cleaner = self._thread_factory(
target=clean,
name="{0}-Cargo-Cleaner".format(self.__class__))
cleaner.daemon = True
cargo_cleaner[0] = cleaner
cleaner.start()
def add_cargo(content):
with lock:
mcs = self.max_chunk_size
if mcs < 1:
raise ValueError("invalid chunk size: {0}".format(mcs))
ttl = time.time() + 10 * 60 # 10 minutes
chunks = []
while len(content) > 0:
chunk = content[:mcs]
content = content[mcs:]
cur_key = get_key()
cargo[cur_key] = (ttl, chunk)
chunks.append(cur_key)
start_cargo_cleaner()
return chunks
def remove_cargo(cur_key):
with lock:
_, result = cargo.pop(cur_key)
return result
def remove_worker(cur_key):
with lock:
task = tasks.pop(cur_key, None)
if task is None:
err_msg = "Task {0} not found!".format(cur_key)
return None, (ValueError(err_msg), None)
if task["running"]:
th = task["thread"]
if th.is_alive():
# kill the thread
tid = None
for tk, tobj in threading._active.items():
if tobj is th:
tid = tk
break
if tid is not None:
papi = ctypes.pythonapi
pts_sae = papi.PyThreadState_SetAsyncExc
res = pts_sae(ctypes.c_long(tid),
ctypes.py_object(WorkerDeath))
if res == 0:
# invalid thread id -- the thread might
# be done already
msg("invalid thread id for " +
"killing worker {0}", cur_key)
elif res != 1:
# roll back
pts_sae(ctypes.c_long(tid), None)
msg("killed too many ({0}) workers? {1}",
res, cur_key)
else:
if self.verbose_workers:
msg("killed worker {0}", cur_key)
err_msg = "Task {0} is still running!".format(cur_key)
return None, (ValueError(err_msg), None)
return task["result"], task["exception"]
def start_worker(args, cur_key, get_thread):
try:
with lock:
task = {
"running": True,
"result": None,
"exception": None,
"thread": get_thread(),
}
tasks[cur_key] = task
if use_cache:
cache_obj = cache_id(args)
if cache_obj is not None and self.cache is not None:
with self.cache.get_hnd(
cache_obj,
section=cache_section,
method=cache_method) as hnd:
if hnd.has():
result = hnd.read()
else:
result = hnd.write(json_dumps(fun(args)))
else:
result = json_dumps(fun(args))
else:
result = json_dumps(fun(args))
with lock:
task["running"] = False
task["result"] = result
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
with lock:
task["running"] = False
task["exception"] = (e, traceback.format_exc())
return
# make sure the result does not get stored forever
try:
# remove 2 minutes after not reading the result
time.sleep(120)
finally:
_result, err = remove_worker(cur_key)
if err is not None:
e, tb = err
if tb is not None:
msg("Error in purged worker for {0}: {1}\n{2}",
cur_key, e, tb)
return
msg("purged result that was never read ({0})", cur_key)
def get_key():
with lock:
crc32 = zlib.crc32(repr(get_time()).encode('utf8'))
cur_key = int(crc32 & 0xFFFFFFFF)
while cur_key in tasks or cur_key in cargo:
key = int(cur_key + 1)
if key == cur_key:
key = 0
cur_key = key
return cur_key
def reserve_worker():
with lock:
cur_key = get_key()
tasks[cur_key] = {} # put marker
return cur_key
def run_worker(req, args):
post = args["post"]
try:
action = post["action"]
cur_key = None
if action == "stop":
cur_key = post["token"]
remove_worker(cur_key) # throw away the result
return {
"token": cur_key,
"done": True,
"result": None,
"continue": False,
}
if action == "start":
cur_key = reserve_worker()
inner_post = post.get("payload", {})
th = []
wname = "{0}-Worker-{1}".format(self.__class__,
cur_key)
worker = self._thread_factory(
target=start_worker,
name=wname,
args=(inner_post, cur_key, lambda: th[0]))
th.append(worker)
worker.start()
# give fast tasks a way to immediately return results
time.sleep(0.1)
if action == "cargo":
cur_key = post["token"]
result = remove_cargo(cur_key)
return {
"token": cur_key,
"result": result,
}
if action == "get":
cur_key = post["token"]
if cur_key is None:
raise ValueError("invalid action: {0}".format(action))
if is_done(cur_key):
result, exception = remove_worker(cur_key)
if exception is not None:
e, tb = exception
if tb is None:
# token does not exist anymore
return {
"token": cur_key,
"done": False,
"result": None,
"continue": False,
}
if isinstance(e, PreventDefaultResponse):
raise e
msg("Error in worker for {0}: {1}\n{2}",
cur_key, e, tb)
raise PreventDefaultResponse(500, "worker error")
if len(result) > self.max_chunk_size:
cargo_keys = add_cargo(result)
return {
"token": cur_key,
"done": True,
"result": cargo_keys,
"continue": True,
}
return {
"token": cur_key,
"done": True,
"result": result,
"continue": False,
}
return {
"token": cur_key,
"done": False,
"result": None,
"continue": True,
}
except: # nopep8
msg("Error processing worker command: {0}", post)
raise
self.add_json_post_mask(mask, run_worker)
self.set_file_argc(mask, 0)
return fun
return wrapper |
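A hedged sketch of registering a worker endpoint with the decorator above; server stands in for the HTTP server instance that defines json_worker, and the browser is expected to poll the result through the matching worker JavaScript helper:
@server.json_worker('/api/slow_sum')
def slow_sum(payload):
    # payload is the JSON body posted by the worker client
    return {"sum": sum(payload.get("values", []))}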
def get_season_player_stats(self, season_key, player_key):
"""
Calling Season Player Stats API.
Arg:
season_key: key of the season
player_key: key of the player
Return:
json data
"""
season_player_stats_url = self.api_path + "season/" + season_key + "/player/" + player_key + "/stats/"
response = self.get_response(season_player_stats_url)
return response | Calling Season Player Stats API.
Arg:
season_key: key of the season
player_key: key of the player
Return:
json data | Below is the the instruction that describes the task:
### Input:
Calling Season Player Stats API.
Arg:
season_key: key of the season
player_key: key of the player
Return:
json data
### Response:
def get_season_player_stats(self, season_key, player_key):
"""
Calling Season Player Stats API.
Arg:
season_key: key of the season
player_key: key of the player
Return:
json data
"""
season_player_stats_url = self.api_path + "season/" + season_key + "/player/" + player_key + "/stats/"
response = self.get_response(season_player_stats_url)
return response |
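A call sketch for get_season_player_stats; client is an already-authenticated API wrapper and both keys are placeholders:
stats = client.get_season_player_stats("iplt20_2017", "ab_de_villiers")
print(stats)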
def index_bams(job, config):
"""
Convenience job for handling bam indexing to make the workflow declaration cleaner
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
"""
job.fileStore.logToMaster('Indexed sample BAMS: ' + config.uuid)
disk = '1G' if config.ci_test else '20G'
config.normal_bai = job.addChildJobFn(run_samtools_index, config.normal_bam, cores=1, disk=disk).rv()
config.tumor_bai = job.addChildJobFn(run_samtools_index, config.tumor_bam, cores=1, disk=disk).rv()
job.addFollowOnJobFn(preprocessing_declaration, config) | Convenience job for handling bam indexing to make the workflow declaration cleaner
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs | Below is the the instruction that describes the task:
### Input:
Convenience job for handling bam indexing to make the workflow declaration cleaner
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
### Response:
def index_bams(job, config):
"""
Convenience job for handling bam indexing to make the workflow declaration cleaner
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
"""
job.fileStore.logToMaster('Indexed sample BAMS: ' + config.uuid)
disk = '1G' if config.ci_test else '20G'
config.normal_bai = job.addChildJobFn(run_samtools_index, config.normal_bam, cores=1, disk=disk).rv()
config.tumor_bai = job.addChildJobFn(run_samtools_index, config.tumor_bam, cores=1, disk=disk).rv()
job.addFollowOnJobFn(preprocessing_declaration, config) |
def verify_compact_verbose(self, jws=None, keys=None, allow_none=False,
sigalg=None):
"""
Verify a JWT signature and return dict with validation results
:param jws: A signed JSON Web Token
:param keys: A list of keys that can possibly be used to verify the
signature
:param allow_none: If signature algorithm 'none' is allowed
:param sigalg: Expected sigalg
:return: Dictionary with 2 keys 'msg' required, 'key' optional.
The value of 'msg' is the unpacked and verified message.
The value of 'key' is the key used to verify the message
"""
if jws:
jwt = JWSig().unpack(jws)
if len(jwt) != 3:
raise WrongNumberOfParts(len(jwt))
self.jwt = jwt
elif not self.jwt:
raise ValueError('Missing singed JWT')
else:
jwt = self.jwt
try:
_alg = jwt.headers["alg"]
except KeyError:
_alg = None
else:
if _alg is None or _alg.lower() == "none":
if allow_none:
self.msg = jwt.payload()
return {'msg': self.msg}
else:
raise SignerAlgError("none not allowed")
if "alg" in self and self['alg'] and _alg:
if isinstance(self['alg'], list):
if _alg not in self["alg"] :
raise SignerAlgError(
"Wrong signing algorithm, expected {} got {}".format(
self['alg'], _alg))
elif _alg != self['alg']:
raise SignerAlgError(
"Wrong signing algorithm, expected {} got {}".format(
self['alg'], _alg))
if sigalg and sigalg != _alg:
raise SignerAlgError("Expected {0} got {1}".format(
sigalg, jwt.headers["alg"]))
self["alg"] = _alg
if keys:
_keys = self.pick_keys(keys)
else:
_keys = self.pick_keys(self._get_keys())
if not _keys:
if "kid" in self:
raise NoSuitableSigningKeys(
"No key with kid: %s" % (self["kid"]))
elif "kid" in self.jwt.headers:
raise NoSuitableSigningKeys(
"No key with kid: %s" % (self.jwt.headers["kid"]))
else:
raise NoSuitableSigningKeys("No key for algorithm: %s" % _alg)
verifier = SIGNER_ALGS[_alg]
for key in _keys:
if isinstance(key, AsymmetricKey):
_key = key.public_key()
else:
_key = key.key
try:
if not verifier.verify(jwt.sign_input(), jwt.signature(), _key):
continue
except (BadSignature, IndexError):
pass
except (ValueError, TypeError) as err:
logger.warning('Exception "{}" caught'.format(err))
else:
logger.debug(
"Verified message using key with kid=%s" % key.kid)
self.msg = jwt.payload()
self.key = key
return {'msg': self.msg, 'key': key}
raise BadSignature() | Verify a JWT signature and return dict with validation results
:param jws: A signed JSON Web Token
:param keys: A list of keys that can possibly be used to verify the
signature
:param allow_none: If signature algorithm 'none' is allowed
:param sigalg: Expected sigalg
:return: Dictionary with 2 keys 'msg' required, 'key' optional.
The value of 'msg' is the unpacked and verified message.
The value of 'key' is the key used to verify the message | Below is the the instruction that describes the task:
### Input:
Verify a JWT signature and return dict with validation results
:param jws: A signed JSON Web Token
:param keys: A list of keys that can possibly be used to verify the
signature
:param allow_none: If signature algorithm 'none' is allowed
:param sigalg: Expected sigalg
:return: Dictionary with 2 keys 'msg' required, 'key' optional.
The value of 'msg' is the unpacked and verified message.
The value of 'key' is the key used to verify the message
### Response:
def verify_compact_verbose(self, jws=None, keys=None, allow_none=False,
sigalg=None):
"""
Verify a JWT signature and return dict with validation results
:param jws: A signed JSON Web Token
:param keys: A list of keys that can possibly be used to verify the
signature
:param allow_none: If signature algorithm 'none' is allowed
:param sigalg: Expected sigalg
:return: Dictionary with 2 keys 'msg' required, 'key' optional.
The value of 'msg' is the unpacked and verified message.
The value of 'key' is the key used to verify the message
"""
if jws:
jwt = JWSig().unpack(jws)
if len(jwt) != 3:
raise WrongNumberOfParts(len(jwt))
self.jwt = jwt
elif not self.jwt:
raise ValueError('Missing signed JWT')
else:
jwt = self.jwt
try:
_alg = jwt.headers["alg"]
except KeyError:
_alg = None
else:
if _alg is None or _alg.lower() == "none":
if allow_none:
self.msg = jwt.payload()
return {'msg': self.msg}
else:
raise SignerAlgError("none not allowed")
if "alg" in self and self['alg'] and _alg:
if isinstance(self['alg'], list):
if _alg not in self["alg"] :
raise SignerAlgError(
"Wrong signing algorithm, expected {} got {}".format(
self['alg'], _alg))
elif _alg != self['alg']:
raise SignerAlgError(
"Wrong signing algorithm, expected {} got {}".format(
self['alg'], _alg))
if sigalg and sigalg != _alg:
raise SignerAlgError("Expected {0} got {1}".format(
sigalg, jwt.headers["alg"]))
self["alg"] = _alg
if keys:
_keys = self.pick_keys(keys)
else:
_keys = self.pick_keys(self._get_keys())
if not _keys:
if "kid" in self:
raise NoSuitableSigningKeys(
"No key with kid: %s" % (self["kid"]))
elif "kid" in self.jwt.headers:
raise NoSuitableSigningKeys(
"No key with kid: %s" % (self.jwt.headers["kid"]))
else:
raise NoSuitableSigningKeys("No key for algorithm: %s" % _alg)
verifier = SIGNER_ALGS[_alg]
for key in _keys:
if isinstance(key, AsymmetricKey):
_key = key.public_key()
else:
_key = key.key
try:
if not verifier.verify(jwt.sign_input(), jwt.signature(), _key):
continue
except (BadSignature, IndexError):
pass
except (ValueError, TypeError) as err:
logger.warning('Exception "{}" caught'.format(err))
else:
logger.debug(
"Verified message using key with kid=%s" % key.kid)
self.msg = jwt.payload()
self.key = key
return {'msg': self.msg, 'key': key}
raise BadSignature() |
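A loose sketch of verifying a compact JWS with a symmetric key; the import paths and key class follow a cryptojwt-style layout and are assumptions, so adjust them to the package this method actually lives in:
from cryptojwt.jwk.hmac import SYMKey
from cryptojwt.jws.jws import JWS
key = SYMKey(key="supersecretsigningkey")
token = JWS('{"iss": "me"}', alg="HS256").sign_compact([key])
info = JWS(alg="HS256").verify_compact_verbose(token, keys=[key])
print(info["msg"], info.get("key"))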
def get_shape(self, prune=False, hs_dims=None):
"""Tuple of array dimensions' lengths.
It returns a tuple of ints, each representing the length of a cube
dimension, in the order those dimensions appear in the cube.
Pruning is supported. Dimensions that get reduced to a single element
(e.g. due to pruning) are removed from the returning shape, thus
allowing for the differentiation between true 2D cubes (over which
statistical testing can be performed) and essentially
1D cubes (over which it can't).
Usage:
>>> shape = get_shape()
>>> pruned_shape = get_shape(prune=True)
"""
if not prune:
return self.as_array(include_transforms_for_dims=hs_dims).shape
shape = compress_pruned(
self.as_array(prune=True, include_transforms_for_dims=hs_dims)
).shape
# Eliminate dimensions that get reduced to 1
# (e.g. single element categoricals)
return tuple(n for n in shape if n > 1) | Tuple of array dimensions' lengths.
It returns a tuple of ints, each representing the length of a cube
dimension, in the order those dimensions appear in the cube.
Pruning is supported. Dimensions that get reduced to a single element
(e.g. due to pruning) are removed from the returning shape, thus
allowing for the differentiation between true 2D cubes (over which
statistical testing can be performed) and essentially
1D cubes (over which it can't).
Usage:
>>> shape = get_shape()
>>> pruned_shape = get_shape(prune=True) | Below is the the instruction that describes the task:
### Input:
Tuple of array dimensions' lengths.
It returns a tuple of ints, each representing the length of a cube
dimension, in the order those dimensions appear in the cube.
Pruning is supported. Dimensions that get reduced to a single element
(e.g. due to pruning) are removed from the returning shape, thus
allowing for the differentiation between true 2D cubes (over which
statistical testing can be performed) and essentially
1D cubes (over which it can't).
Usage:
>>> shape = get_shape()
>>> pruned_shape = get_shape(prune=True)
### Response:
def get_shape(self, prune=False, hs_dims=None):
"""Tuple of array dimensions' lengths.
It returns a tuple of ints, each representing the length of a cube
dimension, in the order those dimensions appear in the cube.
Pruning is supported. Dimensions that get reduced to a single element
(e.g. due to pruning) are removed from the returning shape, thus
allowing for the differentiation between true 2D cubes (over which
statistical testing can be performed) and essentially
1D cubes (over which it can't).
Usage:
>>> shape = get_shape()
>>> pruned_shape = get_shape(prune=True)
"""
if not prune:
return self.as_array(include_transforms_for_dims=hs_dims).shape
shape = compress_pruned(
self.as_array(prune=True, include_transforms_for_dims=hs_dims)
).shape
# Eliminate dimensions that get reduced to 1
# (e.g. single element categoricals)
return tuple(n for n in shape if n > 1) |
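A usage sketch for get_shape; cube stands in for a cube object exposing this method:
full_shape = cube.get_shape()              # lengths of all dimensions
pruned_shape = cube.get_shape(prune=True)  # single-element dimensions dropped
print(full_shape, pruned_shape)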
def walk(self, basedir):
"""Walk all the directories of basedir except hidden directories
:param basedir: string, the directory to walk
:returns: generator, same as os.walk
"""
system_d = SitePackagesDir()
filter_system_d = system_d and os.path.commonprefix([system_d, basedir]) != system_d
for root, dirs, files in os.walk(basedir, topdown=True):
# ignore dot directories and private directories (start with underscore)
dirs[:] = [d for d in dirs if d[0] != '.' and d[0] != "_"]
if filter_system_d:
dirs[:] = [d for d in dirs if not d.startswith(system_d)]
yield root, dirs, files | Walk all the directories of basedir except hidden directories
:param basedir: string, the directory to walk
:returns: generator, same as os.walk | Below is the the instruction that describes the task:
### Input:
Walk all the directories of basedir except hidden directories
:param basedir: string, the directory to walk
:returns: generator, same as os.walk
### Response:
def walk(self, basedir):
"""Walk all the directories of basedir except hidden directories
:param basedir: string, the directory to walk
:returns: generator, same as os.walk
"""
system_d = SitePackagesDir()
filter_system_d = system_d and os.path.commonprefix([system_d, basedir]) != system_d
for root, dirs, files in os.walk(basedir, topdown=True):
# ignore dot directories and private directories (start with underscore)
dirs[:] = [d for d in dirs if d[0] != '.' and d[0] != "_"]
if filter_system_d:
dirs[:] = [d for d in dirs if not d.startswith(system_d)]
yield root, dirs, files |
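A short sketch of iterating the filtered walk above; finder stands in for an instance of the class that defines it:
for root, dirs, files in finder.walk("/path/to/project"):
    print(root, len(files))  # hidden and underscore-prefixed directories are already pruned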
def add_generic_info_message_for_error(request):
"""
Add message to request indicating that there was an issue processing request.
Arguments:
request: The current request.
"""
messages.info(
request,
_(
'{strong_start}Something happened.{strong_end} '
'{span_start}This course link is currently invalid. '
'Please reach out to your Administrator for assistance to this course.{span_end}'
).format(
span_start='<span>',
span_end='</span>',
strong_start='<strong>',
strong_end='</strong>',
)
) | Add message to request indicating that there was an issue processing request.
Arguments:
request: The current request. | Below is the the instruction that describes the task:
### Input:
Add message to request indicating that there was an issue processing request.
Arguments:
request: The current request.
### Response:
def add_generic_info_message_for_error(request):
"""
Add message to request indicating that there was an issue processing request.
Arguments:
request: The current request.
"""
messages.info(
request,
_(
'{strong_start}Something happened.{strong_end} '
'{span_start}This course link is currently invalid. '
'Please reach out to your Administrator for assistance to this course.{span_end}'
).format(
span_start='<span>',
span_end='</span>',
strong_start='<strong>',
strong_end='</strong>',
)
) |
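A hedged Django view sketch using the helper above; the URL name is a placeholder and the messages middleware is assumed to be enabled:
from django.shortcuts import redirect
def broken_course_link(request):
    add_generic_info_message_for_error(request)
    return redirect("dashboard")  # the info message renders on the next page load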
def get_plaintext_citations(arxiv_id):
"""
Get the citations of a given preprint, in plain text.
.. note::
Bulk download of sources from arXiv is not permitted by their API. \
You should have a look at http://arxiv.org/help/bulk_data_s3.
:param arxiv_id: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``) in \
a canonical form.
:returns: A list of cleaned plaintext citations.
"""
plaintext_citations = []
# Get the list of bbl files for this preprint
bbl_files = arxiv.get_bbl(arxiv_id)
for bbl_file in bbl_files:
# Fetch the cited DOIs for each of the bbl files
plaintext_citations.extend(bbl.get_plaintext_citations(bbl_file))
return plaintext_citations | Get the citations of a given preprint, in plain text.
.. note::
Bulk download of sources from arXiv is not permitted by their API. \
You should have a look at http://arxiv.org/help/bulk_data_s3.
:param arxiv_id: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``) in \
a canonical form.
    :returns: A list of cleaned plaintext citations. | Below is the instruction that describes the task:
### Input:
Get the citations of a given preprint, in plain text.
.. note::
Bulk download of sources from arXiv is not permitted by their API. \
You should have a look at http://arxiv.org/help/bulk_data_s3.
:param arxiv_id: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``) in \
a canonical form.
:returns: A list of cleaned plaintext citations.
### Response:
def get_plaintext_citations(arxiv_id):
"""
Get the citations of a given preprint, in plain text.
.. note::
Bulk download of sources from arXiv is not permitted by their API. \
You should have a look at http://arxiv.org/help/bulk_data_s3.
:param arxiv_id: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``) in \
a canonical form.
:returns: A list of cleaned plaintext citations.
"""
plaintext_citations = []
# Get the list of bbl files for this preprint
bbl_files = arxiv.get_bbl(arxiv_id)
for bbl_file in bbl_files:
# Fetch the cited DOIs for each of the bbl files
plaintext_citations.extend(bbl.get_plaintext_citations(bbl_file))
return plaintext_citations |
def fit_transform(self, X, y=None):
'''
Flips the negative eigenvalues of X.
Parameters
----------
X : array, shape [n, n]
The *symmetric* input similarities. If X is asymmetric, it will be
treated as if it were symmetric based on its lower-triangular part.
Returns
-------
Xt : array, shape [n, n]
The transformed training similarities.
'''
n = X.shape[0]
if X.shape != (n, n):
raise TypeError("Input must be a square matrix.")
memory = get_memory(self.memory)
discard_X = not self.copy and self.negatives_likely
vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])(
X, overwrite_a=discard_X)
vals = vals[:, None]
self.clip_ = np.dot(vecs, np.sign(vals) * vecs.T)
if discard_X or vals[0, 0] < 0:
del X
np.abs(vals, out=vals)
X = np.dot(vecs, vals * vecs.T)
del vals, vecs
# should be symmetric, but make sure because floats
X = Symmetrize(copy=False).fit_transform(X)
return X | Flips the negative eigenvalues of X.
Parameters
----------
X : array, shape [n, n]
The *symmetric* input similarities. If X is asymmetric, it will be
treated as if it were symmetric based on its lower-triangular part.
Returns
-------
Xt : array, shape [n, n]
        The transformed training similarities. | Below is the instruction that describes the task:
### Input:
Flips the negative eigenvalues of X.
Parameters
----------
X : array, shape [n, n]
The *symmetric* input similarities. If X is asymmetric, it will be
treated as if it were symmetric based on its lower-triangular part.
Returns
-------
Xt : array, shape [n, n]
The transformed training similarities.
### Response:
def fit_transform(self, X, y=None):
'''
Flips the negative eigenvalues of X.
Parameters
----------
X : array, shape [n, n]
The *symmetric* input similarities. If X is asymmetric, it will be
treated as if it were symmetric based on its lower-triangular part.
Returns
-------
Xt : array, shape [n, n]
The transformed training similarities.
'''
n = X.shape[0]
if X.shape != (n, n):
raise TypeError("Input must be a square matrix.")
memory = get_memory(self.memory)
discard_X = not self.copy and self.negatives_likely
vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])(
X, overwrite_a=discard_X)
vals = vals[:, None]
self.clip_ = np.dot(vecs, np.sign(vals) * vecs.T)
if discard_X or vals[0, 0] < 0:
del X
np.abs(vals, out=vals)
X = np.dot(vecs, vals * vecs.T)
del vals, vecs
# should be symmetric, but make sure because floats
X = Symmetrize(copy=False).fit_transform(X)
return X |
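The flip performed above can be seen in isolation: decompose a symmetric similarity matrix, take absolute values of the eigenvalues, and rebuild. A small numpy-only illustration (no caching and no Symmetrize helper):

import numpy as np

# A symmetric 'similarity' matrix with one negative eigenvalue.
X = np.array([[ 1.0, 0.9, -0.4],
              [ 0.9, 1.0,  0.3],
              [-0.4, 0.3,  1.0]])

vals, vecs = np.linalg.eigh(X)
print(vals)                          # the smallest eigenvalue is negative

# Rebuild the matrix from |eigenvalues| (the 'flip').
X_flip = (vecs * np.abs(vals)) @ vecs.T
print(np.linalg.eigvalsh(X_flip))    # all eigenvalues are now non-negative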
def textContent(self, text: str) -> None: # type: ignore
"""Set textContent both on this node and related browser node."""
self._set_text_content(text)
if self.connected:
self._set_text_content_web(text) | Set textContent both on this node and related browser node. | Below is the the instruction that describes the task:
### Input:
Set textContent both on this node and related browser node.
### Response:
def textContent(self, text: str) -> None: # type: ignore
"""Set textContent both on this node and related browser node."""
self._set_text_content(text)
if self.connected:
self._set_text_content_web(text) |
def get_identifiers(source_code):
'''Split source code into python identifier-like tokens'''
tokens = set(re.split(r"[^0-9a-zA-Z_.]", source_code))
valid = re.compile(r'[a-zA-Z_]')
    return [token for token in tokens if re.match(valid, token)] | Split source code into python identifier-like tokens | Below is the instruction that describes the task:
### Input:
Split source code into python identifier-like tokens
### Response:
def get_identifiers(source_code):
'''Split source code into python identifier-like tokens'''
tokens = set(re.split(r"[^0-9a-zA-Z_.]", source_code))
valid = re.compile(r'[a-zA-Z_]')
return [token for token in tokens if re.match(valid, token)] |
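What the two regular expressions above keep and drop is easiest to see on a concrete line of source (the sample string is arbitrary):

import re

source = "result = obj.method(arg1, _private) + 3*x"
tokens = set(re.split(r"[^0-9a-zA-Z_.]", source))
idents = [t for t in tokens if re.match(r'[a-zA-Z_]', t)]
print(sorted(idents))   # ['_private', 'arg1', 'obj.method', 'result', 'x']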
async def registry_dump_handle(request):
'''
only read
:param request:
:return:
'''
registry = registry_dump_handle.registry
response_dict = {}
repo = registry._repository
response_dict['registered_services'] = repo._registered_services
response_dict['uptimes'] = repo._uptimes
response_dict['service_dependencies'] = repo._service_dependencies
return web.Response(status=400, content_type='application/json', body=json.dumps(response_dict).encode()) | only read
:param request:
    :return: | Below is the instruction that describes the task:
### Input:
only read
:param request:
:return:
### Response:
async def registry_dump_handle(request):
'''
only read
:param request:
:return:
'''
registry = registry_dump_handle.registry
response_dict = {}
repo = registry._repository
response_dict['registered_services'] = repo._registered_services
response_dict['uptimes'] = repo._uptimes
response_dict['service_dependencies'] = repo._service_dependencies
return web.Response(status=400, content_type='application/json', body=json.dumps(response_dict).encode()) |
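Worth noting: the handler above returns status 400 even when it succeeds, which looks unintended for a read-only dump endpoint. A minimal hedged sketch of the same pattern with a conventional 200 (handler name, route and payload are placeholders):

import json
from aiohttp import web

async def dump_state(request):
    state = {'registered_services': [], 'uptimes': {}}   # placeholder payload
    return web.Response(status=200,
                        content_type='application/json',
                        body=json.dumps(state).encode())

app = web.Application()
app.router.add_get('/registry', dump_state)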
def long_click(self, duration=2.0):
"""
Perform the long click action on the UI element(s) represented by the UI proxy. If this UI proxy represents a
set of UI elements, the first one in the set is clicked and the anchor point of the UI element is used as the
default one. Similar to click but press the screen for the given time interval and then release.
Args:
duration (:py:obj:`float`): whole action duration.
Return:
the same as :py:meth:`poco.pocofw.Poco.long_click`, depending on poco agent implementation.
"""
try:
duration = float(duration)
except ValueError:
raise ValueError('Argument `duration` should be <float>. Got {}'.format(repr(duration)))
pos_in_percentage = self.get_position(self._focus or 'anchor')
self.poco.pre_action('long_click', self, pos_in_percentage)
ret = self.poco.long_click(pos_in_percentage, duration)
self.poco.post_action('long_click', self, pos_in_percentage)
return ret | Perform the long click action on the UI element(s) represented by the UI proxy. If this UI proxy represents a
set of UI elements, the first one in the set is clicked and the anchor point of the UI element is used as the
default one. Similar to click but press the screen for the given time interval and then release.
Args:
duration (:py:obj:`float`): whole action duration.
Return:
        the same as :py:meth:`poco.pocofw.Poco.long_click`, depending on poco agent implementation. | Below is the instruction that describes the task:
### Input:
Perform the long click action on the UI element(s) represented by the UI proxy. If this UI proxy represents a
set of UI elements, the first one in the set is clicked and the anchor point of the UI element is used as the
default one. Similar to click but press the screen for the given time interval and then release.
Args:
duration (:py:obj:`float`): whole action duration.
Return:
the same as :py:meth:`poco.pocofw.Poco.long_click`, depending on poco agent implementation.
### Response:
def long_click(self, duration=2.0):
"""
Perform the long click action on the UI element(s) represented by the UI proxy. If this UI proxy represents a
set of UI elements, the first one in the set is clicked and the anchor point of the UI element is used as the
default one. Similar to click but press the screen for the given time interval and then release.
Args:
duration (:py:obj:`float`): whole action duration.
Return:
the same as :py:meth:`poco.pocofw.Poco.long_click`, depending on poco agent implementation.
"""
try:
duration = float(duration)
except ValueError:
raise ValueError('Argument `duration` should be <float>. Got {}'.format(repr(duration)))
pos_in_percentage = self.get_position(self._focus or 'anchor')
self.poco.pre_action('long_click', self, pos_in_percentage)
ret = self.poco.long_click(pos_in_percentage, duration)
self.poco.post_action('long_click', self, pos_in_percentage)
return ret |
def time_auth(self, load):
'''
Make sure that all failures happen in the same amount of time
'''
start = time.time()
ret = self.__auth_call(load)
if ret:
return ret
f_time = time.time() - start
if f_time > self.max_fail:
self.max_fail = f_time
deviation = self.max_fail / 4
r_time = random.SystemRandom().uniform(
self.max_fail - deviation,
self.max_fail + deviation
)
while start + r_time > time.time():
time.sleep(0.001)
        return False | Make sure that all failures happen in the same amount of time | Below is the instruction that describes the task:
### Input:
Make sure that all failures happen in the same amount of time
### Response:
def time_auth(self, load):
'''
Make sure that all failures happen in the same amount of time
'''
start = time.time()
ret = self.__auth_call(load)
if ret:
return ret
f_time = time.time() - start
if f_time > self.max_fail:
self.max_fail = f_time
deviation = self.max_fail / 4
r_time = random.SystemRandom().uniform(
self.max_fail - deviation,
self.max_fail + deviation
)
while start + r_time > time.time():
time.sleep(0.001)
return False |
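The technique above pads every failed authentication out to roughly the slowest failure seen so far, with jitter, so response time leaks little about where the check failed. A standalone sketch of the same idea, detached from the surrounding class:

import random
import time

_max_fail = 0.0

def constant_time_failure(check):
    """Run check(); on failure, sleep so all failures take a similar, jittered time."""
    global _max_fail
    start = time.time()
    if check():
        return True
    elapsed = time.time() - start
    _max_fail = max(_max_fail, elapsed)
    jitter = _max_fail / 4
    target = random.SystemRandom().uniform(_max_fail - jitter, _max_fail + jitter)
    while time.time() < start + target:
        time.sleep(0.001)
    return False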
def intervalTrees(reffh, scoreType=int, verbose=False):
"""
Build a dictionary of interval trees indexed by chrom from a BED stream or
file
:param reffh: This can be either a string, or a stream-like object. In the
former case, it is treated as a filename. The format of the
file/stream must be BED.
:param scoreType: The data type for scores (the fifth column) in the BED
file.
:param verbose: output progress messages to sys.stderr if True
"""
if type(reffh).__name__ == "str":
fh = open(reffh)
else:
fh = reffh
# load all the regions and split them into lists for each chrom
elements = {}
if verbose and fh != sys.stdin:
totalLines = linesInFile(fh.name)
pind = ProgressIndicator(totalToDo=totalLines,
messagePrefix="completed",
messageSuffix="of loading " + fh.name)
for element in BEDIterator(fh, scoreType=scoreType, verbose=verbose):
if element.chrom not in elements:
elements[element.chrom] = []
elements[element.chrom].append(element)
if verbose and fh != sys.stdin:
pind.done += 1
pind.showProgress()
# create an interval tree for each list
trees = {}
if verbose:
totalLines = len(elements)
pind = ProgressIndicator(totalToDo=totalLines,
messagePrefix="completed",
messageSuffix="of making interval trees")
for chrom in elements:
trees[chrom] = IntervalTree(elements[chrom], openEnded=True)
if verbose:
pind.done += 1
pind.showProgress()
return trees | Build a dictionary of interval trees indexed by chrom from a BED stream or
file
:param reffh: This can be either a string, or a stream-like object. In the
former case, it is treated as a filename. The format of the
file/stream must be BED.
:param scoreType: The data type for scores (the fifth column) in the BED
file.
    :param verbose: output progress messages to sys.stderr if True | Below is the instruction that describes the task:
### Input:
Build a dictionary of interval trees indexed by chrom from a BED stream or
file
:param reffh: This can be either a string, or a stream-like object. In the
former case, it is treated as a filename. The format of the
file/stream must be BED.
:param scoreType: The data type for scores (the fifth column) in the BED
file.
:param verbose: output progress messages to sys.stderr if True
### Response:
def intervalTrees(reffh, scoreType=int, verbose=False):
"""
Build a dictionary of interval trees indexed by chrom from a BED stream or
file
:param reffh: This can be either a string, or a stream-like object. In the
former case, it is treated as a filename. The format of the
file/stream must be BED.
:param scoreType: The data type for scores (the fifth column) in the BED
file.
:param verbose: output progress messages to sys.stderr if True
"""
if type(reffh).__name__ == "str":
fh = open(reffh)
else:
fh = reffh
# load all the regions and split them into lists for each chrom
elements = {}
if verbose and fh != sys.stdin:
totalLines = linesInFile(fh.name)
pind = ProgressIndicator(totalToDo=totalLines,
messagePrefix="completed",
messageSuffix="of loading " + fh.name)
for element in BEDIterator(fh, scoreType=scoreType, verbose=verbose):
if element.chrom not in elements:
elements[element.chrom] = []
elements[element.chrom].append(element)
if verbose and fh != sys.stdin:
pind.done += 1
pind.showProgress()
# create an interval tree for each list
trees = {}
if verbose:
totalLines = len(elements)
pind = ProgressIndicator(totalToDo=totalLines,
messagePrefix="completed",
messageSuffix="of making interval trees")
for chrom in elements:
trees[chrom] = IntervalTree(elements[chrom], openEnded=True)
if verbose:
pind.done += 1
pind.showProgress()
return trees |
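Stripped of the progress reporting, the function above is "bucket every record by chromosome, then build one index per bucket". A generic sketch of that pattern with a stand-in record type (BEDIterator and IntervalTree belong to the host library and are not reproduced here):

from collections import defaultdict, namedtuple

Region = namedtuple('Region', 'chrom start end')   # stand-in for a BED element

records = [Region('chr1', 10, 50), Region('chr2', 5, 15), Region('chr1', 60, 90)]

by_chrom = defaultdict(list)
for r in records:
    by_chrom[r.chrom].append(r)

# One index per chromosome; a start-sorted list stands in for an interval tree.
index = {chrom: sorted(elems, key=lambda e: e.start) for chrom, elems in by_chrom.items()}
print({c: len(v) for c, v in index.items()})   # {'chr1': 2, 'chr2': 1}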
def find_xy_peak(img, center=None, sigma=3.0):
""" Find the center of the peak of offsets """
# find level of noise in histogram
istats = imagestats.ImageStats(img.astype(np.float32), nclip=1,
fields='stddev,mode,mean,max,min')
if istats.stddev == 0.0:
istats = imagestats.ImageStats(img.astype(np.float32),
fields='stddev,mode,mean,max,min')
imgsum = img.sum()
# clip out all values below mean+3*sigma from histogram
imgc = img[:, :].copy()
imgc[imgc < istats.mode + istats.stddev * sigma] = 0.0
# identify position of peak
yp0, xp0 = np.where(imgc == imgc.max())
# Perform bounds checking on slice from img
ymin = max(0, int(yp0[0]) - 3)
ymax = min(img.shape[0], int(yp0[0]) + 4)
xmin = max(0, int(xp0[0]) - 3)
xmax = min(img.shape[1], int(xp0[0]) + 4)
# take sum of at most a 7x7 pixel box around peak
xp_slice = (slice(ymin, ymax),
slice(xmin, xmax))
yp, xp = ndimage.measurements.center_of_mass(img[xp_slice])
if np.isnan(xp) or np.isnan(yp):
xp = 0.0
yp = 0.0
flux = 0.0
zpqual = None
else:
xp += xp_slice[1].start
yp += xp_slice[0].start
# compute S/N criteria for this peak: flux/sqrt(mean of rest of array)
flux = imgc[xp_slice].sum()
delta_size = float(img.size - imgc[xp_slice].size)
if delta_size == 0:
delta_size = 1
delta_flux = float(imgsum - flux)
if flux > imgc[xp_slice].max():
delta_flux = flux - imgc[xp_slice].max()
else:
delta_flux = flux
zpqual = flux / np.sqrt(delta_flux / delta_size)
if np.isnan(zpqual) or np.isinf(zpqual):
zpqual = None
if center is not None:
xp -= center[0]
yp -= center[1]
flux = imgc[xp_slice].max()
del imgc
    return xp, yp, flux, zpqual | Find the center of the peak of offsets | Below is the instruction that describes the task:
### Input:
Find the center of the peak of offsets
### Response:
def find_xy_peak(img, center=None, sigma=3.0):
""" Find the center of the peak of offsets """
# find level of noise in histogram
istats = imagestats.ImageStats(img.astype(np.float32), nclip=1,
fields='stddev,mode,mean,max,min')
if istats.stddev == 0.0:
istats = imagestats.ImageStats(img.astype(np.float32),
fields='stddev,mode,mean,max,min')
imgsum = img.sum()
# clip out all values below mean+3*sigma from histogram
imgc = img[:, :].copy()
imgc[imgc < istats.mode + istats.stddev * sigma] = 0.0
# identify position of peak
yp0, xp0 = np.where(imgc == imgc.max())
# Perform bounds checking on slice from img
ymin = max(0, int(yp0[0]) - 3)
ymax = min(img.shape[0], int(yp0[0]) + 4)
xmin = max(0, int(xp0[0]) - 3)
xmax = min(img.shape[1], int(xp0[0]) + 4)
# take sum of at most a 7x7 pixel box around peak
xp_slice = (slice(ymin, ymax),
slice(xmin, xmax))
yp, xp = ndimage.measurements.center_of_mass(img[xp_slice])
if np.isnan(xp) or np.isnan(yp):
xp = 0.0
yp = 0.0
flux = 0.0
zpqual = None
else:
xp += xp_slice[1].start
yp += xp_slice[0].start
# compute S/N criteria for this peak: flux/sqrt(mean of rest of array)
flux = imgc[xp_slice].sum()
delta_size = float(img.size - imgc[xp_slice].size)
if delta_size == 0:
delta_size = 1
delta_flux = float(imgsum - flux)
if flux > imgc[xp_slice].max():
delta_flux = flux - imgc[xp_slice].max()
else:
delta_flux = flux
zpqual = flux / np.sqrt(delta_flux / delta_size)
if np.isnan(zpqual) or np.isinf(zpqual):
zpqual = None
if center is not None:
xp -= center[0]
yp -= center[1]
flux = imgc[xp_slice].max()
del imgc
return xp, yp, flux, zpqual |
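The two key steps above, thresholding away the background and taking a centre of mass around the peak, can be reproduced on a synthetic frame (the row clips at mode plus sigma times stddev; mean is used here for brevity, and scipy.ndimage.center_of_mass is the current spelling of the call used in the row):

import numpy as np
from scipy import ndimage

img = np.zeros((40, 40))
img[22, 17] = 10.0                                   # one bright peak
img = ndimage.gaussian_filter(img, sigma=1.5)
img += 0.01 * np.random.default_rng(0).random(img.shape)

clipped = img.copy()
clipped[clipped < clipped.mean() + 3 * clipped.std()] = 0.0   # keep only the peak
yc, xc = ndimage.center_of_mass(clipped)             # note the (row, col) order
print(round(xc, 1), round(yc, 1))                    # approximately 17.0 22.0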
def kelly_kapowski(s, g, w, its=50, r=0.025, m=1.5, **kwargs):
"""
Compute cortical thickness using the DiReCT algorithm.
Diffeomorphic registration-based cortical thickness based on probabilistic
segmentation of an image. This is an optimization algorithm.
Arguments
---------
s : ANTsimage
segmentation image
g : ANTsImage
gray matter probability image
w : ANTsImage
white matter probability image
its : integer
convergence params - controls iterations
r : scalar
gradient descent update parameter
m : scalar
gradient field smoothing parameter
kwargs : keyword arguments
anything else, see KellyKapowski help in ANTs
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> img = ants.image_read( ants.get_ants_data('r16') ,2)
>>> img = ants.resample_image(img, (64,64),1,0)
>>> mask = ants.get_mask( img )
>>> segs = ants.kmeans_segmentation( img, k=3, kmask = mask)
>>> thick = ants.kelly_kapowski(s=segs['segmentation'], g=segs['probabilityimages'][1],
w=segs['probabilityimages'][2], its=45,
r=0.5, m=1)
"""
if isinstance(s, iio.ANTsImage):
s = s.clone('unsigned int')
d = s.dimension
outimg = g.clone()
kellargs = {'d': d,
's': s,
'g': g,
'w': w,
'c': its,
'r': r,
'm': m,
'o': outimg}
for k, v in kwargs.items():
kellargs[k] = v
processed_kellargs = utils._int_antsProcessArguments(kellargs)
libfn = utils.get_lib_fn('KellyKapowski')
libfn(processed_kellargs)
return outimg | Compute cortical thickness using the DiReCT algorithm.
Diffeomorphic registration-based cortical thickness based on probabilistic
segmentation of an image. This is an optimization algorithm.
Arguments
---------
s : ANTsimage
segmentation image
g : ANTsImage
gray matter probability image
w : ANTsImage
white matter probability image
its : integer
convergence params - controls iterations
r : scalar
gradient descent update parameter
m : scalar
gradient field smoothing parameter
kwargs : keyword arguments
anything else, see KellyKapowski help in ANTs
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> img = ants.image_read( ants.get_ants_data('r16') ,2)
>>> img = ants.resample_image(img, (64,64),1,0)
>>> mask = ants.get_mask( img )
>>> segs = ants.kmeans_segmentation( img, k=3, kmask = mask)
>>> thick = ants.kelly_kapowski(s=segs['segmentation'], g=segs['probabilityimages'][1],
w=segs['probabilityimages'][2], its=45,
                              r=0.5, m=1) | Below is the instruction that describes the task:
### Input:
Compute cortical thickness using the DiReCT algorithm.
Diffeomorphic registration-based cortical thickness based on probabilistic
segmentation of an image. This is an optimization algorithm.
Arguments
---------
s : ANTsimage
segmentation image
g : ANTsImage
gray matter probability image
w : ANTsImage
white matter probability image
its : integer
convergence params - controls iterations
r : scalar
gradient descent update parameter
m : scalar
gradient field smoothing parameter
kwargs : keyword arguments
anything else, see KellyKapowski help in ANTs
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> img = ants.image_read( ants.get_ants_data('r16') ,2)
>>> img = ants.resample_image(img, (64,64),1,0)
>>> mask = ants.get_mask( img )
>>> segs = ants.kmeans_segmentation( img, k=3, kmask = mask)
>>> thick = ants.kelly_kapowski(s=segs['segmentation'], g=segs['probabilityimages'][1],
w=segs['probabilityimages'][2], its=45,
r=0.5, m=1)
### Response:
def kelly_kapowski(s, g, w, its=50, r=0.025, m=1.5, **kwargs):
"""
Compute cortical thickness using the DiReCT algorithm.
Diffeomorphic registration-based cortical thickness based on probabilistic
segmentation of an image. This is an optimization algorithm.
Arguments
---------
s : ANTsimage
segmentation image
g : ANTsImage
gray matter probability image
w : ANTsImage
white matter probability image
its : integer
convergence params - controls iterations
r : scalar
gradient descent update parameter
m : scalar
gradient field smoothing parameter
kwargs : keyword arguments
anything else, see KellyKapowski help in ANTs
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> img = ants.image_read( ants.get_ants_data('r16') ,2)
>>> img = ants.resample_image(img, (64,64),1,0)
>>> mask = ants.get_mask( img )
>>> segs = ants.kmeans_segmentation( img, k=3, kmask = mask)
>>> thick = ants.kelly_kapowski(s=segs['segmentation'], g=segs['probabilityimages'][1],
w=segs['probabilityimages'][2], its=45,
r=0.5, m=1)
"""
if isinstance(s, iio.ANTsImage):
s = s.clone('unsigned int')
d = s.dimension
outimg = g.clone()
kellargs = {'d': d,
's': s,
'g': g,
'w': w,
'c': its,
'r': r,
'm': m,
'o': outimg}
for k, v in kwargs.items():
kellargs[k] = v
processed_kellargs = utils._int_antsProcessArguments(kellargs)
libfn = utils.get_lib_fn('KellyKapowski')
libfn(processed_kellargs)
return outimg |
def acknowledge_message(self):
"""Comment provided when acknowledging the alarm."""
if (self.is_acknowledged and
self._proto.acknowledgeInfo.HasField('acknowledgeMessage')):
return self._proto.acknowledgeInfo.acknowledgeMessage
return None | Comment provided when acknowledging the alarm. | Below is the the instruction that describes the task:
### Input:
Comment provided when acknowledging the alarm.
### Response:
def acknowledge_message(self):
"""Comment provided when acknowledging the alarm."""
if (self.is_acknowledged and
self._proto.acknowledgeInfo.HasField('acknowledgeMessage')):
return self._proto.acknowledgeInfo.acknowledgeMessage
return None |
def _asym_transform_deriv_shape(systematic_utilities,
alt_IDs,
rows_to_alts,
eta,
ref_position=None,
dh_dc_array=None,
fill_dc_d_eta=None,
output_array=None,
*args, **kwargs):
"""
Parameters
----------
systematic_utilities : 1D ndarray.
        Contains the systematic utilities for each available alternative
for each observation. All elements should be ints, floats, or longs.
alt_IDs : 1D ndarray.
        All elements should be ints. There should be one row per observation per
available alternative for the given observation. Elements denote the
alternative corresponding to the given row of the design matrix.
rows_to_alts : 2D ndarray.
There should be one row per observation per available alternative and
one column per possible alternative. This matrix maps the rows of the
design matrix to the possible alternatives for this dataset.
eta : 1D ndarray.
Each element should be an int, float, or long. There should be one
value per transformed shape parameter. Note that if there are J
possible alternatives in the dataset, then there should be J - 1
elements in `eta`.
ref_position : int.
Specifies the position in the array of natural shape parameters that
should be equal to 1 - the sum of the other elements. Specifies the
alternative in the ordered array of unique alternatives that is not
having its shape parameter estimated (to ensure identifiability).
dh_dc_array : 2D scipy sparse matrix.
Its data is to be replaced with the correct derivatives of the
transformed index vector with respect to the shape parameter vector.
Should have shape
`(systematic_utilities.shape[0], rows_to_alts.shape[1])`.
fill_dc_d_eta : callable.
Should accept `eta` and `ref_position` and return a 2D numpy array
containing the derivatives of the 'natural' shape parameter vector with
respect to the vector of transformed shape parameters.
output_array : 2D numpy matrix.
This matrix's data is to be replaced with the correct derivatives of
the transformed systematic utilities with respect to the vector of
transformed shape parameters. Should have shape
`(systematic_utilities.shape[0], shape_params.shape[0])`.
Returns
-------
output_array : 2D ndarray.
The shape of the returned array will be
`(systematic_utilities.shape[0], shape_params.shape[0])`. The returned
array specifies the derivative of the transformed utilities with
respect to the shape parameters.
"""
##########
# Convert the reduced shape parameters to the natural shape parameters
##########
natural_shape_params = _convert_eta_to_c(eta, ref_position)
##########
# Calculate the derivative of the transformed utilities with respect to
# the vector of natural shape parameters, c
##########
# Create a vector which contains the appropriate shape for each row in the
# design matrix. Note as long as natural_shape_params is a numpy array,
# then long_shapes will be a numpy array.
long_shapes = rows_to_alts.dot(natural_shape_params)
# Calculate d_ln(long_shape)_d_long_shape
d_lnShape_dShape = 1.0 / long_shapes
# Guard against overflow
d_lnShape_dShape[np.isposinf(d_lnShape_dShape)] = max_comp_value
# Calculate d_ln((1-long_shape)/(J-1))_d_long_shape
d_lnShapeComp_dShape = -1.0 / (1 - long_shapes)
# Guard against overflow
d_lnShapeComp_dShape[np.isneginf(d_lnShapeComp_dShape)] = -max_comp_value
# Differentiate the multiplier with respect to natural_shape_j.
deriv_multiplier = ((systematic_utilities >= 0) * d_lnShape_dShape +
(systematic_utilities < 0) * d_lnShapeComp_dShape)
# assert not np.isnan(deriv_multiplier).any()
# Calculate the derivative of h_ij with respect to natural_shape_j.
# Store these derivatives in their respective places in the dh_dc array
# Note that d_hij_d_ck = 0 for k != j
dh_dc_values = d_lnShape_dShape - systematic_utilities * deriv_multiplier
# Guard against overflow
dh_dc_values[np.isinf(dh_dc_values)] = -1 * max_comp_value
# Assign the computed values to the scipy sparse array
dh_dc_array.data = dh_dc_values
##########
# Calculate the derivative of the natural shape parameters, c with
# respect to the vector of reduced shape parameters, eta
##########
# Return the matrix of dh_d_eta. Note the matrix should be of dimension
# (systematic_utilities.shape[0], shape_params.shape[0])
# Note the calculation is essentially dh_dc * dc_d_eta = dh_d_eta
output_array[:, :] = dh_dc_array.dot(fill_dc_d_eta(natural_shape_params,
ref_position))
return output_array | Parameters
----------
systematic_utilities : 1D ndarray.
    Contains the systematic utilities for each available alternative
for each observation. All elements should be ints, floats, or longs.
alt_IDs : 1D ndarray.
    All elements should be ints. There should be one row per observation per
available alternative for the given observation. Elements denote the
alternative corresponding to the given row of the design matrix.
rows_to_alts : 2D ndarray.
There should be one row per observation per available alternative and
one column per possible alternative. This matrix maps the rows of the
design matrix to the possible alternatives for this dataset.
eta : 1D ndarray.
Each element should be an int, float, or long. There should be one
value per transformed shape parameter. Note that if there are J
possible alternatives in the dataset, then there should be J - 1
elements in `eta`.
ref_position : int.
Specifies the position in the array of natural shape parameters that
should be equal to 1 - the sum of the other elements. Specifies the
alternative in the ordered array of unique alternatives that is not
having its shape parameter estimated (to ensure identifiability).
dh_dc_array : 2D scipy sparse matrix.
Its data is to be replaced with the correct derivatives of the
transformed index vector with respect to the shape parameter vector.
Should have shape
`(systematic_utilities.shape[0], rows_to_alts.shape[1])`.
fill_dc_d_eta : callable.
Should accept `eta` and `ref_position` and return a 2D numpy array
containing the derivatives of the 'natural' shape parameter vector with
respect to the vector of transformed shape parameters.
output_array : 2D numpy matrix.
This matrix's data is to be replaced with the correct derivatives of
the transformed systematic utilities with respect to the vector of
transformed shape parameters. Should have shape
`(systematic_utilities.shape[0], shape_params.shape[0])`.
Returns
-------
output_array : 2D ndarray.
The shape of the returned array will be
`(systematic_utilities.shape[0], shape_params.shape[0])`. The returned
array specifies the derivative of the transformed utilities with
        respect to the shape parameters. | Below is the instruction that describes the task:
### Input:
Parameters
----------
systematic_utilities : 1D ndarray.
    Contains the systematic utilities for each available alternative
for each observation. All elements should be ints, floats, or longs.
alt_IDs : 1D ndarray.
    All elements should be ints. There should be one row per observation per
available alternative for the given observation. Elements denote the
alternative corresponding to the given row of the design matrix.
rows_to_alts : 2D ndarray.
There should be one row per observation per available alternative and
one column per possible alternative. This matrix maps the rows of the
design matrix to the possible alternatives for this dataset.
eta : 1D ndarray.
Each element should be an int, float, or long. There should be one
value per transformed shape parameter. Note that if there are J
possible alternatives in the dataset, then there should be J - 1
elements in `eta`.
ref_position : int.
Specifies the position in the array of natural shape parameters that
should be equal to 1 - the sum of the other elements. Specifies the
alternative in the ordered array of unique alternatives that is not
having its shape parameter estimated (to ensure identifiability).
dh_dc_array : 2D scipy sparse matrix.
Its data is to be replaced with the correct derivatives of the
transformed index vector with respect to the shape parameter vector.
Should have shape
`(systematic_utilities.shape[0], rows_to_alts.shape[1])`.
fill_dc_d_eta : callable.
Should accept `eta` and `ref_position` and return a 2D numpy array
containing the derivatives of the 'natural' shape parameter vector with
respect to the vector of transformed shape parameters.
output_array : 2D numpy matrix.
This matrix's data is to be replaced with the correct derivatives of
the transformed systematic utilities with respect to the vector of
transformed shape parameters. Should have shape
`(systematic_utilities.shape[0], shape_params.shape[0])`.
Returns
-------
output_array : 2D ndarray.
The shape of the returned array will be
`(systematic_utilities.shape[0], shape_params.shape[0])`. The returned
array specifies the derivative of the transformed utilities with
respect to the shape parameters.
### Response:
def _asym_transform_deriv_shape(systematic_utilities,
alt_IDs,
rows_to_alts,
eta,
ref_position=None,
dh_dc_array=None,
fill_dc_d_eta=None,
output_array=None,
*args, **kwargs):
"""
Parameters
----------
systematic_utilities : 1D ndarray.
        Contains the systematic utilities for each available alternative
for each observation. All elements should be ints, floats, or longs.
alt_IDs : 1D ndarray.
        All elements should be ints. There should be one row per observation per
available alternative for the given observation. Elements denote the
alternative corresponding to the given row of the design matrix.
rows_to_alts : 2D ndarray.
There should be one row per observation per available alternative and
one column per possible alternative. This matrix maps the rows of the
design matrix to the possible alternatives for this dataset.
eta : 1D ndarray.
Each element should be an int, float, or long. There should be one
value per transformed shape parameter. Note that if there are J
possible alternatives in the dataset, then there should be J - 1
elements in `eta`.
ref_position : int.
Specifies the position in the array of natural shape parameters that
should be equal to 1 - the sum of the other elements. Specifies the
alternative in the ordered array of unique alternatives that is not
having its shape parameter estimated (to ensure identifiability).
dh_dc_array : 2D scipy sparse matrix.
Its data is to be replaced with the correct derivatives of the
transformed index vector with respect to the shape parameter vector.
Should have shape
`(systematic_utilities.shape[0], rows_to_alts.shape[1])`.
fill_dc_d_eta : callable.
Should accept `eta` and `ref_position` and return a 2D numpy array
containing the derivatives of the 'natural' shape parameter vector with
respect to the vector of transformed shape parameters.
output_array : 2D numpy matrix.
This matrix's data is to be replaced with the correct derivatives of
the transformed systematic utilities with respect to the vector of
transformed shape parameters. Should have shape
`(systematic_utilities.shape[0], shape_params.shape[0])`.
Returns
-------
output_array : 2D ndarray.
The shape of the returned array will be
`(systematic_utilities.shape[0], shape_params.shape[0])`. The returned
array specifies the derivative of the transformed utilities with
respect to the shape parameters.
"""
##########
# Convert the reduced shape parameters to the natural shape parameters
##########
natural_shape_params = _convert_eta_to_c(eta, ref_position)
##########
# Calculate the derivative of the transformed utilities with respect to
# the vector of natural shape parameters, c
##########
# Create a vector which contains the appropriate shape for each row in the
# design matrix. Note as long as natural_shape_params is a numpy array,
# then long_shapes will be a numpy array.
long_shapes = rows_to_alts.dot(natural_shape_params)
# Calculate d_ln(long_shape)_d_long_shape
d_lnShape_dShape = 1.0 / long_shapes
# Guard against overflow
d_lnShape_dShape[np.isposinf(d_lnShape_dShape)] = max_comp_value
# Calculate d_ln((1-long_shape)/(J-1))_d_long_shape
d_lnShapeComp_dShape = -1.0 / (1 - long_shapes)
# Guard against overflow
d_lnShapeComp_dShape[np.isneginf(d_lnShapeComp_dShape)] = -max_comp_value
# Differentiate the multiplier with respect to natural_shape_j.
deriv_multiplier = ((systematic_utilities >= 0) * d_lnShape_dShape +
(systematic_utilities < 0) * d_lnShapeComp_dShape)
# assert not np.isnan(deriv_multiplier).any()
# Calculate the derivative of h_ij with respect to natural_shape_j.
# Store these derivatives in their respective places in the dh_dc array
# Note that d_hij_d_ck = 0 for k != j
dh_dc_values = d_lnShape_dShape - systematic_utilities * deriv_multiplier
# Guard against overflow
dh_dc_values[np.isinf(dh_dc_values)] = -1 * max_comp_value
# Assign the computed values to the scipy sparse array
dh_dc_array.data = dh_dc_values
##########
# Calculate the derivative of the natural shape parameters, c with
# respect to the vector of reduced shape parameters, eta
##########
# Return the matrix of dh_d_eta. Note the matrix should be of dimension
# (systematic_utilities.shape[0], shape_params.shape[0])
# Note the calculation is essentially dh_dc * dc_d_eta = dh_d_eta
output_array[:, :] = dh_dc_array.dot(fill_dc_d_eta(natural_shape_params,
ref_position))
return output_array |
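One building block above that is easy to miss: rows_to_alts is a sparse indicator matrix, so rows_to_alts.dot(natural_shape_params) simply broadcasts one shape value per alternative out to every (observation, alternative) row of the long-format design. A tiny illustration (two observations, three alternatives, differing availability):

import numpy as np
from scipy import sparse

rows_to_alts = sparse.csr_matrix(np.array([[1, 0, 0],    # obs 1, alt 1
                                           [0, 1, 0],    # obs 1, alt 2
                                           [0, 1, 0],    # obs 2, alt 2
                                           [0, 0, 1]]))  # obs 2, alt 3

shape_per_alt = np.array([0.2, 0.5, 0.3])
long_shapes = rows_to_alts.dot(shape_per_alt)
print(long_shapes)    # [0.2 0.5 0.5 0.3]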
def bboxiter(tile_bounds, tiles_per_row_per_region=1):
"""
Iterate through a grid of regions defined by a TileBB.
Args:
tile_bounds (GridBB):
tiles_per_row_per_region: Combine multiple tiles in one region.
E.g. if set to two, four tiles will be combined in one region.
See `kml` module description for more details. Leaving the
default value '1' simply yields all tiles in the bounding box.
Note:
If the number of regions would not be an integer due to specification
of the `tiles_per_row_per_region` parameter, the boundaries will
be rounded to the next smaller or next larger integer respectively.
Example:
We have the following bounding box with size 2x2 and set
tiles_per_row_per_region = 2, delimited by the coordinates (x, y)::
(5,5)--- ---
| |
| |
| |
--- ---(9,7)
Although this could be represented in one single region with two
tiles per row, it will create four regions::
(2,2)--- --- (5/2 = 2.5 -> 2, 5/2 = 2.5 -> 2)
| | |
--- ---
| | |
--- ---(5,4) (9/2 = 4.5 -> 5, 7/2 = 3.5 -> 4)
Yields:
Tuple: all tuples (x, y) in the region delimited by the TileBB
"""
x_lower = math.floor(tile_bounds.min.x / tiles_per_row_per_region)
y_lower = math.floor(tile_bounds.min.y / tiles_per_row_per_region)
ncol = math.ceil(tile_bounds.max.x / tiles_per_row_per_region) - x_lower
nrow = math.ceil(tile_bounds.max.y / tiles_per_row_per_region) - y_lower
yield from griditer(x_lower, y_lower, ncol, nrow) | Iterate through a grid of regions defined by a TileBB.
Args:
tile_bounds (GridBB):
tiles_per_row_per_region: Combine multiple tiles in one region.
E.g. if set to two, four tiles will be combined in one region.
See `kml` module description for more details. Leaving the
default value '1' simply yields all tiles in the bounding box.
Note:
If the number of regions would not be an integer due to specification
of the `tiles_per_row_per_region` parameter, the boundaries will
be rounded to the next smaller or next larger integer respectively.
Example:
We have the following bounding box with size 2x2 and set
tiles_per_row_per_region = 2, delimited by the coordinates (x, y)::
(5,5)--- ---
| |
| |
| |
--- ---(9,7)
Although this could be represented in one single region with two
tiles per row, it will create four regions::
(2,2)--- --- (5/2 = 2.5 -> 2, 5/2 = 2.5 -> 2)
| | |
--- ---
| | |
--- ---(5,4) (9/2 = 4.5 -> 5, 7/2 = 3.5 -> 4)
Yields:
        Tuple: all tuples (x, y) in the region delimited by the TileBB | Below is the instruction that describes the task:
### Input:
Iterate through a grid of regions defined by a TileBB.
Args:
tile_bounds (GridBB):
tiles_per_row_per_region: Combine multiple tiles in one region.
E.g. if set to two, four tiles will be combined in one region.
See `kml` module description for more details. Leaving the
default value '1' simply yields all tiles in the bounding box.
Note:
If the number of regions would not be an integer due to specification
of the `tiles_per_row_per_region` parameter, the boundaries will
be rounded to the next smaller or next larger integer respectively.
Example:
We have the following bounding box with size 2x2 and set
tiles_per_row_per_region = 2, delimited by the coordinates (x, y)::
(5,5)--- ---
| |
| |
| |
--- ---(9,7)
Although this could be represented in one single region with two
tiles per row, it will create four regions::
(2,2)--- --- (5/2 = 2.5 -> 2, 5/2 = 2.5 -> 2)
| | |
--- ---
| | |
--- ---(5,4) (9/2 = 4.5 -> 5, 7/2 = 3.5 -> 4)
Yields:
Tuple: all tuples (x, y) in the region delimited by the TileBB
### Response:
def bboxiter(tile_bounds, tiles_per_row_per_region=1):
"""
Iterate through a grid of regions defined by a TileBB.
Args:
tile_bounds (GridBB):
tiles_per_row_per_region: Combine multiple tiles in one region.
E.g. if set to two, four tiles will be combined in one region.
See `kml` module description for more details. Leaving the
default value '1' simply yields all tiles in the bounding box.
Note:
If the number of regions would not be an integer due to specification
of the `tiles_per_row_per_region` parameter, the boundaries will
be rounded to the next smaller or next larger integer respectively.
Example:
We have the following bounding box with size 2x2 and set
tiles_per_row_per_region = 2, delimited by the coordinates (x, y)::
(5,5)--- ---
| |
| |
| |
--- ---(9,7)
Although this could be represented in one single region with two
tiles per row, it will create four regions::
(2,2)--- --- (5/2 = 2.5 -> 2, 5/2 = 2.5 -> 2)
| | |
--- ---
| | |
--- ---(5,4) (9/2 = 4.5 -> 5, 7/2 = 3.5 -> 4)
Yields:
Tuple: all tuples (x, y) in the region delimited by the TileBB
"""
x_lower = math.floor(tile_bounds.min.x / tiles_per_row_per_region)
y_lower = math.floor(tile_bounds.min.y / tiles_per_row_per_region)
ncol = math.ceil(tile_bounds.max.x / tiles_per_row_per_region) - x_lower
nrow = math.ceil(tile_bounds.max.y / tiles_per_row_per_region) - y_lower
yield from griditer(x_lower, y_lower, ncol, nrow) |
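The floor/ceil bookkeeping in bboxiter is easy to check with plain numbers; griditer is project-specific, so a stand-in enumeration of the ncol x nrow block is used here:

import math
from itertools import product

def region_corners(min_xy, max_xy, tiles_per_row=2):
    (x0, y0), (x1, y1) = min_xy, max_xy
    xl, yl = math.floor(x0 / tiles_per_row), math.floor(y0 / tiles_per_row)
    ncol = math.ceil(x1 / tiles_per_row) - xl
    nrow = math.ceil(y1 / tiles_per_row) - yl
    # stand-in for griditer: every (x, y) cell of the ncol x nrow block
    return [(xl + dx, yl + dy) for dx, dy in product(range(ncol), range(nrow))]

print(region_corners((5, 5), (8, 7)))   # [(2, 2), (2, 3), (3, 2), (3, 3)]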
def get_next_step(self):
"""Find the proper step when user clicks the Next button.
:returns: The step to be switched to.
:rtype: WizardStep instance or None
"""
if self.layer_purpose != layer_purpose_aggregation:
subcategory = self.parent.step_kw_subcategory.\
selected_subcategory()
else:
subcategory = {'key': None}
if is_raster_layer(self.parent.layer):
return self.parent.step_kw_source
# Check if it can go to inasafe field step
inasafe_fields = get_non_compulsory_fields(
self.layer_purpose['key'], subcategory['key'])
if not skip_inasafe_field(self.parent.layer, inasafe_fields):
return self.parent.step_kw_inasafe_fields
# Check if it can go to inasafe default field step
default_inasafe_fields = get_fields(
self.layer_purpose['key'],
subcategory['key'],
replace_null=True,
in_group=False
)
if default_inasafe_fields:
return self.parent.step_kw_default_inasafe_fields
# Any other case
return self.parent.step_kw_source | Find the proper step when user clicks the Next button.
:returns: The step to be switched to.
    :rtype: WizardStep instance or None | Below is the instruction that describes the task:
### Input:
Find the proper step when user clicks the Next button.
:returns: The step to be switched to.
:rtype: WizardStep instance or None
### Response:
def get_next_step(self):
"""Find the proper step when user clicks the Next button.
:returns: The step to be switched to.
:rtype: WizardStep instance or None
"""
if self.layer_purpose != layer_purpose_aggregation:
subcategory = self.parent.step_kw_subcategory.\
selected_subcategory()
else:
subcategory = {'key': None}
if is_raster_layer(self.parent.layer):
return self.parent.step_kw_source
# Check if it can go to inasafe field step
inasafe_fields = get_non_compulsory_fields(
self.layer_purpose['key'], subcategory['key'])
if not skip_inasafe_field(self.parent.layer, inasafe_fields):
return self.parent.step_kw_inasafe_fields
# Check if it can go to inasafe default field step
default_inasafe_fields = get_fields(
self.layer_purpose['key'],
subcategory['key'],
replace_null=True,
in_group=False
)
if default_inasafe_fields:
return self.parent.step_kw_default_inasafe_fields
# Any other case
return self.parent.step_kw_source |
def main(self, options, args):
"""
Main routine for running the reference viewer.
`options` is a OptionParser object that has been populated with
values from parsing the command line. It should at least include
the options from add_default_options()
`args` is a list of arguments to the viewer after parsing out
options. It should contain a list of files or URLs to load.
"""
# Create a logger
logger = log.get_logger(name='ginga', options=options)
# Get settings (preferences)
basedir = paths.ginga_home
if not os.path.exists(basedir):
try:
os.mkdir(basedir)
except OSError as e:
logger.warning(
"Couldn't create ginga settings area (%s): %s" % (
basedir, str(e)))
logger.warning("Preferences will not be able to be saved")
# Set up preferences
prefs = Settings.Preferences(basefolder=basedir, logger=logger)
settings = prefs.create_category('general')
settings.set_defaults(useMatplotlibColormaps=False,
widgetSet='choose',
WCSpkg='choose', FITSpkg='choose',
recursion_limit=2000,
icc_working_profile=None,
font_scaling_factor=None,
save_layout=True,
channel_prefix="Image")
settings.load(onError='silent')
# default of 1000 is a little too small
sys.setrecursionlimit(settings.get('recursion_limit'))
# So we can find our plugins
sys.path.insert(0, basedir)
package_home = os.path.split(sys.modules['ginga.version'].__file__)[0]
child_dir = os.path.join(package_home, 'rv', 'plugins')
sys.path.insert(0, child_dir)
plugin_dir = os.path.join(basedir, 'plugins')
sys.path.insert(0, plugin_dir)
gc = os.path.join(basedir, "ginga_config.py")
have_ginga_config = os.path.exists(gc)
# User configuration, earliest possible intervention
if have_ginga_config:
try:
import ginga_config
if hasattr(ginga_config, 'init_config'):
ginga_config.init_config(self)
except Exception as e:
try:
(type, value, tb) = sys.exc_info()
tb_str = "\n".join(traceback.format_tb(tb))
except Exception:
tb_str = "Traceback information unavailable."
logger.error("Error processing Ginga config file: %s" % (
str(e)))
logger.error("Traceback:\n%s" % (tb_str))
# Choose a toolkit
if options.toolkit:
toolkit = options.toolkit
else:
toolkit = settings.get('widgetSet', 'choose')
if toolkit == 'choose':
try:
ginga_toolkit.choose()
except ImportError as e:
print("UI toolkit choose error: %s" % str(e))
sys.exit(1)
else:
ginga_toolkit.use(toolkit)
tkname = ginga_toolkit.get_family()
logger.info("Chosen toolkit (%s) family is '%s'" % (
ginga_toolkit.toolkit, tkname))
# these imports have to be here, otherwise they force the choice
# of toolkit too early
from ginga.rv.Control import GingaShell, GuiLogHandler
if settings.get('useMatplotlibColormaps', False):
# Add matplotlib color maps if matplotlib is installed
try:
from ginga import cmap
cmap.add_matplotlib_cmaps(fail_on_import_error=False)
except Exception as e:
logger.warning(
"failed to load matplotlib colormaps: %s" % (str(e)))
# user wants to set font scaling
font_scaling = settings.get('font_scaling_factor', None)
if font_scaling is not None:
from ginga.fonts import font_asst
font_asst.default_scaling_factor = font_scaling
# Set a working RGB ICC profile if user has one
working_profile = settings.get('icc_working_profile', None)
rgb_cms.working_profile = working_profile
# User wants to customize the WCS package?
if options.wcspkg:
wcspkg = options.wcspkg
else:
wcspkg = settings.get('WCSpkg', 'choose')
try:
from ginga.util import wcsmod
if wcspkg != 'choose':
assert wcsmod.use(wcspkg) is True
except Exception as e:
logger.warning(
"failed to set WCS package preference: %s" % (str(e)))
# User wants to customize the FITS package?
if options.fitspkg:
fitspkg = options.fitspkg
else:
fitspkg = settings.get('FITSpkg', 'choose')
try:
from ginga.util import io_fits
if fitspkg != 'choose':
assert io_fits.use(fitspkg) is True
except Exception as e:
logger.warning(
"failed to set FITS package preference: %s" % (str(e)))
# Check whether user wants to use OpenCv
use_opencv = settings.get('use_opencv', False)
if use_opencv or options.opencv:
from ginga import trcalc
try:
trcalc.use('opencv')
except Exception as e:
logger.warning(
"failed to set OpenCv preference: %s" % (str(e)))
# Check whether user wants to use OpenCL
use_opencl = settings.get('use_opencl', False)
if use_opencl or options.opencl:
from ginga import trcalc
try:
trcalc.use('opencl')
except Exception as e:
logger.warning(
"failed to set OpenCL preference: %s" % (str(e)))
# Create the dynamic module manager
mm = ModuleManager.ModuleManager(logger)
# Create and start thread pool
ev_quit = threading.Event()
thread_pool = Task.ThreadPool(options.numthreads, logger,
ev_quit=ev_quit)
thread_pool.startall()
# Create the Ginga main object
ginga_shell = GingaShell(logger, thread_pool, mm, prefs,
ev_quit=ev_quit)
layout_file = None
if not options.norestore and settings.get('save_layout', False):
layout_file = os.path.join(basedir, 'layout')
ginga_shell.set_layout(self.layout, layout_file=layout_file)
# User configuration (custom star catalogs, etc.)
if have_ginga_config:
try:
if hasattr(ginga_config, 'pre_gui_config'):
ginga_config.pre_gui_config(ginga_shell)
except Exception as e:
try:
(type, value, tb) = sys.exc_info()
tb_str = "\n".join(traceback.format_tb(tb))
except Exception:
tb_str = "Traceback information unavailable."
logger.error("Error importing Ginga config file: %s" % (
str(e)))
logger.error("Traceback:\n%s" % (tb_str))
# Build desired layout
ginga_shell.build_toplevel()
# Did user specify a particular geometry?
if options.geometry:
ginga_shell.set_geometry(options.geometry)
# make the list of disabled plugins
if options.disable_plugins is not None:
disabled_plugins = options.disable_plugins.lower().split(',')
else:
disabled_plugins = settings.get('disable_plugins', [])
if not isinstance(disabled_plugins, list):
disabled_plugins = disabled_plugins.lower().split(',')
# Add GUI log handler (for "Log" global plugin)
guiHdlr = GuiLogHandler(ginga_shell)
guiHdlr.setLevel(options.loglevel)
fmt = logging.Formatter(log.LOG_FORMAT)
guiHdlr.setFormatter(fmt)
logger.addHandler(guiHdlr)
# Load any custom modules
if options.modules is not None:
modules = options.modules.split(',')
else:
modules = settings.get('global_plugins', [])
if not isinstance(modules, list):
modules = modules.split(',')
for long_plugin_name in modules:
if '.' in long_plugin_name:
tmpstr = long_plugin_name.split('.')
plugin_name = tmpstr[-1]
pfx = '.'.join(tmpstr[:-1])
else:
plugin_name = long_plugin_name
pfx = None
menu_name = "%s [G]" % (plugin_name)
spec = Bunch(name=plugin_name, module=plugin_name,
ptype='global', tab=plugin_name,
menu=menu_name, category="Custom",
workspace='right', pfx=pfx)
self.add_plugin_spec(spec)
# Load any custom local plugins
if options.plugins is not None:
plugins = options.plugins.split(',')
else:
plugins = settings.get('local_plugins', [])
if not isinstance(plugins, list):
plugins = plugins.split(',')
for long_plugin_name in plugins:
if '.' in long_plugin_name:
tmpstr = long_plugin_name.split('.')
plugin_name = tmpstr[-1]
pfx = '.'.join(tmpstr[:-1])
else:
plugin_name = long_plugin_name
pfx = None
spec = Bunch(module=plugin_name, workspace='dialogs',
ptype='local', category="Custom",
hidden=False, pfx=pfx)
self.add_plugin_spec(spec)
# Add non-disabled plugins
enabled_plugins = [spec for spec in self.plugins
if spec.module.lower() not in disabled_plugins]
ginga_shell.set_plugins(enabled_plugins)
# start any plugins that have start=True
ginga_shell.boot_plugins()
ginga_shell.update_pending()
# TEMP?
tab_names = [name.lower()
for name in ginga_shell.ds.get_tabnames(group=None)]
if 'info' in tab_names:
ginga_shell.ds.raise_tab('Info')
if 'synopsis' in tab_names:
ginga_shell.ds.raise_tab('Synopsis')
if 'thumbs' in tab_names:
ginga_shell.ds.raise_tab('Thumbs')
# Add custom channels
if options.channels is not None:
channels = options.channels.split(',')
else:
channels = settings.get('channels', [])
if not isinstance(channels, list):
channels = channels.split(',')
if len(channels) == 0:
# should provide at least one default channel?
channels = [settings.get('channel_prefix', "Image")]
for chname in channels:
ginga_shell.add_channel(chname)
ginga_shell.change_channel(channels[0])
# User configuration (custom star catalogs, etc.)
if have_ginga_config:
try:
if hasattr(ginga_config, 'post_gui_config'):
ginga_config.post_gui_config(ginga_shell)
except Exception as e:
try:
(type, value, tb) = sys.exc_info()
tb_str = "\n".join(traceback.format_tb(tb))
except Exception:
tb_str = "Traceback information unavailable."
logger.error("Error processing Ginga config file: %s" % (
str(e)))
logger.error("Traceback:\n%s" % (tb_str))
# Redirect warnings to logger
for hdlr in logger.handlers:
logging.getLogger('py.warnings').addHandler(hdlr)
# Display banner the first time run, unless suppressed
showBanner = True
try:
showBanner = settings.get('showBanner')
except KeyError:
# disable for subsequent runs
settings.set(showBanner=False)
if not os.path.exists(settings.preffile):
settings.save()
if (not options.nosplash) and (len(args) == 0) and showBanner:
ginga_shell.banner(raiseTab=True)
# Handle inputs like "*.fits[ext]" that sys cmd cannot auto expand.
expanded_args = []
for imgfile in args:
if '*' in imgfile:
if '[' in imgfile and imgfile.endswith(']'):
s = imgfile.split('[')
ext = '[' + s[1]
imgfile = s[0]
else:
ext = ''
for fname in glob.iglob(imgfile):
expanded_args.append(fname + ext)
else:
expanded_args.append(imgfile)
# Assume remaining arguments are fits files and load them.
if not options.separate_channels:
chname = channels[0]
ginga_shell.gui_do(ginga_shell.open_uris, expanded_args,
chname=chname)
else:
i = 0
num_channels = len(channels)
for imgfile in expanded_args:
if i < num_channels:
chname = channels[i]
i = i + 1
else:
channel = ginga_shell.add_channel_auto()
chname = channel.name
ginga_shell.gui_do(ginga_shell.open_uris, [imgfile],
chname=chname)
try:
try:
# if there is a network component, start it
if hasattr(ginga_shell, 'start'):
task = Task.FuncTask2(ginga_shell.start)
thread_pool.addTask(task)
# Main loop to handle GUI events
logger.info("Entering mainloop...")
ginga_shell.mainloop(timeout=0.001)
except KeyboardInterrupt:
logger.error("Received keyboard interrupt!")
finally:
logger.info("Shutting down...")
ev_quit.set()
sys.exit(0) | Main routine for running the reference viewer.
`options` is a OptionParser object that has been populated with
values from parsing the command line. It should at least include
the options from add_default_options()
`args` is a list of arguments to the viewer after parsing out
        options. It should contain a list of files or URLs to load. | Below is the instruction that describes the task:
### Input:
Main routine for running the reference viewer.
    `options` is an OptionParser object that has been populated with
values from parsing the command line. It should at least include
the options from add_default_options()
`args` is a list of arguments to the viewer after parsing out
options. It should contain a list of files or URLs to load.
### Response:
def main(self, options, args):
"""
Main routine for running the reference viewer.
        `options` is an OptionParser object that has been populated with
values from parsing the command line. It should at least include
the options from add_default_options()
`args` is a list of arguments to the viewer after parsing out
options. It should contain a list of files or URLs to load.
"""
# Create a logger
logger = log.get_logger(name='ginga', options=options)
# Get settings (preferences)
basedir = paths.ginga_home
if not os.path.exists(basedir):
try:
os.mkdir(basedir)
except OSError as e:
logger.warning(
"Couldn't create ginga settings area (%s): %s" % (
basedir, str(e)))
logger.warning("Preferences will not be able to be saved")
# Set up preferences
prefs = Settings.Preferences(basefolder=basedir, logger=logger)
settings = prefs.create_category('general')
settings.set_defaults(useMatplotlibColormaps=False,
widgetSet='choose',
WCSpkg='choose', FITSpkg='choose',
recursion_limit=2000,
icc_working_profile=None,
font_scaling_factor=None,
save_layout=True,
channel_prefix="Image")
settings.load(onError='silent')
# default of 1000 is a little too small
sys.setrecursionlimit(settings.get('recursion_limit'))
# So we can find our plugins
sys.path.insert(0, basedir)
package_home = os.path.split(sys.modules['ginga.version'].__file__)[0]
child_dir = os.path.join(package_home, 'rv', 'plugins')
sys.path.insert(0, child_dir)
plugin_dir = os.path.join(basedir, 'plugins')
sys.path.insert(0, plugin_dir)
gc = os.path.join(basedir, "ginga_config.py")
have_ginga_config = os.path.exists(gc)
# User configuration, earliest possible intervention
if have_ginga_config:
try:
import ginga_config
if hasattr(ginga_config, 'init_config'):
ginga_config.init_config(self)
except Exception as e:
try:
(type, value, tb) = sys.exc_info()
tb_str = "\n".join(traceback.format_tb(tb))
except Exception:
tb_str = "Traceback information unavailable."
logger.error("Error processing Ginga config file: %s" % (
str(e)))
logger.error("Traceback:\n%s" % (tb_str))
# Choose a toolkit
if options.toolkit:
toolkit = options.toolkit
else:
toolkit = settings.get('widgetSet', 'choose')
if toolkit == 'choose':
try:
ginga_toolkit.choose()
except ImportError as e:
print("UI toolkit choose error: %s" % str(e))
sys.exit(1)
else:
ginga_toolkit.use(toolkit)
tkname = ginga_toolkit.get_family()
logger.info("Chosen toolkit (%s) family is '%s'" % (
ginga_toolkit.toolkit, tkname))
# these imports have to be here, otherwise they force the choice
# of toolkit too early
from ginga.rv.Control import GingaShell, GuiLogHandler
if settings.get('useMatplotlibColormaps', False):
# Add matplotlib color maps if matplotlib is installed
try:
from ginga import cmap
cmap.add_matplotlib_cmaps(fail_on_import_error=False)
except Exception as e:
logger.warning(
"failed to load matplotlib colormaps: %s" % (str(e)))
# user wants to set font scaling
font_scaling = settings.get('font_scaling_factor', None)
if font_scaling is not None:
from ginga.fonts import font_asst
font_asst.default_scaling_factor = font_scaling
# Set a working RGB ICC profile if user has one
working_profile = settings.get('icc_working_profile', None)
rgb_cms.working_profile = working_profile
# User wants to customize the WCS package?
if options.wcspkg:
wcspkg = options.wcspkg
else:
wcspkg = settings.get('WCSpkg', 'choose')
try:
from ginga.util import wcsmod
if wcspkg != 'choose':
assert wcsmod.use(wcspkg) is True
except Exception as e:
logger.warning(
"failed to set WCS package preference: %s" % (str(e)))
# User wants to customize the FITS package?
if options.fitspkg:
fitspkg = options.fitspkg
else:
fitspkg = settings.get('FITSpkg', 'choose')
try:
from ginga.util import io_fits
if fitspkg != 'choose':
assert io_fits.use(fitspkg) is True
except Exception as e:
logger.warning(
"failed to set FITS package preference: %s" % (str(e)))
# Check whether user wants to use OpenCv
use_opencv = settings.get('use_opencv', False)
if use_opencv or options.opencv:
from ginga import trcalc
try:
trcalc.use('opencv')
except Exception as e:
logger.warning(
"failed to set OpenCv preference: %s" % (str(e)))
# Check whether user wants to use OpenCL
use_opencl = settings.get('use_opencl', False)
if use_opencl or options.opencl:
from ginga import trcalc
try:
trcalc.use('opencl')
except Exception as e:
logger.warning(
"failed to set OpenCL preference: %s" % (str(e)))
# Create the dynamic module manager
mm = ModuleManager.ModuleManager(logger)
# Create and start thread pool
ev_quit = threading.Event()
thread_pool = Task.ThreadPool(options.numthreads, logger,
ev_quit=ev_quit)
thread_pool.startall()
# Create the Ginga main object
ginga_shell = GingaShell(logger, thread_pool, mm, prefs,
ev_quit=ev_quit)
layout_file = None
if not options.norestore and settings.get('save_layout', False):
layout_file = os.path.join(basedir, 'layout')
ginga_shell.set_layout(self.layout, layout_file=layout_file)
# User configuration (custom star catalogs, etc.)
if have_ginga_config:
try:
if hasattr(ginga_config, 'pre_gui_config'):
ginga_config.pre_gui_config(ginga_shell)
except Exception as e:
try:
(type, value, tb) = sys.exc_info()
tb_str = "\n".join(traceback.format_tb(tb))
except Exception:
tb_str = "Traceback information unavailable."
logger.error("Error importing Ginga config file: %s" % (
str(e)))
logger.error("Traceback:\n%s" % (tb_str))
# Build desired layout
ginga_shell.build_toplevel()
# Did user specify a particular geometry?
if options.geometry:
ginga_shell.set_geometry(options.geometry)
# make the list of disabled plugins
if options.disable_plugins is not None:
disabled_plugins = options.disable_plugins.lower().split(',')
else:
disabled_plugins = settings.get('disable_plugins', [])
if not isinstance(disabled_plugins, list):
disabled_plugins = disabled_plugins.lower().split(',')
# Add GUI log handler (for "Log" global plugin)
guiHdlr = GuiLogHandler(ginga_shell)
guiHdlr.setLevel(options.loglevel)
fmt = logging.Formatter(log.LOG_FORMAT)
guiHdlr.setFormatter(fmt)
logger.addHandler(guiHdlr)
# Load any custom modules
if options.modules is not None:
modules = options.modules.split(',')
else:
modules = settings.get('global_plugins', [])
if not isinstance(modules, list):
modules = modules.split(',')
for long_plugin_name in modules:
if '.' in long_plugin_name:
tmpstr = long_plugin_name.split('.')
plugin_name = tmpstr[-1]
pfx = '.'.join(tmpstr[:-1])
else:
plugin_name = long_plugin_name
pfx = None
menu_name = "%s [G]" % (plugin_name)
spec = Bunch(name=plugin_name, module=plugin_name,
ptype='global', tab=plugin_name,
menu=menu_name, category="Custom",
workspace='right', pfx=pfx)
self.add_plugin_spec(spec)
# Load any custom local plugins
if options.plugins is not None:
plugins = options.plugins.split(',')
else:
plugins = settings.get('local_plugins', [])
if not isinstance(plugins, list):
plugins = plugins.split(',')
for long_plugin_name in plugins:
if '.' in long_plugin_name:
tmpstr = long_plugin_name.split('.')
plugin_name = tmpstr[-1]
pfx = '.'.join(tmpstr[:-1])
else:
plugin_name = long_plugin_name
pfx = None
spec = Bunch(module=plugin_name, workspace='dialogs',
ptype='local', category="Custom",
hidden=False, pfx=pfx)
self.add_plugin_spec(spec)
# Add non-disabled plugins
enabled_plugins = [spec for spec in self.plugins
if spec.module.lower() not in disabled_plugins]
ginga_shell.set_plugins(enabled_plugins)
# start any plugins that have start=True
ginga_shell.boot_plugins()
ginga_shell.update_pending()
# TEMP?
tab_names = [name.lower()
for name in ginga_shell.ds.get_tabnames(group=None)]
if 'info' in tab_names:
ginga_shell.ds.raise_tab('Info')
if 'synopsis' in tab_names:
ginga_shell.ds.raise_tab('Synopsis')
if 'thumbs' in tab_names:
ginga_shell.ds.raise_tab('Thumbs')
# Add custom channels
if options.channels is not None:
channels = options.channels.split(',')
else:
channels = settings.get('channels', [])
if not isinstance(channels, list):
channels = channels.split(',')
if len(channels) == 0:
# should provide at least one default channel?
channels = [settings.get('channel_prefix', "Image")]
for chname in channels:
ginga_shell.add_channel(chname)
ginga_shell.change_channel(channels[0])
# User configuration (custom star catalogs, etc.)
if have_ginga_config:
try:
if hasattr(ginga_config, 'post_gui_config'):
ginga_config.post_gui_config(ginga_shell)
except Exception as e:
try:
(type, value, tb) = sys.exc_info()
tb_str = "\n".join(traceback.format_tb(tb))
except Exception:
tb_str = "Traceback information unavailable."
logger.error("Error processing Ginga config file: %s" % (
str(e)))
logger.error("Traceback:\n%s" % (tb_str))
# Redirect warnings to logger
for hdlr in logger.handlers:
logging.getLogger('py.warnings').addHandler(hdlr)
# Display banner the first time run, unless suppressed
showBanner = True
try:
showBanner = settings.get('showBanner')
except KeyError:
# disable for subsequent runs
settings.set(showBanner=False)
if not os.path.exists(settings.preffile):
settings.save()
if (not options.nosplash) and (len(args) == 0) and showBanner:
ginga_shell.banner(raiseTab=True)
# Handle inputs like "*.fits[ext]" that sys cmd cannot auto expand.
expanded_args = []
for imgfile in args:
if '*' in imgfile:
if '[' in imgfile and imgfile.endswith(']'):
s = imgfile.split('[')
ext = '[' + s[1]
imgfile = s[0]
else:
ext = ''
for fname in glob.iglob(imgfile):
expanded_args.append(fname + ext)
else:
expanded_args.append(imgfile)
# Assume remaining arguments are fits files and load them.
if not options.separate_channels:
chname = channels[0]
ginga_shell.gui_do(ginga_shell.open_uris, expanded_args,
chname=chname)
else:
i = 0
num_channels = len(channels)
for imgfile in expanded_args:
if i < num_channels:
chname = channels[i]
i = i + 1
else:
channel = ginga_shell.add_channel_auto()
chname = channel.name
ginga_shell.gui_do(ginga_shell.open_uris, [imgfile],
chname=chname)
try:
try:
# if there is a network component, start it
if hasattr(ginga_shell, 'start'):
task = Task.FuncTask2(ginga_shell.start)
thread_pool.addTask(task)
# Main loop to handle GUI events
logger.info("Entering mainloop...")
ginga_shell.mainloop(timeout=0.001)
except KeyboardInterrupt:
logger.error("Received keyboard interrupt!")
finally:
logger.info("Shutting down...")
ev_quit.set()
sys.exit(0) |
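A minimal standalone sketch of the wildcard-expansion step above, for arguments like "*.fits[1]" that the shell cannot expand on its own; the file names are hypothetical and everything outside the glob handling is omitted.
import glob

def expand_args(args):
    # Expand "*.fits[1]"-style inputs: glob on the part before '[' and
    # re-attach the "[ext]" suffix to every match.
    expanded = []
    for imgfile in args:
        if '*' in imgfile:
            if '[' in imgfile and imgfile.endswith(']'):
                pattern, _, ext = imgfile.partition('[')
                ext = '[' + ext
            else:
                pattern, ext = imgfile, ''
            expanded.extend(fname + ext for fname in glob.iglob(pattern))
        else:
            expanded.append(imgfile)
    return expanded

print(expand_args(['a.fits', '*.fits[1]']))  # plain names pass through; globs are expanded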
def _resolve_shape(self, name, layers):
'''Given a list of layers, find the layer output with the given name.
Parameters
----------
name : str
Name of a layer to resolve.
layers : list of :class:`theanets.layers.base.Layer`
A list of layers to search in.
Raises
------
util.ConfigurationError :
If there is no such layer, or if there is more than one.
Returns
-------
name : str
The fully-scoped name of the desired output.
shape : tuple of None and/or int
The shape of the named output.
'''
matches = [l for l in layers if name.split(':')[0] == l.name]
if len(matches) != 1:
raise util.ConfigurationError(
'layer "{}" cannot resolve "{}" using {}'
.format(self.name, name, [l.name for l in layers]))
name = name if ':' in name else matches[0].output_name
return name, matches[0]._output_shapes[name.split(':')[1]] | Given a list of layers, find the layer output with the given name.
Parameters
----------
name : str
Name of a layer to resolve.
layers : list of :class:`theanets.layers.base.Layer`
A list of layers to search in.
Raises
------
util.ConfigurationError :
If there is no such layer, or if there is more than one.
Returns
-------
name : str
The fully-scoped name of the desired output.
shape : tuple of None and/or int
The shape of the named output. | Below is the the instruction that describes the task:
### Input:
Given a list of layers, find the layer output with the given name.
Parameters
----------
name : str
Name of a layer to resolve.
layers : list of :class:`theanets.layers.base.Layer`
A list of layers to search in.
Raises
------
util.ConfigurationError :
If there is no such layer, or if there is more than one.
Returns
-------
name : str
The fully-scoped name of the desired output.
shape : tuple of None and/or int
The shape of the named output.
### Response:
def _resolve_shape(self, name, layers):
'''Given a list of layers, find the layer output with the given name.
Parameters
----------
name : str
Name of a layer to resolve.
layers : list of :class:`theanets.layers.base.Layer`
A list of layers to search in.
Raises
------
util.ConfigurationError :
If there is no such layer, or if there is more than one.
Returns
-------
name : str
The fully-scoped name of the desired output.
shape : tuple of None and/or int
The shape of the named output.
'''
matches = [l for l in layers if name.split(':')[0] == l.name]
if len(matches) != 1:
raise util.ConfigurationError(
'layer "{}" cannot resolve "{}" using {}'
.format(self.name, name, [l.name for l in layers]))
name = name if ':' in name else matches[0].output_name
return name, matches[0]._output_shapes[name.split(':')[1]] |
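A self-contained sketch of the same name-resolution logic, using a simplified stand-in for theanets layers; the FakeLayer class and its attributes are assumptions for illustration, not the real theanets API.
class FakeLayer:
    def __init__(self, name, output_shapes):
        self.name = name
        self._output_shapes = output_shapes        # e.g. {'out': (None, 100)}
        self.output_name = name + ':out'           # default fully-scoped output

def resolve_shape(name, layers):
    # Match on the part before ':' and require exactly one hit.
    matches = [l for l in layers if name.split(':')[0] == l.name]
    if len(matches) != 1:
        raise ValueError('cannot resolve {!r} among {}'.format(
            name, [l.name for l in layers]))
    name = name if ':' in name else matches[0].output_name
    return name, matches[0]._output_shapes[name.split(':')[1]]

layers = [FakeLayer('in', {'out': (None, 784)}), FakeLayer('hid1', {'out': (None, 100)})]
print(resolve_shape('hid1', layers))   # ('hid1:out', (None, 100))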
def get_postadres_by_huisnummer(self, huisnummer):
'''
Get the `postadres` for a :class:`Huisnummer`.
:param huisnummer: The :class:`Huisnummer` for which the \
`postadres` is wanted. OR A huisnummer id.
:rtype: A :class:`str`.
'''
try:
id = huisnummer.id
except AttributeError:
id = huisnummer
def creator():
res = crab_gateway_request(
self.client, 'GetPostadresByHuisnummerId', id
)
if res is None:
raise GatewayResourceNotFoundException()
return res.Postadres
if self.caches['short'].is_configured:
key = 'GetPostadresByHuisnummerId#%s' % (id)
postadres = self.caches['short'].get_or_create(key, creator)
else:
postadres = creator()
return postadres | Get the `postadres` for a :class:`Huisnummer`.
:param huisnummer: The :class:`Huisnummer` for which the \
`postadres` is wanted. OR A huisnummer id.
:rtype: A :class:`str`. | Below is the the instruction that describes the task:
### Input:
Get the `postadres` for a :class:`Huisnummer`.
:param huisnummer: The :class:`Huisnummer` for which the \
`postadres` is wanted. OR A huisnummer id.
:rtype: A :class:`str`.
### Response:
def get_postadres_by_huisnummer(self, huisnummer):
'''
Get the `postadres` for a :class:`Huisnummer`.
:param huisnummer: The :class:`Huisnummer` for which the \
`postadres` is wanted. OR A huisnummer id.
:rtype: A :class:`str`.
'''
try:
id = huisnummer.id
except AttributeError:
id = huisnummer
def creator():
res = crab_gateway_request(
self.client, 'GetPostadresByHuisnummerId', id
)
if res is None:
raise GatewayResourceNotFoundException()
return res.Postadres
if self.caches['short'].is_configured:
key = 'GetPostadresByHuisnummerId#%s' % (id)
postadres = self.caches['short'].get_or_create(key, creator)
else:
postadres = creator()
return postadres |
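The short-cache pattern above (a creator closure passed to get_or_create) can be sketched with a plain dict; the dogpile-style cache region and the CRAB service call are replaced with stand-ins here.
cache = {}

def get_or_create(key, creator):
    # Only invoke the (potentially expensive) creator on a cache miss.
    if key not in cache:
        cache[key] = creator()
    return cache[key]

def fetch_postadres(huisnummer_id):
    def creator():
        # stand-in for crab_gateway_request(...).Postadres
        return 'Fakestraat 1, 9999 Faketown (id %s)' % huisnummer_id
    return get_or_create('GetPostadresByHuisnummerId#%s' % huisnummer_id, creator)

print(fetch_postadres(42))   # computed once ...
print(fetch_postadres(42))   # ... then served from the cache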
def warn_deprecated(since, message='', name='', alternative='', pending=False,
obj_type='attribute', addendum=''):
"""Display deprecation warning in a standard way.
Parameters
----------
since : str
The release at which this API became deprecated.
message : str, optional
Override the default deprecation message. The format
specifier `%(name)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. `%(obj_type)s` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated object.
alternative : str, optional
An alternative function that the user may use in place of the
deprecated function. The deprecation warning will tell the user
about this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
obj_type : str, optional
The object type being deprecated.
addendum : str, optional
Additional text appended directly to the final message.
Examples
--------
Basic example::
# To warn of the deprecation of "metpy.name_of_module"
warn_deprecated('0.6.0', name='metpy.name_of_module',
obj_type='module')
"""
message = _generate_deprecation_message(since, message, name, alternative,
pending, obj_type)
warnings.warn(message, metpyDeprecation, stacklevel=1) | Display deprecation warning in a standard way.
Parameters
----------
since : str
The release at which this API became deprecated.
message : str, optional
Override the default deprecation message. The format
specifier `%(name)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. `%(obj_type)s` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated object.
alternative : str, optional
An alternative function that the user may use in place of the
deprecated function. The deprecation warning will tell the user
about this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
obj_type : str, optional
The object type being deprecated.
addendum : str, optional
Additional text appended directly to the final message.
Examples
--------
Basic example::
# To warn of the deprecation of "metpy.name_of_module"
warn_deprecated('0.6.0', name='metpy.name_of_module',
obj_type='module') | Below is the the instruction that describes the task:
### Input:
Display deprecation warning in a standard way.
Parameters
----------
since : str
The release at which this API became deprecated.
message : str, optional
Override the default deprecation message. The format
specifier `%(name)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. `%(obj_type)s` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated object.
alternative : str, optional
An alternative function that the user may use in place of the
deprecated function. The deprecation warning will tell the user
about this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
obj_type : str, optional
The object type being deprecated.
addendum : str, optional
Additional text appended directly to the final message.
Examples
--------
Basic example::
# To warn of the deprecation of "metpy.name_of_module"
warn_deprecated('0.6.0', name='metpy.name_of_module',
obj_type='module')
### Response:
def warn_deprecated(since, message='', name='', alternative='', pending=False,
obj_type='attribute', addendum=''):
"""Display deprecation warning in a standard way.
Parameters
----------
since : str
The release at which this API became deprecated.
message : str, optional
Override the default deprecation message. The format
specifier `%(name)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. `%(obj_type)s` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated object.
alternative : str, optional
An alternative function that the user may use in place of the
deprecated function. The deprecation warning will tell the user
about this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
obj_type : str, optional
The object type being deprecated.
addendum : str, optional
Additional text appended directly to the final message.
Examples
--------
Basic example::
# To warn of the deprecation of "metpy.name_of_module"
warn_deprecated('0.6.0', name='metpy.name_of_module',
obj_type='module')
"""
message = _generate_deprecation_message(since, message, name, alternative,
pending, obj_type)
warnings.warn(message, metpyDeprecation, stacklevel=1) |
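A hedged usage sketch of such a helper; the message assembly below is simplified and does not reproduce metpy's actual _generate_deprecation_message or its metpyDeprecation class.
import warnings

def warn_deprecated_sketch(since, name='', alternative='', pending=False,
                           obj_type='attribute'):
    msg = 'The %s %s was deprecated in version %s.' % (obj_type, name, since)
    if alternative:
        msg += ' Use %s instead.' % alternative
    category = PendingDeprecationWarning if pending else DeprecationWarning
    warnings.warn(msg, category, stacklevel=2)

warnings.simplefilter('always')  # make the warning visible when run as a script
warn_deprecated_sketch('0.6.0', name='metpy.name_of_module', obj_type='module')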
def surround_parse(self, node, pre_char, post_char):
"""Parse the subnodes of a given node. Subnodes with tags in the
`ignore` list are ignored. Prepend `pre_char` and append `post_char` to
the output in self.pieces."""
self.add_text(pre_char)
self.subnode_parse(node)
self.add_text(post_char) | Parse the subnodes of a given node. Subnodes with tags in the
`ignore` list are ignored. Prepend `pre_char` and append `post_char` to
the output in self.pieces. | Below is the the instruction that describes the task:
### Input:
Parse the subnodes of a given node. Subnodes with tags in the
`ignore` list are ignored. Prepend `pre_char` and append `post_char` to
the output in self.pieces.
### Response:
def surround_parse(self, node, pre_char, post_char):
"""Parse the subnodes of a given node. Subnodes with tags in the
`ignore` list are ignored. Prepend `pre_char` and append `post_char` to
the output in self.pieces."""
self.add_text(pre_char)
self.subnode_parse(node)
self.add_text(post_char) |
async def delete(self, *args, **kwargs):
'''
Corresponds to DELETE request with a resource identifier, deleting a single document from the database
'''
pk = self.pk_type(kwargs['pk'])
result = await self._meta.object_class.delete_entries(db=self.db, query={self.pk: pk})
if result.acknowledged:
if result.deleted_count == 0:
raise NotFound()
else:
raise BadRequest('Failed to delete object') | Corresponds to DELETE request with a resource identifier, deleting a single document from the database | Below is the the instruction that describes the task:
### Input:
Corresponds to DELETE request with a resource identifier, deleting a single document from the database
### Response:
async def delete(self, *args, **kwargs):
'''
Corresponds to DELETE request with a resource identifier, deleting a single document from the database
'''
pk = self.pk_type(kwargs['pk'])
result = await self._meta.object_class.delete_entries(db=self.db, query={self.pk: pk})
if result.acknowledged:
if result.deleted_count == 0:
raise NotFound()
else:
raise BadRequest('Failed to delete object') |
def _add_pos1(token):
"""
Adds a 'pos1' element to a frog token.
"""
result = token.copy()
result['pos1'] = _POSMAP[token['pos'].split("(")[0]]
return result | Adds a 'pos1' element to a frog token. | Below is the the instruction that describes the task:
### Input:
Adds a 'pos1' element to a frog token.
### Response:
def _add_pos1(token):
"""
Adds a 'pos1' element to a frog token.
"""
result = token.copy()
result['pos1'] = _POSMAP[token['pos'].split("(")[0]]
return result |
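A runnable illustration with a toy _POSMAP; the tag-to-pos1 values are assumptions, not Frog's real (much larger) table, and the function is restated so the snippet runs on its own.
_POSMAP = {'N': 'noun', 'WW': 'verb', 'LID': 'det'}   # assumed subset of the real mapping

def _add_pos1(token):
    # Coarse POS is the tag up to the first '(', mapped through _POSMAP.
    result = token.copy()
    result['pos1'] = _POSMAP[token['pos'].split("(")[0]]
    return result

print(_add_pos1({'word': 'fietsen', 'pos': 'WW(pv,tgw,mv)'}))
# {'word': 'fietsen', 'pos': 'WW(pv,tgw,mv)', 'pos1': 'verb'}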
def add_link(dataset, source, target, count=1):
"""Add a link.
Parameters
----------
dataset : `dict` of ([`int`, `str`] or [`list` of `int`, `list` of `str`])
Dataset.
source : `iterable` of `str`
Link source.
target : `str`
Link target.
count : `int`, optional
Link count (default: 1).
"""
try:
node = dataset[source]
values, links = node
if isinstance(links, list):
try:
idx = links.index(target)
values[idx] += count
except ValueError:
links.append(target)
values.append(count)
elif links == target:
node[0] += count
else:
node[0] = [values, count]
node[1] = [links, target]
except KeyError:
dataset[source] = [count, target] | Add a link.
Parameters
----------
dataset : `dict` of ([`int`, `str`] or [`list` of `int`, `list` of `str`])
Dataset.
source : `iterable` of `str`
Link source.
target : `str`
Link target.
count : `int`, optional
Link count (default: 1). | Below is the the instruction that describes the task:
### Input:
Add a link.
Parameters
----------
dataset : `dict` of ([`int`, `str`] or [`list` of `int`, `list` of `str`])
Dataset.
source : `iterable` of `str`
Link source.
target : `str`
Link target.
count : `int`, optional
Link count (default: 1).
### Response:
def add_link(dataset, source, target, count=1):
"""Add a link.
Parameters
----------
dataset : `dict` of ([`int`, `str`] or [`list` of `int`, `list` of `str`])
Dataset.
source : `iterable` of `str`
Link source.
target : `str`
Link target.
count : `int`, optional
Link count (default: 1).
"""
try:
node = dataset[source]
values, links = node
if isinstance(links, list):
try:
idx = links.index(target)
values[idx] += count
except ValueError:
links.append(target)
values.append(count)
elif links == target:
node[0] += count
else:
node[0] = [values, count]
node[1] = [links, target]
except KeyError:
dataset[source] = [count, target] |
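A usage trace showing how the node layout evolves: a single target is stored as scalars, and both fields are promoted to parallel lists once a second target appears (assumes add_link as defined above is in scope).
dataset = {}
add_link(dataset, ('the',), 'cat')
add_link(dataset, ('the',), 'cat')
print(dataset)   # {('the',): [2, 'cat']}                    one target: scalar count/link
add_link(dataset, ('the',), 'dog')
add_link(dataset, ('the',), 'dog')
print(dataset)   # {('the',): [[2, 2], ['cat', 'dog']]}      two targets: parallel lists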
def get_batched_changesets(self, changesets_request_data):
"""GetBatchedChangesets.
Returns changesets for a given list of changeset Ids.
:param :class:`<TfvcChangesetsRequestData> <azure.devops.v5_0.tfvc.models.TfvcChangesetsRequestData>` changesets_request_data: List of changeset IDs.
:rtype: [TfvcChangesetRef]
"""
content = self._serialize.body(changesets_request_data, 'TfvcChangesetsRequestData')
response = self._send(http_method='POST',
location_id='b7e7c173-803c-4fea-9ec8-31ee35c5502a',
version='5.0',
content=content)
return self._deserialize('[TfvcChangesetRef]', self._unwrap_collection(response)) | GetBatchedChangesets.
Returns changesets for a given list of changeset Ids.
:param :class:`<TfvcChangesetsRequestData> <azure.devops.v5_0.tfvc.models.TfvcChangesetsRequestData>` changesets_request_data: List of changeset IDs.
:rtype: [TfvcChangesetRef] | Below is the the instruction that describes the task:
### Input:
GetBatchedChangesets.
Returns changesets for a given list of changeset Ids.
:param :class:`<TfvcChangesetsRequestData> <azure.devops.v5_0.tfvc.models.TfvcChangesetsRequestData>` changesets_request_data: List of changeset IDs.
:rtype: [TfvcChangesetRef]
### Response:
def get_batched_changesets(self, changesets_request_data):
"""GetBatchedChangesets.
Returns changesets for a given list of changeset Ids.
:param :class:`<TfvcChangesetsRequestData> <azure.devops.v5_0.tfvc.models.TfvcChangesetsRequestData>` changesets_request_data: List of changeset IDs.
:rtype: [TfvcChangesetRef]
"""
content = self._serialize.body(changesets_request_data, 'TfvcChangesetsRequestData')
response = self._send(http_method='POST',
location_id='b7e7c173-803c-4fea-9ec8-31ee35c5502a',
version='5.0',
content=content)
return self._deserialize('[TfvcChangesetRef]', self._unwrap_collection(response)) |
def crack1(self, rnum, snum, message, signsecret):
"""
find privkey, given signsecret k, message m, signature (r,s)
x= (s*k-m)/r
"""
m = self.GFn.value(message)
r = self.GFn.value(rnum)
s = self.GFn.value(snum)
k = self.GFn.value(signsecret)
return (s * k - m) / r | find privkey, given signsecret k, message m, signature (r,s)
x= (s*k-m)/r | Below is the the instruction that describes the task:
### Input:
find privkey, given signsecret k, message m, signature (r,s)
x= (s*k-m)/r
### Response:
def crack1(self, rnum, snum, message, signsecret):
"""
find privkey, given signsecret k, message m, signature (r,s)
x= (s*k-m)/r
"""
m = self.GFn.value(message)
r = self.GFn.value(rnum)
s = self.GFn.value(snum)
k = self.GFn.value(signsecret)
return (s * k - m) / r |
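A self-contained numeric check of the recovery formula, done with plain modular arithmetic over a toy prime modulus rather than a real curve (r is simply assumed here; in real ECDSA it comes from k*G). Since s = k^-1 * (m + r*x) mod n, knowing the nonce k gives x = (s*k - m) * r^-1 mod n.
n = 23      # toy group order (prime); real ECDSA uses the curve's group order
x = 7       # private key we want to recover
k = 5       # per-signature secret nonce
m = 11      # message hash reduced mod n
r = 9       # assumed; normally r = (k*G).x mod n

inv = lambda a: pow(a, -1, n)            # modular inverse (Python 3.8+)
s = (inv(k) * (m + r * x)) % n           # signing equation

recovered = ((s * k - m) * inv(r)) % n   # the formula implemented by crack1
assert recovered == x
print(recovered)                         # 7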
def _assert_valid_categorical_missing_value(value):
"""
Check that value is a valid categorical missing_value.
Raises a TypeError if the value cannot be used as the missing_value for
a categorical_dtype Term.
"""
label_types = LabelArray.SUPPORTED_SCALAR_TYPES
if not isinstance(value, label_types):
raise TypeError(
"Categorical terms must have missing values of type "
"{types}.".format(
types=' or '.join([t.__name__ for t in label_types]),
)
) | Check that value is a valid categorical missing_value.
Raises a TypeError if the value cannot be used as the missing_value for
a categorical_dtype Term. | Below is the the instruction that describes the task:
### Input:
Check that value is a valid categorical missing_value.
Raises a TypeError if the value cannot be used as the missing_value for
a categorical_dtype Term.
### Response:
def _assert_valid_categorical_missing_value(value):
"""
Check that value is a valid categorical missing_value.
Raises a TypeError if the value cannot be used as the missing_value for
a categorical_dtype Term.
"""
label_types = LabelArray.SUPPORTED_SCALAR_TYPES
if not isinstance(value, label_types):
raise TypeError(
"Categorical terms must have missing values of type "
"{types}.".format(
types=' or '.join([t.__name__ for t in label_types]),
)
) |
def run_command(self, command, shell=True, env=None, execute='/bin/bash',
return_code=None):
"""Run a shell command.
The options available:
* ``shell`` to be enabled or disabled, which provides the ability
to execute arbitrary strings or not. If disabled, commands must be
in the format of a ``list``
* ``env`` is an environment override and or manipulation setting
which sets environment variables within the locally executed
shell.
* ``execute`` changes the interpreter which is executing the
command(s).
* ``return_code`` defines the return code that the command must
have in order to ensure success. This can be a list of return
codes if multiple return codes are acceptable.
:param command: ``str``
:param shell: ``bool``
:param env: ``dict``
:param execute: ``str``
:param return_code: ``int``
"""
self.log.info('Command: [ %s ]', command)
if env is None:
env = os.environ
if self.debug is False:
stdout = open(os.devnull, 'wb')
else:
stdout = subprocess.PIPE
if return_code is None:
return_code = [0]
stderr = subprocess.PIPE
process = subprocess.Popen(
command,
stdout=stdout,
stderr=stderr,
executable=execute,
env=env,
shell=shell
)
output, error = process.communicate()
if process.returncode not in return_code:
self.log.debug('Command Output: %s, Error Msg: %s', output, error)
return error, False
else:
self.log.debug('Command Output: %s', output)
return output, True | Run a shell command.
The options available:
* ``shell`` to be enabled or disabled, which provides the ability
to execute arbitrary strings or not. If disabled, commands must be
in the format of a ``list``
* ``env`` is an environment override and or manipulation setting
which sets environment variables within the locally executed
shell.
* ``execute`` changes the interpreter which is executing the
command(s).
* ``return_code`` defines the return code that the command must
have in order to ensure success. This can be a list of return
codes if multiple return codes are acceptable.
:param command: ``str``
:param shell: ``bool``
:param env: ``dict``
:param execute: ``str``
:param return_code: ``int`` | Below is the the instruction that describes the task:
### Input:
Run a shell command.
The options available:
* ``shell`` to be enabled or disabled, which provides the ability
to execute arbitrary strings or not. If disabled, commands must be
in the format of a ``list``
* ``env`` is an environment override and or manipulation setting
which sets environment variables within the locally executed
shell.
* ``execute`` changes the interpreter which is executing the
command(s).
* ``return_code`` defines the return code that the command must
have in order to ensure success. This can be a list of return
codes if multiple return codes are acceptable.
:param command: ``str``
:param shell: ``bool``
:param env: ``dict``
:param execute: ``str``
:param return_code: ``int``
### Response:
def run_command(self, command, shell=True, env=None, execute='/bin/bash',
return_code=None):
"""Run a shell command.
The options available:
* ``shell`` to be enabled or disabled, which provides the ability
to execute arbitrary strings or not. If disabled, commands must be
in the format of a ``list``
* ``env`` is an environment override and or manipulation setting
which sets environment variables within the locally executed
shell.
* ``execute`` changes the interpreter which is executing the
command(s).
* ``return_code`` defines the return code that the command must
have in order to ensure success. This can be a list of return
codes if multiple return codes are acceptable.
:param command: ``str``
:param shell: ``bool``
:param env: ``dict``
:param execute: ``str``
:param return_code: ``int``
"""
self.log.info('Command: [ %s ]', command)
if env is None:
env = os.environ
if self.debug is False:
stdout = open(os.devnull, 'wb')
else:
stdout = subprocess.PIPE
if return_code is None:
return_code = [0]
stderr = subprocess.PIPE
process = subprocess.Popen(
command,
stdout=stdout,
stderr=stderr,
executable=execute,
env=env,
shell=shell
)
output, error = process.communicate()
if process.returncode not in return_code:
self.log.debug('Command Output: %s, Error Msg: %s', output, error)
return error, False
else:
self.log.debug('Command Output: %s', output)
return output, True |
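A trimmed-down, standalone sketch of the same subprocess pattern (Popen, communicate, and a whitelist of acceptable return codes); logging, the debug switch, and the class context are left out, and it assumes a Unix shell at /bin/bash.
import subprocess

def run_command_sketch(command, shell=True, env=None, execute='/bin/bash',
                       return_code=None):
    if return_code is None:
        return_code = [0]
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        executable=execute if shell else None,
        env=env,
        shell=shell,
    )
    output, error = process.communicate()
    ok = process.returncode in return_code
    return (output if ok else error), ok

print(run_command_sketch('echo hello'))                  # (b'hello\n', True)
print(run_command_sketch('exit 3', return_code=[0, 3]))  # rc 3 accepted explicitly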
def peek_string(self, lpBaseAddress, fUnicode = False, dwMaxSize = 0x1000):
"""
Tries to read an ASCII or Unicode string
from the address space of the process.
@see: L{read_string}
@type lpBaseAddress: int
@param lpBaseAddress: Memory address to begin reading.
@type fUnicode: bool
@param fUnicode: C{True} if the string is expected to be Unicode,
C{False} if it's expected to be ANSI.
@type dwMaxSize: int
@param dwMaxSize: Maximum allowed string length to read, in bytes.
@rtype: str, compat.unicode
@return: String read from the process memory space.
It B{doesn't} include the terminating null character.
Returns an empty string on failure.
"""
# Validate the parameters.
if not lpBaseAddress or dwMaxSize == 0:
if fUnicode:
return u''
return ''
if not dwMaxSize:
dwMaxSize = 0x1000
# Read the string.
szString = self.peek(lpBaseAddress, dwMaxSize)
# If the string is Unicode...
if fUnicode:
# Decode the string.
szString = compat.unicode(szString, 'U16', 'replace')
## try:
## szString = compat.unicode(szString, 'U16')
## except UnicodeDecodeError:
## szString = struct.unpack('H' * (len(szString) / 2), szString)
## szString = [ unichr(c) for c in szString ]
## szString = u''.join(szString)
# Truncate the string when the first null char is found.
szString = szString[ : szString.find(u'\0') ]
# If the string is ANSI...
else:
# Truncate the string when the first null char is found.
szString = szString[ : szString.find('\0') ]
# Return the decoded string.
return szString | Tries to read an ASCII or Unicode string
from the address space of the process.
@see: L{read_string}
@type lpBaseAddress: int
@param lpBaseAddress: Memory address to begin reading.
@type fUnicode: bool
@param fUnicode: C{True} if the string is expected to be Unicode,
C{False} if it's expected to be ANSI.
@type dwMaxSize: int
@param dwMaxSize: Maximum allowed string length to read, in bytes.
@rtype: str, compat.unicode
@return: String read from the process memory space.
It B{doesn't} include the terminating null character.
Returns an empty string on failure. | Below is the the instruction that describes the task:
### Input:
Tries to read an ASCII or Unicode string
from the address space of the process.
@see: L{read_string}
@type lpBaseAddress: int
@param lpBaseAddress: Memory address to begin reading.
@type fUnicode: bool
@param fUnicode: C{True} if the string is expected to be Unicode,
C{False} if it's expected to be ANSI.
@type dwMaxSize: int
@param dwMaxSize: Maximum allowed string length to read, in bytes.
@rtype: str, compat.unicode
@return: String read from the process memory space.
It B{doesn't} include the terminating null character.
Returns an empty string on failure.
### Response:
def peek_string(self, lpBaseAddress, fUnicode = False, dwMaxSize = 0x1000):
"""
Tries to read an ASCII or Unicode string
from the address space of the process.
@see: L{read_string}
@type lpBaseAddress: int
@param lpBaseAddress: Memory address to begin reading.
@type fUnicode: bool
@param fUnicode: C{True} if the string is expected to be Unicode,
C{False} if it's expected to be ANSI.
@type dwMaxSize: int
@param dwMaxSize: Maximum allowed string length to read, in bytes.
@rtype: str, compat.unicode
@return: String read from the process memory space.
It B{doesn't} include the terminating null character.
Returns an empty string on failure.
"""
# Validate the parameters.
if not lpBaseAddress or dwMaxSize == 0:
if fUnicode:
return u''
return ''
if not dwMaxSize:
dwMaxSize = 0x1000
# Read the string.
szString = self.peek(lpBaseAddress, dwMaxSize)
# If the string is Unicode...
if fUnicode:
# Decode the string.
szString = compat.unicode(szString, 'U16', 'replace')
## try:
## szString = compat.unicode(szString, 'U16')
## except UnicodeDecodeError:
## szString = struct.unpack('H' * (len(szString) / 2), szString)
## szString = [ unichr(c) for c in szString ]
## szString = u''.join(szString)
# Truncate the string when the first null char is found.
szString = szString[ : szString.find(u'\0') ]
# If the string is ANSI...
else:
# Truncate the string when the first null char is found.
szString = szString[ : szString.find('\0') ]
# Return the decoded string.
return szString |
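The decode-and-truncate logic can be exercised on a plain bytes buffer, independent of any process memory read; the buffers below are made up, and the ANSI branch uses latin-1 as a stand-in for whatever local codepage the target process uses.
def decode_c_string(buf, unicode_=False):
    # Decode (UTF-16 for the "Unicode" case, as in the method above), then
    # truncate at the first NUL; unlike the original, keep the whole string
    # if no NUL is present.
    if unicode_:
        s = buf.decode('utf-16-le', 'replace')
    else:
        s = buf.decode('latin-1')
    nul = s.find('\0')
    return s[:nul] if nul >= 0 else s

print(decode_c_string(b'kernel32.dll\x00garbage'))                      # 'kernel32.dll'
print(decode_c_string('ntdll\x00'.encode('utf-16-le'), unicode_=True))  # 'ntdll'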