code | docstring | text |
---|---|---|
def calculus_integrate(alphabet_size=26,
min_depth=0,
max_depth=2,
nbr_cases=10000):
"""Generate the calculus integrate dataset.
Each sample is a symbolic math expression involving unknown variables. The
task is to take the indefinite integral of the expression. The target is the
resulting expression.
Args:
alphabet_size: How many possible variables there are. Max 26.
min_depth: Minimum depth of the expression trees on both sides of the
equals sign in the equation.
max_depth: Maximum depth of the expression trees on both sides of the
equals sign in the equation.
nbr_cases: The number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the tokens encoding the variable to integrate with respect
to and the expression to integrate, and target-list is a list of tokens
encoding the resulting math expression after integrating.
Raises:
ValueError: If `max_depth` < `min_depth`, or if alphabet_size > 26.
"""
if max_depth < min_depth:
raise ValueError("max_depth must be greater than or equal to min_depth. "
"Got max_depth=%s, min_depth=%s" % (max_depth, min_depth))
# Don't allow alphabet to use capital letters. Those are reserved for function
# names.
if alphabet_size > 26:
raise ValueError(
"alphabet_size must not be greater than 26. Got %s." % alphabet_size)
functions = {"log": "L"}
alg_cfg = math_dataset_init(alphabet_size, digits=5, functions=functions)
nbr_case = 0
while nbr_case < nbr_cases:
try:
sample, target = generate_calculus_integrate_sample(
alg_cfg.vlist,
list(alg_cfg.ops.values()), min_depth, max_depth, alg_cfg.functions)
yield {
"inputs": alg_cfg.int_encoder(sample),
"targets": alg_cfg.int_encoder(target)
}
except: # pylint:disable=bare-except
continue
if nbr_case % 10000 == 0:
print(" calculus_integrate: generating case %d." % nbr_case)
nbr_case += 1 | Generate the calculus integrate dataset.
Each sample is a symbolic math expression involving unknown variables. The
task is to take the indefinite integral of the expression. The target is the
resulting expression.
Args:
alphabet_size: How many possible variables there are. Max 26.
min_depth: Minimum depth of the expression trees on both sides of the
equals sign in the equation.
max_depth: Maximum depth of the expression trees on both sides of the
equals sign in the equation.
nbr_cases: The number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the tokens encoding the variable to integrate with respect
to and the expression to integrate, and target-list is a list of tokens
encoding the resulting math expression after integrating.
Raises:
ValueError: If `max_depth` < `min_depth`, or if alphabet_size > 26. | Below is the instruction that describes the task:
### Input:
Generate the calculus integrate dataset.
Each sample is a symbolic math expression involving unknown variables. The
task is to take the indefinite integral of the expression. The target is the
resulting expression.
Args:
alphabet_size: How many possible variables there are. Max 26.
min_depth: Minimum depth of the expression trees on both sides of the
equals sign in the equation.
max_depth: Maximum depth of the expression trees on both sides of the
equals sign in the equation.
nbr_cases: The number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the tokens encoding the variable to integrate with respect
to and the expression to integrate, and target-list is a list of tokens
encoding the resulting math expression after integrating.
Raises:
ValueError: If `max_depth` < `min_depth`, or if alphabet_size > 26.
### Response:
def calculus_integrate(alphabet_size=26,
min_depth=0,
max_depth=2,
nbr_cases=10000):
"""Generate the calculus integrate dataset.
Each sample is a symbolic math expression involving unknown variables. The
task is to take the indefinite integral of the expression. The target is the
resulting expression.
Args:
alphabet_size: How many possible variables there are. Max 26.
min_depth: Minimum depth of the expression trees on both sides of the
equals sign in the equation.
max_depth: Maximum depth of the expression trees on both sides of the
equals sign in the equation.
nbr_cases: The number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the tokens encoding the variable to integrate with respect
to and the expression to integrate, and target-list is a list of tokens
encoding the resulting math expression after integrating.
Raises:
ValueError: If `max_depth` < `min_depth`, or if alphabet_size > 26.
"""
if max_depth < min_depth:
raise ValueError("max_depth must be greater than or equal to min_depth. "
"Got max_depth=%s, min_depth=%s" % (max_depth, min_depth))
# Don't allow alphabet to use capital letters. Those are reserved for function
# names.
if alphabet_size > 26:
raise ValueError(
"alphabet_size must not be greater than 26. Got %s." % alphabet_size)
functions = {"log": "L"}
alg_cfg = math_dataset_init(alphabet_size, digits=5, functions=functions)
nbr_case = 0
while nbr_case < nbr_cases:
try:
sample, target = generate_calculus_integrate_sample(
alg_cfg.vlist,
list(alg_cfg.ops.values()), min_depth, max_depth, alg_cfg.functions)
yield {
"inputs": alg_cfg.int_encoder(sample),
"targets": alg_cfg.int_encoder(target)
}
except: # pylint:disable=bare-except
continue
if nbr_case % 10000 == 0:
print(" calculus_integrate: generating case %d." % nbr_case)
nbr_case += 1 |
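A minimal consumption sketch (illustrative only): it assumes the helpers referenced above, such as math_dataset_init and generate_calculus_integrate_sample, are importable alongside calculus_integrate.

for i, case in enumerate(calculus_integrate(alphabet_size=5, max_depth=2, nbr_cases=100)):
    # Each case is a dict of integer-encoded token lists.
    print(case["inputs"], "->", case["targets"])
    if i >= 2:  # peek at the first few samples only
        break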
def cannot_convert(self, node, reason=None):
"""Warn the user that a given chunk of code is not valid Python 3,
but that it cannot be converted automatically.
First argument is the top-level node for the code in question.
Optional second argument is why it can't be converted.
"""
lineno = node.get_lineno()
for_output = node.clone()
for_output.prefix = u""
msg = "Line %d: could not convert: %s"
self.log_message(msg % (lineno, for_output))
if reason:
self.log_message(reason) | Warn the user that a given chunk of code is not valid Python 3,
but that it cannot be converted automatically.
First argument is the top-level node for the code in question.
Optional second argument is why it can't be converted. | Below is the instruction that describes the task:
### Input:
Warn the user that a given chunk of code is not valid Python 3,
but that it cannot be converted automatically.
First argument is the top-level node for the code in question.
Optional second argument is why it can't be converted.
### Response:
def cannot_convert(self, node, reason=None):
"""Warn the user that a given chunk of code is not valid Python 3,
but that it cannot be converted automatically.
First argument is the top-level node for the code in question.
Optional second argument is why it can't be converted.
"""
lineno = node.get_lineno()
for_output = node.clone()
for_output.prefix = u""
msg = "Line %d: could not convert: %s"
self.log_message(msg % (lineno, for_output))
if reason:
self.log_message(reason) |
def _read_namespaced(
ctx: ReaderContext, allowed_suffix: Optional[str] = None
) -> Tuple[Optional[str], str]:
"""Read a namespaced token from the input stream."""
ns: List[str] = []
name: List[str] = []
reader = ctx.reader
has_ns = False
while True:
token = reader.peek()
if token == "/":
reader.next_token()
if has_ns:
raise SyntaxError("Found '/'; expected word character")
elif len(name) == 0:
name.append("/")
else:
if "/" in name:
raise SyntaxError("Found '/' after '/'")
has_ns = True
ns = name
name = []
elif ns_name_chars.match(token):
reader.next_token()
name.append(token)
elif allowed_suffix is not None and token == allowed_suffix:
reader.next_token()
name.append(token)
else:
break
ns_str = None if not has_ns else "".join(ns)
name_str = "".join(name)
# A small exception for the symbol '/ used for division
if ns_str is None:
if "/" in name_str and name_str != "/":
raise SyntaxError("'/' character disallowed in names")
assert ns_str is None or len(ns_str) > 0
return ns_str, name_str | Read a namespaced token from the input stream. | Below is the instruction that describes the task:
### Input:
Read a namespaced token from the input stream.
### Response:
def _read_namespaced(
ctx: ReaderContext, allowed_suffix: Optional[str] = None
) -> Tuple[Optional[str], str]:
"""Read a namespaced token from the input stream."""
ns: List[str] = []
name: List[str] = []
reader = ctx.reader
has_ns = False
while True:
token = reader.peek()
if token == "/":
reader.next_token()
if has_ns:
raise SyntaxError("Found '/'; expected word character")
elif len(name) == 0:
name.append("/")
else:
if "/" in name:
raise SyntaxError("Found '/' after '/'")
has_ns = True
ns = name
name = []
elif ns_name_chars.match(token):
reader.next_token()
name.append(token)
elif allowed_suffix is not None and token == allowed_suffix:
reader.next_token()
name.append(token)
else:
break
ns_str = None if not has_ns else "".join(ns)
name_str = "".join(name)
# A small exception for the symbol '/ used for division
if ns_str is None:
if "/" in name_str and name_str != "/":
raise SyntaxError("'/' character disallowed in names")
assert ns_str is None or len(ns_str) > 0
return ns_str, name_str |
def check_network_role(self, public_key):
""" Check the public key of a node on the network to see if they are
permitted to participate. The roles being checked are the
following, from first to last:
"network"
"default"
The first role that is set will be the one used to enforce if the
node is allowed.
Args:
public_key (string): The public key belonging to a node on the
network
"""
state_root = self._current_root_func()
if state_root == INIT_ROOT_KEY:
LOGGER.debug("Chain head is not set yet. Permit all.")
return True
self._cache.update_view(state_root)
role = self._cache.get_role("network", state_root)
if role is None:
policy_name = "default"
else:
policy_name = role.policy_name
policy = self._cache.get_policy(policy_name, state_root)
if policy is not None:
if not self._allowed(public_key, policy):
LOGGER.debug("Node is not permitted: %s.", public_key)
return False
return True | Check the public key of a node on the network to see if they are
permitted to participate. The roles being checked are the
following, from first to last:
"network"
"default"
The first role that is set will be the one used to enforce if the
node is allowed.
Args:
public_key (string): The public key belonging to a node on the
network | Below is the instruction that describes the task:
### Input:
Check the public key of a node on the network to see if they are
permitted to participate. The roles being checked are the
following, from first to last:
"network"
"default"
The first role that is set will be the one used to enforce if the
node is allowed.
Args:
public_key (string): The public key belonging to a node on the
network
### Response:
def check_network_role(self, public_key):
""" Check the public key of a node on the network to see if they are
permitted to participate. The roles being checked are the
following, from first to last:
"network"
"default"
The first role that is set will be the one used to enforce if the
node is allowed.
Args:
public_key (string): The public key belonging to a node on the
network
"""
state_root = self._current_root_func()
if state_root == INIT_ROOT_KEY:
LOGGER.debug("Chain head is not set yet. Permit all.")
return True
self._cache.update_view(state_root)
role = self._cache.get_role("network", state_root)
if role is None:
policy_name = "default"
else:
policy_name = role.policy_name
policy = self._cache.get_policy(policy_name, state_root)
if policy is not None:
if not self._allowed(public_key, policy):
LOGGER.debug("Node is not permitted: %s.", public_key)
return False
return True |
def _loglr(self):
r"""Computes the log likelihood ratio,
.. math::
\log \mathcal{L}(\Theta) =
I_0 \left(\left|\sum_i O(h^0_i, d_i)\right|\right) -
\frac{1}{2}\left<h^0_i, h^0_i\right>,
at the current point in parameter space :math:`\Theta`.
Returns
-------
float
The value of the log likelihood ratio evaluated at the given point.
"""
params = self.current_params
try:
wfs = self._waveform_generator.generate(**params)
except NoWaveformError:
return self._nowaveform_loglr()
hh = 0.
hd = 0j
for det, h in wfs.items():
# the kmax of the waveforms may be different than internal kmax
kmax = min(len(h), self._kmax)
if self._kmin >= kmax:
# if the waveform terminates before the filtering low frequency
# cutoff, then the loglr is just 0 for this detector
hh_i = 0.
hd_i = 0j
else:
# whiten the waveform
h[self._kmin:kmax] *= self._weight[det][self._kmin:kmax]
# calculate inner products
hh_i = h[self._kmin:kmax].inner(h[self._kmin:kmax]).real
hd_i = self.data[det][self._kmin:kmax].inner(
h[self._kmin:kmax])
# store
setattr(self._current_stats, '{}_optimal_snrsq'.format(det), hh_i)
hh += hh_i
hd += hd_i
hd = abs(hd)
self._current_stats.maxl_phase = numpy.angle(hd)
return numpy.log(special.i0e(hd)) + hd - 0.5*hh | r"""Computes the log likelihood ratio,
.. math::
\log \mathcal{L}(\Theta) =
I_0 \left(\left|\sum_i O(h^0_i, d_i)\right|\right) -
\frac{1}{2}\left<h^0_i, h^0_i\right>,
at the current point in parameter space :math:`\Theta`.
Returns
-------
float
The value of the log likelihood ratio evaluated at the given point. | Below is the instruction that describes the task:
### Input:
r"""Computes the log likelihood ratio,
.. math::
\log \mathcal{L}(\Theta) =
I_0 \left(\left|\sum_i O(h^0_i, d_i)\right|\right) -
\frac{1}{2}\left<h^0_i, h^0_i\right>,
at the current point in parameter space :math:`\Theta`.
Returns
-------
float
The value of the log likelihood ratio evaluated at the given point.
### Response:
def _loglr(self):
r"""Computes the log likelihood ratio,
.. math::
\log \mathcal{L}(\Theta) =
I_0 \left(\left|\sum_i O(h^0_i, d_i)\right|\right) -
\frac{1}{2}\left<h^0_i, h^0_i\right>,
at the current point in parameter space :math:`\Theta`.
Returns
-------
float
The value of the log likelihood ratio evaluated at the given point.
"""
params = self.current_params
try:
wfs = self._waveform_generator.generate(**params)
except NoWaveformError:
return self._nowaveform_loglr()
hh = 0.
hd = 0j
for det, h in wfs.items():
# the kmax of the waveforms may be different than internal kmax
kmax = min(len(h), self._kmax)
if self._kmin >= kmax:
# if the waveform terminates before the filtering low frequency
# cutoff, then the loglr is just 0 for this detector
hh_i = 0.
hd_i = 0j
else:
# whiten the waveform
h[self._kmin:kmax] *= self._weight[det][self._kmin:kmax]
# calculate inner products
hh_i = h[self._kmin:kmax].inner(h[self._kmin:kmax]).real
hd_i = self.data[det][self._kmin:kmax].inner(
h[self._kmin:kmax])
# store
setattr(self._current_stats, '{}_optimal_snrsq'.format(det), hh_i)
hh += hh_i
hd += hd_i
hd = abs(hd)
self._current_stats.maxl_phase = numpy.angle(hd)
return numpy.log(special.i0e(hd)) + hd - 0.5*hh |
def _plot_MLmodel(ax, sampler, modelidx, e_range, e_npoints, e_unit, sed):
"""compute and plot ML model"""
ML, MLp, MLerr, ML_model = _calc_ML(
sampler, modelidx, e_range=e_range, e_npoints=e_npoints
)
f_unit, sedf = sed_conversion(ML_model[0], ML_model[1].unit, sed)
ax.loglog(
ML_model[0].to(e_unit).value,
(ML_model[1] * sedf).to(f_unit).value,
color="k",
lw=2,
alpha=0.8,
) | compute and plot ML model | Below is the instruction that describes the task:
### Input:
compute and plot ML model
### Response:
def _plot_MLmodel(ax, sampler, modelidx, e_range, e_npoints, e_unit, sed):
"""compute and plot ML model"""
ML, MLp, MLerr, ML_model = _calc_ML(
sampler, modelidx, e_range=e_range, e_npoints=e_npoints
)
f_unit, sedf = sed_conversion(ML_model[0], ML_model[1].unit, sed)
ax.loglog(
ML_model[0].to(e_unit).value,
(ML_model[1] * sedf).to(f_unit).value,
color="k",
lw=2,
alpha=0.8,
) |
def genOutputs(self, code, match):
"""Return a list out template outputs based on the triggers found in
the code and the template they create.
"""
out = sorted((k, match.output(m)) for (k, m) in
self.collectTriggers(match.match, code).items())
out = list(map(lambda a: a[1], out))
return out | Return a list of template outputs based on the triggers found in
the code and the template they create. | Below is the instruction that describes the task:
### Input:
Return a list of template outputs based on the triggers found in
the code and the template they create.
### Response:
def genOutputs(self, code, match):
"""Return a list out template outputs based on the triggers found in
the code and the template they create.
"""
out = sorted((k, match.output(m)) for (k, m) in
self.collectTriggers(match.match, code).items())
out = list(map(lambda a: a[1], out))
return out |
def get_signalcheck(self, sar, **params):
"""get_signalcheck - perform a signal check.
Parameters
----------
sar : dict
signal-api-request specified as a dictionary of parameters.
All of these parameters are optional. For details
check https://api.postcode.nl/documentation/signal-api-example.
returns :
a response dictionary
"""
params = sar
endpoint = 'rest/signal/check'
# The 'sar'-request dictionary should be sent as valid JSON data, so
# we need to convert it to JSON
# when we construct the request in API.request
retValue = self._API__request(endpoint, 'POST',
params=params, convJSON=True)
return retValue | get_signalcheck - perform a signal check.
Parameters
----------
sar : dict
signal-api-request specified as a dictionary of parameters.
All of these parameters are optional. For details
check https://api.postcode.nl/documentation/signal-api-example.
returns :
a response dictionary | Below is the instruction that describes the task:
### Input:
get_signalcheck - perform a signal check.
Parameters
----------
sar : dict
signal-api-request specified as a dictionary of parameters.
All of these parameters are optional. For details
check https://api.postcode.nl/documentation/signal-api-example.
returns :
a response dictionary
### Response:
def get_signalcheck(self, sar, **params):
"""get_signalcheck - perform a signal check.
Parameters
----------
sar : dict
signal-api-request specified as a dictionary of parameters.
All of these parameters are optional. For details
check https://api.postcode.nl/documentation/signal-api-example.
returns :
a response dictionary
"""
params = sar
endpoint = 'rest/signal/check'
# The 'sar'-request dictionary should be sent as valid JSON data, so
# we need to convert it to JSON
# when we construct the request in API.request
retValue = self._API__request(endpoint, 'POST',
params=params, convJSON=True)
return retValue |
def confirm(*args, **kwargs):
"""Prompt for confirmation (yes/no) and handle any abort exceptions."""
try:
return click.confirm(*args, **kwargs)
except click.Abort:
return False | Prompt for confirmation (yes/no) and handle any abort exceptions. | Below is the instruction that describes the task:
### Input:
Prompt for confirmation (yes/no) and handle any abort exceptions.
### Response:
def confirm(*args, **kwargs):
"""Prompt for confirmation (yes/no) and handle any abort exceptions."""
try:
return click.confirm(*args, **kwargs)
except click.Abort:
return False |
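A small usage sketch, assuming click is installed; the prompt text is made up for illustration.

import click  # the wrapper above delegates to click.confirm

if confirm("Proceed with the destructive operation?", default=False):
    click.echo("Confirmed.")
else:
    click.echo("Declined or aborted with Ctrl+C.")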
def replace_from_url(self, url, **kwds):
"""
Endpoint: /photo/<id>replace.json
Import a photo from the specified URL to replace this photo.
"""
result = self._client.photo.replace_from_url(self, url, **kwds)
self._replace_fields(result.get_fields()) | Endpoint: /photo/<id>replace.json
Import a photo from the specified URL to replace this photo. | Below is the instruction that describes the task:
### Input:
Endpoint: /photo/<id>replace.json
Import a photo from the specified URL to replace this photo.
### Response:
def replace_from_url(self, url, **kwds):
"""
Endpoint: /photo/<id>replace.json
Import a photo from the specified URL to replace this photo.
"""
result = self._client.photo.replace_from_url(self, url, **kwds)
self._replace_fields(result.get_fields()) |
def pip_upgrade_all_user(line):
"""Attempt to upgrade all packages installed with --user"""
import pip
for dist in pip.get_installed_distributions(user_only=True):
do_pip(["install", "--upgrade", "--user", dist.project_name]) | Attempt to upgrade all packages installed with --user | Below is the the instruction that describes the task:
### Input:
Attempt to upgrade all packages installed with --user
### Response:
def pip_upgrade_all_user(line):
"""Attempt to upgrade all packages installed with --user"""
import pip
for dist in pip.get_installed_distributions(user_only=True):
do_pip(["install", "--upgrade", "--user", dist.project_name]) |
def recombine(self, other, d=0.7):
"""
Genetic recombination of two themes using cut and splice technique.
"""
a, b = self, other
d1 = max(0, min(d, 1))
d2 = d1
c = ColorTheme(
name=a.name[:int(len(a.name) * d1)] +
b.name[int(len(b.name) * d2):],
ranges=a.ranges[:int(len(a.ranges) * d1)] +
b.ranges[int(len(b.ranges) * d2):],
top=a.top,
cache=os.path.join(DEFAULT_CACHE, "recombined"),
blue=a.blue,
length=a.length * d1 + b.length * d2
)
c.tags = a.tags[:int(len(a.tags) * d1)]
c.tags += b.tags[int(len(b.tags) * d2):]
return c | Genetic recombination of two themes using cut and splice technique. | Below is the instruction that describes the task:
### Input:
Genetic recombination of two themes using cut and splice technique.
### Response:
def recombine(self, other, d=0.7):
"""
Genetic recombination of two themes using cut and splice technique.
"""
a, b = self, other
d1 = max(0, min(d, 1))
d2 = d1
c = ColorTheme(
name=a.name[:int(len(a.name) * d1)] +
b.name[int(len(b.name) * d2):],
ranges=a.ranges[:int(len(a.ranges) * d1)] +
b.ranges[int(len(b.ranges) * d2):],
top=a.top,
cache=os.path.join(DEFAULT_CACHE, "recombined"),
blue=a.blue,
length=a.length * d1 + b.length * d2
)
c.tags = a.tags[:int(len(a.tags) * d1)]
c.tags += b.tags[int(len(b.tags) * d2):]
return c |
def add_contents(self, dest, contents):
"""Add file contents to the archive under ``dest``.
If ``dest`` is a path, it will be added compressed and world-readable
(user-writeable). You may also pass a :py:class:`~zipfile.ZipInfo` for
custom behavior.
"""
assert not self._closed, "Archive closed"
if not isinstance(dest, zipfile.ZipInfo):
dest = zinfo(dest) # see for some caveats
# Ensure we apply the compression
dest.compress_type = self.zip_compression
# Mark host OS as Linux for all archives
dest.create_system = 3
self._zip_file.writestr(dest, contents) | Add file contents to the archive under ``dest``.
If ``dest`` is a path, it will be added compressed and world-readable
(user-writeable). You may also pass a :py:class:`~zipfile.ZipInfo` for
custom behavior. | Below is the instruction that describes the task:
### Input:
Add file contents to the archive under ``dest``.
If ``dest`` is a path, it will be added compressed and world-readable
(user-writeable). You may also pass a :py:class:`~zipfile.ZipInfo` for
custom behavior.
### Response:
def add_contents(self, dest, contents):
"""Add file contents to the archive under ``dest``.
If ``dest`` is a path, it will be added compressed and world-readable
(user-writeable). You may also pass a :py:class:`~zipfile.ZipInfo` for
custom behavior.
"""
assert not self._closed, "Archive closed"
if not isinstance(dest, zipfile.ZipInfo):
dest = zinfo(dest) # see for some caveats
# Ensure we apply the compression
dest.compress_type = self.zip_compression
# Mark host OS as Linux for all archives
dest.create_system = 3
self._zip_file.writestr(dest, contents) |
def get_context_tags(self):
""" Returns a dictionary of context tag key/value pairs.
"""
self.assert_open()
if self.has_context_tags:
tags = self.handle[self.global_key + 'context_tags'].attrs.items()
return {key: _clean(value) for key, value in tags}
return {} | Returns a dictionary of context tag key/value pairs. | Below is the instruction that describes the task:
### Input:
Returns a dictionary of context tag key/value pairs.
### Response:
def get_context_tags(self):
""" Returns a dictionary of context tag key/value pairs.
"""
self.assert_open()
if self.has_context_tags:
tags = self.handle[self.global_key + 'context_tags'].attrs.items()
return {key: _clean(value) for key, value in tags}
return {} |
def fragment(args):
"""
%prog fragment fastafile enzyme
Cut the fastafile using the specified enzyme, and grab upstream and
downstream nucleotide sequence along with the cut site. In this case, the
sequences extracted are:
|- PstI
============|===========
(-------)
Sometimes we need to limit the size of the restriction fragments, for
example the GBS protocol does not allow fragments larger than 800bp.
|-PstI |- PstI |- PstI
~~~====|=============|==========~~~~~~~===|============
(---) (---)
In this case, the second fragment is longer than 800bp, therefore the two
ends are NOT extracted, as in the first fragment.
"""
p = OptionParser(fragment.__doc__)
p.add_option("--flank", default=150, type="int",
help="Extract flanking bases of the cut sites [default: %default]")
p.add_option("--full", default=False, action="store_true",
help="The full extraction mode [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, enzyme = args
flank = opts.flank
assert flank > 0
extract = extract_full if opts.full else extract_ends
tag = "full" if opts.full else "ends"
assert enzyme in set(str(x) for x in AllEnzymes)
fragfastafile = fastafile.split(".")[0] + \
".{0}.flank{1}.{2}.fasta".format(enzyme, flank, tag)
enzyme = [x for x in AllEnzymes if str(x) == enzyme][0]
f = Fasta(fastafile, lazy=True)
fw = open(fragfastafile, "w")
for name, rec in f.iteritems_ordered():
a = Analysis([enzyme], rec.seq)
sites = a.full()[enzyme]
extract(rec, sites, flank, fw)
logging.debug("Fragments written to `{0}`.".format(fragfastafile)) | %prog fragment fastafile enzyme
Cut the fastafile using the specified enzyme, and grab upstream and
downstream nucleotide sequence along with the cut site. In this case, the
sequences extracted are:
|- PstI
============|===========
(-------)
Sometimes we need to limit the size of the restriction fragments, for
example the GBS protocol does not allow fragments larger than 800bp.
|-PstI |- PstI |- PstI
~~~====|=============|==========~~~~~~~===|============
(---) (---)
In this case, the second fragment is longer than 800bp, therefore the two
ends are NOT extracted, as in the first fragment. | Below is the instruction that describes the task:
### Input:
%prog fragment fastafile enzyme
Cut the fastafile using the specified enzyme, and grab upstream and
downstream nucleotide sequence along with the cut site. In this case, the
sequences extracted are:
|- PstI
============|===========
(-------)
Sometimes we need to limit the size of the restriction fragments, for
example the GBS protocol does not allow fragments larger than 800bp.
|-PstI |- PstI |- PstI
~~~====|=============|==========~~~~~~~===|============
(---) (---)
In this case, the second fragment is longer than 800bp, therefore the two
ends are NOT extracted, as in the first fragment.
### Response:
def fragment(args):
"""
%prog fragment fastafile enzyme
Cut the fastafile using the specified enzyme, and grab upstream and
downstream nucleotide sequence along with the cut site. In this case, the
sequences extracted are:
|- PstI
============|===========
(-------)
Sometimes we need to limit the size of the restriction fragments, for
example the GBS protocol does not allow fragments larger than 800bp.
|-PstI |- PstI |- PstI
~~~====|=============|==========~~~~~~~===|============
(---) (---)
In this case, the second fragment is longer than 800bp, therefore the two
ends are NOT extracted, as in the first fragment.
"""
p = OptionParser(fragment.__doc__)
p.add_option("--flank", default=150, type="int",
help="Extract flanking bases of the cut sites [default: %default]")
p.add_option("--full", default=False, action="store_true",
help="The full extraction mode [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, enzyme = args
flank = opts.flank
assert flank > 0
extract = extract_full if opts.full else extract_ends
tag = "full" if opts.full else "ends"
assert enzyme in set(str(x) for x in AllEnzymes)
fragfastafile = fastafile.split(".")[0] + \
".{0}.flank{1}.{2}.fasta".format(enzyme, flank, tag)
enzyme = [x for x in AllEnzymes if str(x) == enzyme][0]
f = Fasta(fastafile, lazy=True)
fw = open(fragfastafile, "w")
for name, rec in f.iteritems_ordered():
a = Analysis([enzyme], rec.seq)
sites = a.full()[enzyme]
extract(rec, sites, flank, fw)
logging.debug("Fragments written to `{0}`.".format(fragfastafile)) |
def baby_names(max_length=15):
"""Opens the baby_names csv file and produces numpy array.
Args:
max_length: The maximum length, 15 was the longest name when this was
written. Short entries will be padded with the EOS marker.
Returns:
A numpy array of the names converted to ascii codes, the labels and an
array of lengths.
Raises:
ValueError: if max_length is too small.
"""
names = []
lengths = []
targets = []
with open(os.path.join(os.path.dirname(sys.modules[__name__].__file__),
'baby_names.csv'), 'rb') as f:
first = True
for l in csv.reader(f, delimiter=','):
if first:
first = False
continue
assert len(l) == 4, l
name = l[0]
if max_length < len(name):
raise ValueError('Max length is too small: %d > %d' %
(max_length, len(name)))
chars = [convert_to_int(c) for c in name]
names.append(chars + ([EOS] * (max_length - len(chars))))
lengths.append([len(name)])
values = [float(l[2]), float(l[3])]
if abs(sum(values) - 1) > 0.001:
raise ValueError('Each row must sum to 1: %s' % l)
targets.append(values)
return np.array(names), np.array(targets), np.array(lengths) | Opens the baby_names csv file and produces numpy array.
Args:
max_length: The maximum length, 15 was the longest name when this was
written. Short entries will be padded with the EOS marker.
Returns:
A numpy array of the names converted to ascii codes, the labels and an
array of lengths.
Raises:
ValueError: if max_length is too small. | Below is the instruction that describes the task:
### Input:
Opens the baby_names csv file and produces numpy array.
Args:
max_length: The maximum length, 15 was the longest name when this was
written. Short entries will be padded with the EOS marker.
Returns:
A numpy array of the names converted to ascii codes, the labels and an
array of lengths.
Raises:
ValueError: if max_length is too small.
### Response:
def baby_names(max_length=15):
"""Opens the baby_names csv file and produces numpy array.
Args:
max_length: The maximum length, 15 was the longest name when this was
written. Short entries will be padded with the EOS marker.
Returns:
A numpy array of the names converted to ascii codes, the labels and an
array of lengths.
Raises:
ValueError: if max_length is too small.
"""
names = []
lengths = []
targets = []
with open(os.path.join(os.path.dirname(sys.modules[__name__].__file__),
'baby_names.csv'), 'rb') as f:
first = True
for l in csv.reader(f, delimiter=','):
if first:
first = False
continue
assert len(l) == 4, l
name = l[0]
if max_length < len(name):
raise ValueError('Max length is too small: %d > %d' %
(max_length, len(name)))
chars = [convert_to_int(c) for c in name]
names.append(chars + ([EOS] * (max_length - len(chars))))
lengths.append([len(name)])
values = [float(l[2]), float(l[3])]
if abs(sum(values) - 1) > 0.001:
raise ValueError('Each row must sum to 1: %s' % l)
targets.append(values)
return np.array(names), np.array(targets), np.array(lengths) |
def destroy(
self, request, pk=None, parent_lookup_seedteam=None,
parent_lookup_seedteam__organization=None):
'''Remove a user from an organization.'''
user = self.get_object()
team = self.check_team_permissions(
request, parent_lookup_seedteam,
parent_lookup_seedteam__organization)
team.users.remove(user)
return Response(status=status.HTTP_204_NO_CONTENT) | Remove a user from an organization. | Below is the instruction that describes the task:
### Input:
Remove a user from an organization.
### Response:
def destroy(
self, request, pk=None, parent_lookup_seedteam=None,
parent_lookup_seedteam__organization=None):
'''Remove a user from an organization.'''
user = self.get_object()
team = self.check_team_permissions(
request, parent_lookup_seedteam,
parent_lookup_seedteam__organization)
team.users.remove(user)
return Response(status=status.HTTP_204_NO_CONTENT) |
def compose_gerrit(projects):
""" Compose projects.json for gerrit, but using the git lists
change: 'http://git.eclipse.org/gitroot/xwt/org.eclipse.xwt.git'
to: 'git.eclipse.org_xwt/org.eclipse.xwt
:param projects: projects.json
:return: projects.json with gerrit
"""
git_projects = [project for project in projects if 'git' in projects[project]]
for project in git_projects:
repos = [repo for repo in projects[project]['git'] if 'gitroot' in repo]
if len(repos) > 0:
projects[project]['gerrit'] = []
for repo in repos:
gerrit_project = repo.replace("http://git.eclipse.org/gitroot/", "")
gerrit_project = gerrit_project.replace(".git", "")
projects[project]['gerrit'].append("git.eclipse.org_" + gerrit_project)
return projects | Compose projects.json for gerrit, but using the git lists
change: 'http://git.eclipse.org/gitroot/xwt/org.eclipse.xwt.git'
to: 'git.eclipse.org_xwt/org.eclipse.xwt
:param projects: projects.json
:return: projects.json with gerrit | Below is the instruction that describes the task:
### Input:
Compose projects.json for gerrit, but using the git lists
change: 'http://git.eclipse.org/gitroot/xwt/org.eclipse.xwt.git'
to: 'git.eclipse.org_xwt/org.eclipse.xwt
:param projects: projects.json
:return: projects.json with gerrit
### Response:
def compose_gerrit(projects):
""" Compose projects.json for gerrit, but using the git lists
change: 'http://git.eclipse.org/gitroot/xwt/org.eclipse.xwt.git'
to: 'git.eclipse.org_xwt/org.eclipse.xwt
:param projects: projects.json
:return: projects.json with gerrit
"""
git_projects = [project for project in projects if 'git' in projects[project]]
for project in git_projects:
repos = [repo for repo in projects[project]['git'] if 'gitroot' in repo]
if len(repos) > 0:
projects[project]['gerrit'] = []
for repo in repos:
gerrit_project = repo.replace("http://git.eclipse.org/gitroot/", "")
gerrit_project = gerrit_project.replace(".git", "")
projects[project]['gerrit'].append("git.eclipse.org_" + gerrit_project)
return projects |
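A self-contained sketch of the transformation, using an invented projects dict; only repositories under gitroot are rewritten.

projects = {
    "xwt": {"git": ["http://git.eclipse.org/gitroot/xwt/org.eclipse.xwt.git"]},
    "docs": {"wiki": ["https://wiki.eclipse.org"]},  # no git section, left untouched
}
print(compose_gerrit(projects)["xwt"]["gerrit"])
# -> ['git.eclipse.org_xwt/org.eclipse.xwt']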
def getParent(abfFname):
"""given an ABF file name, return the ABF of its parent."""
child=os.path.abspath(abfFname)
files=sorted(glob.glob(os.path.dirname(child)+"/*.*"))
parentID=abfFname #its own parent
for fname in files:
if fname.endswith(".abf") and fname.replace(".abf",".TIF") in files:
parentID=os.path.basename(fname).replace(".abf","")
if os.path.basename(child) in fname:
break
return parentID | given an ABF file name, return the ABF of its parent. | Below is the instruction that describes the task:
### Input:
given an ABF file name, return the ABF of its parent.
### Response:
def getParent(abfFname):
"""given an ABF file name, return the ABF of its parent."""
child=os.path.abspath(abfFname)
files=sorted(glob.glob(os.path.dirname(child)+"/*.*"))
parentID=abfFname #its own parent
for fname in files:
if fname.endswith(".abf") and fname.replace(".abf",".TIF") in files:
parentID=os.path.basename(fname).replace(".abf","")
if os.path.basename(child) in fname:
break
return parentID |
def parse_line_headers(self, line):
"""We must build headers carefully: there are multiple blank values
in the header row, and the instrument may just add more for all
we know.
"""
headers = line.split(",")
for i, v in enumerate(headers):
if v:
headers[i] = v
else:
headers[i] = str(i)
self.headers = headers | We must build headers carefully: there are multiple blank values
in the header row, and the instrument may just add more for all
we know. | Below is the instruction that describes the task:
### Input:
We must build headers carefully: there are multiple blank values
in the header row, and the instrument may just add more for all
we know.
### Response:
def parse_line_headers(self, line):
"""We must build headers carefully: there are multiple blank values
in the header row, and the instrument may just add more for all
we know.
"""
headers = line.split(",")
for i, v in enumerate(headers):
if v:
headers[i] = v
else:
headers[i] = str(i)
self.headers = headers |
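A quick sketch of the blank-header handling; types.SimpleNamespace stands in for the parser instance and the header line is invented.

import types

parser = types.SimpleNamespace()
parse_line_headers(parser, "Sample ID,,Result,,Units")
print(parser.headers)  # ['Sample ID', '1', 'Result', '3', 'Units']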
def split_gtf(gtf, sample_size=None, out_dir=None):
"""
split a GTF file into two equal parts, randomly selecting genes.
sample_size will select up to sample_size genes in total
"""
if out_dir:
part1_fn = os.path.basename(os.path.splitext(gtf)[0]) + ".part1.gtf"
part2_fn = os.path.basename(os.path.splitext(gtf)[0]) + ".part2.gtf"
part1 = os.path.join(out_dir, part1_fn)
part2 = os.path.join(out_dir, part2_fn)
if file_exists(part1) and file_exists(part2):
return part1, part2
else:
part1 = tempfile.NamedTemporaryFile(delete=False, suffix=".part1.gtf").name
part2 = tempfile.NamedTemporaryFile(delete=False, suffix=".part2.gtf").name
db = get_gtf_db(gtf)
gene_ids = set([x['gene_id'][0] for x in db.all_features()])
if not sample_size or (sample_size and sample_size > len(gene_ids)):
sample_size = len(gene_ids)
gene_ids = set(random.sample(gene_ids, sample_size))
part1_ids = set(random.sample(gene_ids, sample_size / 2))
part2_ids = gene_ids.difference(part1_ids)
with open(part1, "w") as part1_handle:
for gene in part1_ids:
for feature in db.children(gene):
part1_handle.write(str(feature) + "\n")
with open(part2, "w") as part2_handle:
for gene in part2_ids:
for feature in db.children(gene):
part2_handle.write(str(feature) + "\n")
return part1, part2 | split a GTF file into two equal parts, randomly selecting genes.
sample_size will select up to sample_size genes in total | Below is the instruction that describes the task:
### Input:
split a GTF file into two equal parts, randomly selecting genes.
sample_size will select up to sample_size genes in total
### Response:
def split_gtf(gtf, sample_size=None, out_dir=None):
"""
split a GTF file into two equal parts, randomly selecting genes.
sample_size will select up to sample_size genes in total
"""
if out_dir:
part1_fn = os.path.basename(os.path.splitext(gtf)[0]) + ".part1.gtf"
part2_fn = os.path.basename(os.path.splitext(gtf)[0]) + ".part2.gtf"
part1 = os.path.join(out_dir, part1_fn)
part2 = os.path.join(out_dir, part2_fn)
if file_exists(part1) and file_exists(part2):
return part1, part2
else:
part1 = tempfile.NamedTemporaryFile(delete=False, suffix=".part1.gtf").name
part2 = tempfile.NamedTemporaryFile(delete=False, suffix=".part2.gtf").name
db = get_gtf_db(gtf)
gene_ids = set([x['gene_id'][0] for x in db.all_features()])
if not sample_size or (sample_size and sample_size > len(gene_ids)):
sample_size = len(gene_ids)
gene_ids = set(random.sample(gene_ids, sample_size))
part1_ids = set(random.sample(gene_ids, sample_size / 2))
part2_ids = gene_ids.difference(part1_ids)
with open(part1, "w") as part1_handle:
for gene in part1_ids:
for feature in db.children(gene):
part1_handle.write(str(feature) + "\n")
with open(part2, "w") as part2_handle:
for gene in part2_ids:
for feature in db.children(gene):
part2_handle.write(str(feature) + "\n")
return part1, part2 |
def initialize_constraint_table(cfg_list):
"""Collects all given cfg nodes and initializes the table with value 0."""
for cfg in cfg_list:
constraint_table.update(dict.fromkeys(cfg.nodes, 0)) | Collects all given cfg nodes and initializes the table with value 0. | Below is the instruction that describes the task:
### Input:
Collects all given cfg nodes and initializes the table with value 0.
### Response:
def initialize_constraint_table(cfg_list):
"""Collects all given cfg nodes and initializes the table with value 0."""
for cfg in cfg_list:
constraint_table.update(dict.fromkeys(cfg.nodes, 0)) |
def _check_hosts(service_instance, host, host_names):
'''
Helper function that checks to see if the host provided is a vCenter Server or
an ESXi host. If it's an ESXi host, returns a list of a single host_name.
If a host reference isn't found, we're trying to find a host object for a vCenter
server. Raises a CommandExecutionError in this case, as we need host references to
check against.
'''
if not host_names:
host_name = _get_host_ref(service_instance, host)
if host_name:
host_names = [host]
else:
raise CommandExecutionError('No host reference found. If connecting to a '
'vCenter Server, a list of \'host_names\' must be '
'provided.')
elif not isinstance(host_names, list):
raise CommandExecutionError('\'host_names\' must be a list.')
return host_names | Helper function that checks to see if the host provided is a vCenter Server or
an ESXi host. If it's an ESXi host, returns a list of a single host_name.
If a host reference isn't found, we're trying to find a host object for a vCenter
server. Raises a CommandExecutionError in this case, as we need host references to
check against. | Below is the instruction that describes the task:
### Input:
Helper function that checks to see if the host provided is a vCenter Server or
an ESXi host. If it's an ESXi host, returns a list of a single host_name.
If a host reference isn't found, we're trying to find a host object for a vCenter
server. Raises a CommandExecutionError in this case, as we need host references to
check against.
### Response:
def _check_hosts(service_instance, host, host_names):
'''
Helper function that checks to see if the host provided is a vCenter Server or
an ESXi host. If it's an ESXi host, returns a list of a single host_name.
If a host reference isn't found, we're trying to find a host object for a vCenter
server. Raises a CommandExecutionError in this case, as we need host references to
check against.
'''
if not host_names:
host_name = _get_host_ref(service_instance, host)
if host_name:
host_names = [host]
else:
raise CommandExecutionError('No host reference found. If connecting to a '
'vCenter Server, a list of \'host_names\' must be '
'provided.')
elif not isinstance(host_names, list):
raise CommandExecutionError('\'host_names\' must be a list.')
return host_names |
def _add_new_columns(dataframe, metrics):
"""Add new metrics as new columns to selected pandas dataframe.
Parameters
----------
dataframe : pandas.DataFrame
Selected dataframe needs to be modified.
metrics : metric.EvalMetric
New metrics to be added.
"""
#TODO(leodirac): we don't really need to do this on every update. Optimize
new_columns = set(metrics.keys()) - set(dataframe.columns)
for col in new_columns:
dataframe[col] = None | Add new metrics as new columns to selected pandas dataframe.
Parameters
----------
dataframe : pandas.DataFrame
Selected dataframe needs to be modified.
metrics : metric.EvalMetric
New metrics to be added. | Below is the instruction that describes the task:
### Input:
Add new metrics as new columns to selected pandas dataframe.
Parameters
----------
dataframe : pandas.DataFrame
Selected dataframe needs to be modified.
metrics : metric.EvalMetric
New metrics to be added.
### Response:
def _add_new_columns(dataframe, metrics):
"""Add new metrics as new columns to selected pandas dataframe.
Parameters
----------
dataframe : pandas.DataFrame
Selected dataframe needs to be modified.
metrics : metric.EvalMetric
New metrics to be added.
"""
#TODO(leodirac): we don't really need to do this on every update. Optimize
new_columns = set(metrics.keys()) - set(dataframe.columns)
for col in new_columns:
dataframe[col] = None |
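A minimal sketch, with a plain dict standing in for the EvalMetric object (only its keys() are consulted here).

import pandas as pd

df = pd.DataFrame({"epoch": [0, 1], "accuracy": [0.80, 0.85]})
_add_new_columns(df, {"accuracy": 0.85, "loss": 0.40})
print(df.columns.tolist())  # ['epoch', 'accuracy', 'loss']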
def download_profiles(self, profiles: Set[Profile],
profile_pic: bool = True, posts: bool = True,
tagged: bool = False, highlights: bool = False, stories: bool = False,
fast_update: bool = False,
post_filter: Optional[Callable[[Post], bool]] = None,
storyitem_filter: Optional[Callable[[Post], bool]] = None,
raise_errors: bool = False):
"""High-level method to download set of profiles.
:param profiles: Set of profiles to download.
:param profile_pic: not :option:`--no-profile-pic`.
:param posts: not :option:`--no-posts`.
:param tagged: :option:`--tagged`.
:param highlights: :option:`--highlights`.
:param stories: :option:`--stories`.
:param fast_update: :option:`--fast-update`.
:param post_filter: :option:`--post-filter`.
:param storyitem_filter: :option:`--post-filter`.
:param raise_errors:
Whether :exc:`LoginRequiredException` and :exc:`PrivateProfileNotFollowedException` should be raised or
caught and printed with :meth:`InstaloaderContext.error_catcher`.
.. versionadded:: 4.1"""
def _error_raiser(_str):
yield
error_handler = _error_raiser if raise_errors else self.context.error_catcher
for profile in profiles:
with error_handler(profile.username):
profile_name = profile.username
# Download profile picture
if profile_pic:
with self.context.error_catcher('Download profile picture of {}'.format(profile_name)):
self.download_profilepic(profile)
# Save metadata as JSON if desired.
if self.save_metadata:
json_filename = '{0}/{1}_{2}'.format(self.dirname_pattern.format(profile=profile_name,
target=profile_name),
profile_name, profile.userid)
self.save_metadata_json(json_filename, profile)
# Catch some errors
if profile.is_private and (tagged or highlights or posts):
if not self.context.is_logged_in:
raise LoginRequiredException("--login=USERNAME required.")
if not profile.followed_by_viewer and self.context.username != profile.username:
raise PrivateProfileNotFollowedException("Private but not followed.")
# Download tagged, if requested
if tagged:
with self.context.error_catcher('Download tagged of {}'.format(profile_name)):
self.download_tagged(profile, fast_update=fast_update, post_filter=post_filter)
# Download highlights, if requested
if highlights:
with self.context.error_catcher('Download highlights of {}'.format(profile_name)):
self.download_highlights(profile, fast_update=fast_update, storyitem_filter=storyitem_filter)
# Iterate over pictures and download them
if posts:
self.context.log("Retrieving posts from profile {}.".format(profile_name))
totalcount = profile.mediacount
count = 1
for post in profile.get_posts():
self.context.log("[%3i/%3i] " % (count, totalcount), end="", flush=True)
count += 1
if post_filter is not None and not post_filter(post):
self.context.log('<skipped>')
continue
with self.context.error_catcher("Download {} of {}".format(post, profile_name)):
# The PostChangedException gets raised if the Post's id/shortcode changed while obtaining
# additional metadata. This is most likely the case if a HTTP redirect takes place while
# resolving the shortcode URL.
# The `post_changed` variable keeps the fast-update functionality alive: A Post which is
# obtained after a redirect has probably already been downloaded as a previous Post of the
# same Profile.
# Observed in issue #225: https://github.com/instaloader/instaloader/issues/225
post_changed = False
while True:
try:
downloaded = self.download_post(post, target=profile_name)
break
except PostChangedException:
post_changed = True
continue
if fast_update and not downloaded and not post_changed:
break
if stories and profiles:
with self.context.error_catcher("Download stories"):
self.context.log("Downloading stories")
self.download_stories(userids=list(profiles), fast_update=fast_update, filename_target=None,
storyitem_filter=storyitem_filter) | High-level method to download set of profiles.
:param profiles: Set of profiles to download.
:param profile_pic: not :option:`--no-profile-pic`.
:param posts: not :option:`--no-posts`.
:param tagged: :option:`--tagged`.
:param highlights: :option:`--highlights`.
:param stories: :option:`--stories`.
:param fast_update: :option:`--fast-update`.
:param post_filter: :option:`--post-filter`.
:param storyitem_filter: :option:`--post-filter`.
:param raise_errors:
Whether :exc:`LoginRequiredException` and :exc:`PrivateProfileNotFollowedException` should be raised or
caught and printed with :meth:`InstaloaderContext.error_catcher`.
.. versionadded:: 4.1 | Below is the instruction that describes the task:
### Input:
High-level method to download set of profiles.
:param profiles: Set of profiles to download.
:param profile_pic: not :option:`--no-profile-pic`.
:param posts: not :option:`--no-posts`.
:param tagged: :option:`--tagged`.
:param highlights: :option:`--highlights`.
:param stories: :option:`--stories`.
:param fast_update: :option:`--fast-update`.
:param post_filter: :option:`--post-filter`.
:param storyitem_filter: :option:`--post-filter`.
:param raise_errors:
Whether :exc:`LoginRequiredException` and :exc:`PrivateProfileNotFollowedException` should be raised or
caught and printed with :meth:`InstaloaderContext.error_catcher`.
.. versionadded:: 4.1
### Response:
def download_profiles(self, profiles: Set[Profile],
profile_pic: bool = True, posts: bool = True,
tagged: bool = False, highlights: bool = False, stories: bool = False,
fast_update: bool = False,
post_filter: Optional[Callable[[Post], bool]] = None,
storyitem_filter: Optional[Callable[[Post], bool]] = None,
raise_errors: bool = False):
"""High-level method to download set of profiles.
:param profiles: Set of profiles to download.
:param profile_pic: not :option:`--no-profile-pic`.
:param posts: not :option:`--no-posts`.
:param tagged: :option:`--tagged`.
:param highlights: :option:`--highlights`.
:param stories: :option:`--stories`.
:param fast_update: :option:`--fast-update`.
:param post_filter: :option:`--post-filter`.
:param storyitem_filter: :option:`--post-filter`.
:param raise_errors:
Whether :exc:`LoginRequiredException` and :exc:`PrivateProfileNotFollowedException` should be raised or
caught and printed with :meth:`InstaloaderContext.error_catcher`.
.. versionadded:: 4.1"""
def _error_raiser(_str):
yield
error_handler = _error_raiser if raise_errors else self.context.error_catcher
for profile in profiles:
with error_handler(profile.username):
profile_name = profile.username
# Download profile picture
if profile_pic:
with self.context.error_catcher('Download profile picture of {}'.format(profile_name)):
self.download_profilepic(profile)
# Save metadata as JSON if desired.
if self.save_metadata:
json_filename = '{0}/{1}_{2}'.format(self.dirname_pattern.format(profile=profile_name,
target=profile_name),
profile_name, profile.userid)
self.save_metadata_json(json_filename, profile)
# Catch some errors
if profile.is_private and (tagged or highlights or posts):
if not self.context.is_logged_in:
raise LoginRequiredException("--login=USERNAME required.")
if not profile.followed_by_viewer and self.context.username != profile.username:
raise PrivateProfileNotFollowedException("Private but not followed.")
# Download tagged, if requested
if tagged:
with self.context.error_catcher('Download tagged of {}'.format(profile_name)):
self.download_tagged(profile, fast_update=fast_update, post_filter=post_filter)
# Download highlights, if requested
if highlights:
with self.context.error_catcher('Download highlights of {}'.format(profile_name)):
self.download_highlights(profile, fast_update=fast_update, storyitem_filter=storyitem_filter)
# Iterate over pictures and download them
if posts:
self.context.log("Retrieving posts from profile {}.".format(profile_name))
totalcount = profile.mediacount
count = 1
for post in profile.get_posts():
self.context.log("[%3i/%3i] " % (count, totalcount), end="", flush=True)
count += 1
if post_filter is not None and not post_filter(post):
self.context.log('<skipped>')
continue
with self.context.error_catcher("Download {} of {}".format(post, profile_name)):
# The PostChangedException gets raised if the Post's id/shortcode changed while obtaining
# additional metadata. This is most likely the case if a HTTP redirect takes place while
# resolving the shortcode URL.
# The `post_changed` variable keeps the fast-update functionality alive: A Post which is
# obtained after a redirect has probably already been downloaded as a previous Post of the
# same Profile.
# Observed in issue #225: https://github.com/instaloader/instaloader/issues/225
post_changed = False
while True:
try:
downloaded = self.download_post(post, target=profile_name)
break
except PostChangedException:
post_changed = True
continue
if fast_update and not downloaded and not post_changed:
break
if stories and profiles:
with self.context.error_catcher("Download stories"):
self.context.log("Downloading stories")
self.download_stories(userids=list(profiles), fast_update=fast_update, filename_target=None,
storyitem_filter=storyitem_filter) |
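An illustrative call using the public instaloader API (Instaloader, Profile.from_username); the username is a placeholder and login is only needed for private profiles or stories.

import instaloader

loader = instaloader.Instaloader()
# loader.login("USERNAME", "PASSWORD")  # optional, for private profiles or stories
target = instaloader.Profile.from_username(loader.context, "instagram")
loader.download_profiles({target}, profile_pic=True, posts=False, stories=False)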
def get_agents_by_resource(self, resource_id):
"""Gets the list of ``Agents`` mapped to a ``Resource``.
arg: resource_id (osid.id.Id): ``Id`` of a ``Resource``
return: (osid.authentication.AgentList) - list of agents
raise: NotFound - ``resource_id`` is not found
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
agent_list = []
for agent_id in self.get_agent_ids_by_resource(resource_id):
agent_list.append(Agent(agent_id))
return AgentList(agent_list) | Gets the list of ``Agents`` mapped to a ``Resource``.
arg: resource_id (osid.id.Id): ``Id`` of a ``Resource``
return: (osid.authentication.AgentList) - list of agents
raise: NotFound - ``resource_id`` is not found
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets the list of ``Agents`` mapped to a ``Resource``.
arg: resource_id (osid.id.Id): ``Id`` of a ``Resource``
return: (osid.authentication.AgentList) - list of agents
raise: NotFound - ``resource_id`` is not found
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_agents_by_resource(self, resource_id):
"""Gets the list of ``Agents`` mapped to a ``Resource``.
arg: resource_id (osid.id.Id): ``Id`` of a ``Resource``
return: (osid.authentication.AgentList) - list of agents
raise: NotFound - ``resource_id`` is not found
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
agent_list = []
for agent_id in self.get_agent_ids_by_resource(resource_id):
agent_list.append(Agent(agent_id))
return AgentList(agent_list) |
def run(parser, args, output_file=sys.stdout):
"""Run command line interface."""
# Try loading results file (if requested)
result_storage = {}
if args.store:
args.store.seek(0)
try:
result_storage = pickle.load(args.store)
except EOFError:
pass
args.store.close()
# machine information
# Read machine description
machine = MachineModel(args.machine.name, args=args)
# process kernel
if not args.kernel_description:
code = str(args.code_file.read())
code = clean_code(code)
kernel = KernelCode(code, filename=args.code_file.name, machine=machine,
keep_intermediates=not args.clean_intermediates)
else:
description = str(args.code_file.read())
kernel = KernelDescription(yaml.load(description, Loader=yaml.Loader), machine=machine)
# if no defines were given, guess suitable defines in-mem
# TODO support in-cache
# TODO broaden cases to n-dimensions
# TODO make configurable (no hardcoded 512MB/1GB/min. 3 iteration ...)
# works only for up to 3 dimensions
required_consts = [v[1] for v in kernel.variables.values() if v[1] is not None]
required_consts += [[l['start'], l['stop']] for l in kernel.get_loop_stack()]
# split into individual consts
required_consts = [i for l in required_consts for i in l]
required_consts = set([i for l in required_consts for i in l.free_symbols])
if len(required_consts) > 0:
# build defines permutations
define_dict = {}
for name, values in args.define:
if name not in define_dict:
define_dict[name] = [[name, v] for v in values]
continue
for v in values:
if v not in define_dict[name]:
define_dict[name].append([name, v])
define_product = list(itertools.product(*list(define_dict.values())))
# Check that all consts have been defined
if set(required_consts).difference(set([symbol_pos_int(k) for k in define_dict.keys()])):
raise ValueError("Not all constants have been defined. Required are: {}".format(
required_consts))
else:
define_product = [{}]
for define in define_product:
# Reset state of kernel
kernel.clear_state()
# Add constants from define arguments
for k, v in define:
kernel.set_constant(k, v)
for model_name in uniquify(args.pmodel):
# print header
print('{:^80}'.format(' kerncraft '), file=output_file)
print('{:<40}{:>40}'.format(args.code_file.name, '-m ' + args.machine.name),
file=output_file)
print(' '.join(['-D {} {}'.format(k, v) for k, v in define]), file=output_file)
print('{:-^80}'.format(' ' + model_name + ' '), file=output_file)
if args.verbose > 1:
if not args.kernel_description:
kernel.print_kernel_code(output_file=output_file)
print('', file=output_file)
kernel.print_variables_info(output_file=output_file)
kernel.print_kernel_info(output_file=output_file)
if args.verbose > 0:
kernel.print_constants_info(output_file=output_file)
model = getattr(models, model_name)(kernel, machine, args, parser)
model.analyze()
model.report(output_file=output_file)
# Add results to storage
kernel_name = os.path.split(args.code_file.name)[1]
if kernel_name not in result_storage:
result_storage[kernel_name] = {}
if tuple(kernel.constants.items()) not in result_storage[kernel_name]:
result_storage[kernel_name][tuple(kernel.constants.items())] = {}
result_storage[kernel_name][tuple(kernel.constants.items())][model_name] = \
model.results
print('', file=output_file)
# Save storage to file (if requested)
if args.store:
temp_name = args.store.name + '.tmp'
with open(temp_name, 'wb+') as f:
pickle.dump(result_storage, f)
shutil.move(temp_name, args.store.name) | Run command line interface. | Below is the instruction that describes the task:
### Input:
Run command line interface.
### Response:
def run(parser, args, output_file=sys.stdout):
"""Run command line interface."""
# Try loading results file (if requested)
result_storage = {}
if args.store:
args.store.seek(0)
try:
result_storage = pickle.load(args.store)
except EOFError:
pass
args.store.close()
# machine information
# Read machine description
machine = MachineModel(args.machine.name, args=args)
# process kernel
if not args.kernel_description:
code = str(args.code_file.read())
code = clean_code(code)
kernel = KernelCode(code, filename=args.code_file.name, machine=machine,
keep_intermediates=not args.clean_intermediates)
else:
description = str(args.code_file.read())
kernel = KernelDescription(yaml.load(description, Loader=yaml.Loader), machine=machine)
# if no defines were given, guess suitable defines in-mem
# TODO support in-cache
# TODO broaden cases to n-dimensions
# TODO make configurable (no hardcoded 512MB/1GB/min. 3 iteration ...)
# works only for up to 3 dimensions
required_consts = [v[1] for v in kernel.variables.values() if v[1] is not None]
required_consts += [[l['start'], l['stop']] for l in kernel.get_loop_stack()]
# split into individual consts
required_consts = [i for l in required_consts for i in l]
required_consts = set([i for l in required_consts for i in l.free_symbols])
if len(required_consts) > 0:
# build defines permutations
define_dict = {}
for name, values in args.define:
if name not in define_dict:
define_dict[name] = [[name, v] for v in values]
continue
for v in values:
if v not in define_dict[name]:
define_dict[name].append([name, v])
define_product = list(itertools.product(*list(define_dict.values())))
# Check that all consts have been defined
if set(required_consts).difference(set([symbol_pos_int(k) for k in define_dict.keys()])):
raise ValueError("Not all constants have been defined. Required are: {}".format(
required_consts))
else:
define_product = [{}]
for define in define_product:
# Reset state of kernel
kernel.clear_state()
# Add constants from define arguments
for k, v in define:
kernel.set_constant(k, v)
for model_name in uniquify(args.pmodel):
# print header
print('{:^80}'.format(' kerncraft '), file=output_file)
print('{:<40}{:>40}'.format(args.code_file.name, '-m ' + args.machine.name),
file=output_file)
print(' '.join(['-D {} {}'.format(k, v) for k, v in define]), file=output_file)
print('{:-^80}'.format(' ' + model_name + ' '), file=output_file)
if args.verbose > 1:
if not args.kernel_description:
kernel.print_kernel_code(output_file=output_file)
print('', file=output_file)
kernel.print_variables_info(output_file=output_file)
kernel.print_kernel_info(output_file=output_file)
if args.verbose > 0:
kernel.print_constants_info(output_file=output_file)
model = getattr(models, model_name)(kernel, machine, args, parser)
model.analyze()
model.report(output_file=output_file)
# Add results to storage
kernel_name = os.path.split(args.code_file.name)[1]
if kernel_name not in result_storage:
result_storage[kernel_name] = {}
if tuple(kernel.constants.items()) not in result_storage[kernel_name]:
result_storage[kernel_name][tuple(kernel.constants.items())] = {}
result_storage[kernel_name][tuple(kernel.constants.items())][model_name] = \
model.results
print('', file=output_file)
# Save storage to file (if requested)
if args.store:
temp_name = args.store.name + '.tmp'
with open(temp_name, 'wb+') as f:
pickle.dump(result_storage, f)
shutil.move(temp_name, args.store.name) |
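A note on the define-permutation step in run() above: each constant name maps to a list of [name, value] pairs and itertools.product yields one combination of defines per kernel run. A minimal standalone sketch (the names define_dict and combo are illustrative, not kerncraft's):
import itertools
# Each name maps to candidate [name, value] pairs; product() yields one
# combination of defines per run, mirroring the loop in run() above.
define_dict = {'N': [['N', 1024], ['N', 2048]], 'M': [['M', 512]]}
for combo in itertools.product(*define_dict.values()):
    print(dict(combo))
# {'N': 1024, 'M': 512}
# {'N': 2048, 'M': 512}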
def inflect(self, text):
"""
Perform inflections in a string.
e.g. inflect('The plural of cat is plural(cat)') returns
'The plural of cat is cats'
can use plural, plural_noun, plural_verb, plural_adj,
singular_noun, a, an, no, ordinal, number_to_words,
and prespart
"""
save_persistent_count = self.persistent_count
# Dictionary of allowed methods
methods_dict = {
"plural": self.plural,
"plural_adj": self.plural_adj,
"plural_noun": self.plural_noun,
"plural_verb": self.plural_verb,
"singular_noun": self.singular_noun,
"a": self.a,
"an": self.a,
"no": self.no,
"ordinal": self.ordinal,
"number_to_words": self.number_to_words,
"present_participle": self.present_participle,
"num": self.num,
}
# Regular expression to find Python's function call syntax
functions_re = re.compile(r"((\w+)\([^)]*\)*)", re.IGNORECASE)
output = functions_re.sub(
lambda mo: self._string_to_substitute(mo, methods_dict), text
)
self.persistent_count = save_persistent_count
return output | Perform inflections in a string.
e.g. inflect('The plural of cat is plural(cat)') returns
'The plural of cat is cats'
can use plural, plural_noun, plural_verb, plural_adj,
singular_noun, a, an, no, ordinal, number_to_words,
and prespart | Below is the instruction that describes the task:
### Input:
Perform inflections in a string.
e.g. inflect('The plural of cat is plural(cat)') returns
'The plural of cat is cats'
can use plural, plural_noun, plural_verb, plural_adj,
singular_noun, a, an, no, ordinal, number_to_words,
and prespart
### Response:
def inflect(self, text):
"""
Perform inflections in a string.
e.g. inflect('The plural of cat is plural(cat)') returns
'The plural of cat is cats'
can use plural, plural_noun, plural_verb, plural_adj,
singular_noun, a, an, no, ordinal, number_to_words,
and prespart
"""
save_persistent_count = self.persistent_count
# Dictionary of allowed methods
methods_dict = {
"plural": self.plural,
"plural_adj": self.plural_adj,
"plural_noun": self.plural_noun,
"plural_verb": self.plural_verb,
"singular_noun": self.singular_noun,
"a": self.a,
"an": self.a,
"no": self.no,
"ordinal": self.ordinal,
"number_to_words": self.number_to_words,
"present_participle": self.present_participle,
"num": self.num,
}
# Regular expression to find Python's function call syntax
functions_re = re.compile(r"((\w+)\([^)]*\)*)", re.IGNORECASE)
output = functions_re.sub(
lambda mo: self._string_to_substitute(mo, methods_dict), text
)
self.persistent_count = save_persistent_count
return output |
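A quick usage sketch, assuming the published inflect package, whose engine objects expose this method (expected outputs shown as comments):
import inflect
p = inflect.engine()
# Function-call syntax inside the string is replaced by the matching method's result.
print(p.inflect("The plural of cat is plural(cat)"))  # The plural of cat is cats
print(p.inflect("I was ordinal(4) in line"))          # I was 4th in line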
def get_factory_kwargs(self):
"""
Returns the keyword arguments for calling the formset factory
"""
kwargs = super(BaseInlineFormSetFactory, self).get_factory_kwargs()
kwargs.setdefault('fields', self.fields)
kwargs.setdefault('exclude', self.exclude)
if self.get_form_class():
kwargs['form'] = self.get_form_class()
return kwargs | Returns the keyword arguments for calling the formset factory | Below is the instruction that describes the task:
### Input:
Returns the keyword arguments for calling the formset factory
### Response:
def get_factory_kwargs(self):
"""
Returns the keyword arguments for calling the formset factory
"""
kwargs = super(BaseInlineFormSetFactory, self).get_factory_kwargs()
kwargs.setdefault('fields', self.fields)
kwargs.setdefault('exclude', self.exclude)
if self.get_form_class():
kwargs['form'] = self.get_form_class()
return kwargs |
def calc_variance(grad_dict, num_batches, param_names):
"""Calculates the variance of the gradients per epoch for each parameter w.r.t number of batches
Parameters
----------
grad_dict: dict
dictionary that maps parameter name to gradients in the mod executor group
num_batches: int
number of batches
param_names: str
parameter name in the module
Returns
----------
grad_dict: dict
dictionary with new keys mapping to gradients variance
"""
for i in range(len(param_names)):
diff_sqr = mx.ndarray.square(mx.nd.subtract(grad_dict[param_names[i]],
grad_dict[str.format(param_names[i]+"_expectation")]))
grad_dict[str.format(param_names[i] + "_variance")] = mx.ndarray.sum(diff_sqr, axis=0) / num_batches | Calculates the variance of the gradients per epoch for each parameter w.r.t number of batches
Parameters
----------
grad_dict: dict
dictionary that maps parameter name to gradients in the mod executor group
num_batches: int
number of batches
param_names: str
parameter name in the module
Returns
----------
grad_dict: dict
dictionary with new keys mapping to gradients variance | Below is the instruction that describes the task:
### Input:
Calculates the variance of the gradients per epoch for each parameter w.r.t number of batches
Parameters
----------
grad_dict: dict
dictionary that maps parameter name to gradients in the mod executor group
num_batches: int
number of batches
param_names: str
parameter name in the module
Returns
----------
grad_dict: dict
dictionary with new keys mapping to gradients variance
### Response:
def calc_variance(grad_dict, num_batches, param_names):
"""Calculates the variance of the gradients per epoch for each parameter w.r.t number of batches
Parameters
----------
grad_dict: dict
dictionary that maps parameter name to gradients in the mod executor group
num_batches: int
number of batches
param_names: str
parameter name in the module
Returns
----------
grad_dict: dict
dictionary with new keys mapping to gradients variance
"""
for i in range(len(param_names)):
diff_sqr = mx.ndarray.square(mx.nd.subtract(grad_dict[param_names[i]],
grad_dict[str.format(param_names[i]+"_expectation")]))
grad_dict[str.format(param_names[i] + "_variance")] = mx.ndarray.sum(diff_sqr, axis=0) / num_batches |
def q(self, val):
"""
Setter method for q.
"""
self._q = np.asarray(val)
self.Q = cumsum(val) | Setter method for q. | Below is the instruction that describes the task:
### Input:
Setter method for q.
### Response:
def q(self, val):
"""
Setter method for q.
"""
self._q = np.asarray(val)
self.Q = cumsum(val) |
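The setter's effect is just a running sum; a tiny sketch assuming cumsum here is NumPy's:
import numpy as np
q = np.asarray([0.2, 0.3, 0.5])
Q = np.cumsum(q)  # what the setter stores in self.Q
print(Q)          # [0.2 0.5 1. ]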
def query(self, sql_string, *args, **kwargs):
"""
Execute a DML query
:sql_string: An SQL string template
:*args: Arguments to be passed for query parameters.
:commit: Whether or not to commit the transaction after the query
:returns: Psycopg2 result
"""
commit = None
columns = None
if kwargs.get('commit') is not None:
commit = kwargs.pop('commit')
if kwargs.get('columns') is not None:
columns = kwargs.pop('columns')
query = self._assemble_simple(sql_string, *args, **kwargs)
return self._execute(query, commit=commit, working_columns=columns) | Execute a DML query
:sql_string: An SQL string template
:*args: Arguments to be passed for query parameters.
:commit: Whether or not to commit the transaction after the query
:returns: Psycopg2 result | Below is the instruction that describes the task:
### Input:
Execute a DML query
:sql_string: An SQL string template
:*args: Arguments to be passed for query parameters.
:commit: Whether or not to commit the transaction after the query
:returns: Psycopg2 result
### Response:
def query(self, sql_string, *args, **kwargs):
"""
Execute a DML query
:sql_string: An SQL string template
:*args: Arguments to be passed for query parameters.
:commit: Whether or not to commit the transaction after the query
:returns: Psycopg2 result
"""
commit = None
columns = None
if kwargs.get('commit') is not None:
commit = kwargs.pop('commit')
if kwargs.get('columns') is not None:
columns = kwargs.pop('columns')
query = self._assemble_simple(sql_string, *args, **kwargs)
return self._execute(query, commit=commit, working_columns=columns) |
def _find_longest_internal_edge(self, node):
'''return the node that has the longest branch length between the given
node and the root
Parameters
----------
node: dendropy.Tree
a node from the tree
Returns
-------
The node that has the largest length between the node and the root_node
'''
max_length = -1
max_edge = None
while node and not node.edge.rootedge:
if node.edge.length > max_length:
max_edge = node.edge
max_length = max_edge.length
node = node.parent_node
return max_edge | return the node that has the longest branch length between the given
node and the root
Parameters
----------
node: dendropy.Tree
a node from the tree
Returns
-------
The node that has the largest length between the node and the root_node | Below is the instruction that describes the task:
### Input:
return the node that has the longest branch length between the given
node and the root
Parameters
----------
node: dendropy.Tree
a node from the tree
Returns
-------
The node that has the largest length between the node and the root_node
### Response:
def _find_longest_internal_edge(self, node):
'''return the node that has the longest branch length between the given
node and the root
Parameters
----------
node: dendropy.Tree
a node from the tree
Returns
-------
The node that has the largest length between the node and the root_node
'''
max_length = -1
max_edge = None
while node and not node.edge.rootedge:
if node.edge.length > max_length:
max_edge = node.edge
max_length = max_edge.length
node = node.parent_node
return max_edge |
def L_diffuser_outer(sed_inputs=sed_dict):
"""Return the outer length of each diffuser in the sedimentation tank.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Outer length of each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>>
"""
return ((sed_inputs['manifold']['diffuser']['A'] /
(2 * sed_inputs['manifold']['diffuser']['thickness_wall']))
- w_diffuser_inner(sed_inputs).to(u.inch)).to(u.m).magnitude | Return the outer length of each diffuser in the sedimentation tank.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Outer length of each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>> | Below is the instruction that describes the task:
### Input:
Return the outer length of each diffuser in the sedimentation tank.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Outer length of each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>>
### Response:
def L_diffuser_outer(sed_inputs=sed_dict):
"""Return the outer length of each diffuser in the sedimentation tank.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Outer length of each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>>
"""
return ((sed_inputs['manifold']['diffuser']['A'] /
(2 * sed_inputs['manifold']['diffuser']['thickness_wall']))
- w_diffuser_inner(sed_inputs).to(u.inch)).to(u.m).magnitude |
def query(self, w, ed=1): # Can only handle ed=1
"""
Finds the fuzzy matches (within edit distance 1) of w from words
"""
assert ed <= self._ed
if ed == 0:
return [w] if w in self._L else ['']
w = str(w)
n = len(w)
prefix, suffix = w[:n // 2], w[n // 2:][::-1]
options_w_prefix = self._L.keys(prefix)
options_w_suffix = [x[::-1] for x in self._R.iterkeys(suffix)]
return [
_w
for _w in set(itertools.chain(options_w_prefix, options_w_suffix))
if abs(len(_w) - len(w)) <= 1 and lvdistance(str(_w), str(w), 1) <= 1
] | Finds the fuzzy matches (within edit distance 1) of w from words | Below is the instruction that describes the task:
### Input:
Finds the fuzzy matches (within edit distance 1) of w from words
### Response:
def query(self, w, ed=1): # Can only handle ed=1
"""
Finds the fuzzy matches (within edit distance 1) of w from words
"""
assert ed <= self._ed
if ed == 0:
return [w] if w in self._L else ['']
w = str(w)
n = len(w)
prefix, suffix = w[:n // 2], w[n // 2:][::-1]
options_w_prefix = self._L.keys(prefix)
options_w_suffix = [x[::-1] for x in self._R.iterkeys(suffix)]
return [
_w
for _w in set(itertools.chain(options_w_prefix, options_w_suffix))
if abs(len(_w) - len(w)) <= 1 and lvdistance(str(_w), str(w), 1) <= 1
] |
def _is_intrinsic_dict(self, input):
"""
Can the input represent an intrinsic function in it?
:param input: Object to be checked
:return: True, if the input contains a supported intrinsic function. False otherwise
"""
# All intrinsic functions are dictionaries with just one key
return isinstance(input, dict) \
and len(input) == 1 \
and list(input.keys())[0] in self.supported_intrinsics | Can the input represent an intrinsic function in it?
:param input: Object to be checked
:return: True, if the input contains a supported intrinsic function. False otherwise | Below is the instruction that describes the task:
### Input:
Can the input represent an intrinsic function in it?
:param input: Object to be checked
:return: True, if the input contains a supported intrinsic function. False otherwise
### Response:
def _is_intrinsic_dict(self, input):
"""
Can the input represent an intrinsic function in it?
:param input: Object to be checked
:return: True, if the input contains a supported intrinsic function. False otherwise
"""
# All intrinsic functions are dictionaries with just one key
return isinstance(input, dict) \
and len(input) == 1 \
and list(input.keys())[0] in self.supported_intrinsics |
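A standalone sketch of the same shape check, with a hand-picked set standing in for self.supported_intrinsics (the three names below are assumptions for illustration only):
SUPPORTED_INTRINSICS = {"Ref", "Fn::GetAtt", "Fn::Sub"}
def is_intrinsic_dict(value):
    # A candidate intrinsic is a single-key dict whose key is a known function name.
    return (isinstance(value, dict) and len(value) == 1
            and list(value.keys())[0] in SUPPORTED_INTRINSICS)
print(is_intrinsic_dict({"Ref": "MyBucket"}))     # True
print(is_intrinsic_dict({"Ref": "A", "B": "C"}))  # False - two keys
print(is_intrinsic_dict("Ref"))                   # False - not a dict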
def is_absolute(self):
"""
Validates that uri contains all parts except version
"""
return self.namespace and self.ext and self.scheme and self.path | Validates that uri contains all parts except version | Below is the instruction that describes the task:
### Input:
Validates that uri contains all parts except version
### Response:
def is_absolute(self):
"""
Validates that uri contains all parts except version
"""
return self.namespace and self.ext and self.scheme and self.path |
def until(coro, coro_test, assert_coro=None, *args, **kw):
"""
Repeatedly call `coro` coroutine function until `coro_test` returns `True`.
This function is the inverse of `paco.whilst()`.
This function is a coroutine.
Arguments:
coro (coroutinefunction): coroutine function to execute.
coro_test (coroutinefunction): coroutine function to test.
assert_coro (coroutinefunction): optional assertion coroutine used
to determine if the test passed or not.
*args (mixed): optional variadic arguments to pass to `coro` function.
Raises:
TypeError: if input arguments are invalid.
Returns:
list: result values returned by `coro`.
Usage::
calls = 0
async def task():
nonlocal calls
calls += 1
return calls
async def calls_gt_4():
return calls > 4
await paco.until(task, calls_gt_4)
# => [1, 2, 3, 4, 5]
"""
@asyncio.coroutine
def assert_coro(value):
return not value
return (yield from whilst(coro, coro_test,
assert_coro=assert_coro, *args, **kw)) | Repeatedly call `coro` coroutine function until `coro_test` returns `True`.
This function is the inverse of `paco.whilst()`.
This function is a coroutine.
Arguments:
coro (coroutinefunction): coroutine function to execute.
coro_test (coroutinefunction): coroutine function to test.
assert_coro (coroutinefunction): optional assertion coroutine used
to determine if the test passed or not.
*args (mixed): optional variadic arguments to pass to `coro` function.
Raises:
TypeError: if input arguments are invalid.
Returns:
list: result values returned by `coro`.
Usage::
calls = 0
async def task():
nonlocal calls
calls += 1
return calls
async def calls_gt_4():
return calls > 4
await paco.until(task, calls_gt_4)
# => [1, 2, 3, 4, 5] | Below is the instruction that describes the task:
### Input:
Repeatedly call `coro` coroutine function until `coro_test` returns `True`.
This function is the inverse of `paco.whilst()`.
This function is a coroutine.
Arguments:
coro (coroutinefunction): coroutine function to execute.
coro_test (coroutinefunction): coroutine function to test.
assert_coro (coroutinefunction): optional assertion coroutine used
to determine if the test passed or not.
*args (mixed): optional variadic arguments to pass to `coro` function.
Raises:
TypeError: if input arguments are invalid.
Returns:
list: result values returned by `coro`.
Usage::
calls = 0
async def task():
nonlocal calls
calls += 1
return calls
async def calls_gt_4():
return calls > 4
await paco.until(task, calls_gt_4)
# => [1, 2, 3, 4, 5]
### Response:
def until(coro, coro_test, assert_coro=None, *args, **kw):
"""
Repeatedly call `coro` coroutine function until `coro_test` returns `True`.
This function is the inverse of `paco.whilst()`.
This function is a coroutine.
Arguments:
coro (coroutinefunction): coroutine function to execute.
coro_test (coroutinefunction): coroutine function to test.
assert_coro (coroutinefunction): optional assertion coroutine used
to determine if the test passed or not.
*args (mixed): optional variadic arguments to pass to `coro` function.
Raises:
TypeError: if input arguments are invalid.
Returns:
list: result values returned by `coro`.
Usage::
calls = 0
async def task():
nonlocal calls
calls += 1
return calls
async def calls_gt_4():
return calls > 4
await paco.until(task, calls_gt_4)
# => [1, 2, 3, 4, 5]
"""
@asyncio.coroutine
def assert_coro(value):
return not value
return (yield from whilst(coro, coro_test,
assert_coro=assert_coro, *args, **kw)) |
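A runnable version of the docstring's own usage example, assuming the paco package and Python 3.5+ async syntax:
import asyncio
import paco
calls = 0
async def task():
    global calls
    calls += 1
    return calls
async def calls_gt_4():
    return calls > 4
# Repeats task() until calls_gt_4() becomes true, collecting each result.
results = asyncio.get_event_loop().run_until_complete(paco.until(task, calls_gt_4))
print(results)  # [1, 2, 3, 4, 5]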
def markdown(text, html=False, valid_tags=GFM_TAGS):
"""
Return Markdown rendered text using GitHub Flavoured Markdown,
with HTML escaped and syntax-highlighting enabled.
"""
if text is None:
return None
if html:
return Markup(sanitize_html(markdown_convert_html(gfm(text)), valid_tags=valid_tags))
else:
return Markup(markdown_convert_text(gfm(text))) | Return Markdown rendered text using GitHub Flavoured Markdown,
with HTML escaped and syntax-highlighting enabled. | Below is the instruction that describes the task:
### Input:
Return Markdown rendered text using GitHub Flavoured Markdown,
with HTML escaped and syntax-highlighting enabled.
### Response:
def markdown(text, html=False, valid_tags=GFM_TAGS):
"""
Return Markdown rendered text using GitHub Flavoured Markdown,
with HTML escaped and syntax-highlighting enabled.
"""
if text is None:
return None
if html:
return Markup(sanitize_html(markdown_convert_html(gfm(text)), valid_tags=valid_tags))
else:
return Markup(markdown_convert_text(gfm(text))) |
def static(cls, text, token=Token):
"""
Create a :class:`.BeforeInput` instance that always inserts the same
text.
"""
def get_static_tokens(cli):
return [(token, text)]
return cls(get_static_tokens) | Create a :class:`.BeforeInput` instance that always inserts the same
text. | Below is the instruction that describes the task:
### Input:
Create a :class:`.BeforeInput` instance that always inserts the same
text.
### Response:
def static(cls, text, token=Token):
"""
Create a :class:`.BeforeInput` instance that always inserts the same
text.
"""
def get_static_tokens(cli):
return [(token, text)]
return cls(get_static_tokens) |
def new_parser(self):
""" Create a command line argument parser
Add a few default flags, such as --version
for displaying the program version when invoked """
parser = argparse.ArgumentParser(description=self.description)
parser.add_argument(
'--version', help='show version and exit',
default=False, action='store_true')
parser.add_argument(
'--debug', help='enable debugging',
default=False, action='store_true')
return parser | Create a command line argument parser
Add a few default flags, such as --version
for displaying the program version when invoked | Below is the instruction that describes the task:
### Input:
Create a command line argument parser
Add a few default flags, such as --version
for displaying the program version when invoked
### Response:
def new_parser(self):
""" Create a command line argument parser
Add a few default flags, such as --version
for displaying the program version when invoked """
parser = argparse.ArgumentParser(description=self.description)
parser.add_argument(
'--version', help='show version and exit',
default=False, action='store_true')
parser.add_argument(
'--debug', help='enable debugging',
default=False, action='store_true')
return parser |
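A self-contained sketch of how the two default flags behave once parsed (the description string below is a placeholder, not the class's real one):
import argparse
parser = argparse.ArgumentParser(description='example program')
parser.add_argument('--version', help='show version and exit',
                    default=False, action='store_true')
parser.add_argument('--debug', help='enable debugging',
                    default=False, action='store_true')
args = parser.parse_args(['--debug'])
print(args.version, args.debug)  # False True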
def matches(self, desc):
"""Determines if a given label descriptor matches this enum instance
Args:
desc (:class:`endpoints_management.gen.servicemanagement_v1_messages.LabelDescriptor`):
the instance to test
Return:
`True` if desc is supported, otherwise `False`
"""
desc_value_type = desc.valueType or ValueType.STRING # default not parsed
return (self.label_name == desc.key and
self.value_type == desc_value_type) | Determines if a given label descriptor matches this enum instance
Args:
desc (:class:`endpoints_management.gen.servicemanagement_v1_messages.LabelDescriptor`):
the instance to test
Return:
`True` if desc is supported, otherwise `False` | Below is the instruction that describes the task:
### Input:
Determines if a given label descriptor matches this enum instance
Args:
desc (:class:`endpoints_management.gen.servicemanagement_v1_messages.LabelDescriptor`):
the instance to test
Return:
`True` if desc is supported, otherwise `False`
### Response:
def matches(self, desc):
"""Determines if a given label descriptor matches this enum instance
Args:
desc (:class:`endpoints_management.gen.servicemanagement_v1_messages.LabelDescriptor`):
the instance to test
Return:
`True` if desc is supported, otherwise `False`
"""
desc_value_type = desc.valueType or ValueType.STRING # default not parsed
return (self.label_name == desc.key and
self.value_type == desc_value_type) |
def removekeyword(self, keyword):
"""Remove a table keyword.
Similar to :func:`getkeyword` the name can consist of multiple parts.
In that case a field in a struct will be removed.
Instead of a keyword name an index can be given which removes
the i-th keyword.
"""
if isinstance(keyword, str):
self._removekeyword('', keyword, -1)
else:
self._removekeyword('', '', keyword) | Remove a table keyword.
Similar to :func:`getkeyword` the name can consist of multiple parts.
In that case a field in a struct will be removed.
Instead of a keyword name an index can be given which removes
the i-th keyword. | Below is the instruction that describes the task:
### Input:
Remove a table keyword.
Similar to :func:`getkeyword` the name can consist of multiple parts.
In that case a field in a struct will be removed.
Instead of a keyword name an index can be given which removes
the i-th keyword.
### Response:
def removekeyword(self, keyword):
"""Remove a table keyword.
Similar to :func:`getkeyword` the name can consist of multiple parts.
In that case a field in a struct will be removed.
Instead of a keyword name an index can be given which removes
the i-th keyword.
"""
if isinstance(keyword, str):
self._removekeyword('', keyword, -1)
else:
self._removekeyword('', '', keyword) |
def scale_axes_from_data(self):
"""Restrict data limits for Y-axis based on what you can see
"""
# get tight limits for X-axis
if self.args.xmin is None:
self.args.xmin = min(ts.xspan[0] for ts in self.timeseries)
if self.args.xmax is None:
self.args.xmax = max(ts.xspan[1] for ts in self.timeseries)
# autoscale view for Y-axis
cropped = [ts.crop(self.args.xmin, self.args.xmax) for
ts in self.timeseries]
ymin = min(ts.value.min() for ts in cropped)
ymax = max(ts.value.max() for ts in cropped)
self.plot.gca().yaxis.set_data_interval(ymin, ymax, ignore=True)
self.plot.gca().autoscale_view(scalex=False) | Restrict data limits for Y-axis based on what you can see | Below is the instruction that describes the task:
### Input:
Restrict data limits for Y-axis based on what you can see
### Response:
def scale_axes_from_data(self):
"""Restrict data limits for Y-axis based on what you can see
"""
# get tight limits for X-axis
if self.args.xmin is None:
self.args.xmin = min(ts.xspan[0] for ts in self.timeseries)
if self.args.xmax is None:
self.args.xmax = max(ts.xspan[1] for ts in self.timeseries)
# autoscale view for Y-axis
cropped = [ts.crop(self.args.xmin, self.args.xmax) for
ts in self.timeseries]
ymin = min(ts.value.min() for ts in cropped)
ymax = max(ts.value.max() for ts in cropped)
self.plot.gca().yaxis.set_data_interval(ymin, ymax, ignore=True)
self.plot.gca().autoscale_view(scalex=False) |
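The same idea in plain matplotlib, as a hedged standalone sketch using an ordinary Axes instead of the class's timeseries plot:
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(0, 10, 500)
y = np.sin(x) * x
fig, ax = plt.subplots()
ax.plot(x, y)
ax.set_xlim(2, 4)
# Pin the Y data interval to what is visible, then rescale only the Y axis.
visible = y[(x >= 2) & (x <= 4)]
ax.yaxis.set_data_interval(visible.min(), visible.max(), ignore=True)
ax.autoscale_view(scalex=False)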
def h_boiling_Amalfi(m, x, Dh, rhol, rhog, mul, mug, kl, Hvap, sigma, q,
A_channel_flow, chevron_angle=45):
r'''Calculates the two-phase boiling heat transfer coefficient of a
liquid and gas flowing inside a plate and frame heat exchanger, as
developed in [1]_ from a wide range of existing correlations and data sets.
Expected to be the most accurate correlation currently available.
For Bond number < 4 (tiny channel case):
.. math::
h= 982 \left(\frac{k_l}{D_h}\right)\left(\frac{\beta}{\beta_{max}}\right)^{1.101}
\left(\frac{G^2 D_h}{\rho_m \sigma}\right)^{0.315}
\left(\frac{\rho_l}{\rho_g}\right)^{-0.224} Bo^{0.320}
For Bond number >= 4:
.. math::
h = 18.495 \left(\frac{k_l}{D_h}\right) \left(\frac{\beta}{\beta_{max}}
\right)^{0.248}\left(Re_g\right)^{0.135}\left(Re_{lo}\right)^{0.351}
\left(\frac{\rho_l}{\rho_g}\right)^{-0.223} Bd^{0.235} Bo^{0.198}
In the above equations, beta max is 45 degrees; Bo is Boiling number;
and Bd is Bond number.
Note that this model depends on the specific heat flux involved.
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific point in the plate exchanger []
Dh : float
Hydraulic diameter of the plate, :math:`D_h = \frac{4\lambda}{\phi}` [m]
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the gas [kg/m^3]
mul : float
Viscosity of the liquid [Pa*s]
mug : float
Viscosity of the gas [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Hvap : float
Heat of vaporization of the fluid at the system pressure, [J/kg]
sigma : float
Surface tension of liquid [N/m]
q : float
Heat flux, [W/m^2]
A_channel_flow : float
The flow area for the fluid, calculated as
:math:`A_{ch} = 2\cdot \text{width} \cdot \text{amplitude}` [m]
chevron_angle : float, optional
Angle of the plate corrugations with respect to the vertical axis
(the direction of flow if the plates were straight), between 0 and
90. For exchangers with two angles, use the average value. [degrees]
Returns
-------
h : float
Boiling heat transfer coefficient [W/m^2/K]
Notes
-----
Heat transfer correlation developed from 1903 datum. Fluids included R134a,
ammonia, R236fa, R600a, R290, R1270, R1234yf, R410A, R507A, ammonia/water,
and air/water mixtures. Wide range of operating conditions, plate geometries.
Examples
--------
>>> h_boiling_Amalfi(m=3E-5, x=.4, Dh=0.00172, rhol=567., rhog=18.09,
... kl=0.086, mul=156E-6, mug=7.11E-6, sigma=0.02, Hvap=9E5, q=1E5,
... A_channel_flow=0.0003)
776.0781179096225
References
----------
.. [1] Amalfi, Raffaele L., Farzad Vakili-Farahani, and John R. Thome.
"Flow Boiling and Frictional Pressure Gradients in Plate Heat Exchangers.
Part 2: Comparison of Literature Methods to Database and New Prediction
Methods." International Journal of Refrigeration 61 (January 2016):
185-203. doi:10.1016/j.ijrefrig.2015.07.009.
'''
chevron_angle_max = 45.
beta_s = chevron_angle/chevron_angle_max
rho_s = (rhol/rhog) # rho start in model
G = m/A_channel_flow # Calculating the area of the channel is normally specified well
Bd = Bond(rhol=rhol, rhog=rhog, sigma=sigma, L=Dh)
rho_h = 1./(x/rhog + (1-x)/rhol) # homogeneous holdup - mixture density calculation
We_m = G*G*Dh/sigma/rho_h
Bo = q/(G*Hvap) # Boiling number
if Bd < 4:
# Should occur normally for "microscale" conditions
Nu_tp = 982*beta_s**1.101*We_m**0.315*Bo**0.320*rho_s**-0.224
else:
Re_lo = G*Dh/mul
Re_g = G*x*Dh/mug
Nu_tp = 18.495*beta_s**0.135*Re_g**0.135*Re_lo**0.351*Bd**0.235*Bo**0.198*rho_s**-0.223
return kl/Dh*Nu_tp | r'''Calculates the two-phase boiling heat transfer coefficient of a
liquid and gas flowing inside a plate and frame heat exchanger, as
developed in [1]_ from a wide range of existing correlations and data sets.
Expected to be the most accurate correlation currently available.
For Bond number < 4 (tiny channel case):
.. math::
h= 982 \left(\frac{k_l}{D_h}\right)\left(\frac{\beta}{\beta_{max}}\right)^{1.101}
\left(\frac{G^2 D_h}{\rho_m \sigma}\right)^{0.315}
\left(\frac{\rho_l}{\rho_g}\right)^{-0.224} Bo^{0.320}
For Bond number >= 4:
.. math::
h = 18.495 \left(\frac{k_l}{D_h}\right) \left(\frac{\beta}{\beta_{max}}
\right)^{0.248}\left(Re_g\right)^{0.135}\left(Re_{lo}\right)^{0.351}
\left(\frac{\rho_l}{\rho_g}\right)^{-0.223} Bd^{0.235} Bo^{0.198}
In the above equations, beta max is 45 degrees; Bo is Boiling number;
and Bd is Bond number.
Note that this model depends on the specific heat flux involved.
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific point in the plate exchanger []
Dh : float
Hydraulic diameter of the plate, :math:`D_h = \frac{4\lambda}{\phi}` [m]
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the gas [kg/m^3]
mul : float
Viscosity of the liquid [Pa*s]
mug : float
Viscosity of the gas [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Hvap : float
Heat of vaporization of the fluid at the system pressure, [J/kg]
sigma : float
Surface tension of liquid [N/m]
q : float
Heat flux, [W/m^2]
A_channel_flow : float
The flow area for the fluid, calculated as
:math:`A_{ch} = 2\cdot \text{width} \cdot \text{amplitude}` [m]
chevron_angle : float, optional
Angle of the plate corrugations with respect to the vertical axis
(the direction of flow if the plates were straight), between 0 and
90. For exchangers with two angles, use the average value. [degrees]
Returns
-------
h : float
Boiling heat transfer coefficient [W/m^2/K]
Notes
-----
Heat transfer correlation developed from 1903 datum. Fluids included R134a,
ammonia, R236fa, R600a, R290, R1270, R1234yf, R410A, R507A, ammonia/water,
and air/water mixtures. Wide range of operating conditions, plate geometries.
Examples
--------
>>> h_boiling_Amalfi(m=3E-5, x=.4, Dh=0.00172, rhol=567., rhog=18.09,
... kl=0.086, mul=156E-6, mug=7.11E-6, sigma=0.02, Hvap=9E5, q=1E5,
... A_channel_flow=0.0003)
776.0781179096225
References
----------
.. [1] Amalfi, Raffaele L., Farzad Vakili-Farahani, and John R. Thome.
"Flow Boiling and Frictional Pressure Gradients in Plate Heat Exchangers.
Part 2: Comparison of Literature Methods to Database and New Prediction
Methods." International Journal of Refrigeration 61 (January 2016):
185-203. doi:10.1016/j.ijrefrig.2015.07.009. | Below is the instruction that describes the task:
### Input:
r'''Calculates the two-phase boiling heat transfer coefficient of a
liquid and gas flowing inside a plate and frame heat exchanger, as
developed in [1]_ from a wide range of existing correlations and data sets.
Expected to be the most accurate correlation currently available.
For Bond number < 4 (tiny channel case):
.. math::
h= 982 \left(\frac{k_l}{D_h}\right)\left(\frac{\beta}{\beta_{max}}\right)^{1.101}
\left(\frac{G^2 D_h}{\rho_m \sigma}\right)^{0.315}
\left(\frac{\rho_l}{\rho_g}\right)^{-0.224} Bo^{0.320}
For Bond number >= 4:
.. math::
h = 18.495 \left(\frac{k_l}{D_h}\right) \left(\frac{\beta}{\beta_{max}}
\right)^{0.248}\left(Re_g\right)^{0.135}\left(Re_{lo}\right)^{0.351}
\left(\frac{\rho_l}{\rho_g}\right)^{-0.223} Bd^{0.235} Bo^{0.198}
In the above equations, beta max is 45 degrees; Bo is Boiling number;
and Bd is Bond number.
Note that this model depends on the specific heat flux involved.
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific point in the plate exchanger []
Dh : float
Hydraulic diameter of the plate, :math:`D_h = \frac{4\lambda}{\phi}` [m]
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the gas [kg/m^3]
mul : float
Viscosity of the liquid [Pa*s]
mug : float
Viscosity of the gas [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Hvap : float
Heat of vaporization of the fluid at the system pressure, [J/kg]
sigma : float
Surface tension of liquid [N/m]
q : float
Heat flux, [W/m^2]
A_channel_flow : float
The flow area for the fluid, calculated as
:math:`A_{ch} = 2\cdot \text{width} \cdot \text{amplitude}` [m]
chevron_angle : float, optional
Angle of the plate corrugations with respect to the vertical axis
(the direction of flow if the plates were straight), between 0 and
90. For exchangers with two angles, use the average value. [degrees]
Returns
-------
h : float
Boiling heat transfer coefficient [W/m^2/K]
Notes
-----
Heat transfer correlation developed from 1903 datum. Fluids included R134a,
ammonia, R236fa, R600a, R290, R1270, R1234yf, R410A, R507A, ammonia/water,
and air/water mixtures. Wide range of operating conditions, plate geometries.
Examples
--------
>>> h_boiling_Amalfi(m=3E-5, x=.4, Dh=0.00172, rhol=567., rhog=18.09,
... kl=0.086, mul=156E-6, mug=7.11E-6, sigma=0.02, Hvap=9E5, q=1E5,
... A_channel_flow=0.0003)
776.0781179096225
References
----------
.. [1] Amalfi, Raffaele L., Farzad Vakili-Farahani, and John R. Thome.
"Flow Boiling and Frictional Pressure Gradients in Plate Heat Exchangers.
Part 2: Comparison of Literature Methods to Database and New Prediction
Methods." International Journal of Refrigeration 61 (January 2016):
185-203. doi:10.1016/j.ijrefrig.2015.07.009.
### Response:
def h_boiling_Amalfi(m, x, Dh, rhol, rhog, mul, mug, kl, Hvap, sigma, q,
A_channel_flow, chevron_angle=45):
r'''Calculates the two-phase boiling heat transfer coefficient of a
liquid and gas flowing inside a plate and frame heat exchanger, as
developed in [1]_ from a wide range of existing correlations and data sets.
Expected to be the most accurate correlation currently available.
For Bond number < 4 (tiny channel case):
.. math::
h= 982 \left(\frac{k_l}{D_h}\right)\left(\frac{\beta}{\beta_{max}}\right)^{1.101}
\left(\frac{G^2 D_h}{\rho_m \sigma}\right)^{0.315}
\left(\frac{\rho_l}{\rho_g}\right)^{-0.224} Bo^{0.320}
For Bond number >= 4:
.. math::
h = 18.495 \left(\frac{k_l}{D_h}\right) \left(\frac{\beta}{\beta_{max}}
\right)^{0.248}\left(Re_g\right)^{0.135}\left(Re_{lo}\right)^{0.351}
\left(\frac{\rho_l}{\rho_g}\right)^{-0.223} Bd^{0.235} Bo^{0.198}
In the above equations, beta max is 45 degrees; Bo is Boiling number;
and Bd is Bond number.
Note that this model depends on the specific heat flux involved.
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific point in the plate exchanger []
Dh : float
Hydraulic diameter of the plate, :math:`D_h = \frac{4\lambda}{\phi}` [m]
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the gas [kg/m^3]
mul : float
Viscosity of the liquid [Pa*s]
mug : float
Viscosity of the gas [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Hvap : float
Heat of vaporization of the fluid at the system pressure, [J/kg]
sigma : float
Surface tension of liquid [N/m]
q : float
Heat flux, [W/m^2]
A_channel_flow : float
The flow area for the fluid, calculated as
:math:`A_{ch} = 2\cdot \text{width} \cdot \text{amplitude}` [m]
chevron_angle : float, optional
Angle of the plate corrugations with respect to the vertical axis
(the direction of flow if the plates were straight), between 0 and
90. For exchangers with two angles, use the average value. [degrees]
Returns
-------
h : float
Boiling heat transfer coefficient [W/m^2/K]
Notes
-----
Heat transfer correlation developed from 1903 datum. Fluids included R134a,
ammonia, R236fa, R600a, R290, R1270, R1234yf, R410A, R507A, ammonia/water,
and air/water mixtures. Wide range of operating conditions, plate geometries.
Examples
--------
>>> h_boiling_Amalfi(m=3E-5, x=.4, Dh=0.00172, rhol=567., rhog=18.09,
... kl=0.086, mul=156E-6, mug=7.11E-6, sigma=0.02, Hvap=9E5, q=1E5,
... A_channel_flow=0.0003)
776.0781179096225
References
----------
.. [1] Amalfi, Raffaele L., Farzad Vakili-Farahani, and John R. Thome.
"Flow Boiling and Frictional Pressure Gradients in Plate Heat Exchangers.
Part 2: Comparison of Literature Methods to Database and New Prediction
Methods." International Journal of Refrigeration 61 (January 2016):
185-203. doi:10.1016/j.ijrefrig.2015.07.009.
'''
chevron_angle_max = 45.
beta_s = chevron_angle/chevron_angle_max
rho_s = (rhol/rhog) # rho start in model
G = m/A_channel_flow # Calculating the area of the channel is normally specified well
Bd = Bond(rhol=rhol, rhog=rhog, sigma=sigma, L=Dh)
rho_h = 1./(x/rhog + (1-x)/rhol) # homogeneous holdup - mixture density calculation
We_m = G*G*Dh/sigma/rho_h
Bo = q/(G*Hvap) # Boiling number
if Bd < 4:
# Should occur normally for "microscale" conditions
Nu_tp = 982*beta_s**1.101*We_m**0.315*Bo**0.320*rho_s**-0.224
else:
Re_lo = G*Dh/mul
Re_g = G*x*Dh/mug
Nu_tp = 18.495*beta_s**0.135*Re_g**0.135*Re_lo**0.351*Bd**0.235*Bo**0.198*rho_s**-0.223
return kl/Dh*Nu_tp |
def find_element_by_xpath(self, xpath):
"""
Finds an element by xpath.
:param xpath: The xpath locator of the element to find.
:return: ElemLxml
See lxml xpath expressions `here <http://lxml.de/xpathxslt.html#xpath>`_
"""
elems = self.find_elements_by_xpath(xpath)
if isinstance(elems, list) and len(elems) > 0:
return elems[0] | Finds an element by xpath.
:param xpath: The xpath locator of the element to find.
:return: ElemLxml
See lxml xpath expressions `here <http://lxml.de/xpathxslt.html#xpath>`_ | Below is the instruction that describes the task:
### Input:
Finds an element by xpath.
:param xpath: The xpath locator of the element to find.
:return: ElemLxml
See lxml xpath expressions `here <http://lxml.de/xpathxslt.html#xpath>`_
### Response:
def find_element_by_xpath(self, xpath):
"""
Finds an element by xpath.
:param xpath: The xpath locator of the element to find.
:return: ElemLxml
See lxml xpath expressions `here <http://lxml.de/xpathxslt.html#xpath>`_
"""
elems = self.find_elements_by_xpath(xpath)
if isinstance(elems, list) and len(elems) > 0:
return elems[0] |
def _derZ(self,x,y,z):
'''
Returns the first derivative of the function with respect to Z at each
value in (x,y,z). Only called internally by HARKinterpolator3D._derZ.
'''
m = len(x)
temp = np.zeros((m,self.funcCount))
for j in range(self.funcCount):
temp[:,j] = self.functions[j](x,y,z)
temp[np.isnan(temp)] = np.inf
i = np.argmin(temp,axis=1)
y = temp[np.arange(m),i]
dfdz = np.zeros_like(x)
for j in range(self.funcCount):
c = i == j
dfdz[c] = self.functions[j].derivativeZ(x[c],y[c],z[c])
return dfdz | Returns the first derivative of the function with respect to Z at each
value in (x,y,z). Only called internally by HARKinterpolator3D._derZ. | Below is the instruction that describes the task:
### Input:
Returns the first derivative of the function with respect to Z at each
value in (x,y,z). Only called internally by HARKinterpolator3D._derZ.
### Response:
def _derZ(self,x,y,z):
'''
Returns the first derivative of the function with respect to Z at each
value in (x,y,z). Only called internally by HARKinterpolator3D._derZ.
'''
m = len(x)
temp = np.zeros((m,self.funcCount))
for j in range(self.funcCount):
temp[:,j] = self.functions[j](x,y,z)
temp[np.isnan(temp)] = np.inf
i = np.argmin(temp,axis=1)
y = temp[np.arange(m),i]
dfdz = np.zeros_like(x)
for j in range(self.funcCount):
c = i == j
dfdz[c] = self.functions[j].derivativeZ(x[c],y[c],z[c])
return dfdz |
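The argmin-based selection in _derZ (evaluate every candidate function, keep the smallest at each point, then differentiate only the winner) can be illustrated with a toy NumPy sketch:
import numpy as np
funcs = [lambda x: x ** 2, lambda x: 2 * x + 1]
x = np.array([0.0, 1.0, 3.0])
# Stack candidate values column-wise and pick the minimiser at each point.
vals = np.column_stack([f(x) for f in funcs])
winner = np.argmin(vals, axis=1)
print(winner)  # [0 0 1] -> index of the function forming the lower envelope at each x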
def location(self, x=None, y=None):
"""Return a context manager for temporarily moving the cursor.
Move the cursor to a certain position on entry, let you print stuff
there, then return the cursor to its original position::
term = Terminal()
with term.location(2, 5):
print('Hello, world!')
for x in xrange(10):
print('I can do it %i times!' % x)
Specify ``x`` to move to a certain column, ``y`` to move to a certain
row, both, or neither. If you specify neither, only the saving and
restoration of cursor position will happen. This can be useful if you
simply want to restore your place after doing some manual cursor
movement.
"""
# Save position and move to the requested column, row, or both:
self.stream.write(self.save)
if x is not None and y is not None:
self.stream.write(self.move(y, x))
elif x is not None:
self.stream.write(self.move_x(x))
elif y is not None:
self.stream.write(self.move_y(y))
try:
yield
finally:
# Restore original cursor position:
self.stream.write(self.restore) | Return a context manager for temporarily moving the cursor.
Move the cursor to a certain position on entry, let you print stuff
there, then return the cursor to its original position::
term = Terminal()
with term.location(2, 5):
print('Hello, world!')
for x in xrange(10):
print('I can do it %i times!' % x)
Specify ``x`` to move to a certain column, ``y`` to move to a certain
row, both, or neither. If you specify neither, only the saving and
restoration of cursor position will happen. This can be useful if you
simply want to restore your place after doing some manual cursor
movement. | Below is the instruction that describes the task:
### Input:
Return a context manager for temporarily moving the cursor.
Move the cursor to a certain position on entry, let you print stuff
there, then return the cursor to its original position::
term = Terminal()
with term.location(2, 5):
print('Hello, world!')
for x in xrange(10):
print('I can do it %i times!' % x)
Specify ``x`` to move to a certain column, ``y`` to move to a certain
row, both, or neither. If you specify neither, only the saving and
restoration of cursor position will happen. This can be useful if you
simply want to restore your place after doing some manual cursor
movement.
### Response:
def location(self, x=None, y=None):
"""Return a context manager for temporarily moving the cursor.
Move the cursor to a certain position on entry, let you print stuff
there, then return the cursor to its original position::
term = Terminal()
with term.location(2, 5):
print('Hello, world!')
for x in xrange(10):
print('I can do it %i times!' % x)
Specify ``x`` to move to a certain column, ``y`` to move to a certain
row, both, or neither. If you specify neither, only the saving and
restoration of cursor position will happen. This can be useful if you
simply want to restore your place after doing some manual cursor
movement.
"""
# Save position and move to the requested column, row, or both:
self.stream.write(self.save)
if x is not None and y is not None:
self.stream.write(self.move(y, x))
elif x is not None:
self.stream.write(self.move_x(x))
elif y is not None:
self.stream.write(self.move_y(y))
try:
yield
finally:
# Restore original cursor position:
self.stream.write(self.restore) |
def rename_fields(layer, fields_to_copy):
"""Rename fields inside an attribute table.
Only since QGIS 2.16.
:param layer: The vector layer.
:type layer: QgsVectorLayer
:param fields_to_copy: Dictionary of fields to copy.
:type fields_to_copy: dict
"""
for field in fields_to_copy:
index = layer.fields().lookupField(field)
if index != -1:
layer.startEditing()
layer.renameAttribute(index, fields_to_copy[field])
layer.commitChanges()
LOGGER.info(
'Renaming field %s to %s' % (field, fields_to_copy[field]))
else:
LOGGER.info(
'Field %s not present in the layer while trying to rename '
'it to %s' % (field, fields_to_copy[field])) | Rename fields inside an attribute table.
Only since QGIS 2.16.
:param layer: The vector layer.
:type layer: QgsVectorLayer
:param fields_to_copy: Dictionary of fields to copy.
:type fields_to_copy: dict | Below is the instruction that describes the task:
### Input:
Rename fields inside an attribute table.
Only since QGIS 2.16.
:param layer: The vector layer.
:type layer: QgsVectorLayer
:param fields_to_copy: Dictionary of fields to copy.
:type fields_to_copy: dict
### Response:
def rename_fields(layer, fields_to_copy):
"""Rename fields inside an attribute table.
Only since QGIS 2.16.
:param layer: The vector layer.
:type layer: QgsVectorLayer
:param fields_to_copy: Dictionary of fields to copy.
:type fields_to_copy: dict
"""
for field in fields_to_copy:
index = layer.fields().lookupField(field)
if index != -1:
layer.startEditing()
layer.renameAttribute(index, fields_to_copy[field])
layer.commitChanges()
LOGGER.info(
'Renaming field %s to %s' % (field, fields_to_copy[field]))
else:
LOGGER.info(
'Field %s not present in the layer while trying to rename '
'it to %s' % (field, fields_to_copy[field])) |
def expose_init(self, *args):
"""Process the drawing routine
"""
# Obtain a reference to the OpenGL drawable
# and rendering context.
gldrawable = self.get_gl_drawable()
glcontext = self.get_gl_context()
# OpenGL begin
if not gldrawable or not gldrawable.gl_begin(glcontext):
return False
# logger.debug("expose_init")
# Reset buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# Prepare name stack
glInitNames()
glPushName(0)
self.name_counter = 1
return False | Process the drawing routine | Below is the instruction that describes the task:
### Input:
Process the drawing routine
### Response:
def expose_init(self, *args):
"""Process the drawing routine
"""
# Obtain a reference to the OpenGL drawable
# and rendering context.
gldrawable = self.get_gl_drawable()
glcontext = self.get_gl_context()
# OpenGL begin
if not gldrawable or not gldrawable.gl_begin(glcontext):
return False
# logger.debug("expose_init")
# Reset buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# Prepare name stack
glInitNames()
glPushName(0)
self.name_counter = 1
return False |
def editMonitor(self, monitorID, monitorStatus=None, monitorFriendlyName=None, monitorURL=None, monitorType=None,
monitorSubType=None, monitorPort=None, monitorKeywordType=None, monitorKeywordValue=None,
monitorHTTPUsername=None, monitorHTTPPassword=None, monitorAlertContacts=None):
"""
monitorID is the only required object. All others are optional and must be quoted.
Returns Response object from api.
"""
url = self.baseUrl
url += "editMonitor?apiKey=%s" % self.apiKey
url += "&monitorID=%s" % monitorID
if monitorStatus:
# Pause, Start Monitor
url += "&monitorStatus=%s" % monitorStatus
if monitorFriendlyName:
# Update their FriendlyName
url += "&monitorFriendlyName=%s" % monitorFriendlyName
if monitorURL:
# Edit the MonitorUrl
url += "&monitorURL=%s" % monitorURL
if monitorType:
# Edit the type of monitor
url += "&monitorType=%s" % monitorType
if monitorSubType:
# Edit the SubType
url += "&monitorSubType=%s" % monitorSubType
if monitorPort:
# Edit the Port
url += "&monitorPort=%s" % monitorPort
if monitorKeywordType:
# Edit the Keyword Type
url += "&monitorKeywordType=%s" % monitorKeywordType
if monitorKeywordValue:
# Edit the Keyword Match
url += "&monitorKeywordValue=%s" % monitorKeywordValue
if monitorHTTPUsername:
# Edit the HTTP Username
url += "&monitorHTTPUsername=%s" % monitorHTTPUsername
if monitorHTTPPassword:
# Edit the HTTP Password
url += "&monitorHTTPPassword=%s" % monitorHTTPPassword
if monitorAlertContacts:
# Edit the contacts
url += "&monitorAlertContacts=%s" % monitorAlertContacts
url += "&noJsonCallback=1&format=json"
success = self.requestApi(url)
return success | monitorID is the only required object. All others are optional and must be quoted.
Returns Response object from api. | Below is the instruction that describes the task:
### Input:
monitorID is the only required object. All others are optional and must be quoted.
Returns Response object from api.
### Response:
def editMonitor(self, monitorID, monitorStatus=None, monitorFriendlyName=None, monitorURL=None, monitorType=None,
monitorSubType=None, monitorPort=None, monitorKeywordType=None, monitorKeywordValue=None,
monitorHTTPUsername=None, monitorHTTPPassword=None, monitorAlertContacts=None):
"""
monitorID is the only required object. All others are optional and must be quoted.
Returns Response object from api.
"""
url = self.baseUrl
url += "editMonitor?apiKey=%s" % self.apiKey
url += "&monitorID=%s" % monitorID
if monitorStatus:
# Pause, Start Monitor
url += "&monitorStatus=%s" % monitorStatus
if monitorFriendlyName:
# Update their FriendlyName
url += "&monitorFriendlyName=%s" % monitorFriendlyName
if monitorURL:
# Edit the MonitorUrl
url += "&monitorURL=%s" % monitorURL
if monitorType:
# Edit the type of monitor
url += "&monitorType=%s" % monitorType
if monitorSubType:
# Edit the SubType
url += "&monitorSubType=%s" % monitorSubType
if monitorPort:
# Edit the Port
url += "&monitorPort=%s" % monitorPort
if monitorKeywordType:
# Edit the Keyword Type
url += "&monitorKeywordType=%s" % monitorKeywordType
if monitorKeywordValue:
# Edit the Keyword Match
url += "&monitorKeywordValue=%s" % monitorKeywordValue
if monitorHTTPUsername:
# Edit the HTTP Username
url += "&monitorHTTPUsername=%s" % monitorHTTPUsername
if monitorHTTPPassword:
# Edit the HTTP Password
url += "&monitorHTTPPassword=%s" % monitorHTTPPassword
if monitorAlertContacts:
# Edit the contacts
url += "&monitorAlertContacts=%s" % monitorAlertContacts
url += "&noJsonCallback=1&format=json"
success = self.requestApi(url)
return success |
def query(self, model, **kwargs):
'''Create a new :class:`Query` for *model*.'''
sm = self.model(model)
query_class = sm.manager.query_class or Query
return query_class(sm._meta, self, **kwargs) | Create a new :class:`Query` for *model*. | Below is the instruction that describes the task:
### Input:
Create a new :class:`Query` for *model*.
### Response:
def query(self, model, **kwargs):
'''Create a new :class:`Query` for *model*.'''
sm = self.model(model)
query_class = sm.manager.query_class or Query
return query_class(sm._meta, self, **kwargs) |
def select_unaligned_read_pairs(in_bam, extra, out_dir, config):
"""Retrieve unaligned read pairs from input alignment BAM, as two fastq files.
"""
runner = broad.runner_from_config(config)
base, ext = os.path.splitext(os.path.basename(in_bam))
nomap_bam = os.path.join(out_dir, "{}-{}{}".format(base, extra, ext))
if not utils.file_exists(nomap_bam):
with file_transaction(nomap_bam) as tx_out:
runner.run("FilterSamReads", [("INPUT", in_bam),
("OUTPUT", tx_out),
("EXCLUDE_ALIGNED", "true"),
("WRITE_READS_FILES", "false"),
("SORT_ORDER", "queryname")])
has_reads = False
with pysam.Samfile(nomap_bam, "rb") as in_pysam:
for read in in_pysam:
if read.is_paired:
has_reads = True
break
if has_reads:
out_fq1, out_fq2 = ["{}-{}.fq".format(os.path.splitext(nomap_bam)[0], i) for i in [1, 2]]
runner.run_fn("picard_bam_to_fastq", nomap_bam, out_fq1, out_fq2)
return out_fq1, out_fq2
else:
return None, None | Retrieve unaligned read pairs from input alignment BAM, as two fastq files. | Below is the instruction that describes the task:
### Input:
Retrieve unaligned read pairs from input alignment BAM, as two fastq files.
### Response:
def select_unaligned_read_pairs(in_bam, extra, out_dir, config):
"""Retrieve unaligned read pairs from input alignment BAM, as two fastq files.
"""
runner = broad.runner_from_config(config)
base, ext = os.path.splitext(os.path.basename(in_bam))
nomap_bam = os.path.join(out_dir, "{}-{}{}".format(base, extra, ext))
if not utils.file_exists(nomap_bam):
with file_transaction(nomap_bam) as tx_out:
runner.run("FilterSamReads", [("INPUT", in_bam),
("OUTPUT", tx_out),
("EXCLUDE_ALIGNED", "true"),
("WRITE_READS_FILES", "false"),
("SORT_ORDER", "queryname")])
has_reads = False
with pysam.Samfile(nomap_bam, "rb") as in_pysam:
for read in in_pysam:
if read.is_paired:
has_reads = True
break
if has_reads:
out_fq1, out_fq2 = ["{}-{}.fq".format(os.path.splitext(nomap_bam)[0], i) for i in [1, 2]]
runner.run_fn("picard_bam_to_fastq", nomap_bam, out_fq1, out_fq2)
return out_fq1, out_fq2
else:
return None, None |
def format_coord(self, x, y):
"""Format displayed coordinates during mouseover of axes."""
p, b = stereonet_math.geographic2plunge_bearing(x, y)
s, d = stereonet_math.geographic2pole(x, y)
pb = u'P/B={:0.0f}\u00b0/{:03.0f}\u00b0'.format(p[0], b[0])
sd = u'S/D={:03.0f}\u00b0/{:0.0f}\u00b0'.format(s[0], d[0])
return u'{}, {}'.format(pb, sd) | Format displayed coordinates during mouseover of axes. | Below is the instruction that describes the task:
### Input:
Format displayed coordinates during mouseover of axes.
### Response:
def format_coord(self, x, y):
"""Format displayed coordinates during mouseover of axes."""
p, b = stereonet_math.geographic2plunge_bearing(x, y)
s, d = stereonet_math.geographic2pole(x, y)
pb = u'P/B={:0.0f}\u00b0/{:03.0f}\u00b0'.format(p[0], b[0])
sd = u'S/D={:03.0f}\u00b0/{:0.0f}\u00b0'.format(s[0], d[0])
return u'{}, {}'.format(pb, sd) |
def strip_number(self):
"""The number of the strip that has changed state,
with 0 being the first strip.
On tablets with only one strip, this method always returns 0.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_STRIP`, this property
raises :exc:`AttributeError`.
Returns:
int: The index of the strip that changed state.
Raises:
AttributeError
"""
if self.type != EventType.TABLET_PAD_STRIP:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_pad_get_strip_number(
self._handle) | The number of the strip that has changed state,
with 0 being the first strip.
On tablets with only one strip, this method always returns 0.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_STRIP`, this property
raises :exc:`AttributeError`.
Returns:
int: The index of the strip that changed state.
Raises:
AttributeError | Below is the the instruction that describes the task:
### Input:
The number of the strip that has changed state,
with 0 being the first strip.
On tablets with only one strip, this method always returns 0.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_STRIP`, this property
raises :exc:`AttributeError`.
Returns:
int: The index of the strip that changed state.
Raises:
AttributeError
### Response:
def strip_number(self):
"""The number of the strip that has changed state,
with 0 being the first strip.
On tablets with only one strip, this method always returns 0.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_STRIP`, this property
raises :exc:`AttributeError`.
Returns:
int: The index of the strip that changed state.
Raises:
AttributeError
"""
if self.type != EventType.TABLET_PAD_STRIP:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_pad_get_strip_number(
self._handle) |
def hashable(data, v):
"""Determine whether `v` can be hashed."""
try:
data[v]
except (TypeError, KeyError, IndexError):
return False
return True | Determine whether `v` can be hashed. | Below is the the instruction that describes the task:
### Input:
Determine whether `v` can be hashed.
### Response:
def hashable(data, v):
"""Determine whether `v` can be hashed."""
try:
data[v]
except (TypeError, KeyError, IndexError):
return False
return True |
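A minimal usage sketch for hashable above (assumes only the definition as given); despite the docstring's wording, it effectively tests whether data[v] is a valid lookup, so unhashable values, missing keys, and out-of-range indices all yield False.

# Usage sketch for hashable(data, v); assumes the definition above.
data = {"a": 1, (1, 2): "tuple key"}
print(hashable(data, "a"))        # True: "a" is a key of data
print(hashable(data, (1, 2)))     # True: hashable and present
print(hashable(data, ["a"]))      # False: list keys raise TypeError
print(hashable(data, "missing"))  # False: lookup raises KeyError
print(hashable([10, 20], 1))      # True: index 1 exists
print(hashable([10, 20], 5))      # False: IndexError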
def get_history_entry_record(endianess, hist_date_time_flag, tm_format, event_number_flag, hist_seq_nbr_flag, data):
"""
Return data formatted into a log entry.
:param str endianess: The endianess to use when packing values ('>' or '<')
:param bool hist_date_time_flag: Whether or not a time stamp is included.
:param int tm_format: The format that the data is packed in, this typically
corresponds with the value in the GEN_CONFIG_TBL (table #0) (1 <= tm_format <= 4)
:param bool event_number_flag: Whether or not an event number is included.
    :param bool hist_seq_nbr_flag: Whether or not a history sequence number
is included.
:param str data: The packed and machine-formatted data to parse
:rtype: dict
"""
rcd = {}
if hist_date_time_flag:
tmstmp = format_ltime(endianess, tm_format, data[0:LTIME_LENGTH.get(tm_format)])
if tmstmp:
rcd['Time'] = tmstmp
data = data[LTIME_LENGTH.get(tm_format):]
if event_number_flag:
rcd['Event Number'] = struct.unpack(endianess + 'H', data[:2])[0]
data = data[2:]
if hist_seq_nbr_flag:
rcd['History Sequence Number'] = struct.unpack(endianess + 'H', data[:2])[0]
data = data[2:]
rcd['User ID'] = struct.unpack(endianess + 'H', data[:2])[0]
rcd['Procedure Number'], rcd['Std vs Mfg'] = get_table_idbb_field(endianess, data[2:4])[:2]
rcd['Arguments'] = data[4:]
return rcd | Return data formatted into a log entry.
:param str endianess: The endianess to use when packing values ('>' or '<')
:param bool hist_date_time_flag: Whether or not a time stamp is included.
:param int tm_format: The format that the data is packed in, this typically
corresponds with the value in the GEN_CONFIG_TBL (table #0) (1 <= tm_format <= 4)
:param bool event_number_flag: Whether or not an event number is included.
:param bool hist_seq_nbr_flag: Whether or not a history sequence number
is included.
:param str data: The packed and machine-formatted data to parse
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Return data formatted into a log entry.
:param str endianess: The endianess to use when packing values ('>' or '<')
:param bool hist_date_time_flag: Whether or not a time stamp is included.
:param int tm_format: The format that the data is packed in, this typically
corresponds with the value in the GEN_CONFIG_TBL (table #0) (1 <= tm_format <= 4)
:param bool event_number_flag: Whether or not an event number is included.
:param bool hist_seq_nbr_flag: Whether or not a history sequence number
is included.
:param str data: The packed and machine-formatted data to parse
:rtype: dict
### Response:
def get_history_entry_record(endianess, hist_date_time_flag, tm_format, event_number_flag, hist_seq_nbr_flag, data):
"""
Return data formatted into a log entry.
:param str endianess: The endianess to use when packing values ('>' or '<')
:param bool hist_date_time_flag: Whether or not a time stamp is included.
:param int tm_format: The format that the data is packed in, this typically
corresponds with the value in the GEN_CONFIG_TBL (table #0) (1 <= tm_format <= 4)
:param bool event_number_flag: Whether or not an event number is included.
    :param bool hist_seq_nbr_flag: Whether or not a history sequence number
is included.
:param str data: The packed and machine-formatted data to parse
:rtype: dict
"""
rcd = {}
if hist_date_time_flag:
tmstmp = format_ltime(endianess, tm_format, data[0:LTIME_LENGTH.get(tm_format)])
if tmstmp:
rcd['Time'] = tmstmp
data = data[LTIME_LENGTH.get(tm_format):]
if event_number_flag:
rcd['Event Number'] = struct.unpack(endianess + 'H', data[:2])[0]
data = data[2:]
if hist_seq_nbr_flag:
rcd['History Sequence Number'] = struct.unpack(endianess + 'H', data[:2])[0]
data = data[2:]
rcd['User ID'] = struct.unpack(endianess + 'H', data[:2])[0]
rcd['Procedure Number'], rcd['Std vs Mfg'] = get_table_idbb_field(endianess, data[2:4])[:2]
rcd['Arguments'] = data[4:]
return rcd |
def get_bench_api(self):
"""
Extend bench functionality with these new commands
:return: Dictionary
"""
# Extend bench functionality with these new commands
ret_dict = dict()
ret_dict["assertTraceDoesNotContain"] = asserts.assertTraceDoesNotContain
ret_dict["assertTraceContains"] = asserts.assertTraceContains
ret_dict["assertDutTraceDoesNotContain"] = self.assert_dut_trace_not_contains
ret_dict["assertDutTraceContains"] = self.assert_dut_trace_contains
ret_dict["assertTrue"] = asserts.assertTrue
ret_dict["assertFalse"] = asserts.assertFalse
ret_dict["assertNone"] = asserts.assertNone
ret_dict["assertNotNone"] = asserts.assertNotNone
ret_dict["assertEqual"] = asserts.assertEqual
ret_dict["assertNotEqual"] = asserts.assertNotEqual
ret_dict["assertJsonContains"] = asserts.assertJsonContains
return ret_dict | Extend bench functionality with these new commands
:return: Dictionary | Below is the the instruction that describes the task:
### Input:
Extend bench functionality with these new commands
:return: Dictionary
### Response:
def get_bench_api(self):
"""
Extend bench functionality with these new commands
:return: Dictionary
"""
# Extend bench functionality with these new commands
ret_dict = dict()
ret_dict["assertTraceDoesNotContain"] = asserts.assertTraceDoesNotContain
ret_dict["assertTraceContains"] = asserts.assertTraceContains
ret_dict["assertDutTraceDoesNotContain"] = self.assert_dut_trace_not_contains
ret_dict["assertDutTraceContains"] = self.assert_dut_trace_contains
ret_dict["assertTrue"] = asserts.assertTrue
ret_dict["assertFalse"] = asserts.assertFalse
ret_dict["assertNone"] = asserts.assertNone
ret_dict["assertNotNone"] = asserts.assertNotNone
ret_dict["assertEqual"] = asserts.assertEqual
ret_dict["assertNotEqual"] = asserts.assertNotEqual
ret_dict["assertJsonContains"] = asserts.assertJsonContains
return ret_dict |
def _K(m):
""" matrix K_m from Wiktorsson2001 """
M = m*(m - 1)//2
K = np.zeros((M, m**2), dtype=np.int64)
row = 0
for j in range(1, m):
col = (j - 1)*m + j
s = m - j
K[row:(row+s), col:(col+s)] = np.eye(s)
row += s
return K | matrix K_m from Wiktorsson2001 | Below is the the instruction that describes the task:
### Input:
matrix K_m from Wiktorsson2001
### Response:
def _K(m):
""" matrix K_m from Wiktorsson2001 """
M = m*(m - 1)//2
K = np.zeros((M, m**2), dtype=np.int64)
row = 0
for j in range(1, m):
col = (j - 1)*m + j
s = m - j
K[row:(row+s), col:(col+s)] = np.eye(s)
row += s
return K |
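A small illustration of _K for m=3 (a sketch assuming numpy is imported and the _K above is in scope); applied to a row-major vectorised 3x3 matrix it selects the entries above the diagonal, which is the selection-matrix role this kind of K_m plays.

# Illustration of _K(3); assumes numpy and the definition above.
import numpy as np

K = _K(3)
print(K.shape)            # (3, 9): M = 3*(3-1)//2 rows, m**2 columns
A = np.arange(9).reshape(3, 3)
print(K @ A.reshape(-1))  # selects A[0,1], A[0,2], A[1,2] -> [1 2 5]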
def deep_merge(dict_one, dict_two):
'''
Deep merge two dicts.
'''
merged = dict_one.copy()
for key, value in dict_two.items():
# value is equivalent to dict_two[key]
if (key in dict_one and
isinstance(dict_one[key], dict) and
isinstance(value, dict)):
merged[key] = deep_merge(dict_one[key], value)
elif (key in dict_one and
isinstance(dict_one[key], list) and
isinstance(value, list)):
merged[key] = list(set(dict_one[key] + value))
else:
merged[key] = value
return merged | Deep merge two dicts. | Below is the the instruction that describes the task:
### Input:
Deep merge two dicts.
### Response:
def deep_merge(dict_one, dict_two):
'''
Deep merge two dicts.
'''
merged = dict_one.copy()
for key, value in dict_two.items():
# value is equivalent to dict_two[key]
if (key in dict_one and
isinstance(dict_one[key], dict) and
isinstance(value, dict)):
merged[key] = deep_merge(dict_one[key], value)
elif (key in dict_one and
isinstance(dict_one[key], list) and
isinstance(value, list)):
merged[key] = list(set(dict_one[key] + value))
else:
merged[key] = value
return merged |
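A quick worked example of deep_merge above: nested dicts merge recursively, lists are unioned via set (order not preserved), and scalar values from the second dict win.

# Worked example for deep_merge; assumes the definition above.
a = {"db": {"host": "localhost", "port": 5432}, "tags": ["x"], "debug": False}
b = {"db": {"port": 6432}, "tags": ["y", "x"], "debug": True}
print(deep_merge(a, b))
# {'db': {'host': 'localhost', 'port': 6432},
#  'tags': ['x', 'y'] in some order, 'debug': True}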
def snapshots(self):
"""
Provides access to snapshot management methods for the given entry.
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/snapshots
:return: :class:`EntrySnapshotsProxy <contentful_management.entry_snapshots_proxy.EntrySnapshotsProxy>` object.
:rtype: contentful.entry_snapshots_proxy.EntrySnapshotsProxy
Usage:
>>> entry_snapshots_proxy = entry.snapshots()
<EntrySnapshotsProxy space_id="cfexampleapi" environment_id="master" entry_id="nyancat">
"""
return EntrySnapshotsProxy(self._client, self.sys['space'].id, self._environment_id, self.sys['id']) | Provides access to snapshot management methods for the given entry.
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/snapshots
:return: :class:`EntrySnapshotsProxy <contentful_management.entry_snapshots_proxy.EntrySnapshotsProxy>` object.
:rtype: contentful.entry_snapshots_proxy.EntrySnapshotsProxy
Usage:
>>> entry_snapshots_proxy = entry.snapshots()
<EntrySnapshotsProxy space_id="cfexampleapi" environment_id="master" entry_id="nyancat"> | Below is the the instruction that describes the task:
### Input:
Provides access to snapshot management methods for the given entry.
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/snapshots
:return: :class:`EntrySnapshotsProxy <contentful_management.entry_snapshots_proxy.EntrySnapshotsProxy>` object.
:rtype: contentful.entry_snapshots_proxy.EntrySnapshotsProxy
Usage:
>>> entry_snapshots_proxy = entry.snapshots()
<EntrySnapshotsProxy space_id="cfexampleapi" environment_id="master" entry_id="nyancat">
### Response:
def snapshots(self):
"""
Provides access to snapshot management methods for the given entry.
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/snapshots
:return: :class:`EntrySnapshotsProxy <contentful_management.entry_snapshots_proxy.EntrySnapshotsProxy>` object.
:rtype: contentful.entry_snapshots_proxy.EntrySnapshotsProxy
Usage:
>>> entry_snapshots_proxy = entry.snapshots()
<EntrySnapshotsProxy space_id="cfexampleapi" environment_id="master" entry_id="nyancat">
"""
return EntrySnapshotsProxy(self._client, self.sys['space'].id, self._environment_id, self.sys['id']) |
def name(self):
"""str: name of the file entry, which does not include the full path."""
# The root directory file name is typically '.', dfVFS however uses ''.
if self._is_root:
return ''
mft_attribute = getattr(self.path_spec, 'mft_attribute', None)
if mft_attribute is not None:
return self._fsntfs_file_entry.get_name_by_attribute_index(mft_attribute)
return self._fsntfs_file_entry.get_name() | str: name of the file entry, which does not include the full path. | Below is the the instruction that describes the task:
### Input:
str: name of the file entry, which does not include the full path.
### Response:
def name(self):
"""str: name of the file entry, which does not include the full path."""
# The root directory file name is typically '.', dfVFS however uses ''.
if self._is_root:
return ''
mft_attribute = getattr(self.path_spec, 'mft_attribute', None)
if mft_attribute is not None:
return self._fsntfs_file_entry.get_name_by_attribute_index(mft_attribute)
return self._fsntfs_file_entry.get_name() |
def refreshUi(self):
"""
Matches the UI state to the current cursor positioning.
"""
font = self.currentFont()
for name in ('underline', 'bold', 'italic', 'strikeOut'):
getter = getattr(font, name)
act = self._actions[name]
act.blockSignals(True)
act.setChecked(getter())
act.blockSignals(False) | Matches the UI state to the current cursor positioning. | Below is the the instruction that describes the task:
### Input:
Matches the UI state to the current cursor positioning.
### Response:
def refreshUi(self):
"""
Matches the UI state to the current cursor positioning.
"""
font = self.currentFont()
for name in ('underline', 'bold', 'italic', 'strikeOut'):
getter = getattr(font, name)
act = self._actions[name]
act.blockSignals(True)
act.setChecked(getter())
act.blockSignals(False) |
def logpdf(self, mu):
"""
Log PDF for Cauchy prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- log(p(mu))
"""
if self.transform is not None:
mu = self.transform(mu)
return ss.cauchy.logpdf(mu, self.loc0, self.scale0) | Log PDF for Cauchy prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- log(p(mu)) | Below is the the instruction that describes the task:
### Input:
Log PDF for Cauchy prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- log(p(mu))
### Response:
def logpdf(self, mu):
"""
Log PDF for Cauchy prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- log(p(mu))
"""
if self.transform is not None:
mu = self.transform(mu)
return ss.cauchy.logpdf(mu, self.loc0, self.scale0) |
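For reference, the scipy call behind logpdf above: the standard Cauchy log-density is log(1 / (pi * (1 + x**2))), so it equals -log(pi) at x = 0. A standalone check, assuming scipy and numpy are available:

# Standalone check of the scipy call used by logpdf above.
import numpy as np
import scipy.stats as ss

print(ss.cauchy.logpdf(0.0, loc=0.0, scale=1.0))  # about -1.1447
print(-np.log(np.pi))                             # same value, -log(pi)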
async def get_deals(self, **params):
"""Receives all users deals
Accepts:
- buyer public key
"""
if params.get("message"):
params = json.loads(params.get("message", "{}"))
if not params:
return {"error":400, "reason":"Missed required fields"}
buyer = params.get("buyer")
if not buyer:
return {"error":400, "reason":"Missed public key"}
deals = {i:[] for i in list(settings.bridges.keys())}
for coinid in list(settings.bridges.keys()):
database = client[coinid]
collection = database[settings.DEAL]
async for document in collection.find({"owner":buyer}):
deals[coinid].append((document["cid"],document.get("txid")))
return deals | Receives all users deals
Accepts:
- buyer public key | Below is the the instruction that describes the task:
### Input:
Receives all users deals
Accepts:
- buyer public key
### Response:
async def get_deals(self, **params):
"""Receives all users deals
Accepts:
- buyer public key
"""
if params.get("message"):
params = json.loads(params.get("message", "{}"))
if not params:
return {"error":400, "reason":"Missed required fields"}
buyer = params.get("buyer")
if not buyer:
return {"error":400, "reason":"Missed public key"}
deals = {i:[] for i in list(settings.bridges.keys())}
for coinid in list(settings.bridges.keys()):
database = client[coinid]
collection = database[settings.DEAL]
async for document in collection.find({"owner":buyer}):
deals[coinid].append((document["cid"],document.get("txid")))
return deals |
def get_event_teams_attendees(self, id, team_id, **data):
"""
GET /events/:id/teams/:team_id/attendees/
Returns :format:`attendees` for a single :format:`teams`.
"""
        return self.get("/events/{0}/teams/{1}/attendees/".format(id,team_id), data=data)
Returns :format:`attendees` for a single :format:`teams`. | Below is the the instruction that describes the task:
### Input:
GET /events/:id/teams/:team_id/attendees/
Returns :format:`attendees` for a single :format:`teams`.
### Response:
def get_event_teams_attendees(self, id, team_id, **data):
"""
GET /events/:id/teams/:team_id/attendees/
Returns :format:`attendees` for a single :format:`teams`.
"""
        return self.get("/events/{0}/teams/{1}/attendees/".format(id,team_id), data=data)
def _forbidden_attributes(obj):
"""Return the object without the forbidden attributes."""
for key in list(obj.data.keys()):
if key in list(obj.reserved_keys.keys()):
obj.data.pop(key)
return obj | Return the object without the forbidden attributes. | Below is the the instruction that describes the task:
### Input:
Return the object without the forbidden attributes.
### Response:
def _forbidden_attributes(obj):
"""Return the object without the forbidden attributes."""
for key in list(obj.data.keys()):
if key in list(obj.reserved_keys.keys()):
obj.data.pop(key)
return obj |
def install(package_name):
"""Installs a holodeck package.
Args:
package_name (str): The name of the package to install
"""
holodeck_path = util.get_holodeck_path()
binary_website = "https://s3.amazonaws.com/holodeckworlds/"
if package_name not in packages:
raise HolodeckException("Unknown package name " + package_name)
package_url = packages[package_name]
print("Installing " + package_name + " at " + holodeck_path)
install_path = os.path.join(holodeck_path, "worlds")
binary_url = binary_website + util.get_os_key() + "_" + package_url
_download_binary(binary_url, install_path)
if os.name == "posix":
_make_binary_excecutable(package_name, install_path) | Installs a holodeck package.
Args:
package_name (str): The name of the package to install | Below is the the instruction that describes the task:
### Input:
Installs a holodeck package.
Args:
package_name (str): The name of the package to install
### Response:
def install(package_name):
"""Installs a holodeck package.
Args:
package_name (str): The name of the package to install
"""
holodeck_path = util.get_holodeck_path()
binary_website = "https://s3.amazonaws.com/holodeckworlds/"
if package_name not in packages:
raise HolodeckException("Unknown package name " + package_name)
package_url = packages[package_name]
print("Installing " + package_name + " at " + holodeck_path)
install_path = os.path.join(holodeck_path, "worlds")
binary_url = binary_website + util.get_os_key() + "_" + package_url
_download_binary(binary_url, install_path)
if os.name == "posix":
_make_binary_excecutable(package_name, install_path) |
def CreateInstance(r, mode, name, disk_template, disks, nics,
**kwargs):
"""
Creates a new instance.
More details for parameters can be found in the RAPI documentation.
@type mode: string
@param mode: Instance creation mode
@type name: string
@param name: Hostname of the instance to create
@type disk_template: string
@param disk_template: Disk template for instance (e.g. plain, diskless,
file, or drbd)
@type disks: list of dicts
@param disks: List of disk definitions
@type nics: list of dicts
@param nics: List of NIC definitions
@type dry_run: bool
@keyword dry_run: whether to perform a dry run
@type no_install: bool
@keyword no_install: whether to create without installing OS(true=don't install)
@rtype: int
@return: job id
"""
if INST_CREATE_REQV1 not in r.features:
raise GanetiApiError("Cannot create Ganeti 2.1-style instances")
query = {}
if kwargs.get("dry_run"):
query["dry-run"] = 1
if kwargs.get("no_install"):
query["no-install"] = 1
# Make a version 1 request.
body = {
_REQ_DATA_VERSION_FIELD: 1,
"mode": mode,
"name": name,
"disk_template": disk_template,
"disks": disks,
"nics": nics,
}
conflicts = set(kwargs.iterkeys()) & set(body.iterkeys())
if conflicts:
raise GanetiApiError("Required fields can not be specified as"
" keywords: %s" % ", ".join(conflicts))
kwargs.pop("dry_run", None)
body.update(kwargs)
return r.request("post", "/2/instances", query=query, content=body) | Creates a new instance.
More details for parameters can be found in the RAPI documentation.
@type mode: string
@param mode: Instance creation mode
@type name: string
@param name: Hostname of the instance to create
@type disk_template: string
@param disk_template: Disk template for instance (e.g. plain, diskless,
file, or drbd)
@type disks: list of dicts
@param disks: List of disk definitions
@type nics: list of dicts
@param nics: List of NIC definitions
@type dry_run: bool
@keyword dry_run: whether to perform a dry run
@type no_install: bool
@keyword no_install: whether to create without installing OS(true=don't install)
@rtype: int
@return: job id | Below is the the instruction that describes the task:
### Input:
Creates a new instance.
More details for parameters can be found in the RAPI documentation.
@type mode: string
@param mode: Instance creation mode
@type name: string
@param name: Hostname of the instance to create
@type disk_template: string
@param disk_template: Disk template for instance (e.g. plain, diskless,
file, or drbd)
@type disks: list of dicts
@param disks: List of disk definitions
@type nics: list of dicts
@param nics: List of NIC definitions
@type dry_run: bool
@keyword dry_run: whether to perform a dry run
@type no_install: bool
@keyword no_install: whether to create without installing OS(true=don't install)
@rtype: int
@return: job id
### Response:
def CreateInstance(r, mode, name, disk_template, disks, nics,
**kwargs):
"""
Creates a new instance.
More details for parameters can be found in the RAPI documentation.
@type mode: string
@param mode: Instance creation mode
@type name: string
@param name: Hostname of the instance to create
@type disk_template: string
@param disk_template: Disk template for instance (e.g. plain, diskless,
file, or drbd)
@type disks: list of dicts
@param disks: List of disk definitions
@type nics: list of dicts
@param nics: List of NIC definitions
@type dry_run: bool
@keyword dry_run: whether to perform a dry run
@type no_install: bool
@keyword no_install: whether to create without installing OS(true=don't install)
@rtype: int
@return: job id
"""
if INST_CREATE_REQV1 not in r.features:
raise GanetiApiError("Cannot create Ganeti 2.1-style instances")
query = {}
if kwargs.get("dry_run"):
query["dry-run"] = 1
if kwargs.get("no_install"):
query["no-install"] = 1
# Make a version 1 request.
body = {
_REQ_DATA_VERSION_FIELD: 1,
"mode": mode,
"name": name,
"disk_template": disk_template,
"disks": disks,
"nics": nics,
}
conflicts = set(kwargs.iterkeys()) & set(body.iterkeys())
if conflicts:
raise GanetiApiError("Required fields can not be specified as"
" keywords: %s" % ", ".join(conflicts))
kwargs.pop("dry_run", None)
body.update(kwargs)
return r.request("post", "/2/instances", query=query, content=body) |
def get_instance(self, instance_id, **kwargs):
"""Get details about a virtual server instance.
:param integer instance_id: the instance ID
:returns: A dictionary containing a large amount of information about
the specified instance.
Example::
# Print out instance ID 12345.
vsi = mgr.get_instance(12345)
print vsi
# Print out only FQDN and primaryIP for instance 12345
object_mask = "mask[fullyQualifiedDomainName,primaryIpAddress]"
vsi = mgr.get_instance(12345, mask=mask)
print vsi
"""
if 'mask' not in kwargs:
kwargs['mask'] = (
'id,'
'globalIdentifier,'
'fullyQualifiedDomainName,'
'hostname,'
'domain,'
'createDate,'
'modifyDate,'
'provisionDate,'
'notes,'
'dedicatedAccountHostOnlyFlag,'
'privateNetworkOnlyFlag,'
'primaryBackendIpAddress,'
'primaryIpAddress,'
'''networkComponents[id, status, speed, maxSpeed, name,
macAddress, primaryIpAddress, port,
primarySubnet[addressSpace],
securityGroupBindings[
securityGroup[id, name]]],'''
'lastKnownPowerState.name,'
'powerState,'
'status,'
'maxCpu,'
'maxMemory,'
'datacenter,'
'activeTransaction[id, transactionStatus[friendlyName,name]],'
'lastTransaction[transactionStatus],'
'lastOperatingSystemReload.id,'
'blockDevices,'
'blockDeviceTemplateGroup[id, name, globalIdentifier],'
'postInstallScriptUri,'
'''operatingSystem[passwords[username,password],
softwareLicense.softwareDescription[
manufacturer,name,version,
referenceCode]],'''
'''softwareComponents[
passwords[username,password,notes],
softwareLicense[softwareDescription[
manufacturer,name,version,
referenceCode]]],'''
'hourlyBillingFlag,'
'userData,'
'''billingItem[id,nextInvoiceTotalRecurringAmount,
package[id,keyName],
children[categoryCode,nextInvoiceTotalRecurringAmount],
orderItem[id,
order.userRecord[username],
preset.keyName]],'''
'tagReferences[id,tag[name,id]],'
'networkVlans[id,vlanNumber,networkSpace],'
'dedicatedHost.id,'
'placementGroupId'
)
return self.guest.getObject(id=instance_id, **kwargs) | Get details about a virtual server instance.
:param integer instance_id: the instance ID
:returns: A dictionary containing a large amount of information about
the specified instance.
Example::
# Print out instance ID 12345.
vsi = mgr.get_instance(12345)
print vsi
# Print out only FQDN and primaryIP for instance 12345
object_mask = "mask[fullyQualifiedDomainName,primaryIpAddress]"
vsi = mgr.get_instance(12345, mask=mask)
print vsi | Below is the the instruction that describes the task:
### Input:
Get details about a virtual server instance.
:param integer instance_id: the instance ID
:returns: A dictionary containing a large amount of information about
the specified instance.
Example::
# Print out instance ID 12345.
vsi = mgr.get_instance(12345)
print vsi
# Print out only FQDN and primaryIP for instance 12345
object_mask = "mask[fullyQualifiedDomainName,primaryIpAddress]"
vsi = mgr.get_instance(12345, mask=mask)
print vsi
### Response:
def get_instance(self, instance_id, **kwargs):
"""Get details about a virtual server instance.
:param integer instance_id: the instance ID
:returns: A dictionary containing a large amount of information about
the specified instance.
Example::
# Print out instance ID 12345.
vsi = mgr.get_instance(12345)
print vsi
# Print out only FQDN and primaryIP for instance 12345
object_mask = "mask[fullyQualifiedDomainName,primaryIpAddress]"
vsi = mgr.get_instance(12345, mask=mask)
print vsi
"""
if 'mask' not in kwargs:
kwargs['mask'] = (
'id,'
'globalIdentifier,'
'fullyQualifiedDomainName,'
'hostname,'
'domain,'
'createDate,'
'modifyDate,'
'provisionDate,'
'notes,'
'dedicatedAccountHostOnlyFlag,'
'privateNetworkOnlyFlag,'
'primaryBackendIpAddress,'
'primaryIpAddress,'
'''networkComponents[id, status, speed, maxSpeed, name,
macAddress, primaryIpAddress, port,
primarySubnet[addressSpace],
securityGroupBindings[
securityGroup[id, name]]],'''
'lastKnownPowerState.name,'
'powerState,'
'status,'
'maxCpu,'
'maxMemory,'
'datacenter,'
'activeTransaction[id, transactionStatus[friendlyName,name]],'
'lastTransaction[transactionStatus],'
'lastOperatingSystemReload.id,'
'blockDevices,'
'blockDeviceTemplateGroup[id, name, globalIdentifier],'
'postInstallScriptUri,'
'''operatingSystem[passwords[username,password],
softwareLicense.softwareDescription[
manufacturer,name,version,
referenceCode]],'''
'''softwareComponents[
passwords[username,password,notes],
softwareLicense[softwareDescription[
manufacturer,name,version,
referenceCode]]],'''
'hourlyBillingFlag,'
'userData,'
'''billingItem[id,nextInvoiceTotalRecurringAmount,
package[id,keyName],
children[categoryCode,nextInvoiceTotalRecurringAmount],
orderItem[id,
order.userRecord[username],
preset.keyName]],'''
'tagReferences[id,tag[name,id]],'
'networkVlans[id,vlanNumber,networkSpace],'
'dedicatedHost.id,'
'placementGroupId'
)
return self.guest.getObject(id=instance_id, **kwargs) |
def nulldata_script(data: bytes) -> NulldataScript:
'''create nulldata (OP_return) script'''
stack = StackData.from_bytes(data)
return NulldataScript(stack) | create nulldata (OP_return) script | Below is the the instruction that describes the task:
### Input:
create nulldata (OP_return) script
### Response:
def nulldata_script(data: bytes) -> NulldataScript:
'''create nulldata (OP_return) script'''
stack = StackData.from_bytes(data)
return NulldataScript(stack) |
def alphabetize_attributes(self):
"""
Orders attributes names alphabetically, except for the class attribute, which is kept last.
"""
self.attributes.sort(key=lambda name: (name == self.class_attr_name, name)) | Orders attributes names alphabetically, except for the class attribute, which is kept last. | Below is the the instruction that describes the task:
### Input:
Orders attributes names alphabetically, except for the class attribute, which is kept last.
### Response:
def alphabetize_attributes(self):
"""
Orders attributes names alphabetically, except for the class attribute, which is kept last.
"""
self.attributes.sort(key=lambda name: (name == self.class_attr_name, name)) |
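The sort key above works because False orders before True, so only the class attribute gets pushed to the end; a standalone sketch of the same trick (the attribute names here are made up):

# Standalone sketch of the sort key used by alphabetize_attributes.
# 'attributes' and 'class_attr_name' are hypothetical example values.
attributes = ["petal_width", "species", "sepal_length"]
class_attr_name = "species"
attributes.sort(key=lambda name: (name == class_attr_name, name))
print(attributes)  # ['petal_width', 'sepal_length', 'species']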
def check_balances(self, account=None):
''' Fetches an account balance and makes
necessary conversions
'''
a = self.account(account)
if a is not False and a is not None:
self.sbdbal = Amount(a['sbd_balance']).amount
self.steembal = Amount(a['balance']).amount
self.votepower = a['voting_power']
self.lastvotetime = a['last_vote_time']
vs = Amount(a['vesting_shares']).amount
dvests = Amount(a['delegated_vesting_shares']).amount
rvests = Amount(a['received_vesting_shares']).amount
vests = (float(vs) - float(dvests)) + float(rvests)
try:
self.global_props()
self.steempower_delegated = self.util.vests_to_sp(dvests)
self.steempower_raw = self.util.vests_to_sp(vs)
self.steempower = self.util.vests_to_sp(vests)
except Exception as e:
self.msg.error_message(e)
else:
return [self.sbdbal, self.steembal, self.steempower,
self.votepower, self.lastvotetime]
return False | Fetches an account balance and makes
necessary conversions | Below is the the instruction that describes the task:
### Input:
Fetches an account balance and makes
necessary conversions
### Response:
def check_balances(self, account=None):
''' Fetches an account balance and makes
necessary conversions
'''
a = self.account(account)
if a is not False and a is not None:
self.sbdbal = Amount(a['sbd_balance']).amount
self.steembal = Amount(a['balance']).amount
self.votepower = a['voting_power']
self.lastvotetime = a['last_vote_time']
vs = Amount(a['vesting_shares']).amount
dvests = Amount(a['delegated_vesting_shares']).amount
rvests = Amount(a['received_vesting_shares']).amount
vests = (float(vs) - float(dvests)) + float(rvests)
try:
self.global_props()
self.steempower_delegated = self.util.vests_to_sp(dvests)
self.steempower_raw = self.util.vests_to_sp(vs)
self.steempower = self.util.vests_to_sp(vests)
except Exception as e:
self.msg.error_message(e)
else:
return [self.sbdbal, self.steembal, self.steempower,
self.votepower, self.lastvotetime]
return False |
def get_attributes(self, uuid=None, attribute_names=None):
"""
Send a GetAttributes request to the server.
Args:
uuid (string): The ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of AttributeName values indicating
what object attributes the client wants from the server.
Optional, defaults to None.
Returns:
result (GetAttributesResult): A structure containing the results
of the operation.
"""
batch_item = self._build_get_attributes_batch_item(
uuid,
attribute_names
)
request = self._build_request_message(None, [batch_item])
response = self._send_and_receive_message(request)
results = self._process_batch_items(response)
return results[0] | Send a GetAttributes request to the server.
Args:
uuid (string): The ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of AttributeName values indicating
what object attributes the client wants from the server.
Optional, defaults to None.
Returns:
result (GetAttributesResult): A structure containing the results
of the operation. | Below is the the instruction that describes the task:
### Input:
Send a GetAttributes request to the server.
Args:
uuid (string): The ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of AttributeName values indicating
what object attributes the client wants from the server.
Optional, defaults to None.
Returns:
result (GetAttributesResult): A structure containing the results
of the operation.
### Response:
def get_attributes(self, uuid=None, attribute_names=None):
"""
Send a GetAttributes request to the server.
Args:
uuid (string): The ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of AttributeName values indicating
what object attributes the client wants from the server.
Optional, defaults to None.
Returns:
result (GetAttributesResult): A structure containing the results
of the operation.
"""
batch_item = self._build_get_attributes_batch_item(
uuid,
attribute_names
)
request = self._build_request_message(None, [batch_item])
response = self._send_and_receive_message(request)
results = self._process_batch_items(response)
return results[0] |
def is_instance_throughput_too_low(self, inst_id):
"""
Return whether the throughput of the master instance is greater than the
acceptable threshold
"""
r = self.instance_throughput_ratio(inst_id)
if r is None:
logger.debug("{} instance {} throughput is not "
"measurable.".format(self, inst_id))
return None
too_low = r < self.Delta
if too_low:
logger.display("{}{} instance {} throughput ratio {} is lower than Delta {}.".
format(MONITORING_PREFIX, self, inst_id, r, self.Delta))
else:
logger.trace("{} instance {} throughput ratio {} is acceptable.".
format(self, inst_id, r))
return too_low | Return whether the throughput of the master instance is greater than the
acceptable threshold | Below is the the instruction that describes the task:
### Input:
Return whether the throughput of the master instance is greater than the
acceptable threshold
### Response:
def is_instance_throughput_too_low(self, inst_id):
"""
Return whether the throughput of the master instance is greater than the
acceptable threshold
"""
r = self.instance_throughput_ratio(inst_id)
if r is None:
logger.debug("{} instance {} throughput is not "
"measurable.".format(self, inst_id))
return None
too_low = r < self.Delta
if too_low:
logger.display("{}{} instance {} throughput ratio {} is lower than Delta {}.".
format(MONITORING_PREFIX, self, inst_id, r, self.Delta))
else:
logger.trace("{} instance {} throughput ratio {} is acceptable.".
format(self, inst_id, r))
return too_low |
def as_dict(self):
"""
Json-serializable dict representation of a kpoint
"""
return {"lattice": self.lattice.as_dict(),
"fcoords": list(self.frac_coords),
"ccoords": list(self.cart_coords), "label": self.label,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__} | Json-serializable dict representation of a kpoint | Below is the the instruction that describes the task:
### Input:
Json-serializable dict representation of a kpoint
### Response:
def as_dict(self):
"""
Json-serializable dict representation of a kpoint
"""
return {"lattice": self.lattice.as_dict(),
"fcoords": list(self.frac_coords),
"ccoords": list(self.cart_coords), "label": self.label,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__} |
def do_db(self, arg):
"""
[~thread] db <register> - show memory contents as bytes
[~thread] db <register-register> - show memory contents as bytes
[~thread] db <register> <size> - show memory contents as bytes
[~process] db <address> - show memory contents as bytes
[~process] db <address-address> - show memory contents as bytes
[~process] db <address> <size> - show memory contents as bytes
"""
self.print_memory_display(arg, HexDump.hexblock)
self.last_display_command = self.do_db | [~thread] db <register> - show memory contents as bytes
[~thread] db <register-register> - show memory contents as bytes
[~thread] db <register> <size> - show memory contents as bytes
[~process] db <address> - show memory contents as bytes
[~process] db <address-address> - show memory contents as bytes
[~process] db <address> <size> - show memory contents as bytes | Below is the the instruction that describes the task:
### Input:
[~thread] db <register> - show memory contents as bytes
[~thread] db <register-register> - show memory contents as bytes
[~thread] db <register> <size> - show memory contents as bytes
[~process] db <address> - show memory contents as bytes
[~process] db <address-address> - show memory contents as bytes
[~process] db <address> <size> - show memory contents as bytes
### Response:
def do_db(self, arg):
"""
[~thread] db <register> - show memory contents as bytes
[~thread] db <register-register> - show memory contents as bytes
[~thread] db <register> <size> - show memory contents as bytes
[~process] db <address> - show memory contents as bytes
[~process] db <address-address> - show memory contents as bytes
[~process] db <address> <size> - show memory contents as bytes
"""
self.print_memory_display(arg, HexDump.hexblock)
self.last_display_command = self.do_db |
def isargument(self, node):
""" checks whether node aliases to a parameter."""
try:
node_id, _ = self.node_to_id(node)
return (node_id in self.name_to_nodes and
any([isinstance(n, ast.Name) and
isinstance(n.ctx, ast.Param)
for n in self.name_to_nodes[node_id]]))
except UnboundableRValue:
return False | checks whether node aliases to a parameter. | Below is the the instruction that describes the task:
### Input:
checks whether node aliases to a parameter.
### Response:
def isargument(self, node):
""" checks whether node aliases to a parameter."""
try:
node_id, _ = self.node_to_id(node)
return (node_id in self.name_to_nodes and
any([isinstance(n, ast.Name) and
isinstance(n.ctx, ast.Param)
for n in self.name_to_nodes[node_id]]))
except UnboundableRValue:
return False |
def loop_cocoa(kernel):
"""Start the kernel, coordinating with the Cocoa CFRunLoop event loop
via the matplotlib MacOSX backend.
"""
import matplotlib
if matplotlib.__version__ < '1.1.0':
kernel.log.warn(
"MacOSX backend in matplotlib %s doesn't have a Timer, "
"falling back on Tk for CFRunLoop integration. Note that "
"even this won't work if Tk is linked against X11 instead of "
"Cocoa (e.g. EPD). To use the MacOSX backend in the kernel, "
"you must use matplotlib >= 1.1.0, or a native libtk."
)
return loop_tk(kernel)
from matplotlib.backends.backend_macosx import TimerMac, show
# scale interval for sec->ms
poll_interval = int(1000*kernel._poll_interval)
real_excepthook = sys.excepthook
def handle_int(etype, value, tb):
"""don't let KeyboardInterrupts look like crashes"""
if etype is KeyboardInterrupt:
io.raw_print("KeyboardInterrupt caught in CFRunLoop")
else:
real_excepthook(etype, value, tb)
# add doi() as a Timer to the CFRunLoop
def doi():
# restore excepthook during IPython code
sys.excepthook = real_excepthook
kernel.do_one_iteration()
# and back:
sys.excepthook = handle_int
t = TimerMac(poll_interval)
t.add_callback(doi)
t.start()
# but still need a Poller for when there are no active windows,
# during which time mainloop() returns immediately
poller = zmq.Poller()
if kernel.control_stream:
poller.register(kernel.control_stream.socket, zmq.POLLIN)
for stream in kernel.shell_streams:
poller.register(stream.socket, zmq.POLLIN)
while True:
try:
# double nested try/except, to properly catch KeyboardInterrupt
# due to pyzmq Issue #130
try:
# don't let interrupts during mainloop invoke crash_handler:
sys.excepthook = handle_int
show.mainloop()
sys.excepthook = real_excepthook
# use poller if mainloop returned (no windows)
# scale by extra factor of 10, since it's a real poll
poller.poll(10*poll_interval)
kernel.do_one_iteration()
except:
raise
except KeyboardInterrupt:
# Ctrl-C shouldn't crash the kernel
io.raw_print("KeyboardInterrupt caught in kernel")
finally:
# ensure excepthook is restored
sys.excepthook = real_excepthook | Start the kernel, coordinating with the Cocoa CFRunLoop event loop
via the matplotlib MacOSX backend. | Below is the the instruction that describes the task:
### Input:
Start the kernel, coordinating with the Cocoa CFRunLoop event loop
via the matplotlib MacOSX backend.
### Response:
def loop_cocoa(kernel):
"""Start the kernel, coordinating with the Cocoa CFRunLoop event loop
via the matplotlib MacOSX backend.
"""
import matplotlib
if matplotlib.__version__ < '1.1.0':
kernel.log.warn(
"MacOSX backend in matplotlib %s doesn't have a Timer, "
"falling back on Tk for CFRunLoop integration. Note that "
"even this won't work if Tk is linked against X11 instead of "
"Cocoa (e.g. EPD). To use the MacOSX backend in the kernel, "
"you must use matplotlib >= 1.1.0, or a native libtk."
)
return loop_tk(kernel)
from matplotlib.backends.backend_macosx import TimerMac, show
# scale interval for sec->ms
poll_interval = int(1000*kernel._poll_interval)
real_excepthook = sys.excepthook
def handle_int(etype, value, tb):
"""don't let KeyboardInterrupts look like crashes"""
if etype is KeyboardInterrupt:
io.raw_print("KeyboardInterrupt caught in CFRunLoop")
else:
real_excepthook(etype, value, tb)
# add doi() as a Timer to the CFRunLoop
def doi():
# restore excepthook during IPython code
sys.excepthook = real_excepthook
kernel.do_one_iteration()
# and back:
sys.excepthook = handle_int
t = TimerMac(poll_interval)
t.add_callback(doi)
t.start()
# but still need a Poller for when there are no active windows,
# during which time mainloop() returns immediately
poller = zmq.Poller()
if kernel.control_stream:
poller.register(kernel.control_stream.socket, zmq.POLLIN)
for stream in kernel.shell_streams:
poller.register(stream.socket, zmq.POLLIN)
while True:
try:
# double nested try/except, to properly catch KeyboardInterrupt
# due to pyzmq Issue #130
try:
# don't let interrupts during mainloop invoke crash_handler:
sys.excepthook = handle_int
show.mainloop()
sys.excepthook = real_excepthook
# use poller if mainloop returned (no windows)
# scale by extra factor of 10, since it's a real poll
poller.poll(10*poll_interval)
kernel.do_one_iteration()
except:
raise
except KeyboardInterrupt:
# Ctrl-C shouldn't crash the kernel
io.raw_print("KeyboardInterrupt caught in kernel")
finally:
# ensure excepthook is restored
sys.excepthook = real_excepthook |
def log_loss(actual, predicted):
"""Log of the loss (error) summed over all entries
The negative of the logarithm of the frequency (probability) of the predicted
label given the true binary label for a category.
Arguments:
predicted (np.array of float): 2-D table of probabilities for each
category (columns) and each record (rows)
actual (np.array of float): True binary labels for each category
Should only have a single 1 on each row indicating the one
correct category (column)
Based On:
https://www.kaggle.com/wiki/LogarithmicLoss
http://scikit-learn.org/stable/modules/model_evaluation.html#log-loss
"""
predicted, actual = np.array(predicted), np.array(actual)
small_value = 1e-15
predicted[predicted < small_value] = small_value
predicted[predicted > 1 - small_value] = 1. - small_value
return (-1. / len(actual)) * np.sum(
actual * np.log(predicted) + (1. - actual) * np.log(1. - predicted)) | Log of the loss (error) summed over all entries
The negative of the logarithm of the frequency (probability) of the predicted
label given the true binary label for a category.
Arguments:
predicted (np.array of float): 2-D table of probabilities for each
category (columns) and each record (rows)
actual (np.array of float): True binary labels for each category
Should only have a single 1 on each row indicating the one
correct category (column)
Based On:
https://www.kaggle.com/wiki/LogarithmicLoss
http://scikit-learn.org/stable/modules/model_evaluation.html#log-loss | Below is the the instruction that describes the task:
### Input:
Log of the loss (error) summed over all entries
The negative of the logarithm of the frequency (probability) of the predicted
label given the true binary label for a category.
Arguments:
predicted (np.array of float): 2-D table of probabilities for each
category (columns) and each record (rows)
actual (np.array of float): True binary labels for each category
Should only have a single 1 on each row indicating the one
correct category (column)
Based On:
https://www.kaggle.com/wiki/LogarithmicLoss
http://scikit-learn.org/stable/modules/model_evaluation.html#log-loss
### Response:
def log_loss(actual, predicted):
"""Log of the loss (error) summed over all entries
The negative of the logarithm of the frequency (probability) of the predicted
label given the true binary label for a category.
Arguments:
predicted (np.array of float): 2-D table of probabilities for each
category (columns) and each record (rows)
actual (np.array of float): True binary labels for each category
Should only have a single 1 on each row indicating the one
correct category (column)
Based On:
https://www.kaggle.com/wiki/LogarithmicLoss
http://scikit-learn.org/stable/modules/model_evaluation.html#log-loss
"""
predicted, actual = np.array(predicted), np.array(actual)
small_value = 1e-15
predicted[predicted < small_value] = small_value
predicted[predicted > 1 - small_value] = 1. - small_value
return (-1. / len(actual)) * np.sum(
actual * np.log(predicted) + (1. - actual) * np.log(1. - predicted)) |
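A tiny numeric usage sketch for log_loss above (assumes numpy and the function as defined); with one-hot actual rows the clipping keeps the logs finite even for exact predictions.

# Usage sketch for log_loss; assumes numpy and the definition above.
import numpy as np

actual = np.array([[1., 0., 0.],
                   [0., 1., 0.]])
predicted = np.array([[0.7, 0.2, 0.1],
                      [0.1, 0.8, 0.1]])
print(log_loss(actual, predicted))  # roughly 0.56 for these confident, correct rows
print(log_loss(actual, actual))     # about 0: exact predictions, clipped away from 0 and 1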
def reset(self):
"""
override reset behavior
"""
if getattr(self, 'num', None) is None:
self.num_inst = 0
self.sum_metric = 0.0
else:
self.num_inst = [0] * self.num
self.sum_metric = [0.0] * self.num | override reset behavior | Below is the the instruction that describes the task:
### Input:
override reset behavior
### Response:
def reset(self):
"""
override reset behavior
"""
if getattr(self, 'num', None) is None:
self.num_inst = 0
self.sum_metric = 0.0
else:
self.num_inst = [0] * self.num
self.sum_metric = [0.0] * self.num |
def blend(self, proportion=0.2, stratify=False, seed=100, indices=None, add_diff=False):
"""Blends sequence of models.
Parameters
----------
proportion : float, default 0.2
stratify : bool, default False
seed : int, default False
indices : list(np.ndarray,np.ndarray), default None
Two numpy arrays that contain indices for train/test slicing.
add_diff : bool, default False
Returns
-------
`DataFrame`
Examples
--------
>>> pipeline = ModelsPipeline(model_rf,model_lr)
>>> pipeline.blend(seed=15)
>>> # Custom indices
>>> train_index = np.array(range(250))
>>> test_index = np.array(range(250,333))
        >>> res = model_rf.blend(indices=(train_index,test_index))
"""
result_train = []
result_test = []
y = None
for model in self.models:
result = model.blend(proportion=proportion, stratify=stratify, seed=seed, indices=indices)
train_df = pd.DataFrame(result.X_train, columns=generate_columns(result.X_train, model.name))
test_df = pd.DataFrame(result.X_test, columns=generate_columns(result.X_test, model.name))
result_train.append(train_df)
result_test.append(test_df)
if y is None:
y = result.y_train
result_train = pd.concat(result_train, axis=1, ignore_index=True)
result_test = pd.concat(result_test, axis=1, ignore_index=True)
if add_diff:
result_train = feature_combiner(result_train)
result_test = feature_combiner(result_test)
return Dataset(X_train=result_train, y_train=y, X_test=result_test) | Blends sequence of models.
Parameters
----------
proportion : float, default 0.2
stratify : bool, default False
seed : int, default False
indices : list(np.ndarray,np.ndarray), default None
Two numpy arrays that contain indices for train/test slicing.
add_diff : bool, default False
Returns
-------
`DataFrame`
Examples
--------
>>> pipeline = ModelsPipeline(model_rf,model_lr)
>>> pipeline.blend(seed=15)
>>> # Custom indices
>>> train_index = np.array(range(250))
>>> test_index = np.array(range(250,333))
>>> res = model_rf.blend(indices=(train_index,test_index))
### Input:
Blends sequence of models.
Parameters
----------
proportion : float, default 0.2
stratify : bool, default False
seed : int, default False
indices : list(np.ndarray,np.ndarray), default None
Two numpy arrays that contain indices for train/test slicing.
add_diff : bool, default False
Returns
-------
`DataFrame`
Examples
--------
>>> pipeline = ModelsPipeline(model_rf,model_lr)
>>> pipeline.blend(seed=15)
>>> # Custom indices
>>> train_index = np.array(range(250))
>>> test_index = np.array(range(250,333))
>>> res = model_rf.blend(indices=(train_index,test_index))
### Response:
def blend(self, proportion=0.2, stratify=False, seed=100, indices=None, add_diff=False):
"""Blends sequence of models.
Parameters
----------
proportion : float, default 0.2
stratify : bool, default False
seed : int, default False
indices : list(np.ndarray,np.ndarray), default None
Two numpy arrays that contain indices for train/test slicing.
add_diff : bool, default False
Returns
-------
`DataFrame`
Examples
--------
>>> pipeline = ModelsPipeline(model_rf,model_lr)
>>> pipeline.blend(seed=15)
>>> # Custom indices
>>> train_index = np.array(range(250))
>>> test_index = np.array(range(250,333))
        >>> res = model_rf.blend(indices=(train_index,test_index))
"""
result_train = []
result_test = []
y = None
for model in self.models:
result = model.blend(proportion=proportion, stratify=stratify, seed=seed, indices=indices)
train_df = pd.DataFrame(result.X_train, columns=generate_columns(result.X_train, model.name))
test_df = pd.DataFrame(result.X_test, columns=generate_columns(result.X_test, model.name))
result_train.append(train_df)
result_test.append(test_df)
if y is None:
y = result.y_train
result_train = pd.concat(result_train, axis=1, ignore_index=True)
result_test = pd.concat(result_test, axis=1, ignore_index=True)
if add_diff:
result_train = feature_combiner(result_train)
result_test = feature_combiner(result_test)
return Dataset(X_train=result_train, y_train=y, X_test=result_test) |
def get_by_username(cls, username):
"""Get profile by username.
:param username: A username to query for (case insensitive).
"""
return cls.query.filter(
UserProfile._username == username.lower()
).one() | Get profile by username.
:param username: A username to query for (case insensitive). | Below is the the instruction that describes the task:
### Input:
Get profile by username.
:param username: A username to query for (case insensitive).
### Response:
def get_by_username(cls, username):
"""Get profile by username.
:param username: A username to query for (case insensitive).
"""
return cls.query.filter(
UserProfile._username == username.lower()
).one() |
def add_scan_host_detail(self, scan_id, host='', name='', value=''):
""" Adds a host detail result to scan_id scan. """
self.scan_collection.add_result(scan_id, ResultType.HOST_DETAIL, host,
name, value) | Adds a host detail result to scan_id scan. | Below is the the instruction that describes the task:
### Input:
Adds a host detail result to scan_id scan.
### Response:
def add_scan_host_detail(self, scan_id, host='', name='', value=''):
""" Adds a host detail result to scan_id scan. """
self.scan_collection.add_result(scan_id, ResultType.HOST_DETAIL, host,
name, value) |
def CreateAdGroup(client, campaign_id):
"""Creates a dynamic remarketing campaign.
Args:
client: an AdWordsClient instance.
campaign_id: an int campaign ID.
Returns:
The ad group that was successfully created.
"""
ad_group_service = client.GetService('AdGroupService', 'v201809')
ad_group = {
'name': 'Dynamic remarketing ad group',
'campaignId': campaign_id,
'status': 'ENABLED'
}
operations = [{
'operator': 'ADD',
'operand': ad_group
}]
return ad_group_service.mutate(operations)['value'][0] | Creates a dynamic remarketing campaign.
Args:
client: an AdWordsClient instance.
campaign_id: an int campaign ID.
Returns:
The ad group that was successfully created. | Below is the the instruction that describes the task:
### Input:
Creates a dynamic remarketing campaign.
Args:
client: an AdWordsClient instance.
campaign_id: an int campaign ID.
Returns:
The ad group that was successfully created.
### Response:
def CreateAdGroup(client, campaign_id):
"""Creates a dynamic remarketing campaign.
Args:
client: an AdWordsClient instance.
campaign_id: an int campaign ID.
Returns:
The ad group that was successfully created.
"""
ad_group_service = client.GetService('AdGroupService', 'v201809')
ad_group = {
'name': 'Dynamic remarketing ad group',
'campaignId': campaign_id,
'status': 'ENABLED'
}
operations = [{
'operator': 'ADD',
'operand': ad_group
}]
return ad_group_service.mutate(operations)['value'][0] |
def __do_log(self, text):
"""
Writes the given text verbatim into the log file (if any)
and/or standard input (if the verbose flag is turned on).
Used internally.
@type text: str
@param text: Text to print.
"""
if isinstance(text, compat.unicode):
text = text.encode('cp1252')
if self.verbose:
print(text)
if self.logfile:
try:
self.fd.writelines('%s\n' % text)
except IOError:
e = sys.exc_info()[1]
self.__logfile_error(e) | Writes the given text verbatim into the log file (if any)
and/or standard input (if the verbose flag is turned on).
Used internally.
@type text: str
@param text: Text to print. | Below is the the instruction that describes the task:
### Input:
Writes the given text verbatim into the log file (if any)
and/or standard input (if the verbose flag is turned on).
Used internally.
@type text: str
@param text: Text to print.
### Response:
def __do_log(self, text):
"""
Writes the given text verbatim into the log file (if any)
and/or standard input (if the verbose flag is turned on).
Used internally.
@type text: str
@param text: Text to print.
"""
if isinstance(text, compat.unicode):
text = text.encode('cp1252')
if self.verbose:
print(text)
if self.logfile:
try:
self.fd.writelines('%s\n' % text)
except IOError:
e = sys.exc_info()[1]
self.__logfile_error(e) |
def errorhandler(self, code_or_exception):
"""Registers an error handler that becomes active for this blueprint
only. Please be aware that routing does not happen local to a
blueprint so an error handler for 404 usually is not handled by
a blueprint unless it is caused inside a view function. Another
special case is the 500 internal server error which is always looked
up from the application.
Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
of the :class:`~flask.Flask` object.
"""
def decorator(f):
self.record_once(lambda s: s.app._register_error_handler(
self.name, code_or_exception, f))
return f
return decorator | Registers an error handler that becomes active for this blueprint
only. Please be aware that routing does not happen local to a
blueprint so an error handler for 404 usually is not handled by
a blueprint unless it is caused inside a view function. Another
special case is the 500 internal server error which is always looked
up from the application.
Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
of the :class:`~flask.Flask` object. | Below is the the instruction that describes the task:
### Input:
Registers an error handler that becomes active for this blueprint
only. Please be aware that routing does not happen local to a
blueprint so an error handler for 404 usually is not handled by
a blueprint unless it is caused inside a view function. Another
special case is the 500 internal server error which is always looked
up from the application.
Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
of the :class:`~flask.Flask` object.
### Response:
def errorhandler(self, code_or_exception):
"""Registers an error handler that becomes active for this blueprint
only. Please be aware that routing does not happen local to a
blueprint so an error handler for 404 usually is not handled by
a blueprint unless it is caused inside a view function. Another
special case is the 500 internal server error which is always looked
up from the application.
Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
of the :class:`~flask.Flask` object.
"""
def decorator(f):
self.record_once(lambda s: s.app._register_error_handler(
self.name, code_or_exception, f))
return f
return decorator |
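A short usage sketch of the decorator above; the blueprint name, status code, and handler body are illustrative and not taken from the row:

from flask import Blueprint, jsonify

bp = Blueprint("api", __name__)

@bp.errorhandler(404)
def handle_404(error):
    # Per the docstring, this only fires for 404s raised inside this
    # blueprint's own view functions, not for URLs that never matched a route.
    return jsonify(error="not found"), 404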
def updateProgress(self, time, state='stopped'):
""" Set the watched progress for this video.
Note that setting the time to 0 will not work.
Use `markWatched` or `markUnwatched` to achieve
that goal.
Parameters:
time (int): milliseconds watched
state (string): state of the video, default 'stopped'
"""
key = '/:/progress?key=%s&identifier=com.plexapp.plugins.library&time=%d&state=%s' % (self.ratingKey,
time, state)
self._server.query(key)
self.reload() | Set the watched progress for this video.
Note that setting the time to 0 will not work.
Use `markWatched` or `markUnwatched` to achieve
that goal.
Parameters:
time (int): milliseconds watched
state (string): state of the video, default 'stopped' | Below is the the instruction that describes the task:
### Input:
Set the watched progress for this video.
Note that setting the time to 0 will not work.
Use `markWatched` or `markUnwatched` to achieve
that goal.
Parameters:
time (int): milliseconds watched
state (string): state of the video, default 'stopped'
### Response:
def updateProgress(self, time, state='stopped'):
""" Set the watched progress for this video.
Note that setting the time to 0 will not work.
Use `markWatched` or `markUnwatched` to achieve
that goal.
Parameters:
time (int): milliseconds watched
state (string): state of the video, default 'stopped'
"""
key = '/:/progress?key=%s&identifier=com.plexapp.plugins.library&time=%d&state=%s' % (self.ratingKey,
time, state)
self._server.query(key)
self.reload() |
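A hedged usage sketch; the server URL, token, library section, and title below are placeholders:

from plexapi.server import PlexServer

plex = PlexServer("http://localhost:32400", token="<token>")
video = plex.library.section("Movies").get("Big Buck Bunny")
# Mark five minutes as watched; per the docstring a time of 0 will not work.
video.updateProgress(5 * 60 * 1000, state="playing")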
def _calculate_block_structure(self, inequalities, equalities,
momentinequalities, momentequalities,
extramomentmatrix, removeequalities,
block_struct=None):
"""Calculates the block_struct array for the output file.
"""
block_struct = []
if self.verbose > 0:
print("Calculating block structure...")
block_struct.append(len(self.monomial_sets[0]) *
len(self.monomial_sets[1]))
if extramomentmatrix is not None:
for _ in extramomentmatrix:
block_struct.append(len(self.monomial_sets[0]) *
len(self.monomial_sets[1]))
super(MoroderHierarchy, self).\
_calculate_block_structure(inequalities, equalities,
momentinequalities, momentequalities,
extramomentmatrix,
removeequalities,
block_struct=block_struct) | Calculates the block_struct array for the output file. | Below is the the instruction that describes the task:
### Input:
Calculates the block_struct array for the output file.
### Response:
def _calculate_block_structure(self, inequalities, equalities,
momentinequalities, momentequalities,
extramomentmatrix, removeequalities,
block_struct=None):
"""Calculates the block_struct array for the output file.
"""
block_struct = []
if self.verbose > 0:
print("Calculating block structure...")
block_struct.append(len(self.monomial_sets[0]) *
len(self.monomial_sets[1]))
if extramomentmatrix is not None:
for _ in extramomentmatrix:
block_struct.append(len(self.monomial_sets[0]) *
len(self.monomial_sets[1]))
super(MoroderHierarchy, self).\
_calculate_block_structure(inequalities, equalities,
momentinequalities, momentequalities,
extramomentmatrix,
removeequalities,
block_struct=block_struct) |
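A worked illustration of the arithmetic in the method above; the monomial sets are invented, and only the first part of the calculation (before the call to the parent class) is mirrored:

monomial_sets = [["1", "A", "B"], ["1", "X"]]   # 3 x 2 monomial basis
extramomentmatrix = [None]                      # one extra moment matrix requested
block_struct = [len(monomial_sets[0]) * len(monomial_sets[1])]       # [6]
for _ in extramomentmatrix:
    block_struct.append(len(monomial_sets[0]) * len(monomial_sets[1]))
print(block_struct)  # [6, 6] -- the parent class then appends blocks for the constraints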
def ste(command, nindent, mdir, fpointer):
r"""
Echo terminal output.
Print STDOUT resulting from a given Bash shell command (relative to the
package :code:`pypkg` directory) formatted in reStructuredText
:param command: Bash shell command, relative to
:bash:`${PMISC_DIR}/pypkg`
:type command: string
:param nindent: Indentation level
:type nindent: integer
:param mdir: Module directory
:type mdir: string
:param fpointer: Output function pointer. Normally is :code:`cog.out` but
:code:`print` or other functions can be used for
debugging
:type fpointer: function object
For example::
.. This is a reStructuredText file snippet
.. [[[cog
.. import os, sys
.. from docs.support.term_echo import term_echo
.. file_name = sys.modules['docs.support.term_echo'].__file__
.. mdir = os.path.realpath(
.. os.path.dirname(
.. os.path.dirname(os.path.dirname(file_name))
.. )
.. )
.. [[[cog ste('build_docs.py -h', 0, mdir, cog.out) ]]]
.. code-block:: bash
$ ${PMISC_DIR}/pypkg/build_docs.py -h
usage: build_docs.py [-h] [-d DIRECTORY] [-n NUM_CPUS]
...
.. ]]]
"""
term_echo(
"${{PMISC_DIR}}{sep}pypkg{sep}{cmd}".format(sep=os.path.sep, cmd=command),
nindent,
{"PMISC_DIR": mdir},
fpointer,
) | r"""
Echo terminal output.
Print STDOUT resulting from a given Bash shell command (relative to the
package :code:`pypkg` directory) formatted in reStructuredText
:param command: Bash shell command, relative to
:bash:`${PMISC_DIR}/pypkg`
:type command: string
:param nindent: Indentation level
:type nindent: integer
:param mdir: Module directory
:type mdir: string
:param fpointer: Output function pointer. Normally is :code:`cog.out` but
:code:`print` or other functions can be used for
debugging
:type fpointer: function object
For example::
.. This is a reStructuredText file snippet
.. [[[cog
.. import os, sys
.. from docs.support.term_echo import term_echo
.. file_name = sys.modules['docs.support.term_echo'].__file__
.. mdir = os.path.realpath(
.. os.path.dirname(
.. os.path.dirname(os.path.dirname(file_name))
.. )
.. )
.. [[[cog ste('build_docs.py -h', 0, mdir, cog.out) ]]]
.. code-block:: bash
$ ${PMISC_DIR}/pypkg/build_docs.py -h
usage: build_docs.py [-h] [-d DIRECTORY] [-n NUM_CPUS]
...
.. ]]] | Below is the the instruction that describes the task:
### Input:
r"""
Echo terminal output.
Print STDOUT resulting from a given Bash shell command (relative to the
package :code:`pypkg` directory) formatted in reStructuredText
:param command: Bash shell command, relative to
:bash:`${PMISC_DIR}/pypkg`
:type command: string
:param nindent: Indentation level
:type nindent: integer
:param mdir: Module directory
:type mdir: string
:param fpointer: Output function pointer. Normally is :code:`cog.out` but
:code:`print` or other functions can be used for
debugging
:type fpointer: function object
For example::
.. This is a reStructuredText file snippet
.. [[[cog
.. import os, sys
.. from docs.support.term_echo import term_echo
.. file_name = sys.modules['docs.support.term_echo'].__file__
.. mdir = os.path.realpath(
.. os.path.dirname(
.. os.path.dirname(os.path.dirname(file_name))
.. )
.. )
.. [[[cog ste('build_docs.py -h', 0, mdir, cog.out) ]]]
.. code-block:: bash
$ ${PMISC_DIR}/pypkg/build_docs.py -h
usage: build_docs.py [-h] [-d DIRECTORY] [-n NUM_CPUS]
...
.. ]]]
### Response:
def ste(command, nindent, mdir, fpointer):
r"""
Echo terminal output.
Print STDOUT resulting from a given Bash shell command (relative to the
package :code:`pypkg` directory) formatted in reStructuredText
:param command: Bash shell command, relative to
:bash:`${PMISC_DIR}/pypkg`
:type command: string
:param nindent: Indentation level
:type nindent: integer
:param mdir: Module directory
:type mdir: string
:param fpointer: Output function pointer. Normally is :code:`cog.out` but
:code:`print` or other functions can be used for
debugging
:type fpointer: function object
For example::
.. This is a reStructuredText file snippet
.. [[[cog
.. import os, sys
.. from docs.support.term_echo import term_echo
.. file_name = sys.modules['docs.support.term_echo'].__file__
.. mdir = os.path.realpath(
.. os.path.dirname(
.. os.path.dirname(os.path.dirname(file_name))
.. )
.. )
.. [[[cog ste('build_docs.py -h', 0, mdir, cog.out) ]]]
.. code-block:: bash
$ ${PMISC_DIR}/pypkg/build_docs.py -h
usage: build_docs.py [-h] [-d DIRECTORY] [-n NUM_CPUS]
...
.. ]]]
"""
term_echo(
"${{PMISC_DIR}}{sep}pypkg{sep}{cmd}".format(sep=os.path.sep, cmd=command),
nindent,
{"PMISC_DIR": mdir},
fpointer,
) |
def PushPopItem(obj, key, value):
'''
A context manager to replace and restore a value using a getter and setter.
:param object obj: The object to replace/restore.
:param object key: The key to replace/restore in the object.
:param object value: The value to replace.
Example::
with PushPop2(sys.modules, 'alpha', None):
pytest.raises(ImportError):
import alpha
'''
if key in obj:
old_value = obj[key]
obj[key] = value
yield value
obj[key] = old_value
else:
obj[key] = value
yield value
del obj[key] | A context manager to replace and restore a value using a getter and setter.
:param object obj: The object to replace/restore.
:param object key: The key to replace/restore in the object.
:param object value: The value to replace.
Example::
with PushPop2(sys.modules, 'alpha', None):
pytest.raises(ImportError):
import alpha | Below is the the instruction that describes the task:
### Input:
A context manager to replace and restore a value using a getter and setter.
:param object obj: The object to replace/restore.
:param object key: The key to replace/restore in the object.
:param object value: The value to replace.
Example::
with PushPop2(sys.modules, 'alpha', None):
pytest.raises(ImportError):
import alpha
### Response:
def PushPopItem(obj, key, value):
'''
A context manager to replace and restore a value using a getter and setter.
:param object obj: The object to replace/restore.
:param object key: The key to replace/restore in the object.
:param object value: The value to replace.
Example::
with PushPop2(sys.modules, 'alpha', None):
pytest.raises(ImportError):
import alpha
'''
if key in obj:
old_value = obj[key]
obj[key] = value
yield value
obj[key] = old_value
else:
obj[key] = value
yield value
del obj[key] |
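The docstring example above refers to PushPop2 and drops the with before pytest.raises; it also only behaves as a context manager if the generator is wrapped with contextlib.contextmanager, which is presumably done in the original source but not shown here. A corrected, self-contained sketch under that assumption:

from contextlib import contextmanager

push_pop_item = contextmanager(PushPopItem)  # assumption: the decorator is applied upstream

settings = {"debug": False}
with push_pop_item(settings, "debug", True):
    assert settings["debug"] is True   # temporarily replaced
assert settings["debug"] is False      # original value restored on exit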
def readout(self):
"""Readout the detector."""
elec = self.simulate_poisson_variate()
elec_pre = self.saturate(elec)
elec_f = self.pre_readout(elec_pre)
adu_r = self.base_readout(elec_f)
adu_p = self.post_readout(adu_r)
self.clean_up()
return adu_p | Readout the detector. | Below is the the instruction that describes the task:
### Input:
Readout the detector.
### Response:
def readout(self):
"""Readout the detector."""
elec = self.simulate_poisson_variate()
elec_pre = self.saturate(elec)
elec_f = self.pre_readout(elec_pre)
adu_r = self.base_readout(elec_f)
adu_p = self.post_readout(adu_r)
self.clean_up()
return adu_p |
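readout() is a template method: each stage is expected to come from the concrete detector class. A deliberately simplistic stand-in showing what those hooks might look like (not the real detector model):

import numpy as np

class ToyDetector:
    def __init__(self, ideal_electrons, gain=2.0, full_well=60000):
        self._ideal = np.asarray(ideal_electrons, dtype=float)
        self._gain = gain
        self._full_well = full_well
    def simulate_poisson_variate(self):
        return np.random.poisson(self._ideal).astype(float)  # shot noise
    def saturate(self, elec):
        return np.clip(elec, 0.0, self._full_well)            # full-well clipping
    def pre_readout(self, elec):
        return elec                                           # e.g. charge transfer; identity here
    def base_readout(self, elec):
        return elec / self._gain                              # electrons -> ADU
    def post_readout(self, adu):
        return np.round(adu)                                  # quantisation
    def clean_up(self):
        pass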
def fix_size(self, content):
"""
Adjusts the width and height of the file switcher
based on the relative size of the parent and content.
"""
# Update size of dialog based on relative size of the parent
if content:
width, height = self.get_item_size(content)
# Width
parent = self.parent()
relative_width = parent.geometry().width() * 0.65
if relative_width > self.MAX_WIDTH:
relative_width = self.MAX_WIDTH
self.list.setMinimumWidth(relative_width)
# Height
if len(content) < 15:
max_entries = len(content)
else:
max_entries = 15
max_height = height * max_entries * 1.7
self.list.setMinimumHeight(max_height)
# Resize
self.list.resize(relative_width, self.list.height()) | Adjusts the width and height of the file switcher
based on the relative size of the parent and content. | Below is the the instruction that describes the task:
### Input:
Adjusts the width and height of the file switcher
based on the relative size of the parent and content.
### Response:
def fix_size(self, content):
"""
Adjusts the width and height of the file switcher
based on the relative size of the parent and content.
"""
# Update size of dialog based on relative size of the parent
if content:
width, height = self.get_item_size(content)
# Width
parent = self.parent()
relative_width = parent.geometry().width() * 0.65
if relative_width > self.MAX_WIDTH:
relative_width = self.MAX_WIDTH
self.list.setMinimumWidth(relative_width)
# Height
if len(content) < 15:
max_entries = len(content)
else:
max_entries = 15
max_height = height * max_entries * 1.7
self.list.setMinimumHeight(max_height)
# Resize
self.list.resize(relative_width, self.list.height()) |
def _on_wheel_event(self, event):
"""
Increments or decrements editor fonts settings on mouse wheel event
if ctrl modifier is on.
:param event: wheel event
:type event: QWheelEvent
"""
try:
delta = event.angleDelta().y()
except AttributeError:
# PyQt4/PySide
delta = event.delta()
if int(event.modifiers()) & QtCore.Qt.ControlModifier > 0:
if delta < self.prev_delta:
self.editor.zoom_out()
event.accept()
else:
self.editor.zoom_in()
event.accept() | Increments or decrements editor fonts settings on mouse wheel event
if ctrl modifier is on.
:param event: wheel event
:type event: QWheelEvent | Below is the the instruction that describes the task:
### Input:
Increments or decrements editor fonts settings on mouse wheel event
if ctrl modifier is on.
:param event: wheel event
:type event: QWheelEvent
### Response:
def _on_wheel_event(self, event):
"""
Increments or decrements editor fonts settings on mouse wheel event
if ctrl modifier is on.
:param event: wheel event
:type event: QWheelEvent
"""
try:
delta = event.angleDelta().y()
except AttributeError:
# PyQt4/PySide
delta = event.delta()
if int(event.modifiers()) & QtCore.Qt.ControlModifier > 0:
if delta < self.prev_delta:
self.editor.zoom_out()
event.accept()
else:
self.editor.zoom_in()
event.accept() |
def connections(self):
"""
Return a :code:`dict` of connections from the configuration settings.
:raises `giraffez.errors.ConfigurationError`: if connections are not present
"""
if "connections" not in self.settings:
raise ConfigurationError("Could not retrieve connections from config file '{}'.".format(self._config_file))
return self.settings.get("connections") | Return a :code:`dict` of connections from the configuration settings.
:raises `giraffez.errors.ConfigurationError`: if connections are not present | Below is the the instruction that describes the task:
### Input:
Return a :code:`dict` of connections from the configuration settings.
:raises `giraffez.errors.ConfigurationError`: if connections are not present
### Response:
def connections(self):
"""
Return a :code:`dict` of connections from the configuration settings.
:raises `giraffez.errors.ConfigurationError`: if connections are not present
"""
if "connections" not in self.settings:
raise ConfigurationError("Could not retrieve connections from config file '{}'.".format(self._config_file))
return self.settings.get("connections") |
def matches(self, path):
"""Tests if the given path matches the pattern.
Note that the unicode translation of the path is matched, so
replacement characters might have been added.
"""
path = self._prepare_path(path)
return self.full_regex.search(path) is not None | Tests if the given path matches the pattern.
Note that the unicode translation of the path is matched, so
replacement characters might have been added. | Below is the the instruction that describes the task:
### Input:
Tests if the given path matches the pattern.
Note that the unicode translation of the path is matched, so
replacement characters might have been added.
### Response:
def matches(self, path):
"""Tests if the given path matches the pattern.
Note that the unicode translation of the path is matched, so
replacement characters might have been added.
"""
path = self._prepare_path(path)
return self.full_regex.search(path) is not None |
def gauge(self, stat, value, tags=None):
"""Set a gauge."""
self.client.gauge(metric=stat, value=value, tags=tags) | Set a gauge. | Below is the the instruction that describes the task:
### Input:
Set a gauge.
### Response:
def gauge(self, stat, value, tags=None):
"""Set a gauge."""
self.client.gauge(metric=stat, value=value, tags=tags) |
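A short call sketch; the wrapper instance, metric name, and tags are placeholders (the method simply forwards to the underlying client, as shown above):

metrics = MetricsClient()  # hypothetical wrapper exposing the gauge() method above
metrics.gauge("queue.depth", 42, tags=["env:prod", "service:ingest"])
metrics.gauge("worker.threads", 8)  # tags defaults to None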
def load_module(self, module_name):
"""Attempts to load the specified module.
If successful, .loaded_modules[module_name] will be populated, and
module_name will be added to the end of .module_ordering as well if
it is not already present. Note that this function does NOT call
start()/stop() on the module - in general, you don't want to call
this directly but instead use reload_module().
Returns True if the module was successfully loaded, otherwise False.
"""
if module_name in self.currently_loading:
_log.warning("Ignoring request to load module '%s' because it "
"is already currently being loaded.", module_name)
return False
try: # ensure that currently_loading gets reset no matter what
self.currently_loading.add(module_name)
if self.loaded_on_this_event is not None:
self.loaded_on_this_event.add(module_name)
# Force the module to actually be reloaded
try:
_temp = reload(importlib.import_module(module_name))
except ImportError:
_log.error("Unable to load module '%s' - module not found.",
module_name)
return False
except SyntaxError:
_log.exception("Unable to load module '%s' - syntax error(s).",
module_name)
return False
if not hasattr(_temp, "module"):
_log.error("Unable to load module '%s' - no 'module' member.",
module_name)
return False
module = _temp.module
if not issubclass(module, Module):
_log.error("Unable to load module '%s' - its 'module' member "
"is not a kitnirc.modular.Module.", module_name)
return False
self.loaded_modules[module_name] = module(self)
if module_name not in self.module_ordering:
self.module_ordering.append(module_name)
return True
finally:
self.currently_loading.discard(module_name) | Attempts to load the specified module.
If successful, .loaded_modules[module_name] will be populated, and
module_name will be added to the end of .module_ordering as well if
it is not already present. Note that this function does NOT call
start()/stop() on the module - in general, you don't want to call
this directly but instead use reload_module().
Returns True if the module was successfully loaded, otherwise False. | Below is the the instruction that describes the task:
### Input:
Attempts to load the specified module.
If successful, .loaded_modules[module_name] will be populated, and
module_name will be added to the end of .module_ordering as well if
it is not already present. Note that this function does NOT call
start()/stop() on the module - in general, you don't want to call
this directly but instead use reload_module().
Returns True if the module was successfully loaded, otherwise False.
### Response:
def load_module(self, module_name):
"""Attempts to load the specified module.
If successful, .loaded_modules[module_name] will be populated, and
module_name will be added to the end of .module_ordering as well if
it is not already present. Note that this function does NOT call
start()/stop() on the module - in general, you don't want to call
this directly but instead use reload_module().
Returns True if the module was successfully loaded, otherwise False.
"""
if module_name in self.currently_loading:
_log.warning("Ignoring request to load module '%s' because it "
"is already currently being loaded.", module_name)
return False
try: # ensure that currently_loading gets reset no matter what
self.currently_loading.add(module_name)
if self.loaded_on_this_event is not None:
self.loaded_on_this_event.add(module_name)
# Force the module to actually be reloaded
try:
_temp = reload(importlib.import_module(module_name))
except ImportError:
_log.error("Unable to load module '%s' - module not found.",
module_name)
return False
except SyntaxError:
_log.exception("Unable to load module '%s' - syntax error(s).",
module_name)
return False
if not hasattr(_temp, "module"):
_log.error("Unable to load module '%s' - no 'module' member.",
module_name)
return False
module = _temp.module
if not issubclass(module, Module):
_log.error("Unable to load module '%s' - its 'module' member "
"is not a kitnirc.modular.Module.", module_name)
return False
self.loaded_modules[module_name] = module(self)
if module_name not in self.module_ordering:
self.module_ordering.append(module_name)
return True
finally:
self.currently_loading.discard(module_name) |
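A hedged usage sketch; 'controller' stands in for an already-constructed kitnirc controller, since its construction is not shown in the row, and the module path is hypothetical. As the docstring notes, reload_module() is the usual entry point and load_module() does not call start():

module_name = "mybot.modules.echo"  # hypothetical module path
if controller.load_module(module_name):
    controller.loaded_modules[module_name].start()  # start() must be called separately
else:
    print("Failed to load %s; see the log for details" % module_name)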
def readPrefs_dms_tools_format(f):
"""Reads the amino-acid preferences written by `dms_tools v1 <http://jbloomlab.github.io/dms_tools/>`_.
This is an exact copy of the same code from
`dms_tools.file_io.ReadPreferences`. It is copied because
`dms_tools v1 <http://jbloomlab.github.io/dms_tools/>`_ is currently
only compatible with `python2`, and we needed something that also works
with `python3`.
*f* is the name of an existing file or a readable file-like object.
It should be in the format written by
`dms_tools v1 <http://jbloomlab.github.io/dms_tools/>`_.
The return value is the tuple: *(sites, wts, pi_means, pi_95credint, h)*
where *sites*, *wts*, *pi_means*, and *pi_95credint* will all
have the same values used to write the file with *WritePreferences*,
and *h* is a dictionary with *h[r]* giving the site entropy (log base
2) for each *r* in *sites*.
"""
charmatch = re.compile('^PI_([A-z\*\-]+)$')
if isinstance(f, str):
f = open(f)
lines = f.readlines()
f.close()
else:
lines = f.readlines()
characters = []
sites = []
wts = {}
pi_means = {}
pi_95credint = {}
h = {}
for line in lines:
if line.isspace():
continue
elif line[0] == '#' and not characters:
entries = line[1 : ].strip().split()
if len(entries) < 4:
raise ValueError("Insufficient entries in header:\n%s" % line)
if not (entries[0] in ['POSITION', 'SITE'] and entries[1][ : 2] == 'WT' and entries[2] == 'SITE_ENTROPY'):
raise ValueError("Not the correct first three header columns:\n%s" % line)
i = 3
while i < len(entries) and charmatch.search(entries[i]):
characters.append(charmatch.search(entries[i]).group(1))
i += 1
if i == len(entries):
pi_95credint = None
linelength = len(characters) + 3
else:
if not len(entries) - i == len(characters):
raise ValueError("Header line does not have valid credible interval format:\n%s" % line)
if not all([entries[i + j] == 'PI_%s_95' % characters[j] for j in range(len(characters))]):
raise ValueError("mean and credible interval character mismatch in header:\n%s" % line)
linelength = 2 * len(characters) + 3
elif line[0] == '#':
continue
elif not characters:
raise ValueError("Found data lines before encountering a valid header")
else:
entries = line.strip().split()
if len(entries) != linelength:
raise ValueError("Line does not have expected %d entries:\n%s" % (linelength, line))
r = entries[0]
assert r not in sites, "Duplicate site of %s" % r
sites.append(r)
wts[r] = entries[1]
assert entries[1] in characters or entries[1] == '?', "Character %s is not one of the valid ones in header. Valid possibilities: %s" % (entries[1], ', '.join(characters))
h[r] = float(entries[2])
pi_means[r] = dict([(x, float(entries[3 + i])) for (i, x) in enumerate(characters)])
if pi_95credint != None:
pi_95credint[r] = dict([(x, (float(entries[3 + len(characters) + i].split(',')[0]), float(entries[3 + len(characters) + i].split(',')[1]))) for (i, x) in enumerate(characters)])
return (sites, wts, pi_means, pi_95credint, h) | Reads the amino-acid preferences written by `dms_tools v1 <http://jbloomlab.github.io/dms_tools/>`_.
This is an exact copy of the same code from
`dms_tools.file_io.ReadPreferences`. It is copied because
`dms_tools v1 <http://jbloomlab.github.io/dms_tools/>`_ is currently
only compatible with `python2`, and we needed something that also works
with `python3`.
*f* is the name of an existing file or a readable file-like object.
It should be in the format written by
`dms_tools v1 <http://jbloomlab.github.io/dms_tools/>`_.
The return value is the tuple: *(sites, wts, pi_means, pi_95credint, h)*
where *sites*, *wts*, *pi_means*, and *pi_95credint* will all
have the same values used to write the file with *WritePreferences*,
and *h* is a dictionary with *h[r]* giving the site entropy (log base
2) for each *r* in *sites*. | Below is the the instruction that describes the task:
### Input:
Reads the amino-acid preferences written by `dms_tools v1 <http://jbloomlab.github.io/dms_tools/>`_.
This is an exact copy of the same code from
`dms_tools.file_io.ReadPreferences`. It is copied because
`dms_tools v1 <http://jbloomlab.github.io/dms_tools/>`_ is currently
only compatible with `python2`, and we needed something that also works
with `python3`.
*f* is the name of an existing file or a readable file-like object.
It should be in the format written by
`dms_tools v1 <http://jbloomlab.github.io/dms_tools/>`_.
The return value is the tuple: *(sites, wts, pi_means, pi_95credint, h)*
where *sites*, *wts*, *pi_means*, and *pi_95credint* will all
have the same values used to write the file with *WritePreferences*,
and *h* is a dictionary with *h[r]* giving the site entropy (log base
2) for each *r* in *sites*.
### Response:
def readPrefs_dms_tools_format(f):
"""Reads the amino-acid preferences written by `dms_tools v1 <http://jbloomlab.github.io/dms_tools/>`_.
This is an exact copy of the same code from
`dms_tools.file_io.ReadPreferences`. It is copied because
`dms_tools v1 <http://jbloomlab.github.io/dms_tools/>`_ is currently
only compatible with `python2`, and we needed something that also works
with `python3`.
*f* is the name of an existing file or a readable file-like object.
It should be in the format written by
`dms_tools v1 <http://jbloomlab.github.io/dms_tools/>`_.
The return value is the tuple: *(sites, wts, pi_means, pi_95credint, h)*
where *sites*, *wts*, *pi_means*, and *pi_95credint* will all
have the same values used to write the file with *WritePreferences*,
and *h* is a dictionary with *h[r]* giving the site entropy (log base
2) for each *r* in *sites*.
"""
charmatch = re.compile('^PI_([A-z\*\-]+)$')
if isinstance(f, str):
f = open(f)
lines = f.readlines()
f.close()
else:
lines = f.readlines()
characters = []
sites = []
wts = {}
pi_means = {}
pi_95credint = {}
h = {}
for line in lines:
if line.isspace():
continue
elif line[0] == '#' and not characters:
entries = line[1 : ].strip().split()
if len(entries) < 4:
raise ValueError("Insufficient entries in header:\n%s" % line)
if not (entries[0] in ['POSITION', 'SITE'] and entries[1][ : 2] == 'WT' and entries[2] == 'SITE_ENTROPY'):
raise ValueError("Not the correct first three header columns:\n%s" % line)
i = 3
while i < len(entries) and charmatch.search(entries[i]):
characters.append(charmatch.search(entries[i]).group(1))
i += 1
if i == len(entries):
pi_95credint = None
linelength = len(characters) + 3
else:
if not len(entries) - i == len(characters):
raise ValueError("Header line does not have valid credible interval format:\n%s" % line)
if not all([entries[i + j] == 'PI_%s_95' % characters[j] for j in range(len(characters))]):
raise ValueError("mean and credible interval character mismatch in header:\n%s" % line)
linelength = 2 * len(characters) + 3
elif line[0] == '#':
continue
elif not characters:
raise ValueError("Found data lines before encountering a valid header")
else:
entries = line.strip().split()
if len(entries) != linelength:
raise ValueError("Line does not have expected %d entries:\n%s" % (linelength, line))
r = entries[0]
assert r not in sites, "Duplicate site of %s" % r
sites.append(r)
wts[r] = entries[1]
assert entries[1] in characters or entries[1] == '?', "Character %s is not one of the valid ones in header. Valid possibilities: %s" % (entries[1], ', '.join(characters))
h[r] = float(entries[2])
pi_means[r] = dict([(x, float(entries[3 + i])) for (i, x) in enumerate(characters)])
if pi_95credint != None:
pi_95credint[r] = dict([(x, (float(entries[3 + len(characters) + i].split(',')[0]), float(entries[3 + len(characters) + i].split(',')[1]))) for (i, x) in enumerate(characters)])
return (sites, wts, pi_means, pi_95credint, h) |
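To make the expected input concrete, here is a tiny synthetic file (two sites, four characters, no credible-interval columns) that the parser above accepts:

import io

toy = io.StringIO(
    "# POSITION WT SITE_ENTROPY PI_A PI_C PI_G PI_T\n"
    "1 A 1.5 0.4 0.2 0.2 0.2\n"
    "2 C 2.0 0.25 0.25 0.25 0.25\n"
)
sites, wts, pi_means, pi_95credint, h = readPrefs_dms_tools_format(toy)
print(sites)           # ['1', '2']
print(wts["1"])        # 'A'
print(pi_means["2"])   # {'A': 0.25, 'C': 0.25, 'G': 0.25, 'T': 0.25}
print(pi_95credint)    # None -- no credible-interval columns in the header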
def flushIndexes(cls) :
"drops all indexes for a class"
con = RabaConnection(cls._raba_namespace)
for idx in cls.getIndexes() :
con.dropIndexByName(idx[1]) | drops all indexes for a class | Below is the the instruction that describes the task:
### Input:
drops all indexes for a class
### Response:
def flushIndexes(cls) :
"drops all indexes for a class"
con = RabaConnection(cls._raba_namespace)
for idx in cls.getIndexes() :
con.dropIndexByName(idx[1]) |