code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k) |
---|---|---|
def _restore_backup(self):
"""Restore the specified database."""
input_filename, input_file = self._get_backup_file(database=self.database_name,
servername=self.servername)
self.logger.info("Restoring backup for database '%s' and server '%s'",
self.database_name, self.servername)
self.logger.info("Restoring: %s" % input_filename)
if self.decrypt:
unencrypted_file, input_filename = utils.unencrypt_file(input_file, input_filename,
self.passphrase)
input_file.close()
input_file = unencrypted_file
if self.uncompress:
uncompressed_file, input_filename = utils.uncompress_file(input_file, input_filename)
input_file.close()
input_file = uncompressed_file
self.logger.info("Restore tempfile created: %s", utils.handle_size(input_file))
if self.interactive:
self._ask_confirmation()
input_file.seek(0)
self.connector = get_connector(self.database_name)
self.connector.restore_dump(input_file) | Restore the specified database. | Below is the instruction that describes the task:
### Input:
Restore the specified database.
### Response:
def _restore_backup(self):
"""Restore the specified database."""
input_filename, input_file = self._get_backup_file(database=self.database_name,
servername=self.servername)
self.logger.info("Restoring backup for database '%s' and server '%s'",
self.database_name, self.servername)
self.logger.info("Restoring: %s" % input_filename)
if self.decrypt:
unencrypted_file, input_filename = utils.unencrypt_file(input_file, input_filename,
self.passphrase)
input_file.close()
input_file = unencrypted_file
if self.uncompress:
uncompressed_file, input_filename = utils.uncompress_file(input_file, input_filename)
input_file.close()
input_file = uncompressed_file
self.logger.info("Restore tempfile created: %s", utils.handle_size(input_file))
if self.interactive:
self._ask_confirmation()
input_file.seek(0)
self.connector = get_connector(self.database_name)
self.connector.restore_dump(input_file) |
def set_debug(self, status):
"""Control the logging state."""
if status:
self.logger.setLevel('DEBUG')
else:
self.logger.setLevel('INFO') | Control the logging state. | Below is the instruction that describes the task:
### Input:
Control the logging state.
### Response:
def set_debug(self, status):
"""Control the logging state."""
if status:
self.logger.setLevel('DEBUG')
else:
self.logger.setLevel('INFO') |
def osu_run1(data_set='osu_run1', sample_every=4):
"""Ohio State University's Run1 motion capture data set."""
path = os.path.join(data_path, data_set)
if not data_available(data_set):
import zipfile
download_data(data_set)
zip = zipfile.ZipFile(os.path.join(data_path, data_set, 'run1TXT.ZIP'), 'r')
for name in zip.namelist():
zip.extract(name, path)
from . import mocap
Y, connect = mocap.load_text_data('Aug210106', path)
Y = Y[0:-1:sample_every, :]
return data_details_return({'Y': Y, 'connect' : connect}, data_set) | Ohio State University's Run1 motion capture data set. | Below is the instruction that describes the task:
### Input:
Ohio State University's Run1 motion capture data set.
### Response:
def osu_run1(data_set='osu_run1', sample_every=4):
"""Ohio State University's Run1 motion capture data set."""
path = os.path.join(data_path, data_set)
if not data_available(data_set):
import zipfile
download_data(data_set)
zip = zipfile.ZipFile(os.path.join(data_path, data_set, 'run1TXT.ZIP'), 'r')
for name in zip.namelist():
zip.extract(name, path)
from . import mocap
Y, connect = mocap.load_text_data('Aug210106', path)
Y = Y[0:-1:sample_every, :]
return data_details_return({'Y': Y, 'connect' : connect}, data_set) |
def connection_from_url(url, **kw):
"""
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
if scheme == 'https':
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw) | Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/') | Below is the instruction that describes the task:
### Input:
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
### Response:
def connection_from_url(url, **kw):
"""
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
if scheme == 'https':
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw) |
def main(
context: click.core.Context, method: str, request_type: str, id: Any, send: str
) -> None:
"""
Create a JSON-RPC request.
"""
exit_status = 0
# Extract the jsonrpc arguments
positional = [a for a in context.args if "=" not in a]
named = {a.split("=")[0]: a.split("=")[1] for a in context.args if "=" in a}
# Create the request
if request_type == "notify":
req = Notification(method, *positional, **named)
else:
req = Request(method, *positional, request_id=id, **named) # type: ignore
# Sending?
if send:
client = HTTPClient(send)
try:
response = client.send(req)
except JsonRpcClientError as e:
click.echo(str(e), err=True)
exit_status = 1
else:
click.echo(response.text)
# Otherwise, simply output the JSON-RPC request.
else:
click.echo(str(req))
sys.exit(exit_status) | Create a JSON-RPC request. | Below is the instruction that describes the task:
### Input:
Create a JSON-RPC request.
### Response:
def main(
context: click.core.Context, method: str, request_type: str, id: Any, send: str
) -> None:
"""
Create a JSON-RPC request.
"""
exit_status = 0
# Extract the jsonrpc arguments
positional = [a for a in context.args if "=" not in a]
named = {a.split("=")[0]: a.split("=")[1] for a in context.args if "=" in a}
# Create the request
if request_type == "notify":
req = Notification(method, *positional, **named)
else:
req = Request(method, *positional, request_id=id, **named) # type: ignore
# Sending?
if send:
client = HTTPClient(send)
try:
response = client.send(req)
except JsonRpcClientError as e:
click.echo(str(e), err=True)
exit_status = 1
else:
click.echo(response.text)
# Otherwise, simply output the JSON-RPC request.
else:
click.echo(str(req))
sys.exit(exit_status) |
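The argument handling in main() above splits raw CLI tokens on "=" to separate positional from named JSON-RPC parameters. A small self-contained sketch of that split, using invented argument strings rather than a real click context:
args = ["42", "name=alice", "verbose=true"]   # hypothetical tokens from context.args
positional = [a for a in args if "=" not in a]
named = {a.split("=")[0]: a.split("=")[1] for a in args if "=" in a}
print(positional)  # ['42']
print(named)       # {'name': 'alice', 'verbose': 'true'}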
def unpack(self, value):
"""Unpack the parameter using its kattype.
Parameters
----------
packed_value : str
The unescaped KATCP string to unpack.
Returns
-------
value : object
The unpacked value.
"""
# Wrap errors in FailReplies with information identifying the parameter
try:
return self._kattype.unpack(value, self.major)
except ValueError, message:
raise FailReply("Error in parameter %s (%s): %s" %
(self.position, self.name, message)) | Unpack the parameter using its kattype.
Parameters
----------
packed_value : str
The unescaped KATCP string to unpack.
Returns
-------
value : object
The unpacked value. | Below is the instruction that describes the task:
### Input:
Unpack the parameter using its kattype.
Parameters
----------
packed_value : str
The unescaped KATCP string to unpack.
Returns
-------
value : object
The unpacked value.
### Response:
def unpack(self, value):
"""Unpack the parameter using its kattype.
Parameters
----------
packed_value : str
The unescaped KATCP string to unpack.
Returns
-------
value : object
The unpacked value.
"""
# Wrap errors in FailReplies with information identifying the parameter
try:
return self._kattype.unpack(value, self.major)
except ValueError, message:
raise FailReply("Error in parameter %s (%s): %s" %
(self.position, self.name, message)) |
def parse_py_tree(self, pytree):
"""Parse the given Python package tree.
:param str pytree: The absolute path to the Python tree which is to be parsed.
:rtype: dict
:returns: A two-tuple. The first element is a dict where each key is the path of a parsed
Python module (relative to the Python tree) and its value is the expected rst module
name. The second element is a set where each element is a Python package or
sub-package.
:rtype: tuple
"""
parsed_pytree = {}
pypackages = set()
for base, dirs, files in os.walk(pytree):
if self._ignore_pydir(os.path.basename(base)):
continue
# TODO(Anthony): If this is being run against a Python 3 package, this needs to be
# adapted to account for namespace packages.
elif '__init__.py' not in files:
continue
package_basename = self.build_pypackage_basename(pytree=pytree, base=base)
pypackages.add(package_basename)
for filename in files:
if self._ignore_pyfile(filename):
continue
parsed_path = os.path.join(package_basename, filename)
parsed_pytree[parsed_path] = self.build_rst_name_from_pypath(parsed_path)
return parsed_pytree, pypackages | Parse the given Python package tree.
:param str pytree: The absolute path to the Python tree which is to be parsed.
:rtype: dict
:returns: A two-tuple. The first element is a dict where each key is the path of a parsed
Python module (relative to the Python tree) and its value is the expected rst module
name. The second element is a set where each element is a Python package or
sub-package.
:rtype: tuple | Below is the instruction that describes the task:
### Input:
Parse the given Python package tree.
:param str pytree: The absolute path to the Python tree which is to be parsed.
:rtype: dict
:returns: A two-tuple. The first element is a dict where each key is the path of a parsed
Python module (relative to the Python tree) and its value is the expected rst module
name. The second element is a set where each element is a Python package or
sub-package.
:rtype: tuple
### Response:
def parse_py_tree(self, pytree):
"""Parse the given Python package tree.
:param str pytree: The absolute path to the Python tree which is to be parsed.
:rtype: dict
:returns: A two-tuple. The first element is a dict where each key is the path of a parsed
Python module (relative to the Python tree) and its value is the expected rst module
name. The second element is a set where each element is a Python package or
sub-package.
:rtype: tuple
"""
parsed_pytree = {}
pypackages = set()
for base, dirs, files in os.walk(pytree):
if self._ignore_pydir(os.path.basename(base)):
continue
# TODO(Anthony): If this is being run against a Python 3 package, this needs to be
# adapted to account for namespace packages.
elif '__init__.py' not in files:
continue
package_basename = self.build_pypackage_basename(pytree=pytree, base=base)
pypackages.add(package_basename)
for filename in files:
if self._ignore_pyfile(filename):
continue
parsed_path = os.path.join(package_basename, filename)
parsed_pytree[parsed_path] = self.build_rst_name_from_pypath(parsed_path)
return parsed_pytree, pypackages |
def histograms(self, analytes=None, bins=25, logy=False,
filt=False, colourful=True):
"""
Plot histograms of analytes.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
bins : int
The number of bins in each histogram (default = 25)
logy : bool
If true, y axis is a log scale.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
colourful : bool
If True, histograms are colourful :)
Returns
-------
(fig, axes)
"""
if analytes is None:
analytes = self.analytes
if self.focus_stage in ['ratio', 'calibrated']:
analytes = [a for a in analytes if self.internal_standard not in a]
if colourful:
cmap = self.cmaps
else:
cmap = None
self.get_focus(filt=filt)
fig, axes = plot.histograms(self.focus, keys=analytes,
bins=bins, logy=logy, cmap=cmap)
return fig, axes | Plot histograms of analytes.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
bins : int
The number of bins in each histogram (default = 25)
logy : bool
If true, y axis is a log scale.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
colourful : bool
If True, histograms are colourful :)
Returns
-------
(fig, axes) | Below is the instruction that describes the task:
### Input:
Plot histograms of analytes.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
bins : int
The number of bins in each histogram (default = 25)
logy : bool
If true, y axis is a log scale.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
colourful : bool
If True, histograms are colourful :)
Returns
-------
(fig, axes)
### Response:
def histograms(self, analytes=None, bins=25, logy=False,
filt=False, colourful=True):
"""
Plot histograms of analytes.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
bins : int
The number of bins in each histogram (default = 25)
logy : bool
If true, y axis is a log scale.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
colourful : bool
If True, histograms are colourful :)
Returns
-------
(fig, axes)
"""
if analytes is None:
analytes = self.analytes
if self.focus_stage in ['ratio', 'calibrated']:
analytes = [a for a in analytes if self.internal_standard not in a]
if colourful:
cmap = self.cmaps
else:
cmap = None
self.get_focus(filt=filt)
fig, axes = plot.histograms(self.focus, keys=analytes,
bins=bins, logy=logy, cmap=cmap)
return fig, axes |
def register_function(scope=None, as_property=False, name=None):
"""Decorator to register a new function with vaex.
Example:
>>> import vaex
>>> df = vaex.example()
>>> @vaex.register_function()
>>> def invert(x):
>>> return 1/x
>>> df.x.invert()
>>> import numpy as np
>>> df = vaex.from_arrays(departure=np.arange('2015-01-01', '2015-12-05', dtype='datetime64'))
>>> @vaex.register_function(as_property=True, scope='dt')
>>> def dt_relative_day(x):
>>> return vaex.functions.dt_dayofyear(x)/365.
>>> df.departure.dt.relative_day
"""
prefix = ''
if scope:
prefix = scope + "_"
if scope not in scopes:
raise KeyError("unknown scope")
def wrapper(f, name=name):
name = name or f.__name__
# remove possible prefix
if name.startswith(prefix):
name = name[len(prefix):]
full_name = prefix + name
if scope:
def closure(name=name, full_name=full_name, function=f):
def wrapper(self, *args, **kwargs):
lazy_func = getattr(self.expression.ds.func, full_name)
args = (self.expression, ) + args
return lazy_func(*args, **kwargs)
return functools.wraps(function)(wrapper)
if as_property:
setattr(scopes[scope], name, property(closure()))
else:
setattr(scopes[scope], name, closure())
else:
def closure(name=name, full_name=full_name, function=f):
def wrapper(self, *args, **kwargs):
lazy_func = getattr(self.ds.func, full_name)
args = (self, ) + args
return lazy_func(*args, **kwargs)
return functools.wraps(function)(wrapper)
setattr(vaex.expression.Expression, name, closure())
vaex.expression.expression_namespace[prefix + name] = f
return f # we leave the original function as is
return wrapper | Decorator to register a new function with vaex.
Example:
>>> import vaex
>>> df = vaex.example()
>>> @vaex.register_function()
>>> def invert(x):
>>> return 1/x
>>> df.x.invert()
>>> import numpy as np
>>> df = vaex.from_arrays(departure=np.arange('2015-01-01', '2015-12-05', dtype='datetime64'))
>>> @vaex.register_function(as_property=True, scope='dt')
>>> def dt_relative_day(x):
>>> return vaex.functions.dt_dayofyear(x)/365.
>>> df.departure.dt.relative_day | Below is the instruction that describes the task:
### Input:
Decorator to register a new function with vaex.
Example:
>>> import vaex
>>> df = vaex.example()
>>> @vaex.register_function()
>>> def invert(x):
>>> return 1/x
>>> df.x.invert()
>>> import numpy as np
>>> df = vaex.from_arrays(departure=np.arange('2015-01-01', '2015-12-05', dtype='datetime64'))
>>> @vaex.register_function(as_property=True, scope='dt')
>>> def dt_relative_day(x):
>>> return vaex.functions.dt_dayofyear(x)/365.
>>> df.departure.dt.relative_day
### Response:
def register_function(scope=None, as_property=False, name=None):
"""Decorator to register a new function with vaex.
Example:
>>> import vaex
>>> df = vaex.example()
>>> @vaex.register_function()
>>> def invert(x):
>>> return 1/x
>>> df.x.invert()
>>> import numpy as np
>>> df = vaex.from_arrays(departure=np.arange('2015-01-01', '2015-12-05', dtype='datetime64'))
>>> @vaex.register_function(as_property=True, scope='dt')
>>> def dt_relative_day(x):
>>> return vaex.functions.dt_dayofyear(x)/365.
>>> df.departure.dt.relative_day
"""
prefix = ''
if scope:
prefix = scope + "_"
if scope not in scopes:
raise KeyError("unknown scope")
def wrapper(f, name=name):
name = name or f.__name__
# remove possible prefix
if name.startswith(prefix):
name = name[len(prefix):]
full_name = prefix + name
if scope:
def closure(name=name, full_name=full_name, function=f):
def wrapper(self, *args, **kwargs):
lazy_func = getattr(self.expression.ds.func, full_name)
args = (self.expression, ) + args
return lazy_func(*args, **kwargs)
return functools.wraps(function)(wrapper)
if as_property:
setattr(scopes[scope], name, property(closure()))
else:
setattr(scopes[scope], name, closure())
else:
def closure(name=name, full_name=full_name, function=f):
def wrapper(self, *args, **kwargs):
lazy_func = getattr(self.ds.func, full_name)
args = (self, ) + args
return lazy_func(*args, **kwargs)
return functools.wraps(function)(wrapper)
setattr(vaex.expression.Expression, name, closure())
vaex.expression.expression_namespace[prefix + name] = f
return f # we leave the original function as is
return wrapper |
def check_cv(self, y):
"""Resolve which cross validation strategy is used."""
y_arr = None
if self.stratified:
# Try to convert y to numpy for sklearn's check_cv; if conversion
# doesn't work, still try.
try:
y_arr = to_numpy(y)
except (AttributeError, TypeError):
y_arr = y
if self._is_float(self.cv):
return self._check_cv_float()
return self._check_cv_non_float(y_arr) | Resolve which cross validation strategy is used. | Below is the instruction that describes the task:
### Input:
Resolve which cross validation strategy is used.
### Response:
def check_cv(self, y):
"""Resolve which cross validation strategy is used."""
y_arr = None
if self.stratified:
# Try to convert y to numpy for sklearn's check_cv; if conversion
# doesn't work, still try.
try:
y_arr = to_numpy(y)
except (AttributeError, TypeError):
y_arr = y
if self._is_float(self.cv):
return self._check_cv_float()
return self._check_cv_non_float(y_arr) |
def version(self, pretty=False, best=False):
"""
Return the version of the OS distribution, as a string.
For details, see :func:`distro.version`.
"""
versions = [
self.os_release_attr('version_id'),
self.lsb_release_attr('release'),
self.distro_release_attr('version_id'),
self._parse_distro_release_content(
self.os_release_attr('pretty_name')).get('version_id', ''),
self._parse_distro_release_content(
self.lsb_release_attr('description')).get('version_id', ''),
self.uname_attr('release')
]
version = ''
if best:
# This algorithm uses the last version in priority order that has
# the best precision. If the versions are not in conflict, that
# does not matter; otherwise, using the last one instead of the
# first one might be considered a surprise.
for v in versions:
if v.count(".") > version.count(".") or version == '':
version = v
else:
for v in versions:
if v != '':
version = v
break
if pretty and version and self.codename():
version = u'{0} ({1})'.format(version, self.codename())
return version | Return the version of the OS distribution, as a string.
For details, see :func:`distro.version`. | Below is the instruction that describes the task:
### Input:
Return the version of the OS distribution, as a string.
For details, see :func:`distro.version`.
### Response:
def version(self, pretty=False, best=False):
"""
Return the version of the OS distribution, as a string.
For details, see :func:`distro.version`.
"""
versions = [
self.os_release_attr('version_id'),
self.lsb_release_attr('release'),
self.distro_release_attr('version_id'),
self._parse_distro_release_content(
self.os_release_attr('pretty_name')).get('version_id', ''),
self._parse_distro_release_content(
self.lsb_release_attr('description')).get('version_id', ''),
self.uname_attr('release')
]
version = ''
if best:
# This algorithm uses the last version in priority order that has
# the best precision. If the versions are not in conflict, that
# does not matter; otherwise, using the last one instead of the
# first one might be considered a surprise.
for v in versions:
if v.count(".") > version.count(".") or version == '':
version = v
else:
for v in versions:
if v != '':
version = v
break
if pretty and version and self.codename():
version = u'{0} ({1})'.format(version, self.codename())
return version |
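A minimal stand-alone sketch of the "best precision" selection loop used in version() above, run over invented candidate strings rather than values read from a real OS:
versions = ['16.04', '16.04.6', '', '16']   # hypothetical candidates in priority order
version = ''
for v in versions:
    if v.count(".") > version.count(".") or version == '':
        version = v
print(version)  # '16.04.6' -- the candidate with the most dot-separated fields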
def _wave(self):
"""Return a wave.Wave_read instance from the ``wave`` module."""
try:
return wave.open(StringIO(self.contents))
except wave.Error, err:
err.message += "\nInvalid wave file: %s" % self
err.args = (err.message,)
raise | Return a wave.Wave_read instance from the ``wave`` module. | Below is the instruction that describes the task:
### Input:
Return a wave.Wave_read instance from the ``wave`` module.
### Response:
def _wave(self):
"""Return a wave.Wave_read instance from the ``wave`` module."""
try:
return wave.open(StringIO(self.contents))
except wave.Error, err:
err.message += "\nInvalid wave file: %s" % self
err.args = (err.message,)
raise |
def align_yaxis_np(axes):
"""Align zeros of the two axes, zooming them out by same ratio"""
axes = np.array(axes)
extrema = np.array([ax.get_ylim() for ax in axes])
# reset for divide by zero issues
for i in range(len(extrema)):
if np.isclose(extrema[i, 0], 0.0):
extrema[i, 0] = -1
if np.isclose(extrema[i, 1], 0.0):
extrema[i, 1] = 1
# upper and lower limits
lowers = extrema[:, 0]
uppers = extrema[:, 1]
# if all pos or all neg, don't scale
all_positive = False
all_negative = False
if lowers.min() > 0.0:
all_positive = True
if uppers.max() < 0.0:
all_negative = True
if all_negative or all_positive:
# don't scale
return
# pick "most centered" axis
res = abs(uppers+lowers)
min_index = np.argmin(res)
# scale positive or negative part
multiplier1 = abs(uppers[min_index]/lowers[min_index])
multiplier2 = abs(lowers[min_index]/uppers[min_index])
for i in range(len(extrema)):
# scale positive or negative part based on which induces valid
if i != min_index:
lower_change = extrema[i, 1] * -1*multiplier2
upper_change = extrema[i, 0] * -1*multiplier1
if upper_change < extrema[i, 1]:
extrema[i, 0] = lower_change
else:
extrema[i, 1] = upper_change
# bump by 10% for a margin
extrema[i, 0] *= 1.1
extrema[i, 1] *= 1.1
# set axes limits
[axes[i].set_ylim(*extrema[i]) for i in range(len(extrema))] | Align zeros of the two axes, zooming them out by same ratio | Below is the instruction that describes the task:
### Input:
Align zeros of the two axes, zooming them out by same ratio
### Response:
def align_yaxis_np(axes):
"""Align zeros of the two axes, zooming them out by same ratio"""
axes = np.array(axes)
extrema = np.array([ax.get_ylim() for ax in axes])
# reset for divide by zero issues
for i in range(len(extrema)):
if np.isclose(extrema[i, 0], 0.0):
extrema[i, 0] = -1
if np.isclose(extrema[i, 1], 0.0):
extrema[i, 1] = 1
# upper and lower limits
lowers = extrema[:, 0]
uppers = extrema[:, 1]
# if all pos or all neg, don't scale
all_positive = False
all_negative = False
if lowers.min() > 0.0:
all_positive = True
if uppers.max() < 0.0:
all_negative = True
if all_negative or all_positive:
# don't scale
return
# pick "most centered" axis
res = abs(uppers+lowers)
min_index = np.argmin(res)
# scale positive or negative part
multiplier1 = abs(uppers[min_index]/lowers[min_index])
multiplier2 = abs(lowers[min_index]/uppers[min_index])
for i in range(len(extrema)):
# scale positive or negative part based on which induces valid
if i != min_index:
lower_change = extrema[i, 1] * -1*multiplier2
upper_change = extrema[i, 0] * -1*multiplier1
if upper_change < extrema[i, 1]:
extrema[i, 0] = lower_change
else:
extrema[i, 1] = upper_change
# bump by 10% for a margin
extrema[i, 0] *= 1.1
extrema[i, 1] *= 1.1
# set axes limits
[axes[i].set_ylim(*extrema[i]) for i in range(len(extrema))] |
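A hedged usage sketch for align_yaxis_np() above; it assumes the function and numpy (as np) are already in scope as defined above, that matplotlib is available, and it uses made-up data on a twin-axis plot:
import numpy as np
import matplotlib.pyplot as plt
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.plot([0, 1, 2, 3], [-2, 1, 3, 5], color='tab:blue')
ax2.plot([0, 1, 2, 3], [10, -40, 25, 80], color='tab:red')
align_yaxis_np([ax1, ax2])   # zeros of both y-axes now sit at the same height
plt.show()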
def from_aid(cls, aid):
"""Retrieve the Assay record for the specified AID.
:param int aid: The PubChem Assay Identifier (AID).
"""
record = json.loads(request(aid, 'aid', 'assay', 'description').read().decode())['PC_AssayContainer'][0]
return cls(record) | Retrieve the Assay record for the specified AID.
:param int aid: The PubChem Assay Identifier (AID). | Below is the instruction that describes the task:
### Input:
Retrieve the Assay record for the specified AID.
:param int aid: The PubChem Assay Identifier (AID).
### Response:
def from_aid(cls, aid):
"""Retrieve the Assay record for the specified AID.
:param int aid: The PubChem Assay Identifier (AID).
"""
record = json.loads(request(aid, 'aid', 'assay', 'description').read().decode())['PC_AssayContainer'][0]
return cls(record) |
def riseset(self, crd, ev="5deg"):
"""This will give the rise/set times of a source. It needs the
position in the frame, and a time. If the latter is not set, the
current time will be used.
:param crd: a direction measure
:param ev: the elevation limit as a quantity or string
:returns: The returned value is a `dict` with a
'solved' key, which is `False` if the source is always
below or above the horizon. In that case the rise and set
fields will all have a string value. The `dict` also returns
a rise and set `dict`, with 'last' and 'utc' keys showing
the rise and set times as epochs.
"""
a = self.rise(crd, ev)
if isinstance(a['rise'], str):
return {"rise": {"last": a[0], "utc": a[0]},
"set": {"last": a[1], "utc": a[1]},
"solved": False}
ofe = self.measure(self._framestack["epoch"], "utc")
if not is_measure(ofe):
ofe = self.epoch('utc', 'today')
x = a.copy()
for k in x:
x[k] = self.measure(
self.epoch("last",
a[k].totime(),
off=self.epoch("r_utc",
(dq.quantity(ofe["m0"])
+ dq.quantity("0.5d")
))
),
"utc")
return {"rise": {"last": self.epoch("last",
a["rise"].totime()),
"utc": x["rise"]},
"set": {"last": self.epoch("last",
a["set"].totime()),
"utc": x["set"]},
"solved": True
} | This will give the rise/set times of a source. It needs the
position in the frame, and a time. If the latter is not set, the
current time will be used.
:param crd: a direction measure
:param ev: the elevation limit as a quantity or string
:returns: The returned value is a `dict` with a
'solved' key, which is `False` if the source is always
below or above the horizon. In that case the rise and set
fields will all have a string value. The `dict` also returns
a rise and set `dict`, with 'last' and 'utc' keys showing
the rise and set times as epochs. | Below is the instruction that describes the task:
### Input:
This will give the rise/set times of a source. It needs the
position in the frame, and a time. If the latter is not set, the
current time will be used.
:param crd: a direction measure
:param ev: the elevation limit as a quantity or string
:returns: The returned value is a `dict` with a
'solved' key, which is `False` if the source is always
below or above the horizon. In that case the rise and set
fields will all have a string value. The `dict` also returns
a rise and set `dict`, with 'last' and 'utc' keys showing
the rise and set times as epochs.
### Response:
def riseset(self, crd, ev="5deg"):
"""This will give the rise/set times of a source. It needs the
position in the frame, and a time. If the latter is not set, the
current time will be used.
:param crd: a direction measure
:param ev: the elevation limit as a quantity or string
:returns: The returned value is a `dict` with a
'solved' key, which is `False` if the source is always
below or above the horizon. In that case the rise and set
fields will all have a string value. The `dict` also returns
a rise and set `dict`, with 'last' and 'utc' keys showing
the rise and set times as epochs.
"""
a = self.rise(crd, ev)
if isinstance(a['rise'], str):
return {"rise": {"last": a[0], "utc": a[0]},
"set": {"last": a[1], "utc": a[1]},
"solved": False}
ofe = self.measure(self._framestack["epoch"], "utc")
if not is_measure(ofe):
ofe = self.epoch('utc', 'today')
x = a.copy()
for k in x:
x[k] = self.measure(
self.epoch("last",
a[k].totime(),
off=self.epoch("r_utc",
(dq.quantity(ofe["m0"])
+ dq.quantity("0.5d")
))
),
"utc")
return {"rise": {"last": self.epoch("last",
a["rise"].totime()),
"utc": x["rise"]},
"set": {"last": self.epoch("last",
a["set"].totime()),
"utc": x["set"]},
"solved": True
} |
def load_from_data(self, data, with_undefined=False):
"""Load index structure.
:param with_undefined: Load undefined keys as well
:type with_undefined: bool
"""
if with_undefined:
defined_values, undefined_values = data
else:
defined_values = data
undefined_values = None
self._index = defaultdict(list, defined_values)
self._reverse_index = defaultdict(list)
for key, values in self._index.items():
for value in values:
self._reverse_index[value].append(key)
if undefined_values:
self._undefined_keys = {key: True for key in undefined_values}
else:
self._undefined_keys = {} | Load index structure.
:param with_undefined: Load undefined keys as well
:type with_undefined: bool | Below is the instruction that describes the task:
### Input:
Load index structure.
:param with_undefined: Load undefined keys as well
:type with_undefined: bool
### Response:
def load_from_data(self, data, with_undefined=False):
"""Load index structure.
:param with_undefined: Load undefined keys as well
:type with_undefined: bool
"""
if with_undefined:
defined_values, undefined_values = data
else:
defined_values = data
undefined_values = None
self._index = defaultdict(list, defined_values)
self._reverse_index = defaultdict(list)
for key, values in self._index.items():
for value in values:
self._reverse_index[value].append(key)
if undefined_values:
self._undefined_keys = {key: True for key in undefined_values}
else:
self._undefined_keys = {} |
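A self-contained re-creation of the forward/reverse index construction performed in load_from_data() above, using invented keys and values to show the resulting shape:
from collections import defaultdict
index = defaultdict(list, {'doc1': ['python', 'mysql'], 'doc2': ['python']})
reverse_index = defaultdict(list)
for key, values in index.items():
    for value in values:
        reverse_index[value].append(key)
print(dict(reverse_index))  # {'python': ['doc1', 'doc2'], 'mysql': ['doc1']}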
def _create_object(self, data, request):
""" Create a python object from the given data.
This will use ``self.factory`` object's ``create()`` function to
create the data.
If no factory is defined, this will simply return the same data
that was given.
"""
if request.method.upper() == 'POST' and self.post_factory:
fac_func = self.post_factory.create
else:
fac_func = self.factory.create
if isinstance(data, (list, tuple)):
return map(fac_func, data)
else:
return fac_func(data) | Create a python object from the given data.
This will use ``self.factory`` object's ``create()`` function to
create the data.
If no factory is defined, this will simply return the same data
that was given. | Below is the instruction that describes the task:
### Input:
Create a python object from the given data.
This will use ``self.factory`` object's ``create()`` function to
create the data.
If no factory is defined, this will simply return the same data
that was given.
### Response:
def _create_object(self, data, request):
""" Create a python object from the given data.
This will use ``self.factory`` object's ``create()`` function to
create the data.
If no factory is defined, this will simply return the same data
that was given.
"""
if request.method.upper() == 'POST' and self.post_factory:
fac_func = self.post_factory.create
else:
fac_func = self.factory.create
if isinstance(data, (list, tuple)):
return map(fac_func, data)
else:
return fac_func(data) |
def opened(self, *args):
"""Initiates communication with the remote controlled device.
:param args:
"""
self._serial_open = True
self.log("Opened: ", args, lvl=debug)
self._send_command(b'l,1') # Saying hello, shortly
self.log("Turning off engine, pump and neutralizing rudder")
self._send_command(b'v')
self._handle_servo(self._machine_channel, 0)
self._handle_servo(self._rudder_channel, 127)
self._set_digital_pin(self._pump_channel, 0)
# self._send_command(b'h')
self._send_command(b'l,0')
self._send_command(b'm,HFOS Control') | Initiates communication with the remote controlled device.
:param args: | Below is the instruction that describes the task:
### Input:
Initiates communication with the remote controlled device.
:param args:
### Response:
def opened(self, *args):
"""Initiates communication with the remote controlled device.
:param args:
"""
self._serial_open = True
self.log("Opened: ", args, lvl=debug)
self._send_command(b'l,1') # Saying hello, shortly
self.log("Turning off engine, pump and neutralizing rudder")
self._send_command(b'v')
self._handle_servo(self._machine_channel, 0)
self._handle_servo(self._rudder_channel, 127)
self._set_digital_pin(self._pump_channel, 0)
# self._send_command(b'h')
self._send_command(b'l,0')
self._send_command(b'm,HFOS Control') |
def delete(self, uri, force=False, timeout=-1, custom_headers=None):
"""Deletes current resource.
Args:
force: Flag to delete the resource forcefully, default is False.
timeout: Timeout in seconds.
custom_headers: Allows to set custom http headers.
"""
if force:
uri += '?force=True'
logger.debug("Delete resource (uri = %s)" % (str(uri)))
task, body = self._connection.delete(uri, custom_headers=custom_headers)
if not task:
# 204 NO CONTENT
# Successful return from a synchronous delete operation.
return True
task = self._task_monitor.wait_for_task(task, timeout=timeout)
return task | Deletes current resource.
Args:
force: Flag to delete the resource forcefully, default is False.
timeout: Timeout in seconds.
custom_headers: Allows to set custom http headers. | Below is the instruction that describes the task:
### Input:
Deletes current resource.
Args:
force: Flag to delete the resource forcefully, default is False.
timeout: Timeout in seconds.
custom_headers: Allows to set custom http headers.
### Response:
def delete(self, uri, force=False, timeout=-1, custom_headers=None):
"""Deletes current resource.
Args:
force: Flag to delete the resource forcefully, default is False.
timeout: Timeout in seconds.
custom_headers: Allows to set custom http headers.
"""
if force:
uri += '?force=True'
logger.debug("Delete resource (uri = %s)" % (str(uri)))
task, body = self._connection.delete(uri, custom_headers=custom_headers)
if not task:
# 204 NO CONTENT
# Successful return from a synchronous delete operation.
return True
task = self._task_monitor.wait_for_task(task, timeout=timeout)
return task |
async def stepper_config(self, steps_per_revolution, stepper_pins):
"""
Configure stepper motor prior to operation.
This is a FirmataPlus feature.
:param steps_per_revolution: number of steps per motor revolution
:param stepper_pins: a list of control pin numbers - either 4 or 2
:returns: No return value.
"""
data = [PrivateConstants.STEPPER_CONFIGURE, steps_per_revolution & 0x7f,
(steps_per_revolution >> 7) & 0x7f]
for pin in range(len(stepper_pins)):
data.append(stepper_pins[pin])
await self._send_sysex(PrivateConstants.STEPPER_DATA, data) | Configure stepper motor prior to operation.
This is a FirmataPlus feature.
:param steps_per_revolution: number of steps per motor revolution
:param stepper_pins: a list of control pin numbers - either 4 or 2
:returns: No return value. | Below is the instruction that describes the task:
### Input:
Configure stepper motor prior to operation.
This is a FirmataPlus feature.
:param steps_per_revolution: number of steps per motor revolution
:param stepper_pins: a list of control pin numbers - either 4 or 2
:returns: No return value.
### Response:
async def stepper_config(self, steps_per_revolution, stepper_pins):
"""
Configure stepper motor prior to operation.
This is a FirmataPlus feature.
:param steps_per_revolution: number of steps per motor revolution
:param stepper_pins: a list of control pin numbers - either 4 or 2
:returns: No return value.
"""
data = [PrivateConstants.STEPPER_CONFIGURE, steps_per_revolution & 0x7f,
(steps_per_revolution >> 7) & 0x7f]
for pin in range(len(stepper_pins)):
data.append(stepper_pins[pin])
await self._send_sysex(PrivateConstants.STEPPER_DATA, data) |
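The two masked values sent in stepper_config() above pack steps_per_revolution into Firmata-style 7-bit bytes (sysex payload bytes must stay below 0x80). A stand-alone illustration with an assumed value of 200 steps per revolution:
steps_per_revolution = 200                  # hypothetical motor value
lsb = steps_per_revolution & 0x7f           # low 7 bits  -> 72
msb = (steps_per_revolution >> 7) & 0x7f    # high 7 bits -> 1
print(lsb, msb, (msb << 7) | lsb)           # 72 1 200 -- round-trips back to 200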
def setCurrentIndex(self, index):
"""
Sets the current item to the item at the inputed index.
:param index | <int>
"""
if self._currentIndex == index:
return
self._currentIndex = index
self.currentIndexChanged.emit(index)
for i, item in enumerate(self.items()):
item.setMenuEnabled(i == index)
self.repaint() | Sets the current item to the item at the inputed index.
:param index | <int> | Below is the instruction that describes the task:
### Input:
Sets the current item to the item at the inputed index.
:param index | <int>
### Response:
def setCurrentIndex(self, index):
"""
Sets the current item to the item at the inputed index.
:param index | <int>
"""
if self._currentIndex == index:
return
self._currentIndex = index
self.currentIndexChanged.emit(index)
for i, item in enumerate(self.items()):
item.setMenuEnabled(i == index)
self.repaint() |
def from_config(config, kwargs=None):
"""
Creates a solver from a specification dict.
"""
return util.get_object(
obj=config,
predefined=tensorforce.core.optimizers.solvers.solvers,
kwargs=kwargs
) | Creates a solver from a specification dict. | Below is the instruction that describes the task:
### Input:
Creates a solver from a specification dict.
### Response:
def from_config(config, kwargs=None):
"""
Creates a solver from a specification dict.
"""
return util.get_object(
obj=config,
predefined=tensorforce.core.optimizers.solvers.solvers,
kwargs=kwargs
) |
def _set_pip_ssl(anaconda_dir):
"""Set PIP SSL certificate to installed conda certificate to avoid SSL errors
"""
if anaconda_dir:
cert_file = os.path.join(anaconda_dir, "ssl", "cert.pem")
if os.path.exists(cert_file):
os.environ["PIP_CERT"] = cert_file | Set PIP SSL certificate to installed conda certificate to avoid SSL errors | Below is the the instruction that describes the task:
### Input:
Set PIP SSL certificate to installed conda certificate to avoid SSL errors
### Response:
def _set_pip_ssl(anaconda_dir):
"""Set PIP SSL certificate to installed conda certificate to avoid SSL errors
"""
if anaconda_dir:
cert_file = os.path.join(anaconda_dir, "ssl", "cert.pem")
if os.path.exists(cert_file):
os.environ["PIP_CERT"] = cert_file |
def import_bill(data, standalone_votes, categorizer):
"""
insert or update a bill
data - raw bill JSON
standalone_votes - votes scraped separately
categorizer - SubjectCategorizer (None - no categorization)
"""
abbr = data[settings.LEVEL_FIELD]
# clean up bill_ids
data['bill_id'] = fix_bill_id(data['bill_id'])
if 'alternate_bill_ids' in data:
data['alternate_bill_ids'] = [fix_bill_id(bid) for bid in
data['alternate_bill_ids']]
# move subjects to scraped_subjects
# NOTE: intentionally doesn't copy blank lists of subjects
# this avoids the problem where a bill is re-run but we can't
# get subjects anymore (quite common)
subjects = data.pop('subjects', None)
if subjects:
data['scraped_subjects'] = subjects
# update categorized subjects
if categorizer:
categorizer.categorize_bill(data)
# companions
for companion in data['companions']:
companion['bill_id'] = fix_bill_id(companion['bill_id'])
# query based on companion
spec = companion.copy()
spec[settings.LEVEL_FIELD] = abbr
if not spec['chamber']:
spec.pop('chamber')
companion_obj = db.bills.find_one(spec)
if companion_obj:
companion['internal_id'] = companion_obj['_id']
else:
logger.warning('Unknown companion: {chamber} {session} {bill_id}'
.format(**companion))
# look for a prior version of this bill
bill = db.bills.find_one({settings.LEVEL_FIELD: abbr,
'session': data['session'],
'chamber': data['chamber'],
'bill_id': data['bill_id']})
# keep doc ids consistent
doc_matcher = DocumentMatcher(abbr)
if bill:
doc_matcher.learn_ids(bill['versions'] + bill['documents'])
doc_matcher.set_ids(data['versions'] + data['documents'])
# match sponsor leg_ids
match_sponsor_ids(abbr, data)
# process votes ############
# pull votes off bill
bill_votes = data.pop('votes', [])
# grab the external bill votes if present
if metadata(abbr).get('_partial_vote_bill_id'):
# this is a hack initially added for Rhode Island where we can't
# determine the full bill_id, if this key is in the metadata
# we just use the numeric portion, not ideal as it won't work
# where HB/SBs overlap, but in RI they never do
# pull off numeric portion of bill_id
numeric_bill_id = data['bill_id'].split()[1]
bill_votes += standalone_votes.pop((data['chamber'], data['session'],
numeric_bill_id), [])
else:
# add loaded votes to data
bill_votes += standalone_votes.pop((data['chamber'], data['session'],
data['bill_id']), [])
# do id matching and other vote prep
if bill:
prepare_votes(abbr, data['session'], bill['_id'], bill_votes)
else:
prepare_votes(abbr, data['session'], None, bill_votes)
# process actions ###########
dates = {'first': None, 'last': None, 'passed_upper': None,
'passed_lower': None, 'signed': None}
vote_flags = {
"bill:passed",
"bill:failed",
"bill:veto_override:passed",
"bill:veto_override:failed",
"amendment:passed",
"amendment:failed",
"committee:passed",
"committee:passed:favorable",
"committee:passed:unfavorable",
"committee:passed:failed"
}
already_linked = set()
remove_vote = set()
for action in data['actions']:
adate = action['date']
def _match_committee(name):
return get_committee_id(abbr, action['actor'], name)
def _match_legislator(name):
return get_legislator_id(abbr,
data['session'],
action['actor'],
name)
resolvers = {
"committee": _match_committee,
"legislator": _match_legislator
}
if "related_entities" in action:
for entity in action['related_entities']:
try:
resolver = resolvers[entity['type']]
except KeyError as e:
# We don't know how to deal.
logger.error("I don't know how to sort a %s" % e)
continue
id = resolver(entity['name'])
entity['id'] = id
# first & last dates
if not dates['first'] or adate < dates['first']:
dates['first'] = adate
if not dates['last'] or adate > dates['last']:
dates['last'] = adate
# passed & signed dates
if (not dates['passed_upper'] and action['actor'] == 'upper'
and 'bill:passed' in action['type']):
dates['passed_upper'] = adate
elif (not dates['passed_lower'] and action['actor'] == 'lower'
and 'bill:passed' in action['type']):
dates['passed_lower'] = adate
elif (not dates['signed'] and 'governor:signed' in action['type']):
dates['signed'] = adate
# vote-action matching
action_attached = False
# only attempt vote matching if action has a date and is one of the
# designated vote action types
if set(action['type']).intersection(vote_flags) and action['date']:
for vote in bill_votes:
if not vote['date']:
continue
delta = abs(vote['date'] - action['date'])
if (delta < datetime.timedelta(hours=20) and
vote['chamber'] == action['actor']):
if action_attached:
# multiple votes match, we can't guess
action.pop('related_votes', None)
else:
related_vote = vote['vote_id']
if related_vote in already_linked:
remove_vote.add(related_vote)
already_linked.add(related_vote)
action['related_votes'] = [related_vote]
action_attached = True
# remove related_votes that we linked to multiple actions
for action in data['actions']:
for vote in remove_vote:
if vote in action.get('related_votes', []):
action['related_votes'].remove(vote)
# save action dates to data
data['action_dates'] = dates
data['_term'] = term_for_session(abbr, data['session'])
alt_titles = set(data.get('alternate_titles', []))
for version in data['versions']:
# Merge any version titles into the alternate_titles list
if 'title' in version:
alt_titles.add(version['title'])
if '+short_title' in version:
alt_titles.add(version['+short_title'])
try:
# Make sure the primary title isn't included in the
# alternate title list
alt_titles.remove(data['title'])
except KeyError:
pass
data['alternate_titles'] = list(alt_titles)
data = apply_filters(filters, data)
if not bill:
insert_with_id(data)
git_add_bill(data)
save_votes(data, bill_votes)
return "insert"
else:
update(bill, data, db.bills)
git_add_bill(bill)
save_votes(bill, bill_votes)
return "update" | insert or update a bill
data - raw bill JSON
standalone_votes - votes scraped separately
categorizer - SubjectCategorizer (None - no categorization) | Below is the instruction that describes the task:
### Input:
insert or update a bill
data - raw bill JSON
standalone_votes - votes scraped separately
categorizer - SubjectCategorizer (None - no categorization)
### Response:
def import_bill(data, standalone_votes, categorizer):
"""
insert or update a bill
data - raw bill JSON
standalone_votes - votes scraped separately
categorizer - SubjectCategorizer (None - no categorization)
"""
abbr = data[settings.LEVEL_FIELD]
# clean up bill_ids
data['bill_id'] = fix_bill_id(data['bill_id'])
if 'alternate_bill_ids' in data:
data['alternate_bill_ids'] = [fix_bill_id(bid) for bid in
data['alternate_bill_ids']]
# move subjects to scraped_subjects
# NOTE: intentionally doesn't copy blank lists of subjects
# this avoids the problem where a bill is re-run but we can't
# get subjects anymore (quite common)
subjects = data.pop('subjects', None)
if subjects:
data['scraped_subjects'] = subjects
# update categorized subjects
if categorizer:
categorizer.categorize_bill(data)
# companions
for companion in data['companions']:
companion['bill_id'] = fix_bill_id(companion['bill_id'])
# query based on companion
spec = companion.copy()
spec[settings.LEVEL_FIELD] = abbr
if not spec['chamber']:
spec.pop('chamber')
companion_obj = db.bills.find_one(spec)
if companion_obj:
companion['internal_id'] = companion_obj['_id']
else:
logger.warning('Unknown companion: {chamber} {session} {bill_id}'
.format(**companion))
# look for a prior version of this bill
bill = db.bills.find_one({settings.LEVEL_FIELD: abbr,
'session': data['session'],
'chamber': data['chamber'],
'bill_id': data['bill_id']})
# keep doc ids consistent
doc_matcher = DocumentMatcher(abbr)
if bill:
doc_matcher.learn_ids(bill['versions'] + bill['documents'])
doc_matcher.set_ids(data['versions'] + data['documents'])
# match sponsor leg_ids
match_sponsor_ids(abbr, data)
# process votes ############
# pull votes off bill
bill_votes = data.pop('votes', [])
# grab the external bill votes if present
if metadata(abbr).get('_partial_vote_bill_id'):
# this is a hack initially added for Rhode Island where we can't
# determine the full bill_id, if this key is in the metadata
# we just use the numeric portion, not ideal as it won't work
# where HB/SBs overlap, but in RI they never do
# pull off numeric portion of bill_id
numeric_bill_id = data['bill_id'].split()[1]
bill_votes += standalone_votes.pop((data['chamber'], data['session'],
numeric_bill_id), [])
else:
# add loaded votes to data
bill_votes += standalone_votes.pop((data['chamber'], data['session'],
data['bill_id']), [])
# do id matching and other vote prep
if bill:
prepare_votes(abbr, data['session'], bill['_id'], bill_votes)
else:
prepare_votes(abbr, data['session'], None, bill_votes)
# process actions ###########
dates = {'first': None, 'last': None, 'passed_upper': None,
'passed_lower': None, 'signed': None}
vote_flags = {
"bill:passed",
"bill:failed",
"bill:veto_override:passed",
"bill:veto_override:failed",
"amendment:passed",
"amendment:failed",
"committee:passed",
"committee:passed:favorable",
"committee:passed:unfavorable",
"committee:passed:failed"
}
already_linked = set()
remove_vote = set()
for action in data['actions']:
adate = action['date']
def _match_committee(name):
return get_committee_id(abbr, action['actor'], name)
def _match_legislator(name):
return get_legislator_id(abbr,
data['session'],
action['actor'],
name)
resolvers = {
"committee": _match_committee,
"legislator": _match_legislator
}
if "related_entities" in action:
for entity in action['related_entities']:
try:
resolver = resolvers[entity['type']]
except KeyError as e:
# We don't know how to deal.
logger.error("I don't know how to sort a %s" % e)
continue
id = resolver(entity['name'])
entity['id'] = id
# first & last dates
if not dates['first'] or adate < dates['first']:
dates['first'] = adate
if not dates['last'] or adate > dates['last']:
dates['last'] = adate
# passed & signed dates
if (not dates['passed_upper'] and action['actor'] == 'upper'
and 'bill:passed' in action['type']):
dates['passed_upper'] = adate
elif (not dates['passed_lower'] and action['actor'] == 'lower'
and 'bill:passed' in action['type']):
dates['passed_lower'] = adate
elif (not dates['signed'] and 'governor:signed' in action['type']):
dates['signed'] = adate
# vote-action matching
action_attached = False
# only attempt vote matching if action has a date and is one of the
# designated vote action types
if set(action['type']).intersection(vote_flags) and action['date']:
for vote in bill_votes:
if not vote['date']:
continue
delta = abs(vote['date'] - action['date'])
if (delta < datetime.timedelta(hours=20) and
vote['chamber'] == action['actor']):
if action_attached:
# multiple votes match, we can't guess
action.pop('related_votes', None)
else:
related_vote = vote['vote_id']
if related_vote in already_linked:
remove_vote.add(related_vote)
already_linked.add(related_vote)
action['related_votes'] = [related_vote]
action_attached = True
# remove related_votes that we linked to multiple actions
for action in data['actions']:
for vote in remove_vote:
if vote in action.get('related_votes', []):
action['related_votes'].remove(vote)
# save action dates to data
data['action_dates'] = dates
data['_term'] = term_for_session(abbr, data['session'])
alt_titles = set(data.get('alternate_titles', []))
for version in data['versions']:
# Merge any version titles into the alternate_titles list
if 'title' in version:
alt_titles.add(version['title'])
if '+short_title' in version:
alt_titles.add(version['+short_title'])
try:
# Make sure the primary title isn't included in the
# alternate title list
alt_titles.remove(data['title'])
except KeyError:
pass
data['alternate_titles'] = list(alt_titles)
data = apply_filters(filters, data)
if not bill:
insert_with_id(data)
git_add_bill(data)
save_votes(data, bill_votes)
return "insert"
else:
update(bill, data, db.bills)
git_add_bill(bill)
save_votes(bill, bill_votes)
return "update" |
def _clause(self, pt: parsing.ParserTree) -> [ast.stmt]:
"""Normalize a test expression into a statements list.
Statements list are returned as-is.
Expression is packaged as:
if not expr:
return False
"""
if isinstance(pt, list):
return pt
return [ast.If(ast.UnaryOp(ast.Not(), pt),
[self.__exit_scope()],
[])] | Normalize a test expression into a statements list.
Statements list are returned as-is.
Expression is packaged as:
if not expr:
return False | Below is the instruction that describes the task:
### Input:
Normalize a test expression into a statements list.
Statements list are returned as-is.
Expression is packaged as:
if not expr:
return False
### Response:
def _clause(self, pt: parsing.ParserTree) -> [ast.stmt]:
"""Normalize a test expression into a statements list.
Statements list are returned as-is.
Expression is packaged as:
if not expr:
return False
"""
if isinstance(pt, list):
return pt
return [ast.If(ast.UnaryOp(ast.Not(), pt),
[self.__exit_scope()],
[])] |
def _check_hash_view(self):
""" Return infohash if view name refers to a single item, else None.
"""
infohash = None
if self.viewname.startswith('#'):
infohash = self.viewname[1:]
elif len(self.viewname) == 40:
try:
int(self.viewname, 16)
except (TypeError, ValueError):
pass
else:
infohash = self.viewname
return infohash | Return infohash if view name refers to a single item, else None. | Below is the the instruction that describes the task:
### Input:
Return infohash if view name refers to a single item, else None.
### Response:
def _check_hash_view(self):
""" Return infohash if view name refers to a single item, else None.
"""
infohash = None
if self.viewname.startswith('#'):
infohash = self.viewname[1:]
elif len(self.viewname) == 40:
try:
int(self.viewname, 16)
except (TypeError, ValueError):
pass
else:
infohash = self.viewname
return infohash |
def executemany(self, query, args):
"""Run several data against one query
PyMySQL can execute bulkinsert for query like 'INSERT ... VALUES (%s)'.
In other form of queries, just run :meth:`execute` many times.
"""
if not args:
return
m = RE_INSERT_VALUES.match(query)
if m:
q_prefix = m.group(1)
q_values = m.group(2).rstrip()
q_postfix = m.group(3) or ''
assert q_values[0] == '(' and q_values[-1] == ')'
yield self._do_execute_many(q_prefix, q_values, q_postfix, args,
self.max_stmt_length,
self._get_db().encoding)
else:
rows = 0
for arg in args:
yield self.execute(query, arg)
rows += self.rowcount
self.rowcount = rows
raise gen.Return(self.rowcount) | Run several data against one query
PyMySQL can execute bulkinsert for query like 'INSERT ... VALUES (%s)'.
In other form of queries, just run :meth:`execute` many times. | Below is the instruction that describes the task:
### Input:
Run several data against one query
PyMySQL can execute bulkinsert for query like 'INSERT ... VALUES (%s)'.
In other form of queries, just run :meth:`execute` many times.
### Response:
def executemany(self, query, args):
"""Run several data against one query
PyMySQL can execute bulkinsert for query like 'INSERT ... VALUES (%s)'.
In other form of queries, just run :meth:`execute` many times.
"""
if not args:
return
m = RE_INSERT_VALUES.match(query)
if m:
q_prefix = m.group(1)
q_values = m.group(2).rstrip()
q_postfix = m.group(3) or ''
assert q_values[0] == '(' and q_values[-1] == ')'
yield self._do_execute_many(q_prefix, q_values, q_postfix, args,
self.max_stmt_length,
self._get_db().encoding)
else:
rows = 0
for arg in args:
yield self.execute(query, arg)
rows += self.rowcount
self.rowcount = rows
raise gen.Return(self.rowcount) |
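A hedged usage sketch of the bulk-insert path described above; the table, columns, and cursor are invented, and the caller is written as a Tornado coroutine because executemany() here yields its work:
from tornado import gen

@gen.coroutine
def insert_users(cursor):
    # 'INSERT ... VALUES (%s, %s)' matches RE_INSERT_VALUES, so the rows below
    # are packed into a single multi-row INSERT instead of one query per row
    rows = [("alice", 30), ("bob", 25), ("carol", 41)]
    yield cursor.executemany(
        "INSERT INTO users (name, age) VALUES (%s, %s)", rows)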
def hurst_rs(data, nvals=None, fit="RANSAC", debug_plot=False,
debug_data=False, plot_file=None, corrected=True, unbiased=True):
"""
Calculates the Hurst exponent by a standard rescaled range (R/S) approach.
Explanation of Hurst exponent:
The Hurst exponent is a measure for the "long-term memory" of a
time series, meaning the long statistical dependencies in the data that do
not originate from cycles.
It originates from H.E. Hursts observations of the problem of long-term
storage in water reservoirs. If x_i is the discharge of a river in year i
and we observe this discharge for N years, we can calculate the storage
capacity that would be required to keep the discharge steady at its mean
value.
To do so, we first substract the mean over all x_i from the individual
x_i to obtain the departures x'_i from the mean for each year i. As the
excess or deficit in discharge always carrys over from year i to year i+1,
we need to examine the cumulative sum of x'_i, denoted by y_i. This
cumulative sum represents the filling of our hypothetical storage. If the
sum is above 0, we are storing excess discharge from the river, if it is
below zero we have compensated a deficit in discharge by releasing
water from the storage. The range (maximum - minimum) R of y_i therefore
represents the total capacity required for the storage.
Hurst showed that this value follows a steady trend for varying N if it
is normalized by the standard deviation sigma over the x_i. Namely he
obtained the following formula:
R/sigma = (N/2)^K
In this equation, K is called the Hurst exponent. Its value is 0.5 for
white noise, but becomes greater for time series that exhibit some positive
dependency on previous values. For negative dependencies it becomes less
than 0.5.
Explanation of the algorithm:
The rescaled range (R/S) approach is directly derived from Hurst's
definition. The time series of length N is split into non-overlapping
subseries of length n. Then, R and S (S = sigma) are calculated for each
subseries and the mean is taken over all subseries yielding (R/S)_n. This
process is repeated for several lengths n. Finally, the exponent K is
obtained by fitting a straight line to the plot of log((R/S)_n) vs log(n).
  There seems to be no consensus on how to choose the subseries lengths n.
This function therefore leaves the choice to the user. The module provides
some utility functions for "typical" values:
* binary_n: N/2, N/4, N/8, ...
* logarithmic_n: min_n, min_n * f, min_n * f^2, ...
References:
.. [h_1] H. E. Hurst, “The problem of long-term storage in reservoirs,”
International Association of Scientific Hydrology. Bulletin, vol. 1,
no. 3, pp. 13–27, 1956.
.. [h_2] H. E. Hurst, “A suggested statistical model of some time series
which occur in nature,” Nature, vol. 180, p. 494, 1957.
.. [h_3] R. Weron, “Estimating long-range dependence: finite sample
properties and confidence intervals,” Physica A: Statistical Mechanics
and its Applications, vol. 312, no. 1, pp. 285–299, 2002.
Reference Code:
.. [h_a] "hurst" function in R-package "pracma",
url: https://cran.r-project.org/web/packages/pracma/pracma.pdf
Note: Pracma yields several estimates of the Hurst exponent, which
are listed below. Unless otherwise stated they use the divisors
of the length of the sequence as n. The length is reduced by at
most 1% to find the value that has the most divisors.
* The "Simple R/S" estimate is just log((R/S)_n) / log(n) for
n = N.
* The "theoretical Hurst exponent" is the value that would be
expected of an uncorrected rescaled range approach for random
noise of the size of the input data.
* The "empirical Hurst exponent" is the uncorrected Hurst exponent
obtained by the rescaled range approach.
* The "corrected empirical Hurst exponent" is the Anis-Lloyd-Peters
corrected Hurst exponent, but with sqrt(1/2 * pi * n) added to
the (R/S)_n before the log.
* The "corrected R over S Hurst exponent" uses the R-function "lm"
instead of pracmas own "polyfit" and uses n = N/2, N/4, N/8, ...
by successively halving the subsequences (which means that some
subsequences may be one element longer than others). In contrast
to its name it does not use the Anis-Lloyd-Peters correction
factor.
If you want to compare the output of pracma to the output of
nolds, the "empirical hurst exponent" is the only measure that
exactly corresponds to the Hurst measure implemented in nolds
(by choosing corrected=False, fit="poly" and employing the same
strategy for choosing n as the divisors of the (reduced)
sequence length).
.. [h_b] Rafael Weron, "HURST: MATLAB function to compute the Hurst
exponent using R/S Analysis",
url: https://ideas.repec.org/c/wuu/hscode/m11003.html
Note: When the same values for nvals are used and fit is set to
"poly", nolds yields exactly the same results as this
implementation.
.. [h_c] Bill Davidson, "Hurst exponent",
url: http://www.mathworks.com/matlabcentral/fileexchange/9842-hurst-exponent
.. [h_d] Tomaso Aste, "Generalized Hurst exponent",
url: http://de.mathworks.com/matlabcentral/fileexchange/30076-generalized-hurst-exponent
Args:
data (array-like of float):
time series
Kwargs:
nvals (iterable of int):
sizes of subseries to use
(default: logmid_n(total_N, ratio=1/4.0, nsteps=15) , that is 15
logarithmically spaced values in the medium 25% of the logarithmic range)
Generally, the choice for n is a trade-off between the length and the
number of the subsequences that are used for the calculation of the
(R/S)_n. Very low values of n lead to high variance in the ``r`` and ``s``
while very high values may leave too few subsequences that the mean along
them is still meaningful. Logarithmic spacing makes sense, because it
translates to even spacing in the log-log-plot.
fit (str):
the fitting method to use for the line fit, either 'poly' for normal
least squares polynomial fitting or 'RANSAC' for RANSAC-fitting which
is more robust to outliers
debug_plot (boolean):
if True, a simple plot of the final line-fitting step will be shown
debug_data (boolean):
if True, debugging data will be returned alongside the result
plot_file (str):
if debug_plot is True and plot_file is not None, the plot will be saved
under the given file name instead of directly showing it through
``plt.show()``
corrected (boolean):
if True, the Anis-Lloyd-Peters correction factor will be applied to the
output according to the expected value for the individual (R/S)_n
(see [h_3]_)
unbiased (boolean):
if True, the standard deviation based on the unbiased variance
(1/(N-1) instead of 1/N) will be used. This should be the default choice,
since the true mean of the sequences is not known. This parameter should
only be changed to recreate results of other implementations.
Returns:
float:
estimated Hurst exponent K using a rescaled range approach (if K = 0.5
there are no long-range correlations in the data, if K < 0.5 there are
negative long-range correlations, if K > 0.5 there are positive
long-range correlations)
(1d-vector, 1d-vector, list):
only present if debug_data is True: debug data of the form
``(nvals, rsvals, poly)`` where ``nvals`` are the values used for log(n),
``rsvals`` are the corresponding log((R/S)_n) and ``poly`` are the line
coefficients (``[slope, intercept]``)
"""
data = np.asarray(data)
total_N = len(data)
if nvals is None:
# chooses a default value for nvals that will give 15 logarithmically
# spaced datapoints leaning towards the middle of the logarithmic range
# (since both too small and too large n introduce too much variance)
nvals = logmid_n(total_N, ratio=1/4.0, nsteps=15)
# get individual values for (R/S)_n
rsvals = np.array([rs(data, n, unbiased=unbiased) for n in nvals])
# filter NaNs (zeros should not be possible, because if R is 0 then
# S is also zero)
not_nan = np.logical_not(np.isnan(rsvals))
rsvals = rsvals[not_nan]
nvals = np.asarray(nvals)[not_nan]
# it may happen that no rsvals are left (if all values of data are the same)
if len(rsvals) == 0:
poly = [np.nan, np.nan]
if debug_plot:
warnings.warn("Cannot display debug plot, all (R/S)_n are NaN")
else:
# fit a line to the logarithm of the obtained (R/S)_n
xvals = np.log(nvals)
yvals = np.log(rsvals)
if corrected:
yvals -= np.log([expected_rs(n) for n in nvals])
poly = poly_fit(xvals, yvals, 1, fit=fit)
if debug_plot:
plot_reg(xvals, yvals, poly, "log(n)", "log((R/S)_n)",
fname=plot_file)
# account for correction if necessary
h = poly[0] + 0.5 if corrected else poly[0]
# return line slope (+ correction) as hurst exponent
if debug_data:
return (h, (np.log(nvals), np.log(rsvals), poly))
else:
return h | Calculates the Hurst exponent by a standard rescaled range (R/S) approach.
Explanation of Hurst exponent:
The Hurst exponent is a measure for the "long-term memory" of a
time series, meaning the long statistical dependencies in the data that do
not originate from cycles.
  It originates from H.E. Hurst's observations of the problem of long-term
storage in water reservoirs. If x_i is the discharge of a river in year i
and we observe this discharge for N years, we can calculate the storage
capacity that would be required to keep the discharge steady at its mean
value.
  To do so, we first subtract the mean over all x_i from the individual
  x_i to obtain the departures x'_i from the mean for each year i. As the
  excess or deficit in discharge always carries over from year i to year i+1,
we need to examine the cumulative sum of x'_i, denoted by y_i. This
cumulative sum represents the filling of our hypothetical storage. If the
sum is above 0, we are storing excess discharge from the river, if it is
below zero we have compensated a deficit in discharge by releasing
water from the storage. The range (maximum - minimum) R of y_i therefore
represents the total capacity required for the storage.
Hurst showed that this value follows a steady trend for varying N if it
is normalized by the standard deviation sigma over the x_i. Namely he
obtained the following formula:
R/sigma = (N/2)^K
In this equation, K is called the Hurst exponent. Its value is 0.5 for
white noise, but becomes greater for time series that exhibit some positive
dependency on previous values. For negative dependencies it becomes less
than 0.5.
Explanation of the algorithm:
The rescaled range (R/S) approach is directly derived from Hurst's
definition. The time series of length N is split into non-overlapping
subseries of length n. Then, R and S (S = sigma) are calculated for each
subseries and the mean is taken over all subseries yielding (R/S)_n. This
process is repeated for several lengths n. Finally, the exponent K is
obtained by fitting a straight line to the plot of log((R/S)_n) vs log(n).
  There seems to be no consensus on how to choose the subseries lengths n.
This function therefore leaves the choice to the user. The module provides
some utility functions for "typical" values:
* binary_n: N/2, N/4, N/8, ...
* logarithmic_n: min_n, min_n * f, min_n * f^2, ...
References:
.. [h_1] H. E. Hurst, “The problem of long-term storage in reservoirs,”
International Association of Scientific Hydrology. Bulletin, vol. 1,
no. 3, pp. 13–27, 1956.
.. [h_2] H. E. Hurst, “A suggested statistical model of some time series
which occur in nature,” Nature, vol. 180, p. 494, 1957.
.. [h_3] R. Weron, “Estimating long-range dependence: finite sample
properties and confidence intervals,” Physica A: Statistical Mechanics
and its Applications, vol. 312, no. 1, pp. 285–299, 2002.
Reference Code:
.. [h_a] "hurst" function in R-package "pracma",
url: https://cran.r-project.org/web/packages/pracma/pracma.pdf
Note: Pracma yields several estimates of the Hurst exponent, which
are listed below. Unless otherwise stated they use the divisors
of the length of the sequence as n. The length is reduced by at
most 1% to find the value that has the most divisors.
* The "Simple R/S" estimate is just log((R/S)_n) / log(n) for
n = N.
* The "theoretical Hurst exponent" is the value that would be
expected of an uncorrected rescaled range approach for random
noise of the size of the input data.
* The "empirical Hurst exponent" is the uncorrected Hurst exponent
obtained by the rescaled range approach.
* The "corrected empirical Hurst exponent" is the Anis-Lloyd-Peters
corrected Hurst exponent, but with sqrt(1/2 * pi * n) added to
the (R/S)_n before the log.
* The "corrected R over S Hurst exponent" uses the R-function "lm"
instead of pracmas own "polyfit" and uses n = N/2, N/4, N/8, ...
by successively halving the subsequences (which means that some
subsequences may be one element longer than others). In contrast
to its name it does not use the Anis-Lloyd-Peters correction
factor.
If you want to compare the output of pracma to the output of
nolds, the "empirical hurst exponent" is the only measure that
exactly corresponds to the Hurst measure implemented in nolds
(by choosing corrected=False, fit="poly" and employing the same
strategy for choosing n as the divisors of the (reduced)
sequence length).
.. [h_b] Rafael Weron, "HURST: MATLAB function to compute the Hurst
exponent using R/S Analysis",
url: https://ideas.repec.org/c/wuu/hscode/m11003.html
Note: When the same values for nvals are used and fit is set to
"poly", nolds yields exactly the same results as this
implementation.
.. [h_c] Bill Davidson, "Hurst exponent",
url: http://www.mathworks.com/matlabcentral/fileexchange/9842-hurst-exponent
.. [h_d] Tomaso Aste, "Generalized Hurst exponent",
url: http://de.mathworks.com/matlabcentral/fileexchange/30076-generalized-hurst-exponent
Args:
data (array-like of float):
time series
Kwargs:
nvals (iterable of int):
sizes of subseries to use
(default: logmid_n(total_N, ratio=1/4.0, nsteps=15) , that is 15
logarithmically spaced values in the medium 25% of the logarithmic range)
Generally, the choice for n is a trade-off between the length and the
number of the subsequences that are used for the calculation of the
(R/S)_n. Very low values of n lead to high variance in the ``r`` and ``s``
while very high values may leave too few subsequences that the mean along
them is still meaningful. Logarithmic spacing makes sense, because it
translates to even spacing in the log-log-plot.
fit (str):
the fitting method to use for the line fit, either 'poly' for normal
least squares polynomial fitting or 'RANSAC' for RANSAC-fitting which
is more robust to outliers
debug_plot (boolean):
if True, a simple plot of the final line-fitting step will be shown
debug_data (boolean):
if True, debugging data will be returned alongside the result
plot_file (str):
if debug_plot is True and plot_file is not None, the plot will be saved
under the given file name instead of directly showing it through
``plt.show()``
corrected (boolean):
if True, the Anis-Lloyd-Peters correction factor will be applied to the
output according to the expected value for the individual (R/S)_n
(see [h_3]_)
unbiased (boolean):
if True, the standard deviation based on the unbiased variance
(1/(N-1) instead of 1/N) will be used. This should be the default choice,
since the true mean of the sequences is not known. This parameter should
only be changed to recreate results of other implementations.
Returns:
float:
estimated Hurst exponent K using a rescaled range approach (if K = 0.5
there are no long-range correlations in the data, if K < 0.5 there are
negative long-range correlations, if K > 0.5 there are positive
long-range correlations)
(1d-vector, 1d-vector, list):
only present if debug_data is True: debug data of the form
``(nvals, rsvals, poly)`` where ``nvals`` are the values used for log(n),
``rsvals`` are the corresponding log((R/S)_n) and ``poly`` are the line
coefficients (``[slope, intercept]``) | Below is the the instruction that describes the task:
### Input:
Calculates the Hurst exponent by a standard rescaled range (R/S) approach.
Explanation of Hurst exponent:
The Hurst exponent is a measure for the "long-term memory" of a
time series, meaning the long statistical dependencies in the data that do
not originate from cycles.
  It originates from H.E. Hurst's observations of the problem of long-term
storage in water reservoirs. If x_i is the discharge of a river in year i
and we observe this discharge for N years, we can calculate the storage
capacity that would be required to keep the discharge steady at its mean
value.
  To do so, we first subtract the mean over all x_i from the individual
  x_i to obtain the departures x'_i from the mean for each year i. As the
  excess or deficit in discharge always carries over from year i to year i+1,
we need to examine the cumulative sum of x'_i, denoted by y_i. This
cumulative sum represents the filling of our hypothetical storage. If the
sum is above 0, we are storing excess discharge from the river, if it is
below zero we have compensated a deficit in discharge by releasing
water from the storage. The range (maximum - minimum) R of y_i therefore
represents the total capacity required for the storage.
Hurst showed that this value follows a steady trend for varying N if it
is normalized by the standard deviation sigma over the x_i. Namely he
obtained the following formula:
R/sigma = (N/2)^K
In this equation, K is called the Hurst exponent. Its value is 0.5 for
white noise, but becomes greater for time series that exhibit some positive
dependency on previous values. For negative dependencies it becomes less
than 0.5.
Explanation of the algorithm:
The rescaled range (R/S) approach is directly derived from Hurst's
definition. The time series of length N is split into non-overlapping
subseries of length n. Then, R and S (S = sigma) are calculated for each
subseries and the mean is taken over all subseries yielding (R/S)_n. This
process is repeated for several lengths n. Finally, the exponent K is
obtained by fitting a straight line to the plot of log((R/S)_n) vs log(n).
  There seems to be no consensus on how to choose the subseries lengths n.
This function therefore leaves the choice to the user. The module provides
some utility functions for "typical" values:
* binary_n: N/2, N/4, N/8, ...
* logarithmic_n: min_n, min_n * f, min_n * f^2, ...
References:
.. [h_1] H. E. Hurst, “The problem of long-term storage in reservoirs,”
International Association of Scientific Hydrology. Bulletin, vol. 1,
no. 3, pp. 13–27, 1956.
.. [h_2] H. E. Hurst, “A suggested statistical model of some time series
which occur in nature,” Nature, vol. 180, p. 494, 1957.
.. [h_3] R. Weron, “Estimating long-range dependence: finite sample
properties and confidence intervals,” Physica A: Statistical Mechanics
and its Applications, vol. 312, no. 1, pp. 285–299, 2002.
Reference Code:
.. [h_a] "hurst" function in R-package "pracma",
url: https://cran.r-project.org/web/packages/pracma/pracma.pdf
Note: Pracma yields several estimates of the Hurst exponent, which
are listed below. Unless otherwise stated they use the divisors
of the length of the sequence as n. The length is reduced by at
most 1% to find the value that has the most divisors.
* The "Simple R/S" estimate is just log((R/S)_n) / log(n) for
n = N.
* The "theoretical Hurst exponent" is the value that would be
expected of an uncorrected rescaled range approach for random
noise of the size of the input data.
* The "empirical Hurst exponent" is the uncorrected Hurst exponent
obtained by the rescaled range approach.
* The "corrected empirical Hurst exponent" is the Anis-Lloyd-Peters
corrected Hurst exponent, but with sqrt(1/2 * pi * n) added to
the (R/S)_n before the log.
* The "corrected R over S Hurst exponent" uses the R-function "lm"
instead of pracmas own "polyfit" and uses n = N/2, N/4, N/8, ...
by successively halving the subsequences (which means that some
subsequences may be one element longer than others). In contrast
to its name it does not use the Anis-Lloyd-Peters correction
factor.
If you want to compare the output of pracma to the output of
nolds, the "empirical hurst exponent" is the only measure that
exactly corresponds to the Hurst measure implemented in nolds
(by choosing corrected=False, fit="poly" and employing the same
strategy for choosing n as the divisors of the (reduced)
sequence length).
.. [h_b] Rafael Weron, "HURST: MATLAB function to compute the Hurst
exponent using R/S Analysis",
url: https://ideas.repec.org/c/wuu/hscode/m11003.html
Note: When the same values for nvals are used and fit is set to
"poly", nolds yields exactly the same results as this
implementation.
.. [h_c] Bill Davidson, "Hurst exponent",
url: http://www.mathworks.com/matlabcentral/fileexchange/9842-hurst-exponent
.. [h_d] Tomaso Aste, "Generalized Hurst exponent",
url: http://de.mathworks.com/matlabcentral/fileexchange/30076-generalized-hurst-exponent
Args:
data (array-like of float):
time series
Kwargs:
nvals (iterable of int):
sizes of subseries to use
(default: logmid_n(total_N, ratio=1/4.0, nsteps=15) , that is 15
logarithmically spaced values in the medium 25% of the logarithmic range)
Generally, the choice for n is a trade-off between the length and the
number of the subsequences that are used for the calculation of the
(R/S)_n. Very low values of n lead to high variance in the ``r`` and ``s``
while very high values may leave too few subsequences that the mean along
them is still meaningful. Logarithmic spacing makes sense, because it
translates to even spacing in the log-log-plot.
fit (str):
the fitting method to use for the line fit, either 'poly' for normal
least squares polynomial fitting or 'RANSAC' for RANSAC-fitting which
is more robust to outliers
debug_plot (boolean):
if True, a simple plot of the final line-fitting step will be shown
debug_data (boolean):
if True, debugging data will be returned alongside the result
plot_file (str):
if debug_plot is True and plot_file is not None, the plot will be saved
under the given file name instead of directly showing it through
``plt.show()``
corrected (boolean):
if True, the Anis-Lloyd-Peters correction factor will be applied to the
output according to the expected value for the individual (R/S)_n
(see [h_3]_)
unbiased (boolean):
if True, the standard deviation based on the unbiased variance
(1/(N-1) instead of 1/N) will be used. This should be the default choice,
since the true mean of the sequences is not known. This parameter should
only be changed to recreate results of other implementations.
Returns:
float:
estimated Hurst exponent K using a rescaled range approach (if K = 0.5
there are no long-range correlations in the data, if K < 0.5 there are
negative long-range correlations, if K > 0.5 there are positive
long-range correlations)
(1d-vector, 1d-vector, list):
only present if debug_data is True: debug data of the form
``(nvals, rsvals, poly)`` where ``nvals`` are the values used for log(n),
``rsvals`` are the corresponding log((R/S)_n) and ``poly`` are the line
coefficients (``[slope, intercept]``)
### Response:
def hurst_rs(data, nvals=None, fit="RANSAC", debug_plot=False,
debug_data=False, plot_file=None, corrected=True, unbiased=True):
"""
Calculates the Hurst exponent by a standard rescaled range (R/S) approach.
Explanation of Hurst exponent:
The Hurst exponent is a measure for the "long-term memory" of a
time series, meaning the long statistical dependencies in the data that do
not originate from cycles.
  It originates from H.E. Hurst's observations of the problem of long-term
storage in water reservoirs. If x_i is the discharge of a river in year i
and we observe this discharge for N years, we can calculate the storage
capacity that would be required to keep the discharge steady at its mean
value.
  To do so, we first subtract the mean over all x_i from the individual
  x_i to obtain the departures x'_i from the mean for each year i. As the
  excess or deficit in discharge always carries over from year i to year i+1,
we need to examine the cumulative sum of x'_i, denoted by y_i. This
cumulative sum represents the filling of our hypothetical storage. If the
sum is above 0, we are storing excess discharge from the river, if it is
below zero we have compensated a deficit in discharge by releasing
water from the storage. The range (maximum - minimum) R of y_i therefore
represents the total capacity required for the storage.
Hurst showed that this value follows a steady trend for varying N if it
is normalized by the standard deviation sigma over the x_i. Namely he
obtained the following formula:
R/sigma = (N/2)^K
In this equation, K is called the Hurst exponent. Its value is 0.5 for
white noise, but becomes greater for time series that exhibit some positive
dependency on previous values. For negative dependencies it becomes less
than 0.5.
Explanation of the algorithm:
The rescaled range (R/S) approach is directly derived from Hurst's
definition. The time series of length N is split into non-overlapping
subseries of length n. Then, R and S (S = sigma) are calculated for each
subseries and the mean is taken over all subseries yielding (R/S)_n. This
process is repeated for several lengths n. Finally, the exponent K is
obtained by fitting a straight line to the plot of log((R/S)_n) vs log(n).
  There seems to be no consensus on how to choose the subseries lengths n.
This function therefore leaves the choice to the user. The module provides
some utility functions for "typical" values:
* binary_n: N/2, N/4, N/8, ...
* logarithmic_n: min_n, min_n * f, min_n * f^2, ...
References:
.. [h_1] H. E. Hurst, “The problem of long-term storage in reservoirs,”
International Association of Scientific Hydrology. Bulletin, vol. 1,
no. 3, pp. 13–27, 1956.
.. [h_2] H. E. Hurst, “A suggested statistical model of some time series
which occur in nature,” Nature, vol. 180, p. 494, 1957.
.. [h_3] R. Weron, “Estimating long-range dependence: finite sample
properties and confidence intervals,” Physica A: Statistical Mechanics
and its Applications, vol. 312, no. 1, pp. 285–299, 2002.
Reference Code:
.. [h_a] "hurst" function in R-package "pracma",
url: https://cran.r-project.org/web/packages/pracma/pracma.pdf
Note: Pracma yields several estimates of the Hurst exponent, which
are listed below. Unless otherwise stated they use the divisors
of the length of the sequence as n. The length is reduced by at
most 1% to find the value that has the most divisors.
* The "Simple R/S" estimate is just log((R/S)_n) / log(n) for
n = N.
* The "theoretical Hurst exponent" is the value that would be
expected of an uncorrected rescaled range approach for random
noise of the size of the input data.
* The "empirical Hurst exponent" is the uncorrected Hurst exponent
obtained by the rescaled range approach.
* The "corrected empirical Hurst exponent" is the Anis-Lloyd-Peters
corrected Hurst exponent, but with sqrt(1/2 * pi * n) added to
the (R/S)_n before the log.
* The "corrected R over S Hurst exponent" uses the R-function "lm"
instead of pracmas own "polyfit" and uses n = N/2, N/4, N/8, ...
by successively halving the subsequences (which means that some
subsequences may be one element longer than others). In contrast
to its name it does not use the Anis-Lloyd-Peters correction
factor.
If you want to compare the output of pracma to the output of
nolds, the "empirical hurst exponent" is the only measure that
exactly corresponds to the Hurst measure implemented in nolds
(by choosing corrected=False, fit="poly" and employing the same
strategy for choosing n as the divisors of the (reduced)
sequence length).
.. [h_b] Rafael Weron, "HURST: MATLAB function to compute the Hurst
exponent using R/S Analysis",
url: https://ideas.repec.org/c/wuu/hscode/m11003.html
Note: When the same values for nvals are used and fit is set to
"poly", nolds yields exactly the same results as this
implementation.
.. [h_c] Bill Davidson, "Hurst exponent",
url: http://www.mathworks.com/matlabcentral/fileexchange/9842-hurst-exponent
.. [h_d] Tomaso Aste, "Generalized Hurst exponent",
url: http://de.mathworks.com/matlabcentral/fileexchange/30076-generalized-hurst-exponent
Args:
data (array-like of float):
time series
Kwargs:
nvals (iterable of int):
sizes of subseries to use
(default: logmid_n(total_N, ratio=1/4.0, nsteps=15) , that is 15
logarithmically spaced values in the medium 25% of the logarithmic range)
Generally, the choice for n is a trade-off between the length and the
number of the subsequences that are used for the calculation of the
(R/S)_n. Very low values of n lead to high variance in the ``r`` and ``s``
while very high values may leave too few subsequences that the mean along
them is still meaningful. Logarithmic spacing makes sense, because it
translates to even spacing in the log-log-plot.
fit (str):
the fitting method to use for the line fit, either 'poly' for normal
least squares polynomial fitting or 'RANSAC' for RANSAC-fitting which
is more robust to outliers
debug_plot (boolean):
if True, a simple plot of the final line-fitting step will be shown
debug_data (boolean):
if True, debugging data will be returned alongside the result
plot_file (str):
if debug_plot is True and plot_file is not None, the plot will be saved
under the given file name instead of directly showing it through
``plt.show()``
corrected (boolean):
if True, the Anis-Lloyd-Peters correction factor will be applied to the
output according to the expected value for the individual (R/S)_n
(see [h_3]_)
unbiased (boolean):
if True, the standard deviation based on the unbiased variance
(1/(N-1) instead of 1/N) will be used. This should be the default choice,
since the true mean of the sequences is not known. This parameter should
only be changed to recreate results of other implementations.
Returns:
float:
estimated Hurst exponent K using a rescaled range approach (if K = 0.5
there are no long-range correlations in the data, if K < 0.5 there are
negative long-range correlations, if K > 0.5 there are positive
long-range correlations)
(1d-vector, 1d-vector, list):
only present if debug_data is True: debug data of the form
``(nvals, rsvals, poly)`` where ``nvals`` are the values used for log(n),
``rsvals`` are the corresponding log((R/S)_n) and ``poly`` are the line
coefficients (``[slope, intercept]``)
"""
data = np.asarray(data)
total_N = len(data)
if nvals is None:
# chooses a default value for nvals that will give 15 logarithmically
# spaced datapoints leaning towards the middle of the logarithmic range
# (since both too small and too large n introduce too much variance)
nvals = logmid_n(total_N, ratio=1/4.0, nsteps=15)
# get individual values for (R/S)_n
rsvals = np.array([rs(data, n, unbiased=unbiased) for n in nvals])
# filter NaNs (zeros should not be possible, because if R is 0 then
# S is also zero)
not_nan = np.logical_not(np.isnan(rsvals))
rsvals = rsvals[not_nan]
nvals = np.asarray(nvals)[not_nan]
# it may happen that no rsvals are left (if all values of data are the same)
if len(rsvals) == 0:
poly = [np.nan, np.nan]
if debug_plot:
warnings.warn("Cannot display debug plot, all (R/S)_n are NaN")
else:
# fit a line to the logarithm of the obtained (R/S)_n
xvals = np.log(nvals)
yvals = np.log(rsvals)
if corrected:
yvals -= np.log([expected_rs(n) for n in nvals])
poly = poly_fit(xvals, yvals, 1, fit=fit)
if debug_plot:
plot_reg(xvals, yvals, poly, "log(n)", "log((R/S)_n)",
fname=plot_file)
# account for correction if necessary
h = poly[0] + 0.5 if corrected else poly[0]
# return line slope (+ correction) as hurst exponent
if debug_data:
return (h, (np.log(nvals), np.log(rsvals), poly))
else:
return h |
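A quick sanity-check sketch for the function above, assuming the helpers it calls (`logmid_n`, `rs`, `expected_rs`, `poly_fit`, `plot_reg`) from the same module are available; white noise should give an exponent near 0.5:

```python
import numpy as np

np.random.seed(42)
white_noise = np.random.normal(size=5000)
k = hurst_rs(white_noise, fit="poly")
print(k)  # expected to land close to 0.5 for uncorrelated data
```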
def wrap(self, data, many):
"""Wrap response in envelope."""
if not many:
return data
else:
data = {'contents': data}
bucket = self.context.get('bucket')
if bucket:
data.update(BucketSchema().dump(bucket).data)
return data | Wrap response in envelope. | Below is the the instruction that describes the task:
### Input:
Wrap response in envelope.
### Response:
def wrap(self, data, many):
"""Wrap response in envelope."""
if not many:
return data
else:
data = {'contents': data}
bucket = self.context.get('bucket')
if bucket:
data.update(BucketSchema().dump(bucket).data)
return data |
def remove(name, path):
'''
Removes installed alternative for defined <name> and <path>
or fallback to default alternative, if some defined before.
name
is the master name for this link group
(e.g. pager)
path
is the location of one of the alternative target files.
(e.g. /usr/bin/less)
'''
ret = {'name': name,
'path': path,
'result': True,
'changes': {},
'comment': ''}
isinstalled = __salt__['alternatives.check_exists'](name, path)
if isinstalled:
if __opts__['test']:
ret['comment'] = ('Alternative for {0} will be removed'
.format(name))
ret['result'] = None
return ret
__salt__['alternatives.remove'](name, path)
current = __salt__['alternatives.show_current'](name)
if current:
ret['result'] = True
ret['comment'] = (
'Alternative for {0} removed. Falling back to path {1}'
).format(name, current)
ret['changes'] = {'path': current}
return ret
ret['comment'] = 'Alternative for {0} removed'.format(name)
ret['changes'] = {}
return ret
current = __salt__['alternatives.show_current'](name)
if current:
ret['result'] = True
ret['comment'] = (
            'Alternative for {0} is set to its default path {1}'
).format(name, current)
return ret
ret['result'] = False
ret['comment'] = (
'Alternative for {0} doesn\'t exist'
).format(name)
return ret | Removes installed alternative for defined <name> and <path>
or fallback to default alternative, if some defined before.
name
is the master name for this link group
(e.g. pager)
path
is the location of one of the alternative target files.
(e.g. /usr/bin/less) | Below is the the instruction that describes the task:
### Input:
Removes installed alternative for defined <name> and <path>
or fallback to default alternative, if some defined before.
name
is the master name for this link group
(e.g. pager)
path
is the location of one of the alternative target files.
(e.g. /usr/bin/less)
### Response:
def remove(name, path):
'''
Removes installed alternative for defined <name> and <path>
or fallback to default alternative, if some defined before.
name
is the master name for this link group
(e.g. pager)
path
is the location of one of the alternative target files.
(e.g. /usr/bin/less)
'''
ret = {'name': name,
'path': path,
'result': True,
'changes': {},
'comment': ''}
isinstalled = __salt__['alternatives.check_exists'](name, path)
if isinstalled:
if __opts__['test']:
ret['comment'] = ('Alternative for {0} will be removed'
.format(name))
ret['result'] = None
return ret
__salt__['alternatives.remove'](name, path)
current = __salt__['alternatives.show_current'](name)
if current:
ret['result'] = True
ret['comment'] = (
'Alternative for {0} removed. Falling back to path {1}'
).format(name, current)
ret['changes'] = {'path': current}
return ret
ret['comment'] = 'Alternative for {0} removed'.format(name)
ret['changes'] = {}
return ret
current = __salt__['alternatives.show_current'](name)
if current:
ret['result'] = True
ret['comment'] = (
            'Alternative for {0} is set to its default path {1}'
).format(name, current)
return ret
ret['result'] = False
ret['comment'] = (
'Alternative for {0} doesn\'t exist'
).format(name)
return ret |
def remove_field(self, model, field):
"""Ran when a field is removed from a model."""
for keys in self._iterate_uniqueness_keys(field):
self._drop_hstore_unique(
model,
field,
keys
) | Ran when a field is removed from a model. | Below is the the instruction that describes the task:
### Input:
Ran when a field is removed from a model.
### Response:
def remove_field(self, model, field):
"""Ran when a field is removed from a model."""
for keys in self._iterate_uniqueness_keys(field):
self._drop_hstore_unique(
model,
field,
keys
) |
def tree():
"""Return a tree of tuples representing the logger layout.
Each tuple looks like ``('logger-name', <Logger>, [...])`` where the
third element is a list of zero or more child tuples that share the
same layout.
"""
root = ('', logging.root, [])
nodes = {}
items = list(logging.root.manager.loggerDict.items()) # for Python 2 and 3
items.sort()
for name, logger in items:
nodes[name] = node = (name, logger, [])
i = name.rfind('.', 0, len(name) - 1) # same formula used in `logging`
if i == -1:
parent = root
else:
parent = nodes[name[:i]]
parent[2].append(node)
return root | Return a tree of tuples representing the logger layout.
Each tuple looks like ``('logger-name', <Logger>, [...])`` where the
third element is a list of zero or more child tuples that share the
same layout. | Below is the the instruction that describes the task:
### Input:
Return a tree of tuples representing the logger layout.
Each tuple looks like ``('logger-name', <Logger>, [...])`` where the
third element is a list of zero or more child tuples that share the
same layout.
### Response:
def tree():
"""Return a tree of tuples representing the logger layout.
Each tuple looks like ``('logger-name', <Logger>, [...])`` where the
third element is a list of zero or more child tuples that share the
same layout.
"""
root = ('', logging.root, [])
nodes = {}
items = list(logging.root.manager.loggerDict.items()) # for Python 2 and 3
items.sort()
for name, logger in items:
nodes[name] = node = (name, logger, [])
i = name.rfind('.', 0, len(name) - 1) # same formula used in `logging`
if i == -1:
parent = root
else:
parent = nodes[name[:i]]
parent[2].append(node)
return root |
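A small sketch of how the returned structure can be walked to print the logger hierarchy (the printing helper is illustrative, not part of the source):

```python
import logging

logging.getLogger("app.db")
logging.getLogger("app.web")

def print_tree(node, indent=0):
    name, logger, children = node
    print(" " * indent + (name or "<root>"))
    for child in children:
        print_tree(child, indent + 2)

print_tree(tree())
# <root>
#   app
#     app.db
#     app.web
```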
def safe_decode(s):
"""Safely decodes a binary string to unicode"""
if isinstance(s, unicode):
return s
elif isinstance(s, bytes):
return s.decode(defenc, 'surrogateescape')
elif s is not None:
raise TypeError('Expected bytes or text, but got %r' % (s,)) | Safely decodes a binary string to unicode | Below is the the instruction that describes the task:
### Input:
Safely decodes a binary string to unicode
### Response:
def safe_decode(s):
"""Safely decodes a binary string to unicode"""
if isinstance(s, unicode):
return s
elif isinstance(s, bytes):
return s.decode(defenc, 'surrogateescape')
elif s is not None:
raise TypeError('Expected bytes or text, but got %r' % (s,)) |
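A usage sketch; `defenc` and `unicode` are compat names defined elsewhere in the package, so they are stubbed here only to make the example self-contained (assumed values, not from the source):

```python
defenc = "utf-8"   # assumed default encoding
unicode = str      # Python 3 alias used by the compat layer

print(safe_decode(b"caf\xc3\xa9"))   # -> 'café' (bytes decoded)
print(safe_decode("already text"))   # -> 'already text' (returned unchanged)
print(safe_decode(None))             # -> None (falls through without raising)
```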
def add_manager_view(request):
''' View to add a new manager position. Restricted to superadmins and presidents. '''
form = ManagerForm(request.POST or None)
if form.is_valid():
manager = form.save()
messages.add_message(request, messages.SUCCESS,
MESSAGES['MANAGER_ADDED'].format(managerTitle=manager.title))
return HttpResponseRedirect(reverse('managers:add_manager'))
return render_to_response('edit_manager.html', {
'page_name': "Admin - Add Manager",
'managerset': Manager.objects.all(),
'form': form,
}, context_instance=RequestContext(request)) | View to add a new manager position. Restricted to superadmins and presidents. | Below is the the instruction that describes the task:
### Input:
View to add a new manager position. Restricted to superadmins and presidents.
### Response:
def add_manager_view(request):
''' View to add a new manager position. Restricted to superadmins and presidents. '''
form = ManagerForm(request.POST or None)
if form.is_valid():
manager = form.save()
messages.add_message(request, messages.SUCCESS,
MESSAGES['MANAGER_ADDED'].format(managerTitle=manager.title))
return HttpResponseRedirect(reverse('managers:add_manager'))
return render_to_response('edit_manager.html', {
'page_name': "Admin - Add Manager",
'managerset': Manager.objects.all(),
'form': form,
}, context_instance=RequestContext(request)) |
def ElemMatch(q, *conditions):
"""
The ElemMatch operator matches documents that contain an array field with at
least one element that matches all the specified query criteria.
"""
new_condition = {}
for condition in conditions:
deep_merge(condition.to_dict(), new_condition)
return Condition(q._path, new_condition, '$elemMatch') | The ElemMatch operator matches documents that contain an array field with at
least one element that matches all the specified query criteria. | Below is the the instruction that describes the task:
### Input:
The ElemMatch operator matches documents that contain an array field with at
least one element that matches all the specified query criteria.
### Response:
def ElemMatch(q, *conditions):
"""
The ElemMatch operator matches documents that contain an array field with at
least one element that matches all the specified query criteria.
"""
new_condition = {}
for condition in conditions:
deep_merge(condition.to_dict(), new_condition)
return Condition(q._path, new_condition, '$elemMatch') |
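For reference, the MongoDB filter shape that `$elemMatch` corresponds to looks like the raw (pymongo-style) document below; this illustrates the operator itself rather than this library's query-builder API:

```python
# Match documents whose `results` array contains at least one element
# that satisfies *both* criteria at once.
filter_doc = {"results": {"$elemMatch": {"$gte": 80, "$lt": 85}}}
```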
def mesh_multiplane(mesh,
plane_origin,
plane_normal,
heights):
"""
A utility function for slicing a mesh by multiple
parallel planes, which caches the dot product operation.
Parameters
-------------
mesh : trimesh.Trimesh
Geometry to be sliced by planes
plane_normal : (3,) float
Normal vector of plane
plane_origin : (3,) float
Point on a plane
heights : (m,) float
Offset distances from plane to slice at
Returns
--------------
lines : (m,) sequence of (n, 2, 2) float
Lines in space for m planes
to_3D : (m, 4, 4) float
Transform to move each section back to 3D
face_index : (m,) sequence of (n,) int
Indexes of mesh.faces for each segment
"""
# check input plane
plane_normal = util.unitize(plane_normal)
plane_origin = np.asanyarray(plane_origin,
dtype=np.float64)
heights = np.asanyarray(heights, dtype=np.float64)
# dot product of every vertex with plane
vertex_dots = np.dot(plane_normal,
(mesh.vertices - plane_origin).T)
# reconstruct transforms for each 2D section
base_transform = geometry.plane_transform(origin=plane_origin,
normal=plane_normal)
base_transform = np.linalg.inv(base_transform)
# alter translation Z inside loop
translation = np.eye(4)
# store results
transforms = []
face_index = []
segments = []
# loop through user specified heights
for height in heights:
# offset the origin by the height
new_origin = plane_origin + (plane_normal * height)
# offset the dot products by height and index by faces
new_dots = (vertex_dots - height)[mesh.faces]
# run the intersection with the cached dot products
lines, index = mesh_plane(mesh=mesh,
plane_origin=new_origin,
plane_normal=plane_normal,
return_faces=True,
cached_dots=new_dots)
# get the transforms to 3D space and back
translation[2, 3] = height
to_3D = np.dot(base_transform, translation)
to_2D = np.linalg.inv(to_3D)
transforms.append(to_3D)
# transform points to 2D frame
lines_2D = transformations.transform_points(
lines.reshape((-1, 3)),
to_2D)
# if we didn't screw up the transform all
# of the Z values should be zero
assert np.allclose(lines_2D[:, 2], 0.0)
# reshape back in to lines and discard Z
lines_2D = lines_2D[:, :2].reshape((-1, 2, 2))
# store (n, 2, 2) float lines
segments.append(lines_2D)
# store (n,) int indexes of mesh.faces
        face_index.append(index)
# (n, 4, 4) transforms from 2D to 3D
transforms = np.array(transforms, dtype=np.float64)
return segments, transforms, face_index | A utility function for slicing a mesh by multiple
parallel planes, which caches the dot product operation.
Parameters
-------------
mesh : trimesh.Trimesh
Geometry to be sliced by planes
plane_normal : (3,) float
Normal vector of plane
plane_origin : (3,) float
Point on a plane
heights : (m,) float
Offset distances from plane to slice at
Returns
--------------
lines : (m,) sequence of (n, 2, 2) float
Lines in space for m planes
to_3D : (m, 4, 4) float
Transform to move each section back to 3D
face_index : (m,) sequence of (n,) int
Indexes of mesh.faces for each segment | Below is the the instruction that describes the task:
### Input:
A utility function for slicing a mesh by multiple
parallel planes, which caches the dot product operation.
Parameters
-------------
mesh : trimesh.Trimesh
Geometry to be sliced by planes
plane_normal : (3,) float
Normal vector of plane
plane_origin : (3,) float
Point on a plane
heights : (m,) float
Offset distances from plane to slice at
Returns
--------------
lines : (m,) sequence of (n, 2, 2) float
Lines in space for m planes
to_3D : (m, 4, 4) float
Transform to move each section back to 3D
face_index : (m,) sequence of (n,) int
Indexes of mesh.faces for each segment
### Response:
def mesh_multiplane(mesh,
plane_origin,
plane_normal,
heights):
"""
A utility function for slicing a mesh by multiple
parallel planes, which caches the dot product operation.
Parameters
-------------
mesh : trimesh.Trimesh
Geometry to be sliced by planes
plane_normal : (3,) float
Normal vector of plane
plane_origin : (3,) float
Point on a plane
heights : (m,) float
Offset distances from plane to slice at
Returns
--------------
lines : (m,) sequence of (n, 2, 2) float
Lines in space for m planes
to_3D : (m, 4, 4) float
Transform to move each section back to 3D
face_index : (m,) sequence of (n,) int
Indexes of mesh.faces for each segment
"""
# check input plane
plane_normal = util.unitize(plane_normal)
plane_origin = np.asanyarray(plane_origin,
dtype=np.float64)
heights = np.asanyarray(heights, dtype=np.float64)
# dot product of every vertex with plane
vertex_dots = np.dot(plane_normal,
(mesh.vertices - plane_origin).T)
# reconstruct transforms for each 2D section
base_transform = geometry.plane_transform(origin=plane_origin,
normal=plane_normal)
base_transform = np.linalg.inv(base_transform)
# alter translation Z inside loop
translation = np.eye(4)
# store results
transforms = []
face_index = []
segments = []
# loop through user specified heights
for height in heights:
# offset the origin by the height
new_origin = plane_origin + (plane_normal * height)
# offset the dot products by height and index by faces
new_dots = (vertex_dots - height)[mesh.faces]
# run the intersection with the cached dot products
lines, index = mesh_plane(mesh=mesh,
plane_origin=new_origin,
plane_normal=plane_normal,
return_faces=True,
cached_dots=new_dots)
# get the transforms to 3D space and back
translation[2, 3] = height
to_3D = np.dot(base_transform, translation)
to_2D = np.linalg.inv(to_3D)
transforms.append(to_3D)
# transform points to 2D frame
lines_2D = transformations.transform_points(
lines.reshape((-1, 3)),
to_2D)
# if we didn't screw up the transform all
# of the Z values should be zero
assert np.allclose(lines_2D[:, 2], 0.0)
# reshape back in to lines and discard Z
lines_2D = lines_2D[:, :2].reshape((-1, 2, 2))
# store (n, 2, 2) float lines
segments.append(lines_2D)
# store (n,) int indexes of mesh.faces
        face_index.append(index)
# (n, 4, 4) transforms from 2D to 3D
transforms = np.array(transforms, dtype=np.float64)
return segments, transforms, face_index |
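A usage sketch, assuming the surrounding `trimesh` package is importable (this function lives next to `mesh_plane` in its intersections module):

```python
import numpy as np
import trimesh

box = trimesh.creation.box(extents=[1.0, 1.0, 1.0])
segments, to_3D, face_index = mesh_multiplane(
    mesh=box,
    plane_origin=[0.0, 0.0, 0.0],
    plane_normal=[0.0, 0.0, 1.0],
    heights=np.linspace(-0.4, 0.4, 5))

print(len(segments), segments[0].shape)  # 5 arrays of (n, 2, 2) 2D segments
print(to_3D.shape)                       # (5, 4, 4) transforms back to 3D
```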
def set(self,
agent_id,
name=None,
description=None,
redirect_domain=None,
logo_media_id=None,
report_location_flag=0,
is_report_user=True,
is_report_enter=True):
"""
        Set application details
        https://work.weixin.qq.com/api/doc#90000/90135/90228
        :param agent_id: id of the enterprise application
        :param name: name of the enterprise application, at most 32 utf8 characters
        :param description: description of the enterprise application, 4 to 120 utf8 characters
        :param redirect_domain: trusted domain of the enterprise application. Note: the domain must pass ownership verification, otherwise jssdk features are restricted and error code 85005 is returned
        :param logo_media_id: mediaid of the enterprise application logo, obtained by uploading an image through the media management API; after upload it is automatically cropped into a square and a round avatar
        :param report_location_flag: whether the enterprise application enables geographic location reporting. 0: do not report; 1: report when entering a session
        :param is_report_enter: whether to report the event of a user entering the application. 0: do not receive; 1: receive.
        :param is_report_user: whether to receive user change notifications. 0: do not receive; 1: receive.
        :return: the returned JSON data
"""
agent_data = optionaldict()
agent_data['agentid'] = agent_id
agent_data['name'] = name
agent_data['description'] = description
agent_data['redirect_domain'] = redirect_domain
agent_data['logo_mediaid'] = logo_media_id
agent_data['report_location_flag'] = report_location_flag
agent_data['isreportenter'] = 1 if is_report_enter else 0
agent_data['isreportuser'] = 1 if is_report_user else 0
return self._post(
'agent/set',
data=agent_data
        ) | Set application details
        https://work.weixin.qq.com/api/doc#90000/90135/90228
        :param agent_id: id of the enterprise application
        :param name: name of the enterprise application, at most 32 utf8 characters
        :param description: description of the enterprise application, 4 to 120 utf8 characters
        :param redirect_domain: trusted domain of the enterprise application. Note: the domain must pass ownership verification, otherwise jssdk features are restricted and error code 85005 is returned
        :param logo_media_id: mediaid of the enterprise application logo, obtained by uploading an image through the media management API; after upload it is automatically cropped into a square and a round avatar
        :param report_location_flag: whether the enterprise application enables geographic location reporting. 0: do not report; 1: report when entering a session
        :param is_report_enter: whether to report the event of a user entering the application. 0: do not receive; 1: receive.
        :param is_report_user: whether to receive user change notifications. 0: do not receive; 1: receive.
        :return: the returned JSON data | Below is the the instruction that describes the task:
### Input:
Set application details
        https://work.weixin.qq.com/api/doc#90000/90135/90228
        :param agent_id: id of the enterprise application
        :param name: name of the enterprise application, at most 32 utf8 characters
        :param description: description of the enterprise application, 4 to 120 utf8 characters
        :param redirect_domain: trusted domain of the enterprise application. Note: the domain must pass ownership verification, otherwise jssdk features are restricted and error code 85005 is returned
        :param logo_media_id: mediaid of the enterprise application logo, obtained by uploading an image through the media management API; after upload it is automatically cropped into a square and a round avatar
        :param report_location_flag: whether the enterprise application enables geographic location reporting. 0: do not report; 1: report when entering a session
        :param is_report_enter: whether to report the event of a user entering the application. 0: do not receive; 1: receive.
        :param is_report_user: whether to receive user change notifications. 0: do not receive; 1: receive.
        :return: the returned JSON data
### Response:
def set(self,
agent_id,
name=None,
description=None,
redirect_domain=None,
logo_media_id=None,
report_location_flag=0,
is_report_user=True,
is_report_enter=True):
"""
        Set application details
        https://work.weixin.qq.com/api/doc#90000/90135/90228
        :param agent_id: id of the enterprise application
        :param name: name of the enterprise application, at most 32 utf8 characters
        :param description: description of the enterprise application, 4 to 120 utf8 characters
        :param redirect_domain: trusted domain of the enterprise application. Note: the domain must pass ownership verification, otherwise jssdk features are restricted and error code 85005 is returned
        :param logo_media_id: mediaid of the enterprise application logo, obtained by uploading an image through the media management API; after upload it is automatically cropped into a square and a round avatar
        :param report_location_flag: whether the enterprise application enables geographic location reporting. 0: do not report; 1: report when entering a session
        :param is_report_enter: whether to report the event of a user entering the application. 0: do not receive; 1: receive.
        :param is_report_user: whether to receive user change notifications. 0: do not receive; 1: receive.
        :return: the returned JSON data
"""
agent_data = optionaldict()
agent_data['agentid'] = agent_id
agent_data['name'] = name
agent_data['description'] = description
agent_data['redirect_domain'] = redirect_domain
agent_data['logo_mediaid'] = logo_media_id
agent_data['report_location_flag'] = report_location_flag
agent_data['isreportenter'] = 1 if is_report_enter else 0
agent_data['isreportuser'] = 1 if is_report_user else 0
return self._post(
'agent/set',
data=agent_data
) |
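A hypothetical call sketch, assuming the method is exposed on an authenticated WeChat Work API client as `client.agent` (client construction, credentials and the agent id below are placeholders, not from the source):

```python
client.agent.set(
    agent_id=1000002,                 # placeholder agent id
    name="Expense Tool",
    description="Internal expense reporting application",
    report_location_flag=0,
    is_report_user=True,
    is_report_enter=False,
)
```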
def _activate(self):
"""Activate the application (bringing menus and windows forward)."""
ra = AppKit.NSRunningApplication
app = ra.runningApplicationWithProcessIdentifier_(
self._getPid())
# NSApplicationActivateAllWindows | NSApplicationActivateIgnoringOtherApps
# == 3 - PyObjC in 10.6 does not expose these constants though so I have
# to use the int instead of the symbolic names
app.activateWithOptions_(3) | Activate the application (bringing menus and windows forward). | Below is the the instruction that describes the task:
### Input:
Activate the application (bringing menus and windows forward).
### Response:
def _activate(self):
"""Activate the application (bringing menus and windows forward)."""
ra = AppKit.NSRunningApplication
app = ra.runningApplicationWithProcessIdentifier_(
self._getPid())
# NSApplicationActivateAllWindows | NSApplicationActivateIgnoringOtherApps
# == 3 - PyObjC in 10.6 does not expose these constants though so I have
# to use the int instead of the symbolic names
app.activateWithOptions_(3) |
def calculate_top_down_likelihood(tree, character, frequencies, sf, kappa=None, model=F81):
"""
Calculates the top-down likelihood for the given tree.
The likelihood for each node is stored in the corresponding feature,
given by get_personalised_feature_name(feature, TD_LH).
To calculate the top-down likelihood of a node, we assume that the tree is rooted in this node
and combine the likelihoods of the “up-subtrees”,
e.g. to calculate the top-down likelihood of a node N1 being in a state i,
given that its parent node is P and its brother node is N2, we imagine that the tree is re-rooted in N1,
therefore P becoming the child of N1, and N2 its grandchild.
We then calculate the bottom-up likelihood from the P subtree:
L_top_down(N1, i) = \sum_j P(i -> j, dist(N1, P)) * L_top_down(P) * \sum_k P(j -> k, dist(N2, P)) * L_bottom_up (N2).
For the root node we assume its top-down likelihood to be 1 for all the states.
:param model: model of character evolution
:type model: str
:param sf: scaling factor
:type sf: float
:param character: character whose ancestral state likelihood is being calculated
:type character: str
:param tree: tree of interest (with bottom-up likelihood pre-calculated)
:type tree: ete3.Tree
:param frequencies: state frequencies
:type frequencies: numpy.array
:return: void, stores the node top-down likelihoods in the get_personalised_feature_name(feature, TD_LH) feature.
"""
lh_feature = get_personalized_feature_name(character, TD_LH)
lh_sf_feature = get_personalized_feature_name(character, TD_LH_SF)
bu_lh_feature = get_personalized_feature_name(character, BU_LH)
bu_lh_sf_feature = get_personalized_feature_name(character, BU_LH_SF)
get_pij = get_pij_method(model, frequencies, kappa)
for node in tree.traverse('preorder'):
if node.is_root():
node.add_feature(lh_feature, np.ones(len(frequencies), np.float64))
node.add_feature(lh_sf_feature, 0)
continue
parent = node.up
parent_bu_likelihood = getattr(parent, bu_lh_feature)
node_pjis = np.transpose(get_pij(node.dist * sf))
node_contribution = getattr(node, bu_lh_feature).dot(node_pjis)
parent_likelihood = getattr(parent, lh_feature) * parent_bu_likelihood
parent_likelihood[np.nonzero(parent_likelihood)] /= node_contribution[np.nonzero(parent_likelihood)]
factors = getattr(parent, lh_sf_feature) + getattr(parent, bu_lh_sf_feature) - getattr(node, bu_lh_sf_feature)
td_likelihood = parent_likelihood.dot(node_pjis)
factors += rescale(td_likelihood, fraction_of_limit=len(node.children) if not node.is_leaf() else 1)
node.add_feature(lh_feature, td_likelihood)
node.add_feature(lh_sf_feature, factors) | Calculates the top-down likelihood for the given tree.
The likelihood for each node is stored in the corresponding feature,
given by get_personalised_feature_name(feature, TD_LH).
To calculate the top-down likelihood of a node, we assume that the tree is rooted in this node
and combine the likelihoods of the “up-subtrees”,
e.g. to calculate the top-down likelihood of a node N1 being in a state i,
given that its parent node is P and its brother node is N2, we imagine that the tree is re-rooted in N1,
therefore P becoming the child of N1, and N2 its grandchild.
We then calculate the bottom-up likelihood from the P subtree:
L_top_down(N1, i) = \sum_j P(i -> j, dist(N1, P)) * L_top_down(P) * \sum_k P(j -> k, dist(N2, P)) * L_bottom_up (N2).
For the root node we assume its top-down likelihood to be 1 for all the states.
:param model: model of character evolution
:type model: str
:param sf: scaling factor
:type sf: float
:param character: character whose ancestral state likelihood is being calculated
:type character: str
:param tree: tree of interest (with bottom-up likelihood pre-calculated)
:type tree: ete3.Tree
:param frequencies: state frequencies
:type frequencies: numpy.array
:return: void, stores the node top-down likelihoods in the get_personalised_feature_name(feature, TD_LH) feature. | Below is the the instruction that describes the task:
### Input:
Calculates the top-down likelihood for the given tree.
The likelihood for each node is stored in the corresponding feature,
given by get_personalised_feature_name(feature, TD_LH).
To calculate the top-down likelihood of a node, we assume that the tree is rooted in this node
and combine the likelihoods of the “up-subtrees”,
e.g. to calculate the top-down likelihood of a node N1 being in a state i,
given that its parent node is P and its brother node is N2, we imagine that the tree is re-rooted in N1,
therefore P becoming the child of N1, and N2 its grandchild.
We then calculate the bottom-up likelihood from the P subtree:
L_top_down(N1, i) = \sum_j P(i -> j, dist(N1, P)) * L_top_down(P) * \sum_k P(j -> k, dist(N2, P)) * L_bottom_up (N2).
For the root node we assume its top-down likelihood to be 1 for all the states.
:param model: model of character evolution
:type model: str
:param sf: scaling factor
:type sf: float
:param character: character whose ancestral state likelihood is being calculated
:type character: str
:param tree: tree of interest (with bottom-up likelihood pre-calculated)
:type tree: ete3.Tree
:param frequencies: state frequencies
:type frequencies: numpy.array
:return: void, stores the node top-down likelihoods in the get_personalised_feature_name(feature, TD_LH) feature.
### Response:
def calculate_top_down_likelihood(tree, character, frequencies, sf, kappa=None, model=F81):
"""
Calculates the top-down likelihood for the given tree.
The likelihood for each node is stored in the corresponding feature,
given by get_personalised_feature_name(feature, TD_LH).
To calculate the top-down likelihood of a node, we assume that the tree is rooted in this node
and combine the likelihoods of the “up-subtrees”,
e.g. to calculate the top-down likelihood of a node N1 being in a state i,
given that its parent node is P and its brother node is N2, we imagine that the tree is re-rooted in N1,
therefore P becoming the child of N1, and N2 its grandchild.
We then calculate the bottom-up likelihood from the P subtree:
L_top_down(N1, i) = \sum_j P(i -> j, dist(N1, P)) * L_top_down(P) * \sum_k P(j -> k, dist(N2, P)) * L_bottom_up (N2).
For the root node we assume its top-down likelihood to be 1 for all the states.
:param model: model of character evolution
:type model: str
:param sf: scaling factor
:type sf: float
:param character: character whose ancestral state likelihood is being calculated
:type character: str
:param tree: tree of interest (with bottom-up likelihood pre-calculated)
:type tree: ete3.Tree
:param frequencies: state frequencies
:type frequencies: numpy.array
:return: void, stores the node top-down likelihoods in the get_personalised_feature_name(feature, TD_LH) feature.
"""
lh_feature = get_personalized_feature_name(character, TD_LH)
lh_sf_feature = get_personalized_feature_name(character, TD_LH_SF)
bu_lh_feature = get_personalized_feature_name(character, BU_LH)
bu_lh_sf_feature = get_personalized_feature_name(character, BU_LH_SF)
get_pij = get_pij_method(model, frequencies, kappa)
for node in tree.traverse('preorder'):
if node.is_root():
node.add_feature(lh_feature, np.ones(len(frequencies), np.float64))
node.add_feature(lh_sf_feature, 0)
continue
parent = node.up
parent_bu_likelihood = getattr(parent, bu_lh_feature)
node_pjis = np.transpose(get_pij(node.dist * sf))
node_contribution = getattr(node, bu_lh_feature).dot(node_pjis)
parent_likelihood = getattr(parent, lh_feature) * parent_bu_likelihood
parent_likelihood[np.nonzero(parent_likelihood)] /= node_contribution[np.nonzero(parent_likelihood)]
factors = getattr(parent, lh_sf_feature) + getattr(parent, bu_lh_sf_feature) - getattr(node, bu_lh_sf_feature)
td_likelihood = parent_likelihood.dot(node_pjis)
factors += rescale(td_likelihood, fraction_of_limit=len(node.children) if not node.is_leaf() else 1)
node.add_feature(lh_feature, td_likelihood)
node.add_feature(lh_sf_feature, factors) |
def parse(self, valstr):
# type: (bytes) -> None
'''
A method to parse an El Torito Entry out of a string.
Parameters:
valstr - The string to parse the El Torito Entry out of.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('El Torito Entry already initialized')
(self.boot_indicator, self.boot_media_type, self.load_segment,
self.system_type, unused1, self.sector_count, self.load_rba,
self.selection_criteria_type,
self.selection_criteria) = struct.unpack_from(self.FMT, valstr, 0)
if self.boot_indicator not in (0x88, 0x00):
raise pycdlibexception.PyCdlibInvalidISO('Invalid El Torito initial entry boot indicator')
if self.boot_media_type > 4:
raise pycdlibexception.PyCdlibInvalidISO('Invalid El Torito boot media type')
# FIXME: check that the system type matches the partition table
if unused1 != 0:
raise pycdlibexception.PyCdlibInvalidISO('El Torito unused field must be 0')
# According to the specification, the El Torito unused end field (bytes
# 0xc - 0x1f, unused2 field) should be all zero. However, we have found
# ISOs in the wild where that is not the case, so skip that particular
# check here.
self._initialized = True | A method to parse an El Torito Entry out of a string.
Parameters:
valstr - The string to parse the El Torito Entry out of.
Returns:
Nothing. | Below is the instruction that describes the task:
### Input:
A method to parse an El Torito Entry out of a string.
Parameters:
valstr - The string to parse the El Torito Entry out of.
Returns:
Nothing.
### Response:
def parse(self, valstr):
# type: (bytes) -> None
'''
A method to parse an El Torito Entry out of a string.
Parameters:
valstr - The string to parse the El Torito Entry out of.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('El Torito Entry already initialized')
(self.boot_indicator, self.boot_media_type, self.load_segment,
self.system_type, unused1, self.sector_count, self.load_rba,
self.selection_criteria_type,
self.selection_criteria) = struct.unpack_from(self.FMT, valstr, 0)
if self.boot_indicator not in (0x88, 0x00):
raise pycdlibexception.PyCdlibInvalidISO('Invalid El Torito initial entry boot indicator')
if self.boot_media_type > 4:
raise pycdlibexception.PyCdlibInvalidISO('Invalid El Torito boot media type')
# FIXME: check that the system type matches the partition table
if unused1 != 0:
raise pycdlibexception.PyCdlibInvalidISO('El Torito unused field must be 0')
# According to the specification, the El Torito unused end field (bytes
# 0xc - 0x1f, unused2 field) should be all zero. However, we have found
# ISOs in the wild where that is not the case, so skip that particular
# check here.
self._initialized = True |
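A standalone sketch of the 32-byte record the unpack above reads; the '=BBHBBHLB19s' format string is an assumption made for illustration (the class defines its own FMT elsewhere), but it yields the same nine fields in the same order.

import struct

assumed_fmt = '=BBHBBHLB19s'   # assumed layout: boot indicator, media type, load segment,
                               # system type, one unused byte, sector count, load RBA,
                               # selection criteria type, 19 selection criteria bytes
raw = struct.pack(assumed_fmt, 0x88, 0, 0, 0, 0, 4, 27, 0, b'\x00' * 19)
print(len(raw))                                           # 32, the size of one El Torito entry
print(hex(struct.unpack_from(assumed_fmt, raw, 0)[0]))    # 0x88 marks a bootable entry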
def writeGenerator(self, gen):
"""
Iterates over a generator object and encodes all that is returned.
"""
n = getattr(gen, 'next')
while True:
try:
self.writeElement(n())
except StopIteration:
break | Iterates over a generator object and encodes all that is returned. | Below is the instruction that describes the task:
### Input:
Iterates over a generator object and encodes all that is returned.
### Response:
def writeGenerator(self, gen):
"""
Iterates over a generator object and encodes all that is returned.
"""
n = getattr(gen, 'next')
while True:
try:
self.writeElement(n())
except StopIteration:
break |
def errors_handler(self, *custom_filters, exception=None, run_task=None, **kwargs):
"""
Decorator for errors handler
:param exception: you can make handler for specific errors type
:param run_task: run callback in task (no wait results)
:return:
"""
def decorator(callback):
self.register_errors_handler(self._wrap_async_task(callback, run_task),
*custom_filters, exception=exception, **kwargs)
return callback
return decorator | Decorator for errors handler
:param exception: you can make handler for specific errors type
:param run_task: run callback in task (no wait results)
:return: | Below is the instruction that describes the task:
### Input:
Decorator for errors handler
:param exception: you can make handler for specific errors type
:param run_task: run callback in task (no wait results)
:return:
### Response:
def errors_handler(self, *custom_filters, exception=None, run_task=None, **kwargs):
"""
Decorator for errors handler
:param exception: you can make handler for specific errors type
:param run_task: run callback in task (no wait results)
:return:
"""
def decorator(callback):
self.register_errors_handler(self._wrap_async_task(callback, run_task),
*custom_filters, exception=exception, **kwargs)
return callback
return decorator |
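A usage sketch for the decorator above, assuming an aiogram 2.x-style Dispatcher already exists as dp; the handler name and body are hypothetical, and returning True is how that API marks the error as handled.

@dp.errors_handler(exception=Exception)      # dp is assumed to be an existing Dispatcher
async def on_error(update, exception):
    # Log the failure and keep polling instead of crashing the bot.
    print(f'update {update} raised {exception!r}')
    return True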
def submit(self, func, *args, **kwargs):
"""Submit a function for serialized execution on sqs
"""
self.op_sequence += 1
self.sqs.send_message(
QueueUrl=self.map_queue,
MessageBody=utils.dumps({'args': args, 'kwargs': kwargs}),
MessageAttributes={
'sequence_id': {
'StringValue': str(self.op_sequence),
'DataType': 'Number'},
'op': {
'StringValue': named(func),
'DataType': 'String',
},
'ser': {
'StringValue': 'json',
'DataType': 'String'}}
)
self.futures[self.op_sequence] = f = SQSFuture(
self.op_sequence)
return f | Submit a function for serialized execution on sqs | Below is the instruction that describes the task:
### Input:
Submit a function for serialized execution on sqs
### Response:
def submit(self, func, *args, **kwargs):
"""Submit a function for serialized execution on sqs
"""
self.op_sequence += 1
self.sqs.send_message(
QueueUrl=self.map_queue,
MessageBody=utils.dumps({'args': args, 'kwargs': kwargs}),
MessageAttributes={
'sequence_id': {
'StringValue': str(self.op_sequence),
'DataType': 'Number'},
'op': {
'StringValue': named(func),
'DataType': 'String',
},
'ser': {
'StringValue': 'json',
'DataType': 'String'}}
)
self.futures[self.op_sequence] = f = SQSFuture(
self.op_sequence)
return f |
def disable_event(self, event_type, mechanism):
"""Disables notification of the specified event type(s) via the specified mechanism(s).
:param event_type: Logical event identifier.
:param mechanism: Specifies event handling mechanisms to be disabled.
(Constants.VI_QUEUE, .VI_HNDLR, .VI_SUSPEND_HNDLR, .VI_ALL_MECH)
"""
self.visalib.disable_event(self.session, event_type, mechanism) | Disables notification of the specified event type(s) via the specified mechanism(s).
:param event_type: Logical event identifier.
:param mechanism: Specifies event handling mechanisms to be disabled.
(Constants.VI_QUEUE, .VI_HNDLR, .VI_SUSPEND_HNDLR, .VI_ALL_MECH) | Below is the instruction that describes the task:
### Input:
Disables notification of the specified event type(s) via the specified mechanism(s).
:param event_type: Logical event identifier.
:param mechanism: Specifies event handling mechanisms to be disabled.
(Constants.VI_QUEUE, .VI_HNDLR, .VI_SUSPEND_HNDLR, .VI_ALL_MECH)
### Response:
def disable_event(self, event_type, mechanism):
"""Disables notification of the specified event type(s) via the specified mechanism(s).
:param event_type: Logical event identifier.
:param mechanism: Specifies event handling mechanisms to be disabled.
(Constants.VI_QUEUE, .VI_HNDLR, .VI_SUSPEND_HNDLR, .VI_ALL_MECH)
"""
self.visalib.disable_event(self.session, event_type, mechanism) |
def relaxedEMDS(X0, N, d, C, b, KE, print_out=False, lamda=10):
""" Find the set of points from an edge kernel with geometric constraints, using convex rank relaxation.
"""
E = C.shape[1]
X = Variable((E, E), PSD=True)
constraints = [C[i, :] * X == b[i] for i in range(C.shape[0])]
obj = Minimize(trace(X) + lamda * norm(KE - X))
prob = Problem(obj, constraints)
try:
# CVXOPT is more accurate than SCS, even though slower.
total_cost = prob.solve(solver='CVXOPT', verbose=print_out)
except:
try:
print('CVXOPT with default cholesky failed. Trying kktsolver...')
# kktsolver is more robust than default (cholesky), even though slower.
total_cost = prob.solve(
solver='CVXOPT', verbose=print_out, kktsolver="robust")
except:
try:
print('CVXOPT with robust kktsovler failed. Trying SCS...')
# SCS is fast and robust, but inaccurate (last choice).
total_cost = prob.solve(solver='SCS', verbose=print_out)
except:
print('SCS and CVXOPT solver with default and kktsolver failed .')
if print_out:
print('status:', prob.status)
Xhat_KE, Vhat_KE = superMDS(X0, N, d, KE=X.value)
return Xhat_KE, Vhat_KE | Find the set of points from an edge kernel with geometric constraints, using convex rank relaxation. | Below is the instruction that describes the task:
### Input:
Find the set of points from an edge kernel with geometric constraints, using convex rank relaxation.
### Response:
def relaxedEMDS(X0, N, d, C, b, KE, print_out=False, lamda=10):
""" Find the set of points from an edge kernel with geometric constraints, using convex rank relaxation.
"""
E = C.shape[1]
X = Variable((E, E), PSD=True)
constraints = [C[i, :] * X == b[i] for i in range(C.shape[0])]
obj = Minimize(trace(X) + lamda * norm(KE - X))
prob = Problem(obj, constraints)
try:
# CVXOPT is more accurate than SCS, even though slower.
total_cost = prob.solve(solver='CVXOPT', verbose=print_out)
except:
try:
print('CVXOPT with default cholesky failed. Trying kktsolver...')
# kktsolver is more robust than default (cholesky), even though slower.
total_cost = prob.solve(
solver='CVXOPT', verbose=print_out, kktsolver="robust")
except:
try:
print('CVXOPT with robust kktsovler failed. Trying SCS...')
# SCS is fast and robust, but inaccurate (last choice).
total_cost = prob.solve(solver='SCS', verbose=print_out)
except:
print('SCS and CVXOPT solver with default and kktsolver failed .')
if print_out:
print('status:', prob.status)
Xhat_KE, Vhat_KE = superMDS(X0, N, d, KE=X.value)
return Xhat_KE, Vhat_KE |
def set_zone_order(self, zone_ids):
""" reorder zones per the passed in list
:param zone_ids:
:return:
"""
reordered_zones = []
current_zone_ids = [z['id'] for z in self.my_osid_object_form._my_map['zones']]
if set(zone_ids) != set(current_zone_ids):
raise IllegalState('zone_ids do not match existing zones')
for zone_id in zone_ids:
for current_zone in self.my_osid_object_form._my_map['zones']:
if zone_id == current_zone['id']:
reordered_zones.append(current_zone)
break
self.my_osid_object_form._my_map['zones'] = reordered_zones | reorder zones per the passed in list
:param zone_ids:
:return: | Below is the instruction that describes the task:
### Input:
reorder zones per the passed in list
:param zone_ids:
:return:
### Response:
def set_zone_order(self, zone_ids):
""" reorder zones per the passed in list
:param zone_ids:
:return:
"""
reordered_zones = []
current_zone_ids = [z['id'] for z in self.my_osid_object_form._my_map['zones']]
if set(zone_ids) != set(current_zone_ids):
raise IllegalState('zone_ids do not match existing zones')
for zone_id in zone_ids:
for current_zone in self.my_osid_object_form._my_map['zones']:
if zone_id == current_zone['id']:
reordered_zones.append(current_zone)
break
self.my_osid_object_form._my_map['zones'] = reordered_zones |
def iter_traceback_frames(tb):
"""
Given a traceback object, it will iterate over all
frames that do not contain the ``__traceback_hide__``
local variable.
"""
# Some versions of celery have hacked traceback objects that might
# miss tb_frame.
while tb and hasattr(tb, 'tb_frame'):
# support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
f_locals = getattr(tb.tb_frame, 'f_locals', {})
if not _getitem_from_frame(f_locals, '__traceback_hide__'):
yield tb.tb_frame, getattr(tb, 'tb_lineno', None)
tb = tb.tb_next | Given a traceback object, it will iterate over all
frames that do not contain the ``__traceback_hide__``
local variable. | Below is the instruction that describes the task:
### Input:
Given a traceback object, it will iterate over all
frames that do not contain the ``__traceback_hide__``
local variable.
### Response:
def iter_traceback_frames(tb):
"""
Given a traceback object, it will iterate over all
frames that do not contain the ``__traceback_hide__``
local variable.
"""
# Some versions of celery have hacked traceback objects that might
# miss tb_frame.
while tb and hasattr(tb, 'tb_frame'):
# support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
f_locals = getattr(tb.tb_frame, 'f_locals', {})
if not _getitem_from_frame(f_locals, '__traceback_hide__'):
yield tb.tb_frame, getattr(tb, 'tb_lineno', None)
tb = tb.tb_next |
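A quick use of the generator above: catch an exception, take its traceback and print the file and line of every visible frame (iter_traceback_frames is the helper defined just above).

import sys

try:
    1 / 0
except ZeroDivisionError:
    tb = sys.exc_info()[2]
    for frame, lineno in iter_traceback_frames(tb):
        print(frame.f_code.co_filename, lineno)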
def get_swagger_operation(self, context=default_context):
"""
get the swagger_schema operation representation.
"""
consumes = produces = context.contenttype_serializers.keys()
parameters = get_swagger_parameters(self.parameters, context)
responses = {
"400": Response(
{
"description": "invalid input received",
"schema": Schema(
{
"title": "FailureObject",
"type": "object",
"properties": {
"success": {"type": "boolean"},
"result": {"type": "string"},
},
"required": ["success", "result"],
}
),
}
)
}
for code, details in self.response_types.items():
responses[str(code)] = details.swagger_definition(context)
return Operation(
{
"summary": self.summary,
"description": self.description,
"consumes": consumes,
"produces": produces,
"parameters": parameters,
"responses": responses,
"operationId": self.raw_func.__name__,
"tags": self.tags,
}
) | get the swagger_schema operation representation. | Below is the instruction that describes the task:
### Input:
get the swagger_schema operation representation.
### Response:
def get_swagger_operation(self, context=default_context):
"""
get the swagger_schema operation representation.
"""
consumes = produces = context.contenttype_serializers.keys()
parameters = get_swagger_parameters(self.parameters, context)
responses = {
"400": Response(
{
"description": "invalid input received",
"schema": Schema(
{
"title": "FailureObject",
"type": "object",
"properties": {
"success": {"type": "boolean"},
"result": {"type": "string"},
},
"required": ["success", "result"],
}
),
}
)
}
for code, details in self.response_types.items():
responses[str(code)] = details.swagger_definition(context)
return Operation(
{
"summary": self.summary,
"description": self.description,
"consumes": consumes,
"produces": produces,
"parameters": parameters,
"responses": responses,
"operationId": self.raw_func.__name__,
"tags": self.tags,
}
) |
def update_list_function(self, list_name, list_func):
"""
Modifies/overwrites an existing list function in the
locally cached DesignDocument indexes dictionary.
:param str list_name: Name used to identify the list function.
:param str list_func: Javascript list function.
"""
if self.get_list_function(list_name) is None:
raise CloudantArgumentError(113, list_name)
self.lists.__setitem__(list_name, codify(list_func)) | Modifies/overwrites an existing list function in the
locally cached DesignDocument indexes dictionary.
:param str list_name: Name used to identify the list function.
:param str list_func: Javascript list function. | Below is the instruction that describes the task:
### Input:
Modifies/overwrites an existing list function in the
locally cached DesignDocument indexes dictionary.
:param str list_name: Name used to identify the list function.
:param str list_func: Javascript list function.
### Response:
def update_list_function(self, list_name, list_func):
"""
Modifies/overwrites an existing list function in the
locally cached DesignDocument indexes dictionary.
:param str list_name: Name used to identify the list function.
:param str list_func: Javascript list function.
"""
if self.get_list_function(list_name) is None:
raise CloudantArgumentError(113, list_name)
self.lists.__setitem__(list_name, codify(list_func)) |
def run_simulation(self):
"""Runs the complete simulation"""
print('Starting simulations...')
for i in range(self.num_trials):
print('---Trial {}---'.format(i))
self.run_trial(i)
print('Simulation completed.') | Runs the complete simulation | Below is the instruction that describes the task:
### Input:
Runs the complete simulation
### Response:
def run_simulation(self):
"""Runs the complete simulation"""
print('Starting simulations...')
for i in range(self.num_trials):
print('---Trial {}---'.format(i))
self.run_trial(i)
print('Simulation completed.') |
def remove(image):
"""Remove an image from the GUI img library."""
path = os.path.join(IMG_DIR, image)
if os.path.isfile(path):
os.remove(path) | Remove an image from the GUI img library. | Below is the instruction that describes the task:
### Input:
Remove an image from the GUI img library.
### Response:
def remove(image):
"""Remove an image from the GUI img library."""
path = os.path.join(IMG_DIR, image)
if os.path.isfile(path):
os.remove(path) |
def create_request_url(self, interface, method, version, parameters):
"""Create the URL to submit to the Steam Web API
interface: Steam Web API interface containing methods.
method: The method to call.
version: The version of the method.
parameters: Parameters to supply to the method.
"""
if 'format' in parameters:
parameters['key'] = self.apikey
else:
parameters.update({'key' : self.apikey, 'format' : self.format})
version = "v%04d" % (version)
url = "http://api.steampowered.com/%s/%s/%s/?%s" % (interface, method,
version, urlencode(parameters))
return url | Create the URL to submit to the Steam Web API
interface: Steam Web API interface containing methods.
method: The method to call.
version: The version of the method.
parameters: Parameters to supply to the method. | Below is the instruction that describes the task:
### Input:
Create the URL to submit to the Steam Web API
interface: Steam Web API interface containing methods.
method: The method to call.
version: The version of the method.
parameters: Parameters to supply to the method.
### Response:
def create_request_url(self, interface, method, version, parameters):
"""Create the URL to submit to the Steam Web API
interface: Steam Web API interface containing methods.
method: The method to call.
version: The version of the method.
parameters: Parameters to supply to the method.
"""
if 'format' in parameters:
parameters['key'] = self.apikey
else:
parameters.update({'key' : self.apikey, 'format' : self.format})
version = "v%04d" % (version)
url = "http://api.steampowered.com/%s/%s/%s/?%s" % (interface, method,
version, urlencode(parameters))
return url |
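A worked example of the URL shape the method above builds, calling it as a plain function with a stand-in object for self; the API key, Steam ID and resulting URL are hypothetical, and note the version number is zero-padded to four digits.

from types import SimpleNamespace

client = SimpleNamespace(apikey='ABCDEF', format='json')   # stand-in for self
url = create_request_url(client, 'ISteamUser', 'GetPlayerSummaries', 2,
                         {'steamids': '76561197960435530'})
print(url)
# e.g. http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?steamids=76561197960435530&key=ABCDEF&format=json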
def add(self, files, items):
"""
Add a list of files with a reference to a list of objects.
"""
if isinstance(files, (str, bytes)):
files = iter([files])
for pathname in files:
try:
values = self._filemap[pathname]
except KeyError:
self._filemap[pathname] = items
else:
values.extend(items) | Add a list of files with a reference to a list of objects. | Below is the instruction that describes the task:
### Input:
Add a list of files with a reference to a list of objects.
### Response:
def add(self, files, items):
"""
Add a list of files with a reference to a list of objects.
"""
if isinstance(files, (str, bytes)):
files = iter([files])
for pathname in files:
try:
values = self._filemap[pathname]
except KeyError:
self._filemap[pathname] = items
else:
values.extend(items) |
def parse_args(self, ctx, args):
"""Check if the first argument is an existing command."""
if args and args[0] in self.commands:
args.insert(0, '')
super(OptionalGroup, self).parse_args(ctx, args) | Check if the first argument is an existing command. | Below is the instruction that describes the task:
### Input:
Check if the first argument is an existing command.
### Response:
def parse_args(self, ctx, args):
"""Check if the first argument is an existing command."""
if args and args[0] in self.commands:
args.insert(0, '')
super(OptionalGroup, self).parse_args(ctx, args) |
def import_object(path):
"""Import an object given its fully qualified name."""
spl = path.split('.')
if len(spl) == 1:
return importlib.import_module(path)
# avoid last part for the moment
cls = spl[-1]
mods = '.'.join(spl[:-1])
mm = importlib.import_module(mods)
# try to get the last part as an attribute
try:
obj = getattr(mm, cls)
return obj
except AttributeError:
pass
# Try to import the last part
rr = importlib.import_module(path)
return rr | Import an object given its fully qualified name. | Below is the instruction that describes the task:
### Input:
Import an object given its fully qualified name.
### Response:
def import_object(path):
"""Import an object given its fully qualified name."""
spl = path.split('.')
if len(spl) == 1:
return importlib.import_module(path)
# avoid last part for the moment
cls = spl[-1]
mods = '.'.join(spl[:-1])
mm = importlib.import_module(mods)
# try to get the last part as an attribute
try:
obj = getattr(mm, cls)
return obj
except AttributeError:
pass
# Try to import the last part
rr = importlib.import_module(path)
return rr |
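Two quick calls showing both branches of the helper above: a dotted path resolved with getattr, and a single-component path that falls through to a plain module import.

join = import_object('os.path.join')    # attribute looked up on the os.path module
print(join('a', 'b'))                   # a/b (or a\b on Windows)
json_mod = import_object('json')        # one component, so just import the module
print(json_mod.dumps({'ok': True}))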
def setDragTable(self, table):
"""
Sets the table that will be linked with the drag query for this
record. This information will be added to the drag & drop information
when this record is dragged from the tree and will be set into
the application/x-table format for mime data.
:sa setDragQuery, XTreeWidgetItem.setDragData
:param table | <subclass of orb.Table>
"""
if table and table.schema():
self.setDragData('application/x-orb-table', table.schema().name())
else:
self.setDragData('application/x-orb-table', None) | Sets the table that will be linked with the drag query for this
record. This information will be added to the drag & drop information
when this record is dragged from the tree and will be set into
the application/x-table format for mime data.
:sa setDragQuery, XTreeWidgetItem.setDragData
:param table | <subclass of orb.Table> | Below is the instruction that describes the task:
### Input:
Sets the table that will be linked with the drag query for this
record. This information will be added to the drag & drop information
when this record is dragged from the tree and will be set into
the application/x-table format for mime data.
:sa setDragQuery, XTreeWidgetItem.setDragData
:param table | <subclass of orb.Table>
### Response:
def setDragTable(self, table):
"""
Sets the table that will be linked with the drag query for this
record. This information will be added to the drag & drop information
when this record is dragged from the tree and will be set into
the application/x-table format for mime data.
:sa setDragQuery, XTreeWidgetItem.setDragData
:param table | <subclass of orb.Table>
"""
if table and table.schema():
self.setDragData('application/x-orb-table', table.schema().name())
else:
self.setDragData('application/x-orb-table', None) |
def remove_peer_from_bgp_speaker(self, speaker_id, body=None):
"""Removes a peer from BGP speaker."""
return self.put((self.bgp_speaker_path % speaker_id) +
"/remove_bgp_peer", body=body) | Removes a peer from BGP speaker. | Below is the instruction that describes the task:
### Input:
Removes a peer from BGP speaker.
### Response:
def remove_peer_from_bgp_speaker(self, speaker_id, body=None):
"""Removes a peer from BGP speaker."""
return self.put((self.bgp_speaker_path % speaker_id) +
"/remove_bgp_peer", body=body) |
def _set_desire_distance(self, v, load=False):
"""
Setter method for desire_distance, mapped from YANG variable /interface/fc_port/desire_distance (desire-distance-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_desire_distance is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_desire_distance() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="desire-distance", rest_name="desire-distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Desired distance for LS and LD mode.', u'hidden': u'full', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='desire-distance-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """desire_distance must be of a type compatible with desire-distance-type""",
'defined-type': "brocade-interface:desire-distance-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="desire-distance", rest_name="desire-distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Desired distance for LS and LD mode.', u'hidden': u'full', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='desire-distance-type', is_config=True)""",
})
self.__desire_distance = t
if hasattr(self, '_set'):
self._set() | Setter method for desire_distance, mapped from YANG variable /interface/fc_port/desire_distance (desire-distance-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_desire_distance is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_desire_distance() directly. | Below is the instruction that describes the task:
### Input:
Setter method for desire_distance, mapped from YANG variable /interface/fc_port/desire_distance (desire-distance-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_desire_distance is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_desire_distance() directly.
### Response:
def _set_desire_distance(self, v, load=False):
"""
Setter method for desire_distance, mapped from YANG variable /interface/fc_port/desire_distance (desire-distance-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_desire_distance is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_desire_distance() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="desire-distance", rest_name="desire-distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Desired distance for LS and LD mode.', u'hidden': u'full', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='desire-distance-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """desire_distance must be of a type compatible with desire-distance-type""",
'defined-type': "brocade-interface:desire-distance-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="desire-distance", rest_name="desire-distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Desired distance for LS and LD mode.', u'hidden': u'full', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='desire-distance-type', is_config=True)""",
})
self.__desire_distance = t
if hasattr(self, '_set'):
self._set() |
def root_hash(self):
"""Returns the root hash of this tree. (Only re-computed on change.)"""
if self.__root_hash is None:
self.__root_hash = (
self.__hasher._hash_fold(self.__hashes)
if self.__hashes else self.__hasher.hash_empty())
return self.__root_hash | Returns the root hash of this tree. (Only re-computed on change.) | Below is the instruction that describes the task:
### Input:
Returns the root hash of this tree. (Only re-computed on change.)
### Response:
def root_hash(self):
"""Returns the root hash of this tree. (Only re-computed on change.)"""
if self.__root_hash is None:
self.__root_hash = (
self.__hasher._hash_fold(self.__hashes)
if self.__hashes else self.__hasher.hash_empty())
return self.__root_hash |
def check_version(current_version: str):
""" Check periodically for a new release """
app_version = parse_version(current_version)
while True:
try:
_do_check_version(app_version)
except requests.exceptions.HTTPError as herr:
click.secho('Error while checking for version', fg='red')
print(herr)
except ValueError as verr:
click.secho('Error while checking the version', fg='red')
print(verr)
finally:
# repeat the process once every 3h
gevent.sleep(CHECK_VERSION_INTERVAL) | Check periodically for a new release | Below is the instruction that describes the task:
### Input:
Check periodically for a new release
### Response:
def check_version(current_version: str):
""" Check periodically for a new release """
app_version = parse_version(current_version)
while True:
try:
_do_check_version(app_version)
except requests.exceptions.HTTPError as herr:
click.secho('Error while checking for version', fg='red')
print(herr)
except ValueError as verr:
click.secho('Error while checking the version', fg='red')
print(verr)
finally:
# repeat the process once every 3h
gevent.sleep(CHECK_VERSION_INTERVAL) |
def _copy_and_clean_up_expectations_from_indexes(
self,
match_indexes,
discard_result_format_kwargs=True,
discard_include_configs_kwargs=True,
discard_catch_exceptions_kwargs=True,
):
"""Copies and cleans all expectations provided by their index in DataAsset._expectations_config.expectations.
Applies the _copy_and_clean_up_expectation method to multiple expectations, provided by their index in \
`DataAsset._expectations_config.expectations`. Returns a list of the copied and cleaned expectations.
Args:
match_indexes (List): \
Index numbers of the expectations from `expectation_config.expectations` to be copied and cleaned.
discard_result_format_kwargs (boolean): \
if True, will remove the kwarg `output_format` key-value pair from the copied expectation.
discard_include_configs_kwargs (boolean):
if True, will remove the kwarg `include_configs` key-value pair from the copied expectation.
discard_catch_exceptions_kwargs (boolean):
if True, will remove the kwarg `catch_exceptions` key-value pair from the copied expectation.
Returns:
A list of the copied expectations with `success_on_last_run` and other specified \
key-value pairs removed.
See also:
_copy_and_clean_expectation
"""
rval = []
for i in match_indexes:
rval.append(
self._copy_and_clean_up_expectation(
self._expectations_config.expectations[i],
discard_result_format_kwargs,
discard_include_configs_kwargs,
discard_catch_exceptions_kwargs,
)
)
return rval | Copies and cleans all expectations provided by their index in DataAsset._expectations_config.expectations.
Applies the _copy_and_clean_up_expectation method to multiple expectations, provided by their index in \
`DataAsset._expectations_config.expectations`. Returns a list of the copied and cleaned expectations.
Args:
match_indexes (List): \
Index numbers of the expectations from `expectation_config.expectations` to be copied and cleaned.
discard_result_format_kwargs (boolean): \
if True, will remove the kwarg `output_format` key-value pair from the copied expectation.
discard_include_configs_kwargs (boolean):
if True, will remove the kwarg `include_configs` key-value pair from the copied expectation.
discard_catch_exceptions_kwargs (boolean):
if True, will remove the kwarg `catch_exceptions` key-value pair from the copied expectation.
Returns:
A list of the copied expectations with `success_on_last_run` and other specified \
key-value pairs removed.
See also:
_copy_and_clean_expectation | Below is the instruction that describes the task:
### Input:
Copies and cleans all expectations provided by their index in DataAsset._expectations_config.expectations.
Applies the _copy_and_clean_up_expectation method to multiple expectations, provided by their index in \
`DataAsset._expectations_config.expectations`. Returns a list of the copied and cleaned expectations.
Args:
match_indexes (List): \
Index numbers of the expectations from `expectation_config.expectations` to be copied and cleaned.
discard_result_format_kwargs (boolean): \
if True, will remove the kwarg `output_format` key-value pair from the copied expectation.
discard_include_configs_kwargs (boolean):
if True, will remove the kwarg `include_configs` key-value pair from the copied expectation.
discard_catch_exceptions_kwargs (boolean):
if True, will remove the kwarg `catch_exceptions` key-value pair from the copied expectation.
Returns:
A list of the copied expectations with `success_on_last_run` and other specified \
key-value pairs removed.
See also:
_copy_and_clean_expectation
### Response:
def _copy_and_clean_up_expectations_from_indexes(
self,
match_indexes,
discard_result_format_kwargs=True,
discard_include_configs_kwargs=True,
discard_catch_exceptions_kwargs=True,
):
"""Copies and cleans all expectations provided by their index in DataAsset._expectations_config.expectations.
Applies the _copy_and_clean_up_expectation method to multiple expectations, provided by their index in \
`DataAsset._expectations_config.expectations`. Returns a list of the copied and cleaned expectations.
Args:
match_indexes (List): \
Index numbers of the expectations from `expectation_config.expectations` to be copied and cleaned.
discard_result_format_kwargs (boolean): \
if True, will remove the kwarg `output_format` key-value pair from the copied expectation.
discard_include_configs_kwargs (boolean):
if True, will remove the kwarg `include_configs` key-value pair from the copied expectation.
discard_catch_exceptions_kwargs (boolean):
if True, will remove the kwarg `catch_exceptions` key-value pair from the copied expectation.
Returns:
A list of the copied expectations with `success_on_last_run` and other specified \
key-value pairs removed.
See also:
_copy_and_clean_expectation
"""
rval = []
for i in match_indexes:
rval.append(
self._copy_and_clean_up_expectation(
self._expectations_config.expectations[i],
discard_result_format_kwargs,
discard_include_configs_kwargs,
discard_catch_exceptions_kwargs,
)
)
return rval |
def get_perm_codename(perm, fail_silently=True):
"""
Get permission codename from permission-string.
Examples
--------
>>> get_perm_codename('app_label.codename_model')
'codename_model'
>>> get_perm_codename('app_label.codename')
'codename'
>>> get_perm_codename('codename_model')
'codename_model'
>>> get_perm_codename('codename')
'codename'
>>> get_perm_codename('app_label.app_label.codename_model')
'app_label.codename_model'
"""
try:
perm = perm.split('.', 1)[1]
except IndexError as e:
if not fail_silently:
raise e
return perm | Get permission codename from permission-string.
Examples
--------
>>> get_perm_codename('app_label.codename_model')
'codename_model'
>>> get_perm_codename('app_label.codename')
'codename'
>>> get_perm_codename('codename_model')
'codename_model'
>>> get_perm_codename('codename')
'codename'
>>> get_perm_codename('app_label.app_label.codename_model')
'app_label.codename_model' | Below is the instruction that describes the task:
### Input:
Get permission codename from permission-string.
Examples
--------
>>> get_perm_codename('app_label.codename_model')
'codename_model'
>>> get_perm_codename('app_label.codename')
'codename'
>>> get_perm_codename('codename_model')
'codename_model'
>>> get_perm_codename('codename')
'codename'
>>> get_perm_codename('app_label.app_label.codename_model')
'app_label.codename_model'
### Response:
def get_perm_codename(perm, fail_silently=True):
"""
Get permission codename from permission-string.
Examples
--------
>>> get_perm_codename('app_label.codename_model')
'codename_model'
>>> get_perm_codename('app_label.codename')
'codename'
>>> get_perm_codename('codename_model')
'codename_model'
>>> get_perm_codename('codename')
'codename'
>>> get_perm_codename('app_label.app_label.codename_model')
'app_label.codename_model'
"""
try:
perm = perm.split('.', 1)[1]
except IndexError as e:
if not fail_silently:
raise e
return perm |
def get_files(dir_name):
"""Simple directory walker"""
return [(os.path.join('.', d), [os.path.join(d, f) for f in files]) for d, _, files in os.walk(dir_name)] | Simple directory walker | Below is the instruction that describes the task:
### Input:
Simple directory walker
### Response:
def get_files(dir_name):
"""Simple directory walker"""
return [(os.path.join('.', d), [os.path.join(d, f) for f in files]) for d, _, files in os.walk(dir_name)] |
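A small usage sketch (it creates a throwaway demo_pkg directory in the working directory) showing the (destination, [files]) tuples the walker above returns, the shape typically fed to setuptools-style data_files.

import os

os.makedirs('demo_pkg/data', exist_ok=True)
open('demo_pkg/data/readme.txt', 'w').close()
print(get_files('demo_pkg'))
# e.g. [('./demo_pkg', []), ('./demo_pkg/data', ['demo_pkg/data/readme.txt'])]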
def fix_e712(self, result):
"""Fix (trivial case of) comparison with boolean."""
(line_index, offset, target) = get_index_offset_contents(result,
self.source)
# Handle very easy "not" special cases.
if re.match(r'^\s*if [\w."\'\[\]]+ == False:$', target):
self.source[line_index] = re.sub(r'if ([\w."\'\[\]]+) == False:',
r'if not \1:', target, count=1)
elif re.match(r'^\s*if [\w."\'\[\]]+ != True:$', target):
self.source[line_index] = re.sub(r'if ([\w."\'\[\]]+) != True:',
r'if not \1:', target, count=1)
else:
right_offset = offset + 2
if right_offset >= len(target):
return []
left = target[:offset].rstrip()
center = target[offset:right_offset]
right = target[right_offset:].lstrip()
# Handle simple cases only.
new_right = None
if center.strip() == '==':
if re.match(r'\bTrue\b', right):
new_right = re.sub(r'\bTrue\b *', '', right, count=1)
elif center.strip() == '!=':
if re.match(r'\bFalse\b', right):
new_right = re.sub(r'\bFalse\b *', '', right, count=1)
if new_right is None:
return []
if new_right[0].isalnum():
new_right = ' ' + new_right
self.source[line_index] = left + new_right | Fix (trivial case of) comparison with boolean. | Below is the instruction that describes the task:
### Input:
Fix (trivial case of) comparison with boolean.
### Response:
def fix_e712(self, result):
"""Fix (trivial case of) comparison with boolean."""
(line_index, offset, target) = get_index_offset_contents(result,
self.source)
# Handle very easy "not" special cases.
if re.match(r'^\s*if [\w."\'\[\]]+ == False:$', target):
self.source[line_index] = re.sub(r'if ([\w."\'\[\]]+) == False:',
r'if not \1:', target, count=1)
elif re.match(r'^\s*if [\w."\'\[\]]+ != True:$', target):
self.source[line_index] = re.sub(r'if ([\w."\'\[\]]+) != True:',
r'if not \1:', target, count=1)
else:
right_offset = offset + 2
if right_offset >= len(target):
return []
left = target[:offset].rstrip()
center = target[offset:right_offset]
right = target[right_offset:].lstrip()
# Handle simple cases only.
new_right = None
if center.strip() == '==':
if re.match(r'\bTrue\b', right):
new_right = re.sub(r'\bTrue\b *', '', right, count=1)
elif center.strip() == '!=':
if re.match(r'\bFalse\b', right):
new_right = re.sub(r'\bFalse\b *', '', right, count=1)
if new_right is None:
return []
if new_right[0].isalnum():
new_right = ' ' + new_right
self.source[line_index] = left + new_right |
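To see what the trivial branch of the fixer above does, here is the same kind of substitution on standalone lines, with the character class simplified to \w+ for readability; E712 is the pycodestyle check for comparisons against True/False.

import re

print(re.sub(r'if (\w+) == False:', r'if not \1:', 'if flag == False:', count=1))   # if not flag:
print(re.sub(r'if (\w+) != True:', r'if not \1:', 'if flag != True:', count=1))     # if not flag: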
def field_cache_to_index_pattern(self, field_cache):
"""Return a .kibana index-pattern doc_type"""
mapping_dict = {}
mapping_dict['customFormats'] = "{}"
mapping_dict['title'] = self.index_pattern
# now post the data into .kibana
mapping_dict['fields'] = json.dumps(field_cache, separators=(',', ':'))
# in order to post, we need to create the post string
mapping_str = json.dumps(mapping_dict, separators=(',', ':'))
return mapping_str | Return a .kibana index-pattern doc_type | Below is the instruction that describes the task:
### Input:
Return a .kibana index-pattern doc_type
### Response:
def field_cache_to_index_pattern(self, field_cache):
"""Return a .kibana index-pattern doc_type"""
mapping_dict = {}
mapping_dict['customFormats'] = "{}"
mapping_dict['title'] = self.index_pattern
# now post the data into .kibana
mapping_dict['fields'] = json.dumps(field_cache, separators=(',', ':'))
# in order to post, we need to create the post string
mapping_str = json.dumps(mapping_dict, separators=(',', ':'))
return mapping_str |
def _validate_ports(reactor, ports):
"""
Internal helper for Onion services. Validates an incoming list of
port mappings and returns a list of strings suitable for passing
to other onion-services functions.
Accepts 3 different ways of specifying ports:
- list of ints: each int is the public port, local port random
- list of 2-tuples of ints: (public, local) ports.
- list of strings like "80 127.0.0.1:1234"
This is async in case it needs to ask for a random, unallocated
local port.
"""
if not isinstance(ports, (list, tuple)):
raise ValueError("'ports' must be a list of strings, ints or 2-tuples")
processed_ports = []
for port in ports:
if isinstance(port, (set, list, tuple)):
if len(port) != 2:
raise ValueError(
"'ports' must contain a single int or a 2-tuple of ints"
)
remote, local = port
try:
remote = int(remote)
except ValueError:
raise ValueError(
"'ports' has a tuple with a non-integer "
"component: {}".format(port)
)
try:
local = int(local)
except ValueError:
if local.startswith('unix:/'):
pass
else:
if ':' not in local:
raise ValueError(
"local port must be either an integer"
" or start with unix:/ or be an IP:port"
)
ip, port = local.split(':')
if not _is_non_public_numeric_address(ip):
log.msg(
"'{}' used as onion port doesn't appear to be a "
"local, numeric address".format(ip)
)
processed_ports.append(
"{} {}".format(remote, local)
)
else:
processed_ports.append(
"{} 127.0.0.1:{}".format(remote, local)
)
elif isinstance(port, (six.text_type, str)):
_validate_single_port_string(port)
processed_ports.append(port)
else:
try:
remote = int(port)
except (ValueError, TypeError):
raise ValueError(
"'ports' has a non-integer entry: {}".format(port)
)
local = yield available_tcp_port(reactor)
processed_ports.append(
"{} 127.0.0.1:{}".format(remote, local)
)
defer.returnValue(processed_ports) | Internal helper for Onion services. Validates an incoming list of
port mappings and returns a list of strings suitable for passing
to other onion-services functions.
Accepts 3 different ways of specifying ports:
- list of ints: each int is the public port, local port random
- list of 2-tuples of ints: (public, local) ports.
- list of strings like "80 127.0.0.1:1234"
This is async in case it needs to ask for a random, unallocated
local port. | Below is the instruction that describes the task:
### Input:
Internal helper for Onion services. Validates an incoming list of
port mappings and returns a list of strings suitable for passing
to other onion-services functions.
Accepts 3 different ways of specifying ports:
- list of ints: each int is the public port, local port random
- list of 2-tuples of ints: (public, local) ports.
- list of strings like "80 127.0.0.1:1234"
This is async in case it needs to ask for a random, unallocated
local port.
### Response:
def _validate_ports(reactor, ports):
"""
Internal helper for Onion services. Validates an incoming list of
port mappings and returns a list of strings suitable for passing
to other onion-services functions.
Accepts 3 different ways of specifying ports:
- list of ints: each int is the public port, local port random
- list of 2-tuples of ints: (public, local) ports.
- list of strings like "80 127.0.0.1:1234"
This is async in case it needs to ask for a random, unallocated
local port.
"""
if not isinstance(ports, (list, tuple)):
raise ValueError("'ports' must be a list of strings, ints or 2-tuples")
processed_ports = []
for port in ports:
if isinstance(port, (set, list, tuple)):
if len(port) != 2:
raise ValueError(
"'ports' must contain a single int or a 2-tuple of ints"
)
remote, local = port
try:
remote = int(remote)
except ValueError:
raise ValueError(
"'ports' has a tuple with a non-integer "
"component: {}".format(port)
)
try:
local = int(local)
except ValueError:
if local.startswith('unix:/'):
pass
else:
if ':' not in local:
raise ValueError(
"local port must be either an integer"
" or start with unix:/ or be an IP:port"
)
ip, port = local.split(':')
if not _is_non_public_numeric_address(ip):
log.msg(
"'{}' used as onion port doesn't appear to be a "
"local, numeric address".format(ip)
)
processed_ports.append(
"{} {}".format(remote, local)
)
else:
processed_ports.append(
"{} 127.0.0.1:{}".format(remote, local)
)
elif isinstance(port, (six.text_type, str)):
_validate_single_port_string(port)
processed_ports.append(port)
else:
try:
remote = int(port)
except (ValueError, TypeError):
raise ValueError(
"'ports' has a non-integer entry: {}".format(port)
)
local = yield available_tcp_port(reactor)
processed_ports.append(
"{} 127.0.0.1:{}".format(remote, local)
)
defer.returnValue(processed_ports) |
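A worked mapping (hypothetical values) of the accepted port spellings to the normalised strings the helper above is expected to yield; the bare-int form is omitted because its local port is a random free port chosen at run time.

expected = {
    (80, 8080): '80 127.0.0.1:8080',                       # (public, local) tuple of ints
    '80 unix:/tmp/onion.sock': '80 unix:/tmp/onion.sock',  # already-formatted string passes through
}
for spec, normalised in expected.items():
    print(spec, '->', normalised)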
def save_list(self, list_name, emails):
"""
Upload a list. The list import job is queued and will happen shortly after the API request.
http://docs.sailthru.com/api/list
@param list: list name
@param emails: List of email values or comma separated string
"""
data = {'list': list_name,
'emails': ','.join(emails) if isinstance(emails, list) else emails}
return self.api_post('list', data) | Upload a list. The list import job is queued and will happen shortly after the API request.
http://docs.sailthru.com/api/list
@param list: list name
@param emails: List of email values or comma separated string | Below is the instruction that describes the task:
### Input:
Upload a list. The list import job is queued and will happen shortly after the API request.
http://docs.sailthru.com/api/list
@param list: list name
@param emails: List of email values or comma separated string
### Response:
def save_list(self, list_name, emails):
"""
Upload a list. The list import job is queued and will happen shortly after the API request.
http://docs.sailthru.com/api/list
@param list: list name
@param emails: List of email values or comma separated string
"""
data = {'list': list_name,
'emails': ','.join(emails) if isinstance(emails, list) else emails}
return self.api_post('list', data) |
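Usage sketch for the method above, assuming client is an already-constructed instance of the API wrapper it belongs to; a Python list and a pre-joined comma-separated string are equivalent inputs.

client.save_list('newsletter', ['[email protected]', '[email protected]'])   # list form
client.save_list('newsletter', '[email protected],[email protected]')      # string form, same request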
def expected_record(self, node):
"""
Constructs the provenance record that would be saved in the given node
if the pipeline was run on the current state of the repository
Parameters
----------
node : arcana.repository.tree.TreeNode
A node of the Tree representation of the study data stored in the
repository (i.e. a Session, Visit, Subject or Tree node)
Returns
-------
expected_record : arcana.provenance.Record
The record that would be produced if the pipeline is run over the
study tree.
"""
exp_inputs = {}
# Get checksums/values of all inputs that would have been used in
# previous runs of an equivalent pipeline to compare with that saved
# in provenance to see if any have been updated.
for inpt in self.inputs: # @ReservedAssignment
# Get iterators present in the input that aren't in this node
# and need to be joined
iterators_to_join = (self.iterators(inpt.frequency) -
self.iterators(node.frequency))
if not iterators_to_join:
# No iterators to join so we can just extract the checksums
# of the corresponding input
exp_inputs[inpt.name] = inpt.collection.item(
node.subject_id, node.visit_id).checksums
elif len(iterators_to_join) == 1:
# Get list of checksums dicts for each node of the input
# frequency that relates to the current node
exp_inputs[inpt.name] = [
inpt.collection.item(n.subject_id, n.visit_id).checksums
for n in node.nodes(inpt.frequency)]
else:
# In the case where the node is the whole tree and the input
# is per_session, we need to create a list of lists to match
# how the checksums are joined in the processor
exp_inputs[inpt.name] = []
for subj in node.subjects:
exp_inputs[inpt.name].append([
inpt.collection.item(s.subject_id,
s.visit_id).checksums
for s in subj.sessions])
# Get checksums/value for all outputs of the pipeline. We are assuming
# that they exist here (otherwise they will be None)
exp_outputs = {
o.name: o.collection.item(node.subject_id, node.visit_id).checksums
for o in self.outputs}
exp_prov = copy(self.prov)
if PY2:
# Need to convert to unicode strings for Python 2
exp_inputs = json.loads(json.dumps(exp_inputs))
exp_outputs = json.loads(json.dumps(exp_outputs))
exp_prov['inputs'] = exp_inputs
exp_prov['outputs'] = exp_outputs
exp_prov['joined_ids'] = self._joined_ids()
return Record(
self.name, node.frequency, node.subject_id, node.visit_id,
self.study.name, exp_prov) | Constructs the provenance record that would be saved in the given node
if the pipeline was run on the current state of the repository
Parameters
----------
node : arcana.repository.tree.TreeNode
A node of the Tree representation of the study data stored in the
repository (i.e. a Session, Visit, Subject or Tree node)
Returns
-------
expected_record : arcana.provenance.Record
The record that would be produced if the pipeline is run over the
study tree. | Below is the instruction that describes the task:
### Input:
Constructs the provenance record that would be saved in the given node
if the pipeline was run on the current state of the repository
Parameters
----------
node : arcana.repository.tree.TreeNode
A node of the Tree representation of the study data stored in the
repository (i.e. a Session, Visit, Subject or Tree node)
Returns
-------
expected_record : arcana.provenance.Record
The record that would be produced if the pipeline is run over the
study tree.
### Response:
def expected_record(self, node):
"""
Constructs the provenance record that would be saved in the given node
if the pipeline was run on the current state of the repository
Parameters
----------
node : arcana.repository.tree.TreeNode
A node of the Tree representation of the study data stored in the
repository (i.e. a Session, Visit, Subject or Tree node)
Returns
-------
expected_record : arcana.provenance.Record
The record that would be produced if the pipeline is run over the
study tree.
"""
exp_inputs = {}
# Get checksums/values of all inputs that would have been used in
# previous runs of an equivalent pipeline to compare with that saved
# in provenance to see if any have been updated.
for inpt in self.inputs: # @ReservedAssignment
# Get iterators present in the input that aren't in this node
# and need to be joined
iterators_to_join = (self.iterators(inpt.frequency) -
self.iterators(node.frequency))
if not iterators_to_join:
# No iterators to join so we can just extract the checksums
# of the corresponding input
exp_inputs[inpt.name] = inpt.collection.item(
node.subject_id, node.visit_id).checksums
elif len(iterators_to_join) == 1:
# Get list of checksums dicts for each node of the input
# frequency that relates to the current node
exp_inputs[inpt.name] = [
inpt.collection.item(n.subject_id, n.visit_id).checksums
for n in node.nodes(inpt.frequency)]
else:
# In the case where the node is the whole tree and the input
# is per_session, we need to create a list of lists to match
# how the checksums are joined in the processor
exp_inputs[inpt.name] = []
for subj in node.subjects:
exp_inputs[inpt.name].append([
inpt.collection.item(s.subject_id,
s.visit_id).checksums
for s in subj.sessions])
# Get checksums/value for all outputs of the pipeline. We are assuming
# that they exist here (otherwise they will be None)
exp_outputs = {
o.name: o.collection.item(node.subject_id, node.visit_id).checksums
for o in self.outputs}
exp_prov = copy(self.prov)
if PY2:
# Need to convert to unicode strings for Python 2
exp_inputs = json.loads(json.dumps(exp_inputs))
exp_outputs = json.loads(json.dumps(exp_outputs))
exp_prov['inputs'] = exp_inputs
exp_prov['outputs'] = exp_outputs
exp_prov['joined_ids'] = self._joined_ids()
return Record(
self.name, node.frequency, node.subject_id, node.visit_id,
self.study.name, exp_prov) |
def generate_simple_chemical_query(self, name=None, chemical_formula=None, property_name=None, property_value=None,
property_min=None, property_max=None, property_units=None, reference_doi=None,
include_datasets=[], exclude_datasets=[], from_index=None, size=None):
"""
This method generates a :class:`PifSystemReturningQuery` object using the
supplied arguments. All arguments that accept lists have logical OR's on the queries that they generate.
This means that, for example, simple_chemical_search(name=['A', 'B']) will match records that have name
equal to 'A' or 'B'.
Results will be pulled into the extracted field of the :class:`PifSearchHit` objects that are returned. The
name will appear under the key "name", chemical formula under "chemical_formula", property name under
"property_name", value of the property under "property_value", units of the property under "property_units",
and reference DOI under "reference_doi".
This method is only meant for execution of very simple queries. More complex queries must use the search method
that accepts a :class:`PifSystemReturningQuery` object.
:param name: One or more strings with the names of the chemical system to match.
:type name: str or list of str
:param chemical_formula: One or more strings with the chemical formulas to match.
:type chemical_formula: str or list of str
:param property_name: One or more strings with the names of the property to match.
:type property_name: str or list of str
:param property_value: One or more strings or numbers with the exact values to match.
:type property_value: str or int or float or list of str or int or float
:param property_min: A single string or number with the minimum value to match.
:type property_min: str or int or float
:param property_max: A single string or number with the maximum value to match.
:type property_max: str or int or float
:param property_units: One or more strings with the property units to match.
:type property_units: str or list of str
:param reference_doi: One or more strings with the DOI to match.
:type reference_doi: str or list of str
:param include_datasets: One or more integers with dataset IDs to match.
:type include_datasets: int or list of int
:param exclude_datasets: One or more integers with dataset IDs that must not match.
:type exclude_datasets: int or list of int
:param from_index: Index of the first record to match.
:type from_index: int
:param size: Total number of records to return.
:type size: int
:return: A query to be submitted with the pif_search method
:rtype: :class:`PifSystemReturningQuery`
"""
pif_system_query = PifSystemQuery()
pif_system_query.names = FieldQuery(
extract_as='name',
filter=[Filter(equal=i) for i in self._get_list(name)])
pif_system_query.chemical_formula = ChemicalFieldQuery(
extract_as='chemical_formula',
filter=[ChemicalFilter(equal=i) for i in self._get_list(chemical_formula)])
pif_system_query.references = ReferenceQuery(doi=FieldQuery(
extract_as='reference_doi',
filter=[Filter(equal=i) for i in self._get_list(reference_doi)]))
# Generate the parts of the property query
property_name_query = FieldQuery(
extract_as='property_name',
filter=[Filter(equal=i) for i in self._get_list(property_name)])
property_units_query = FieldQuery(
extract_as='property_units',
filter=[Filter(equal=i) for i in self._get_list(property_units)])
property_value_query = FieldQuery(
extract_as='property_value',
filter=[])
for i in self._get_list(property_value):
property_value_query.filter.append(Filter(equal=i))
if property_min is not None or property_max is not None:
property_value_query.filter.append(Filter(min=property_min, max=property_max))
# Generate the full property query
pif_system_query.properties = PropertyQuery(
name=property_name_query,
value=property_value_query,
units=property_units_query)
# Generate the dataset query
dataset_query = list()
if include_datasets:
dataset_query.append(DatasetQuery(logic='MUST', id=[Filter(equal=i) for i in include_datasets]))
if exclude_datasets:
dataset_query.append(DatasetQuery(logic='MUST_NOT', id=[Filter(equal=i) for i in exclude_datasets]))
# Run the query
pif_system_returning_query = PifSystemReturningQuery(
query=DataQuery(
system=pif_system_query,
dataset=dataset_query),
from_index=from_index,
size=size,
score_relevance=True)
return pif_system_returning_query | This method generates a :class:`PifSystemReturningQuery` object using the
supplied arguments. All arguments that accept lists have logical OR's on the queries that they generate.
This means that, for example, simple_chemical_search(name=['A', 'B']) will match records that have name
equal to 'A' or 'B'.
Results will be pulled into the extracted field of the :class:`PifSearchHit` objects that are returned. The
name will appear under the key "name", chemical formula under "chemical_formula", property name under
"property_name", value of the property under "property_value", units of the property under "property_units",
and reference DOI under "reference_doi".
This method is only meant for execution of very simple queries. More complex queries must use the search method
that accepts a :class:`PifSystemReturningQuery` object.
:param name: One or more strings with the names of the chemical system to match.
:type name: str or list of str
:param chemical_formula: One or more strings with the chemical formulas to match.
:type chemical_formula: str or list of str
:param property_name: One or more strings with the names of the property to match.
:type property_name: str or list of str
:param property_value: One or more strings or numbers with the exact values to match.
:type property_value: str or int or float or list of str or int or float
:param property_min: A single string or number with the minimum value to match.
:type property_min: str or int or float
:param property_max: A single string or number with the maximum value to match.
:type property_max: str or int or float
:param property_units: One or more strings with the property units to match.
:type property_units: str or list of str
:param reference_doi: One or more strings with the DOI to match.
:type reference_doi: str or list of str
:param include_datasets: One or more integers with dataset IDs to match.
:type include_datasets: int or list of int
:param exclude_datasets: One or more integers with dataset IDs that must not match.
:type exclude_datasets: int or list of int
:param from_index: Index of the first record to match.
:type from_index: int
:param size: Total number of records to return.
:type size: int
:return: A query to be submitted with the pif_search method
:rtype: :class:`PifSystemReturningQuery` | Below is the the instruction that describes the task:
### Input:
This method generates a :class:`PifSystemReturningQuery` object using the
supplied arguments. All arguments that accept lists have logical OR's on the queries that they generate.
This means that, for example, simple_chemical_search(name=['A', 'B']) will match records that have name
equal to 'A' or 'B'.
Results will be pulled into the extracted field of the :class:`PifSearchHit` objects that are returned. The
name will appear under the key "name", chemical formula under "chemical_formula", property name under
"property_name", value of the property under "property_value", units of the property under "property_units",
and reference DOI under "reference_doi".
This method is only meant for execution of very simple queries. More complex queries must use the search method
that accepts a :class:`PifSystemReturningQuery` object.
:param name: One or more strings with the names of the chemical system to match.
:type name: str or list of str
:param chemical_formula: One or more strings with the chemical formulas to match.
:type chemical_formula: str or list of str
:param property_name: One or more strings with the names of the property to match.
:type property_name: str or list of str
:param property_value: One or more strings or numbers with the exact values to match.
:type property_value: str or int or float or list of str or int or float
:param property_min: A single string or number with the minimum value to match.
:type property_min: str or int or float
:param property_max: A single string or number with the maximum value to match.
:type property_max: str or int or float
:param property_units: One or more strings with the property units to match.
:type property_units: str or list of str
:param reference_doi: One or more strings with the DOI to match.
:type reference_doi: str or list of str
:param include_datasets: One or more integers with dataset IDs to match.
:type include_datasets: int or list of int
:param exclude_datasets: One or more integers with dataset IDs that must not match.
:type exclude_datasets: int or list of int
:param from_index: Index of the first record to match.
:type from_index: int
:param size: Total number of records to return.
:type size: int
:return: A query to be submitted with the pif_search method
:rtype: :class:`PifSystemReturningQuery`
### Response:
def generate_simple_chemical_query(self, name=None, chemical_formula=None, property_name=None, property_value=None,
property_min=None, property_max=None, property_units=None, reference_doi=None,
include_datasets=[], exclude_datasets=[], from_index=None, size=None):
"""
This method generates a :class:`PifSystemReturningQuery` object using the
supplied arguments. All arguments that accept lists have logical OR's on the queries that they generate.
This means that, for example, simple_chemical_search(name=['A', 'B']) will match records that have name
equal to 'A' or 'B'.
Results will be pulled into the extracted field of the :class:`PifSearchHit` objects that are returned. The
name will appear under the key "name", chemical formula under "chemical_formula", property name under
"property_name", value of the property under "property_value", units of the property under "property_units",
and reference DOI under "reference_doi".
This method is only meant for execution of very simple queries. More complex queries must use the search method
that accepts a :class:`PifSystemReturningQuery` object.
:param name: One or more strings with the names of the chemical system to match.
:type name: str or list of str
:param chemical_formula: One or more strings with the chemical formulas to match.
:type chemical_formula: str or list of str
:param property_name: One or more strings with the names of the property to match.
:type property_name: str or list of str
:param property_value: One or more strings or numbers with the exact values to match.
:type property_value: str or int or float or list of str or int or float
:param property_min: A single string or number with the minimum value to match.
:type property_min: str or int or float
:param property_max: A single string or number with the maximum value to match.
:type property_max: str or int or float
:param property_units: One or more strings with the property units to match.
:type property_units: str or list of str
:param reference_doi: One or more strings with the DOI to match.
    :type reference_doi: str or list of str
:param include_datasets: One or more integers with dataset IDs to match.
:type include_datasets: int or list of int
:param exclude_datasets: One or more integers with dataset IDs that must not match.
:type exclude_datasets: int or list of int
:param from_index: Index of the first record to match.
:type from_index: int
:param size: Total number of records to return.
:type size: int
    :return: A query to be submitted with the pif_search method
:rtype: :class:`PifSystemReturningQuery`
"""
pif_system_query = PifSystemQuery()
pif_system_query.names = FieldQuery(
extract_as='name',
filter=[Filter(equal=i) for i in self._get_list(name)])
pif_system_query.chemical_formula = ChemicalFieldQuery(
extract_as='chemical_formula',
filter=[ChemicalFilter(equal=i) for i in self._get_list(chemical_formula)])
pif_system_query.references = ReferenceQuery(doi=FieldQuery(
extract_as='reference_doi',
filter=[Filter(equal=i) for i in self._get_list(reference_doi)]))
# Generate the parts of the property query
property_name_query = FieldQuery(
extract_as='property_name',
filter=[Filter(equal=i) for i in self._get_list(property_name)])
property_units_query = FieldQuery(
extract_as='property_units',
filter=[Filter(equal=i) for i in self._get_list(property_units)])
property_value_query = FieldQuery(
extract_as='property_value',
filter=[])
for i in self._get_list(property_value):
property_value_query.filter.append(Filter(equal=i))
if property_min is not None or property_max is not None:
property_value_query.filter.append(Filter(min=property_min, max=property_max))
# Generate the full property query
pif_system_query.properties = PropertyQuery(
name=property_name_query,
value=property_value_query,
units=property_units_query)
# Generate the dataset query
dataset_query = list()
if include_datasets:
dataset_query.append(DatasetQuery(logic='MUST', id=[Filter(equal=i) for i in include_datasets]))
if exclude_datasets:
dataset_query.append(DatasetQuery(logic='MUST_NOT', id=[Filter(equal=i) for i in exclude_datasets]))
# Run the query
pif_system_returning_query = PifSystemReturningQuery(
query=DataQuery(
system=pif_system_query,
dataset=dataset_query),
from_index=from_index,
size=size,
score_relevance=True)
return pif_system_returning_query |
def giving_up(self, message):
"""
Called when a message has been received where ``msg.attempts > max_tries``
This is useful to subclass and override to perform a task (such as writing to disk, etc.)
:param message: the :class:`nsq.Message` received
"""
logger.warning('[%s] giving up on message %s after %d tries (max:%d) %r',
self.name, message.id, message.attempts, self.max_tries, message.body) | Called when a message has been received where ``msg.attempts > max_tries``
This is useful to subclass and override to perform a task (such as writing to disk, etc.)
:param message: the :class:`nsq.Message` received | Below is the the instruction that describes the task:
### Input:
Called when a message has been received where ``msg.attempts > max_tries``
This is useful to subclass and override to perform a task (such as writing to disk, etc.)
:param message: the :class:`nsq.Message` received
### Response:
def giving_up(self, message):
"""
Called when a message has been received where ``msg.attempts > max_tries``
This is useful to subclass and override to perform a task (such as writing to disk, etc.)
:param message: the :class:`nsq.Message` received
"""
logger.warning('[%s] giving up on message %s after %d tries (max:%d) %r',
self.name, message.id, message.attempts, self.max_tries, message.body) |
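The docstring above explicitly invites subclassing to persist abandoned messages. A minimal, hypothetical sketch of that idea, assuming the pynsq package this Reader hook appears to belong to (the DiskBackupReader name and the giveups.log path are illustrative, not from the original):

import nsq  # assumes pynsq is installed

class DiskBackupReader(nsq.Reader):
    def giving_up(self, message):
        # Persist the abandoned message body before falling back to the
        # default behaviour (logging a warning).
        with open('giveups.log', 'ab') as fh:
            fh.write(message.body + b'\n')
        super(DiskBackupReader, self).giving_up(message)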
def save(self, *args, **kwargs):
"""
This method autogenerates the auto_generated_description field
"""
# Cache basic data
self.cache_data()
# Ensure slug doesn't change
if self.id is not None:
db_company = Company.objects.get(id=self.id)
if self.slug_name != db_company.slug_name:
raise ValueError("Cannot reset slug_name")
if str(self.trade_name).strip() == "":
self.trade_name = None
# Short description check
if len(str(self.short_description)) > 370:
raise AssertionError("Short description must be no more than 370 characters")
if self.sub_industry is not None:
# Cache GICS
self.industry = self.sub_industry.industry
self.industry_group = self.sub_industry.industry.industry_group
self.sector = self.sub_industry.industry.industry_group.sector
# Cache GICS names
self.sub_industry_name = self.sub_industry.name
self.industry_name = self.industry.name
self.industry_group_name = self.industry_group.name
self.sector_name = self.sector.name
# Call save method
super(Company, self).save(*args, **kwargs) | This method autogenerates the auto_generated_description field | Below is the the instruction that describes the task:
### Input:
This method autogenerates the auto_generated_description field
### Response:
def save(self, *args, **kwargs):
"""
This method autogenerates the auto_generated_description field
"""
# Cache basic data
self.cache_data()
# Ensure slug doesn't change
if self.id is not None:
db_company = Company.objects.get(id=self.id)
if self.slug_name != db_company.slug_name:
raise ValueError("Cannot reset slug_name")
if str(self.trade_name).strip() == "":
self.trade_name = None
# Short description check
if len(str(self.short_description)) > 370:
raise AssertionError("Short description must be no more than 370 characters")
if self.sub_industry is not None:
# Cache GICS
self.industry = self.sub_industry.industry
self.industry_group = self.sub_industry.industry.industry_group
self.sector = self.sub_industry.industry.industry_group.sector
# Cache GICS names
self.sub_industry_name = self.sub_industry.name
self.industry_name = self.industry.name
self.industry_group_name = self.industry_group.name
self.sector_name = self.sector.name
# Call save method
super(Company, self).save(*args, **kwargs) |
def create_arguments(primary, pyfunction, call_node, scope):
"""A factory for creating `Arguments`"""
args = list(call_node.args)
args.extend(call_node.keywords)
called = call_node.func
# XXX: Handle constructors
if _is_method_call(primary, pyfunction) and \
isinstance(called, ast.Attribute):
args.insert(0, called.value)
return Arguments(args, scope) | A factory for creating `Arguments` | Below is the the instruction that describes the task:
### Input:
A factory for creating `Arguments`
### Response:
def create_arguments(primary, pyfunction, call_node, scope):
"""A factory for creating `Arguments`"""
args = list(call_node.args)
args.extend(call_node.keywords)
called = call_node.func
# XXX: Handle constructors
if _is_method_call(primary, pyfunction) and \
isinstance(called, ast.Attribute):
args.insert(0, called.value)
return Arguments(args, scope) |
def _set_vrf(self, v, load=False):
"""
Setter method for vrf, mapped from YANG variable /nas/server_ip/vrf (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_vrf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vrf() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("vrf_name",vrf.vrf, yang_name="vrf", rest_name="vrf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf-name', extensions={u'tailf-common': {u'info': u'Virtual Routing and Forwarding', u'cli-suppress-mode': None, u'callpoint': u'qos_nas_serverip_vrf'}}), is_container='list', yang_name="vrf", rest_name="vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual Routing and Forwarding', u'cli-suppress-mode': None, u'callpoint': u'qos_nas_serverip_vrf'}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vrf must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("vrf_name",vrf.vrf, yang_name="vrf", rest_name="vrf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf-name', extensions={u'tailf-common': {u'info': u'Virtual Routing and Forwarding', u'cli-suppress-mode': None, u'callpoint': u'qos_nas_serverip_vrf'}}), is_container='list', yang_name="vrf", rest_name="vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual Routing and Forwarding', u'cli-suppress-mode': None, u'callpoint': u'qos_nas_serverip_vrf'}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)""",
})
self.__vrf = t
if hasattr(self, '_set'):
self._set() | Setter method for vrf, mapped from YANG variable /nas/server_ip/vrf (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_vrf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vrf() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for vrf, mapped from YANG variable /nas/server_ip/vrf (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_vrf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vrf() directly.
### Response:
def _set_vrf(self, v, load=False):
"""
Setter method for vrf, mapped from YANG variable /nas/server_ip/vrf (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_vrf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vrf() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("vrf_name",vrf.vrf, yang_name="vrf", rest_name="vrf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf-name', extensions={u'tailf-common': {u'info': u'Virtual Routing and Forwarding', u'cli-suppress-mode': None, u'callpoint': u'qos_nas_serverip_vrf'}}), is_container='list', yang_name="vrf", rest_name="vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual Routing and Forwarding', u'cli-suppress-mode': None, u'callpoint': u'qos_nas_serverip_vrf'}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vrf must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("vrf_name",vrf.vrf, yang_name="vrf", rest_name="vrf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf-name', extensions={u'tailf-common': {u'info': u'Virtual Routing and Forwarding', u'cli-suppress-mode': None, u'callpoint': u'qos_nas_serverip_vrf'}}), is_container='list', yang_name="vrf", rest_name="vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual Routing and Forwarding', u'cli-suppress-mode': None, u'callpoint': u'qos_nas_serverip_vrf'}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)""",
})
self.__vrf = t
if hasattr(self, '_set'):
self._set() |
def detect_mobile(view):
"""View Decorator that adds a "mobile" attribute to the request which is
True or False depending on whether the request should be considered
to come from a small-screen device such as a phone or a PDA"""
@wraps(view)
def detected(request, *args, **kwargs):
MobileDetectionMiddleware.process_request(request)
return view(request, *args, **kwargs)
detected.__doc__ = '%s\n[Wrapped by detect_mobile which detects if the request is from a phone]' % view.__doc__
return detected | View Decorator that adds a "mobile" attribute to the request which is
True or False depending on whether the request should be considered
to come from a small-screen device such as a phone or a PDA | Below is the the instruction that describes the task:
### Input:
View Decorator that adds a "mobile" attribute to the request which is
True or False depending on whether the request should be considered
to come from a small-screen device such as a phone or a PDA
### Response:
def detect_mobile(view):
"""View Decorator that adds a "mobile" attribute to the request which is
True or False depending on whether the request should be considered
to come from a small-screen device such as a phone or a PDA"""
@wraps(view)
def detected(request, *args, **kwargs):
MobileDetectionMiddleware.process_request(request)
return view(request, *args, **kwargs)
detected.__doc__ = '%s\n[Wrapped by detect_mobile which detects if the request is from a phone]' % view.__doc__
return detected |
def prepare_build_dir(self):
'''Ensure that a build dir exists for the recipe. This same single
dir will be used for building all different archs.'''
self.build_dir = self.get_build_dir()
self.common_dir = self.get_common_dir()
copy_files(join(self.bootstrap_dir, 'build'), self.build_dir)
copy_files(join(self.common_dir, 'build'), self.build_dir,
override=False)
if self.ctx.symlink_java_src:
info('Symlinking java src instead of copying')
shprint(sh.rm, '-r', join(self.build_dir, 'src'))
shprint(sh.mkdir, join(self.build_dir, 'src'))
for dirn in listdir(join(self.bootstrap_dir, 'build', 'src')):
shprint(sh.ln, '-s', join(self.bootstrap_dir, 'build', 'src', dirn),
join(self.build_dir, 'src'))
with current_directory(self.build_dir):
with open('project.properties', 'w') as fileh:
fileh.write('target=android-{}'.format(self.ctx.android_api)) | Ensure that a build dir exists for the recipe. This same single
dir will be used for building all different archs. | Below is the the instruction that describes the task:
### Input:
Ensure that a build dir exists for the recipe. This same single
dir will be used for building all different archs.
### Response:
def prepare_build_dir(self):
'''Ensure that a build dir exists for the recipe. This same single
dir will be used for building all different archs.'''
self.build_dir = self.get_build_dir()
self.common_dir = self.get_common_dir()
copy_files(join(self.bootstrap_dir, 'build'), self.build_dir)
copy_files(join(self.common_dir, 'build'), self.build_dir,
override=False)
if self.ctx.symlink_java_src:
info('Symlinking java src instead of copying')
shprint(sh.rm, '-r', join(self.build_dir, 'src'))
shprint(sh.mkdir, join(self.build_dir, 'src'))
for dirn in listdir(join(self.bootstrap_dir, 'build', 'src')):
shprint(sh.ln, '-s', join(self.bootstrap_dir, 'build', 'src', dirn),
join(self.build_dir, 'src'))
with current_directory(self.build_dir):
with open('project.properties', 'w') as fileh:
fileh.write('target=android-{}'.format(self.ctx.android_api)) |
def int_to_array(i, length=2):
"""Convert an length byte integer to an array of bytes."""
res = []
for dummy in range(0, length):
res.append(i & 0xff)
i = i >> 8
    return reversed(res) | Convert a length-byte integer to an array of bytes. | Below is the the instruction that describes the task:
### Input:
Convert a length-byte integer to an array of bytes.
### Response:
def int_to_array(i, length=2):
"""Convert an length byte integer to an array of bytes."""
res = []
for dummy in range(0, length):
res.append(i & 0xff)
i = i >> 8
return reversed(res) |
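A quick usage note, assuming the int_to_array function above is in scope: bytes come out most-significant first, and the function returns a reversed iterator, so wrap it in list() to inspect the result.

print(list(int_to_array(0x1234)))        # [18, 52], i.e. [0x12, 0x34]
print(list(int_to_array(1, length=4)))   # [0, 0, 0, 1]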
def create_upload_and_chunk_url(self, project_id, path_data, hash_data, remote_filename=None,
storage_provider_id=None):
"""
        Create a non-chunked upload that returns upload id and upload url. This type of upload doesn't allow
additional upload urls. For single chunk files this method is more efficient than
create_upload/create_file_chunk_url.
:param project_id: str: uuid of the project
:param path_data: PathData: holds file system data about the file we are uploading
:param hash_data: HashData: contains hash alg and value for the file we are uploading
:param remote_filename: str: name to use for our remote file (defaults to path_data basename otherwise)
:param storage_provider_id:str: optional storage provider id
:return: str, dict: uuid for the upload, upload chunk url dict
"""
upload_response = self._create_upload(project_id, path_data, hash_data, remote_filename=remote_filename,
storage_provider_id=storage_provider_id, chunked=False)
        return upload_response['id'], upload_response['signed_url'] | Create a non-chunked upload that returns upload id and upload url. This type of upload doesn't allow
additional upload urls. For single chunk files this method is more efficient than
create_upload/create_file_chunk_url.
:param project_id: str: uuid of the project
:param path_data: PathData: holds file system data about the file we are uploading
:param hash_data: HashData: contains hash alg and value for the file we are uploading
:param remote_filename: str: name to use for our remote file (defaults to path_data basename otherwise)
:param storage_provider_id:str: optional storage provider id
:return: str, dict: uuid for the upload, upload chunk url dict | Below is the the instruction that describes the task:
### Input:
Create a non-chunked upload that returns upload id and upload url. This type of upload doesn't allow
additional upload urls. For single chunk files this method is more efficient than
create_upload/create_file_chunk_url.
:param project_id: str: uuid of the project
:param path_data: PathData: holds file system data about the file we are uploading
:param hash_data: HashData: contains hash alg and value for the file we are uploading
:param remote_filename: str: name to use for our remote file (defaults to path_data basename otherwise)
:param storage_provider_id:str: optional storage provider id
:return: str, dict: uuid for the upload, upload chunk url dict
### Response:
def create_upload_and_chunk_url(self, project_id, path_data, hash_data, remote_filename=None,
storage_provider_id=None):
"""
        Create a non-chunked upload that returns upload id and upload url. This type of upload doesn't allow
additional upload urls. For single chunk files this method is more efficient than
create_upload/create_file_chunk_url.
:param project_id: str: uuid of the project
:param path_data: PathData: holds file system data about the file we are uploading
:param hash_data: HashData: contains hash alg and value for the file we are uploading
:param remote_filename: str: name to use for our remote file (defaults to path_data basename otherwise)
:param storage_provider_id:str: optional storage provider id
:return: str, dict: uuid for the upload, upload chunk url dict
"""
upload_response = self._create_upload(project_id, path_data, hash_data, remote_filename=remote_filename,
storage_provider_id=storage_provider_id, chunked=False)
return upload_response['id'], upload_response['signed_url'] |
def apply(self, strain, detector_name, f_lower=None, distance_scale=1,
simulation_ids=None, inj_filter_rejector=None):
"""Add injections (as seen by a particular detector) to a time series.
Parameters
----------
strain : TimeSeries
Time series to inject signals into, of type float32 or float64.
detector_name : string
Name of the detector used for projecting injections.
f_lower : {None, float}, optional
Low-frequency cutoff for injected signals. If None, use value
provided by each injection.
distance_scale: {1, float}, optional
Factor to scale the distance of an injection with. The default is
no scaling.
simulation_ids: iterable, optional
If given, only inject signals with the given simulation IDs.
inj_filter_rejector: InjFilterRejector instance; optional, default=None
If given send each injected waveform to the InjFilterRejector
instance so that it can store a reduced representation of that
injection if necessary.
Returns
-------
None
Raises
------
TypeError
For invalid types of `strain`.
"""
if strain.dtype not in (float32, float64):
raise TypeError("Strain dtype must be float32 or float64, not " \
+ str(strain.dtype))
lalstrain = strain.lal()
earth_travel_time = lal.REARTH_SI / lal.C_SI
t0 = float(strain.start_time) - earth_travel_time
t1 = float(strain.end_time) + earth_travel_time
# pick lalsimulation injection function
add_injection = injection_func_map[strain.dtype]
injections = self.table
if simulation_ids:
injections = [inj for inj in injections \
if inj.simulation_id in simulation_ids]
injection_parameters = []
for inj in injections:
if f_lower is None:
f_l = inj.f_lower
else:
f_l = f_lower
# roughly estimate if the injection may overlap with the segment
# Add 2s to end_time to account for ringdown and light-travel delay
end_time = inj.get_time_geocent() + 2
inj_length = sim.SimInspiralTaylorLength(
strain.delta_t, inj.mass1 * lal.MSUN_SI,
inj.mass2 * lal.MSUN_SI, f_l, 0)
# Start time is taken as twice approx waveform length with a 1s
# safety buffer
start_time = inj.get_time_geocent() - 2 * (inj_length+1)
if end_time < t0 or start_time > t1:
continue
signal = self.make_strain_from_inj_object(inj, strain.delta_t,
detector_name, f_lower=f_l, distance_scale=distance_scale)
if float(signal.start_time) > t1:
continue
signal = signal.astype(strain.dtype)
signal_lal = signal.lal()
add_injection(lalstrain, signal_lal, None)
injection_parameters.append(inj)
if inj_filter_rejector is not None:
sid = inj.simulation_id
inj_filter_rejector.generate_short_inj_from_inj(signal, sid)
strain.data[:] = lalstrain.data.data[:]
injected = copy.copy(self)
injected.table = lsctables.SimInspiralTable()
injected.table += injection_parameters
if inj_filter_rejector is not None:
inj_filter_rejector.injection_params = injected
return injected | Add injections (as seen by a particular detector) to a time series.
Parameters
----------
strain : TimeSeries
Time series to inject signals into, of type float32 or float64.
detector_name : string
Name of the detector used for projecting injections.
f_lower : {None, float}, optional
Low-frequency cutoff for injected signals. If None, use value
provided by each injection.
distance_scale: {1, float}, optional
Factor to scale the distance of an injection with. The default is
no scaling.
simulation_ids: iterable, optional
If given, only inject signals with the given simulation IDs.
inj_filter_rejector: InjFilterRejector instance; optional, default=None
If given send each injected waveform to the InjFilterRejector
instance so that it can store a reduced representation of that
injection if necessary.
Returns
-------
None
Raises
------
TypeError
For invalid types of `strain`. | Below is the the instruction that describes the task:
### Input:
Add injections (as seen by a particular detector) to a time series.
Parameters
----------
strain : TimeSeries
Time series to inject signals into, of type float32 or float64.
detector_name : string
Name of the detector used for projecting injections.
f_lower : {None, float}, optional
Low-frequency cutoff for injected signals. If None, use value
provided by each injection.
distance_scale: {1, float}, optional
Factor to scale the distance of an injection with. The default is
no scaling.
simulation_ids: iterable, optional
If given, only inject signals with the given simulation IDs.
inj_filter_rejector: InjFilterRejector instance; optional, default=None
If given send each injected waveform to the InjFilterRejector
instance so that it can store a reduced representation of that
injection if necessary.
Returns
-------
None
Raises
------
TypeError
For invalid types of `strain`.
### Response:
def apply(self, strain, detector_name, f_lower=None, distance_scale=1,
simulation_ids=None, inj_filter_rejector=None):
"""Add injections (as seen by a particular detector) to a time series.
Parameters
----------
strain : TimeSeries
Time series to inject signals into, of type float32 or float64.
detector_name : string
Name of the detector used for projecting injections.
f_lower : {None, float}, optional
Low-frequency cutoff for injected signals. If None, use value
provided by each injection.
distance_scale: {1, float}, optional
Factor to scale the distance of an injection with. The default is
no scaling.
simulation_ids: iterable, optional
If given, only inject signals with the given simulation IDs.
inj_filter_rejector: InjFilterRejector instance; optional, default=None
If given send each injected waveform to the InjFilterRejector
instance so that it can store a reduced representation of that
injection if necessary.
Returns
-------
None
Raises
------
TypeError
For invalid types of `strain`.
"""
if strain.dtype not in (float32, float64):
raise TypeError("Strain dtype must be float32 or float64, not " \
+ str(strain.dtype))
lalstrain = strain.lal()
earth_travel_time = lal.REARTH_SI / lal.C_SI
t0 = float(strain.start_time) - earth_travel_time
t1 = float(strain.end_time) + earth_travel_time
# pick lalsimulation injection function
add_injection = injection_func_map[strain.dtype]
injections = self.table
if simulation_ids:
injections = [inj for inj in injections \
if inj.simulation_id in simulation_ids]
injection_parameters = []
for inj in injections:
if f_lower is None:
f_l = inj.f_lower
else:
f_l = f_lower
# roughly estimate if the injection may overlap with the segment
# Add 2s to end_time to account for ringdown and light-travel delay
end_time = inj.get_time_geocent() + 2
inj_length = sim.SimInspiralTaylorLength(
strain.delta_t, inj.mass1 * lal.MSUN_SI,
inj.mass2 * lal.MSUN_SI, f_l, 0)
# Start time is taken as twice approx waveform length with a 1s
# safety buffer
start_time = inj.get_time_geocent() - 2 * (inj_length+1)
if end_time < t0 or start_time > t1:
continue
signal = self.make_strain_from_inj_object(inj, strain.delta_t,
detector_name, f_lower=f_l, distance_scale=distance_scale)
if float(signal.start_time) > t1:
continue
signal = signal.astype(strain.dtype)
signal_lal = signal.lal()
add_injection(lalstrain, signal_lal, None)
injection_parameters.append(inj)
if inj_filter_rejector is not None:
sid = inj.simulation_id
inj_filter_rejector.generate_short_inj_from_inj(signal, sid)
strain.data[:] = lalstrain.data.data[:]
injected = copy.copy(self)
injected.table = lsctables.SimInspiralTable()
injected.table += injection_parameters
if inj_filter_rejector is not None:
inj_filter_rejector.injection_params = injected
return injected |
def getElementText(self, node, preserve_ws=None):
"""Return the text value of an xml element node. Leading and trailing
whitespace is stripped from the value unless the preserve_ws flag
is passed with a true value."""
result = []
for child in node.childNodes:
nodetype = child.nodeType
if nodetype == child.TEXT_NODE or \
nodetype == child.CDATA_SECTION_NODE:
result.append(child.nodeValue)
value = join(result, '')
if preserve_ws is None:
value = strip(value)
return value | Return the text value of an xml element node. Leading and trailing
whitespace is stripped from the value unless the preserve_ws flag
is passed with a true value. | Below is the the instruction that describes the task:
### Input:
Return the text value of an xml element node. Leading and trailing
whitespace is stripped from the value unless the preserve_ws flag
is passed with a true value.
### Response:
def getElementText(self, node, preserve_ws=None):
"""Return the text value of an xml element node. Leading and trailing
whitespace is stripped from the value unless the preserve_ws flag
is passed with a true value."""
result = []
for child in node.childNodes:
nodetype = child.nodeType
if nodetype == child.TEXT_NODE or \
nodetype == child.CDATA_SECTION_NODE:
result.append(child.nodeValue)
value = join(result, '')
if preserve_ws is None:
value = strip(value)
return value |
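The method above relies on the old Python 2 string functions join() and strip(); a self-contained Python 3 rendering of the same logic (the snake_case name get_element_text is hypothetical) might look like this:

from xml.dom.minidom import parseString

def get_element_text(node, preserve_ws=None):
    # Concatenate the values of direct text/CDATA children only.
    parts = [child.nodeValue for child in node.childNodes
             if child.nodeType in (child.TEXT_NODE, child.CDATA_SECTION_NODE)]
    value = ''.join(parts)
    if preserve_ws is None:
        value = value.strip()
    return value

doc = parseString('<greeting>  hello world  </greeting>')
print(repr(get_element_text(doc.documentElement)))        # 'hello world'
print(repr(get_element_text(doc.documentElement, True)))  # '  hello world  '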
def set_device_offset(self, x_offset, y_offset):
""" Sets an offset that is added to the device coordinates
determined by the CTM when drawing to surface.
One use case for this method is
when we want to create a :class:`Surface` that redirects drawing
for a portion of an onscreen surface
to an offscreen surface in a way that is
completely invisible to the user of the cairo API.
Setting a transformation via :meth:`Context.translate`
isn't sufficient to do this,
since methods like :meth:`Context.device_to_user`
will expose the hidden offset.
Note that the offset affects drawing to the surface
as well as using the surface in a source pattern.
:param x_offset:
The offset in the X direction, in device units
:param y_offset:
The offset in the Y direction, in device units
"""
cairo.cairo_surface_set_device_offset(
self._pointer, x_offset, y_offset)
self._check_status() | Sets an offset that is added to the device coordinates
determined by the CTM when drawing to surface.
One use case for this method is
when we want to create a :class:`Surface` that redirects drawing
for a portion of an onscreen surface
to an offscreen surface in a way that is
completely invisible to the user of the cairo API.
Setting a transformation via :meth:`Context.translate`
isn't sufficient to do this,
since methods like :meth:`Context.device_to_user`
will expose the hidden offset.
Note that the offset affects drawing to the surface
as well as using the surface in a source pattern.
:param x_offset:
The offset in the X direction, in device units
:param y_offset:
The offset in the Y direction, in device units | Below is the the instruction that describes the task:
### Input:
Sets an offset that is added to the device coordinates
determined by the CTM when drawing to surface.
One use case for this method is
when we want to create a :class:`Surface` that redirects drawing
for a portion of an onscreen surface
to an offscreen surface in a way that is
completely invisible to the user of the cairo API.
Setting a transformation via :meth:`Context.translate`
isn't sufficient to do this,
since methods like :meth:`Context.device_to_user`
will expose the hidden offset.
Note that the offset affects drawing to the surface
as well as using the surface in a source pattern.
:param x_offset:
The offset in the X direction, in device units
:param y_offset:
The offset in the Y direction, in device units
### Response:
def set_device_offset(self, x_offset, y_offset):
""" Sets an offset that is added to the device coordinates
determined by the CTM when drawing to surface.
One use case for this method is
when we want to create a :class:`Surface` that redirects drawing
for a portion of an onscreen surface
to an offscreen surface in a way that is
completely invisible to the user of the cairo API.
Setting a transformation via :meth:`Context.translate`
isn't sufficient to do this,
since methods like :meth:`Context.device_to_user`
will expose the hidden offset.
Note that the offset affects drawing to the surface
as well as using the surface in a source pattern.
:param x_offset:
The offset in the X direction, in device units
:param y_offset:
The offset in the Y direction, in device units
"""
cairo.cairo_surface_set_device_offset(
self._pointer, x_offset, y_offset)
self._check_status() |
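A short illustration of the described effect, assuming the cairocffi package this Surface method appears to come from (the surface size and offset values are arbitrary):

import cairocffi

surface = cairocffi.ImageSurface(cairocffi.FORMAT_ARGB32, 200, 200)
surface.set_device_offset(50, 50)
ctx = cairocffi.Context(surface)
ctx.rectangle(0, 0, 10, 10)  # specified at user-space (0, 0) ...
ctx.fill()                   # ... but rendered at device (50, 50) because of the offset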
def tb_h_file_creation(target, source, env):
"""Compile tilebus file into only .h files corresponding to config variables for inclusion in a library"""
files = [str(x) for x in source]
try:
desc = TBDescriptor(files)
except pyparsing.ParseException as e:
raise BuildError("Could not parse tilebus file", parsing_exception=e)
block = desc.get_block(config_only=True)
block.render_template(block.CommandHeaderTemplate, out_path=str(target[0]))
block.render_template(block.ConfigHeaderTemplate, out_path=str(target[1])) | Compile tilebus file into only .h files corresponding to config variables for inclusion in a library | Below is the the instruction that describes the task:
### Input:
Compile tilebus file into only .h files corresponding to config variables for inclusion in a library
### Response:
def tb_h_file_creation(target, source, env):
"""Compile tilebus file into only .h files corresponding to config variables for inclusion in a library"""
files = [str(x) for x in source]
try:
desc = TBDescriptor(files)
except pyparsing.ParseException as e:
raise BuildError("Could not parse tilebus file", parsing_exception=e)
block = desc.get_block(config_only=True)
block.render_template(block.CommandHeaderTemplate, out_path=str(target[0]))
block.render_template(block.ConfigHeaderTemplate, out_path=str(target[1])) |
def start(self):
"""
Start all the processes
"""
Global.LOGGER.info("starting the flow manager")
self._start_actions()
self._start_message_fetcher()
Global.LOGGER.debug("flow manager started") | Start all the processes | Below is the the instruction that describes the task:
### Input:
Start all the processes
### Response:
def start(self):
"""
Start all the processes
"""
Global.LOGGER.info("starting the flow manager")
self._start_actions()
self._start_message_fetcher()
Global.LOGGER.debug("flow manager started") |
def mesh2fc(script, all_visible_layers=False):
"""Transfer mesh colors to face colors
Args:
script: the FilterScript object or script filename to write
the filter to.
all_visible_layers (bool): If true the color mapping is applied to all the meshes
"""
filter_xml = ''.join([
' <filter name="Transfer Color: Mesh to Face">\n',
' <Param name="allVisibleMesh" ',
'value="%s" ' % str(all_visible_layers).lower(),
'description="Apply to all Meshes" ',
'type="RichBool" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
return None | Transfer mesh colors to face colors
Args:
script: the FilterScript object or script filename to write
the filter to.
all_visible_layers (bool): If true the color mapping is applied to all the meshes | Below is the the instruction that describes the task:
### Input:
Transfer mesh colors to face colors
Args:
script: the FilterScript object or script filename to write
the filter to.
all_visible_layers (bool): If true the color mapping is applied to all the meshes
### Response:
def mesh2fc(script, all_visible_layers=False):
"""Transfer mesh colors to face colors
Args:
script: the FilterScript object or script filename to write
the filter to.
all_visible_layers (bool): If true the color mapping is applied to all the meshes
"""
filter_xml = ''.join([
' <filter name="Transfer Color: Mesh to Face">\n',
' <Param name="allVisibleMesh" ',
'value="%s" ' % str(all_visible_layers).lower(),
'description="Apply to all Meshes" ',
'type="RichBool" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
return None |
def allck():
''' 檢查所有股票買賣點,剔除$10以下、成交量小於1000張的股票。 '''
for i in twseno().allstockno:
a = goristock.goristock(i)
try:
if a.stock_vol[-1] > 1000*1000 and a.raw_data[-1] > 10:
#a.goback(3) ## 倒退天數
ck4m(a)
except:
pass | 檢查所有股票買賣點,剔除$10以下、成交量小於1000張的股票。 | Below is the the instruction that describes the task:
### Input:
檢查所有股票買賣點,剔除$10以下、成交量小於1000張的股票。
### Response:
def allck():
''' 檢查所有股票買賣點,剔除$10以下、成交量小於1000張的股票。 '''
for i in twseno().allstockno:
a = goristock.goristock(i)
try:
if a.stock_vol[-1] > 1000*1000 and a.raw_data[-1] > 10:
#a.goback(3) ## 倒退天數
ck4m(a)
except:
pass |
def set_start(self,t):
"""
Override the GPS start time (and set the duration) of this ScienceSegment.
@param t: new GPS start time.
"""
self.__dur += self.__start - t
self.__start = t | Override the GPS start time (and set the duration) of this ScienceSegment.
@param t: new GPS start time. | Below is the the instruction that describes the task:
### Input:
Override the GPS start time (and set the duration) of this ScienceSegment.
@param t: new GPS start time.
### Response:
def set_start(self,t):
"""
Override the GPS start time (and set the duration) of this ScienceSegment.
@param t: new GPS start time.
"""
self.__dur += self.__start - t
self.__start = t |
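The arithmetic above keeps the segment's end time fixed: the duration grows or shrinks by exactly the amount the start moves. A self-contained toy class (not the real ScienceSegment) demonstrating that invariant:

class ToySegment(object):
    def __init__(self, start, dur):
        self.start, self.dur = start, dur

    def set_start(self, t):
        # Same arithmetic as above: end = start + dur is preserved.
        self.dur += self.start - t
        self.start = t

seg = ToySegment(start=100, dur=10)             # spans GPS 100..110
seg.set_start(95)
print(seg.start, seg.dur, seg.start + seg.dur)  # 95 15 110 -> end unchanged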
async def load_variant(self, elem_type, params=None, elem=None, wrapped=None, obj=None):
"""
Loads variant type from the reader.
Supports both wrapped and raw variant.
:param elem_type:
:param params:
:param elem:
:param wrapped:
:param obj:
:return:
"""
is_wrapped = elem_type.WRAPS_VALUE if wrapped is None else wrapped
if is_wrapped:
elem = elem_type() if elem is None else elem
fname = list(obj.keys())[0]
for field in elem_type.f_specs():
if field[0] != fname:
continue
try:
self.tracker.push_variant(field[1])
fvalue = await self._load_field(field[1], field[2:], elem if not is_wrapped else None, obj=obj[fname])
self.tracker.pop()
except Exception as e:
raise helpers.ArchiveException(e, tracker=self.tracker) from e
if is_wrapped:
elem.set_variant(field[0], fvalue)
return elem if is_wrapped else fvalue
raise ValueError('Unknown tag: %s' % fname) | Loads variant type from the reader.
Supports both wrapped and raw variant.
:param elem_type:
:param params:
:param elem:
:param wrapped:
:param obj:
:return: | Below is the the instruction that describes the task:
### Input:
Loads variant type from the reader.
Supports both wrapped and raw variant.
:param elem_type:
:param params:
:param elem:
:param wrapped:
:param obj:
:return:
### Response:
async def load_variant(self, elem_type, params=None, elem=None, wrapped=None, obj=None):
"""
Loads variant type from the reader.
Supports both wrapped and raw variant.
:param elem_type:
:param params:
:param elem:
:param wrapped:
:param obj:
:return:
"""
is_wrapped = elem_type.WRAPS_VALUE if wrapped is None else wrapped
if is_wrapped:
elem = elem_type() if elem is None else elem
fname = list(obj.keys())[0]
for field in elem_type.f_specs():
if field[0] != fname:
continue
try:
self.tracker.push_variant(field[1])
fvalue = await self._load_field(field[1], field[2:], elem if not is_wrapped else None, obj=obj[fname])
self.tracker.pop()
except Exception as e:
raise helpers.ArchiveException(e, tracker=self.tracker) from e
if is_wrapped:
elem.set_variant(field[0], fvalue)
return elem if is_wrapped else fvalue
raise ValueError('Unknown tag: %s' % fname) |
def cutout(vol, requested_bbox, steps, channel_slice=slice(None), parallel=1,
shared_memory_location=None, output_to_shared_memory=False):
"""Cutout a requested bounding box from storage and return it as a numpy array."""
global fs_lock
cloudpath_bbox = requested_bbox.expand_to_chunk_size(vol.underlying, offset=vol.voxel_offset)
cloudpath_bbox = Bbox.clamp(cloudpath_bbox, vol.bounds)
cloudpaths = list(chunknames(cloudpath_bbox, vol.bounds, vol.key, vol.underlying))
shape = list(requested_bbox.size3()) + [ vol.num_channels ]
handle = None
if parallel == 1:
if output_to_shared_memory:
array_like, renderbuffer = shm.bbox2array(vol, requested_bbox,
location=shared_memory_location, lock=fs_lock)
shm.track_mmap(array_like)
else:
renderbuffer = np.zeros(shape=shape, dtype=vol.dtype, order='F')
def process(img3d, bbox):
shade(renderbuffer, requested_bbox, img3d, bbox)
download_multiple(vol, cloudpaths, fn=process)
else:
handle, renderbuffer = multi_process_cutout(vol, requested_bbox, cloudpaths, parallel,
shared_memory_location, output_to_shared_memory)
renderbuffer = renderbuffer[ ::steps.x, ::steps.y, ::steps.z, channel_slice ]
return VolumeCutout.from_volume(vol, renderbuffer, requested_bbox, handle=handle) | Cutout a requested bounding box from storage and return it as a numpy array. | Below is the the instruction that describes the task:
### Input:
Cutout a requested bounding box from storage and return it as a numpy array.
### Response:
def cutout(vol, requested_bbox, steps, channel_slice=slice(None), parallel=1,
shared_memory_location=None, output_to_shared_memory=False):
"""Cutout a requested bounding box from storage and return it as a numpy array."""
global fs_lock
cloudpath_bbox = requested_bbox.expand_to_chunk_size(vol.underlying, offset=vol.voxel_offset)
cloudpath_bbox = Bbox.clamp(cloudpath_bbox, vol.bounds)
cloudpaths = list(chunknames(cloudpath_bbox, vol.bounds, vol.key, vol.underlying))
shape = list(requested_bbox.size3()) + [ vol.num_channels ]
handle = None
if parallel == 1:
if output_to_shared_memory:
array_like, renderbuffer = shm.bbox2array(vol, requested_bbox,
location=shared_memory_location, lock=fs_lock)
shm.track_mmap(array_like)
else:
renderbuffer = np.zeros(shape=shape, dtype=vol.dtype, order='F')
def process(img3d, bbox):
shade(renderbuffer, requested_bbox, img3d, bbox)
download_multiple(vol, cloudpaths, fn=process)
else:
handle, renderbuffer = multi_process_cutout(vol, requested_bbox, cloudpaths, parallel,
shared_memory_location, output_to_shared_memory)
renderbuffer = renderbuffer[ ::steps.x, ::steps.y, ::steps.z, channel_slice ]
return VolumeCutout.from_volume(vol, renderbuffer, requested_bbox, handle=handle) |
def make_clean_visible_from_raw(_html, tag_replacement_char=' '):
'''Takes an HTML-like Unicode (or UTF-8 encoded) string as input and
returns a Unicode string with all tags replaced by whitespace. In
particular, all Unicode characters inside HTML are replaced with a
single whitespace character.
This *does* detect comments, style, script, link tags and replaces
them with whitespace. This is subtle because these tags can be
self-closing or not.
    It does not do anything with HTML-escaped characters.
Pre-existing whitespace of any kind *except* newlines (\n) and
linefeeds (\r\n) is converted to single spaces ' ', which has the
same byte length (and character length). Newlines and linefeeds
are left unchanged.
This is a simple state machine iterator without regexes
'''
if not isinstance(_html, unicode):
_html = unicode(_html, 'utf-8')
#Strip tags with logic above
non_tag = ''.join(non_tag_chars_from_raw(_html))
return non_tag.encode('utf-8') | Takes an HTML-like Unicode (or UTF-8 encoded) string as input and
returns a Unicode string with all tags replaced by whitespace. In
particular, all Unicode characters inside HTML are replaced with a
single whitespace character.
This *does* detect comments, style, script, link tags and replaces
them with whitespace. This is subtle because these tags can be
self-closing or not.
It does not do anything with HTML-escaped characters.
Pre-existing whitespace of any kind *except* newlines (\n) and
linefeeds (\r\n) is converted to single spaces ' ', which has the
same byte length (and character length). Newlines and linefeeds
are left unchanged.
This is a simple state machine iterator without regexes | Below is the the instruction that describes the task:
### Input:
Takes an HTML-like Unicode (or UTF-8 encoded) string as input and
returns a Unicode string with all tags replaced by whitespace. In
particular, all Unicode characters inside HTML are replaced with a
single whitespace character.
This *does* detect comments, style, script, link tags and replaces
them with whitespace. This is subtle because these tags can be
self-closing or not.
It does not do anything with HTML-escaped characters.
Pre-existing whitespace of any kind *except* newlines (\n) and
linefeeds (\r\n) is converted to single spaces ' ', which has the
same byte length (and character length). Newlines and linefeeds
are left unchanged.
This is a simple state machine iterator without regexes
### Response:
def make_clean_visible_from_raw(_html, tag_replacement_char=' '):
'''Takes an HTML-like Unicode (or UTF-8 encoded) string as input and
returns a Unicode string with all tags replaced by whitespace. In
particular, all Unicode characters inside HTML are replaced with a
single whitespace character.
This *does* detect comments, style, script, link tags and replaces
them with whitespace. This is subtle because these tags can be
self-closing or not.
    It does not do anything with HTML-escaped characters.
Pre-existing whitespace of any kind *except* newlines (\n) and
linefeeds (\r\n) is converted to single spaces ' ', which has the
same byte length (and character length). Newlines and linefeeds
are left unchanged.
This is a simple state machine iterator without regexes
'''
if not isinstance(_html, unicode):
_html = unicode(_html, 'utf-8')
#Strip tags with logic above
non_tag = ''.join(non_tag_chars_from_raw(_html))
return non_tag.encode('utf-8') |
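The helper non_tag_chars_from_raw is not shown in this row. As a rough, hypothetical sketch of the kind of state machine the docstring describes (tag characters blanked to spaces, newlines kept, total length preserved), one could write the following; it deliberately omits the comment/script/style handling the real implementation performs.

def strip_tags_to_spaces(html):
    out, in_tag = [], False
    for ch in html:
        if ch == '<':
            in_tag = True
            out.append(' ')
        elif ch == '>' and in_tag:
            in_tag = False
            out.append(' ')
        elif in_tag:
            # Keep line breaks even inside tags so per-line offsets survive.
            out.append(ch if ch in '\r\n' else ' ')
        else:
            out.append(ch)
    return ''.join(out)

s = 'a<b>bold</b>\nc'
print(len(s) == len(strip_tags_to_spaces(s)))  # True: length is preserved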
def sas_interconnect_types(self):
"""
Gets the SasInterconnectTypes API client.
Returns:
SasInterconnectTypes:
"""
if not self.__sas_interconnect_types:
self.__sas_interconnect_types = SasInterconnectTypes(self.__connection)
return self.__sas_interconnect_types | Gets the SasInterconnectTypes API client.
Returns:
SasInterconnectTypes: | Below is the the instruction that describes the task:
### Input:
Gets the SasInterconnectTypes API client.
Returns:
SasInterconnectTypes:
### Response:
def sas_interconnect_types(self):
"""
Gets the SasInterconnectTypes API client.
Returns:
SasInterconnectTypes:
"""
if not self.__sas_interconnect_types:
self.__sas_interconnect_types = SasInterconnectTypes(self.__connection)
return self.__sas_interconnect_types |
async def _write(self, path, data, *,
flags=None, cas=None, acquire=None, release=None):
"""Sets the key to the given value.
Returns:
bool: ``True`` on success
"""
if not isinstance(data, bytes):
raise ValueError("value must be bytes")
path = "/v1/kv/%s" % path
response = await self._api.put(
path,
params={
"flags": flags,
"cas": cas,
"acquire": acquire,
"release": release
},
data=data,
headers={"Content-Type": "application/octet-stream"})
return response | Sets the key to the given value.
Returns:
bool: ``True`` on success | Below is the the instruction that describes the task:
### Input:
Sets the key to the given value.
Returns:
bool: ``True`` on success
### Response:
async def _write(self, path, data, *,
flags=None, cas=None, acquire=None, release=None):
"""Sets the key to the given value.
Returns:
bool: ``True`` on success
"""
if not isinstance(data, bytes):
raise ValueError("value must be bytes")
path = "/v1/kv/%s" % path
response = await self._api.put(
path,
params={
"flags": flags,
"cas": cas,
"acquire": acquire,
"release": release
},
data=data,
headers={"Content-Type": "application/octet-stream"})
return response |
def get_listing(path):
"""
Returns the list of files and directories in a path.
Prepents a ".." (parent directory link) if path is not current dir.
"""
if path != ".":
listing = sorted(['..'] + os.listdir(path))
else:
listing = sorted(os.listdir(path))
return listing | Returns the list of files and directories in a path.
Prepents a ".." (parent directory link) if path is not current dir. | Below is the the instruction that describes the task:
### Input:
Returns the list of files and directories in a path.
Prepents a ".." (parent directory link) if path is not current dir.
### Response:
def get_listing(path):
"""
Returns the list of files and directories in a path.
Prepents a ".." (parent directory link) if path is not current dir.
"""
if path != ".":
listing = sorted(['..'] + os.listdir(path))
else:
listing = sorted(os.listdir(path))
return listing |
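A small usage sketch, assuming get_listing above is defined in a module that imports os (the temporary directory and file names are illustrative):

import os
import tempfile

d = tempfile.mkdtemp()
for name in ('b.txt', 'a.txt'):
    open(os.path.join(d, name), 'w').close()

print(get_listing(d))    # ['..', 'a.txt', 'b.txt'] -- '..' added, then everything sorted
print(get_listing('.'))  # current directory: sorted names only, no '..' entry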
def meta(self, file_list, **kwargs):
"""获得文件(s)的metainfo
:param file_list: 文件路径列表,如 ['/aaa.txt']
:type file_list: list
:return: requests.Response
.. note ::
示例
* 文件不存在
{"errno":12,"info":[{"errno":-9}],"request_id":3294861771}
* 文件存在
{
"errno": 0,
"info": [
{
"fs_id": 文件id,
"path": "\/\u5c0f\u7c73\/mi2s\u5237recovery.rar",
"server_filename": "mi2s\u5237recovery.rar",
"size": 8292134,
"server_mtime": 1391274570,
"server_ctime": 1391274570,
"local_mtime": 1391274570,
"local_ctime": 1391274570,
"isdir": 0,
"category": 6,
"path_md5": 279827390796736883,
"delete_fs_id": 0,
"object_key": "84221121-2193956150-1391274570512754",
"block_list": [
"76b469302a02b42fd0a548f1a50dd8ac"
],
"md5": "76b469302a02b42fd0a548f1a50dd8ac",
"errno": 0
}
],
"request_id": 2964868977
}
"""
if not isinstance(file_list, list):
file_list = [file_list]
data = {'target': json.dumps(file_list)}
return self._request('filemetas?blocks=0&dlink=1', 'filemetas', data=data, **kwargs) | 获得文件(s)的metainfo
:param file_list: 文件路径列表,如 ['/aaa.txt']
:type file_list: list
:return: requests.Response
.. note ::
示例
* 文件不存在
{"errno":12,"info":[{"errno":-9}],"request_id":3294861771}
* 文件存在
{
"errno": 0,
"info": [
{
"fs_id": 文件id,
"path": "\/\u5c0f\u7c73\/mi2s\u5237recovery.rar",
"server_filename": "mi2s\u5237recovery.rar",
"size": 8292134,
"server_mtime": 1391274570,
"server_ctime": 1391274570,
"local_mtime": 1391274570,
"local_ctime": 1391274570,
"isdir": 0,
"category": 6,
"path_md5": 279827390796736883,
"delete_fs_id": 0,
"object_key": "84221121-2193956150-1391274570512754",
"block_list": [
"76b469302a02b42fd0a548f1a50dd8ac"
],
"md5": "76b469302a02b42fd0a548f1a50dd8ac",
"errno": 0
}
],
"request_id": 2964868977
} | Below is the the instruction that describes the task:
### Input:
获得文件(s)的metainfo
:param file_list: 文件路径列表,如 ['/aaa.txt']
:type file_list: list
:return: requests.Response
.. note ::
示例
* 文件不存在
{"errno":12,"info":[{"errno":-9}],"request_id":3294861771}
* 文件存在
{
"errno": 0,
"info": [
{
"fs_id": 文件id,
"path": "\/\u5c0f\u7c73\/mi2s\u5237recovery.rar",
"server_filename": "mi2s\u5237recovery.rar",
"size": 8292134,
"server_mtime": 1391274570,
"server_ctime": 1391274570,
"local_mtime": 1391274570,
"local_ctime": 1391274570,
"isdir": 0,
"category": 6,
"path_md5": 279827390796736883,
"delete_fs_id": 0,
"object_key": "84221121-2193956150-1391274570512754",
"block_list": [
"76b469302a02b42fd0a548f1a50dd8ac"
],
"md5": "76b469302a02b42fd0a548f1a50dd8ac",
"errno": 0
}
],
"request_id": 2964868977
}
### Response:
def meta(self, file_list, **kwargs):
"""获得文件(s)的metainfo
:param file_list: 文件路径列表,如 ['/aaa.txt']
:type file_list: list
:return: requests.Response
.. note ::
示例
* 文件不存在
{"errno":12,"info":[{"errno":-9}],"request_id":3294861771}
* 文件存在
{
"errno": 0,
"info": [
{
"fs_id": 文件id,
"path": "\/\u5c0f\u7c73\/mi2s\u5237recovery.rar",
"server_filename": "mi2s\u5237recovery.rar",
"size": 8292134,
"server_mtime": 1391274570,
"server_ctime": 1391274570,
"local_mtime": 1391274570,
"local_ctime": 1391274570,
"isdir": 0,
"category": 6,
"path_md5": 279827390796736883,
"delete_fs_id": 0,
"object_key": "84221121-2193956150-1391274570512754",
"block_list": [
"76b469302a02b42fd0a548f1a50dd8ac"
],
"md5": "76b469302a02b42fd0a548f1a50dd8ac",
"errno": 0
}
],
"request_id": 2964868977
}
"""
if not isinstance(file_list, list):
file_list = [file_list]
data = {'target': json.dumps(file_list)}
return self._request('filemetas?blocks=0&dlink=1', 'filemetas', data=data, **kwargs) |
def segments(self, using=None, **kwargs):
"""
Provide low level segments information that a Lucene index (shard
level) is built with.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.segments`` unchanged.
"""
return self._get_connection(using).indices.segments(index=self._name, **kwargs) | Provide low level segments information that a Lucene index (shard
level) is built with.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.segments`` unchanged. | Below is the the instruction that describes the task:
### Input:
Provide low level segments information that a Lucene index (shard
level) is built with.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.segments`` unchanged.
### Response:
def segments(self, using=None, **kwargs):
"""
Provide low level segments information that a Lucene index (shard
level) is built with.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.segments`` unchanged.
"""
return self._get_connection(using).indices.segments(index=self._name, **kwargs) |
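Editor's note — a hedged usage sketch for the segments() wrapper above, assuming elasticsearch-dsl with a cluster reachable on localhost and an already-existing index; the host and index name are placeholders, and the response layout noted in the comment is the standard Elasticsearch segments API shape.

from elasticsearch_dsl import Index, connections

# Register a default connection; host and index name are assumptions about
# the local setup, and the index is expected to exist already.
connections.create_connection(hosts=['http://localhost:9200'])

idx = Index('my-logs')
info = idx.segments()     # any extra kwargs would be forwarded to the client call
# Shard-level segment details live under info['indices'][<index>]['shards'].
print(info['indices']['my-logs']['shards'])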
def process_hive(vargs):
"""
Main Hive.co path.
"""
artist_url = vargs['artist_url']
if 'hive.co' in artist_url:
mc_url = artist_url
else:
mc_url = 'https://www.hive.co/downloads/download/' + artist_url
filenames = scrape_hive_url(mc_url, num_tracks=vargs['num_tracks'], folders=vargs['folders'], custom_path=vargs['path'])
if vargs['open']:
open_files(filenames)
return | Main Hive.co path. | Below is the the instruction that describes the task:
### Input:
Main Hive.co path.
### Response:
def process_hive(vargs):
"""
Main Hive.co path.
"""
artist_url = vargs['artist_url']
if 'hive.co' in artist_url:
mc_url = artist_url
else:
mc_url = 'https://www.hive.co/downloads/download/' + artist_url
filenames = scrape_hive_url(mc_url, num_tracks=vargs['num_tracks'], folders=vargs['folders'], custom_path=vargs['path'])
if vargs['open']:
open_files(filenames)
return |
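Editor's note — a hedged calling sketch for process_hive above: it only shows the argument dict the function reads (artist_url, num_tracks, folders, path, open). The download id is a placeholder, and the function plus its scrape_hive_url/open_files helpers are assumed to be in scope from the surrounding scraper module.

# vargs mirrors the parsed command-line options the function expects.
vargs = {
    'artist_url': '123456',   # bare id; the code expands it to a hive.co download URL
    'num_tracks': 1,
    'folders': False,         # don't create per-artist folders
    'path': '',               # default download directory
    'open': False,            # don't open the files after downloading
}
process_hive(vargs)           # assumes process_hive is importable from the same module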
def get_list_filter(self, request):
"""
Adds the period filter to the filters list.
:param request: Current request.
:return: Iterable of filters.
"""
original = super(TrackedLiveAdmin, self).get_list_filter(request)
return original + type(original)([PeriodFilter]) | Adds the period filter to the filters list.
:param request: Current request.
:return: Iterable of filters. | Below is the the instruction that describes the task:
### Input:
Adds the period filter to the filters list.
:param request: Current request.
:return: Iterable of filters.
### Response:
def get_list_filter(self, request):
"""
Adds the period filter to the filters list.
:param request: Current request.
:return: Iterable of filters.
"""
original = super(TrackedLiveAdmin, self).get_list_filter(request)
return original + type(original)([PeriodFilter]) |
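Editor's note — a sketch of how the TrackedLiveAdmin mixin above might be wired into a concrete ModelAdmin. TrackedLiveAdmin, PeriodFilter and the Article model are assumed importable from their respective apps; only the behaviour (PeriodFilter appended to the declared filters) comes from the row itself.

from django.contrib import admin

class ArticleAdmin(TrackedLiveAdmin, admin.ModelAdmin):   # TrackedLiveAdmin assumed imported
    # get_list_filter() would return ['published', PeriodFilter] for this class,
    # because the mixin appends its period filter to whatever is declared here.
    list_filter = ['published']

# admin.site.register(Article, ArticleAdmin)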
def assign_properties(thing):
"""Assign properties to an object.
When creating something via a post request (e.g. a node), you can pass the
properties of the object in the request. This function gets those values
from the request and fills in the relevant columns of the table.
"""
for p in range(5):
property_name = "property" + str(p + 1)
property = request_parameter(parameter=property_name, optional=True)
if property:
setattr(thing, property_name, property)
session.commit() | Assign properties to an object.
When creating something via a post request (e.g. a node), you can pass the
properties of the object in the request. This function gets those values
from the request and fills in the relevant columns of the table. | Below is the the instruction that describes the task:
### Input:
Assign properties to an object.
When creating something via a post request (e.g. a node), you can pass the
properties of the object in the request. This function gets those values
from the request and fills in the relevant columns of the table.
### Response:
def assign_properties(thing):
"""Assign properties to an object.
When creating something via a post request (e.g. a node), you can pass the
properties of the object in the request. This function gets those values
from the request and fills in the relevant columns of the table.
"""
for p in range(5):
property_name = "property" + str(p + 1)
property = request_parameter(parameter=property_name, optional=True)
if property:
setattr(thing, property_name, property)
session.commit() |
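Editor's note — a self-contained mimic of the copy loop in assign_properties above: pull optional property1..property5 values out of a form-style dict and set the matching attributes. The SimpleNode class and the payload are made up for illustration; the real helper reads the values from the incoming request and commits the database session instead.

class SimpleNode(object):
    property1 = property2 = property3 = property4 = property5 = None

def assign_from_form(node, form):
    for p in range(5):
        name = 'property' + str(p + 1)
        value = form.get(name)        # optional, like request_parameter(optional=True)
        if value:
            setattr(node, name, value)

node = SimpleNode()
assign_from_form(node, {'property1': 'foo', 'property3': '42'})
print(node.property1, node.property2, node.property3)   # foo None 42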
def import_classes(name, currmodule):
# type: (unicode, unicode) -> Any
"""Import a class using its fully-qualified *name*."""
target = None
# import class or module using currmodule
if currmodule:
target = try_import(currmodule + '.' + name)
# import class or module without currmodule
if target is None:
target = try_import(name)
if target is None:
raise InheritanceException(
'Could not import class or module %r specified for '
'inheritance diagram' % name)
if inspect.isclass(target):
# If imported object is a class, just return it
return [target]
elif inspect.ismodule(target):
# If imported object is a module, return classes defined on it
classes = []
for cls in target.__dict__.values():
if inspect.isclass(cls) and cls_is_in_module(cls, mod=target):
classes.append(cls)
return classes
raise InheritanceException('%r specified for inheritance diagram is '
'not a class or module' % name) | Import a class using its fully-qualified *name*. | Below is the the instruction that describes the task:
### Input:
Import a class using its fully-qualified *name*.
### Response:
def import_classes(name, currmodule):
# type: (unicode, unicode) -> Any
"""Import a class using its fully-qualified *name*."""
target = None
# import class or module using currmodule
if currmodule:
target = try_import(currmodule + '.' + name)
# import class or module without currmodule
if target is None:
target = try_import(name)
if target is None:
raise InheritanceException(
'Could not import class or module %r specified for '
'inheritance diagram' % name)
if inspect.isclass(target):
# If imported object is a class, just return it
return [target]
elif inspect.ismodule(target):
# If imported object is a module, return classes defined on it
classes = []
for cls in target.__dict__.values():
if inspect.isclass(cls) and cls_is_in_module(cls, mod=target):
classes.append(cls)
return classes
raise InheritanceException('%r specified for inheritance diagram is '
'not a class or module' % name) |
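Editor's note — a hedged usage sketch for import_classes above, assuming it is importable from sphinx.ext.inheritance_diagram (where this helper lives upstream); the looked-up names are standard-library examples.

from sphinx.ext.inheritance_diagram import import_classes

# A fully-qualified class name resolves to a one-element list.
print(import_classes('collections.OrderedDict', None))
# A module name expands to the classes defined in that module.
print(import_classes('json.decoder', None))
# A bare name is first tried relative to currmodule.
print(import_classes('JSONDecoder', 'json.decoder'))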
def restrict_bond_dict(self, bond_dict):
"""Restrict a bond dictionary to self.
Args:
bond_dict (dict): Look into :meth:`~chemcoord.Cartesian.get_bonds`,
to see examples for a bond_dict.
Returns:
bond dictionary
"""
return {j: bond_dict[j] & set(self.index) for j in self.index} | Restrict a bond dictionary to self.
Args:
bond_dict (dict): Look into :meth:`~chemcoord.Cartesian.get_bonds`,
to see examples for a bond_dict.
Returns:
bond dictionary | Below is the the instruction that describes the task:
### Input:
Restrict a bond dictionary to self.
Args:
bond_dict (dict): Look into :meth:`~chemcoord.Cartesian.get_bonds`,
to see examples for a bond_dict.
Returns:
bond dictionary
### Response:
def restrict_bond_dict(self, bond_dict):
"""Restrict a bond dictionary to self.
Args:
bond_dict (dict): Look into :meth:`~chemcoord.Cartesian.get_bonds`,
to see examples for a bond_dict.
Returns:
bond dictionary
"""
return {j: bond_dict[j] & set(self.index) for j in self.index} |
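Editor's note — a plain-dict illustration of what restrict_bond_dict above computes: keep only neighbours that fall inside the fragment's own index. The three-atom connectivity below is made up, so chemcoord itself is not needed to run it.

fragment_index = {0, 1}                    # atoms kept in the sub-Cartesian
bond_dict = {0: {1, 2}, 1: {0}, 2: {0}}    # full-molecule connectivity
restricted = {j: bond_dict[j] & fragment_index for j in fragment_index}
print(restricted)                          # {0: {1}, 1: {0}}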
def _traverse_tree(tree, path):
"""Traverses the permission tree, returning the permission at given permission path."""
path_steps = (step for step in path.split('.') if step != '')
# Special handling for first step, because the first step isn't under 'objects'
first_step = path_steps.next()
subtree = tree[first_step]
for step in path_steps:
subtree = subtree['children'][step]
return subtree | Traverses the permission tree, returning the permission at given permission path. | Below is the the instruction that describes the task:
### Input:
Traverses the permission tree, returning the permission at given permission path.
### Response:
def _traverse_tree(tree, path):
"""Traverses the permission tree, returning the permission at given permission path."""
path_steps = (step for step in path.split('.') if step != '')
# Special handling for first step, because the first step isn't under 'objects'
first_step = path_steps.next()
subtree = tree[first_step]
for step in path_steps:
subtree = subtree['children'][step]
return subtree |
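Editor's note — a toy tree for _traverse_tree above, with the layout implied by the function body: the first path step is a top-level key and deeper steps live under 'children'. The helper calls path_steps.next(), so it runs as written on Python 2 only; on Python 3 that call would need to become next(path_steps). The tree contents are made up.

tree = {
    'projects': {
        'perm': 'read',
        'children': {
            'alpha': {'perm': 'write', 'children': {}},
        },
    },
}
print(_traverse_tree(tree, 'projects.alpha')['perm'])   # 'write'
print(_traverse_tree(tree, 'projects')['perm'])         # 'read'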
def get(self, field):
"""
Returns the value of a user field.
:param str field:
The name of the user field.
:returns: str -- the value
"""
if field in ('username', 'uuid', 'app_data'):
return self.data[field]
else:
return self.data.get('app_data', {})[field] | Returns the value of a user field.
:param str field:
The name of the user field.
:returns: str -- the value | Below is the the instruction that describes the task:
### Input:
Returns the value of a user field.
:param str field:
The name of the user field.
:returns: str -- the value
### Response:
def get(self, field):
"""
Returns the value of a user field.
:param str field:
The name of the user field.
:returns: str -- the value
"""
if field in ('username', 'uuid', 'app_data'):
return self.data[field]
else:
return self.data.get('app_data', {})[field] |
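Editor's note — a stand-alone mimic of the lookup rules in get() above: the built-in fields come straight from data, anything else is read from app_data. The UserRecord wrapper and the sample payload are illustrative only.

class UserRecord(object):
    def __init__(self, data):
        self.data = data

    def get(self, field):
        if field in ('username', 'uuid', 'app_data'):
            return self.data[field]
        return self.data.get('app_data', {})[field]

u = UserRecord({'username': 'alice', 'uuid': 'abc-123',
                'app_data': {'plan': 'pro'}})
print(u.get('username'))   # 'alice'
print(u.get('plan'))       # 'pro' -- falls through to app_data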