code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k) |
---|---|---|
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Write the data encoding the DeriveKey request payload to a stream.
Args:
output_buffer (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is not defined.
"""
local_buffer = utils.BytearrayStream()
if self._object_type:
self._object_type.write(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidField(
"The DeriveKey request payload is missing the object type "
"field."
)
if self._unique_identifiers:
for unique_identifier in self._unique_identifiers:
unique_identifier.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The DeriveKey request payload is missing the unique "
"identifiers field."
)
if self._derivation_method:
self._derivation_method.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The DeriveKey request payload is missing the derivation "
"method field."
)
if self._derivation_parameters:
self._derivation_parameters.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The DeriveKey request payload is missing the derivation "
"parameters field."
)
if kmip_version < enums.KMIPVersion.KMIP_2_0:
if self._template_attribute:
self._template_attribute.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The DeriveKey request payload is missing the template "
"attribute field."
)
else:
if self._template_attribute:
attrs = objects.convert_template_attribute_to_attributes(
self._template_attribute
)
attrs.write(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidField(
"The DeriveKey request payload is missing the template "
"attribute field."
)
self.length = local_buffer.length()
super(DeriveKeyRequestPayload, self).write(
output_buffer,
kmip_version=kmip_version
)
output_buffer.write(local_buffer.buffer) | Write the data encoding the DeriveKey request payload to a stream.
Args:
output_buffer (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is not defined. | Below is the instruction that describes the task:
### Input:
Write the data encoding the DeriveKey request payload to a stream.
Args:
output_buffer (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is not defined.
### Response:
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Write the data encoding the DeriveKey request payload to a stream.
Args:
output_buffer (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is not defined.
"""
local_buffer = utils.BytearrayStream()
if self._object_type:
self._object_type.write(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidField(
"The DeriveKey request payload is missing the object type "
"field."
)
if self._unique_identifiers:
for unique_identifier in self._unique_identifiers:
unique_identifier.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The DeriveKey request payload is missing the unique "
"identifiers field."
)
if self._derivation_method:
self._derivation_method.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The DeriveKey request payload is missing the derivation "
"method field."
)
if self._derivation_parameters:
self._derivation_parameters.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The DeriveKey request payload is missing the derivation "
"parameters field."
)
if kmip_version < enums.KMIPVersion.KMIP_2_0:
if self._template_attribute:
self._template_attribute.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The DeriveKey request payload is missing the template "
"attribute field."
)
else:
if self._template_attribute:
attrs = objects.convert_template_attribute_to_attributes(
self._template_attribute
)
attrs.write(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidField(
"The DeriveKey request payload is missing the template "
"attribute field."
)
self.length = local_buffer.length()
super(DeriveKeyRequestPayload, self).write(
output_buffer,
kmip_version=kmip_version
)
output_buffer.write(local_buffer.buffer) |
def id_(reset=False):
'''
.. versionadded:: 2016.3.0
Return monit unique id.
reset : False
Reset current id and generate a new id when it's True.
CLI Example:
.. code-block:: bash
salt '*' monit.id [reset=True]
'''
if reset:
id_pattern = re.compile(r'Monit id (?P<id>[^ ]+)')
cmd = 'echo y|monit -r'
out = __salt__['cmd.run_all'](cmd, python_shell=True)
ret = id_pattern.search(out['stdout']).group('id')
return ret if ret else False
else:
cmd = 'monit -i'
out = __salt__['cmd.run'](cmd)
ret = out.split(':')[-1].strip()
return ret | .. versionadded:: 2016.3.0
Return monit unique id.
reset : False
Reset current id and generate a new id when it's True.
CLI Example:
.. code-block:: bash
salt '*' monit.id [reset=True] | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2016.3.0
Return monit unique id.
reset : False
Reset current id and generate a new id when it's True.
CLI Example:
.. code-block:: bash
salt '*' monit.id [reset=True]
### Response:
def id_(reset=False):
'''
.. versionadded:: 2016.3.0
Return monit unique id.
reset : False
Reset current id and generate a new id when it's True.
CLI Example:
.. code-block:: bash
salt '*' monit.id [reset=True]
'''
if reset:
id_pattern = re.compile(r'Monit id (?P<id>[^ ]+)')
cmd = 'echo y|monit -r'
out = __salt__['cmd.run_all'](cmd, python_shell=True)
ret = id_pattern.search(out['stdout']).group('id')
return ret if ret else False
else:
cmd = 'monit -i'
out = __salt__['cmd.run'](cmd)
ret = out.split(':')[-1].strip()
return ret |
def db020(self, value=None):
""" Corresponds to IDD Field `db020`
mean coincident wet-bulb temperature to
Dry-bulb temperature corresponding to 2.0% annual cumulative frequency of occurrence (warm conditions)
Args:
value (float): value for IDD Field `db020`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `db020`'.format(value))
self._db020 = value | Corresponds to IDD Field `db020`
mean coincident wet-bulb temperature to
Dry-bulb temperature corresponding to 2.0% annual cumulative frequency of occurrence (warm conditions)
Args:
value (float): value for IDD Field `db020`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | Below is the instruction that describes the task:
### Input:
Corresponds to IDD Field `db020`
mean coincident wet-bulb temperature to
Dry-bulb temperature corresponding to 2.0% annual cumulative frequency of occurrence (warm conditions)
Args:
value (float): value for IDD Field `db020`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
### Response:
def db020(self, value=None):
""" Corresponds to IDD Field `db020`
mean coincident wet-bulb temperature to
Dry-bulb temperature corresponding to 2.0% annual cumulative frequency of occurrence (warm conditions)
Args:
value (float): value for IDD Field `db020`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `db020`'.format(value))
self._db020 = value |
def check_instance(
mzn, *dzn_files, data=None, include=None, stdlib_dir=None, globals_dir=None,
allow_multiple_assignments=False
):
"""Perform instance checking on a model + data.
This function calls the command ``minizinc --instance-check-only`` to check
for consistency of the given model + data.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
data : dict
Additional data as a list of strings containing dzn variables
assignments.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
Raises
------
``MiniZincError`` if instance checking fails.
"""
args = ['--instance-check-only']
args += _flattening_args(
mzn, *dzn_files, data=data, include=include, stdlib_dir=stdlib_dir,
globals_dir=globals_dir,
allow_multiple_assignments=allow_multiple_assignments
)
input = mzn if args[-1] == '-' else None
proc = _run_minizinc_proc(*args, input=input)
if proc.stderr_data:
raise MiniZincError(
mzn if input is None else '\n' + mzn + '\n', args, proc.stderr_data
) | Perform instance checking on a model + data.
This function calls the command ``minizinc --instance-check-only`` to check
for consistency of the given model + data.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
data : dict
Additional data as a list of strings containing dzn variables
assignments.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
Raises
------
``MiniZincError`` if instance checking fails. | Below is the instruction that describes the task:
### Input:
Perform instance checking on a model + data.
This function calls the command ``minizinc --instance-check-only`` to check
for consistency of the given model + data.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
data : dict
Additional data as a list of strings containing dzn variables
assignments.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
Raises
------
``MiniZincError`` if instance checking fails.
### Response:
def check_instance(
mzn, *dzn_files, data=None, include=None, stdlib_dir=None, globals_dir=None,
allow_multiple_assignments=False
):
"""Perform instance checking on a model + data.
This function calls the command ``minizinc --instance-check-only`` to check
for consistency of the given model + data.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
data : dict
Additional data as a list of strings containing dzn variables
assignments.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
Raises
------
``MiniZincError`` if instance checking fails.
"""
args = ['--instance-check-only']
args += _flattening_args(
mzn, *dzn_files, data=data, include=include, stdlib_dir=stdlib_dir,
globals_dir=globals_dir,
allow_multiple_assignments=allow_multiple_assignments
)
input = mzn if args[-1] == '-' else None
proc = _run_minizinc_proc(*args, input=input)
if proc.stderr_data:
raise MiniZincError(
mzn if input is None else '\n' + mzn + '\n', args, proc.stderr_data
) |
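A minimal usage sketch for the `check_instance` row above, assuming the function and `MiniZincError` are exposed at the `pymzn` package level and that the `minizinc` executable is on the PATH; the model string and the `instance.dzn` file name are purely illustrative.

```python
# Hedged sketch: pymzn.check_instance / pymzn.MiniZincError are assumed public names.
import pymzn

model = """
int: n;
var 1..n: x;
constraint x >= 1;
"""

try:
    pymzn.check_instance(model, 'instance.dzn')  # 'instance.dzn' is a hypothetical data file
    print('model + data are consistent')
except pymzn.MiniZincError as err:
    print('instance check failed:', err)
```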
def get_voltage(self, channel, unit='V'):
'''Reading voltage
'''
adc_ch = self._ch_map[channel]['ADCV']['adc_ch']
address = self._ch_map[channel]['ADCV']['address']
raw = self._get_adc_value(address=address)[adc_ch]
dac_offset = self._ch_cal[channel]['ADCV']['offset']
dac_gain = self._ch_cal[channel]['ADCV']['gain']
voltage = ((raw - dac_offset) / dac_gain)
if unit == 'raw':
return raw
elif unit == 'V':
return voltage / 1000
elif unit == 'mV':
return voltage
else:
raise TypeError("Invalid unit type.") | Reading voltage | Below is the the instruction that describes the task:
### Input:
Reading voltage
### Response:
def get_voltage(self, channel, unit='V'):
'''Reading voltage
'''
adc_ch = self._ch_map[channel]['ADCV']['adc_ch']
address = self._ch_map[channel]['ADCV']['address']
raw = self._get_adc_value(address=address)[adc_ch]
dac_offset = self._ch_cal[channel]['ADCV']['offset']
dac_gain = self._ch_cal[channel]['ADCV']['gain']
voltage = ((raw - dac_offset) / dac_gain)
if unit == 'raw':
return raw
elif unit == 'V':
return voltage / 1000
elif unit == 'mV':
return voltage
else:
raise TypeError("Invalid unit type.") |
def visit(self, func):
"""Run ``func`` on each object's path.
Note: If ``func`` returns ``None`` (or doesn't return),
iteration continues. However, if ``func`` returns
anything else, it ceases and returns that value.
Examples
--------
>>> import zarr
>>> g1 = zarr.group()
>>> g2 = g1.create_group('foo')
>>> g3 = g1.create_group('bar')
>>> g4 = g3.create_group('baz')
>>> g5 = g3.create_group('quux')
>>> def print_visitor(name):
... print(name)
>>> g1.visit(print_visitor)
bar
bar/baz
bar/quux
foo
>>> g3.visit(print_visitor)
baz
quux
"""
base_len = len(self.name)
return self.visitvalues(lambda o: func(o.name[base_len:].lstrip("/"))) | Run ``func`` on each object's path.
Note: If ``func`` returns ``None`` (or doesn't return),
iteration continues. However, if ``func`` returns
anything else, it ceases and returns that value.
Examples
--------
>>> import zarr
>>> g1 = zarr.group()
>>> g2 = g1.create_group('foo')
>>> g3 = g1.create_group('bar')
>>> g4 = g3.create_group('baz')
>>> g5 = g3.create_group('quux')
>>> def print_visitor(name):
... print(name)
>>> g1.visit(print_visitor)
bar
bar/baz
bar/quux
foo
>>> g3.visit(print_visitor)
baz
quux | Below is the instruction that describes the task:
### Input:
Run ``func`` on each object's path.
Note: If ``func`` returns ``None`` (or doesn't return),
iteration continues. However, if ``func`` returns
anything else, it ceases and returns that value.
Examples
--------
>>> import zarr
>>> g1 = zarr.group()
>>> g2 = g1.create_group('foo')
>>> g3 = g1.create_group('bar')
>>> g4 = g3.create_group('baz')
>>> g5 = g3.create_group('quux')
>>> def print_visitor(name):
... print(name)
>>> g1.visit(print_visitor)
bar
bar/baz
bar/quux
foo
>>> g3.visit(print_visitor)
baz
quux
### Response:
def visit(self, func):
"""Run ``func`` on each object's path.
Note: If ``func`` returns ``None`` (or doesn't return),
iteration continues. However, if ``func`` returns
anything else, it ceases and returns that value.
Examples
--------
>>> import zarr
>>> g1 = zarr.group()
>>> g2 = g1.create_group('foo')
>>> g3 = g1.create_group('bar')
>>> g4 = g3.create_group('baz')
>>> g5 = g3.create_group('quux')
>>> def print_visitor(name):
... print(name)
>>> g1.visit(print_visitor)
bar
bar/baz
bar/quux
foo
>>> g3.visit(print_visitor)
baz
quux
"""
base_len = len(self.name)
return self.visitvalues(lambda o: func(o.name[base_len:].lstrip("/"))) |
def _process_elem_text(elem, dic, subdic, text="@text", **options):
"""
:param elem: ET Element object which has elem.text
:param dic: <container> (dict[-like]) object converted from elem
:param subdic: Sub <container> object converted from elem
:param options:
Keyword options, see the description of :func:`elem_to_container` for
more details.
:return: None but updating elem.text, dic and subdic as side effects
"""
elem.text = elem.text.strip()
if elem.text:
etext = _parse_text(elem.text, **options)
if len(elem) or elem.attrib:
subdic[text] = etext
else:
dic[elem.tag] = etext | :param elem: ET Element object which has elem.text
:param dic: <container> (dict[-like]) object converted from elem
:param subdic: Sub <container> object converted from elem
:param options:
Keyword options, see the description of :func:`elem_to_container` for
more details.
:return: None but updating elem.text, dic and subdic as side effects | Below is the instruction that describes the task:
### Input:
:param elem: ET Element object which has elem.text
:param dic: <container> (dict[-like]) object converted from elem
:param subdic: Sub <container> object converted from elem
:param options:
Keyword options, see the description of :func:`elem_to_container` for
more details.
:return: None but updating elem.text, dic and subdic as side effects
### Response:
def _process_elem_text(elem, dic, subdic, text="@text", **options):
"""
:param elem: ET Element object which has elem.text
:param dic: <container> (dict[-like]) object converted from elem
:param subdic: Sub <container> object converted from elem
:param options:
Keyword options, see the description of :func:`elem_to_container` for
more details.
:return: None but updating elem.text, dic and subdic as side effects
"""
elem.text = elem.text.strip()
if elem.text:
etext = _parse_text(elem.text, **options)
if len(elem) or elem.attrib:
subdic[text] = etext
else:
dic[elem.tag] = etext |
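An illustrative sketch of the branching rule above (element text lands under `"@text"` only when the element has children or attributes), using plain ElementTree; it does not call the private helper itself.

```python
# Illustrative only: shows which branch of _process_elem_text each element would take.
import xml.etree.ElementTree as ET

plain = ET.fromstring('<name> anyconfig </name>')          # no children, no attributes
tagged = ET.fromstring('<port type="tcp"> 8080 </port>')   # has an attribute

print(bool(len(plain) or plain.attrib))    # False -> text stored as dic['name']
print(bool(len(tagged) or tagged.attrib))  # True  -> text stored as subdic['@text']
```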
def number_cwt_peaks(x, n):
"""
This feature calculator searches for different peaks in x. To do so, x is smoothed by a ricker wavelet and for
widths ranging from 1 to n. This feature calculator returns the number of peaks that occur at enough width scales
and with sufficiently high Signal-to-Noise-Ratio (SNR)
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param n: maximum width to consider
:type n: int
:return: the value of this feature
:return type: int
"""
return len(find_peaks_cwt(vector=x, widths=np.array(list(range(1, n + 1))), wavelet=ricker)) | This feature calculator searches for different peaks in x. To do so, x is smoothed by a ricker wavelet and for
widths ranging from 1 to n. This feature calculator returns the number of peaks that occur at enough width scales
and with sufficiently high Signal-to-Noise-Ratio (SNR)
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param n: maximum width to consider
:type n: int
:return: the value of this feature
:return type: int | Below is the instruction that describes the task:
### Input:
This feature calculator searches for different peaks in x. To do so, x is smoothed by a ricker wavelet and for
widths ranging from 1 to n. This feature calculator returns the number of peaks that occur at enough width scales
and with sufficiently high Signal-to-Noise-Ratio (SNR)
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param n: maximum width to consider
:type n: int
:return: the value of this feature
:return type: int
### Response:
def number_cwt_peaks(x, n):
"""
This feature calculator searches for different peaks in x. To do so, x is smoothed by a ricker wavelet and for
widths ranging from 1 to n. This feature calculator returns the number of peaks that occur at enough width scales
and with sufficiently high Signal-to-Noise-Ratio (SNR)
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param n: maximum width to consider
:type n: int
:return: the value of this feature
:return type: int
"""
return len(find_peaks_cwt(vector=x, widths=np.array(list(range(1, n + 1))), wavelet=ricker)) |
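A small usage sketch for the feature calculator above, assuming `numpy` and the same `find_peaks_cwt`/`ricker` imports used by the function are in scope; the input signal is illustrative.

```python
import numpy as np

x = np.sin(np.linspace(0, 8 * np.pi, 400))  # four full periods -> a few clear peaks
print(number_cwt_peaks(x, n=5))             # peaks detected at widths 1..5 with sufficient SNR
```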
def QA_SU_save_stock_min(client=DATABASE, ui_log=None, ui_progress=None):
"""
聚宽实现方式
save current day's stock_min data
"""
# 导入聚宽模块且进行登录
try:
import jqdatasdk
# 请自行将 JQUSERNAME 和 JQUSERPASSWD 修改为自己的账号密码
jqdatasdk.auth("JQUSERNAME", "JQUSERPASSWD")
except:
raise ModuleNotFoundError
# 股票代码格式化
code_list = list(
map(
lambda x: x + ".XSHG" if x[0] == "6" else x + ".XSHE",
QA_fetch_get_stock_list().code.unique().tolist(),
))
coll = client.stock_min
coll.create_index([
("code", pymongo.ASCENDING),
("time_stamp", pymongo.ASCENDING),
("date_stamp", pymongo.ASCENDING),
])
err = []
def __transform_jq_to_qa(df, code, type_):
"""
处理 jqdata 分钟数据为 qa 格式,并存入数据库
1. jdatasdk 数据格式:
open close high low volume money
2018-12-03 09:31:00 10.59 10.61 10.61 10.59 8339100.0 88377836.0
2. 与 QUANTAXIS.QAFetch.QATdx.QA_fetch_get_stock_min 获取数据进行匹配,具体处理详见相应源码
open close high low vol amount ...
datetime
2018-12-03 09:31:00 10.99 10.90 10.99 10.90 2.211700e+06 2.425626e+07 ...
"""
if df is None or len(df) == 0:
raise ValueError("没有聚宽数据")
df = df.reset_index().rename(columns={
"index": "datetime",
"volume": "vol",
"money": "amount"
})
df["code"] = code
df["date"] = df.datetime.map(str).str.slice(0, 10)
df = df.set_index("datetime", drop=False)
df["date_stamp"] = df["date"].apply(lambda x: QA_util_date_stamp(x))
df["time_stamp"] = (
df["datetime"].map(str).apply(lambda x: QA_util_time_stamp(x)))
df["type"] = type_
return df[[
"open",
"close",
"high",
"low",
"vol",
"amount",
"datetime",
"code",
"date",
"date_stamp",
"time_stamp",
"type",
]]
def __saving_work(code, coll):
QA_util_log_info(
"##JOB03 Now Saving STOCK_MIN ==== {}".format(code), ui_log=ui_log)
try:
for type_ in ["1min", "5min", "15min", "30min", "60min"]:
col_filter = {"code": str(code)[0:6], "type": type_}
ref_ = coll.find(col_filter)
end_time = str(now_time())[0:19]
if coll.count_documents(col_filter) > 0:
start_time = ref_[coll.count_documents(
col_filter) - 1]["datetime"]
QA_util_log_info(
"##JOB03.{} Now Saving {} from {} to {} == {}".format(
["1min",
"5min",
"15min",
"30min",
"60min"].index(type_),
str(code)[0:6],
start_time,
end_time,
type_,
),
ui_log=ui_log,
)
if start_time != end_time:
df = jqdatasdk.get_price(
security=code,
start_date=start_time,
end_date=end_time,
frequency=type_.split("min")[0]+"m",
)
__data = __transform_jq_to_qa(
df, code=code[:6], type_=type_)
if len(__data) > 1:
coll.insert_many(
QA_util_to_json_from_pandas(__data)[1::])
else:
start_time = "2015-01-01 09:30:00"
QA_util_log_info(
"##JOB03.{} Now Saving {} from {} to {} == {}".format(
["1min",
"5min",
"15min",
"30min",
"60min"].index(type_),
str(code)[0:6],
start_time,
end_time,
type_,
),
ui_log=ui_log,
)
if start_time != end_time:
__data = __transform_jq_to_qa(
jqdatasdk.get_price(
security=code,
start_date=start_time,
end_date=end_time,
frequency=type_.split("min")[0]+"m",
),
code=code[:6],
type_=type_
)
if len(__data) > 1:
coll.insert_many(
QA_util_to_json_from_pandas(__data)[1::])
except Exception as e:
QA_util_log_info(e, ui_log=ui_log)
err.append(code)
QA_util_log_info(err, ui_log=ui_log)
# 聚宽之多允许三个线程连接
executor = ThreadPoolExecutor(max_workers=2)
res = {
executor.submit(__saving_work, code_list[i_], coll)
for i_ in range(len(code_list))
}
count = 0
for i_ in concurrent.futures.as_completed(res):
QA_util_log_info(
'The {} of Total {}'.format(count,
len(code_list)),
ui_log=ui_log
)
strProgress = "DOWNLOAD PROGRESS {} ".format(
str(float(count / len(code_list) * 100))[0:4] + "%")
intProgress = int(count / len(code_list) * 10000.0)
QA_util_log_info(
strProgress,
ui_log,
ui_progress=ui_progress,
ui_progress_int_value=intProgress
)
count = count + 1
if len(err) < 1:
QA_util_log_info("SUCCESS", ui_log=ui_log)
else:
QA_util_log_info(" ERROR CODE \n ", ui_log=ui_log)
QA_util_log_info(err, ui_log=ui_log) | 聚宽实现方式
save current day's stock_min data | Below is the instruction that describes the task:
### Input:
聚宽实现方式
save current day's stock_min data
### Response:
def QA_SU_save_stock_min(client=DATABASE, ui_log=None, ui_progress=None):
"""
聚宽实现方式
save current day's stock_min data
"""
# 导入聚宽模块且进行登录
try:
import jqdatasdk
# 请自行将 JQUSERNAME 和 JQUSERPASSWD 修改为自己的账号密码
jqdatasdk.auth("JQUSERNAME", "JQUSERPASSWD")
except:
raise ModuleNotFoundError
# 股票代码格式化
code_list = list(
map(
lambda x: x + ".XSHG" if x[0] == "6" else x + ".XSHE",
QA_fetch_get_stock_list().code.unique().tolist(),
))
coll = client.stock_min
coll.create_index([
("code", pymongo.ASCENDING),
("time_stamp", pymongo.ASCENDING),
("date_stamp", pymongo.ASCENDING),
])
err = []
def __transform_jq_to_qa(df, code, type_):
"""
处理 jqdata 分钟数据为 qa 格式,并存入数据库
1. jdatasdk 数据格式:
open close high low volume money
2018-12-03 09:31:00 10.59 10.61 10.61 10.59 8339100.0 88377836.0
2. 与 QUANTAXIS.QAFetch.QATdx.QA_fetch_get_stock_min 获取数据进行匹配,具体处理详见相应源码
open close high low vol amount ...
datetime
2018-12-03 09:31:00 10.99 10.90 10.99 10.90 2.211700e+06 2.425626e+07 ...
"""
if df is None or len(df) == 0:
raise ValueError("没有聚宽数据")
df = df.reset_index().rename(columns={
"index": "datetime",
"volume": "vol",
"money": "amount"
})
df["code"] = code
df["date"] = df.datetime.map(str).str.slice(0, 10)
df = df.set_index("datetime", drop=False)
df["date_stamp"] = df["date"].apply(lambda x: QA_util_date_stamp(x))
df["time_stamp"] = (
df["datetime"].map(str).apply(lambda x: QA_util_time_stamp(x)))
df["type"] = type_
return df[[
"open",
"close",
"high",
"low",
"vol",
"amount",
"datetime",
"code",
"date",
"date_stamp",
"time_stamp",
"type",
]]
def __saving_work(code, coll):
QA_util_log_info(
"##JOB03 Now Saving STOCK_MIN ==== {}".format(code), ui_log=ui_log)
try:
for type_ in ["1min", "5min", "15min", "30min", "60min"]:
col_filter = {"code": str(code)[0:6], "type": type_}
ref_ = coll.find(col_filter)
end_time = str(now_time())[0:19]
if coll.count_documents(col_filter) > 0:
start_time = ref_[coll.count_documents(
col_filter) - 1]["datetime"]
QA_util_log_info(
"##JOB03.{} Now Saving {} from {} to {} == {}".format(
["1min",
"5min",
"15min",
"30min",
"60min"].index(type_),
str(code)[0:6],
start_time,
end_time,
type_,
),
ui_log=ui_log,
)
if start_time != end_time:
df = jqdatasdk.get_price(
security=code,
start_date=start_time,
end_date=end_time,
frequency=type_.split("min")[0]+"m",
)
__data = __transform_jq_to_qa(
df, code=code[:6], type_=type_)
if len(__data) > 1:
coll.insert_many(
QA_util_to_json_from_pandas(__data)[1::])
else:
start_time = "2015-01-01 09:30:00"
QA_util_log_info(
"##JOB03.{} Now Saving {} from {} to {} == {}".format(
["1min",
"5min",
"15min",
"30min",
"60min"].index(type_),
str(code)[0:6],
start_time,
end_time,
type_,
),
ui_log=ui_log,
)
if start_time != end_time:
__data = __transform_jq_to_qa(
jqdatasdk.get_price(
security=code,
start_date=start_time,
end_date=end_time,
frequency=type_.split("min")[0]+"m",
),
code=code[:6],
type_=type_
)
if len(__data) > 1:
coll.insert_many(
QA_util_to_json_from_pandas(__data)[1::])
except Exception as e:
QA_util_log_info(e, ui_log=ui_log)
err.append(code)
QA_util_log_info(err, ui_log=ui_log)
# 聚宽之多允许三个线程连接
executor = ThreadPoolExecutor(max_workers=2)
res = {
executor.submit(__saving_work, code_list[i_], coll)
for i_ in range(len(code_list))
}
count = 0
for i_ in concurrent.futures.as_completed(res):
QA_util_log_info(
'The {} of Total {}'.format(count,
len(code_list)),
ui_log=ui_log
)
strProgress = "DOWNLOAD PROGRESS {} ".format(
str(float(count / len(code_list) * 100))[0:4] + "%")
intProgress = int(count / len(code_list) * 10000.0)
QA_util_log_info(
strProgress,
ui_log,
ui_progress=ui_progress,
ui_progress_int_value=intProgress
)
count = count + 1
if len(err) < 1:
QA_util_log_info("SUCCESS", ui_log=ui_log)
else:
QA_util_log_info(" ERROR CODE \n ", ui_log=ui_log)
QA_util_log_info(err, ui_log=ui_log) |
def horizontal_layout(self, draw, slide):
""" Augment slide with horizontal layout info """
padding = self.padding
heading = slide['heading']
top = padding
left = padding
top += heading['height'] + padding
rows = slide['rows']
for row in rows:
images = row.get('images', 0)
items = row['items']
used_width = sum(x.get('width', 0) for x in items)
available_width = WIDTH - (
used_width + ((1 + len(items)) * padding))
if images:
image_width = available_width // images
# OK, now set left for all items and image_width for images
left = padding
for item in row['items']:
if item.get('image'):
item['width'] = image_width
item['left'] = left
left += item['width'] + padding
return | Augment slide with horizontal layout info | Below is the instruction that describes the task:
### Input:
Augment slide with horizontal layout info
### Response:
def horizontal_layout(self, draw, slide):
""" Augment slide with horizontal layout info """
padding = self.padding
heading = slide['heading']
top = padding
left = padding
top += heading['height'] + padding
rows = slide['rows']
for row in rows:
images = row.get('images', 0)
items = row['items']
used_width = sum(x.get('width', 0) for x in items)
available_width = WIDTH - (
used_width + ((1 + len(items)) * padding))
if images:
image_width = available_width // images
# OK, now set left for all items and image_width for images
left = padding
for item in row['items']:
if item.get('image'):
item['width'] = image_width
item['left'] = left
left += item['width'] + padding
return |
def set_slug(apps, schema_editor):
"""
Create a slug for each Event already in the DB.
"""
Event = apps.get_model('spectator_events', 'Event')
for e in Event.objects.all():
e.slug = generate_slug(e.pk)
e.save(update_fields=['slug']) | Create a slug for each Event already in the DB. | Below is the instruction that describes the task:
### Input:
Create a slug for each Event already in the DB.
### Response:
def set_slug(apps, schema_editor):
"""
Create a slug for each Event already in the DB.
"""
Event = apps.get_model('spectator_events', 'Event')
for e in Event.objects.all():
e.slug = generate_slug(e.pk)
e.save(update_fields=['slug']) |
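A hedged sketch of how a helper like `set_slug` is typically wired into a Django data migration; the dependency tuple below is hypothetical and `set_slug` is assumed to be defined in the migration module.

```python
from django.db import migrations

class Migration(migrations.Migration):

    dependencies = [
        ('spectator_events', '0006_event_slug'),  # assumed preceding migration
    ]

    operations = [
        # reverse step is a no-op: existing slugs are simply left in place
        migrations.RunPython(set_slug, migrations.RunPython.noop),
    ]
```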
def filesessionmaker(sessionmaker, file_manager, file_managers=None):
u'''Wrapper of session maker adding link to a FileManager instance
to session.::
file_manager = FileManager(cfg.TRANSIENT_ROOT,
cfg.PERSISTENT_ROOT)
filesessionmaker(sessionmaker(...), file_manager)
'''
registry = WeakKeyDictionary()
if file_managers:
for k, v in six.iteritems(file_managers):
if isinstance(k, FileAttribute):
raise NotImplementedError()
registry[k] = v
def find_file_manager(self, target):
if isinstance(target, FileAttribute):
assert hasattr(target, 'class_')
target = target.class_
else:
if not inspect.isclass(target):
target = type(target)
assert hasattr(target, 'metadata')
assert class_mapper(target) is not None
if target in registry:
return registry[target]
if target.metadata in registry:
return registry[target.metadata]
return file_manager
def session_maker(*args, **kwargs):
session = sessionmaker(*args, **kwargs)
# XXX in case we want to use session manager somehow bound
# to request environment. For example, to generate user-specific
# URLs.
#session.file_manager = \
# kwargs.get('file_manager', file_manager)
session.file_manager = file_manager
session.find_file_manager = six.create_bound_method(
find_file_manager,
session)
return session
return session_maker | u'''Wrapper of session maker adding link to a FileManager instance
to session.::
file_manager = FileManager(cfg.TRANSIENT_ROOT,
cfg.PERSISTENT_ROOT)
filesessionmaker(sessionmaker(...), file_manager) | Below is the instruction that describes the task:
### Input:
u'''Wrapper of session maker adding link to a FileManager instance
to session.::
file_manager = FileManager(cfg.TRANSIENT_ROOT,
cfg.PERSISTENT_ROOT)
filesessionmaker(sessionmaker(...), file_manager)
### Response:
def filesessionmaker(sessionmaker, file_manager, file_managers=None):
u'''Wrapper of session maker adding link to a FileManager instance
to session.::
file_manager = FileManager(cfg.TRANSIENT_ROOT,
cfg.PERSISTENT_ROOT)
filesessionmaker(sessionmaker(...), file_manager)
'''
registry = WeakKeyDictionary()
if file_managers:
for k, v in six.iteritems(file_managers):
if isinstance(k, FileAttribute):
raise NotImplementedError()
registry[k] = v
def find_file_manager(self, target):
if isinstance(target, FileAttribute):
assert hasattr(target, 'class_')
target = target.class_
else:
if not inspect.isclass(target):
target = type(target)
assert hasattr(target, 'metadata')
assert class_mapper(target) is not None
if target in registry:
return registry[target]
if target.metadata in registry:
return registry[target.metadata]
return file_manager
def session_maker(*args, **kwargs):
session = sessionmaker(*args, **kwargs)
# XXX in case we want to use session manager somehow bound
# to request environment. For example, to generate user-specific
# URLs.
#session.file_manager = \
# kwargs.get('file_manager', file_manager)
session.file_manager = file_manager
session.find_file_manager = six.create_bound_method(
find_file_manager,
session)
return session
return session_maker |
def setup(self):
"""Setup does stuff only if there are no networks.
This is so it only runs once at the start of the experiment. It first
calls the same function in the super (see experiments.py in wallace).
Then it adds a source to each network.
"""
if not self.networks():
super(FunctionLearning, self).setup()
for net in self.networks():
self.source(network=net) | Setup does stuff only if there are no networks.
This is so it only runs once at the start of the experiment. It first
calls the same function in the super (see experiments.py in wallace).
Then it adds a source to each network. | Below is the instruction that describes the task:
### Input:
Setup does stuff only if there are no networks.
This is so it only runs once at the start of the experiment. It first
calls the same function in the super (see experiments.py in wallace).
Then it adds a source to each network.
### Response:
def setup(self):
"""Setup does stuff only if there are no networks.
This is so it only runs once at the start of the experiment. It first
calls the same function in the super (see experiments.py in wallace).
Then it adds a source to each network.
"""
if not self.networks():
super(FunctionLearning, self).setup()
for net in self.networks():
self.source(network=net) |
def aggregate(self, pipeline, **kwargs):
"""Perform an aggregation and make sure that result will be everytime
CommandCursor. Will take care for pymongo version differencies
:param pipeline: {list} of aggregation pipeline stages
:return: {pymongo.command_cursor.CommandCursor}
"""
result = self.collection.aggregate(pipeline, **kwargs)
if pymongo.version_tuple < (3, 0, 0):
result = result['result']
return result | Perform an aggregation and make sure that result will be everytime
CommandCursor. Will take care for pymongo version differencies
:param pipeline: {list} of aggregation pipeline stages
:return: {pymongo.command_cursor.CommandCursor} | Below is the instruction that describes the task:
### Input:
Perform an aggregation and make sure that result will be everytime
CommandCursor. Will take care for pymongo version differencies
:param pipeline: {list} of aggregation pipeline stages
:return: {pymongo.command_cursor.CommandCursor}
### Response:
def aggregate(self, pipeline, **kwargs):
"""Perform an aggregation and make sure that result will be everytime
CommandCursor. Will take care for pymongo version differencies
:param pipeline: {list} of aggregation pipeline stages
:return: {pymongo.command_cursor.CommandCursor}
"""
result = self.collection.aggregate(pipeline, **kwargs)
if pymongo.version_tuple < (3, 0, 0):
result = result['result']
return result |
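A minimal usage sketch for the wrapper above; `docs` stands for a hypothetical instance of the surrounding class and the pipeline is illustrative. The point is that the caller can iterate the result the same way on pymongo 2.x (a plain list) and 3.x (a CommandCursor).

```python
pipeline = [
    {'$match': {'status': 'active'}},
    {'$group': {'_id': '$country', 'total': {'$sum': 1}}},
]
for doc in docs.aggregate(pipeline):  # 'docs' is a hypothetical wrapper instance
    print(doc['_id'], doc['total'])
```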
def countByValue(self):
"""Apply countByValue to every RDD.abs
:rtype: DStream
.. warning::
Implemented as a local operation.
Example:
>>> import pysparkling
>>> sc = pysparkling.Context()
>>> ssc = pysparkling.streaming.StreamingContext(sc, 0.1)
>>> (
... ssc
... .queueStream([[1, 1, 5, 5, 5, 2]])
... .countByValue()
... .foreachRDD(lambda rdd: print(sorted(rdd.collect())))
... )
>>> ssc.start()
>>> ssc.awaitTermination(0.15)
[(1, 2), (2, 1), (5, 3)]
"""
return self.transform(
lambda rdd: self._context._context.parallelize(
rdd.countByValue().items())) | Apply countByValue to every RDD.abs
:rtype: DStream
.. warning::
Implemented as a local operation.
Example:
>>> import pysparkling
>>> sc = pysparkling.Context()
>>> ssc = pysparkling.streaming.StreamingContext(sc, 0.1)
>>> (
... ssc
... .queueStream([[1, 1, 5, 5, 5, 2]])
... .countByValue()
... .foreachRDD(lambda rdd: print(sorted(rdd.collect())))
... )
>>> ssc.start()
>>> ssc.awaitTermination(0.15)
[(1, 2), (2, 1), (5, 3)] | Below is the instruction that describes the task:
### Input:
Apply countByValue to every RDD.abs
:rtype: DStream
.. warning::
Implemented as a local operation.
Example:
>>> import pysparkling
>>> sc = pysparkling.Context()
>>> ssc = pysparkling.streaming.StreamingContext(sc, 0.1)
>>> (
... ssc
... .queueStream([[1, 1, 5, 5, 5, 2]])
... .countByValue()
... .foreachRDD(lambda rdd: print(sorted(rdd.collect())))
... )
>>> ssc.start()
>>> ssc.awaitTermination(0.15)
[(1, 2), (2, 1), (5, 3)]
### Response:
def countByValue(self):
"""Apply countByValue to every RDD.abs
:rtype: DStream
.. warning::
Implemented as a local operation.
Example:
>>> import pysparkling
>>> sc = pysparkling.Context()
>>> ssc = pysparkling.streaming.StreamingContext(sc, 0.1)
>>> (
... ssc
... .queueStream([[1, 1, 5, 5, 5, 2]])
... .countByValue()
... .foreachRDD(lambda rdd: print(sorted(rdd.collect())))
... )
>>> ssc.start()
>>> ssc.awaitTermination(0.15)
[(1, 2), (2, 1), (5, 3)]
"""
return self.transform(
lambda rdd: self._context._context.parallelize(
rdd.countByValue().items())) |
def _get_address_family(table, instance):
"""
Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family
"""
address_family_mapping = {"inet": "ipv4", "inet6": "ipv6", "inetflow": "flow"}
if instance == "master":
family = table.rsplit(".", 1)[-2]
else:
family = table.split(".")[-2]
try:
address_family = address_family_mapping[family]
except KeyError:
address_family = None
return address_family | Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family | Below is the instruction that describes the task:
### Input:
Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family
### Response:
def _get_address_family(table, instance):
"""
Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family
"""
address_family_mapping = {"inet": "ipv4", "inet6": "ipv6", "inetflow": "flow"}
if instance == "master":
family = table.rsplit(".", 1)[-2]
else:
family = table.split(".")[-2]
try:
address_family = address_family_mapping[family]
except KeyError:
address_family = None
return address_family |
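A few hand-checkable calls illustrating the parsing rule above; the Junos table names are illustrative.

```python
print(_get_address_family('inet6.0', 'master'))        # 'ipv6'
print(_get_address_family('inet.0', 'master'))          # 'ipv4'
print(_get_address_family('CUST-A.inet.0', 'CUST-A'))   # 'ipv4'
print(_get_address_family('mpls.0', 'master'))          # None (family not in the mapping)
```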
def generate_additional_context(self, matching_datasets):
"""Return additional information about matching datasets.
Includes upload counts, related hubs, related tags.
"""
dataset_ids = [upload.id for upload in matching_datasets]
tags = Tag.objects.filter(
dataset__in=dataset_ids
).distinct().annotate(
Count('word')
).order_by('-word__count')[:5]
hubs = matching_datasets.values("hub_slug").annotate(
Count('hub_slug')
).order_by('-hub_slug__count')
if hubs:
most_used_hub = get_hub_name_from_slug(hubs[0]['hub_slug'])
hub_slug = hubs[0]['hub_slug']
else:
most_used_hub = None
hub_slug = None
return {
'tags': tags,
'hub': most_used_hub,
'hub_slug': hub_slug,
} | Return additional information about matching datasets.
Includes upload counts, related hubs, related tags. | Below is the instruction that describes the task:
### Input:
Return additional information about matching datasets.
Includes upload counts, related hubs, related tags.
### Response:
def generate_additional_context(self, matching_datasets):
"""Return additional information about matching datasets.
Includes upload counts, related hubs, related tags.
"""
dataset_ids = [upload.id for upload in matching_datasets]
tags = Tag.objects.filter(
dataset__in=dataset_ids
).distinct().annotate(
Count('word')
).order_by('-word__count')[:5]
hubs = matching_datasets.values("hub_slug").annotate(
Count('hub_slug')
).order_by('-hub_slug__count')
if hubs:
most_used_hub = get_hub_name_from_slug(hubs[0]['hub_slug'])
hub_slug = hubs[0]['hub_slug']
else:
most_used_hub = None
hub_slug = None
return {
'tags': tags,
'hub': most_used_hub,
'hub_slug': hub_slug,
} |
def plot_pnlmoney(self):
"""
画出pnl盈亏额散点图
"""
plt.scatter(x=self.pnl.sell_date.apply(str), y=self.pnl.pnl_money)
plt.gcf().autofmt_xdate()
return plt | 画出pnl盈亏额散点图 | Below is the instruction that describes the task:
### Input:
画出pnl盈亏额散点图
### Response:
def plot_pnlmoney(self):
"""
画出pnl盈亏额散点图
"""
plt.scatter(x=self.pnl.sell_date.apply(str), y=self.pnl.pnl_money)
plt.gcf().autofmt_xdate()
return plt |
def to_perseus(graphs):
"""
Create a network table and the network dictionary for export to Perseus.
:param graphs: Collection of networkx graphs
>>> from perseuspy import nx
>>> G = nx.random_graphs.barabasi_albert_graph(10, 3)
>>> network_table, networks = nx.to_perseus([G])
"""
graph_attributes = []
networks = {}
for graph in graphs:
attributes = dict(graph.graph)
attributes.update({"Name" : attributes.get("Name", attributes.get("name", "networkx graph")),
"GUID": attributes.get("GUID", str(uuid.uuid4()))})
graph_attributes.append(attributes)
if len(graph) > 0:
edge_table = pd.DataFrame([dict(data, **{"Source": str(f), "Target": str(t)}) for f,t,data in graph.edges(data=True)])
edge_table.columns.name = "Column Name"
node_table = pd.DataFrame([dict(data, **{"Node": str(n)}) for n,data in graph.nodes(data=True)])
node_table.columns.name = "Column Name"
else:
edge_table = pd.DataFrame(columns=pd.Index(['Source', 'Target'], name='Column Name'))
node_table = pd.DataFrame(columns=pd.Index(['Node'], name='Column Name'))
guid = attributes['GUID']
networks[guid] = {
'edge_table': edge_table,
'node_table': node_table,
'name': attributes['Name'],
'guid': guid }
network_table = pd.DataFrame(graph_attributes)
network_table.columns.name = "Column Name"
return network_table, networks | Create a network table and the network dictionary for export to Perseus.
:param graphs: Collection of networkx graphs
>>> from perseuspy import nx
>>> G = nx.random_graphs.barabasi_albert_graph(10, 3)
>>> network_table, networks = nx.to_perseus([G]) | Below is the instruction that describes the task:
### Input:
Create a network table and the network dictionary for export to Perseus.
:param graphs: Collection of networkx graphs
>>> from perseuspy import nx
>>> G = nx.random_graphs.barabasi_albert_graph(10, 3)
>>> network_table, networks = nx.to_perseus([G])
### Response:
def to_perseus(graphs):
"""
Create a network table and the network dictionary for export to Perseus.
:param graphs: Collection of networkx graphs
>>> from perseuspy import nx
>>> G = nx.random_graphs.barabasi_albert_graph(10, 3)
>>> network_table, networks = nx.to_perseus([G])
"""
graph_attributes = []
networks = {}
for graph in graphs:
attributes = dict(graph.graph)
attributes.update({"Name" : attributes.get("Name", attributes.get("name", "networkx graph")),
"GUID": attributes.get("GUID", str(uuid.uuid4()))})
graph_attributes.append(attributes)
if len(graph) > 0:
edge_table = pd.DataFrame([dict(data, **{"Source": str(f), "Target": str(t)}) for f,t,data in graph.edges(data=True)])
edge_table.columns.name = "Column Name"
node_table = pd.DataFrame([dict(data, **{"Node": str(n)}) for n,data in graph.nodes(data=True)])
node_table.columns.name = "Column Name"
else:
edge_table = pd.DataFrame(columns=pd.Index(['Source', 'Target'], name='Column Name'))
node_table = pd.DataFrame(columns=pd.Index(['Node'], name='Column Name'))
guid = attributes['GUID']
networks[guid] = {
'edge_table': edge_table,
'node_table': node_table,
'name': attributes['Name'],
'guid': guid }
network_table = pd.DataFrame(graph_attributes)
network_table.columns.name = "Column Name"
return network_table, networks |
def calcDrawingProbs(self):
"""
Returns a vector that contains the probabily of an item being from each position. We say
that every item in a order vector is drawn with weight phi^i where i is its position.
"""
wmg = self.wmg
phi = self.phi
# We say the weight of the candidate in position i is phi^i.
weights = []
for i in range(0, len(wmg.keys())):
weights.append(phi**i)
# Calculate the probabilty that an item at each weight is drawn.
totalWeight = sum(weights)
for i in range(0, len(wmg.keys())):
weights[i] = weights[i]/totalWeight
return weights | Returns a vector that contains the probabily of an item being from each position. We say
that every item in a order vector is drawn with weight phi^i where i is its position. | Below is the the instruction that describes the task:
### Input:
Returns a vector that contains the probabily of an item being from each position. We say
that every item in a order vector is drawn with weight phi^i where i is its position.
### Response:
def calcDrawingProbs(self):
"""
Returns a vector that contains the probabily of an item being from each position. We say
that every item in a order vector is drawn with weight phi^i where i is its position.
"""
wmg = self.wmg
phi = self.phi
# We say the weight of the candidate in position i is phi^i.
weights = []
for i in range(0, len(wmg.keys())):
weights.append(phi**i)
# Calculate the probabilty that an item at each weight is drawn.
totalWeight = sum(weights)
for i in range(0, len(wmg.keys())):
weights[i] = weights[i]/totalWeight
return weights |
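A short worked illustration of the weighting scheme above, independent of the class; three positions and phi = 0.5 are assumed purely for the arithmetic.

```python
phi = 0.5
weights = [phi ** i for i in range(3)]  # [1.0, 0.5, 0.25]
total = sum(weights)                    # 1.75
probs = [w / total for w in weights]    # [0.5714..., 0.2857..., 0.1428...]
print(probs)
```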
def _from_p(self, mode):
"""Convert the image from P or PA to RGB or RGBA."""
self._check_modes(("P", "PA"))
if not self.palette:
raise RuntimeError("Can't convert palettized image, missing palette.")
pal = np.array(self.palette)
pal = da.from_array(pal, chunks=pal.shape)
if pal.shape[1] == 4:
# colormap's alpha overrides data alpha
mode = "RGBA"
alpha = None
elif self.mode.endswith("A"):
# add a new/fake 'bands' dimension to the end
alpha = self.data.sel(bands="A").data[..., None]
mode = mode + "A" if not mode.endswith("A") else mode
else:
alpha = None
flat_indexes = self.data.sel(bands='P').data.ravel().astype('int64')
dim_sizes = ((key, val) for key, val in self.data.sizes.items() if key != 'bands')
dims, new_shape = zip(*dim_sizes)
dims = dims + ('bands',)
new_shape = new_shape + (pal.shape[1],)
new_data = pal[flat_indexes].reshape(new_shape)
coords = dict(self.data.coords)
coords["bands"] = list(mode)
if alpha is not None:
new_arr = da.concatenate((new_data, alpha), axis=-1)
data = xr.DataArray(new_arr, coords=coords, attrs=self.data.attrs, dims=dims)
else:
data = xr.DataArray(new_data, coords=coords, attrs=self.data.attrs, dims=dims)
return data | Convert the image from P or PA to RGB or RGBA. | Below is the instruction that describes the task:
### Input:
Convert the image from P or PA to RGB or RGBA.
### Response:
def _from_p(self, mode):
"""Convert the image from P or PA to RGB or RGBA."""
self._check_modes(("P", "PA"))
if not self.palette:
raise RuntimeError("Can't convert palettized image, missing palette.")
pal = np.array(self.palette)
pal = da.from_array(pal, chunks=pal.shape)
if pal.shape[1] == 4:
# colormap's alpha overrides data alpha
mode = "RGBA"
alpha = None
elif self.mode.endswith("A"):
# add a new/fake 'bands' dimension to the end
alpha = self.data.sel(bands="A").data[..., None]
mode = mode + "A" if not mode.endswith("A") else mode
else:
alpha = None
flat_indexes = self.data.sel(bands='P').data.ravel().astype('int64')
dim_sizes = ((key, val) for key, val in self.data.sizes.items() if key != 'bands')
dims, new_shape = zip(*dim_sizes)
dims = dims + ('bands',)
new_shape = new_shape + (pal.shape[1],)
new_data = pal[flat_indexes].reshape(new_shape)
coords = dict(self.data.coords)
coords["bands"] = list(mode)
if alpha is not None:
new_arr = da.concatenate((new_data, alpha), axis=-1)
data = xr.DataArray(new_arr, coords=coords, attrs=self.data.attrs, dims=dims)
else:
data = xr.DataArray(new_data, coords=coords, attrs=self.data.attrs, dims=dims)
return data |
def register_on_clipboard_mode_changed(self, callback):
"""Set the callback function to consume on clipboard mode changed
events.
Callback receives a IClipboardModeChangedEvent object.
Returns the callback_id
"""
event_type = library.VBoxEventType.on_clipboard_mode_changed
return self.event_source.register_callback(callback, event_type) | Set the callback function to consume on clipboard mode changed
events.
Callback receives a IClipboardModeChangedEvent object.
Returns the callback_id | Below is the instruction that describes the task:
### Input:
Set the callback function to consume on clipboard mode changed
events.
Callback receives a IClipboardModeChangedEvent object.
Returns the callback_id
### Response:
def register_on_clipboard_mode_changed(self, callback):
"""Set the callback function to consume on clipboard mode changed
events.
Callback receives a IClipboardModeChangedEvent object.
Returns the callback_id
"""
event_type = library.VBoxEventType.on_clipboard_mode_changed
return self.event_source.register_callback(callback, event_type) |
def role_add(self, role=None, login=None, envs=[], query='/roles/'):
"""
`login` - Login or username of user to add to `role`
`role` - Role to add user to
Add user to role
"""
data = {'login': self.args.login}
juicer.utils.Log.log_debug(
"Add Role '%s' to '%s'", role, login)
for env in self.args.envs:
if not juicer.utils.role_exists_p(role, self.connectors[env]):
juicer.utils.Log.log_info("role `%s` doesn't exist in %s... skipping!",
(role, env))
continue
elif not juicer.utils.user_exists_p(login, self.connectors[env]):
juicer.utils.Log.log_info("user `%s` doesn't exist in %s... skipping!",
(login, env))
else:
url = "%s%s/users/" % (query, role)
_r = self.connectors[env].post(url, data)
if _r.status_code == Constants.PULP_POST_OK:
juicer.utils.Log.log_info("added user `%s` to role `%s` in %s",
(login, role, env))
else:
_r.raise_for_status()
return True | `login` - Login or username of user to add to `role`
`role` - Role to add user to
Add user to role | Below is the instruction that describes the task:
### Input:
`login` - Login or username of user to add to `role`
`role` - Role to add user to
Add user to role
### Response:
def role_add(self, role=None, login=None, envs=[], query='/roles/'):
"""
`login` - Login or username of user to add to `role`
`role` - Role to add user to
Add user to role
"""
data = {'login': self.args.login}
juicer.utils.Log.log_debug(
"Add Role '%s' to '%s'", role, login)
for env in self.args.envs:
if not juicer.utils.role_exists_p(role, self.connectors[env]):
juicer.utils.Log.log_info("role `%s` doesn't exist in %s... skipping!",
(role, env))
continue
elif not juicer.utils.user_exists_p(login, self.connectors[env]):
juicer.utils.Log.log_info("user `%s` doesn't exist in %s... skipping!",
(login, env))
else:
url = "%s%s/users/" % (query, role)
_r = self.connectors[env].post(url, data)
if _r.status_code == Constants.PULP_POST_OK:
juicer.utils.Log.log_info("added user `%s` to role `%s` in %s",
(login, role, env))
else:
_r.raise_for_status()
return True |
def track_trace(self, name, properties=None, severity=None):
"""Sends a single trace statement.
Args:
name (str). the trace statement.\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
severity (str). the severity level of this trace, one of DEBUG, INFO, WARNING, ERROR, CRITICAL
"""
data = channel.contracts.MessageData()
data.message = name or NULL_CONSTANT_STRING
if properties:
data.properties = properties
if severity is not None:
data.severity_level = channel.contracts.MessageData.PYTHON_LOGGING_LEVELS.get(severity)
self.track(data, self._context) | Sends a single trace statement.
Args:
name (str). the trace statement.\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
severity (str). the severity level of this trace, one of DEBUG, INFO, WARNING, ERROR, CRITICAL | Below is the instruction that describes the task:
### Input:
Sends a single trace statement.
Args:
name (str). the trace statement.\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
severity (str). the severity level of this trace, one of DEBUG, INFO, WARNING, ERROR, CRITICAL
### Response:
def track_trace(self, name, properties=None, severity=None):
"""Sends a single trace statement.
Args:
name (str). the trace statement.\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
severity (str). the severity level of this trace, one of DEBUG, INFO, WARNING, ERROR, CRITICAL
"""
data = channel.contracts.MessageData()
data.message = name or NULL_CONSTANT_STRING
if properties:
data.properties = properties
if severity is not None:
data.severity_level = channel.contracts.MessageData.PYTHON_LOGGING_LEVELS.get(severity)
self.track(data, self._context) |
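A minimal usage sketch against the Application Insights Python SDK this method belongs to; the instrumentation key is a placeholder.

```python
from applicationinsights import TelemetryClient

tc = TelemetryClient('<instrumentation-key>')  # placeholder key, not a real value
tc.track_trace('nightly sync started', {'job': 'sync'}, severity='INFO')
tc.flush()  # push the buffered telemetry immediately
```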
def append(self, other: 'List') -> 'List':
"""Append other list to this list."""
if self.null():
return other
return (self.tail().append(other)).cons(self.head()) | Append other list to this list. | Below is the the instruction that describes the task:
### Input:
Append other list to this list.
### Response:
def append(self, other: 'List') -> 'List':
"""Append other list to this list."""
if self.null():
return other
return (self.tail().append(other)).cons(self.head()) |
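A worked trace of the recursion above, assuming ordinary cons-list semantics (`List([1, 2]).head() == 1`, `List([1, 2]).tail() == List([2])`); written as comments since the constructor is not shown in this row.

```python
# Assumed cons-list semantics; step-by-step expansion of append:
#   List([1, 2]).append(List([3]))
#     -> List([2]).append(List([3])).cons(1)
#     -> List([]).append(List([3])).cons(2).cons(1)
#     -> List([3]).cons(2).cons(1)     # null() base case returns `other`
#     -> List([1, 2, 3])
```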
def current_position(self):
"""Return a tuple of (start, end)."""
token = self.tokenizer.peek(0)
if token:
return token.start, token.end
return self.tokenizer.position, self.tokenizer.position + 1 | Return a tuple of (start, end). | Below is the the instruction that describes the task:
### Input:
Return a tuple of (start, end).
### Response:
def current_position(self):
"""Return a tuple of (start, end)."""
token = self.tokenizer.peek(0)
if token:
return token.start, token.end
return self.tokenizer.position, self.tokenizer.position + 1 |
def join_domain(domain,
username=None,
password=None,
account_ou=None,
account_exists=False,
restart=False):
'''
Join a computer to an Active Directory domain. Requires a reboot.
Args:
domain (str):
The domain to which the computer should be joined, e.g.
``example.com``
username (str):
Username of an account which is authorized to join computers to the
specified domain. Needs to be either fully qualified like
``user@domain.tld`` or simply ``user``
password (str):
Password of the specified user
account_ou (str):
The DN of the OU below which the account for this computer should be
created when joining the domain, e.g.
``ou=computers,ou=departm_432,dc=my-company,dc=com``
account_exists (bool):
If set to ``True`` the computer will only join the domain if the
account already exists. If set to ``False`` the computer account
will be created if it does not exist, otherwise it will use the
existing account. Default is ``False``
restart (bool):
``True`` will restart the computer after a successful join. Default
is ``False``
.. versionadded:: 2015.8.2/2015.5.7
Returns:
dict: Returns a dictionary if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.join_domain domain='domain.tld' \\
username='joinuser' password='joinpassword' \\
account_ou='ou=clients,ou=org,dc=domain,dc=tld' \\
account_exists=False, restart=True
'''
if six.PY2:
domain = _to_unicode(domain)
username = _to_unicode(username)
password = _to_unicode(password)
account_ou = _to_unicode(account_ou)
status = get_domain_workgroup()
if 'Domain' in status:
if status['Domain'] == domain:
return 'Already joined to {0}'.format(domain)
if username and '\\' not in username and '@' not in username:
username = '{0}@{1}'.format(username, domain)
if username and password is None:
return 'Must specify a password if you pass a username'
# remove any escape characters
if isinstance(account_ou, six.string_types):
account_ou = account_ou.split('\\')
account_ou = ''.join(account_ou)
err = _join_domain(domain=domain, username=username, password=password,
account_ou=account_ou, account_exists=account_exists)
if not err:
ret = {'Domain': domain,
'Restart': False}
if restart:
ret['Restart'] = reboot()
return ret
raise CommandExecutionError(win32api.FormatMessage(err).rstrip()) | Join a computer to an Active Directory domain. Requires a reboot.
Args:
domain (str):
The domain to which the computer should be joined, e.g.
``example.com``
username (str):
Username of an account which is authorized to join computers to the
specified domain. Needs to be either fully qualified like
``[email protected]`` or simply ``user``
password (str):
Password of the specified user
account_ou (str):
The DN of the OU below which the account for this computer should be
created when joining the domain, e.g.
``ou=computers,ou=departm_432,dc=my-company,dc=com``
account_exists (bool):
If set to ``True`` the computer will only join the domain if the
account already exists. If set to ``False`` the computer account
will be created if it does not exist, otherwise it will use the
existing account. Default is ``False``
restart (bool):
``True`` will restart the computer after a successful join. Default
is ``False``
.. versionadded:: 2015.8.2/2015.5.7
Returns:
dict: Returns a dictionary if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.join_domain domain='domain.tld' \\
username='joinuser' password='joinpassword' \\
account_ou='ou=clients,ou=org,dc=domain,dc=tld' \\
account_exists=False, restart=True | Below is the the instruction that describes the task:
### Input:
Join a computer to an Active Directory domain. Requires a reboot.
Args:
domain (str):
The domain to which the computer should be joined, e.g.
``example.com``
username (str):
Username of an account which is authorized to join computers to the
specified domain. Needs to be either fully qualified like
``[email protected]`` or simply ``user``
password (str):
Password of the specified user
account_ou (str):
The DN of the OU below which the account for this computer should be
created when joining the domain, e.g.
``ou=computers,ou=departm_432,dc=my-company,dc=com``
account_exists (bool):
If set to ``True`` the computer will only join the domain if the
account already exists. If set to ``False`` the computer account
will be created if it does not exist, otherwise it will use the
existing account. Default is ``False``
restart (bool):
``True`` will restart the computer after a successful join. Default
is ``False``
.. versionadded:: 2015.8.2/2015.5.7
Returns:
dict: Returns a dictionary if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.join_domain domain='domain.tld' \\
username='joinuser' password='joinpassword' \\
account_ou='ou=clients,ou=org,dc=domain,dc=tld' \\
account_exists=False, restart=True
### Response:
def join_domain(domain,
username=None,
password=None,
account_ou=None,
account_exists=False,
restart=False):
'''
Join a computer to an Active Directory domain. Requires a reboot.
Args:
domain (str):
The domain to which the computer should be joined, e.g.
``example.com``
username (str):
Username of an account which is authorized to join computers to the
specified domain. Needs to be either fully qualified like
``[email protected]`` or simply ``user``
password (str):
Password of the specified user
account_ou (str):
The DN of the OU below which the account for this computer should be
created when joining the domain, e.g.
``ou=computers,ou=departm_432,dc=my-company,dc=com``
account_exists (bool):
If set to ``True`` the computer will only join the domain if the
account already exists. If set to ``False`` the computer account
will be created if it does not exist, otherwise it will use the
existing account. Default is ``False``
restart (bool):
``True`` will restart the computer after a successful join. Default
is ``False``
.. versionadded:: 2015.8.2/2015.5.7
Returns:
dict: Returns a dictionary if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.join_domain domain='domain.tld' \\
username='joinuser' password='joinpassword' \\
account_ou='ou=clients,ou=org,dc=domain,dc=tld' \\
account_exists=False, restart=True
'''
if six.PY2:
domain = _to_unicode(domain)
username = _to_unicode(username)
password = _to_unicode(password)
account_ou = _to_unicode(account_ou)
status = get_domain_workgroup()
if 'Domain' in status:
if status['Domain'] == domain:
return 'Already joined to {0}'.format(domain)
if username and '\\' not in username and '@' not in username:
username = '{0}@{1}'.format(username, domain)
if username and password is None:
return 'Must specify a password if you pass a username'
# remove any escape characters
if isinstance(account_ou, six.string_types):
account_ou = account_ou.split('\\')
account_ou = ''.join(account_ou)
err = _join_domain(domain=domain, username=username, password=password,
account_ou=account_ou, account_exists=account_exists)
if not err:
ret = {'Domain': domain,
'Restart': False}
if restart:
ret['Restart'] = reboot()
return ret
raise CommandExecutionError(win32api.FormatMessage(err).rstrip()) |
def find_slave_widgets(self,tab):
"""return all the frontends that do not own the kernel attached to the given widget/tab.
Only find frontends owned by the current application. Selection
based on connection file of the kernel.
This function does the conversion tabNumber/widget if needed.
"""
#convert from/to int/richIpythonWidget if needed
if isinstance(tab, int):
tab = self.tab_widget.widget(tab)
km=tab.kernel_manager
#build list of all widgets
widget_list = [self.tab_widget.widget(i) for i in range(self.tab_widget.count())]
        # widgets that are candidates not to be the owner of the kernel share the same connection file as the current widget
filtered_widget_list = ( widget for widget in widget_list if
widget.kernel_manager.connection_file == km.connection_file)
        # Get a list of all widgets owning the same kernel and remove them from
        # the previous candidates. (better to use sets?)
master_widget_list = self.find_master_tab(tab, as_list=True)
slave_list = [widget for widget in filtered_widget_list if widget not in master_widget_list]
return slave_list | return all the frontends that do not own the kernel attached to the given widget/tab.
Only find frontends owned by the current application. Selection
based on connection file of the kernel.
This function does the conversion tabNumber/widget if needed. | Below is the the instruction that describes the task:
### Input:
return all the frontends that do not own the kernel attached to the given widget/tab.
Only find frontends owned by the current application. Selection
based on connection file of the kernel.
This function does the conversion tabNumber/widget if needed.
### Response:
def find_slave_widgets(self,tab):
"""return all the frontends that do not own the kernel attached to the given widget/tab.
Only find frontends owned by the current application. Selection
based on connection file of the kernel.
This function does the conversion tabNumber/widget if needed.
"""
#convert from/to int/richIpythonWidget if needed
if isinstance(tab, int):
tab = self.tab_widget.widget(tab)
km=tab.kernel_manager
#build list of all widgets
widget_list = [self.tab_widget.widget(i) for i in range(self.tab_widget.count())]
        # widgets that are candidates not to be the owner of the kernel share the same connection file as the current widget
filtered_widget_list = ( widget for widget in widget_list if
widget.kernel_manager.connection_file == km.connection_file)
        # Get a list of all widgets owning the same kernel and remove them from
        # the previous candidates. (better to use sets?)
master_widget_list = self.find_master_tab(tab, as_list=True)
slave_list = [widget for widget in filtered_widget_list if widget not in master_widget_list]
return slave_list |
def _generate_examples(self, filepath):
"""Generate examples for the Shapes3d dataset.
Args:
filepath: path to the Shapes3d hdf5 file.
Yields:
Dictionaries with images and the different labels.
"""
# Simultaneously iterating through the different data sets in the hdf5
# file will be slow with a single file. Instead, we first load everything
# into memory before yielding the samples.
image_array, values_array = _load_data(filepath)
# We need to calculate the class labels from the float values in the file.
labels_array = np.zeros_like(values_array, dtype=np.int64)
for i in range(values_array.shape[1]):
labels_array[:, i] = _discretize(values_array[:, i]) # pylint: disable=unsupported-assignment-operation
for image, labels, values in moves.zip(image_array, labels_array,
values_array):
yield {
"image": image,
"label_floor_hue": labels[0],
"label_wall_hue": labels[1],
"label_object_hue": labels[2],
"label_scale": labels[3],
"label_shape": labels[4],
"label_orientation": labels[5],
"value_floor_hue": values[0],
"value_wall_hue": values[1],
"value_object_hue": values[2],
"value_scale": values[3],
"value_shape": values[4],
"value_orientation": values[5],
} | Generate examples for the Shapes3d dataset.
Args:
filepath: path to the Shapes3d hdf5 file.
Yields:
Dictionaries with images and the different labels. | Below is the the instruction that describes the task:
### Input:
Generate examples for the Shapes3d dataset.
Args:
filepath: path to the Shapes3d hdf5 file.
Yields:
Dictionaries with images and the different labels.
### Response:
def _generate_examples(self, filepath):
"""Generate examples for the Shapes3d dataset.
Args:
filepath: path to the Shapes3d hdf5 file.
Yields:
Dictionaries with images and the different labels.
"""
# Simultaneously iterating through the different data sets in the hdf5
# file will be slow with a single file. Instead, we first load everything
# into memory before yielding the samples.
image_array, values_array = _load_data(filepath)
# We need to calculate the class labels from the float values in the file.
labels_array = np.zeros_like(values_array, dtype=np.int64)
for i in range(values_array.shape[1]):
labels_array[:, i] = _discretize(values_array[:, i]) # pylint: disable=unsupported-assignment-operation
for image, labels, values in moves.zip(image_array, labels_array,
values_array):
yield {
"image": image,
"label_floor_hue": labels[0],
"label_wall_hue": labels[1],
"label_object_hue": labels[2],
"label_scale": labels[3],
"label_shape": labels[4],
"label_orientation": labels[5],
"value_floor_hue": values[0],
"value_wall_hue": values[1],
"value_object_hue": values[2],
"value_scale": values[3],
"value_shape": values[4],
"value_orientation": values[5],
} |
def _lval_add_towards_polarity(x, polarity):
"""Compute the appropriate Lval "kind" for the limit of value `x` towards
`polarity`. Either 'toinf' or 'pastzero' depending on the sign of `x` and
the infinity direction of polarity.
"""
if x < 0:
if polarity < 0:
return Lval('toinf', x)
return Lval('pastzero', x)
elif polarity > 0:
return Lval('toinf', x)
return Lval('pastzero', x) | Compute the appropriate Lval "kind" for the limit of value `x` towards
`polarity`. Either 'toinf' or 'pastzero' depending on the sign of `x` and
the infinity direction of polarity. | Below is the the instruction that describes the task:
### Input:
Compute the appropriate Lval "kind" for the limit of value `x` towards
`polarity`. Either 'toinf' or 'pastzero' depending on the sign of `x` and
the infinity direction of polarity.
### Response:
def _lval_add_towards_polarity(x, polarity):
"""Compute the appropriate Lval "kind" for the limit of value `x` towards
`polarity`. Either 'toinf' or 'pastzero' depending on the sign of `x` and
the infinity direction of polarity.
"""
if x < 0:
if polarity < 0:
return Lval('toinf', x)
return Lval('pastzero', x)
elif polarity > 0:
return Lval('toinf', x)
return Lval('pastzero', x) |
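A small self-contained check of the sign logic above. The real Lval class is not part of this record, so the sketch substitutes a tuple-returning stand-in with the same two-argument call shape.
def Lval(kind, value):  # stand-in for the real Lval class, for illustration only
    return (kind, value)
def _lval_add_towards_polarity(x, polarity):
    if x < 0:
        if polarity < 0:
            return Lval('toinf', x)
        return Lval('pastzero', x)
    elif polarity > 0:
        return Lval('toinf', x)
    return Lval('pastzero', x)
# A negative value heading further negative runs to infinity; heading positive it crosses zero.
print(_lval_add_towards_polarity(-3, -1))  # ('toinf', -3)
print(_lval_add_towards_polarity(-3, +1))  # ('pastzero', -3)
# A non-negative value behaves as the mirror image.
print(_lval_add_towards_polarity(3, +1))   # ('toinf', 3)
print(_lval_add_towards_polarity(3, -1))   # ('pastzero', 3)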
def pairs(a):
"""Return array of pairs of adjacent elements in a.
>>> pairs([1, 2, 3, 4])
array([[1, 2],
[2, 3],
[3, 4]])
"""
a = np.asarray(a)
return as_strided(a, shape=(a.size - 1, 2), strides=a.strides * 2) | Return array of pairs of adjacent elements in a.
>>> pairs([1, 2, 3, 4])
array([[1, 2],
[2, 3],
[3, 4]]) | Below is the the instruction that describes the task:
### Input:
Return array of pairs of adjacent elements in a.
>>> pairs([1, 2, 3, 4])
array([[1, 2],
[2, 3],
[3, 4]])
### Response:
def pairs(a):
"""Return array of pairs of adjacent elements in a.
>>> pairs([1, 2, 3, 4])
array([[1, 2],
[2, 3],
[3, 4]])
"""
a = np.asarray(a)
return as_strided(a, shape=(a.size - 1, 2), strides=a.strides * 2) |
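A quick usage sketch for pairs(); it assumes NumPy is installed and repeats the one-line body so the snippet runs on its own.
import numpy as np
from numpy.lib.stride_tricks import as_strided
def pairs(a):
    a = np.asarray(a)
    return as_strided(a, shape=(a.size - 1, 2), strides=a.strides * 2)
print(pairs([10, 20, 30, 40]))
# [[10 20]
#  [20 30]
#  [30 40]]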
def getgroupcustominformationurl(idgroup, customfield="", *args, **kwargs):
"""Request Group Custom Information URL.
See mambugroup module and pydoc for further information.
See Mambu official developer documentation for further details, and
info on parameters that may be implemented here in the future.
"""
groupidparam = "/" + idgroup
url = getmambuurl(*args, **kwargs) + "groups" + groupidparam + "/custominformation" + ( ("/"+customfield) if customfield else "" )
return url | Request Group Custom Information URL.
See mambugroup module and pydoc for further information.
See Mambu official developer documentation for further details, and
info on parameters that may be implemented here in the future. | Below is the the instruction that describes the task:
### Input:
Request Group Custom Information URL.
See mambugroup module and pydoc for further information.
See Mambu official developer documentation for further details, and
info on parameters that may be implemented here in the future.
### Response:
def getgroupcustominformationurl(idgroup, customfield="", *args, **kwargs):
"""Request Group Custom Information URL.
See mambugroup module and pydoc for further information.
See Mambu official developer documentation for further details, and
info on parameters that may be implemented here in the future.
"""
groupidparam = "/" + idgroup
url = getmambuurl(*args, **kwargs) + "groups" + groupidparam + "/custominformation" + ( ("/"+customfield) if customfield else "" )
return url |
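A rough illustration of the URL this helper builds. getmambuurl() is not shown in this record, so the sketch stubs it with a placeholder base URL; the real MambuPy helper derives the base from configured credentials.
def getmambuurl(*args, **kwargs):  # stub for illustration; the real helper builds the API base URL
    return "https://example.mambu.com/api/"
def getgroupcustominformationurl(idgroup, customfield="", *args, **kwargs):
    groupidparam = "/" + idgroup
    return (getmambuurl(*args, **kwargs) + "groups" + groupidparam +
            "/custominformation" + (("/" + customfield) if customfield else ""))
print(getgroupcustominformationurl("ABC123"))
# https://example.mambu.com/api/groups/ABC123/custominformation
print(getgroupcustominformationurl("ABC123", customfield="score"))
# https://example.mambu.com/api/groups/ABC123/custominformation/score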
def setup(self, settings):
'''
Setup kafka
'''
KafkaBaseMonitor.setup(self, settings)
self.zoo_client = KazooClient(hosts=settings['ZOOKEEPER_HOSTS'])
self.zoo_client.start()
self.path = settings['ZOOKEEPER_ASSIGN_PATH'] + settings['ZOOKEEPER_ID']
if not self.zoo_client.exists(self.path):
self.zoo_client.ensure_path(self.path) | Setup kafka | Below is the the instruction that describes the task:
### Input:
Setup kafka
### Response:
def setup(self, settings):
'''
Setup kafka
'''
KafkaBaseMonitor.setup(self, settings)
self.zoo_client = KazooClient(hosts=settings['ZOOKEEPER_HOSTS'])
self.zoo_client.start()
self.path = settings['ZOOKEEPER_ASSIGN_PATH'] + settings['ZOOKEEPER_ID']
if not self.zoo_client.exists(self.path):
self.zoo_client.ensure_path(self.path) |
def render_field_errors(field):
"""
Render field errors as html.
"""
if field.errors:
html = """<p class="help-block">Error: {errors}</p>""".format(
errors='. '.join(field.errors)
)
return HTMLString(html)
return None | Render field errors as html. | Below is the the instruction that describes the task:
### Input:
Render field errors as html.
### Response:
def render_field_errors(field):
"""
Render field errors as html.
"""
if field.errors:
html = """<p class="help-block">Error: {errors}</p>""".format(
errors='. '.join(field.errors)
)
return HTMLString(html)
return None |
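A minimal, self-contained sketch of the field object this helper expects. The HTMLString subclass and FakeField class below are stand-ins invented for the example; the original code presumably gets HTMLString from its form library.
class HTMLString(str):  # stand-in for the HTML-safe string type used by the original module
    pass
def render_field_errors(field):
    if field.errors:
        html = """<p class="help-block">Error: {errors}</p>""".format(
            errors='. '.join(field.errors)
        )
        return HTMLString(html)
    return None
class FakeField:  # hypothetical field carrying validation errors
    errors = ['Required field', 'Must be an email address']
print(render_field_errors(FakeField()))
# <p class="help-block">Error: Required field. Must be an email address</p>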
def _additions_remove_use_cd(**kwargs):
'''
Remove VirtualBox Guest Additions.
It uses the CD, connected by VirtualBox.
'''
with _additions_mounted() as mount_point:
kernel = __grains__.get('kernel', '')
if kernel == 'Linux':
return _additions_remove_linux_use_cd(mount_point, **kwargs) | Remove VirtualBox Guest Additions.
It uses the CD, connected by VirtualBox. | Below is the the instruction that describes the task:
### Input:
Remove VirtualBox Guest Additions.
It uses the CD, connected by VirtualBox.
### Response:
def _additions_remove_use_cd(**kwargs):
'''
Remove VirtualBox Guest Additions.
It uses the CD, connected by VirtualBox.
'''
with _additions_mounted() as mount_point:
kernel = __grains__.get('kernel', '')
if kernel == 'Linux':
return _additions_remove_linux_use_cd(mount_point, **kwargs) |
def on_status_update(self, channel, callback):
"""
Callback to execute on status of update of channel
"""
if channel not in self._callbacks:
self._callbacks[channel] = []
self._callbacks[channel].append(callback) | Callback to execute on status of update of channel | Below is the the instruction that describes the task:
### Input:
Callback to execute on status of update of channel
### Response:
def on_status_update(self, channel, callback):
"""
Callback to execute on status of update of channel
"""
if channel not in self._callbacks:
self._callbacks[channel] = []
self._callbacks[channel].append(callback) |
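A compact sketch of how this callback registry behaves. The surrounding class is not shown in the record, so the sketch wraps the method in a minimal stand-in with a _callbacks dict and a hypothetical _fire() helper to trigger the callbacks.
class Hub:  # minimal stand-in for the class that owns on_status_update()
    def __init__(self):
        self._callbacks = {}
    def on_status_update(self, channel, callback):
        if channel not in self._callbacks:
            self._callbacks[channel] = []
        self._callbacks[channel].append(callback)
    def _fire(self, channel, status):  # helper invented for the sketch, not part of the original API
        for cb in self._callbacks.get(channel, []):
            cb(status)
hub = Hub()
hub.on_status_update(3, lambda status: print('channel 3 ->', status))
hub._fire(3, 'on')  # prints: channel 3 -> on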
def set_input_value(self, selector, value):
"""Set the value of the input matched by given selector."""
script = 'document.querySelector("%s").setAttribute("value", "%s")'
script = script % (selector, value)
self.evaluate(script) | Set the value of the input matched by given selector. | Below is the the instruction that describes the task:
### Input:
Set the value of the input matched by given selector.
### Response:
def set_input_value(self, selector, value):
"""Set the value of the input matched by given selector."""
script = 'document.querySelector("%s").setAttribute("value", "%s")'
script = script % (selector, value)
self.evaluate(script) |
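The method only formats a short piece of JavaScript before handing it to evaluate(); the sketch below prints the exact string that would be generated, using a made-up selector and value, so no browser session is needed.
selector, value = "input[name='q']", "hello"
script = 'document.querySelector("%s").setAttribute("value", "%s")' % (selector, value)
print(script)
# document.querySelector("input[name='q']").setAttribute("value", "hello")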
async def wait_stream(aiterable):
"""Wait for an asynchronous iterable to finish and return the last item.
The iterable is executed within a safe stream context.
A StreamEmpty exception is raised if the sequence is empty.
"""
async with streamcontext(aiterable) as streamer:
async for item in streamer:
item
try:
return item
except NameError:
raise StreamEmpty() | Wait for an asynchronous iterable to finish and return the last item.
The iterable is executed within a safe stream context.
A StreamEmpty exception is raised if the sequence is empty. | Below is the the instruction that describes the task:
### Input:
Wait for an asynchronous iterable to finish and return the last item.
The iterable is executed within a safe stream context.
A StreamEmpty exception is raised if the sequence is empty.
### Response:
async def wait_stream(aiterable):
"""Wait for an asynchronous iterable to finish and return the last item.
The iterable is executed within a safe stream context.
A StreamEmpty exception is raised if the sequence is empty.
"""
async with streamcontext(aiterable) as streamer:
async for item in streamer:
item
try:
return item
except NameError:
raise StreamEmpty() |
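A usage-only sketch for wait_stream(). It assumes the coroutine above is importable (it depends on aiostream's streamcontext, which is not shown here); the async generator is a made-up example sequence.
import asyncio
async def numbers():
    for i in range(3):
        yield i
async def main():
    last = await wait_stream(numbers())  # assumes wait_stream from the record above is in scope
    print(last)  # 2, the final item of the sequence
asyncio.run(main())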
def graph_to_dimacs(g, f):
"""
Persists the supplied graph in valid dimacs format into the file.
Parameters
----------
g : `~medpy.graphcut.graph.Graph`
A graph object to persist.
f : file
A file-like object.
"""
# write comments
f.write('c Created by medpy\n')
f.write('c Oskar Maier, [email protected]\n')
f.write('c\n')
# write problem
f.write('c problem line\n')
f.write('p max {} {}\n'.format(g.get_node_count() + 2, len(g.get_edges()))) # +2 as terminal nodes also count in dimacs format # no-nodes / no-edges
# denote source and sink
f.write('c source descriptor\n')
f.write('n 1 s\n')
f.write('c sink descriptor\n')
f.write('n 2 t\n')
# write terminal arcs (t-weights)
f.write('c terminal arcs (t-weights)\n')
for node, weight in list(g.get_tweights().items()):
# Note: the nodes ids of the graph start from 1, but 1 and 2 are reserved for source and sink respectively, therefore add 2
if not 0 == weight[0]: # 0 weights are implicit
f.write('a 1 {} {}\n'.format(node + 2, weight[0]))
if not 0 == weight[1]: # 0 weights are implicit
f.write('a {} 2 {}\n'.format(node + 2, weight[1]))
# write inter-node arcs (n-weights)
f.write('c inter-node arcs (n-weights)\n')
for edge, weight in list(g.get_nweights().items()):
if not 0 == weight[0]: # 0 weights are implicit
f.write('a {} {} {}\n'.format(edge[0] + 2, edge[1] + 2, weight[0]))
# reversed weights have to follow directly in the next line
if not 0 == weight[1]: # 0 weights are implicit
f.write('a {} {} {}\n'.format(edge[1] + 2, edge[0] + 2, weight[1]))
# end comment
f.write('c end-of-file') | Persists the supplied graph in valid dimacs format into the file.
Parameters
----------
g : `~medpy.graphcut.graph.Graph`
A graph object to persist.
f : file
A file-like object. | Below is the the instruction that describes the task:
### Input:
Persists the supplied graph in valid dimacs format into the file.
Parameters
----------
g : `~medpy.graphcut.graph.Graph`
A graph object to persist.
f : file
A file-like object.
### Response:
def graph_to_dimacs(g, f):
"""
Persists the supplied graph in valid dimacs format into the file.
Parameters
----------
g : `~medpy.graphcut.graph.Graph`
A graph object to persist.
f : file
A file-like object.
"""
# write comments
f.write('c Created by medpy\n')
f.write('c Oskar Maier, [email protected]\n')
f.write('c\n')
# write problem
f.write('c problem line\n')
f.write('p max {} {}\n'.format(g.get_node_count() + 2, len(g.get_edges()))) # +2 as terminal nodes also count in dimacs format # no-nodes / no-edges
# denote source and sink
f.write('c source descriptor\n')
f.write('n 1 s\n')
f.write('c sink descriptor\n')
f.write('n 2 t\n')
# write terminal arcs (t-weights)
f.write('c terminal arcs (t-weights)\n')
for node, weight in list(g.get_tweights().items()):
# Note: the nodes ids of the graph start from 1, but 1 and 2 are reserved for source and sink respectively, therefore add 2
if not 0 == weight[0]: # 0 weights are implicit
f.write('a 1 {} {}\n'.format(node + 2, weight[0]))
if not 0 == weight[1]: # 0 weights are implicit
f.write('a {} 2 {}\n'.format(node + 2, weight[1]))
# write inter-node arcs (n-weights)
f.write('c inter-node arcs (n-weights)\n')
for edge, weight in list(g.get_nweights().items()):
if not 0 == weight[0]: # 0 weights are implicit
f.write('a {} {} {}\n'.format(edge[0] + 2, edge[1] + 2, weight[0]))
# reversed weights have to follow directly in the next line
if not 0 == weight[1]: # 0 weights are implicit
f.write('a {} {} {}\n'.format(edge[1] + 2, edge[0] + 2, weight[1]))
# end comment
f.write('c end-of-file') |
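A sketch that feeds graph_to_dimacs() a tiny stub graph and an io.StringIO buffer. The StubGraph class implements only the four accessors the function calls, with made-up weights; graph_to_dimacs itself is assumed to be in scope as defined above.
import io
class StubGraph:  # minimal stand-in exposing only what graph_to_dimacs() reads
    def get_node_count(self):
        return 2
    def get_edges(self):
        return [(0, 1)]
    def get_tweights(self):
        return {0: (5, 0), 1: (0, 5)}  # per node: (source-weight, sink-weight)
    def get_nweights(self):
        return {(0, 1): (3, 3)}  # per edge: (forward, reverse) capacity
buf = io.StringIO()
graph_to_dimacs(StubGraph(), buf)
print(buf.getvalue())  # dimacs text: problem line, terminal arcs, inter-node arcs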
def diet_expert(x, hidden_size, params):
"""A two-layer feed-forward network with relu activation on hidden layer.
Uses diet variables.
Recomputes hidden layer on backprop to save activation memory.
Args:
x: a Tensor with shape [batch, io_size]
hidden_size: an integer
params: a diet variable HParams object.
Returns:
a Tensor with shape [batch, io_size]
"""
@fn_with_diet_vars(params)
def diet_expert_internal(x):
dim = x.get_shape().as_list()[-1]
h = tf.layers.dense(x, hidden_size, activation=tf.nn.relu, use_bias=False)
y = tf.layers.dense(h, dim, use_bias=False)
y *= tf.rsqrt(tf.to_float(dim * hidden_size))
return y
return diet_expert_internal(x) | A two-layer feed-forward network with relu activation on hidden layer.
Uses diet variables.
Recomputes hidden layer on backprop to save activation memory.
Args:
x: a Tensor with shape [batch, io_size]
hidden_size: an integer
params: a diet variable HParams object.
Returns:
a Tensor with shape [batch, io_size] | Below is the the instruction that describes the task:
### Input:
A two-layer feed-forward network with relu activation on hidden layer.
Uses diet variables.
Recomputes hidden layer on backprop to save activation memory.
Args:
x: a Tensor with shape [batch, io_size]
hidden_size: an integer
params: a diet variable HParams object.
Returns:
a Tensor with shape [batch, io_size]
### Response:
def diet_expert(x, hidden_size, params):
"""A two-layer feed-forward network with relu activation on hidden layer.
Uses diet variables.
Recomputes hidden layer on backprop to save activation memory.
Args:
x: a Tensor with shape [batch, io_size]
hidden_size: an integer
params: a diet variable HParams object.
Returns:
a Tensor with shape [batch, io_size]
"""
@fn_with_diet_vars(params)
def diet_expert_internal(x):
dim = x.get_shape().as_list()[-1]
h = tf.layers.dense(x, hidden_size, activation=tf.nn.relu, use_bias=False)
y = tf.layers.dense(h, dim, use_bias=False)
y *= tf.rsqrt(tf.to_float(dim * hidden_size))
return y
return diet_expert_internal(x) |
def add(self, effect):
"""
Add an LV2 plugin encapsulated as a jack client
:param Lv2Effect effect: Effect that will be loaded as LV2 plugin encapsulated
"""
effect.instance = self.instance_index
self.instance_index += 1
self.connection.send(ProtocolParser.add(effect)) | Add an LV2 plugin encapsulated as a jack client
:param Lv2Effect effect: Effect that will be loaded as LV2 plugin encapsulated | Below is the the instruction that describes the task:
### Input:
Add an LV2 plugin encapsulated as a jack client
:param Lv2Effect effect: Effect that will be loaded as LV2 plugin encapsulated
### Response:
def add(self, effect):
"""
Add an LV2 plugin encapsulated as a jack client
:param Lv2Effect effect: Effect that will be loaded as LV2 plugin encapsulated
"""
effect.instance = self.instance_index
self.instance_index += 1
self.connection.send(ProtocolParser.add(effect)) |
def rpc_get_blockstack_ops_hash_at( self, block_id, **con_info ):
"""
Get the hash over the sequence of names and namespaces altered at the given block.
Used by SNV clients.
Returns {'status': True, 'ops_hash': ops_hash} on success
Returns {'error': ...} on error
"""
if not check_block(block_id):
return {'error': 'Invalid block height', 'http_status': 400}
db = get_db_state(self.working_dir)
ops_hash = db.get_block_ops_hash( block_id )
db.close()
return self.success_response( {'ops_hash': ops_hash} ) | Get the hash over the sequence of names and namespaces altered at the given block.
Used by SNV clients.
Returns {'status': True, 'ops_hash': ops_hash} on success
Returns {'error': ...} on error | Below is the the instruction that describes the task:
### Input:
Get the hash over the sequence of names and namespaces altered at the given block.
Used by SNV clients.
Returns {'status': True, 'ops_hash': ops_hash} on success
Returns {'error': ...} on error
### Response:
def rpc_get_blockstack_ops_hash_at( self, block_id, **con_info ):
"""
Get the hash over the sequence of names and namespaces altered at the given block.
Used by SNV clients.
Returns {'status': True, 'ops_hash': ops_hash} on success
Returns {'error': ...} on error
"""
if not check_block(block_id):
return {'error': 'Invalid block height', 'http_status': 400}
db = get_db_state(self.working_dir)
ops_hash = db.get_block_ops_hash( block_id )
db.close()
return self.success_response( {'ops_hash': ops_hash} ) |
def value(self):
"""
Get the value of the match, using formatter if defined.
:return:
:rtype:
"""
if self._value:
return self._value
if self.formatter:
return self.formatter(self.raw)
return self.raw | Get the value of the match, using formatter if defined.
:return:
:rtype: | Below is the the instruction that describes the task:
### Input:
Get the value of the match, using formatter if defined.
:return:
:rtype:
### Response:
def value(self):
"""
Get the value of the match, using formatter if defined.
:return:
:rtype:
"""
if self._value:
return self._value
if self.formatter:
return self.formatter(self.raw)
return self.raw |
def get_banks(self):
"""Pass through to provider BankLookupSession.get_banks"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bins_template
catalogs = self._get_provider_session('bank_lookup_session').get_banks()
cat_list = []
for cat in catalogs:
cat_list.append(Bank(self._provider_manager, cat, self._runtime, self._proxy))
return BankList(cat_list) | Pass through to provider BankLookupSession.get_banks | Below is the the instruction that describes the task:
### Input:
Pass through to provider BankLookupSession.get_banks
### Response:
def get_banks(self):
"""Pass through to provider BankLookupSession.get_banks"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bins_template
catalogs = self._get_provider_session('bank_lookup_session').get_banks()
cat_list = []
for cat in catalogs:
cat_list.append(Bank(self._provider_manager, cat, self._runtime, self._proxy))
return BankList(cat_list) |
def make_assess_status_func(*args, **kwargs):
"""Creates an assess_status_func() suitable for handing to pause_unit()
and resume_unit().
This uses the _determine_os_workload_status(...) function to determine
what the workload_status should be for the unit. If the unit is
not in maintenance or active states, then the message is returned to
the caller. This is so an action that doesn't result in either a
complete pause or complete resume can signal failure with an action_fail()
"""
def _assess_status_func():
state, message = _determine_os_workload_status(*args, **kwargs)
status_set(state, message)
if state not in ['maintenance', 'active']:
return message
return None
return _assess_status_func | Creates an assess_status_func() suitable for handing to pause_unit()
and resume_unit().
This uses the _determine_os_workload_status(...) function to determine
what the workload_status should be for the unit. If the unit is
not in maintenance or active states, then the message is returned to
the caller. This is so an action that doesn't result in either a
complete pause or complete resume can signal failure with an action_fail() | Below is the the instruction that describes the task:
### Input:
Creates an assess_status_func() suitable for handing to pause_unit()
and resume_unit().
This uses the _determine_os_workload_status(...) function to determine
what the workload_status should be for the unit. If the unit is
not in maintenance or active states, then the message is returned to
the caller. This is so an action that doesn't result in either a
complete pause or complete resume can signal failure with an action_fail()
### Response:
def make_assess_status_func(*args, **kwargs):
"""Creates an assess_status_func() suitable for handing to pause_unit()
and resume_unit().
This uses the _determine_os_workload_status(...) function to determine
what the workload_status should be for the unit. If the unit is
not in maintenance or active states, then the message is returned to
the caller. This is so an action that doesn't result in either a
complete pause or complete resume can signal failure with an action_fail()
"""
def _assess_status_func():
state, message = _determine_os_workload_status(*args, **kwargs)
status_set(state, message)
if state not in ['maintenance', 'active']:
return message
return None
return _assess_status_func |
def check_generic_request(self, item_session: ItemSession) -> Tuple[bool, str]:
'''Check URL filters and scripting hook.
Returns:
tuple: (bool, str)
'''
verdict, reason, test_info = self.consult_filters(
item_session.request.url_info,
item_session.url_record)
verdict, reason = self.consult_hook(item_session, verdict,
reason, test_info)
return verdict, reason | Check URL filters and scripting hook.
Returns:
tuple: (bool, str) | Below is the the instruction that describes the task:
### Input:
Check URL filters and scripting hook.
Returns:
tuple: (bool, str)
### Response:
def check_generic_request(self, item_session: ItemSession) -> Tuple[bool, str]:
'''Check URL filters and scripting hook.
Returns:
tuple: (bool, str)
'''
verdict, reason, test_info = self.consult_filters(
item_session.request.url_info,
item_session.url_record)
verdict, reason = self.consult_hook(item_session, verdict,
reason, test_info)
return verdict, reason |
def geodetic2geocentric(theta, alt):
"""
Conversion from geodetic to geocentric coordinates by using the WGS84 spheroid.
:param theta: colatitude (float, rad)
:param alt: altitude (float, km)
:return gccolat: geocentric colatitude (float, rad)
d: gccolat minus theta (float, rad)
r: geocentric radius (float, km)
"""
ct = np.cos(theta)
st = np.sin(theta)
a2 = 40680631.6
b2 = 40408296.0
one = a2 * st * st
two = b2 * ct * ct
three = one + two
rho = np.sqrt(three)
r = np.sqrt(alt * (alt + 2.0 * rho) + (a2 * one + b2 * two) / three)
cd = (alt + rho) / r
sd = (a2 - b2) / rho * ct * st / r
one = ct
ct = ct * cd - st * sd
st = st * cd + one * sd
gccolat = np.arctan2(st, ct)
d = np.arctan2(sd, cd)
return gccolat, d, r | Conversion from geodetic to geocentric coordinates by using the WGS84 spheroid.
:param theta: colatitude (float, rad)
:param alt: altitude (float, km)
:return gccolat: geocentric colatitude (float, rad)
d: gccolat minus theta (float, rad)
r: geocentric radius (float, km) | Below is the the instruction that describes the task:
### Input:
Conversion from geodetic to geocentric coordinates by using the WGS84 spheroid.
:param theta: colatitude (float, rad)
:param alt: altitude (float, km)
:return gccolat: geocentric colatitude (float, rad)
d: gccolat minus theta (float, rad)
r: geocentric radius (float, km)
### Response:
def geodetic2geocentric(theta, alt):
"""
Conversion from geodetic to geocentric coordinates by using the WGS84 spheroid.
:param theta: colatitude (float, rad)
:param alt: altitude (float, km)
:return gccolat: geocentric colatitude (float, rad)
d: gccolat minus theta (float, rad)
r: geocentric radius (float, km)
"""
ct = np.cos(theta)
st = np.sin(theta)
a2 = 40680631.6
b2 = 40408296.0
one = a2 * st * st
two = b2 * ct * ct
three = one + two
rho = np.sqrt(three)
r = np.sqrt(alt * (alt + 2.0 * rho) + (a2 * one + b2 * two) / three)
cd = (alt + rho) / r
sd = (a2 - b2) / rho * ct * st / r
one = ct
ct = ct * cd - st * sd
st = st * cd + one * sd
gccolat = np.arctan2(st, ct)
d = np.arctan2(sd, cd)
return gccolat, d, r |
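A short numerical usage sketch; it assumes NumPy and the geodetic2geocentric() function defined above, and uses a geodetic colatitude of 45 degrees at sea level (altitude 0 km).
import numpy as np
theta = np.radians(45.0)  # geodetic colatitude in radians
gccolat, d, r = geodetic2geocentric(theta, 0.0)
print(np.degrees(gccolat))  # geocentric colatitude, close to but not exactly 45 degrees
print(np.degrees(d))        # the small offset gccolat - theta, a fraction of a degree
print(r)                    # geocentric radius in km, near Earth's mean radius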
def Prep(self, size, additionalBytes):
"""
Prep prepares to write an element of `size` after `additional_bytes`
have been written, e.g. if you write a string, you need to align
        such that the int length field is aligned to SizeInt32, and the string
data follows it directly.
If all you need to do is align, `additionalBytes` will be 0.
"""
# Track the biggest thing we've ever aligned to.
if size > self.minalign:
self.minalign = size
# Find the amount of alignment needed such that `size` is properly
# aligned after `additionalBytes`:
alignSize = (~(len(self.Bytes) - self.Head() + additionalBytes)) + 1
alignSize &= (size - 1)
# Reallocate the buffer if needed:
while self.Head() < alignSize+size+additionalBytes:
oldBufSize = len(self.Bytes)
self.growByteBuffer()
updated_head = self.head + len(self.Bytes) - oldBufSize
self.head = UOffsetTFlags.py_type(updated_head)
self.Pad(alignSize) | Prep prepares to write an element of `size` after `additional_bytes`
have been written, e.g. if you write a string, you need to align
        such that the int length field is aligned to SizeInt32, and the string
data follows it directly.
If all you need to do is align, `additionalBytes` will be 0. | Below is the the instruction that describes the task:
### Input:
Prep prepares to write an element of `size` after `additional_bytes`
have been written, e.g. if you write a string, you need to align
        such that the int length field is aligned to SizeInt32, and the string
data follows it directly.
If all you need to do is align, `additionalBytes` will be 0.
### Response:
def Prep(self, size, additionalBytes):
"""
Prep prepares to write an element of `size` after `additional_bytes`
have been written, e.g. if you write a string, you need to align
        such that the int length field is aligned to SizeInt32, and the string
data follows it directly.
If all you need to do is align, `additionalBytes` will be 0.
"""
# Track the biggest thing we've ever aligned to.
if size > self.minalign:
self.minalign = size
# Find the amount of alignment needed such that `size` is properly
# aligned after `additionalBytes`:
alignSize = (~(len(self.Bytes) - self.Head() + additionalBytes)) + 1
alignSize &= (size - 1)
# Reallocate the buffer if needed:
while self.Head() < alignSize+size+additionalBytes:
oldBufSize = len(self.Bytes)
self.growByteBuffer()
updated_head = self.head + len(self.Bytes) - oldBufSize
self.head = UOffsetTFlags.py_type(updated_head)
self.Pad(alignSize) |
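A stand-alone illustration of the alignment arithmetic inside Prep(), using made-up numbers rather than a real builder: it computes how many padding bytes are needed so that a 4-byte value lands on a 4-byte boundary after 2 additional bytes.
buf_len, head, additional, size = 16, 12, 2, 4  # hypothetical buffer state
written = buf_len - head                        # bytes already written (the buffer fills downward)
align_size = (~(written + additional) + 1) & (size - 1)
print(align_size)  # 2, since (written + additional + 2) is then a multiple of 4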
def find_mod_objs(modname, onlylocals=False):
""" Returns all the public attributes of a module referenced by name.
.. note::
        The returned list does *not* include subpackages or modules of
        `modname`, nor does it include private attributes (those that
        begin with '_' or are not in `__all__`).
Parameters
----------
modname : str
The name of the module to search.
onlylocals : bool
If True, only attributes that are either members of `modname` OR one of
its modules or subpackages will be included.
Returns
-------
localnames : list of str
A list of the names of the attributes as they are named in the
module `modname` .
fqnames : list of str
A list of the full qualified names of the attributes (e.g.,
``astropy.utils.misc.find_mod_objs``). For attributes that are
simple variables, this is based on the local name, but for
functions or classes it can be different if they are actually
defined elsewhere and just referenced in `modname`.
objs : list of objects
A list of the actual attributes themselves (in the same order as
the other arguments)
"""
__import__(modname)
mod = sys.modules[modname]
if hasattr(mod, '__all__'):
pkgitems = [(k, mod.__dict__[k]) for k in mod.__all__]
else:
pkgitems = [(k, mod.__dict__[k]) for k in dir(mod) if k[0] != '_']
# filter out modules and pull the names and objs out
ismodule = inspect.ismodule
localnames = [k for k, v in pkgitems if not ismodule(v)]
objs = [v for k, v in pkgitems if not ismodule(v)]
# fully qualified names can be determined from the object's module
fqnames = []
for obj, lnm in zip(objs, localnames):
if hasattr(obj, '__module__') and hasattr(obj, '__name__'):
fqnames.append(obj.__module__ + '.' + obj.__name__)
else:
fqnames.append(modname + '.' + lnm)
if onlylocals:
valids = [fqn.startswith(modname) for fqn in fqnames]
localnames = [e for i, e in enumerate(localnames) if valids[i]]
fqnames = [e for i, e in enumerate(fqnames) if valids[i]]
objs = [e for i, e in enumerate(objs) if valids[i]]
return localnames, fqnames, objs | Returns all the public attributes of a module referenced by name.
.. note::
        The returned list does *not* include subpackages or modules of
        `modname`, nor does it include private attributes (those that
        begin with '_' or are not in `__all__`).
Parameters
----------
modname : str
The name of the module to search.
onlylocals : bool
If True, only attributes that are either members of `modname` OR one of
its modules or subpackages will be included.
Returns
-------
localnames : list of str
A list of the names of the attributes as they are named in the
module `modname` .
fqnames : list of str
A list of the full qualified names of the attributes (e.g.,
``astropy.utils.misc.find_mod_objs``). For attributes that are
simple variables, this is based on the local name, but for
functions or classes it can be different if they are actually
defined elsewhere and just referenced in `modname`.
objs : list of objects
A list of the actual attributes themselves (in the same order as
the other arguments) | Below is the the instruction that describes the task:
### Input:
Returns all the public attributes of a module referenced by name.
.. note::
        The returned list does *not* include subpackages or modules of
        `modname`, nor does it include private attributes (those that
        begin with '_' or are not in `__all__`).
Parameters
----------
modname : str
The name of the module to search.
onlylocals : bool
If True, only attributes that are either members of `modname` OR one of
its modules or subpackages will be included.
Returns
-------
localnames : list of str
A list of the names of the attributes as they are named in the
module `modname` .
fqnames : list of str
A list of the full qualified names of the attributes (e.g.,
``astropy.utils.misc.find_mod_objs``). For attributes that are
simple variables, this is based on the local name, but for
functions or classes it can be different if they are actually
defined elsewhere and just referenced in `modname`.
objs : list of objects
A list of the actual attributes themselves (in the same order as
the other arguments)
### Response:
def find_mod_objs(modname, onlylocals=False):
""" Returns all the public attributes of a module referenced by name.
.. note::
        The returned list does *not* include subpackages or modules of
        `modname`, nor does it include private attributes (those that
        begin with '_' or are not in `__all__`).
Parameters
----------
modname : str
The name of the module to search.
onlylocals : bool
If True, only attributes that are either members of `modname` OR one of
its modules or subpackages will be included.
Returns
-------
localnames : list of str
A list of the names of the attributes as they are named in the
module `modname` .
fqnames : list of str
A list of the full qualified names of the attributes (e.g.,
``astropy.utils.misc.find_mod_objs``). For attributes that are
simple variables, this is based on the local name, but for
functions or classes it can be different if they are actually
defined elsewhere and just referenced in `modname`.
objs : list of objects
A list of the actual attributes themselves (in the same order as
the other arguments)
"""
__import__(modname)
mod = sys.modules[modname]
if hasattr(mod, '__all__'):
pkgitems = [(k, mod.__dict__[k]) for k in mod.__all__]
else:
pkgitems = [(k, mod.__dict__[k]) for k in dir(mod) if k[0] != '_']
# filter out modules and pull the names and objs out
ismodule = inspect.ismodule
localnames = [k for k, v in pkgitems if not ismodule(v)]
objs = [v for k, v in pkgitems if not ismodule(v)]
# fully qualified names can be determined from the object's module
fqnames = []
for obj, lnm in zip(objs, localnames):
if hasattr(obj, '__module__') and hasattr(obj, '__name__'):
fqnames.append(obj.__module__ + '.' + obj.__name__)
else:
fqnames.append(modname + '.' + lnm)
if onlylocals:
valids = [fqn.startswith(modname) for fqn in fqnames]
localnames = [e for i, e in enumerate(localnames) if valids[i]]
fqnames = [e for i, e in enumerate(fqnames) if valids[i]]
objs = [e for i, e in enumerate(objs) if valids[i]]
return localnames, fqnames, objs |
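A quick usage sketch, assuming find_mod_objs() as defined above is in scope; the standard-library json module is used as the target because it is always importable and defines __all__.
localnames, fqnames, objs = find_mod_objs('json')
for lnm, fqn in zip(localnames, fqnames):
    print(lnm, '->', fqn)  # e.g. loads -> json.loads (the exact list depends on the Python version)
localnames, fqnames, objs = find_mod_objs('json', onlylocals=True)  # keep only attributes defined under json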
def load_wc(cls, stream):
"""Return a `Wilson` instance initialized by a WCxf file-like object"""
wc = wcxf.WC.load(stream)
return cls.from_wc(wc) | Return a `Wilson` instance initialized by a WCxf file-like object | Below is the the instruction that describes the task:
### Input:
Return a `Wilson` instance initialized by a WCxf file-like object
### Response:
def load_wc(cls, stream):
"""Return a `Wilson` instance initialized by a WCxf file-like object"""
wc = wcxf.WC.load(stream)
return cls.from_wc(wc) |
def add_rule(name, localport, protocol='tcp', action='allow', dir='in',
remoteip='any'):
'''
.. versionadded:: 2015.5.0
Add a new inbound or outbound rule to the firewall policy
Args:
name (str): The name of the rule. Must be unique and cannot be "all".
Required.
localport (int): The port the rule applies to. Must be a number between
0 and 65535. Can be a range. Can specify multiple ports separated by
commas. Required.
protocol (Optional[str]): The protocol. Can be any of the following:
- A number between 0 and 255
- icmpv4
- icmpv6
- tcp
- udp
- any
action (Optional[str]): The action the rule performs. Can be any of the
following:
- allow
- block
- bypass
dir (Optional[str]): The direction. Can be ``in`` or ``out``.
remoteip (Optional [str]): The remote IP. Can be any of the following:
- any
- localsubnet
- dns
- dhcp
- wins
- defaultgateway
- Any valid IPv4 address (192.168.0.12)
- Any valid IPv6 address (2002:9b3b:1a31:4:208:74ff:fe39:6c43)
- Any valid subnet (192.168.1.0/24)
- Any valid range of IP addresses (192.168.0.1-192.168.0.12)
- A list of valid IP addresses
Can be combinations of the above separated by commas.
Returns:
bool: True if successful
Raises:
CommandExecutionError: If the command fails
CLI Example:
.. code-block:: bash
salt '*' firewall.add_rule 'test' '8080' 'tcp'
salt '*' firewall.add_rule 'test' '1' 'icmpv4'
salt '*' firewall.add_rule 'test_remote_ip' '8000' 'tcp' 'allow' 'in' '192.168.0.1'
'''
cmd = ['netsh', 'advfirewall', 'firewall', 'add', 'rule',
'name={0}'.format(name),
'protocol={0}'.format(protocol),
'dir={0}'.format(dir),
'action={0}'.format(action),
'remoteip={0}'.format(remoteip)]
if protocol is None \
or ('icmpv4' not in protocol and 'icmpv6' not in protocol):
cmd.append('localport={0}'.format(localport))
ret = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)
if ret['retcode'] != 0:
raise CommandExecutionError(ret['stdout'])
return True | .. versionadded:: 2015.5.0
Add a new inbound or outbound rule to the firewall policy
Args:
name (str): The name of the rule. Must be unique and cannot be "all".
Required.
localport (int): The port the rule applies to. Must be a number between
0 and 65535. Can be a range. Can specify multiple ports separated by
commas. Required.
protocol (Optional[str]): The protocol. Can be any of the following:
- A number between 0 and 255
- icmpv4
- icmpv6
- tcp
- udp
- any
action (Optional[str]): The action the rule performs. Can be any of the
following:
- allow
- block
- bypass
dir (Optional[str]): The direction. Can be ``in`` or ``out``.
remoteip (Optional [str]): The remote IP. Can be any of the following:
- any
- localsubnet
- dns
- dhcp
- wins
- defaultgateway
- Any valid IPv4 address (192.168.0.12)
- Any valid IPv6 address (2002:9b3b:1a31:4:208:74ff:fe39:6c43)
- Any valid subnet (192.168.1.0/24)
- Any valid range of IP addresses (192.168.0.1-192.168.0.12)
- A list of valid IP addresses
Can be combinations of the above separated by commas.
Returns:
bool: True if successful
Raises:
CommandExecutionError: If the command fails
CLI Example:
.. code-block:: bash
salt '*' firewall.add_rule 'test' '8080' 'tcp'
salt '*' firewall.add_rule 'test' '1' 'icmpv4'
salt '*' firewall.add_rule 'test_remote_ip' '8000' 'tcp' 'allow' 'in' '192.168.0.1' | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2015.5.0
Add a new inbound or outbound rule to the firewall policy
Args:
name (str): The name of the rule. Must be unique and cannot be "all".
Required.
localport (int): The port the rule applies to. Must be a number between
0 and 65535. Can be a range. Can specify multiple ports separated by
commas. Required.
protocol (Optional[str]): The protocol. Can be any of the following:
- A number between 0 and 255
- icmpv4
- icmpv6
- tcp
- udp
- any
action (Optional[str]): The action the rule performs. Can be any of the
following:
- allow
- block
- bypass
dir (Optional[str]): The direction. Can be ``in`` or ``out``.
remoteip (Optional [str]): The remote IP. Can be any of the following:
- any
- localsubnet
- dns
- dhcp
- wins
- defaultgateway
- Any valid IPv4 address (192.168.0.12)
- Any valid IPv6 address (2002:9b3b:1a31:4:208:74ff:fe39:6c43)
- Any valid subnet (192.168.1.0/24)
- Any valid range of IP addresses (192.168.0.1-192.168.0.12)
- A list of valid IP addresses
Can be combinations of the above separated by commas.
Returns:
bool: True if successful
Raises:
CommandExecutionError: If the command fails
CLI Example:
.. code-block:: bash
salt '*' firewall.add_rule 'test' '8080' 'tcp'
salt '*' firewall.add_rule 'test' '1' 'icmpv4'
salt '*' firewall.add_rule 'test_remote_ip' '8000' 'tcp' 'allow' 'in' '192.168.0.1'
### Response:
def add_rule(name, localport, protocol='tcp', action='allow', dir='in',
remoteip='any'):
'''
.. versionadded:: 2015.5.0
Add a new inbound or outbound rule to the firewall policy
Args:
name (str): The name of the rule. Must be unique and cannot be "all".
Required.
localport (int): The port the rule applies to. Must be a number between
0 and 65535. Can be a range. Can specify multiple ports separated by
commas. Required.
protocol (Optional[str]): The protocol. Can be any of the following:
- A number between 0 and 255
- icmpv4
- icmpv6
- tcp
- udp
- any
action (Optional[str]): The action the rule performs. Can be any of the
following:
- allow
- block
- bypass
dir (Optional[str]): The direction. Can be ``in`` or ``out``.
remoteip (Optional [str]): The remote IP. Can be any of the following:
- any
- localsubnet
- dns
- dhcp
- wins
- defaultgateway
- Any valid IPv4 address (192.168.0.12)
- Any valid IPv6 address (2002:9b3b:1a31:4:208:74ff:fe39:6c43)
- Any valid subnet (192.168.1.0/24)
- Any valid range of IP addresses (192.168.0.1-192.168.0.12)
- A list of valid IP addresses
Can be combinations of the above separated by commas.
Returns:
bool: True if successful
Raises:
CommandExecutionError: If the command fails
CLI Example:
.. code-block:: bash
salt '*' firewall.add_rule 'test' '8080' 'tcp'
salt '*' firewall.add_rule 'test' '1' 'icmpv4'
salt '*' firewall.add_rule 'test_remote_ip' '8000' 'tcp' 'allow' 'in' '192.168.0.1'
'''
cmd = ['netsh', 'advfirewall', 'firewall', 'add', 'rule',
'name={0}'.format(name),
'protocol={0}'.format(protocol),
'dir={0}'.format(dir),
'action={0}'.format(action),
'remoteip={0}'.format(remoteip)]
if protocol is None \
or ('icmpv4' not in protocol and 'icmpv6' not in protocol):
cmd.append('localport={0}'.format(localport))
ret = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)
if ret['retcode'] != 0:
raise CommandExecutionError(ret['stdout'])
return True |
def _handle_chat_event(self, event: events.ChatMessageWasReceived) -> None:
"""
Not thread-safe.
"""
for subscriber in self._chat_subscribers:
try:
subscriber(event)
except Exception:
LOG.exception(self._prefix_log_message(
f"failed to send chat event {event} to "
f"subscriber {subscriber}"
)) | Not thread-safe. | Below is the the instruction that describes the task:
### Input:
Not thread-safe.
### Response:
def _handle_chat_event(self, event: events.ChatMessageWasReceived) -> None:
"""
Not thread-safe.
"""
for subscriber in self._chat_subscribers:
try:
subscriber(event)
except Exception:
LOG.exception(self._prefix_log_message(
f"failed to send chat event {event} to "
f"subscriber {subscriber}"
)) |
def requires_role(self, roles):
"""
Require specific configured roles for access to a :mod:`flask` route.
:param roles: Role or list of roles to test for access
(only one role is required to pass).
:type roles: str OR list(str)
:raises: FlaskKeystoneForbidden
This method will gate a particular endpoint to only be accessed by
:class:`FlaskKeystone.User`'s with a particular configured role. If the
role given does not exist, or if the user does not have the requested
role, a FlaskKeystoneForbidden exception will be thrown, resulting in a
403 response to the client.
"""
def wrap(f):
@wraps(f)
def wrapped_f(*args, **kwargs):
if isinstance(roles, list):
if any(current_user.has_role(role) for role in roles):
return f(*args, **kwargs)
elif isinstance(roles, str):
if current_user.has_role(roles):
return f(*args, **kwargs)
else:
msg = ("roles parameter for requires_role on endpoint %s "
"should be a list or str, but is type %s.")
self.logger.error(msg % (
request.path,
type(roles)
))
msg = ("Rejected User '%s' access to '%s' "
"due to RBAC. (Requires '%s')")
self.logger.info(msg % (
current_user.user_id,
request.path,
roles
))
raise FlaskKeystoneForbidden()
return wrapped_f
return wrap | Require specific configured roles for access to a :mod:`flask` route.
:param roles: Role or list of roles to test for access
(only one role is required to pass).
:type roles: str OR list(str)
:raises: FlaskKeystoneForbidden
This method will gate a particular endpoint to only be accessed by
:class:`FlaskKeystone.User`'s with a particular configured role. If the
role given does not exist, or if the user does not have the requested
role, a FlaskKeystoneForbidden exception will be thrown, resulting in a
403 response to the client. | Below is the the instruction that describes the task:
### Input:
Require specific configured roles for access to a :mod:`flask` route.
:param roles: Role or list of roles to test for access
(only one role is required to pass).
:type roles: str OR list(str)
:raises: FlaskKeystoneForbidden
This method will gate a particular endpoint to only be accessed by
:class:`FlaskKeystone.User`'s with a particular configured role. If the
role given does not exist, or if the user does not have the requested
role, a FlaskKeystoneForbidden exception will be thrown, resulting in a
403 response to the client.
### Response:
def requires_role(self, roles):
"""
Require specific configured roles for access to a :mod:`flask` route.
:param roles: Role or list of roles to test for access
(only one role is required to pass).
:type roles: str OR list(str)
:raises: FlaskKeystoneForbidden
This method will gate a particular endpoint to only be accessed by
:class:`FlaskKeystone.User`'s with a particular configured role. If the
role given does not exist, or if the user does not have the requested
role, a FlaskKeystoneForbidden exception will be thrown, resulting in a
403 response to the client.
"""
def wrap(f):
@wraps(f)
def wrapped_f(*args, **kwargs):
if isinstance(roles, list):
if any(current_user.has_role(role) for role in roles):
return f(*args, **kwargs)
elif isinstance(roles, str):
if current_user.has_role(roles):
return f(*args, **kwargs)
else:
msg = ("roles parameter for requires_role on endpoint %s "
"should be a list or str, but is type %s.")
self.logger.error(msg % (
request.path,
type(roles)
))
msg = ("Rejected User '%s' access to '%s' "
"due to RBAC. (Requires '%s')")
self.logger.info(msg % (
current_user.user_id,
request.path,
roles
))
raise FlaskKeystoneForbidden()
return wrapped_f
return wrap |
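A hypothetical route guarded by the decorator above. The flask_keystone import path and the role names are assumptions made for the sketch; a real deployment also needs Keystone settings configured, which is omitted here.
from flask import Flask
from flask_keystone import FlaskKeystone  # import path assumed; adjust to the actual package layout
app = Flask(__name__)
auth = FlaskKeystone(app)  # real use requires Keystone configuration before this point
@app.route('/admin')
@auth.requires_role(['admin', 'operator'])  # access is granted if the user holds any one listed role
def admin_dashboard():
    return 'ok'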
async def _send_frame(self, message: bytes, opcode: int,
compress: Optional[int]=None) -> None:
"""Send a frame over the websocket with message as its payload."""
if self._closing:
ws_logger.warning('websocket connection is closing.')
rsv = 0
# Only compress larger packets (disabled)
        # Does a small packet need to be compressed?
# if self.compress and opcode < 8 and len(message) > 124:
if (compress or self.compress) and opcode < 8:
if compress:
# Do not set self._compress if compressing is for this frame
compressobj = zlib.compressobj(wbits=-compress)
else: # self.compress
if not self._compressobj:
self._compressobj = zlib.compressobj(wbits=-self.compress)
compressobj = self._compressobj
message = compressobj.compress(message)
message = message + compressobj.flush(
zlib.Z_FULL_FLUSH if self.notakeover else zlib.Z_SYNC_FLUSH)
if message.endswith(_WS_DEFLATE_TRAILING):
message = message[:-4]
rsv = rsv | 0x40
msg_length = len(message)
use_mask = self.use_mask
if use_mask:
mask_bit = 0x80
else:
mask_bit = 0
if msg_length < 126:
header = PACK_LEN1(0x80 | rsv | opcode, msg_length | mask_bit)
elif msg_length < (1 << 16):
header = PACK_LEN2(0x80 | rsv | opcode, 126 | mask_bit, msg_length)
else:
header = PACK_LEN3(0x80 | rsv | opcode, 127 | mask_bit, msg_length)
if use_mask:
mask = self.randrange(0, 0xffffffff)
mask = mask.to_bytes(4, 'big')
message = bytearray(message)
_websocket_mask(mask, message)
self.transport.write(header + mask + message)
self._output_size += len(header) + len(mask) + len(message)
else:
if len(message) > MSG_SIZE:
self.transport.write(header)
self.transport.write(message)
else:
self.transport.write(header + message)
self._output_size += len(header) + len(message)
if self._output_size > self._limit:
self._output_size = 0
await self.protocol._drain_helper() | Send a frame over the websocket with message as its payload. | Below is the the instruction that describes the task:
### Input:
Send a frame over the websocket with message as its payload.
### Response:
async def _send_frame(self, message: bytes, opcode: int,
compress: Optional[int]=None) -> None:
"""Send a frame over the websocket with message as its payload."""
if self._closing:
ws_logger.warning('websocket connection is closing.')
rsv = 0
# Only compress larger packets (disabled)
        # Does a small packet need to be compressed?
# if self.compress and opcode < 8 and len(message) > 124:
if (compress or self.compress) and opcode < 8:
if compress:
# Do not set self._compress if compressing is for this frame
compressobj = zlib.compressobj(wbits=-compress)
else: # self.compress
if not self._compressobj:
self._compressobj = zlib.compressobj(wbits=-self.compress)
compressobj = self._compressobj
message = compressobj.compress(message)
message = message + compressobj.flush(
zlib.Z_FULL_FLUSH if self.notakeover else zlib.Z_SYNC_FLUSH)
if message.endswith(_WS_DEFLATE_TRAILING):
message = message[:-4]
rsv = rsv | 0x40
msg_length = len(message)
use_mask = self.use_mask
if use_mask:
mask_bit = 0x80
else:
mask_bit = 0
if msg_length < 126:
header = PACK_LEN1(0x80 | rsv | opcode, msg_length | mask_bit)
elif msg_length < (1 << 16):
header = PACK_LEN2(0x80 | rsv | opcode, 126 | mask_bit, msg_length)
else:
header = PACK_LEN3(0x80 | rsv | opcode, 127 | mask_bit, msg_length)
if use_mask:
mask = self.randrange(0, 0xffffffff)
mask = mask.to_bytes(4, 'big')
message = bytearray(message)
_websocket_mask(mask, message)
self.transport.write(header + mask + message)
self._output_size += len(header) + len(mask) + len(message)
else:
if len(message) > MSG_SIZE:
self.transport.write(header)
self.transport.write(message)
else:
self.transport.write(header + message)
self._output_size += len(header) + len(message)
if self._output_size > self._limit:
self._output_size = 0
await self.protocol._drain_helper() |
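For orientation only, a minimal RFC 6455 header for the common case handled by PACK_LEN1 above (unmasked frame, payload shorter than 126 bytes) can be built directly with struct; the 0x80 in the first byte is the FIN bit and the low nibble is the opcode.

import struct

OPCODE_TEXT = 0x1
payload = b"hello"

# Byte 1: FIN (0x80) | RSV bits | opcode.  Byte 2: mask bit | payload length (< 126).
header = struct.pack("!BB", 0x80 | OPCODE_TEXT, len(payload))
frame = header + payload
print(frame.hex())  # 810568656c6c6f -> a complete unmasked text frame carrying b"hello"
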
def override_colors(self, colors):
"""Override default color of elements.
:param colors: New color value for given elements
:type colors: dict
"""
if not isinstance(colors, dict):
return
for key in self._color[True]:
if key in colors:
self._color[True][key] = colors[key] | Override default color of elements.
:param colors: New color value for given elements
:type colors: dict | Below is the the instruction that describes the task:
### Input:
Override default color of elements.
:param colors: New color value for given elements
:type colors: dict
### Response:
def override_colors(self, colors):
"""Override default color of elements.
:param colors: New color value for given elements
:type colors: dict
"""
if not isinstance(colors, dict):
return
for key in self._color[True]:
if key in colors:
self._color[True][key] = colors[key] |
def _text_position(size, text, font):
"""
Returns the left-top point where the text should be positioned.
"""
width, height = font.getsize(text)
left = (size - width) / 2.0
top = (size - height) / 3.0
return left, top | Returns the left-top point where the text should be positioned. | Below is the the instruction that describes the task:
### Input:
Returns the left-top point where the text should be positioned.
### Response:
def _text_position(size, text, font):
"""
Returns the left-top point where the text should be positioned.
"""
width, height = font.getsize(text)
left = (size - width) / 2.0
top = (size - height) / 3.0
return left, top |
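The centering arithmetic can be checked without Pillow by substituting a stub whose getsize() returns fixed pixel dimensions; a real PIL.ImageFont object is what the helper above actually receives.

class StubFont:
    """Stand-in for a PIL ImageFont: getsize() returns (width, height) in pixels."""
    def getsize(self, text):
        return (len(text) * 10, 20)  # pretend every glyph is 10 px wide and 20 px tall

def text_position(size, text, font):
    width, height = font.getsize(text)
    return (size - width) / 2.0, (size - height) / 3.0

# For a 100 x 100 square and the two-character text "AB" (20 x 20 px):
print(text_position(100, "AB", StubFont()))  # (40.0, 26.666...)
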
def delete_note(self, note_id):
""" Method to permanently delete a note
Arguments:
- note_id (string): key of the note to trash
Returns:
A tuple `(note, status)`
- note (dict): an empty dict or an error message
- status (int): 0 on success and -1 otherwise
"""
# notes have to be trashed before deletion
note, status = self.trash_note(note_id)
if (status == -1):
return note, status
params = '/i/%s' % (str(note_id))
request = Request(url=DATA_URL+params, method='DELETE')
request.add_header(self.header, self.get_token())
try:
response = urllib2.urlopen(request)
        except HTTPError as e:
            # HTTPError is a subclass of IOError, so the more specific handler must come first
            if e.code == 401:
                raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')
            else:
                return e, -1
        except IOError as e:
            return e, -1
return {}, 0 | Method to permanently delete a note
Arguments:
- note_id (string): key of the note to trash
Returns:
A tuple `(note, status)`
- note (dict): an empty dict or an error message
- status (int): 0 on success and -1 otherwise | Below is the the instruction that describes the task:
### Input:
Method to permanently delete a note
Arguments:
- note_id (string): key of the note to trash
Returns:
A tuple `(note, status)`
- note (dict): an empty dict or an error message
- status (int): 0 on success and -1 otherwise
### Response:
def delete_note(self, note_id):
""" Method to permanently delete a note
Arguments:
- note_id (string): key of the note to trash
Returns:
A tuple `(note, status)`
- note (dict): an empty dict or an error message
- status (int): 0 on success and -1 otherwise
"""
# notes have to be trashed before deletion
note, status = self.trash_note(note_id)
if (status == -1):
return note, status
params = '/i/%s' % (str(note_id))
request = Request(url=DATA_URL+params, method='DELETE')
request.add_header(self.header, self.get_token())
try:
response = urllib2.urlopen(request)
        except HTTPError as e:
            # HTTPError is a subclass of IOError, so the more specific handler must come first
            if e.code == 401:
                raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')
            else:
                return e, -1
        except IOError as e:
            return e, -1
return {}, 0 |
def registry_comparison(registry0, registry1):
"""Compares two dictionaries of registry keys returning their difference."""
comparison = {'created_keys': {},
'deleted_keys': [],
'created_values': {},
'deleted_values': {},
'modified_values': {}}
for key, info in registry1.items():
if key in registry0:
if info[1] != registry0[key][1]:
created, deleted, modified = compare_values(
registry0[key][1], info[1])
if created:
comparison['created_values'][key] = (info[0], created)
if deleted:
comparison['deleted_values'][key] = (info[0], deleted)
if modified:
comparison['modified_values'][key] = (info[0], modified)
else:
comparison['created_keys'][key] = info
for key in registry0.keys():
if key not in registry1:
comparison['deleted_keys'].append(key)
return comparison | Compares two dictionaries of registry keys returning their difference. | Below is the the instruction that describes the task:
### Input:
Compares two dictionaries of registry keys returning their difference.
### Response:
def registry_comparison(registry0, registry1):
"""Compares two dictionaries of registry keys returning their difference."""
comparison = {'created_keys': {},
'deleted_keys': [],
'created_values': {},
'deleted_values': {},
'modified_values': {}}
for key, info in registry1.items():
if key in registry0:
if info[1] != registry0[key][1]:
created, deleted, modified = compare_values(
registry0[key][1], info[1])
if created:
comparison['created_values'][key] = (info[0], created)
if deleted:
comparison['deleted_values'][key] = (info[0], deleted)
if modified:
comparison['modified_values'][key] = (info[0], modified)
else:
comparison['created_keys'][key] = info
for key in registry0.keys():
if key not in registry1:
comparison['deleted_keys'].append(key)
return comparison |
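The helper expects each snapshot as a dict of key -> (key_info, values_dict) and delegates per-value diffing to a compare_values() helper that is not shown here; the key-level part alone can be illustrated with two toy snapshots (the key names below are made up).

before = {r"HKLM\Software\App": ("info", {"Version": "1.0"})}
after = {r"HKLM\Software\App": ("info", {"Version": "2.0"}),
         r"HKLM\Software\New": ("info", {})}

created_keys = [k for k in after if k not in before]    # keys only present afterwards
deleted_keys = [k for k in before if k not in after]    # keys that disappeared
print(created_keys, deleted_keys)  # ['HKLM\\Software\\New'] []
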
def hexdump(src, length=8, colorize=False):
""" Produce a string hexdump of src, for debug output.
Input: bytestring; output: text string
"""
if not src:
return str(src)
if type(src) is not bytes:
raise yubico_exception.InputError('Hexdump \'src\' must be bytestring (got %s)' % type(src))
offset = 0
result = ''
for this in group(src, length):
if colorize:
last, this = this[-1], this[:-1]
colors = DumpColors()
color = colors.get('RESET')
if ord_byte(last) & yubikey_defs.RESP_PENDING_FLAG:
# write to key
color = colors.get('BLUE')
elif ord_byte(last) & yubikey_defs.SLOT_WRITE_FLAG:
color = colors.get('GREEN')
hex_s = color + ' '.join(["%02x" % ord_byte(x) for x in this]) + colors.get('RESET')
hex_s += " %02x" % ord_byte(last)
else:
hex_s = ' '.join(["%02x" % ord_byte(x) for x in this])
result += "%04X %s\n" % (offset, hex_s)
offset += length
return result | Produce a string hexdump of src, for debug output.
Input: bytestring; output: text string | Below is the the instruction that describes the task:
### Input:
Produce a string hexdump of src, for debug output.
Input: bytestring; output: text string
### Response:
def hexdump(src, length=8, colorize=False):
""" Produce a string hexdump of src, for debug output.
Input: bytestring; output: text string
"""
if not src:
return str(src)
if type(src) is not bytes:
raise yubico_exception.InputError('Hexdump \'src\' must be bytestring (got %s)' % type(src))
offset = 0
result = ''
for this in group(src, length):
if colorize:
last, this = this[-1], this[:-1]
colors = DumpColors()
color = colors.get('RESET')
if ord_byte(last) & yubikey_defs.RESP_PENDING_FLAG:
# write to key
color = colors.get('BLUE')
elif ord_byte(last) & yubikey_defs.SLOT_WRITE_FLAG:
color = colors.get('GREEN')
hex_s = color + ' '.join(["%02x" % ord_byte(x) for x in this]) + colors.get('RESET')
hex_s += " %02x" % ord_byte(last)
else:
hex_s = ' '.join(["%02x" % ord_byte(x) for x in this])
result += "%04X %s\n" % (offset, hex_s)
offset += length
return result |
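A stripped-down, dependency-free variant (no colorization, no YubiKey status-flag handling) shows the offset/hex layout the function produces and runs on plain Python 3 bytes.

def simple_hexdump(data, length=8):
    """Plain hexdump: a 4-digit offset followed by 'length' space-separated hex bytes."""
    lines = []
    for offset in range(0, len(data), length):
        chunk = data[offset:offset + length]
        lines.append("%04X %s" % (offset, " ".join("%02x" % b for b in chunk)))
    return "\n".join(lines)

print(simple_hexdump(b"\x00\x01\x02hello world!"))
# 0000 00 01 02 68 65 6c 6c 6f
# 0008 20 77 6f 72 6c 64 21
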
def purge_object(self, pid, log_message=None):
"""
Purge an object from Fedora. Calls :meth:`ApiFacade.purgeObject`.
:param pid: pid of the object to be purged
:param log_message: optional log message
:rtype: boolean
"""
kwargs = {'pid': pid}
if log_message:
kwargs['logMessage'] = log_message
response = self.api.purgeObject(**kwargs)
return response.status_code == requests.codes.ok | Purge an object from Fedora. Calls :meth:`ApiFacade.purgeObject`.
:param pid: pid of the object to be purged
:param log_message: optional log message
:rtype: boolean | Below is the the instruction that describes the task:
### Input:
Purge an object from Fedora. Calls :meth:`ApiFacade.purgeObject`.
:param pid: pid of the object to be purged
:param log_message: optional log message
:rtype: boolean
### Response:
def purge_object(self, pid, log_message=None):
"""
Purge an object from Fedora. Calls :meth:`ApiFacade.purgeObject`.
:param pid: pid of the object to be purged
:param log_message: optional log message
:rtype: boolean
"""
kwargs = {'pid': pid}
if log_message:
kwargs['logMessage'] = log_message
response = self.api.purgeObject(**kwargs)
return response.status_code == requests.codes.ok |
def dist(self, point, exponent=2.0):
"""Return the distance of ``point`` to this set.
Parameters
----------
point : `array-like` or float
Point whose distance to calculate. Its length must be equal
to the set's dimension. Can be a float in the 1d case.
exponent : non-zero float or ``float('inf')``, optional
Exponent of the norm used in the distance calculation.
Returns
-------
dist : float
Distance to the interior of the IntervalProd.
Points strictly inside have distance ``0.0``, points with
``NaN`` have distance ``float('inf')``.
See Also
--------
numpy.linalg.norm : norm used to compute the distance
Examples
--------
>>> min_pt, max_pt = [-1, 0, 2], [-0.5, 0, 3]
>>> rbox = IntervalProd(min_pt, max_pt)
>>> rbox.dist([-5, 3, 2])
5.0
>>> rbox.dist([-5, 3, 2], exponent=float('inf'))
4.0
"""
point = np.atleast_1d(point)
if len(point) != self.ndim:
raise ValueError('`point` must have length {}, got {}'
''.format(self.ndim, len(point)))
if np.any(np.isnan(point)):
return float('inf')
i_larger = np.where(point > self.max_pt)
i_smaller = np.where(point < self.min_pt)
# Access [0] since np.where returns a tuple.
if len(i_larger[0]) == 0 and len(i_smaller[0]) == 0:
return 0.0
else:
proj = np.concatenate((point[i_larger], point[i_smaller]))
border = np.concatenate((self.max_pt[i_larger],
self.min_pt[i_smaller]))
return np.linalg.norm(proj - border, ord=exponent) | Return the distance of ``point`` to this set.
Parameters
----------
point : `array-like` or float
Point whose distance to calculate. Its length must be equal
to the set's dimension. Can be a float in the 1d case.
exponent : non-zero float or ``float('inf')``, optional
Exponent of the norm used in the distance calculation.
Returns
-------
dist : float
Distance to the interior of the IntervalProd.
Points strictly inside have distance ``0.0``, points with
``NaN`` have distance ``float('inf')``.
See Also
--------
numpy.linalg.norm : norm used to compute the distance
Examples
--------
>>> min_pt, max_pt = [-1, 0, 2], [-0.5, 0, 3]
>>> rbox = IntervalProd(min_pt, max_pt)
>>> rbox.dist([-5, 3, 2])
5.0
>>> rbox.dist([-5, 3, 2], exponent=float('inf'))
4.0 | Below is the the instruction that describes the task:
### Input:
Return the distance of ``point`` to this set.
Parameters
----------
point : `array-like` or float
Point whose distance to calculate. Its length must be equal
to the set's dimension. Can be a float in the 1d case.
exponent : non-zero float or ``float('inf')``, optional
Exponent of the norm used in the distance calculation.
Returns
-------
dist : float
Distance to the interior of the IntervalProd.
Points strictly inside have distance ``0.0``, points with
``NaN`` have distance ``float('inf')``.
See Also
--------
numpy.linalg.norm : norm used to compute the distance
Examples
--------
>>> min_pt, max_pt = [-1, 0, 2], [-0.5, 0, 3]
>>> rbox = IntervalProd(min_pt, max_pt)
>>> rbox.dist([-5, 3, 2])
5.0
>>> rbox.dist([-5, 3, 2], exponent=float('inf'))
4.0
### Response:
def dist(self, point, exponent=2.0):
"""Return the distance of ``point`` to this set.
Parameters
----------
point : `array-like` or float
Point whose distance to calculate. Its length must be equal
to the set's dimension. Can be a float in the 1d case.
exponent : non-zero float or ``float('inf')``, optional
Exponent of the norm used in the distance calculation.
Returns
-------
dist : float
Distance to the interior of the IntervalProd.
Points strictly inside have distance ``0.0``, points with
``NaN`` have distance ``float('inf')``.
See Also
--------
numpy.linalg.norm : norm used to compute the distance
Examples
--------
>>> min_pt, max_pt = [-1, 0, 2], [-0.5, 0, 3]
>>> rbox = IntervalProd(min_pt, max_pt)
>>> rbox.dist([-5, 3, 2])
5.0
>>> rbox.dist([-5, 3, 2], exponent=float('inf'))
4.0
"""
point = np.atleast_1d(point)
if len(point) != self.ndim:
raise ValueError('`point` must have length {}, got {}'
''.format(self.ndim, len(point)))
if np.any(np.isnan(point)):
return float('inf')
i_larger = np.where(point > self.max_pt)
i_smaller = np.where(point < self.min_pt)
# Access [0] since np.where returns a tuple.
if len(i_larger[0]) == 0 and len(i_smaller[0]) == 0:
return 0.0
else:
proj = np.concatenate((point[i_larger], point[i_smaller]))
border = np.concatenate((self.max_pt[i_larger],
self.min_pt[i_smaller]))
return np.linalg.norm(proj - border, ord=exponent) |
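The projection-based distance can be reproduced with plain NumPy by clipping the point into the box and measuring the norm of what was clipped away; the values below match the doctest.

import numpy as np

min_pt = np.array([-1.0, 0.0, 2.0])
max_pt = np.array([-0.5, 0.0, 3.0])
point = np.array([-5.0, 3.0, 2.0])

# The residual is the per-axis overshoot outside the box.
residual = point - np.clip(point, min_pt, max_pt)
print(np.linalg.norm(residual, ord=2))       # 5.0  (Euclidean distance)
print(np.linalg.norm(residual, ord=np.inf))  # 4.0  (largest single-axis overshoot)
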
def _open_terminal():
"""Open pty master and return (master_fd, tty_name)."""
for x in 'pqrstuvwxyzPQRST':
for y in '0123456789abcdef':
pty_name = '/dev/pty' + x + y
try:
fd = os.open(pty_name, os.O_RDWR)
except OSError:
continue
return (fd, '/dev/tty' + x + y)
raise OSError('out of pty devices') | Open pty master and return (master_fd, tty_name). | Below is the the instruction that describes the task:
### Input:
Open pty master and return (master_fd, tty_name).
### Response:
def _open_terminal():
"""Open pty master and return (master_fd, tty_name)."""
for x in 'pqrstuvwxyzPQRST':
for y in '0123456789abcdef':
pty_name = '/dev/pty' + x + y
try:
fd = os.open(pty_name, os.O_RDWR)
except OSError:
continue
return (fd, '/dev/tty' + x + y)
raise OSError('out of pty devices') |
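This probing of /dev/ptyXY names is the legacy BSD fallback; on current Unix systems the same master/slave pair is normally obtained from os.openpty(), roughly as follows (Unix-only, device names vary by platform).

import os

master_fd, slave_fd = os.openpty()   # kernel allocates the pseudo-terminal pair
print(os.ttyname(slave_fd))          # e.g. /dev/pts/3 on Linux
os.close(slave_fd)
os.close(master_fd)
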
def get_num_nodes(properties=None, hadoop_conf_dir=None, offline=False):
"""
Get the number of task trackers in the Hadoop cluster.
All arguments are passed to :func:`get_task_trackers`.
"""
return len(get_task_trackers(properties, hadoop_conf_dir, offline)) | Get the number of task trackers in the Hadoop cluster.
All arguments are passed to :func:`get_task_trackers`. | Below is the the instruction that describes the task:
### Input:
Get the number of task trackers in the Hadoop cluster.
All arguments are passed to :func:`get_task_trackers`.
### Response:
def get_num_nodes(properties=None, hadoop_conf_dir=None, offline=False):
"""
Get the number of task trackers in the Hadoop cluster.
All arguments are passed to :func:`get_task_trackers`.
"""
return len(get_task_trackers(properties, hadoop_conf_dir, offline)) |
def _landsat_parse_scene_id(sceneid):
"""
Parse Landsat-8 scene id.
Author @perrygeo - http://www.perrygeo.com
"""
pre_collection = r"(L[COTEM]8\d{6}\d{7}[A-Z]{3}\d{2})"
collection_1 = r"(L[COTEM]08_L\d{1}[A-Z]{2}_\d{6}_\d{8}_\d{8}_\d{2}_(T1|T2|RT))"
if not re.match("^{}|{}$".format(pre_collection, collection_1), sceneid):
raise InvalidLandsatSceneId("Could not match {}".format(sceneid))
precollection_pattern = (
r"^L"
r"(?P<sensor>\w{1})"
r"(?P<satellite>\w{1})"
r"(?P<path>[0-9]{3})"
r"(?P<row>[0-9]{3})"
r"(?P<acquisitionYear>[0-9]{4})"
r"(?P<acquisitionJulianDay>[0-9]{3})"
r"(?P<groundStationIdentifier>\w{3})"
r"(?P<archiveVersion>[0-9]{2})$"
)
collection_pattern = (
r"^L"
r"(?P<sensor>\w{1})"
r"(?P<satellite>\w{2})"
r"_"
r"(?P<processingCorrectionLevel>\w{4})"
r"_"
r"(?P<path>[0-9]{3})"
r"(?P<row>[0-9]{3})"
r"_"
r"(?P<acquisitionYear>[0-9]{4})"
r"(?P<acquisitionMonth>[0-9]{2})"
r"(?P<acquisitionDay>[0-9]{2})"
r"_"
r"(?P<processingYear>[0-9]{4})"
r"(?P<processingMonth>[0-9]{2})"
r"(?P<processingDay>[0-9]{2})"
r"_"
r"(?P<collectionNumber>\w{2})"
r"_"
r"(?P<collectionCategory>\w{2})$"
)
meta = None
for pattern in [collection_pattern, precollection_pattern]:
match = re.match(pattern, sceneid, re.IGNORECASE)
if match:
meta = match.groupdict()
break
if meta.get("acquisitionJulianDay"):
date = datetime.datetime(
int(meta["acquisitionYear"]), 1, 1
) + datetime.timedelta(int(meta["acquisitionJulianDay"]) - 1)
meta["date"] = date.strftime("%Y-%m-%d")
else:
meta["date"] = "{}-{}-{}".format(
meta["acquisitionYear"], meta["acquisitionMonth"], meta["acquisitionDay"]
)
collection = meta.get("collectionNumber", "")
if collection != "":
collection = "c{}".format(int(collection))
meta["key"] = os.path.join(
collection, "L8", meta["path"], meta["row"], sceneid, sceneid
)
meta["scene"] = sceneid
return meta | Parse Landsat-8 scene id.
Author @perrygeo - http://www.perrygeo.com | Below is the the instruction that describes the task:
### Input:
Parse Landsat-8 scene id.
Author @perrygeo - http://www.perrygeo.com
### Response:
def _landsat_parse_scene_id(sceneid):
"""
Parse Landsat-8 scene id.
Author @perrygeo - http://www.perrygeo.com
"""
pre_collection = r"(L[COTEM]8\d{6}\d{7}[A-Z]{3}\d{2})"
collection_1 = r"(L[COTEM]08_L\d{1}[A-Z]{2}_\d{6}_\d{8}_\d{8}_\d{2}_(T1|T2|RT))"
if not re.match("^{}|{}$".format(pre_collection, collection_1), sceneid):
raise InvalidLandsatSceneId("Could not match {}".format(sceneid))
precollection_pattern = (
r"^L"
r"(?P<sensor>\w{1})"
r"(?P<satellite>\w{1})"
r"(?P<path>[0-9]{3})"
r"(?P<row>[0-9]{3})"
r"(?P<acquisitionYear>[0-9]{4})"
r"(?P<acquisitionJulianDay>[0-9]{3})"
r"(?P<groundStationIdentifier>\w{3})"
r"(?P<archiveVersion>[0-9]{2})$"
)
collection_pattern = (
r"^L"
r"(?P<sensor>\w{1})"
r"(?P<satellite>\w{2})"
r"_"
r"(?P<processingCorrectionLevel>\w{4})"
r"_"
r"(?P<path>[0-9]{3})"
r"(?P<row>[0-9]{3})"
r"_"
r"(?P<acquisitionYear>[0-9]{4})"
r"(?P<acquisitionMonth>[0-9]{2})"
r"(?P<acquisitionDay>[0-9]{2})"
r"_"
r"(?P<processingYear>[0-9]{4})"
r"(?P<processingMonth>[0-9]{2})"
r"(?P<processingDay>[0-9]{2})"
r"_"
r"(?P<collectionNumber>\w{2})"
r"_"
r"(?P<collectionCategory>\w{2})$"
)
meta = None
for pattern in [collection_pattern, precollection_pattern]:
match = re.match(pattern, sceneid, re.IGNORECASE)
if match:
meta = match.groupdict()
break
if meta.get("acquisitionJulianDay"):
date = datetime.datetime(
int(meta["acquisitionYear"]), 1, 1
) + datetime.timedelta(int(meta["acquisitionJulianDay"]) - 1)
meta["date"] = date.strftime("%Y-%m-%d")
else:
meta["date"] = "{}-{}-{}".format(
meta["acquisitionYear"], meta["acquisitionMonth"], meta["acquisitionDay"]
)
collection = meta.get("collectionNumber", "")
if collection != "":
collection = "c{}".format(int(collection))
meta["key"] = os.path.join(
collection, "L8", meta["path"], meta["row"], sceneid, sceneid
)
meta["scene"] = sceneid
return meta |
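The only non-obvious step is turning a pre-collection acquisition Julian day (a 1-based day-of-year) into a calendar date; a standalone check with made-up year/day values:

import datetime

acquisition_year, acquisition_julian_day = 2016, 197   # hypothetical values
date = (datetime.datetime(acquisition_year, 1, 1)
        + datetime.timedelta(acquisition_julian_day - 1))
print(date.strftime("%Y-%m-%d"))   # 2016-07-15
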
def key2elements(key):
"""split key to elements"""
# words = key.split('.')
# if len(words) == 4:
# return words
# # there is a dot in object name
# fieldword = words.pop(-1)
# nameword = '.'.join(words[-2:])
# if nameword[-1] in ('"', "'"):
# # The object name is in quotes
# nameword = nameword[1:-1]
# elements = words[:-2] + [nameword, fieldword, ]
# return elements
words = key.split('.')
first2words = words[:2]
lastword = words[-1]
namewords = words[2:-1]
namephrase = '.'.join(namewords)
if namephrase.startswith("'") and namephrase.endswith("'"):
namephrase = namephrase[1:-1]
return first2words + [namephrase] + [lastword] | split key to elements | Below is the the instruction that describes the task:
### Input:
split key to elements
### Response:
def key2elements(key):
"""split key to elements"""
# words = key.split('.')
# if len(words) == 4:
# return words
# # there is a dot in object name
# fieldword = words.pop(-1)
# nameword = '.'.join(words[-2:])
# if nameword[-1] in ('"', "'"):
# # The object name is in quotes
# nameword = nameword[1:-1]
# elements = words[:-2] + [nameword, fieldword, ]
# return elements
words = key.split('.')
first2words = words[:2]
lastword = words[-1]
namewords = words[2:-1]
namephrase = '.'.join(namewords)
if namephrase.startswith("'") and namephrase.endswith("'"):
namephrase = namephrase[1:-1]
return first2words + [namephrase] + [lastword] |
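The quoting rule matters when the object name itself contains dots; a condensed copy of the function, run on a made-up key of the form <file>.<class>.<object name>.<field>, shows the quoted name being reassembled.

def key2elements_demo(key):
    words = key.split('.')
    namephrase = '.'.join(words[2:-1])
    if namephrase.startswith("'") and namephrase.endswith("'"):
        namephrase = namephrase[1:-1]
    return words[:2] + [namephrase, words[-1]]

print(key2elements_demo("idf.Construction.'Wall 1.5'.Name"))
# ['idf', 'Construction', 'Wall 1.5', 'Name']
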
def with_context(cls, setup_phases, teardown_phases):
"""Create PhaseGroup creator function with setup and teardown phases.
Args:
setup_phases: list of phase_descriptor.PhaseDescriptors/PhaseGroups/
callables/iterables, phases to run during the setup for the PhaseGroup
returned from the created function.
teardown_phases: list of phase_descriptor.PhaseDescriptors/PhaseGroups/
callables/iterables, phases to run during the teardown for the
PhaseGroup returned from the created function.
Returns:
Function that takes *phases and returns a PhaseGroup with the predefined
setup and teardown phases, with *phases as the main phases.
"""
setup = flatten_phases_and_groups(setup_phases)
teardown = flatten_phases_and_groups(teardown_phases)
def _context_wrapper(*phases):
return cls(setup=setup,
main=flatten_phases_and_groups(phases),
teardown=teardown)
return _context_wrapper | Create PhaseGroup creator function with setup and teardown phases.
Args:
setup_phases: list of phase_descriptor.PhaseDescriptors/PhaseGroups/
callables/iterables, phases to run during the setup for the PhaseGroup
returned from the created function.
teardown_phases: list of phase_descriptor.PhaseDescriptors/PhaseGroups/
callables/iterables, phases to run during the teardown for the
PhaseGroup returned from the created function.
Returns:
Function that takes *phases and returns a PhaseGroup with the predefined
setup and teardown phases, with *phases as the main phases. | Below is the the instruction that describes the task:
### Input:
Create PhaseGroup creator function with setup and teardown phases.
Args:
setup_phases: list of phase_descriptor.PhaseDescriptors/PhaseGroups/
callables/iterables, phases to run during the setup for the PhaseGroup
returned from the created function.
teardown_phases: list of phase_descriptor.PhaseDescriptors/PhaseGroups/
callables/iterables, phases to run during the teardown for the
PhaseGroup returned from the created function.
Returns:
Function that takes *phases and returns a PhaseGroup with the predefined
setup and teardown phases, with *phases as the main phases.
### Response:
def with_context(cls, setup_phases, teardown_phases):
"""Create PhaseGroup creator function with setup and teardown phases.
Args:
setup_phases: list of phase_descriptor.PhaseDescriptors/PhaseGroups/
callables/iterables, phases to run during the setup for the PhaseGroup
returned from the created function.
teardown_phases: list of phase_descriptor.PhaseDescriptors/PhaseGroups/
callables/iterables, phases to run during the teardown for the
PhaseGroup returned from the created function.
Returns:
Function that takes *phases and returns a PhaseGroup with the predefined
setup and teardown phases, with *phases as the main phases.
"""
setup = flatten_phases_and_groups(setup_phases)
teardown = flatten_phases_and_groups(teardown_phases)
def _context_wrapper(*phases):
return cls(setup=setup,
main=flatten_phases_and_groups(phases),
teardown=teardown)
return _context_wrapper |
def uncontract_general(basis, use_copy=True):
"""
Removes the general contractions from a basis set
The input basis set is not modified. The returned basis
may have functions with coefficients of zero and may have duplicate
shells.
If use_copy is True, the input basis set is not modified.
"""
if use_copy:
basis = copy.deepcopy(basis)
for k, el in basis['elements'].items():
if not 'electron_shells' in el:
continue
newshells = []
for sh in el['electron_shells']:
# See if we actually have to uncontract
# Also, don't uncontract sp, spd,.... orbitals
# (leave that to uncontract_spdf)
if len(sh['coefficients']) == 1 or len(sh['angular_momentum']) > 1:
newshells.append(sh)
else:
if len(sh['angular_momentum']) == 1:
for c in sh['coefficients']:
                        # copy, then replace 'coefficients'
newsh = sh.copy()
newsh['coefficients'] = [c]
newshells.append(newsh)
el['electron_shells'] = newshells
    # If use_copy is True, we already made our deep copy
return prune_basis(basis, False) | Removes the general contractions from a basis set
The input basis set is not modified. The returned basis
may have functions with coefficients of zero and may have duplicate
shells.
If use_copy is True, the input basis set is not modified. | Below is the the instruction that describes the task:
### Input:
Removes the general contractions from a basis set
The input basis set is not modified. The returned basis
may have functions with coefficients of zero and may have duplicate
shells.
If use_copy is True, the input basis set is not modified.
### Response:
def uncontract_general(basis, use_copy=True):
"""
Removes the general contractions from a basis set
The input basis set is not modified. The returned basis
may have functions with coefficients of zero and may have duplicate
shells.
If use_copy is True, the input basis set is not modified.
"""
if use_copy:
basis = copy.deepcopy(basis)
for k, el in basis['elements'].items():
if not 'electron_shells' in el:
continue
newshells = []
for sh in el['electron_shells']:
# See if we actually have to uncontract
# Also, don't uncontract sp, spd,.... orbitals
# (leave that to uncontract_spdf)
if len(sh['coefficients']) == 1 or len(sh['angular_momentum']) > 1:
newshells.append(sh)
else:
if len(sh['angular_momentum']) == 1:
for c in sh['coefficients']:
                        # copy, then replace 'coefficients'
newsh = sh.copy()
newsh['coefficients'] = [c]
newshells.append(newsh)
el['electron_shells'] = newshells
    # If use_copy is True, we already made our deep copy
return prune_basis(basis, False) |
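On a toy shell (schema heavily simplified, prune_basis omitted), the effect of the inner loop is to split a generally contracted shell with two coefficient vectors into two single-contraction shells.

shell = {"angular_momentum": [0],
         "exponents": ["10.0", "1.0"],
         "coefficients": [["0.9", "0.1"], ["0.2", "0.8"]]}   # two general contractions

uncontracted = []
for coefs in shell["coefficients"]:
    new_shell = shell.copy()               # copy, then replace 'coefficients'
    new_shell["coefficients"] = [coefs]
    uncontracted.append(new_shell)

print(len(uncontracted))                          # 2
print([s["coefficients"] for s in uncontracted])  # [[['0.9', '0.1']], [['0.2', '0.8']]]
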
def shape_rb_data(raw_rb):
"""Take the raw rb data and convert it into averages and std dev
Args:
raw_rb (numpy.array): m x n x l list where m is the number of seeds, n
is the number of Clifford sequences and l is the number of qubits
Return:
numpy_array: 2 x n x l list where index 0 is the mean over seeds, 1 is
            the std dev over seeds
"""
rb_data = []
rb_data.append(np.mean(raw_rb, 0))
rb_data.append(np.std(raw_rb, 0))
return rb_data | Take the raw rb data and convert it into averages and std dev
Args:
raw_rb (numpy.array): m x n x l list where m is the number of seeds, n
is the number of Clifford sequences and l is the number of qubits
Return:
numpy_array: 2 x n x l list where index 0 is the mean over seeds, 1 is
        the std dev over seeds | Below is the the instruction that describes the task:
### Input:
Take the raw rb data and convert it into averages and std dev
Args:
raw_rb (numpy.array): m x n x l list where m is the number of seeds, n
is the number of Clifford sequences and l is the number of qubits
Return:
numpy_array: 2 x n x l list where index 0 is the mean over seeds, 1 is
        the std dev over seeds
### Response:
def shape_rb_data(raw_rb):
"""Take the raw rb data and convert it into averages and std dev
Args:
raw_rb (numpy.array): m x n x l list where m is the number of seeds, n
is the number of Clifford sequences and l is the number of qubits
Return:
numpy_array: 2 x n x l list where index 0 is the mean over seeds, 1 is
            the std dev over seeds
"""
rb_data = []
rb_data.append(np.mean(raw_rb, 0))
rb_data.append(np.std(raw_rb, 0))
return rb_data |
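The reshaping is just a mean and a standard deviation over the seed axis; a small NumPy check with two seeds and three sequence lengths (single qubit, made-up numbers):

import numpy as np

raw_rb = np.array([[1.0, 0.8, 0.6],     # seed 0
                   [0.9, 0.7, 0.5]])    # seed 1

rb_mean = np.mean(raw_rb, 0)   # [0.95 0.75 0.55]
rb_std = np.std(raw_rb, 0)     # [0.05 0.05 0.05] (up to float rounding)
print(rb_mean, rb_std)
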
def load(self, path):
"""
Load the catalog from file
Parameters
----------
path: str
The path to the file
"""
# Get the object
DB = joblib.load(path)
# Load the attributes
self.catalog = DB.catalog
self.n_sources = DB.n_sources
self.name = DB.name
self.history = DB.history
del DB | Load the catalog from file
Parameters
----------
path: str
The path to the file | Below is the the instruction that describes the task:
### Input:
Load the catalog from file
Parameters
----------
path: str
The path to the file
### Response:
def load(self, path):
"""
Load the catalog from file
Parameters
----------
path: str
The path to the file
"""
# Get the object
DB = joblib.load(path)
# Load the attributes
self.catalog = DB.catalog
self.n_sources = DB.n_sources
self.name = DB.name
self.history = DB.history
del DB |
def create_token(self, *, holder_name, card_number, credit_card_cvv, expiration_date, token_type='credit_card',
identity_document=None, billing_address=None, additional_details=None):
"""
When creating a Token, remember to use the public-key header instead of the private-key header,
and do not include the app-id header.
Args:
holder_name: Name of the credit card holder.
card_number: Credit card number.
credit_card_cvv: The CVV number on the card (3 or 4 digits) to be encrypted.
expiration_date: Credit card expiration date. Possible formats: mm-yyyy, mm-yy, mm.yyyy,
mm.yy, mm/yy, mm/yyyy, mm yyyy, or mm yy.
token_type: The type of token
billing_address: Address.
identity_document: National identity document of the card holder.
additional_details: Optional additional data stored with your token in key/value pairs.
Returns:
"""
headers = self.client._get_public_headers()
payload = {
"token_type": token_type,
"credit_card_cvv": credit_card_cvv,
"card_number": card_number,
"expiration_date": expiration_date,
"holder_name": holder_name,
"identity_document": identity_document,
"billing_address": billing_address,
"additional_details": additional_details,
}
endpoint = '/tokens'
return self.client._post(self.client.URL_BASE + endpoint, json=payload, headers=headers) | When creating a Token, remember to use the public-key header instead of the private-key header,
and do not include the app-id header.
Args:
holder_name: Name of the credit card holder.
card_number: Credit card number.
credit_card_cvv: The CVV number on the card (3 or 4 digits) to be encrypted.
expiration_date: Credit card expiration date. Possible formats: mm-yyyy, mm-yy, mm.yyyy,
mm.yy, mm/yy, mm/yyyy, mm yyyy, or mm yy.
token_type: The type of token
billing_address: Address.
identity_document: National identity document of the card holder.
additional_details: Optional additional data stored with your token in key/value pairs.
Returns: | Below is the the instruction that describes the task:
### Input:
When creating a Token, remember to use the public-key header instead of the private-key header,
and do not include the app-id header.
Args:
holder_name: Name of the credit card holder.
card_number: Credit card number.
credit_card_cvv: The CVV number on the card (3 or 4 digits) to be encrypted.
expiration_date: Credit card expiration date. Possible formats: mm-yyyy, mm-yy, mm.yyyy,
mm.yy, mm/yy, mm/yyyy, mm yyyy, or mm yy.
token_type: The type of token
billing_address: Address.
identity_document: National identity document of the card holder.
additional_details: Optional additional data stored with your token in key/value pairs.
Returns:
### Response:
def create_token(self, *, holder_name, card_number, credit_card_cvv, expiration_date, token_type='credit_card',
identity_document=None, billing_address=None, additional_details=None):
"""
When creating a Token, remember to use the public-key header instead of the private-key header,
and do not include the app-id header.
Args:
holder_name: Name of the credit card holder.
card_number: Credit card number.
credit_card_cvv: The CVV number on the card (3 or 4 digits) to be encrypted.
expiration_date: Credit card expiration date. Possible formats: mm-yyyy, mm-yy, mm.yyyy,
mm.yy, mm/yy, mm/yyyy, mm yyyy, or mm yy.
token_type: The type of token
billing_address: Address.
identity_document: National identity document of the card holder.
additional_details: Optional additional data stored with your token in key/value pairs.
Returns:
"""
headers = self.client._get_public_headers()
payload = {
"token_type": token_type,
"credit_card_cvv": credit_card_cvv,
"card_number": card_number,
"expiration_date": expiration_date,
"holder_name": holder_name,
"identity_document": identity_document,
"billing_address": billing_address,
"additional_details": additional_details,
}
endpoint = '/tokens'
return self.client._post(self.client.URL_BASE + endpoint, json=payload, headers=headers) |
def Beta(alpha: vertex_constructor_param_types, beta: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
One to one constructor for mapping some tensorShape of alpha and beta to
a matching tensorShaped Beta.
:param alpha: the alpha of the Beta with either the same tensorShape as specified for this vertex or a scalar
:param beta: the beta of the Beta with either the same tensorShape as specified for this vertex or a scalar
"""
return Double(context.jvm_view().BetaVertex, label, cast_to_double_vertex(alpha), cast_to_double_vertex(beta)) | One to one constructor for mapping some tensorShape of alpha and beta to
a matching tensorShaped Beta.
:param alpha: the alpha of the Beta with either the same tensorShape as specified for this vertex or a scalar
:param beta: the beta of the Beta with either the same tensorShape as specified for this vertex or a scalar | Below is the the instruction that describes the task:
### Input:
One to one constructor for mapping some tensorShape of alpha and beta to
a matching tensorShaped Beta.
:param alpha: the alpha of the Beta with either the same tensorShape as specified for this vertex or a scalar
:param beta: the beta of the Beta with either the same tensorShape as specified for this vertex or a scalar
### Response:
def Beta(alpha: vertex_constructor_param_types, beta: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
One to one constructor for mapping some tensorShape of alpha and beta to
a matching tensorShaped Beta.
:param alpha: the alpha of the Beta with either the same tensorShape as specified for this vertex or a scalar
:param beta: the beta of the Beta with either the same tensorShape as specified for this vertex or a scalar
"""
return Double(context.jvm_view().BetaVertex, label, cast_to_double_vertex(alpha), cast_to_double_vertex(beta)) |
def close_all_files(self):
"""Close all open files (so that we can open more)."""
while len(self.open_file_infos) > 0:
file_info = self.open_file_infos.pop(0)
file_info.file_handle.close()
file_info.file_handle = None
self.closed_file_infos.append(file_info)
self.can_open_more_files = True | Close all open files (so that we can open more). | Below is the the instruction that describes the task:
### Input:
Close all open files (so that we can open more).
### Response:
def close_all_files(self):
"""Close all open files (so that we can open more)."""
while len(self.open_file_infos) > 0:
file_info = self.open_file_infos.pop(0)
file_info.file_handle.close()
file_info.file_handle = None
self.closed_file_infos.append(file_info)
self.can_open_more_files = True |
def leaveWhitespace( self ):
"""Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self | Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on
all contained expressions. | Below is the the instruction that describes the task:
### Input:
Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on
all contained expressions.
### Response:
def leaveWhitespace( self ):
"""Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self |
def kind(self):
"""Kind."""
if "kind" not in self.attrs.keys():
self.attrs["kind"] = "None"
value = self.attrs["kind"]
return value if not value == "None" else None | Kind. | Below is the the instruction that describes the task:
### Input:
Kind.
### Response:
def kind(self):
"""Kind."""
if "kind" not in self.attrs.keys():
self.attrs["kind"] = "None"
value = self.attrs["kind"]
return value if not value == "None" else None |
def price_in_btc(self, minimum: float = 0, maximum: float = 2) -> str:
"""Generate random price in BTC.
:param minimum: Minimum value of price.
:param maximum: Maximum value of price.
:return: Price in BTC.
"""
return '{} BTC'.format(
self.random.uniform(
minimum,
maximum,
precision=7,
),
) | Generate random price in BTC.
:param minimum: Minimum value of price.
:param maximum: Maximum value of price.
:return: Price in BTC. | Below is the the instruction that describes the task:
### Input:
Generate random price in BTC.
:param minimum: Minimum value of price.
:param maximum: Maximum value of price.
:return: Price in BTC.
### Response:
def price_in_btc(self, minimum: float = 0, maximum: float = 2) -> str:
"""Generate random price in BTC.
:param minimum: Minimum value of price.
:param maximum: Maximum value of price.
:return: Price in BTC.
"""
return '{} BTC'.format(
self.random.uniform(
minimum,
maximum,
precision=7,
),
) |
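The behaviour amounts to a uniform draw rounded to seven decimal places and suffixed with 'BTC'; a rough stand-in using only the standard library (the provider's own random helper is replaced by the random module):

import random

def price_in_btc_demo(minimum=0.0, maximum=2.0, precision=7):
    return '{} BTC'.format(round(random.uniform(minimum, maximum), precision))

random.seed(0)
print(price_in_btc_demo())   # '1.6888437 BTC' with this seed on CPython
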
def _sorted_copy(self, comparison, reversed=False):
"""
Returns a sorted copy with the colors arranged according to the given comparison.
"""
sorted = self.copy()
_list.sort(sorted, comparison)
if reversed:
_list.reverse(sorted)
return sorted | Returns a sorted copy with the colors arranged according to the given comparison. | Below is the the instruction that describes the task:
### Input:
Returns a sorted copy with the colors arranged according to the given comparison.
### Response:
def _sorted_copy(self, comparison, reversed=False):
"""
Returns a sorted copy with the colors arranged according to the given comparison.
"""
sorted = self.copy()
_list.sort(sorted, comparison)
if reversed:
_list.reverse(sorted)
return sorted |
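The comparison-function style is a Python 2 idiom; under Python 3 the same sorted copy is obtained by wrapping the comparison with functools.cmp_to_key, sketched here on a plain list standing in for the color list.

from functools import cmp_to_key

def by_value(a, b):
    """Old-style comparison: negative, zero or positive, like the Python 2 cmp()."""
    return (a > b) - (a < b)

colors = [0.6, 0.1, 0.9]
sorted_copy = sorted(colors, key=cmp_to_key(by_value), reverse=True)  # the reversed=True case
print(colors, sorted_copy)   # [0.6, 0.1, 0.9] [0.9, 0.6, 0.1]; the original is untouched
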
def include_theme_files(self, fragment):
"""
Gets theme configuration and renders theme css into fragment
"""
theme = self.get_theme()
if not theme or 'package' not in theme:
return
theme_package, theme_files = theme.get('package', None), theme.get('locations', [])
resource_loader = ResourceLoader(theme_package)
for theme_file in theme_files:
fragment.add_css(resource_loader.load_unicode(theme_file)) | Gets theme configuration and renders theme css into fragment | Below is the the instruction that describes the task:
### Input:
Gets theme configuration and renders theme css into fragment
### Response:
def include_theme_files(self, fragment):
"""
Gets theme configuration and renders theme css into fragment
"""
theme = self.get_theme()
if not theme or 'package' not in theme:
return
theme_package, theme_files = theme.get('package', None), theme.get('locations', [])
resource_loader = ResourceLoader(theme_package)
for theme_file in theme_files:
fragment.add_css(resource_loader.load_unicode(theme_file)) |
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
'''
Waits for a task to be completed.
task
The task to wait for.
instance_name
The name of the ESXi host, vCenter Server, or Virtual Machine that
the task is being run on.
task_type
The type of task being performed. Useful information for debugging purposes.
sleep_seconds
The number of seconds to wait before querying the task again.
Defaults to ``1`` second.
log_level
The level at which to log task information. Default is ``debug``,
but ``info`` is also supported.
'''
time_counter = 0
start_time = time.time()
log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
try:
task_info = task.info
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.FileNotFound as exc:
log.exception(exc)
raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
while task_info.state == 'running' or task_info.state == 'queued':
if time_counter % sleep_seconds == 0:
msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
instance_name, task_type, time_counter)
if log_level == 'info':
log.info(msg)
else:
log.debug(msg)
time.sleep(1.0 - ((time.time() - start_time) % 1.0))
time_counter += 1
try:
task_info = task.info
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.FileNotFound as exc:
log.exception(exc)
raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
if task_info.state == 'success':
msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
instance_name, task_type, time_counter)
if log_level == 'info':
log.info(msg)
else:
log.debug(msg)
# task is in a successful state
return task_info.result
else:
# task is in an error state
try:
raise task_info.error
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.FileNotFound as exc:
log.exception(exc)
raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.fault.SystemError as exc:
log.exception(exc)
raise salt.exceptions.VMwareSystemError(exc.msg)
except vmodl.fault.InvalidArgument as exc:
log.exception(exc)
exc_message = exc.msg
if exc.faultMessage:
exc_message = '{0} ({1})'.format(exc_message,
exc.faultMessage[0].message)
raise salt.exceptions.VMwareApiError(exc_message) | Waits for a task to be completed.
task
The task to wait for.
instance_name
The name of the ESXi host, vCenter Server, or Virtual Machine that
the task is being run on.
task_type
The type of task being performed. Useful information for debugging purposes.
sleep_seconds
The number of seconds to wait before querying the task again.
Defaults to ``1`` second.
log_level
The level at which to log task information. Default is ``debug``,
but ``info`` is also supported. | Below is the the instruction that describes the task:
### Input:
Waits for a task to be completed.
task
The task to wait for.
instance_name
The name of the ESXi host, vCenter Server, or Virtual Machine that
the task is being run on.
task_type
The type of task being performed. Useful information for debugging purposes.
sleep_seconds
The number of seconds to wait before querying the task again.
Defaults to ``1`` second.
log_level
The level at which to log task information. Default is ``debug``,
but ``info`` is also supported.
### Response:
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
'''
Waits for a task to be completed.
task
The task to wait for.
instance_name
The name of the ESXi host, vCenter Server, or Virtual Machine that
the task is being run on.
task_type
The type of task being performed. Useful information for debugging purposes.
sleep_seconds
The number of seconds to wait before querying the task again.
Defaults to ``1`` second.
log_level
The level at which to log task information. Default is ``debug``,
but ``info`` is also supported.
'''
time_counter = 0
start_time = time.time()
log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
try:
task_info = task.info
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.FileNotFound as exc:
log.exception(exc)
raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
while task_info.state == 'running' or task_info.state == 'queued':
if time_counter % sleep_seconds == 0:
msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
instance_name, task_type, time_counter)
if log_level == 'info':
log.info(msg)
else:
log.debug(msg)
time.sleep(1.0 - ((time.time() - start_time) % 1.0))
time_counter += 1
try:
task_info = task.info
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.FileNotFound as exc:
log.exception(exc)
raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
if task_info.state == 'success':
msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
instance_name, task_type, time_counter)
if log_level == 'info':
log.info(msg)
else:
log.debug(msg)
# task is in a successful state
return task_info.result
else:
# task is in an error state
try:
raise task_info.error
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.FileNotFound as exc:
log.exception(exc)
raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.fault.SystemError as exc:
log.exception(exc)
raise salt.exceptions.VMwareSystemError(exc.msg)
except vmodl.fault.InvalidArgument as exc:
log.exception(exc)
exc_message = exc.msg
if exc.faultMessage:
exc_message = '{0} ({1})'.format(exc_message,
exc.faultMessage[0].message)
raise salt.exceptions.VMwareApiError(exc_message) |
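Stripped of the pyVmomi fault handling, the control flow is a poll-and-sleep loop on the task state; a generic, self-contained sketch with a fake task object (no vSphere required, all names illustrative):

import itertools
import time

class FakeTask:
    """Stand-in for a vSphere task: reports 'running' a few times, then 'success'."""
    result = 'task-result'

    def __init__(self, polls_until_done=3):
        self._polls = itertools.count()
        self._done_after = polls_until_done

    @property
    def state(self):
        return 'success' if next(self._polls) >= self._done_after else 'running'

def wait_for(task, sleep_seconds=0.1):
    while task.state in ('running', 'queued'):
        time.sleep(sleep_seconds)
    return task.result

print(wait_for(FakeTask()))   # 'task-result' after roughly three polls
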
def print_summary(self, w=0, objs=(), **print3opts):
'''Print the summary statistics.
*w=0* -- indentation for each line
*objs=()* -- optional, list of objects
*print3options* -- print options, as in Python 3.0
'''
self._printf('%*d bytes%s%s', w, self._total, _SI(self._total), self._incl, **print3opts)
if self._mask:
self._printf('%*d byte aligned', w, self._mask + 1, **print3opts)
self._printf('%*d byte sizeof(void*)', w, _sizeof_Cvoidp, **print3opts)
n = len(objs or ())
if n > 0:
d = self._duplicate or ''
if d:
d = ', %d duplicate' % self._duplicate
self._printf('%*d object%s given%s', w, n, _plural(n), d, **print3opts)
t = _sum([1 for t in _values(self._seen) if t != 0]) # [] for Python 2.2
self._printf('%*d object%s sized', w, t, _plural(t), **print3opts)
if self._excl_d:
t = _sum(_values(self._excl_d))
self._printf('%*d object%s excluded', w, t, _plural(t), **print3opts)
t = _sum(_values(self._seen))
self._printf('%*d object%s seen', w, t, _plural(t), **print3opts)
if self._missed > 0:
self._printf('%*d object%s missed', w, self._missed, _plural(self._missed), **print3opts)
if self._depth > 0:
self._printf('%*d recursion depth', w, self._depth, **print3opts) | Print the summary statistics.
*w=0* -- indentation for each line
*objs=()* -- optional, list of objects
*print3options* -- print options, as in Python 3.0 | Below is the the instruction that describes the task:
### Input:
Print the summary statistics.
*w=0* -- indentation for each line
*objs=()* -- optional, list of objects
*print3options* -- print options, as in Python 3.0
### Response:
def print_summary(self, w=0, objs=(), **print3opts):
'''Print the summary statistics.
*w=0* -- indentation for each line
*objs=()* -- optional, list of objects
*print3options* -- print options, as in Python 3.0
'''
self._printf('%*d bytes%s%s', w, self._total, _SI(self._total), self._incl, **print3opts)
if self._mask:
self._printf('%*d byte aligned', w, self._mask + 1, **print3opts)
self._printf('%*d byte sizeof(void*)', w, _sizeof_Cvoidp, **print3opts)
n = len(objs or ())
if n > 0:
d = self._duplicate or ''
if d:
d = ', %d duplicate' % self._duplicate
self._printf('%*d object%s given%s', w, n, _plural(n), d, **print3opts)
t = _sum([1 for t in _values(self._seen) if t != 0]) # [] for Python 2.2
self._printf('%*d object%s sized', w, t, _plural(t), **print3opts)
if self._excl_d:
t = _sum(_values(self._excl_d))
self._printf('%*d object%s excluded', w, t, _plural(t), **print3opts)
t = _sum(_values(self._seen))
self._printf('%*d object%s seen', w, t, _plural(t), **print3opts)
if self._missed > 0:
self._printf('%*d object%s missed', w, self._missed, _plural(self._missed), **print3opts)
if self._depth > 0:
self._printf('%*d recursion depth', w, self._depth, **print3opts) |
def build_image(self, image_name: str, image_tag: str,
repo_path: Path,
requirements_option: RequirementsOptions,
dependencies: Optional[List[str]]):
""" Builds an image for specific requirements and dependencies, based on the settings.
:param image_name: How the image should be named
:param image_tag: And what tag it should have.
:param repo_path: Path to the cloned repository.
:param requirements_option: How requirements are set in the repository.
:param dependencies: List of dependencies (in the formalized format)
:return: The Image instance.
:rtype: docker.models.images.Image
"""
if self.inherit_image is not None:
return self.build_image_from_inherited_image(image_name, image_tag, repo_path, requirements_option)
if requirements_option == RequirementsOptions.no_requirements:
python_version = self.get_python_version()
# no requirements and no dependencies, just return the basic image with the correct python installed
if dependencies is None:
base_name, base_tag = self.get_python_base(python_version, pull=not self.disable_pull)
image = self.get_image(base_name, base_tag)
# tag the image so ``build_image`` doesn't have to be called next time
image.tag(image_name, image_tag)
return image
# extend the image with correct python by installing the dependencies
def install_dependencies_dockerfile():
base_name, base_tag = self.get_python_base(python_version, pull=not self.disable_pull)
return self.INSTALL_DEPENDENCIES.format(
name=base_name,
tag=base_tag,
dependencies=" ".join(dependencies)
)
self.get_or_build_image(image_name, image_tag, install_dependencies_dockerfile)
return self.get_image(image_name, image_tag)
else: # doesn't have to be here, but the return right above was confusing
def install_requirements_dockerfile():
""" Returns a Dockerfile for installing pip requirements,
                based on an image with installed dependencies (or no extra dependencies)
"""
dependencies_name, dependencies_tag = self.get_image_with_installed_dependencies(image_name,
dependencies)
return self.get_install_requirements_dockerfile(
name=dependencies_name,
tag=dependencies_tag,
repo_path=repo_path,
requirements_option=requirements_option,
)
self.get_or_build_image(image_name, image_tag, install_requirements_dockerfile,
build_context=repo_path.parent, pull=False)
return self.get_image(image_name, image_tag) | Builds an image for specific requirements and dependencies, based on the settings.
:param image_name: How the image should be named
:param image_tag: And what tag it should have.
:param repo_path: Path to the cloned repository.
:param requirements_option: How requirements are set in the repository.
:param dependencies: List of dependencies (in the formalized format)
:return: The Image instance.
:rtype: docker.models.images.Image | Below is the the instruction that describes the task:
### Input:
Builds an image for specific requirements and dependencies, based on the settings.
:param image_name: How the image should be named
:param image_tag: And what tag it should have.
:param repo_path: Path to the cloned repository.
:param requirements_option: How requirements are set in the repository.
:param dependencies: List of dependencies (in the formalized format)
:return: The Image instance.
:rtype: docker.models.images.Image
### Response:
def build_image(self, image_name: str, image_tag: str,
repo_path: Path,
requirements_option: RequirementsOptions,
dependencies: Optional[List[str]]):
""" Builds an image for specific requirements and dependencies, based on the settings.
:param image_name: How the image should be named
:param image_tag: And what tag it should have.
:param repo_path: Path to the cloned repository.
:param requirements_option: How requirements are set in the repository.
:param dependencies: List of dependencies (in the formalized format)
:return: The Image instance.
:rtype: docker.models.images.Image
"""
if self.inherit_image is not None:
return self.build_image_from_inherited_image(image_name, image_tag, repo_path, requirements_option)
if requirements_option == RequirementsOptions.no_requirements:
python_version = self.get_python_version()
# no requirements and no dependencies, just return the basic image with the correct python installed
if dependencies is None:
base_name, base_tag = self.get_python_base(python_version, pull=not self.disable_pull)
image = self.get_image(base_name, base_tag)
# tag the image so ``build_image`` doesn't have to be called next time
image.tag(image_name, image_tag)
return image
# extend the image with correct python by installing the dependencies
def install_dependencies_dockerfile():
base_name, base_tag = self.get_python_base(python_version, pull=not self.disable_pull)
return self.INSTALL_DEPENDENCIES.format(
name=base_name,
tag=base_tag,
dependencies=" ".join(dependencies)
)
self.get_or_build_image(image_name, image_tag, install_dependencies_dockerfile)
return self.get_image(image_name, image_tag)
else: # doesn't have to be here, but the return right above was confusing
def install_requirements_dockerfile():
""" Returns a Dockerfile for installing pip requirements,
                based on an image with installed dependencies (or no extra dependencies)
"""
dependencies_name, dependencies_tag = self.get_image_with_installed_dependencies(image_name,
dependencies)
return self.get_install_requirements_dockerfile(
name=dependencies_name,
tag=dependencies_tag,
repo_path=repo_path,
requirements_option=requirements_option,
)
self.get_or_build_image(image_name, image_tag, install_requirements_dockerfile,
build_context=repo_path.parent, pull=False)
return self.get_image(image_name, image_tag) |
def writeConfig(self):
"""
Persists the value of the :attr:`AbstractJobStore.config` attribute to the
job store, so that it can be retrieved later by other instances of this class.
"""
with self.writeSharedFileStream('config.pickle', isProtected=False) as fileHandle:
pickle.dump(self.__config, fileHandle, pickle.HIGHEST_PROTOCOL) | Persists the value of the :attr:`AbstractJobStore.config` attribute to the
job store, so that it can be retrieved later by other instances of this class. | Below is the the instruction that describes the task:
### Input:
Persists the value of the :attr:`AbstractJobStore.config` attribute to the
job store, so that it can be retrieved later by other instances of this class.
### Response:
def writeConfig(self):
"""
Persists the value of the :attr:`AbstractJobStore.config` attribute to the
job store, so that it can be retrieved later by other instances of this class.
"""
with self.writeSharedFileStream('config.pickle', isProtected=False) as fileHandle:
pickle.dump(self.__config, fileHandle, pickle.HIGHEST_PROTOCOL) |
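The essential pattern is "pickle the config object into whatever stream the job store hands back"; locally the same round trip can be seen with io.BytesIO standing in for writeSharedFileStream (the config payload below is made up).

import io
import pickle

config = {"workDir": "/tmp", "retryCount": 3}   # hypothetical config payload

buffer = io.BytesIO()                           # stand-in for the shared file stream
pickle.dump(config, buffer, pickle.HIGHEST_PROTOCOL)

buffer.seek(0)
print(pickle.load(buffer))   # {'workDir': '/tmp', 'retryCount': 3}
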
def send_email(template_name, context=None, *args, **kwargs):
"""
Send a templated email.
To generate the message used for the email, the method first
searches for an HTML template with the given name
(eg: <template>.html), and renders it with the provided context. The
process is repeated for the plain text message except a 'txt'
extension is used. All other options are forwarded to Django's
``send_mail`` function.
Args:
template_name:
The name of the template to use without an extension. The
extensions ``html`` and ``txt`` are appended to the template
name and then rendered to provide the email content.
context:
A dictionary containing the context to render the message
with. Defaults to an empty dictionary.
Returns:
``1`` if the email is succesfully sent and ``0`` otherwise. The
return values come from Django's ``send_mail`` function.
Throws:
NoTemplatesException:
If neither the HTML nor plain text template can be loaded.
"""
context = context or {}
try:
html = render_to_string(
context=context,
template_name='{}.html'.format(template_name),
)
except TemplateDoesNotExist:
html = ''
try:
text = render_to_string(
context=context,
template_name='{}.txt'.format(template_name),
)
except TemplateDoesNotExist:
text = ''
if not html and not text:
raise NoTemplatesException(template_name)
return mail.send_mail(
*args,
html_message=html,
message=text,
**kwargs
) | Send a templated email.
To generate the message used for the email, the method first
searches for an HTML template with the given name
(eg: <template>.html), and renders it with the provided context. The
process is repeated for the plain text message except a 'txt'
extension is used. All other options are forwarded to Django's
``send_mail`` function.
Args:
template_name:
The name of the template to use without an extension. The
extensions ``html`` and ``txt`` are appended to the template
name and then rendered to provide the email content.
context:
A dictionary containing the context to render the message
with. Defaults to an empty dictionary.
Returns:
``1`` if the email is succesfully sent and ``0`` otherwise. The
return values come from Django's ``send_mail`` function.
Throws:
NoTemplatesException:
If neither the HTML nor plain text template can be loaded. | Below is the the instruction that describes the task:
### Input:
Send a templated email.
To generate the message used for the email, the method first
searches for an HTML template with the given name
(eg: <template>.html), and renders it with the provided context. The
process is repeated for the plain text message except a 'txt'
extension is used. All other options are forwarded to Django's
``send_mail`` function.
Args:
template_name:
The name of the template to use without an extension. The
extensions ``html`` and ``txt`` are appended to the template
name and then rendered to provide the email content.
context:
A dictionary containing the context to render the message
with. Defaults to an empty dictionary.
Returns:
``1`` if the email is succesfully sent and ``0`` otherwise. The
return values come from Django's ``send_mail`` function.
Throws:
NoTemplatesException:
If neither the HTML nor plain text template can be loaded.
### Response:
def send_email(template_name, context=None, *args, **kwargs):
"""
Send a templated email.
To generate the message used for the email, the method first
searches for an HTML template with the given name
(eg: <template>.html), and renders it with the provided context. The
process is repeated for the plain text message except a 'txt'
extension is used. All other options are forwarded to Django's
``send_mail`` function.
Args:
template_name:
The name of the template to use without an extension. The
extensions ``html`` and ``txt`` are appended to the template
name and then rendered to provide the email content.
context:
A dictionary containing the context to render the message
with. Defaults to an empty dictionary.
Returns:
``1`` if the email is succesfully sent and ``0`` otherwise. The
return values come from Django's ``send_mail`` function.
Throws:
NoTemplatesException:
If neither the HTML nor plain text template can be loaded.
"""
context = context or {}
try:
html = render_to_string(
context=context,
template_name='{}.html'.format(template_name),
)
except TemplateDoesNotExist:
html = ''
try:
text = render_to_string(
context=context,
template_name='{}.txt'.format(template_name),
)
except TemplateDoesNotExist:
text = ''
if not html and not text:
raise NoTemplatesException(template_name)
return mail.send_mail(
*args,
html_message=html,
message=text,
**kwargs
) |
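
For illustration, a usage sketch of send_email, assuming a configured Django project in which the templates registration/welcome.html and/or registration/welcome.txt exist; the template name, addresses, and context are placeholders. Because the rendered text is passed to send_mail as the message keyword, the remaining mail options are passed as keywords too:

sent = send_email(
    'registration/welcome',                        # renders registration/welcome.html and/or .txt
    context={'username': 'new_user'},
    subject='Welcome aboard',
    from_email='noreply@example.com',
    recipient_list=['new_user@example.com'],
    fail_silently=False,
)
# sent is 1 when the mail was handed to the backend, 0 otherwise
# (the return value of django.core.mail.send_mail).
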
def aggregate_daily_with_joins(image_coll, start_date, end_date,
agg_type='mean'):
"""Aggregate images by day (using joins)
The primary purpose of this function is to join separate Landsat images
from the same path into a single daily image.
Parameters
----------
image_coll : ee.ImageCollection
Input image collection.
start_date : date, number, string
Start date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
end_date : date, number, string
End date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
agg_type : {'mean'}, optional
Aggregation type (the default is 'mean').
Currently only a 'mean' aggregation type is supported.
Returns
-------
ee.ImageCollection()
Notes
-----
This function should be used to mosaic Landsat images from same path
but different rows.
system:time_start of returned images will be 0 UTC (not the image time).
"""
# Build a collection of time "features" to join to
# "Flatten" dates to 0 UTC time
if start_date and end_date:
date_list = ee.List.sequence(
ee.Date(start_date).millis(), ee.Date(end_date).millis(),
24 * 3600 * 1000)
# elif start_date:
# end_date = ee.Date(ee.Image(image_coll.limit(
# 1, 'system:time_start', False).first()).get('system:time_start')
# end_date = ee.Date(end_date.format('yyyy-MM-dd')).advance(1, 'day')
# # end_date = ee.Date.fromYMD(end_date.get('year'), end_date.get('month'),
# # end_date.get('day')).advance(1, 'day')
# date_list = ee.List.sequence(
# ee.Date(start_date).millis(), end_date.millis(), 24 * 3600 * 1000)
# elif end_date:
# start_date = ee.Date(start_date.format('yyyy-MM-dd')).advance(1, 'day')
# # start_date = ee.Date.fromYMD(
# # start_date.get('year'), start_date.get('month'),
# # start_date.get('day')).advance(1, 'day')
# date_list = ee.List.sequence(
# start_date.millis(), ee.Date(end_date).millis(), 24 * 3600 * 1000)
# else:
# start_date = ee.Date(start_date.format('yyyy-MM-dd')).advance(1, 'day')
# end_date = ee.Date(ee.Image(image_coll.limit(
# 1, 'system:time_start', False).first()).get('system:time_start')
# end_date = ee.Date(end_date.format('yyyy-MM-dd')).advance(1, 'day')
# date_list = ee.List.sequence(
# ee.Date(start_date).millis(), ee.Date(end_date).millis(),
# 24 * 3600 * 1000)
def set_date(time):
return ee.Feature(None, {
'system:index': ee.Date(time).format('yyyyMMdd'),
'system:time_start': ee.Number(time).int64(),
'date': ee.Date(time).format('yyyy-MM-dd')})
# Add a date property to the image collection
def set_image_date(img):
return ee.Image(img.set({
'date': ee.Date(img.get('system:time_start')).format('yyyy-MM-dd')}))
join_coll = ee.FeatureCollection(
ee.Join.saveAll('join').apply(
ee.FeatureCollection(date_list.map(set_date)),
ee.ImageCollection(image_coll.map(set_image_date)),
ee.Filter.equals(leftField='date', rightField='date')))
def aggregate_func(ftr):
# The composite image time will be 0 UTC (not Landsat time)
agg_coll = ee.ImageCollection.fromImages(ftr.get('join'))
# if agg_type.lower() == 'mean':
agg_img = agg_coll.mean()
# elif agg_type.lower() == 'median':
# agg_img = agg_coll.median()
return agg_img.set({
'system:index': ftr.get('system:index'),
'system:time_start': ftr.get('system:time_start'),
'date': ftr.get('date'),
})
return ee.ImageCollection(join_coll.map(aggregate_func)) | Aggregate images by day (using joins)
The primary purpose of this function is to join separate Landsat images
from the same path into a single daily image.
Parameters
----------
image_coll : ee.ImageCollection
Input image collection.
start_date : date, number, string
Start date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
end_date : date, number, string
End date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
agg_type : {'mean'}, optional
Aggregation type (the default is 'mean').
Currently only a 'mean' aggregation type is supported.
Returns
-------
ee.ImageCollection()
Notes
-----
This function should be used to mosaic Landsat images from same path
but different rows.
system:time_start of returned images will be 0 UTC (not the image time). | Below is the the instruction that describes the task:
### Input:
Aggregate images by day (using joins)
The primary purpose of this function is to join separate Landsat images
from the same path into a single daily image.
Parameters
----------
image_coll : ee.ImageCollection
Input image collection.
start_date : date, number, string
Start date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
end_date : date, number, string
End date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
agg_type : {'mean'}, optional
Aggregation type (the default is 'mean').
Currently only a 'mean' aggregation type is supported.
Returns
-------
ee.ImageCollection()
Notes
-----
This function should be used to mosaic Landsat images from same path
but different rows.
system:time_start of returned images will be 0 UTC (not the image time).
### Response:
def aggregate_daily_with_joins(image_coll, start_date, end_date,
agg_type='mean'):
"""Aggregate images by day (using joins)
The primary purpose of this function is to join separate Landsat images
from the same path into a single daily image.
Parameters
----------
image_coll : ee.ImageCollection
Input image collection.
start_date : date, number, string
Start date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
end_date : date, number, string
End date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
agg_type : {'mean'}, optional
Aggregation type (the default is 'mean').
Currently only a 'mean' aggregation type is supported.
Returns
-------
ee.ImageCollection()
Notes
-----
This function should be used to mosaic Landsat images from same path
but different rows.
system:time_start of returned images will be 0 UTC (not the image time).
"""
# Build a collection of time "features" to join to
# "Flatten" dates to 0 UTC time
if start_date and end_date:
date_list = ee.List.sequence(
ee.Date(start_date).millis(), ee.Date(end_date).millis(),
24 * 3600 * 1000)
# elif start_date:
# end_date = ee.Date(ee.Image(image_coll.limit(
# 1, 'system:time_start', False).first()).get('system:time_start')
# end_date = ee.Date(end_date.format('yyyy-MM-dd')).advance(1, 'day')
# # end_date = ee.Date.fromYMD(end_date.get('year'), end_date.get('month'),
# # end_date.get('day')).advance(1, 'day')
# date_list = ee.List.sequence(
# ee.Date(start_date).millis(), end_date.millis(), 24 * 3600 * 1000)
# elif end_date:
# start_date = ee.Date(start_date.format('yyyy-MM-dd')).advance(1, 'day')
# # start_date = ee.Date.fromYMD(
# # start_date.get('year'), start_date.get('month'),
# # start_date.get('day')).advance(1, 'day')
# date_list = ee.List.sequence(
# start_date.millis(), ee.Date(end_date).millis(), 24 * 3600 * 1000)
# else:
# start_date = ee.Date(start_date.format('yyyy-MM-dd')).advance(1, 'day')
# end_date = ee.Date(ee.Image(image_coll.limit(
# 1, 'system:time_start', False).first()).get('system:time_start')
# end_date = ee.Date(end_date.format('yyyy-MM-dd')).advance(1, 'day')
# date_list = ee.List.sequence(
# ee.Date(start_date).millis(), ee.Date(end_date).millis(),
# 24 * 3600 * 1000)
def set_date(time):
return ee.Feature(None, {
'system:index': ee.Date(time).format('yyyyMMdd'),
'system:time_start': ee.Number(time).int64(),
'date': ee.Date(time).format('yyyy-MM-dd')})
# Add a date property to the image collection
def set_image_date(img):
return ee.Image(img.set({
'date': ee.Date(img.get('system:time_start')).format('yyyy-MM-dd')}))
join_coll = ee.FeatureCollection(
ee.Join.saveAll('join').apply(
ee.FeatureCollection(date_list.map(set_date)),
ee.ImageCollection(image_coll.map(set_image_date)),
ee.Filter.equals(leftField='date', rightField='date')))
def aggregate_func(ftr):
# The composite image time will be 0 UTC (not Landsat time)
agg_coll = ee.ImageCollection.fromImages(ftr.get('join'))
# if agg_type.lower() == 'mean':
agg_img = agg_coll.mean()
# elif agg_type.lower() == 'median':
# agg_img = agg_coll.median()
return agg_img.set({
'system:index': ftr.get('system:index'),
'system:time_start': ftr.get('system:time_start'),
'date': ftr.get('date'),
})
return ee.ImageCollection(join_coll.map(aggregate_func)) |
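
For illustration, a minimal usage sketch of aggregate_daily_with_joins. It assumes an authenticated Earth Engine session; the collection ID, date range, and point geometry are example values only.

import ee

ee.Initialize()

landsat_coll = (
    ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')           # example Landsat 8 SR collection
    .filterDate('2017-06-01', '2017-07-01')
    .filterBounds(ee.Geometry.Point(-120.0, 39.0))
)

# One mean image per day at 00:00 UTC; same-day scenes from adjacent rows are averaged.
daily_coll = aggregate_daily_with_joins(
    landsat_coll, start_date='2017-06-01', end_date='2017-07-01')
print(daily_coll.aggregate_array('date').getInfo())
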
def merge_dict(dict1, dict2):
# type: (dict, dict) -> dict
"""Recursively merge dictionaries: dict2 on to dict1. This differs
from dict.update() in that values that are dicts are recursively merged.
Note that only dict value types are merged, not lists, etc.
:param dict dict1: dictionary to merge to
:param dict dict2: dictionary to merge with
:rtype: dict
:return: merged dictionary
"""
if not isinstance(dict1, dict) or not isinstance(dict2, dict):
raise ValueError('dict1 or dict2 is not a dictionary')
result = copy.deepcopy(dict1)
for k, v in dict2.items():
if k in result and isinstance(result[k], dict):
result[k] = merge_dict(result[k], v)
else:
result[k] = copy.deepcopy(v)
return result | Recursively merge dictionaries: dict2 on to dict1. This differs
from dict.update() in that values that are dicts are recursively merged.
Note that only dict value types are merged, not lists, etc.
:param dict dict1: dictionary to merge to
:param dict dict2: dictionary to merge with
:rtype: dict
:return: merged dictionary | Below is the the instruction that describes the task:
### Input:
Recursively merge dictionaries: dict2 on to dict1. This differs
from dict.update() in that values that are dicts are recursively merged.
Note that only dict value types are merged, not lists, etc.
:param dict dict1: dictionary to merge to
:param dict dict2: dictionary to merge with
:rtype: dict
:return: merged dictionary
### Response:
def merge_dict(dict1, dict2):
# type: (dict, dict) -> dict
"""Recursively merge dictionaries: dict2 on to dict1. This differs
from dict.update() in that values that are dicts are recursively merged.
Note that only dict value types are merged, not lists, etc.
:param dict dict1: dictionary to merge to
:param dict dict2: dictionary to merge with
:rtype: dict
:return: merged dictionary
"""
if not isinstance(dict1, dict) or not isinstance(dict2, dict):
raise ValueError('dict1 or dict2 is not a dictionary')
result = copy.deepcopy(dict1)
for k, v in dict2.items():
if k in result and isinstance(result[k], dict):
result[k] = merge_dict(result[k], v)
else:
result[k] = copy.deepcopy(v)
return result |
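
For illustration, a small worked example of the merge behavior of merge_dict (the dictionaries below are made up):

defaults = {'logging': {'level': 'INFO', 'handlers': ['console']}, 'debug': False}
overrides = {'logging': {'level': 'DEBUG'}, 'debug': True}

merged = merge_dict(defaults, overrides)
# merged == {'logging': {'level': 'DEBUG', 'handlers': ['console']}, 'debug': True}
# Unlike defaults.update(overrides), the nested 'logging' dict is merged rather than
# replaced, and neither input is modified because deep copies are taken throughout.
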
def __threshold(self, ymx_i):
"""
Calculates the difference threshold for a
given difference local maximum.
Parameters
-----------
ymx_i : float
The normalized y value of a local maximum.
"""
return ymx_i - (self.S * np.diff(self.xsn).mean()) | Calculates the difference threshold for a
given difference local maximum.
Parameters
-----------
ymx_i : float
The normalized y value of a local maximum. | Below is the the instruction that describes the task:
### Input:
Calculates the difference threshold for a
given difference local maximum.
Parameters
-----------
ymx_i : float
The normalized y value of a local maximum.
### Response:
def __threshold(self, ymx_i):
"""
Calculates the difference threshold for a
given difference local maximum.
Parameters
-----------
ymx_i : float
The normalized y value of a local maximum.
"""
return ymx_i - (self.S * np.diff(self.xsn).mean()) |
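
For illustration, a worked numeric sketch of the threshold formula, with made-up values standing in for the instance attributes (self.xsn is the normalized x array and self.S the sensitivity parameter of the Kneedle-style detector):

import numpy as np

S = 1.0
xsn = np.linspace(0.0, 1.0, 11)     # normalized x values, uniform spacing of 0.1
ymx_i = 0.85                        # normalized y value of a difference-curve maximum

threshold = ymx_i - (S * np.diff(xsn).mean())
print(round(threshold, 2))          # 0.75; roughly speaking, a knee is reported at this
                                    # maximum once the difference curve later drops below it
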
def find_runner(program):
"""Return a command that will run program.
Args:
program: The string name of the program to try to run.
Returns:
commandline list of strings to run the program (eg. with subprocess.call()) or None
"""
if os.path.isfile(program) and not os.access(program, os.X_OK):
# program is a path to a non-executable file
try:
opened = open(program)
except PermissionError:
return None
first_line = opened.readline().strip()
if first_line.startswith('#!'):
return shlex.split(first_line[2:])
if program.endswith('.py'):
return [sys.executable]
return None | Return a command that will run program.
Args:
program: The string name of the program to try to run.
Returns:
commandline list of strings to run the program (eg. with subprocess.call()) or None | Below is the the instruction that describes the task:
### Input:
Return a command that will run program.
Args:
program: The string name of the program to try to run.
Returns:
commandline list of strings to run the program (eg. with subprocess.call()) or None
### Response:
def find_runner(program):
"""Return a command that will run program.
Args:
program: The string name of the program to try to run.
Returns:
commandline list of strings to run the program (eg. with subprocess.call()) or None
"""
if os.path.isfile(program) and not os.access(program, os.X_OK):
# program is a path to a non-executable file
try:
opened = open(program)
except PermissionError:
return None
first_line = opened.readline().strip()
if first_line.startswith('#!'):
return shlex.split(first_line[2:])
if program.endswith('.py'):
return [sys.executable]
return None |
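
For illustration, a self-contained sketch exercising the main branches of find_runner; the temporary file stands in for a real script.

import os
import tempfile

# A non-executable shell script with a shebang: the shebang line decides the runner.
with tempfile.NamedTemporaryFile('w', suffix='.sh', delete=False) as handle:
    handle.write('#!/usr/bin/env bash\necho hello\n')
    script_path = handle.name

print(find_runner(script_path))    # ['/usr/bin/env', 'bash']
print(find_runner('missing.py'))   # None: the path is not an existing file
# An existing, non-executable .py file without a shebang yields [sys.executable].
os.unlink(script_path)
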
def add_path_argument(cls, group, argname, dest=None, help_=None):
"""
Subclasses may call this to expose a path argument.
Args:
group: arparse.ArgumentGroup, the extension argument group
argname: str, the name of the argument, will be namespaced.
dest: str, similar to the `dest` argument of
`argparse.ArgumentParser.add_argument`, will be namespaced.
help_: str, similar to the `help` argument of
`argparse.ArgumentParser.add_argument`.
"""
prefixed = '%s-%s' % (cls.argument_prefix, argname)
if dest is None:
dest = prefixed.replace('-', '_')
final_dest = dest[len(cls.argument_prefix) + 1:]
else:
final_dest = dest
dest = '%s_%s' % (cls.argument_prefix, dest)
group.add_argument('--%s' % prefixed, action='store',
dest=dest, help=help_)
cls.path_arguments[dest] = final_dest | Subclasses may call this to expose a path argument.
Args:
group: arparse.ArgumentGroup, the extension argument group
argname: str, the name of the argument, will be namespaced.
dest: str, similar to the `dest` argument of
`argparse.ArgumentParser.add_argument`, will be namespaced.
help_: str, similar to the `help` argument of
`argparse.ArgumentParser.add_argument`. | Below is the the instruction that describes the task:
### Input:
Subclasses may call this to expose a path argument.
Args:
group: arparse.ArgumentGroup, the extension argument group
argname: str, the name of the argument, will be namespaced.
dest: str, similar to the `dest` argument of
`argparse.ArgumentParser.add_argument`, will be namespaced.
help_: str, similar to the `help` argument of
`argparse.ArgumentParser.add_argument`.
### Response:
def add_path_argument(cls, group, argname, dest=None, help_=None):
"""
Subclasses may call this to expose a path argument.
Args:
group: arparse.ArgumentGroup, the extension argument group
argname: str, the name of the argument, will be namespaced.
dest: str, similar to the `dest` argument of
`argparse.ArgumentParser.add_argument`, will be namespaced.
help_: str, similar to the `help` argument of
`argparse.ArgumentParser.add_argument`.
"""
prefixed = '%s-%s' % (cls.argument_prefix, argname)
if dest is None:
dest = prefixed.replace('-', '_')
final_dest = dest[len(cls.argument_prefix) + 1:]
else:
final_dest = dest
dest = '%s_%s' % (cls.argument_prefix, dest)
group.add_argument('--%s' % prefixed, action='store',
dest=dest, help=help_)
cls.path_arguments[dest] = final_dest |
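
For illustration, a sketch of how a subclass of this extension base class might expose a path option. SitemapExtension and the surrounding argument-parsing flow are assumptions; only the namespacing behavior noted in the comments follows from the code above.

import argparse

parser = argparse.ArgumentParser()
group = parser.add_argument_group('Sitemap extension')

# Assuming SitemapExtension sets argument_prefix = 'sitemap', this registers
# --sitemap-file, stores the value on args.sitemap_file, and records
# {'sitemap_file': 'file'} in SitemapExtension.path_arguments so the extension
# can later resolve the raw value into an absolute path.
SitemapExtension.add_path_argument(group, 'file', help_='Path to the sitemap file')

args = parser.parse_args(['--sitemap-file', 'docs/sitemap.txt'])
print(args.sitemap_file)                   # 'docs/sitemap.txt'
print(SitemapExtension.path_arguments)     # {'sitemap_file': 'file'}
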
def encrypt_message(data_to_encrypt, enc_alg, encryption_cert):
"""Function encrypts data and returns the generated ASN.1
:param data_to_encrypt: A byte string of the data to be encrypted
:param enc_alg: The algorithm to be used for encrypting the data
:param encryption_cert: The certificate to be used for encrypting the data
:return: A CMS ASN.1 byte string of the encrypted data.
"""
enc_alg_list = enc_alg.split('_')
cipher, key_length, mode = enc_alg_list[0], enc_alg_list[1], enc_alg_list[2]
enc_alg_asn1, key, encrypted_content = None, None, None
# Generate the symmetric encryption key and encrypt the message
if cipher == 'tripledes':
key = util.rand_bytes(int(key_length)//8)
iv, encrypted_content = symmetric.tripledes_cbc_pkcs5_encrypt(
key, data_to_encrypt, None)
enc_alg_asn1 = algos.EncryptionAlgorithm({
'algorithm': algos.EncryptionAlgorithmId('tripledes_3key'),
'parameters': cms.OctetString(iv)
})
# Encrypt the key and build the ASN.1 message
encrypted_key = asymmetric.rsa_pkcs1v15_encrypt(encryption_cert, key)
return cms.ContentInfo({
'content_type': cms.ContentType('enveloped_data'),
'content': cms.EnvelopedData({
'version': cms.CMSVersion('v0'),
'recipient_infos': [
cms.KeyTransRecipientInfo({
'version': cms.CMSVersion('v0'),
'rid': cms.RecipientIdentifier({
'issuer_and_serial_number': cms.IssuerAndSerialNumber({
'issuer': encryption_cert.asn1[
'tbs_certificate']['issuer'],
'serial_number': encryption_cert.asn1[
'tbs_certificate']['serial_number']
})
}),
'key_encryption_algorithm': cms.KeyEncryptionAlgorithm({
'algorithm': cms.KeyEncryptionAlgorithmId('rsa')
}),
'encrypted_key': cms.OctetString(encrypted_key)
})
],
'encrypted_content_info': cms.EncryptedContentInfo({
'content_type': cms.ContentType('data'),
'content_encryption_algorithm': enc_alg_asn1,
'encrypted_content': encrypted_content
})
})
}).dump() | Function encrypts data and returns the generated ASN.1
:param data_to_encrypt: A byte string of the data to be encrypted
:param enc_alg: The algorithm to be used for encrypting the data
:param encryption_cert: The certificate to be used for encrypting the data
:return: A CMS ASN.1 byte string of the encrypted data. | Below is the the instruction that describes the task:
### Input:
Function encrypts data and returns the generated ASN.1
:param data_to_encrypt: A byte string of the data to be encrypted
:param enc_alg: The algorithm to be used for encrypting the data
:param encryption_cert: The certificate to be used for encrypting the data
:return: A CMS ASN.1 byte string of the encrypted data.
### Response:
def encrypt_message(data_to_encrypt, enc_alg, encryption_cert):
"""Function encrypts data and returns the generated ASN.1
:param data_to_encrypt: A byte string of the data to be encrypted
:param enc_alg: The algorithm to be used for encrypting the data
:param encryption_cert: The certificate to be used for encrypting the data
:return: A CMS ASN.1 byte string of the encrypted data.
"""
enc_alg_list = enc_alg.split('_')
cipher, key_length, mode = enc_alg_list[0], enc_alg_list[1], enc_alg_list[2]
enc_alg_asn1, key, encrypted_content = None, None, None
# Generate the symmetric encryption key and encrypt the message
if cipher == 'tripledes':
key = util.rand_bytes(int(key_length)//8)
iv, encrypted_content = symmetric.tripledes_cbc_pkcs5_encrypt(
key, data_to_encrypt, None)
enc_alg_asn1 = algos.EncryptionAlgorithm({
'algorithm': algos.EncryptionAlgorithmId('tripledes_3key'),
'parameters': cms.OctetString(iv)
})
# Encrypt the key and build the ASN.1 message
encrypted_key = asymmetric.rsa_pkcs1v15_encrypt(encryption_cert, key)
return cms.ContentInfo({
'content_type': cms.ContentType('enveloped_data'),
'content': cms.EnvelopedData({
'version': cms.CMSVersion('v0'),
'recipient_infos': [
cms.KeyTransRecipientInfo({
'version': cms.CMSVersion('v0'),
'rid': cms.RecipientIdentifier({
'issuer_and_serial_number': cms.IssuerAndSerialNumber({
'issuer': encryption_cert.asn1[
'tbs_certificate']['issuer'],
'serial_number': encryption_cert.asn1[
'tbs_certificate']['serial_number']
})
}),
'key_encryption_algorithm': cms.KeyEncryptionAlgorithm({
'algorithm': cms.KeyEncryptionAlgorithmId('rsa')
}),
'encrypted_key': cms.OctetString(encrypted_key)
})
],
'encrypted_content_info': cms.EncryptedContentInfo({
'content_type': cms.ContentType('data'),
'content_encryption_algorithm': enc_alg_asn1,
'encrypted_content': encrypted_content
})
})
}).dump() |
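
For illustration, a usage sketch of encrypt_message with the oscrypto/asn1crypto stack it relies on; the certificate path and payload are placeholders, and 'tripledes_192_cbc' matches the only cipher branch implemented above.

from oscrypto import asymmetric

# Load the receiving partner's public certificate (placeholder path).
receiver_cert = asymmetric.load_certificate('partner_public.pem')

enveloped = encrypt_message(
    data_to_encrypt=b'EDIFACT payload goes here',
    enc_alg='tripledes_192_cbc',          # cipher_keylength_mode, as parsed above
    encryption_cert=receiver_cert,
)
# 'enveloped' is a DER-encoded CMS EnvelopedData structure, ready to be wrapped
# in an S/MIME body (e.g. application/pkcs7-mime) for the partner.
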
def _get_answer_spans(answer_list, answer_start_list):
"""Find all answer spans from the context, returning start_index and end_index
:param list[str] answer_list: List of all answers
:param list[int] answer_start_list: List of all answers' start indices
Returns
-------
List[Tuple]
list of Tuple(answer_start_index answer_end_index) per question
"""
return [(answer_start_list[i], answer_start_list[i] + len(answer))
for i, answer in enumerate(answer_list)] | Find all answer spans from the context, returning start_index and end_index
:param list[str] answer_list: List of all answers
:param list[int] answer_start_list: List of all answers' start indices
Returns
-------
List[Tuple]
list of Tuple(answer_start_index answer_end_index) per question | Below is the the instruction that describes the task:
### Input:
Find all answer spans from the context, returning start_index and end_index
:param list[str] answer_list: List of all answers
:param list[int] answer_start_list: List of all answers' start indices
Returns
-------
List[Tuple]
list of Tuple(answer_start_index answer_end_index) per question
### Response:
def _get_answer_spans(answer_list, answer_start_list):
"""Find all answer spans from the context, returning start_index and end_index
:param list[str] answer_list: List of all answers
:param list[int] answer_start_list: List of all answers' start indices
Returns
-------
List[Tuple]
list of Tuple(answer_start_index answer_end_index) per question
"""
return [(answer_start_list[i], answer_start_list[i] + len(answer))
for i, answer in enumerate(answer_list)] |
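
For illustration, a small worked example of _get_answer_spans with made-up SQuAD-style inputs:

context = 'The quick brown fox jumps over the lazy dog.'
answers = ['quick brown fox', 'lazy dog']
answer_starts = [4, 35]

spans = _get_answer_spans(answers, answer_starts)
# spans == [(4, 19), (35, 43)]
# context[4:19] == 'quick brown fox' and context[35:43] == 'lazy dog',
# i.e. each tuple is (start_index, exclusive_end_index) into the context string.
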
def setup_and_load_epoch(hparams, data_dir, which_epoch_data=None):
"""Load T2TGymEnv with data from one epoch.
Args:
hparams: hparams.
data_dir: data directory.
which_epoch_data: data from which epoch to load.
Returns:
env.
"""
t2t_env = rl_utils.setup_env(
hparams, batch_size=hparams.real_batch_size,
max_num_noops=hparams.max_num_noops
)
# Load data.
if which_epoch_data is not None:
if which_epoch_data == "last":
which_epoch_data = infer_last_epoch_num(data_dir)
assert isinstance(which_epoch_data, int), \
"{}".format(type(which_epoch_data))
t2t_env.start_new_epoch(which_epoch_data, data_dir)
else:
t2t_env.start_new_epoch(-999)
return t2t_env | Load T2TGymEnv with data from one epoch.
Args:
hparams: hparams.
data_dir: data directory.
which_epoch_data: data from which epoch to load.
Returns:
env. | Below is the the instruction that describes the task:
### Input:
Load T2TGymEnv with data from one epoch.
Args:
hparams: hparams.
data_dir: data directory.
which_epoch_data: data from which epoch to load.
Returns:
env.
### Response:
def setup_and_load_epoch(hparams, data_dir, which_epoch_data=None):
"""Load T2TGymEnv with data from one epoch.
Args:
hparams: hparams.
data_dir: data directory.
which_epoch_data: data from which epoch to load.
Returns:
env.
"""
t2t_env = rl_utils.setup_env(
hparams, batch_size=hparams.real_batch_size,
max_num_noops=hparams.max_num_noops
)
# Load data.
if which_epoch_data is not None:
if which_epoch_data == "last":
which_epoch_data = infer_last_epoch_num(data_dir)
assert isinstance(which_epoch_data, int), \
"{}".format(type(which_epoch_data))
t2t_env.start_new_epoch(which_epoch_data, data_dir)
else:
t2t_env.start_new_epoch(-999)
return t2t_env |
def set_checks(self, checks, position=None):
"""Sets the checks at the position."""
if position is None:
position = self.position
self.checkdefs[position][0] = checks | Sets the checks at the position. | Below is the the instruction that describes the task:
### Input:
Sets the checks at the position.
### Response:
def set_checks(self, checks, position=None):
"""Sets the checks at the position."""
if position is None:
position = self.position
self.checkdefs[position][0] = checks |
def show_type(cls, result):
"""
:param TryHaskell.Result result: Parse result of JSON data.
:rtype: str|unicode
"""
if result.ok:
return ' :: '.join([result.expr, result.type])
return result.value | :param TryHaskell.Result result: Parse result of JSON data.
:rtype: str|unicode | Below is the the instruction that describes the task:
### Input:
:param TryHaskell.Result result: Parse result of JSON data.
:rtype: str|unicode
### Response:
def show_type(cls, result):
"""
:param TryHaskell.Result result: Parse result of JSON data.
:rtype: str|unicode
"""
if result.ok:
return ' :: '.join([result.expr, result.type])
return result.value |
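
For illustration, a sketch of both branches of show_type; the namedtuple below is a stand-in for TryHaskell.Result, which is assumed to expose ok, expr, type and value fields, and the TryHaskell class above is assumed to be importable.

from collections import namedtuple

Result = namedtuple('Result', ['ok', 'expr', 'type', 'value'])   # stand-in for TryHaskell.Result

good = Result(ok=True, expr='1 + 1', type='Num a => a', value='2')
print(TryHaskell.show_type(good))    # 1 + 1 :: Num a => a

bad = Result(ok=False, expr='', type='', value='parse error on input')
print(TryHaskell.show_type(bad))     # parse error on input
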
def reset_mode(self):
"""Send a Reset command to set the operation mode to 0."""
self.command(0x18, b"\x01", timeout=0.1)
self.transport.write(Chipset.ACK)
time.sleep(0.010) | Send a Reset command to set the operation mode to 0. | Below is the the instruction that describes the task:
### Input:
Send a Reset command to set the operation mode to 0.
### Response:
def reset_mode(self):
"""Send a Reset command to set the operation mode to 0."""
self.command(0x18, b"\x01", timeout=0.1)
self.transport.write(Chipset.ACK)
time.sleep(0.010) |
def save_admin_log(build, **kwargs):
"""Saves an action to the admin log."""
message = kwargs.pop('message', None)
release = kwargs.pop('release', None)
run = kwargs.pop('run', None)
if not len(kwargs) == 1:
raise TypeError('Must specify a LOG_TYPE argument')
log_enum = kwargs.keys()[0]
log_type = getattr(models.AdminLog, log_enum.upper(), None)
if not log_type:
raise TypeError('Bad log_type argument: %s' % log_enum)
if current_user.is_anonymous():
user_id = None
else:
user_id = current_user.get_id()
log = models.AdminLog(
build_id=build.id,
log_type=log_type,
message=message,
user_id=user_id)
if release:
log.release_id = release.id
if run:
log.run_id = run.id
log.release_id = run.release_id
db.session.add(log) | Saves an action to the admin log. | Below is the the instruction that describes the task:
### Input:
Saves an action to the admin log.
### Response:
def save_admin_log(build, **kwargs):
"""Saves an action to the admin log."""
message = kwargs.pop('message', None)
release = kwargs.pop('release', None)
run = kwargs.pop('run', None)
if not len(kwargs) == 1:
raise TypeError('Must specify a LOG_TYPE argument')
log_enum = kwargs.keys()[0]
log_type = getattr(models.AdminLog, log_enum.upper(), None)
if not log_type:
raise TypeError('Bad log_type argument: %s' % log_enum)
if current_user.is_anonymous():
user_id = None
else:
user_id = current_user.get_id()
log = models.AdminLog(
build_id=build.id,
log_type=log_type,
message=message,
user_id=user_id)
if release:
log.release_id = release.id
if run:
log.run_id = run.id
log.release_id = run.release_id
db.session.add(log) |
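
For illustration, a call sketch of save_admin_log from inside a Flask request handler. The 'release_good' keyword is a made-up log type: whatever keyword is used must correspond to a constant on models.AdminLog (here AdminLog.RELEASE_GOOD), since the keyword name is upper-cased and looked up on that model.

# build and release are ORM objects already loaded for the current request.
save_admin_log(
    build,
    release=release,
    message='Marked release as good after manual review',
    release_good=True,            # exactly one such log-type keyword is required
)
db.session.commit()               # save_admin_log only adds the row to the session
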
def _print_MatMul(self, expr):
"""
Matrix multiplication printer. The sympy one turns everything into a
dot product without type-checking.
"""
from sympy import MatrixExpr
links = []
for i, j in zip(expr.args[1:], expr.args[:-1]):
if isinstance(i, MatrixExpr) and isinstance(j, MatrixExpr):
links.append(').dot(')
else:
links.append('*')
printouts = [self._print(i) for i in expr.args]
result = [printouts[0]]
for link, printout in zip(links, printouts[1:]):
result.extend([link, printout])
return '({0})'.format(''.join(result)) | Matrix multiplication printer. The sympy one turns everything into a
dot product without type-checking. | Below is the the instruction that describes the task:
### Input:
Matrix multiplication printer. The sympy one turns everything into a
dot product without type-checking.
### Response:
def _print_MatMul(self, expr):
"""
Matrix multiplication printer. The sympy one turns everything into a
dot product without type-checking.
"""
from sympy import MatrixExpr
links = []
for i, j in zip(expr.args[1:], expr.args[:-1]):
if isinstance(i, MatrixExpr) and isinstance(j, MatrixExpr):
links.append(').dot(')
else:
links.append('*')
printouts = [self._print(i) for i in expr.args]
result = [printouts[0]]
for link, printout in zip(links, printouts[1:]):
result.extend([link, printout])
return '({0})'.format(''.join(result)) |
def extract_root_meta(cls, serializer, resource):
"""
Calls a `get_root_meta` function on a serializer, if it exists.
"""
many = False
if hasattr(serializer, 'child'):
many = True
serializer = serializer.child
data = {}
if getattr(serializer, 'get_root_meta', None):
json_api_meta = serializer.get_root_meta(resource, many)
assert isinstance(json_api_meta, dict), 'get_root_meta must return a dict'
data.update(json_api_meta)
return data | Calls a `get_root_meta` function on a serializer, if it exists. | Below is the the instruction that describes the task:
### Input:
Calls a `get_root_meta` function on a serializer, if it exists.
### Response:
def extract_root_meta(cls, serializer, resource):
"""
Calls a `get_root_meta` function on a serializer, if it exists.
"""
many = False
if hasattr(serializer, 'child'):
many = True
serializer = serializer.child
data = {}
if getattr(serializer, 'get_root_meta', None):
json_api_meta = serializer.get_root_meta(resource, many)
assert isinstance(json_api_meta, dict), 'get_root_meta must return a dict'
data.update(json_api_meta)
return data |
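
For illustration, a sketch of the serializer hook that extract_root_meta looks for. The serializer and meta payload are assumptions; only the get_root_meta(resource, many) signature and the requirement that it return a dict come from the code above.

from rest_framework import serializers

class OrderSerializer(serializers.Serializer):
    number = serializers.CharField()

    def get_root_meta(self, resource, many):
        # 'many' is True when a list of orders was serialized.
        if many:
            return {'count': len(resource)}
        return {'generated_by': 'orders-api'}

# extract_root_meta(OrderSerializer(queryset, many=True), queryset) would then
# return {'count': <number of orders>} for the top-level "meta" key of the response.
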
def check_config(data):
"""Check if metadata is right
TODO(crow): check more
"""
is_right = True
if "title" not in data:
logging.error("No 'title' in _config.yml")
is_right = False
return is_right | Check if metadata is right
TODO(crow): check more | Below is the the instruction that describes the task:
### Input:
Check if metadata is right
TODO(crow): check more
### Response:
def check_config(data):
"""Check if metadata is right
TODO(crow): check more
"""
is_right = True
if "title" not in data:
logging.error("No 'title' in _config.yml")
is_right = False
return is_right |
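
For illustration, a tiny usage sketch of check_config (the metadata dicts are made up):

import logging

logging.basicConfig(level=logging.ERROR)

print(check_config({'title': 'My Static Site', 'author': 'crow'}))   # True
print(check_config({'author': 'crow'}))   # False, after logging "No 'title' in _config.yml"
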
def _get_date(day=None, month=None, year=None):
"""Returns a datetime object with optional params or today."""
now = datetime.date.today()
if day is None:
return now
try:
return datetime.date(
day=int(day),
month=int(month or now.month),
year=int(year or now.year),
)
except ValueError as error:
print("error: {0}".format(error), file=sys.stderr) | Returns a datetime object with optional params or today. | Below is the the instruction that describes the task:
### Input:
Returns a datetime object with optional params or today.
### Response:
def _get_date(day=None, month=None, year=None):
"""Returns a datetime object with optional params or today."""
now = datetime.date.today()
if day is None:
return now
try:
return datetime.date(
day=int(day),
month=int(month or now.month),
year=int(year or now.year),
)
except ValueError as error:
print("error: {0}".format(error), file=sys.stderr) |
def to_dict(self):
"""
Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object
"""
input_dict = super(Standardize, self)._save_to_input_dict()
input_dict["class"] = "GPy.util.normalizer.Standardize"
if self.mean is not None:
input_dict["mean"] = self.mean.tolist()
input_dict["std"] = self.std.tolist()
return input_dict | Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object | Below is the the instruction that describes the task:
### Input:
Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object
### Response:
def to_dict(self):
"""
Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object
"""
input_dict = super(Standardize, self)._save_to_input_dict()
input_dict["class"] = "GPy.util.normalizer.Standardize"
if self.mean is not None:
input_dict["mean"] = self.mean.tolist()
input_dict["std"] = self.std.tolist()
return input_dict |
def set_tag(self, project, repository, tag_name, commit_revision, description=None):
"""
Creates a tag using the information provided in the {@link RestCreateTagRequest request}
The authenticated user must have REPO_WRITE permission for the context repository to call this resource.
:param project:
:param repository:
:param tag_name:
:param commit_revision: commit hash
:param description: OPTIONAL:
:return:
"""
url = 'rest/api/1.0/projects/{project}/repos/{repository}/tags'.format(project=project,
repository=repository)
body = {}
if tag_name is not None:
body['name'] = tag_name
if tag_name is not None:
body['startPoint'] = commit_revision
if tag_name is not None:
body['message'] = description
return self.post(url, data=body) | Creates a tag using the information provided in the {@link RestCreateTagRequest request}
The authenticated user must have REPO_WRITE permission for the context repository to call this resource.
:param project:
:param repository:
:param tag_name:
:param commit_revision: commit hash
:param description: OPTIONAL:
:return: | Below is the the instruction that describes the task:
### Input:
Creates a tag using the information provided in the {@link RestCreateTagRequest request}
The authenticated user must have REPO_WRITE permission for the context repository to call this resource.
:param project:
:param repository:
:param tag_name:
:param commit_revision: commit hash
:param description: OPTIONAL:
:return:
### Response:
def set_tag(self, project, repository, tag_name, commit_revision, description=None):
"""
Creates a tag using the information provided in the {@link RestCreateTagRequest request}
The authenticated user must have REPO_WRITE permission for the context repository to call this resource.
:param project:
:param repository:
:param tag_name:
:param commit_revision: commit hash
:param description: OPTIONAL:
:return:
"""
url = 'rest/api/1.0/projects/{project}/repos/{repository}/tags'.format(project=project,
repository=repository)
body = {}
if tag_name is not None:
body['name'] = tag_name
if tag_name is not None:
body['startPoint'] = commit_revision
if tag_name is not None:
body['message'] = description
return self.post(url, data=body) |
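
For illustration, a usage sketch of set_tag through the Bitbucket client this method belongs to; the URL, credentials, project/repository names and commit hash are placeholders.

from atlassian import Bitbucket

bitbucket = Bitbucket(
    url='https://bitbucket.example.com',
    username='build-bot',
    password='app-password',
)

bitbucket.set_tag(
    project='PROJ',
    repository='my-repo',
    tag_name='release-1.4.0',
    commit_revision='a1b2c3d4e5f60718293a4b5c6d7e8f9012345678',
    description='Release 1.4.0',
)
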