code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k) |
---|---|---|
def find_usage(self):
"""
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
"""
logger.debug("Checking usage for service %s", self.service_name)
for lim in self.limits.values():
lim._reset_usage()
try:
self.connect()
resp = self.conn.get_send_quota()
except EndpointConnectionError as ex:
logger.warning('Skipping SES: %s', str(ex))
return
except ClientError as ex:
if ex.response['Error']['Code'] in ['AccessDenied', '503']:
logger.warning('Skipping SES: %s', ex)
return
raise
self.limits['Daily sending quota']._add_current_usage(
resp['SentLast24Hours']
)
self._have_usage = True
logger.debug("Done checking usage.") | Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`. | Below is the instruction that describes the task:
### Input:
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
### Response:
def find_usage(self):
"""
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
"""
logger.debug("Checking usage for service %s", self.service_name)
for lim in self.limits.values():
lim._reset_usage()
try:
self.connect()
resp = self.conn.get_send_quota()
except EndpointConnectionError as ex:
logger.warning('Skipping SES: %s', str(ex))
return
except ClientError as ex:
if ex.response['Error']['Code'] in ['AccessDenied', '503']:
logger.warning('Skipping SES: %s', ex)
return
raise
self.limits['Daily sending quota']._add_current_usage(
resp['SentLast24Hours']
)
self._have_usage = True
logger.debug("Done checking usage.") |
def upload_by_gemfury(self):
"""
upload to gemfury
:return:
"""
check_call_no_output([
'{}'.format(self.python),
'setup.py',
'sdist',
])
filename = self.get_package_filename()
# The command line is the one recommended by gemfury at
# https://manage.fury.io/dashboard/[username]/push
check_call_no_output([
'fury',
'push',
filename,
'--as={}'.format(self.gemfury_user),
]) | upload to gemfury
:return: | Below is the instruction that describes the task:
### Input:
upload to gemfury
:return:
### Response:
def upload_by_gemfury(self):
"""
upload to gemfury
:return:
"""
check_call_no_output([
'{}'.format(self.python),
'setup.py',
'sdist',
])
filename = self.get_package_filename()
# The command line is the one recommended by gemfury at
# https://manage.fury.io/dashboard/[username]/push
check_call_no_output([
'fury',
'push',
filename,
'--as={}'.format(self.gemfury_user),
]) |
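`check_call_no_output` is referenced but never defined in this row. A plausible minimal implementation (an assumption, not the project's actual helper) wraps `subprocess.check_call` and silences the command's output:

```python
import subprocess

def check_call_no_output(args):
    # Hypothetical helper: run the command, raise CalledProcessError on a
    # non-zero exit status, and discard stdout/stderr instead of echoing them.
    subprocess.check_call(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
```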
def ToBitmap(self, x: int = 0, y: int = 0, width: int = 0, height: int = 0) -> Bitmap:
"""
Capture control to a Bitmap object.
x, y: int, the point in control's internal position(from 0,0).
width, height: int, image's width and height from x, y, use 0 for entire area.
If width(or height) < 0, image size will be control's width(or height) - width(or height).
"""
bitmap = Bitmap()
bitmap.FromControl(self, x, y, width, height)
return bitmap | Capture control to a Bitmap object.
x, y: int, the point in control's internal position(from 0,0).
width, height: int, image's width and height from x, y, use 0 for entire area.
If width(or height) < 0, image size will be control's width(or height) - width(or height). | Below is the instruction that describes the task:
### Input:
Capture control to a Bitmap object.
x, y: int, the point in control's internal position(from 0,0).
width, height: int, image's width and height from x, y, use 0 for entire area.
If width(or height) < 0, image size will be control's width(or height) - width(or height).
### Response:
def ToBitmap(self, x: int = 0, y: int = 0, width: int = 0, height: int = 0) -> Bitmap:
"""
Capture control to a Bitmap object.
x, y: int, the point in control's internal position(from 0,0).
width, height: int, image's width and height from x, y, use 0 for entire area.
If width(or height) < 0, image size will be control's width(or height) - width(or height).
"""
bitmap = Bitmap()
bitmap.FromControl(self, x, y, width, height)
return bitmap |
def _generate_password():
"""Create a random password
The password is made by taking a uuid and passing it through sha1sum.
We may change this in future to gain more entropy.
This is based on the tripleo command os-make-password
"""
uuid_str = six.text_type(uuid.uuid4()).encode("UTF-8")
return hashlib.sha1(uuid_str).hexdigest() | Create a random password
The password is made by taking a uuid and passing it through sha1sum.
We may change this in future to gain more entropy.
This is based on the tripleo command os-make-password | Below is the instruction that describes the task:
### Input:
Create a random password
The password is made by taking a uuid and passing it through sha1sum.
We may change this in future to gain more entropy.
This is based on the tripleo command os-make-password
### Response:
def _generate_password():
"""Create a random password
The password is made by taking a uuid and passing it through sha1sum.
We may change this in future to gain more entropy.
This is based on the tripleo command os-make-password
"""
uuid_str = six.text_type(uuid.uuid4()).encode("UTF-8")
return hashlib.sha1(uuid_str).hexdigest() |
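The result of `_generate_password` is always a 40-character hexadecimal string (the SHA-1 digest of a random UUID). A quick standalone check, using the standard library directly instead of `six`:

```python
import hashlib
import uuid

uuid_str = str(uuid.uuid4()).encode("UTF-8")
password = hashlib.sha1(uuid_str).hexdigest()
assert len(password) == 40 and all(c in "0123456789abcdef" for c in password)
```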
def boost(self, dtrain, grad, hess):
"""
Boost the booster for one iteration, with customized gradient statistics.
Parameters
----------
dtrain : DMatrix
The training DMatrix.
grad : list
The first order of gradient.
hess : list
The second order of gradient.
"""
if len(grad) != len(hess):
raise ValueError('grad / hess length mismatch: {} / {}'.format(len(grad), len(hess)))
if not isinstance(dtrain, DMatrix):
raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__))
self._validate_features(dtrain)
_check_call(_LIB.XGBoosterBoostOneIter(self.handle, dtrain.handle,
c_array(ctypes.c_float, grad),
c_array(ctypes.c_float, hess),
len(grad))) | Boost the booster for one iteration, with customized gradient statistics.
Parameters
----------
dtrain : DMatrix
The training DMatrix.
grad : list
The first order of gradient.
hess : list
The second order of gradient. | Below is the instruction that describes the task:
### Input:
Boost the booster for one iteration, with customized gradient statistics.
Parameters
----------
dtrain : DMatrix
The training DMatrix.
grad : list
The first order of gradient.
hess : list
The second order of gradient.
### Response:
def boost(self, dtrain, grad, hess):
"""
Boost the booster for one iteration, with customized gradient statistics.
Parameters
----------
dtrain : DMatrix
The training DMatrix.
grad : list
The first order of gradient.
hess : list
The second order of gradient.
"""
if len(grad) != len(hess):
raise ValueError('grad / hess length mismatch: {} / {}'.format(len(grad), len(hess)))
if not isinstance(dtrain, DMatrix):
raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__))
self._validate_features(dtrain)
_check_call(_LIB.XGBoosterBoostOneIter(self.handle, dtrain.handle,
c_array(ctypes.c_float, grad),
c_array(ctypes.c_float, hess),
len(grad))) |
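To illustrate the expected shape of `grad` and `hess` (an illustration, not part of the dataset): for squared error the first-order gradient is `prediction - label` and the second-order gradient is constant. The sketch below assumes an already-built `Booster` and `DMatrix`; note that the exact `Booster.boost` signature has varied across XGBoost releases, and newer versions favour passing a custom objective to `xgb.train` instead.

```python
import numpy as np
import xgboost as xgb

X, y = np.random.rand(100, 5), np.random.rand(100)
dtrain = xgb.DMatrix(X, label=y)
booster = xgb.Booster({}, [dtrain])

preds = booster.predict(dtrain)
grad = (preds - y).tolist()   # first-order gradient of squared error
hess = [1.0] * len(grad)      # second-order gradient (constant)

booster.boost(dtrain, grad, hess)  # one boosting round with custom statistics
```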
def get_v_total_stress_at_depth(self, z):
"""
Determine the vertical total stress at depth z, where z can be a number or an array of numbers.
"""
if not hasattr(z, "__len__"):
return self.one_vertical_total_stress(z)
else:
sigma_v_effs = []
for value in z:
sigma_v_effs.append(self.one_vertical_total_stress(value))
return np.array(sigma_v_effs) | Determine the vertical total stress at depth z, where z can be a number or an array of numbers. | Below is the instruction that describes the task:
### Input:
Determine the vertical total stress at depth z, where z can be a number or an array of numbers.
### Response:
def get_v_total_stress_at_depth(self, z):
"""
Determine the vertical total stress at depth z, where z can be a number or an array of numbers.
"""
if not hasattr(z, "__len__"):
return self.one_vertical_total_stress(z)
else:
sigma_v_effs = []
for value in z:
sigma_v_effs.append(self.one_vertical_total_stress(value))
return np.array(sigma_v_effs) |
def handle_fractal():
"""Get fractal coordinates from query string, call mandelbrot to generate image.
Returns:
The image, wrapped in an HTML response.
"""
if check_etag():
return flask.make_response(), 304
level = int(flask.request.args.get("l", "0"))
x = float(int(flask.request.args.get("x", "0")))
y = float(int(flask.request.args.get("y", "0")))
if level < 0:
level = 0
grid_size = math.pow(2, level)
x0 = "%.30g" % ((x - 0) / grid_size)
y0 = "%.30g" % ((y - 0) / grid_size)
x1 = "%.30g" % ((x + 1) / grid_size)
y1 = "%.30g" % ((y + 1) / grid_size)
print "Tile: %s %s %s %s" % (x0, y0, x1, y1)
width = str(CONF['width'])
height = str(CONF['height'])
iters = str(CONF['iters'])
cmd = ['./mandelbrot', width, height, iters, x0, y0, x1, y1]
image_data = subprocess.check_output(cmd)
response = flask.make_response(image_data)
response.headers["Content-Type"] = "image/png"
response.headers["cache-control"] = "public, max-age=600"
response.headers["ETag"] = ETAG
return response | Get fractal coordinates from query string, call mandelbrot to generate image.
Returns:
The image, wrapped in an HTML response. | Below is the instruction that describes the task:
### Input:
Get fractal coordinates from query string, call mandelbrot to generate image.
Returns:
The image, wrapped in an HTML response.
### Response:
def handle_fractal():
"""Get fractal coordinates from query string, call mandelbrot to generate image.
Returns:
The image, wrapped in an HTML response.
"""
if check_etag():
return flask.make_response(), 304
level = int(flask.request.args.get("l", "0"))
x = float(int(flask.request.args.get("x", "0")))
y = float(int(flask.request.args.get("y", "0")))
if level < 0:
level = 0
grid_size = math.pow(2, level)
x0 = "%.30g" % ((x - 0) / grid_size)
y0 = "%.30g" % ((y - 0) / grid_size)
x1 = "%.30g" % ((x + 1) / grid_size)
y1 = "%.30g" % ((y + 1) / grid_size)
print "Tile: %s %s %s %s" % (x0, y0, x1, y1)
width = str(CONF['width'])
height = str(CONF['height'])
iters = str(CONF['iters'])
cmd = ['./mandelbrot', width, height, iters, x0, y0, x1, y1]
image_data = subprocess.check_output(cmd)
response = flask.make_response(image_data)
response.headers["Content-Type"] = "image/png"
response.headers["cache-control"] = "public, max-age=600"
response.headers["ETag"] = ETAG
return response |
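The coordinate handling is the only non-obvious part of the handler: at zoom `level` the plane is cut into 2**level by 2**level tiles, and tile `(x, y)` maps to the box passed to the `mandelbrot` binary. A small Python 3 restatement of that arithmetic (the handler itself is Python 2, as the bare `print` statement shows):

```python
import math

def tile_bounds(level, x, y):
    # Mirrors the grid arithmetic in handle_fractal().
    grid_size = math.pow(2, level)
    return ((x / grid_size, y / grid_size),
            ((x + 1) / grid_size, (y + 1) / grid_size))

print(tile_bounds(2, 1, 3))  # ((0.25, 0.75), (0.5, 1.0))
```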
def iter_(obj):
"""A custom replacement for iter(), dispatching a few custom picklable
iterators for known types.
"""
if six.PY2:
file_types = file, # noqa
if six.PY3:
file_types = io.IOBase,
dict_items = {}.items().__class__
dict_values = {}.values().__class__
dict_keys = {}.keys().__class__
dict_view = (dict_items, dict_values, dict_keys)
if isinstance(obj, dict):
return ordered_sequence_iterator(list(obj.keys()))
if isinstance(obj, file_types):
return file_iterator(obj)
if six.PY2:
if isinstance(obj, (list, tuple)):
return ordered_sequence_iterator(obj)
if isinstance(obj, xrange): # noqa
return range_iterator(obj)
if NUMPY_AVAILABLE and isinstance(obj, numpy.ndarray):
return ordered_sequence_iterator(obj)
if six.PY3 and isinstance(obj, dict_view):
return ordered_sequence_iterator(list(obj))
return iter(obj) | A custom replacement for iter(), dispatching a few custom picklable
iterators for known types. | Below is the instruction that describes the task:
### Input:
A custom replacement for iter(), dispatching a few custom picklable
iterators for known types.
### Response:
def iter_(obj):
"""A custom replacement for iter(), dispatching a few custom picklable
iterators for known types.
"""
if six.PY2:
file_types = file, # noqa
if six.PY3:
file_types = io.IOBase,
dict_items = {}.items().__class__
dict_values = {}.values().__class__
dict_keys = {}.keys().__class__
dict_view = (dict_items, dict_values, dict_keys)
if isinstance(obj, dict):
return ordered_sequence_iterator(list(obj.keys()))
if isinstance(obj, file_types):
return file_iterator(obj)
if six.PY2:
if isinstance(obj, (list, tuple)):
return ordered_sequence_iterator(obj)
if isinstance(obj, xrange): # noqa
return range_iterator(obj)
if NUMPY_AVAILABLE and isinstance(obj, numpy.ndarray):
return ordered_sequence_iterator(obj)
if six.PY3 and isinstance(obj, dict_view):
return ordered_sequence_iterator(list(obj))
return iter(obj) |
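The helpers dispatched to above (`ordered_sequence_iterator`, `file_iterator`, `range_iterator`) are not included in this row. A hypothetical sketch of the idea behind them: because the iterator below only stores a sequence and an integer position, it can be pickled and resumed, unlike the C-level iterator returned by the built-in `iter()`.

```python
class ordered_sequence_iterator(object):
    """Sketch of an index-based, picklable iterator (assumed implementation)."""

    def __init__(self, sequence):
        self.sequence = sequence
        self.position = 0

    def __iter__(self):
        return self

    def __next__(self):
        if self.position >= len(self.sequence):
            raise StopIteration
        value = self.sequence[self.position]
        self.position += 1
        return value

    next = __next__  # Python 2 spelling, matching the six-based module above
```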
def check_and_adjust_sighandlers(self):
"""Check to see if any of the signal handlers we are interested in have
changed or is not initially set. Change any that are not right. """
for signame in list(self.sigs.keys()):
if not self.check_and_adjust_sighandler(signame, self.sigs):
break
pass
return | Check to see if any of the signal handlers we are interested in have
changed or is not initially set. Change any that are not right. | Below is the instruction that describes the task:
### Input:
Check to see if any of the signal handlers we are interested in have
changed or is not initially set. Change any that are not right.
### Response:
def check_and_adjust_sighandlers(self):
"""Check to see if any of the signal handlers we are interested in have
changed or is not initially set. Change any that are not right. """
for signame in list(self.sigs.keys()):
if not self.check_and_adjust_sighandler(signame, self.sigs):
break
pass
return |
def logging_syslog_server_port(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logging = ET.SubElement(config, "logging", xmlns="urn:brocade.com:mgmt:brocade-ras")
syslog_server = ET.SubElement(logging, "syslog-server")
syslogip_key = ET.SubElement(syslog_server, "syslogip")
syslogip_key.text = kwargs.pop('syslogip')
use_vrf_key = ET.SubElement(syslog_server, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
port = ET.SubElement(syslog_server, "port")
port.text = kwargs.pop('port')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def logging_syslog_server_port(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logging = ET.SubElement(config, "logging", xmlns="urn:brocade.com:mgmt:brocade-ras")
syslog_server = ET.SubElement(logging, "syslog-server")
syslogip_key = ET.SubElement(syslog_server, "syslogip")
syslogip_key.text = kwargs.pop('syslogip')
use_vrf_key = ET.SubElement(syslog_server, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
port = ET.SubElement(syslog_server, "port")
port.text = kwargs.pop('port')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
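Everything this method does is build an ElementTree request and hand it to `self._callback`. A hypothetical callback that simply serializes the request makes the generated NETCONF payload visible (whitespace added for readability):

```python
import xml.etree.ElementTree as ET

def dump_callback(config):
    # Stand-in for self._callback: return the serialized request instead of sending it.
    return ET.tostring(config)

# logging_syslog_server_port(syslogip='10.0.0.5', use_vrf='mgmt-vrf',
#                            port='514', callback=dump_callback)
# would yield, reformatted for readability:
# <config>
#   <logging xmlns="urn:brocade.com:mgmt:brocade-ras">
#     <syslog-server>
#       <syslogip>10.0.0.5</syslogip><use-vrf>mgmt-vrf</use-vrf><port>514</port>
#     </syslog-server>
#   </logging>
# </config>
```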
def get_organism(self):
"""Select Enrichr organism from below:
Human & Mouse: H. sapiens & M. musculus
Fly: D. melanogaster
Yeast: S. cerevisiae
Worm: C. elegans
Fish: D. rerio
"""
organism = {'default': ['', 'hs', 'mm', 'human','mouse',
'homo sapiens', 'mus musculus',
'h. sapiens', 'm. musculus'],
'Fly': ['fly', 'd. melanogaster', 'drosophila melanogaster'],
'Yeast': ['yeast', 's. cerevisiae', 'saccharomyces cerevisiae'],
'Worm': ['worm', 'c. elegans', 'caenorhabditis elegans', 'nematode'],
'Fish': ['fish', 'd. rerio', 'danio rerio', 'zebrafish']
}
for k, v in organism.items():
if self.organism.lower() in v :
self._organism = k
if self._organism is None:
raise Exception("No supported organism found !!!")
if self._organism == 'default':
self._organism = ''
return | Select Enrichr organism from below:
Human & Mouse: H. sapiens & M. musculus
Fly: D. melanogaster
Yeast: S. cerevisiae
Worm: C. elegans
Fish: D. rerio | Below is the instruction that describes the task:
### Input:
Select Enrichr organism from below:
Human & Mouse: H. sapiens & M. musculus
Fly: D. melanogaster
Yeast: S. cerevisiae
Worm: C. elegans
Fish: D. rerio
### Response:
def get_organism(self):
"""Select Enrichr organism from below:
Human & Mouse: H. sapiens & M. musculus
Fly: D. melanogaster
Yeast: S. cerevisiae
Worm: C. elegans
Fish: D. rerio
"""
organism = {'default': ['', 'hs', 'mm', 'human','mouse',
'homo sapiens', 'mus musculus',
'h. sapiens', 'm. musculus'],
'Fly': ['fly', 'd. melanogaster', 'drosophila melanogaster'],
'Yeast': ['yeast', 's. cerevisiae', 'saccharomyces cerevisiae'],
'Worm': ['worm', 'c. elegans', 'caenorhabditis elegans', 'nematode'],
'Fish': ['fish', 'd. rerio', 'danio rerio', 'zebrafish']
}
for k, v in organism.items():
if self.organism.lower() in v :
self._organism = k
if self._organism is None:
raise Exception("No supported organism found !!!")
if self._organism == 'default':
self._organism = ''
return |
def cpd3_convolution(inp, outmaps, kernel, r,
pad=None, stride=None, dilation=None,
oik_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None, with_bias=True,
max_iter=500, stopping_criterion=1e-5, lambda_reg=0.0):
"""CP convolution is a low rank approximation of a convolution layer. A 3D tensor containing the parameter is built by collapsing the N-D kernels into 1D, then the tensor is decomposed into three matrices. The decomposed layer can be seen as linear combinations of the input feature maps to :math:`{R}` feature maps followed by a depthwise convolution and followed by linear combinations of the feature maps to compute the output feature maps.
The CP decomposition allows to approximate the kernel tensor by :math:`{R}` rank-1 tensors of the form:
.. math::
\\sum_{r=1}^{R} \\lambda_r {\\mathbf{o}^{(r)} \\otimes \\mathbf{i}^{(r)} \\otimes \\mathbf{k}^{(r)}},
where :math:`{\\lambda}_r` is the normalization coefficient and :math:`{\\otimes}` is the outer product.
If `oik_init` is a numpy array, U and V are computed so that uv_init can be approximated from UV
If `oik_init` is None or an initializer, the product of U and V approximate the randomly initialized array
If `O`, `I` and `K` exist in context, they are used to initialize the layer and oik_init is not used.
Suppose the kernel tensor of the affine is of :math:`{I \\times O}` and
the compression rate you want to specify is :math:`{CR}`, then you
set :math:`{R}` as
.. math::
R = \\left\\lfloor \\frac{(1 - CR)OIK^2}{O + I + K^2} \\right\\rfloor.
References:
- Lebedev, Vadim, Yaroslav Ganin, Maksim Rakhuba, Ivan Oseledets, and Victor Lempitsky, "Speeding-up convolutional neural networks using fine-tuned cp-decomposition.", arXiv preprint arXiv:1412.6553 (2014).
- Marcella Astrid, Seung-Ik Lee, "CP-decomposition with Tensor Power Method for Convolutional Neural Networks Compression", BigComp 2017.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
r (int): rank of the factorized layer
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
oik_init (numpy array or :obj:`nnabla.initializer.BaseInitializer`): Initializer for weight. Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. It is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
max_iter (int): Max iteration of the ALS.
stopping_criterion (float): Threshold for stopping the ALS.
If the value is negative, the convergence check is ignored;
in other words, it may reduce the computation time.
lambda_reg (float): regularization parameter for the ALS. Larger
lambda_reg means larger regularization.
Returns:
:class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
"""
if oik_init is None:
oik_init = UniformInitializer(
calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
if type(oik_init) is np.ndarray:
# TODO: Assert that size of uv_init is correct
# uv is initialize with numpy array
oik = oik_init
else:
# uv is initialize from initializer
oik = oik_init((outmaps, inp.shape[base_axis]) + tuple(kernel))
# flatten kernels
oik = oik.reshape((outmaps, inp.shape[base_axis], np.prod(kernel)))
o = get_parameter('O')
i = get_parameter('I')
k = get_parameter('K')
if (o is None) or (i is None) or (k is None):
assert r > 0, "cpd3_convolution: The rank must larger than zero"
from nnabla.utils.factorization import cpd
als = cpd.ALS()
U, lmbda = als.solve(X=oik, rank=r,
max_iter=max_iter,
stopping_criterion=stopping_criterion,
lambda_reg=lambda_reg,
dtype=oik.dtype,
rng=rng)
o_ = U[0] * lmbda
i_ = U[1]
k_ = U[2]
kernel_one = (1,) * len(kernel) # 1x1 for 2D convolution
inmaps = inp.shape[base_axis]
# reshape I : (I,r) -> (r,I,1,1)
i = nn.Variable((r, inmaps) + kernel_one, need_grad=True)
i.d = np.transpose(i_).reshape((r, inmaps) + kernel_one)
nn.parameter.set_parameter("I", i)
# reshape O : (O,r) -> (O,r,1,1)
o = nn.Variable((outmaps, r) + kernel_one,
need_grad=True)
o.d = o_.reshape((outmaps, r) + kernel_one)
nn.parameter.set_parameter("O", o)
# reshape K : (K*K,r) -> (r,K,K)
k = nn.Variable((r,) + kernel, need_grad=True)
k.d = np.transpose(k_).reshape((r,) + kernel)
nn.parameter.set_parameter("K", k)
if fix_parameters == o.need_grad:
o = o.get_unlinked_variable(need_grad=not fix_parameters)
if fix_parameters == i.need_grad:
i = i.get_unlinked_variable(need_grad=not fix_parameters)
if fix_parameters == k.need_grad:
k = k.get_unlinked_variable(need_grad=not fix_parameters)
if with_bias and b_init is None:
b_init = ConstantInitializer()
b = None
if with_bias:
b = get_parameter_or_create(
"b", (outmaps,), b_init, True, not fix_parameters)
y = F.convolution(inp, i, bias=None, base_axis=base_axis, pad=None, stride=None,
dilation=None, group=1)
y = F.depthwise_convolution(y, k, bias=None, base_axis=base_axis,
pad=pad, stride=stride, dilation=dilation,
multiplier=1)
y = F.convolution(y, o, bias=b, base_axis=base_axis, pad=None, stride=None,
dilation=None, group=1)
return y | CP convolution is a low rank approximation of a convolution layer. A 3D tensor containing the parameter is built by collapsing the N-D kernels into 1D, then the tensor is decomposed into three matrices. The decomposed layer can be seen as linear combinations of the input feature maps to :math:`{R}` feature maps followed by a depthwise convolution and followed by linear combinations of the feature maps to compute the output feature maps.
The CP decomposition allows to approximate the kernel tensor by :math:`{R}` rank-1 tensors of the form:
.. math::
\\sum_{r=1}^{R} \\lambda_r {\\mathbf{o}^{(r)} \\otimes \\mathbf{i}^{(r)} \\otimes \\mathbf{k}^{(r)}},
where :math:`{\\lambda}_r` is the normalization coefficient and :math:`{\\otimes}` is the outer product.
If `oik_init` is a numpy array, U and V are computed so that uv_init can be approximated from UV
If `oik_init` is None or an initializer, the product of U and V approximate the randomly initialized array
If `O`, `I` and `K` exist in context, they are used to initialize the layer and oik_init is not used.
Suppose the kernel tensor of the affine is of :math:`{I \\times O}` and
the compression rate you want to specify is :math:`{CR}`, then you
set :math:`{R}` as
.. math::
R = \\left\\lfloor \\frac{(1 - CR)OIK^2}{O + I + K^2} \\right\\rfloor.
References:
- Lebedev, Vadim, Yaroslav Ganin, Maksim Rakhuba, Ivan Oseledets, and Victor Lempitsky, "Speeding-up convolutional neural networks using fine-tuned cp-decomposition.", arXiv preprint arXiv:1412.6553 (2014).
- Marcella Astrid, Seung-Ik Lee, "CP-decomposition with Tensor Power Method for Convolutional Neural Networks Compression", BigComp 2017.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
r (int): rank of the factorized layer
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
oik_init (numpy array or :obj:`nnabla.initializer.BaseInitializer`): Initializer for weight. Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. It is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
max_iter (int): Max iteration of the ALS.
stopping_criterion (float): Threshold for stopping the ALS.
If the value is negative, the convergence check is ignored;
in other words, it may reduce the computation time.
lambda_reg (float): regularization parameter for the ALS. Larger
lambda_reg means larger regularization.
Returns:
:class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`) | Below is the instruction that describes the task:
### Input:
CP convolution is a low rank approximation of a convolution layer. A 3D tensor containing the parameter is built by collapsing the N-D kernels into 1D, then the tensor is decomposed into three matrices. The decomposed layer can be seen as linear combinations of the input feature maps to :math:`{R}` feature maps followed by a depthwise convolution and followed by linear combinations of the feature maps to compute the output feature maps.
The CP decomposition allows to approximate the kernel tensor by :math:`{R}` rank-1 tensors of the form:
.. math::
\\sum_{r=1}^{R} \\lambda_r {\\mathbf{o}^{(r)} \\otimes \\mathbf{i}^{(r)} \\otimes \\mathbf{k}^{(r)}},
where :math:`{\\lambda}_r` is the normalization coefficient and :math:`{\\otimes}` is the outer product.
If `oik_init` is a numpy array, U and V are computed so that uv_init can be approximated from UV
If `oik_init` is None or an initializer, the product of U and V approximate the randomly initialized array
If `O`, `I` and `K` exist in context, they are used to initialize the layer and oik_init is not used.
Suppose the kernel tensor of the affine is of :math:`{I \\times O}` and
the compression rate you want to specify is :math:`{CR}`, then you
set :math:`{R}` as
.. math::
R = \\left\\lfloor \\frac{(1 - CR)OIK^2}{O + I + K^2} \\right\\rfloor.
References:
- Lebedev, Vadim, Yaroslav Ganin, Maksim Rakhuba, Ivan Oseledets, and Victor Lempitsky, "Speeding-up convolutional neural networks using fine-tuned cp-decomposition.", arXiv preprint arXiv:1412.6553 (2014).
- Marcella Astrid, Seung-Ik Lee, "CP-decomposition with Tensor Power Method for Convolutional Neural Networks Compression", BigComp 2017.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
r (int): rank of the factorized layer
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
oik_init (numpy array or :obj:`nnabla.initializer.BaseInitializer`): Initializer for weight. Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. It is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
max_iter (int): Max iteration of the ALS.
stopping_criterion (float): Threshold for stopping the ALS.
If the value is negative, the convergence check is ignored;
in other words, it may reduce the computation time.
lambda_reg (float): regularization parameter for the ALS. Larger
lambda_reg means larger regularization.
Returns:
:class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
### Response:
def cpd3_convolution(inp, outmaps, kernel, r,
pad=None, stride=None, dilation=None,
oik_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None, with_bias=True,
max_iter=500, stopping_criterion=1e-5, lambda_reg=0.0):
"""CP convolution is a low rank approximation of a convolution layer. A 3D tensor containing the parameter is built by collapsing the N-D kernels into 1D, then the tensor is decomposed into three matrices. The decomposed layer can be seen as linear combinations of the input feature maps to :math:`{R}` feature maps followed by a depthwise convolution and followed by linear combinations of the feature maps to compute the output feature maps.
The CP decomposition allows to approximate the kernel tensor by :math:`{R}` rank-1 tensors of the form:
.. math::
\\sum_{r=1}^{R} \\lambda_r {\\mathbf{o}^{(r)} \\otimes \\mathbf{i}^{(r)} \\otimes \\mathbf{k}^{(r)}},
where :math:`{\\lambda}_r` is the normalization coefficient and :math:`{\\otimes}` is the outer product.
If `oik_init` is a numpy array, U and V are computed so that uv_init can be approximated from UV
If `oik_init` is None or an initializer, the product of U and V approximate the randomly initialized array
If `O`, `I` and `K` exist in context, they are used to initialize the layer and oik_init is not used.
Suppose the kernel tensor of the affine is of :math:`{I \\times O}` and
the compression rate you want to specify is :math:`{CR}`, then you
set :math:`{R}` as
.. math::
R = \\left\\lfloor \\frac{(1 - CR)OIK^2}{O + I + K^2} \\right\\rfloor.
References:
- Lebedev, Vadim, Yaroslav Ganin, Maksim Rakhuba, Ivan Oseledets, and Victor Lempitsky, "Speeding-up convolutional neural networks using fine-tuned cp-decomposition.", arXiv preprint arXiv:1412.6553 (2014).
- Marcella Astrid, Seung-Ik Lee, "CP-decomposition with Tensor Power Method for Convolutional Neural Networks Compression", BigComp 2017.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
r (int): rank of the factorized layer
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
oik_init (numpy array or :obj:`nnabla.initializer.BaseInitializer`): Initializer for weight. Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. It is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
max_iter (int): Max iteration of the ALS.
stopping_criterion (float): Threshold for stopping the ALS.
If the value is negative, the convergence check is ignored;
in other words, it may reduce the computation time.
lambda_reg (float): regularization parameter for the ALS. Larger
lambda_reg means larger regularization.
Returns:
:class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
"""
if oik_init is None:
oik_init = UniformInitializer(
calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
if type(oik_init) is np.ndarray:
# TODO: Assert that size of uv_init is correct
# uv is initialize with numpy array
oik = oik_init
else:
# uv is initialize from initializer
oik = oik_init((outmaps, inp.shape[base_axis]) + tuple(kernel))
# flatten kernels
oik = oik.reshape((outmaps, inp.shape[base_axis], np.prod(kernel)))
o = get_parameter('O')
i = get_parameter('I')
k = get_parameter('K')
if (o is None) or (i is None) or (k is None):
assert r > 0, "cpd3_convolution: The rank must larger than zero"
from nnabla.utils.factorization import cpd
als = cpd.ALS()
U, lmbda = als.solve(X=oik, rank=r,
max_iter=max_iter,
stopping_criterion=stopping_criterion,
lambda_reg=lambda_reg,
dtype=oik.dtype,
rng=rng)
o_ = U[0] * lmbda
i_ = U[1]
k_ = U[2]
kernel_one = (1,) * len(kernel) # 1x1 for 2D convolution
inmaps = inp.shape[base_axis]
# reshape I : (I,r) -> (r,I,1,1)
i = nn.Variable((r, inmaps) + kernel_one, need_grad=True)
i.d = np.transpose(i_).reshape((r, inmaps) + kernel_one)
nn.parameter.set_parameter("I", i)
# reshape O : (O,r) -> (O,r,1,1)
o = nn.Variable((outmaps, r) + kernel_one,
need_grad=True)
o.d = o_.reshape((outmaps, r) + kernel_one)
nn.parameter.set_parameter("O", o)
# reshape K : (K*K,r) -> (r,K,K)
k = nn.Variable((r,) + kernel, need_grad=True)
k.d = np.transpose(k_).reshape((r,) + kernel)
nn.parameter.set_parameter("K", k)
if fix_parameters == o.need_grad:
o = o.get_unlinked_variable(need_grad=not fix_parameters)
if fix_parameters == i.need_grad:
i = i.get_unlinked_variable(need_grad=not fix_parameters)
if fix_parameters == k.need_grad:
k = k.get_unlinked_variable(need_grad=not fix_parameters)
if with_bias and b_init is None:
b_init = ConstantInitializer()
b = None
if with_bias:
b = get_parameter_or_create(
"b", (outmaps,), b_init, True, not fix_parameters)
y = F.convolution(inp, i, bias=None, base_axis=base_axis, pad=None, stride=None,
dilation=None, group=1)
y = F.depthwise_convolution(y, k, bias=None, base_axis=base_axis,
pad=pad, stride=stride, dilation=dilation,
multiplier=1)
y = F.convolution(y, o, bias=b, base_axis=base_axis, pad=None, stride=None,
dilation=None, group=1)
return y |
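A worked instance of the rank-selection formula from the docstring (the numbers are illustrative, not from the original): for a 3x3 convolution from 32 input maps to 64 output maps at a target compression rate CR = 0.75,

```python
import math

O, I, K, CR = 64, 32, 3, 0.75
R = math.floor((1 - CR) * O * I * K**2 / (O + I + K**2))
print(R)  # floor(4608 / 105) == 43
```

so `r=43` would be the rank passed to `cpd3_convolution` for that layer.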
def edit( plugins, parent = None, default = None, modal = True ):
"""
Prompts the user to edit the config settings for the inputed config \
plugins.
:param plugins | [<XConfigPlugin>, ..]
parent | <QWidget>
default | <XConfigPlugin> || None
:return <bool> success
"""
if ( XConfigDialog._instance ):
XConfigDialog._instance.show()
XConfigDialog._instance.activateWindow()
return True
dlg = XConfigDialog( parent )
dlg.setPlugins(plugins)
dlg.setCurrentPlugin(default)
if ( not modal ):
XConfigDialog._instance = dlg
dlg.setAttribute(Qt.WA_DeleteOnClose)
dlg.show()
return True
if ( dlg.exec_() ):
return True
return False | Prompts the user to edit the config settings for the inputted config \
plugins.
:param plugins | [<XConfigPlugin>, ..]
parent | <QWidget>
default | <XConfigPlugin> || None
:return <bool> success | Below is the instruction that describes the task:
### Input:
Prompts the user to edit the config settings for the inputted config \
plugins.
:param plugins | [<XConfigPlugin>, ..]
parent | <QWidget>
default | <XConfigPlugin> || None
:return <bool> success
### Response:
def edit( plugins, parent = None, default = None, modal = True ):
"""
Prompts the user to edit the config settings for the inputted config \
plugins.
:param plugins | [<XConfigPlugin>, ..]
parent | <QWidget>
default | <XConfigPlugin> || None
:return <bool> success
"""
if ( XConfigDialog._instance ):
XConfigDialog._instance.show()
XConfigDialog._instance.activateWindow()
return True
dlg = XConfigDialog( parent )
dlg.setPlugins(plugins)
dlg.setCurrentPlugin(default)
if ( not modal ):
XConfigDialog._instance = dlg
dlg.setAttribute(Qt.WA_DeleteOnClose)
dlg.show()
return True
if ( dlg.exec_() ):
return True
return False |
def get_followers(self, first_user_id=None):
"""
Get the list of followers
For details, see http://mp.weixin.qq.com/wiki/3/17e6919a39c1c53555185907acf70093.html
:param first_user_id: optional; the first OpenID to pull from. If omitted, pulling starts from the beginning.
:return: the returned JSON payload
"""
params = dict()
if first_user_id:
params['next_openid'] = first_user_id
return self.request.get('https://api.weixin.qq.com/cgi-bin/user/get', params=params) | Get the list of followers
For details, see http://mp.weixin.qq.com/wiki/3/17e6919a39c1c53555185907acf70093.html
:param first_user_id: optional; the first OpenID to pull from. If omitted, pulling starts from the beginning.
:return: the returned JSON payload | Below is the instruction that describes the task:
### Input:
Get the list of followers
For details, see http://mp.weixin.qq.com/wiki/3/17e6919a39c1c53555185907acf70093.html
:param first_user_id: optional; the first OpenID to pull from. If omitted, pulling starts from the beginning.
:return: the returned JSON payload
### Response:
def get_followers(self, first_user_id=None):
"""
Get the list of followers
For details, see http://mp.weixin.qq.com/wiki/3/17e6919a39c1c53555185907acf70093.html
:param first_user_id: optional; the first OpenID to pull from. If omitted, pulling starts from the beginning.
:return: the returned JSON payload
"""
params = dict()
if first_user_id:
params['next_openid'] = first_user_id
return self.request.get('https://api.weixin.qq.com/cgi-bin/user/get', params=params) |
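The endpoint returns followers in batches; fetching everyone means feeding the `next_openid` of one response into the next call. A hedged sketch of that loop (the response fields follow the public API documentation linked in the docstring, not this row):

```python
def iter_all_followers(client):
    # Hypothetical pagination over get_followers().
    next_openid = None
    while True:
        resp = client.get_followers(first_user_id=next_openid)
        for openid in resp.get('data', {}).get('openid', []):
            yield openid
        next_openid = resp.get('next_openid')
        if not next_openid:
            break
```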
def _init_qualifier_decl(qualifier_decl, qual_repo):
"""
Initialize the flavors of a qualifier declaration if they are not
already set.
"""
assert qualifier_decl.name not in qual_repo
if qualifier_decl.tosubclass is None:
qualifier_decl.tosubclass = True
if qualifier_decl.overridable is None:
qualifier_decl.overridable = True
if qualifier_decl.translatable is None:
qualifier_decl.translatable = False | Initialize the flavors of a qualifier declaration if they are not
already set. | Below is the instruction that describes the task:
### Input:
Initialize the flavors of a qualifier declaration if they are not
already set.
### Response:
def _init_qualifier_decl(qualifier_decl, qual_repo):
"""
Initialize the flavors of a qualifier declaration if they are not
already set.
"""
assert qualifier_decl.name not in qual_repo
if qualifier_decl.tosubclass is None:
qualifier_decl.tosubclass = True
if qualifier_decl.overridable is None:
qualifier_decl.overridable = True
if qualifier_decl.translatable is None:
qualifier_decl.translatable = False |
def select(cls, dataset, selection_mask=None, **selection):
"""
Apply a selection to the data.
"""
import iris
constraint = cls.select_to_constraint(dataset, selection)
pre_dim_coords = [c.name() for c in dataset.data.dim_coords]
indexed = cls.indexed(dataset, selection)
extracted = dataset.data.extract(constraint)
if indexed and not extracted.dim_coords:
return extracted.data.item()
post_dim_coords = [c.name() for c in extracted.dim_coords]
dropped = [c for c in pre_dim_coords if c not in post_dim_coords]
for d in dropped:
extracted = iris.util.new_axis(extracted, d)
return extracted | Apply a selection to the data. | Below is the instruction that describes the task:
### Input:
Apply a selection to the data.
### Response:
def select(cls, dataset, selection_mask=None, **selection):
"""
Apply a selection to the data.
"""
import iris
constraint = cls.select_to_constraint(dataset, selection)
pre_dim_coords = [c.name() for c in dataset.data.dim_coords]
indexed = cls.indexed(dataset, selection)
extracted = dataset.data.extract(constraint)
if indexed and not extracted.dim_coords:
return extracted.data.item()
post_dim_coords = [c.name() for c in extracted.dim_coords]
dropped = [c for c in pre_dim_coords if c not in post_dim_coords]
for d in dropped:
extracted = iris.util.new_axis(extracted, d)
return extracted |
def list_recipes(full=False):
"""Method that iterates over all available recipes and prints their
information to the standard output
Parameters
----------
full : bool
If true, it will provide the pipeline string along with the recipe name
"""
logger.info(colored_print(
"\n===== L I S T O F R E C I P E S =====\n",
"green_bold"))
# This will iterate over all modules included in the recipes subpackage
# It will return the import class and the module name, along with the
# correct prefix
prefix = "{}.".format(recipes.__name__)
for importer, modname, _ in pkgutil.iter_modules(recipes.__path__, prefix):
# Import the current module
_module = importer.find_module(modname).load_module(modname)
# Fetch all available classes in module
_recipe_classes = [cls for cls in _module.__dict__.values() if
isinstance(cls, type)]
# Iterate over each Recipe class, and check for a match with the
# provided recipe name.
for cls in _recipe_classes:
recipe_cls = cls()
if hasattr(recipe_cls, "name"):
logger.info(colored_print("=> {}".format(recipe_cls.name), "blue_bold"))
if full:
logger.info(colored_print("\t {}".format(recipe_cls.__doc__), "purple_bold"))
logger.info(colored_print("Pipeline string: {}\n".format(recipe_cls.pipeline_str), "yellow_bold"))
sys.exit(0) | Method that iterates over all available recipes and prints their
information to the standard output
Parameters
----------
full : bool
If true, it will provide the pipeline string along with the recipe name | Below is the instruction that describes the task:
### Input:
Method that iterates over all available recipes and prints their
information to the standard output
Parameters
----------
full : bool
If true, it will provide the pipeline string along with the recipe name
### Response:
def list_recipes(full=False):
"""Method that iterates over all available recipes and prints their
information to the standard output
Parameters
----------
full : bool
If true, it will provide the pipeline string along with the recipe name
"""
logger.info(colored_print(
"\n===== L I S T O F R E C I P E S =====\n",
"green_bold"))
# This will iterate over all modules included in the recipes subpackage
# It will return the import class and the module name, along with the
# correct prefix
prefix = "{}.".format(recipes.__name__)
for importer, modname, _ in pkgutil.iter_modules(recipes.__path__, prefix):
# Import the current module
_module = importer.find_module(modname).load_module(modname)
# Fetch all available classes in module
_recipe_classes = [cls for cls in _module.__dict__.values() if
isinstance(cls, type)]
# Iterate over each Recipe class, and check for a match with the
# provided recipe name.
for cls in _recipe_classes:
recipe_cls = cls()
if hasattr(recipe_cls, "name"):
logger.info(colored_print("=> {}".format(recipe_cls.name), "blue_bold"))
if full:
logger.info(colored_print("\t {}".format(recipe_cls.__doc__), "purple_bold"))
logger.info(colored_print("Pipeline string: {}\n".format(recipe_cls.pipeline_str), "yellow_bold"))
sys.exit(0) |
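The discovery loop only requires a recipe class to expose a `name` attribute; with `full=True` it also prints the class docstring and `pipeline_str`. A minimal, hypothetical recipe module that the loop above would pick up:

```python
class MyRecipe:
    """Illustrative recipe: assembles reads and annotates the resulting contigs."""
    name = "my_recipe"
    pipeline_str = "fastqc trimmomatic spades abricate"
```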
def with_text(self, text, markdown=None):
'''Set text content.
:param text: text content.
:param markdown: is markdown? Defaults to ``False``.
'''
self._text = text
self._markdown = markdown or False
return self | Set text content.
:param text: text content.
:param markdown: is markdown? Defaults to ``False``. | Below is the instruction that describes the task:
### Input:
Set text content.
:param text: text content.
:param markdown: is markdown? Defaults to ``False``.
### Response:
def with_text(self, text, markdown=None):
'''Set text content.
:param text: text content.
:param markdown: is markdown? Defaults to ``False``.
'''
self._text = text
self._markdown = markdown or False
return self |
def compute_wed(self):
"""
Computes weight error derivative for all connections in
self.connections starting with the last connection.
"""
if len(self.cacheConnections) != 0:
changeConnections = self.cacheConnections
else:
changeConnections = self.connections
for connect in reverse(changeConnections):
if connect.active and connect.fromLayer.active and connect.toLayer.active:
connect.wed = connect.wed + Numeric.outerproduct(connect.fromLayer.activation,
connect.toLayer.delta)
if len(self.cacheLayers) != 0:
changeLayers = self.cacheLayers
else:
changeLayers = self.layers
for layer in changeLayers:
if layer.active:
layer.wed = layer.wed + layer.delta | Computes weight error derivative for all connections in
self.connections starting with the last connection. | Below is the instruction that describes the task:
### Input:
Computes weight error derivative for all connections in
self.connections starting with the last connection.
### Response:
def compute_wed(self):
"""
Computes weight error derivative for all connections in
self.connections starting with the last connection.
"""
if len(self.cacheConnections) != 0:
changeConnections = self.cacheConnections
else:
changeConnections = self.connections
for connect in reverse(changeConnections):
if connect.active and connect.fromLayer.active and connect.toLayer.active:
connect.wed = connect.wed + Numeric.outerproduct(connect.fromLayer.activation,
connect.toLayer.delta)
if len(self.cacheLayers) != 0:
changeLayers = self.cacheLayers
else:
changeLayers = self.layers
for layer in changeLayers:
if layer.active:
layer.wed = layer.wed + layer.delta |
def p_statement_if(p):
'''statement : IF LPAREN expr RPAREN statement elseif_list else_single
| IF LPAREN expr RPAREN COLON inner_statement_list new_elseif_list new_else_single ENDIF SEMI'''
if len(p) == 8:
p[0] = ast.If(p[3], p[5], p[6], p[7], lineno=p.lineno(1))
else:
p[0] = ast.If(p[3], ast.Block(p[6], lineno=p.lineno(5)),
p[7], p[8], lineno=p.lineno(1)) | statement : IF LPAREN expr RPAREN statement elseif_list else_single
| IF LPAREN expr RPAREN COLON inner_statement_list new_elseif_list new_else_single ENDIF SEMI | Below is the instruction that describes the task:
### Input:
statement : IF LPAREN expr RPAREN statement elseif_list else_single
| IF LPAREN expr RPAREN COLON inner_statement_list new_elseif_list new_else_single ENDIF SEMI
### Response:
def p_statement_if(p):
'''statement : IF LPAREN expr RPAREN statement elseif_list else_single
| IF LPAREN expr RPAREN COLON inner_statement_list new_elseif_list new_else_single ENDIF SEMI'''
if len(p) == 8:
p[0] = ast.If(p[3], p[5], p[6], p[7], lineno=p.lineno(1))
else:
p[0] = ast.If(p[3], ast.Block(p[6], lineno=p.lineno(5)),
p[7], p[8], lineno=p.lineno(1)) |
def _entries_sorted(self):
""":return: list of entries, in a sorted fashion, first by path, then by stage"""
return sorted(self.entries.values(), key=lambda e: (e.path, e.stage)) | :return: list of entries, in a sorted fashion, first by path, then by stage | Below is the instruction that describes the task:
### Input:
:return: list of entries, in a sorted fashion, first by path, then by stage
### Response:
def _entries_sorted(self):
""":return: list of entries, in a sorted fashion, first by path, then by stage"""
return sorted(self.entries.values(), key=lambda e: (e.path, e.stage)) |
def summary(self):
"""Returns the docstring summary for the code element if it exists."""
if self._summary is None:
self._summary = "No summary for element."
for doc in self.docstring:
if doc.doctype == "summary":
self._summary = doc.contents
break
#If a parameter, member or local tag has dimensions or other children,
#then the inner-text is not the right thing to use; find a grand-child
#summary tag instead.
if self._summary == "No summary for element." and len(self.docstring) > 0:
summary = self.doc_children("summary")
if len(summary) > 0:
self._summary = summary[0].contents
else:
self._summary = self.docstring[0].contents
return self._summary | Returns the docstring summary for the code element if it exists. | Below is the instruction that describes the task:
### Input:
Returns the docstring summary for the code element if it exists.
### Response:
def summary(self):
"""Returns the docstring summary for the code element if it exists."""
if self._summary is None:
self._summary = "No summary for element."
for doc in self.docstring:
if doc.doctype == "summary":
self._summary = doc.contents
break
#If a parameter, member or local tag has dimensions or other children,
#then the inner-text is not the right thing to use; find a grand-child
#summary tag instead.
if self._summary == "No summary for element." and len(self.docstring) > 0:
summary = self.doc_children("summary")
if len(summary) > 0:
self._summary = summary[0].contents
else:
self._summary = self.docstring[0].contents
return self._summary |
def stats_advanced(self, kind='R', summary=False):
"""Returns a DataFrame of advanced stats."""
return self._get_stats_table('advanced', kind=kind, summary=summary) | Returns a DataFrame of advanced stats. | Below is the instruction that describes the task:
### Input:
Returns a DataFrame of advanced stats.
### Response:
def stats_advanced(self, kind='R', summary=False):
"""Returns a DataFrame of advanced stats."""
return self._get_stats_table('advanced', kind=kind, summary=summary) |
def camera_disable(self, camera_id, **kwargs):
"""Disable camera."""
api = self._api_info['camera']
payload = dict({
'_sid': self._sid,
'api': api['name'],
'method': 'Disable',
'version': 9,
'idList': camera_id,
}, **kwargs)
print(api['url'])
print(payload)
response = self._get(api['url'], payload)
return response['success'] | Disable camera. | Below is the instruction that describes the task:
### Input:
Disable camera.
### Response:
def camera_disable(self, camera_id, **kwargs):
"""Disable camera."""
api = self._api_info['camera']
payload = dict({
'_sid': self._sid,
'api': api['name'],
'method': 'Disable',
'version': 9,
'idList': camera_id,
}, **kwargs)
print(api['url'])
print(payload)
response = self._get(api['url'], payload)
return response['success'] |
def filter(self, record):
"""
Filter record
:param record: Record to filter
:return:
"""
def modify(value):
"""
Modify logged record, truncating it to max length and logging remaining length
:param value: Record to modify
:return:
"""
if isinstance(value, six.string_types):
if len(value) < ContextFilter.MAXIMUM_LENGTH:
return value
try:
return "{}...[{} more bytes]".format(
value[:ContextFilter.REVEAL_LENGTH],
len(value) - ContextFilter.REVEAL_LENGTH)
except UnicodeError:
return "{}...[{} more bytes]".format(
repr(value[:ContextFilter.REVEAL_LENGTH]),
len(value) - ContextFilter.REVEAL_LENGTH)
elif isinstance(value, six.binary_type):
return "{}...[{} more bytes]".format(
repr(value[:ContextFilter.REVEAL_LENGTH]),
len(value) - ContextFilter.REVEAL_LENGTH)
else:
return value
record.msg = traverse_json_obj(record.msg, callback=modify)
return True | Filter record
:param record: Record to filter
:return: | Below is the instruction that describes the task:
### Input:
Filter record
:param record: Record to filter
:return:
### Response:
def filter(self, record):
"""
Filter record
:param record: Record to filter
:return:
"""
def modify(value):
"""
Modify logged record, truncating it to max length and logging remaining length
:param value: Record to modify
:return:
"""
if isinstance(value, six.string_types):
if len(value) < ContextFilter.MAXIMUM_LENGTH:
return value
try:
return "{}...[{} more bytes]".format(
value[:ContextFilter.REVEAL_LENGTH],
len(value) - ContextFilter.REVEAL_LENGTH)
except UnicodeError:
return "{}...[{} more bytes]".format(
repr(value[:ContextFilter.REVEAL_LENGTH]),
len(value) - ContextFilter.REVEAL_LENGTH)
elif isinstance(value, six.binary_type):
return "{}...[{} more bytes]".format(
repr(value[:ContextFilter.REVEAL_LENGTH]),
len(value) - ContextFilter.REVEAL_LENGTH)
else:
return value
record.msg = traverse_json_obj(record.msg, callback=modify)
return True |
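In use, the filter is attached to a logger so that oversized strings buried anywhere inside `record.msg` are truncated before formatting. The sketch below assumes the `MAXIMUM_LENGTH`/`REVEAL_LENGTH` class constants and the `traverse_json_obj` helper referenced above are defined elsewhere in the module:

```python
import logging

logger = logging.getLogger("payload")
logger.addFilter(ContextFilter())

# A huge value is reduced to its first REVEAL_LENGTH characters plus a
# "...[N more bytes]" marker; small values pass through unchanged.
logger.warning({"body": "x" * 1000000, "status": "ok"})
```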
def _normalize_merge_diff(diff):
"""Make compare_config() for merge look similar to replace config diff."""
new_diff = []
for line in diff.splitlines():
# Filter blank lines and prepend +sign
if line.strip():
new_diff.append("+" + line)
if new_diff:
new_diff.insert(
0, "! incremental-diff failed; falling back to echo of merge file"
)
else:
new_diff.append("! No changes specified in merge file.")
return "\n".join(new_diff) | Make compare_config() for merge look similar to replace config diff. | Below is the the instruction that describes the task:
### Input:
Make compare_config() for merge look similar to replace config diff.
### Response:
def _normalize_merge_diff(diff):
"""Make compare_config() for merge look similar to replace config diff."""
new_diff = []
for line in diff.splitlines():
# Filter blank lines and prepend +sign
if line.strip():
new_diff.append("+" + line)
if new_diff:
new_diff.insert(
0, "! incremental-diff failed; falling back to echo of merge file"
)
else:
new_diff.append("! No changes specified in merge file.")
return "\n".join(new_diff) |
def html_table(data, header=True, limit=None, withtype=False):
"""
Return a double iterable as an HTML table
@param data (iterable): the data to format
@param header (bool): if the first row is a header row
@param limit (int): maximum number of rows to render (excluding header)
@param withtype (bool): if columns are to have an alternating CSS class
(even/odd) or not.
@return (int,string): a pair <number-of-rendered-rows>, <html-table>
"""
if header and limit:
limit += 1
ct = 'th' if header else 'td'
rc = 'hdr' if header else 'odd'
# import codecs
# import datetime
# with codecs.open( '/tmp/dump', 'w', encoding='utf-8') as f:
# print( '************', datetime.datetime.now(), file=f )
# for n, row in enumerate(data):
# print( '-------', n, file=f )
# for n, c in enumerate(row):
# print( type(c), repr(c), file=f )
html = u'<table>'
rn = -1
for rn, row in enumerate(data):
html += u'<tr class={}>'.format(rc)
html += '\n'.join((html_elem(c, ct, withtype) for c in row))
html += u'</tr>'
rc = 'even' if rc == 'odd' else 'odd'
ct = 'td'
if limit:
limit -= 1
if not limit:
break
return (0, '') if rn < 0 else (rn+1-header, html+u'</table>') | Return a double iterable as an HTML table
@param data (iterable): the data to format
@param header (bool): if the first row is a header row
@param limit (int): maximum number of rows to render (excluding header)
@param withtype (bool): if columns are to have an alternating CSS class
(even/odd) or not.
@return (int,string): a pair <number-of-rendered-rows>, <html-table> | Below is the instruction that describes the task:
### Input:
Return a double iterable as an HTML table
@param data (iterable): the data to format
@param header (bool): if the first row is a header row
@param limit (int): maximum number of rows to render (excluding header)
@param withtype (bool): if columns are to have an alternating CSS class
(even/odd) or not.
@return (int,string): a pair <number-of-rendered-rows>, <html-table>
### Response:
def html_table(data, header=True, limit=None, withtype=False):
"""
Return a double iterable as an HTML table
@param data (iterable): the data to format
@param header (bool): if the first row is a header row
@param limit (int): maximum number of rows to render (excluding header)
@param withtype (bool): if columns are to have an alternating CSS class
(even/odd) or not.
@return (int,string): a pair <number-of-rendered-rows>, <html-table>
"""
if header and limit:
limit += 1
ct = 'th' if header else 'td'
rc = 'hdr' if header else 'odd'
# import codecs
# import datetime
# with codecs.open( '/tmp/dump', 'w', encoding='utf-8') as f:
# print( '************', datetime.datetime.now(), file=f )
# for n, row in enumerate(data):
# print( '-------', n, file=f )
# for n, c in enumerate(row):
# print( type(c), repr(c), file=f )
html = u'<table>'
rn = -1
for rn, row in enumerate(data):
html += u'<tr class={}>'.format(rc)
html += '\n'.join((html_elem(c, ct, withtype) for c in row))
html += u'</tr>'
rc = 'even' if rc == 'odd' else 'odd'
ct = 'td'
if limit:
limit -= 1
if not limit:
break
return (0, '') if rn < 0 else (rn+1-header, html+u'</table>') |
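A usage sketch (cell rendering is delegated to the `html_elem` helper, which is not shown in this row, so only the row count and the outer markup are checked here):

```python
rows = [("name", "count"), ("alpha", 3), ("beta", 5)]
n, table = html_table(rows, header=True, limit=10)
assert n == 2                      # data rows rendered, excluding the header
assert table.startswith(u'<table>') and table.endswith(u'</table>')
```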
def sanity_check_insdcio(handle, id_marker, fake_id_line):
"""Sanity check for insdcio style files"""
found_id = False
found_end_marker = False
for line in handle:
line = line.strip()
if not line:
continue
if line.startswith(id_marker):
found_id = True
break
if line.startswith('//'):
found_end_marker = True
break
handle.seek(0)
# We found an ID, file looks good.
if found_id:
return handle
# If there's no ID and no end marker, just give up.
if not found_end_marker:
return handle
# If we found an end marker but no ID, fake one.
new_handle = StringIO()
new_handle.write("%s\n" % fake_id_line)
new_handle.write(handle.read())
new_handle.seek(0)
return new_handle | Sanity check for insdcio style files | Below is the the instruction that describes the task:
### Input:
Sanity check for insdcio style files
### Response:
def sanity_check_insdcio(handle, id_marker, fake_id_line):
"""Sanity check for insdcio style files"""
found_id = False
found_end_marker = False
for line in handle:
line = line.strip()
if not line:
continue
if line.startswith(id_marker):
found_id = True
break
if line.startswith('//'):
found_end_marker = True
break
handle.seek(0)
# We found an ID, file looks good.
if found_id:
return handle
# If there's no ID and no end marker, just give up.
if not found_end_marker:
return handle
# If we found an end marker but no ID, fake one.
new_handle = StringIO()
new_handle.write("%s\n" % fake_id_line)
new_handle.write(handle.read())
new_handle.seek(0)
return new_handle |
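A hedged usage sketch (the record text and the fake ID line are invented for illustration):
from io import StringIO
handle = StringIO("FT   source          1..100\n//\n")
fixed = sanity_check_insdcio(handle, "ID ", "ID   FAKE; SV 1; linear; DNA; STD; UNC; 100 BP.")
fixed.readline()  # -> the fake ID line, since an end marker ('//') was found but no ID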
def _parse_attrs(elem, container=dict, **options):
"""
:param elem: ET Element object has attributes (elem.attrib)
:param container: callable to make a container object
:return: Parsed value or value itself depends on 'ac_parse_value'
"""
adic = dict((_tweak_ns(a, **options), v) for a, v in elem.attrib.items())
if options.get("ac_parse_value", False):
return container(dict((k, anyconfig.parser.parse_single(v))
for k, v in adic.items()))
return container(adic) | :param elem: ET Element object has attributes (elem.attrib)
:param container: callable to make a container object
:return: Parsed value or value itself depends on 'ac_parse_value' | Below is the the instruction that describes the task:
### Input:
:param elem: ET Element object has attributes (elem.attrib)
:param container: callable to make a container object
:return: Parsed value or value itself depends on 'ac_parse_value'
### Response:
def _parse_attrs(elem, container=dict, **options):
"""
:param elem: ET Element object has attributes (elem.attrib)
:param container: callable to make a container object
:return: Parsed value or value itself depends on 'ac_parse_value'
"""
adic = dict((_tweak_ns(a, **options), v) for a, v in elem.attrib.items())
if options.get("ac_parse_value", False):
return container(dict((k, anyconfig.parser.parse_single(v))
for k, v in adic.items()))
return container(adic) |
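Because the helper depends on _tweak_ns and anyconfig.parser, here is a self-contained sketch of just the attribute-to-dict step (namespace tweaking and value parsing omitted):
import xml.etree.ElementTree as ET
elem = ET.fromstring('<node id="42" enabled="true"/>')
attrs = dict(elem.attrib)  # -> {'id': '42', 'enabled': 'true'}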
def webhooks(self):
"""Instance depends on the API version:
* 2017-10-01: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2017_10_01.operations.WebhooksOperations>`
* 2018-02-01-preview: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2018_02_01_preview.operations.WebhooksOperations>`
* 2018-09-01: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2018_09_01.operations.WebhooksOperations>`
"""
api_version = self._get_api_version('webhooks')
if api_version == '2017-10-01':
from .v2017_10_01.operations import WebhooksOperations as OperationClass
elif api_version == '2018-02-01-preview':
from .v2018_02_01_preview.operations import WebhooksOperations as OperationClass
elif api_version == '2018-09-01':
from .v2018_09_01.operations import WebhooksOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | Instance depends on the API version:
* 2017-10-01: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2017_10_01.operations.WebhooksOperations>`
* 2018-02-01-preview: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2018_02_01_preview.operations.WebhooksOperations>`
* 2018-09-01: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2018_09_01.operations.WebhooksOperations>` | Below is the the instruction that describes the task:
### Input:
Instance depends on the API version:
* 2017-10-01: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2017_10_01.operations.WebhooksOperations>`
* 2018-02-01-preview: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2018_02_01_preview.operations.WebhooksOperations>`
* 2018-09-01: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2018_09_01.operations.WebhooksOperations>`
### Response:
def webhooks(self):
"""Instance depends on the API version:
* 2017-10-01: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2017_10_01.operations.WebhooksOperations>`
* 2018-02-01-preview: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2018_02_01_preview.operations.WebhooksOperations>`
* 2018-09-01: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2018_09_01.operations.WebhooksOperations>`
"""
api_version = self._get_api_version('webhooks')
if api_version == '2017-10-01':
from .v2017_10_01.operations import WebhooksOperations as OperationClass
elif api_version == '2018-02-01-preview':
from .v2018_02_01_preview.operations import WebhooksOperations as OperationClass
elif api_version == '2018-09-01':
from .v2018_09_01.operations import WebhooksOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) |
def any2mb(s):
"""Convert string or number to memory in megabytes."""
if is_string(s):
return int(Memory.from_string(s).to("Mb"))
else:
return int(s) | Convert string or number to memory in megabytes. | Below is the the instruction that describes the task:
### Input:
Convert string or number to memory in megabytes.
### Response:
def any2mb(s):
"""Convert string or number to memory in megabytes."""
if is_string(s):
return int(Memory.from_string(s).to("Mb"))
else:
return int(s) |
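A call sketch (it assumes the module's Memory.from_string understands strings such as "2 Gb"):
any2mb(1024)    # -> 1024; numbers pass straight through int()
any2mb("2 Gb")  # -> 2048, via Memory.from_string("2 Gb").to("Mb")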
def to_nested_php_args(data, prefix_key=None):
"""
This function will take either a dict or list and will recursively loop
through the values converting it into a format similar to a PHP array which
Ubersmith requires for the info portion of the API's order.create method.
"""
is_root = prefix_key is None
prefix_key = prefix_key if prefix_key else ''
if islist(data):
data_iter = data if is_root else enumerate(data)
new_data = [] if is_root else {}
elif isdict(data):
data_iter = list(data.items())
new_data = {}
else:
raise TypeError('expected dict or list, got {0}'.format(type(data)))
if islist(new_data):
def data_set(k, v):
new_data.append((k, v))
def data_update(d):
for k, v in list(d.items()):
new_data.append((k, v))
else:
def data_set(k, v):
new_data[k] = v
data_update = new_data.update
for key, value in data_iter:
end_key = prefix_key + (str(key) if is_root else '[{0}]'.format(key))
if _is_leaf(value):
data_set(end_key, value)
else:
nested_args = to_nested_php_args(value, end_key)
data_update(nested_args)
return new_data | This function will take either a dict or list and will recursively loop
through the values converting it into a format similar to a PHP array which
Ubersmith requires for the info portion of the API's order.create method. | Below is the the instruction that describes the task:
### Input:
This function will take either a dict or list and will recursively loop
through the values converting it into a format similar to a PHP array which
Ubersmith requires for the info portion of the API's order.create method.
### Response:
def to_nested_php_args(data, prefix_key=None):
"""
This function will take either a dict or list and will recursively loop
through the values converting it into a format similar to a PHP array which
Ubersmith requires for the info portion of the API's order.create method.
"""
is_root = prefix_key is None
prefix_key = prefix_key if prefix_key else ''
if islist(data):
data_iter = data if is_root else enumerate(data)
new_data = [] if is_root else {}
elif isdict(data):
data_iter = list(data.items())
new_data = {}
else:
raise TypeError('expected dict or list, got {0}'.format(type(data)))
if islist(new_data):
def data_set(k, v):
new_data.append((k, v))
def data_update(d):
for k, v in list(d.items()):
new_data.append((k, v))
else:
def data_set(k, v):
new_data[k] = v
data_update = new_data.update
for key, value in data_iter:
end_key = prefix_key + (str(key) if is_root else '[{0}]'.format(key))
if _is_leaf(value):
data_set(end_key, value)
else:
nested_args = to_nested_php_args(value, end_key)
data_update(nested_args)
return new_data |
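A usage sketch (assuming the islist/isdict/_is_leaf helpers behave as their names suggest):
to_nested_php_args({'client': {'name': 'Acme', 'tags': ['a', 'b']}})
# -> {'client[name]': 'Acme', 'client[tags][0]': 'a', 'client[tags][1]': 'b'}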
def _build_kernel_function_declaration(self, name='kernel'):
"""Build and return kernel function declaration"""
array_declarations, array_dimensions = self._build_array_declarations(with_init=False)
scalar_declarations = self._build_scalar_declarations(with_init=False)
const_declarations = self._build_const_declartions(with_init=False)
return c_ast.FuncDecl(args=c_ast.ParamList(params=array_declarations + scalar_declarations +
const_declarations),
type=c_ast.TypeDecl(declname=name,
quals=[],
type=c_ast.IdentifierType(names=['void']))) | Build and return kernel function declaration | Below is the the instruction that describes the task:
### Input:
Build and return kernel function declaration
### Response:
def _build_kernel_function_declaration(self, name='kernel'):
"""Build and return kernel function declaration"""
array_declarations, array_dimensions = self._build_array_declarations(with_init=False)
scalar_declarations = self._build_scalar_declarations(with_init=False)
const_declarations = self._build_const_declartions(with_init=False)
return c_ast.FuncDecl(args=c_ast.ParamList(params=array_declarations + scalar_declarations +
const_declarations),
type=c_ast.TypeDecl(declname=name,
quals=[],
type=c_ast.IdentifierType(names=['void']))) |
def cli(ctx, hostname, username, password, config_dir, https):
"""Command-line interface for interacting with a WVA device"""
ctx.is_root = True
ctx.user_values_entered = False
ctx.config_dir = os.path.abspath(os.path.expanduser(config_dir))
ctx.config = load_config(ctx)
ctx.hostname = hostname
ctx.username = username
ctx.password = password
ctx.https = https
# Creating the WVA object is deferred as some commands like clearconfig
# should not require a username/password to perform them
ctx.wva = None | Command-line interface for interacting with a WVA device | Below is the the instruction that describes the task:
### Input:
Command-line interface for interacting with a WVA device
### Response:
def cli(ctx, hostname, username, password, config_dir, https):
"""Command-line interface for interacting with a WVA device"""
ctx.is_root = True
ctx.user_values_entered = False
ctx.config_dir = os.path.abspath(os.path.expanduser(config_dir))
ctx.config = load_config(ctx)
ctx.hostname = hostname
ctx.username = username
ctx.password = password
ctx.https = https
# Creating the WVA object is deferred as some commands like clearconfig
# should not require a username/password to perform them
ctx.wva = None |
def image(self):
r"""
Tuple with a CAPTCHA text and an Image object.
Images are generated on the fly, using given text source, TTF font and
other parameters passable through __init__. All letters in used text
are morphed. Also a line is morphed and pasted onto CAPTCHA text.
Additionally, if self.noise > 1/255, a "snowy" image is merged with
CAPTCHA image with a 50/50 ratio.
Property returns a pair containing a string with text in returned
image and image itself.
:returns: ``tuple`` (CAPTCHA text, Image object)
"""
text = self.text
w, h = self.font.getsize(text)
margin_x = round(self.margin_x * w / self.w)
margin_y = round(self.margin_y * h / self.h)
image = Image.new('RGB',
(w + 2*margin_x, h + 2*margin_y),
(255, 255, 255))
# Text
self._writeText(image, text, pos=(margin_x, margin_y))
# Line
self._drawLine(image)
# White noise
noise = self._whiteNoise(image.size)
if noise is not None:
image = Image.blend(image, noise, 0.5)
# Resize
image = image.resize(self.size, resample=self.resample)
return (text, image) | r"""
Tuple with a CAPTCHA text and an Image object.
Images are generated on the fly, using given text source, TTF font and
other parameters passable through __init__. All letters in used text
are morphed. Also a line is morphed and pasted onto CAPTCHA text.
Additionally, if self.noise > 1/255, a "snowy" image is merged with
CAPTCHA image with a 50/50 ratio.
Property returns a pair containing a string with text in returned
image and image itself.
:returns: ``tuple`` (CAPTCHA text, Image object) | Below is the the instruction that describes the task:
### Input:
r"""
Tuple with a CAPTCHA text and an Image object.
Images are generated on the fly, using given text source, TTF font and
other parameters passable through __init__. All letters in used text
are morphed. Also a line is morphed and pasted onto CAPTCHA text.
Additionally, if self.noise > 1/255, a "snowy" image is merged with
CAPTCHA image with a 50/50 ratio.
Property returns a pair containing a string with text in returned
image and image itself.
:returns: ``tuple`` (CAPTCHA text, Image object)
### Response:
def image(self):
r"""
Tuple with a CAPTCHA text and an Image object.
Images are generated on the fly, using given text source, TTF font and
other parameters passable through __init__. All letters in used text
are morphed. Also a line is morphed and pasted onto CAPTCHA text.
Additionally, if self.noise > 1/255, a "snowy" image is merged with
CAPTCHA image with a 50/50 ratio.
Property returns a pair containing a string with text in returned
image and image itself.
:returns: ``tuple`` (CAPTCHA text, Image object)
"""
text = self.text
w, h = self.font.getsize(text)
margin_x = round(self.margin_x * w / self.w)
margin_y = round(self.margin_y * h / self.h)
image = Image.new('RGB',
(w + 2*margin_x, h + 2*margin_y),
(255, 255, 255))
# Text
self._writeText(image, text, pos=(margin_x, margin_y))
# Line
self._drawLine(image)
# White noise
noise = self._whiteNoise(image.size)
if noise is not None:
image = Image.blend(image, noise, 0.5)
# Resize
image = image.resize(self.size, resample=self.resample)
return (text, image) |
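A usage sketch, where generator stands for an already-configured instance of the CAPTCHA class this property belongs to:
text, img = generator.image  # img is a PIL Image
img.save('captcha.png')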
def _diffSchema(diskSchema, memorySchema):
"""
Format a schema mismatch for human consumption.
@param diskSchema: The on-disk schema.
@param memorySchema: The in-memory schema.
@rtype: L{bytes}
@return: A description of the schema differences.
"""
diskSchema = set(diskSchema)
memorySchema = set(memorySchema)
diskOnly = diskSchema - memorySchema
memoryOnly = memorySchema - diskSchema
diff = []
if diskOnly:
diff.append('Only on disk:')
diff.extend(map(repr, diskOnly))
if memoryOnly:
diff.append('Only in memory:')
diff.extend(map(repr, memoryOnly))
return '\n'.join(diff) | Format a schema mismatch for human consumption.
@param diskSchema: The on-disk schema.
@param memorySchema: The in-memory schema.
@rtype: L{bytes}
@return: A description of the schema differences. | Below is the the instruction that describes the task:
### Input:
Format a schema mismatch for human consumption.
@param diskSchema: The on-disk schema.
@param memorySchema: The in-memory schema.
@rtype: L{bytes}
@return: A description of the schema differences.
### Response:
def _diffSchema(diskSchema, memorySchema):
"""
Format a schema mismatch for human consumption.
@param diskSchema: The on-disk schema.
@param memorySchema: The in-memory schema.
@rtype: L{bytes}
@return: A description of the schema differences.
"""
diskSchema = set(diskSchema)
memorySchema = set(memorySchema)
diskOnly = diskSchema - memorySchema
memoryOnly = memorySchema - diskSchema
diff = []
if diskOnly:
diff.append('Only on disk:')
diff.extend(map(repr, diskOnly))
if memoryOnly:
diff.append('Only in memory:')
diff.extend(map(repr, memoryOnly))
return '\n'.join(diff) |
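A small illustration of the output format:
print(_diffSchema([('person', 'name'), ('person', 'age')],
                  [('person', 'name'), ('person', 'email')]))
# Only on disk:
# ('person', 'age')
# Only in memory:
# ('person', 'email')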
def get_sigla(self, work):
"""Returns a list of all of the sigla for `work`.
:param work: name of work
:type work: `str`
:rtype: `list` of `str`
"""
return [os.path.splitext(os.path.basename(path))[0]
for path in glob.glob(os.path.join(self._path, work, '*.txt'))] | Returns a list of all of the sigla for `work`.
:param work: name of work
:type work: `str`
:rtype: `list` of `str` | Below is the the instruction that describes the task:
### Input:
Returns a list of all of the sigla for `work`.
:param work: name of work
:type work: `str`
:rtype: `list` of `str`
### Response:
def get_sigla(self, work):
"""Returns a list of all of the sigla for `work`.
:param work: name of work
:type work: `str`
:rtype: `list` of `str`
"""
return [os.path.splitext(os.path.basename(path))[0]
for path in glob.glob(os.path.join(self._path, work, '*.txt'))] |
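For example, with a corpus directory laid out as <path>/<work>/<siglum>.txt (the corpus object and file names below are hypothetical):
# corpus/iliad/A.txt and corpus/iliad/B.txt on disk
sorted(corpus.get_sigla('iliad'))  # -> ['A', 'B']; glob order itself is not guaranteed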
def _create_popup_window(title, body):
"""
Return the layout for a pop-up window. It consists of a title bar showing
the `title` text, and a body layout. The window is surrounded by borders.
"""
assert isinstance(title, six.text_type)
assert isinstance(body, Container)
return Frame(body=body, title=title) | Return the layout for a pop-up window. It consists of a title bar showing
the `title` text, and a body layout. The window is surrounded by borders. | Below is the the instruction that describes the task:
### Input:
Return the layout for a pop-up window. It consists of a title bar showing
the `title` text, and a body layout. The window is surrounded by borders.
### Response:
def _create_popup_window(title, body):
"""
Return the layout for a pop-up window. It consists of a title bar showing
the `title` text, and a body layout. The window is surrounded by borders.
"""
assert isinstance(title, six.text_type)
assert isinstance(body, Container)
return Frame(body=body, title=title) |
def get_time_estimator(total):
"""Given a total amount of items to compute, return a function that,
if called every time an item is computed (or every step items are computed)
will give a time estimation for how long it will take to compute the whole
set of items. The function will return two values: the first is the number
of seconds that are still needed to compute the whole set, the second value
is the time in the future when the operation is expected to end.
"""
t1 = time.time()
count = [0]
def estimate_needed_time(step=1):
count[0] += step
t2 = time.time()
t3 = 1.0 * (t2 - t1) / count[0] * (total - count[0])
return t3, t3 + t1
return estimate_needed_time | Given a total amount of items to compute, return a function that,
if called every time an item is computed (or every step items are computed)
will give a time estimation for how long it will take to compute the whole
set of items. The function will return two values: the first is the number
of seconds that are still needed to compute the whole set, the second value
is the time in the future when the operation is expected to end. | Below is the the instruction that describes the task:
### Input:
Given a total amount of items to compute, return a function that,
if called every time an item is computed (or every step items are computed)
will give a time estimation for how long it will take to compute the whole
set of items. The function will return two values: the first is the number
of seconds that are still needed to compute the whole set, the second value
is the time in the future when the operation is expected to end.
### Response:
def get_time_estimator(total):
"""Given a total amount of items to compute, return a function that,
if called every time an item is computed (or every step items are computed)
will give a time estimation for how long it will take to compute the whole
set of items. The function will return two values: the first is the number
of seconds that are still needed to compute the whole set, the second value
is the time in the future when the operation is expected to end.
"""
t1 = time.time()
count = [0]
def estimate_needed_time(step=1):
count[0] += step
t2 = time.time()
t3 = 1.0 * (t2 - t1) / count[0] * (total - count[0])
return t3, t3 + t1
return estimate_needed_time |
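A usage sketch:
import time
estimator = get_time_estimator(total=100)
for _ in range(100):
    time.sleep(0.01)  # stand-in for real work on one item
    remaining, eta = estimator()
# remaining -> seconds still needed; eta -> remaining added to the start time (the docstring's expected end time)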
def run(self, cell, is_full_fc=False, parse_fc=True):
"""Make supercell force constants readable for phonopy
Note
----
Born effective charges and dielectric constant tensor are read
from QE output file if they exist. But this means
dipole-dipole contributions are removed from force constants
and this force constants matrix is not usable in phonopy.
Arguments
---------
cell : PhonopyAtoms
Primitive cell used for QE/PH calculation.
is_full_fc : Bool, optional, default=False
Whether to create full or compact force constants.
parse_fc : Bool, optional, default=True
Force constants file of QE is not parsed when this is False.
False may be used when expected to parse only epsilon and born.
"""
with open(self._filename) as f:
fc_dct = self._parse_q2r(f)
self.dimension = fc_dct['dimension']
self.epsilon = fc_dct['dielectric']
self.borns = fc_dct['born']
if parse_fc:
(self.fc,
self.primitive,
self.supercell) = self._arrange_supercell_fc(
cell, fc_dct['fc'], is_full_fc=is_full_fc) | Make supercell force constants readable for phonopy
Note
----
Born effective charges and dielectric constant tensor are read
from QE output file if they exist. But this means
dipole-dipole contributions are removed from force constants
and this force constants matrix is not usable in phonopy.
Arguments
---------
cell : PhonopyAtoms
Primitive cell used for QE/PH calculation.
is_full_fc : Bool, optional, default=False
Whether to create full or compact force constants.
parse_fc : Bool, optional, default=True
Force constants file of QE is not parsed when this is False.
False may be used when expected to parse only epsilon and born. | Below is the the instruction that describes the task:
### Input:
Make supercell force constants readable for phonopy
Note
----
Born effective charges and dielectric constant tensor are read
from QE output file if they exist. But this means
dipole-dipole contributions are removed from force constants
and this force constants matrix is not usable in phonopy.
Arguments
---------
cell : PhonopyAtoms
Primitive cell used for QE/PH calculation.
is_full_fc : Bool, optional, default=False
Whether to create full or compact force constants.
parse_fc : Bool, optional, default=True
Force constants file of QE is not parsed when this is False.
False may be used when expected to parse only epsilon and born.
### Response:
def run(self, cell, is_full_fc=False, parse_fc=True):
"""Make supercell force constants readable for phonopy
Note
----
Born effective charges and dielectric constant tensor are read
from QE output file if they exist. But this means
dipole-dipole contributions are removed from force constants
and this force constants matrix is not usable in phonopy.
Arguments
---------
cell : PhonopyAtoms
Primitive cell used for QE/PH calculation.
is_full_fc : Bool, optional, default=False
Whether to create full or compact force constants.
parse_fc : Bool, optional, default=True
Force constants file of QE is not parsed when this is False.
False may be used when expected to parse only epsilon and born.
"""
with open(self._filename) as f:
fc_dct = self._parse_q2r(f)
self.dimension = fc_dct['dimension']
self.epsilon = fc_dct['dielectric']
self.borns = fc_dct['born']
if parse_fc:
(self.fc,
self.primitive,
self.supercell) = self._arrange_supercell_fc(
cell, fc_dct['fc'], is_full_fc=is_full_fc) |
def event_handler(event_name):
"""
Decorator for designating a handler for an event type. ``event_name`` must be a string
representing the name of the event type.
The decorated function must accept a parameter: the body of the received event,
which will be a Python object that can be encoded as a JSON (dict, list, str, int,
bool, float or None)
:param event_name: The name of the event that will be handled. Only one handler per
event name is supported by the same microservice.
"""
def wrapper(func):
func._event_handler = True
func._handled_event = event_name
return func
return wrapper | Decorator for designating a handler for an event type. ``event_name`` must be a string
representing the name of the event type.
The decorated function must accept a parameter: the body of the received event,
which will be a Python object that can be encoded as a JSON (dict, list, str, int,
bool, float or None)
:param event_name: The name of the event that will be handled. Only one handler per
event name is supported by the same microservice. | Below is the the instruction that describes the task:
### Input:
Decorator for designating a handler for an event type. ``event_name`` must be a string
representing the name of the event type.
The decorated function must accept a parameter: the body of the received event,
which will be a Python object that can be encoded as a JSON (dict, list, str, int,
bool, float or None)
:param event_name: The name of the event that will be handled. Only one handler per
event name is supported by the same microservice.
### Response:
def event_handler(event_name):
"""
Decorator for designating a handler for an event type. ``event_name`` must be a string
representing the name of the event type.
The decorated function must accept a parameter: the body of the received event,
which will be a Python object that can be encoded as a JSON (dict, list, str, int,
bool, float or None)
:param event_name: The name of the event that will be handled. Only one handler per
event name is supported by the same microservice.
"""
def wrapper(func):
func._event_handler = True
func._handled_event = event_name
return func
return wrapper |
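A sketch of how a handler might be declared (the event name and class are illustrative; the surrounding microservice framework is assumed to discover methods flagged with _event_handler):
class OrderService:
    @event_handler('order.created')
    def on_order_created(self, payload):
        print('received order', payload.get('id'))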
def isatty(self): # nocover
"""
Returns true if the redirect is a terminal.
Notes:
Needed for IPython.embed to work properly when this class is used
to override stdout / stderr.
"""
return (self.redirect is not None and
hasattr(self.redirect, 'isatty') and self.redirect.isatty()) | Returns true if the redirect is a terminal.
Notes:
Needed for IPython.embed to work properly when this class is used
to override stdout / stderr. | Below is the the instruction that describes the task:
### Input:
Returns true if the redirect is a terminal.
Notes:
Needed for IPython.embed to work properly when this class is used
to override stdout / stderr.
### Response:
def isatty(self): # nocover
"""
Returns true if the redirect is a terminal.
Notes:
Needed for IPython.embed to work properly when this class is used
to override stdout / stderr.
"""
return (self.redirect is not None and
hasattr(self.redirect, 'isatty') and self.redirect.isatty()) |
def vcfchunk(data, optim, sidx, chunk, full):
"""
Function called within make_vcf to run chunks on separate engines.
"""
## empty array to be filled before writing
## will not actually be optim*maxlen, extra needs to be trimmed
maxlen = data._hackersonly["max_fragment_length"] + 20
## get data sliced (optim chunks at a time)
hslice = [chunk, chunk+optim]
## read all taxa from disk (faster), then subsample taxa with sidx and
## keepmask to greatly reduce the memory load
with h5py.File(data.database, 'r') as co5:
afilt = co5["filters"][hslice[0]:hslice[1], :]
keepmask = afilt.sum(axis=1) == 0
## apply mask to edges
aedge = co5["edges"][hslice[0]:hslice[1], :]
aedge = aedge[keepmask, :]
del afilt
## same memory subsampling.
with h5py.File(data.clust_database, 'r') as io5:
## apply mask to edges to aseqs and acatg
#aseqs = io5["seqs"][hslice[0]:hslice[1], :, :].view(np.uint8)
## need to read in seqs with upper b/c lowercase allele info
aseqs = np.char.upper(io5["seqs"][hslice[0]:hslice[1], :, :]).view(np.uint8)
aseqs = aseqs[keepmask, :]
aseqs = aseqs[:, sidx, :]
acatg = io5["catgs"][hslice[0]:hslice[1], :, :, :]
acatg = acatg[keepmask, :]
acatg = acatg[:, sidx, :, :]
achrom = io5["chroms"][hslice[0]:hslice[1]]
achrom = achrom[keepmask, :]
LOGGER.info('acatg.shape %s', acatg.shape)
## to save memory some columns are stored in diff dtypes until printing
if not full:
with h5py.File(data.database, 'r') as co5:
snps = co5["snps"][hslice[0]:hslice[1], :]
snps = snps[keepmask, :]
snps = snps.sum(axis=2)
snpidxs = snps > 0
maxsnplen = snps.sum()
## vcf info to fill, this is bigger than the actual array
nrows = maxsnplen
cols0 = np.zeros(nrows, dtype=np.int64) #h5py.special_dtype(vlen=bytes))
cols1 = np.zeros(nrows, dtype=np.uint32)
cols34 = np.zeros((nrows, 2), dtype="S5")
cols7 = np.zeros((nrows, 1), dtype="S20")
## when nsamples is high this blows up memory (e.g., dim=(5M x 500))
## so we'll instead create a list of arrays with 10 samples at a time.
## maybe later replace this with a h5 array
tmph = os.path.join(data.dirs.outfiles, ".tmp.{}.h5".format(hslice[0]))
htmp = h5py.File(tmph, 'w')
htmp.create_dataset("vcf", shape=(nrows, sum(sidx)), dtype="S24")
## which loci passed all filters
init = 0
## write loci that passed after trimming edges, then write snp string
locindex = np.where(keepmask)[0]
for iloc in xrange(aseqs.shape[0]):
edg = aedge[iloc]
## grab all seqs between edges
if not 'pair' in data.paramsdict["datatype"]:
seq = aseqs[iloc, :, edg[0]:edg[1]+1]
catg = acatg[iloc, :, edg[0]:edg[1]+1]
if not full:
snpidx = snpidxs[iloc, edg[0]:edg[1]+1]
seq = seq[:, snpidx]
catg = catg[:, snpidx]
else:
seq = np.hstack([aseqs[iloc, :, edg[0]:edg[1]+1],
aseqs[iloc, :, edg[2]:edg[3]+1]])
catg = np.hstack([acatg[iloc, :, edg[0]:edg[1]+1],
acatg[iloc, :, edg[2]:edg[3]+1]])
if not full:
snpidx = np.hstack([snpidxs[iloc, edg[0]:edg[1]+1],
snpidxs[iloc, edg[2]:edg[3]+1]])
seq = seq[:, snpidx]
catg = catg[:, snpidx]
## empty arrs to fill
alleles = np.zeros((nrows, 4), dtype=np.uint8)
genos = np.zeros((seq.shape[1], sum(sidx)), dtype="S4")
genos[:] = "./.:"
## ---- build string array ----
pos = 0
## If any < 0 this indicates an anonymous locus in denovo+ref assembly
if achrom[iloc][0] > 0:
pos = achrom[iloc][1]
cols0[init:init+seq.shape[1]] = achrom[iloc][0]
cols1[init:init+seq.shape[1]] = pos + np.where(snpidx)[0] + 1
else:
if full:
cols1[init:init+seq.shape[1]] = pos + np.arange(seq.shape[1]) + 1
else:
cols1[init:init+seq.shape[1]] = pos + np.where(snpidx)[0] + 1
cols0[init:init+seq.shape[1]] = (chunk + locindex[iloc] + 1) * -1
## fill reference base
alleles = reftrick(seq, GETCONS)
## get the info string column
tmp0 = np.sum(catg, axis=2)
tmp1 = tmp0 != 0
tmp2 = tmp1.sum(axis=1) > 0
nsamp = np.sum(tmp1, axis=0)
depth = np.sum(tmp0, axis=0)
list7 = [["NS={};DP={}".format(i, j)] for i, j in zip(nsamp, depth)]
if list7:
cols7[init:init+seq.shape[1]] = list7
## default fill cons sites where no variants
genos[tmp1.T] = "0/0:"
## fill cons genotypes for sites with alt alleles for taxa in order
mask = alleles[:, 1] == 46
mask += alleles[:, 1] == 45
obs = alleles[~mask, :]
alts = seq[:, ~mask]
who = np.where(mask == False)[0]
## fill variable sites
for site in xrange(alts.shape[1]):
bases = alts[:, site]
#LOGGER.info("bases %s", bases)
ohere = obs[site][obs[site] != 0]
#LOGGER.info("ohere %s", ohere)
alls = np.array([DCONS[i] for i in bases], dtype=np.uint32)
#LOGGER.info("all %s", alls)
for jdx in xrange(ohere.shape[0]):
alls[alls == ohere[jdx]] = jdx
#LOGGER.info("all2 %s", alls)
## fill into array
for cidx in xrange(catg.shape[0]):
if tmp2[cidx]:
if alls[cidx][0] < 5:
genos[who[site], cidx] = "/".join(alls[cidx].astype("S1").tolist())+":"
else:
genos[who[site], cidx] = "./.:"
#LOGGER.info("genos filled: %s %s %s", who[site], cidx, genos)
## build geno+depth strings
## for each taxon enter 4 catg values
fulltmp = np.zeros((seq.shape[1], catg.shape[0]), dtype="S24")
for cidx in xrange(catg.shape[0]):
## fill catgs from catgs
tmp0 = [str(i.sum()) for i in catg[cidx]]
tmp1 = [",".join(i) for i in catg[cidx].astype("S4").tolist()]
tmp2 = ["".join(i+j+":"+k) for i, j, k in zip(genos[:, cidx], tmp0, tmp1)]
## fill tmp allcidx
fulltmp[:, cidx] = tmp2
## write to h5 for this locus
htmp["vcf"][init:init+seq.shape[1], :] = fulltmp
cols34[init:init+seq.shape[1], 0] = alleles[:, 0].view("S1")
cols34[init:init+seq.shape[1], 1] = [",".join([j for j in i if j]) \
for i in alleles[:, 1:].view("S1").tolist()]
## advance counter
init += seq.shape[1]
## trim off empty rows if they exist
withdat = cols0 != 0
tot = withdat.sum()
## get scaffold names
faidict = {}
if (data.paramsdict["assembly_method"] in ["reference", "denovo+reference"]) and \
(os.path.exists(data.paramsdict["reference_sequence"])):
fai = pd.read_csv(data.paramsdict["reference_sequence"] + ".fai",
names=['scaffold', 'size', 'sumsize', 'a', 'b'],
sep="\t")
faidict = {i+1:j for i,j in enumerate(fai.scaffold)}
try:
## This is hax, but it's the only way it will work. The faidict uses positive numbers
## for reference sequence mapped loci for the CHROM/POS info, and it uses negative
## numbers for anonymous loci. Both are 1 indexed, which is where that last `+ 2` comes from.
faidict.update({-i:"locus_{}".format(i-1) for i in xrange(chunk+1, chunk + optim + 2)})
chroms = [faidict[i] for i in cols0]
except Exception as inst:
LOGGER.error("Invalid chromosome dictionary index: {}".format(inst))
LOGGER.debug("faidict {}".format([str(k)+"/"+str(v) for k, v in faidict.items() if "locus" in v]))
LOGGER.debug("chroms {}".format([x for x in cols0 if x < 0]))
raise
cols0 = np.array(chroms)
#else:
# cols0 = np.array(["locus_{}".format(i) for i in cols0-1])
## Only write if there is some data that passed filtering
if tot:
LOGGER.debug("Writing data to vcf")
if not full:
writer = open(data.outfiles.vcf+".{}".format(chunk), 'w')
else:
writer = gzip.open(data.outfiles.vcf+".{}".format(chunk), 'w')
try:
## write in iterations b/c it can be freakin huge.
## for cols0 and cols1 the 'newaxis' slice and the transpose
## are for turning the 1d arrays into column vectors.
np.savetxt(writer,
np.concatenate(
(cols0[:tot][np.newaxis].T,
cols1[:tot][np.newaxis].T,
np.array([["."]]*tot, dtype="S1"),
cols34[:tot, :],
np.array([["13", "PASS"]]*tot, dtype="S4"),
cols7[:tot, :],
np.array([["GT:DP:CATG"]]*tot, dtype="S10"),
htmp["vcf"][:tot, :],
),
axis=1),
delimiter="\t", fmt="%s")
except Exception as inst:
LOGGER.error("Error building vcf file - {}".format(inst))
raise
writer.close()
## close and remove tmp h5
htmp.close()
os.remove(tmph) | Function called within make_vcf to run chunks on separate engines. | Below is the the instruction that describes the task:
### Input:
Function called within make_vcf to run chunks on separate engines.
### Response:
def vcfchunk(data, optim, sidx, chunk, full):
"""
Function called within make_vcf to run chunks on separate engines.
"""
## empty array to be filled before writing
## will not actually be optim*maxlen, extra needs to be trimmed
maxlen = data._hackersonly["max_fragment_length"] + 20
## get data sliced (optim chunks at a time)
hslice = [chunk, chunk+optim]
## read all taxa from disk (faster), then subsample taxa with sidx and
## keepmask to greatly reduce the memory load
with h5py.File(data.database, 'r') as co5:
afilt = co5["filters"][hslice[0]:hslice[1], :]
keepmask = afilt.sum(axis=1) == 0
## apply mask to edges
aedge = co5["edges"][hslice[0]:hslice[1], :]
aedge = aedge[keepmask, :]
del afilt
## same memory subsampling.
with h5py.File(data.clust_database, 'r') as io5:
## apply mask to edges to aseqs and acatg
#aseqs = io5["seqs"][hslice[0]:hslice[1], :, :].view(np.uint8)
## need to read in seqs with upper b/c lowercase allele info
aseqs = np.char.upper(io5["seqs"][hslice[0]:hslice[1], :, :]).view(np.uint8)
aseqs = aseqs[keepmask, :]
aseqs = aseqs[:, sidx, :]
acatg = io5["catgs"][hslice[0]:hslice[1], :, :, :]
acatg = acatg[keepmask, :]
acatg = acatg[:, sidx, :, :]
achrom = io5["chroms"][hslice[0]:hslice[1]]
achrom = achrom[keepmask, :]
LOGGER.info('acatg.shape %s', acatg.shape)
## to save memory some columns are stored in diff dtypes until printing
if not full:
with h5py.File(data.database, 'r') as co5:
snps = co5["snps"][hslice[0]:hslice[1], :]
snps = snps[keepmask, :]
snps = snps.sum(axis=2)
snpidxs = snps > 0
maxsnplen = snps.sum()
## vcf info to fill, this is bigger than the actual array
nrows = maxsnplen
cols0 = np.zeros(nrows, dtype=np.int64) #h5py.special_dtype(vlen=bytes))
cols1 = np.zeros(nrows, dtype=np.uint32)
cols34 = np.zeros((nrows, 2), dtype="S5")
cols7 = np.zeros((nrows, 1), dtype="S20")
## when nsamples is high this blows up memory (e.g., dim=(5M x 500))
## so we'll instead create a list of arrays with 10 samples at a time.
## maybe later replace this with a h5 array
tmph = os.path.join(data.dirs.outfiles, ".tmp.{}.h5".format(hslice[0]))
htmp = h5py.File(tmph, 'w')
htmp.create_dataset("vcf", shape=(nrows, sum(sidx)), dtype="S24")
## which loci passed all filters
init = 0
## write loci that passed after trimming edges, then write snp string
locindex = np.where(keepmask)[0]
for iloc in xrange(aseqs.shape[0]):
edg = aedge[iloc]
## grab all seqs between edges
if not 'pair' in data.paramsdict["datatype"]:
seq = aseqs[iloc, :, edg[0]:edg[1]+1]
catg = acatg[iloc, :, edg[0]:edg[1]+1]
if not full:
snpidx = snpidxs[iloc, edg[0]:edg[1]+1]
seq = seq[:, snpidx]
catg = catg[:, snpidx]
else:
seq = np.hstack([aseqs[iloc, :, edg[0]:edg[1]+1],
aseqs[iloc, :, edg[2]:edg[3]+1]])
catg = np.hstack([acatg[iloc, :, edg[0]:edg[1]+1],
acatg[iloc, :, edg[2]:edg[3]+1]])
if not full:
snpidx = np.hstack([snpidxs[iloc, edg[0]:edg[1]+1],
snpidxs[iloc, edg[2]:edg[3]+1]])
seq = seq[:, snpidx]
catg = catg[:, snpidx]
## empty arrs to fill
alleles = np.zeros((nrows, 4), dtype=np.uint8)
genos = np.zeros((seq.shape[1], sum(sidx)), dtype="S4")
genos[:] = "./.:"
## ---- build string array ----
pos = 0
## If any < 0 this indicates an anonymous locus in denovo+ref assembly
if achrom[iloc][0] > 0:
pos = achrom[iloc][1]
cols0[init:init+seq.shape[1]] = achrom[iloc][0]
cols1[init:init+seq.shape[1]] = pos + np.where(snpidx)[0] + 1
else:
if full:
cols1[init:init+seq.shape[1]] = pos + np.arange(seq.shape[1]) + 1
else:
cols1[init:init+seq.shape[1]] = pos + np.where(snpidx)[0] + 1
cols0[init:init+seq.shape[1]] = (chunk + locindex[iloc] + 1) * -1
## fill reference base
alleles = reftrick(seq, GETCONS)
## get the info string column
tmp0 = np.sum(catg, axis=2)
tmp1 = tmp0 != 0
tmp2 = tmp1.sum(axis=1) > 0
nsamp = np.sum(tmp1, axis=0)
depth = np.sum(tmp0, axis=0)
list7 = [["NS={};DP={}".format(i, j)] for i, j in zip(nsamp, depth)]
if list7:
cols7[init:init+seq.shape[1]] = list7
## default fill cons sites where no variants
genos[tmp1.T] = "0/0:"
## fill cons genotypes for sites with alt alleles for taxa in order
mask = alleles[:, 1] == 46
mask += alleles[:, 1] == 45
obs = alleles[~mask, :]
alts = seq[:, ~mask]
who = np.where(mask == False)[0]
## fill variable sites
for site in xrange(alts.shape[1]):
bases = alts[:, site]
#LOGGER.info("bases %s", bases)
ohere = obs[site][obs[site] != 0]
#LOGGER.info("ohere %s", ohere)
alls = np.array([DCONS[i] for i in bases], dtype=np.uint32)
#LOGGER.info("all %s", alls)
for jdx in xrange(ohere.shape[0]):
alls[alls == ohere[jdx]] = jdx
#LOGGER.info("all2 %s", alls)
## fill into array
for cidx in xrange(catg.shape[0]):
if tmp2[cidx]:
if alls[cidx][0] < 5:
genos[who[site], cidx] = "/".join(alls[cidx].astype("S1").tolist())+":"
else:
genos[who[site], cidx] = "./.:"
#LOGGER.info("genos filled: %s %s %s", who[site], cidx, genos)
## build geno+depth strings
## for each taxon enter 4 catg values
fulltmp = np.zeros((seq.shape[1], catg.shape[0]), dtype="S24")
for cidx in xrange(catg.shape[0]):
## fill catgs from catgs
tmp0 = [str(i.sum()) for i in catg[cidx]]
tmp1 = [",".join(i) for i in catg[cidx].astype("S4").tolist()]
tmp2 = ["".join(i+j+":"+k) for i, j, k in zip(genos[:, cidx], tmp0, tmp1)]
## fill tmp allcidx
fulltmp[:, cidx] = tmp2
## write to h5 for this locus
htmp["vcf"][init:init+seq.shape[1], :] = fulltmp
cols34[init:init+seq.shape[1], 0] = alleles[:, 0].view("S1")
cols34[init:init+seq.shape[1], 1] = [",".join([j for j in i if j]) \
for i in alleles[:, 1:].view("S1").tolist()]
## advance counter
init += seq.shape[1]
## trim off empty rows if they exist
withdat = cols0 != 0
tot = withdat.sum()
## get scaffold names
faidict = {}
if (data.paramsdict["assembly_method"] in ["reference", "denovo+reference"]) and \
(os.path.exists(data.paramsdict["reference_sequence"])):
fai = pd.read_csv(data.paramsdict["reference_sequence"] + ".fai",
names=['scaffold', 'size', 'sumsize', 'a', 'b'],
sep="\t")
faidict = {i+1:j for i,j in enumerate(fai.scaffold)}
try:
## This is hax, but it's the only way it will work. The faidict uses positive numbers
## for reference sequence mapped loci for the CHROM/POS info, and it uses negative
## numbers for anonymous loci. Both are 1 indexed, which is where that last `+ 2` comes from.
faidict.update({-i:"locus_{}".format(i-1) for i in xrange(chunk+1, chunk + optim + 2)})
chroms = [faidict[i] for i in cols0]
except Exception as inst:
LOGGER.error("Invalid chromosome dictionary index: {}".format(inst))
LOGGER.debug("faidict {}".format([str(k)+"/"+str(v) for k, v in faidict.items() if "locus" in v]))
LOGGER.debug("chroms {}".format([x for x in cols0 if x < 0]))
raise
cols0 = np.array(chroms)
#else:
# cols0 = np.array(["locus_{}".format(i) for i in cols0-1])
## Only write if there is some data that passed filtering
if tot:
LOGGER.debug("Writing data to vcf")
if not full:
writer = open(data.outfiles.vcf+".{}".format(chunk), 'w')
else:
writer = gzip.open(data.outfiles.vcf+".{}".format(chunk), 'w')
try:
## write in iterations b/c it can be freakin huge.
## for cols0 and cols1 the 'newaxis' slice and the transpose
## are for turning the 1d arrays into column vectors.
np.savetxt(writer,
np.concatenate(
(cols0[:tot][np.newaxis].T,
cols1[:tot][np.newaxis].T,
np.array([["."]]*tot, dtype="S1"),
cols34[:tot, :],
np.array([["13", "PASS"]]*tot, dtype="S4"),
cols7[:tot, :],
np.array([["GT:DP:CATG"]]*tot, dtype="S10"),
htmp["vcf"][:tot, :],
),
axis=1),
delimiter="\t", fmt="%s")
except Exception as inst:
LOGGER.error("Error building vcf file - {}".format(inst))
raise
writer.close()
## close and remove tmp h5
htmp.close()
os.remove(tmph) |
def cancelHistoricalData(self, contracts=None):
""" cancel historical data stream """
if contracts == None:
contracts = list(self.contracts.values())
elif not isinstance(contracts, list):
contracts = [contracts]
for contract in contracts:
# tickerId = self.tickerId(contract.m_symbol)
tickerId = self.tickerId(self.contractString(contract))
self.ibConn.cancelHistoricalData(tickerId=tickerId) | cancel historical data stream | Below is the the instruction that describes the task:
### Input:
cancel historical data stream
### Response:
def cancelHistoricalData(self, contracts=None):
""" cancel historical data stream """
if contracts == None:
contracts = list(self.contracts.values())
elif not isinstance(contracts, list):
contracts = [contracts]
for contract in contracts:
# tickerId = self.tickerId(contract.m_symbol)
tickerId = self.tickerId(self.contractString(contract))
self.ibConn.cancelHistoricalData(tickerId=tickerId) |
def write_file(response, destination_folder, file_name):
"""
Write the response content in a file in the destination folder.
:param response: Response
:param destination_folder: Destination folder, string
:param file_name: File name, string
:return: bool
"""
if response.status_code == 200:
with open(os.path.join(destination_folder, file_name), 'wb') as f:
for chunk in response.iter_content(chunk_size=None):
f.write(chunk)
logger.info(f'Saved downloaded file in {f.name}')
else:
logger.warning(f'consume failed: {response.reason}') | Write the response content in a file in the destination folder.
:param response: Response
:param destination_folder: Destination folder, string
:param file_name: File name, string
:return: bool | Below is the the instruction that describes the task:
### Input:
Write the response content in a file in the destination folder.
:param response: Response
:param destination_folder: Destination folder, string
:param file_name: File name, string
:return: bool
### Response:
def write_file(response, destination_folder, file_name):
"""
Write the response content in a file in the destination folder.
:param response: Response
:param destination_folder: Destination folder, string
:param file_name: File name, string
:return: bool
"""
if response.status_code == 200:
with open(os.path.join(destination_folder, file_name), 'wb') as f:
for chunk in response.iter_content(chunk_size=None):
f.write(chunk)
logger.info(f'Saved downloaded file in {f.name}')
else:
logger.warning(f'consume failed: {response.reason}') |
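A hedged usage sketch with requests (the URL and paths are placeholders; the destination folder must already exist, and the module-level logger and f-strings assume Python 3.6+):
import requests
response = requests.get('https://example.com/report.pdf', stream=True)
write_file(response, '/tmp/downloads', 'report.pdf')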
def query_orders(self, accounts, status='filled'):
"""Query orders
Arguments:
accounts {[type]} -- [description]
Keyword Arguments:
status {str} -- 'open' pending, 'filled' executed (default: {'filled'})
Returns:
[type] -- [description]
"""
try:
data = self.call("orders", {'client': accounts, 'status': status})
if data is not None:
orders = data.get('dataTable', False)
order_headers = orders['columns']
if ('成交状态' in order_headers
or '状态说明' in order_headers) and ('备注' in order_headers):
order_headers[order_headers.index('备注')] = '废弃'
order_headers = [cn_en_compare[item] for item in order_headers]
order_all = pd.DataFrame(
orders['rows'],
columns=order_headers
).assign(account_cookie=accounts)
order_all.towards = order_all.towards.apply(
lambda x: trade_towards_cn_en[x]
)
if 'order_time' in order_headers:
# this is order_status
order_all['status'] = order_all.status.apply(
lambda x: order_status_cn_en[x]
)
if 'order_date' not in order_headers:
order_all.order_time = order_all.order_time.apply(
lambda x: QA_util_get_order_datetime(
dt='{} {}'.format(datetime.date.today(),
x)
)
)
else:
order_all = order_all.assign(
order_time=order_all.order_date
.apply(QA_util_date_int2str) + ' ' +
order_all.order_time
)
if 'trade_time' in order_headers:
order_all.trade_time = order_all.trade_time.apply(
lambda x: '{} {}'.format(datetime.date.today(),
x)
)
if status == 'filled':
return order_all.loc[:,
self.dealstatus_headers].set_index(
['account_cookie',
'realorder_id']
).sort_index()
else:
return order_all.loc[:,
self.orderstatus_headers].set_index(
['account_cookie',
'realorder_id']
).sort_index()
else:
print('response is None')
return False
except Exception as e:
print(e)
return False | Query orders
Arguments:
accounts {[type]} -- [description]
Keyword Arguments:
status {str} -- 'open' pending, 'filled' executed (default: {'filled'})
Returns:
[type] -- [description] | Below is the the instruction that describes the task:
### Input:
Query orders
Arguments:
accounts {[type]} -- [description]
Keyword Arguments:
status {str} -- 'open' pending, 'filled' executed (default: {'filled'})
Returns:
[type] -- [description]
### Response:
def query_orders(self, accounts, status='filled'):
"""Query orders
Arguments:
accounts {[type]} -- [description]
Keyword Arguments:
status {str} -- 'open' pending, 'filled' executed (default: {'filled'})
Returns:
[type] -- [description]
"""
try:
data = self.call("orders", {'client': accounts, 'status': status})
if data is not None:
orders = data.get('dataTable', False)
order_headers = orders['columns']
if ('成交状态' in order_headers
or '状态说明' in order_headers) and ('备注' in order_headers):
order_headers[order_headers.index('备注')] = '废弃'
order_headers = [cn_en_compare[item] for item in order_headers]
order_all = pd.DataFrame(
orders['rows'],
columns=order_headers
).assign(account_cookie=accounts)
order_all.towards = order_all.towards.apply(
lambda x: trade_towards_cn_en[x]
)
if 'order_time' in order_headers:
# this is order_status
order_all['status'] = order_all.status.apply(
lambda x: order_status_cn_en[x]
)
if 'order_date' not in order_headers:
order_all.order_time = order_all.order_time.apply(
lambda x: QA_util_get_order_datetime(
dt='{} {}'.format(datetime.date.today(),
x)
)
)
else:
order_all = order_all.assign(
order_time=order_all.order_date
.apply(QA_util_date_int2str) + ' ' +
order_all.order_time
)
if 'trade_time' in order_headers:
order_all.trade_time = order_all.trade_time.apply(
lambda x: '{} {}'.format(datetime.date.today(),
x)
)
if status == 'filled':
return order_all.loc[:,
self.dealstatus_headers].set_index(
['account_cookie',
'realorder_id']
).sort_index()
else:
return order_all.loc[:,
self.orderstatus_headers].set_index(
['account_cookie',
'realorder_id']
).sort_index()
else:
print('response is None')
return False
except Exception as e:
print(e)
return False |
def tox_configure(config):
"""Check for the presence of the added options."""
if 'TRAVIS' not in os.environ:
return
ini = config._cfg
# envlist
if 'TOXENV' not in os.environ and not config.option.env:
envlist = detect_envlist(ini)
undeclared = set(envlist) - set(config.envconfigs)
if undeclared:
print('Matching undeclared envs is deprecated. Be sure all the '
'envs that Tox should run are declared in the tox config.',
file=sys.stderr)
autogen_envconfigs(config, undeclared)
config.envlist = envlist
# Override ignore_outcomes
if override_ignore_outcome(ini):
for envconfig in config.envconfigs.values():
envconfig.ignore_outcome = False
# after
if config.option.travis_after:
print('The after all feature has been deprecated. Check out Travis\' '
'build stages, which are a better solution. '
'See https://tox-travis.readthedocs.io/en/stable/after.html '
'for more details.', file=sys.stderr) | Check for the presence of the added options. | Below is the the instruction that describes the task:
### Input:
Check for the presence of the added options.
### Response:
def tox_configure(config):
"""Check for the presence of the added options."""
if 'TRAVIS' not in os.environ:
return
ini = config._cfg
# envlist
if 'TOXENV' not in os.environ and not config.option.env:
envlist = detect_envlist(ini)
undeclared = set(envlist) - set(config.envconfigs)
if undeclared:
print('Matching undeclared envs is deprecated. Be sure all the '
'envs that Tox should run are declared in the tox config.',
file=sys.stderr)
autogen_envconfigs(config, undeclared)
config.envlist = envlist
# Override ignore_outcomes
if override_ignore_outcome(ini):
for envconfig in config.envconfigs.values():
envconfig.ignore_outcome = False
# after
if config.option.travis_after:
print('The after all feature has been deprecated. Check out Travis\' '
'build stages, which are a better solution. '
'See https://tox-travis.readthedocs.io/en/stable/after.html '
'for more details.', file=sys.stderr) |
def assign_type(mol, force_recalc=False):
""" PATTY [Bush et al. J. Inf. Comput. Sci 33 (1993) 756-762]
TODO: not yet implemented
1:cation 2:anion 3:donor 4:acceptor
5:polar 6:hydrophobe 7:others
"""
if "PATTY" in mol.descriptors and not force_recalc:
return
mol.require("Phys_charge")
for i, atom in mol.atoms_iter():
# default is 7 (others)
nbrcnt = mol.neighbor_count(i)
if atom.charge > 0 or atom.charge_phys > 0 or \
atom.charge_conj > 0 and not atom.n_oxide:
atom.type = 1 # cation
elif atom.charge < 0 or atom.charge_phys < 0 or \
atom.charge_conj < 0 and not atom.n_oxide:
atom.type = 2 # anion
elif atom.symbol == "N":
if nbrcnt in (1, 2):
if atom.pi == 2:
atom.type = 3 # donor
elif atom.pi == 1:
atom.type = 4 # acceptor
elif atom.symbol == "O":
if nbrcnt == 1 and not atom.pi:
atom.type = 5 # polar
else:
atom.type = 4 # acceptor
elif atom.symbol in ("C", "Si", "S", "Se", "P", "As"):
ewg = False
for n, bond in mol.neighbors(i).items():
natom = mol.atom(n)
if natom.symbol in ("N", "O", "S") and atom.pi \
and not (natom.pi == 2 and mol.neighbor_count(n) == 3):
# the sp2 adjacent to neg (but not conj tert amine) is 7
ewg = True
break
if not ewg:
atom.type = 6 # hydrophobes
elif atom.symbol in ("F", "Cl", "Br", "I") and nbrcnt == 1:
atom.type = 6 # typical halogens are hydrophobic
mol.descriptors.add("PATTY") | PATTY [Bush et al. J. Inf. Comput. Sci 33 (1993) 756-762]
TODO: not yet implemented
1:cation 2:anion 3:donor 4:acceptor
5:polar 6:hydrophobe 7:others | Below is the the instruction that describes the task:
### Input:
PATTY [Bush et al. J. Inf. Comput. Sci 33 (1993) 756-762]
TODO: not yet implemented
1:cation 2:anion 3:donor 4:acceptor
5:polar 6:hydrophobe 7:others
### Response:
def assign_type(mol, force_recalc=False):
""" PATTY [Bush et al. J. Inf. Comput. Sci 33 (1993) 756-762]
TODO: not yet implemented
1:cation 2:anion 3:donor 4:acceptor
5:polar 6:hydrophobe 7:others
"""
if "PATTY" in mol.descriptors and not force_recalc:
return
mol.require("Phys_charge")
for i, atom in mol.atoms_iter():
# default is 7 (others)
nbrcnt = mol.neighbor_count(i)
if atom.charge > 0 or atom.charge_phys > 0 or \
atom.charge_conj > 0 and not atom.n_oxide:
atom.type = 1 # cation
elif atom.charge < 0 or atom.charge_phys < 0 or \
atom.charge_conj < 0 and not atom.n_oxide:
atom.type = 2 # anion
elif atom.symbol == "N":
if nbrcnt in (1, 2):
if atom.pi == 2:
atom.type = 3 # donor
elif atom.pi == 1:
atom.type = 4 # acceptor
elif atom.symbol == "O":
if nbrcnt == 1 and not atom.pi:
atom.type = 5 # polar
else:
atom.type = 4 # acceptor
elif atom.symbol in ("C", "Si", "S", "Se", "P", "As"):
ewg = False
for n, bond in mol.neighbors(i).items():
natom = mol.atom(n)
if natom.symbol in ("N", "O", "S") and atom.pi \
and not (natom.pi == 2 and mol.neighbor_count(n) == 3):
# the sp2 adjacent to neg (but not conj tert amine) is 7
ewg = True
break
if not ewg:
atom.type = 6 # hydrophobes
elif atom.symbol in ("F", "Cl", "Br", "I") and nbrcnt == 1:
atom.type = 6 # typical halogens are hydrophobic
mol.descriptors.add("PATTY") |
def _process_axsettings(self, hist, lims, ticks):
"""
Get axis settings options including ticks, x- and y-labels
and limits.
"""
axis_settings = dict(zip(self.axis_settings, [None, None, (None if self.overlaid else ticks)]))
return axis_settings | Get axis settings options including ticks, x- and y-labels
and limits. | Below is the the instruction that describes the task:
### Input:
Get axis settings options including ticks, x- and y-labels
and limits.
### Response:
def _process_axsettings(self, hist, lims, ticks):
"""
Get axis settings options including ticks, x- and y-labels
and limits.
"""
axis_settings = dict(zip(self.axis_settings, [None, None, (None if self.overlaid else ticks)]))
return axis_settings |
def write_newick(rootnode,
features=None,
format=1,
format_root_node=True,
is_leaf_fn=None,
dist_formatter=None,
support_formatter=None,
name_formatter=None):
"""
Iteratively export a tree structure and returns its NHX
representation.
"""
newick = []
leaf = is_leaf_fn if is_leaf_fn else lambda n: not bool(n.children)
for postorder, node in rootnode.iter_prepostorder(is_leaf_fn=is_leaf_fn):
if postorder:
newick.append(")")
if node.up is not None or format_root_node:
newick.append(format_node(node, "internal", format,
dist_formatter=dist_formatter,
support_formatter=support_formatter,
name_formatter=name_formatter))
newick.append(_get_features_string(node, features))
else:
if node is not rootnode and node != node.up.children[0]:
newick.append(",")
if leaf(node):
safe_name = re.sub("["+_ILEGAL_NEWICK_CHARS+"]", "_", \
str(getattr(node, "name")))
newick.append(format_node(node, "leaf", format,
dist_formatter=dist_formatter,
support_formatter=support_formatter,
name_formatter=name_formatter))
newick.append(_get_features_string(node, features))
else:
newick.append("(")
newick.append(";")
return ''.join(newick) | Iteratively export a tree structure and returns its NHX
representation. | Below is the the instruction that describes the task:
### Input:
Iteratively export a tree structure and returns its NHX
representation.
### Response:
def write_newick(rootnode,
features=None,
format=1,
format_root_node=True,
is_leaf_fn=None,
dist_formatter=None,
support_formatter=None,
name_formatter=None):
"""
Iteratively export a tree structure and returns its NHX
representation.
"""
newick = []
leaf = is_leaf_fn if is_leaf_fn else lambda n: not bool(n.children)
for postorder, node in rootnode.iter_prepostorder(is_leaf_fn=is_leaf_fn):
if postorder:
newick.append(")")
if node.up is not None or format_root_node:
newick.append(format_node(node, "internal", format,
dist_formatter=dist_formatter,
support_formatter=support_formatter,
name_formatter=name_formatter))
newick.append(_get_features_string(node, features))
else:
if node is not rootnode and node != node.up.children[0]:
newick.append(",")
if leaf(node):
safe_name = re.sub("["+_ILEGAL_NEWICK_CHARS+"]", "_", \
str(getattr(node, "name")))
newick.append(format_node(node, "leaf", format,
dist_formatter=dist_formatter,
support_formatter=support_formatter,
name_formatter=name_formatter))
newick.append(_get_features_string(node, features))
else:
newick.append("(")
newick.append(";")
return ''.join(newick) |
def children(self):
"""
The list of child messages and actions sorted by task level, excluding the
start and end messages.
"""
return pvector(
sorted(self._children.values(), key=lambda m: m.task_level)) | The list of child messages and actions sorted by task level, excluding the
start and end messages. | Below is the the instruction that describes the task:
### Input:
The list of child messages and actions sorted by task level, excluding the
start and end messages.
### Response:
def children(self):
"""
The list of child messages and actions sorted by task level, excluding the
start and end messages.
"""
return pvector(
sorted(self._children.values(), key=lambda m: m.task_level)) |
def feat_segments_to_2dfmc_max(feat_segments, offset=4):
"""From a list of feature segments, return a list of 2D-Fourier Magnitude
Coefs using the maximum segment size as main size and zero pad the rest.
Parameters
----------
feat_segments: list
List of segments, one for each boundary interval.
offset: int >= 0
Number of frames to ignore from beginning and end of each segment.
Returns
-------
fmcs: np.ndarray
Tensor containing the 2D-FMC matrices, one matrix per segment.
"""
if len(feat_segments) == 0:
return []
# Get maximum segment size
max_len = max([feat_segment.shape[0] for feat_segment in feat_segments])
fmcs = []
for feat_segment in feat_segments:
# Zero pad if needed
X = np.zeros((max_len, feat_segment.shape[1]))
# Remove a set of frames in the beginning and end of the segment
if feat_segment.shape[0] <= offset or offset == 0:
X[:feat_segment.shape[0], :] = feat_segment
else:
X[:feat_segment.shape[0] - offset, :] = \
feat_segment[offset // 2:-offset // 2, :]
# Compute the 2D-FMC
try:
fmcs.append(utils2d.compute_ffmc2d(X))
except:
logging.warning("Couldn't compute the 2D Fourier Transform")
fmcs.append(np.zeros((X.shape[0] * X.shape[1]) // 2 + 1))
# Normalize
# fmcs[-1] = fmcs[-1] / float(fmcs[-1].max())
return np.asarray(fmcs) | From a list of feature segments, return a list of 2D-Fourier Magnitude
Coefs using the maximum segment size as main size and zero pad the rest.
Parameters
----------
feat_segments: list
List of segments, one for each boundary interval.
offset: int >= 0
Number of frames to ignore from beginning and end of each segment.
Returns
-------
fmcs: np.ndarray
Tensor containing the 2D-FMC matrices, one matrix per segment. | Below is the the instruction that describes the task:
### Input:
From a list of feature segments, return a list of 2D-Fourier Magnitude
Coefs using the maximum segment size as main size and zero pad the rest.
Parameters
----------
feat_segments: list
List of segments, one for each boundary interval.
offset: int >= 0
Number of frames to ignore from beginning and end of each segment.
Returns
-------
fmcs: np.ndarray
Tensor containing the 2D-FMC matrices, one matrix per segment.
### Response:
def feat_segments_to_2dfmc_max(feat_segments, offset=4):
"""From a list of feature segments, return a list of 2D-Fourier Magnitude
Coefs using the maximum segment size as main size and zero pad the rest.
Parameters
----------
feat_segments: list
List of segments, one for each boundary interval.
offset: int >= 0
Number of frames to ignore from beginning and end of each segment.
Returns
-------
fmcs: np.ndarray
Tensor containing the 2D-FMC matrices, one matrix per segment.
"""
if len(feat_segments) == 0:
return []
# Get maximum segment size
max_len = max([feat_segment.shape[0] for feat_segment in feat_segments])
fmcs = []
for feat_segment in feat_segments:
# Zero pad if needed
X = np.zeros((max_len, feat_segment.shape[1]))
# Remove a set of frames in the beginning and end of the segment
if feat_segment.shape[0] <= offset or offset == 0:
X[:feat_segment.shape[0], :] = feat_segment
else:
X[:feat_segment.shape[0] - offset, :] = \
feat_segment[offset // 2:-offset // 2, :]
# Compute the 2D-FMC
try:
fmcs.append(utils2d.compute_ffmc2d(X))
except:
logging.warning("Couldn't compute the 2D Fourier Transform")
fmcs.append(np.zeros((X.shape[0] * X.shape[1]) // 2 + 1))
# Normalize
# fmcs[-1] = fmcs[-1] / float(fmcs[-1].max())
return np.asarray(fmcs) |
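A minimal usage sketch for the function above — illustrative only: it assumes feat_segments_to_2dfmc_max and its numpy/utils2d dependencies are importable, and the segment shapes are made up.
import numpy as np
# Three toy feature segments of different lengths but the same feature dimension.
segments = [np.random.rand(20, 12), np.random.rand(35, 12), np.random.rand(8, 12)]
fmcs = feat_segments_to_2dfmc_max(segments, offset=4)
print(fmcs.shape)  # one row of 2D-FMC coefficients per input segment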
def atanh(x, context=None):
"""
Return the inverse hyperbolic tangent of x.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_atanh,
(BigFloat._implicit_convert(x),),
context,
) | Return the inverse hyperbolic tangent of x. | Below is the the instruction that describes the task:
### Input:
Return the inverse hyperbolic tangent of x.
### Response:
def atanh(x, context=None):
"""
Return the inverse hyperbolic tangent of x.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_atanh,
(BigFloat._implicit_convert(x),),
context,
) |
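A short usage sketch, assuming this atanh comes from the bigfloat package and that its precision context manager is available (an assumption, not confirmed by the record above):
from bigfloat import atanh, precision
with precision(100):
    print(atanh(0.5))  # inverse hyperbolic tangent of 0.5 at 100 bits of precision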
def _GetArgsFromRequest(self, request, method_metadata, route_args):
"""Builds args struct out of HTTP request."""
format_mode = GetRequestFormatMode(request, method_metadata)
if request.method in ["GET", "HEAD"]:
if method_metadata.args_type:
unprocessed_request = request.args
if hasattr(unprocessed_request, "dict"):
unprocessed_request = unprocessed_request.dict()
args = method_metadata.args_type()
for type_info in args.type_infos:
if type_info.name in route_args:
self._SetField(args, type_info, route_args[type_info.name])
elif type_info.name in unprocessed_request:
self._SetField(args, type_info, unprocessed_request[type_info.name])
else:
args = None
elif request.method in ["POST", "DELETE", "PATCH"]:
try:
args = method_metadata.args_type()
for type_info in args.type_infos:
if type_info.name in route_args:
self._SetField(args, type_info, route_args[type_info.name])
if request.content_type and request.content_type.startswith(
"multipart/form-data;"):
payload = json.Parse(request.form["_params_"].decode("utf-8"))
args.FromDict(payload)
for name, fd in iteritems(request.files):
args.Set(name, fd.read())
elif format_mode == JsonMode.PROTO3_JSON_MODE:
# NOTE: Arguments rdfvalue has to be a protobuf-based RDFValue.
args_proto = args.protobuf()
json_format.Parse(request.get_data(as_text=True) or "{}", args_proto)
args.ParseFromString(args_proto.SerializeToString())
else:
json_data = request.get_data(as_text=True) or "{}"
payload = json.Parse(json_data)
if payload:
args.FromDict(payload)
except Exception as e: # pylint: disable=broad-except
logging.exception("Error while parsing POST request %s (%s): %s",
request.path, request.method, e)
raise PostRequestParsingError(e)
else:
raise UnsupportedHttpMethod("Unsupported method: %s." % request.method)
return args | Builds args struct out of HTTP request. | Below is the the instruction that describes the task:
### Input:
Builds args struct out of HTTP request.
### Response:
def _GetArgsFromRequest(self, request, method_metadata, route_args):
"""Builds args struct out of HTTP request."""
format_mode = GetRequestFormatMode(request, method_metadata)
if request.method in ["GET", "HEAD"]:
if method_metadata.args_type:
unprocessed_request = request.args
if hasattr(unprocessed_request, "dict"):
unprocessed_request = unprocessed_request.dict()
args = method_metadata.args_type()
for type_info in args.type_infos:
if type_info.name in route_args:
self._SetField(args, type_info, route_args[type_info.name])
elif type_info.name in unprocessed_request:
self._SetField(args, type_info, unprocessed_request[type_info.name])
else:
args = None
elif request.method in ["POST", "DELETE", "PATCH"]:
try:
args = method_metadata.args_type()
for type_info in args.type_infos:
if type_info.name in route_args:
self._SetField(args, type_info, route_args[type_info.name])
if request.content_type and request.content_type.startswith(
"multipart/form-data;"):
payload = json.Parse(request.form["_params_"].decode("utf-8"))
args.FromDict(payload)
for name, fd in iteritems(request.files):
args.Set(name, fd.read())
elif format_mode == JsonMode.PROTO3_JSON_MODE:
# NOTE: Arguments rdfvalue has to be a protobuf-based RDFValue.
args_proto = args.protobuf()
json_format.Parse(request.get_data(as_text=True) or "{}", args_proto)
args.ParseFromString(args_proto.SerializeToString())
else:
json_data = request.get_data(as_text=True) or "{}"
payload = json.Parse(json_data)
if payload:
args.FromDict(payload)
except Exception as e: # pylint: disable=broad-except
logging.exception("Error while parsing POST request %s (%s): %s",
request.path, request.method, e)
raise PostRequestParsingError(e)
else:
raise UnsupportedHttpMethod("Unsupported method: %s." % request.method)
return args |
def get_tc_device(self):
"""
Return the device name associated with the network communication direction.
"""
if self.direction == TrafficDirection.OUTGOING:
return self.device
if self.direction == TrafficDirection.INCOMING:
return self.ifb_device
raise ParameterError(
"unknown direction", expected=TrafficDirection.LIST, value=self.direction
) | Return the device name associated with the network communication direction. | Below is the the instruction that describes the task:
### Input:
Return the device name associated with the network communication direction.
### Response:
def get_tc_device(self):
"""
Return the device name associated with the network communication direction.
"""
if self.direction == TrafficDirection.OUTGOING:
return self.device
if self.direction == TrafficDirection.INCOMING:
return self.ifb_device
raise ParameterError(
"unknown direction", expected=TrafficDirection.LIST, value=self.direction
) |
def probe_async(self, callback):
"""Send advertisements for all connected devices.
Args:
callback (callable): A callback for when the probe operation has completed.
callback should have signature callback(adapter_id, success, failure_reason) where:
success: bool
failure_reason: None if success is True, otherwise a reason for why we could not probe
"""
def _on_finished(_name, control_info, exception):
if exception is not None:
callback(self.id, False, str(exception))
return
self._control_info = control_info
try:
info = {
'connection_string': "direct",
'uuid': control_info.uuid,
'signal_strength': 100
}
self._trigger_callback('on_scan', self.id, info, self.ExpirationTime)
finally:
callback(self.id, True, None)
self._control_thread.command(JLinkControlThread.FIND_CONTROL, _on_finished, self._device_info.ram_start, self._device_info.ram_size) | Send advertisements for all connected devices.
Args:
callback (callable): A callback for when the probe operation has completed.
callback should have signature callback(adapter_id, success, failure_reason) where:
success: bool
failure_reason: None if success is True, otherwise a reason for why we could not probe | Below is the the instruction that describes the task:
### Input:
Send advertisements for all connected devices.
Args:
callback (callable): A callback for when the probe operation has completed.
callback should have signature callback(adapter_id, success, failure_reason) where:
success: bool
failure_reason: None if success is True, otherwise a reason for why we could not probe
### Response:
def probe_async(self, callback):
"""Send advertisements for all connected devices.
Args:
callback (callable): A callback for when the probe operation has completed.
callback should have signature callback(adapter_id, success, failure_reason) where:
success: bool
failure_reason: None if success is True, otherwise a reason for why we could not probe
"""
def _on_finished(_name, control_info, exception):
if exception is not None:
callback(self.id, False, str(exception))
return
self._control_info = control_info
try:
info = {
'connection_string': "direct",
'uuid': control_info.uuid,
'signal_strength': 100
}
self._trigger_callback('on_scan', self.id, info, self.ExpirationTime)
finally:
callback(self.id, True, None)
self._control_thread.command(JLinkControlThread.FIND_CONTROL, _on_finished, self._device_info.ram_start, self._device_info.ram_size) |
def load_fixture(self, body, attachment_bodies={}):
'''
Loads the document into the database from json string. Fakes the
attachments if necessary.'''
doc = json.loads(body)
self._documents[doc['_id']] = doc
self._attachments[doc['_id']] = dict()
for name in doc.get('_attachments', list()):
attachment_body = attachment_bodies.get(name, 'stub')
self._attachments[doc['_id']][name] = attachment_body | Loads the document into the database from json string. Fakes the
attachments if necessary. | Below is the the instruction that describes the task:
### Input:
Loads the document into the database from json string. Fakes the
attachments if necessary.
### Response:
def load_fixture(self, body, attachment_bodies={}):
'''
Loads the document into the database from json string. Fakes the
attachments if necessary.'''
doc = json.loads(body)
self._documents[doc['_id']] = doc
self._attachments[doc['_id']] = dict()
for name in doc.get('_attachments', list()):
attachment_body = attachment_bodies.get(name, 'stub')
self._attachments[doc['_id']][name] = attachment_body |
def compute(self, config, budget, **kwargs):
"""
Simple example for a compute function
The loss is just the config + some noise (that decreases with the budget)
For dramatization, the function can sleep for a given interval to emphasize
the speed ups achievable with parallel workers.
Args:
config: dictionary containing the sampled configurations by the optimizer
budget: (float) amount of time/epochs/etc. the model can use to train
Returns:
dictionary with mandatory fields:
'loss' (scalar)
'info' (dict)
"""
res = numpy.clip(config['x'] + numpy.random.randn()/budget, config['x']/2, 1.5*config['x'])
time.sleep(self.sleep_interval)
return({
'loss': float(res), # this is a mandatory field to run hyperband
'info': res # can be used for any user-defined information - also mandatory
}) | Simple example for a compute function
The loss is just the config + some noise (that decreases with the budget)
For dramatization, the function can sleep for a given interval to emphasize
the speed ups achievable with parallel workers.
Args:
config: dictionary containing the sampled configurations by the optimizer
budget: (float) amount of time/epochs/etc. the model can use to train
Returns:
dictionary with mandatory fields:
'loss' (scalar)
'info' (dict) | Below is the the instruction that describes the task:
### Input:
Simple example for a compute function
The loss is just the config + some noise (that decreases with the budget)
For dramatization, the function can sleep for a given interval to emphasize
the speed ups achievable with parallel workers.
Args:
config: dictionary containing the sampled configurations by the optimizer
budget: (float) amount of time/epochs/etc. the model can use to train
Returns:
dictionary with mandatory fields:
'loss' (scalar)
'info' (dict)
### Response:
def compute(self, config, budget, **kwargs):
"""
Simple example for a compute function
The loss is just the config + some noise (that decreases with the budget)
For dramatization, the function can sleep for a given interval to emphasize
the speed ups achievable with parallel workers.
Args:
config: dictionary containing the sampled configurations by the optimizer
budget: (float) amount of time/epochs/etc. the model can use to train
Returns:
dictionary with mandatory fields:
'loss' (scalar)
'info' (dict)
"""
res = numpy.clip(config['x'] + numpy.random.randn()/budget, config['x']/2, 1.5*config['x'])
time.sleep(self.sleep_interval)
return({
'loss': float(res), # this is a mandatory field to run hyperband
'info': res # can be used for any user-defined information - also mandatory
}) |
def count_mnemonic(self, mnemonic, uwis=None, alias=None):
"""
Counts the wells that have a given curve, given the mnemonic and an
alias dict.
"""
all_mnemonics = self.get_mnemonics([mnemonic], uwis=uwis, alias=alias)
return len(list(filter(None, utils.flatten_list(all_mnemonics)))) | Counts the wells that have a given curve, given the mnemonic and an
alias dict. | Below is the the instruction that describes the task:
### Input:
Counts the wells that have a given curve, given the mnemonic and an
alias dict.
### Response:
def count_mnemonic(self, mnemonic, uwis=None, alias=None):
"""
Counts the wells that have a given curve, given the mnemonic and an
alias dict.
"""
all_mnemonics = self.get_mnemonics([mnemonic], uwis=uwis, alias=alias)
return len(list(filter(None, utils.flatten_list(all_mnemonics)))) |
def json(self, *, # type: ignore
loads: Callable[[Any], Any]=json.loads) -> None:
"""Return parsed JSON data.
.. versionadded:: 0.22
"""
return loads(self.data) | Return parsed JSON data.
.. versionadded:: 0.22 | Below is the the instruction that describes the task:
### Input:
Return parsed JSON data.
.. versionadded:: 0.22
### Response:
def json(self, *, # type: ignore
loads: Callable[[Any], Any]=json.loads) -> None:
"""Return parsed JSON data.
.. versionadded:: 0.22
"""
return loads(self.data) |
def area_difference(item_a, time_a, item_b, time_b, max_value):
"""
RMS Difference in object areas.
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
"""
size_a = item_a.size(time_a)
size_b = item_b.size(time_b)
diff = np.sqrt((size_a - size_b) ** 2)
return np.minimum(diff, max_value) / float(max_value) | RMS Difference in object areas.
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1. | Below is the the instruction that describes the task:
### Input:
RMS Difference in object areas.
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
### Response:
def area_difference(item_a, time_a, item_b, time_b, max_value):
"""
RMS Difference in object areas.
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
"""
size_a = item_a.size(time_a)
size_b = item_b.size(time_b)
diff = np.sqrt((size_a - size_b) ** 2)
return np.minimum(diff, max_value) / float(max_value) |
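To illustrate the scaling, a hedged sketch with stand-in objects — real callers pass STObject instances; SimpleNamespace is used here only to provide the .size(time) interface the function relies on:
from types import SimpleNamespace
a = SimpleNamespace(size=lambda t: 120.0)
b = SimpleNamespace(size=lambda t: 80.0)
# |120 - 80| = 40, capped at max_value=100, then scaled to 40 / 100 = 0.4
print(area_difference(a, 0, b, 0, max_value=100.0))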
def parent_tags(self):
"""Provides tags of all parent HTML elements."""
tags = set()
for addr in self._addresses:
if addr.attr == 'text':
tags.add(addr.element.tag)
tags.update(el.tag for el in addr.element.iterancestors())
tags.discard(HTMLFragment._root_tag)
return frozenset(tags) | Provides tags of all parent HTML elements. | Below is the the instruction that describes the task:
### Input:
Provides tags of all parent HTML elements.
### Response:
def parent_tags(self):
"""Provides tags of all parent HTML elements."""
tags = set()
for addr in self._addresses:
if addr.attr == 'text':
tags.add(addr.element.tag)
tags.update(el.tag for el in addr.element.iterancestors())
tags.discard(HTMLFragment._root_tag)
return frozenset(tags) |
def initialize(self, emt_id, emt_pass):
"""Manual initialization of the interface attributes.
This is useful when the interface must be declared but initialized later
on with parsed configuration values.
Args:
emt_id (str): ID given by the server upon registration
emt_pass (str): Token given by the server upon registration
"""
self._emt_id = emt_id
self._emt_pass = emt_pass
# Initialize modules
self.bus = BusApi(self)
self.geo = GeoApi(self)
self.parking = ParkingApi(self) | Manual initialization of the interface attributes.
This is useful when the interface must be declared but initialized later
on with parsed configuration values.
Args:
emt_id (str): ID given by the server upon registration
emt_pass (str): Token given by the server upon registration | Below is the the instruction that describes the task:
### Input:
Manual initialization of the interface attributes.
This is useful when the interface must be declared but initialized later
on with parsed configuration values.
Args:
emt_id (str): ID given by the server upon registration
emt_pass (str): Token given by the server upon registration
### Response:
def initialize(self, emt_id, emt_pass):
"""Manual initialization of the interface attributes.
This is useful when the interface must be declared but initialized later
on with parsed configuration values.
Args:
emt_id (str): ID given by the server upon registration
emt_pass (str): Token given by the server upon registration
"""
self._emt_id = emt_id
self._emt_pass = emt_pass
# Initialize modules
self.bus = BusApi(self)
self.geo = GeoApi(self)
self.parking = ParkingApi(self) |
def _get_qvm_based_on_real_device(name: str, device: Device,
noisy: bool, connection: ForestConnection = None,
qvm_type: str = 'qvm'):
"""
A qvm based on a real device.
This is the most realistic QVM.
:param name: The full name of this QVM
:param device: The device from :py:func:`get_lattice`.
:param noisy: Whether to construct a noisy quantum computer by using the device's
associated noise model.
:param connection: An optional :py:class:`ForestConnection` object. If not specified,
the default values for URL endpoints will be used.
:return: A pre-configured QuantumComputer based on the named device.
"""
if noisy:
noise_model = device.noise_model
else:
noise_model = None
return _get_qvm_qc(name=name, connection=connection, device=device,
noise_model=noise_model, requires_executable=True,
qvm_type=qvm_type) | A qvm based on a real device.
This is the most realistic QVM.
:param name: The full name of this QVM
:param device: The device from :py:func:`get_lattice`.
:param noisy: Whether to construct a noisy quantum computer by using the device's
associated noise model.
:param connection: An optional :py:class:`ForestConnection` object. If not specified,
the default values for URL endpoints will be used.
:return: A pre-configured QuantumComputer based on the named device. | Below is the the instruction that describes the task:
### Input:
A qvm based on a real device.
This is the most realistic QVM.
:param name: The full name of this QVM
:param device: The device from :py:func:`get_lattice`.
:param noisy: Whether to construct a noisy quantum computer by using the device's
associated noise model.
:param connection: An optional :py:class:`ForestConnection` object. If not specified,
the default values for URL endpoints will be used.
:return: A pre-configured QuantumComputer based on the named device.
### Response:
def _get_qvm_based_on_real_device(name: str, device: Device,
noisy: bool, connection: ForestConnection = None,
qvm_type: str = 'qvm'):
"""
A qvm based on a real device.
This is the most realistic QVM.
:param name: The full name of this QVM
:param device: The device from :py:func:`get_lattice`.
:param noisy: Whether to construct a noisy quantum computer by using the device's
associated noise model.
:param connection: An optional :py:class:`ForestConnection` object. If not specified,
the default values for URL endpoints will be used.
:return: A pre-configured QuantumComputer based on the named device.
"""
if noisy:
noise_model = device.noise_model
else:
noise_model = None
return _get_qvm_qc(name=name, connection=connection, device=device,
noise_model=noise_model, requires_executable=True,
qvm_type=qvm_type) |
def get_next_invalid_time_from_t(self, timestamp):
"""Get next invalid time for time range
:param timestamp: time we compute from
:type timestamp: int
:return: timestamp of the next invalid time (LOCAL TIME)
:rtype: int
"""
if not self.is_time_valid(timestamp):
return timestamp
# First we search for the day of time range
t_day = self.get_next_invalid_day(timestamp)
# We search for the min of all tr.start > sec_from_morning
# if it's the next day, use a start of the day search for timerange
if timestamp < t_day:
sec_from_morning = self.get_next_future_timerange_invalid(t_day)
else: # it is in this day, so look from t (can be in the evening or so)
sec_from_morning = self.get_next_future_timerange_invalid(timestamp)
# tr can't be valid, or it will be returned at the beginning
# sec_from_morning = self.get_next_future_timerange_invalid(t)
# Ok we've got a next invalid day and an invalid possibility in
# timerange, so the next invalid is this day+sec_from_morning
if t_day is not None and sec_from_morning is not None:
return t_day + sec_from_morning + 1
# We've got a day but no sec_from_morning: the timerange is full (0->24h)
# so the next invalid is this day at the day_start
if t_day is not None and sec_from_morning is None:
return t_day
# Then we search for the next day of t
# The sec will be the min of the day
timestamp = get_day(timestamp) + 86400
t_day2 = self.get_next_invalid_day(timestamp)
sec_from_morning = self.get_next_future_timerange_invalid(t_day2)
if t_day2 is not None and sec_from_morning is not None:
return t_day2 + sec_from_morning + 1
if t_day2 is not None and sec_from_morning is None:
return t_day2
# I did not find any valid time
return None | Get next invalid time for time range
:param timestamp: time we compute from
:type timestamp: int
:return: timestamp of the next invalid time (LOCAL TIME)
:rtype: int | Below is the the instruction that describes the task:
### Input:
Get next invalid time for time range
:param timestamp: time we compute from
:type timestamp: int
:return: timestamp of the next invalid time (LOCAL TIME)
:rtype: int
### Response:
def get_next_invalid_time_from_t(self, timestamp):
"""Get next invalid time for time range
:param timestamp: time we compute from
:type timestamp: int
:return: timestamp of the next invalid time (LOCAL TIME)
:rtype: int
"""
if not self.is_time_valid(timestamp):
return timestamp
# First we search for the day of time range
t_day = self.get_next_invalid_day(timestamp)
# We search for the min of all tr.start > sec_from_morning
# if it's the next day, use a start of the day search for timerange
if timestamp < t_day:
sec_from_morning = self.get_next_future_timerange_invalid(t_day)
else: # it is in this day, so look from t (can be in the evening or so)
sec_from_morning = self.get_next_future_timerange_invalid(timestamp)
# tr can't be valid, or it will be returned at the beginning
# sec_from_morning = self.get_next_future_timerange_invalid(t)
# Ok we've got a next invalid day and an invalid possibility in
# timerange, so the next invalid is this day+sec_from_morning
if t_day is not None and sec_from_morning is not None:
return t_day + sec_from_morning + 1
# We've got a day but no sec_from_morning: the timerange is full (0->24h)
# so the next invalid is this day at the day_start
if t_day is not None and sec_from_morning is None:
return t_day
# Then we search for the next day of t
# The sec will be the min of the day
timestamp = get_day(timestamp) + 86400
t_day2 = self.get_next_invalid_day(timestamp)
sec_from_morning = self.get_next_future_timerange_invalid(t_day2)
if t_day2 is not None and sec_from_morning is not None:
return t_day2 + sec_from_morning + 1
if t_day2 is not None and sec_from_morning is None:
return t_day2
# I did not found any valid time
return None |
def create_sequence_readers(sources: List[str], target: str,
vocab_sources: List[vocab.Vocab],
vocab_target: vocab.Vocab) -> Tuple[List[SequenceReader], SequenceReader]:
"""
Create source readers with EOS and target readers with BOS.
:param sources: The file names of source data and factors.
:param target: The file name of the target data.
:param vocab_sources: The source vocabularies.
:param vocab_target: The target vocabularies.
:return: The source sequence readers and the target reader.
"""
source_sequence_readers = [SequenceReader(source, vocab, add_eos=True) for source, vocab in
zip(sources, vocab_sources)]
target_sequence_reader = SequenceReader(target, vocab_target, add_bos=True)
return source_sequence_readers, target_sequence_reader | Create source readers with EOS and target readers with BOS.
:param sources: The file names of source data and factors.
:param target: The file name of the target data.
:param vocab_sources: The source vocabularies.
:param vocab_target: The target vocabularies.
:return: The source sequence readers and the target reader. | Below is the the instruction that describes the task:
### Input:
Create source readers with EOS and target readers with BOS.
:param sources: The file names of source data and factors.
:param target: The file name of the target data.
:param vocab_sources: The source vocabularies.
:param vocab_target: The target vocabularies.
:return: The source sequence readers and the target reader.
### Response:
def create_sequence_readers(sources: List[str], target: str,
vocab_sources: List[vocab.Vocab],
vocab_target: vocab.Vocab) -> Tuple[List[SequenceReader], SequenceReader]:
"""
Create source readers with EOS and target readers with BOS.
:param sources: The file names of source data and factors.
:param target: The file name of the target data.
:param vocab_sources: The source vocabularies.
:param vocab_target: The target vocabularies.
:return: The source sequence readers and the target reader.
"""
source_sequence_readers = [SequenceReader(source, vocab, add_eos=True) for source, vocab in
zip(sources, vocab_sources)]
target_sequence_reader = SequenceReader(target, vocab_target, add_bos=True)
return source_sequence_readers, target_sequence_reader |
def Free(self):
'''
Frees the memory used by all of the dynamically allocated C arrays.
'''
if self.arrays._calloc:
_dbl_free(self.arrays._time)
_dbl_free(self.arrays._flux)
_dbl_free(self.arrays._bflx)
_dbl_free(self.arrays._M)
_dbl_free(self.arrays._E)
_dbl_free(self.arrays._f)
_dbl_free(self.arrays._r)
_dbl_free(self.arrays._x)
_dbl_free(self.arrays._y)
_dbl_free(self.arrays._z)
self.arrays._calloc = 0
if self.arrays._balloc:
_dbl_free(self.arrays._b)
self.arrays._balloc = 0
if self.arrays._ialloc:
_dbl_free(self.arrays._iarr)
self.arrays._ialloc = 0 | Frees the memory used by all of the dynamically allocated C arrays. | Below is the the instruction that describes the task:
### Input:
Frees the memory used by all of the dynamically allocated C arrays.
### Response:
def Free(self):
'''
Frees the memory used by all of the dynamically allocated C arrays.
'''
if self.arrays._calloc:
_dbl_free(self.arrays._time)
_dbl_free(self.arrays._flux)
_dbl_free(self.arrays._bflx)
_dbl_free(self.arrays._M)
_dbl_free(self.arrays._E)
_dbl_free(self.arrays._f)
_dbl_free(self.arrays._r)
_dbl_free(self.arrays._x)
_dbl_free(self.arrays._y)
_dbl_free(self.arrays._z)
self.arrays._calloc = 0
if self.arrays._balloc:
_dbl_free(self.arrays._b)
self.arrays._balloc = 0
if self.arrays._ialloc:
_dbl_free(self.arrays._iarr)
self.arrays._ialloc = 0 |
def _load(self, load_dict):
"""Reconstructs the data and exploration array.
Checks if it can find the array identifier in the `load_dict`, i.e. '__rr__'.
If not calls :class:`~pypet.parameter.Parameter._load` of the parent class.
If the parameter is explored, the exploration range of arrays is reconstructed
as it was stored in :func:`~pypet.parameter.ArrayParameter._store`.
"""
if self.v_locked:
raise pex.ParameterLockedException('Parameter `%s` is locked!' % self.v_full_name)
try:
self._data = load_dict['data' + ArrayParameter.IDENTIFIER]
if 'explored_data' + ArrayParameter.IDENTIFIER in load_dict:
explore_table = load_dict['explored_data' + ArrayParameter.IDENTIFIER]
idx = explore_table['idx']
explore_list = []
# Recall the arrays in the order stored in the ObjectTable 'explored_data__rr__'
for name_idx in idx:
arrayname = self._build_name(name_idx)
explore_list.append(load_dict[arrayname])
self._explored_range = [x for x in explore_list]
self._explored = True
except KeyError:
super(ArrayParameter, self)._load(load_dict)
self._default = self._data
self._locked = True | Reconstructs the data and exploration array.
Checks if it can find the array identifier in the `load_dict`, i.e. '__rr__'.
If not calls :class:`~pypet.parameter.Parameter._load` of the parent class.
If the parameter is explored, the exploration range of arrays is reconstructed
as it was stored in :func:`~pypet.parameter.ArrayParameter._store`. | Below is the the instruction that describes the task:
### Input:
Reconstructs the data and exploration array.
Checks if it can find the array identifier in the `load_dict`, i.e. '__rr__'.
If not calls :class:`~pypet.parameter.Parameter._load` of the parent class.
If the parameter is explored, the exploration range of arrays is reconstructed
as it was stored in :func:`~pypet.parameter.ArrayParameter._store`.
### Response:
def _load(self, load_dict):
"""Reconstructs the data and exploration array.
Checks if it can find the array identifier in the `load_dict`, i.e. '__rr__'.
If not calls :class:`~pypet.parameter.Parameter._load` of the parent class.
If the parameter is explored, the exploration range of arrays is reconstructed
as it was stored in :func:`~pypet.parameter.ArrayParameter._store`.
"""
if self.v_locked:
raise pex.ParameterLockedException('Parameter `%s` is locked!' % self.v_full_name)
try:
self._data = load_dict['data' + ArrayParameter.IDENTIFIER]
if 'explored_data' + ArrayParameter.IDENTIFIER in load_dict:
explore_table = load_dict['explored_data' + ArrayParameter.IDENTIFIER]
idx = explore_table['idx']
explore_list = []
# Recall the arrays in the order stored in the ObjectTable 'explored_data__rr__'
for name_idx in idx:
arrayname = self._build_name(name_idx)
explore_list.append(load_dict[arrayname])
self._explored_range = [x for x in explore_list]
self._explored = True
except KeyError:
super(ArrayParameter, self)._load(load_dict)
self._default = self._data
self._locked = True |
def Tm_depression_eutectic(Tm, Hm, x=None, M=None, MW=None):
r'''Returns the freezing point depression caused by a solute in a solvent.
Can use either the mole fraction of the solute or its molality and the
molecular weight of the solvent. Assumes ideal system behavior.
.. math::
\Delta T_m = \frac{R T_m^2 x}{\Delta H_m}
\Delta T_m = \frac{R T_m^2 (MW) M}{1000 \Delta H_m}
Parameters
----------
Tm : float
Melting temperature of the solute [K]
Hm : float
Heat of melting at the melting temperature of the solute [J/mol]
x : float, optional
Mole fraction of the solute [-]
M : float, optional
Molality [mol/kg]
MW: float, optional
Molecular weight of the solvent [g/mol]
Returns
-------
dTm : float
Freezing point depression [K]
Notes
-----
MW is the molecular weight of the solvent. M is the molality of the solute.
Examples
--------
From [1]_, matching example.
>>> Tm_depression_eutectic(353.35, 19110, .02)
1.0864594900639515
References
----------
.. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012.
'''
if x:
dTm = R*Tm**2*x/Hm
elif M and MW:
MW = MW/1000. #g/mol to kg/mol
dTm = R*Tm**2*MW*M/Hm
else:
raise Exception('Either molality or mole fraction of the solute must be specified; MW of the solvent is required also if molality is provided')
return dTm | r'''Returns the freezing point depression caused by a solute in a solvent.
Can use either the mole fraction of the solute or its molality and the
molecular weight of the solvent. Assumes ideal system behavior.
.. math::
\Delta T_m = \frac{R T_m^2 x}{\Delta H_m}
\Delta T_m = \frac{R T_m^2 (MW) M}{1000 \Delta H_m}
Parameters
----------
Tm : float
Melting temperature of the solute [K]
Hm : float
Heat of melting at the melting temperature of the solute [J/mol]
x : float, optional
Mole fraction of the solute [-]
M : float, optional
Molality [mol/kg]
MW: float, optional
Molecular weight of the solvent [g/mol]
Returns
-------
dTm : float
Freezing point depression [K]
Notes
-----
MW is the molecular weight of the solvent. M is the molality of the solute.
Examples
--------
From [1]_, matching example.
>>> Tm_depression_eutectic(353.35, 19110, .02)
1.0864594900639515
References
----------
.. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012. | Below is the the instruction that describes the task:
### Input:
r'''Returns the freezing point depression caused by a solute in a solvent.
Can use either the mole fraction of the solute or its molality and the
molecular weight of the solvent. Assumes ideal system behavior.
.. math::
\Delta T_m = \frac{R T_m^2 x}{\Delta H_m}
\Delta T_m = \frac{R T_m^2 (MW) M}{1000 \Delta H_m}
Parameters
----------
Tm : float
Melting temperature of the solute [K]
Hm : float
Heat of melting at the melting temperature of the solute [J/mol]
x : float, optional
Mole fraction of the solute [-]
M : float, optional
Molality [mol/kg]
MW: float, optional
Molecular weight of the solvent [g/mol]
Returns
-------
dTm : float
Freezing point depression [K]
Notes
-----
MW is the molecular weight of the solvent. M is the molality of the solute.
Examples
--------
From [1]_, matching example.
>>> Tm_depression_eutectic(353.35, 19110, .02)
1.0864594900639515
References
----------
.. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012.
### Response:
def Tm_depression_eutectic(Tm, Hm, x=None, M=None, MW=None):
r'''Returns the freezing point depression caused by a solute in a solvent.
Can use either the mole fraction of the solute or its molality and the
molecular weight of the solvent. Assumes ideal system behavior.
.. math::
\Delta T_m = \frac{R T_m^2 x}{\Delta H_m}
\Delta T_m = \frac{R T_m^2 (MW) M}{1000 \Delta H_m}
Parameters
----------
Tm : float
Melting temperature of the solute [K]
Hm : float
Heat of melting at the melting temperature of the solute [J/mol]
x : float, optional
Mole fraction of the solute [-]
M : float, optional
Molality [mol/kg]
MW: float, optional
Molecular weight of the solvent [g/mol]
Returns
-------
dTm : float
Freezing point depression [K]
Notes
-----
MW is the molecular weight of the solvent. M is the molality of the solute.
Examples
--------
From [1]_, matching example.
>>> Tm_depression_eutectic(353.35, 19110, .02)
1.0864594900639515
References
----------
.. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012.
'''
if x:
dTm = R*Tm**2*x/Hm
elif M and MW:
MW = MW/1000. #g/mol to kg/mol
dTm = R*Tm**2*MW*M/Hm
else:
raise Exception('Either molality or mole fraction of the solute must be specified; MW of the solvent is required also if molality is provided')
return dTm |
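As a cross-check of the two input routes, a sketch assuming the function above is in scope; the solvent molecular weight is an illustrative benzene-like value and the molality is derived from the same 0.02 mole fraction:
Tm, Hm, x, MW = 353.35, 19110.0, 0.02, 78.11  # MW is an assumed solvent value [g/mol]
M = x / ((1.0 - x) * MW / 1000.0)             # mole fraction -> molality [mol/kg]
print(Tm_depression_eutectic(Tm, Hm, x=x))          # ~1.086 K, the docstring example
print(Tm_depression_eutectic(Tm, Hm, M=M, MW=MW))   # ~1.109 K; this route effectively divides by (1 - x)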
def to_dict(self):
"""Transforms the object to a Python dictionary.
Note:
If an Input hasn't been signed yet, this method returns a
dictionary representation.
Returns:
dict: The Input as an alternative serialization format.
"""
try:
fulfillment = self.fulfillment.serialize_uri()
except (TypeError, AttributeError, ASN1EncodeError, ASN1DecodeError):
fulfillment = _fulfillment_to_details(self.fulfillment)
try:
# NOTE: `self.fulfills` can be `None` and that's fine
fulfills = self.fulfills.to_dict()
except AttributeError:
fulfills = None
input_ = {
'owners_before': self.owners_before,
'fulfills': fulfills,
'fulfillment': fulfillment,
}
return input_ | Transforms the object to a Python dictionary.
Note:
If an Input hasn't been signed yet, this method returns a
dictionary representation.
Returns:
dict: The Input as an alternative serialization format. | Below is the the instruction that describes the task:
### Input:
Transforms the object to a Python dictionary.
Note:
If an Input hasn't been signed yet, this method returns a
dictionary representation.
Returns:
dict: The Input as an alternative serialization format.
### Response:
def to_dict(self):
"""Transforms the object to a Python dictionary.
Note:
If an Input hasn't been signed yet, this method returns a
dictionary representation.
Returns:
dict: The Input as an alternative serialization format.
"""
try:
fulfillment = self.fulfillment.serialize_uri()
except (TypeError, AttributeError, ASN1EncodeError, ASN1DecodeError):
fulfillment = _fulfillment_to_details(self.fulfillment)
try:
# NOTE: `self.fulfills` can be `None` and that's fine
fulfills = self.fulfills.to_dict()
except AttributeError:
fulfills = None
input_ = {
'owners_before': self.owners_before,
'fulfills': fulfills,
'fulfillment': fulfillment,
}
return input_ |
def norm_l0(x, axis=None, eps=0.0):
r"""Compute the :math:`\ell_0` "norm" (it is not really a norm)
.. math::
\| \mathbf{x} \|_0 = \sum_i \left\{ \begin{array}{ccc}
0 & \text{if} & x_i = 0 \\ 1 &\text{if} & x_i \neq 0
\end{array} \right.
where :math:`x_i` is element :math:`i` of vector :math:`\mathbf{x}`.
Parameters
----------
x : array_like
Input array :math:`\mathbf{x}`
axis : `None` or int or tuple of ints, optional (default None)
Axes of `x` over which to compute the :math:`\ell_0` "norm". If
`None`, an entire multi-dimensional array is treated as a
vector. If axes are specified, then distinct values are computed
over the indices of the remaining axes of input array `x`.
eps : float, optional (default 0.0)
Absolute value threshold below which a number is considered to be
zero.
Returns
-------
nl0 : float or ndarray
Norm of `x`, or array of norms treating specified axes of `x`
as a vector
"""
nl0 = np.sum(np.abs(x) > eps, axis=axis, keepdims=True)
# If the result has a single element, convert it to a scalar
if nl0.size == 1:
nl0 = nl0.ravel()[0]
return nl0 | r"""Compute the :math:`\ell_0` "norm" (it is not really a norm)
.. math::
\| \mathbf{x} \|_0 = \sum_i \left\{ \begin{array}{ccc}
0 & \text{if} & x_i = 0 \\ 1 &\text{if} & x_i \neq 0
\end{array} \right.
where :math:`x_i` is element :math:`i` of vector :math:`\mathbf{x}`.
Parameters
----------
x : array_like
Input array :math:`\mathbf{x}`
axis : `None` or int or tuple of ints, optional (default None)
Axes of `x` over which to compute the :math:`\ell_0` "norm". If
`None`, an entire multi-dimensional array is treated as a
vector. If axes are specified, then distinct values are computed
over the indices of the remaining axes of input array `x`.
eps : float, optional (default 0.0)
Absolute value threshold below which a number is considered to be
zero.
Returns
-------
nl0 : float or ndarray
Norm of `x`, or array of norms treating specified axes of `x`
as a vector | Below is the the instruction that describes the task:
### Input:
r"""Compute the :math:`\ell_0` "norm" (it is not really a norm)
.. math::
\| \mathbf{x} \|_0 = \sum_i \left\{ \begin{array}{ccc}
0 & \text{if} & x_i = 0 \\ 1 &\text{if} & x_i \neq 0
\end{array} \right.
where :math:`x_i` is element :math:`i` of vector :math:`\mathbf{x}`.
Parameters
----------
x : array_like
Input array :math:`\mathbf{x}`
axis : `None` or int or tuple of ints, optional (default None)
Axes of `x` over which to compute the :math:`\ell_0` "norm". If
`None`, an entire multi-dimensional array is treated as a
vector. If axes are specified, then distinct values are computed
over the indices of the remaining axes of input array `x`.
eps : float, optional (default 0.0)
Absolute value threshold below which a number is considered to be
zero.
Returns
-------
nl0 : float or ndarray
Norm of `x`, or array of norms treating specified axes of `x`
as a vector
### Response:
def norm_l0(x, axis=None, eps=0.0):
r"""Compute the :math:`\ell_0` "norm" (it is not really a norm)
.. math::
\| \mathbf{x} \|_0 = \sum_i \left\{ \begin{array}{ccc}
0 & \text{if} & x_i = 0 \\ 1 &\text{if} & x_i \neq 0
\end{array} \right.
where :math:`x_i` is element :math:`i` of vector :math:`\mathbf{x}`.
Parameters
----------
x : array_like
Input array :math:`\mathbf{x}`
axis : `None` or int or tuple of ints, optional (default None)
Axes of `x` over which to compute the :math:`\ell_0` "norm". If
`None`, an entire multi-dimensional array is treated as a
vector. If axes are specified, then distinct values are computed
over the indices of the remaining axes of input array `x`.
eps : float, optional (default 0.0)
Absolute value threshold below which a number is considered to be
zero.
Returns
-------
nl0 : float or ndarray
Norm of `x`, or array of norms treating specified axes of `x`
as a vector
"""
nl0 = np.sum(np.abs(x) > eps, axis=axis, keepdims=True)
# If the result has a single element, convert it to a scalar
if nl0.size == 1:
nl0 = nl0.ravel()[0]
return nl0 |
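A short usage sketch, assuming the norm_l0 function above is in scope:
import numpy as np
x = np.array([[0.0, 1.5, 0.0],
              [2.0, 0.0, 3.0]])
print(norm_l0(x))                   # 3 -- non-zero entries in the whole array
print(norm_l0(x, axis=0))           # [[1 1 1]] -- non-zero count per column (keepdims)
print(norm_l0(x, axis=0, eps=1.6))  # [[1 0 1]] -- only entries with |x| > 1.6 count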
def run(self):
"""
Run the database seeds.
"""
self.factory.register(User, self.users_factory)
self.factory(User, 50).create() | Run the database seeds. | Below is the the instruction that describes the task:
### Input:
Run the database seeds.
### Response:
def run(self):
"""
Run the database seeds.
"""
self.factory.register(User, self.users_factory)
self.factory(User, 50).create() |
def _get_date_time_format(dt_string):
'''
Function that detects the date/time format for the string passed.
:param str dt_string:
A date/time string
:return: The format of the passed dt_string
:rtype: str
:raises: SaltInvocationError on Invalid Date/Time string
'''
valid_formats = [
'%H:%M',
'%H:%M:%S',
'%m:%d:%y',
'%m:%d:%Y',
'%m/%d/%y',
'%m/%d/%Y'
]
for dt_format in valid_formats:
try:
datetime.strptime(dt_string, dt_format)
return dt_format
except ValueError:
continue
msg = 'Invalid Date/Time Format: {0}'.format(dt_string)
raise SaltInvocationError(msg) | Function that detects the date/time format for the string passed.
:param str dt_string:
A date/time string
:return: The format of the passed dt_string
:rtype: str
:raises: SaltInvocationError on Invalid Date/Time string | Below is the the instruction that describes the task:
### Input:
Function that detects the date/time format for the string passed.
:param str dt_string:
A date/time string
:return: The format of the passed dt_string
:rtype: str
:raises: SaltInvocationError on Invalid Date/Time string
### Response:
def _get_date_time_format(dt_string):
'''
Function that detects the date/time format for the string passed.
:param str dt_string:
A date/time string
:return: The format of the passed dt_string
:rtype: str
:raises: SaltInvocationError on Invalid Date/Time string
'''
valid_formats = [
'%H:%M',
'%H:%M:%S',
'%m:%d:%y',
'%m:%d:%Y',
'%m/%d/%y',
'%m/%d/%Y'
]
for dt_format in valid_formats:
try:
datetime.strptime(dt_string, dt_format)
return dt_format
except ValueError:
continue
msg = 'Invalid Date/Time Format: {0}'.format(dt_string)
raise SaltInvocationError(msg) |
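A usage sketch, assuming _get_date_time_format and its datetime/SaltInvocationError imports are available as defined above:
print(_get_date_time_format('13:45'))       # '%H:%M'
print(_get_date_time_format('11/30/2024'))  # '%m/%d/%Y'
# _get_date_time_format('30/11/2024') would raise SaltInvocationError (day-first strings are not accepted)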
def ListChildren(self, urn, limit=None, age=NEWEST_TIME):
"""Lists a bunch of directories efficiently.
Args:
urn: Urn to list children.
limit: Max number of children to list.
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range.
Returns:
RDFURNs instances of each child.
"""
_, children_urns = list(
self.MultiListChildren([urn], limit=limit, age=age))[0]
return children_urns | Lists a bunch of directories efficiently.
Args:
urn: Urn to list children.
limit: Max number of children to list.
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range.
Returns:
RDFURNs instances of each child. | Below is the the instruction that describes the task:
### Input:
Lists a bunch of directories efficiently.
Args:
urn: Urn to list children.
limit: Max number of children to list.
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range.
Returns:
RDFURNs instances of each child.
### Response:
def ListChildren(self, urn, limit=None, age=NEWEST_TIME):
"""Lists a bunch of directories efficiently.
Args:
urn: Urn to list children.
limit: Max number of children to list.
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range.
Returns:
RDFURNs instances of each child.
"""
_, children_urns = list(
self.MultiListChildren([urn], limit=limit, age=age))[0]
return children_urns |
def dump(self, raw=False):
''' Dump all output currently in the arm's output queue. '''
raw_out = self.ser.read(self.ser.in_waiting)
if raw:
return raw_out
return raw_out.decode(OUTPUT_ENCODING) | Dump all output currently in the arm's output queue. | Below is the the instruction that describes the task:
### Input:
Dump all output currently in the arm's output queue.
### Response:
def dump(self, raw=False):
''' Dump all output currently in the arm's output queue. '''
raw_out = self.ser.read(self.ser.in_waiting)
if raw:
return raw_out
return raw_out.decode(OUTPUT_ENCODING) |
def inquire_by_mech(self, mech, name=True, init_lifetime=True,
accept_lifetime=True, usage=True):
"""Inspect these credentials for per-mechanism information
This method inspects these credentials for per-mechanism information
about them.
Args:
mech (OID): the mechanism for which to retrieve the information
name (bool): get the name associated with the credentials
init_lifetime (bool): get the remaining initiate lifetime for
the credentials
accept_lifetime (bool): get the remaining accept lifetime for
the credentials
usage (bool): get the usage for the credentials
Returns:
InquireCredByMechResult: the information about the credentials,
with None used when the corresponding argument was False
"""
res = rcreds.inquire_cred_by_mech(self, mech, name, init_lifetime,
accept_lifetime, usage)
if res.name is not None:
res_name = names.Name(res.name)
else:
res_name = None
return tuples.InquireCredByMechResult(res_name,
res.init_lifetime,
res.accept_lifetime,
res.usage) | Inspect these credentials for per-mechanism information
This method inspects these credentials for per-mechanism information
about them.
Args:
mech (OID): the mechanism for which to retrieve the information
name (bool): get the name associated with the credentials
init_lifetime (bool): get the remaining initiate lifetime for
the credentials
accept_lifetime (bool): get the remaining accept lifetime for
the credentials
usage (bool): get the usage for the credentials
Returns:
InquireCredByMechResult: the information about the credentials,
with None used when the corresponding argument was False | Below is the the instruction that describes the task:
### Input:
Inspect these credentials for per-mechanism information
This method inspects these credentials for per-mechanism information
about them.
Args:
mech (OID): the mechanism for which to retrieve the information
name (bool): get the name associated with the credentials
init_lifetime (bool): get the remaining initiate lifetime for
the credentials
accept_lifetime (bool): get the remaining accept lifetime for
the credentials
usage (bool): get the usage for the credentials
Returns:
InquireCredByMechResult: the information about the credentials,
with None used when the corresponding argument was False
### Response:
def inquire_by_mech(self, mech, name=True, init_lifetime=True,
accept_lifetime=True, usage=True):
"""Inspect these credentials for per-mechanism information
This method inspects these credentials for per-mechanism information
about them.
Args:
mech (OID): the mechanism for which to retrieve the information
name (bool): get the name associated with the credentials
init_lifetime (bool): get the remaining initiate lifetime for
the credentials
accept_lifetime (bool): get the remaining accept lifetime for
the credentials
usage (bool): get the usage for the credentials
Returns:
InquireCredByMechResult: the information about the credentials,
with None used when the corresponding argument was False
"""
res = rcreds.inquire_cred_by_mech(self, mech, name, init_lifetime,
accept_lifetime, usage)
if res.name is not None:
res_name = names.Name(res.name)
else:
res_name = None
return tuples.InquireCredByMechResult(res_name,
res.init_lifetime,
res.accept_lifetime,
res.usage) |
def get(self, task_id=None, params=None):
"""
Retrieve information for a particular task.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html>`_
:arg task_id: Return the task with specified id (node_id:task_number)
:arg wait_for_completion: Wait for the matching tasks to complete
(default: false)
:arg timeout: Maximum waiting time for `wait_for_completion`
"""
return self.transport.perform_request('GET', _make_path('_tasks',
task_id), params=params) | Retrieve information for a particular task.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html>`_
:arg task_id: Return the task with specified id (node_id:task_number)
:arg wait_for_completion: Wait for the matching tasks to complete
(default: false)
:arg timeout: Maximum waiting time for `wait_for_completion` | Below is the the instruction that describes the task:
### Input:
Retrieve information for a particular task.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html>`_
:arg task_id: Return the task with specified id (node_id:task_number)
:arg wait_for_completion: Wait for the matching tasks to complete
(default: false)
:arg timeout: Maximum waiting time for `wait_for_completion`
### Response:
def get(self, task_id=None, params=None):
"""
Retrieve information for a particular task.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html>`_
:arg task_id: Return the task with specified id (node_id:task_number)
:arg wait_for_completion: Wait for the matching tasks to complete
(default: false)
:arg timeout: Maximum waiting time for `wait_for_completion`
"""
return self.transport.perform_request('GET', _make_path('_tasks',
task_id), params=params) |
def fully_scope_function_calls(node: ast.Tree, expression: ast.Expression, function_set: OrderedDict) -> ast.Expression:
"""
Turns the function references in this expression into fully scoped
references (e.g. relative to absolute). The component references of all
referenced functions are put into the functions set.
:param node: collection for performing symbol lookup etc.
:param expression: original expression
:param function_set: output of function component references
:return:
"""
expression_copy = copy.deepcopy(expression)
w = TreeWalker()
w.walk(FunctionExpander(node, function_set), expression_copy)
return expression_copy | Turns the function references in this expression into fully scoped
references (e.g. relative to absolute). The component references of all
referenced functions are put into the functions set.
:param node: collection for performing symbol lookup etc.
:param expression: original expression
:param function_set: output of function component references
:return: | Below is the the instruction that describes the task:
### Input:
Turns the function references in this expression into fully scoped
references (e.g. relative to absolute). The component references of all
referenced functions are put into the functions set.
:param node: collection for performing symbol lookup etc.
:param expression: original expression
:param function_set: output of function component references
:return:
### Response:
def fully_scope_function_calls(node: ast.Tree, expression: ast.Expression, function_set: OrderedDict) -> ast.Expression:
"""
Turns the function references in this expression into fully scoped
references (e.g. relative to absolute). The component references of all
referenced functions are put into the functions set.
:param node: collection for performing symbol lookup etc.
:param expression: original expression
:param function_set: output of function component references
:return:
"""
expression_copy = copy.deepcopy(expression)
w = TreeWalker()
w.walk(FunctionExpander(node, function_set), expression_copy)
return expression_copy |
def _detect_cms(self, tries=0):
"""
Detect CMS using whatcms.org.
Has a re-try mechanism because false negatives may occur
:param tries: Count of tries for CMS discovery
"""
# WhatCMS is under CloudFlare which detects and blocks proxied/Tor traffic, hence normal request.
page = requests.get(url="https://whatcms.org/?s={}".format(self.host.target))
soup = BeautifulSoup(page.text, "lxml")
found = soup.select(".panel.panel-success")
if found:
try:
cms = [a for a in soup.select("a") if "/c/" in a.get("href")][0]
self.logger.info("{} CMS detected: target is using {}{}{}".format(
COLORED_COMBOS.GOOD, COLOR.GREEN, cms.get("title"), COLOR.RESET))
except IndexError:
if tries >= 4:
return
else:
self._detect_cms(tries=tries + 1)
else:
if tries >= 4:
return
else:
self._detect_cms(tries=tries + 1) | Detect CMS using whatcms.org.
Has a re-try mechanism because false negatives may occur
:param tries: Count of tries for CMS discovery | Below is the the instruction that describes the task:
### Input:
Detect CMS using whatcms.org.
Has a re-try mechanism because false negatives may occur
:param tries: Count of tries for CMS discovery
### Response:
def _detect_cms(self, tries=0):
"""
Detect CMS using whatcms.org.
Has a re-try mechanism because false negatives may occur
:param tries: Count of tries for CMS discovery
"""
# WhatCMS is under CloudFlare which detects and blocks proxied/Tor traffic, hence normal request.
page = requests.get(url="https://whatcms.org/?s={}".format(self.host.target))
soup = BeautifulSoup(page.text, "lxml")
found = soup.select(".panel.panel-success")
if found:
try:
cms = [a for a in soup.select("a") if "/c/" in a.get("href")][0]
self.logger.info("{} CMS detected: target is using {}{}{}".format(
COLORED_COMBOS.GOOD, COLOR.GREEN, cms.get("title"), COLOR.RESET))
except IndexError:
if tries >= 4:
return
else:
self._detect_cms(tries=tries + 1)
else:
if tries >= 4:
return
else:
self._detect_cms(tries=tries + 1) |
def _setupParseTree(self, rowFrom, rowTo, colIndex, tree):
""" Build the search tree for multi-character encodings.
"""
if colIndex == self._longestEntry:
return
prevchar = None
rowIndex = rowFrom
while rowIndex <= rowTo:
if colIndex < len(self._parsedata[rowIndex]):
c = self._parsedata[rowIndex][colIndex]
if c != prevchar:
tree[c] = {}
if prevchar is not None:
self._setupParseTree(rowFrom, rowIndex - 1, colIndex + 1, tree[prevchar])
rowFrom = rowIndex
prevchar = c
if rowIndex == rowTo:
self._setupParseTree(rowFrom, rowIndex, colIndex + 1, tree[prevchar])
rowIndex = rowIndex + 1 | Build the search tree for multi-character encodings. | Below is the instruction that describes the task:
### Input:
Build the search tree for multi-character encodings.
### Response:
def _setupParseTree(self, rowFrom, rowTo, colIndex, tree):
""" Build the search tree for multi-character encodings.
"""
if colIndex == self._longestEntry:
return
prevchar = None
rowIndex = rowFrom
while rowIndex <= rowTo:
if colIndex < len(self._parsedata[rowIndex]):
c = self._parsedata[rowIndex][colIndex]
if c != prevchar:
tree[c] = {}
if prevchar is not None:
self._setupParseTree(rowFrom, rowIndex - 1, colIndex + 1, tree[prevchar])
rowFrom = rowIndex
prevchar = c
if rowIndex == rowTo:
self._setupParseTree(rowFrom, rowIndex, colIndex + 1, tree[prevchar])
rowIndex = rowIndex + 1 |
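A minimal standalone sketch of the same idea, building a nested-dict search tree (trie) from multi-character entries; the helper name and the sample entries are illustrative only and not part of the original class:
def build_search_tree(entries):
    tree = {}
    for entry in entries:
        node = tree
        for ch in entry:
            node = node.setdefault(ch, {})  # descend, creating a branch per character
    return tree

print(build_search_tree(["ab", "ac", "b"]))  # {'a': {'b': {}, 'c': {}}, 'b': {}}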
def cause_mip(self, mechanism, purview):
"""Return the irreducibility analysis for the cause MIP.
Alias for |find_mip()| with ``direction`` set to |CAUSE|.
"""
return self.find_mip(Direction.CAUSE, mechanism, purview) | Return the irreducibility analysis for the cause MIP.
Alias for |find_mip()| with ``direction`` set to |CAUSE|. | Below is the instruction that describes the task:
### Input:
Return the irreducibility analysis for the cause MIP.
Alias for |find_mip()| with ``direction`` set to |CAUSE|.
### Response:
def cause_mip(self, mechanism, purview):
"""Return the irreducibility analysis for the cause MIP.
Alias for |find_mip()| with ``direction`` set to |CAUSE|.
"""
return self.find_mip(Direction.CAUSE, mechanism, purview) |
def squareform_isfc(isfcs, iscs=None):
"""Converts square ISFCs to condensed ISFCs (and ISCs), and vice-versa
If input is a 2- or 3-dimensional array of square ISFC matrices, converts
this to the condensed off-diagonal ISFC values (i.e., the vectorized
triangle) and the diagonal ISC values. In this case, input must be a
single array of shape either n_voxels x n_voxels or n_subjects (or
n_pairs) x n_voxels x n_voxels. The condensed ISFC values are vectorized
according to scipy.spatial.distance.squareform, yielding n_voxels *
(n_voxels - 1) / 2 values comprising every voxel pair. Alternatively, if
input is an array of condensed off-diagonal ISFC values and an array of
diagonal ISC values, the square (redundant) ISFC values are returned.
This function mimics scipy.spatial.distance.squareform, but is intended
to retain the diagonal ISC values.
Parameters
----------
isfcs : ndarray
Either condensed or redundant ISFC values
iscs: ndarray, optional
Diagonal ISC values, required when input is condensed
Returns
-------
isfcs : ndarray or tuple of ndarrays
If condensed ISFCs are passed, a single redundant ISFC array is
returned; if redundant ISFCs are passed, both a condensed off-
diagonal ISFC array and the diagonal ISC values are returned
"""
# Check if incoming ISFCs are square (redundant)
if not type(iscs) == np.ndarray and isfcs.shape[-2] == isfcs.shape[-1]:
if isfcs.ndim == 2:
isfcs = isfcs[np.newaxis, ...]
if isfcs.ndim == 3:
iscs = np.diagonal(isfcs, axis1=1, axis2=2)
isfcs = np.vstack([squareform(isfc, checks=False)[np.newaxis, :]
for isfc in isfcs])
else:
raise ValueError("Square (redundant) ISFCs must be square "
"with multiple subjects or pairs of subjects "
"indexed by the first dimension")
if isfcs.shape[0] == iscs.shape[0] == 1:
isfcs, iscs = isfcs[0], iscs[0]
return isfcs, iscs
# Otherwise, convert from condensed to redundant
else:
if isfcs.ndim == iscs.ndim == 1:
isfcs, iscs = isfcs[np.newaxis, :], iscs[np.newaxis, :]
isfcs_stack = []
for isfc, isc in zip(isfcs, iscs):
isfc_sq = squareform(isfc, checks=False)
np.fill_diagonal(isfc_sq, isc)
isfcs_stack.append(isfc_sq[np.newaxis, ...])
isfcs = np.vstack(isfcs_stack)
if isfcs.shape[0] == 1:
isfcs = isfcs[0]
return isfcs | Converts square ISFCs to condensed ISFCs (and ISCs), and vice-versa
If input is a 2- or 3-dimensional array of square ISFC matrices, converts
this to the condensed off-diagonal ISFC values (i.e., the vectorized
triangle) and the diagonal ISC values. In this case, input must be a
single array of shape either n_voxels x n_voxels or n_subjects (or
n_pairs) x n_voxels x n_voxels. The condensed ISFC values are vectorized
according to scipy.spatial.distance.squareform, yielding n_voxels *
(n_voxels - 1) / 2 values comprising every voxel pair. Alternatively, if
input is an array of condensed off-diagonal ISFC values and an array of
diagonal ISC values, the square (redundant) ISFC values are returned.
This function mimics scipy.spatial.distance.squareform, but is intended
to retain the diagonal ISC values.
Parameters
----------
isfcs : ndarray
Either condensed or redundant ISFC values
iscs: ndarray, optional
Diagonal ISC values, required when input is condensed
Returns
-------
isfcs : ndarray or tuple of ndarrays
If condensed ISFCs are passed, a single redundant ISFC array is
returned; if redundant ISFCs are passed, both a condensed off-
diagonal ISFC array and the diagonal ISC values are returned | Below is the instruction that describes the task:
### Input:
Converts square ISFCs to condensed ISFCs (and ISCs), and vice-versa
If input is a 2- or 3-dimensional array of square ISFC matrices, converts
this to the condensed off-diagonal ISFC values (i.e., the vectorized
triangle) and the diagonal ISC values. In this case, input must be a
single array of shape either n_voxels x n_voxels or n_subjects (or
n_pairs) x n_voxels x n_voxels. The condensed ISFC values are vectorized
according to scipy.spatial.distance.squareform, yielding n_voxels *
(n_voxels - 1) / 2 values comprising every voxel pair. Alternatively, if
input is an array of condensed off-diagonal ISFC values and an array of
diagonal ISC values, the square (redundant) ISFC values are returned.
This function mimics scipy.spatial.distance.squareform, but is intended
to retain the diagonal ISC values.
Parameters
----------
isfcs : ndarray
Either condensed or redundant ISFC values
iscs: ndarray, optional
Diagonal ISC values, required when input is condensed
Returns
-------
isfcs : ndarray or tuple of ndarrays
If condensed ISFCs are passed, a single redundant ISFC array is
returned; if redundant ISFCs are passed, both a condensed off-
diagonal ISFC array and the diagonal ISC values are returned
### Response:
def squareform_isfc(isfcs, iscs=None):
"""Converts square ISFCs to condensed ISFCs (and ISCs), and vice-versa
If input is a 2- or 3-dimensional array of square ISFC matrices, converts
this to the condensed off-diagonal ISFC values (i.e., the vectorized
triangle) and the diagonal ISC values. In this case, input must be a
single array of shape either n_voxels x n_voxels or n_subjects (or
n_pairs) x n_voxels x n_voxels. The condensed ISFC values are vectorized
according to scipy.spatial.distance.squareform, yielding n_voxels *
(n_voxels - 1) / 2 values comprising every voxel pair. Alternatively, if
input is an array of condensed off-diagonal ISFC values and an array of
diagonal ISC values, the square (redundant) ISFC values are returned.
This function mimics scipy.spatial.distance.squareform, but is intended
to retain the diagonal ISC values.
Parameters
----------
isfcs : ndarray
Either condensed or redundant ISFC values
iscs: ndarray, optional
Diagonal ISC values, required when input is condensed
Returns
-------
isfcs : ndarray or tuple of ndarrays
If condensed ISFCs are passed, a single redundant ISFC array is
returned; if redundant ISFCs are passed, both a condensed off-
diagonal ISFC array and the diagonal ISC values are returned
"""
# Check if incoming ISFCs are square (redundant)
if not type(iscs) == np.ndarray and isfcs.shape[-2] == isfcs.shape[-1]:
if isfcs.ndim == 2:
isfcs = isfcs[np.newaxis, ...]
if isfcs.ndim == 3:
iscs = np.diagonal(isfcs, axis1=1, axis2=2)
isfcs = np.vstack([squareform(isfc, checks=False)[np.newaxis, :]
for isfc in isfcs])
else:
raise ValueError("Square (redundant) ISFCs must be square "
"with multiple subjects or pairs of subjects "
"indexed by the first dimension")
if isfcs.shape[0] == iscs.shape[0] == 1:
isfcs, iscs = isfcs[0], iscs[0]
return isfcs, iscs
# Otherwise, convert from condensed to redundant
else:
if isfcs.ndim == iscs.ndim == 1:
isfcs, iscs = isfcs[np.newaxis, :], iscs[np.newaxis, :]
isfcs_stack = []
for isfc, isc in zip(isfcs, iscs):
isfc_sq = squareform(isfc, checks=False)
np.fill_diagonal(isfc_sq, isc)
isfcs_stack.append(isfc_sq[np.newaxis, ...])
isfcs = np.vstack(isfcs_stack)
if isfcs.shape[0] == 1:
isfcs = isfcs[0]
return isfcs |
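A minimal round-trip sketch for squareform_isfc as defined above (requires only numpy; the sizes of 3 subjects and 10 voxels are arbitrary assumptions):
import numpy as np

rng = np.random.RandomState(0)
a = rng.randn(3, 10, 10)
square = (a + a.transpose(0, 2, 1)) / 2        # one symmetric ISFC matrix per subject
condensed, iscs = squareform_isfc(square)      # shapes: (3, 45) and (3, 10)
restored = squareform_isfc(condensed, iscs)    # back to shape (3, 10, 10)
assert np.allclose(square, restored)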
def change_cash(self, money):
"""
外部操作|高危|
"""
res = self.cash[-1] + money
if res >= 0:
# 高危操作
self.cash[-1] = res | 外部操作|高危| | Below is the instruction that describes the task:
### Input:
外部操作|高危|
### Response:
def change_cash(self, money):
"""
外部操作|高危|
"""
res = self.cash[-1] + money
if res >= 0:
# 高危操作
self.cash[-1] = res |
def disconnect(self):
"""Disconnect from server."""
self.logger.info("DISCONNECT")
if self.sock == NC.INVALID_SOCKET:
return NC.ERR_NO_CONN
self.state = NC.CS_DISCONNECTING
ret = self.send_disconnect()
ret2, bytes_written = self.packet_write()
self.socket_close()
return ret | Disconnect from server. | Below is the instruction that describes the task:
### Input:
Disconnect from server.
### Response:
def disconnect(self):
"""Disconnect from server."""
self.logger.info("DISCONNECT")
if self.sock == NC.INVALID_SOCKET:
return NC.ERR_NO_CONN
self.state = NC.CS_DISCONNECTING
ret = self.send_disconnect()
ret2, bytes_written = self.packet_write()
self.socket_close()
return ret |
def ignore_exception(IgnoreException=Exception, DefaultVal=None):
""" Decorator for ignoring exception from a function
e.g. @ignore_exception(DivideByZero)
e.g.2. ignore_exception(DivideByZero)(Divide)(2/0)
borrowed from: http://stackoverflow.com/questions/2262333/is-there-a-built-in-or-more-pythonic-way-to-try-to-parse-a-string-to-an-integer
"""
def dec(function):
def _dec(*args, **kwargs):
try:
return function(*args, **kwargs)
except IgnoreException:
return DefaultVal
return _dec
return dec | Decorator for ignoring exception from a function
e.g. @ignore_exception(DivideByZero)
e.g.2. ignore_exception(DivideByZero)(Divide)(2/0)
borrowed from: http://stackoverflow.com/questions/2262333/is-there-a-built-in-or-more-pythonic-way-to-try-to-parse-a-string-to-an-integer | Below is the instruction that describes the task:
### Input:
Decorator for ignoring exception from a function
e.g. @ignore_exception(DivideByZero)
e.g.2. ignore_exception(DivideByZero)(Divide)(2/0)
borrowed from: http://stackoverflow.com/questions/2262333/is-there-a-built-in-or-more-pythonic-way-to-try-to-parse-a-string-to-an-integer
### Response:
def ignore_exception(IgnoreException=Exception, DefaultVal=None):
""" Decorator for ignoring exception from a function
e.g. @ignore_exception(DivideByZero)
e.g.2. ignore_exception(DivideByZero)(Divide)(2/0)
borrowed from: http://stackoverflow.com/questions/2262333/is-there-a-built-in-or-more-pythonic-way-to-try-to-parse-a-string-to-an-integer
"""
def dec(function):
def _dec(*args, **kwargs):
try:
return function(*args, **kwargs)
except IgnoreException:
return DefaultVal
return _dec
return dec |
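A short usage sketch for the decorator above, mirroring the integer-parsing example its docstring links to (DivideByZero in the docstring is illustrative; Python's built-in exception is ZeroDivisionError):
safe_int = ignore_exception(ValueError, DefaultVal=0)(int)
print(safe_int("42"))   # -> 42
print(safe_int("abc"))  # -> 0, the ValueError is swallowed and the default is returned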
def parents(self, primary=None):
"""
:param primary: if None, then all parents are returned. If True, then only foreign keys composed of
primary key attributes are considered. If False, then only foreign keys including at least one non-primary
attribute are considered.
:return: dict of tables referenced with self's foreign keys
"""
return self.connection.dependencies.parents(self.full_table_name, primary) | :param primary: if None, then all parents are returned. If True, then only foreign keys composed of
primary key attributes are considered. If False, then only foreign keys including at least one non-primary
attribute are considered.
:return: dict of tables referenced with self's foreign keys | Below is the instruction that describes the task:
### Input:
:param primary: if None, then all parents are returned. If True, then only foreign keys composed of
primary key attributes are considered. If False, then only foreign keys including at least one non-primary
attribute are considered.
:return: dict of tables referenced with self's foreign keys
### Response:
def parents(self, primary=None):
"""
:param primary: if None, then all parents are returned. If True, then only foreign keys composed of
primary key attributes are considered. If False, then only foreign keys including at least one non-primary
attribute are considered.
:return: dict of tables referenced with self's foreign keys
"""
return self.connection.dependencies.parents(self.full_table_name, primary) |
def local_is_up(self, target):
"""
Check if a tunnel is up (remote target's host is reachable on TCP
target's port)
Arguments:
target (tuple):
tuple of type (``str``, ``int``) indicating the listen IP
address and port
Return:
boolean
.. deprecated:: 0.1.0
Replaced by :meth:`.check_tunnels()` and :attr:`.tunnel_is_up`
"""
try:
check_address(target)
except ValueError:
self.logger.warning('Target must be a tuple (IP, port), where IP '
'is a string (i.e. "192.168.0.1") and port is '
'an integer (i.e. 40000). Alternatively '
'target can be a valid UNIX domain socket.')
return False
if self.skip_tunnel_checkup: # force tunnel check at this point
self.skip_tunnel_checkup = False
self.check_tunnels()
self.skip_tunnel_checkup = True # roll it back
return self.tunnel_is_up.get(target, True) | Check if a tunnel is up (remote target's host is reachable on TCP
target's port)
Arguments:
target (tuple):
tuple of type (``str``, ``int``) indicating the listen IP
address and port
Return:
boolean
.. deprecated:: 0.1.0
Replaced by :meth:`.check_tunnels()` and :attr:`.tunnel_is_up` | Below is the instruction that describes the task:
### Input:
Check if a tunnel is up (remote target's host is reachable on TCP
target's port)
Arguments:
target (tuple):
tuple of type (``str``, ``int``) indicating the listen IP
address and port
Return:
boolean
.. deprecated:: 0.1.0
Replaced by :meth:`.check_tunnels()` and :attr:`.tunnel_is_up`
### Response:
def local_is_up(self, target):
"""
Check if a tunnel is up (remote target's host is reachable on TCP
target's port)
Arguments:
target (tuple):
tuple of type (``str``, ``int``) indicating the listen IP
address and port
Return:
boolean
.. deprecated:: 0.1.0
Replaced by :meth:`.check_tunnels()` and :attr:`.tunnel_is_up`
"""
try:
check_address(target)
except ValueError:
self.logger.warning('Target must be a tuple (IP, port), where IP '
'is a string (i.e. "192.168.0.1") and port is '
'an integer (i.e. 40000). Alternatively '
'target can be a valid UNIX domain socket.')
return False
if self.skip_tunnel_checkup: # force tunnel check at this point
self.skip_tunnel_checkup = False
self.check_tunnels()
self.skip_tunnel_checkup = True # roll it back
return self.tunnel_is_up.get(target, True) |
def InformarCalidadCertificacion(self, coe):
"Informar calidad de un certificado (C1116A/RT)"
# llamo al webservice:
ret = self.client.cgInformarCalidad(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
calidad=self.certificacion['primaria']['calidad'],
)
# analizo la respuesta
ret = ret['oReturn']
self.__analizar_errores(ret)
self.AnalizarAutorizarCertificadoResp(ret)
return True | Informar calidad de un certificado (C1116A/RT) | Below is the instruction that describes the task:
### Input:
Informar calidad de un certificado (C1116A/RT)
### Response:
def InformarCalidadCertificacion(self, coe):
"Informar calidad de un certificado (C1116A/RT)"
# llamo al webservice:
ret = self.client.cgInformarCalidad(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
calidad=self.certificacion['primaria']['calidad'],
)
# analizo la respuesta
ret = ret['oReturn']
self.__analizar_errores(ret)
self.AnalizarAutorizarCertificadoResp(ret)
return True |
def on_update_enabled(self, conf_evt):
"""Implements neighbor configuration change listener.
"""
enabled = conf_evt.value
# If we do not have any protocol bound and configuration asks us to
# enable this peer, we try to establish connection again.
if enabled:
LOG.info('%s enabled', self)
if self._protocol and self._protocol.started:
LOG.error('Tried to enable neighbor that is already enabled')
else:
self.state.bgp_state = const.BGP_FSM_CONNECT
# Restart connect loop if not already running.
if not self._connect_retry_event.is_set():
self._connect_retry_event.set()
LOG.debug('Starting connect loop as neighbor is enabled.')
else:
LOG.info('%s disabled', self)
if self._protocol:
# Stopping protocol will eventually trigger connection_lost
# handler which will do some clean-up.
# But the greenlet that is in charge of the socket may be killed
# when we stop the protocol, hence we call connection_lost
# here as we triggered socket to close.
self._protocol.send_notification(
BGP_ERROR_CEASE,
BGP_ERROR_SUB_ADMINISTRATIVE_SHUTDOWN
)
self._protocol.stop()
self._protocol = None
self.state.bgp_state = const.BGP_FSM_IDLE
# If this peer is not enabled any-more we stop trying to make any
# connection.
LOG.debug('Disabling connect-retry as neighbor was disabled')
self._connect_retry_event.clear() | Implements neighbor configuration change listener. | Below is the instruction that describes the task:
### Input:
Implements neighbor configuration change listener.
### Response:
def on_update_enabled(self, conf_evt):
"""Implements neighbor configuration change listener.
"""
enabled = conf_evt.value
# If we do not have any protocol bound and configuration asks us to
# enable this peer, we try to establish connection again.
if enabled:
LOG.info('%s enabled', self)
if self._protocol and self._protocol.started:
LOG.error('Tried to enable neighbor that is already enabled')
else:
self.state.bgp_state = const.BGP_FSM_CONNECT
# Restart connect loop if not already running.
if not self._connect_retry_event.is_set():
self._connect_retry_event.set()
LOG.debug('Starting connect loop as neighbor is enabled.')
else:
LOG.info('%s disabled', self)
if self._protocol:
# Stopping protocol will eventually trigger connection_lost
# handler which will do some clean-up.
# But the greenlet that is in charge of the socket may be killed
# when we stop the protocol, hence we call connection_lost
# here as we triggered socket to close.
self._protocol.send_notification(
BGP_ERROR_CEASE,
BGP_ERROR_SUB_ADMINISTRATIVE_SHUTDOWN
)
self._protocol.stop()
self._protocol = None
self.state.bgp_state = const.BGP_FSM_IDLE
# If this peer is not enabled any-more we stop trying to make any
# connection.
LOG.debug('Disabling connect-retry as neighbor was disabled')
self._connect_retry_event.clear() |
def connect(self, node="", rpcuser="", rpcpassword="", **kwargs):
""" Connect to blockchain network (internal use only)
"""
if not node:
if "node" in self.config:
node = self.config["node"]
else:
raise ValueError("A Blockchain node needs to be provided!")
if not rpcuser and "rpcuser" in self.config:
rpcuser = self.config["rpcuser"]
if not rpcpassword and "rpcpassword" in self.config:
rpcpassword = self.config["rpcpassword"]
self.rpc = self.rpc_class(node, rpcuser, rpcpassword, **kwargs) | Connect to blockchain network (internal use only) | Below is the instruction that describes the task:
### Input:
Connect to blockchain network (internal use only)
### Response:
def connect(self, node="", rpcuser="", rpcpassword="", **kwargs):
""" Connect to blockchain network (internal use only)
"""
if not node:
if "node" in self.config:
node = self.config["node"]
else:
raise ValueError("A Blockchain node needs to be provided!")
if not rpcuser and "rpcuser" in self.config:
rpcuser = self.config["rpcuser"]
if not rpcpassword and "rpcpassword" in self.config:
rpcpassword = self.config["rpcpassword"]
self.rpc = self.rpc_class(node, rpcuser, rpcpassword, **kwargs) |
def _parse_to_recoverable_signature(sig):
"""
Returns a parsed recoverable signature of length 65 bytes
"""
# Buffer for getting values of signature object
assert isinstance(sig, bytes)
assert len(sig) == 65
# Make a recoverable signature of 65 bytes
rec_sig = ffi.new("secp256k1_ecdsa_recoverable_signature *")
# Retrieving the recid from the last byte of the signed key
recid = ord(sig[64:65])
# Parse a recoverable signature
parsable_sig = lib.secp256k1_ecdsa_recoverable_signature_parse_compact(
ctx,
rec_sig,
sig,
recid
)
# Verify that the signature is parsable
if not parsable_sig:
raise InvalidSignatureError()
return rec_sig | Returns a parsed recoverable signature of length 65 bytes | Below is the instruction that describes the task:
### Input:
Returns a parsed recoverable signature of length 65 bytes
### Response:
def _parse_to_recoverable_signature(sig):
"""
Returns a parsed recoverable signature of length 65 bytes
"""
# Buffer for getting values of signature object
assert isinstance(sig, bytes)
assert len(sig) == 65
# Make a recoverable signature of 65 bytes
rec_sig = ffi.new("secp256k1_ecdsa_recoverable_signature *")
# Retrieving the recid from the last byte of the signed key
recid = ord(sig[64:65])
# Parse a recoverable signature
parsable_sig = lib.secp256k1_ecdsa_recoverable_signature_parse_compact(
ctx,
rec_sig,
sig,
recid
)
# Verify that the signature is parsable
if not parsable_sig:
raise InvalidSignatureError()
return rec_sig |
def ask_yesno(msg="Proceed?", dft=None):
"""Prompts the user for a yes or no answer. Returns True for yes, False
for no."""
yes = ["y", "yes", "Y", "YES"]
no = ["n", "no", "N", "NO"]
if dft != None:
dft = yes[0] if (dft in yes or dft == True) else no[0]
return ask(msg, dft=dft, vld=yes+no) in yes | Prompts the user for a yes or no answer. Returns True for yes, False
for no. | Below is the instruction that describes the task:
### Input:
Prompts the user for a yes or no answer. Returns True for yes, False
for no.
### Response:
def ask_yesno(msg="Proceed?", dft=None):
"""Prompts the user for a yes or no answer. Returns True for yes, False
for no."""
yes = ["y", "yes", "Y", "YES"]
no = ["n", "no", "N", "NO"]
if dft != None:
dft = yes[0] if (dft in yes or dft == True) else no[0]
return ask(msg, dft=dft, vld=yes+no) in yes |
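A hedged usage sketch; the call is interactive because the answer is read from the terminal by the ask() helper this function relies on, which is defined elsewhere in the same module:
if ask_yesno("Overwrite the existing file?", dft="n"):
    print("overwriting...")
else:
    print("leaving the file untouched.")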
def assertFileSizeNotAlmostEqual(
self, filename, size, places=None, msg=None, delta=None):
'''Fail unless ``filename`` does not have the given ``size``
as determined by their difference rounded to the given number
of decimal ``places`` (default 7) and comparing to zero, or if
their difference is greater than a given ``delta``.
Parameters
----------
filename : str, bytes, file-like
size : int, float
places : int
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
delta : int, float
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertNotAlmostEqual(
fsize, size, places=places, msg=msg, delta=delta) | Fail unless ``filename`` does not have the given ``size``
as determined by their difference rounded to the given number
of decimal ``places`` (default 7) and comparing to zero, or if
their difference is greater than a given ``delta``.
Parameters
----------
filename : str, bytes, file-like
size : int, float
places : int
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
delta : int, float
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like. | Below is the instruction that describes the task:
### Input:
Fail unless ``filename`` does not have the given ``size``
as determined by their difference rounded to the given number
of decimal ``places`` (default 7) and comparing to zero, or if
their difference is greater than a given ``delta``.
Parameters
----------
filename : str, bytes, file-like
size : int, float
places : int
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
delta : int, float
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
### Response:
def assertFileSizeNotAlmostEqual(
self, filename, size, places=None, msg=None, delta=None):
'''Fail unless ``filename`` does not have the given ``size``
as determined by their difference rounded to the given number
of decimal ``places`` (default 7) and comparing to zero, or if
their difference is greater than a given ``delta``.
Parameters
----------
filename : str, bytes, file-like
size : int, float
places : int
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
delta : int, float
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertNotAlmostEqual(
fsize, size, places=places, msg=msg, delta=delta) |
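A hedged unittest sketch; FileSizeMixin is a stand-in name for the marbles mixin class that actually provides the assertion above, and the 100-byte / 1 KiB / delta numbers are arbitrary:
import tempfile
import unittest

class SizeChecks(unittest.TestCase, FileSizeMixin):  # FileSizeMixin is a hypothetical name
    def test_small_file_is_not_roughly_one_kib(self):
        with tempfile.NamedTemporaryFile(delete=False) as f:
            f.write(b"x" * 100)
        # |100 - 1024| = 924 > delta, so the sizes are not almost equal and the test passes
        self.assertFileSizeNotAlmostEqual(f.name, 1024, delta=500)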
def enumerate_reversed(sequence):
"""
Perform reverse enumeration, returning an iterator with decrementing
index/position values
Source: http://stackoverflow.com/questions/529424/traverse-a-list-in-reverse-order-in-python
"""
for index in reversed(xrange(len(sequence))):
yield index, sequence[index] | Perform reverse enumeration, returning an iterator with decrementing
index/position values
Source: http://stackoverflow.com/questions/529424/traverse-a-list-in-reverse-order-in-python | Below is the instruction that describes the task:
### Input:
Perform reverse enumeration, returning an iterator with decrementing
index/position values
Source: http://stackoverflow.com/questions/529424/traverse-a-list-in-reverse-order-in-python
### Response:
def enumerate_reversed(sequence):
"""
Perform reverse enumeration, returning an iterator with decrementing
index/position values
Source: http://stackoverflow.com/questions/529424/traverse-a-list-in-reverse-order-in-python
"""
for index in reversed(xrange(len(sequence))):
yield index, sequence[index] |
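A quick usage sketch (the generator above targets Python 2 because of xrange; under Python 3 substitute range):
for index, item in enumerate_reversed(['a', 'b', 'c']):
    print(index, item)
# 2 c
# 1 b
# 0 a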
def __query(self, query, tagid=None):
"""
Extracts nodes that match the query from the Response
:param query: XPath Expression
:type query: String
:param tagid: Tag ID
:type query: String
:returns: The queried nodes
:rtype: list
"""
if self.encrypted:
document = self.decrypted_document
else:
document = self.document
return OneLogin_Saml2_Utils.query(document, query, None, tagid) | Extracts nodes that match the query from the Response
:param query: XPath Expression
:type query: String
:param tagid: Tag ID
:type query: String
:returns: The queried nodes
:rtype: list | Below is the instruction that describes the task:
### Input:
Extracts nodes that match the query from the Response
:param query: XPath Expression
:type query: String
:param tagid: Tag ID
:type query: String
:returns: The queried nodes
:rtype: list
### Response:
def __query(self, query, tagid=None):
"""
Extracts nodes that match the query from the Response
:param query: XPath Expression
:type query: String
:param tagid: Tag ID
:type query: String
:returns: The queried nodes
:rtype: list
"""
if self.encrypted:
document = self.decrypted_document
else:
document = self.document
return OneLogin_Saml2_Utils.query(document, query, None, tagid) |
def is_sparse_file(filename):
"""Determine if the given filename indicates a dense or a sparse matrix
If pathname is xxx.coo.yyy return True otherwise False.
"""
dirname, basename = os.path.split(filename)
name, ext = os.path.splitext(basename)
matrix_name, matrix_ext = os.path.splitext(name)
if matrix_ext == '.coo':
return True
else:
return False | Determine if the given filename indicates a dense or a sparse matrix
If pathname is xxx.coo.yyy return True otherwise False. | Below is the instruction that describes the task:
### Input:
Determine if the given filename indicates a dense or a sparse matrix
If pathname is xxx.coo.yyy return True otherwise False.
### Response:
def is_sparse_file(filename):
"""Determine if the given filename indicates a dense or a sparse matrix
If pathname is xxx.coo.yyy return True otherwise False.
"""
dirname, basename = os.path.split(filename)
name, ext = os.path.splitext(basename)
matrix_name, matrix_ext = os.path.splitext(name)
if matrix_ext == '.coo':
return True
else:
return False |
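Two illustrative calls showing the xxx.coo.yyy naming convention the function checks (the file names are made up):
print(is_sparse_file("counts/T.coo.dat"))  # True, ".coo" marks a sparse (COO) matrix
print(is_sparse_file("counts/T.dat"))      # False, treated as dense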
def create_tag(
tag,
escaper=EscapedHTMLString,
opening_only=False,
body=None,
escape_body=False,
escape_attr=True,
indent=0,
attrs=None,
**other_attrs):
"""
Create an XML/HTML tag.
This function creates a full XML/HTML tag, putting together an
optional inner body and a dictionary of attributes.
>>> print create_html_tag ("select", create_html_tag("h1",
... "hello", other_attrs={'class': "foo"}))
<select>
<h1 class="foo">
hello
</h1>
</select>
@param tag: the tag (e.g. "select", "body", "h1"...).
@type tag: string
@param body: some text/HTML to put in the body of the tag (this
body will be indented WRT the tag).
@type body: string
@param escape_body: whether the body (if any) must be escaped.
@type escape_body: boolean
@param escape_attr: whether the attribute values (if any) must be
escaped.
@type escape_attr: boolean
@param indent: number of level of indentation for the tag.
@type indent: integer
@param attrs: map of attributes to add to the tag.
@type attrs: dict
@return: the HTML tag.
@rtype: string
"""
if attrs is None:
attrs = {}
for key, value in iteritems(other_attrs):
if value is not None:
if key.endswith('_'):
attrs[key[:-1]] = value
else:
attrs[key] = value
out = "<%s" % tag
for key, value in iteritems(attrs):
if escape_attr:
value = escaper(value, escape_quotes=True)
out += ' %s="%s"' % (key, value)
if body is not None:
if callable(body) and body.__name__ == 'handle_body':
body = body()
out += ">"
if escape_body and not isinstance(body, EscapedString):
body = escaper(body)
out += body
if not opening_only:
out += "</%s>" % tag
elif not opening_only:
out += " />"
if indent:
out = indent_text(out, indent)[:-1]
from invenio_utils.text import wash_for_utf8
return EscapedString(wash_for_utf8(out)) | Create an XML/HTML tag.
This function creates a full XML/HTML tag, putting together an
optional inner body and a dictionary of attributes.
>>> print create_html_tag ("select", create_html_tag("h1",
... "hello", other_attrs={'class': "foo"}))
<select>
<h1 class="foo">
hello
</h1>
</select>
@param tag: the tag (e.g. "select", "body", "h1"...).
@type tag: string
@param body: some text/HTML to put in the body of the tag (this
body will be indented WRT the tag).
@type body: string
@param escape_body: whether the body (if any) must be escaped.
@type escape_body: boolean
@param escape_attr: whether the attribute values (if any) must be
escaped.
@type escape_attr: boolean
@param indent: number of level of indentation for the tag.
@type indent: integer
@param attrs: map of attributes to add to the tag.
@type attrs: dict
@return: the HTML tag.
@rtype: string | Below is the instruction that describes the task:
### Input:
Create an XML/HTML tag.
This function creates a full XML/HTML tag, putting together an
optional inner body and a dictionary of attributes.
>>> print create_html_tag ("select", create_html_tag("h1",
... "hello", other_attrs={'class': "foo"}))
<select>
<h1 class="foo">
hello
</h1>
</select>
@param tag: the tag (e.g. "select", "body", "h1"...).
@type tag: string
@param body: some text/HTML to put in the body of the tag (this
body will be indented WRT the tag).
@type body: string
@param escape_body: whether the body (if any) must be escaped.
@type escape_body: boolean
@param escape_attr: whether the attribute values (if any) must be
escaped.
@type escape_attr: boolean
@param indent: number of level of indentation for the tag.
@type indent: integer
@param attrs: map of attributes to add to the tag.
@type attrs: dict
@return: the HTML tag.
@rtype: string
### Response:
def create_tag(
tag,
escaper=EscapedHTMLString,
opening_only=False,
body=None,
escape_body=False,
escape_attr=True,
indent=0,
attrs=None,
**other_attrs):
"""
Create an XML/HTML tag.
This function creates a full XML/HTML tag, putting together an
optional inner body and a dictionary of attributes.
>>> print create_html_tag ("select", create_html_tag("h1",
... "hello", other_attrs={'class': "foo"}))
<select>
<h1 class="foo">
hello
</h1>
</select>
@param tag: the tag (e.g. "select", "body", "h1"...).
@type tag: string
@param body: some text/HTML to put in the body of the tag (this
body will be indented WRT the tag).
@type body: string
@param escape_body: whether the body (if any) must be escaped.
@type escape_body: boolean
@param escape_attr: whether the attribute values (if any) must be
escaped.
@type escape_attr: boolean
@param indent: number of level of indentation for the tag.
@type indent: integer
@param attrs: map of attributes to add to the tag.
@type attrs: dict
@return: the HTML tag.
@rtype: string
"""
if attrs is None:
attrs = {}
for key, value in iteritems(other_attrs):
if value is not None:
if key.endswith('_'):
attrs[key[:-1]] = value
else:
attrs[key] = value
out = "<%s" % tag
for key, value in iteritems(attrs):
if escape_attr:
value = escaper(value, escape_quotes=True)
out += ' %s="%s"' % (key, value)
if body is not None:
if callable(body) and body.__name__ == 'handle_body':
body = body()
out += ">"
if escape_body and not isinstance(body, EscapedString):
body = escaper(body)
out += body
if not opening_only:
out += "</%s>" % tag
elif not opening_only:
out += " />"
if indent:
out = indent_text(out, indent)[:-1]
from invenio_utils.text import wash_for_utf8
return EscapedString(wash_for_utf8(out)) |
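A hedged usage sketch (it assumes the invenio helpers imported by the function are available, and the attribute order in the output can vary because keyword attributes are collected in a dict). The trailing underscore in class_ is stripped by the function so the reserved word can be used as an attribute name:
tag = create_tag("a", body="hello", href="http://example.org", class_="external")
# -> roughly: <a href="http://example.org" class="external">hello</a>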
def to_trip(
self,
smooth,
smooth_strategy,
smooth_noise,
seg,
seg_eps,
seg_min_time,
simplify,
simplify_max_dist_error,
simplify_max_speed_error
):
"""In-place, transformation of a track into a trip
A trip is a more accurate depiction of reality than a
track.
For a track to become a trip it needs to go through the
following steps:
+ noise removal
+ smoothing
+ spatio-temporal segmentation
+ simplification
At the end of these steps we have a less noisy track
that has fewer points, but that holds the same information.
It's required that each segment has its metrics calculated
or has been preprocessed.
Args:
name: An optional string with the name of the trip. If
none is given, one will be generated by generateName
Returns:
This Track instance
"""
self.compute_metrics()
self.remove_noise()
print (smooth, seg, simplify)
if smooth:
self.compute_metrics()
self.smooth(smooth_strategy, smooth_noise)
if seg:
self.compute_metrics()
self.segment(seg_eps, seg_min_time)
if simplify:
self.compute_metrics()
self.simplify(0, simplify_max_dist_error, simplify_max_speed_error)
self.compute_metrics()
return self | In-place, transformation of a track into a trip
A trip is a more accurate depiction of reality than a
track.
For a track to become a trip it needs to go through the
following steps:
+ noise removal
+ smoothing
+ spatio-temporal segmentation
+ simplification
At the end of these steps we have a less noisy track
that has fewer points, but that holds the same information.
It's required that each segment has its metrics calculated
or has been preprocessed.
Args:
name: An optional string with the name of the trip. If
none is given, one will be generated by generateName
Returns:
This Track instance | Below is the instruction that describes the task:
### Input:
In-place, transformation of a track into a trip
A trip is a more accurate depiction of reality than a
track.
For a track to become a trip it needs to go through the
following steps:
+ noise removal
+ smoothing
+ spatio-temporal segmentation
+ simplification
At the end of these steps we have a less noisy track
that has fewer points, but that holds the same information.
It's required that each segment has its metrics calculated
or has been preprocessed.
Args:
name: An optional string with the name of the trip. If
none is given, one will be generated by generateName
Returns:
This Track instance
### Response:
def to_trip(
self,
smooth,
smooth_strategy,
smooth_noise,
seg,
seg_eps,
seg_min_time,
simplify,
simplify_max_dist_error,
simplify_max_speed_error
):
"""In-place, transformation of a track into a trip
A trip is a more accurate depiction of reality than a
track.
For a track to become a trip it needs to go through the
following steps:
+ noise removal
+ smoothing
+ spatio-temporal segmentation
+ simplification
At the end of these steps we have a less noisy track
that has fewer points, but that holds the same information.
It's required that each segment has its metrics calculated
or has been preprocessed.
Args:
name: An optional string with the name of the trip. If
none is given, one will be generated by generateName
Returns:
This Track instance
"""
self.compute_metrics()
self.remove_noise()
print (smooth, seg, simplify)
if smooth:
self.compute_metrics()
self.smooth(smooth_strategy, smooth_noise)
if seg:
self.compute_metrics()
self.segment(seg_eps, seg_min_time)
if simplify:
self.compute_metrics()
self.simplify(0, simplify_max_dist_error, simplify_max_speed_error)
self.compute_metrics()
return self |
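A hedged call sketch for the method above; track stands for an already-loaded Track instance, and every argument value is an arbitrary placeholder rather than a recommended default:
trip = track.to_trip(
    smooth=True, smooth_strategy="inno", smooth_noise=10.0,
    seg=True, seg_eps=0.15, seg_min_time=80,
    simplify=True, simplify_max_dist_error=2.0, simplify_max_speed_error=1.0,
)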
def has_creep(self, pos: Union[Point2, Point3, Unit]) -> bool:
""" Returns True if there is creep on the grid point. """
assert isinstance(pos, (Point2, Point3, Unit))
pos = pos.position.to2.rounded
return self.state.creep[pos] != 0 | Returns True if there is creep on the grid point. | Below is the instruction that describes the task:
### Input:
Returns True if there is creep on the grid point.
### Response:
def has_creep(self, pos: Union[Point2, Point3, Unit]) -> bool:
""" Returns True if there is creep on the grid point. """
assert isinstance(pos, (Point2, Point3, Unit))
pos = pos.position.to2.rounded
return self.state.creep[pos] != 0 |
def get_overlay(self, overlay_name):
"""Return overlay as a dictionary.
:param overlay_name: name of the overlay
:returns: overlay as a dictionary
"""
url = self.http_manifest["overlays"][overlay_name]
return self._get_json_from_url(url) | Return overlay as a dictionary.
:param overlay_name: name of the overlay
:returns: overlay as a dictionary | Below is the instruction that describes the task:
### Input:
Return overlay as a dictionary.
:param overlay_name: name of the overlay
:returns: overlay as a dictionary
### Response:
def get_overlay(self, overlay_name):
"""Return overlay as a dictionary.
:param overlay_name: name of the overlay
:returns: overlay as a dictionary
"""
url = self.http_manifest["overlays"][overlay_name]
return self._get_json_from_url(url) |
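A hedged usage sketch; dataset stands for an HTTP-backed dataset object exposing the method above, and "relpath" is only an example overlay name:
overlay = dataset.get_overlay("relpath")
for identifier, value in overlay.items():
    print(identifier, value)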