code (stringlengths 75 to 104k) | docstring (stringlengths 1 to 46.9k) | text (stringlengths 164 to 112k) |
---|---|---|
def create_network(batch_size, update_freq):
"""Create a linear regression network for performing SVRG optimization.
Parameters
----------
batch_size: int
Size of data split
update_freq: int
Update Frequency for calculating full gradients
Returns
----------
di: mx.io.NDArrayIter
Data iterator
mod: SVRGModule
An instance of SVRGModule for performing SVRG optimization
"""
import logging
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=head)
train_data = np.random.randint(1, 5, [1000, 2])
weights = np.array([1.0, 2.0])
train_label = train_data.dot(weights)
di = mx.io.NDArrayIter(train_data, train_label, batch_size=batch_size, shuffle=True, label_name='lin_reg_label')
X = mx.sym.Variable('data')
Y = mx.symbol.Variable('lin_reg_label')
fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1)
lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro")
mod = SVRGModule(
symbol=lro,
data_names=['data'],
label_names=['lin_reg_label'], update_freq=update_freq, logger=logging
)
return di, mod | Create a linear regression network for performing SVRG optimization.
Parameters
----------
batch_size: int
Size of data split
update_freq: int
Update Frequency for calculating full gradients
Returns
----------
di: mx.io.NDArrayIter
Data iterator
mod: SVRGModule
An instance of SVRGModule for performing SVRG optimization | Below is the instruction that describes the task:
### Input:
Create a linear regression network for performing SVRG optimization.
Parameters
----------
batch_size: int
Size of data split
update_freq: int
Update Frequency for calculating full gradients
Returns
----------
di: mx.io.NDArrayIter
Data iterator
mod: SVRGModule
An instance of SVRGModule for performing SVRG optimization
### Response:
def create_network(batch_size, update_freq):
"""Create a linear regression network for performing SVRG optimization.
Parameters
----------
batch_size: int
Size of data split
update_freq: int
Update Frequency for calculating full gradients
Returns
----------
di: mx.io.NDArrayIter
Data iterator
mod: SVRGModule
An instance of SVRGModule for performing SVRG optimization
"""
import logging
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=head)
train_data = np.random.randint(1, 5, [1000, 2])
weights = np.array([1.0, 2.0])
train_label = train_data.dot(weights)
di = mx.io.NDArrayIter(train_data, train_label, batch_size=batch_size, shuffle=True, label_name='lin_reg_label')
X = mx.sym.Variable('data')
Y = mx.symbol.Variable('lin_reg_label')
fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1)
lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro")
mod = SVRGModule(
symbol=lro,
data_names=['data'],
label_names=['lin_reg_label'], update_freq=update_freq, logger=logging
)
return di, mod |
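A minimal training sketch for the network above, assuming ``create_network`` and its imports (``mx``, ``np``, ``SVRGModule``) are in scope; the hyper-parameters are illustrative and ``fit`` is the standard MXNet Module API that SVRGModule follows:
.. code-block:: python

    di, mod = create_network(batch_size=32, update_freq=2)
    # SVRGModule exposes the usual Module interface, so fit() drives the SVRG loop:
    # full gradients are recomputed every `update_freq` epochs.
    mod.fit(di,
            eval_metric='mse',
            optimizer='sgd',
            optimizer_params=(('learning_rate', 0.025),),
            num_epoch=10)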
def _rotated_files(path):
"""Generator. Yields the next rotated file as a tuple:
(path, rotation_id)
"""
for globbed_path in iglob(path + FILE_NAME_GLOB):
match = re.search(FILE_NAME_REGEX, globbed_path)
if match:
yield globbed_path, int(match.group('rotation_id')) | Generator. Yields the next rotated file as a tuple:
(path, rotation_id) | Below is the instruction that describes the task:
### Input:
Generator. Yields the next rotated file as a tuple:
(path, rotation_id)
### Response:
def _rotated_files(path):
"""Generator. Yields the next rotated file as a tuple:
(path, rotation_id)
"""
for globbed_path in iglob(path + FILE_NAME_GLOB):
match = re.search(FILE_NAME_REGEX, globbed_path)
if match:
yield globbed_path, int(match.group('rotation_id')) |
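A usage sketch for the generator above; the glob and regex constants shown here are illustrative assumptions (the module defines its own ``FILE_NAME_GLOB`` and ``FILE_NAME_REGEX``):
.. code-block:: python

    import re
    from glob import iglob

    FILE_NAME_GLOB = '.*'                          # hypothetical: matches "app.log.1", "app.log.2", ...
    FILE_NAME_REGEX = r'\.(?P<rotation_id>\d+)$'   # hypothetical: captures the trailing rotation number

    for rotated_path, rotation_id in _rotated_files('app.log'):
        print(rotated_path, rotation_id)           # e.g. ('app.log.3', 3)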
def md(
self,
url,
width="original"):
"""*generate a multimarkdown image link viewable anywhere (no sign-in needed for private photos)*
**Key Arguments:**
- ``url`` -- the share URL for the flickr image (or just the unique photoid)
- ``width`` -- the pixel width of the fully resolved image. Default *original*. [75, 100, 150, 240, 320, 500, 640, 800, 1024, 1600, 2048]
**Return:**
- ``md`` -- the image reference link in multi-markdown syntax
**Usage:**
To return the markdown markup for an image at a given Flickr share URL:
.. code-block:: python
from picaxe import picaxe
Flickr = picaxe(
log=log,
settings=settings
)
mdLink = Flickr.md(
url="https://www.flickr.com/photos/92344916@N06/30455211086"
width=1024
)
"""
self.log.info('starting the ``md_image`` method')
images, title, desc, photoId = self.get_photo_metadata(url)
if len(title) == 0:
tag = photoId
else:
tag = "%(title)s %(photoId)s" % locals()
image = images[str(width)]
if width == "original":
pxWidth = 1024
else:
pxWidth = width
md = """![%(title)s][%(tag)s]
[%(tag)s]: %(image)s title="%(title)s" width=600px
""" % locals()
self.log.info('completed the ``md_image`` method')
return md | *generate a multimarkdown image link viewable anywhere (no sign-in needed for private photos)*
**Key Arguments:**
- ``url`` -- the share URL for the flickr image (or just the unique photoid)
- ``width`` -- the pixel width of the fully resolved image. Default *original*. [75, 100, 150, 240, 320, 500, 640, 800, 1024, 1600, 2048]
**Return:**
- ``md`` -- the image reference link in multi-markdown syntax
**Usage:**
To return the markdown markup for an image at a given Flickr share URL:
.. code-block:: python
from picaxe import picaxe
Flickr = picaxe(
log=log,
settings=settings
)
mdLink = Flickr.md(
url="https://www.flickr.com/photos/92344916@N06/30455211086"
width=1024
) | Below is the instruction that describes the task:
### Input:
*generate a multimarkdown image link viewable anywhere (no sign-in needed for private photos)*
**Key Arguments:**
- ``url`` -- the share URL for the flickr image (or just the unique photoid)
- ``width`` -- the pixel width of the fully resolved image. Default *original*. [75, 100, 150, 240, 320, 500, 640, 800, 1024, 1600, 2048]
**Return:**
- ``md`` -- the image reference link in multi-markdown syntax
**Usage:**
To return the markdown markup for an image at a given Flickr share URL:
.. code-block:: python
from picaxe import picaxe
Flickr = picaxe(
log=log,
settings=settings
)
mdLink = Flickr.md(
url="https://www.flickr.com/photos/92344916@N06/30455211086"
width=1024
)
### Response:
def md(
self,
url,
width="original"):
"""*generate a multimarkdown image link viewable anywhere (no sign-in needed for private photos)*
**Key Arguments:**
- ``url`` -- the share URL for the flickr image (or just the unique photoid)
- ``width`` -- the pixel width of the fully resolved image. Default *original*. [75, 100, 150, 240, 320, 500, 640, 800, 1024, 1600, 2048]
**Return:**
- ``md`` -- the image reference link in multi-markdown syntax
**Usage:**
To return the markdown markup for an image at a given Flickr share URL:
.. code-block:: python
from picaxe import picaxe
Flickr = picaxe(
log=log,
settings=settings
)
mdLink = Flickr.md(
url="https://www.flickr.com/photos/92344916@N06/30455211086"
width=1024
)
"""
self.log.info('starting the ``md_image`` method')
images, title, desc, photoId = self.get_photo_metadata(url)
if len(title) == 0:
tag = photoId
else:
tag = "%(title)s %(photoId)s" % locals()
image = images[str(width)]
if width == "original":
pxWidth = 1024
else:
pxWidth = width
md = """![%(title)s][%(tag)s]
[%(tag)s]: %(image)s title="%(title)s" width=600px
""" % locals()
self.log.info('completed the ``md_image`` method')
return md |
def get_reversed_statuses(context):
"""Return a mapping of exit codes to status strings.
Args:
context (scriptworker.context.Context): the scriptworker context
Returns:
dict: the mapping of exit codes to status strings.
"""
_rev = {v: k for k, v in STATUSES.items()}
_rev.update(dict(context.config['reversed_statuses']))
return _rev | Return a mapping of exit codes to status strings.
Args:
context (scriptworker.context.Context): the scriptworker context
Returns:
dict: the mapping of exit codes to status strings. | Below is the instruction that describes the task:
### Input:
Return a mapping of exit codes to status strings.
Args:
context (scriptworker.context.Context): the scriptworker context
Returns:
dict: the mapping of exit codes to status strings.
### Response:
def get_reversed_statuses(context):
"""Return a mapping of exit codes to status strings.
Args:
context (scriptworker.context.Context): the scriptworker context
Returns:
dict: the mapping of exit codes to status strings.
"""
_rev = {v: k for k, v in STATUSES.items()}
_rev.update(dict(context.config['reversed_statuses']))
return _rev |
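To make the inversion concrete, a toy sketch with made-up values (scriptworker's real ``STATUSES`` table and config overrides will differ):
.. code-block:: python

    STATUSES = {'success': 0, 'failure': 1, 'exception': 10}   # hypothetical values
    _rev = {v: k for k, v in STATUSES.items()}
    _rev.update({-11: 'killed'})                                # stands in for context.config['reversed_statuses']
    # _rev == {0: 'success', 1: 'failure', 10: 'exception', -11: 'killed'}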
def robot_wireless(max_iters=100, kernel=None, optimize=True, plot=True):
"""Predict the location of a robot given wirelss signal strength readings."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.robot_wireless()
# create simple GP Model
m = GPy.models.GPRegression(data['Y'], data['X'], kernel=kernel)
# optimize
if optimize:
m.optimize(max_iters=max_iters)
Xpredict = m.predict(data['Ytest'])[0]
if plot:
pb.plot(data['Xtest'][:, 0], data['Xtest'][:, 1], 'r-')
pb.plot(Xpredict[:, 0], Xpredict[:, 1], 'b-')
pb.axis('equal')
pb.title('WiFi Localization with Gaussian Processes')
pb.legend(('True Location', 'Predicted Location'))
sse = ((data['Xtest'] - Xpredict)**2).sum()
print(('Sum of squares error on test data: ' + str(sse)))
return m | Predict the location of a robot given wireless signal strength readings. | Below is the instruction that describes the task:
### Input:
Predict the location of a robot given wireless signal strength readings.
### Response:
def robot_wireless(max_iters=100, kernel=None, optimize=True, plot=True):
"""Predict the location of a robot given wirelss signal strength readings."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.robot_wireless()
# create simple GP Model
m = GPy.models.GPRegression(data['Y'], data['X'], kernel=kernel)
# optimize
if optimize:
m.optimize(max_iters=max_iters)
Xpredict = m.predict(data['Ytest'])[0]
if plot:
pb.plot(data['Xtest'][:, 0], data['Xtest'][:, 1], 'r-')
pb.plot(Xpredict[:, 0], Xpredict[:, 1], 'b-')
pb.axis('equal')
pb.title('WiFi Localization with Gaussian Processes')
pb.legend(('True Location', 'Predicted Location'))
sse = ((data['Xtest'] - Xpredict)**2).sum()
print(('Sum of squares error on test data: ' + str(sse)))
return m |
def fix_simple_errors(self):
''' This attempts to fix the easy errors raised by ValidationError.
This includes removing items from the cart that are no longer
available, recalculating all of the discounts, and removing voucher
codes that are no longer available. '''
# Fix vouchers first (this affects available discounts)
to_remove = []
for voucher in self.cart.vouchers.all():
try:
self._test_voucher(voucher)
except ValidationError:
to_remove.append(voucher)
for voucher in to_remove:
self.cart.vouchers.remove(voucher)
# Fix products and discounts
items = commerce.ProductItem.objects.filter(cart=self.cart)
items = items.select_related("product")
products = set(i.product for i in items)
available = set(ProductController.available_products(
self.cart.user,
products=products,
))
not_available = products - available
zeros = [(product, 0) for product in not_available]
self.set_quantities(zeros) | This attempts to fix the easy errors raised by ValidationError.
This includes removing items from the cart that are no longer
available, recalculating all of the discounts, and removing voucher
codes that are no longer available. | Below is the instruction that describes the task:
### Input:
This attempts to fix the easy errors raised by ValidationError.
This includes removing items from the cart that are no longer
available, recalculating all of the discounts, and removing voucher
codes that are no longer available.
### Response:
def fix_simple_errors(self):
''' This attempts to fix the easy errors raised by ValidationError.
This includes removing items from the cart that are no longer
available, recalculating all of the discounts, and removing voucher
codes that are no longer available. '''
# Fix vouchers first (this affects available discounts)
to_remove = []
for voucher in self.cart.vouchers.all():
try:
self._test_voucher(voucher)
except ValidationError:
to_remove.append(voucher)
for voucher in to_remove:
self.cart.vouchers.remove(voucher)
# Fix products and discounts
items = commerce.ProductItem.objects.filter(cart=self.cart)
items = items.select_related("product")
products = set(i.product for i in items)
available = set(ProductController.available_products(
self.cart.user,
products=products,
))
not_available = products - available
zeros = [(product, 0) for product in not_available]
self.set_quantities(zeros) |
def audioread(filename):
"""Reads an audio signal from file.
Supported formats : wav
:param filename: filename of the audiofile to load
:type filename: str
:returns: int, numpy.ndarray -- samplerate, array containing the audio signal
"""
try:
if '.wav' in filename.lower():
fs, signal = wv.read(filename)
elif '.call' in filename.lower():
with open(filename, 'rb') as f:
signal = np.fromfile(f, dtype=np.int16)
fs = 333333
else:
raise IOError("Unsupported audio format for file: {}".format(filename))
except:
print u"Problem reading wav file"
raise
signal = signal.astype(float)
return fs, signal | Reads an audio signal from file.
Supported formats : wav
:param filename: filename of the audiofile to load
:type filename: str
:returns: int, numpy.ndarray -- samplerate, array containing the audio signal | Below is the instruction that describes the task:
### Input:
Reads an audio signal from file.
Supported formats : wav
:param filename: filename of the audiofile to load
:type filename: str
:returns: int, numpy.ndarray -- samplerate, array containing the audio signal
### Response:
def audioread(filename):
"""Reads an audio signal from file.
Supported formats : wav
:param filename: filename of the audiofile to load
:type filename: str
:returns: int, numpy.ndarray -- samplerate, array containing the audio signal
"""
try:
if '.wav' in filename.lower():
fs, signal = wv.read(filename)
elif '.call' in filename.lower():
with open(filename, 'rb') as f:
signal = np.fromfile(f, dtype=np.int16)
fs = 333333
else:
raise IOError("Unsupported audio format for file: {}".format(filename))
except:
print u"Problem reading wav file"
raise
signal = signal.astype(float)
return fs, signal |
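A short usage sketch (the file name is hypothetical; ``audioread`` itself relies on ``scipy.io.wavfile`` imported as ``wv`` and on ``numpy`` as ``np``):
.. code-block:: python

    fs, signal = audioread('recording.wav')
    duration_s = len(signal) / float(fs)   # number of samples divided by the sample rate
    print('%d samples at %d Hz (%.2f s)' % (len(signal), fs, duration_s))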
def sortByNamespacePrefix(urisList, nsList):
"""
Given an ordered list of namespaces prefixes, order a list of uris based on that.
Eg
In [7]: ll
Out[7]:
[rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'),
rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#equivalentClass')]
In [8]: sortByNamespacePrefix(ll, [OWL.OWLNS, RDFS])
Out[8]:
[rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#equivalentClass'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'),
rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type')]
"""
exit = []
urisList = sort_uri_list_by_name(urisList)
for ns in nsList:
innerexit = []
for uri in urisList:
if str(uri).startswith(str(ns)):
innerexit += [uri]
exit += innerexit
# add remaining uris (if any)
for uri in urisList:
if uri not in exit:
exit += [uri]
return exit | Given an ordered list of namespaces prefixes, order a list of uris based on that.
Eg
In [7]: ll
Out[7]:
[rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'),
rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#equivalentClass')]
In [8]: sortByNamespacePrefix(ll, [OWL.OWLNS, RDFS])
Out[8]:
[rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#equivalentClass'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'),
rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type')] | Below is the instruction that describes the task:
### Input:
Given an ordered list of namespaces prefixes, order a list of uris based on that.
Eg
In [7]: ll
Out[7]:
[rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'),
rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#equivalentClass')]
In [8]: sortByNamespacePrefix(ll, [OWL.OWLNS, RDFS])
Out[8]:
[rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#equivalentClass'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'),
rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type')]
### Response:
def sortByNamespacePrefix(urisList, nsList):
"""
Given an ordered list of namespaces prefixes, order a list of uris based on that.
Eg
In [7]: ll
Out[7]:
[rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'),
rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#equivalentClass')]
In [8]: sortByNamespacePrefix(ll, [OWL.OWLNS, RDFS])
Out[8]:
[rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#equivalentClass'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'),
rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type')]
"""
exit = []
urisList = sort_uri_list_by_name(urisList)
for ns in nsList:
innerexit = []
for uri in urisList:
if str(uri).startswith(str(ns)):
innerexit += [uri]
exit += innerexit
# add remaining uris (if any)
for uri in urisList:
if uri not in exit:
exit += [uri]
return exit |
def adjust_contrast_gamma(arr, gamma):
"""
Adjust contrast by scaling each pixel value to ``255 * ((I_ij/255)**gamma)``.
dtype support::
* ``uint8``: yes; fully tested (1) (2) (3)
* ``uint16``: yes; tested (2) (3)
* ``uint32``: yes; tested (2) (3)
* ``uint64``: yes; tested (2) (3) (4)
* ``int8``: limited; tested (2) (3) (5)
* ``int16``: limited; tested (2) (3) (5)
* ``int32``: limited; tested (2) (3) (5)
* ``int64``: limited; tested (2) (3) (4) (5)
* ``float16``: limited; tested (5)
* ``float32``: limited; tested (5)
* ``float64``: limited; tested (5)
* ``float128``: no (6)
* ``bool``: no (7)
- (1) Handled by ``cv2``. Other dtypes are handled by ``skimage``.
- (2) Normalization is done as ``I_ij/max``, where ``max`` is the maximum value of the
dtype, e.g. 255 for ``uint8``. The normalization is reversed afterwards,
e.g. ``result*255`` for ``uint8``.
- (3) Integer-like values are not rounded after applying the contrast adjustment equation
(before inverting the normalization to 0.0-1.0 space), i.e. projection from continuous
space to discrete happens according to floor function.
- (4) Note that scikit-image doc says that integers are converted to ``float64`` values before
applying the contrast normalization method. This might lead to inaccuracies for large
64bit integer values. Tests showed no indication of that happening though.
- (5) Must not contain negative values. Values >=0 are fully supported.
- (6) Leads to error in scikit-image.
- (7) Does not make sense for contrast adjustments.
Parameters
----------
arr : numpy.ndarray
Array for which to adjust the contrast. Dtype ``uint8`` is fastest.
gamma : number
Exponent for the contrast adjustment. Higher values darken the image.
Returns
-------
numpy.ndarray
Array with adjusted contrast.
"""
# int8 is also possible according to docs
# https://docs.opencv.org/3.0-beta/modules/core/doc/operations_on_arrays.html#cv2.LUT , but here it seemed
# like `d` was 0 for CV_8S, causing that to fail
if arr.dtype.name == "uint8":
min_value, _center_value, max_value = iadt.get_value_range_of_dtype(arr.dtype)
dynamic_range = max_value - min_value
value_range = np.linspace(0, 1.0, num=dynamic_range+1, dtype=np.float32)
# 255 * ((I_ij/255)**gamma)
# using np.float32(.) here still works when the input is a numpy array of size 1
table = (min_value + (value_range ** np.float32(gamma)) * dynamic_range)
arr_aug = cv2.LUT(arr, np.clip(table, min_value, max_value).astype(arr.dtype))
if arr.ndim == 3 and arr_aug.ndim == 2:
return arr_aug[..., np.newaxis]
return arr_aug
else:
return ski_exposure.adjust_gamma(arr, gamma) | Adjust contrast by scaling each pixel value to ``255 * ((I_ij/255)**gamma)``.
dtype support::
* ``uint8``: yes; fully tested (1) (2) (3)
* ``uint16``: yes; tested (2) (3)
* ``uint32``: yes; tested (2) (3)
* ``uint64``: yes; tested (2) (3) (4)
* ``int8``: limited; tested (2) (3) (5)
* ``int16``: limited; tested (2) (3) (5)
* ``int32``: limited; tested (2) (3) (5)
* ``int64``: limited; tested (2) (3) (4) (5)
* ``float16``: limited; tested (5)
* ``float32``: limited; tested (5)
* ``float64``: limited; tested (5)
* ``float128``: no (6)
* ``bool``: no (7)
- (1) Handled by ``cv2``. Other dtypes are handled by ``skimage``.
- (2) Normalization is done as ``I_ij/max``, where ``max`` is the maximum value of the
dtype, e.g. 255 for ``uint8``. The normalization is reversed afterwards,
e.g. ``result*255`` for ``uint8``.
- (3) Integer-like values are not rounded after applying the contrast adjustment equation
(before inverting the normalization to 0.0-1.0 space), i.e. projection from continuous
space to discrete happens according to floor function.
- (4) Note that scikit-image doc says that integers are converted to ``float64`` values before
applying the contrast normalization method. This might lead to inaccuracies for large
64bit integer values. Tests showed no indication of that happening though.
- (5) Must not contain negative values. Values >=0 are fully supported.
- (6) Leads to error in scikit-image.
- (7) Does not make sense for contrast adjustments.
Parameters
----------
arr : numpy.ndarray
Array for which to adjust the contrast. Dtype ``uint8`` is fastest.
gamma : number
Exponent for the contrast adjustment. Higher values darken the image.
Returns
-------
numpy.ndarray
Array with adjusted contrast. | Below is the instruction that describes the task:
### Input:
Adjust contrast by scaling each pixel value to ``255 * ((I_ij/255)**gamma)``.
dtype support::
* ``uint8``: yes; fully tested (1) (2) (3)
* ``uint16``: yes; tested (2) (3)
* ``uint32``: yes; tested (2) (3)
* ``uint64``: yes; tested (2) (3) (4)
* ``int8``: limited; tested (2) (3) (5)
* ``int16``: limited; tested (2) (3) (5)
* ``int32``: limited; tested (2) (3) (5)
* ``int64``: limited; tested (2) (3) (4) (5)
* ``float16``: limited; tested (5)
* ``float32``: limited; tested (5)
* ``float64``: limited; tested (5)
* ``float128``: no (6)
* ``bool``: no (7)
- (1) Handled by ``cv2``. Other dtypes are handled by ``skimage``.
- (2) Normalization is done as ``I_ij/max``, where ``max`` is the maximum value of the
dtype, e.g. 255 for ``uint8``. The normalization is reversed afterwards,
e.g. ``result*255`` for ``uint8``.
- (3) Integer-like values are not rounded after applying the contrast adjustment equation
(before inverting the normalization to 0.0-1.0 space), i.e. projection from continuous
space to discrete happens according to floor function.
- (4) Note that scikit-image doc says that integers are converted to ``float64`` values before
applying the contrast normalization method. This might lead to inaccuracies for large
64bit integer values. Tests showed no indication of that happening though.
- (5) Must not contain negative values. Values >=0 are fully supported.
- (6) Leads to error in scikit-image.
- (7) Does not make sense for contrast adjustments.
Parameters
----------
arr : numpy.ndarray
Array for which to adjust the contrast. Dtype ``uint8`` is fastest.
gamma : number
Exponent for the contrast adjustment. Higher values darken the image.
Returns
-------
numpy.ndarray
Array with adjusted contrast.
### Response:
def adjust_contrast_gamma(arr, gamma):
"""
Adjust contrast by scaling each pixel value to ``255 * ((I_ij/255)**gamma)``.
dtype support::
* ``uint8``: yes; fully tested (1) (2) (3)
* ``uint16``: yes; tested (2) (3)
* ``uint32``: yes; tested (2) (3)
* ``uint64``: yes; tested (2) (3) (4)
* ``int8``: limited; tested (2) (3) (5)
* ``int16``: limited; tested (2) (3) (5)
* ``int32``: limited; tested (2) (3) (5)
* ``int64``: limited; tested (2) (3) (4) (5)
* ``float16``: limited; tested (5)
* ``float32``: limited; tested (5)
* ``float64``: limited; tested (5)
* ``float128``: no (6)
* ``bool``: no (7)
- (1) Handled by ``cv2``. Other dtypes are handled by ``skimage``.
- (2) Normalization is done as ``I_ij/max``, where ``max`` is the maximum value of the
dtype, e.g. 255 for ``uint8``. The normalization is reversed afterwards,
e.g. ``result*255`` for ``uint8``.
- (3) Integer-like values are not rounded after applying the contrast adjustment equation
(before inverting the normalization to 0.0-1.0 space), i.e. projection from continuous
space to discrete happens according to floor function.
- (4) Note that scikit-image doc says that integers are converted to ``float64`` values before
applying the contrast normalization method. This might lead to inaccuracies for large
64bit integer values. Tests showed no indication of that happening though.
- (5) Must not contain negative values. Values >=0 are fully supported.
- (6) Leads to error in scikit-image.
- (7) Does not make sense for contrast adjustments.
Parameters
----------
arr : numpy.ndarray
Array for which to adjust the contrast. Dtype ``uint8`` is fastest.
gamma : number
Exponent for the contrast adjustment. Higher values darken the image.
Returns
-------
numpy.ndarray
Array with adjusted contrast.
"""
# int8 is also possible according to docs
# https://docs.opencv.org/3.0-beta/modules/core/doc/operations_on_arrays.html#cv2.LUT , but here it seemed
# like `d` was 0 for CV_8S, causing that to fail
if arr.dtype.name == "uint8":
min_value, _center_value, max_value = iadt.get_value_range_of_dtype(arr.dtype)
dynamic_range = max_value - min_value
value_range = np.linspace(0, 1.0, num=dynamic_range+1, dtype=np.float32)
# 255 * ((I_ij/255)**gamma)
# using np.float32(.) here still works when the input is a numpy array of size 1
table = (min_value + (value_range ** np.float32(gamma)) * dynamic_range)
arr_aug = cv2.LUT(arr, np.clip(table, min_value, max_value).astype(arr.dtype))
if arr.ndim == 3 and arr_aug.ndim == 2:
return arr_aug[..., np.newaxis]
return arr_aug
else:
return ski_exposure.adjust_gamma(arr, gamma) |
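A small sketch of the effect on a ``uint8`` array; the commented outputs follow directly from the formula, e.g. ``255 * (128/255) ** 2`` is roughly 64:
.. code-block:: python

    import numpy as np

    img = np.array([[0, 64, 128, 255]], dtype=np.uint8)
    darker = adjust_contrast_gamma(img, gamma=2.0)     # approx [[0, 16, 64, 255]]
    brighter = adjust_contrast_gamma(img, gamma=0.5)   # approx [[0, 127, 180, 255]]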
def no_exception(on_exception, logger=None):
"""
Decorator that handles exceptions raised by the wrapped function. ATT: on_exception is required
:param on_exception: the value the function should return when an exception occurs
"""
def decorator(function):
def wrapper(*args, **kwargs):
try:
result = function(*args, **kwargs)
except Exception, e:
if hasattr(logger, 'exception'):
logger.exception(e)
else:
print traceback.format_exc()
result = on_exception
return result
return wrapper
return decorator | Decorator that handles exceptions raised by the wrapped function. ATT: on_exception is required
:param on_exception: the value the function should return when an exception occurs | Below is the instruction that describes the task:
### Input:
Decorator that handles exceptions raised by the wrapped function. ATT: on_exception is required
:param on_exception: the value the function should return when an exception occurs
### Response:
def no_exception(on_exception, logger=None):
"""
Decorator that handles exceptions raised by the wrapped function. ATT: on_exception is required
:param on_exception: the value the function should return when an exception occurs
"""
def decorator(function):
def wrapper(*args, **kwargs):
try:
result = function(*args, **kwargs)
except Exception, e:
if hasattr(logger, 'exception'):
logger.exception(e)
else:
print traceback.format_exc()
result = on_exception
return result
return wrapper
return decorator |
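A usage sketch for the decorator (the decorated function and fallback value are made up):
.. code-block:: python

    import json

    @no_exception(on_exception={}, logger=None)
    def load_config(path):
        with open(path) as f:
            return json.loads(f.read())

    cfg = load_config('/no/such/file.json')   # returns {} instead of raising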
def signature(self, block_size=None):
"Calculates signature for local file."
kwargs = {}
if block_size:
kwargs['block_size'] = block_size
return librsync.signature(open(self.path, 'rb'), **kwargs) | Calculates signature for local file. | Below is the instruction that describes the task:
### Input:
Calculates signature for local file.
### Response:
def signature(self, block_size=None):
"Calculates signature for local file."
kwargs = {}
if block_size:
kwargs['block_size'] = block_size
return librsync.signature(open(self.path, 'rb'), **kwargs) |
def from_string(locale, strict=True):
"""
Return an instance ``Locale`` corresponding to the string
representation of a locale.
@param locale: a string representation of a locale, i.e., an ISO 639-3
alpha-3 code (or alpha-2 code), optionally followed by a dash
character ``-`` and an ISO 3166-1 alpha-2 code.
@param strict: indicate whether the string representation of a locale
has to be strictly compliant with RFC 4646, or whether a Java-style
locale (character ``_`` instead of ``-``) is accepted.
@return: an instance ``Locale``.
"""
(language_code, country_code) = Locale.decompose_locale(locale, strict)
return Locale(language_code, country_code) | Return an instance ``Locale`` corresponding to the string
representation of a locale.
@param locale: a string representation of a locale, i.e., an ISO 639-3
alpha-3 code (or alpha-2 code), optionally followed by a dash
character ``-`` and an ISO 3166-1 alpha-2 code.
@param strict: indicate whether the string representation of a locale
has to be strictly compliant with RFC 4646, or whether a Java-style
locale (character ``_`` instead of ``-``) is accepted.
@return: an instance ``Locale``. | Below is the instruction that describes the task:
### Input:
Return an instance ``Locale`` corresponding to the string
representation of a locale.
@param locale: a string representation of a locale, i.e., an ISO 639-3
alpha-3 code (or alpha-2 code), optionally followed by a dash
character ``-`` and an ISO 3166-1 alpha-2 code.
@param strict: indicate whether the string representation of a locale
has to be strictly compliant with RFC 4646, or whether a Java-style
locale (character ``_`` instead of ``-``) is accepted.
@return: an instance ``Locale``.
### Response:
def from_string(locale, strict=True):
"""
Return an instance ``Locale`` corresponding to the string
representation of a locale.
@param locale: a string representation of a locale, i.e., an ISO 639-3
alpha-3 code (or alpha-2 code), optionally followed by a dash
character ``-`` and an ISO 3166-1 alpha-2 code.
@param strict: indicate whether the string representation of a locale
has to be strictly compliant with RFC 4646, or whether a Java-style
locale (character ``_`` instead of ``-``) is accepted.
@return: an instance ``Locale``.
"""
(language_code, country_code) = Locale.decompose_locale(locale, strict)
return Locale(language_code, country_code) |
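A brief sketch of the intended call patterns, assuming the method is reached through the ``Locale`` class it belongs to:
.. code-block:: python

    Locale.from_string('eng-US')                 # RFC 4646 style: language 'eng', country 'US'
    Locale.from_string('eng_US', strict=False)   # Java-style underscore tolerated when strict=False
    Locale.from_string('fra')                    # language code only, no country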
def write(self, row, col, data, style=None):
"""
Write data to row, col of worksheet (ws) using the style
information.
Again, I'm wrapping this because you'll have to do it if you
create large amounts of formatted entries in your spreadsheet
(else Excel, but probably not OOo will crash).
"""
ws = self.ws
if not ws:
raise Exception('you must use set_sheet() before write()')
if style:
if isinstance(style, xlwt.Style.XFStyle):
s = style
else:
s = self.get_style(style)
ws.write(row, col, data, s)
else:
ws.write(row, col, data) | Write data to row, col of worksheet (ws) using the style
information.
Again, I'm wrapping this because you'll have to do it if you
create large amounts of formatted entries in your spreadsheet
(else Excel, but probably not OOo will crash). | Below is the instruction that describes the task:
### Input:
Write data to row, col of worksheet (ws) using the style
information.
Again, I'm wrapping this because you'll have to do it if you
create large amounts of formatted entries in your spreadsheet
(else Excel, but probably not OOo will crash).
### Response:
def write(self, row, col, data, style=None):
"""
Write data to row, col of worksheet (ws) using the style
information.
Again, I'm wrapping this because you'll have to do it if you
create large amounts of formatted entries in your spreadsheet
(else Excel, but probably not OOo will crash).
"""
ws = self.ws
if not ws:
raise Exception('you must use set_sheet() before write()')
if style:
if isinstance(style, xlwt.Style.XFStyle):
s = style
else:
s = self.get_style(style)
ws.write(row, col, data, s)
else:
ws.write(row, col, data) |
def add_term(self, t):
"""Add a term to this section and set it's ownership. Should only be used on root level terms"""
if t not in self.terms:
if t.parent_term_lc == 'root':
self.terms.append(t)
self.doc.add_term(t, add_section=False)
t.set_ownership()
else:
raise GenerateError("Can only add or move root-level terms. Term '{}' parent is '{}' "
.format(t, t.parent_term_lc))
assert t.section or t.join_lc == 'root.root', t | Add a term to this section and set its ownership. Should only be used on root level terms | Below is the instruction that describes the task:
### Input:
Add a term to this section and set its ownership. Should only be used on root level terms
### Response:
def add_term(self, t):
"""Add a term to this section and set it's ownership. Should only be used on root level terms"""
if t not in self.terms:
if t.parent_term_lc == 'root':
self.terms.append(t)
self.doc.add_term(t, add_section=False)
t.set_ownership()
else:
raise GenerateError("Can only add or move root-level terms. Term '{}' parent is '{}' "
.format(t, t.parent_term_lc))
assert t.section or t.join_lc == 'root.root', t |
def lock_input_target_config_target_running_running(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
lock = ET.Element("lock")
config = lock
input = ET.SubElement(lock, "input")
target = ET.SubElement(input, "target")
config_target = ET.SubElement(target, "config-target")
running = ET.SubElement(config_target, "running")
running = ET.SubElement(running, "running")
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def lock_input_target_config_target_running_running(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
lock = ET.Element("lock")
config = lock
input = ET.SubElement(lock, "input")
target = ET.SubElement(input, "target")
config_target = ET.SubElement(target, "config-target")
running = ET.SubElement(config_target, "running")
running = ET.SubElement(running, "running")
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_probs(self, x):
"""
:param x: A symbolic representation of the network input.
:return: A symbolic representation of the probs
"""
name = self._get_softmax_name()
return self.get_layer(x, name) | :param x: A symbolic representation of the network input.
:return: A symbolic representation of the probs | Below is the instruction that describes the task:
### Input:
:param x: A symbolic representation of the network input.
:return: A symbolic representation of the probs
### Response:
def get_probs(self, x):
"""
:param x: A symbolic representation of the network input.
:return: A symbolic representation of the probs
"""
name = self._get_softmax_name()
return self.get_layer(x, name) |
def parse_property(self, tup_tree):
"""
Parse PROPERTY into a CIMProperty object.
VAL is just the pcdata of the enclosed VALUE node.
::
<!ELEMENT PROPERTY (QUALIFIER*, VALUE?)>
<!ATTLIST PROPERTY
%CIMName;
%CIMType; #REQUIRED
%ClassOrigin;
%Propagated;
%EmbeddedObject;
xml:lang NMTOKEN #IMPLIED>
"""
self.check_node(tup_tree, 'PROPERTY', ('TYPE', 'NAME'),
('CLASSORIGIN', 'PROPAGATED', 'EmbeddedObject',
'EMBEDDEDOBJECT', 'xml:lang'),
('QUALIFIER', 'VALUE'))
# The 'xml:lang' attribute is tolerated but ignored.
attrl = attrs(tup_tree)
try:
val = self.unpack_value(tup_tree)
except ValueError as exc:
msg = str(exc)
raise CIMXMLParseError(
_format("Cannot parse content of 'VALUE' child element of "
"'PROPERTY' element with name {0!A}: {1}",
attrl['NAME'], msg),
conn_id=self.conn_id)
qualifiers = self.list_of_matching(tup_tree, ('QUALIFIER',))
embedded_object = False
if 'EmbeddedObject' in attrl or 'EMBEDDEDOBJECT' in attrl:
try:
embedded_object = attrl['EmbeddedObject']
except KeyError:
embedded_object = attrl['EMBEDDEDOBJECT']
if embedded_object:
val = self.parse_embeddedObject(val)
return CIMProperty(attrl['NAME'],
val,
type=attrl['TYPE'],
is_array=False,
class_origin=attrl.get('CLASSORIGIN', None),
propagated=self.unpack_boolean(
attrl.get('PROPAGATED', 'false')),
qualifiers=qualifiers,
embedded_object=embedded_object) | Parse PROPERTY into a CIMProperty object.
VAL is just the pcdata of the enclosed VALUE node.
::
<!ELEMENT PROPERTY (QUALIFIER*, VALUE?)>
<!ATTLIST PROPERTY
%CIMName;
%CIMType; #REQUIRED
%ClassOrigin;
%Propagated;
%EmbeddedObject;
xml:lang NMTOKEN #IMPLIED> | Below is the instruction that describes the task:
### Input:
Parse PROPERTY into a CIMProperty object.
VAL is just the pcdata of the enclosed VALUE node.
::
<!ELEMENT PROPERTY (QUALIFIER*, VALUE?)>
<!ATTLIST PROPERTY
%CIMName;
%CIMType; #REQUIRED
%ClassOrigin;
%Propagated;
%EmbeddedObject;
xml:lang NMTOKEN #IMPLIED>
### Response:
def parse_property(self, tup_tree):
"""
Parse PROPERTY into a CIMProperty object.
VAL is just the pcdata of the enclosed VALUE node.
::
<!ELEMENT PROPERTY (QUALIFIER*, VALUE?)>
<!ATTLIST PROPERTY
%CIMName;
%CIMType; #REQUIRED
%ClassOrigin;
%Propagated;
%EmbeddedObject;
xml:lang NMTOKEN #IMPLIED>
"""
self.check_node(tup_tree, 'PROPERTY', ('TYPE', 'NAME'),
('CLASSORIGIN', 'PROPAGATED', 'EmbeddedObject',
'EMBEDDEDOBJECT', 'xml:lang'),
('QUALIFIER', 'VALUE'))
# The 'xml:lang' attribute is tolerated but ignored.
attrl = attrs(tup_tree)
try:
val = self.unpack_value(tup_tree)
except ValueError as exc:
msg = str(exc)
raise CIMXMLParseError(
_format("Cannot parse content of 'VALUE' child element of "
"'PROPERTY' element with name {0!A}: {1}",
attrl['NAME'], msg),
conn_id=self.conn_id)
qualifiers = self.list_of_matching(tup_tree, ('QUALIFIER',))
embedded_object = False
if 'EmbeddedObject' in attrl or 'EMBEDDEDOBJECT' in attrl:
try:
embedded_object = attrl['EmbeddedObject']
except KeyError:
embedded_object = attrl['EMBEDDEDOBJECT']
if embedded_object:
val = self.parse_embeddedObject(val)
return CIMProperty(attrl['NAME'],
val,
type=attrl['TYPE'],
is_array=False,
class_origin=attrl.get('CLASSORIGIN', None),
propagated=self.unpack_boolean(
attrl.get('PROPAGATED', 'false')),
qualifiers=qualifiers,
embedded_object=embedded_object) |
def list_users_in_group_category(self, group_category_id, search_term=None, unassigned=None):
"""
List users in group category.
Returns a list of users in the group category.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - group_category_id
"""ID"""
path["group_category_id"] = group_category_id
# OPTIONAL - search_term
"""The partial name or full ID of the users to match and return in the results
list. Must be at least 3 characters."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - unassigned
"""Set this value to true if you wish only to search unassigned users in the
group category."""
if unassigned is not None:
params["unassigned"] = unassigned
self.logger.debug("GET /api/v1/group_categories/{group_category_id}/users with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/group_categories/{group_category_id}/users".format(**path), data=data, params=params, all_pages=True) | List users in group category.
Returns a list of users in the group category. | Below is the instruction that describes the task:
### Input:
List users in group category.
Returns a list of users in the group category.
### Response:
def list_users_in_group_category(self, group_category_id, search_term=None, unassigned=None):
"""
List users in group category.
Returns a list of users in the group category.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - group_category_id
"""ID"""
path["group_category_id"] = group_category_id
# OPTIONAL - search_term
"""The partial name or full ID of the users to match and return in the results
list. Must be at least 3 characters."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - unassigned
"""Set this value to true if you wish only to search unassigned users in the
group category."""
if unassigned is not None:
params["unassigned"] = unassigned
self.logger.debug("GET /api/v1/group_categories/{group_category_id}/users with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/group_categories/{group_category_id}/users".format(**path), data=data, params=params, all_pages=True) |
def mate_bottom(self):
" bottom of the stator"
return Mate(self, CoordSystem(
origin=(0, 0, -self.length/2),
xDir=(1, 0, 0),
normal=(0, 0, -1)
)) | bottom of the stator | Below is the instruction that describes the task:
### Input:
bottom of the stator
### Response:
def mate_bottom(self):
" bottom of the stator"
return Mate(self, CoordSystem(
origin=(0, 0, -self.length/2),
xDir=(1, 0, 0),
normal=(0, 0, -1)
)) |
def _two_point_interp(times, altitudes, horizon=0*u.deg):
"""
Do linear interpolation between two ``altitudes`` at
two ``times`` to determine the time where the altitude
goes through zero.
Parameters
----------
times : `~astropy.time.Time`
Two times for linear interpolation between
altitudes : array of `~astropy.units.Quantity`
Two altitudes for linear interpolation between
horizon : `~astropy.units.Quantity`
Solve for the time when the altitude is equal to
reference_alt.
Returns
-------
t : `~astropy.time.Time`
Time when target crosses the horizon
"""
if not isinstance(times, Time):
return MAGIC_TIME
else:
slope = (altitudes[1] - altitudes[0])/(times[1].jd - times[0].jd)
return Time(times[1].jd - ((altitudes[1] - horizon)/slope).value,
format='jd') | Do linear interpolation between two ``altitudes`` at
two ``times`` to determine the time where the altitude
goes through zero.
Parameters
----------
times : `~astropy.time.Time`
Two times for linear interpolation between
altitudes : array of `~astropy.units.Quantity`
Two altitudes for linear interpolation between
horizon : `~astropy.units.Quantity`
Solve for the time when the altitude is equal to
reference_alt.
Returns
-------
t : `~astropy.time.Time`
Time when target crosses the horizon | Below is the instruction that describes the task:
### Input:
Do linear interpolation between two ``altitudes`` at
two ``times`` to determine the time where the altitude
goes through zero.
Parameters
----------
times : `~astropy.time.Time`
Two times for linear interpolation between
altitudes : array of `~astropy.units.Quantity`
Two altitudes for linear interpolation between
horizon : `~astropy.units.Quantity`
Solve for the time when the altitude is equal to
reference_alt.
Returns
-------
t : `~astropy.time.Time`
Time when target crosses the horizon
### Response:
def _two_point_interp(times, altitudes, horizon=0*u.deg):
"""
Do linear interpolation between two ``altitudes`` at
two ``times`` to determine the time where the altitude
goes through zero.
Parameters
----------
times : `~astropy.time.Time`
Two times for linear interpolation between
altitudes : array of `~astropy.units.Quantity`
Two altitudes for linear interpolation between
horizon : `~astropy.units.Quantity`
Solve for the time when the altitude is equal to
reference_alt.
Returns
-------
t : `~astropy.time.Time`
Time when target crosses the horizon
"""
if not isinstance(times, Time):
return MAGIC_TIME
else:
slope = (altitudes[1] - altitudes[0])/(times[1].jd - times[0].jd)
return Time(times[1].jd - ((altitudes[1] - horizon)/slope).value,
format='jd') |
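A worked sketch with hypothetical numbers: altitudes of +5 and -5 degrees one hour apart cross the horizon exactly halfway between the two samples.
.. code-block:: python

    import astropy.units as u
    from astropy.time import Time

    times = Time(['2019-01-01 00:00:00', '2019-01-01 01:00:00'])
    altitudes = u.Quantity([5, -5], u.deg)

    t_cross = _two_point_interp(times, altitudes, horizon=0 * u.deg)
    print(t_cross.iso)   # ~2019-01-01 00:30:00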
def get_gfe(self, annotation, locus):
"""
creates GFE from a sequence annotation
:param locus: The gene locus
:type locus: ``str``
:param annotation: A sequence annotation object
:type annotation: ``List``
:rtype: ``List``
Returns:
The GFE notation and the associated features in an array
"""
features = []
accessions = {}
for feat in annotation.annotation:
if isinstance(annotation.annotation[feat], DBSeq) \
or isinstance(annotation.annotation[feat], Seq):
seq = str(annotation.annotation[feat])
else:
seq = str(annotation.annotation[feat].seq)
# TODO: Drop this if statement
if isutr(feat):
feat_str = ":".join([locus, str(1), feat, seq])
# If the feature has been loaded or stored
# then use that instead of making a feature request
if self.verbose and self.verbosity > 2:
self.logger.info("Getting accession " + feat_str)
if feat_str in self.all_feats[locus]:
if self.verbose and self.verbosity > 2:
self.logger.info("Feature found " + feat_str)
accession = self.all_feats[locus][feat_str]
feature = Feature(term=feat,
rank=1,
locus=locus,
sequence=seq,
accession=accession)
accessions.update({feat: accession})
features.append(feature)
else:
if self.verbose and self.verbosity > 2:
self.logger.info(self.logname + "Making FeatureRequest " + feat_str)
# Create FeatureRequest object
request = FeatureRequest(locus=locus,
term=feat,
rank=1,
sequence=seq)
# Attempt to make feature request
try:
feature = self.api.create_feature(body=request)
accessions.update({feat: feature.accession})
features.append(feature)
except ApiException as e:
self.logger.error(self.logname + "Exception when calling DefaultApi->create_feature" + e)
blank_feat = Feature(term=feat, rank=1, locus=locus,
sequence=seq)
accessions.update({feat: 0})
features.append(blank_feat)
# Store new features for quick retrieval if flag passed
if self.store_features:
# Adding new feature to all_feats
self.all_feats[locus].update({feat_str: feature.accession})
# Calculating memory size of all_feats
if self.verbose and self.verbosity > 1:
self.logger.info(self.logname + "Storing new feature " + feat_str)
mem = "{:4.4f}".format(sys.getsizeof(self.all_feats) / 1000000)
self.logger.info(self.logname + "Updated * all_feats " + mem + " MB *")
else:
term, rank = feat.split("_")
feat_str = ":".join([locus, str(rank), term, seq])
# If the feature has been loaded or stored
# then use that instead of making a feature request
if feat_str in self.all_feats[locus]:
if self.verbose and self.verbosity > 2:
self.logger.info(self.logname + "Feature found " + feat_str)
accession = self.all_feats[locus][feat_str]
feature = Feature(term=term,
rank=rank,
locus=locus,
sequence=seq,
accession=accession)
accessions.update({feat: accession})
features.append(feature)
else:
if self.verbose and self.verbosity > 2:
self.logger.info(self.logname + "Making FeatureRequest " + feat_str)
# Create FeatureRequest object
request = FeatureRequest(locus=locus,
term=term,
rank=rank,
sequence=seq)
# Attempt to make feature request
try:
feature = self.api.create_feature(body=request)
accessions.update({feat: feature.accession})
features.append(feature)
except ApiException as e:
self.logger.error(self.logname + "Exception when calling DefaultApi->create_feature %e" + e)
blank_feat = Feature(term=term, rank=rank, locus=locus,
sequence=seq)
accessions.update({feat: 0})
features.append(blank_feat)
# Store new features for quick retrieval if flag passed
if self.store_features:
# Adding new feature to all_feats
self.all_feats[locus].update({feat_str: feature.accession})
# Calculating memory size of all_feats
if self.verbose and self.verbosity > 1:
self.logger.info(self.logname + "Storing new feature " + feat_str)
mem = "{:4.4f}".format(sys.getsizeof(self.all_feats) / 1000000)
self.logger.info(self.logname + "Updated * all_feats " + mem + " MB *")
# Creating GFE
gfe = self._make_gfe(accessions, locus)
if self.verbose:
self.logger.info("GFE = " + gfe)
return features, gfe | creates GFE from a sequence annotation
:param locus: The gene locus
:type locus: ``str``
:param annotation: A sequence annotation object
:type annotation: ``List``
:rtype: ``List``
Returns:
The GFE notation and the associated features in an array | Below is the instruction that describes the task:
### Input:
creates GFE from a sequence annotation
:param locus: The gene locus
:type locus: ``str``
:param annotation: A sequence annotation object
:type annotation: ``List``
:rtype: ``List``
Returns:
The GFE notation and the associated features in an array
### Response:
def get_gfe(self, annotation, locus):
"""
creates GFE from a sequence annotation
:param locus: The gene locus
:type locus: ``str``
:param annotation: A sequence annotation object
:type annotation: ``List``
:rtype: ``List``
Returns:
The GFE notation and the associated features in an array
"""
features = []
accessions = {}
for feat in annotation.annotation:
if isinstance(annotation.annotation[feat], DBSeq) \
or isinstance(annotation.annotation[feat], Seq):
seq = str(annotation.annotation[feat])
else:
seq = str(annotation.annotation[feat].seq)
# TODO: Drop this if statement
if isutr(feat):
feat_str = ":".join([locus, str(1), feat, seq])
# If the feature has been loaded or stored
# then use that instead of making a feature request
if self.verbose and self.verbosity > 2:
self.logger.info("Getting accession " + feat_str)
if feat_str in self.all_feats[locus]:
if self.verbose and self.verbosity > 2:
self.logger.info("Feature found " + feat_str)
accession = self.all_feats[locus][feat_str]
feature = Feature(term=feat,
rank=1,
locus=locus,
sequence=seq,
accession=accession)
accessions.update({feat: accession})
features.append(feature)
else:
if self.verbose and self.verbosity > 2:
self.logger.info(self.logname + "Making FeatureRequest " + feat_str)
# Create FeatureRequest object
request = FeatureRequest(locus=locus,
term=feat,
rank=1,
sequence=seq)
# Attempt to make feature request
try:
feature = self.api.create_feature(body=request)
accessions.update({feat: feature.accession})
features.append(feature)
except ApiException as e:
self.logger.error(self.logname + "Exception when calling DefaultApi->create_feature" + e)
blank_feat = Feature(term=feat, rank=1, locus=locus,
sequence=seq)
accessions.update({feat: 0})
features.append(blank_feat)
# Store new features for quick retrieval if flag passed
if self.store_features:
# Adding new feature to all_feats
self.all_feats[locus].update({feat_str: feature.accession})
# Calculating memory size of all_feats
if self.verbose and self.verbosity > 1:
self.logger.info(self.logname + "Storing new feature " + feat_str)
mem = "{:4.4f}".format(sys.getsizeof(self.all_feats) / 1000000)
self.logger.info(self.logname + "Updated * all_feats " + mem + " MB *")
else:
term, rank = feat.split("_")
feat_str = ":".join([locus, str(rank), term, seq])
# If the feature has been loaded or stored
# then use that instead of making a feature request
if feat_str in self.all_feats[locus]:
if self.verbose and self.verbosity > 2:
self.logger.info(self.logname + "Feature found " + feat_str)
accession = self.all_feats[locus][feat_str]
feature = Feature(term=term,
rank=rank,
locus=locus,
sequence=seq,
accession=accession)
accessions.update({feat: accession})
features.append(feature)
else:
if self.verbose and self.verbosity > 2:
self.logger.info(self.logname + "Making FeatureRequest " + feat_str)
# Create FeatureRequest object
request = FeatureRequest(locus=locus,
term=term,
rank=rank,
sequence=seq)
# Attempt to make feature request
try:
feature = self.api.create_feature(body=request)
accessions.update({feat: feature.accession})
features.append(feature)
except ApiException as e:
self.logger.error(self.logname + "Exception when calling DefaultApi->create_feature %e" + e)
blank_feat = Feature(term=term, rank=rank, locus=locus,
sequence=seq)
accessions.update({feat: 0})
features.append(blank_feat)
# Store new features for quick retrieval if flag passed
if self.store_features:
# Adding new feature to all_feats
self.all_feats[locus].update({feat_str: feature.accession})
# Calculating memory size of all_feats
if self.verbose and self.verbosity > 1:
self.logger.info(self.logname + "Storing new feature " + feat_str)
mem = "{:4.4f}".format(sys.getsizeof(self.all_feats) / 1000000)
self.logger.info(self.logname + "Updated * all_feats " + mem + " MB *")
# Creating GFE
gfe = self._make_gfe(accessions, locus)
if self.verbose:
self.logger.info("GFE = " + gfe)
return features, gfe |
def indexes_some(ol,value,*seqs):
'''
from elist.elist import *
ol = [1,'a',3,'a',4,'a',5]
indexes_some(ol,'a',0,2)
indexes_some(ol,'a',0,1)
indexes_some(ol,'a',1,2)
indexes_some(ol,'a',3,4)
'''
seqs = list(seqs)
length = ol.__len__()
indexes =[]
seq = -1
for i in range(0,length):
if(value == ol[i]):
seq = seq + 1
if(seq in seqs):
indexes.append(i)
else:
pass
else:
pass
return(indexes) | from elist.elist import *
ol = [1,'a',3,'a',4,'a',5]
indexes_some(ol,'a',0,2)
indexes_some(ol,'a',0,1)
indexes_some(ol,'a',1,2)
indexes_some(ol,'a',3,4) | Below is the instruction that describes the task:
### Input:
from elist.elist import *
ol = [1,'a',3,'a',4,'a',5]
indexes_some(ol,'a',0,2)
indexes_some(ol,'a',0,1)
indexes_some(ol,'a',1,2)
indexes_some(ol,'a',3,4)
### Response:
def indexes_some(ol,value,*seqs):
'''
from elist.elist import *
ol = [1,'a',3,'a',4,'a',5]
indexes_some(ol,'a',0,2)
indexes_some(ol,'a',0,1)
indexes_some(ol,'a',1,2)
indexes_some(ol,'a',3,4)
'''
seqs = list(seqs)
length = ol.__len__()
indexes =[]
seq = -1
for i in range(0,length):
if(value == ol[i]):
seq = seq + 1
if(seq in seqs):
indexes.append(i)
else:
pass
else:
pass
return(indexes) |
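For reference, the calls in the docstring evaluate as follows ('a' sits at indices 1, 3 and 5, i.e. occurrence numbers 0, 1 and 2):
.. code-block:: python

    ol = [1, 'a', 3, 'a', 4, 'a', 5]
    indexes_some(ol, 'a', 0, 2)   # [1, 5]
    indexes_some(ol, 'a', 0, 1)   # [1, 3]
    indexes_some(ol, 'a', 1, 2)   # [3, 5]
    indexes_some(ol, 'a', 3, 4)   # [] -- only three occurrences exist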
def pending(self):
'''
Returns an array of updates that are currently in the buffer for an
individual social media profile.
'''
pending_updates = []
url = PATHS['GET_PENDING'] % self.profile_id
response = self.api.get(url=url)
for update in response['updates']:
pending_updates.append(Update(api=self.api, raw_response=update))
self.__pending = pending_updates
return self.__pending | Returns an array of updates that are currently in the buffer for an
individual social media profile. | Below is the instruction that describes the task:
### Input:
Returns an array of updates that are currently in the buffer for an
individual social media profile.
### Response:
def pending(self):
'''
Returns an array of updates that are currently in the buffer for an
individual social media profile.
'''
pending_updates = []
url = PATHS['GET_PENDING'] % self.profile_id
response = self.api.get(url=url)
for update in response['updates']:
pending_updates.append(Update(api=self.api, raw_response=update))
self.__pending = pending_updates
return self.__pending |
def make_rpc_call(func_name, args=None, remote=None):
"""Performs an RPC function call (local or remote) with the given arguments.
:param str|unicode func_name: RPC function name to call.
:param Iterable args: Function arguments.
:param str|unicode remote:
:rtype: bytes|str
:raises ValueError: If unable to call RPC function.
"""
args = args or []
args = [encode(str(arg)) for arg in args]
if remote:
result = uwsgi.rpc(remote, func_name, *args)
else:
result = uwsgi.call(func_name, *args)
return decode(result) | Performs an RPC function call (local or remote) with the given arguments.
:param str|unicode func_name: RPC function name to call.
:param Iterable args: Function arguments.
:param str|unicode remote:
:rtype: bytes|str
:raises ValueError: If unable to call RPC function. | Below is the instruction that describes the task:
### Input:
Performs an RPC function call (local or remote) with the given arguments.
:param str|unicode func_name: RPC function name to call.
:param Iterable args: Function arguments.
:param str|unicode remote:
:rtype: bytes|str
:raises ValueError: If unable to call RPC function.
### Response:
def make_rpc_call(func_name, args=None, remote=None):
"""Performs an RPC function call (local or remote) with the given arguments.
:param str|unicode func_name: RPC function name to call.
:param Iterable args: Function arguments.
:param str|unicode remote:
:rtype: bytes|str
:raises ValueError: If unable to call RPC function.
"""
args = args or []
args = [encode(str(arg)) for arg in args]
if remote:
result = uwsgi.rpc(remote, func_name, *args)
else:
result = uwsgi.call(func_name, *args)
return decode(result) |
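A call sketch; the function name, argument and address are placeholders, and the named RPC endpoint must already be registered with uWSGI:
.. code-block:: python

    # Local call to an RPC function registered in this uWSGI instance.
    greeting = make_rpc_call('hello', args=['world'])

    # The same call routed to another uWSGI node over the network.
    greeting = make_rpc_call('hello', args=['world'], remote='127.0.0.1:3031')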
def symm_block_tridiag_matmul(H_diag, H_upper_diag, v):
"""
Compute matrix-vector product with a symmetric block
tridiagonal matrix H and vector v.
:param H_diag: block diagonal terms of H
:param H_upper_diag: upper block diagonal terms of H
:param v: vector to multiply
:return: H * v
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T-1, D, D)
assert v.shape == (T, D)
out = np.matmul(H_diag, v[:, :, None])[:, :, 0]
out[:-1] += np.matmul(H_upper_diag, v[1:][:, :, None])[:, :, 0]
out[1:] += np.matmul(np.swapaxes(H_upper_diag, -2, -1), v[:-1][:, :, None])[:, :, 0]
return out | Compute matrix-vector product with a symmetric block
tridiagonal matrix H and vector v.
:param H_diag: block diagonal terms of H
:param H_upper_diag: upper block diagonal terms of H
:param v: vector to multiply
:return: H * v | Below is the the instruction that describes the task:
### Input:
Compute matrix-vector product with a symmetric block
tridiagonal matrix H and vector v.
:param H_diag: block diagonal terms of H
:param H_upper_diag: upper block diagonal terms of H
:param v: vector to multiply
:return: H * v
### Response:
def symm_block_tridiag_matmul(H_diag, H_upper_diag, v):
"""
Compute matrix-vector product with a symmetric block
tridiagonal matrix H and vector v.
:param H_diag: block diagonal terms of H
:param H_upper_diag: upper block diagonal terms of H
:param v: vector to multiply
:return: H * v
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T-1, D, D)
assert v.shape == (T, D)
out = np.matmul(H_diag, v[:, :, None])[:, :, 0]
out[:-1] += np.matmul(H_upper_diag, v[1:][:, :, None])[:, :, 0]
out[1:] += np.matmul(np.swapaxes(H_upper_diag, -2, -1), v[:-1][:, :, None])[:, :, 0]
return out |
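A minimal usage sketch for the block-tridiagonal product above, assuming numpy is available and the function from the snippet is in scope; the block sizes are arbitrary illustrative values:

import numpy as np

T, D = 4, 2
H_diag = np.tile(2.0 * np.eye(D), (T, 1, 1))              # (T, D, D) diagonal blocks
H_upper_diag = np.tile(-0.5 * np.eye(D), (T - 1, 1, 1))   # (T-1, D, D) upper-diagonal blocks
v = np.ones((T, D))
out = symm_block_tridiag_matmul(H_diag, H_upper_diag, v)
print(out.shape)  # (4, 2)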
def create_label(label_tuple, extra_label=None):
"""Return a label based on my_tuple (a,b) and extra label.
a and b are strings.
The output will be something like:
[a - b] extra_label
"""
if extra_label is not None:
return '[' + ' - '.join(label_tuple) + '] ' + str(extra_label)
else:
return '[' + ' - '.join(label_tuple) + ']' | Return a label based on my_tuple (a,b) and extra label.
a and b are strings.
The output will be something like:
[a - b] extra_label | Below is the the instruction that describes the task:
### Input:
Return a label based on my_tuple (a,b) and extra label.
a and b are strings.
The output will be something like:
[a - b] extra_label
### Response:
def create_label(label_tuple, extra_label=None):
"""Return a label based on my_tuple (a,b) and extra label.
a and b are strings.
The output will be something like:
[a - b] extra_label
"""
if extra_label is not None:
return '[' + ' - '.join(label_tuple) + '] ' + str(extra_label)
else:
return '[' + ' - '.join(label_tuple) + ']' |
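Two quick calls showing the label format produced above (assuming the function is in scope):

print(create_label(('2020-01-01', '2020-12-31'), 'revenue'))   # [2020-01-01 - 2020-12-31] revenue
print(create_label(('a', 'b')))                                # [a - b]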
def _call(self, path, method, body=None, headers=None):
"""
Wrapper around http.do_call that transforms some HTTPError into
our own exceptions
"""
try:
resp = self.http.do_call(path, method, body, headers)
except http.HTTPError as err:
if err.status == 401:
raise PermissionError('Insufficient permissions to query ' +
'%s with user %s :%s' % (path, self.user, err))
raise
return resp | Wrapper around http.do_call that transforms some HTTPError into
our own exceptions | Below is the the instruction that describes the task:
### Input:
Wrapper around http.do_call that transforms some HTTPError into
our own exceptions
### Response:
def _call(self, path, method, body=None, headers=None):
"""
Wrapper around http.do_call that transforms some HTTPError into
our own exceptions
"""
try:
resp = self.http.do_call(path, method, body, headers)
except http.HTTPError as err:
if err.status == 401:
raise PermissionError('Insufficient permissions to query ' +
'%s with user %s :%s' % (path, self.user, err))
raise
return resp |
def add_vcenter(self, **kwargs):
"""
Add vCenter on the switch
Args:
id(str) : Name of an established vCenter
url (bool) : vCenter URL
username (str): Username of the vCenter
password (str): Password of the vCenter
callback (function): A function executed upon completion of the
method.
Returns:
Return value of `callback`.
Raises:
None
"""
config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter",
xmlns="urn:brocade.com:mgmt:brocade-vswitch")
id = ET.SubElement(vcenter, "id")
id.text = kwargs.pop('id')
credentials = ET.SubElement(vcenter, "credentials")
url = ET.SubElement(credentials, "url")
url.text = kwargs.pop('url')
username = ET.SubElement(credentials, "username")
username.text = kwargs.pop('username')
password = ET.SubElement(credentials, "password")
password.text = kwargs.pop('password')
try:
self._callback(config)
return True
except Exception as error:
logging.error(error)
return False | Add vCenter on the switch
Args:
id(str) : Name of an established vCenter
url (bool) : vCenter URL
username (str): Username of the vCenter
password (str): Password of the vCenter
callback (function): A function executed upon completion of the
method.
Returns:
Return value of `callback`.
Raises:
None | Below is the the instruction that describes the task:
### Input:
Add vCenter on the switch
Args:
id(str) : Name of an established vCenter
url (bool) : vCenter URL
username (str): Username of the vCenter
password (str): Password of the vCenter
callback (function): A function executed upon completion of the
method.
Returns:
Return value of `callback`.
Raises:
None
### Response:
def add_vcenter(self, **kwargs):
"""
Add vCenter on the switch
Args:
id(str) : Name of an established vCenter
url (bool) : vCenter URL
username (str): Username of the vCenter
password (str): Password of the vCenter
callback (function): A function executed upon completion of the
method.
Returns:
Return value of `callback`.
Raises:
None
"""
config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter",
xmlns="urn:brocade.com:mgmt:brocade-vswitch")
id = ET.SubElement(vcenter, "id")
id.text = kwargs.pop('id')
credentials = ET.SubElement(vcenter, "credentials")
url = ET.SubElement(credentials, "url")
url.text = kwargs.pop('url')
username = ET.SubElement(credentials, "username")
username.text = kwargs.pop('username')
password = ET.SubElement(credentials, "password")
password.text = kwargs.pop('password')
try:
self._callback(config)
return True
except Exception as error:
logging.error(error)
return False |
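For reference, a standalone sketch of the XML payload the method above assembles; the id, URL and credentials are hypothetical placeholders, and only the ElementTree calls mirror the snippet:

from xml.etree import ElementTree as ET

config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter",
                        xmlns="urn:brocade.com:mgmt:brocade-vswitch")
ET.SubElement(vcenter, "id").text = "vcenter-1"                 # hypothetical vCenter name
credentials = ET.SubElement(vcenter, "credentials")
ET.SubElement(credentials, "url").text = "https://vc.example.local"
ET.SubElement(credentials, "username").text = "admin"
ET.SubElement(credentials, "password").text = "secret"
print(ET.tostring(config).decode())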
def _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics,
cors, target_version=None, delete_retention_policy=None, static_website=None):
'''
<?xml version="1.0" encoding="utf-8"?>
<StorageServiceProperties>
<Logging>
<Version>version-number</Version>
<Delete>true|false</Delete>
<Read>true|false</Read>
<Write>true|false</Write>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</Logging>
<HourMetrics>
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</HourMetrics>
<MinuteMetrics>
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</MinuteMetrics>
<Cors>
<CorsRule>
<AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
<AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
<MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
<ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
<AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
</CorsRule>
</Cors>
<DeleteRetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</DeleteRetentionPolicy>
<StaticWebsite>
<Enabled>true|false</Enabled>
<IndexDocument></IndexDocument>
<ErrorDocument404Path></ErrorDocument404Path>
</StaticWebsite>
</StorageServiceProperties>
'''
service_properties_element = ETree.Element('StorageServiceProperties')
# Logging
if logging:
logging_element = ETree.SubElement(service_properties_element, 'Logging')
ETree.SubElement(logging_element, 'Version').text = logging.version
ETree.SubElement(logging_element, 'Delete').text = str(logging.delete)
ETree.SubElement(logging_element, 'Read').text = str(logging.read)
ETree.SubElement(logging_element, 'Write').text = str(logging.write)
retention_element = ETree.SubElement(logging_element, 'RetentionPolicy')
_convert_retention_policy_to_xml(logging.retention_policy, retention_element)
# HourMetrics
if hour_metrics:
hour_metrics_element = ETree.SubElement(service_properties_element, 'HourMetrics')
_convert_metrics_to_xml(hour_metrics, hour_metrics_element)
# MinuteMetrics
if minute_metrics:
minute_metrics_element = ETree.SubElement(service_properties_element, 'MinuteMetrics')
_convert_metrics_to_xml(minute_metrics, minute_metrics_element)
# CORS
# Make sure to still serialize empty list
if cors is not None:
cors_element = ETree.SubElement(service_properties_element, 'Cors')
for rule in cors:
cors_rule = ETree.SubElement(cors_element, 'CorsRule')
ETree.SubElement(cors_rule, 'AllowedOrigins').text = ",".join(rule.allowed_origins)
ETree.SubElement(cors_rule, 'AllowedMethods').text = ",".join(rule.allowed_methods)
ETree.SubElement(cors_rule, 'MaxAgeInSeconds').text = str(rule.max_age_in_seconds)
ETree.SubElement(cors_rule, 'ExposedHeaders').text = ",".join(rule.exposed_headers)
ETree.SubElement(cors_rule, 'AllowedHeaders').text = ",".join(rule.allowed_headers)
# Target version
if target_version:
ETree.SubElement(service_properties_element, 'DefaultServiceVersion').text = target_version
# DeleteRetentionPolicy
if delete_retention_policy:
policy_element = ETree.SubElement(service_properties_element, 'DeleteRetentionPolicy')
ETree.SubElement(policy_element, 'Enabled').text = str(delete_retention_policy.enabled)
if delete_retention_policy.enabled:
ETree.SubElement(policy_element, 'Days').text = str(delete_retention_policy.days)
# StaticWebsite
if static_website:
static_website_element = ETree.SubElement(service_properties_element, 'StaticWebsite')
ETree.SubElement(static_website_element, 'Enabled').text = str(static_website.enabled)
if static_website.enabled:
if static_website.index_document is not None:
ETree.SubElement(static_website_element, 'IndexDocument').text = str(static_website.index_document)
if static_website.error_document_404_path is not None:
ETree.SubElement(static_website_element, 'ErrorDocument404Path').text = \
str(static_website.error_document_404_path)
# Add xml declaration and serialize
try:
stream = BytesIO()
ETree.ElementTree(service_properties_element).write(stream, xml_declaration=True, encoding='utf-8',
method='xml')
except:
raise
finally:
output = stream.getvalue()
stream.close()
return output | <?xml version="1.0" encoding="utf-8"?>
<StorageServiceProperties>
<Logging>
<Version>version-number</Version>
<Delete>true|false</Delete>
<Read>true|false</Read>
<Write>true|false</Write>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</Logging>
<HourMetrics>
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</HourMetrics>
<MinuteMetrics>
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</MinuteMetrics>
<Cors>
<CorsRule>
<AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
<AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
<MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
<ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
<AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
</CorsRule>
</Cors>
<DeleteRetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</DeleteRetentionPolicy>
<StaticWebsite>
<Enabled>true|false</Enabled>
<IndexDocument></IndexDocument>
<ErrorDocument404Path></ErrorDocument404Path>
</StaticWebsite>
</StorageServiceProperties> | Below is the the instruction that describes the task:
### Input:
<?xml version="1.0" encoding="utf-8"?>
<StorageServiceProperties>
<Logging>
<Version>version-number</Version>
<Delete>true|false</Delete>
<Read>true|false</Read>
<Write>true|false</Write>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</Logging>
<HourMetrics>
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</HourMetrics>
<MinuteMetrics>
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</MinuteMetrics>
<Cors>
<CorsRule>
<AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
<AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
<MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
<ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
<AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
</CorsRule>
</Cors>
<DeleteRetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</DeleteRetentionPolicy>
<StaticWebsite>
<Enabled>true|false</Enabled>
<IndexDocument></IndexDocument>
<ErrorDocument404Path></ErrorDocument404Path>
</StaticWebsite>
</StorageServiceProperties>
### Response:
def _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics,
cors, target_version=None, delete_retention_policy=None, static_website=None):
'''
<?xml version="1.0" encoding="utf-8"?>
<StorageServiceProperties>
<Logging>
<Version>version-number</Version>
<Delete>true|false</Delete>
<Read>true|false</Read>
<Write>true|false</Write>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</Logging>
<HourMetrics>
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</HourMetrics>
<MinuteMetrics>
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</MinuteMetrics>
<Cors>
<CorsRule>
<AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
<AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
<MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
<ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
<AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
</CorsRule>
</Cors>
<DeleteRetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</DeleteRetentionPolicy>
<StaticWebsite>
<Enabled>true|false</Enabled>
<IndexDocument></IndexDocument>
<ErrorDocument404Path></ErrorDocument404Path>
</StaticWebsite>
</StorageServiceProperties>
'''
service_properties_element = ETree.Element('StorageServiceProperties')
# Logging
if logging:
logging_element = ETree.SubElement(service_properties_element, 'Logging')
ETree.SubElement(logging_element, 'Version').text = logging.version
ETree.SubElement(logging_element, 'Delete').text = str(logging.delete)
ETree.SubElement(logging_element, 'Read').text = str(logging.read)
ETree.SubElement(logging_element, 'Write').text = str(logging.write)
retention_element = ETree.SubElement(logging_element, 'RetentionPolicy')
_convert_retention_policy_to_xml(logging.retention_policy, retention_element)
# HourMetrics
if hour_metrics:
hour_metrics_element = ETree.SubElement(service_properties_element, 'HourMetrics')
_convert_metrics_to_xml(hour_metrics, hour_metrics_element)
# MinuteMetrics
if minute_metrics:
minute_metrics_element = ETree.SubElement(service_properties_element, 'MinuteMetrics')
_convert_metrics_to_xml(minute_metrics, minute_metrics_element)
# CORS
# Make sure to still serialize empty list
if cors is not None:
cors_element = ETree.SubElement(service_properties_element, 'Cors')
for rule in cors:
cors_rule = ETree.SubElement(cors_element, 'CorsRule')
ETree.SubElement(cors_rule, 'AllowedOrigins').text = ",".join(rule.allowed_origins)
ETree.SubElement(cors_rule, 'AllowedMethods').text = ",".join(rule.allowed_methods)
ETree.SubElement(cors_rule, 'MaxAgeInSeconds').text = str(rule.max_age_in_seconds)
ETree.SubElement(cors_rule, 'ExposedHeaders').text = ",".join(rule.exposed_headers)
ETree.SubElement(cors_rule, 'AllowedHeaders').text = ",".join(rule.allowed_headers)
# Target version
if target_version:
ETree.SubElement(service_properties_element, 'DefaultServiceVersion').text = target_version
# DeleteRetentionPolicy
if delete_retention_policy:
policy_element = ETree.SubElement(service_properties_element, 'DeleteRetentionPolicy')
ETree.SubElement(policy_element, 'Enabled').text = str(delete_retention_policy.enabled)
if delete_retention_policy.enabled:
ETree.SubElement(policy_element, 'Days').text = str(delete_retention_policy.days)
# StaticWebsite
if static_website:
static_website_element = ETree.SubElement(service_properties_element, 'StaticWebsite')
ETree.SubElement(static_website_element, 'Enabled').text = str(static_website.enabled)
if static_website.enabled:
if static_website.index_document is not None:
ETree.SubElement(static_website_element, 'IndexDocument').text = str(static_website.index_document)
if static_website.error_document_404_path is not None:
ETree.SubElement(static_website_element, 'ErrorDocument404Path').text = \
str(static_website.error_document_404_path)
# Add xml declaration and serialize
try:
stream = BytesIO()
ETree.ElementTree(service_properties_element).write(stream, xml_declaration=True, encoding='utf-8',
method='xml')
except:
raise
finally:
output = stream.getvalue()
stream.close()
return output |
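The final serialization step can be exercised on its own with the standard library; a minimal sketch where the element names follow the docstring and the version string is a made-up value:

from io import BytesIO
from xml.etree import ElementTree as ETree

root = ETree.Element('StorageServiceProperties')
ETree.SubElement(root, 'DefaultServiceVersion').text = '2018-03-28'   # hypothetical version
stream = BytesIO()
ETree.ElementTree(root).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
print(stream.getvalue())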
def query(method='servers', server_id=None, command=None, args=None,
http_method='GET', root='api_root'):
''' Make a call to the Scaleway API.
'''
if root == 'api_root':
default_url = 'https://cp-par1.scaleway.com'
else:
default_url = 'https://api-marketplace.scaleway.com'
base_path = six.text_type(config.get_cloud_config_value(
root,
get_configured_provider(),
__opts__,
search_global=False,
default=default_url
))
path = '{0}/{1}/'.format(base_path, method)
if server_id:
path += '{0}/'.format(server_id)
if command:
path += command
if not isinstance(args, dict):
args = {}
token = config.get_cloud_config_value(
'token', get_configured_provider(), __opts__, search_global=False
)
data = salt.utils.json.dumps(args)
request = __utils__["http.query"](path,
method=http_method,
data=data,
status=True,
decode=True,
decode_type='json',
data_render=True,
data_renderer='json',
headers=True,
header_dict={'X-Auth-Token': token,
'User-Agent': "salt-cloud",
'Content-Type': 'application/json'})
if request['status'] > 299:
raise SaltCloudSystemExit(
'An error occurred while querying Scaleway. HTTP Code: {0} '
'Error: \'{1}\''.format(
request['status'],
request['error']
)
)
# success without data
if request['status'] == 204:
return True
return salt.utils.json.loads(request['body']) | Make a call to the Scaleway API. | Below is the the instruction that describes the task:
### Input:
Make a call to the Scaleway API.
### Response:
def query(method='servers', server_id=None, command=None, args=None,
http_method='GET', root='api_root'):
''' Make a call to the Scaleway API.
'''
if root == 'api_root':
default_url = 'https://cp-par1.scaleway.com'
else:
default_url = 'https://api-marketplace.scaleway.com'
base_path = six.text_type(config.get_cloud_config_value(
root,
get_configured_provider(),
__opts__,
search_global=False,
default=default_url
))
path = '{0}/{1}/'.format(base_path, method)
if server_id:
path += '{0}/'.format(server_id)
if command:
path += command
if not isinstance(args, dict):
args = {}
token = config.get_cloud_config_value(
'token', get_configured_provider(), __opts__, search_global=False
)
data = salt.utils.json.dumps(args)
request = __utils__["http.query"](path,
method=http_method,
data=data,
status=True,
decode=True,
decode_type='json',
data_render=True,
data_renderer='json',
headers=True,
header_dict={'X-Auth-Token': token,
'User-Agent': "salt-cloud",
'Content-Type': 'application/json'})
if request['status'] > 299:
raise SaltCloudSystemExit(
'An error occurred while querying Scaleway. HTTP Code: {0} '
'Error: \'{1}\''.format(
request['status'],
request['error']
)
)
# success without data
if request['status'] == 204:
return True
return salt.utils.json.loads(request['body']) |
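The URL construction in the function above can be previewed in isolation; the method, server id and command here are illustrative values, not real Scaleway resources:

base_path = 'https://cp-par1.scaleway.com'
method, server_id, command = 'servers', 'abc123', 'action'
path = '{0}/{1}/'.format(base_path, method)
if server_id:
    path += '{0}/'.format(server_id)
if command:
    path += command
print(path)   # https://cp-par1.scaleway.com/servers/abc123/action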
def _t_update_b(self):
r"""
A method to update 'b' array at each time step according to
't_scheme' and the source term value
"""
network = self.project.network
phase = self.project.phases()[self.settings['phase']]
Vi = network['pore.volume']
dt = self.settings['t_step']
s = self.settings['t_scheme']
if (s == 'implicit'):
f1, f2, f3 = 1, 1, 0
elif (s == 'cranknicolson'):
f1, f2, f3 = 0.5, 1, 0
elif (s == 'steady'):
f1, f2, f3 = 1, 0, 1
x_old = self[self.settings['quantity']]
b = (f2*(1-f1)*(-self._A_steady)*x_old +
f2*(Vi/dt)*x_old +
f3*np.zeros(shape=(self.Np, ), dtype=float))
self._update_physics()
for item in self.settings['sources']:
Ps = self.pores(item)
# Update b
b[Ps] = b[Ps] - f2*(1-f1)*(phase[item+'.'+'rate'][Ps])
self._b = b
return b | A method to update 'b' array at each time step according to
't_scheme' and the source term value | Below is the the instruction that describes the task:
### Input:
r"""
A method to update 'b' array at each time step according to
't_scheme' and the source term value
### Response:
def _t_update_b(self):
r"""
A method to update 'b' array at each time step according to
't_scheme' and the source term value
"""
network = self.project.network
phase = self.project.phases()[self.settings['phase']]
Vi = network['pore.volume']
dt = self.settings['t_step']
s = self.settings['t_scheme']
if (s == 'implicit'):
f1, f2, f3 = 1, 1, 0
elif (s == 'cranknicolson'):
f1, f2, f3 = 0.5, 1, 0
elif (s == 'steady'):
f1, f2, f3 = 1, 0, 1
x_old = self[self.settings['quantity']]
b = (f2*(1-f1)*(-self._A_steady)*x_old +
f2*(Vi/dt)*x_old +
f3*np.zeros(shape=(self.Np, ), dtype=float))
self._update_physics()
for item in self.settings['sources']:
Ps = self.pores(item)
# Update b
b[Ps] = b[Ps] - f2*(1-f1)*(phase[item+'.'+'rate'][Ps])
self._b = b
return b |
def installFuncVersionedLib(target, source, env):
"""Install a versioned library into a target using the function specified
as the INSTALLVERSIONEDLIB construction variable."""
try:
install = env['INSTALLVERSIONEDLIB']
except KeyError:
raise SCons.Errors.UserError('Missing INSTALLVERSIONEDLIB construction variable.')
assert len(target)==len(source), \
"Installing source %s into target %s: target and source lists must have same length."%(list(map(str, source)), list(map(str, target)))
for t,s in zip(target,source):
if hasattr(t.attributes, 'shlibname'):
tpath = os.path.join(t.get_dir(), t.attributes.shlibname)
else:
tpath = t.get_path()
if install(tpath,s.get_path(),env):
return 1
return 0 | Install a versioned library into a target using the function specified
as the INSTALLVERSIONEDLIB construction variable. | Below is the the instruction that describes the task:
### Input:
Install a versioned library into a target using the function specified
as the INSTALLVERSIONEDLIB construction variable.
### Response:
def installFuncVersionedLib(target, source, env):
"""Install a versioned library into a target using the function specified
as the INSTALLVERSIONEDLIB construction variable."""
try:
install = env['INSTALLVERSIONEDLIB']
except KeyError:
raise SCons.Errors.UserError('Missing INSTALLVERSIONEDLIB construction variable.')
assert len(target)==len(source), \
"Installing source %s into target %s: target and source lists must have same length."%(list(map(str, source)), list(map(str, target)))
for t,s in zip(target,source):
if hasattr(t.attributes, 'shlibname'):
tpath = os.path.join(t.get_dir(), t.attributes.shlibname)
else:
tpath = t.get_path()
if install(tpath,s.get_path(),env):
return 1
return 0 |
def _get_related_indicators_page_generator(self, indicators=None, enclave_ids=None, start_page=0, page_size=None):
"""
Creates a generator from the |get_related_indicators_page| method that returns each
successive page.
:param indicators: list of indicator values to search for
:param enclave_ids: list of IDs of enclaves to search in
:param start_page: The page to start on.
:param page_size: The size of each page.
:return: The generator.
"""
get_page = functools.partial(self.get_related_indicators_page, indicators, enclave_ids)
return Page.get_page_generator(get_page, start_page, page_size) | Creates a generator from the |get_related_indicators_page| method that returns each
successive page.
:param indicators: list of indicator values to search for
:param enclave_ids: list of IDs of enclaves to search in
:param start_page: The page to start on.
:param page_size: The size of each page.
:return: The generator. | Below is the the instruction that describes the task:
### Input:
Creates a generator from the |get_related_indicators_page| method that returns each
successive page.
:param indicators: list of indicator values to search for
:param enclave_ids: list of IDs of enclaves to search in
:param start_page: The page to start on.
:param page_size: The size of each page.
:return: The generator.
### Response:
def _get_related_indicators_page_generator(self, indicators=None, enclave_ids=None, start_page=0, page_size=None):
"""
Creates a generator from the |get_related_indicators_page| method that returns each
successive page.
:param indicators: list of indicator values to search for
:param enclave_ids: list of IDs of enclaves to search in
:param start_page: The page to start on.
:param page_size: The size of each page.
:return: The generator.
"""
get_page = functools.partial(self.get_related_indicators_page, indicators, enclave_ids)
return Page.get_page_generator(get_page, start_page, page_size) |
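The pattern above simply binds the search arguments with functools.partial and hands the bound callable to a page generator; a toy standalone illustration with hypothetical stand-ins rather than real TruSTAR API calls:

import functools

def get_page(query, page_number=0, page_size=3):
    # hypothetical stand-in for an API call returning one page of results
    start = page_number * page_size
    return list(range(start, start + page_size))

def page_generator(get_page, start_page=0, page_size=3):
    for page_number in range(start_page, start_page + 3):   # fixed bound, demo only
        yield get_page(page_number=page_number, page_size=page_size)

bound = functools.partial(get_page, 'indicator-query')
for page in page_generator(bound):
    print(page)   # [0, 1, 2] then [3, 4, 5] then [6, 7, 8]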
def process_formdata(self, valuelist):
"""Join time string."""
if valuelist:
time_str = u' '.join(valuelist)
try:
timetuple = time.strptime(time_str, self.format)
self.data = datetime.time(*timetuple[3:6])
except ValueError:
self.data = None
raise | Join time string. | Below is the the instruction that describes the task:
### Input:
Join time string.
### Response:
def process_formdata(self, valuelist):
"""Join time string."""
if valuelist:
time_str = u' '.join(valuelist)
try:
timetuple = time.strptime(time_str, self.format)
self.data = datetime.time(*timetuple[3:6])
except ValueError:
self.data = None
raise |
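The core parsing trick, slicing hour, minute and second out of a struct_time, works standalone; the format string '%H:%M:%S' is assumed here:

import datetime
import time

timetuple = time.strptime('14:30:05', '%H:%M:%S')
print(datetime.time(*timetuple[3:6]))   # 14:30:05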
def write(self, oprot):
'''
Write this object to the given output protocol and return self.
:type oprot: thryft.protocol._output_protocol._OutputProtocol
:rtype: pastpy.gen.database.impl.dummy.dummy_database_configuration.DummyDatabaseConfiguration
'''
oprot.write_struct_begin('DummyDatabaseConfiguration')
oprot.write_field_begin(name='images_per_object', type=8, id=None)
oprot.write_i32(self.images_per_object)
oprot.write_field_end()
oprot.write_field_begin(name='objects', type=8, id=None)
oprot.write_i32(self.objects)
oprot.write_field_end()
oprot.write_field_stop()
oprot.write_struct_end()
return self | Write this object to the given output protocol and return self.
:type oprot: thryft.protocol._output_protocol._OutputProtocol
:rtype: pastpy.gen.database.impl.dummy.dummy_database_configuration.DummyDatabaseConfiguration | Below is the the instruction that describes the task:
### Input:
Write this object to the given output protocol and return self.
:type oprot: thryft.protocol._output_protocol._OutputProtocol
:rtype: pastpy.gen.database.impl.dummy.dummy_database_configuration.DummyDatabaseConfiguration
### Response:
def write(self, oprot):
'''
Write this object to the given output protocol and return self.
:type oprot: thryft.protocol._output_protocol._OutputProtocol
:rtype: pastpy.gen.database.impl.dummy.dummy_database_configuration.DummyDatabaseConfiguration
'''
oprot.write_struct_begin('DummyDatabaseConfiguration')
oprot.write_field_begin(name='images_per_object', type=8, id=None)
oprot.write_i32(self.images_per_object)
oprot.write_field_end()
oprot.write_field_begin(name='objects', type=8, id=None)
oprot.write_i32(self.objects)
oprot.write_field_end()
oprot.write_field_stop()
oprot.write_struct_end()
return self |
def enroll(self, uuid, organization, from_date=MIN_PERIOD_DATE, to_date=MAX_PERIOD_DATE,
merge=False):
"""Enroll a unique identity in an organization.
This method adds a new relationship between the unique identity,
identified by <uuid>, and <organization>. Both entities must exist
on the registry before creating the new enrollment.
The period of the enrollment can be given with the parameters <from_date>
and <to_date>, where "from_date <= to_date". Default values for these
dates are '1900-01-01' and '2100-01-01'.
When "merge" parameter is set to True, those overlapped enrollments related
to <uuid> and <organization> found on the registry will be merged. The given
enrollment will be also merged.
:param uuid: unique identifier
:param organization: name of the organization
:param from_date: date when the enrollment starts
:param to_date: date when the enrollment ends
:param merge: merge overlapped enrollments; by default, it is set to False
"""
# Empty or None values for uuid and organizations are not allowed
if not uuid or not organization:
return CMD_SUCCESS
try:
api.add_enrollment(self.db, uuid, organization, from_date, to_date)
code = CMD_SUCCESS
except (NotFoundError, InvalidValueError) as e:
self.error(str(e))
code = e.code
except AlreadyExistsError as e:
if not merge:
msg_data = {
'uuid': uuid,
'org': organization,
'from_dt': str(from_date),
'to_dt': str(to_date)
}
msg = "enrollment for '%(uuid)s' at '%(org)s' (from: %(from_dt)s, to: %(to_dt)s) already exists in the registry"
msg = msg % msg_data
self.error(msg)
code = e.code
if not merge:
return code
try:
api.merge_enrollments(self.db, uuid, organization)
except (NotFoundError, InvalidValueError) as e:
# These exceptions were checked above. If any of them is raised
# here, something has gone really wrong.
raise RuntimeError(str(e))
return CMD_SUCCESS | Enroll a unique identity in an organization.
This method adds a new relationship between the unique identity,
identified by <uuid>, and <organization>. Both entities must exist
on the registry before creating the new enrollment.
The period of the enrollment can be given with the parameters <from_date>
and <to_date>, where "from_date <= to_date". Default values for these
dates are '1900-01-01' and '2100-01-01'.
When "merge" parameter is set to True, those overlapped enrollments related
to <uuid> and <organization> found on the registry will be merged. The given
enrollment will be also merged.
:param uuid: unique identifier
:param organization: name of the organization
:param from_date: date when the enrollment starts
:param to_date: date when the enrollment ends
:param merge: merge overlapped enrollments; by default, it is set to False | Below is the the instruction that describes the task:
### Input:
Enroll a unique identity in an organization.
This method adds a new relationship between the unique identity,
identified by <uuid>, and <organization>. Both entities must exist
on the registry before creating the new enrollment.
The period of the enrollment can be given with the parameters <from_date>
and <to_date>, where "from_date <= to_date". Default values for these
dates are '1900-01-01' and '2100-01-01'.
When "merge" parameter is set to True, those overlapped enrollments related
to <uuid> and <organization> found on the registry will be merged. The given
enrollment will be also merged.
:param uuid: unique identifier
:param organization: name of the organization
:param from_date: date when the enrollment starts
:param to_date: date when the enrollment ends
:param merge: merge overlapped enrollments; by default, it is set to False
### Response:
def enroll(self, uuid, organization, from_date=MIN_PERIOD_DATE, to_date=MAX_PERIOD_DATE,
merge=False):
"""Enroll a unique identity in an organization.
This method adds a new relationship between the unique identity,
identified by <uuid>, and <organization>. Both entities must exist
on the registry before creating the new enrollment.
The period of the enrollment can be given with the parameters <from_date>
and <to_date>, where "from_date <= to_date". Default values for these
dates are '1900-01-01' and '2100-01-01'.
When "merge" parameter is set to True, those overlapped enrollments related
to <uuid> and <organization> found on the registry will be merged. The given
enrollment will be also merged.
:param uuid: unique identifier
:param organization: name of the organization
:param from_date: date when the enrollment starts
:param to_date: date when the enrollment ends
:param merge: merge overlapped enrollments; by default, it is set to False
"""
# Empty or None values for uuid and organizations are not allowed
if not uuid or not organization:
return CMD_SUCCESS
try:
api.add_enrollment(self.db, uuid, organization, from_date, to_date)
code = CMD_SUCCESS
except (NotFoundError, InvalidValueError) as e:
self.error(str(e))
code = e.code
except AlreadyExistsError as e:
if not merge:
msg_data = {
'uuid': uuid,
'org': organization,
'from_dt': str(from_date),
'to_dt': str(to_date)
}
msg = "enrollment for '%(uuid)s' at '%(org)s' (from: %(from_dt)s, to: %(to_dt)s) already exists in the registry"
msg = msg % msg_data
self.error(msg)
code = e.code
if not merge:
return code
try:
api.merge_enrollments(self.db, uuid, organization)
except (NotFoundError, InvalidValueError) as e:
# These exceptions were checked above. If any of them is raised
# here, something has gone really wrong.
raise RuntimeError(str(e))
return CMD_SUCCESS |
def array2ntpl(arr):
"""
Convert a :class:`numpy.ndarray` object constructed by :func:`ntpl2array`
back to the original :func:`collections.namedtuple` representation.
Parameters
----------
arr : ndarray
Array representation of named tuple constructed by :func:`ntpl2array`
Returns
-------
ntpl : collections.namedtuple object
Named tuple object with the same name and fields as the original named
tuple object provided to :func:`ntpl2array`
"""
cls = collections.namedtuple(arr[2], arr[1])
return cls(*tuple(arr[0])) | Convert a :class:`numpy.ndarray` object constructed by :func:`ntpl2array`
back to the original :func:`collections.namedtuple` representation.
Parameters
----------
arr : ndarray
Array representation of named tuple constructed by :func:`ntpl2array`
Returns
-------
ntpl : collections.namedtuple object
Named tuple object with the same name and fields as the original named
tuple object provided to :func:`ntpl2array`
### Input:
Convert a :class:`numpy.ndarray` object constructed by :func:`ntpl2array`
back to the original :func:`collections.namedtuple` representation.
Parameters
----------
arr : ndarray
Array representation of named tuple constructed by :func:`ntpl2array`
Returns
-------
ntpl : collections.namedtuple object
Named tuple object with the same name and fields as the original named
tuple object provided to :func:`ntpl2array`
### Response:
def array2ntpl(arr):
"""
Convert a :class:`numpy.ndarray` object constructed by :func:`ntpl2array`
back to the original :func:`collections.namedtuple` representation.
Parameters
----------
arr : ndarray
Array representation of named tuple constructed by :func:`ntpl2array`
Returns
-------
ntpl : collections.namedtuple object
Named tuple object with the same name and fields as the original named
tuple object provided to :func:`ntpl2array`
"""
cls = collections.namedtuple(arr[2], arr[1])
return cls(*tuple(arr[0])) |
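A round-trip sketch: the object array is built by hand to match what ntpl2array is assumed to produce (values at index 0, field names at index 1, type name at index 2), and array2ntpl from the snippet is assumed to be in scope together with its collections import:

import numpy as np

arr = np.empty(3, dtype=object)
arr[0], arr[1], arr[2] = [1.0, 2.0], ('x', 'y'), 'Point'
pt = array2ntpl(arr)
print(pt)   # Point(x=1.0, y=2.0)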
def serialize(self):
'''
Return a JSON string of the serialized topology
'''
return json.dumps(json_graph.node_link_data(self.__nxgraph), cls=Encoder) | Return a JSON string of the serialized topology | Below is the the instruction that describes the task:
### Input:
Return a JSON string of the serialized topology
### Response:
def serialize(self):
'''
Return a JSON string of the serialized topology
'''
return json.dumps(json_graph.node_link_data(self.__nxgraph), cls=Encoder) |
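A minimal standalone equivalent of the serialization above; it assumes networkx is installed and skips the custom Encoder, since plain node/link data for a simple graph is already JSON-serializable:

import json
import networkx as nx
from networkx.readwrite import json_graph

g = nx.Graph()
g.add_edge('a', 'b')
print(json.dumps(json_graph.node_link_data(g)))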
def produce_upgrade_operations(
ctx=None, metadata=None, include_symbol=None, include_object=None,
**kwargs):
"""Produce a list of upgrade statements."""
if metadata is None:
# Note, all SQLAlchemy models must have been loaded to produce
# accurate results.
metadata = db.metadata
if ctx is None:
ctx = create_migration_ctx(target_metadata=metadata, **kwargs)
template_args = {}
imports = set()
_produce_migration_diffs(
ctx, template_args, imports,
include_object=include_object,
include_symbol=include_symbol,
**kwargs
)
return template_args | Produce a list of upgrade statements. | Below is the the instruction that describes the task:
### Input:
Produce a list of upgrade statements.
### Response:
def produce_upgrade_operations(
ctx=None, metadata=None, include_symbol=None, include_object=None,
**kwargs):
"""Produce a list of upgrade statements."""
if metadata is None:
# Note, all SQLAlchemy models must have been loaded to produce
# accurate results.
metadata = db.metadata
if ctx is None:
ctx = create_migration_ctx(target_metadata=metadata, **kwargs)
template_args = {}
imports = set()
_produce_migration_diffs(
ctx, template_args, imports,
include_object=include_object,
include_symbol=include_symbol,
**kwargs
)
return template_args |
def joinCommissioned(self, strPSKd='threadjpaketest', waitTime=20):
"""start joiner
Args:
strPSKd: Joiner's PSKd
Returns:
True: successful to start joiner
False: fail to start joiner
"""
print '%s call joinCommissioned' % self.port
self.__sendCommand('ifconfig up')
cmd = 'joiner start %s %s' %(strPSKd, self.provisioningUrl)
print cmd
if self.__sendCommand(cmd)[0] == "Done":
maxDuration = 150 # seconds
self.joinCommissionedStatus = self.joinStatus['ongoing']
if self.logThreadStatus == self.logStatus['stop']:
self.logThread = ThreadRunner.run(target=self.__readCommissioningLogs, args=(maxDuration,))
t_end = time.time() + maxDuration
while time.time() < t_end:
if self.joinCommissionedStatus == self.joinStatus['succeed']:
break
elif self.joinCommissionedStatus == self.joinStatus['failed']:
return False
time.sleep(1)
self.__sendCommand('thread start')
time.sleep(30)
return True
else:
return False | start joiner
Args:
strPSKd: Joiner's PSKd
Returns:
True: successful to start joiner
False: fail to start joiner | Below is the the instruction that describes the task:
### Input:
start joiner
Args:
strPSKd: Joiner's PSKd
Returns:
True: successful to start joiner
False: fail to start joiner
### Response:
def joinCommissioned(self, strPSKd='threadjpaketest', waitTime=20):
"""start joiner
Args:
strPSKd: Joiner's PSKd
Returns:
True: successful to start joiner
False: fail to start joiner
"""
print '%s call joinCommissioned' % self.port
self.__sendCommand('ifconfig up')
cmd = 'joiner start %s %s' %(strPSKd, self.provisioningUrl)
print cmd
if self.__sendCommand(cmd)[0] == "Done":
maxDuration = 150 # seconds
self.joinCommissionedStatus = self.joinStatus['ongoing']
if self.logThreadStatus == self.logStatus['stop']:
self.logThread = ThreadRunner.run(target=self.__readCommissioningLogs, args=(maxDuration,))
t_end = time.time() + maxDuration
while time.time() < t_end:
if self.joinCommissionedStatus == self.joinStatus['succeed']:
break
elif self.joinCommissionedStatus == self.joinStatus['failed']:
return False
time.sleep(1)
self.__sendCommand('thread start')
time.sleep(30)
return True
else:
return False |
def _load(self, exit_on_failure):
"""One you have added all your configuration data (Section, Element,
...) you need to load data from the config file."""
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
discoveredFileList = []
if self.config_file:
if isinstance(self.config_file, types.UnicodeType):
discoveredFileList = self.file_parser.read(self.config_file)
else:
discoveredFileList = self.file_parser.readfp(
self.config_file,
"file descriptor")
else:
defaultFileList = []
defaultFileList.append(self.prog_name + ".cfg")
defaultFileList.append(
os.path.expanduser('~/.' + self.prog_name + '.cfg'))
defaultFileList.append('/etc/' + self.prog_name + '.cfg')
log.debug("defaultFileList: " + str(defaultFileList))
discoveredFileList = self.file_parser.read(defaultFileList)
log.debug("discoveredFileList: " + str(discoveredFileList))
if self.mandatory and len(discoveredFileList) < 1:
msg = "The required config file was missing."
msg += " Default config files : " + str(defaultFileList)
log.error(msg)
raise EnvironmentError(msg)
log.debug("loading configuration ...")
if exit_on_failure:
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
try:
s.load(self.file_parser)
except ValueError:
sys.exit(1)
else:
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
s.load(self.file_parser)
log.debug("configuration loaded.") | One you have added all your configuration data (Section, Element,
...) you need to load data from the config file. | Below is the the instruction that describes the task:
### Input:
Once you have added all your configuration data (Section, Element,
...) you need to load data from the config file.
### Response:
def _load(self, exit_on_failure):
"""One you have added all your configuration data (Section, Element,
...) you need to load data from the config file."""
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
discoveredFileList = []
if self.config_file:
if isinstance(self.config_file, types.UnicodeType):
discoveredFileList = self.file_parser.read(self.config_file)
else:
discoveredFileList = self.file_parser.readfp(
self.config_file,
"file descriptor")
else:
defaultFileList = []
defaultFileList.append(self.prog_name + ".cfg")
defaultFileList.append(
os.path.expanduser('~/.' + self.prog_name + '.cfg'))
defaultFileList.append('/etc/' + self.prog_name + '.cfg')
log.debug("defaultFileList: " + str(defaultFileList))
discoveredFileList = self.file_parser.read(defaultFileList)
log.debug("discoveredFileList: " + str(discoveredFileList))
if self.mandatory and len(discoveredFileList) < 1:
msg = "The required config file was missing."
msg += " Default config files : " + str(defaultFileList)
log.error(msg)
raise EnvironmentError(msg)
log.debug("loading configuration ...")
if exit_on_failure:
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
try:
s.load(self.file_parser)
except ValueError:
sys.exit(1)
else:
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
s.load(self.file_parser)
log.debug("configuration loaded.") |
def shadow_reference(self, dispatcher, node):
"""
Only simply make a reference to the value in the current scope,
specifically for the FuncBase type.
"""
# as opposed to the previous one, only add the value of the
# identifier itself to the scope so that it becomes reserved.
self.current_scope.reference(node.identifier.value) | Only simply make a reference to the value in the current scope,
specifically for the FuncBase type. | Below is the the instruction that describes the task:
### Input:
Only simply make a reference to the value in the current scope,
specifically for the FuncBase type.
### Response:
def shadow_reference(self, dispatcher, node):
"""
Only simply make a reference to the value in the current scope,
specifically for the FuncBase type.
"""
# as opposed to the previous one, only add the value of the
# identifier itself to the scope so that it becomes reserved.
self.current_scope.reference(node.identifier.value) |
def cpp_app_builder(build_context, target):
"""Pack a C++ binary as a Docker image with its runtime dependencies.
TODO(itamar): Dynamically analyze the binary and copy shared objects
from its buildenv image to the runtime image, unless they're installed.
"""
yprint(build_context.conf, 'Build CppApp', target)
if target.props.executable and target.props.main:
raise KeyError(
'`main` and `executable` arguments are mutually exclusive')
if target.props.executable:
if target.props.executable not in target.artifacts.get(AT.app):
target.artifacts.add(AT.app, target.props.executable)
entrypoint = [target.props.executable]
elif target.props.main:
prog = build_context.targets[target.props.main]
binary = list(prog.artifacts.get(AT.binary).keys())[0]
entrypoint = ['/usr/src/bin/' + binary]
else:
raise KeyError('Must specify either `main` or `executable` argument')
build_app_docker_and_bin(
build_context, target, entrypoint=entrypoint) | Pack a C++ binary as a Docker image with its runtime dependencies.
TODO(itamar): Dynamically analyze the binary and copy shared objects
from its buildenv image to the runtime image, unless they're installed. | Below is the the instruction that describes the task:
### Input:
Pack a C++ binary as a Docker image with its runtime dependencies.
TODO(itamar): Dynamically analyze the binary and copy shared objects
from its buildenv image to the runtime image, unless they're installed.
### Response:
def cpp_app_builder(build_context, target):
"""Pack a C++ binary as a Docker image with its runtime dependencies.
TODO(itamar): Dynamically analyze the binary and copy shared objects
from its buildenv image to the runtime image, unless they're installed.
"""
yprint(build_context.conf, 'Build CppApp', target)
if target.props.executable and target.props.main:
raise KeyError(
'`main` and `executable` arguments are mutually exclusive')
if target.props.executable:
if target.props.executable not in target.artifacts.get(AT.app):
target.artifacts.add(AT.app, target.props.executable)
entrypoint = [target.props.executable]
elif target.props.main:
prog = build_context.targets[target.props.main]
binary = list(prog.artifacts.get(AT.binary).keys())[0]
entrypoint = ['/usr/src/bin/' + binary]
else:
raise KeyError('Must specify either `main` or `executable` argument')
build_app_docker_and_bin(
build_context, target, entrypoint=entrypoint) |
def _element_get_id(self, element):
"""Get id of reaction or species element.
In old levels the name is used as the id. This method returns the
correct attribute depending on the level.
"""
if self._reader._level > 1:
entry_id = element.get('id')
else:
entry_id = element.get('name')
return entry_id | Get id of reaction or species element.
In old levels the name is used as the id. This method returns the
correct attribute depending on the level. | Below is the the instruction that describes the task:
### Input:
Get id of reaction or species element.
In old levels the name is used as the id. This method returns the
correct attribute depending on the level.
### Response:
def _element_get_id(self, element):
"""Get id of reaction or species element.
In old levels the name is used as the id. This method returns the
correct attribute depending on the level.
"""
if self._reader._level > 1:
entry_id = element.get('id')
else:
entry_id = element.get('name')
return entry_id |
def _hash_url(self, url):
"""
Hash the URL to an md5sum.
"""
if isinstance(url, six.text_type):
url = url.encode('utf-8')
return hashlib.md5(url).hexdigest() | Hash the URL to an md5sum. | Below is the the instruction that describes the task:
### Input:
Hash the URL to an md5sum.
### Response:
def _hash_url(self, url):
"""
Hash the URL to an md5sum.
"""
if isinstance(url, six.text_type):
url = url.encode('utf-8')
return hashlib.md5(url).hexdigest() |
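The same cache-key computation outside the class, for reference:

import hashlib

url = 'https://example.com/page'
print(hashlib.md5(url.encode('utf-8')).hexdigest())   # 32-character hex key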
def getinputfile(self, outputfile, loadmetadata=True, client=None,requiremetadata=False):
"""Grabs one input file for the specified output filename (raises a KeyError exception if there is no such output, StopIteration if there are no input files for it). Shortcut for getinputfiles()"""
if isinstance(outputfile, CLAMOutputFile):
outputfilename = str(outputfile).replace(os.path.join(self.projectpath,'output/'),'')
else:
outputfilename = outputfile
if outputfilename not in self:
raise KeyError("No such outputfile " + outputfilename)
try:
return next(self.getinputfiles(outputfile,loadmetadata,client,requiremetadata))
except StopIteration:
raise StopIteration("No input files for outputfile " + outputfilename) | Grabs one input file for the specified output filename (raises a KeyError exception if there is no such output, StopIteration if there are no input files for it). Shortcut for getinputfiles() | Below is the the instruction that describes the task:
### Input:
Grabs one input file for the specified output filename (raises a KeyError exception if there is no such output, StopIteration if there are no input files for it). Shortcut for getinputfiles()
### Response:
def getinputfile(self, outputfile, loadmetadata=True, client=None,requiremetadata=False):
"""Grabs one input file for the specified output filename (raises a KeyError exception if there is no such output, StopIteration if there are no input files for it). Shortcut for getinputfiles()"""
if isinstance(outputfile, CLAMOutputFile):
outputfilename = str(outputfile).replace(os.path.join(self.projectpath,'output/'),'')
else:
outputfilename = outputfile
if outputfilename not in self:
raise KeyError("No such outputfile " + outputfilename)
try:
return next(self.getinputfiles(outputfile,loadmetadata,client,requiremetadata))
except StopIteration:
raise StopIteration("No input files for outputfile " + outputfilename) |
def get_query(self, q, request):
""" return a query set searching for the query string q
either implement this method yourself or set the search_field
in the LookupChannel class definition
"""
return Group.objects.filter(
Q(name__icontains=q)
| Q(description__icontains=q)
) | return a query set searching for the query string q
either implement this method yourself or set the search_field
in the LookupChannel class definition | Below is the the instruction that describes the task:
### Input:
return a query set searching for the query string q
either implement this method yourself or set the search_field
in the LookupChannel class definition
### Response:
def get_query(self, q, request):
""" return a query set searching for the query string q
either implement this method yourself or set the search_field
in the LookupChannel class definition
"""
return Group.objects.filter(
Q(name__icontains=q)
| Q(description__icontains=q)
) |
def check_archs(copied_libs, require_archs=(), stop_fast=False):
""" Check compatibility of archs in `copied_libs` dict
Parameters
----------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that has been copied during delocation, and ``dependings_dict`` is a
dictionary with key, value pairs where the key is a path in the target
being delocated (a wheel or path) depending on ``copied_lib_path``, and
the value is the ``install_name`` of ``copied_lib_path`` in the
depending library.
require_archs : str or sequence, optional
Architectures we require to be present in all library files in wheel.
If an empty sequence, just check that depended libraries do have the
architectures of the depending libraries, with no constraints on what
these architectures are. If a sequence, then a set of required
architectures e.g. ``['i386', 'x86_64']`` to specify dual Intel
architectures. If a string, then a standard architecture name as
returned by ``lipo -info`` or the string "intel", corresponding to the
sequence ``['i386', 'x86_64']``
stop_fast : bool, optional
Whether to give up collecting errors after the first
Returns
-------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depending_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required.
"""
if isinstance(require_archs, string_types):
require_archs = (['i386', 'x86_64'] if require_archs == 'intel'
else [require_archs])
require_archs = frozenset(require_archs)
bads = []
for depended_lib, dep_dict in copied_libs.items():
depended_archs = get_archs(depended_lib)
for depending_lib, install_name in dep_dict.items():
depending_archs = get_archs(depending_lib)
all_required = depending_archs | require_archs
all_missing = all_required.difference(depended_archs)
if len(all_missing) == 0:
continue
required_missing = require_archs.difference(depended_archs)
if len(required_missing):
bads.append((depending_lib, required_missing))
else:
bads.append((depended_lib, depending_lib, all_missing))
if stop_fast:
return set(bads)
return set(bads) | Check compatibility of archs in `copied_libs` dict
Parameters
----------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that has been copied during delocation, and ``dependings_dict`` is a
dictionary with key, value pairs where the key is a path in the target
being delocated (a wheel or path) depending on ``copied_lib_path``, and
the value is the ``install_name`` of ``copied_lib_path`` in the
depending library.
require_archs : str or sequence, optional
Architectures we require to be present in all library files in wheel.
If an empty sequence, just check that depended libraries do have the
architectures of the depending libraries, with no constraints on what
these architectures are. If a sequence, then a set of required
architectures e.g. ``['i386', 'x86_64']`` to specify dual Intel
architectures. If a string, then a standard architecture name as
returned by ``lipo -info`` or the string "intel", corresponding to the
sequence ``['i386', 'x86_64']``
stop_fast : bool, optional
Whether to give up collecting errors after the first
Returns
-------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depending_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required. | Below is the the instruction that describes the task:
### Input:
Check compatibility of archs in `copied_libs` dict
Parameters
----------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that has been copied during delocation, and ``dependings_dict`` is a
dictionary with key, value pairs where the key is a path in the target
being delocated (a wheel or path) depending on ``copied_lib_path``, and
the value is the ``install_name`` of ``copied_lib_path`` in the
depending library.
require_archs : str or sequence, optional
Architectures we require to be present in all library files in wheel.
If an empty sequence, just check that depended libraries do have the
architectures of the depending libraries, with no constraints on what
these architectures are. If a sequence, then a set of required
architectures e.g. ``['i386', 'x86_64']`` to specify dual Intel
architectures. If a string, then a standard architecture name as
returned by ``lipo -info`` or the string "intel", corresponding to the
sequence ``['i386', 'x86_64']``
stop_fast : bool, optional
Whether to give up collecting errors after the first
Returns
-------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depending_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required.
### Response:
def check_archs(copied_libs, require_archs=(), stop_fast=False):
""" Check compatibility of archs in `copied_libs` dict
Parameters
----------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that has been copied during delocation, and ``dependings_dict`` is a
dictionary with key, value pairs where the key is a path in the target
being delocated (a wheel or path) depending on ``copied_lib_path``, and
the value is the ``install_name`` of ``copied_lib_path`` in the
depending library.
require_archs : str or sequence, optional
Architectures we require to be present in all library files in wheel.
If an empty sequence, just check that depended libraries do have the
architectures of the depending libraries, with no constraints on what
these architectures are. If a sequence, then a set of required
architectures e.g. ``['i386', 'x86_64']`` to specify dual Intel
architectures. If a string, then a standard architecture name as
returned by ``lipo -info`` or the string "intel", corresponding to the
sequence ``['i386', 'x86_64']``
stop_fast : bool, optional
Whether to give up collecting errors after the first
Returns
-------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depended_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required.
"""
if isinstance(require_archs, string_types):
require_archs = (['i386', 'x86_64'] if require_archs == 'intel'
else [require_archs])
require_archs = frozenset(require_archs)
bads = []
for depended_lib, dep_dict in copied_libs.items():
depended_archs = get_archs(depended_lib)
for depending_lib, install_name in dep_dict.items():
depending_archs = get_archs(depending_lib)
all_required = depending_archs | require_archs
all_missing = all_required.difference(depended_archs)
if len(all_missing) == 0:
continue
required_missing = require_archs.difference(depended_archs)
if len(required_missing):
bads.append((depending_lib, required_missing))
else:
bads.append((depended_lib, depending_lib, all_missing))
if stop_fast:
return set(bads)
return set(bads) |
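For illustration, a small self-contained sketch of how the returned set can be reported; the paths and architectures below are hypothetical and check_archs itself is not called here:

bads = {
    ('/wheel/libfoo.dylib', frozenset({'i386'})),
    ('/wheel/libbar.dylib', '/wheel/libfoo.dylib', frozenset({'x86_64'})),
}
for bad in bads:
    if len(bad) == 2:
        depending_lib, missing = bad
        print('{0} is missing required arch(s): {1}'.format(
            depending_lib, ', '.join(sorted(missing))))
    else:
        depended_lib, depending_lib, missing = bad
        print('{0} (needed by {1}) is missing arch(s): {2}'.format(
            depended_lib, depending_lib, ', '.join(sorted(missing))))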
def concrete_descendents(parentclass):
"""
Return a dictionary containing all subclasses of the specified
parentclass, including the parentclass. Only classes that are
defined in scripts that have been run or modules that have been
imported are included, so the caller will usually first do ``from
package import *``.
Only non-abstract classes will be included.
"""
return dict((c.__name__,c) for c in descendents(parentclass)
if not _is_abstract(c)) | Return a dictionary containing all subclasses of the specified
parentclass, including the parentclass. Only classes that are
defined in scripts that have been run or modules that have been
imported are included, so the caller will usually first do ``from
package import *``.
Only non-abstract classes will be included. | Below is the the instruction that describes the task:
### Input:
Return a dictionary containing all subclasses of the specified
parentclass, including the parentclass. Only classes that are
defined in scripts that have been run or modules that have been
imported are included, so the caller will usually first do ``from
package import *``.
Only non-abstract classes will be included.
### Response:
def concrete_descendents(parentclass):
"""
Return a dictionary containing all subclasses of the specified
parentclass, including the parentclass. Only classes that are
defined in scripts that have been run or modules that have been
imported are included, so the caller will usually first do ``from
package import *``.
Only non-abstract classes will be included.
"""
return dict((c.__name__,c) for c in descendents(parentclass)
if not _is_abstract(c)) |
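A rough standalone illustration of the same idea using __subclasses__ recursion; unlike the real function it does not include the parent class itself or filter abstract classes (those rely on the descendents and _is_abstract helpers not shown here):

def _all_subclasses(cls):
    found = set()
    for sub in cls.__subclasses__():
        found.add(sub)
        found |= _all_subclasses(sub)
    return found

class Base(object): pass
class Leaf(Base): pass
class Deeper(Leaf): pass

print(sorted(c.__name__ for c in _all_subclasses(Base)))   # ['Deeper', 'Leaf']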
def tags(self):
"""Returns a dictionary that lists all available tags that can be used
for further filtering
"""
ret = {}
for typ in _meta_fields_twig:
if typ in ['uniqueid', 'plugin', 'feedback', 'fitting', 'history', 'twig', 'uniquetwig']:
continue
k = '{}s'.format(typ)
ret[k] = getattr(self, k)
return ret | Returns a dictionary that lists all available tags that can be used
for further filtering | Below is the the instruction that describes the task:
### Input:
Returns a dictionary that lists all available tags that can be used
for further filtering
### Response:
def tags(self):
"""Returns a dictionary that lists all available tags that can be used
for further filtering
"""
ret = {}
for typ in _meta_fields_twig:
if typ in ['uniqueid', 'plugin', 'feedback', 'fitting', 'history', 'twig', 'uniquetwig']:
continue
k = '{}s'.format(typ)
ret[k] = getattr(self, k)
return ret |
def set_connection_ip_list(addresses=None, grant_by_default=False, server=_DEFAULT_SERVER):
'''
Set the IPGrant list for the SMTP virtual server.
:param str addresses: A dictionary of IP + subnet pairs.
:param bool grant_by_default: Whether the addresses should be a blacklist or whitelist.
:param str server: The SMTP server name.
:return: A boolean representing whether the change succeeded.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' win_smtp_server.set_connection_ip_list addresses="{'127.0.0.1': '255.255.255.255'}"
'''
setting = 'IPGrant'
formatted_addresses = list()
# It's okay to accept an empty list for set_connection_ip_list,
# since an empty list may be desirable.
if not addresses:
addresses = dict()
_LOG.debug('Empty %s specified.', setting)
# Convert addresses to the 'ip_address, subnet' format used by
# IIsIPSecuritySetting.
for address in addresses:
formatted_addresses.append('{0}, {1}'.format(address.strip(),
addresses[address].strip()))
current_addresses = get_connection_ip_list(as_wmi_format=True, server=server)
# Order is not important, so compare to the current addresses as unordered sets.
if set(formatted_addresses) == set(current_addresses):
_LOG.debug('%s already contains the provided addresses.', setting)
return True
# First we should check GrantByDefault, and change it if necessary.
current_grant_by_default = _get_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', server)
if grant_by_default != current_grant_by_default:
_LOG.debug('Setting GrantByDefault to: %s', grant_by_default)
_set_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', grant_by_default, server)
_set_wmi_setting('IIsIPSecuritySetting', setting, formatted_addresses, server)
new_addresses = get_connection_ip_list(as_wmi_format=True, server=server)
ret = set(formatted_addresses) == set(new_addresses)
if ret:
_LOG.debug('%s configured successfully: %s', setting, formatted_addresses)
return ret
_LOG.error('Unable to configure %s with value: %s', setting, formatted_addresses)
return ret | Set the IPGrant list for the SMTP virtual server.
:param str addresses: A dictionary of IP + subnet pairs.
:param bool grant_by_default: Whether the addresses should be a blacklist or whitelist.
:param str server: The SMTP server name.
:return: A boolean representing whether the change succeeded.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' win_smtp_server.set_connection_ip_list addresses="{'127.0.0.1': '255.255.255.255'}" | Below is the the instruction that describes the task:
### Input:
Set the IPGrant list for the SMTP virtual server.
:param str addresses: A dictionary of IP + subnet pairs.
:param bool grant_by_default: Whether the addresses should be a blacklist or whitelist.
:param str server: The SMTP server name.
:return: A boolean representing whether the change succeeded.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' win_smtp_server.set_connection_ip_list addresses="{'127.0.0.1': '255.255.255.255'}"
### Response:
def set_connection_ip_list(addresses=None, grant_by_default=False, server=_DEFAULT_SERVER):
'''
Set the IPGrant list for the SMTP virtual server.
:param str addresses: A dictionary of IP + subnet pairs.
:param bool grant_by_default: Whether the addresses should be a blacklist or whitelist.
:param str server: The SMTP server name.
:return: A boolean representing whether the change succeeded.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' win_smtp_server.set_connection_ip_list addresses="{'127.0.0.1': '255.255.255.255'}"
'''
setting = 'IPGrant'
formatted_addresses = list()
# It's okay to accept an empty list for set_connection_ip_list,
# since an empty list may be desirable.
if not addresses:
addresses = dict()
_LOG.debug('Empty %s specified.', setting)
# Convert addresses to the 'ip_address, subnet' format used by
# IIsIPSecuritySetting.
for address in addresses:
formatted_addresses.append('{0}, {1}'.format(address.strip(),
addresses[address].strip()))
current_addresses = get_connection_ip_list(as_wmi_format=True, server=server)
# Order is not important, so compare to the current addresses as unordered sets.
if set(formatted_addresses) == set(current_addresses):
_LOG.debug('%s already contains the provided addresses.', setting)
return True
# First we should check GrantByDefault, and change it if necessary.
current_grant_by_default = _get_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', server)
if grant_by_default != current_grant_by_default:
_LOG.debug('Setting GrantByDefault to: %s', grant_by_default)
_set_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', grant_by_default, server)
_set_wmi_setting('IIsIPSecuritySetting', setting, formatted_addresses, server)
new_addresses = get_connection_ip_list(as_wmi_format=True, server=server)
ret = set(formatted_addresses) == set(new_addresses)
if ret:
_LOG.debug('%s configured successfully: %s', setting, formatted_addresses)
return ret
_LOG.error('Unable to configure %s with value: %s', setting, formatted_addresses)
return ret |
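The 'ip_address, subnet' formatting step can be exercised on its own with plain Python (the addresses below are made up):

addresses = {'127.0.0.1': '255.255.255.255', '10.1.0.0': '255.255.0.0'}
formatted = ['{0}, {1}'.format(ip.strip(), mask.strip())
             for ip, mask in addresses.items()]
print(formatted)   # ['127.0.0.1, 255.255.255.255', '10.1.0.0, 255.255.0.0'] (order may vary)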
def manipulate(self, stored_instance, component_instance):
"""
Stores the given StoredInstance bean.
:param stored_instance: The iPOPO component StoredInstance
:param component_instance: The component instance
"""
# Store the stored instance...
self._ipopo_instance = stored_instance
# ... and the bundle context
self._context = stored_instance.bundle_context
# Set the default value for the field: an empty dictionary
setattr(component_instance, self._field, {}) | Stores the given StoredInstance bean.
:param stored_instance: The iPOPO component StoredInstance
:param component_instance: The component instance | Below is the the instruction that describes the task:
### Input:
Stores the given StoredInstance bean.
:param stored_instance: The iPOPO component StoredInstance
:param component_instance: The component instance
### Response:
def manipulate(self, stored_instance, component_instance):
"""
Stores the given StoredInstance bean.
:param stored_instance: The iPOPO component StoredInstance
:param component_instance: The component instance
"""
# Store the stored instance...
self._ipopo_instance = stored_instance
# ... and the bundle context
self._context = stored_instance.bundle_context
# Set the default value for the field: an empty dictionary
setattr(component_instance, self._field, {}) |
def decodeString(encoded):
'''
Decodes a UTF-8 string from an encoded MQTT bytearray.
Returns the decoded string and remaining bytearray to be parsed
'''
length = encoded[0]*256 + encoded[1]
return (encoded[2:2+length].decode('utf-8'), encoded[2+length:]) | Decodes a UTF-8 string from an encoded MQTT bytearray.
Returns the decoded string and remaining bytearray to be parsed | Below is the the instruction that describes the task:
### Input:
Decodes a UTF-8 string from an encoded MQTT bytearray.
Returns the decoded string and remaining bytearray to be parsed
### Response:
def decodeString(encoded):
'''
Decodes a UTF-8 string from an encoded MQTT bytearray.
Returns the decoded string and remaining bytearray to be parsed
'''
length = encoded[0]*256 + encoded[1]
return (encoded[2:2+length].decode('utf-8'), encoded[2+length:]) |
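A quick self-contained round trip; the helper below mirrors the function above so the snippet runs on its own (an MQTT UTF-8 string is a two-byte big-endian length followed by the payload):

def _decode(encoded):
    length = encoded[0] * 256 + encoded[1]
    return (encoded[2:2 + length].decode('utf-8'), encoded[2 + length:])

packet = bytearray(b'\x00\x05hello') + bytearray(b'\x10leftover')
print(_decode(packet))   # ('hello', bytearray(b'\x10leftover'))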
def write(self, text, color):
"""
Write the given text to the stream in the given color.
"""
color = self._colors[color]
self.stream.write('\x1b[{}m{}\x1b[0m'.format(color, text)) | Write the given text to the stream in the given color. | Below is the the instruction that describes the task:
### Input:
Write the given text to the stream in the given color.
### Response:
def write(self, text, color):
"""
Write the given text to the stream in the given color.
"""
color = self._colors[color]
self.stream.write('\x1b[{}m{}\x1b[0m'.format(color, text)) |
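The ANSI escape-sequence pattern shown standalone, with a hypothetical color table in place of self._colors:

import sys

_colors = {'red': 31, 'green': 32, 'yellow': 33}

def write(stream, text, color):
    stream.write('\x1b[{}m{}\x1b[0m'.format(_colors[color], text))

write(sys.stdout, 'passed\n', 'green')
write(sys.stdout, 'failed\n', 'red')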
def operations_map(self):
# type: () -> Dict[Union[str, None], str]
"""
returns a Mapping of operation names and their associated types.
E.g. {'myQuery': 'query', 'myMutation': 'mutation'}
"""
document_ast = self.document_ast
operations = {} # type: Dict[Union[str, None], str]
for definition in document_ast.definitions:
if isinstance(definition, ast.OperationDefinition):
if definition.name:
operations[definition.name.value] = definition.operation
else:
operations[None] = definition.operation
return operations | returns a Mapping of operation names and their associated types.
E.g. {'myQuery': 'query', 'myMutation': 'mutation'} | Below is the the instruction that describes the task:
### Input:
returns a Mapping of operation names and their associated types.
E.g. {'myQuery': 'query', 'myMutation': 'mutation'}
### Response:
def operations_map(self):
# type: () -> Dict[Union[str, None], str]
"""
returns a Mapping of operation names and their associated types.
E.g. {'myQuery': 'query', 'myMutation': 'mutation'}
"""
document_ast = self.document_ast
operations = {} # type: Dict[Union[str, None], str]
for definition in document_ast.definitions:
if isinstance(definition, ast.OperationDefinition):
if definition.name:
operations[definition.name.value] = definition.operation
else:
operations[None] = definition.operation
return operations |
def authorize(self, ctx, identity, ops):
'''Implements Authorizer.authorize by calling f with the given identity
for each operation.
'''
allowed = []
caveats = []
for op in ops:
ok, fcaveats = self._f(ctx, identity, op)
allowed.append(ok)
if fcaveats is not None:
caveats.extend(fcaveats)
return allowed, caveats | Implements Authorizer.authorize by calling f with the given identity
for each operation. | Below is the the instruction that describes the task:
### Input:
Implements Authorizer.authorize by calling f with the given identity
for each operation.
### Response:
def authorize(self, ctx, identity, ops):
'''Implements Authorizer.authorize by calling f with the given identity
for each operation.
'''
allowed = []
caveats = []
for op in ops:
ok, fcaveats = self._f(ctx, identity, op)
allowed.append(ok)
if fcaveats is not None:
caveats.extend(fcaveats)
return allowed, caveats |
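The same per-operation loop, shown standalone with a toy checker in place of self._f (all names here are illustrative):

def read_only_checker(ctx, identity, op):
    ok = (op == 'read')
    return ok, (['time-before caveat'] if ok else None)

def authorize_with(f, ctx, identity, ops):
    allowed, caveats = [], []
    for op in ops:
        ok, fcaveats = f(ctx, identity, op)
        allowed.append(ok)
        if fcaveats is not None:
            caveats.extend(fcaveats)
    return allowed, caveats

print(authorize_with(read_only_checker, None, 'alice', ['read', 'write']))
# ([True, False], ['time-before caveat'])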
def scientificformat(value, fmt='%13.9E', sep=' ', sep2=':'):
"""
:param value: the value to convert into a string
:param fmt: the formatting string to use for float values
:param sep: separator to use for vector-like values
:param sep2: second separator to use for matrix-like values
Convert a float or an array into a string by using the scientific notation
and a fixed precision (by default 10 decimal digits). For instance:
>>> scientificformat(-0E0)
'0.000000000E+00'
>>> scientificformat(-0.004)
'-4.000000000E-03'
>>> scientificformat([0.004])
'4.000000000E-03'
>>> scientificformat([0.01, 0.02], '%10.6E')
'1.000000E-02 2.000000E-02'
>>> scientificformat([[0.1, 0.2], [0.3, 0.4]], '%4.1E')
'1.0E-01:2.0E-01 3.0E-01:4.0E-01'
"""
if isinstance(value, numpy.bool_):
return '1' if value else '0'
elif isinstance(value, bytes):
return value.decode('utf8')
elif isinstance(value, str):
return value
elif hasattr(value, '__len__'):
return sep.join((scientificformat(f, fmt, sep2) for f in value))
elif isinstance(value, (float, numpy.float64, numpy.float32)):
fmt_value = fmt % value
if set(fmt_value) <= zeroset:
# '-0.0000000E+00' is converted into '0.0000000E+00'
fmt_value = fmt_value.replace('-', '')
return fmt_value
return str(value) | :param value: the value to convert into a string
:param fmt: the formatting string to use for float values
:param sep: separator to use for vector-like values
:param sep2: second separator to use for matrix-like values
Convert a float or an array into a string by using the scientific notation
and a fixed precision (by default 10 decimal digits). For instance:
>>> scientificformat(-0E0)
'0.000000000E+00'
>>> scientificformat(-0.004)
'-4.000000000E-03'
>>> scientificformat([0.004])
'4.000000000E-03'
>>> scientificformat([0.01, 0.02], '%10.6E')
'1.000000E-02 2.000000E-02'
>>> scientificformat([[0.1, 0.2], [0.3, 0.4]], '%4.1E')
'1.0E-01:2.0E-01 3.0E-01:4.0E-01' | Below is the the instruction that describes the task:
### Input:
:param value: the value to convert into a string
:param fmt: the formatting string to use for float values
:param sep: separator to use for vector-like values
:param sep2: second separator to use for matrix-like values
Convert a float or an array into a string by using the scientific notation
and a fixed precision (by default 10 decimal digits). For instance:
>>> scientificformat(-0E0)
'0.000000000E+00'
>>> scientificformat(-0.004)
'-4.000000000E-03'
>>> scientificformat([0.004])
'4.000000000E-03'
>>> scientificformat([0.01, 0.02], '%10.6E')
'1.000000E-02 2.000000E-02'
>>> scientificformat([[0.1, 0.2], [0.3, 0.4]], '%4.1E')
'1.0E-01:2.0E-01 3.0E-01:4.0E-01'
### Response:
def scientificformat(value, fmt='%13.9E', sep=' ', sep2=':'):
"""
:param value: the value to convert into a string
:param fmt: the formatting string to use for float values
:param sep: separator to use for vector-like values
:param sep2: second separator to use for matrix-like values
Convert a float or an array into a string by using the scientific notation
and a fixed precision (by default 10 decimal digits). For instance:
>>> scientificformat(-0E0)
'0.000000000E+00'
>>> scientificformat(-0.004)
'-4.000000000E-03'
>>> scientificformat([0.004])
'4.000000000E-03'
>>> scientificformat([0.01, 0.02], '%10.6E')
'1.000000E-02 2.000000E-02'
>>> scientificformat([[0.1, 0.2], [0.3, 0.4]], '%4.1E')
'1.0E-01:2.0E-01 3.0E-01:4.0E-01'
"""
if isinstance(value, numpy.bool_):
return '1' if value else '0'
elif isinstance(value, bytes):
return value.decode('utf8')
elif isinstance(value, str):
return value
elif hasattr(value, '__len__'):
return sep.join((scientificformat(f, fmt, sep2) for f in value))
elif isinstance(value, (float, numpy.float64, numpy.float32)):
fmt_value = fmt % value
if set(fmt_value) <= zeroset:
# '-0.0000000E+00' is converted into '0.0000000E+00'
fmt_value = fmt_value.replace('-', '')
return fmt_value
return str(value) |
def SwitchToAlert():
''' <input value="Test" type="button" onClick="alert('OK')" > '''
try:
alert = WebDriverWait(Web.driver, 10).until(lambda driver: driver.switch_to_alert())
return alert
except:
print("Waring: Timeout at %d seconds.Alert was not found.")
return False | <input value="Test" type="button" onClick="alert('OK')" > | Below is the the instruction that describes the task:
### Input:
<input value="Test" type="button" onClick="alert('OK')" >
### Response:
def SwitchToAlert():
''' <input value="Test" type="button" onClick="alert('OK')" > '''
try:
alert = WebDriverWait(Web.driver, 10).until(lambda driver: driver.switch_to_alert())
return alert
except:
print("Waring: Timeout at %d seconds.Alert was not found.")
return False |
def get_fields_in_model(instance):
"""
Returns the list of fields in the given model instance. Checks whether to use the official _meta API or use the raw
data. This method excludes many to many fields.
:param instance: The model instance to get the fields for
:type instance: Model
:return: The list of fields for the given model (instance)
:rtype: list
"""
assert isinstance(instance, Model)
# Check if the Django 1.8 _meta API is available
use_api = hasattr(instance._meta, 'get_fields') and callable(instance._meta.get_fields)
if use_api:
return [f for f in instance._meta.get_fields() if track_field(f)]
return instance._meta.fields | Returns the list of fields in the given model instance. Checks whether to use the official _meta API or use the raw
data. This method excludes many to many fields.
:param instance: The model instance to get the fields for
:type instance: Model
:return: The list of fields for the given model (instance)
:rtype: list | Below is the the instruction that describes the task:
### Input:
Returns the list of fields in the given model instance. Checks whether to use the official _meta API or use the raw
data. This method excludes many to many fields.
:param instance: The model instance to get the fields for
:type instance: Model
:return: The list of fields for the given model (instance)
:rtype: list
### Response:
def get_fields_in_model(instance):
"""
Returns the list of fields in the given model instance. Checks whether to use the official _meta API or use the raw
data. This method excludes many to many fields.
:param instance: The model instance to get the fields for
:type instance: Model
:return: The list of fields for the given model (instance)
:rtype: list
"""
assert isinstance(instance, Model)
# Check if the Django 1.8 _meta API is available
use_api = hasattr(instance._meta, 'get_fields') and callable(instance._meta.get_fields)
if use_api:
return [f for f in instance._meta.get_fields() if track_field(f)]
return instance._meta.fields |
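The hasattr/callable feature check can be illustrated without Django (the classes below are stand-ins, not real Django _meta objects):

class OldStyleMeta(object):
    fields = ['id', 'name']

class NewStyleMeta(OldStyleMeta):
    def get_fields(self):
        return ['id', 'name', 'created']

def fields_of(meta):
    if hasattr(meta, 'get_fields') and callable(meta.get_fields):
        return meta.get_fields()
    return meta.fields

print(fields_of(OldStyleMeta()))   # ['id', 'name']
print(fields_of(NewStyleMeta()))   # ['id', 'name', 'created']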
def delete_value(self, label=None):
"""Delete the labelled value (or all values) on this Point
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`label` (optional) (string) the label for the value you want to delete. If not specified, all values for this
point will be removed.
"""
evt = self._client._request_point_value_delete(self.__lid, self.__pid, self._type, label=label)
self._client._wait_and_except_if_failed(evt) | Delete the labelled value (or all values) on this Point
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`label` (optional) (string) the label for the value you want to delete. If not specified, all values for this
point will be removed. | Below is the the instruction that describes the task:
### Input:
Delete the labelled value (or all values) on this Point
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`label` (optional) (string) the label for the value you want to delete. If not specified, all values for this
point will be removed.
### Response:
def delete_value(self, label=None):
"""Delete the labelled value (or all values) on this Point
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`label` (optional) (string) the label for the value you want to delete. If not specified, all values for this
point will be removed.
"""
evt = self._client._request_point_value_delete(self.__lid, self.__pid, self._type, label=label)
self._client._wait_and_except_if_failed(evt) |
def add_product_error(self, product, error):
''' Adds an error to the given product's field '''
''' if product in field_names:
field = field_names[product]
elif isinstance(product, inventory.Product):
return
else:
field = None '''
self.add_error(self.field_name(product), error) | Adds an error to the given product's field | Below is the the instruction that describes the task:
### Input:
Adds an error to the given product's field
### Response:
def add_product_error(self, product, error):
''' Adds an error to the given product's field '''
''' if product in field_names:
field = field_names[product]
elif isinstance(product, inventory.Product):
return
else:
field = None '''
self.add_error(self.field_name(product), error) |
def loadBWT(bwtDir, logger=None):
'''
Generic load function, this is recommended for anyone wishing to use this code as it will automatically detect compression
and assign the appropriate class preferring the decompressed version if both exist.
@return - a MultiStringBWT, CompressedBWT, or none if neither can be instantiated
'''
if os.path.exists(bwtDir+'/msbwt.npy'):
msbwt = MultiStringBWT()
msbwt.loadMsbwt(bwtDir, logger)
return msbwt
elif os.path.exists(bwtDir+'/comp_msbwt.npy'):
msbwt = CompressedMSBWT()
msbwt.loadMsbwt(bwtDir, logger)
return msbwt
else:
logger.error('Invalid BWT directory.')
return None | Generic load function, this is recommended for anyone wishing to use this code as it will automatically detect compression
and assign the appropriate class preferring the decompressed version if both exist.
@return - a MultiStringBWT, CompressedBWT, or none if neither can be instantiated | Below is the the instruction that describes the task:
### Input:
Generic load function, this is recommended for anyone wishing to use this code as it will automatically detect compression
and assign the appropriate class preferring the decompressed version if both exist.
@return - a MultiStringBWT, CompressedBWT, or none if neither can be instantiated
### Response:
def loadBWT(bwtDir, logger=None):
'''
Generic load function, this is recommended for anyone wishing to use this code as it will automatically detect compression
and assign the appropriate class preferring the decompressed version if both exist.
@return - a MultiStringBWT, CompressedBWT, or none if neither can be instantiated
'''
if os.path.exists(bwtDir+'/msbwt.npy'):
msbwt = MultiStringBWT()
msbwt.loadMsbwt(bwtDir, logger)
return msbwt
elif os.path.exists(bwtDir+'/comp_msbwt.npy'):
msbwt = CompressedMSBWT()
msbwt.loadMsbwt(bwtDir, logger)
return msbwt
else:
logger.error('Invalid BWT directory.')
return None |
def ion_balance_proportional(anion_charges, cation_charges, zs, n_anions,
n_cations, balance_error, method):
'''Helper method for balance_ions for the proportional family of methods.
See balance_ions for a description of the methods; parameters are fairly
obvious.
'''
anion_zs = zs[0:n_anions]
cation_zs = zs[n_anions:n_cations+n_anions]
anion_balance_error = sum([zi*ci for zi, ci in zip(anion_zs, anion_charges)])
cation_balance_error = sum([zi*ci for zi, ci in zip(cation_zs, cation_charges)])
if method == 'proportional insufficient ions increase':
if balance_error < 0:
multiplier = -anion_balance_error/cation_balance_error
cation_zs = [i*multiplier for i in cation_zs]
else:
multiplier = -cation_balance_error/anion_balance_error
anion_zs = [i*multiplier for i in anion_zs]
elif method == 'proportional excess ions decrease':
if balance_error < 0:
multiplier = -cation_balance_error/anion_balance_error
anion_zs = [i*multiplier for i in anion_zs]
else:
multiplier = -anion_balance_error/cation_balance_error
cation_zs = [i*multiplier for i in cation_zs]
elif method == 'proportional cation adjustment':
multiplier = -anion_balance_error/cation_balance_error
cation_zs = [i*multiplier for i in cation_zs]
elif method == 'proportional anion adjustment':
multiplier = -cation_balance_error/anion_balance_error
anion_zs = [i*multiplier for i in anion_zs]
else:
raise Exception('Allowable methods are %s' %charge_balance_methods)
z_water = 1. - sum(anion_zs) - sum(cation_zs)
return anion_zs, cation_zs, z_water | Helper method for balance_ions for the proportional family of methods.
See balance_ions for a description of the methods; parameters are fairly
obvious. | Below is the the instruction that describes the task:
### Input:
Helper method for balance_ions for the proportional family of methods.
See balance_ions for a description of the methods; parameters are fairly
obvious.
### Response:
def ion_balance_proportional(anion_charges, cation_charges, zs, n_anions,
n_cations, balance_error, method):
'''Helper method for balance_ions for the proportional family of methods.
See balance_ions for a description of the methods; parameters are fairly
obvious.
'''
anion_zs = zs[0:n_anions]
cation_zs = zs[n_anions:n_cations+n_anions]
anion_balance_error = sum([zi*ci for zi, ci in zip(anion_zs, anion_charges)])
cation_balance_error = sum([zi*ci for zi, ci in zip(cation_zs, cation_charges)])
if method == 'proportional insufficient ions increase':
if balance_error < 0:
multiplier = -anion_balance_error/cation_balance_error
cation_zs = [i*multiplier for i in cation_zs]
else:
multiplier = -cation_balance_error/anion_balance_error
anion_zs = [i*multiplier for i in anion_zs]
elif method == 'proportional excess ions decrease':
if balance_error < 0:
multiplier = -cation_balance_error/anion_balance_error
anion_zs = [i*multiplier for i in anion_zs]
else:
multiplier = -anion_balance_error/cation_balance_error
cation_zs = [i*multiplier for i in cation_zs]
elif method == 'proportional cation adjustment':
multiplier = -anion_balance_error/cation_balance_error
cation_zs = [i*multiplier for i in cation_zs]
elif method == 'proportional anion adjustment':
multiplier = -cation_balance_error/anion_balance_error
anion_zs = [i*multiplier for i in anion_zs]
else:
raise Exception('Allowable methods are %s' %charge_balance_methods)
z_water = 1. - sum(anion_zs) - sum(cation_zs)
return anion_zs, cation_zs, z_water |
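Worked numbers for the 'proportional cation adjustment' branch (a purely illustrative composition, not real water chemistry data):

anion_charges, cation_charges = [-1], [2]
zs = [0.10, 0.02, 0.88]                       # [anion, cation, water]
anion_err = zs[0] * anion_charges[0]          # -0.10
cation_err = zs[1] * cation_charges[0]        #  0.04
multiplier = -anion_err / cation_err          #  2.5
cation_zs = [zs[1] * multiplier]              # [0.05] -> charge +0.10 balances -0.10
z_water = 1.0 - zs[0] - sum(cation_zs)        #  0.85
print(cation_zs, z_water)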
def serialize(self, obj, level=0, objname=None, topLevelKeysToIgnore=None,
toBytes=True):
"""
Create a string representation of the given object.
Examples:
::
>>> serialize("str")
'str'
>>> serialize([1,2,3,4,5])
'1,2,3,4,5'
>>> signing.serlize({1:'a', 2:'b'})
'1:a|2:b'
>>> signing.serlize({1:'a', 2:'b', 3:[1,{2:'k'}]})
'1:a|2:b|3:1,2:k'
:param obj: the object to serlize
:param level: a parameter used internally for recursion to serialize nested
data structures
:param topLevelKeysToIgnore: the list of top level keys to ignore for
serialization
:return: a string representation of `obj`
"""
res = None
if not isinstance(obj, acceptableTypes):
error("invalid type found {}: {}".format(objname, obj))
elif isinstance(obj, str):
res = obj
elif isinstance(obj, dict):
if level > 0:
keys = list(obj.keys())
else:
topLevelKeysToIgnore = topLevelKeysToIgnore or []
keys = [k for k in obj.keys() if k not in topLevelKeysToIgnore]
keys.sort()
strs = []
for k in keys:
onm = ".".join([str(objname), str(k)]) if objname else k
strs.append(
str(k) + ":" + self.serialize(obj[k], level + 1, onm, toBytes=False))
res = "|".join(strs)
elif isinstance(obj, Iterable):
strs = []
for o in obj:
strs.append(self.serialize(
o, level + 1, objname, toBytes=False))
res = ",".join(strs)
elif obj is None:
res = ""
else:
res = str(obj)
# logger.trace("serialized msg {} into {}".format(obj, res))
if not toBytes:
return res
return res.encode('utf-8') | Create a string representation of the given object.
Examples:
::
>>> serialize("str")
'str'
>>> serialize([1,2,3,4,5])
'1,2,3,4,5'
>>> signing.serlize({1:'a', 2:'b'})
'1:a|2:b'
>>> signing.serlize({1:'a', 2:'b', 3:[1,{2:'k'}]})
'1:a|2:b|3:1,2:k'
:param obj: the object to serlize
:param level: a parameter used internally for recursion to serialize nested
data structures
:param topLevelKeysToIgnore: the list of top level keys to ignore for
serialization
:return: a string representation of `obj` | Below is the the instruction that describes the task:
### Input:
Create a string representation of the given object.
Examples:
::
>>> serialize("str")
'str'
>>> serialize([1,2,3,4,5])
'1,2,3,4,5'
>>> signing.serlize({1:'a', 2:'b'})
'1:a|2:b'
>>> signing.serlize({1:'a', 2:'b', 3:[1,{2:'k'}]})
'1:a|2:b|3:1,2:k'
:param obj: the object to serlize
:param level: a parameter used internally for recursion to serialize nested
data structures
:param topLevelKeysToIgnore: the list of top level keys to ignore for
serialization
:return: a string representation of `obj`
### Response:
def serialize(self, obj, level=0, objname=None, topLevelKeysToIgnore=None,
toBytes=True):
"""
Create a string representation of the given object.
Examples:
::
>>> serialize("str")
'str'
>>> serialize([1,2,3,4,5])
'1,2,3,4,5'
>>> signing.serlize({1:'a', 2:'b'})
'1:a|2:b'
>>> signing.serlize({1:'a', 2:'b', 3:[1,{2:'k'}]})
'1:a|2:b|3:1,2:k'
:param obj: the object to serlize
:param level: a parameter used internally for recursion to serialize nested
data structures
:param topLevelKeysToIgnore: the list of top level keys to ignore for
serialization
:return: a string representation of `obj`
"""
res = None
if not isinstance(obj, acceptableTypes):
error("invalid type found {}: {}".format(objname, obj))
elif isinstance(obj, str):
res = obj
elif isinstance(obj, dict):
if level > 0:
keys = list(obj.keys())
else:
topLevelKeysToIgnore = topLevelKeysToIgnore or []
keys = [k for k in obj.keys() if k not in topLevelKeysToIgnore]
keys.sort()
strs = []
for k in keys:
onm = ".".join([str(objname), str(k)]) if objname else k
strs.append(
str(k) + ":" + self.serialize(obj[k], level + 1, onm, toBytes=False))
res = "|".join(strs)
elif isinstance(obj, Iterable):
strs = []
for o in obj:
strs.append(self.serialize(
o, level + 1, objname, toBytes=False))
res = ",".join(strs)
elif obj is None:
res = ""
else:
res = str(obj)
# logger.trace("serialized msg {} into {}".format(obj, res))
if not toBytes:
return res
return res.encode('utf-8') |
def new_mapper(agent):
"""Creates a mapper object on witch add_mapping() and remove_mapping()
can be called. It uses fire-and-forget notifications so it has a very
low overhead and latency but a little less guarantees."""
recp = recipient.Broadcast(MappingUpdatesPoster.protocol_id, 'lobby')
return agent.initiate_protocol(MappingUpdatesPoster, recp) | Creates a mapper object on which add_mapping() and remove_mapping()
can be called. It uses fire-and-forget notifications so it has a very
low overhead and latency but slightly weaker guarantees. | Below is the the instruction that describes the task:
### Input:
Creates a mapper object on which add_mapping() and remove_mapping()
can be called. It uses fire-and-forget notifications so it has a very
low overhead and latency but slightly weaker guarantees.
### Response:
def new_mapper(agent):
"""Creates a mapper object on witch add_mapping() and remove_mapping()
can be called. It uses fire-and-forget notifications so it has a very
low overhead and latency but a little less guarantees."""
recp = recipient.Broadcast(MappingUpdatesPoster.protocol_id, 'lobby')
return agent.initiate_protocol(MappingUpdatesPoster, recp) |
def parse(self, parser):
"""Main method to render data into the template."""
lineno = next(parser.stream).lineno
if parser.stream.skip_if('name:short'):
parser.stream.skip(1)
short = parser.parse_expression()
else:
short = nodes.Const(False)
result = self.call_method('_commit_hash', [short], [], lineno=lineno)
return nodes.Output([result], lineno=lineno) | Main method to render data into the template. | Below is the the instruction that describes the task:
### Input:
Main method to render data into the template.
### Response:
def parse(self, parser):
"""Main method to render data into the template."""
lineno = next(parser.stream).lineno
if parser.stream.skip_if('name:short'):
parser.stream.skip(1)
short = parser.parse_expression()
else:
short = nodes.Const(False)
result = self.call_method('_commit_hash', [short], [], lineno=lineno)
return nodes.Output([result], lineno=lineno) |
def _to_dict(self):
''' Returns a dictionary representation of this object '''
return dict(area= self.area._to_dict(),
earthquakes = [q._to_dict() for q in self.earthquakes],
title = self.title) | Returns a dictionary representation of this object | Below is the the instruction that describes the task:
### Input:
Returns a dictionary representation of this object
### Response:
def _to_dict(self):
''' Returns a dictionary representation of this object '''
return dict(area= self.area._to_dict(),
earthquakes = [q._to_dict() for q in self.earthquakes],
title = self.title) |
def install_dap_from_path(path, update=False, update_allpaths=False, first=True,
force=False, nodeps=False, reinstall=False, __ui__=''):
'''Installs a dap from a given path'''
will_uninstall = False
dap_obj = dapi.Dap(path)
name = dap_obj.meta['package_name']
if name in get_installed_daps():
if not update and not reinstall:
raise DapiLocalError(
'DAP {name} is already installed. '
'Run `da pkg list` to see its location, or use --reinstall to ignore this check.'
.format(name=name))
elif not update_allpaths and name in get_installed_daps(_install_path()):
will_uninstall = True
elif update_allpaths and name in get_installed_daps():
will_uninstall = True
if update and update_allpaths:
install_locations = []
for pair in get_installed_daps_detailed()[name]:
install_locations.append(pair['location'])
else:
install_locations = [_install_path()]
# This should not happen unless someone did it on purpose
for location in install_locations:
if os.path.isfile(location):
raise DapiLocalError(
'{i} is a file, not a directory.'.format(i=_install_path()))
_dir = tempfile.mkdtemp()
old_level = logger.getEffectiveLevel()
logger.setLevel(logging.ERROR)
ok = dapi.DapChecker.check(dap_obj)
logger.setLevel(old_level)
if not ok:
raise DapiLocalError('The DAP you want to install has errors, not installing.')
installed = []
if first:
if not force and not _is_supported_here(dap_obj.meta):
raise DapiLocalError(
'{0} is not supported on this platform (use --force to suppress this check)'.
format(name))
deps = set()
if 'dependencies' in dap_obj.meta and not nodeps:
for dep in dap_obj.meta['dependencies']:
dep = _strip_version_from_dependency(dep)
if dep not in get_installed_daps():
deps |= _get_all_dependencies_of(dep, force=force)
for dep in deps:
if dep not in get_installed_daps():
installed += install_dap(dep, first=False, __ui__=__ui__)
dap_obj.extract(_dir)
if will_uninstall:
uninstall_dap(name, allpaths=update_allpaths, __ui__=__ui__)
_dapdir = os.path.join(_dir, name + '-' + dap_obj.meta['version'])
if not os.path.isdir(_install_path()):
os.makedirs(_install_path())
os.mkdir(os.path.join(_dapdir, 'meta'))
os.rename(os.path.join(_dapdir, 'meta.yaml'),
os.path.join(_dapdir, 'meta', name + '.yaml'))
for location in install_locations:
for f in glob.glob(_dapdir + '/*'):
dst = os.path.join(location, os.path.basename(f))
if os.path.isdir(f):
if not os.path.exists(dst):
os.mkdir(dst)
for src_dir, dirs, files in os.walk(f):
dst_dir = src_dir.replace(f, dst)
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
for file_ in files:
src_file = os.path.join(src_dir, file_)
dst_file = os.path.join(dst_dir, file_)
shutil.copyfile(src_file, dst_file)
else:
shutil.copyfile(f, dst)
try:
shutil.rmtree(_dir)
except:
pass
return [name] + installed | Installs a dap from a given path | Below is the the instruction that describes the task:
### Input:
Installs a dap from a given path
### Response:
def install_dap_from_path(path, update=False, update_allpaths=False, first=True,
force=False, nodeps=False, reinstall=False, __ui__=''):
'''Installs a dap from a given path'''
will_uninstall = False
dap_obj = dapi.Dap(path)
name = dap_obj.meta['package_name']
if name in get_installed_daps():
if not update and not reinstall:
raise DapiLocalError(
'DAP {name} is already installed. '
'Run `da pkg list` to see its location, or use --reinstall to ignore this check.'
.format(name=name))
elif not update_allpaths and name in get_installed_daps(_install_path()):
will_uninstall = True
elif update_allpaths and name in get_installed_daps():
will_uninstall = True
if update and update_allpaths:
install_locations = []
for pair in get_installed_daps_detailed()[name]:
install_locations.append(pair['location'])
else:
install_locations = [_install_path()]
# This should not happen unless someone did it on purpose
for location in install_locations:
if os.path.isfile(location):
raise DapiLocalError(
'{i} is a file, not a directory.'.format(i=_install_path()))
_dir = tempfile.mkdtemp()
old_level = logger.getEffectiveLevel()
logger.setLevel(logging.ERROR)
ok = dapi.DapChecker.check(dap_obj)
logger.setLevel(old_level)
if not ok:
raise DapiLocalError('The DAP you want to install has errors, not installing.')
installed = []
if first:
if not force and not _is_supported_here(dap_obj.meta):
raise DapiLocalError(
'{0} is not supported on this platform (use --force to suppress this check)'.
format(name))
deps = set()
if 'dependencies' in dap_obj.meta and not nodeps:
for dep in dap_obj.meta['dependencies']:
dep = _strip_version_from_dependency(dep)
if dep not in get_installed_daps():
deps |= _get_all_dependencies_of(dep, force=force)
for dep in deps:
if dep not in get_installed_daps():
installed += install_dap(dep, first=False, __ui__=__ui__)
dap_obj.extract(_dir)
if will_uninstall:
uninstall_dap(name, allpaths=update_allpaths, __ui__=__ui__)
_dapdir = os.path.join(_dir, name + '-' + dap_obj.meta['version'])
if not os.path.isdir(_install_path()):
os.makedirs(_install_path())
os.mkdir(os.path.join(_dapdir, 'meta'))
os.rename(os.path.join(_dapdir, 'meta.yaml'),
os.path.join(_dapdir, 'meta', name + '.yaml'))
for location in install_locations:
for f in glob.glob(_dapdir + '/*'):
dst = os.path.join(location, os.path.basename(f))
if os.path.isdir(f):
if not os.path.exists(dst):
os.mkdir(dst)
for src_dir, dirs, files in os.walk(f):
dst_dir = src_dir.replace(f, dst)
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
for file_ in files:
src_file = os.path.join(src_dir, file_)
dst_file = os.path.join(dst_dir, file_)
shutil.copyfile(src_file, dst_file)
else:
shutil.copyfile(f, dst)
try:
shutil.rmtree(_dir)
except:
pass
return [name] + installed |
def convert(pinyin, style, strict, default=None, **kwargs):
"""根据拼音风格把原始拼音转换为不同的格式
:param pinyin: 原始有声调的单个拼音
:type pinyin: unicode
:param style: 拼音风格
:param strict: 是否严格遵照《汉语拼音方案》来处理声母和韵母,详见 :ref:`strict`
:type strict: bool
:param default: 拼音风格对应的实现不存在时返回的默认值
:return: 按照拼音风格进行处理过后的拼音字符串
:rtype: unicode
"""
if style in _registry:
return _registry[style](pinyin, strict=strict, **kwargs)
return default | Convert the original pinyin into a different format according to the pinyin style
:param pinyin: a single original pinyin with tone marks
:type pinyin: unicode
:param style: pinyin style
:param strict: whether to strictly follow the《汉语拼音方案》(Scheme for the Chinese Phonetic Alphabet) when handling initials and finals, see :ref:`strict`
:type strict: bool
:param default: default value returned when no implementation exists for the given pinyin style
:return: the pinyin string processed according to the pinyin style
:rtype: unicode | Below is the the instruction that describes the task:
### Input:
Convert the original pinyin into a different format according to the pinyin style
:param pinyin: a single original pinyin with tone marks
:type pinyin: unicode
:param style: pinyin style
:param strict: whether to strictly follow the《汉语拼音方案》(Scheme for the Chinese Phonetic Alphabet) when handling initials and finals, see :ref:`strict`
:type strict: bool
:param default: default value returned when no implementation exists for the given pinyin style
:return: the pinyin string processed according to the pinyin style
:rtype: unicode
### Response:
def convert(pinyin, style, strict, default=None, **kwargs):
"""根据拼音风格把原始拼音转换为不同的格式
:param pinyin: 原始有声调的单个拼音
:type pinyin: unicode
:param style: 拼音风格
:param strict: 是否严格遵照《汉语拼音方案》来处理声母和韵母,详见 :ref:`strict`
:type strict: bool
:param default: 拼音风格对应的实现不存在时返回的默认值
:return: 按照拼音风格进行处理过后的拼音字符串
:rtype: unicode
"""
if style in _registry:
return _registry[style](pinyin, strict=strict, **kwargs)
return default |
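The registry lookup can be sketched standalone; the style name and converter below are hypothetical stand-ins for the real registered styles:

_registry = {}

def register(style):
    def decorator(func):
        _registry[style] = func
        return func
    return decorator

@register('UPPERCASE')
def _to_upper(pinyin, strict=True, **kwargs):
    return pinyin.upper()

def convert(pinyin, style, strict, default=None, **kwargs):
    if style in _registry:
        return _registry[style](pinyin, strict=strict, **kwargs)
    return default

print(convert('zhong1', 'UPPERCASE', True))               # ZHONG1
print(convert('zhong1', 'UNKNOWN', True, default=None))   # None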
def btc_tx_serialize(_txobj):
"""
Given a transaction dict returned by btc_tx_deserialize, convert it back into a
hex-encoded byte string.
Derived from code written by Vitalik Buterin in pybitcointools (https://github.com/vbuterin/pybitcointools)
"""
# output buffer
o = []
txobj = None
if encoding.json_is_base(_txobj, 16):
# txobj is built from hex strings already. deserialize them
txobj = encoding.json_changebase(_txobj, lambda x: binascii.unhexlify(x))
else:
txobj = copy.deepcopy(_txobj)
# version
o.append(encoding.encode(txobj["version"], 256, 4)[::-1])
# do we have any witness scripts?
have_witness = False
for inp in txobj['ins']:
if inp.has_key('witness_script') and len(inp['witness_script']) > 0:
have_witness = True
break
if have_witness:
# add segwit marker
o.append('\x00\x01')
# number of inputs
o.append(encoding.num_to_var_int(len(txobj["ins"])))
# all inputs
for inp in txobj["ins"]:
# input tx hash
o.append(inp["outpoint"]["hash"][::-1])
# input tx outpoint
o.append(encoding.encode(inp["outpoint"]["index"], 256, 4)[::-1])
# input scriptsig
script = inp.get('script')
if not script:
script = bytes()
scriptsig = encoding.num_to_var_int(len(script)) + script
o.append(scriptsig)
# sequence
o.append(encoding.encode(inp.get("sequence", UINT_MAX - 1), 256, 4)[::-1])
# number of outputs
o.append(encoding.num_to_var_int(len(txobj["outs"])))
# all outputs
for out in txobj["outs"]:
# value
o.append(encoding.encode(out["value"], 256, 8)[::-1])
# scriptPubKey
scriptpubkey = encoding.num_to_var_int(len(out['script'])) + out['script']
o.append(scriptpubkey)
# add witnesses
if have_witness:
for inp in txobj['ins']:
witness_script = inp.get('witness_script')
if not witness_script:
witness_script = '\x00'
o.append(witness_script)
# locktime
o.append(encoding.encode(txobj["locktime"], 256, 4)[::-1])
# full string
ret = ''.join( encoding.json_changebase(o, lambda x: encoding.safe_hexlify(x)) )
return ret | Given a transaction dict returned by btc_tx_deserialize, convert it back into a
hex-encoded byte string.
Derived from code written by Vitalik Buterin in pybitcointools (https://github.com/vbuterin/pybitcointools) | Below is the the instruction that describes the task:
### Input:
Given a transaction dict returned by btc_tx_deserialize, convert it back into a
hex-encoded byte string.
Derived from code written by Vitalik Buterin in pybitcointools (https://github.com/vbuterin/pybitcointools)
### Response:
def btc_tx_serialize(_txobj):
"""
Given a transaction dict returned by btc_tx_deserialize, convert it back into a
hex-encoded byte string.
Derived from code written by Vitalik Buterin in pybitcointools (https://github.com/vbuterin/pybitcointools)
"""
# output buffer
o = []
txobj = None
if encoding.json_is_base(_txobj, 16):
# txobj is built from hex strings already. deserialize them
txobj = encoding.json_changebase(_txobj, lambda x: binascii.unhexlify(x))
else:
txobj = copy.deepcopy(_txobj)
# version
o.append(encoding.encode(txobj["version"], 256, 4)[::-1])
# do we have any witness scripts?
have_witness = False
for inp in txobj['ins']:
if inp.has_key('witness_script') and len(inp['witness_script']) > 0:
have_witness = True
break
if have_witness:
# add segwit marker
o.append('\x00\x01')
# number of inputs
o.append(encoding.num_to_var_int(len(txobj["ins"])))
# all inputs
for inp in txobj["ins"]:
# input tx hash
o.append(inp["outpoint"]["hash"][::-1])
# input tx outpoint
o.append(encoding.encode(inp["outpoint"]["index"], 256, 4)[::-1])
# input scriptsig
script = inp.get('script')
if not script:
script = bytes()
scriptsig = encoding.num_to_var_int(len(script)) + script
o.append(scriptsig)
# sequence
o.append(encoding.encode(inp.get("sequence", UINT_MAX - 1), 256, 4)[::-1])
# number of outputs
o.append(encoding.num_to_var_int(len(txobj["outs"])))
# all outputs
for out in txobj["outs"]:
# value
o.append(encoding.encode(out["value"], 256, 8)[::-1])
# scriptPubKey
scriptpubkey = encoding.num_to_var_int(len(out['script'])) + out['script']
o.append(scriptpubkey)
# add witnesses
if have_witness:
for inp in txobj['ins']:
witness_script = inp.get('witness_script')
if not witness_script:
witness_script = '\x00'
o.append(witness_script)
# locktime
o.append(encoding.encode(txobj["locktime"], 256, 4)[::-1])
# full string
ret = ''.join( encoding.json_changebase(o, lambda x: encoding.safe_hexlify(x)) )
return ret |
def _connect(self):
"""Connect to Squid Proxy Manager interface."""
if sys.version_info[:2] < (2,6):
self._conn = httplib.HTTPConnection(self._host, self._port)
else:
self._conn = httplib.HTTPConnection(self._host, self._port,
False, defaultTimeout) | Connect to Squid Proxy Manager interface. | Below is the the instruction that describes the task:
### Input:
Connect to Squid Proxy Manager interface.
### Response:
def _connect(self):
"""Connect to Squid Proxy Manager interface."""
if sys.version_info[:2] < (2,6):
self._conn = httplib.HTTPConnection(self._host, self._port)
else:
self._conn = httplib.HTTPConnection(self._host, self._port,
False, defaultTimeout) |
def find_invalid_chars(self, text, context_size=20):
"""Find invalid characters in text and store information about
the findings.
Parameters
----------
context_size: int
How many characters to return as the context.
"""
result = defaultdict(list)
for idx, char in enumerate(text):
if char not in self.alphabet:
start = max(0, idx-context_size)
end = min(len(text), idx+context_size)
result[char].append(text[start:end])
return result | Find invalid characters in text and store information about
the findings.
Parameters
----------
context_size: int
How many characters to return as the context. | Below is the the instruction that describes the task:
### Input:
Find invalid characters in text and store information about
the findings.
Parameters
----------
context_size: int
How many characters to return as the context.
### Response:
def find_invalid_chars(self, text, context_size=20):
"""Find invalid characters in text and store information about
the findings.
Parameters
----------
context_size: int
How many characters to return as the context.
"""
result = defaultdict(list)
for idx, char in enumerate(text):
if char not in self.alphabet:
start = max(0, idx-context_size)
end = min(len(text), idx+context_size)
result[char].append(text[start:end])
return result |
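A standalone version with the alphabet passed in explicitly (the sample text is made up):

from collections import defaultdict

def find_invalid(text, alphabet, context_size=20):
    result = defaultdict(list)
    for idx, char in enumerate(text):
        if char not in alphabet:
            start = max(0, idx - context_size)
            end = min(len(text), idx + context_size)
            result[char].append(text[start:end])
    return result

alphabet = set('abcdefghijklmnopqrstuvwxyz ')
print(dict(find_invalid('naive text with a stray £ sign', alphabet)))
# {'£': ['e text with a stray £ sign']}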
def function(self, x, y, amp, R_sersic, Re, n_sersic, gamma, e1, e2, center_x=0, center_y=0, alpha=3.):
"""
returns Core-Sersic function
"""
phi_G, q = param_util.ellipticity2phi_q(e1, e2)
Rb = R_sersic
x_shift = x - center_x
y_shift = y - center_y
cos_phi = np.cos(phi_G)
sin_phi = np.sin(phi_G)
xt1 = cos_phi*x_shift+sin_phi*y_shift
xt2 = -sin_phi*x_shift+cos_phi*y_shift
xt2difq2 = xt2/(q*q)
R_ = np.sqrt(xt1*xt1+xt2*xt2difq2)
#R_ = R_.astype(np.float32)
if isinstance(R_, int) or isinstance(R_, float):
R_ = max(self._smoothing, R_)
else:
R_[R_ < self._smoothing] = self._smoothing
if isinstance(R_, int) or isinstance(R_, float):
R = max(self._smoothing, R_)
else:
R=np.empty_like(R_)
_R = R_[R_ > self._smoothing] #in the SIS regime
R[R_ <= self._smoothing] = self._smoothing
R[R_ > self._smoothing] = _R
k, bn = self.k_bn(n_sersic, Re)
result = amp * (1 + (Rb / R) ** alpha) ** (gamma / alpha) * np.exp(-bn * (((R ** alpha + Rb ** alpha) / Re ** alpha) ** (1. / (alpha * n_sersic)) - 1.))
return np.nan_to_num(result) | returns Core-Sersic function | Below is the the instruction that describes the task:
### Input:
returns Core-Sersic function
### Response:
def function(self, x, y, amp, R_sersic, Re, n_sersic, gamma, e1, e2, center_x=0, center_y=0, alpha=3.):
"""
returns Core-Sersic function
"""
phi_G, q = param_util.ellipticity2phi_q(e1, e2)
Rb = R_sersic
x_shift = x - center_x
y_shift = y - center_y
cos_phi = np.cos(phi_G)
sin_phi = np.sin(phi_G)
xt1 = cos_phi*x_shift+sin_phi*y_shift
xt2 = -sin_phi*x_shift+cos_phi*y_shift
xt2difq2 = xt2/(q*q)
R_ = np.sqrt(xt1*xt1+xt2*xt2difq2)
#R_ = R_.astype(np.float32)
if isinstance(R_, int) or isinstance(R_, float):
R_ = max(self._smoothing, R_)
else:
R_[R_ < self._smoothing] = self._smoothing
if isinstance(R_, int) or isinstance(R_, float):
R = max(self._smoothing, R_)
else:
R=np.empty_like(R_)
_R = R_[R_ > self._smoothing] #in the SIS regime
R[R_ <= self._smoothing] = self._smoothing
R[R_ > self._smoothing] = _R
k, bn = self.k_bn(n_sersic, Re)
result = amp * (1 + (Rb / R) ** alpha) ** (gamma / alpha) * np.exp(-bn * (((R ** alpha + Rb ** alpha) / Re ** alpha) ** (1. / (alpha * n_sersic)) - 1.))
return np.nan_to_num(result) |
def _wait_for_file(cls, filename, timeout=FAIL_WAIT_SEC, want_content=True):
"""Wait up to timeout seconds for filename to appear with a non-zero size or raise Timeout()."""
def file_waiter():
return os.path.exists(filename) and (not want_content or os.path.getsize(filename))
action_msg = 'file {} to appear'.format(filename)
return cls._deadline_until(file_waiter, action_msg, timeout=timeout) | Wait up to timeout seconds for filename to appear with a non-zero size or raise Timeout(). | Below is the the instruction that describes the task:
### Input:
Wait up to timeout seconds for filename to appear with a non-zero size or raise Timeout().
### Response:
def _wait_for_file(cls, filename, timeout=FAIL_WAIT_SEC, want_content=True):
"""Wait up to timeout seconds for filename to appear with a non-zero size or raise Timeout()."""
def file_waiter():
return os.path.exists(filename) and (not want_content or os.path.getsize(filename))
action_msg = 'file {} to appear'.format(filename)
return cls._deadline_until(file_waiter, action_msg, timeout=timeout) |
def resetn(self):
"""
reset a core. After a call to this function, the core
is running
"""
#Regular reset will kick NRF out of DBG mode
logging.debug("target_nrf51.reset: enable reset pin")
self.write_memory(RESET, RESET_ENABLE)
#reset
logging.debug("target_nrf51.reset: trigger nRST pin")
self.reset() | reset a core. After a call to this function, the core
is running | Below is the the instruction that describes the task:
### Input:
reset a core. After a call to this function, the core
is running
### Response:
def resetn(self):
"""
reset a core. After a call to this function, the core
is running
"""
#Regular reset will kick NRF out of DBG mode
logging.debug("target_nrf51.reset: enable reset pin")
self.write_memory(RESET, RESET_ENABLE)
#reset
logging.debug("target_nrf51.reset: trigger nRST pin")
self.reset() |
def det4D(m):
'''
det4D(array) yields the determinant of the given matrix array, which may have more than 2
dimensions, in which case the later dimensions are multiplied and added point-wise.
'''
# I just solved this in Mathematica, copy-pasted, and replaced the string '] m' with ']*m':
# Mathematica code: Det@Table[m[i][j], {i, 0, 3}, {j, 0, 3}]
return (m[0][3]*m[1][2]*m[2][1]*m[3][0] - m[0][2]*m[1][3]*m[2][1]*m[3][0] -
m[0][3]*m[1][1]*m[2][2]*m[3][0] + m[0][1]*m[1][3]*m[2][2]*m[3][0] +
m[0][2]*m[1][1]*m[2][3]*m[3][0] - m[0][1]*m[1][2]*m[2][3]*m[3][0] -
m[0][3]*m[1][2]*m[2][0]*m[3][1] + m[0][2]*m[1][3]*m[2][0]*m[3][1] +
m[0][3]*m[1][0]*m[2][2]*m[3][1] - m[0][0]*m[1][3]*m[2][2]*m[3][1] -
m[0][2]*m[1][0]*m[2][3]*m[3][1] + m[0][0]*m[1][2]*m[2][3]*m[3][1] +
m[0][3]*m[1][1]*m[2][0]*m[3][2] - m[0][1]*m[1][3]*m[2][0]*m[3][2] -
m[0][3]*m[1][0]*m[2][1]*m[3][2] + m[0][0]*m[1][3]*m[2][1]*m[3][2] +
m[0][1]*m[1][0]*m[2][3]*m[3][2] - m[0][0]*m[1][1]*m[2][3]*m[3][2] -
m[0][2]*m[1][1]*m[2][0]*m[3][3] + m[0][1]*m[1][2]*m[2][0]*m[3][3] +
m[0][2]*m[1][0]*m[2][1]*m[3][3] - m[0][0]*m[1][2]*m[2][1]*m[3][3] -
m[0][1]*m[1][0]*m[2][2]*m[3][3] + m[0][0]*m[1][1]*m[2][2]*m[3][3]) | det4D(array) yields the determinant of the given matrix array, which may have more than 2
dimensions, in which case the later dimensions are multiplied and added point-wise. | Below is the the instruction that describes the task:
### Input:
det4D(array) yields the determinant of the given matrix array, which may have more than 2
dimensions, in which case the later dimensions are multiplied and added point-wise.
### Response:
def det4D(m):
'''
det4D(array) yields the determinant of the given matrix array, which may have more than 2
dimensions, in which case the later dimensions are multiplied and added point-wise.
'''
# I just solved this in Mathematica, copy-pasted, and replaced the string '] m' with ']*m':
# Mathematica code: Det@Table[m[i][j], {i, 0, 3}, {j, 0, 3}]
return (m[0][3]*m[1][2]*m[2][1]*m[3][0] - m[0][2]*m[1][3]*m[2][1]*m[3][0] -
m[0][3]*m[1][1]*m[2][2]*m[3][0] + m[0][1]*m[1][3]*m[2][2]*m[3][0] +
m[0][2]*m[1][1]*m[2][3]*m[3][0] - m[0][1]*m[1][2]*m[2][3]*m[3][0] -
m[0][3]*m[1][2]*m[2][0]*m[3][1] + m[0][2]*m[1][3]*m[2][0]*m[3][1] +
m[0][3]*m[1][0]*m[2][2]*m[3][1] - m[0][0]*m[1][3]*m[2][2]*m[3][1] -
m[0][2]*m[1][0]*m[2][3]*m[3][1] + m[0][0]*m[1][2]*m[2][3]*m[3][1] +
m[0][3]*m[1][1]*m[2][0]*m[3][2] - m[0][1]*m[1][3]*m[2][0]*m[3][2] -
m[0][3]*m[1][0]*m[2][1]*m[3][2] + m[0][0]*m[1][3]*m[2][1]*m[3][2] +
m[0][1]*m[1][0]*m[2][3]*m[3][2] - m[0][0]*m[1][1]*m[2][3]*m[3][2] -
m[0][2]*m[1][1]*m[2][0]*m[3][3] + m[0][1]*m[1][2]*m[2][0]*m[3][3] +
m[0][2]*m[1][0]*m[2][1]*m[3][3] - m[0][0]*m[1][2]*m[2][1]*m[3][3] -
m[0][1]*m[1][0]*m[2][2]*m[3][3] + m[0][0]*m[1][1]*m[2][2]*m[3][3]) |
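A quick cross-check for the plain 4x4 case (assumes numpy is available; det4D is the function above and is not redefined here):

import numpy as np

m = [[4., 1., 0., 2.],
     [1., 5., 1., 0.],
     [0., 1., 6., 1.],
     [2., 0., 1., 7.]]
print(np.linalg.det(np.array(m)))   # reference value; det4D(m) should return the same number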
def start_service(self, stack, service):
"""启动服务
启动指定名称服务的所有容器。
Args:
- stack: 服务所属的服务组名称
- service: 服务名
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回空dict{},失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息
"""
url = '{0}/v3/stacks/{1}/services/{2}/start'.format(self.host, stack, service)
return self.__post(url) | Start a service
Start all containers of the service with the given name.
Args:
- stack: name of the stack (service group) the service belongs to
- service: the service name
Returns:
Returns a tuple of the form (<result>, <ResponseInfo>)
- result: an empty dict {} on success, or {"error": "<errMsg string>"} on failure
- ResponseInfo: response information of the request | Below is the the instruction that describes the task:
### Input:
Start a service
Start all containers of the service with the given name.
Args:
- stack: name of the stack (service group) the service belongs to
- service: the service name
Returns:
Returns a tuple of the form (<result>, <ResponseInfo>)
- result: an empty dict {} on success, or {"error": "<errMsg string>"} on failure
- ResponseInfo: response information of the request
### Response:
def start_service(self, stack, service):
"""启动服务
启动指定名称服务的所有容器。
Args:
- stack: 服务所属的服务组名称
- service: 服务名
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回空dict{},失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息
"""
url = '{0}/v3/stacks/{1}/services/{2}/start'.format(self.host, stack, service)
return self.__post(url) |
def _rgb_triangle(ax, r_label, g_label, b_label, loc):
"""
Draw an RGB triangle legend on the desired axis
"""
if not loc in range(1, 11):
loc = 2
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
inset_ax = inset_axes(ax, width=1, height=1, loc=loc)
mesh = 35
x = []
y = []
color = []
for r in range(0, mesh):
for g in range(0, mesh):
for b in range(0, mesh):
if not (r == 0 and b == 0 and g == 0):
r1 = r / (r + g + b)
g1 = g / (r + g + b)
b1 = b / (r + g + b)
x.append(0.33 * (2. * g1 + r1) / (r1 + b1 + g1))
y.append(0.33 * np.sqrt(3) * r1 / (r1 + b1 + g1))
rc = math.sqrt(r ** 2 / (r ** 2 + g ** 2 + b ** 2))
gc = math.sqrt(g ** 2 / (r ** 2 + g ** 2 + b ** 2))
bc = math.sqrt(b ** 2 / (r ** 2 + g ** 2 + b ** 2))
color.append([rc, gc, bc])
# x = [n + 0.25 for n in x] # nudge x coordinates
# y = [n + (max_y - 1) for n in y] # shift y coordinates to top
# plot the triangle
inset_ax.scatter(x, y, s=7, marker='.', edgecolor=color)
inset_ax.set_xlim([-0.35, 1.00])
inset_ax.set_ylim([-0.35, 1.00])
# add the labels
inset_ax.text(0.70, -0.2, g_label, fontsize=13,
family='Times New Roman', color=(0, 0, 0),
horizontalalignment='left')
inset_ax.text(0.325, 0.70, r_label, fontsize=13,
family='Times New Roman', color=(0, 0, 0),
horizontalalignment='center')
inset_ax.text(-0.05, -0.2, b_label, fontsize=13,
family='Times New Roman', color=(0, 0, 0),
horizontalalignment='right')
inset_ax.get_xaxis().set_visible(False)
inset_ax.get_yaxis().set_visible(False) | Draw an RGB triangle legend on the desired axis | Below is the the instruction that describes the task:
### Input:
Draw an RGB triangle legend on the desired axis
### Response:
def _rgb_triangle(ax, r_label, g_label, b_label, loc):
"""
Draw an RGB triangle legend on the desired axis
"""
if not loc in range(1, 11):
loc = 2
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
inset_ax = inset_axes(ax, width=1, height=1, loc=loc)
mesh = 35
x = []
y = []
color = []
for r in range(0, mesh):
for g in range(0, mesh):
for b in range(0, mesh):
if not (r == 0 and b == 0 and g == 0):
r1 = r / (r + g + b)
g1 = g / (r + g + b)
b1 = b / (r + g + b)
x.append(0.33 * (2. * g1 + r1) / (r1 + b1 + g1))
y.append(0.33 * np.sqrt(3) * r1 / (r1 + b1 + g1))
rc = math.sqrt(r ** 2 / (r ** 2 + g ** 2 + b ** 2))
gc = math.sqrt(g ** 2 / (r ** 2 + g ** 2 + b ** 2))
bc = math.sqrt(b ** 2 / (r ** 2 + g ** 2 + b ** 2))
color.append([rc, gc, bc])
# x = [n + 0.25 for n in x] # nudge x coordinates
# y = [n + (max_y - 1) for n in y] # shift y coordinates to top
# plot the triangle
inset_ax.scatter(x, y, s=7, marker='.', edgecolor=color)
inset_ax.set_xlim([-0.35, 1.00])
inset_ax.set_ylim([-0.35, 1.00])
# add the labels
inset_ax.text(0.70, -0.2, g_label, fontsize=13,
family='Times New Roman', color=(0, 0, 0),
horizontalalignment='left')
inset_ax.text(0.325, 0.70, r_label, fontsize=13,
family='Times New Roman', color=(0, 0, 0),
horizontalalignment='center')
inset_ax.text(-0.05, -0.2, b_label, fontsize=13,
family='Times New Roman', color=(0, 0, 0),
horizontalalignment='right')
inset_ax.get_xaxis().set_visible(False)
inset_ax.get_yaxis().set_visible(False) |
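A minimal, hedged sketch of calling the helper above on an empty Matplotlib axis (it assumes _rgb_triangle is in scope together with the module-level numpy/math imports it relies on; the labels are placeholders):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
_rgb_triangle(ax, r_label='s', g_label='p', b_label='d', loc=1)  # legend in the upper-right corner
plt.show()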
def _tryMatch(self, textToMatchObject):
"""Try to find themselves in the text.
Returns (count, matchedRule) or (None, None) if doesn't match
"""
# andreikop: This check is not described in kate docs, and I haven't found it in the code
if not textToMatchObject.isWordStart:
return None
index = self._tryMatchText(textToMatchObject.text)
if index is None:
return None
if textToMatchObject.currentColumnIndex + index < len(textToMatchObject.wholeLineText):
newTextToMatchObject = TextToMatchObject(textToMatchObject.currentColumnIndex + index,
textToMatchObject.wholeLineText,
self.parentContext.parser.deliminatorSet,
textToMatchObject.contextData)
for rule in self.childRules:
ruleTryMatchResult = rule.tryMatch(newTextToMatchObject)
if ruleTryMatchResult is not None:
index += ruleTryMatchResult.length
break
# child rule context and attribute ignored
return RuleTryMatchResult(self, index) | Try to find themselves in the text.
Returns (count, matchedRule) or (None, None) if doesn't match | Below is the the instruction that describes the task:
### Input:
Try to find themselves in the text.
Returns (count, matchedRule) or (None, None) if doesn't match
### Response:
def _tryMatch(self, textToMatchObject):
"""Try to find themselves in the text.
Returns (count, matchedRule) or (None, None) if doesn't match
"""
# andreikop: This check is not described in kate docs, and I haven't found it in the code
if not textToMatchObject.isWordStart:
return None
index = self._tryMatchText(textToMatchObject.text)
if index is None:
return None
if textToMatchObject.currentColumnIndex + index < len(textToMatchObject.wholeLineText):
newTextToMatchObject = TextToMatchObject(textToMatchObject.currentColumnIndex + index,
textToMatchObject.wholeLineText,
self.parentContext.parser.deliminatorSet,
textToMatchObject.contextData)
for rule in self.childRules:
ruleTryMatchResult = rule.tryMatch(newTextToMatchObject)
if ruleTryMatchResult is not None:
index += ruleTryMatchResult.length
break
# child rule context and attribute ignored
return RuleTryMatchResult(self, index) |
def enter_command_mode(self):
"""
Go into command mode.
"""
self.application.layout.focus(self.command_buffer)
self.application.vi_state.input_mode = InputMode.INSERT
self.previewer.save() | Go into command mode. | Below is the the instruction that describes the task:
### Input:
Go into command mode.
### Response:
def enter_command_mode(self):
"""
Go into command mode.
"""
self.application.layout.focus(self.command_buffer)
self.application.vi_state.input_mode = InputMode.INSERT
self.previewer.save() |
def new(cls, ns_path, script, campaign_dir, runner_type='Auto',
overwrite=False, optimized=True, check_repo=True):
"""
Create a new campaign from an ns-3 installation and a campaign
directory.
This method will create a DatabaseManager, which will install a
database in the specified campaign_dir. If a database is already
available at the ns_path described in the specified campaign_dir and
its configuration matches config, this instance is used instead. If the
overwrite argument is set to True instead, the specified directory is
wiped and a new campaign is created in its place.
Furthermore, this method will initialize a SimulationRunner, of type
specified by the runner_type parameter, which will be locked on the
ns-3 installation at ns_path and set up to run the desired script.
Finally, note that creation of a campaign requires a git repository to
be initialized at the specified ns_path. This will allow SEM to save
the commit at which the simulations are run, enforce reproducibility
and avoid mixing results coming from different versions of ns-3 and its
libraries.
Args:
ns_path (str): path to the ns-3 installation to employ in this
campaign.
script (str): ns-3 script that will be executed to run simulations.
campaign_dir (str): path to the directory in which to save the
simulation campaign database.
runner_type (str): implementation of the SimulationRunner to use.
Value can be: SimulationRunner (for running sequential
simulations locally), ParallelRunner (for running parallel
simulations locally), GridRunner (for running simulations using
a DRMAA-compatible parallel task scheduler). Use Auto to
automatically pick the best runner.
overwrite (bool): whether to overwrite already existing
campaign_dir folders. This deletes the directory if and only if
it only contains files that were detected to be created by sem.
optimized (bool): whether to configure the runner to employ an
optimized ns-3 build.
"""
# Convert paths to be absolute
ns_path = os.path.abspath(ns_path)
campaign_dir = os.path.abspath(campaign_dir)
# Verify if the specified campaign is already available
if Path(campaign_dir).exists() and not overwrite:
# Try loading
manager = CampaignManager.load(campaign_dir, ns_path,
runner_type=runner_type,
optimized=optimized,
check_repo=check_repo)
if manager.db.get_script() == script:
return manager
else:
del manager
# Initialize runner
runner = CampaignManager.create_runner(ns_path, script,
runner_type=runner_type,
optimized=optimized)
# Get list of parameters to save in the DB
params = runner.get_available_parameters()
# Get current commit
commit = ""
if check_repo:
from git import Repo, exc
commit = Repo(ns_path).head.commit.hexsha
# Create a database manager from the configuration
db = DatabaseManager.new(script=script,
params=params,
commit=commit,
campaign_dir=campaign_dir,
overwrite=overwrite)
return cls(db, runner, check_repo) | Create a new campaign from an ns-3 installation and a campaign
directory.
This method will create a DatabaseManager, which will install a
database in the specified campaign_dir. If a database is already
available at the ns_path described in the specified campaign_dir and
its configuration matches config, this instance is used instead. If the
overwrite argument is set to True instead, the specified directory is
wiped and a new campaign is created in its place.
Furthermore, this method will initialize a SimulationRunner, of type
specified by the runner_type parameter, which will be locked on the
ns-3 installation at ns_path and set up to run the desired script.
Finally, note that creation of a campaign requires a git repository to
be initialized at the specified ns_path. This will allow SEM to save
the commit at which the simulations are run, enforce reproducibility
and avoid mixing results coming from different versions of ns-3 and its
libraries.
Args:
ns_path (str): path to the ns-3 installation to employ in this
campaign.
script (str): ns-3 script that will be executed to run simulations.
campaign_dir (str): path to the directory in which to save the
simulation campaign database.
runner_type (str): implementation of the SimulationRunner to use.
Value can be: SimulationRunner (for running sequential
simulations locally), ParallelRunner (for running parallel
simulations locally), GridRunner (for running simulations using
a DRMAA-compatible parallel task scheduler). Use Auto to
automatically pick the best runner.
overwrite (bool): whether to overwrite already existing
campaign_dir folders. This deletes the directory if and only if
it only contains files that were detected to be created by sem.
optimized (bool): whether to configure the runner to employ an
optimized ns-3 build. | Below is the the instruction that describes the task:
### Input:
Create a new campaign from an ns-3 installation and a campaign
directory.
This method will create a DatabaseManager, which will install a
database in the specified campaign_dir. If a database is already
available at the ns_path described in the specified campaign_dir and
its configuration matches config, this instance is used instead. If the
overwrite argument is set to True instead, the specified directory is
wiped and a new campaign is created in its place.
Furthermore, this method will initialize a SimulationRunner, of type
specified by the runner_type parameter, which will be locked on the
ns-3 installation at ns_path and set up to run the desired script.
Finally, note that creation of a campaign requires a git repository to
be initialized at the specified ns_path. This will allow SEM to save
the commit at which the simulations are run, enforce reproducibility
and avoid mixing results coming from different versions of ns-3 and its
libraries.
Args:
ns_path (str): path to the ns-3 installation to employ in this
campaign.
script (str): ns-3 script that will be executed to run simulations.
campaign_dir (str): path to the directory in which to save the
simulation campaign database.
runner_type (str): implementation of the SimulationRunner to use.
Value can be: SimulationRunner (for running sequential
simulations locally), ParallelRunner (for running parallel
simulations locally), GridRunner (for running simulations using
a DRMAA-compatible parallel task scheduler). Use Auto to
automatically pick the best runner.
overwrite (bool): whether to overwrite already existing
campaign_dir folders. This deletes the directory if and only if
it only contains files that were detected to be created by sem.
optimized (bool): whether to configure the runner to employ an
optimized ns-3 build.
### Response:
def new(cls, ns_path, script, campaign_dir, runner_type='Auto',
overwrite=False, optimized=True, check_repo=True):
"""
Create a new campaign from an ns-3 installation and a campaign
directory.
This method will create a DatabaseManager, which will install a
database in the specified campaign_dir. If a database is already
available at the ns_path described in the specified campaign_dir and
its configuration matches config, this instance is used instead. If the
overwrite argument is set to True instead, the specified directory is
wiped and a new campaign is created in its place.
Furthermore, this method will initialize a SimulationRunner, of type
specified by the runner_type parameter, which will be locked on the
ns-3 installation at ns_path and set up to run the desired script.
Finally, note that creation of a campaign requires a git repository to
be initialized at the specified ns_path. This will allow SEM to save
the commit at which the simulations are run, enforce reproducibility
and avoid mixing results coming from different versions of ns-3 and its
libraries.
Args:
ns_path (str): path to the ns-3 installation to employ in this
campaign.
script (str): ns-3 script that will be executed to run simulations.
campaign_dir (str): path to the directory in which to save the
simulation campaign database.
runner_type (str): implementation of the SimulationRunner to use.
Value can be: SimulationRunner (for running sequential
simulations locally), ParallelRunner (for running parallel
simulations locally), GridRunner (for running simulations using
a DRMAA-compatible parallel task scheduler). Use Auto to
automatically pick the best runner.
overwrite (bool): whether to overwrite already existing
campaign_dir folders. This deletes the directory if and only if
it only contains files that were detected to be created by sem.
optimized (bool): whether to configure the runner to employ an
optimized ns-3 build.
"""
# Convert paths to be absolute
ns_path = os.path.abspath(ns_path)
campaign_dir = os.path.abspath(campaign_dir)
# Verify if the specified campaign is already available
if Path(campaign_dir).exists() and not overwrite:
# Try loading
manager = CampaignManager.load(campaign_dir, ns_path,
runner_type=runner_type,
optimized=optimized,
check_repo=check_repo)
if manager.db.get_script() == script:
return manager
else:
del manager
# Initialize runner
runner = CampaignManager.create_runner(ns_path, script,
runner_type=runner_type,
optimized=optimized)
# Get list of parameters to save in the DB
params = runner.get_available_parameters()
# Get current commit
commit = ""
if check_repo:
from git import Repo, exc
commit = Repo(ns_path).head.commit.hexsha
# Create a database manager from the configuration
db = DatabaseManager.new(script=script,
params=params,
commit=commit,
campaign_dir=campaign_dir,
overwrite=overwrite)
return cls(db, runner, check_repo) |
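A hedged usage sketch of the classmethod above via the sem package; the ns-3 path, script name, and campaign directory are placeholders, and the ns-3 tree must be a git repository unless check_repo is disabled:

import sem

campaign = sem.CampaignManager.new(ns_path='/path/to/ns-3-dev',
                                   script='wifi-multi-tos',
                                   campaign_dir='/tmp/wifi-campaign',
                                   runner_type='ParallelRunner')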
def teardown_app_request(self, func: Callable) -> Callable:
"""Add a teardown request function to the app.
This is designed to be used as a decorator, and has the same
arguments as :meth:`~quart.Quart.teardown_request`. It applies
to all requests to the app this blueprint is registered on. An
example usage,
.. code-block:: python
blueprint = Blueprint(__name__)
@blueprint.teardown_app_request
def teardown():
...
"""
self.record_once(lambda state: state.app.teardown_request(func))
return func | Add a teardown request function to the app.
This is designed to be used as a decorator, and has the same
arguments as :meth:`~quart.Quart.teardown_request`. It applies
to all requests to the app this blueprint is registered on. An
example usage,
.. code-block:: python
blueprint = Blueprint(__name__)
@blueprint.teardown_app_request
def teardown():
... | Below is the the instruction that describes the task:
### Input:
Add a teardown request function to the app.
This is designed to be used as a decorator, and has the same
arguments as :meth:`~quart.Quart.teardown_request`. It applies
to all requests to the app this blueprint is registered on. An
example usage,
.. code-block:: python
blueprint = Blueprint(__name__)
@blueprint.teardown_app_request
def teardown():
...
### Response:
def teardown_app_request(self, func: Callable) -> Callable:
"""Add a teardown request function to the app.
This is designed to be used as a decorator, and has the same
arguments as :meth:`~quart.Quart.teardown_request`. It applies
to all requests to the app this blueprint is registered on. An
example usage,
.. code-block:: python
blueprint = Blueprint(__name__)
@blueprint.teardown_app_request
def teardown():
...
"""
self.record_once(lambda state: state.app.teardown_request(func))
return func |
def to_dict(x, depth, exclude_keys=set(), depth_threshold=8):
"""Transform a nested object/dict/list into a regular dict
json.dump(s) and pickle don't like to un/serialize regular Python objects so
this function should handle arbitrarily nested objects to be serialized to
regular string, float, int, bool, None values.
This is a recursive function so by default it will exit at a certain depth (depth_threshold=8).
Args:
x (object): Some object to dict-ify unless x is a scalar/literal then return x as is
depth (int): Starting depth must be 0 (cannot supply default value due to weird Pythonisms)
exclude_keys (set): Keys to avoid adding to the output dict
depth_threshold (int): object/dict nesting depth to stop at
Returns:
dict: dict with only scalar/literal leaf values
"""
if x is None or isinstance(x, (str, int, float, bool)):
return x
if isinstance(x, np.int_):
return int(x)
if isinstance(x, np.int64):
return int(x)
if isinstance(x, np.float_):
return float(x)
if isinstance(x, np.float64):
return float(x)
if isinstance(x, np.bool_):
return bool(x)
if depth + 1 > depth_threshold: return {}
if isinstance(x, list):
out = []
for v in x:
tmp = to_dict(v, depth + 1, exclude_keys, depth_threshold)
if tmp == {}: continue
out.append(tmp)
return out
out = {}
if isinstance(x, dict):
for k, v in x.items():
if k in exclude_keys: continue
if not isinstance(k, (str,)):
k = str(k)
tmp = to_dict(v, depth + 1, exclude_keys, depth_threshold)
if tmp == {}: continue
out[k] = tmp
return out
for attr in listattrs(x):
if attr in exclude_keys: continue
v = getattr(x, attr)
tmp = to_dict(v, depth + 1, exclude_keys, depth_threshold)
if tmp == {}: continue
out[attr] = tmp
return out | Transform a nested object/dict/list into a regular dict
json.dump(s) and pickle don't like to un/serialize regular Python objects so
this function should handle arbitrarily nested objects to be serialized to
regular string, float, int, bool, None values.
This is a recursive function so by default it will exit at a certain depth (depth_threshold=8).
Args:
x (object): Some object to dict-ify unless x is a scalar/literal then return x as is
depth (int): Starting depth must be 0 (cannot supply default value due to weird Pythonisms)
exclude_keys (set): Keys to avoid adding to the output dict
depth_threshold (int): object/dict nesting depth to stop at
Returns:
dict: dict with only scalar/literal leaf values | Below is the the instruction that describes the task:
### Input:
Transform a nested object/dict/list into a regular dict
json.dump(s) and pickle don't like to un/serialize regular Python objects so
this function should handle arbitrarily nested objects to be serialized to
regular string, float, int, bool, None values.
This is a recursive function so by default it will exit at a certain depth (depth_threshold=8).
Args:
x (object): Some object to dict-ify unless x is a scalar/literal then return x as is
depth (int): Starting depth must be 0 (cannot supply default value due to weird Pythonisms)
exclude_keys (set): Keys to avoid adding to the output dict
depth_threshold (int): object/dict nesting depth to stop at
Returns:
dict: dict with only scalar/literal leaf values
### Response:
def to_dict(x, depth, exclude_keys=set(), depth_threshold=8):
"""Transform a nested object/dict/list into a regular dict
json.dump(s) and pickle don't like to un/serialize regular Python objects so
this function should handle arbitrarily nested objects to be serialized to
regular string, float, int, bool, None values.
This is a recursive function so by default it will exit at a certain depth (depth_threshold=8).
Args:
x (object): Some object to dict-ify unless x is a scalar/literal then return x as is
depth (int): Starting depth must be 0 (cannot supply default value due to weird Pythonisms)
exclude_keys (set): Keys to avoid adding to the output dict
depth_threshold (int): object/dict nesting depth to stop at
Returns:
dict: dict with only scalar/literal leaf values
"""
if x is None or isinstance(x, (str, int, float, bool)):
return x
if isinstance(x, np.int_):
return int(x)
if isinstance(x, np.int64):
return int(x)
if isinstance(x, np.float_):
return float(x)
if isinstance(x, np.float64):
return float(x)
if isinstance(x, np.bool_):
return bool(x)
if depth + 1 > depth_threshold: return {}
if isinstance(x, list):
out = []
for v in x:
tmp = to_dict(v, depth + 1, exclude_keys, depth_threshold)
if tmp == {}: continue
out.append(tmp)
return out
out = {}
if isinstance(x, dict):
for k, v in x.items():
if k in exclude_keys: continue
if not isinstance(k, (str,)):
k = str(k)
tmp = to_dict(v, depth + 1, exclude_keys, depth_threshold)
if tmp == {}: continue
out[k] = tmp
return out
for attr in listattrs(x):
if attr in exclude_keys: continue
v = getattr(x, attr)
tmp = to_dict(v, depth + 1, exclude_keys, depth_threshold)
if tmp == {}: continue
out[attr] = tmp
return out |
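A small usage sketch for the entry above, restricted to dict/list/NumPy leaves so the module-level listattrs helper is never reached (it assumes a NumPy version that still provides the np.float_/np.int_ aliases, i.e. NumPy < 2.0):

import numpy as np

data = {"name": "p1", "coords": [np.float64(1.5), np.int64(2)], "ok": np.bool_(True)}
print(to_dict(data, 0))
# -> {'name': 'p1', 'coords': [1.5, 2], 'ok': True}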
def _float_or_str(value):
"""Internal method to attempt `float(value)` handling a `ValueError`
"""
# remove any surrounding quotes
value = QUOTE_REGEX.sub('', value)
try: # attempt `float()` conversion
return float(value)
except ValueError: # just return the input
return value | Internal method to attempt `float(value)` handling a `ValueError` | Below is the the instruction that describes the task:
### Input:
Internal method to attempt `float(value)` handling a `ValueError`
### Response:
def _float_or_str(value):
"""Internal method to attempt `float(value)` handling a `ValueError`
"""
# remove any surrounding quotes
value = QUOTE_REGEX.sub('', value)
try: # attempt `float()` conversion
return float(value)
except ValueError: # just return the input
return value |
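A behavior sketch for the helper above; QUOTE_REGEX is module state not shown here, so the pattern below is only a hypothetical stand-in for stripping surrounding quotes:

import re

QUOTE_REGEX = re.compile(r'^[\s"\']+|[\s"\']+$')  # stand-in assumption, not the module's real constant

print(_float_or_str('"3.14"'))    # -> 3.14 (converted to float)
print(_float_or_str('"m1:ch1"'))  # -> 'm1:ch1' (float() fails, string returned)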
def unquote(value):
"""Remove wrapping quotes from a string.
:param value: A string that might be wrapped in double quotes, such
as a HTTP cookie value.
:returns: Beginning and ending quotes removed and escaped quotes (``\"``)
unescaped
"""
if len(value) > 1 and value[0] == '"' and value[-1] == '"':
value = value[1:-1].replace(r'\"', '"')
return value | Remove wrapping quotes from a string.
:param value: A string that might be wrapped in double quotes, such
as a HTTP cookie value.
:returns: Beginning and ending quotes removed and escaped quotes (``\"``)
unescaped | Below is the the instruction that describes the task:
### Input:
Remove wrapping quotes from a string.
:param value: A string that might be wrapped in double quotes, such
as a HTTP cookie value.
:returns: Beginning and ending quotes removed and escaped quotes (``\"``)
unescaped
### Response:
def unquote(value):
"""Remove wrapping quotes from a string.
:param value: A string that might be wrapped in double quotes, such
as a HTTP cookie value.
:returns: Beginning and ending quotes removed and escaped quotes (``\"``)
unescaped
"""
if len(value) > 1 and value[0] == '"' and value[-1] == '"':
value = value[1:-1].replace(r'\"', '"')
return value |
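Two quick calls illustrating the behavior described in the entry above:

print(unquote('"hello \\"world\\""'))  # -> hello "world"
print(unquote('plain'))                # -> plain (non-quoted values pass through unchanged)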
def parse_list_objects(data, bucket_name):
"""
Parser for list objects response.
:param data: Response data for list objects.
:param bucket_name: Response for the bucket.
:return: Replies back three distinctive components.
- List of :class:`Object <Object>`
- True if list is truncated, False otherwise.
- Object name marker for the next request.
"""
root = S3Element.fromstring('ListObjectResult', data)
is_truncated = root.get_child_text('IsTruncated').lower() == 'true'
# NextMarker element need not be present.
marker = root.get_urldecoded_elem_text('NextMarker', strict=False)
objects, object_dirs = _parse_objects_from_xml_elts(
bucket_name,
root.findall('Contents'),
root.findall('CommonPrefixes')
)
if is_truncated and marker is None:
marker = objects[-1].object_name
return objects + object_dirs, is_truncated, marker | Parser for list objects response.
:param data: Response data for list objects.
:param bucket_name: Response for the bucket.
:return: Replies back three distinctive components.
- List of :class:`Object <Object>`
- True if list is truncated, False otherwise.
- Object name marker for the next request. | Below is the the instruction that describes the task:
### Input:
Parser for list objects response.
:param data: Response data for list objects.
:param bucket_name: Response for the bucket.
:return: Replies back three distinctive components.
- List of :class:`Object <Object>`
- True if list is truncated, False otherwise.
- Object name marker for the next request.
### Response:
def parse_list_objects(data, bucket_name):
"""
Parser for list objects response.
:param data: Response data for list objects.
:param bucket_name: Response for the bucket.
:return: Replies back three distinctive components.
- List of :class:`Object <Object>`
- True if list is truncated, False otherwise.
- Object name marker for the next request.
"""
root = S3Element.fromstring('ListObjectResult', data)
is_truncated = root.get_child_text('IsTruncated').lower() == 'true'
# NextMarker element need not be present.
marker = root.get_urldecoded_elem_text('NextMarker', strict=False)
objects, object_dirs = _parse_objects_from_xml_elts(
bucket_name,
root.findall('Contents'),
root.findall('CommonPrefixes')
)
if is_truncated and marker is None:
marker = objects[-1].object_name
return objects + object_dirs, is_truncated, marker |
def change_password(self, body, username=None, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-change-password.html>`_
:arg body: the new password for the user
:arg username: The username of the user to change the password for
:arg refresh: If `true` (the default) then refresh the affected shards
to make this operation visible to search, if `wait_for` then wait
for a refresh to make this operation visible to search, if `false`
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for'
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request(
"PUT",
_make_path("_security", "user", username, "_password"),
params=params,
body=body,
) | `<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-change-password.html>`_
:arg body: the new password for the user
:arg username: The username of the user to change the password for
:arg refresh: If `true` (the default) then refresh the affected shards
to make this operation visible to search, if `wait_for` then wait
for a refresh to make this operation visible to search, if `false`
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for' | Below is the the instruction that describes the task:
### Input:
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-change-password.html>`_
:arg body: the new password for the user
:arg username: The username of the user to change the password for
:arg refresh: If `true` (the default) then refresh the affected shards
to make this operation visible to search, if `wait_for` then wait
for a refresh to make this operation visible to search, if `false`
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for'
### Response:
def change_password(self, body, username=None, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-change-password.html>`_
:arg body: the new password for the user
:arg username: The username of the user to change the password for
:arg refresh: If `true` (the default) then refresh the affected shards
to make this operation visible to search, if `wait_for` then wait
for a refresh to make this operation visible to search, if `false`
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for'
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request(
"PUT",
_make_path("_security", "user", username, "_password"),
params=params,
body=body,
) |
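A hedged usage sketch against a 7.x-style elasticsearch-py client, where this method is exposed as es.security.change_password (host, username, and password below are placeholders):

from elasticsearch import Elasticsearch

es = Elasticsearch(["http://localhost:9200"])
es.security.change_password(username="jacknich", body={"password": "new-secret-value"})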
def sext(self, num):
"""Sign-extend this farray by *num* bits.
Returns a new farray.
"""
sign = self._items[-1]
return self.__class__(self._items + [sign] * num, ftype=self.ftype) | Sign-extend this farray by *num* bits.
Returns a new farray. | Below is the the instruction that describes the task:
### Input:
Sign-extend this farray by *num* bits.
Returns a new farray.
### Response:
def sext(self, num):
"""Sign-extend this farray by *num* bits.
Returns a new farray.
"""
sign = self._items[-1]
return self.__class__(self._items + [sign] * num, ftype=self.ftype) |
def copy_data(self):
"""
Copy the data from the it's point of origin, serializing it,
storing it serialized as well as in it's raw form and calculate
a running hash of the serialized representation
"""
HASH_FUNCTION = hashlib.sha256()
try:
raw_iterator = self.get_binary_iterator()
except AttributeError:
raw_iterator = self.get_non_binary_iterator()
self.copy_file = tempfile.NamedTemporaryFile(mode='w+')
for part in raw_iterator:
encoded_part = dbsafe_encode(part)
self.copy_file.write(encoded_part)
self.copy_file.write('\n')
HASH_FUNCTION.update(encoded_part)
self.copy_file.seek(0)
self.data_iterator = (dbsafe_decode(line) for line in self.copy_file)
else:
self.copy_file = tempfile.NamedTemporaryFile(mode='w+b')
for part in raw_iterator:
self.copy_file.write(part)
HASH_FUNCTION.update(part)
self.copy_file.seek(0)
self.data_iterator = self.copy_file
self.new_hash = HASH_FUNCTION.hexdigest() | Copy the data from the it's point of origin, serializing it,
storing it serialized as well as in it's raw form and calculate
a running hash of the serialized representation | Below is the the instruction that describes the task:
### Input:
Copy the data from the it's point of origin, serializing it,
storing it serialized as well as in it's raw form and calculate
a running hash of the serialized representation
### Response:
def copy_data(self):
"""
Copy the data from the it's point of origin, serializing it,
storing it serialized as well as in it's raw form and calculate
a running hash of the serialized representation
"""
HASH_FUNCTION = hashlib.sha256()
try:
raw_iterator = self.get_binary_iterator()
except AttributeError:
raw_iterator = self.get_non_binary_iterator()
self.copy_file = tempfile.NamedTemporaryFile(mode='w+')
for part in raw_iterator:
encoded_part = dbsafe_encode(part)
self.copy_file.write(encoded_part)
self.copy_file.write('\n')
HASH_FUNCTION.update(encoded_part)
self.copy_file.seek(0)
self.data_iterator = (dbsafe_decode(line) for line in self.copy_file)
else:
self.copy_file = tempfile.NamedTemporaryFile(mode='w+b')
for part in raw_iterator:
self.copy_file.write(part)
HASH_FUNCTION.update(part)
self.copy_file.seek(0)
self.data_iterator = self.copy_file
self.new_hash = HASH_FUNCTION.hexdigest() |
def get_server_certificate(server_certificate, flags=FLAGS.BASE, **conn):
"""
Orchestrates all the calls required to fully build out an IAM User in the following format:
{
"Arn": ...,
"ServerCertificateName": ...,
"Path": ...,
"ServerCertificateId": ...,
"UploadDate": ..., # str
"Expiration": ..., # str
"CertificateBody": ...,
"CertificateChain": ...,
"_version": 1
}
:param flags: By default, Users is disabled. This is somewhat expensive as it has to call the
`get_server_certificate` call multiple times.
:param server_certificate: dict MUST contain the ServerCertificateName and also a combination of
either the ARN or the account_number.
:param output: Determines whether keys should be returned camelized or underscored.
:param conn: dict containing enough information to make a connection to the desired account.
Must at least have 'assume_role' key.
:return: dict containing fully built out Server Certificate.
"""
if not server_certificate.get('ServerCertificateName'):
raise MissingFieldException('Must include ServerCertificateName.')
server_certificate = modify(server_certificate, output='camelized')
_conn_from_args(server_certificate, conn)
return registry.build_out(flags, start_with=server_certificate, pass_datastructure=True, **conn) | Orchestrates all the calls required to fully build out an IAM User in the following format:
{
"Arn": ...,
"ServerCertificateName": ...,
"Path": ...,
"ServerCertificateId": ...,
"UploadDate": ..., # str
"Expiration": ..., # str
"CertificateBody": ...,
"CertificateChain": ...,
"_version": 1
}
:param flags: By default, Users is disabled. This is somewhat expensive as it has to call the
`get_server_certificate` call multiple times.
:param server_certificate: dict MUST contain the ServerCertificateName and also a combination of
either the ARN or the account_number.
:param output: Determines whether keys should be returned camelized or underscored.
:param conn: dict containing enough information to make a connection to the desired account.
Must at least have 'assume_role' key.
:return: dict containing fully built out Server Certificate. | Below is the the instruction that describes the task:
### Input:
Orchestrates all the calls required to fully build out an IAM User in the following format:
{
"Arn": ...,
"ServerCertificateName": ...,
"Path": ...,
"ServerCertificateId": ...,
"UploadDate": ..., # str
"Expiration": ..., # str
"CertificateBody": ...,
"CertificateChain": ...,
"_version": 1
}
:param flags: By default, Users is disabled. This is somewhat expensive as it has to call the
`get_server_certificate` call multiple times.
:param server_certificate: dict MUST contain the ServerCertificateName and also a combination of
either the ARN or the account_number.
:param output: Determines whether keys should be returned camelized or underscored.
:param conn: dict containing enough information to make a connection to the desired account.
Must at least have 'assume_role' key.
:return: dict containing fully built out Server Certificate.
### Response:
def get_server_certificate(server_certificate, flags=FLAGS.BASE, **conn):
"""
Orchestrates all the calls required to fully build out an IAM User in the following format:
{
"Arn": ...,
"ServerCertificateName": ...,
"Path": ...,
"ServerCertificateId": ...,
"UploadDate": ..., # str
"Expiration": ..., # str
"CertificateBody": ...,
"CertificateChain": ...,
"_version": 1
}
:param flags: By default, Users is disabled. This is somewhat expensive as it has to call the
`get_server_certificate` call multiple times.
:param server_certificate: dict MUST contain the ServerCertificateName and also a combination of
either the ARN or the account_number.
:param output: Determines whether keys should be returned camelized or underscored.
:param conn: dict containing enough information to make a connection to the desired account.
Must at least have 'assume_role' key.
:return: dict containing fully built out Server Certificate.
"""
if not server_certificate.get('ServerCertificateName'):
raise MissingFieldException('Must include ServerCertificateName.')
server_certificate = modify(server_certificate, output='camelized')
_conn_from_args(server_certificate, conn)
return registry.build_out(flags, start_with=server_certificate, pass_datastructure=True, **conn) |
def _add_annots(self, layout, annots):
"""Adds annotations to the layout object
"""
if annots:
for annot in resolve1(annots):
annot = resolve1(annot)
if annot.get('Rect') is not None:
annot['bbox'] = annot.pop('Rect') # Rename key
annot = self._set_hwxy_attrs(annot)
try:
annot['URI'] = resolve1(annot['A'])['URI']
except KeyError:
pass
for k, v in six.iteritems(annot):
if not isinstance(v, six.string_types):
annot[k] = obj_to_string(v)
elem = parser.makeelement('Annot', annot)
layout.add(elem)
return layout | Adds annotations to the layout object | Below is the the instruction that describes the task:
### Input:
Adds annotations to the layout object
### Response:
def _add_annots(self, layout, annots):
"""Adds annotations to the layout object
"""
if annots:
for annot in resolve1(annots):
annot = resolve1(annot)
if annot.get('Rect') is not None:
annot['bbox'] = annot.pop('Rect') # Rename key
annot = self._set_hwxy_attrs(annot)
try:
annot['URI'] = resolve1(annot['A'])['URI']
except KeyError:
pass
for k, v in six.iteritems(annot):
if not isinstance(v, six.string_types):
annot[k] = obj_to_string(v)
elem = parser.makeelement('Annot', annot)
layout.add(elem)
return layout |
def build_component(res, parent=None):
"Create a gui2py control based on the python resource"
# control specs (parameters)
kwargs = dict(res.items())
comtype = kwargs.pop('type')
if 'components' in res:
components = kwargs.pop('components')
elif comtype == 'Menu' and 'items' in res:
components = kwargs.pop('items')
else:
components = []
from gui import registry
if comtype in registry.CONTROLS:
comclass = registry.CONTROLS[comtype]
elif comtype in registry.MENU:
comclass = registry.MENU[comtype]
elif comtype in registry.MISC:
comclass = registry.MISC[comtype]
else:
raise RuntimeError("%s not in registry" % comtype)
# Instantiate the GUI object
com = comclass(parent=parent, **kwargs)
for comp in components:
build_component(comp, parent=com)
return com | Create a gui2py control based on the python resource | Below is the the instruction that describes the task:
### Input:
Create a gui2py control based on the python resource
### Response:
def build_component(res, parent=None):
"Create a gui2py control based on the python resource"
# control specs (parameters)
kwargs = dict(res.items())
comtype = kwargs.pop('type')
if 'components' in res:
components = kwargs.pop('components')
elif comtype == 'Menu' and 'items' in res:
components = kwargs.pop('items')
else:
components = []
from gui import registry
if comtype in registry.CONTROLS:
comclass = registry.CONTROLS[comtype]
elif comtype in registry.MENU:
comclass = registry.MENU[comtype]
elif comtype in registry.MISC:
comclass = registry.MISC[comtype]
else:
raise RuntimeError("%s not in registry" % comtype)
# Instantiate the GUI object
com = comclass(parent=parent, **kwargs)
for comp in components:
build_component(comp, parent=com)
return com |
def page_length(self, length):
'''Specifies page length. This command is only valid with continuous length labels.
Args:
length: The length of the page, in dots. Can't exceed 12000.
Returns:
None
Raises:
RuntimeError: Length must be less than 12000.
'''
mH = length/256
mL = length%256
if length < 12000:
self.send(chr(27)+'('+'C'+chr(2)+chr(0)+chr(mL)+chr(mH))
else:
raise RuntimeError('Length must be less than 12000.') | Specifies page length. This command is only valid with continuous length labels.
Args:
length: The length of the page, in dots. Can't exceed 12000.
Returns:
None
Raises:
RuntimeError: Length must be less than 12000. | Below is the the instruction that describes the task:
### Input:
Specifies page length. This command is only valid with continuous length labels.
Args:
length: The length of the page, in dots. Can't exceed 12000.
Returns:
None
Raises:
RuntimeError: Length must be less than 12000.
### Response:
def page_length(self, length):
'''Specifies page length. This command is only valid with continuous length labels.
Args:
length: The length of the page, in dots. Can't exceed 12000.
Returns:
None
Raises:
RuntimeError: Length must be less than 12000.
'''
mH = length/256
mL = length%256
if length < 12000:
self.send(chr(27)+'('+'C'+chr(2)+chr(0)+chr(mL)+chr(mH))
else:
raise RuntimeError('Length must be less than 12000.') |
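The mH/mL split above is a base-256 encoding of the length in dots; a worked example, using integer division as the original Python 2 code assumes:

length = 1500
mH, mL = length // 256, length % 256
print(mH, mL)         # -> 5 220
print(mH * 256 + mL)  # -> 1500, the value the printer reconstructs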
def get_dot(stop=True):
"""Returns a string containing a DOT file. Setting stop to True will cause
the trace to stop.
"""
defaults = []
nodes = []
edges = []
# define default attributes
for comp, comp_attr in graph_attributes.items():
attr = ', '.join( '%s = "%s"' % (attr, val)
for attr, val in comp_attr.items() )
defaults.append( '\t%(comp)s [ %(attr)s ];\n' % locals() )
# define nodes
for func, hits in func_count.items():
calls_frac, total_time_frac, total_time = _frac_calculation(func, hits)
col = settings['node_colour'](calls_frac, total_time_frac)
attribs = ['%s="%s"' % a for a in settings['node_attributes'].items()]
node_str = '"%s" [%s];' % (func, ', '.join(attribs))
nodes.append( node_str % locals() )
# define edges
for fr_key, fr_val in call_dict.items():
if not fr_key: continue
for to_key, to_val in fr_val.items():
            calls_frac, total_time_frac, total_time = \
_frac_calculation(to_key, to_val)
col = settings['edge_colour'](calls_frac, total_time_frac)
edge = '[ color = "%s", label="%s" ]' % (col, to_val)
edges.append('"%s"->"%s" %s;' % (fr_key, to_key, edge))
defaults = '\n\t'.join( defaults )
nodes = '\n\t'.join( nodes )
edges = '\n\t'.join( edges )
dot_fmt = ("digraph G {\n"
" %(defaults)s\n\n"
" %(nodes)s\n\n"
" %(edges)s\n}\n"
)
return dot_fmt % locals() | Returns a string containing a DOT file. Setting stop to True will cause
the trace to stop. | Below is the the instruction that describes the task:
### Input:
Returns a string containing a DOT file. Setting stop to True will cause
the trace to stop.
### Response:
def get_dot(stop=True):
"""Returns a string containing a DOT file. Setting stop to True will cause
the trace to stop.
"""
defaults = []
nodes = []
edges = []
# define default attributes
for comp, comp_attr in graph_attributes.items():
attr = ', '.join( '%s = "%s"' % (attr, val)
for attr, val in comp_attr.items() )
defaults.append( '\t%(comp)s [ %(attr)s ];\n' % locals() )
# define nodes
for func, hits in func_count.items():
calls_frac, total_time_frac, total_time = _frac_calculation(func, hits)
col = settings['node_colour'](calls_frac, total_time_frac)
attribs = ['%s="%s"' % a for a in settings['node_attributes'].items()]
node_str = '"%s" [%s];' % (func, ', '.join(attribs))
nodes.append( node_str % locals() )
# define edges
for fr_key, fr_val in call_dict.items():
if not fr_key: continue
for to_key, to_val in fr_val.items():
            calls_frac, total_time_frac, total_time = \
_frac_calculation(to_key, to_val)
col = settings['edge_colour'](calls_frac, total_time_frac)
edge = '[ color = "%s", label="%s" ]' % (col, to_val)
edges.append('"%s"->"%s" %s;' % (fr_key, to_key, edge))
defaults = '\n\t'.join( defaults )
nodes = '\n\t'.join( nodes )
edges = '\n\t'.join( edges )
dot_fmt = ("digraph G {\n"
" %(defaults)s\n\n"
" %(nodes)s\n\n"
" %(edges)s\n}\n"
)
return dot_fmt % locals() |
def _cache(self):
"""
Populates the list with a number of gradient colors.
The list has Gradient.steps colors that interpolate between
the fixed base Gradient.colors.
The spread parameter controls the midpoint of the gradient,
you can shift it right and left. A separate gradient is
calculated for each half and then glued together.
"""
n = self.steps
# Only one color in base list.
if len(self._colors) == 1:
ColorList.__init__(self, [self._colors[0] for i in _range(n)])
return
# Expand the base list so we can chop more accurately.
colors = self._interpolate(self._colors, 40)
# Chop into left half and right half.
# Make sure their ending and beginning match colors.
left = colors[:len(colors) / 2]
right = colors[len(colors) / 2:]
left.append(right[0])
right.insert(0, left[-1])
# Calculate left and right gradient proportionally to spread.
gradient = self._interpolate(left, int(n * self.spread))[:-1]
gradient.extend(
self._interpolate(right, n - int(n * self.spread))[1:]
)
if self.spread > 1: gradient = gradient[:n]
if self.spread < 0: gradient = gradient[-n:]
ColorList.__init__(self, gradient) | Populates the list with a number of gradient colors.
The list has Gradient.steps colors that interpolate between
the fixed base Gradient.colors.
The spread parameter controls the midpoint of the gradient,
you can shift it right and left. A separate gradient is
calculated for each half and then glued together. | Below is the the instruction that describes the task:
### Input:
Populates the list with a number of gradient colors.
The list has Gradient.steps colors that interpolate between
the fixed base Gradient.colors.
The spread parameter controls the midpoint of the gradient,
you can shift it right and left. A separate gradient is
calculated for each half and then glued together.
### Response:
def _cache(self):
"""
Populates the list with a number of gradient colors.
The list has Gradient.steps colors that interpolate between
the fixed base Gradient.colors.
The spread parameter controls the midpoint of the gradient,
you can shift it right and left. A separate gradient is
calculated for each half and then glued together.
"""
n = self.steps
# Only one color in base list.
if len(self._colors) == 1:
ColorList.__init__(self, [self._colors[0] for i in _range(n)])
return
# Expand the base list so we can chop more accurately.
colors = self._interpolate(self._colors, 40)
# Chop into left half and right half.
# Make sure their ending and beginning match colors.
left = colors[:len(colors) / 2]
right = colors[len(colors) / 2:]
left.append(right[0])
right.insert(0, left[-1])
# Calculate left and right gradient proportionally to spread.
gradient = self._interpolate(left, int(n * self.spread))[:-1]
gradient.extend(
self._interpolate(right, n - int(n * self.spread))[1:]
)
if self.spread > 1: gradient = gradient[:n]
if self.spread < 0: gradient = gradient[-n:]
ColorList.__init__(self, gradient) |
def locate_fixed_differences(ac1, ac2):
"""Locate variants with no shared alleles between two populations.
Parameters
----------
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
Returns
-------
loc : ndarray, bool, shape (n_variants,)
See Also
--------
allel.stats.diversity.windowed_df
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]],
... [[0, 1], [0, 1], [0, 1], [0, 1]],
... [[0, 1], [0, 1], [1, 1], [1, 1]],
... [[0, 0], [0, 0], [1, 1], [2, 2]],
... [[0, 0], [-1, -1], [1, 1], [-1, -1]]])
>>> ac1 = g.count_alleles(subpop=[0, 1])
>>> ac2 = g.count_alleles(subpop=[2, 3])
>>> loc_df = allel.locate_fixed_differences(ac1, ac2)
>>> loc_df
array([ True, False, False, True, True])
"""
# check inputs
ac1 = asarray_ndim(ac1, 2)
ac2 = asarray_ndim(ac2, 2)
check_dim0_aligned(ac1, ac2)
ac1, ac2 = ensure_dim1_aligned(ac1, ac2)
# stack allele counts for convenience
pac = np.dstack([ac1, ac2])
# count numbers of alleles called in each population
pan = np.sum(pac, axis=1)
# count the numbers of populations with each allele
npa = np.sum(pac > 0, axis=2)
# locate variants with allele calls in both populations
non_missing = np.all(pan > 0, axis=1)
# locate variants where all alleles are only found in a single population
no_shared_alleles = np.all(npa <= 1, axis=1)
return non_missing & no_shared_alleles | Locate variants with no shared alleles between two populations.
Parameters
----------
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
Returns
-------
loc : ndarray, bool, shape (n_variants,)
See Also
--------
allel.stats.diversity.windowed_df
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]],
... [[0, 1], [0, 1], [0, 1], [0, 1]],
... [[0, 1], [0, 1], [1, 1], [1, 1]],
... [[0, 0], [0, 0], [1, 1], [2, 2]],
... [[0, 0], [-1, -1], [1, 1], [-1, -1]]])
>>> ac1 = g.count_alleles(subpop=[0, 1])
>>> ac2 = g.count_alleles(subpop=[2, 3])
>>> loc_df = allel.locate_fixed_differences(ac1, ac2)
>>> loc_df
array([ True, False, False, True, True]) | Below is the the instruction that describes the task:
### Input:
Locate variants with no shared alleles between two populations.
Parameters
----------
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
Returns
-------
loc : ndarray, bool, shape (n_variants,)
See Also
--------
allel.stats.diversity.windowed_df
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]],
... [[0, 1], [0, 1], [0, 1], [0, 1]],
... [[0, 1], [0, 1], [1, 1], [1, 1]],
... [[0, 0], [0, 0], [1, 1], [2, 2]],
... [[0, 0], [-1, -1], [1, 1], [-1, -1]]])
>>> ac1 = g.count_alleles(subpop=[0, 1])
>>> ac2 = g.count_alleles(subpop=[2, 3])
>>> loc_df = allel.locate_fixed_differences(ac1, ac2)
>>> loc_df
array([ True, False, False, True, True])
### Response:
def locate_fixed_differences(ac1, ac2):
"""Locate variants with no shared alleles between two populations.
Parameters
----------
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
Returns
-------
loc : ndarray, bool, shape (n_variants,)
See Also
--------
allel.stats.diversity.windowed_df
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]],
... [[0, 1], [0, 1], [0, 1], [0, 1]],
... [[0, 1], [0, 1], [1, 1], [1, 1]],
... [[0, 0], [0, 0], [1, 1], [2, 2]],
... [[0, 0], [-1, -1], [1, 1], [-1, -1]]])
>>> ac1 = g.count_alleles(subpop=[0, 1])
>>> ac2 = g.count_alleles(subpop=[2, 3])
>>> loc_df = allel.locate_fixed_differences(ac1, ac2)
>>> loc_df
array([ True, False, False, True, True])
"""
# check inputs
ac1 = asarray_ndim(ac1, 2)
ac2 = asarray_ndim(ac2, 2)
check_dim0_aligned(ac1, ac2)
ac1, ac2 = ensure_dim1_aligned(ac1, ac2)
# stack allele counts for convenience
pac = np.dstack([ac1, ac2])
# count numbers of alleles called in each population
pan = np.sum(pac, axis=1)
# count the numbers of populations with each allele
npa = np.sum(pac > 0, axis=2)
# locate variants with allele calls in both populations
non_missing = np.all(pan > 0, axis=1)
# locate variants where all alleles are only found in a single population
no_shared_alleles = np.all(npa <= 1, axis=1)
return non_missing & no_shared_alleles |
def status_message(self):
"""Detailed message about whether the dependency is installed.
:rtype: str
"""
if self.is_available:
return "INSTALLED {0!s}"
elif self.why and self.package:
return "MISSING {0!s:<20}needed for {0.why}, part of the {0.package} package"
elif self.why:
return "MISSING {0!s:<20}needed for {0.why}"
elif self.package:
return "MISSING {0!s:<20}part of the {0.package} package"
else:
return "MISSING {0!s:<20}" | Detailed message about whether the dependency is installed.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Detailed message about whether the dependency is installed.
:rtype: str
### Response:
def status_message(self):
"""Detailed message about whether the dependency is installed.
:rtype: str
"""
if self.is_available:
return "INSTALLED {0!s}"
elif self.why and self.package:
return "MISSING {0!s:<20}needed for {0.why}, part of the {0.package} package"
elif self.why:
return "MISSING {0!s:<20}needed for {0.why}"
elif self.package:
return "MISSING {0!s:<20}part of the {0.package} package"
else:
return "MISSING {0!s:<20}" |
def set_dims(self, dims, shape=None):
"""Return a new variable with given set of dimensions.
This method might be used to attach new dimension(s) to variable.
When possible, this operation does not copy this variable's data.
Parameters
----------
dims : str or sequence of str or dict
Dimensions to include on the new variable. If a dict, values are
used to provide the sizes of new dimensions; otherwise, new
dimensions are inserted with length 1.
Returns
-------
Variable
"""
if isinstance(dims, str):
dims = [dims]
if shape is None and utils.is_dict_like(dims):
shape = dims.values()
missing_dims = set(self.dims) - set(dims)
if missing_dims:
raise ValueError('new dimensions %r must be a superset of '
'existing dimensions %r' % (dims, self.dims))
self_dims = set(self.dims)
expanded_dims = tuple(
d for d in dims if d not in self_dims) + self.dims
if self.dims == expanded_dims:
# don't use broadcast_to unless necessary so the result remains
# writeable if possible
expanded_data = self.data
elif shape is not None:
dims_map = dict(zip(dims, shape))
tmp_shape = tuple(dims_map[d] for d in expanded_dims)
expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape)
else:
expanded_data = self.data[
(None,) * (len(expanded_dims) - self.ndim)]
expanded_var = Variable(expanded_dims, expanded_data, self._attrs,
self._encoding, fastpath=True)
return expanded_var.transpose(*dims) | Return a new variable with given set of dimensions.
This method might be used to attach new dimension(s) to variable.
When possible, this operation does not copy this variable's data.
Parameters
----------
dims : str or sequence of str or dict
Dimensions to include on the new variable. If a dict, values are
used to provide the sizes of new dimensions; otherwise, new
dimensions are inserted with length 1.
Returns
-------
Variable | Below is the the instruction that describes the task:
### Input:
Return a new variable with given set of dimensions.
This method might be used to attach new dimension(s) to variable.
When possible, this operation does not copy this variable's data.
Parameters
----------
dims : str or sequence of str or dict
Dimensions to include on the new variable. If a dict, values are
used to provide the sizes of new dimensions; otherwise, new
dimensions are inserted with length 1.
Returns
-------
Variable
### Response:
def set_dims(self, dims, shape=None):
"""Return a new variable with given set of dimensions.
This method might be used to attach new dimension(s) to variable.
When possible, this operation does not copy this variable's data.
Parameters
----------
dims : str or sequence of str or dict
Dimensions to include on the new variable. If a dict, values are
used to provide the sizes of new dimensions; otherwise, new
dimensions are inserted with length 1.
Returns
-------
Variable
"""
if isinstance(dims, str):
dims = [dims]
if shape is None and utils.is_dict_like(dims):
shape = dims.values()
missing_dims = set(self.dims) - set(dims)
if missing_dims:
raise ValueError('new dimensions %r must be a superset of '
'existing dimensions %r' % (dims, self.dims))
self_dims = set(self.dims)
expanded_dims = tuple(
d for d in dims if d not in self_dims) + self.dims
if self.dims == expanded_dims:
# don't use broadcast_to unless necessary so the result remains
# writeable if possible
expanded_data = self.data
elif shape is not None:
dims_map = dict(zip(dims, shape))
tmp_shape = tuple(dims_map[d] for d in expanded_dims)
expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape)
else:
expanded_data = self.data[
(None,) * (len(expanded_dims) - self.ndim)]
expanded_var = Variable(expanded_dims, expanded_data, self._attrs,
self._encoding, fastpath=True)
return expanded_var.transpose(*dims) |
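A short usage sketch against the public xarray API (xarray.Variable.set_dims); the dict form supplies the new sizes, as described in the docstring above:

import xarray as xr

v = xr.Variable(('x',), [1, 2, 3])
w = v.set_dims({'y': 2, 'x': 3})  # broadcast to a new leading dimension of length 2
print(w.dims, w.shape)            # -> ('y', 'x') (2, 3)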
def _update_conda_devel():
"""Update to the latest development conda package.
"""
conda_bin = _get_conda_bin()
channels = _get_conda_channels(conda_bin)
assert conda_bin, "Could not find anaconda distribution for upgrading bcbio"
subprocess.check_call([conda_bin, "install", "--quiet", "--yes"] + channels +
["bcbio-nextgen>=%s" % version.__version__.replace("a0", "a")])
return os.path.dirname(os.path.dirname(conda_bin)) | Update to the latest development conda package. | Below is the the instruction that describes the task:
### Input:
Update to the latest development conda package.
### Response:
def _update_conda_devel():
"""Update to the latest development conda package.
"""
conda_bin = _get_conda_bin()
channels = _get_conda_channels(conda_bin)
assert conda_bin, "Could not find anaconda distribution for upgrading bcbio"
subprocess.check_call([conda_bin, "install", "--quiet", "--yes"] + channels +
["bcbio-nextgen>=%s" % version.__version__.replace("a0", "a")])
return os.path.dirname(os.path.dirname(conda_bin)) |
def form_valid(self, form):
'''
Create slots and return success message.
'''
startDate = form.cleaned_data['startDate']
endDate = form.cleaned_data['endDate']
startTime = form.cleaned_data['startTime']
endTime = form.cleaned_data['endTime']
instructor = form.cleaned_data['instructorId']
interval_minutes = getConstant('privateLessons__lessonLengthInterval')
this_date = startDate
while this_date <= endDate:
this_time = startTime
while this_time < endTime:
InstructorAvailabilitySlot.objects.create(
instructor=instructor,
startTime=ensure_localtime(datetime.combine(this_date, this_time)),
duration=interval_minutes,
location=form.cleaned_data.get('location'),
room=form.cleaned_data.get('room'),
pricingTier=form.cleaned_data.get('pricingTier'),
)
this_time = (ensure_localtime(datetime.combine(this_date, this_time)) + timedelta(minutes=interval_minutes)).time()
this_date += timedelta(days=1)
return JsonResponse({'valid': True}) | Create slots and return success message. | Below is the the instruction that describes the task:
### Input:
Create slots and return success message.
### Response:
def form_valid(self, form):
'''
Create slots and return success message.
'''
startDate = form.cleaned_data['startDate']
endDate = form.cleaned_data['endDate']
startTime = form.cleaned_data['startTime']
endTime = form.cleaned_data['endTime']
instructor = form.cleaned_data['instructorId']
interval_minutes = getConstant('privateLessons__lessonLengthInterval')
this_date = startDate
while this_date <= endDate:
this_time = startTime
while this_time < endTime:
InstructorAvailabilitySlot.objects.create(
instructor=instructor,
startTime=ensure_localtime(datetime.combine(this_date, this_time)),
duration=interval_minutes,
location=form.cleaned_data.get('location'),
room=form.cleaned_data.get('room'),
pricingTier=form.cleaned_data.get('pricingTier'),
)
this_time = (ensure_localtime(datetime.combine(this_date, this_time)) + timedelta(minutes=interval_minutes)).time()
this_date += timedelta(days=1)
return JsonResponse({'valid': True}) |