text (stringlengths 78–104k) | score (float64, 0–0.18) |
---|---|
def from_board(cls: Type[GameT], board: chess.Board) -> GameT:
"""Creates a game from the move stack of a :class:`~chess.Board()`."""
# Setup the initial position.
game = cls()
game.setup(board.root())
node = game # type: GameNode
# Replay all moves.
for move in board.move_stack:
node = node.add_variation(move)
game.headers["Result"] = board.result()
return game | 0.004435 |
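A minimal usage sketch for the classmethod above, assuming the enclosing class is `chess.pgn.Game` from the python-chess package (the snippet itself does not name it):

import chess
import chess.pgn

# Build a board by replaying a few moves.
board = chess.Board()
for san in ["e4", "e5", "Nf3"]:
    board.push_san(san)

# Reconstruct a PGN game from the board's move stack.
game = chess.pgn.Game.from_board(board)
print(game.headers["Result"])  # "*" for an unfinished game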
def producer_consumer(producer, consumer, addr='tcp://127.0.0.1',
port=None, context=None):
"""A producer-consumer pattern.
Parameters
----------
producer : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
consumer : callable
Callable that takes a single argument, a handle
for a ZeroMQ PULL socket.
addr : str, optional
Address to which the socket should connect. Defaults
to localhost ('tcp://127.0.0.1').
port : int, optional
The port on which the consumer should listen.
context : zmq.Context, optional
The ZeroMQ Context to use. One will be created otherwise.
Returns
-------
result
Passes along whatever `consumer` returns.
Notes
-----
This sets up a PULL socket in the calling process and forks
a process that calls `producer` on a PUSH socket. When the
consumer returns, the producer process is terminated.
Wrap `consumer` or `producer` in a `functools.partial` object
in order to send additional arguments; the callables passed in
should expect only one required, positional argument, the socket
handle.
"""
context_created = False
if context is None:
context_created = True
context = zmq.Context()
try:
consumer_socket = context.socket(zmq.PULL)
if port is None:
port = consumer_socket.bind_to_random_port(addr)
try:
process = _spawn_producer(producer, port)
result = consumer(consumer_socket)
finally:
process.terminate()
return result
finally:
# Works around a Python 3.x bug.
if context_created:
context.destroy() | 0.000552 |
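A hedged usage sketch for the pattern above; the send/receive calls are standard pyzmq methods, and the item count is an illustrative assumption:

import functools

def push_numbers(socket, count):
    # Producer: push `count` integers over the PUSH socket.
    for i in range(count):
        socket.send_pyobj(i)

def sum_numbers(socket, count):
    # Consumer: pull `count` integers from the PULL socket and sum them.
    return sum(socket.recv_pyobj() for _ in range(count))

# Extra arguments are bound with functools.partial, as the docstring suggests.
total = producer_consumer(functools.partial(push_numbers, count=10),
                          functools.partial(sum_numbers, count=10))
print(total)  # 45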
def catalog(self):
"""Create MOC from catalog of coordinates.
This command requires that the Healpy and Astropy libraries
be available. It attempts to load the given catalog,
and merges it with the running MOC.
The name of an ASCII catalog file should be given. The file
should contain either "RA" and "Dec" columns (for ICRS coordinates)
or "Lon" and "Lat" columns (for galactic coordinates). The MOC
order and radius (in arcseconds) can be given with additional
options.
::
pymoctool --catalog coords.txt
[order 12]
[radius 3600]
[unit (hour | deg | rad) (deg | rad)]
[format commented_header]
[inclusive]
Units (if not specified) are assumed to be hours and degrees for ICRS
coordinates and degrees for galactic coordinates. The format, if not
specified (as an Astropy ASCII table format name) is assumed to be
commented header, e.g.:
::
# RA Dec
01:30:00 +45:00:00
22:30:00 +45:00:00
"""
from .catalog import catalog_to_moc, read_ascii_catalog
filename = self.params.pop()
order = 12
radius = 3600
unit = None
format_ = 'commented_header'
kwargs = {}
while self.params:
if self.params[-1] == 'order':
self.params.pop()
order = int(self.params.pop())
elif self.params[-1] == 'radius':
self.params.pop()
radius = float(self.params.pop())
elif self.params[-1] == 'unit':
self.params.pop()
unit_x = self.params.pop()
unit_y = self.params.pop()
unit = (unit_x, unit_y)
elif self.params[-1] == 'format':
self.params.pop()
format_ = self.params.pop()
elif self.params[-1] == 'inclusive':
self.params.pop()
kwargs['inclusive'] = True
else:
break
coords = read_ascii_catalog(filename, format_=format_, unit=unit)
catalog_moc = catalog_to_moc(coords, radius, order, **kwargs)
if self.moc is None:
self.moc = catalog_moc
else:
self.moc += catalog_moc | 0.000828 |
def stop(self):
"""Stop pipeline."""
urllib.request.urlcleanup()
self._player.set_state(Gst.State.NULL)
self.state = STATE_IDLE
self._tags = {} | 0.010929 |
def tail(ctx):
"""Show the last 10 lines of the log file"""
click.echo('tailing logs')
for e in ctx.tail()[-10:]:
ts = datetime.utcfromtimestamp(e['timestamp'] // 1000).isoformat()
click.echo("{}: {}".format(ts, e['message']))
click.echo('done') | 0.00361 |
def flush_all(self, time=0):
"""
Send a command to each server to flush (delete) all of its keys.
:param time: Time to wait until flush in seconds.
:type time: int
:return: True in case of success, False in case of failure
:rtype: bool
"""
returns = []
for server in self.servers:
returns.append(server.flush_all(time))
return any(returns) | 0.004808 |
def analyze_pages(file_name, char_margin=1.0):
"""
Input: the file path to the PDF file
Output: yields the layout object for each page in the PDF
"""
log = logging.getLogger(__name__)
# Open a PDF file.
with open(os.path.realpath(file_name), "rb") as fp:
# Create a PDF parser object associated with the file object.
parser = PDFParser(fp)
# Create a PDF document object that stores the document structure.
# Supply the password for initialization.
document = PDFDocument(parser, password="")
# Create a PDF resource manager object that stores shared resources.
rsrcmgr = PDFResourceManager()
# Set parameters for analysis.
laparams = LAParams(
char_margin=char_margin, word_margin=0.1, detect_vertical=True
)
# Create a PDF page aggregator object.
device = CustomPDFPageAggregator(rsrcmgr, laparams=laparams)
# Create a PDF interpreter object.
interpreter = PDFPageInterpreter(rsrcmgr, device)
# Process each page contained in the document.
for page_num, page in enumerate(PDFPage.create_pages(document)):
try:
interpreter.process_page(page)
except OverflowError as oe:
log.exception(
"{}, skipping page {} of {}".format(oe, page_num, file_name)
)
continue
layout = device.get_result()
yield layout | 0.001334 |
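An illustrative way to consume the generator above, relying on standard pdfminer layout objects (the input path is hypothetical and the aggregator class is project-specific):

from pdfminer.layout import LTTextBox

for page_layout in analyze_pages("paper.pdf"):
    for element in page_layout:
        if isinstance(element, LTTextBox):
            print(element.get_text())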
def get_kde_contour(self, xax="area_um", yax="deform", xacc=None,
yacc=None, kde_type="histogram", kde_kwargs={},
xscale="linear", yscale="linear"):
"""Evaluate the kernel density estimate for contour plots
Parameters
----------
xax: str
Identifier for X axis (e.g. "area_um", "aspect", "deform")
yax: str
Identifier for Y axis
xacc: float
Contour accuracy in x direction
yacc: float
Contour accuracy in y direction
kde_type: str
The KDE method to use
kde_kwargs: dict
Additional keyword arguments to the KDE method
xscale: str
If set to "log", take the logarithm of the x-values before
computing the KDE. This is useful when data are
displayed on a log-scale. Defaults to "linear".
yscale: str
See `xscale`.
Returns
-------
X, Y, Z : coordinates
The kernel density Z evaluated on a rectangular grid (X,Y).
"""
xax = xax.lower()
yax = yax.lower()
kde_type = kde_type.lower()
if kde_type not in kde_methods.methods:
raise ValueError("Not a valid kde type: {}!".format(kde_type))
# Get data
x = self[xax][self.filter.all]
y = self[yax][self.filter.all]
# Apply scale (no change for linear scale)
xs = self._apply_scale(x, xscale, xax)
ys = self._apply_scale(y, yscale, yax)
# accuracy (bin width) of KDE estimator
if xacc is None:
xacc = kde_methods.bin_width_doane(xs) / 5
if yacc is None:
yacc = kde_methods.bin_width_doane(ys) / 5
# Ignore infs and nans
bad = kde_methods.get_bad_vals(xs, ys)
xc = xs[~bad]
yc = ys[~bad]
xnum = int(np.ceil((xc.max() - xc.min()) / xacc))
ynum = int(np.ceil((yc.max() - yc.min()) / yacc))
xlin = np.linspace(xc.min(), xc.max(), xnum, endpoint=True)
ylin = np.linspace(yc.min(), yc.max(), ynum, endpoint=True)
xmesh, ymesh = np.meshgrid(xlin, ylin, indexing="ij")
kde_fct = kde_methods.methods[kde_type]
if len(x):
density = kde_fct(events_x=xs, events_y=ys,
xout=xmesh, yout=ymesh,
**kde_kwargs)
else:
density = []
# Convert mesh back to linear scale if applicable
if xscale == "log":
xmesh = np.exp(xmesh)
if yscale == "log":
ymesh = np.exp(ymesh)
return xmesh, ymesh, density | 0.00148 |
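A sketch of how the returned mesh might be plotted, assuming `ds` is an already-loaded RT-DC dataset instance exposing this method (dataset loading is not shown):

import matplotlib.pyplot as plt

xmesh, ymesh, density = ds.get_kde_contour(xax="area_um", yax="deform")
plt.contour(xmesh, ymesh, density, levels=5)
plt.xlabel("area_um")
plt.ylabel("deform")
plt.show()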
def transform(self, X):
"""Transform a sequence of documents to a document-term matrix.
Transformation is done in parallel, and correctly handles dask
collections.
Parameters
----------
X : dask.Bag of raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
Returns
-------
X : dask.array.Array, shape = (n_samples, self.n_features)
Document-term matrix. Each block of the array is a scipy sparse
matrix.
Notes
-----
The returned dask Array is composed of scipy sparse matrices. If you need
to compute on the result immediately, you may need to convert the individual
blocks to ndarrays or pydata/sparse matrices.
>>> import sparse
>>> X.map_blocks(sparse.COO.from_scipy_sparse, dtype=X.dtype) # doctest: +SKIP
See the :doc:`examples/text-vectorization` for more.
"""
msg = "'X' should be a 1-dimensional array with length 'num_samples'."
if not dask.is_dask_collection(X):
return super(HashingVectorizer, self).transform(X)
if isinstance(X, db.Bag):
bag2 = X.map_partitions(_transform, estimator=self)
objs = bag2.to_delayed()
arrs = [
da.from_delayed(obj, (np.nan, self.n_features), self.dtype)
for obj in objs
]
result = da.concatenate(arrs, axis=0)
elif isinstance(X, dd.Series):
result = X.map_partitions(_transform, self)
elif isinstance(X, da.Array):
# dask.Array
chunks = ((np.nan,) * X.numblocks[0], (self.n_features,))
if X.ndim == 1:
result = X.map_blocks(
_transform, estimator=self, dtype="f8", chunks=chunks, new_axis=1
)
else:
raise ValueError(msg)
else:
raise ValueError(msg)
return result | 0.002294 |
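A minimal sketch of calling transform on a dask Bag, assuming the estimator is dask-ml's HashingVectorizer (the sample texts are illustrative):

import dask.bag as db
from dask_ml.feature_extraction.text import HashingVectorizer

docs = db.from_sequence(["the quick brown fox", "jumped over the lazy dog"],
                        npartitions=2)
vectorizer = HashingVectorizer()
X = vectorizer.transform(docs)  # dask.array.Array backed by scipy sparse blocks
print(X)                        # lazy; per-chunk row counts stay unknown until computed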
def convert(model, name=None, initial_types=None, doc_string='', target_opset=None,
targeted_onnx=onnx.__version__, custom_conversion_functions=None, custom_shape_calculators=None):
'''
This function converts the specified CoreML model into its ONNX counterpart. Some information such as the produced
ONNX model name can be specified.
:param model: A `CoreML model <https://apple.github.io/coremltools/coremlspecification/sections/Model.html#model>`_ or
a CoreML MLModel object
:param initial_types: A list providing some types for some root variables. Each element is a tuple of a variable
name and a type defined in *data_types.py*.
:param name: The name of the graph (type: GraphProto) in the produced ONNX model (type: ModelProto)
:param doc_string: A string attached onto the produced ONNX model
:param target_opset: number, for example, 7 for ONNX 1.2, and 8 for ONNX 1.3.
:param targeted_onnx: A string (for example, '1.1.2' and '1.2') used to specify the targeted ONNX version of the
produced model. If ONNXMLTools cannot find a compatible ONNX python package, an error may be thrown.
:param custom_conversion_functions: a dictionary for specifying the user customized conversion function
:param custom_shape_calculators: a dictionary for specifying the user customized shape calculator
:return: An ONNX model (type: ModelProto) which is equivalent to the input CoreML model
Example of initial types:
Assume that 'A' and 'B' are two root variable names used in the CoreML
model you want to convert. We can specify their types via:
::
from onnxmltools.convert.common.data_types import FloatTensorType
initial_type = [('A', FloatTensorType([40, 12, 1, 1])),
('B', FloatTensorType([1, 32, 1, 1]))]
'''
if isinstance(model, coremltools.models.MLModel):
spec = model.get_spec()
else:
spec = model
if name is None:
name = str(uuid4().hex)
target_opset = target_opset if target_opset else get_opset_number_from_onnx()
# Parse CoreML model as our internal data structure (i.e., Topology)
topology = parse_coreml(spec, initial_types, target_opset, custom_conversion_functions, custom_shape_calculators)
# Parse CoreML description, author, and license. Those information will be attached to the final ONNX model.
metadata = spec.description.metadata
metadata_props = []
if metadata:
if not doc_string and metadata.shortDescription:
doc_string = metadata.shortDescription # If doc_string is not specified, we use description from CoreML
if metadata.author:
entry = onnx_proto.StringStringEntryProto()
entry.key = 'author'
entry.value = metadata.author
metadata_props.append(entry)
if metadata.license:
entry = onnx_proto.StringStringEntryProto()
entry.key = 'license'
entry.value = metadata.license
metadata_props.append(entry)
# Convert our Topology object into ONNX. The outcome is an ONNX model.
onnx_model = convert_topology(topology, name, doc_string, target_opset, targeted_onnx)
# Edit ONNX model's attributes related to CoreML's meta information
if len(metadata_props) > 0:
onnx_model.metadata_props.extend(metadata_props)
return onnx_model | 0.00669 |
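An illustrative call to the converter above; the model path, graph name, and tensor shape are placeholder assumptions:

import coremltools
from onnxmltools.convert.common.data_types import FloatTensorType

# Hypothetical CoreML model file.
coreml_model = coremltools.models.MLModel("model.mlmodel")
onnx_model = convert(coreml_model,
                     name="example_graph",
                     initial_types=[("A", FloatTensorType([1, 3, 224, 224]))])
with open("model.onnx", "wb") as f:
    f.write(onnx_model.SerializeToString())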
def do_uni_form(parser, token):
"""
You need to pass in at least the form/formset object, and can also pass in the
optional `crispy_forms.helpers.FormHelper` object.
helper (optional): A `crispy_forms.helper.FormHelper` object.
Usage::
{% load crispy_tags %}
{% crispy form form.helper %}
You can also provide the template pack as the third argument::
{% crispy form form.helper 'bootstrap' %}
If the `FormHelper` attribute is named `helper` you can simply do::
{% crispy form %}
{% crispy form 'bootstrap' %}
"""
token = token.split_contents()
form = token.pop(1)
helper = None
template_pack = "'%s'" % get_template_pack()
# {% crispy form helper %}
try:
helper = token.pop(1)
except IndexError:
pass
# {% crispy form helper 'bootstrap' %}
try:
template_pack = token.pop(1)
except IndexError:
pass
# {% crispy form 'bootstrap' %}
if (
helper is not None and
isinstance(helper, string_types) and
("'" in helper or '"' in helper)
):
template_pack = helper
helper = None
if template_pack is not None:
template_pack = template_pack[1:-1]
ALLOWED_TEMPLATE_PACKS = getattr(
settings,
'CRISPY_ALLOWED_TEMPLATE_PACKS',
('bootstrap', 'uni_form', 'bootstrap3', 'bootstrap4')
)
if template_pack not in ALLOWED_TEMPLATE_PACKS:
raise template.TemplateSyntaxError(
"crispy tag's template_pack argument should be in %s" %
str(ALLOWED_TEMPLATE_PACKS)
)
return CrispyFormNode(form, helper, template_pack=template_pack) | 0.001147 |
def memoized_property(func=None, key_factory=per_instance, **kwargs):
"""A convenience wrapper for memoizing properties.
Applied like so:
>>> class Foo(object):
... @memoized_property
... def name(self):
... pass
Is equivalent to:
>>> class Foo(object):
... @property
... @memoized_method
... def name(self):
... pass
Which is equivalent to:
>>> class Foo(object):
... @property
... @memoized(key_factory=per_instance)
... def name(self):
... pass
By default a deleter for the property is setup that un-caches the property such that a subsequent
property access re-computes the value. In other words, for this `now` @memoized_property:
>>> import time
>>> class Bar(object):
... @memoized_property
... def now(self):
... return time.time()
You could write code like so:
>>> bar = Bar()
>>> bar.now
1433267312.622095
>>> time.sleep(5)
>>> bar.now
1433267312.622095
>>> del bar.now
>>> bar.now
1433267424.056189
>>> time.sleep(5)
>>> bar.now
1433267424.056189
>>>
:API: public
:param func: The property getter method to wrap. Only generally passed by the python runtime and
should be omitted when passing a custom `key_factory` or `cache_factory`.
:param key_factory: A function that can form a cache key from the arguments passed to the
wrapped, memoized function; by default `per_instance`.
:param kwargs: Any extra keyword args accepted by `memoized`.
:raises: `ValueError` if the wrapper is applied to anything other than a function.
:returns: A read-only property that memoizes its calculated value and un-caches its value when
`del`ed.
"""
getter = memoized_method(func=func, key_factory=key_factory, **kwargs)
return property(fget=getter, fdel=lambda self: getter.forget(self)) | 0.00586 |
def update(fields, path='', profile=None, **kwargs):
'''
.. versionadded:: 2016.3.0
Sets a dictionary of values in one call. Useful for large updates
in syndic environments. The dictionary can contain a mix of formats
such as:
.. code-block:: python
{
'/some/example/key': 'bar',
'/another/example/key': 'baz'
}
Or it may be a straight dictionary, which will be flattened to look
like the above format:
.. code-block:: python
{
'some': {
'example': {
'key': 'bar'
}
},
'another': {
'example': {
'key': 'baz'
}
}
}
You can even mix the two formats and it will be flattened to the first
format. Leading and trailing '/' will be removed.
Empty directories can be created by setting the value of the key to an
empty dictionary.
The 'path' parameter will optionally set the root of the path to use.
CLI Example:
.. code-block:: bash
salt myminion etcd.update "{'/path/to/key': 'baz', '/another/key': 'bar'}"
salt myminion etcd.update "{'/path/to/key': 'baz', '/another/key': 'bar'}" profile=my_etcd_config
salt myminion etcd.update "{'/path/to/key': 'baz', '/another/key': 'bar'}" host=127.0.0.1 port=2379
salt myminion etcd.update "{'/path/to/key': 'baz', '/another/key': 'bar'}" path='/some/root'
'''
client = __utils__['etcd_util.get_conn'](__opts__, profile, **kwargs)
return client.update(fields, path) | 0.003069 |
def column(self, index_or_label):
"""Return the values of a column as an array.
table.column(label) is equivalent to table[label].
>>> tiles = Table().with_columns(
... 'letter', make_array('c', 'd'),
... 'count', make_array(2, 4),
... )
>>> list(tiles.column('letter'))
['c', 'd']
>>> tiles.column(1)
array([2, 4])
Args:
label (int or str): The index or label of a column
Returns:
An instance of ``numpy.array``.
Raises:
``ValueError``: When the ``index_or_label`` is not in the table.
"""
if (isinstance(index_or_label, str)
and index_or_label not in self.labels):
raise ValueError(
'The column "{}" is not in the table. The table contains '
'these columns: {}'
.format(index_or_label, ', '.join(self.labels))
)
if (isinstance(index_or_label, int)
and not 0 <= index_or_label < len(self.labels)):
raise ValueError(
'The index {} is not in the table. Only indices between '
'0 and {} are valid'
.format(index_or_label, len(self.labels) - 1)
)
return self._columns[self._as_label(index_or_label)] | 0.001471 |
def error(self, error_code, value, **kwargs):
"""
Helper to add error to messages field. It fills placeholder with extra call parameters
or values from message_value map.
:param error_code: Error code to use
:rparam error_code: str
:param value: Value checked
:param kwargs: Map of values to use in placeholders
"""
code = self.error_code_map.get(error_code, error_code)
try:
message = Template(self.error_messages[code])
except KeyError:
message = Template(self.error_messages[error_code])
placeholders = {"value": self.hidden_value if self.hidden else value}
placeholders.update(kwargs)
placeholders.update(self.message_values)
self.messages[code] = message.safe_substitute(placeholders) | 0.003584 |
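The placeholder filling relies on `string.Template.safe_substitute`; a small standalone illustration of the mechanism (message text and values are invented):

from string import Template

message = Template("Value '$value' must contain at least $min characters")
print(message.safe_substitute({"value": "abc", "min": 8}))
# Value 'abc' must contain at least 8 characters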
def clear_lock(backend=None, remote=None):
'''
.. versionadded:: 2015.5.0
Clear the fileserver update lock from VCS fileserver backends (:mod:`git
<salt.fileserver.gitfs>`, :mod:`hg <salt.fileserver.hgfs>`, :mod:`svn
<salt.fileserver.svnfs>`). This should only need to be done if a fileserver
update was interrupted and a remote is not updating (generating a warning
in the Master's log file). Executing this runner with no arguments will
remove all update locks from all enabled VCS fileserver backends, but this
can be narrowed by using the following arguments:
backend
Only clear the update lock for the specified backend(s).
remote
If specified, then any remotes which contain the passed string will
have their lock cleared. For example, a ``remote`` value of **github**
will remove the lock from all github.com remotes.
CLI Example:
.. code-block:: bash
salt-run fileserver.clear_lock
salt-run fileserver.clear_lock backend=git,hg
salt-run fileserver.clear_lock backend=git remote=github
salt-run fileserver.clear_lock remote=bitbucket
'''
fileserver = salt.fileserver.Fileserver(__opts__)
cleared, errors = fileserver.clear_lock(back=backend, remote=remote)
ret = {}
if cleared:
ret['cleared'] = cleared
if errors:
ret['errors'] = errors
if not ret:
return 'No locks were removed'
return ret | 0.000678 |
def vcontour(self, win, n, levels, labels=False, decimals=0, color=None,
vinterp=True, nudge=1e-6, newfig=True, figsize=None, layout=True):
"""Vertical contour
"""
x1, x2, y1, y2 = win
h = self.headalongline(np.linspace(x1 + nudge, x2 - nudge, n),
np.linspace(y1 + nudge, y2 - nudge, n))
L = np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
xg = np.linspace(0, L, n)
if vinterp:
zg = 0.5 * (self.aq.zaqbot + self.aq.zaqtop)
zg = np.hstack((self.aq.zaqtop[0], zg, self.aq.zaqbot[-1]))
h = np.vstack((h[0], h, h[-1]))
else:
zg = np.empty(2 * self.aq.naq)
for i in range(self.aq.naq):
zg[2 * i] = self.aq.zaqtop[i]
zg[2 * i + 1] = self.aq.zaqbot[i]
h = np.repeat(h, 2, 0)
if newfig:
plt.figure(figsize=figsize)
cs = plt.contour(xg, zg, h, levels, colors=color)
if labels:
fmt = '%1.' + str(decimals) + 'f'
plt.clabel(cs, fmt=fmt)
if layout:
self.plot(win=[x1, x2, y1, y2], orientation='ver', newfig=False) | 0.003361 |
def where(self, predicate):
"""
Returns new Enumerable where elements matching predicate are selected
:param predicate: predicate as a lambda expression
:return: new Enumerable object
"""
if predicate is None:
raise NullArgumentError("No predicate given for where clause")
return Enumerable(itertools.ifilter(predicate, self)) | 0.005076 |
def embeddedFileGet(self, id):
"""Retrieve embedded file content by name or by number."""
if self.isClosed or self.isEncrypted:
raise ValueError("operation illegal for closed / encrypted doc")
return _fitz.Document_embeddedFileGet(self, id) | 0.00722 |
def mime_type(filename):
""" Guess mime type for the given file name
Note: this implementation uses the python_magic package, which is not thread-safe; as a
workaround, a global lock is used so this function can be called from threaded environments
:param filename: file name to guess
:return: str
"""
# TODO: write lock-free mime_type function
try:
__mime_lock.acquire()
extension = filename.split(".")
extension = extension[len(extension) - 1]
if extension == "woff2":
return "application/font-woff2"
if extension == "css":
return "text/css"
m = magic.from_file(filename, mime=True)
m = m.decode() if isinstance(m, bytes) else m  # compatibility fix: some versions return bytes, others str
if m == "text/plain":
guessed_type = mimetypes.guess_type(filename)[0] # for js-detection
if guessed_type:
return guessed_type
return m
finally:
__mime_lock.release() | 0.030337 |
def import_model(self, source):
"""Import and return model instance."""
model = super(NonstrictImporter, self).import_model(source)
sbml.convert_sbml_model(model)
return model | 0.009662 |
def flags(self, index):
"""Return the active flags for the given index. Add editable
flag to items other than the first column.
"""
activeFlags = (Qt.ItemIsEnabled | Qt.ItemIsSelectable |
Qt.ItemIsUserCheckable)
item = self.item(index)
column = index.column()
if column > 0 and not item.childCount():
activeFlags = activeFlags | Qt.ItemIsEditable
return activeFlags | 0.004283 |
def delete(self, **kwds):
"""
Endpoint: /album/<id>/delete.json
Deletes this album.
Returns True if successful.
Raises a TroveboxError if not.
"""
result = self._client.album.delete(self, **kwds)
self._delete_fields()
return result | 0.006579 |
def provider(self, value):
"""
Validate and set a WMI provider. Default to `ProviderArchitecture.DEFAULT`
"""
result = None
# `None` defaults to `ProviderArchitecture.DEFAULT`
defaulted_value = value or ProviderArchitecture.DEFAULT
try:
parsed_value = int(defaulted_value)
except ValueError:
pass
else:
if parsed_value in ProviderArchitecture:
result = parsed_value
if result is None:
self.logger.error(u"Invalid '%s' WMI Provider Architecture. The parameter is ignored.", value)
self._provider = result or ProviderArchitecture.DEFAULT | 0.005789 |
def istoken(docgraph, node_id, namespace=None):
"""returns true, iff the given node ID belongs to a token node.
Parameters
----------
node_id : str
the node to be checked
namespace : str or None
If a namespace is given, only look for tokens in the given namespace.
Otherwise, look for tokens in the default namespace of the given
document graph.
"""
if namespace is None:
namespace = docgraph.ns
return namespace+':token' in docgraph.node[node_id] | 0.001923 |
def make_regex(string):
"""Regex string for optionally signed binary or privative feature.
>>> [make_regex(s) for s in '+spam -spam spam'.split()]
['([+]?spam)', '(-spam)', '(spam)']
>>> make_regex('+eggs-spam')
Traceback (most recent call last):
...
ValueError: inappropriate feature name: '+eggs-spam'
>>> make_regex('')
Traceback (most recent call last):
...
ValueError: inappropriate feature name: ''
"""
if string and string[0] in '+-':
sign, name = string[0], string[1:]
if not name or '+' in name or '-' in name:
raise ValueError('inappropriate feature name: %r' % string)
tmpl = r'([+]?%s)' if sign == '+' else r'(-%s)'
return tmpl % name
if not string or '+' in string or '-' in string:
raise ValueError('inappropriate feature name: %r' % string)
return r'(%s)' % string | 0.001105 |
def _extract_and_handle_bgp4_new_paths(self, update_msg):
"""Extracts new paths advertised in the given update message's
*MpReachNlri* attribute.
Assumes MPBGP capability is enabled and message was validated.
Parameters:
- update_msg: (Update) is assumed to be checked for all bgp
message errors.
- valid_rts: (iterable) current valid/configured RTs.
Extracted paths are added to appropriate *Destination* for further
processing.
"""
umsg_pattrs = update_msg.pathattr_map
next_hop = update_msg.get_path_attr(BGP_ATTR_TYPE_NEXT_HOP).value
# Nothing to do if we do not have any new NLRIs in this message.
msg_nlri_list = update_msg.nlri
if not msg_nlri_list:
LOG.debug('Update message did not have any new MP_REACH_NLRIs.')
return
# Create path instances for each NLRI from the update message.
for msg_nlri in msg_nlri_list:
LOG.debug('NLRI: %s', msg_nlri)
new_path = bgp_utils.create_path(
self,
msg_nlri,
pattrs=umsg_pattrs,
nexthop=next_hop
)
LOG.debug('Extracted paths from Update msg.: %s', new_path)
block, blocked_cause = self._apply_in_filter(new_path)
nlri_str = new_path.nlri.formatted_nlri_str
received_route = ReceivedRoute(new_path, self, block)
self._adj_rib_in[nlri_str] = received_route
self._signal_bus.adj_rib_in_changed(self, received_route)
if not block:
# Update appropriate table with new paths.
tm = self._core_service.table_manager
tm.learn_path(new_path)
else:
LOG.debug('prefix : %s is blocked by in-bound filter: %s',
msg_nlri, blocked_cause)
# If update message had any qualifying new paths, do some book-keeping.
if msg_nlri_list:
# Update prefix statistics.
self.state.incr(PeerCounterNames.RECV_PREFIXES,
incr_by=len(msg_nlri_list))
# Check if we exceed max. prefixes allowed for this neighbor.
if self._neigh_conf.exceeds_max_prefix_allowed(
self.state.get_count(PeerCounterNames.RECV_PREFIXES)):
LOG.error('Max. prefix allowed for this neighbor '
'exceeded.') | 0.000796 |
def generate_func(self, table):
"""
Generates a random table based mini-hashing function.
"""
# Ensure that `self` isn't suddenly in the closure...
n = self.n
def func(word):
return sum(x * ord(c) for x, c in zip(table, word)) % n
return func | 0.00639 |
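A standalone illustration of the table-based hash above; the table length and modulus `n` are arbitrary choices here:

import random

n = 128                                          # number of hash buckets (assumed)
table = [random.randint(1, 10**6) for _ in range(32)]

def hash_word(word):
    # Weighted character sum modulo n, as in generate_func's inner closure.
    return sum(x * ord(c) for x, c in zip(table, word)) % n

print(hash_word("spam"), hash_word("eggs"))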
def dtypes(self):
"""
Return the dtypes in the DataFrame.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type of each column.
See Also
--------
DataFrame.ftypes : Dtype and sparsity information.
Examples
--------
>>> df = pd.DataFrame({'float': [1.0],
... 'int': [1],
... 'datetime': [pd.Timestamp('20180310')],
... 'string': ['foo']})
>>> df.dtypes
float float64
int int64
datetime datetime64[ns]
string object
dtype: object
"""
from pandas import Series
return Series(self._data.get_dtypes(), index=self._info_axis,
dtype=np.object_) | 0.001799 |
def _get_filesystem_config(file_types):
"""Retrieve filesystem configuration, including support for specified file types.
"""
out = " filesystems {\n"
for file_type in sorted(list(file_types)):
if file_type in _FILESYSTEM_CONFIG:
out += _FILESYSTEM_CONFIG[file_type]
out += " }\n"
return out | 0.005814 |
def encode(cls, value):
"""
write binary data into redis without encoding it.
:param value: bytes
:return: bytes
"""
try:
coerced = bytes(value)
if coerced == value:
return coerced
except (TypeError, UnicodeError):
pass
raise InvalidValue('not binary') | 0.005405 |
def major_axis_endpoints(self):
"""Return the endpoints of the major axis."""
i = np.argmax(self.axlens) # find the major axis
v = self.paxes[:, i] # vector from center to major axis endpoint
return self.ctr - v, self.ctr + v | 0.007663 |
def deserialize(cls, config, credentials):
"""
A *class method* which reconstructs credentials created by
:meth:`serialize`. You can also pass it a :class:`.Credentials`
instance.
:param dict config:
The same :doc:`config` used in the :func:`.login` to get the
credentials.
:param str credentials:
:class:`string` The serialized credentials or
:class:`.Credentials` instance.
:returns:
:class:`.Credentials`
"""
# Accept both serialized and normal.
if isinstance(credentials, Credentials):
return credentials
decoded = parse.unquote(credentials)
split = decoded.split('\n')
# We need the provider ID to move forward.
if split[0] is None:
raise CredentialsError(
'To deserialize credentials you need to specify a unique '
'integer under the "id" key in the config for each provider!')
# Get provider config by short name.
provider_name = id_to_name(config, int(split[0]))
cfg = config.get(provider_name)
# Get the provider class.
ProviderClass = resolve_provider_class(cfg.get('class_'))
deserialized = Credentials(config)
deserialized.provider_id = int(split[0])
deserialized.provider_type = ProviderClass.get_type()
deserialized.provider_type_id = split[1]
deserialized.provider_class = ProviderClass
deserialized.provider_name = provider_name
# Add provider type specific properties.
return ProviderClass.reconstruct(split[2:], deserialized, cfg) | 0.001148 |
def new_pivot(self, attributes=None, exists=False):
"""
Create a new pivot model instance.
"""
pivot = self._related.new_pivot(self._parent, attributes, self._table, exists)
return pivot.set_pivot_keys(self._foreign_key, self._other_key) | 0.010791 |
def alphafilter(request, queryset, template):
"""
Render the template with the filtered queryset
"""
qs_filter = {}
for key in list(request.GET.keys()):
if '__istartswith' in key:
qs_filter[str(key)] = request.GET[key]
break
return render_to_response(
template,
{'objects': queryset.filter(**qs_filter),
'unfiltered_objects': queryset},
context_instance=RequestContext(request)
) | 0.00211 |
def _query(self, action, qobj):
"""
returns WPToolsQuery string
"""
title = self.params.get('title')
pageid = self.params.get('pageid')
wikibase = self.params.get('wikibase')
qstr = None
if action == 'random':
qstr = qobj.random()
elif action == 'query':
qstr = qobj.query(title, pageid, self._continue_params())
elif action == 'querymore':
qstr = qobj.querymore(title, pageid, self._continue_params())
elif action == 'parse':
qstr = qobj.parse(title, pageid)
elif action == 'imageinfo':
qstr = qobj.imageinfo(self._missing_imageinfo())
elif action == 'labels':
qstr = qobj.labels(self._pop_entities())
elif action == 'wikidata':
qstr = qobj.wikidata(title, wikibase)
elif action == 'restbase':
qstr = qobj.restbase(self.params.get('rest_endpoint'), title)
if qstr is None:
raise ValueError("Unknown action: %s" % action)
return qstr | 0.00185 |
def preprocess_frame(frame):
"""Preprocess frame.
1. Converts [0, 255] to [-0.5, 0.5]
2. Adds uniform noise.
Args:
frame: 3-D Tensor representing pixels.
Returns:
frame: 3-D Tensor with values in between [-0.5, 0.5]
"""
# Normalize from [0.0, 1.0] -> [-0.5, 0.5]
frame = common_layers.convert_rgb_to_real(frame)
frame = frame - 0.5
frame, _ = glow_ops.uniform_binning_correction(frame)
return frame | 0.016279 |
def child_added(self, child):
""" Reset the item cache when a child is added """
super(AbstractWidgetItemGroup, self).child_added(child)
self.get_member('_items').reset(self) | 0.010101 |
def remaining(self):
"""
Get the remaining time-to-live of this lease.
:returns: TTL in seconds.
:rtype: int
"""
if self._expired:
raise Expired()
obj = {
u'ID': self.lease_id,
}
data = json.dumps(obj).encode('utf8')
url = u'{}/v3alpha/kv/lease/timetolive'.format(self._client._url).encode()
response = yield treq.post(url, data, headers=self._client._REQ_HEADERS)
obj = yield treq.json_content(response)
ttl = obj.get(u'TTL', None)
if not ttl:
self._expired = True
raise Expired()
# grantedTTL = int(obj[u'grantedTTL'])
# header = Header._parse(obj[u'header']) if u'header' in obj else None
returnValue(ttl) | 0.005006 |
def _parse_bugs_callback(self, value):
"""
Fires when we get bug information back from the XML-RPC server.
:param value: dict of data from XML-RPC server. The "bugs" dict element
contains a list of bugs.
:returns: ``list`` of ``AttrDict``
"""
return list(map(lambda x: self._parse_bug(x), value['bugs'])) | 0.005376 |
def generate_detail_view(self):
"""Generate class based view for DetailView"""
name = model_class_form(self.model + 'DetailView')
detail_args = dict(
detailview_excludes=self.detailview_excludes,
model=self.get_model_class,
template_name=self.get_template('detail'),
login_required=self.check_login_required,
permissions=self.view_permission('detail'),
inlineformset=self.inlineformset,
permission_required=self.check_permission_required,
custom_postfix_url=self.custom_postfix_url
)
detail_class = type(name, (BaseDetailViewMixin, DetailView), detail_args)
self.classes[name] = detail_class
return detail_class | 0.003922 |
def main(unused_argv):
"""Run the reinforcement learning loop."""
print('Wiping dir %s' % FLAGS.base_dir, flush=True)
shutil.rmtree(FLAGS.base_dir, ignore_errors=True)
dirs = [fsdb.models_dir(), fsdb.selfplay_dir(), fsdb.holdout_dir(),
fsdb.eval_dir(), fsdb.golden_chunk_dir(), fsdb.working_dir()]
for d in dirs:
ensure_dir_exists(d)
# Copy the flag files so there's no chance of them getting accidentally
# overwritten while the RL loop is running.
flags_dir = os.path.join(FLAGS.base_dir, 'flags')
shutil.copytree(FLAGS.flags_dir, flags_dir)
FLAGS.flags_dir = flags_dir
# Copy the target model to the models directory so we can find it easily.
shutil.copy(FLAGS.target_path, os.path.join(fsdb.models_dir(), 'target.pb'))
logging.getLogger().addHandler(
logging.FileHandler(os.path.join(FLAGS.base_dir, 'rl_loop.log')))
formatter = logging.Formatter('[%(asctime)s] %(message)s',
'%Y-%m-%d %H:%M:%S')
for handler in logging.getLogger().handlers:
handler.setFormatter(formatter)
with logged_timer('Total time'):
try:
rl_loop()
finally:
asyncio.get_event_loop().close() | 0.016935 |
def trunc_neg_eigs(self, particle):
"""
Given a state represented as a model parameter vector,
returns a model parameter vector representing the same
state with any negative eigenvalues set to zero.
:param np.ndarray particle: Vector of length ``(dim ** 2, )``
representing a state.
:return: The same state with any negative eigenvalues
set to zero.
"""
arr = np.tensordot(particle, self._basis.data.conj(), 1)
w, v = np.linalg.eig(arr)
if np.all(w >= 0):
return particle
else:
w[w < 0] = 0
new_arr = np.dot(v * w, v.conj().T)
new_particle = np.real(np.dot(self._basis.flat(), new_arr.flatten()))
assert new_particle[0] > 0
return new_particle | 0.003623 |
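The core projection step, illustrated on a plain symmetric matrix independent of the model's basis machinery (the matrix values are invented):

import numpy as np

# A symmetric matrix with one negative eigenvalue.
arr = np.array([[0.6, 0.5],
                [0.5, 0.4]])
w, v = np.linalg.eig(arr)
w[w < 0] = 0                               # clip negative eigenvalues
fixed = np.dot(v * w, v.conj().T)          # rebuild the matrix
print(np.all(np.linalg.eigvalsh(fixed) >= -1e-12))  # True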
def apply_groups(cls, obj, options=None, backend=None, clone=True, **kwargs):
"""Applies nested options definition grouped by type.
Applies options on an object or nested group of objects,
returning a new object with the options applied. This method
accepts the separate option namespaces explicitly (i.e 'plot',
'style' and 'norm').
If the options are to be set directly on the object a
simple format may be used, e.g.:
opts.apply_groups(obj, style={'cmap': 'viridis'},
plot={'show_title': False})
If the object is nested the options must be qualified using
a type[.group][.label] specification, e.g.:
opts.apply_groups(obj, {'Image': {'plot': {'show_title': False},
'style': {'cmap': 'viridis'}}})
If no opts are supplied all options on the object will be reset.
Args:
options (dict): Options specification
Options specification should be indexed by
type[.group][.label] or option type ('plot', 'style',
'norm').
backend (optional): Backend to apply options to
Defaults to current selected backend
clone (bool, optional): Whether to clone object
Options can be applied inplace with clone=False
**kwargs: Keywords of options by type
Applies options directly to the object by type
(e.g. 'plot', 'style', 'norm') specified as
dictionaries.
Returns:
Returns the object or a clone with the options applied
"""
if isinstance(options, basestring):
from ..util.parser import OptsSpec
try:
options = OptsSpec.parse(options)
except SyntaxError:
options = OptsSpec.parse(
'{clsname} {options}'.format(clsname=obj.__class__.__name__,
options=options))
if kwargs:
options = cls._group_kwargs_to_options(obj, kwargs)
for backend, backend_opts in cls._grouped_backends(options, backend):
obj = cls._apply_groups_to_backend(obj, backend_opts, backend, clone)
return obj | 0.001695 |
def enrolled_device_id(self, enrolled_device_id):
"""
Sets the enrolled_device_id of this EnrollmentIdentity.
The ID of the device in the Device Directory once it has been registered.
:param enrolled_device_id: The enrolled_device_id of this EnrollmentIdentity.
:type: str
"""
if enrolled_device_id is None:
raise ValueError("Invalid value for `enrolled_device_id`, must not be `None`")
if enrolled_device_id is not None and not re.search('^[A-Za-z0-9]{32}', enrolled_device_id):
raise ValueError("Invalid value for `enrolled_device_id`, must be a follow pattern or equal to `/^[A-Za-z0-9]{32}/`")
self._enrolled_device_id = enrolled_device_id | 0.009447 |
def convert_surfaces(self, surface_nodes):
"""
Utility to convert a list of surface nodes into a single hazardlib
surface. There are four possibilities:
1. there is a single simpleFaultGeometry node; returns a
:class:`openquake.hazardlib.geo.simpleFaultSurface` instance
2. there is a single complexFaultGeometry node; returns a
:class:`openquake.hazardlib.geo.complexFaultSurface` instance
3. there is a single griddedSurface node; returns a
:class:`openquake.hazardlib.geo.GriddedSurface` instance
4. there is a list of PlanarSurface nodes; returns a
:class:`openquake.hazardlib.geo.MultiSurface` instance
:param surface_nodes: surface nodes as just described
"""
surface_node = surface_nodes[0]
if surface_node.tag.endswith('simpleFaultGeometry'):
surface = geo.SimpleFaultSurface.from_fault_data(
self.geo_line(surface_node),
~surface_node.upperSeismoDepth,
~surface_node.lowerSeismoDepth,
~surface_node.dip,
self.rupture_mesh_spacing)
elif surface_node.tag.endswith('complexFaultGeometry'):
surface = geo.ComplexFaultSurface.from_fault_data(
self.geo_lines(surface_node),
self.complex_fault_mesh_spacing)
elif surface_node.tag.endswith('griddedSurface'):
with context(self.fname, surface_node):
coords = split_coords_3d(~surface_node.posList)
points = [geo.Point(*p) for p in coords]
surface = geo.GriddedSurface.from_points_list(points)
else: # a collection of planar surfaces
planar_surfaces = list(map(self.geo_planar, surface_nodes))
surface = geo.MultiSurface(planar_surfaces)
return surface | 0.001064 |
def generate(self):
"""!
@brief Generates data in line with generator parameters.
"""
data_points = []
for index_cluster in range(self.__amount_clusters):
for _ in range(self.__cluster_sizes[index_cluster]):
point = self.__generate_point(index_cluster)
data_points.append(point)
return data_points | 0.00495 |
def get_min_risk(self, weights, cov_matrix):
"""
Minimizes the variance of a portfolio.
"""
def func(weights):
"""The objective function that minimizes variance."""
return np.matmul(np.matmul(weights.transpose(), cov_matrix), weights)
def func_deriv(weights):
"""The derivative of the objective function."""
return (
np.matmul(weights.transpose(), cov_matrix.transpose()) +
np.matmul(weights.transpose(), cov_matrix)
)
constraints = ({'type': 'eq', 'fun': lambda weights: (weights.sum() - 1)})
solution = self.solve_minimize(func, weights, constraints, func_deriv=func_deriv)
# NOTE: `min_risk` is unused, but may be helpful later.
# min_risk = solution.fun
allocation = solution.x
return allocation | 0.005663 |
def volume_delete(pool, volume, **kwargs):
'''
Delete a libvirt managed volume.
:param pool: libvirt storage pool name
:param volume: name of the volume to delete
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: Neon
CLI Example:
.. code-block:: bash
salt "*" virt.volume_delete <pool> <volume>
'''
conn = __get_conn(**kwargs)
try:
vol = _get_storage_vol(conn, pool, volume)
return not bool(vol.delete())
finally:
conn.close() | 0.001477 |
def validate(self, value, model=None, context=None):
"""
Validate
Perform value validation against validation settings and return
simple result object
:param value: str, value to check
:param model: parent model being validated
:param context: object or None, validation context
:return: shiftschema.results.SimpleResult
"""
length = len(str(value))
params = dict(min=self.min, max=self.max)
# too short?
if self.min and self.max is None:
if length < self.min:
return Error(self.too_short, params)
# too long?
if self.max and self.min is None:
if length > self.max:
return Error(self.too_long, params)
# within range?
if self.min and self.max:
if length < self.min or length > self.max:
return Error(self.not_in_range, params)
# success otherwise
return Error() | 0.001912 |
def db_get_map(self, table, record, column):
"""
Gets dict type value of 'column' in 'record' in 'table'.
This method is corresponding to the following ovs-vsctl command::
$ ovs-vsctl get TBL REC COL
"""
val = self.db_get_val(table, record, column)
assert isinstance(val, dict)
return val | 0.005587 |
def resolve_topic(topic):
"""Return class described by given topic.
Args:
topic: A string describing a class.
Returns:
A class.
Raises:
TopicResolutionError: If there is no such class.
"""
try:
module_name, _, class_name = topic.partition('#')
module = importlib.import_module(module_name)
except ImportError as e:
raise TopicResolutionError("{}: {}".format(topic, e))
try:
cls = resolve_attr(module, class_name)
except AttributeError as e:
raise TopicResolutionError("{}: {}".format(topic, e))
return cls | 0.001634 |
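A quick illustration with a standard-library class, assuming `resolve_attr` performs (possibly dotted) attribute lookup on the imported module:

# "module#attr" form: the OrderedDict class from the collections module.
cls = resolve_topic("collections#OrderedDict")
d = cls(a=1, b=2)
print(type(d).__name__)  # OrderedDict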
def direct_mode_cluster_role_env(cluster_role_env, config_path):
"""Check cluster/[role]/[environ], if they are required"""
# otherwise, get the client.yaml file
cli_conf_file = os.path.join(config_path, CLIENT_YAML)
# if client conf doesn't exist, use default value
if not os.path.isfile(cli_conf_file):
return True
client_confs = {}
with open(cli_conf_file, 'r') as conf_file:
client_confs = yaml.load(conf_file)
# the return value of yaml.load can be None if conf_file is an empty file
if not client_confs:
return True
# if role is required but not provided, raise exception
role_present = True if len(cluster_role_env[1]) > 0 else False
if ROLE_REQUIRED in client_confs and client_confs[ROLE_REQUIRED] and not role_present:
raise Exception("role required but not provided (cluster/role/env = %s). See %s in %s"
% (cluster_role_env, ROLE_REQUIRED, cli_conf_file))
# if environ is required but not provided, raise exception
environ_present = True if len(cluster_role_env[2]) > 0 else False
if ENV_REQUIRED in client_confs and client_confs[ENV_REQUIRED] and not environ_present:
raise Exception("environ required but not provided (cluster/role/env = %s). See %s in %s"
% (cluster_role_env, ENV_REQUIRED, cli_conf_file))
return True | 0.011791 |
def bulleted_list(items, indent=0, bullet_type='-'):
"""Format a bulleted list of values.
Parameters
----------
items : sequence
The items to make a list.
indent : int, optional
The number of spaces to add before each bullet.
bullet_type : str, optional
The bullet type to use.
Returns
-------
formatted_list : str
The formatted list as a single string.
"""
format_string = ' ' * indent + bullet_type + ' {}'
return "\n".join(map(format_string.format, items)) | 0.001848 |
def get_item(key):
"""Return content in cached file in JSON format"""
CACHED_KEY_FILE = os.path.join(CURRENT_DIR, key)
try:
return json.loads(open(CACHED_KEY_FILE, "rb").read().decode('UTF-8'))["_"]
except (IOError, ValueError):
return None | 0.007143 |
def close_database_session(session):
"""Close connection with the database"""
try:
session.close()
except OperationalError as e:
raise DatabaseError(error=e.orig.args[1], code=e.orig.args[0]) | 0.004545 |
def is_analysis_edition_allowed(self, analysis_brain):
"""Returns if the analysis passed in can be edited by the current user
:param analysis_brain: Brain that represents an analysis
:return: True if the user can edit the analysis, otherwise False
"""
if not self.context_active:
# The current context must be active. We cannot edit analyses from
# inside a deactivated Analysis Request, for instance
return False
analysis_obj = api.get_object(analysis_brain)
if analysis_obj.getPointOfCapture() == 'field':
# This analysis must be captured on field, during sampling.
if not self.has_permission(EditFieldResults, analysis_obj):
# Current user cannot edit field analyses.
return False
elif not self.has_permission(EditResults, analysis_obj):
# The Point of Capture is 'lab' and the current user cannot edit
# lab analyses.
return False
# Check if the user is allowed to enter a value to to Result field
if not self.has_permission(FieldEditAnalysisResult, analysis_obj):
return False
# Is the instrument out of date?
# The user can assign a result to the analysis if it does not have any
# instrument assigned or the instrument assigned is valid.
if not self.is_analysis_instrument_valid(analysis_brain):
# return if it is allowed to enter a manual result
return analysis_obj.getManualEntryOfResults()
return True | 0.001249 |
def createValidationDataSampler(dataset, ratio):
"""
Create `torch.utils.data.Sampler`s used to split the dataset into 2 randomly
sampled subsets. The first should be used for training and the second for
validation.
:param dataset: A valid torch.utils.data.Dataset (i.e. torchvision.datasets.MNIST)
:param ratio: The percentage of the dataset to be used for training. The
remaining (1-ratio)% will be used for validation
:return: tuple with 2 torch.utils.data.Sampler. (train, validate)
"""
indices = np.random.permutation(len(dataset))
training_count = int(len(indices) * ratio)
train = torch.utils.data.SubsetRandomSampler(indices=indices[:training_count])
validate = torch.utils.data.SubsetRandomSampler(indices=indices[training_count:])
return (train, validate) | 0.012453 |
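A minimal sketch of wiring the two samplers into DataLoaders, using a synthetic TensorDataset instead of MNIST so the example stays self-contained:

import torch
from torch.utils.data import TensorDataset, DataLoader

dataset = TensorDataset(torch.randn(100, 10), torch.randint(0, 2, (100,)))
train_sampler, val_sampler = createValidationDataSampler(dataset, ratio=0.8)

train_loader = DataLoader(dataset, batch_size=16, sampler=train_sampler)
val_loader = DataLoader(dataset, batch_size=16, sampler=val_sampler)
print(len(train_sampler), len(val_sampler))  # 80 20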
def from_hdf(cls, filename):
"""Load camera model params from a HDF5 file
The HDF5 file should contain the following datasets:
wc : (2,) float with distortion center
lgamma : float distortion parameter
readout : float readout value
size : (2,) int image size
fps : float frame rate
K : (3, 3) float camera matrix
Parameters
--------------------
filename : str
Path to file with parameters
Returns
---------------------
AtanCameraModel
Camera model instance
"""
import h5py
with h5py.File(filename, 'r') as f:
wc = f["wc"].value
lgamma = f["lgamma"].value
K = f["K"].value
readout = f["readout"].value
image_size = f["size"].value
fps = f["fps"].value
instance = cls(image_size, fps, readout, K, wc, lgamma)
return instance | 0.00199 |
def mark(self, channel_name, ts):
""" https://api.slack.com/methods/channels.mark
"""
channel_id = self.get_channel_id(channel_name)
self.params.update({
'channel': channel_id,
'ts': ts,
})
return FromUrl('https://slack.com/api/channels.mark', self._requests)(data=self.params).post() | 0.008174 |
def modulate_data(self, buffer: np.ndarray) -> np.ndarray:
"""
:param buffer: Buffer in which the modulated data shall be written, initialized with zeros
:return:
"""
self.ui.prBarGeneration.show()
self.ui.prBarGeneration.setValue(0)
self.ui.prBarGeneration.setMaximum(self.table_model.row_count)
self.modulation_msg_indices.clear()
pos = 0
for i in range(0, self.table_model.row_count):
message = self.table_model.protocol.messages[i]
modulator = self.__get_modulator_of_message(message)
# We do not need to modulate the pause extra, as result is already initialized with zeros
modulated = modulator.modulate(start=0, data=message.encoded_bits, pause=0)
buffer[pos:pos + len(modulated)] = modulated
pos += len(modulated) + message.pause
self.modulation_msg_indices.append(pos)
self.ui.prBarGeneration.setValue(i + 1)
QApplication.instance().processEvents()
self.ui.prBarGeneration.hide()
return buffer | 0.006261 |
def status(institute_id, case_name):
"""Update status of a specific case."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
user_obj = store.user(current_user.email)
status = request.form.get('status', case_obj['status'])
link = url_for('.case', institute_id=institute_id, case_name=case_name)
if status == 'archive':
store.archive_case(institute_obj, case_obj, user_obj, status, link)
else:
store.update_status(institute_obj, case_obj, user_obj, status, link)
return redirect(request.referrer) | 0.003478 |
def run_shell_command(
state, host, command,
get_pty=False, timeout=None, print_output=False,
**command_kwargs
):
'''
Execute a command on the specified host.
Args:
state (``pyinfra.api.State`` obj): state object for this command
hostname (string): hostname of the target
command (string): actual command to execute
sudo (boolean): whether to wrap the command with sudo
sudo_user (string): user to sudo to
get_pty (boolean): whether to get a PTY before executing the command
env (dict): envrionment variables to set
timeout (int): timeout for this command to complete before erroring
Returns:
tuple: (exit_code, stdout, stderr)
stdout and stderr are both lists of strings from each buffer.
'''
command = make_command(command, **command_kwargs)
logger.debug('Running command on {0}: (pty={1}) {2}'.format(
host.name, get_pty, command,
))
if print_output:
print('{0}>>> {1}'.format(host.print_prefix, command))
# Run it! Get stdout, stderr & the underlying channel
_, stdout_buffer, stderr_buffer = host.connection.exec_command(
command,
get_pty=get_pty,
)
channel = stdout_buffer.channel
# Iterate through outputs to get an exit status and generate desired list
# output, done in two greenlets so stdout isn't printed before stderr. Not
# attached to state.pool to avoid blocking it with 2x n-hosts greenlets.
stdout_reader = gevent.spawn(
read_buffer, stdout_buffer,
print_output=print_output,
print_func=lambda line: '{0}{1}'.format(host.print_prefix, line),
)
stderr_reader = gevent.spawn(
read_buffer, stderr_buffer,
print_output=print_output,
print_func=lambda line: '{0}{1}'.format(
host.print_prefix, click.style(line, 'red'),
),
)
# Wait on output, with our timeout (or None)
greenlets = gevent.wait((stdout_reader, stderr_reader), timeout=timeout)
# Timeout doesn't raise an exception, but gevent.wait returns the greenlets
# which did complete. So if both haven't completed, we kill them and fail
# with a timeout.
if len(greenlets) != 2:
stdout_reader.kill()
stderr_reader.kill()
raise timeout_error()
# Read the buffers into a list of lines
stdout = stdout_reader.get()
stderr = stderr_reader.get()
logger.debug('Waiting for exit status...')
exit_status = channel.recv_exit_status()
logger.debug('Command exit status: {0}'.format(exit_status))
return exit_status == 0, stdout, stderr | 0.000377 |
def send_sticker(self, chat_id, data, reply_to_message_id=None, reply_markup=None, disable_notification=None,
timeout=None):
"""
Use this method to send .webp stickers.
:param chat_id:
:param data:
:param reply_to_message_id:
:param reply_markup:
:return: API reply.
"""
return types.Message.de_json(
apihelper.send_data(self.token, chat_id, data, 'sticker', reply_to_message_id, reply_markup,
disable_notification, timeout)) | 0.008897 |
async def open(self) -> None:
"""
Begin the search operation.
"""
LOGGER.debug('StorageRecordSearch.open >>>')
if self.opened:
LOGGER.debug('StorageRecordSearch.open <!< Search is already opened')
raise BadSearch('Search is already opened')
if not self._wallet.opened:
LOGGER.debug('StorageRecordSearch.open <!< Wallet %s is closed', self._wallet.name)
raise WalletState('Wallet {} is closed'.format(self._wallet.name))
self._handle = await non_secrets.open_wallet_search(
self._wallet.handle,
self._type,
self._query_json,
StorageRecordSearch.OPTIONS_JSON)
LOGGER.debug('StorageRecordSearch.open <<<') | 0.005215 |
def parseCapabilities(self, capdict):
"""Parse a capabilities dictionary and adjust instance settings.
At the time this function is called, the user has requested some
settings (e.g., mode identifier), but we haven't yet asked the reader
whether those requested settings are within its capabilities. This
function's job is to parse the reader's capabilities, compare them
against any requested settings, and raise an error if there are any
incompatibilities.
Sets the following instance variables:
- self.antennas (list of antenna numbers, e.g., [1] or [1, 2])
- self.tx_power_table (list of dBm values)
- self.reader_mode (dictionary of mode settings, e.g., Tari)
Raises ReaderConfigurationError if the requested settings are not
within the reader's capabilities.
"""
# check requested antenna set
gdc = capdict['GeneralDeviceCapabilities']
max_ant = gdc['MaxNumberOfAntennaSupported']
if max(self.antennas) > max_ant:
reqd = ','.join(map(str, self.antennas))
avail = ','.join(map(str, range(1, max_ant + 1)))
errmsg = ('Invalid antenna set specified: requested={},'
' available={}; ignoring invalid antennas'.format(
reqd, avail))
raise ReaderConfigurationError(errmsg)
logger.debug('set antennas: %s', self.antennas)
# parse available transmit power entries, set self.tx_power
bandcap = capdict['RegulatoryCapabilities']['UHFBandCapabilities']
self.tx_power_table = self.parsePowerTable(bandcap)
logger.debug('tx_power_table: %s', self.tx_power_table)
self.setTxPower(self.tx_power)
# parse list of reader's supported mode identifiers
regcap = capdict['RegulatoryCapabilities']
modes = regcap['UHFBandCapabilities']['UHFRFModeTable']
mode_list = [modes[k] for k in sorted(modes.keys(), key=natural_keys)]
# select a mode by matching available modes to requested parameters
if self.mode_identifier is not None:
logger.debug('Setting mode from mode_identifier=%s',
self.mode_identifier)
try:
mode = [mo for mo in mode_list
if mo['ModeIdentifier'] == self.mode_identifier][0]
self.reader_mode = mode
except IndexError:
valid_modes = sorted(mo['ModeIdentifier'] for mo in mode_list)
errstr = ('Invalid mode_identifier; valid mode_identifiers'
' are {}'.format(valid_modes))
raise ReaderConfigurationError(errstr)
# if we're trying to set Tari explicitly, but the selected mode doesn't
# support the requested Tari, that's a configuration error.
if self.reader_mode and self.tari:
if self.reader_mode['MinTari'] < self.tari < self.reader_mode['MaxTari']:
logger.debug('Overriding mode Tari %s with requested Tari %s',
self.reader_mode['MaxTari'], self.tari)
else:
errstr = ('Requested Tari {} is incompatible with selected '
'mode {}'.format(self.tari, self.reader_mode))
logger.info('using reader mode: %s', self.reader_mode) | 0.000882 |
def _copy_each_include_files_to_include_dir(self):
"""Copy include header files for each directory to include directory.
Copy include header files
from
rpm/
rpmio/*.h
lib/*.h
build/*.h
sign/*.h
to
rpm/
include/
rpm/*.h
.
This is a status after running "make" on actual rpm build process.
"""
src_header_dirs = [
'rpmio',
'lib',
'build',
'sign',
]
with Cmd.pushd('..'):
src_include_dir = os.path.abspath('./include')
for header_dir in src_header_dirs:
if not os.path.isdir(header_dir):
message_format = "Skip not existing header directory '{0}'"
Log.debug(message_format.format(header_dir))
continue
header_files = Cmd.find(header_dir, '*.h')
for header_file in header_files:
pattern = '^{0}/'.format(header_dir)
(dst_header_file, subs_num) = re.subn(pattern,
'', header_file)
if subs_num == 0:
message = 'Failed to replace header_file: {0}'.format(
header_file)
raise ValueError(message)
dst_header_file = os.path.abspath(
os.path.join(src_include_dir, 'rpm', dst_header_file)
)
dst_dir = os.path.dirname(dst_header_file)
if not os.path.isdir(dst_dir):
Cmd.mkdir_p(dst_dir)
shutil.copyfile(header_file, dst_header_file) | 0.001083 |
def window(self, windowNum=0):
""" Returns the region corresponding to the specified window of the app.
Defaults to the first window found for the corresponding PID.
"""
if self._pid == -1:
return None
x,y,w,h = PlatformManager.getWindowRect(PlatformManager.getWindowByPID(self._pid, windowNum))
return Region(x,y,w,h).clipRegionToScreen() | 0.025 |
def memory_read32(self, addr, num_words, zone=None):
"""Reads memory from the target system in units of 32-bits.
Args:
self (JLink): the ``JLink`` instance
addr (int): start address to read from
num_words (int): number of words to read
zone (str): memory zone to read from
Returns:
List of words read from the target system.
Raises:
JLinkException: if memory could not be read
"""
return self.memory_read(addr, num_words, zone=zone, nbits=32) | 0.003617 |
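A minimal usage sketch (the address and word count are illustrative; assumes an already-connected pylink JLink instance named jlink):
# Hypothetical sketch: read four 32-bit words starting at 0x20000000.
base_addr = 0x20000000
words = jlink.memory_read32(base_addr, 4)
for i, word in enumerate(words):
    print('0x{:08X}: 0x{:08X}'.format(base_addr + 4 * i, word))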
def form_valid(self, form):
""" Handles a valid form. """
# Move the topic
topic = self.object
old_forum = topic.forum
new_forum = form.cleaned_data['forum']
topic.forum = new_forum
        # Lock the topic if requested
if form.cleaned_data['lock_topic']:
topic.status = Topic.TOPIC_LOCKED
else:
topic.status = Topic.TOPIC_MOVED
topic.save()
old_forum.save()
messages.success(self.request, self.success_message)
return HttpResponseRedirect(self.get_success_url()) | 0.003425 |
def delete_last_line(self, file_path='', date=str(datetime.date.today())):
"""
The following code was modified from
http://stackoverflow.com/a/10289740 &
http://stackoverflow.com/a/17309010
It essentially will check if the total for the current date already
exists in total.csv. If it does, it just removes the last line.
This is so the script could be run more than once a day and not
create many entries in the total.csv file for the same date.
"""
deleted_line = False
if os.path.isfile(file_path):
with open(file_path, 'r+') as file:
reader = csv.reader(file, delimiter=',')
for row in reader:
if date == row[0]:
file.seek(0, os.SEEK_END)
pos = file.tell() - 1
while pos > 0 and file.read(1) != "\n":
pos -= 1
file.seek(pos, os.SEEK_SET)
if pos > 0:
file.seek(pos, os.SEEK_SET)
file.truncate()
deleted_line = True
break
            if deleted_line:
                file.write('\n') | 0.002283
def relocate(source, destination, move=False):
"""Adjust the virtual environment settings and optional move it.
Args:
source (str): Path to the existing virtual environment.
destination (str): Desired path of the virtual environment.
move (bool): Whether or not to actually move the files. Default False.
"""
venv = api.VirtualEnvironment(source)
if not move:
venv.relocate(destination)
return None
venv.move(destination)
return None | 0.001984 |
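A usage sketch (paths are illustrative):
# relocate() rewrites the environment's paths in place; move=True also
# physically moves the files to the new location.
relocate('/srv/venvs/app', '/opt/venvs/app')
relocate('/srv/venvs/app', '/opt/venvs/app', move=True)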
def previous(task):
"""
    Return the previous Task of the same family.
    Checks that the task has exactly one date-related parameter (DateParameter,
    DateSecondParameter, DateMinuteParameter, DateHourParameter or
    DateIntervalParameter) and returns a clone of the task with that parameter
    decremented by one unit (day, second, minute, hour or interval); all other
    parameters are carried over unchanged.
"""
params = task.get_params()
previous_params = {}
previous_date_params = {}
for param_name, param_obj in params:
param_value = getattr(task, param_name)
if isinstance(param_obj, parameter.DateParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(days=1)
elif isinstance(param_obj, parameter.DateSecondParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(seconds=1)
elif isinstance(param_obj, parameter.DateMinuteParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(minutes=1)
elif isinstance(param_obj, parameter.DateHourParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(hours=1)
elif isinstance(param_obj, parameter.DateIntervalParameter):
previous_date_params[param_name] = param_value.prev()
else:
previous_params[param_name] = param_value
previous_params.update(previous_date_params)
if len(previous_date_params) == 0:
raise NotImplementedError("No task parameter - can't determine previous task")
elif len(previous_date_params) > 1:
raise NotImplementedError("Too many date-related task parameters - can't determine previous task")
else:
return task.clone(**previous_params) | 0.005307 |
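A minimal usage sketch of the helper above (the task class and date are illustrative; assumes the luigi package is installed):
import datetime
import luigi

class DailyReport(luigi.Task):
    # Hypothetical task with a single date-typed parameter.
    date = luigi.DateParameter()

task = DailyReport(date=datetime.date(2024, 1, 2))
prev_task = previous(task)  # DailyReport with date=2024-01-01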
def draw(self, labels):
"""
Draw the silhouettes for each sample and the average score.
Parameters
----------
labels : array-like
An array with the cluster label for each silhouette sample,
usually computed with ``predict()``. Labels are not stored on the
visualizer so that the figure can be redrawn with new data.
"""
# Track the positions of the lines being drawn
y_lower = 10 # The bottom of the silhouette
# Get the colors from the various properties
# TODO: Use resolve_colors instead of this
colors = color_palette(self.colormap, self.n_clusters_)
# For each cluster, plot the silhouette scores
for idx in range(self.n_clusters_):
            # Collect silhouette scores for samples in the current cluster.
values = self.silhouette_samples_[labels == idx]
values.sort()
# Compute the size of the cluster and find upper limit
size = values.shape[0]
y_upper = y_lower + size
color = colors[idx]
self.ax.fill_betweenx(
np.arange(y_lower, y_upper), 0, values,
facecolor=color, edgecolor=color, alpha=0.5
)
# Label the silhouette plots with their cluster numbers
self.ax.text(-0.05, y_lower + 0.5 * size, str(idx))
# Compute the new y_lower for next plot
y_lower = y_upper + 10
# The vertical line for average silhouette score of all the values
self.ax.axvline(
x=self.silhouette_score_, color="red", linestyle="--"
)
return self.ax | 0.001757 |
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name | 0.014815 |
def decrypt(data, digest=True):
"""Decrypt provided data."""
alg, _, data = data.rpartition("$")
if not alg:
return data
data = _from_hex_digest(data) if digest else data
try:
return implementations["decryption"][alg](
data, implementations["get_key"]()
)
except KeyError:
raise CryptError("Can not decrypt key for algorithm: %s" % alg) | 0.002475 |
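A small behaviour sketch (algorithm names and payloads below are placeholders, not the module's real registry keys):
# Input without a "$" separator carries no algorithm prefix and is returned
# unchanged; encrypted payloads look like "<alg>$<hex digest>" and dispatch
# to implementations["decryption"][alg].
assert decrypt("plain-value") == "plain-value"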
def _parse_kexgss_continue(self, m):
"""
Parse the SSH2_MSG_KEXGSS_CONTINUE message.
:param `.Message` m: The content of the SSH2_MSG_KEXGSS_CONTINUE
message
"""
if not self.transport.server_mode:
srv_token = m.get_string()
m = Message()
m.add_byte(c_MSG_KEXGSS_CONTINUE)
m.add_string(
self.kexgss.ssh_init_sec_context(
target=self.gss_host, recv_token=srv_token
)
)
self.transport.send_message(m)
self.transport._expect_packet(
MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE, MSG_KEXGSS_ERROR
)
else:
pass | 0.002714 |
def get_tweepy_auth(twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret):
"""Make a tweepy auth object"""
auth = tweepy.OAuthHandler(twitter_api_key, twitter_api_secret)
auth.set_access_token(twitter_access_token, twitter_access_token_secret)
return auth | 0.00274 |
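Typical use (placeholder credentials, not real keys):
import tweepy

auth = get_tweepy_auth('API_KEY', 'API_SECRET', 'ACCESS_TOKEN', 'ACCESS_SECRET')
api = tweepy.API(auth)  # hand the handler to tweepy for authenticated calls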
def get_resource_links(self, item):
"""Hook for adding links to a resource object."""
if self.opts.self_url:
ret = self.dict_class()
kwargs = resolve_params(item, self.opts.self_url_kwargs or {})
ret['self'] = self.generate_url(self.opts.self_url, **kwargs)
return ret
return None | 0.005682 |
def execute_predict_task(self, task_inst, predict_data, **kwargs):
"""
Do a prediction
task_inst - instance of a task
"""
result = task_inst.predict(predict_data, **task_inst.args)
return result | 0.008264 |
def expectation(P, obs):
r"""Equilibrium expectation of given observable.
Parameters
----------
P : (M, M) ndarray
Transition matrix
obs : (M,) ndarray
Observable, represented as vector on state space
Returns
-------
x : float
Expectation value
"""
pi = statdist(P)
return np.dot(pi, obs) | 0.002786 |
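A worked example for a two-state chain (relies on the module's statdist helper used above):
import numpy as np

# Stationary distribution of P is pi = [2/3, 1/3], so the expectation of
# obs = [1.0, 4.0] is 2/3 * 1.0 + 1/3 * 4.0 = 2.0.
P = np.array([[0.9, 0.1],
              [0.2, 0.8]])
obs = np.array([1.0, 4.0])
print(expectation(P, obs))  # -> 2.0 (up to floating point)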
def can_access_objective_hierarchy(self):
"""Tests if this user can perform hierarchy queries.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a PermissionDenied. This is intended as a
hint to an an application that may not offer traversal functions
to unauthorized users.
return: (boolean) - false if hierarchy traversal methods are not
authorized, true otherwise
compliance: mandatory - This method must be implemented.
"""
url_path = construct_url('authorization',
bank_id=self._catalog_idstr)
return self._get_request(url_path)['objectiveHierarchyHints']['canAccessHierarchy'] | 0.00365 |
def _flatten(iterable):
"""
Given an iterable with nested iterables, generate a flat iterable
"""
for i in iterable:
if isinstance(i, Iterable) and not isinstance(i, string_types):
for sub_i in _flatten(i):
yield sub_i
else:
yield i | 0.003289 |
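A behaviour sketch (assumes Iterable and string_types are imported in the enclosing module, e.g. from collections.abc and six):
# Strings are treated as atoms rather than as iterables of characters.
print(list(_flatten([1, [2, [3, 'abc']], (4, 5)])))  # -> [1, 2, 3, 'abc', 4, 5]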
def get_all_polymorphic_return(self) -> bool:
""" For now, polymorphic return type are handle by symbol artefact.
--> possible multi-polymorphic but with different constraint attached!
"""
lst = []
for s in self.values():
if hasattr(s, 'tret') and s.tret.is_polymorphic:
# encapsulate s into a EvalCtx for meta-var resolution
lst.append(EvalCtx.from_sig(s))
rscope = Scope(sig=lst, state=StateScope.LINKED, is_namespace=False)
# inherit type/translation from parent
rscope.set_parent(self)
return rscope | 0.003226 |
def make_objects(self, meta, data, related_fields=None):
'''Generator of :class:`stdnet.odm.StdModel` instances with data
from database.
:parameter meta: instance of model :class:`stdnet.odm.Metaclass`.
:parameter data: iterator over instances data.
'''
make_object = meta.make_object
related_data = []
if related_fields:
for fname, fdata in iteritems(related_fields):
field = meta.dfields[fname]
if field in meta.multifields:
related = dict(fdata)
multi = True
else:
multi = False
relmodel = field.relmodel
related = dict(((obj.id, obj) for obj in
self.make_objects(relmodel._meta, fdata)))
related_data.append((field, related, multi))
for state in data:
instance = make_object(state, self)
for field, rdata, multi in related_data:
if multi:
field.set_cache(instance, rdata.get(str(instance.id)))
else:
rid = getattr(instance, field.attname, None)
if rid is not None:
value = rdata.get(rid)
setattr(instance, field.name, value)
yield instance | 0.001426 |
def want_host_notification(self, timperiods, timestamp,
state, n_type, business_impact, cmd=None):
# pylint: disable=too-many-return-statements
"""Check if notification options match the state of the host
Notification is NOT wanted in ONE of the following case::
* host notifications are disabled
* cmd is not in host_notification_commands
* business_impact < self.min_business_impact
* host_notification_period is not valid
        * state does not match host_notification_options for problem, recovery, flapping and downtime
:param timestamp: time we want to notify the contact (usually now)
:type timestamp: int
:param state: host or service state ("WARNING", "CRITICAL" ..)
:type state: str
:param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
:type n_type: str
:param business_impact: impact of this service
:type business_impact: int
:param cmd: command launched to notify the contact
:type cmd: str
:return: True if no condition is matched, otherwise False
:rtype: bool
TODO: Simplify function
"""
if not self.host_notifications_enabled:
return False
# If the business_impact is not high enough, we bail out
if business_impact < self.min_business_impact:
return False
# Maybe the command we ask for are not for us, but for another notification ways
# on the same contact. If so, bail out
if cmd and cmd not in self.host_notification_commands:
return False
notif_period = timperiods[self.host_notification_period]
in_notification_period = notif_period.is_time_valid(timestamp)
if 'n' in self.host_notification_options:
return False
if in_notification_period:
short_states = {
u'DOWN': 'd', u'UNREACHABLE': 'u', u'RECOVERY': 'r',
u'FLAPPING': 'f', u'DOWNTIME': 's'
}
if n_type == u'PROBLEM' and state in short_states:
return short_states[state] in self.host_notification_options
if n_type == u'RECOVERY' and n_type in short_states:
return short_states[n_type] in self.host_notification_options
if n_type == u'ACKNOWLEDGEMENT':
return in_notification_period
if n_type in (u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED'):
return 'f' in self.host_notification_options
if n_type in (u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED'):
return 's' in self.host_notification_options
return False | 0.00291 |
def mute(self):
"""get/set the current mute state"""
response = self.rendering_control.GetMute(InstanceID=1, Channel=1)
return response.CurrentMute == 1 | 0.011364 |
def AddTableColumns(self, table, columns):
"""Add columns to table if they are not already there.
Args:
table: table name as a string
columns: an iterable of column names"""
table_columns = self._table_columns.setdefault(table, [])
for attr in columns:
if attr not in table_columns:
table_columns.append(attr) | 0.005682 |
def keep_indels(mut_df,
indel_len_col=True,
indel_type_col=True):
"""Filters out all mutations that are not indels.
Requires that one of the alleles have '-' indicating either an insertion
or deletion depending if found in reference allele or somatic allele
columns, respectively.
Parameters
----------
mut_df : pd.DataFrame
mutation input file as a dataframe in standard format
    indel_len_col : bool
        whether or not to add a column indicating the length of the indel
    indel_type_col : bool
        whether or not to add a column indicating if the indel is an
        insertion (INS) or a deletion (DEL)
    Returns
    -------
    mut_df : pd.DataFrame
        mutations with only indels kept
    """
    # keep only indels
mut_df = mut_df[is_indel_annotation(mut_df)]
if indel_len_col:
# calculate length
mut_df.loc[:, 'indel len'] = compute_indel_length(mut_df)
if indel_type_col:
is_ins = mut_df['Reference_Allele']=='-'
is_del = mut_df['Tumor_Allele']=='-'
mut_df['indel type'] = ''
mut_df.loc[is_ins, 'indel type'] = 'INS'
mut_df.loc[is_del, 'indel type'] = 'DEL'
return mut_df | 0.002674 |
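A small illustrative frame (assumes the module's is_indel_annotation and compute_indel_length helpers, not shown here, flag rows where either allele is '-'):
import pandas as pd

# Row 0 is an insertion, row 2 a deletion; row 1 is a substitution and is dropped.
mut_df = pd.DataFrame({'Reference_Allele': ['-', 'A', 'ACT'],
                       'Tumor_Allele': ['TT', 'G', '-']})
indels = keep_indels(mut_df)
print(indels['indel type'].tolist())  # -> ['INS', 'DEL']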
def road(self):
"""
:example 도움5로
"""
pattern = self.random_element(self.road_formats)
return self.generator.parse(pattern) | 0.01227 |
def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args):
'''
Enable a server:port member of a servicegroup
CLI Example:
.. code-block:: bash
salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort'
'''
ret = True
server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args)
if server is None:
return False
nitro = _connect(**connection_args)
if nitro is None:
return False
try:
NSServiceGroup.enable_server(nitro, server)
except NSNitroError as error:
log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error)
ret = False
_disconnect(nitro)
return ret | 0.005284 |
def verify_editor(self):
"""Verify if the software used in the changeset is a powerfull_editor.
"""
powerful_editors = [
'josm', 'level0', 'merkaartor', 'qgis', 'arcgis', 'upload.py',
'osmapi', 'Services_OpenStreetMap'
]
if self.editor is not None:
for editor in powerful_editors:
if editor in self.editor.lower():
self.powerfull_editor = True
break
if 'iD' in self.editor:
trusted_hosts = [
'www.openstreetmap.org/id',
'www.openstreetmap.org/edit',
'improveosm.org',
'strava.github.io/iD',
'preview.ideditor.com/release',
'preview.ideditor.com/master',
'hey.mapbox.com/iD-internal',
'projets.pavie.info/id-indoor',
'maps.mapcat.com/edit',
'id.softek.ir'
]
if self.host.split('://')[-1].strip('/') not in trusted_hosts:
self.label_suspicious('Unknown iD instance')
else:
self.powerfull_editor = True
self.label_suspicious('Software editor was not declared') | 0.001527 |
def create_command(
principal, permissions, endpoint_plus_path, notify_email, notify_message
):
"""
Executor for `globus endpoint permission create`
"""
if not principal:
raise click.UsageError("A security principal is required for this command")
endpoint_id, path = endpoint_plus_path
principal_type, principal_val = principal
client = get_client()
if principal_type == "identity":
principal_val = maybe_lookup_identity_id(principal_val)
if not principal_val:
raise click.UsageError(
"Identity does not exist. "
"Use --provision-identity to auto-provision an identity."
)
elif principal_type == "provision-identity":
principal_val = maybe_lookup_identity_id(principal_val, provision=True)
principal_type = "identity"
if not notify_email:
notify_message = None
rule_data = assemble_generic_doc(
"access",
permissions=permissions,
principal=principal_val,
principal_type=principal_type,
path=path,
notify_email=notify_email,
notify_message=notify_message,
)
res = client.add_endpoint_acl_rule(endpoint_id, rule_data)
formatted_print(
res,
text_format=FORMAT_TEXT_RECORD,
fields=[("Message", "message"), ("Rule ID", "access_id")],
) | 0.001445 |
def merge(var1, var2):
"""
Take two copies of a variable and reconcile them. var1 is assumed
to be the higher-level variable, and so will be overridden by var2
where such becomes necessary.
"""
out = {}
out['value'] = var2.get('value', var1.get('value', None))
out['mimetype'] = var2.get('mimetype', var1.get('mimetype', None))
out['types'] = var2.get('types') + [x for x in var1.get('types') if x not in var2.get('types')]
out['optional'] = var2.get('optional', var1.get('optional', False))
    out['filename'] = var2.get('filename', var1.get('filename', None))
return Variable(var1.default_type, **out) | 0.003086 |
def patched(f):
"""Patches a given API function to not send."""
def wrapped(*args, **kwargs):
kwargs['return_response'] = False
kwargs['prefetch'] = True
return f(*args, **kwargs)
return wrapped | 0.004274 |
def setmonitor(self, enable=True):
"""Alias for setmode('monitor') or setmode('managed')
Only available with Npcap"""
# We must reset the monitor cache
if enable:
res = self.setmode('monitor')
else:
res = self.setmode('managed')
if not res:
log_runtime.error("Npcap WlanHelper returned with an error code !")
self.cache_mode = None
tmp = self.cache_mode = self.ismonitor()
return tmp if enable else (not tmp) | 0.003876 |
def a_star_search(start, successors, state_value, is_goal):
"""
This is a searching function of A*
"""
if is_goal(start):
return [start]
explored = []
g = 1
h = state_value(start)
f = g + h
p = [start]
frontier = [(f, g, h, p)]
while frontier:
f, g, h, path = frontier.pop(0)
s = path[-1]
for (action, state) in successors(s, path_actions(path)[-1] if len(path) != 1 else []):
if state not in explored:
explored.append(state)
path2 = path + [action, state]
h2 = state_value(state)
g2 = g + 1
f2 = h2 + g2
if is_goal(state):
return path2
else:
frontier.append((f2, g2, h2, path2))
frontier.sort(key=lambda x:x[:3])
return [] | 0.003382 |
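A toy usage sketch (the successors, heuristic and goal test are hypothetical; also assumes the module's path_actions helper, which extracts the action elements of a path):
# Toy problem: reach 5 from 0 using +1 / +2 moves.
successors = lambda state, _last_action: [('+1', state + 1), ('+2', state + 2)]
state_value = lambda state: max(0, 5 - state)   # admissible heuristic
is_goal = lambda state: state == 5
print(a_star_search(0, successors, state_value, is_goal))
# -> [0, '+2', 2, '+2', 4, '+1', 5]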
def next_chunk(self):
"""
Read a chunk of arbitrary size from the underlying iterator. To get a
chunk of an specific size, use read()
"""
if self._unconsumed:
data = self._unconsumed.pop()
else:
data = self._iterator.next() # Might raise StopIteration
self._pos += len(data)
return data | 0.005333 |
def add_position(self, radial=False, chunksize=2**19, chunkslice='bytes',
comp_filter=default_compression, overwrite=False,
params=dict()):
"""Add the `position` array in '/trajectories'.
"""
nparams = self.numeric_params
num_particles = nparams['np']
name, ncoords, prefix = 'position', 3, 'X-Y-Z'
if radial:
name, ncoords, prefix = 'position_rz', 2, 'R-Z'
title = '%s position trace of each particle' % prefix
return self.add_trajectory(name, shape=(num_particles, ncoords, 0),
overwrite=overwrite, chunksize=chunksize,
comp_filter=comp_filter,
atom=tables.Float32Atom(),
title=title,
params=params) | 0.004469 |
def to_daydate(cls, *argv):
"""
Convert date to Julian day (DAYDATE)
"""
argc = len(argv)
if argc == 3:
year, month, day = argv
elif argc == 1:
dval = argv[0]
try:
year = dval.year
month = dval.month
day = dval.day
except AttributeError:
raise InterfaceError("Unsupported python date input: %s (%s)" % (str(dval), dval.__class__))
else:
raise InterfaceError("Date.to_datetime does not support %d arguments." % argc)
TURN_OF_ERAS = 1721424
if month < 3:
year -= 1
month += 12
if ((year > 1582) or
(year == 1582 and month > 10) or
(year == 1582 and month == 10 and day >= 15)):
A = int(year / 100)
B = int(A / 4)
C = 2 - A + B
else:
C = 0
E = int(365.25 * (year + 4716))
F = int(30.6001 * (month + 1))
Z = C + day + E + F - 1524
return Z + 1 - TURN_OF_ERAS | 0.003617 |
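A worked check (the enclosing class is assumed to be the connector's Date type; the name is illustrative):
# 2000-01-01: month < 3 shifts to year=1999, month=13;
# A=19, B=4, C=-13; E=int(365.25*6715)=2452653; F=int(30.6001*14)=428;
# Z = -13 + 1 + 2452653 + 428 - 1524 = 2451545 (the Julian Day Number),
# so the returned DAYDATE is 2451545 + 1 - 1721424 = 730122.
import datetime
assert Date.to_daydate(datetime.date(2000, 1, 1)) == 730122  # class name assumed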