text (string, 78–104k characters) | score (float64, 0–0.18)
---|---|
def service_create(image=str,
name=str,
command=str,
hostname=str,
replicas=int,
target_port=int,
published_port=int):
'''
    Create a Docker Swarm service
image
The docker image
name
        The service name
command
The docker command to run in the container at launch
hostname
The hostname of the containers
replicas
How many replicas you want running in the swarm
target_port
The target port on the container
published_port
        The port that is published on the host/OS
CLI Example:
.. code-block:: bash
salt '*' swarm.service_create image=httpd name=Test_Service \
command=None hostname=salthttpd replicas=6 target_port=80 published_port=80
'''
try:
salt_return = {}
replica_mode = docker.types.ServiceMode('replicated', replicas=replicas)
ports = docker.types.EndpointSpec(ports={target_port: published_port})
__context__['client'].services.create(name=name,
image=image,
command=command,
mode=replica_mode,
endpoint_spec=ports)
echoback = __context__['server_name'] + ' has a Docker Swarm Service running named ' + name
salt_return.update({'Info': echoback,
'Minion': __context__['server_name'],
'Name': name,
'Image': image,
'Command': command,
'Hostname': hostname,
'Replicas': replicas,
'Target_Port': target_port,
'Published_Port': published_port})
except TypeError:
salt_return = {}
salt_return.update({'Error': 'Please make sure you are passing arguments correctly '
'[image, name, command, hostname, replicas, target_port and published_port]'})
return salt_return | 0.002688 |
def merge_dicts(dicts):
"""
    Merge dicts in reverse so that the order of the original list takes precedence.
    e.g., merge_dicts([a, b]) will prefer the keys in 'a' over those in 'b'.
"""
merged = {}
for d in reversed(dicts):
merged.update(d)
return merged | 0.009554 |
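A quick usage sketch (the dicts here are hypothetical, not from the source): because
the list is merged in reverse, keys from the first dict override those from later dicts.

first = {'a': 1, 'b': 2}
second = {'b': 9, 'c': 3}
assert merge_dicts([first, second]) == {'a': 1, 'b': 2, 'c': 3}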
def plot_roc_curve(y_true, y_probas, title='ROC Curves',
curves=('micro', 'macro', 'each_class'),
ax=None, figsize=None, cmap='nipy_spectral',
title_fontsize="large", text_fontsize="medium"):
"""Generates the ROC curves from labels and predicted scores/probabilities
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_probas (array-like, shape (n_samples, n_classes)):
Prediction probabilities for each class returned by a classifier.
title (string, optional): Title of the generated plot. Defaults to
"ROC Curves".
curves (array-like): A listing of which curves should be plotted on the
resulting plot. Defaults to `("micro", "macro", "each_class")`
i.e. "micro" for micro-averaged curve, "macro" for macro-averaged
curve
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
Colormap used for plotting the projection. View Matplotlib Colormap
documentation for available options.
https://matplotlib.org/users/colormaps.html
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> nb = GaussianNB()
>>> nb = nb.fit(X_train, y_train)
>>> y_probas = nb.predict_proba(X_test)
>>> skplt.plot_roc_curve(y_test, y_probas)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_roc_curve.png
:align: center
:alt: ROC Curves
"""
y_true = np.array(y_true)
y_probas = np.array(y_probas)
if 'micro' not in curves and 'macro' not in curves and \
'each_class' not in curves:
raise ValueError('Invalid argument for curves as it '
'only takes "micro", "macro", or "each_class"')
classes = np.unique(y_true)
probas = y_probas
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(len(classes)):
fpr[i], tpr[i], _ = roc_curve(y_true, probas[:, i],
pos_label=classes[i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
micro_key = 'micro'
i = 0
while micro_key in fpr:
i += 1
micro_key += str(i)
y_true = label_binarize(y_true, classes=classes)
if len(classes) == 2:
y_true = np.hstack((1 - y_true, y_true))
fpr[micro_key], tpr[micro_key], _ = roc_curve(y_true.ravel(),
probas.ravel())
roc_auc[micro_key] = auc(fpr[micro_key], tpr[micro_key])
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[x] for x in range(len(classes))]))
    # Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(len(classes)):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= len(classes)
macro_key = 'macro'
i = 0
while macro_key in fpr:
i += 1
macro_key += str(i)
fpr[macro_key] = all_fpr
tpr[macro_key] = mean_tpr
roc_auc[macro_key] = auc(fpr[macro_key], tpr[macro_key])
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
if 'each_class' in curves:
for i in range(len(classes)):
color = plt.cm.get_cmap(cmap)(float(i) / len(classes))
ax.plot(fpr[i], tpr[i], lw=2, color=color,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(classes[i], roc_auc[i]))
if 'micro' in curves:
ax.plot(fpr[micro_key], tpr[micro_key],
label='micro-average ROC curve '
'(area = {0:0.2f})'.format(roc_auc[micro_key]),
color='deeppink', linestyle=':', linewidth=4)
if 'macro' in curves:
ax.plot(fpr[macro_key], tpr[macro_key],
label='macro-average ROC curve '
'(area = {0:0.2f})'.format(roc_auc[macro_key]),
color='navy', linestyle=':', linewidth=4)
ax.plot([0, 1], [0, 1], 'k--', lw=2)
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('False Positive Rate', fontsize=text_fontsize)
ax.set_ylabel('True Positive Rate', fontsize=text_fontsize)
ax.tick_params(labelsize=text_fontsize)
ax.legend(loc='lower right', fontsize=text_fontsize)
return ax | 0.000369 |
def get(self, network_id, *args, **kwargs):
"""
Get a network by its ID.
Args:
network_id (str): The ID of the network.
verbose (bool): Retrieve the service details across the cluster in
swarm mode.
scope (str): Filter the network by scope (``swarm``, ``global``
or ``local``).
Returns:
(:py:class:`Network`) The network.
Raises:
:py:class:`docker.errors.NotFound`
If the network does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(
self.client.api.inspect_network(network_id, *args, **kwargs)
) | 0.002587 |
def masked_array(self, geometry=None):
"""Returns a MaskedArray using nodata values.
Keyword args:
geometry -- any geometry, envelope, or coordinate extent tuple
"""
if geometry is None:
return self._masked_array()
geom = transform(geometry, self.sref)
env = Envelope.from_geom(geom).intersect(self.envelope)
arr = self._masked_array(env)
if geom.GetGeometryType() != ogr.wkbPoint:
dims = self.get_offset(env)[2:]
affine = AffineTransform(*tuple(self.affine))
affine.origin = env.ul
mask = ~np.ma.make_mask(geom_to_array(geom, dims, affine))
arr.mask = arr.mask | mask
return arr | 0.002732 |
def to_json(self, X, y):
'''
        Writes the dataset to JSON (gzip-compressed when self.gz is set).
:param X: dataset as list of dict.
:param y: labels.
'''
with gzip.open('%s.gz' % self.path, 'wt') if self.gz else open(
self.path, 'w') as file:
json.dump(list(zip(y, X)), file) | 0.006536 |
def class_name(self, cls, parts=0, aliases=None):
# type: (Any, int, Optional[Dict[unicode, unicode]]) -> unicode
"""Given a class object, return a fully-qualified name.
This works for things I've tested in matplotlib so far, but may not be
completely general.
"""
module = cls.__module__
if module in ('__builtin__', 'builtins'):
fullname = cls.__name__
else:
fullname = '%s.%s' % (module, cls.__name__)
if parts == 0:
result = fullname
else:
name_parts = fullname.split('.')
result = '.'.join(name_parts[-parts:])
if aliases is not None and result in aliases:
return aliases[result]
return result | 0.003911 |
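A brief illustration of the behavior (assuming an instance `self` of the containing
class and the standard library's collections module; the inputs are hypothetical):

# self.class_name(collections.OrderedDict)          -> 'collections.OrderedDict'
# self.class_name(collections.OrderedDict, parts=1) -> 'OrderedDict'
# self.class_name(dict)                             -> 'dict' (builtins are returned bare)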
def new_text_block(self, **kwargs):
"""Create a new text block."""
proto = {'content': '', 'type': self.markdown}
proto.update(**kwargs)
return proto | 0.01105 |
def add(self, key, value, lang=None):
""" Add a triple to the graph related to this node
:param key: Predicate of the triple
:param value: Object of the triple
:param lang: Language of the triple if applicable
"""
if not isinstance(value, Literal) and lang is not None:
value = Literal(value, lang=lang)
elif not isinstance(value, (BNode, URIRef)):
value, _type = term._castPythonToLiteral(value)
if _type is None:
value = Literal(value)
else:
value = Literal(value, datatype=_type)
self.graph.add((self.asNode(), key, value)) | 0.002981 |
def all(self, page=1, per_page=10):
"""
Get a single page from the list of all collections.
:param page [integer]: Page number to retrieve. (Optional; default: 1)
:param per_page [integer]: Number of items per page. (Optional; default: 10)
:return: [Array]: A single page of the Collection list.
"""
url = "/collections"
result = self._all(url, page=page, per_page=per_page)
return CollectionModel.parse_list(result) | 0.006135 |
def decode_complex_ops(encoded_querystring, operators=None, negation=True):
"""
Returns a list of (querystring, negate, op) tuples that represent complex operations.
    This function will raise a `ValidationError` if:
    - the individual querystrings are not wrapped in parentheses
    - the set operators do not match the provided `operators`
    - there is trailing content after the ending querystring
Ex::
# unencoded query: (a=1) & (b=2) | ~(c=3)
>>> s = '%28a%253D1%29%20%26%20%28b%253D2%29%20%7C%20%7E%28c%253D3%29'
    >>> decode_complex_ops(s)
[
('a=1', False, QuerySet.__and__),
('b=2', False, QuerySet.__or__),
('c=3', True, None),
]
"""
complex_op_re = COMPLEX_OP_NEG_RE if negation else COMPLEX_OP_RE
if operators is None:
operators = COMPLEX_OPERATORS
# decode into: (a%3D1) & (b%3D2) | ~(c%3D3)
decoded_querystring = unquote(encoded_querystring)
matches = [m for m in complex_op_re.finditer(decoded_querystring)]
if not matches:
msg = _("Unable to parse querystring. Decoded: '%(decoded)s'.")
raise SerializerValidationError(msg % {'decoded': decoded_querystring})
results, errors = [], []
for match, has_next in lookahead(matches):
negate, querystring, op = match.groups()
negate = negate == '~'
querystring = unquote(querystring)
op_func = operators.get(op.strip()) if op else None
if op_func is None and has_next:
msg = _("Invalid querystring operator. Matched: '%(op)s'.")
errors.append(msg % {'op': op})
results.append(ComplexOp(querystring, negate, op_func))
trailing_chars = decoded_querystring[matches[-1].end():]
if trailing_chars:
msg = _("Ending querystring must not have trailing characters. Matched: '%(chars)s'.")
errors.append(msg % {'chars': trailing_chars})
if errors:
raise SerializerValidationError(errors)
return results | 0.001481 |
def get_utc_sun_time_full(self):
"""Return a list of Jewish times for the given location."""
# sunset and rise time
sunrise, sunset = self._get_utc_sun_time_deg(90.833)
# shaa zmanit by gara, 1/12 of light time
sun_hour = (sunset - sunrise) // 12
midday = (sunset + sunrise) // 2
# get times of the different sun angles
first_light, _ = self._get_utc_sun_time_deg(106.1)
talit, _ = self._get_utc_sun_time_deg(101.0)
_, first_stars = self._get_utc_sun_time_deg(96.0)
_, three_stars = self._get_utc_sun_time_deg(98.5)
mga_sunhour = (midday - first_light) / 6
res = dict(sunrise=sunrise, sunset=sunset, sun_hour=sun_hour,
midday=midday, first_light=first_light, talit=talit,
first_stars=first_stars, three_stars=three_stars,
plag_mincha=sunset - 1.25 * sun_hour,
stars_out=sunset + 18. * sun_hour / 60.,
small_mincha=sunrise + 9.5 * sun_hour,
big_mincha=sunrise + 6.5 * sun_hour,
mga_end_shma=first_light + mga_sunhour * 3.,
gra_end_shma=sunrise + sun_hour * 3.,
mga_end_tfila=first_light + mga_sunhour * 4.,
gra_end_tfila=sunrise + sun_hour * 4.,
midnight=midday + 12 * 60.)
return res | 0.001421 |
def get_fragment(self, **kwargs):
"""
Return a complete fragment.
:param gp:
:return:
"""
gen, namespaces, plan = self.get_fragment_generator(**kwargs)
graph = ConjunctiveGraph()
[graph.bind(prefix, u) for (prefix, u) in namespaces]
[graph.add((s, p, o)) for (_, s, p, o) in gen]
return graph | 0.005362 |
def configure(
cls, impl: "Union[None, str, Type[Configurable]]", **kwargs: Any
) -> None:
"""Configures the `AsyncHTTPClient` subclass to use.
``AsyncHTTPClient()`` actually creates an instance of a subclass.
This method may be called with either a class object or the
fully-qualified name of such a class (or ``None`` to use the default,
``SimpleAsyncHTTPClient``)
If additional keyword arguments are given, they will be passed
to the constructor of each subclass instance created. The
keyword argument ``max_clients`` determines the maximum number
of simultaneous `~AsyncHTTPClient.fetch()` operations that can
execute in parallel on each `.IOLoop`. Additional arguments
may be supported depending on the implementation class in use.
Example::
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
"""
super(AsyncHTTPClient, cls).configure(impl, **kwargs) | 0.002947 |
def formatter(self, api_client, data, newval):
"""Parse additional url fields and map them to inputs
Attempt to create a dictionary with keys being user input, and
response being the returned URL
"""
if newval is None:
return None
user_param = data['_paramAdditionalUrls']
urls = {}
if isinstance(newval, str):
urls[user_param[0]] = newval
else:
for key, url in zip(user_param, newval):
urls[key] = url
return urls | 0.00365 |
def parse(station: str, txt: str) -> (MetarData, Units): # type: ignore
"""
Returns MetarData and Units dataclasses with parsed data and their associated units
"""
core.valid_station(station)
return parse_na(txt) if core.uses_na_format(station[:2]) else parse_in(txt) | 0.006944 |
def _buffer_incomplete_responses(raw_output, buf):
"""It is possible for some of gdb's output to be read before it completely finished its response.
In that case, a partial mi response was read, which cannot be parsed into structured data.
We want to ALWAYS parse complete mi records. To do this, we store a buffer of gdb's
output if the output did not end in a newline.
Args:
raw_output: Contents of the gdb mi output
buf (str): Buffered gdb response from the past. This is incomplete and needs to be prepended to
gdb's next output.
Returns:
(raw_output, buf)
"""
if raw_output:
if buf:
# concatenate buffer and new output
raw_output = b"".join([buf, raw_output])
buf = None
if b"\n" not in raw_output:
# newline was not found, so assume output is incomplete and store in buffer
buf = raw_output
raw_output = None
elif not raw_output.endswith(b"\n"):
# raw output doesn't end in a newline, so store everything after the last newline (if anything)
# in the buffer, and parse everything before it
remainder_offset = raw_output.rindex(b"\n") + 1
buf = raw_output[remainder_offset:]
raw_output = raw_output[:remainder_offset]
return (raw_output, buf) | 0.005084 |
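A minimal sketch of the buffering behavior described above (the byte strings are
made up, not tied to a real gdb session): a chunk without a trailing newline is
held back until the next read completes it.

out, buf = _buffer_incomplete_responses(b'^done\n(gdb) ', None)
# out == b'^done\n', buf == b'(gdb) '
out, buf = _buffer_incomplete_responses(b'\n', buf)
# out == b'(gdb) \n', buf is None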
def get_project_children(self, project_id, name_contains, exclude_response_fields=None):
"""
Send GET to /projects/{project_id}/children filtering by a name.
:param project_id: str uuid of the project
:param name_contains: str name to filter folders by (if not None this method works recursively)
:param exclude_response_fields: [str]: list of fields to exclude in the response items
:return: requests.Response containing the successful result
"""
return self._get_children('projects', project_id, name_contains, exclude_response_fields) | 0.009983 |
def generate(self):
"""Returns (ts, rvs), where ts is a list of arrays of
observation times (one array for each observatory), and rvs is
a corresponding list of total radial velocity measurements."""
ts=self.generate_tobs()
noise=self.generate_noise(ts)
rvs=[]
for t,n in zip(ts, noise):
rvs.append(n + np.sum(rv.rv_model(t, self.params), axis=0))
return ts,rvs | 0.017857 |
def extendChainsInSentence( self, sentence, foundChains ):
        ''' Applies self.extendChainsInClause() to every clause of the given sentence.
'''
# 1) Preprocessing
clauses = getClausesByClauseIDs( sentence )
# 2) Extend verb chains in each clause
allDetectedVerbChains = []
for clauseID in clauses:
clause = clauses[clauseID]
self.extendChainsInClause(clause, clauseID, foundChains) | 0.014925 |
def fromPy(cls, val, typeObj, vldMask=None):
"""
:param val: python string or None
:param typeObj: instance of String HdlType
:param vldMask: if is None validity is resolved from val
if is 0 value is invalidated
if is 1 value has to be valid
"""
assert isinstance(val, str) or val is None
vld = 0 if val is None else 1
if not vld:
assert vldMask is None or vldMask == 0
val = ""
else:
if vldMask == 0:
val = ""
vld = 0
return cls(val, typeObj, vld) | 0.003221 |
def parse_parts(self, file, boundary, content_length):
"""Generate ``('file', (name, val))`` and
``('form', (name, val))`` parts.
"""
in_memory = 0
for ellt, ell in self.parse_lines(file, boundary, content_length):
if ellt == _begin_file:
headers, name, filename = ell
is_file = True
guard_memory = False
filename, container = self.start_file_streaming(
filename, headers, content_length)
_write = container.write
elif ellt == _begin_form:
headers, name = ell
is_file = False
container = []
_write = container.append
guard_memory = self.max_form_memory_size is not None
elif ellt == _cont:
_write(ell)
# if we write into memory and there is a memory size limit we
# count the number of bytes in memory and raise an exception if
# there is too much data in memory.
if guard_memory:
in_memory += len(ell)
if in_memory > self.max_form_memory_size:
self.in_memory_threshold_reached(in_memory)
elif ellt == _end:
if is_file:
container.seek(0)
yield ('file',
(name, FileStorage(container, filename, name,
headers=headers)))
else:
part_charset = self.get_part_charset(headers)
yield ('form',
(name, b''.join(container).decode(
part_charset, self.errors))) | 0.001106 |
def _expectation(p, linear_mean, none, kern, feat, nghp=None):
"""
Compute the expectation:
expectation[n] = <m(x_n)^T K_{x_n, Z}>_p(x_n)
- m(x_i) = A x_i + b :: Linear mean function
- K_{.,.} :: Kernel function
:return: NxQxM
"""
with params_as_tensors_for(linear_mean):
N = p.mu.shape[0].value
D = p.mu.shape[1].value
exKxz = expectation(p, mean_functions.Identity(D), (kern, feat), nghp=nghp)
eKxz = expectation(p, (kern, feat), nghp=nghp)
eAxKxz = tf.matmul(tf.tile(linear_mean.A[None, :, :], (N, 1, 1)),
exKxz, transpose_a=True)
ebKxz = linear_mean.b[None, :, None] * eKxz[:, None, :]
return eAxKxz + ebKxz | 0.002681 |
def process(self, context, data):
"""
Will modify the context.target_backend attribute based on the requester identifier.
:param context: request context
:param data: the internal request
"""
context.target_backend = self.requester_mapping[data.requester]
return super().process(context, data) | 0.008596 |
def set_atom(self, block, block_items, existing_col, min_itemsize,
nan_rep, info, encoding=None, errors='strict'):
""" create and setup my atom from the block b """
self.values = list(block_items)
# short-cut certain block types
if block.is_categorical:
return self.set_atom_categorical(block, items=block_items,
info=info)
elif block.is_datetimetz:
return self.set_atom_datetime64tz(block, info=info)
elif block.is_datetime:
return self.set_atom_datetime64(block)
elif block.is_timedelta:
return self.set_atom_timedelta64(block)
elif block.is_complex:
return self.set_atom_complex(block)
dtype = block.dtype.name
inferred_type = lib.infer_dtype(block.values, skipna=False)
if inferred_type == 'date':
raise TypeError(
"[date] is not implemented as a table column")
elif inferred_type == 'datetime':
# after 8260
            # this would only be hit for a multi-timezone dtype
# which is an error
raise TypeError(
"too many timezones in this block, create separate "
"data columns"
)
elif inferred_type == 'unicode':
raise TypeError(
"[unicode] is not implemented as a table column")
# this is basically a catchall; if say a datetime64 has nans then will
# end up here ###
elif inferred_type == 'string' or dtype == 'object':
self.set_atom_string(
block, block_items,
existing_col,
min_itemsize,
nan_rep,
encoding,
errors)
# set as a data block
else:
self.set_atom_data(block) | 0.001575 |
def role_create(auth=None, **kwargs):
'''
Create a role
CLI Example:
.. code-block:: bash
salt '*' keystoneng.role_create name=role1
salt '*' keystoneng.role_create name=role1 domain_id=b62e76fbeeff4e8fb77073f591cf211e
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(keep_name=True, **kwargs)
return cloud.create_role(**kwargs) | 0.005128 |
def get(self, key, default=None):
"""Get value with optional default."""
try:
key = self.get_key(key)
except KeyError:
return default
return super(DatasetDict, self).get(key, default) | 0.008368 |
def click(self, x, y):
'''
same as adb -s ${SERIALNO} shell input tap x y
FIXME(ssx): not tested on horizontal screen
'''
self.shell('input', 'tap', str(x), str(y)) | 0.009804 |
def _spelling_pipeline(self, sources, options, personal_dict):
"""Check spelling pipeline."""
for source in self._pipeline_step(sources, options, personal_dict):
# Don't waste time on empty strings
if source._has_error():
yield Results([], source.context, source.category, source.error)
elif not source.text or source.text.isspace():
continue
else:
encoding = source.encoding
if source._is_bytes():
text = source.text
else:
# UTF-16 and UTF-32 don't work well with Aspell and Hunspell,
# so encode with the compatible UTF-8 instead.
if encoding.startswith(('utf-16', 'utf-32')):
encoding = 'utf-8'
text = source.text.encode(encoding)
self.log('', 3)
self.log(text, 3)
cmd = self.setup_command(encoding, options, personal_dict)
self.log("Command: " + str(cmd), 4)
try:
wordlist = util.call_spellchecker(cmd, input_text=text, encoding=encoding)
yield Results(
[w for w in sorted(set(wordlist.replace('\r', '').split('\n'))) if w],
source.context,
source.category
)
except Exception as e: # pragma: no cover
err = self.get_error(e)
yield Results([], source.context, source.category, err) | 0.003697 |
def _get_deleted_at_column(self, builder):
"""
Get the "deleted at" column for the builder.
:param builder: The query builder
:type builder: orator.orm.builder.Builder
:rtype: str
"""
if len(builder.get_query().joins) > 0:
return builder.get_model().get_qualified_deleted_at_column()
else:
return builder.get_model().get_deleted_at_column() | 0.004651 |
def read(self, request, pk=None):
"""
Mark the message as read (i.e. delete from inbox)
"""
from .settings import stored_messages_settings
backend = stored_messages_settings.STORAGE_BACKEND()
try:
backend.inbox_delete(request.user, pk)
except MessageDoesNotExist as e:
return Response(e.message, status='404')
return Response({'status': 'message marked as read'}) | 0.004415 |
def _load_credentials_file(credentials_file):
"""Load credentials from the given file handle.
The file is expected to be in this format:
{
"file_version": 2,
"credentials": {
"key": "base64 encoded json representation of credentials."
}
}
This function will warn and return empty credentials instead of raising
exceptions.
Args:
credentials_file: An open file handle.
Returns:
A dictionary mapping user-defined keys to an instance of
:class:`oauth2client.client.Credentials`.
"""
try:
credentials_file.seek(0)
data = json.load(credentials_file)
except Exception:
logger.warning(
'Credentials file could not be loaded, will ignore and '
'overwrite.')
return {}
if data.get('file_version') != 2:
logger.warning(
'Credentials file is not version 2, will ignore and '
'overwrite.')
return {}
credentials = {}
for key, encoded_credential in iteritems(data.get('credentials', {})):
try:
credential_json = base64.b64decode(encoded_credential)
credential = client.Credentials.new_from_json(credential_json)
credentials[key] = credential
except:
logger.warning(
'Invalid credential {0} in file, ignoring.'.format(key))
return credentials | 0.001373 |
def isdisjoint(self, other):
r"""Return True if the set has no elements in common with other.
Sets are disjoint iff their intersection is the empty set.
>>> ms = Multiset('aab')
>>> ms.isdisjoint('bc')
False
>>> ms.isdisjoint(Multiset('ccd'))
True
Args:
other: The other set to check disjointedness. Can also be an :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
"""
if isinstance(other, _sequence_types + (BaseMultiset, )):
pass
elif not isinstance(other, Container):
other = self._as_multiset(other)
return all(element not in other for element in self._elements.keys()) | 0.004975 |
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model."""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if cv.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv) | 0.002924 |
def _get_paged_resource(self, url, params=None, data_key=None):
"""
Canvas GET method. Return representation of the requested paged
resource, either the requested page, or chase pagination links to
coalesce resources.
"""
if not params:
params = {}
self._set_as_user(params)
auto_page = not ('page' in params or 'per_page' in params)
if 'per_page' not in params and self._per_page != DEFAULT_PAGINATION:
params["per_page"] = self._per_page
full_url = url + self._params(params)
return self._get_resource_url(full_url, auto_page, data_key) | 0.003058 |
def tabfile2list(fname):
"tabfile2list"
#dat = mylib1.readfileasmac(fname)
#data = string.strip(dat)
data = mylib1.readfileasmac(fname)
#data = data[:-2]#remove the last return
alist = data.split('\r')#since I read it as a mac file
blist = alist[1].split('\t')
clist = []
for num in range(0, len(alist)):
ilist = alist[num].split('\t')
clist = clist+[ilist]
cclist = clist[:-1]#the last element is turning out to be empty
return cclist | 0.016129 |
def name(self):
"""
Returns the connected user's full name or string representation if the
full name method is unavailable (e.g. on a custom user class).
"""
if hasattr(self.user, "get_full_name"):
return self.user.get_full_name()
return "{0}".format(self.user) | 0.006231 |
def search(self, query, _or=False, ignores=[]):
"""Search word from FM-index
Params:
<str> | <Sequential> query
<bool> _or
<list <str> > ignores
Return:
<list>SEARCH_RESULT(<int> document_id,
<list <int> > counts
<str> doc)
"""
if isinstance(query, str):
dids = MapIntInt({})
self.fm.search(query, dids)
dids = dids.asdict()
result = []
for did in sorted(dids.keys()):
doc = self.fm.get_document(did)
if not any(ignore in doc for ignore in ignores):
count = dids[did]
result.append(SEARCH_RESULT(int(did), [count], doc))
return result
search_results = []
for q in query:
dids = MapIntInt({})
self.fm.search(q, dids)
search_results.append(dids.asdict())
merged_dids = self._merge_search_result(search_results, _or)
result = []
for did in merged_dids:
doc = self.fm.get_document(did)
if not any(ignore in doc for ignore in ignores):
counts = map(lambda x: int(x.pop(did, 0)), search_results)
result.append(SEARCH_RESULT(int(did), list(counts), doc))
return result | 0.001431 |
def relative(dataset, ori=0, column=1, fail_silently=True):
"""
Convert dataset to relative value from the value of :attr:`ori`
Parameters
----------
dataset : list of numpy array list
A list of numpy array list
ori : integer or numpy array, optional
A relative original data index or numpy array
column : integer, optional
An index of base column to calculate the relative value
fail_silently : boolean
If `True`, do not raise exception if no data exists
Returns
-------
ndarray
A list of numpy array list
Examples
--------
>>> import numpy as np
>>> from maidenhair.filters.relative import relative
>>> dataset = []
>>> dataset.append([np.array([0, 1, 2]), np.array([3, 4, 5])])
>>> dataset.append([np.array([0, 1, 2]), np.array([3, 5, 7])])
>>> dataset.append([np.array([0, 1, 2]), np.array([100, 103, 106])])
>>> expected = [
... [np.array([0, 1, 2]), np.array([0, 50, 100])],
... [np.array([0, 1, 2]), np.array([0, 100, 200])],
... [np.array([0, 1, 2]), np.array([4850, 5000, 5150])],
... ]
>>> proceed = relative(dataset)
>>> np.array_equal(proceed, expected)
True
"""
try:
if isinstance(ori, int):
# relative from the [ori]th array
ori = dataset[ori][column]
if isinstance(ori[0], (list, tuple, np.ndarray)):
# calculate min/max difference
for i in range(len(ori[0])):
orimin = np.min(ori[:,i])
orimax = np.max(ori[:,i])
oridiff = orimax - orimin
# baseline
for data in dataset:
data[column][:,i] -= orimin
# convert
for data in dataset:
data[column][:,i] /= oridiff / 100.0
else:
orimin = np.min(ori)
orimax = np.max(ori)
oridiff = orimax - orimin
# baseline
for data in dataset:
data[column] -= orimin
# convert
for data in dataset:
data[column] /= oridiff / 100.0
return dataset
    except IndexError as e:
if fail_silently:
# fail silently
return dataset
raise e | 0.002145 |
def submit(self, stanza):
"""Adds keys to the current configuration stanza as a
dictionary of key-value pairs.
:param stanza: A dictionary of key-value pairs for the stanza.
:type stanza: ``dict``
:return: The :class:`Stanza` object.
"""
body = _encode(**stanza)
self.service.post(self.path, body=body)
return self | 0.005168 |
def load_labeled_intervals(filename, delimiter=r'\s+'):
r"""Import labeled intervals from an annotation file. The file should consist
of three columns: Two consisting of numeric values corresponding to start
and end time of each interval and a third corresponding to the label of
each interval. This is primarily useful for processing events which span a
duration, such as segmentation, chords, or instrument activation.
Parameters
----------
filename : str
Path to the annotation file
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
intervals : np.ndarray, shape=(n_events, 2)
array of event start and end time
labels : list of str
list of labels
"""
# Use our universal function to load in the events
starts, ends, labels = load_delimited(filename, [float, float, str],
delimiter)
# Stack into an interval matrix
intervals = np.array([starts, ends]).T
# Validate them, but throw a warning in place of an error
try:
util.validate_intervals(intervals)
except ValueError as error:
warnings.warn(error.args[0])
return intervals, labels | 0.001538 |
def set_complete_message(self, message):
"""Set a complete message."""
@self.connect
def on_complete(**kwargs):
_default_on_complete(message, **kwargs) | 0.010638 |
def save(self, *args, **kwargs):
"""
Create formatted version of body text.
"""
self.body_formatted = sanetize_text(self.body)
super(Contact, self).save() | 0.010309 |
def key_required(group=None, perm=None, keytype=None):
"""
Decorator for key authentication
"""
def decorator(f):
def wrapper(request, *args, **kwargs):
try:
validate_key( request, group, perm, keytype )
return f(request, *args, **kwargs)
except AccessUnauthorized:
return HttpResponse401( request )
except AccessForbidden:
return HttpResponse403( request )
return wrapper
return decorator | 0.015209 |
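A usage sketch (the view, group and permission names are made up, and Django's
HttpResponse is assumed to be importable in this module):

@key_required(group='reporting', perm='read')
def stats_view(request):
    return HttpResponse('ok')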
def play_mode(self, playmode):
"""Set the speaker's mode."""
playmode = playmode.upper()
if playmode not in PLAY_MODES.keys():
raise KeyError("'%s' is not a valid play mode" % playmode)
self.avTransport.SetPlayMode([
('InstanceID', 0),
('NewPlayMode', playmode)
]) | 0.005865 |
def simulate_list(nwords=16, nrec=10, ncats=4):
"""A function to simulate a list"""
# load wordpool
wp = pd.read_csv('data/cut_wordpool.csv')
# get one list
wp = wp[wp['GROUP']==np.random.choice(list(range(16)), 1)[0]].sample(16)
wp['COLOR'] = [[int(np.random.rand() * 255) for i in range(3)] for i in range(16)] | 0.00885 |
def _get(self, numberOfBits: int, doCollect: bool):
"""
        :param numberOfBits: number of bits to get from the current position
        :param doCollect: if False, the output is not collected; the iterator
            just moves through the structure
"""
if not isinstance(numberOfBits, int):
numberOfBits = int(numberOfBits)
while self.actuallyHave < numberOfBits:
            # accumulate while we do not have enough bits
try:
f = next(self.it)
except StopIteration:
if self.fillup and self.actual is not None:
break
else:
raise NotEnoughtBitsErr()
thisFieldLen = f._dtype.bit_length()
if self.actual is None:
if not doCollect and thisFieldLen <= numberOfBits:
numberOfBits -= thisFieldLen
else:
self.actual = f
self.actuallyHave = thisFieldLen
else:
if not doCollect and self.actuallyHave < numberOfBits:
self.actuallyHave = thisFieldLen
self.actual = f
else:
self.actuallyHave += thisFieldLen
self.actual = f._concat(self.actual)
# slice out from actual
actual = self.actual
actualOffset = self.actualOffset
if self.actuallyHave < numberOfBits:
assert self.fillup
if doCollect:
t = self.actual._dtype
fillupW = numberOfBits - self.actuallyHave
padding_t = Bits(fillupW, signed=t.signed, negated=t.negated)
padding = padding_t.fromPy(None)
actual = padding._concat(actual)
self.actuallyHave = 0
# update about what was taken
self.actuallyHave -= numberOfBits
self.actualOffset += numberOfBits
if self.actuallyHave == 0:
self.actual = None
self.actualOffset = 0
if doCollect:
if numberOfBits == 1:
return actual[actualOffset]
else:
return actual[(actualOffset + numberOfBits):actualOffset] | 0.000896 |
def payload(self):
"""
Picks out the payload from the different parts of the signed/encrypted
JSON Web Token. If the content type is said to be 'jwt' deserialize the
payload into a Python object otherwise return as-is.
:return: The payload
"""
_msg = as_unicode(self.part[1])
# If not JSON web token assume JSON
if "cty" in self.headers and self.headers["cty"].lower() != "jwt":
pass
else:
try:
_msg = json.loads(_msg)
except ValueError:
pass
return _msg | 0.003263 |
def csv_to_numpy(string_like, dtype=None): # type: (str) -> np.array
"""Convert a CSV object to a numpy array.
Args:
string_like (str): CSV string.
dtype (dtype, optional): Data type of the resulting array. If None, the dtypes will be determined by the
contents of each column, individually. This argument can only be used to
'upcast' the array. For downcasting, use the .astype(t) method.
Returns:
(np.array): numpy array
"""
stream = StringIO(string_like)
return np.genfromtxt(stream, dtype=dtype, delimiter=',') | 0.006192 |
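A small usage sketch (the inline CSV string is illustrative, not from the source):

csv_to_numpy('1.0,2.0\n3.0,4.0')
# -> array([[1., 2.],
#           [3., 4.]])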
def _ReadFixedSizeDataTypeDefinition(
self, definitions_registry, definition_values, data_type_definition_class,
definition_name, supported_attributes,
default_size=definitions.SIZE_NATIVE, default_units='bytes',
is_member=False, supported_size_values=None):
"""Reads a fixed-size data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
data_type_definition_class (str): data type definition class.
definition_name (str): name of the definition.
supported_attributes (set[str]): names of the supported attributes.
default_size (Optional[int]): default size.
default_units (Optional[str]): default units.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
supported_size_values (Optional[tuple[int]]): supported size values,
or None if not set.
Returns:
FixedSizeDataTypeDefinition: fixed-size data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
definition_object = self._ReadStorageDataTypeDefinition(
definitions_registry, definition_values, data_type_definition_class,
definition_name, supported_attributes, is_member=is_member)
attributes = definition_values.get('attributes', None)
if attributes:
size = attributes.get('size', default_size)
if size != definitions.SIZE_NATIVE:
try:
int(size)
except ValueError:
          error_message = 'unsupported size attribute: {0!s}'.format(size)
raise errors.DefinitionReaderError(definition_name, error_message)
if supported_size_values and size not in supported_size_values:
        error_message = 'unsupported size value: {0!s}'.format(size)
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object.size = size
definition_object.units = attributes.get('units', default_units)
return definition_object | 0.005053 |
def class_for_type(self, object_type):
""" Given an object_type return the class associated with it. """
if object_type not in self.class_mapping:
raise ZenpyException("Unknown object_type: " + str(object_type))
else:
return self.class_mapping[object_type] | 0.006579 |
def generate_shard_args(outfiles, num_examples):
"""Generate start and end indices per outfile."""
num_shards = len(outfiles)
num_examples_per_shard = num_examples // num_shards
start_idxs = [i * num_examples_per_shard for i in range(num_shards)]
end_idxs = list(start_idxs)
end_idxs.pop(0)
end_idxs.append(num_examples)
return zip(start_idxs, end_idxs, outfiles) | 0.023747 |
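For example (hypothetical shard file names), 10 examples over 3 shards produce
start/end index pairs where the last shard absorbs the remainder:

list(generate_shard_args(['shard-0', 'shard-1', 'shard-2'], 10))
# -> [(0, 3, 'shard-0'), (3, 6, 'shard-1'), (6, 10, 'shard-2')]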
def colormagdiagram_cplist(cplist,
outpkl,
color_mag1=['gaiamag','sdssg'],
color_mag2=['kmag','kmag'],
yaxis_mag=['gaia_absmag','rpmj']):
'''This makes color-mag diagrams for all checkplot pickles in the provided
list.
Can make an arbitrary number of CMDs given lists of x-axis colors and y-axis
mags to use.
Parameters
----------
cplist : list of str
This is the list of checkplot pickles to process.
outpkl : str
The filename of the output pickle that will contain the color-mag
information for all objects in the checkplots specified in `cplist`.
color_mag1 : list of str
        This is a list of the keys in each checkplot's `objectinfo` dict that will
be used as color_1 in the equation::
x-axis color = color_mag1 - color_mag2
color_mag2 : list of str
        This is a list of the keys in each checkplot's `objectinfo` dict that will
be used as color_2 in the equation::
x-axis color = color_mag1 - color_mag2
yaxis_mag : list of str
This is a list of the keys in each checkplot's `objectinfo` dict that
will be used as the (absolute) magnitude y-axis of the color-mag
diagrams.
Returns
-------
str
The path to the generated CMD pickle file for the collection of objects
in the input checkplot list.
Notes
-----
This can make many CMDs in one go. For example, the default kwargs for
    `color_mag1`, `color_mag2`, and `yaxis_mag` result in two CMDs generated and
written to the output pickle file:
- CMD1 -> gaiamag - kmag on the x-axis vs gaia_absmag on the y-axis
- CMD2 -> sdssg - kmag on the x-axis vs rpmj (J reduced PM) on the y-axis
'''
# first, we'll collect all of the info
cplist_objectids = []
cplist_mags = []
cplist_colors = []
for cpf in cplist:
cpd = _read_checkplot_picklefile(cpf)
cplist_objectids.append(cpd['objectid'])
thiscp_mags = []
thiscp_colors = []
for cm1, cm2, ym in zip(color_mag1, color_mag2, yaxis_mag):
if (ym in cpd['objectinfo'] and
cpd['objectinfo'][ym] is not None):
thiscp_mags.append(cpd['objectinfo'][ym])
else:
thiscp_mags.append(np.nan)
if (cm1 in cpd['objectinfo'] and
cpd['objectinfo'][cm1] is not None and
cm2 in cpd['objectinfo'] and
cpd['objectinfo'][cm2] is not None):
thiscp_colors.append(cpd['objectinfo'][cm1] -
cpd['objectinfo'][cm2])
else:
thiscp_colors.append(np.nan)
cplist_mags.append(thiscp_mags)
cplist_colors.append(thiscp_colors)
# convert these to arrays
cplist_objectids = np.array(cplist_objectids)
cplist_mags = np.array(cplist_mags)
cplist_colors = np.array(cplist_colors)
# prepare the outdict
cmddict = {'objectids':cplist_objectids,
'mags':cplist_mags,
'colors':cplist_colors,
'color_mag1':color_mag1,
'color_mag2':color_mag2,
'yaxis_mag':yaxis_mag}
# save the pickled figure and dict for fast retrieval later
with open(outpkl,'wb') as outfd:
pickle.dump(cmddict, outfd, pickle.HIGHEST_PROTOCOL)
plt.close('all')
return cmddict | 0.004236 |
def prj_view_user(self, *args, **kwargs):
"""View the, in the user table view selected, user.
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
i = self.prj_user_tablev.currentIndex()
item = i.internalPointer()
if item:
user = item.internal_data()
self.view_user(user) | 0.004988 |
def interpolate(values, value_times, sampling_rate=1000):
"""
3rd order spline interpolation.
Parameters
----------
values : dataframe
Values.
value_times : list
Time indices of values.
sampling_rate : int
Sampling rate (samples/second).
Returns
----------
signal : pd.Series
An array containing the values indexed by time.
Example
----------
>>> import neurokit as nk
>>> signal = interpolate([800, 900, 700, 500], [1000, 2000, 3000, 4000], sampling_rate=1000)
>>> pd.Series(signal).plot()
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- scipy
- pandas
"""
# values=RRis.copy()
# value_times=beats_times.copy()
# Preprocessing
initial_index = value_times[0]
value_times = np.array(value_times) - initial_index
# fit a 3rd degree spline on the data.
spline = scipy.interpolate.splrep(x=value_times, y=values, k=3, s=0) # s=0 guarantees that it will pass through ALL the given points
x = np.arange(0, value_times[-1], 1)
# Get the values indexed per time
signal = scipy.interpolate.splev(x=x, tck=spline, der=0)
# Transform to series
signal = pd.Series(signal)
signal.index = np.array(np.arange(initial_index, initial_index+len(signal), 1))
return(signal) | 0.002855 |
def cli(env, limit, closed=False, get_all=False):
"""Invoices and all that mess"""
manager = AccountManager(env.client)
invoices = manager.get_invoices(limit, closed, get_all)
table = formatting.Table([
"Id", "Created", "Type", "Status", "Starting Balance", "Ending Balance", "Invoice Amount", "Items"
])
table.align['Starting Balance'] = 'l'
table.align['Ending Balance'] = 'l'
table.align['Invoice Amount'] = 'l'
table.align['Items'] = 'l'
if isinstance(invoices, dict):
invoices = [invoices]
for invoice in invoices:
table.add_row([
invoice.get('id'),
utils.clean_time(invoice.get('createDate'), out_format="%Y-%m-%d"),
invoice.get('typeCode'),
invoice.get('statusCode'),
invoice.get('startingBalance'),
invoice.get('endingBalance'),
invoice.get('invoiceTotalAmount'),
invoice.get('itemCount')
])
env.fout(table) | 0.002014 |
def delete_service(self, service: str):
"""Removes/stops a docker service.
Only the manager nodes can delete a service
Args:
service (string): Service name or ID
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Services can only be deleted '
'on swarm manager nodes')
# Remove service
self._api_client.remove_service(service) | 0.00409 |
async def enter_captcha(self, url: str, sid: str) -> str:
"""
Override this method for processing captcha.
:param url: link to captcha image
        :param sid: captcha id; it is unclear why it is passed here, but it may be useful
:return captcha value
"""
raise VkCaptchaNeeded(url, sid) | 0.006154 |
def iter_paragraph(filename, filetype):
"""
    A helper function to iterate through the different types of Wikipedia data inputs.
    :param filename: Path to the input file or directory.
    :type filename: str
    :param filetype: One of 'jsonzip', 'jsondir' or 'wikidump'.
    :type filetype: str
    :return: A generator yielding a paragraph of text for each iteration.
"""
assert filetype in ['jsonzip', 'jsondir', 'wikidump']
    # Iterating through paragraphs from the Annotated Wikipedia zipfile.
if filetype == 'jsonzip':
with ZipFile(filename, 'r') as zip_in:
# Iterate through the individual files.
for infile in zip_in.namelist():
if infile.endswith('/'): # Skip the directories.
continue
print(infile, end='\n', file=sys.stderr) # Logging progress.
with zip_in.open(infile) as f_in:
for line in io.TextIOWrapper(f_in, 'utf8'):
# Each line is a separate json.
data = json.loads(line)
# The useful text under 'text' key.
yield data['text'].strip()
    # Iterating through paragraphs from the Annotated Wikipedia directory.
elif filetype == 'jsondir':
for root, dirs, files in os.walk(filename):
for wiki_file in files:
infile = os.path.join(root, wiki_file)
print(infile, end='\n', file=sys.stderr) # Logging progress.
with io.open(infile, 'r', encoding='utf8') as f_in:
for line in f_in:
# Each line is a separate json.
data = json.loads(line)
# The useful text under 'text' key.
yield data['text'].strip()
    # Iterating through paragraphs from the Wikipedia dump.
elif filetype == 'wikidump':
# Simply iterate through every line in the dump
# and treat each line as a paragraph.
with io.open(filename, 'r', encoding='utf8') as f_in:
for line_count, paragraph in enumerate(f_in):
                if line_count % 100000 == 0:
                    _msg = 'Processing line {}\n'.format(line_count)
                    print(_msg, file=sys.stderr)  # Logging progress.
                if paragraph:
yield paragraph | 0.002613 |
def parse_file_provider(uri):
"""Find the file provider for a URI."""
providers = {'gs': job_model.P_GCS, 'file': job_model.P_LOCAL}
# URI scheme detector uses a range up to 30 since none of the IANA
# registered schemes are longer than this.
provider_found = re.match(r'^([A-Za-z][A-Za-z0-9+.-]{0,29})://', uri)
if provider_found:
prefix = provider_found.group(1).lower()
else:
# If no provider is specified in the URI, assume that the local
# filesystem is being used. Availability and validity of the local
# file/directory will be checked later.
prefix = 'file'
if prefix in providers:
return providers[prefix]
else:
raise ValueError('File prefix not supported: %s://' % prefix) | 0.010526 |
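A short sketch of the detection logic (the paths are made up):

# parse_file_provider('gs://bucket/data/input.txt') -> job_model.P_GCS
# parse_file_provider('/tmp/input.txt')             -> job_model.P_LOCAL (no scheme)
# parse_file_provider('s3://bucket/input.txt')      -> raises ValueError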
def get_body(self, msg):
""" Extracts and returns the decoded body from an EmailMessage object"""
body = ""
charset = ""
if msg.is_multipart():
for part in msg.walk():
ctype = part.get_content_type()
cdispo = str(part.get('Content-Disposition'))
# skip any text/plain (txt) attachments
if ctype == 'text/plain' and 'attachment' not in cdispo:
body = part.get_payload(decode=True) # decode
charset = part.get_content_charset()
break
# not multipart - i.e. plain text, no attachments, keeping fingers crossed
else:
body = msg.get_payload(decode=True)
charset = msg.get_content_charset()
return body.decode(charset) | 0.004796 |
def unique_email_validator(form, field):
""" Username must be unique. This validator may NOT be customized."""
user_manager = current_app.user_manager
if not user_manager.email_is_available(field.data):
raise ValidationError(_('This Email is already in use. Please try another one.')) | 0.009836 |
def search_list(kb, from_=None, match_type=None,
page=None, per_page=None, unique=False):
"""Search "mapping from" for knowledge."""
# init
page = page or 1
per_page = per_page or 10
if kb.kbtype == models.KnwKB.KNWKB_TYPES['written_as']:
# get the base query
query = api.query_kb_mappings(
kbid=kb.id,
key=from_ or '',
match_type=match_type or 's'
).with_entities(models.KnwKBRVAL.m_key)
# if you want a 'unique' list
if unique:
query = query.distinct()
# run query and paginate
return [item.m_key for item in
pagination.RestfulSQLAlchemyPagination(
query, page=page or 1,
per_page=per_page or 10
).items]
return [] | 0.003257 |
def play(self, tone):
"""
Play the given *tone*. This can either be an instance of
:class:`~gpiozero.tones.Tone` or can be anything that could be used to
construct an instance of :class:`~gpiozero.tones.Tone`.
For example::
>>> from gpiozero import TonalBuzzer
>>> from gpiozero.tones import Tone
>>> b = TonalBuzzer(17)
>>> b.play(Tone("A4"))
>>> b.play(Tone(220.0)) # Hz
>>> b.play(Tone(60)) # middle C in MIDI notation
>>> b.play("A4")
>>> b.play(220.0)
>>> b.play(60)
"""
if tone is None:
self.value = None
else:
if not isinstance(tone, Tone):
tone = Tone(tone)
freq = tone.frequency
if self.min_tone.frequency <= tone <= self.max_tone.frequency:
self.pwm_device.pin.frequency = freq
self.pwm_device.value = 0.5
else:
raise ValueError("tone is out of the device's range") | 0.001864 |
def get_placeholder_image(width, height, name=None, fg_color=get_color('black'),
bg_color=get_color('grey'), text=None, font=u'Verdana.ttf',
fontsize=42, encoding=u'unic', mode='RGBA', fmt=u'PNG'):
"""Little spin-off from https://github.com/Visgean/python-placeholder
that not saves an image and instead returns it."""
size = (width, height)
text = text if text else '{0}x{1}'.format(width, height)
try:
font = ImageFont.truetype(font, size=fontsize, encoding=encoding)
except IOError:
font = ImageFont.load_default()
result_img = Image.new(mode, size, bg_color)
text_size = font.getsize(text)
text_img = Image.new("RGBA", size, bg_color)
#position for the text:
left = size[0] / 2 - text_size[0] / 2
top = size[1] / 2 - text_size[1] / 2
drawing = ImageDraw.Draw(text_img)
drawing.text((left, top),
text,
font=font,
fill=fg_color)
txt_img = ImageOps.fit(text_img, size, method=Image.BICUBIC, centering=(0.5, 0.5))
result_img.paste(txt_img)
file_obj = io.BytesIO()
txt_img.save(file_obj, fmt)
return file_obj.getvalue() | 0.005063 |
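A usage sketch (the dimensions, text and output path are arbitrary): the function
returns raw image bytes, e.g. for writing to a file or an HTTP response body.

png_bytes = get_placeholder_image(300, 200, text='300 x 200')
with open('placeholder.png', 'wb') as fh:  # hypothetical output path
    fh.write(png_bytes)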
def _get_fout_go(self):
"""Get the name of an output file based on the top GO term."""
assert self.goids, "NO VALID GO IDs WERE PROVIDED AS STARTING POINTS FOR HIERARCHY REPORT"
base = next(iter(self.goids)).replace(':', '')
upstr = '_up' if 'up' in self.kws else ''
return "hier_{BASE}{UP}.{EXT}".format(BASE=base, UP=upstr, EXT='txt') | 0.007979 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status
if hasattr(self, 'message') and self.message is not None:
_dict['message'] = self.message
return _dict | 0.0059 |
def _deleteRangeFromKNN(self, start=0, end=None):
"""
Removes any stored records within the range from start to
end. Noninclusive of end.
parameters
------------
start - integer representing the ROWID of the start of the deletion range,
    end - integer representing the ROWID of the end of the deletion range;
          if None, it defaults to one past the highest stored ROWID.
"""
prototype_idx = numpy.array(
self._knnclassifier.getParameter('categoryRecencyList'))
if end is None:
end = prototype_idx.max() + 1
idsIdxToDelete = numpy.logical_and(prototype_idx >= start,
prototype_idx < end)
idsToDelete = prototype_idx[idsIdxToDelete]
nProtos = self._knnclassifier._knn._numPatterns
self._knnclassifier._knn.removeIds(idsToDelete.tolist())
assert self._knnclassifier._knn._numPatterns == nProtos - len(idsToDelete) | 0.00222 |
def create_argument_parser():
"""Creates the argument parser for haas.
"""
parser = argparse.ArgumentParser(prog='haas')
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(haas.__version__))
verbosity = parser.add_mutually_exclusive_group()
verbosity.add_argument('-v', '--verbose', action='store_const', default=1,
dest='verbosity', const=2, help='Verbose output')
verbosity.add_argument('-q', '--quiet', action='store_const', const=0,
dest='verbosity', help='Quiet output')
parser.add_argument('-f', '--failfast', action='store_true', default=False,
help='Stop on first fail or error')
parser.add_argument('-c', '--catch', dest='catch_interrupt',
action='store_true', default=False,
help=('(Ignored) Catch ctrl-C and display results so '
'far'))
parser.add_argument('-b', '--buffer', action='store_true', default=False,
help='Buffer stdout and stderr during tests')
parser.add_argument(
'start', nargs='*', default=[os.getcwd()],
help=('One or more directories or dotted package/module names from '
'which to start searching for tests'))
parser.add_argument('-p', '--pattern', default='test*.py',
help="Pattern to match tests ('test*.py' default)")
parser.add_argument('-t', '--top-level-directory', default=None,
help=('Top level directory of project (defaults to '
'start directory)'))
_add_log_level_option(parser)
return parser | 0.000576 |
def _update_docinfo(self):
"""Update the PDF's DocumentInfo dictionary to match XMP metadata
The standard mapping is described here:
https://www.pdfa.org/pdfa-metadata-xmp-rdf-dublin-core/
"""
self._pdf.docinfo # Touch object to ensure it exists
for uri, element, docinfo_name, converter in self.DOCINFO_MAPPING:
qname = QName(uri, element)
try:
value = self[qname]
except KeyError:
if docinfo_name in self._pdf.docinfo:
del self._pdf.docinfo[docinfo_name]
continue
if converter:
try:
value = converter.docinfo_from_xmp(value)
except ValueError:
warn(
"The DocumentInfo field {} could not be updated from XMP".format(
docinfo_name
)
)
value = None
if value is None:
if docinfo_name in self._pdf.docinfo:
del self._pdf.docinfo[docinfo_name]
continue
value = re_xml_illegal_chars.sub('', value)
try:
# Try to save pure ASCII
self._pdf.docinfo[docinfo_name] = value.encode('ascii')
except UnicodeEncodeError:
# qpdf will serialize this as a UTF-16 with BOM string
self._pdf.docinfo[docinfo_name] = value | 0.001971 |
def verify(self, data, signature):
"""
Verify signed data using the Montgomery public key stored by this XEdDSA instance.
:param data: A bytes-like object containing the data that was signed.
:param signature: A bytes-like object encoding the signature with length
SIGNATURE_SIZE.
:returns: A boolean indicating whether the signature was valid or not.
"""
cls = self.__class__
if not isinstance(data, bytes):
raise TypeError("The data parameter must be a bytes-like object.")
if not isinstance(signature, bytes):
raise TypeError("Wrong type passed for the signature parameter.")
if len(signature) != cls.SIGNATURE_SIZE:
raise ValueError("Invalid value passed for the signature parameter.")
return cls._verify(
bytearray(data),
bytearray(signature),
cls._mont_pub_to_ed_pub(bytearray(self.__mont_pub))
) | 0.005061 |
def get_from_area(self, lat_min, lon_min, lat_max, lon_max, picture_size=None, set_=None, map_filter=None):
"""
Get all available photos for a specific bounding box
:param lat_min:
Minimum latitude of the bounding box
:type lat_min: float
:param lon_min:
Minimum longitude of the bounding box
:type lon_min: float
:param lat_max:
Maximum latitude of the bounding box
:type lat_max: float
:param lon_max:
Maximum longitude of the bounding box
:type lon_max: float
:param picture_size:
This can be: original, medium (*default*), small, thumbnail, square, mini_square
:type picture_size: basestring
:param set_:
This can be: public, popular or user-id; where user-id is the specific id of a user (as integer)
:type set_: basestring/int
:param map_filter:
Whether to return photos that look better together; when True, tries to avoid returning photos of the same
location
:type map_filter: bool
:return: Returns the full dataset of all available photos
"""
page_size = 100
page = 0
result = self._request(lat_min, lon_min, lat_max, lon_max, page * page_size, (page + 1) * page_size,
picture_size, set_, map_filter)
total_photos = result['count']
if total_photos < page_size:
return result
page += 1
pages = (total_photos / page_size) + 1
while page < pages:
new_result = self._request(lat_min, lon_min, lat_max, lon_max, page * page_size, (page + 1) * page_size,
picture_size, set_, map_filter)
result['photos'].extend(new_result['photos'])
page += 1
return result | 0.004215 |
def enable(self):
"""
Enable antivirus on the engine
"""
self.update(antivirus_enabled=True,
virus_mirror='update.nai.com/Products/CommonUpdater' if \
not self.get('virus_mirror') else self.virus_mirror,
antivirus_update_time=self.antivirus_update_time if \
self.get('antivirus_update_time') else 21600000) | 0.014085 |
def get(self):
"""Returns the spanning-tree configuration as a dict object
The dictionary object represents the entire spanning-tree
configuration derived from the nodes running config. This
includes both globally configuration attributes as well as
interfaces and instances. See the StpInterfaces and StpInstances
classes for the key/value pair definitions.
Note:
See the individual classes for detailed message structures
Returns:
A Python dictionary object of key/value pairs the represent
the entire supported spanning-tree configuration::
{
"mode": [mstp, none],
"interfaces": {...},
"instances": {...}
}
"""
return dict(interfaces=self.interfaces.getall(),
instances=self.instances.getall()) | 0.002148 |
def node_applications(self, state=None, user=None):
"""
With the Applications API, you can obtain a collection of resources,
each of which represents an application.
:param str state: application state
:param str user: user name
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
:raises yarn_api_client.errors.IllegalArgumentError: if `state`
incorrect
"""
path = '/ws/v1/node/apps'
legal_states = set([s for s, _ in ApplicationState])
if state is not None and state not in legal_states:
msg = 'Application State %s is illegal' % (state,)
raise IllegalArgumentError(msg)
loc_args = (
('state', state),
('user', user))
params = self.construct_parameters(loc_args)
return self.request(path, **params) | 0.002155 |
def _user_input() -> str:
"""
A helper function which waits for user multi-line input.
:return: A string input by user, separated by ``'\\n'``.
"""
lines = []
try:
while True:
line = input()
if line != '':
lines.append(line)
else:
break
    except (EOFError, KeyboardInterrupt):
        pass
    return '\n'.join(lines) | 0.002427
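A hedged, non-interactive usage sketch: builtins.input is patched so the helper can be exercised without a console, and the EOFError plays the role of a real Ctrl-D.
from unittest import mock

def fake_input(_lines=iter(["first line", "second line"])):
    try:
        return next(_lines)
    except StopIteration:
        raise EOFError

with mock.patch("builtins.input", fake_input):
    assert _user_input() == "first line\nsecond line"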
def managed_context_entry(process_name,
classname,
token,
time_qualifier,
trigger_frequency='every 60',
state_machine_name=STATE_MACHINE_DISCRETE,
is_on=True,
exchange=EXCHANGE_MANAGED_WORKER,
blocking_type=BLOCKING_NORMAL,
present_on_boxes=None,
time_grouping=1,
arguments=None,
queue=None,
routing=None,
source=None,
sink=None,
pid_file=None,
log_file=None):
""" forms process context entry """
_ROUTING_PREFIX = 'routing_'
_QUEUE_PREFIX = 'queue_'
if arguments is not None:
assert isinstance(arguments, dict)
process_entry = ManagedProcessEntry(
process_name=process_name,
trigger_frequency=trigger_frequency,
state_machine_name=state_machine_name,
is_on=is_on,
blocking_type=blocking_type,
classname=classname,
token=token,
source=source,
sink=sink,
mq_queue=queue if queue is not None else _QUEUE_PREFIX + token + time_qualifier,
mq_routing_key=routing if routing is not None else _ROUTING_PREFIX + token + time_qualifier,
mq_exchange=exchange,
present_on_boxes=present_on_boxes,
arguments=arguments if arguments is not None else dict(),
time_qualifier=time_qualifier,
time_grouping=time_grouping,
log_filename=log_file if log_file is not None else token + time_qualifier + '.log',
pid_filename=pid_file if pid_file is not None else token + time_qualifier + '.pid')
return process_entry | 0.002616 |
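An illustrative call only; the process and token names and the argument values below are made up, and the defaults noted in the trailing comment follow from the prefix logic above.
entry = managed_context_entry(
    process_name='site_hourly_aggregator',                  # hypothetical worker name
    classname='workers.site_aggregator.SiteAggregator',     # hypothetical class path
    token='site',
    time_qualifier='_hourly',
    trigger_frequency='every 300',
    arguments={'batch_size': 500})
# With queue/routing left as None they default to 'queue_site_hourly' and
# 'routing_site_hourly'; log/pid files default to 'site_hourly.log' and 'site_hourly.pid'.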
def add_to_role(server_context, role, user_id=None, email=None, container_path=None):
"""
Add user/group to security role
:param server_context: A LabKey server context. See utils.create_server_context.
:param role: (from get_roles) to add user to
:param user_id: to add permissions role to (must supply this or email or both)
:param email: to add permissions role to (must supply this or user_id or both)
:param container_path: additional project path context
:return:
"""
return __make_security_role_api_request(server_context, 'addAssignment.api', role, user_id=user_id, email=email,
container_path=container_path) | 0.008559 |
def plotstuff(self, T=[0, 1000]):
"""
Create a scatter plot of the contents of the database,
with entries on the interval T.
Parameters
----------
T : list
Time interval.
Returns
-------
None
See also
--------
GDF.select_neurons_interval
"""
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
neurons = self.neurons()
i = 0
for x in self.select_neurons_interval(neurons, T):
ax.plot(x, np.zeros(x.size) + neurons[i], 'o',
markersize=1, markerfacecolor='k', markeredgecolor='k',
alpha=0.25)
i += 1
ax.set_xlabel('time (ms)')
ax.set_ylabel('neuron ID')
ax.set_xlim(T[0], T[1])
ax.set_ylim(neurons.min(), neurons.max())
ax.set_title('database content on T = [%.0f, %.0f]' % (T[0], T[1])) | 0.007042 |
def _build_list_items(self, matches):
"""Returns the HTML list items for the next matches that have a larger (or equal) header
        level compared to the first header's level.
This method mutatively removes elements from the front of matches as it processes each
element. This method assumes matches contains at least one match.
PARAMETERS:
matches -- list of tuples; each tuple corresponds to the groups matched by the header_regex.
RETURNS:
list of str; the table of contents as a list of lines.
"""
assert len(matches) > 0, "Should be at least one match, by assumption"
lines = []
current_level = matches[0][0]
while matches and current_level <= matches[0][0]:
level, _, tag_id, title = matches[0]
if current_level < level:
lines.extend(self._build_list(matches, level))
continue
if tag_id:
lines.append('<li><a href="#{0}">{1}</a></li>'.format(tag_id, title))
else:
lines.append('<li>{0}</li>'.format(title))
matches.pop(0)
return lines | 0.005128 |
def add_entry(self, timestamp, data):
"""Adds a new data entry to the TimeSeries.
:param timestamp: Time stamp of the data.
            This either has to be a float representing a UNIX epoch timestamp
or a string containing a timestamp in the given format.
:param list data: A list containing the actual dimension values.
:raise: Raises a :py:exc:`ValueError` if data does not contain as many dimensions as
defined in __init__.
"""
if not isinstance(data, list):
data = [data]
if len(data) != self._dimensionCount:
raise ValueError("data does contain %s instead of %s dimensions.\n %s" % (len(data), self._dimensionCount, data))
self._normalized = self._predefinedNormalized
self._sorted = self._predefinedSorted
tsformat = self._timestampFormat
if tsformat is not None:
timestamp = TimeSeries.convert_timestamp_to_epoch(timestamp, tsformat)
self._timeseriesData.append([float(timestamp)] + [float(dimensionValue) for dimensionValue in data]) | 0.006289 |
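The exact behaviour of TimeSeries.convert_timestamp_to_epoch is not shown here; assuming a UTC strptime-based conversion, the row that gets appended looks roughly like this self-contained sketch (to_epoch is a stand-in helper).
import calendar
import time

def to_epoch(timestamp, tsformat):
    # assumed UTC conversion; the real class method may handle timezones differently
    return calendar.timegm(time.strptime(timestamp, tsformat))

row = [float(to_epoch("2023-05-01 12:00:00", "%Y-%m-%d %H:%M:%S"))] + [float(v) for v in [3.5]]
print(row)   # [1682942400.0, 3.5]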
def mean_interval(self, name, alpha=_alpha, **kwargs):
"""
Interval assuming gaussian posterior.
"""
data = self.get(name,**kwargs)
#return ugali.utils.stats.mean_interval(data,alpha)
return mean_interval(data,alpha) | 0.018939 |
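A self-contained sketch of what a gaussian mean interval typically looks like; the real mean_interval used here may differ, and the alpha=0.32 default below is an assumption giving a roughly 68% interval.
import numpy as np
from scipy.stats import norm

def gaussian_mean_interval(data, alpha=0.32):
    data = np.asarray(data)
    z = norm.ppf(1.0 - alpha / 2.0)          # two-sided critical value
    mean, std = data.mean(), data.std()
    return mean, (mean - z * std, mean + z * std)

mean, (low, high) = gaussian_mean_interval(np.random.normal(10.0, 2.0, size=10000))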
def follow_info(self, index=None, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-info.html>`_
:arg index: A comma-separated list of index patterns; use `_all` to
perform the operation on all indices
"""
return self.transport.perform_request(
"GET", _make_path(index, "_ccr", "info"), params=params
) | 0.004717 |
def match_time(self, value):
'''Search for time information in the string'''
m = self.REGEX_TIME.search(value)
time = datetime.datetime.utcnow().time()
if m:
time = datetime.time(int(m.group(1)), int(m.group(2)))
value = self.REGEX_TIME.sub('', value)
return (time, value) | 0.009868 |
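A self-contained illustration of the same idea, with a stand-in REGEX_TIME pattern (the real class attribute is not shown above).
import datetime
import re

REGEX_TIME = re.compile(r'(\d{1,2}):(\d{2})')   # assumed HH:MM pattern

def extract_time(value):
    m = REGEX_TIME.search(value)
    time_ = datetime.datetime.utcnow().time()   # fallback: current UTC time
    if m:
        time_ = datetime.time(int(m.group(1)), int(m.group(2)))
        value = REGEX_TIME.sub('', value)
    return time_, value

print(extract_time("dinner at 19:30 tomorrow"))   # (datetime.time(19, 30), 'dinner at  tomorrow')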
def _J(self, theta):
"""
Implements the order dependent family of functions defined in equations
4 to 7 in the reference paper.
"""
if self.order == 0:
return np.pi - theta
elif self.order == 1:
return tf.sin(theta) + (np.pi - theta) * tf.cos(theta)
elif self.order == 2:
return 3. * tf.sin(theta) * tf.cos(theta) + \
(np.pi - theta) * (1. + 2. * tf.cos(theta) ** 2) | 0.004193 |
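For reference, a NumPy restatement of the same order-dependent functions; the original operates on TensorFlow tensors, so this sketch is only for checking values.
import numpy as np

def J(theta, order):
    if order == 0:
        return np.pi - theta
    if order == 1:
        return np.sin(theta) + (np.pi - theta) * np.cos(theta)
    if order == 2:
        return 3.0 * np.sin(theta) * np.cos(theta) + \
            (np.pi - theta) * (1.0 + 2.0 * np.cos(theta) ** 2)
    raise ValueError("order must be 0, 1 or 2")

print(J(np.pi / 2, 1))   # 1.0, since sin(pi/2) + (pi/2) * cos(pi/2) = 1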
def get_redacted_args(entrypoint, *args, **kwargs):
""" Utility function for use with entrypoints that are marked with
``sensitive_arguments`` -- e.g. :class:`nameko.rpc.Rpc` and
:class:`nameko.events.EventHandler`.
:Parameters:
entrypoint : :class:`~nameko.extensions.Entrypoint`
The entrypoint that fired.
args : tuple
Positional arguments for the method call.
kwargs : dict
Keyword arguments for the method call.
The entrypoint should have a ``sensitive_arguments`` attribute, the value
of which is a string or tuple of strings specifying the arguments or
partial arguments that should be redacted. To partially redact an argument,
the following syntax is used::
<argument-name>.<dict-key>[<list-index>]
:Returns:
A dictionary as returned by :func:`inspect.getcallargs`, but with
sensitive arguments or partial arguments redacted.
.. note::
This function does not raise if one of the ``sensitive_arguments``
doesn't match or partially match the calling ``args`` and ``kwargs``.
This allows "fuzzier" pattern matching (e.g. redact a field if it is
present, and otherwise do nothing).
        To avoid exposing sensitive arguments through a typo, it is recommended
to test the configuration of each entrypoint with
``sensitive_arguments`` individually. For example:
.. code-block:: python
class Service(object):
@rpc(sensitive_arguments="foo.bar")
def method(self, foo):
pass
container = ServiceContainer(Service, {})
entrypoint = get_extension(container, Rpc, method_name="method")
# no redaction
foo = "arg"
            expected = {'foo': "arg"}
assert get_redacted_args(entrypoint, foo) == expected
# 'bar' key redacted
foo = {'bar': "secret value", 'baz': "normal value"}
expected = {'foo': {'bar': "********", 'baz': "normal value"}}
assert get_redacted_args(entrypoint, foo) == expected
.. seealso::
The tests for this utility demonstrate its full usage:
:class:`test.test_utils.TestGetRedactedArgs`
"""
sensitive_arguments = entrypoint.sensitive_arguments
if isinstance(sensitive_arguments, six.string_types):
sensitive_arguments = (sensitive_arguments,)
method = getattr(entrypoint.container.service_cls, entrypoint.method_name)
callargs = inspect.getcallargs(method, None, *args, **kwargs)
del callargs['self']
    # make a deepcopy before redacting so that "partial" redactions aren't
# applied to a referenced object
callargs = deepcopy(callargs)
def redact(data, keys):
key = keys[0]
if len(keys) == 1:
try:
data[key] = REDACTED
except (KeyError, IndexError, TypeError):
pass
else:
if key in data:
redact(data[key], keys[1:])
for variable in sensitive_arguments:
keys = []
for dict_key, list_index in re.findall(r"(\w+)|\[(\d+)\]", variable):
if dict_key:
keys.append(dict_key)
elif list_index:
keys.append(int(list_index))
if keys[0] in callargs:
redact(callargs, keys)
return callargs | 0.00029 |
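A self-contained look at just the path-tokenising step of the loop above, showing how a partial-redaction spec such as "foo.bar[0]" becomes a key list.
import re

variable = "foo.bar[0]"
keys = []
for dict_key, list_index in re.findall(r"(\w+)|\[(\d+)\]", variable):
    keys.append(dict_key if dict_key else int(list_index))
print(keys)   # ['foo', 'bar', 0]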
def expand_time(str_time, default_unit='s', multiplier=1):
"""
helper for above functions
"""
parser = re.compile(r'(\d+)([a-zA-Z]*)')
parts = parser.findall(str_time)
result = 0.0
for value, unit in parts:
value = int(value)
unit = unit.lower()
if unit == '':
unit = default_unit
if unit == 'ms':
result += value * 0.001
continue
elif unit == 's':
result += value
continue
elif unit == 'm':
result += value * 60
continue
elif unit == 'h':
result += value * 60 * 60
continue
elif unit == 'd':
result += value * 60 * 60 * 24
continue
elif unit == 'w':
result += value * 60 * 60 * 24 * 7
continue
else:
raise ValueError(
"String contains unsupported unit %s: %s" % (unit, str_time))
return int(result * multiplier) | 0.000991 |
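A few usage checks for the expansion above; the expected values follow directly from the unit table in the function.
assert expand_time("1h30m") == 5400                    # 3600 + 1800 seconds
assert expand_time("90") == 90                         # bare number uses default_unit='s'
assert expand_time("2m", multiplier=1000) == 120000    # scale the result, e.g. to milliseconds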
def saveSV(self, fname, comments=None, metadata=None, printmetadict=None,
dialect = None, delimiter=None, doublequote=True,
lineterminator='\n', escapechar = None, quoting=csv.QUOTE_MINIMAL,
quotechar='"', skipinitialspace=False,
stringifier=None, verbosity=DEFAULT_VERBOSITY):
"""
Save the tabarray to a single flat separated variable (CSV) text file.
Method wraps::
tabular.io.saveSV.
See docstring of tabular.io.saveSV, or Tabular reference documentation, for more information.
"""
io.saveSV(fname,self, comments, metadata, printmetadict,
dialect, delimiter, doublequote, lineterminator, escapechar, quoting, quotechar,skipinitialspace,stringifier=stringifier,verbosity=verbosity) | 0.029573 |
def check_args(func):
"""A decorator that performs type checking using type hints at runtime::
@check_args
def fun(a: int):
print(f'fun is being called with parameter {a}')
# this will raise a TypeError describing the issue without the function being called
fun('not an int')
"""
@wraps(func)
def check(*args, **kwargs): # pylint: disable=C0111
sig = inspect.signature(func)
found_errors = []
binding = None
try:
binding = sig.bind(*args, **kwargs)
except TypeError as te:
for name, metadata in sig.parameters.items():
                # Comparison with the error message as a string :(
# Know a nicer way? Please drop me a message
if metadata.default == inspect.Parameter.empty:
# copy from inspect module, it is the very same error message
error_in_case = 'missing a required argument: {arg!r}'.format(arg=name)
if str(te) == error_in_case:
found_errors.append(IssueDescription(
name, sig.parameters[name].annotation, None, True))
# NOTE currently only find one, at most, detecting what else
# is missing is tricky if not impossible
if not found_errors:
raise DetailedTypeError([IssueDescription(None, None, None, None, str(te))])
raise DetailedTypeError(found_errors)
for name, value in binding.arguments.items():
if not check_type(value, sig.parameters[name].annotation):
found_errors.append(IssueDescription(
name, sig.parameters[name].annotation, value, False))
if found_errors:
raise DetailedTypeError(found_errors)
return func(*args, **kwargs)
return check | 0.002621 |
def _transform_abstract(plpy, module_ident):
"""Transform abstract, bi-directionally.
    Transforms an abstract using one of the content columns
    ('abstract' or 'html') to determine which direction the transform
    will go (cnxml->html or html->cnxml).
    The transform is applied to whichever value is present to produce
    the other one. If no value is supplied, the trigger raises an error.
If both values are supplied, the trigger will skip.
"""
plan = plpy.prepare("""\
SELECT a.abstractid, a.abstract, a.html
FROM modules AS m NATURAL JOIN abstracts AS a
WHERE m.module_ident = $1""", ('integer',))
result = plpy.execute(plan, (module_ident,), 1)[0]
abstractid, cnxml, html = (
result['abstractid'], result['abstract'], result['html'])
if cnxml is not None and html is not None:
return # skip
# TODO Prevent blank abstracts (abstract = null & html = null).
msg = "produce {}->{} for abstractid={}"
if cnxml is None:
# Transform html->cnxml
msg = msg.format('html', 'cnxml', abstractid)
content = html
column = 'abstract'
transform_func = transform_abstract_to_cnxml
else:
# Transform cnxml->html
msg = msg.format('cnxml', 'html', abstractid)
content = cnxml
column = 'html'
transform_func = transform_abstract_to_html
content, messages = transform_func(content, module_ident, plpy)
plan = plpy.prepare(
"UPDATE abstracts SET {} = $1 WHERE abstractid = $2".format(column),
('text', 'integer'))
plpy.execute(plan, (content, abstractid,))
return msg | 0.000618 |
async def process(client: Client, transaction_signed_raw: str) -> ClientResponse:
"""
POST a transaction raw document
:param client: Client to connect to the api
:param transaction_signed_raw: Transaction signed raw document
:return:
"""
return await client.post(MODULE + '/process', {'transaction': transaction_signed_raw}, rtype=RESPONSE_AIOHTTP) | 0.007958 |
def convert_unicode_field(string):
"""
Convert a Unicode field into the corresponding list of Unicode strings.
The (input) Unicode field is a Unicode string containing
one or more Unicode codepoints (``xxxx`` or ``U+xxxx`` or ``xxxx_yyyy``),
separated by a space.
:param str string: the (input) Unicode field
:rtype: list of Unicode strings
"""
values = []
for codepoint in [s for s in string.split(DATA_FILE_CODEPOINT_SEPARATOR) if (s != DATA_FILE_VALUE_NOT_AVAILABLE) and (len(s) > 0)]:
values.append(u"".join([hex_to_unichr(c) for c in codepoint.split(DATA_FILE_CODEPOINT_JOINER)]))
return values | 0.004594 |
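A self-contained sketch of the codepoint handling; hex_to_unichr and the separator constants are not shown above, so plain stand-ins are used here.
def hex_to_codepoint(s):
    # stand-in for hex_to_unichr: accepts "0061" or "U+0061"
    return chr(int(s.lstrip("Uu+"), 16))

field = "0061 U+0062 0063_0301"   # 'a', 'b', and 'c' joined with a combining acute
values = ["".join(hex_to_codepoint(c) for c in cp.split("_")) for cp in field.split(" ")]
print(values)   # ['a', 'b', 'ć']  (the last entry is 'c' + U+0301)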
def get_contact_notes(
self,
obj_id,
search='',
start=0,
limit=0,
order_by='',
order_by_dir='ASC'
):
"""
Get a list of a contact's notes
:param obj_id: int Contact ID
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:return: dict|str
"""
parameters = {
'search': search,
'start': start,
'limit': limit,
'orderBy': order_by,
'orderByDir': order_by_dir,
}
response = self._client.session.get(
'{url}/{id}/notes'.format(
url=self.endpoint_url, id=obj_id
),
params=parameters
)
return self.process_response(response) | 0.003497 |
def before_loop(self, coro):
"""A function that also acts as a decorator to register a coroutine to be
called before the loop starts running. This is useful if you want to wait
for some bot state before the loop starts,
such as :meth:`discord.Client.wait_until_ready`.
Parameters
------------
coro: :term:`py:awaitable`
The coroutine to register before the loop runs.
Raises
-------
TypeError
The function was not a coroutine.
"""
if not (inspect.iscoroutinefunction(coro) or inspect.isawaitable(coro)):
raise TypeError('Expected coroutine or awaitable, received {0.__name__!r}.'.format(type(coro)))
self._before_loop = coro | 0.007813 |
def _finalize_response(self, response):
""" Convert the ``Response`` object into django's ``HttpResponse``
:return: django's ``HttpResponse``
"""
res = HttpResponse(content=response.content,
content_type=self._get_content_type())
# status_code is set separately to allow zero
res.status_code = response.code
return res | 0.004963 |
def distance_home(GPS_RAW):
'''distance from first fix point'''
global first_fix
if (hasattr(GPS_RAW, 'fix_type') and GPS_RAW.fix_type < 2) or \
(hasattr(GPS_RAW, 'Status') and GPS_RAW.Status < 2):
return 0
    if first_fix is None:
first_fix = GPS_RAW
return 0
return distance_two(GPS_RAW, first_fix) | 0.011331 |
def LR_collection(hyper_lr, args):
"""This custom function implements a proprietary IntegralDefense Live Response collection package.
    'None' will be immediately returned if you do not have this package.
assumed to be defined by a 'lr_path' variable in the configuration file.
:dependencies lr_package_path: Path to the ID LR package on system cbinterface is running
:dependencies streamline_path: Path to proprietary post-collection LR streamline package
:returns: True on success, False on Failure, None if a requirement is missing
"""
# Get configuration items
config = ConfigParser()
config.read(CONFIG_PATH)
lr_analysis_path = config['ID-LR']['lr_package_path']
if not os.path.exists(lr_analysis_path):
LOGGER.info("LR package not defined")
return None
lr_filename = lr_analysis_path[lr_analysis_path.rfind('/')+1:]
lr_dirname = lr_filename[:lr_filename.rfind('.')]
sensor_dir = config['ID-LR']['sensor_path']
if not sensor_dir:
LOGGER.info("sensor_dir not defined in configuration. Using C:\\")
sensor_dir = "C:\\" # Default
streamline_path = config['ID-LR']['streamline_path']
if not os.path.exists(streamline_path):
LOGGER.info("Path to streamline.py doesn't exist.")
return None
collect_cmd = config['ID-LR']['collect_cmd']
if not collect_cmd:
LOGGER.info("Collection command missing")
return None
lr_session = hyper_lr.go_live()
def lr_cleanup(lr_session):
# delete our LR tools
try:
dir_output = lr_session.list_directory(sensor_dir)
for dir_item in dir_output:
if 'DIRECTORY' in dir_item['attributes'] and dir_item['filename'] == lr_dirname:
print("[+] Found existing LR directory.. deleting")
command_str = "powershell.exe Remove-Item {} -Force -Recurse".format(sensor_dir + lr_dirname)
result = lr_session.create_process(command_str)
if result != b'':
LOGGER.warn(" ! Problem deleting {}".format(sensor_dir + lr_dirname))
if 'ARCHIVE' in dir_item['attributes'] and dir_item['filename'] == lr_filename:
print("[+] Existing LR package found.. deleting..")
try:
lr_session.delete_file(sensor_dir + dir_item['filename'])
except TypeError as e:
                        if 'startswith first arg must be bytes' in str(e):  # might be fixed in newer cbapi versions
LOGGER.warn("Error deleting {}.. trying to move on".format(lr_filename))
except live_response_api.LiveResponseError as e:
if 'ERROR_PATH_NOT_FOUND' not in str(e):
print("[ERROR] LiveResponseError: {}".format(e))
return False
# LR remnants already on host?
lr_cleanup(lr_session)
print("[+] Dropping latest LR on host..")
filedata = None
with open(lr_analysis_path, 'rb') as f:
filedata = f.read()
try:
lr_session.put_file(filedata, sensor_dir + lr_filename)
except Exception as e:
# If 'ERROR_FILE_EXISTS' in errmsg, log the error, but try to continue with existing package
if 'ERROR_FILE_EXISTS' not in str(e):
LOGGER.error("Unknown Error: {}".format(str(e)))
return False
# execute lr.exe to extract files
# unzip = "C:\\lr.exe -o \'C:\\' -y"
extract_cmd = " -o \'" + sensor_dir + "' -y"
unzip = sensor_dir + lr_filename + extract_cmd
print("[+] Extracting LR ..")
lr_session.create_process(unzip)
# execute collection
#collect = "C:\\lr\\win32\\tools\\collect.bat"
collect = sensor_dir + lr_dirname + collect_cmd
collect_filename = collect_cmd[collect_cmd.rfind('\\')+1:]
time.sleep(1)
print("[+] Executing collect.bat..")
start_time = time.time()
lr_session.create_process(collect, wait_for_output=False, wait_for_completion=False) #, wait_timeout=900)
hyper_lr.wait_for_process_to_finish(collect_filename)
    collect_duration = datetime.timedelta(seconds=(time.time() - start_time))
print("[+] Collect completed in {}".format(collect_duration))
# Collect resulting output file
outputdir = sensor_dir + lr_dirname + "\\win32\\output\\"
localfile = None
for dir_item in lr_session.list_directory(outputdir):
if 'ARCHIVE' in dir_item['attributes'] and dir_item['filename'].endswith('7z'):
# use lerc, if available
if hyper_lr.lerc_session:
lerc = hyper_lr.lerc_session.get_host(hyper_lr.hostname)
command = lerc.Upload(outputdir+dir_item['filename'])
#command = hyper_lr.lerc_session.check_command()
# wait for client to complete the command
print(" ~ Issued upload command to lerc. Waiting for command to finish..")
if command.wait_for_completion():
print(" ~ lerc command complete. Streaming LR from lerc server..")
command.get_results(file_path=dir_item['filename'])
if command:
print("[+] lerc command results: ")
print(command)
file_path = command.server_file_path
filename = file_path[file_path.rfind('/')+1:]
print()
print("[+] Wrote {}".format(dir_item['filename']))
localfile = dir_item['filename']
else:
LOGGER.error("problem waiting for lerc client to complete command")
else:
localfile = hyper_lr.getFile_with_timeout(outputdir+dir_item['filename'], localfname=dir_item['filename'])
# Delete leftovers from sensor
lr_cleanup(lr_session)
# Call steamline on the 7z lr package
print("[+] Starting streamline on {}".format(localfile))
args = shlex.split(streamline_path + " " + localfile)
subprocess.call(args, stdout=subprocess.PIPE)
print("[+] Streamline complete")
return True | 0.005144 |
def inceptionv4(pretrained=True):
r"""InceptionV4 model architecture from the
`"Inception-v4, Inception-ResNet..." <https://arxiv.org/abs/1602.07261>`_ paper.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = InceptionV4()
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['imagenet']))
return model | 0.005038 |
def action_object(self, obj, **kwargs):
"""
Stream of most recent actions where obj is the action_object.
Keyword arguments will be passed to Action.objects.filter
"""
check(obj)
return obj.action_object_actions.public(**kwargs) | 0.007246 |
def list_global_ips(self, version=None, identifier=None, **kwargs):
"""Returns a list of all global IP address records on the account.
:param int version: Only returns IPs of this version (4 or 6)
:param string identifier: If specified, the list will only contain the
global ips matching this network identifier.
"""
if 'mask' not in kwargs:
mask = ['destinationIpAddress[hardware, virtualGuest]',
'ipAddress']
kwargs['mask'] = ','.join(mask)
_filter = utils.NestedDict({})
if version:
ver = utils.query_filter(version)
_filter['globalIpRecords']['ipAddress']['subnet']['version'] = ver
if identifier:
subnet_filter = _filter['globalIpRecords']['ipAddress']['subnet']
subnet_filter['networkIdentifier'] = utils.query_filter(identifier)
kwargs['filter'] = _filter.to_dict()
return self.account.getGlobalIpRecords(**kwargs) | 0.001938 |