text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---
async def process_message(self, message, wait=True):
"""Process a message to see if it wakes any waiters.
This will check waiters registered to see if they match the given
message. If so, they are awoken and passed the message. All matching
waiters will be woken.
This method returns False if the message matched no waiters and was
therefore ignored.
Normally you want to use wait=True (the default behavior) to guarantee
that all callbacks have finished before this method returns. However,
sometimes that can cause a deadlock if those callbacks would
themselves invoke behavior that requires whatever is waiting for this
method to be alive. In that case you can pass wait=False to ensure
that the caller of this method does not block.
Args:
message (dict or object): The message that we should process
wait (bool): Whether to block until all callbacks have finished
or to return once the callbacks have been launched.
Returns:
bool: True if at least one waiter matched, otherwise False.
"""
to_check = deque([self._waiters])
ignored = True
while len(to_check) > 0:
context = to_check.popleft()
waiters = context.get(OperationManager._LEAF, [])
for waiter in waiters:
if isinstance(waiter, asyncio.Future):
waiter.set_result(message)
else:
try:
await _wait_or_launch(self._loop, waiter, message, wait)
except: #pylint:disable=bare-except;We can't let a user callback break this routine
self._logger.warning("Error calling every_match callback, callback=%s, message=%s",
waiter, message, exc_info=True)
ignored = False
for key in context:
if key is OperationManager._LEAF:
continue
message_val = _get_key(message, key)
if message_val is _MISSING:
continue
next_level = context[key]
if message_val in next_level:
to_check.append(next_level[message_val])
return not ignored | 0.002949 |
async def send_audio(self, url, user, options=None):
"""
Send an audio message.
:param url: link to the audio file
:param user: target user
:param options: optional extra send parameters, passed through unchanged
:return: result of the underlying ``self.chat.send_audio`` call
"""
return await self.chat.send_audio(url, user, options) | 0.007067 |
def pack(self, data):
"""See :func:`~bitstruct.pack_dict()`.
"""
try:
return self.pack_any(data)
except KeyError as e:
raise Error('{} not found in data dictionary'.format(str(e))) | 0.008403 |
def derivative(self, x):
"""Derivative of the product space operator.
Parameters
----------
x : `domain` element
The point to take the derivative in
Returns
-------
derivative : linear `ProductSpaceOperator`
The derivative
Examples
--------
>>> r3 = odl.rn(3)
>>> pspace = odl.ProductSpace(r3, r3)
>>> I = odl.IdentityOperator(r3)
>>> x = pspace.element([[1, 2, 3], [4, 5, 6]])
Example with linear operator (derivative is itself)
>>> prod_op = ProductSpaceOperator([[0, I], [0, 0]],
... domain=pspace, range=pspace)
>>> prod_op(x)
ProductSpace(rn(3), 2).element([
[ 4., 5., 6.],
[ 0., 0., 0.]
])
>>> prod_op.derivative(x)(x)
ProductSpace(rn(3), 2).element([
[ 4., 5., 6.],
[ 0., 0., 0.]
])
Example with affine operator
>>> residual_op = I - r3.element([1, 1, 1])
>>> op = ProductSpaceOperator([[0, residual_op], [0, 0]],
... domain=pspace, range=pspace)
Calling operator gives offset by [1, 1, 1]
>>> op(x)
ProductSpace(rn(3), 2).element([
[ 3., 4., 5.],
[ 0., 0., 0.]
])
Derivative of affine operator does not have this offset
>>> op.derivative(x)(x)
ProductSpace(rn(3), 2).element([
[ 4., 5., 6.],
[ 0., 0., 0.]
])
"""
# Lazy import to improve `import odl` time
import scipy.sparse
# Short circuit optimization
if self.is_linear:
return self
deriv_ops = [op.derivative(x[col]) for op, col in zip(self.ops.data,
self.ops.col)]
data = np.empty(len(deriv_ops), dtype=object)
data[:] = deriv_ops
indices = [self.ops.row, self.ops.col]
shape = self.ops.shape
deriv_matrix = scipy.sparse.coo_matrix((data, indices), shape)
return ProductSpaceOperator(deriv_matrix, self.domain, self.range) | 0.000897 |
def make_color_tuple(color):
"""
Turn a hex color string like "#000000" into (0, 0, 0)
or "#FFFFFF" into (255, 255, 255).
"""
R = color[1:3]
G = color[3:5]
B = color[5:7]
R = int(R, 16)
G = int(G, 16)
B = int(B, 16)
return R, G, B | 0.003861 |
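A quick worked example for make_color_tuple above; the input is assumed to be a 7-character hex string including the leading '#':
print(make_color_tuple("#FF8800"))  # (255, 136, 0)
print(make_color_tuple("#000000"))  # (0, 0, 0)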
def cnmf(S, rank, niter=500, hull=False):
"""(Convex) Non-Negative Matrix Factorization.
Parameters
----------
S: np.array(p, N)
Features matrix. p row features and N column observations.
rank: int
Rank of decomposition
niter: int
Number of iterations to be used
hull: bool, optional
If True, use convex-hull NMF (pymf.CHNMF) instead of convex NMF (pymf.CNMF)
Returns
-------
F: np.array
Cluster matrix (decomposed matrix)
G: np.array
Activation matrix (decomposed matrix)
(s.t. S ~= F * G)
"""
if hull:
nmf_mdl = pymf.CHNMF(S, num_bases=rank)
else:
nmf_mdl = pymf.CNMF(S, num_bases=rank)
nmf_mdl.factorize(niter=niter)
F = np.asarray(nmf_mdl.W)
G = np.asarray(nmf_mdl.H)
return F, G | 0.001389 |
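A minimal usage sketch for cnmf, assuming numpy and pymf are installed; the random features matrix is purely illustrative and the shapes in the comments follow pymf's W/H convention:
import numpy as np
S = np.abs(np.random.rand(10, 50))   # p=10 features, N=50 observations
F, G = cnmf(S, rank=3, niter=200)
print(F.shape, G.shape)              # expected (10, 3) and (3, 50), so S ~= F @ G
print(np.linalg.norm(S - F @ G))     # reconstruction error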
def crop(self, data):
'''Crop a data dictionary down to its common time
Parameters
----------
data : dict
As produced by pumpp.transform
Returns
-------
data_cropped : dict
Like `data` but with all time-like axes truncated to the
minimum common duration
'''
duration = self.data_duration(data)
data_out = dict()
for key in data:
idx = [slice(None)] * data[key].ndim
for tdim in self._time.get(key, []):
idx[tdim] = slice(duration)
data_out[key] = data[key][tuple(idx)]
return data_out | 0.002976 |
def _get_stack_frame(stacklevel):
"""
Utility function to get a stack frame, skipping internal frames.
"""
stacklevel = stacklevel + 1
if stacklevel <= 1 or _is_internal_frame(sys._getframe(1)):
# If frame is too small to care or if the warning originated in
# internal code, then do not try to hide any frames.
frame = sys._getframe(stacklevel)
else:
frame = sys._getframe(1)
# Look for one frame less since the above line starts us off.
for x in range(stacklevel-1):
frame = _next_external_frame(frame)
if frame is None:
raise ValueError
return frame | 0.001497 |
def plot_pseudodepths(configs, nr_electrodes, spacing=1, grid=None,
ctypes=None, dd_merge=False, **kwargs):
"""Plot pseudodepths for the measurements. If grid is given, then the
actual electrode positions are used, and the parameter 'spacing' is
ignored'
Parameters
----------
configs: :class:`numpy.ndarray`
Nx4 array containing the quadrupoles for different measurements
nr_electrodes: int
The overall number of electrodes of the dataset. This is used to plot
the surface electrodes
spacing: float, optional
assumed distance between electrodes. Default=1
grid: crtomo.grid.crt_grid instance, optional
grid instance. Used to infer real electrode positions
ctypes: list of strings, optional
a list of configuration types that will be plotted. All
configurations that can not be sorted into these types will not be
plotted! Possible types:
* dd
* schlumberger
dd_merge: bool, optional
if True, merge all skips. Otherwise, generate individual plots for
each skip
Returns
-------
figs: matplotlib.figure.Figure instance or list of Figure instances
if only one type was plotted, then the figure instance is returned.
Otherwise, return a list of figure instances.
axes: axes object or list of axes ojects
plot axes
Examples
--------
.. plot::
:include-source:
from reda.plotters.plots2d import plot_pseudodepths
# define a few measurements
import numpy as np
configs = np.array((
(1, 2, 4, 3),
(1, 2, 5, 4),
(1, 2, 6, 5),
(2, 3, 5, 4),
(2, 3, 6, 5),
(3, 4, 6, 5),
))
# plot
fig, axes = plot_pseudodepths(configs, nr_electrodes=6, spacing=1,
ctypes=['dd', ])
.. plot::
:include-source:
from reda.plotters.plots2d import plot_pseudodepths
# define a few measurements
import numpy as np
configs = np.array((
(4, 7, 5, 6),
(3, 8, 5, 6),
(2, 9, 5, 6),
(1, 10, 5, 6),
))
# plot
fig, axes = plot_pseudodepths(configs, nr_electrodes=10, spacing=1,
ctypes=['schlumberger', ])
"""
# for each configuration type we have different ways of computing
# pseudodepths
pseudo_d_functions = {
'dd': _pseudodepths_dd_simple,
'schlumberger': _pseudodepths_schlumberger,
'wenner': _pseudodepths_wenner,
}
titles = {
'dd': 'dipole-dipole configurations',
'schlumberger': 'Schlumberger configurations',
'wenner': 'Wenner configurations',
}
# sort the configurations into the various types of configurations
only_types = ctypes or ['dd', ]
results = fT.filter(configs, settings={'only_types': only_types, })
# loop through all measurement types
figs = []
axes = []
for key in sorted(results.keys()):
print('plotting: ', key)
if key == 'not_sorted':
continue
index_dict = results[key]
# it is possible that we want to generate multiple plots for one
# type of measurement, i.e., to separate skips of dipole-dipole
# measurements. Therefore we generate two lists:
# 1) list of list of indices to plot
# 2) corresponding labels
if key == 'dd' and not dd_merge:
plot_list = []
labels_add = []
for skip in sorted(index_dict.keys()):
plot_list.append(index_dict[skip])
labels_add.append(' - skip {0}'.format(skip))
else:
# merge all indices
plot_list = [np.hstack(index_dict.values()), ]
print('schlumberger', plot_list)
labels_add = ['', ]
grid = None
# generate plots
for indices, label_add in zip(plot_list, labels_add):
if len(indices) == 0:
continue
ddc = configs[indices]
px, pz = pseudo_d_functions[key](ddc, spacing, grid)
fig, ax = plt.subplots(figsize=(15 / 2.54, 5 / 2.54))
ax.scatter(px, pz, color='k', alpha=0.5)
# plot electrodes
if grid is not None:
electrodes = grid.get_electrode_positions()
ax.scatter(
electrodes[:, 0],
electrodes[:, 1],
color='b',
label='electrodes', )
else:
ax.scatter(
np.arange(0, nr_electrodes) * spacing,
np.zeros(nr_electrodes),
color='b',
label='electrodes', )
ax.set_title(titles[key] + label_add)
ax.set_aspect('equal')
ax.set_xlabel('x [m]')
ax.set_ylabel('z [m]')
fig.tight_layout()
figs.append(fig)
axes.append(ax)
if len(figs) == 1:
return figs[0], axes[0]
else:
return figs, axes | 0.000192 |
def manage_view(request, semester, profile=None):
"""
View all members' preferences. This view also includes forms to create an
entire semester's worth of weekly workshifts.
"""
page_name = "Manage Workshift"
pools = WorkshiftPool.objects.filter(semester=semester)
full_management = utils.can_manage(request.user, semester=semester)
edit_semester_form = None
close_semester_form = None
open_semester_form = None
if not full_management:
pools = pools.filter(managers__incumbent__user=request.user)
if not pools.count():
messages.add_message(request, messages.ERROR,
MESSAGES["ADMINS_ONLY"])
return HttpResponseRedirect(semester.get_view_url())
else:
edit_semester_form = FullSemesterForm(
data=request.POST if "edit_semester" in request.POST else None,
instance=semester,
)
if semester.current:
close_semester_form = CloseSemesterForm(
data=request.POST if "close_semester" in request.POST else None,
semester=semester,
)
else:
open_semester_form = OpenSemesterForm(
data=request.POST if "open_semester" in request.POST else None,
semester=semester
)
if edit_semester_form and edit_semester_form.is_valid():
semester = edit_semester_form.save()
messages.add_message(
request,
messages.INFO,
"Semester successfully updated.",
)
return HttpResponseRedirect(wurl(
"workshift:manage",
sem_url=semester.sem_url,
))
if close_semester_form and close_semester_form.is_valid():
close_semester_form.save()
messages.add_message(request, messages.INFO, "Semester closed.")
return HttpResponseRedirect(wurl(
"workshift:manage",
sem_url=semester.sem_url,
))
if open_semester_form and open_semester_form.is_valid():
open_semester_form.save()
messages.add_message(request, messages.INFO, "Semester reopened.")
return HttpResponseRedirect(wurl(
"workshift:manage",
sem_url=semester.sem_url,
))
pools = pools.order_by("-is_primary", "title")
workshifters = WorkshiftProfile.objects.filter(semester=semester)
pool_hours = [
[
workshifter.pool_hours.get(pool=pool)
for pool in pools
]
for workshifter in workshifters
]
return render_to_response("manage.html", {
"page_name": page_name,
"pools": pools,
"full_management": full_management,
"edit_semester_form": edit_semester_form,
"close_semester_form": close_semester_form,
"open_semester_form": open_semester_form,
"workshifters": zip(workshifters, pool_hours),
}, context_instance=RequestContext(request)) | 0.00067 |
def random_rademacher(shape, dtype=tf.float32, seed=None, name=None):
"""Generates `Tensor` consisting of `-1` or `+1`, chosen uniformly at random.
For more details, see [Rademacher distribution](
https://en.wikipedia.org/wiki/Rademacher_distribution).
Args:
shape: Vector-shaped, `int` `Tensor` representing shape of output.
dtype: (Optional) TF `dtype` representing `dtype` of output.
seed: (Optional) Python integer to seed the random number generator.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'random_rademacher').
Returns:
rademacher: `Tensor` with specified `shape` and `dtype` consisting of `-1`
or `+1` chosen uniformly-at-random.
"""
with tf.compat.v1.name_scope(name, 'random_rademacher', [shape, seed]):
# Choose the dtype to cause `2 * random_bernoulli - 1` to run in the same
# memory (host or device) as the downstream cast will want to put it. The
# convention on GPU is that int32 are in host memory and int64 are in device
# memory.
generation_dtype = tf.int64 if tf.as_dtype(dtype) != tf.int32 else tf.int32
random_bernoulli = tf.random.uniform(
shape, minval=0, maxval=2, dtype=generation_dtype, seed=seed)
return tf.cast(2 * random_bernoulli - 1, dtype) | 0.003046 |
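A short usage sketch for random_rademacher, assuming TensorFlow (with the compat.v1 name scope available) is installed:
import tensorflow as tf
draws = random_rademacher([2, 3], dtype=tf.float32, seed=42)
print(draws)   # a 2x3 tensor whose entries are each -1.0 or +1.0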
def aes_cbc_no_padding_decrypt(key, data, iv):
"""
Decrypts AES ciphertext in CBC mode using a 128, 192 or 256 bit key and no
padding.
:param key:
The encryption key - a byte string either 16, 24 or 32 bytes long
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector - a byte string 16-bytes long
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the plaintext
"""
if len(key) not in [16, 24, 32]:
raise ValueError(pretty_message(
'''
key must be either 16, 24 or 32 bytes (128, 192 or 256 bits) long - is %s
''',
len(key)
))
if len(iv) != 16:
raise ValueError(pretty_message(
'''
iv must be 16 bytes long - is %s
''',
len(iv)
))
return _decrypt(Security.kSecAttrKeyTypeAES, key, data, iv, Security.kSecPaddingNoneKey) | 0.002593 |
def run(self):
"""The thread's main activity. Call start() instead."""
self._create_socket()
self._running = True
self._beating = True
while self._running:
if self._pause:
# just sleep, and skip the rest of the loop
time.sleep(self.time_to_dead)
continue
since_last_heartbeat = 0.0
# io.rprint('Ping from HB channel') # dbg
# no need to catch EFSM here, because the previous event was
# either a recv or connect, which cannot be followed by EFSM
self.socket.send(b'ping')
request_time = time.time()
ready = self._poll(request_time)
if ready:
self._beating = True
# the poll above guarantees we have something to recv
self.socket.recv()
# sleep the remainder of the cycle
remainder = self.time_to_dead - (time.time() - request_time)
if remainder > 0:
time.sleep(remainder)
continue
else:
# nothing was received within the time limit, signal heart failure
self._beating = False
since_last_heartbeat = time.time() - request_time
self.call_handlers(since_last_heartbeat)
# and close/reopen the socket, because the REQ/REP cycle has been broken
self._create_socket()
continue
try:
self.socket.close()
except:
pass | 0.004313 |
def cancelMktData(self, contract: Contract):
"""
Unsubscribe from realtime streaming tick data.
Args:
contract: The exact contract object that was used to
subscribe with.
"""
ticker = self.ticker(contract)
reqId = self.wrapper.endTicker(ticker, 'mktData')
if reqId:
self.client.cancelMktData(reqId)
else:
self._logger.error(
'cancelMktData: ' f'No reqId found for contract {contract}') | 0.003861 |
def findElementsWithId(node, elems=None):
"""
Returns all elements with id attributes
"""
if elems is None:
elems = {}
id = node.getAttribute('id')
if id != '':
elems[id] = node
if node.hasChildNodes():
for child in node.childNodes:
# from http://www.w3.org/TR/DOM-Level-2-Core/idl-definitions.html
# we are only really interested in nodes of type Element (1)
if child.nodeType == Node.ELEMENT_NODE:
findElementsWithId(child, elems)
return elems | 0.001808 |
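A small usage sketch for findElementsWithId, using the standard-library minidom parser (the function itself only needs Node from xml.dom):
from xml.dom import minidom
doc = minidom.parseString('<svg><g id="layer1"><rect id="r1"/></g></svg>')
elems = findElementsWithId(doc.documentElement)
print(sorted(elems))   # ['layer1', 'r1']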
def free_data(self):
"""
clear buffer
:return: bool
"""
command = const.CMD_FREE_DATA
cmd_response = self.__send_command(command)
if cmd_response.get('status'):
return True
else:
raise ZKErrorResponse("can't free data") | 0.006494 |
def find_all(s, sub, start=0, end=0, limit=-1, reverse=False):
"""
Find all indexes of sub in s.
:param s: the string to search
:param sub: the string to search for
:param start: the index in s at which to begin the search (same as in ''.find)
:param end: the index in s at which to stop searching (same as in ''.find)
:param limit: the maximum number of matches to find
:param reverse: if False search s forwards; otherwise search backwards
:return: all occurrences of substring sub in string s
"""
indexes = []
if not bool(s and sub):
return indexes
lstr = len(s)
if lstr <= start:
return indexes
lsub = len(sub)
if lstr < lsub:
return indexes
if limit == 0:
return indexes
elif limit < 0:
limit = lstr
end = min(end, lstr) or lstr
idx = s.rfind(sub, start, end) if reverse else s.find(sub, start, end)
while idx != -1:
indexes.append(idx)
if reverse:
idx = s.rfind(sub, start, idx - lstr)
else:
idx = s.find(sub, idx + lsub, end)
if len(indexes) >= limit:
break
return indexes | 0.001686 |
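A worked example for find_all; the same search run forwards, backwards, and with a match limit:
print(find_all("abcabcabc", "abc"))                 # [0, 3, 6]
print(find_all("abcabcabc", "abc", reverse=True))   # [6, 3, 0]
print(find_all("abcabcabc", "abc", limit=2))        # [0, 3]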
def validateOpfJsonValue(value, opfJsonSchemaFilename):
"""
Validate a python object against an OPF json schema file
:param value: target python object to validate (typically a dictionary)
:param opfJsonSchemaFilename: (string) OPF json schema filename containing the
json schema object. (e.g., opfTaskControlSchema.json)
:raises: jsonhelpers.ValidationError when value fails json validation
"""
# Create a path by joining the filename with our local json schema root
jsonSchemaPath = os.path.join(os.path.dirname(__file__),
"jsonschema",
opfJsonSchemaFilename)
# Validate
jsonhelpers.validate(value, schemaPath=jsonSchemaPath)
return | 0.010914 |
def _get_marker_output(self, asset_quantities, metadata):
"""
Creates a marker output.
:param list[int] asset_quantities: The asset quantity list.
:param bytes metadata: The metadata contained in the output.
:return: An object representing the marker output.
:rtype: TransactionOutput
"""
payload = openassets.protocol.MarkerOutput(asset_quantities, metadata).serialize_payload()
script = openassets.protocol.MarkerOutput.build_script(payload)
return bitcoin.core.CTxOut(0, script) | 0.005338 |
def iter_annotation_values(graph, annotation: str) -> Iterable[str]:
"""Iterate over all of the values for an annotation used in the graph.
:param pybel.BELGraph graph: A BEL graph
:param str annotation: The annotation to grab
"""
return (
value
for _, _, data in graph.edges(data=True)
if edge_has_annotation(data, annotation)
for value in data[ANNOTATIONS][annotation]
) | 0.002331 |
def update_password(self, password: str) -> bool:
"""
Change the login password. Returns True on success, False otherwise,
or None if the request times out.
"""
try:
# form data for the password-change request
payload = {
'pass': password,
'submit': 'sumit'
}
# change the password
response = self.__session.post(
self.__url + '/changePasswd', data=payload, timeout=0.5, verify=False)
soup = BeautifulSoup(response.text, 'html.parser')
# return whether the change succeeded
return str(soup.find('body')).split()[-2].strip() == 'Success'
except requests.exceptions.Timeout:
return None | 0.004967 |
def get_input_files(dirname, *ext):
"""Returns files in passed directory, filtered by extension.
- dirname - path to input directory
- *ext - list of arguments describing permitted file extensions
"""
filelist = [f for f in os.listdir(dirname) if
os.path.splitext(f)[-1] in ext]
return [os.path.join(dirname, f) for f in filelist] | 0.002695 |
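A usage sketch for get_input_files; the directory name and extensions below are hypothetical:
# Collect FASTA files from a (hypothetical) "sequences" directory.
fasta_files = get_input_files("sequences", ".fasta", ".fa")
print(fasta_files)   # e.g. ['sequences/genome1.fasta', 'sequences/plasmid.fa']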
def insert_strain_option_group_multi_ifo(parser):
"""
Adds the options used to call the pycbc.strain.from_cli function to an
argument parser as an argument group. This should be used if you
want to use these options in your code.
Parameters
----------
parser : object
ArgumentParser instance.
"""
data_reading_group_multi = parser.add_argument_group("Options for obtaining"
" h(t)",
"These options are used for generating h(t) either by "
"reading from a file or by generating it. This is only "
"needed if the PSD is to be estimated from the data, ie. "
"if the --psd-estimation option is given. This group "
"supports reading from multiple ifos simultaneously.")
# Required options
data_reading_group_multi.add_argument("--gps-start-time", nargs='+',
action=MultiDetOptionAction, metavar='IFO:TIME',
help="The gps start time of the data "
"(integer seconds)", type=int)
data_reading_group_multi.add_argument("--gps-end-time", nargs='+', type=int,
action=MultiDetOptionAction, metavar='IFO:TIME',
help="The gps end time of the data "
"(integer seconds)")
data_reading_group_multi.add_argument("--strain-high-pass", nargs='+',
action=MultiDetOptionAction,
type=float, metavar='IFO:FREQUENCY',
help="High pass frequency")
data_reading_group_multi.add_argument("--pad-data", nargs='+',
action=MultiDetOptionAction,
type=int, metavar='IFO:LENGTH',
help="Extra padding to remove highpass corruption "
"(integer seconds)")
data_reading_group_multi.add_argument("--taper-data", nargs='+',
action=MultiDetOptionAction,
type=int, default=0, metavar='IFO:LENGTH',
help="Taper ends of data to zero using the "
"supplied length as a window (integer seconds)")
data_reading_group_multi.add_argument("--sample-rate", type=int, nargs='+',
action=MultiDetOptionAction, metavar='IFO:RATE',
help="The sample rate to use for h(t) generation "
" (integer Hz).")
data_reading_group_multi.add_argument("--channel-name", type=str, nargs='+',
action=MultiDetOptionActionSpecial,
metavar='IFO:CHANNEL',
help="The channel containing the gravitational "
"strain data")
#Read from cache file
data_reading_group_multi.add_argument("--frame-cache", type=str, nargs="+",
action=MultiDetOptionAppendAction,
metavar='IFO:FRAME_CACHE',
help="Cache file containing the frame locations.")
#Read from frame files
data_reading_group_multi.add_argument("--frame-files", type=str, nargs="+",
action=MultiDetOptionAppendAction,
metavar='IFO:FRAME_FILES',
help="list of frame files")
# Use datafind to get frame files
data_reading_group_multi.add_argument("--frame-type", type=str, nargs="+",
action=MultiDetOptionAction,
metavar='IFO:FRAME_TYPE',
help="(optional) Replaces frame-files. "
"Use datafind to get the needed frame "
"file(s) of this type.")
#Filter frame files by URL
data_reading_group_multi.add_argument("--frame-sieve", type=str, nargs="+",
action=MultiDetOptionAction,
metavar='IFO:FRAME_SIEVE',
help="(optional), Only use frame files where the "
"URL matches the regular expression given.")
#Generate gaussian noise with given psd
data_reading_group_multi.add_argument("--fake-strain", type=str, nargs="+",
action=MultiDetOptionAction, metavar='IFO:CHOICE',
help="Name of model PSD for generating fake "
"gaussian noise. Choose from %s or zeroNoise" \
%((', ').join(pycbc.psd.get_lalsim_psd_list()),) )
data_reading_group_multi.add_argument("--fake-strain-seed", type=int,
default=0, nargs="+", action=MultiDetOptionAction,
metavar='IFO:SEED',
help="Seed value for the generation of fake "
"colored gaussian noise")
data_reading_group_multi.add_argument("--fake-strain-from-file", nargs="+",
action=MultiDetOptionAction, metavar='IFO:FILE',
help="File containing ASD for generating fake "
"noise from it.")
#optional
data_reading_group_multi.add_argument("--injection-file", type=str,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:FILE',
help="(optional) Injection file used to add "
"waveforms into the strain")
data_reading_group_multi.add_argument("--sgburst-injection-file", type=str,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:FILE',
help="(optional) Injection file used to add "
"sine-Gaussian burst waveforms into the strain")
data_reading_group_multi.add_argument("--injection-scale-factor",
type=float, nargs="+", action=MultiDetOptionAction,
metavar="IFO:VAL", default=1.,
help="Multiple injections by this factor "
"before injecting into the data.")
data_reading_group_multi.add_argument("--gating-file", type=str,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:FILE',
help="(optional) Text file of gating segments to apply."
" Format of each line is (all times in secs):"
" gps_time zeros_half_width pad_half_width")
data_reading_group_multi.add_argument('--autogating-threshold', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SIGMA',
help='If given, find and gate glitches '
'producing a deviation larger than '
'SIGMA in the whitened strain time '
'series.')
data_reading_group_multi.add_argument('--autogating-cluster', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=5.,
help='Length of clustering window for '
'detecting glitches for autogating.')
data_reading_group_multi.add_argument('--autogating-width', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=0.25,
help='Half-width of the gating window.')
data_reading_group_multi.add_argument('--autogating-taper', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=0.25,
help='Taper the strain before and after '
'each gating window over a duration '
'of SECONDS.')
data_reading_group_multi.add_argument('--autogating-pad', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=16,
help='Ignore the given length of whitened '
'strain at the ends of a segment, to '
'avoid filters ringing.')
data_reading_group_multi.add_argument("--normalize-strain", type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:VALUE',
help="(optional) Divide frame data by constant.")
data_reading_group_multi.add_argument("--zpk-z", type=float,
nargs="+", action=MultiDetOptionAppendAction,
metavar='IFO:VALUE',
help="(optional) Zero-pole-gain (zpk) filter strain. "
"A list of zeros for transfer function")
data_reading_group_multi.add_argument("--zpk-p", type=float,
nargs="+", action=MultiDetOptionAppendAction,
metavar='IFO:VALUE',
help="(optional) Zero-pole-gain (zpk) filter strain. "
"A list of poles for transfer function")
data_reading_group_multi.add_argument("--zpk-k", type=float,
nargs="+", action=MultiDetOptionAppendAction,
metavar='IFO:VALUE',
help="(optional) Zero-pole-gain (zpk) filter strain. "
"Transfer function gain")
return data_reading_group_multi | 0.012428 |
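A minimal sketch of wiring this group into a parser; it assumes pycbc is importable (the MultiDetOption* actions and pycbc.psd come from the defining module), and actual values would then be supplied as IFO:VALUE pairs such as "H1:1126259446":
import argparse
parser = argparse.ArgumentParser()
insert_strain_option_group_multi_ifo(parser)
parser.print_help()   # shows the "Options for obtaining h(t)" group with one IFO:VALUE style flag per option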
def getDetailInfo(self, CorpNum, MgtKeyType, MgtKey):
""" ์์ธ์ ๋ณด ํ์ธ
args
CorpNum : ํ์ ์ฌ์
์ ๋ฒํธ
MgtKeyType : ๊ด๋ฆฌ๋ฒํธ ์ ํ one of ['SELL','BUY','TRUSTEE']
MgtKey : ํํธ๋ ๊ด๋ฆฌ๋ฒํธ
return
์ฒ๋ฆฌ๊ฒฐ๊ณผ. consist of code and message
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "The management key type is not valid.")
if MgtKey is None or MgtKey == "":
raise PopbillException(-99999999, "The management key was not entered.")
return self._httpget('/Taxinvoice/' + MgtKeyType + "/" + MgtKey + "?Detail", CorpNum) | 0.005848 |
def get_gradebook_columns_by_genus_type(self, gradebook_column_genus_type):
"""Gets a ``GradebookColumnList`` corresponding to the given gradebook column genus ``Type`` which does not include gradebook columns of genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known gradebook
columns or an error results. Otherwise, the returned list may
contain only those gradebook columns that are accessible through
this session.
arg: gradebook_column_genus_type (osid.type.Type): a
gradebook column genus type
return: (osid.grading.GradebookColumnList) - the returned
``GradebookColumn`` list
raise: NullArgument - ``gradebook_column_genus_type`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_genus_type
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('grading',
collection='GradebookColumn',
runtime=self._runtime)
result = collection.find(
dict({'genusTypeId': str(gradebook_column_genus_type)},
**self._view_filter())).sort('_id', DESCENDING)
return objects.GradebookColumnList(result, runtime=self._runtime, proxy=self._proxy) | 0.002438 |
def setup_experiment(debug=True, verbose=False, app=None):
"""Check the app and, if it's compatible with Wallace, freeze its state."""
print_header()
# Verify that the package is usable.
log("Verifying that directory is compatible with Wallace...")
if not verify_package(verbose=verbose):
raise AssertionError(
"This is not a valid Wallace app. " +
"Fix the errors and then try running 'wallace verify'.")
# Verify that the Postgres server is running.
try:
psycopg2.connect(database="x", user="postgres", password="nada")
except psycopg2.OperationalError as e:
if "could not connect to server" in str(e):
raise RuntimeError("The Postgres server isn't running.")
# Load psiTurk configuration.
config = PsiturkConfig()
config.load_config()
# Check that the demo-specific requirements are satisfied.
try:
with open("requirements.txt", "r") as f:
dependencies = f.readlines()
except:
dependencies = []
pkg_resources.require(dependencies)
# Generate a unique id for this experiment.
id = "w" + str(uuid.uuid4())[0:28]
# If the user provided an app name, use it everywhere that's user-facing.
if app:
id_long = id
id = str(app)
log("Running as experiment " + id + "...")
# Copy this directory into a temporary folder, ignoring .git
dst = os.path.join(tempfile.mkdtemp(), id)
to_ignore = shutil.ignore_patterns(
".git/*",
"*.db",
"snapshots",
"data",
"server.log"
)
shutil.copytree(os.getcwd(), dst, ignore=to_ignore)
click.echo(dst)
# Save the experiment id
with open(os.path.join(dst, "experiment_id.txt"), "w") as file:
if app:
file.write(id_long)
else:
file.write(id)
# Zip up the temporary directory and place it in the cwd.
if not debug:
log("Freezing the experiment package...")
shutil.make_archive(
os.path.join("snapshots", id + "-code"), "zip", dst)
# Change directory to the temporary folder.
cwd = os.getcwd()
os.chdir(dst)
# Check directories.
if not os.path.exists("static/scripts"):
os.makedirs("static/scripts")
if not os.path.exists("templates"):
os.makedirs("templates")
if not os.path.exists("static/css"):
os.makedirs("static/css")
# Rename experiment.py to wallace_experiment.py to avoid psiTurk conflict.
os.rename(
os.path.join(dst, "experiment.py"),
os.path.join(dst, "wallace_experiment.py"))
# Copy files into this experiment package.
src = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"custom.py")
shutil.copy(src, os.path.join(dst, "custom.py"))
heroku_files = [
"Procfile",
"requirements.txt",
"psiturkapp.py",
"worker.py",
"clock.py",
]
for filename in heroku_files:
src = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"heroku",
filename)
shutil.copy(src, os.path.join(dst, filename))
clock_on = config.getboolean('Server Parameters', 'clock_on')
# If the clock process has been disabled, overwrite the Procfile.
if not clock_on:
src = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"heroku",
"Procfile_no_clock")
shutil.copy(src, os.path.join(dst, "Procfile"))
frontend_files = [
"static/css/wallace.css",
"static/scripts/wallace.js",
"static/scripts/reqwest.min.js",
"templates/error_wallace.html",
"templates/launch.html",
"templates/complete.html",
"static/robots.txt"
]
for filename in frontend_files:
src = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"frontend",
filename)
shutil.copy(src, os.path.join(dst, filename))
time.sleep(0.25)
os.chdir(cwd)
return (id, dst) | 0.000487 |
def builtin(cls, name):
"""
Generate a default legend.
Args:
name (str): The name of the legend you want. Not case sensitive.
'nsdoe': Nova Scotia Dept. of Energy
'canstrat': Canstrat
'nagmdm__6_2': USGS N. Am. Geol. Map Data Model 6.2
'nagmdm__6_1': USGS N. Am. Geol. Map Data Model 6.1
'nagmdm__4_3': USGS N. Am. Geol. Map Data Model 4.3
'sgmc': USGS State Geologic Map Compilation
Default 'nagmdm__6_2'.
Returns:
Legend: The legend stored in `defaults.py`.
"""
names = {
'nsdoe': LEGEND__NSDOE,
'canstrat': LEGEND__Canstrat,
'nagmdm__6_2': LEGEND__NAGMDM__6_2,
'nagmdm__6_1': LEGEND__NAGMDM__6_1,
'nagmdm__4_3': LEGEND__NAGMDM__4_3,
'sgmc': LEGEND__SGMC,
}
return cls.from_csv(text=names[name.lower()]) | 0.001959 |
def sum_dice(spec):
""" Replace the dice roll arrays from roll_dice in place with summations of
the rolls. """
if spec[0] == 'c': return spec[1]
elif spec[0] == 'r': return sum(spec[1])
elif spec[0] == 'x':
return [sum_dice(r) for r in spec[1]]
elif spec[0] in ops:
return (spec[0], sum_dice(spec[1]), sum_dice(spec[2]))
else: raise ValueError("Invalid dice specification") | 0.009592 |
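A hedged worked example for sum_dice; the nested spec layout ('c' for constants, 'r' for rolls, operator tuples for arithmetic) is read off the code above, and the concrete rolls are illustrative:
# A hypothetical parsed spec for "2d6 + 3" where the two dice came up 4 and 5.
spec = ('+', ('r', [4, 5]), ('c', 3))
print(sum_dice(spec))   # ('+', 9, 3), assuming '+' is in the module's `ops` set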
def ToTimedelta(self):
"""Converts Duration to timedelta."""
return timedelta(
seconds=self.seconds, microseconds=_RoundTowardZero(
self.nanos, _NANOS_PER_MICROSECOND)) | 0.005102 |
def on_success(self, fn, *args, **kwargs):
"""
Call the given callback if or when the connected deferred succeeds.
"""
self._callbacks.append((fn, args, kwargs))
result = self._resulted_in
if result is not _NOTHING_YET:
self._succeed(result=result) | 0.006431 |
def nodes_map(self):
"""
Build a mapping from node type to a list of nodes.
A typed mapping helps avoid polymorphism at non-persistent layers.
"""
dct = dict()
for node in self.nodes.values():
cls = next(base for base in getmro(node.__class__) if "__tablename__" in base.__dict__)
key = getattr(cls, "__alias__", underscore(cls.__name__))
dct.setdefault(key, []).append(node)
return dct | 0.00625 |
def stop(self):
"""
Stop the Runner if it's running.
Called as a classmethod, stop the running instance if any.
"""
if self.is_running:
log.info('Stopping')
self.is_running = False
self.__class__._INSTANCE = None
try:
self.thread and self.thread.stop()
except:
log.error('Error stopping thread')
traceback.print_exc()
self.thread = None
return True | 0.005758 |
def add_creg(self, creg):
"""Add all wires in a classical register."""
if not isinstance(creg, ClassicalRegister):
raise DAGCircuitError("not a ClassicalRegister instance.")
if creg.name in self.cregs:
raise DAGCircuitError("duplicate register %s" % creg.name)
self.cregs[creg.name] = creg
for j in range(creg.size):
self._add_wire((creg, j)) | 0.004785 |
def getChatAdministrators(self, chat_id):
""" See: https://core.telegram.org/bots/api#getchatadministrators """
p = _strip(locals())
return self._api_request('getChatAdministrators', _rectify(p)) | 0.009132 |
def directive_SPACE(self, label, params):
"""
label SPACE num
Allocate space on the stack. `num` is the number of bytes to allocate
"""
# TODO allow equations
params = params.strip()
try:
self.convert_to_integer(params)
except ValueError:
warnings.warn("Unknown parameters; {}".format(params))
return
self.labels[label] = self.space_pointer
if params in self.equates:
params = self.equates[params]
self.space_pointer += self.convert_to_integer(params) | 0.003378 |
def p_attr_field(self, p):
"""attr_field : ID EQ primitive NL
| ID EQ tag_ref NL"""
if p[3] is NullToken:
p[0] = AstAttrField(
self.path, p.lineno(1), p.lexpos(1), p[1], None)
else:
p[0] = AstAttrField(
self.path, p.lineno(1), p.lexpos(1), p[1], p[3]) | 0.005666 |
def angle(vec1, vec2):
"""Returns the angle between two vectors"""
dot_vec = dot(vec1, vec2)
mag1 = vec1.length()
mag2 = vec2.length()
result = dot_vec / (mag1 * mag2)
return math.acos(result) | 0.00463 |
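A self-contained sketch of angle; the tiny Vec2 class and dot helper below stand in for whatever vector type and module-level dot function the surrounding library actually provides:
import math

class Vec2:
    def __init__(self, x, y):
        self.x, self.y = x, y
    def length(self):
        return math.hypot(self.x, self.y)

def dot(a, b):
    return a.x * b.x + a.y * b.y

print(angle(Vec2(1, 0), Vec2(0, 1)))   # 1.5707963... (pi/2, perpendicular vectors)
print(angle(Vec2(1, 0), Vec2(2, 0)))   # 0.0 (parallel vectors)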
def keras_tuples(stream, inputs=None, outputs=None):
"""Reformat data objects as keras-compatible tuples.
For more detail: https://keras.io/models/model/#fit
Parameters
----------
stream : iterable
Stream of data objects.
inputs : string or iterable of strings, None
Keys to use for ordered input data.
If not specified, returns `None` in its place.
outputs : string or iterable of strings, default=None
Keys to use for ordered output data.
If not specified, returns `None` in its place.
Yields
------
x : np.ndarray, list of np.ndarray, or None
If `inputs` is a string, `x` is a single np.ndarray.
If `inputs` is an iterable of strings, `x` is a list of np.ndarrays.
If `inputs` is a null type, `x` is None.
y : np.ndarray, list of np.ndarray, or None
If `outputs` is a string, `y` is a single np.ndarray.
If `outputs` is an iterable of strings, `y` is a list of np.ndarrays.
If `outputs` is a null type, `y` is None.
Raises
------
DataError
If the stream contains items that are not data-like.
"""
flatten_inputs, flatten_outputs = False, False
if inputs and isinstance(inputs, six.string_types):
inputs = [inputs]
flatten_inputs = True
if outputs and isinstance(outputs, six.string_types):
outputs = [outputs]
flatten_outputs = True
inputs, outputs = (inputs or []), (outputs or [])
if not inputs + outputs:
raise PescadorError('At least one key must be given for '
'`inputs` or `outputs`')
for data in stream:
try:
x = list(data[key] for key in inputs) or None
if len(inputs) == 1 and flatten_inputs:
x = x[0]
y = list(data[key] for key in outputs) or None
if len(outputs) == 1 and flatten_outputs:
y = y[0]
yield (x, y)
except TypeError:
raise DataError("Malformed data stream: {}".format(data)) | 0.000481 |
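A short usage sketch for keras_tuples, assuming numpy is available; each data object in the stream is a dict keyed by field name:
import numpy as np
stream = [{'X': np.arange(3), 'Y': np.array([i])} for i in range(2)]
for x, y in keras_tuples(stream, inputs='X', outputs='Y'):
    print(x, y)   # [0 1 2] [0], then [0 1 2] [1]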
def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
is a non-boolean scalar. Raises a TypeError if
that is the case.
"""
msg = ("Only booleans, lists, and "
"dictionaries are accepted "
"for the 'parse_dates' parameter")
if parse_dates is not None:
if is_scalar(parse_dates):
if not lib.is_bool(parse_dates):
raise TypeError(msg)
elif not isinstance(parse_dates, (list, dict)):
raise TypeError(msg)
return parse_dates | 0.001736 |
def _renameClasses(classes, prefix):
"""
Replace class IDs with nice strings.
"""
renameMap = {}
for classID, glyphList in classes.items():
if len(glyphList) == 0:
groupName = "%s_empty_lu.%d_st.%d_cl.%d" % (prefix, classID[0], classID[1], classID[2])
elif len(glyphList) == 1:
groupName = list(glyphList)[0]
else:
glyphList = list(sorted(glyphList))
groupName = prefix + glyphList[0]
renameMap[classID] = groupName
return renameMap | 0.003731 |
def _to_original_callable(obj):
"""
Find the Python object that contains the source code of the object.
This is useful to find the place in the source code (file and line
number) where a docstring is defined. It does not currently work for
all cases, but it should help find some (properties...).
"""
while True:
if inspect.isfunction(obj) or inspect.isclass(obj):
f = inspect.getfile(obj)
if f.startswith('<') and f.endswith('>'):
return None
return obj
if inspect.ismethod(obj):
obj = obj.__func__
elif isinstance(obj, functools.partial):
obj = obj.func
elif isinstance(obj, property):
obj = obj.fget
else:
return None | 0.002301 |
def QueueMessages(self, messages):
"""Push messages to the input queue."""
# Push all the messages to our input queue
for message in messages:
self._in_queue.put(message, block=True) | 0.01 |
def read(self, n):
"""Read n bytes."""
self.bitcount = self.bits = 0
return self.input.read(n) | 0.016949 |
def import_image_tags(self, name, tags, repository, insecure=False):
"""Import image tags from specified container repository.
:param name: str, name of ImageStream object
:param tags: iterable, tags to be imported
:param repository: str, remote location of container image
in the format <registry>/<repository>
:param insecure: bool, indicates whether the registry should be treated as insecure
:return: bool, whether tags were imported
"""
stream_import_file = os.path.join(self.os_conf.get_build_json_store(),
'image_stream_import.json')
with open(stream_import_file) as f:
stream_import = json.load(f)
return self.os.import_image_tags(name, stream_import, tags,
repository, insecure) | 0.002291 |
def metasound(text: str, length: int = 4) -> str:
"""
Thai MetaSound
:param str text: Thai text
:param int length: preferred length of the MetaSound (default is 4)
:return: MetaSound for the text
**Example**::
>>> from pythainlp.metasound import metasound
>>> metasound("ลัก")
'ล100'
>>> metasound("รัก")
'ร100'
>>> metasound("รักษ์")
'ร100'
>>> metasound("บูรณการ", 5)
'บ5515'
"""
if not text or not isinstance(text, str):
return ""
# keep only consonants and thanthakhat
chars = []
for ch in text:
if ch in _CONS_THANTHAKHAT:
chars.append(ch)
# remove karan (thanthakhat and a consonant before it)
i = 0
while i < len(chars):
if chars[i] == _THANTHAKHAT:
if i > 0:
chars[i - 1] = " "
chars[i] = " "
i += 1
# retain first consonant, encode the rest
chars = chars[:length]
i = 1
while i < len(chars):
if chars[i] in _C1:
chars[i] = "1"
elif chars[i] in _C2:
chars[i] = "2"
elif chars[i] in _C3:
chars[i] = "3"
elif chars[i] in _C4:
chars[i] = "4"
elif chars[i] in _C5:
chars[i] = "5"
elif chars[i] in _C6:
chars[i] = "6"
elif chars[i] in _C7:
chars[i] = "7"
elif chars[i] in _C8:
chars[i] = "8"
else:
chars[i] = "0"
i += 1
while len(chars) < length:
chars.append("0")
return "".join(chars) | 0.000614 |
def infer_map(value: Mapping[GenericAny, GenericAny]) -> Map:
"""Infer the :class:`~ibis.expr.datatypes.Map` type of `value`."""
if not value:
return Map(null, null)
return Map(
highest_precedence(map(infer, value.keys())),
highest_precedence(map(infer, value.values())),
) | 0.003195 |
def republish(self, hash, count=None, sources=None, destinations=None):
"""
Rebroadcast blocks starting at **hash** to the network
:param hash: Hash of block to start rebroadcasting from
:type hash: str
:param count: Max number of blocks to rebroadcast
:type count: int
:param sources: If set, additionally rebroadcasts source chain blocks
for receive/open up to **sources** depth
:type sources: int
:param destinations: If set, additionally rebroadcasts destination chain
blocks for receive/open up to **destinations** depth
:type destinations: int
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.republish(
... hash="991CF190094C00F0B68E2E5F75F6BEE95A2E0BD93CEAA4A6734DB9F19B728948"
... )
[
"991CF190094C00F0B68E2E5F75F6BEE95A2E0BD93CEAA4A6734DB9F19B728948",
"A170D51B94E00371ACE76E35AC81DC9405D5D04D4CEBC399AEACE07AE05DD293"
]
"""
hash = self._process_value(hash, 'block')
payload = {"hash": hash}
if count is not None:
payload['count'] = self._process_value(count, 'int')
if sources is not None:
payload['sources'] = self._process_value(sources, 'int')
if destinations is not None:
payload['destinations'] = self._process_value(destinations, 'int')
resp = self.call('republish', payload)
return resp.get('blocks') or [] | 0.003232 |
def set_circulating(self, param):
"""
Sets whether to circulate - in effect whether the heater is on.
:param param: The mode to set, must be 0 or 1.
:return: Empty string.
"""
if param == 0:
self.is_circulating = param
self.circulate_commanded = False
elif param == 1:
self.is_circulating = param
self.circulate_commanded = True
return "" | 0.004435 |
def FindByName(cls, name):
"""Find a specific installed auth provider by name."""
reg = ComponentRegistry()
for _, entry in reg.load_extensions('iotile.auth_provider', name_filter=name):
return entry | 0.012712 |
def write_question(self, question):
"""Writes a question to the packet"""
self.write_name(question.name)
self.write_short(question.type)
self.write_short(question.clazz) | 0.00995 |
def unassign_objective_from_objective_bank(self, objective_id, objective_bank_id):
"""Removes an ``Objective`` from an ``ObjectiveBank``.
arg: objective_id (osid.id.Id): the ``Id`` of the
``Objective``
arg: objective_bank_id (osid.id.Id): the ``Id`` of the
``ObjectiveBank``
raise: NotFound - ``objective_id`` or ``objective_bank_id`` not
found or ``objective_id`` not mapped to
``objective_bank_id``
raise: NullArgument - ``objective_id`` or ``objective_bank_id``
is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin
mgr = self._get_provider_manager('LEARNING', local=True)
lookup_session = mgr.get_objective_bank_lookup_session(proxy=self._proxy)
lookup_session.get_objective_bank(objective_bank_id) # to raise NotFound
self._unassign_object_from_catalog(objective_id, objective_bank_id) | 0.004068 |
def query(
self,
filters=None,
order_column="",
order_direction="",
page=None,
page_size=None,
select_columns=None,
):
"""
QUERY
:param filters:
dict with filters {<col_name>:<value,...}
:param order_column:
name of the column to order
:param order_direction:
the direction to order <'asc'|'desc'>
:param page:
the current page
:param page_size:
the current page size
"""
query = self.session.query(self.obj)
query = self._query_select_options(query, select_columns)
if len(order_column.split('.')) >= 2:
for join_relation in order_column.split('.')[:-1]:
relation_tuple = self.get_related_model_and_join(join_relation)
model_relation, relation_join = relation_tuple
if not self.is_model_already_joinded(query, model_relation):
query = query.join(model_relation, relation_join, isouter=True)
query_count = self.session.query(func.count('*')).select_from(self.obj)
query_count = self._get_base_query(query=query_count, filters=filters)
query = self._get_base_query(
query=query,
filters=filters,
order_column=order_column,
order_direction=order_direction,
)
count = query_count.scalar()
if page:
query = query.offset(page * page_size)
if page_size:
query = query.limit(page_size)
return count, query.all() | 0.002387 |
def _prepare_env(self, kwargs):
"""Returns a modifed copy of kwargs['env'], and a copy of kwargs with 'env' removed.
If there is no 'env' field in the kwargs, os.environ.copy() is used.
env['PATH'] is set/modified to contain the Node distribution's bin directory at the front.
:param kwargs: The original kwargs.
:returns: An (env, kwargs) tuple containing the modified env and kwargs copies.
:rtype: (dict, dict)
"""
kwargs = kwargs.copy()
env = kwargs.pop('env', os.environ).copy()
env['PATH'] = create_path_env_var(self.extra_paths, env=env, prepend=True)
return env, kwargs | 0.006421 |
def register_target_artifact_metadata(self, target: str, metadata: dict):
"""Register the artifact metadata dictionary for a built target."""
with self.context_lock:
self.artifacts_metadata[target.name] = metadata | 0.008299 |
def get_random(self, n, l=None):
""" Return n random sequences from this Fasta object """
random_f = Fasta()
if l:
ids = self.ids[:]
random.shuffle(ids)
i = 0
while (i < n) and (len(ids) > 0):
seq_id = ids.pop()
if (len(self[seq_id]) >= l):
start = random.randint(0, len(self[seq_id]) - l)
random_f["random%s" % (i + 1)] = self[seq_id][start:start+l]
i += 1
if len(random_f) != n:
sys.stderr.write("Not enough sequences of required length")
return
else:
return random_f
else:
choice = random.sample(self.ids, n)
for i in range(n):
random_f[choice[i]] = self[choice[i]]
return random_f | 0.004561 |
def removeContour(self, contour):
"""
Remove ``contour`` from the glyph.
>>> glyph.removeContour(contour)
``contour`` may be a :ref:`BaseContour` or an :ref:`type-int`
representing a contour index.
"""
if isinstance(contour, int):
index = contour
else:
index = self._getContourIndex(contour)
index = normalizers.normalizeIndex(index)
if index >= len(self):
raise ValueError("No contour located at index %d." % index)
self._removeContour(index) | 0.00349 |
def is_prime(n):
"""
is_prime returns True if N is a prime number, False otherwise
Parameters:
Input, integer N, the number to be checked.
Output, boolean value, True or False
"""
if n != int(n) or n < 2:
return False
if n == 2 or n == 3:
return True
if n % 2 == 0 or n % 3 == 0:
return False
# All primes >3 are of the form 6n+1 or 6n+5 (6n, 6n+2, 6n+4 are 2-divisible, 6n+3 is 3-divisible)
p = 5
root = int(np.ceil(np.sqrt(n)))
while p <= root:
if n % p == 0 or n % (p + 2) == 0:
return False
p += 6
return True | 0.00318 |
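A quick worked example for is_prime (numpy is assumed, since the function uses np.ceil and np.sqrt):
import numpy as np
print([n for n in range(2, 30) if is_prime(n)])   # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
print(is_prime(1), is_prime(25), is_prime(97))    # False False True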
def set_options_from_dict(self, data_dict, filename=None):
"""Load options from a dictionary.
:param dict data_dict: Dictionary with the options to load.
:param str filename: If provided, assume that non-absolute
paths provided are in reference to the file.
"""
if filename is not None:
filename = os.path.dirname(filename)
for k in data_dict:
if not isinstance(data_dict[k], dict):
raise ValueError("The input data has to be a dict of dict")
for sk in data_dict[k]:
if self.gc[(self.gc["k1"] == k) &
(self.gc["k2"] == sk)].shape[0] == 0:
continue
if isinstance(data_dict[k][sk], six.string_types):
data_dict[k][sk] = str(data_dict[k][sk])
_type = self.gc[(self.gc["k1"] == k) &
(self.gc["k2"] == sk)][["type"]].values[0]
data_dict[k][sk] = ev.cast(data_dict[k][sk], _type)
if self.get_option(k, sk, True) != data_dict[k][sk]:
try:
self.set_option(k, sk, data_dict[k][sk])
# Provided paths do not work: try add them relative
# to the config file
except IOError:
if filename is None:
raise IOError('Error path: {0}.{1}'.format(k, sk))
npat = os.path.join(filename, data_dict[k][sk])
self.set_option(k, sk, os.path.normpath(npat))
except ValueError:
pass | 0.001182 |
def nl_cb_set(cb, type_, kind, func, arg):
"""Set up a callback. Updates `cb` in place.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/handlers.c#L293
Positional arguments:
cb -- nl_cb class instance.
type_ -- callback to modify (integer).
kind -- kind of implementation (integer).
func -- callback function (NL_CB_CUSTOM).
arg -- argument passed to callback.
Returns:
0 on success or a negative error code.
"""
if type_ < 0 or type_ > NL_CB_TYPE_MAX or kind < 0 or kind > NL_CB_KIND_MAX:
return -NLE_RANGE
if kind == NL_CB_CUSTOM:
cb.cb_set[type_] = func
cb.cb_args[type_] = arg
else:
cb.cb_set[type_] = cb_def[type_][kind]
cb.cb_args[type_] = arg
return 0 | 0.002601 |
def load(self, **kwargs):
"""Loads a given resource
Loads a given resource provided a 'name' and an optional 'slot'
parameter. The 'slot' parameter is not a required load parameter
because it is provided as an optional way of constructing the
correct 'name' of the vCMP resource.
:param kwargs:
:return:
"""
kwargs['transform_name'] = True
kwargs = self._mutate_name(kwargs)
return self._load(**kwargs) | 0.004065 |
def help_cli_basic(self):
""" Help for Workbench CLI Basics """
help = '%sWorkbench: Getting started...' % (color.Yellow)
help += '\n%sLoad in a sample:' % (color.Green)
help += '\n\t%s> load_sample /path/to/file' % (color.LightBlue)
help += '\n\n%sNotice the prompt now shows the md5 of the sample...'% (color.Yellow)
help += '\n%sRun workers on the sample:' % (color.Green)
help += '\n\t%s> view' % (color.LightBlue)
help += '\n%sType the \'help workers\' or the first part of the worker <tab>...' % (color.Green)
help += '\n\t%s> help workers (lists all possible workers)' % (color.LightBlue)
help += '\n\t%s> pe_<tab> (will give you pe_classifier, pe_deep_sim, pe_features, pe_indicators, pe_peid)%s' % (color.LightBlue, color.Normal)
return help | 0.013064 |
def label_const(self, const:Any=0, label_cls:Callable=None, **kwargs)->'LabelList':
"Label every item with `const`."
return self.label_from_func(func=lambda o: const, label_cls=label_cls, **kwargs) | 0.051643 |
def remove(self, index):
"""Removes specified item from the model.
:index: Should have a form "<itemref>.<subitemref...>" (e.g. "1.1").
:index: Item's index.
"""
data = self.data
index = self._split(index)
for j, c in enumerate(index):
i = int(c) - 1
if j + 1 == len(index):
try:
del data[i]
except IndexError:
raise NoItemError('.'.join(index))
else:
data = data[i][4] | 0.003623 |
def query_package_version_metrics(self, package_version_id_query, feed_id, package_id, project=None):
"""QueryPackageVersionMetrics.
[Preview API]
:param :class:`<PackageVersionMetricsQuery> <azure.devops.v5_1.feed.models.PackageVersionMetricsQuery>` package_version_id_query:
:param str feed_id:
:param str package_id:
:param str project: Project ID or project name
:rtype: [PackageVersionMetrics]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_id is not None:
route_values['packageId'] = self._serialize.url('package_id', package_id, 'str')
content = self._serialize.body(package_version_id_query, 'PackageVersionMetricsQuery')
response = self._send(http_method='POST',
location_id='e6ae8caa-b6a8-4809-b840-91b2a42c19ad',
version='5.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('[PackageVersionMetrics]', self._unwrap_collection(response)) | 0.007496 |
def start_scan(self, scan_id, targets, parallel=1):
""" Handle N parallel scans if 'parallel' is greater than 1. """
os.setsid()
multiscan_proc = []
logger.info("%s: Scan started.", scan_id)
target_list = targets
if target_list is None or not target_list:
raise OSPDError('Erroneous targets list', 'start_scan')
for index, target in enumerate(target_list):
while len(multiscan_proc) >= parallel:
progress = self.calculate_progress(scan_id)
self.set_scan_progress(scan_id, progress)
multiscan_proc = self.check_pending_target(scan_id,
multiscan_proc)
time.sleep(1)
# If the scan status is stopped, do not launch any more target scans
if self.get_scan_status(scan_id) == ScanStatus.STOPPED:
return
logger.info("%s: Host scan started on ports %s.", target[0], target[1])
scan_process = multiprocessing.Process(target=self.parallel_scan,
args=(scan_id, target[0]))
multiscan_proc.append((scan_process, target[0]))
scan_process.start()
self.set_scan_status(scan_id, ScanStatus.RUNNING)
# Wait until all single target were scanned
while multiscan_proc:
multiscan_proc = self.check_pending_target(scan_id, multiscan_proc)
if multiscan_proc:
progress = self.calculate_progress(scan_id)
self.set_scan_progress(scan_id, progress)
time.sleep(1)
# Only set the scan as finished if the scan was not stopped.
if self.get_scan_status(scan_id) != ScanStatus.STOPPED:
self.finish_scan(scan_id) | 0.002706 |
def get_interfaces(self, socket_connection=None):
"""Returns the a list of Interface objects the service implements."""
if not socket_connection:
socket_connection = self.open_connection()
close_socket = True
else:
close_socket = False
# noinspection PyUnresolvedReferences
_service = self.handler(self._interfaces["org.varlink.service"], socket_connection)
self.info = _service.GetInfo()
if close_socket:
socket_connection.close()
return self.info['interfaces'] | 0.005199 |
def delete_module(modname):
"""
Delete module and sub-modules from `sys.module`
"""
try:
_ = sys.modules[modname]
except KeyError:
raise ValueError("Module not found in sys.modules: '{}'".format(modname))
for module in list(sys.modules.keys()):
if module and module.startswith(modname):
del sys.modules[module] | 0.005391 |
def dist_euclidean(src, tar, qval=2, alphabet=None):
"""Return the normalized Euclidean distance between two strings.
This is a wrapper for :py:meth:`Euclidean.dist`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The normalized Euclidean distance
Examples
--------
>>> round(dist_euclidean('cat', 'hat'), 12)
0.57735026919
>>> round(dist_euclidean('Niall', 'Neil'), 12)
0.683130051064
>>> round(dist_euclidean('Colin', 'Cuilen'), 12)
0.727606875109
>>> dist_euclidean('ATCG', 'TAGC')
1.0
"""
return Euclidean().dist(src, tar, qval, alphabet) | 0.001067 |
def flatten(value):
"""value can be any nesting of tuples, arrays, dicts.
returns 1D numpy array and an unflatten function."""
if isinstance(value, np.ndarray):
def unflatten(vector):
return np.reshape(vector, value.shape)
return np.ravel(value), unflatten
elif isinstance(value, float):
return np.array([value]), lambda x: x[0]
elif isinstance(value, tuple):
if not value:
return np.array([]), lambda x: ()
flattened_first, unflatten_first = flatten(value[0])
flattened_rest, unflatten_rest = flatten(value[1:])
def unflatten(vector):
N = len(flattened_first)
return (unflatten_first(vector[:N]),) + unflatten_rest(vector[N:])
return np.concatenate((flattened_first, flattened_rest)), unflatten
elif isinstance(value, list):
if not value:
return np.array([]), lambda x: []
flattened_first, unflatten_first = flatten(value[0])
flattened_rest, unflatten_rest = flatten(value[1:])
def unflatten(vector):
N = len(flattened_first)
return [unflatten_first(vector[:N])] + unflatten_rest(vector[N:])
return np.concatenate((flattened_first, flattened_rest)), unflatten
elif isinstance(value, dict):
flattened = []
unflatteners = []
lengths = []
keys = []
for k, v in sorted(value.items(), key=itemgetter(0)):
cur_flattened, cur_unflatten = flatten(v)
flattened.append(cur_flattened)
unflatteners.append(cur_unflatten)
lengths.append(len(cur_flattened))
keys.append(k)
def unflatten(vector):
split_ixs = np.cumsum(lengths)
pieces = np.split(vector, split_ixs)
return {key: unflattener(piece)
for piece, unflattener, key in zip(pieces,
unflatteners,
keys)}
return np.concatenate(flattened), unflatten
else:
raise Exception("Don't know how to flatten type {}".format(type(value))
) | 0.000451 |
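A round-trip sketch for the flatten/unflatten pair above (assumes numpy is imported as np and itemgetter comes from operator, as the function requires):
    params = {'b': 0.5, 'w': np.ones((2, 2))}
    vec, unflatten = flatten(params)
    print(vec.shape)               # (5,) -- one slot for 'b' plus four for 'w'
    restored = unflatten(vec)
    print(restored['w'].shape)     # (2, 2), the original structure is rebuilt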
def set_n_gram(self, value):
''' setter '''
if isinstance(value, Ngram):
self.__n_gram = value
else:
raise TypeError("The type of n_gram must be Ngram.") | 0.00995 |
def _round_to(dt, hour, minute, second):
"""
    Round the given datetime to the nearest time with the given hour, minute
    and second (ties resolve to the earlier occurrence).
"""
new_dt = dt.replace(hour=hour, minute=minute, second=second)
if new_dt == dt:
return new_dt
elif new_dt < dt:
before = new_dt
after = new_dt + timedelta(days=1)
elif new_dt > dt:
before = new_dt - timedelta(days=1)
after = new_dt
d1 = dt - before
d2 = after - dt
if d1 < d2:
return before
elif d1 > d2:
return after
else:
return before | 0.001704 |
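An illustrative call to _round_to (assumes datetime and timedelta are imported from the datetime module, as the function requires):
    dt = datetime(2020, 1, 1, 13, 40)
    _round_to(dt, hour=14, minute=0, second=0)
    # -> datetime(2020, 1, 1, 14, 0): the 14:00:00 twenty minutes ahead is
    #    closer than the one from the previous day, so it is returned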
def __make_request_method(self, teststep_dict, entry_json):
""" parse HAR entry request method, and make teststep method.
"""
method = entry_json["request"].get("method")
if not method:
logging.exception("method missed in request.")
sys.exit(1)
teststep_dict["request"]["method"] = method | 0.005666 |
def iter_comments(self, number=-1):
"""Iterate over the comments on this issue.
:param int number: (optional), number of comments to iterate over
:returns: iterator of
:class:`IssueComment <github3.issues.comment.IssueComment>`\ s
"""
url = self._build_url('comments', base_url=self._api)
return self._iter(int(number), url, IssueComment) | 0.007519 |
def _getRegisteredExecutable(exeName):
"""Windows allow application paths to be registered in the registry."""
registered = None
if sys.platform.startswith('win'):
if os.path.splitext(exeName)[1].lower() != '.exe':
exeName += '.exe'
import _winreg
try:
key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\" +\
exeName
value = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, key)
registered = (value, "from HKLM\\"+key)
except _winreg.error:
pass
if registered and not os.path.exists(registered[0]):
registered = None
return registered | 0.002894 |
def created_slices(self, user_id=None):
"""List of slices created by this user"""
if not user_id:
user_id = g.user.id
Slice = models.Slice # noqa
qry = (
db.session.query(Slice)
.filter(
sqla.or_(
Slice.created_by_fk == user_id,
Slice.changed_by_fk == user_id,
),
)
.order_by(Slice.changed_on.desc())
)
payload = [{
'id': o.id,
'title': o.slice_name,
'url': o.slice_url,
'dttm': o.changed_on,
'viz_type': o.viz_type,
} for o in qry.all()]
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser)) | 0.002558 |
def sync_firmware(self):
"""Syncs the emulator's firmware version and the DLL's firmware.
This method is useful for ensuring that the firmware running on the
J-Link matches the firmware supported by the DLL.
Args:
self (JLink): the ``JLink`` instance
Returns:
``None``
"""
serial_no = self.serial_number
if self.firmware_newer():
# The J-Link's firmware is newer than the one compatible with the
# DLL (though there are promises of backwards compatibility), so
# perform a downgrade.
try:
# This may throw an exception on older versions of the J-Link
# software due to the software timing out after a firmware
# upgrade.
self.invalidate_firmware()
self.update_firmware()
except errors.JLinkException as e:
pass
res = self.open(serial_no=serial_no)
if self.firmware_newer():
raise errors.JLinkException('Failed to sync firmware version.')
return res
elif self.firmware_outdated():
# The J-Link's firmware is older than the one compatible with the
# DLL, so perform a firmware upgrade.
try:
# This may throw an exception on older versions of the J-Link
# software due to the software timing out after a firmware
# upgrade.
self.update_firmware()
except errors.JLinkException as e:
pass
if self.firmware_outdated():
raise errors.JLinkException('Failed to sync firmware version.')
return self.open(serial_no=serial_no)
return None | 0.001103 |
def read_coordinates(self, where=None, start=None, stop=None, **kwargs):
"""select coordinates (row numbers) from a table; return the
coordinates object
"""
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return False
# create the selection
self.selection = Selection(
self, where=where, start=start, stop=stop, **kwargs)
coords = self.selection.select_coords()
if self.selection.filter is not None:
for field, op, filt in self.selection.filter.format():
data = self.read_column(
field, start=coords.min(), stop=coords.max() + 1)
coords = coords[
op(data.iloc[coords - coords.min()], filt).values]
return Index(coords) | 0.002278 |
def django_object_from_row(row, model, field_names=None, ignore_fields=('id', 'pk'), ignore_related=True, strip=True, ignore_errors=True, verbosity=0):
"""Construct Django model instance from values provided in a python dict or Mapping
Args:
row (list or dict): Data (values of any type) to be assigned to fields in the Django object.
If `row` is a list, then the column names (header row) can be provided in `field_names`.
If `row` is a list and no field_names are provided, then `field_names` will be taken from the
Django model class field names, in the order they appear within the class definition.
model (django.db.models.Model): The model class to be constructed with data from `row`
field_names (list or tuple of str): The field names to place the row values in.
Defaults to the keys of the dict of `row` (if `row` is a `dict`) or the names of the fields
in the Django model being constructed.
        ignore_fields (list or tuple of str): The field names to skip when placing the row values.
Returns:
Model instance: Django model instance constructed with values from `row` in fields
from `field_names` or `model`'s fields
"""
field_dict, errors = field_dict_from_row(row, model, field_names=field_names, ignore_fields=ignore_fields, strip=strip,
ignore_errors=ignore_errors, ignore_related=ignore_related, verbosity=verbosity)
if verbosity >= 3:
print 'field_dict = %r' % field_dict
try:
obj = model(**field_dict)
return obj, errors
except:
print_exc()
raise ValueError('Unable to coerce the dict = %r into a %r object' % (field_dict, model)) | 0.010888 |
def get_media_detail_output_interface_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_media_detail = ET.Element("get_media_detail")
config = get_media_detail
output = ET.SubElement(get_media_detail, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name = ET.SubElement(interface, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.002821 |
def enum(description, **kwargs) -> typing.Type:
"""Create a :class:`~doctor.types.Enum` type.
:param description: A description of the type.
:param kwargs: Can include any attribute defined in
:class:`~doctor.types.Enum`
"""
kwargs['description'] = description
return type('Enum', (Enum,), kwargs) | 0.00303 |
def loop(self):
"""Lazy event loop initialization"""
        if not self._loop:
            self._loop = IOLoop.current()
        return self._loop | 0.010811
def openstack_exception(func):
'''
Openstack exceptions decorator
'''
async def wrap(*args, **kwargs):
try:
return await func(*args, **kwargs)
except Exception as e:
logging.error(e)
raise IaasException
return wrap | 0.006061 |
def invalidate(*tables_or_models, **kwargs):
"""
Clears what was cached by django-cachalot implying one or more SQL tables
or models from ``tables_or_models``.
If ``tables_or_models`` is not specified, all tables found in the database
(including those outside Django) are invalidated.
If ``cache_alias`` is specified, it only clears the SQL queries stored
on this cache, otherwise queries from all caches are cleared.
If ``db_alias`` is specified, it only clears the SQL queries executed
on this database, otherwise queries from all databases are cleared.
:arg tables_or_models: SQL tables names, models or models lookups
(or a combination)
:type tables_or_models: tuple of strings or models
:arg cache_alias: Alias from the Django ``CACHES`` setting
:type cache_alias: string or NoneType
:arg db_alias: Alias from the Django ``DATABASES`` setting
:type db_alias: string or NoneType
:returns: Nothing
:rtype: NoneType
"""
# TODO: Replace with positional arguments when we drop Python 2 support.
cache_alias = kwargs.pop('cache_alias', None)
db_alias = kwargs.pop('db_alias', None)
for k in kwargs:
raise TypeError(
"invalidate() got an unexpected keyword argument '%s'" % k)
send_signal = False
invalidated = set()
for cache_alias, db_alias, tables in _cache_db_tables_iterator(
list(_get_tables(tables_or_models)), cache_alias, db_alias):
cache = cachalot_caches.get_cache(cache_alias, db_alias)
if not isinstance(cache, AtomicCache):
send_signal = True
_invalidate_tables(cache, db_alias, tables)
invalidated.update(tables)
if send_signal:
for table in invalidated:
post_invalidation.send(table, db_alias=db_alias) | 0.000541 |
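A minimal usage sketch for django-cachalot's invalidate (assumes a configured Django project with cachalot installed; the model import is only an example):
    from cachalot.api import invalidate
    from django.contrib.auth.models import User
    invalidate('auth_user')                     # by raw table name
    invalidate(User, cache_alias='default')     # by model, one cache only
    invalidate()                                # flush everything cachalot cached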
def watch_run_status(server, project, run, apikey, timeout=None, update_period=1):
"""
Monitor a linkage run and yield status updates. Will immediately yield an update and then
only yield further updates when the status object changes. If a timeout is provided and the
run hasn't entered a terminal state (error or completed) when the timeout is reached,
updates will cease and a TimeoutError will be raised.
:param server: Base url of the upstream server.
:param project:
:param run:
:param apikey:
:param timeout: Stop waiting after this many seconds. The default (None) is to never give you up.
:param update_period: Time in seconds between queries to the run's status.
:raises TimeoutError
"""
start_time = time.time()
status = old_status = run_get_status(server, project, run, apikey)
yield status
def time_not_up():
return (
(timeout is None) or
(time.time() - start_time < timeout)
)
while time_not_up():
if status['state'] in {'error', 'completed'}:
# No point continuing as run has entered a terminal state
yield status
return
if old_status != status:
yield status
time.sleep(update_period)
old_status = status
try:
status = run_get_status(server, project, run, apikey)
except RateLimitedClient:
time.sleep(1)
raise TimeoutError("Timeout exceeded before run {} terminated".format(run)) | 0.003906 |
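A consumption sketch for the generator above (server, project, run and apikey are placeholders supplied by the caller):
    for status in watch_run_status(server, project, run, apikey, timeout=300):
        print(status['state'])
    # The loop ends once the run reaches 'completed' or 'error'; a TimeoutError
    # is raised if no terminal state is seen within 300 seconds.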
def _set_conf(self):
"""
Set configuration parameters from the Conf object into the detector
object.
Time values are converted to samples, and amplitude values are in mV.
"""
self.rr_init = 60 * self.fs / self.conf.hr_init
self.rr_max = 60 * self.fs / self.conf.hr_min
self.rr_min = 60 * self.fs / self.conf.hr_max
# Note: if qrs_width is odd, qrs_width == qrs_radius*2 + 1
self.qrs_width = int(self.conf.qrs_width * self.fs)
self.qrs_radius = int(self.conf.qrs_radius * self.fs)
self.qrs_thr_init = self.conf.qrs_thr_init
self.qrs_thr_min = self.conf.qrs_thr_min
self.ref_period = int(self.conf.ref_period * self.fs)
self.t_inspect_period = int(self.conf.t_inspect_period * self.fs) | 0.002478 |
def zip_strip_namespace(zip_src, namespace, logger=None):
""" Given a namespace, strips 'namespace__' from all files and filenames
in the zip
"""
namespace_prefix = "{}__".format(namespace)
lightning_namespace = "{}:".format(namespace)
zip_dest = zipfile.ZipFile(io.BytesIO(), "w", zipfile.ZIP_DEFLATED)
for name in zip_src.namelist():
orig_content = zip_src.read(name)
try:
orig_content = orig_content.decode("utf-8")
except UnicodeDecodeError:
# if we cannot decode the content, don't try and replace it.
new_content = orig_content
else:
new_content = orig_content.replace(namespace_prefix, "")
new_content = new_content.replace(lightning_namespace, "c:")
            name = name.replace(namespace_prefix, "")  # strip the prefix from the file name as well
if orig_content != new_content and logger:
logger.info(
" {file_name}: removed {namespace}".format(
file_name=name, namespace=namespace_prefix
)
)
new_content = new_content.encode("utf-8")
zip_dest.writestr(name, new_content)
return zip_dest | 0.001606 |
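A self-contained sketch of zip_strip_namespace on an in-memory archive (file name and namespace are made up; assumes io and zipfile are imported, as the function requires):
    import io, zipfile
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as z:
        z.writestr("classes/ns__Foo.cls", "global class ns__Foo {}")
    stripped = zip_strip_namespace(zipfile.ZipFile(buf), "ns")
    print(stripped.namelist())       # ['classes/Foo.cls'], contents renamed too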
def add_context(
self,
name,
cluster_name=None,
user_name=None,
namespace_name=None,
**attrs
):
"""Add a context to config."""
if self.context_exists(name):
raise KubeConfError("context with the given name already exists.")
contexts = self.get_contexts()
# Add parameters.
new_context = {'name': name, 'context':{}}
# Add attributes
attrs_ = new_context['context']
if cluster_name is not None:
attrs_['cluster'] = cluster_name
if user_name is not None:
attrs_['user'] = user_name
if namespace_name is not None:
attrs_['namespace'] = namespace_name
attrs_.update(attrs)
contexts.append(new_context) | 0.00612 |
def equal_set(self, a, b):
"See if a and b have the same elements"
if len(a) != len(b):
return 0
if a == b:
return 1
return self.subset(a, b) and self.subset(b, a) | 0.009132 |
def create_api_vrf(self):
"""Get an instance of Api Vrf services facade."""
return ApiVrf(
self.networkapi_url,
self.user,
self.password,
self.user_ldap) | 0.009217 |
def order_by(self, field_path, direction=ASCENDING):
"""Modify the query to add an order clause on a specific field.
See :meth:`~.firestore_v1beta1.client.Client.field_path` for
more information on **field paths**.
Successive :meth:`~.firestore_v1beta1.query.Query.order_by` calls
will further refine the ordering of results returned by the query
(i.e. the new "order by" fields will be added to existing ones).
Args:
field_path (str): A field path (``.``-delimited list of
field names) on which to order the query results.
direction (Optional[str]): The direction to order by. Must be one
of :attr:`ASCENDING` or :attr:`DESCENDING`, defaults to
:attr:`ASCENDING`.
Returns:
~.firestore_v1beta1.query.Query: An ordered query. Acts as a
copy of the current query, modified with the newly added
"order by" constraint.
Raises:
ValueError: If ``field_path`` is invalid.
ValueError: If ``direction`` is not one of :attr:`ASCENDING` or
:attr:`DESCENDING`.
"""
field_path_module.split_field_path(field_path) # raises
order_pb = self._make_order(field_path, direction)
new_orders = self._orders + (order_pb,)
return self.__class__(
self._parent,
projection=self._projection,
field_filters=self._field_filters,
orders=new_orders,
limit=self._limit,
offset=self._offset,
start_at=self._start_at,
end_at=self._end_at,
) | 0.001191 |
def append_system_paths(self):
"""Append system paths to $PATH."""
from rez.shells import Shell, create_shell
sh = self.interpreter if isinstance(self.interpreter, Shell) \
else create_shell()
paths = sh.get_syspaths()
paths_str = os.pathsep.join(paths)
self.env.PATH.append(paths_str) | 0.00578 |
def save(self, path, overWrite):
"""
save OptimMethod
        :param path: path to save the OptimMethod to
        :param overWrite: whether to overwrite an existing file
"""
method=self.value
return callBigDlFunc(self.bigdl_type, "saveOptimMethod", method, path, overWrite) | 0.014652 |
def decode_wireformat_uuid(rawguid):
"""Decode a wire format UUID
It handles the rather particular scheme where half is little endian
and half is big endian. It returns a string like dmidecode would output.
"""
if isinstance(rawguid, list):
rawguid = bytearray(rawguid)
lebytes = struct.unpack_from('<IHH', buffer(rawguid[:8]))
bebytes = struct.unpack_from('>HHI', buffer(rawguid[8:]))
return '{0:08X}-{1:04X}-{2:04X}-{3:04X}-{4:04X}{5:08X}'.format(
lebytes[0], lebytes[1], lebytes[2], bebytes[0], bebytes[1], bebytes[2]) | 0.001751 |
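A worked example of the mixed-endian layout handled above (shown as a comment because the buffer() call is Python 2 only):
    # raw = bytearray([0x33, 0x22, 0x11, 0x00, 0x55, 0x44, 0x77, 0x66,   # little-endian half
    #                  0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF])  # big-endian half
    # decode_wireformat_uuid(raw) -> '00112233-4455-6677-8899-AABBCCDDEEFF'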
def _leu16(ins):
''' Compares & pops top 2 operands out of the stack, and checks
if the 1st operand <= 2nd operand (top of the stack).
Pushes 0 if False, 1 if True.
16 bit unsigned version
'''
output = _16bit_oper(ins.quad[2], ins.quad[3], reversed=True)
output.append('or a')
output.append('sbc hl, de') # Carry if A > B
output.append('ccf') # Negates the result => Carry if A <= B
output.append('sbc a, a')
output.append('push af')
return output | 0.001961 |
def list_aliases():
'''
Return the aliases found in the aliases file in this format::
{'alias': 'target'}
CLI Example:
.. code-block:: bash
salt '*' aliases.list_aliases
'''
ret = dict((alias, target) for alias, target, comment in __parse_aliases() if alias)
return ret | 0.006309 |
def load_glossary(file_path: str, read_json=False) -> List[str]:
"""
A glossary is a text file, one entry per line.
Args:
file_path (str): path to a text file containing a glossary.
read_json (bool): set True if the glossary is in json format
Returns: List of the strings in the glossary.
"""
if read_json:
if file_path.endswith(".gz"):
return json.load(gzip.open(file_path))
return json.load(open(file_path))
return open(file_path).read().splitlines() | 0.003484 |
def best_score(self, seqs, scan_rc=True, normalize=False):
"""
give the score of the best match of each motif in each sequence
returns an iterator of lists containing floats
"""
self.set_threshold(threshold=0.0)
if normalize and len(self.meanstd) == 0:
self.set_meanstd()
means = np.array([self.meanstd[m][0] for m in self.motif_ids])
stds = np.array([self.meanstd[m][1] for m in self.motif_ids])
for matches in self.scan(seqs, 1, scan_rc):
scores = np.array([sorted(m, key=lambda x: x[0])[0][0] for m in matches if len(m) > 0])
if normalize:
scores = (scores - means) / stds
yield scores | 0.004093 |
def generateRandomSDR(numSDR, numDims, numActiveInputBits, seed=42):
"""
Generate a set of random SDR's
@param numSDR:
  @param numDims:
@param numActiveInputBits:
"""
randomSDRs = np.zeros((numSDR, numDims), dtype=uintType)
indices = np.array(range(numDims))
np.random.seed(seed)
for i in range(numSDR):
randomIndices = np.random.permutation(indices)
activeBits = randomIndices[:numActiveInputBits]
randomSDRs[i, activeBits] = 1
return randomSDRs | 0.014675 |
def request(self, method: str, path: str, content: Optional[Union[dict, bytes, str]] = None,
timestamp: Optional[int] = None, external_url: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
query_params: Optional[Dict[str, Any]] = None,
api_path: str = "/_matrix/client/r0") -> Awaitable[dict]:
"""
Make a raw HTTP request.
Args:
method: The HTTP method to use.
path: The API endpoint to call. Does not include the base path (e.g. /_matrix/client/r0).
content: The content to post as a dict (json) or bytes/str (raw).
timestamp: The timestamp query param used for timestamp massaging.
external_url: The external_url field to send in the content
(only applicable if content is dict).
headers: The dict of HTTP headers to send.
query_params: The dict of query parameters to send.
api_path: The base API path.
Returns:
The response as a dict.
"""
content = content or {}
headers = headers or {}
query_params = query_params or {}
query_params["access_token"] = self.token
if timestamp is not None:
if isinstance(timestamp, datetime):
timestamp = int(timestamp.replace(tzinfo=timezone.utc).timestamp() * 1000)
query_params["ts"] = timestamp
if isinstance(content, dict) and external_url is not None:
content["external_url"] = external_url
method = method.upper()
if method not in ["GET", "PUT", "DELETE", "POST"]:
raise MatrixError("Unsupported HTTP method: %s" % method)
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
if headers.get("Content-Type", None) == "application/json":
content = json.dumps(content)
if self.identity and not self.is_real_user:
query_params["user_id"] = self.identity
self._log_request(method, path, content, query_params)
endpoint = self.base_url + api_path + path
return self._send(method, endpoint, content, query_params, headers or {}) | 0.004882 |
def _user_path(self, subdir, basename=''):
'''
Gets the full path to the 'subdir/basename' file in the user binwalk directory.
@subdir - Subdirectory inside the user binwalk directory.
@basename - File name inside the subdirectory.
Returns the full path to the 'subdir/basename' file.
'''
try:
return self._file_path(os.path.join(self.user_dir, self.BINWALK_USER_DIR, subdir), basename)
except KeyboardInterrupt as e:
raise e
except Exception:
return None | 0.007055 |