| text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
|---|---|
def format_param_list(listed_params, output_name):
'''
Utility method for formatting lists of parameters for api consumption
Useful for email address lists, etc
Args:
listed_params (list of values) - the list to format
output_name (str) - the parameter name to prepend to each key
'''
output_payload = {}
if listed_params:
for index, item in enumerate(listed_params):
output_payload[str(output_name) + "[" + str(index) + "]" ] = item
return output_payload | 0.00846 |
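A minimal usage sketch of the indexed-key convention the function above produces; the variable names and the "to" parameter here are illustrative assumptions, not part of the original:
emails = ["a@example.com", "b@example.com"]
payload = {"to[" + str(i) + "]": addr for i, addr in enumerate(emails)}
# payload == {"to[0]": "a@example.com", "to[1]": "b@example.com"}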
def handle(client_message, handle_event_entry=None, to_object=None):
""" Event handler """
message_type = client_message.get_message_type()
if message_type == EVENT_ENTRY and handle_event_entry is not None:
key = None
if not client_message.read_bool():
key = client_message.read_data()
value = None
if not client_message.read_bool():
value = client_message.read_data()
old_value = None
if not client_message.read_bool():
old_value = client_message.read_data()
merging_value = None
if not client_message.read_bool():
merging_value = client_message.read_data()
event_type = client_message.read_int()
uuid = client_message.read_str()
number_of_affected_entries = client_message.read_int()
handle_event_entry(key=key, value=value, old_value=old_value, merging_value=merging_value, event_type=event_type, uuid=uuid, number_of_affected_entries=number_of_affected_entries) | 0.001959 |
def mount_point_ready(self, path):
"""! Check if a mount point is ready for file operations
@return Returns True if the given path exists, False otherwise
@details Calling the Windows command `dir` instead of using the python
`os.path.exists`. The latter causes a Python error box to appear claiming
there is "No Disk" for some devices that are in the ejected state. Calling
`dir` prevents this since it uses the Windows API to determine if the
device is ready before accessing the file system.
"""
stdout, stderr, retcode = self._run_cli_process("dir %s" % path)
return retcode == 0 | 0.005658 |
def setbpf(self, bpf):
"""Set number of bits per float output"""
self._bpf = min(bpf, self.BPF)
self._rng_n = int((self._bpf + self.RNG_RANGE_BITS - 1) / self.RNG_RANGE_BITS) | 0.015152 |
def map_exp_ids(self, exp):
"""Maps ids to feature names.
Args:
exp: list of tuples [(id, weight), (id,weight)]
Returns:
list of tuples (feature_name, weight)
"""
names = self.exp_feature_names
if self.discretized_feature_names is not None:
names = self.discretized_feature_names
return [(names[x[0]], x[1]) for x in exp] | 0.004819 |
def _execute_command(self, command, *args):
"""Execute the state transition command."""
try:
command(*args)
except libvirt.libvirtError as error:
raise RuntimeError("Unable to execute command. %s" % error) | 0.007905 |
def remove_ext(fname):
"""Removes the extension from a filename
"""
bn = os.path.basename(fname)
return os.path.splitext(bn)[0] | 0.006993 |
def golowtran(c1: Dict[str, Any]) -> xarray.Dataset:
"""directly run Fortran code"""
# %% default parameters
c1.setdefault('time', None)
defp = ('h1', 'h2', 'angle', 'im', 'iseasn', 'ird1', 'range_km', 'zmdl', 'p', 't')
for p in defp:
c1.setdefault(p, 0)
c1.setdefault('wmol', [0]*12)
# %% input check
assert len(c1['wmol']) == 12, 'see Lowtran user manual for 12 values of WMOL'
assert np.isfinite(c1['h1']), 'per Lowtran user manual Table 14, H1 must always be defined'
# %% setup wavelength
c1.setdefault('wlstep', 20)
if c1['wlstep'] < 5:
logging.critical('minimum resolution 5 cm^-1, specified resolution %s cm^-1', c1['wlstep'])
wlshort, wllong, nwl = nm2lt7(c1['wlshort'], c1['wllong'], c1['wlstep'])
if not (0 < wlshort and wllong <= 50000):
logging.critical('specified model range 0 <= wavelength [cm^-1] <= 50000')
# %% invoke lowtran
"""
Note we invoke case "3a" from table 14, only observer altitude and apparent
angle are specified
"""
Tx, V, Alam, trace, unif, suma, irrad, sumvv = lowtran7.lwtrn7(
True, nwl, wllong, wlshort, c1['wlstep'],
c1['model'], c1['itype'], c1['iemsct'], c1['im'],
c1['iseasn'], c1['ird1'],
c1['zmdl'], c1['p'], c1['t'], c1['wmol'],
c1['h1'], c1['h2'], c1['angle'], c1['range_km'])
dims = ('time', 'wavelength_nm', 'angle_deg')
TR = xarray.Dataset({'transmission': (dims, Tx[:, 9][None, :, None]),
'radiance': (dims, sumvv[None, :, None]),
'irradiance': (dims, irrad[:, 0][None, :, None]),
'pathscatter': (dims, irrad[:, 2][None, :, None])},
coords={'time': [c1['time']],
'wavelength_nm': Alam*1e3,
'angle_deg': [c1['angle']]})
return TR | 0.003209 |
def inpaint(self):
""" Replace masked-out elements in an array using an iterative image inpainting algorithm. """
import inpaint
filled = inpaint.replace_nans(np.ma.filled(self.raster_data, np.NAN).astype(np.float32), 3, 0.01, 2)
self.raster_data = np.ma.masked_invalid(filled) | 0.012903 |
def p2s(self, p=None):
"""Convert from plot to screen coordinates"""
if not p: p = [0, 0]
s = self.p2c(p)
return self.c2s(s) | 0.019108 |
def isconst(cls, val):
''' Whether the value is a string color literal.
Checks for a well-formed hexadecimal color value or a named color.
Args:
val (str) : the value to check
Returns:
True, if the value is a string color literal
'''
return isinstance(val, string_types) and \
((len(val) == 7 and val[0] == "#") or val in enums.NamedColor) | 0.006977 |
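A self-contained sketch of the same check; the small stand-in set replacing enums.NamedColor and plain str replacing string_types are assumptions for illustration only:
named_colors = {"red", "green", "blue"}  # stand-in for enums.NamedColor
def looks_like_color_literal(val):
    # well-formed hex value ("#rrggbb") or a named color
    return isinstance(val, str) and ((len(val) == 7 and val[0] == "#") or val in named_colors)

assert looks_like_color_literal("#ff0000")
assert looks_like_color_literal("red")
assert not looks_like_color_literal("ff0000")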
def is_daylight_saving_hour(self, datetime):
"""Check if a datetime is a daylight saving time."""
if not self.daylight_saving_period:
return False
return self.daylight_saving_period.isTimeIncluded(datetime.hoy) | 0.00813 |
def labels(self, value):
"""
Setter for **self.__labels** attribute.
:param value: Attribute value.
:type value: tuple
"""
if value is not None:
assert type(value) is tuple, "'{0}' attribute: '{1}' type is not 'tuple'!".format("labels", value)
assert len(value) == 2, "'{0}' attribute: '{1}' length should be '2'!".format("labels", value)
for index in range(len(value)):
assert type(value[index]) is unicode, \
"'{0}' attribute element '{1}': '{2}' type is not 'unicode'!".format("labels", index, value)
self.__labels = value | 0.007645 |
def merge_Fm(dfs_data):
"""Merges Fm-1 and Fm, as defined on page 19 of the paper."""
FG = dfs_data['FG']
m = FG['m']
FGm = FG[m]
FGm1 = FG[m-1]
if FGm[0]['u'] < FGm1[0]['u']:
FGm1[0]['u'] = FGm[0]['u']
if FGm[0]['v'] > FGm1[0]['v']:
FGm1[0]['v'] = FGm[0]['v']
if FGm[1]['x'] < FGm1[1]['x']:
FGm1[1]['x'] = FGm[1]['x']
if FGm[1]['y'] > FGm1[1]['y']:
FGm1[1]['y'] = FGm[1]['y']
del FG[m]
FG['m'] -= 1 | 0.002083 |
def _logger_stream(self):
"""Add stream logging handler."""
sh = logging.StreamHandler()
sh.set_name('sh')
sh.setLevel(logging.INFO)
sh.setFormatter(self._logger_formatter)
self.log.addHandler(sh) | 0.008197 |
def _deleteTrackers(self, trackers):
"""
Delete the given signup trackers and their associated signup resources.
@param trackers: sequence of L{_SignupTrackers}
"""
for tracker in trackers:
if tracker.store is None:
# we're not updating the list of live signups client side, so
# we might get a signup that has already been deleted
continue
sig = tracker.signupItem
# XXX the only reason we're doing this here is that we're afraid to
# add a whenDeleted=CASCADE to powerups because it's inefficient,
# however, this is arguably the archetypical use of
# whenDeleted=CASCADE. Soon we need to figure out a real solution
# (but I have no idea what it is). -glyph
for iface in sig.store.interfacesFor(sig):
sig.store.powerDown(sig, iface)
tracker.deleteFromStore()
sig.deleteFromStore() | 0.001976 |
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
) | 0.001131 |
def netconf_config_change_changed_by_server_or_user_server_server(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
netconf_config_change = ET.SubElement(config, "netconf-config-change", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-notifications")
changed_by = ET.SubElement(netconf_config_change, "changed-by")
server_or_user = ET.SubElement(changed_by, "server-or-user")
server = ET.SubElement(server_or_user, "server")
server = ET.SubElement(server, "server")
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.006211 |
def dense_to_one_hot(labels_dense, num_classes):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot | 0.005291 |
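A short worked example of the flat-index trick used above (numpy only; the sample labels are made up):
import numpy as np

labels_dense = np.array([0, 2, 1])
num_classes = 3
one_hot = np.zeros((labels_dense.shape[0], num_classes))
# row i gets a 1 at column labels_dense[i] via the flattened view
one_hot.flat[np.arange(labels_dense.shape[0]) * num_classes + labels_dense.ravel()] = 1
# one_hot -> [[1, 0, 0], [0, 0, 1], [0, 1, 0]]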
def _area_is_empty(self, screen, write_position):
"""
Return True when the area below the write position is still empty.
(For floats that should not hide content underneath.)
"""
wp = write_position
Transparent = Token.Transparent
for y in range(wp.ypos, wp.ypos + wp.height):
if y in screen.data_buffer:
row = screen.data_buffer[y]
for x in range(wp.xpos, wp.xpos + wp.width):
c = row[x]
if c.char != ' ' or c.token != Transparent:
return False
return True | 0.003165 |
def tree_prune_tax_ids(self, tree, tax_ids):
"""Prunes a tree back to contain only the tax_ids in the list and their parents.
Parameters
----------
tree : `skbio.tree.TreeNode`
The root node of the tree to perform this operation on.
tax_ids : `list`
A `list` of taxonomic IDs to keep in the tree.
Returns
-------
`skbio.tree.TreeNode`, the root of a tree containing the given taxonomic IDs and their
parents, leading back to the root node.
"""
tax_ids_to_keep = []
for tax_id in tax_ids:
tax_ids_to_keep.append(tax_id)
tax_ids_to_keep.extend([x.name for x in tree.find(tax_id).ancestors()])
tree = tree.copy()
tree.remove_deleted(lambda n: n.name not in tax_ids_to_keep)
return tree | 0.005848 |
def lte(max_value):
"""
Validates that a field value is less than or equal to the
value given to this validator.
"""
def validate(value):
if value > max_value:
return e("{} is not less than or equal to {}", value, max_value)
return validate | 0.003521 |
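A self-contained sketch of how such a validator factory is typically used; replacing the error helper e() with str.format is an assumption made only so the example runs on its own:
def lte(max_value):
    def validate(value):
        if value > max_value:
            return "{} is not less than or equal to {}".format(value, max_value)
    return validate

check = lte(10)
assert check(5) is None                 # valid values return nothing
assert "not less than" in check(11)     # invalid values return an error message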
def run_command(self, input_file, output_dir=None):
"""Return the command for running bfconvert as a list.
:param input_file: path to microscopy image to be converted
:param output_dir: directory to write output tiff files to
:returns: list
"""
base_name = os.path.basename(input_file)
name, suffix = base_name.split('.', 1)
output_file = '{}{}.tif'.format(name, self.split_pattern)
if output_dir:
output_file = os.path.join(output_dir, output_file)
return ['bfconvert', input_file, output_file] | 0.005042 |
def rect(self, x, y, width, height, color):
"""
See the Processing function rect():
https://processing.org/reference/rect_.html
"""
self.context.set_source_rgb(*color)
self.context.rectangle(self.tx(x), self.ty(y), self.tx(width), self.ty(height))
self.context.fill() | 0.009288 |
def get_environments(self):
"""
Returns the environments
"""
response = self.ebs.describe_environments(application_name=self.app_name, include_deleted=False)
return response['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments'] | 0.013746 |
def expand(expression):
"""
Expand a reference expression to individual spans.
Also works on space-separated ID lists, although a sequence of space
characters will be considered a delimiter.
>>> expand('a1')
'a1'
>>> expand('a1[3:5]')
'a1[3:5]'
>>> expand('a1[3:5+6:7]')
'a1[3:5]+a1[6:7]'
>>> expand('a1 a2 a3')
'a1 a2 a3'
"""
tokens = []
for (pre, _id, _range) in robust_ref_re.findall(expression):
if not _range:
tokens.append('{}{}'.format(pre, _id))
else:
tokens.append(pre)
tokens.extend(
'{}{}[{}:{}]'.format(delim, _id, start, end)
for delim, start, end in span_re.findall(_range)
)
return ''.join(tokens) | 0.001289 |
def generate_secret(length=30):
"""
Generate an ASCII secret using random.SysRandom
Based on oauthlib's common.generate_token function
"""
rand = random.SystemRandom()
ascii_characters = string.ascii_letters + string.digits
return ''.join(rand.choice(ascii_characters) for _ in range(length)) | 0.003106 |
def _execute_if_not_empty(func):
""" Execute function only if one of input parameters is not empty """
def wrapper(*args, **kwargs):
if any(args[1:]) or any(kwargs.items()):
return func(*args, **kwargs)
return wrapper | 0.004016 |
def watchTextSelection(self, event=None):
""" Callback used to see if there is a new text selection. In certain
cases we manually add the text to the clipboard (though on most
platforms the correct behavior happens automatically). """
# Note that this isn't perfect - it is a key click behind when
# selections are made via shift-arrow. If this becomes important, it
# can likely be fixed with after().
if self.entry.selection_present(): # entry must be text entry type
i1 = self.entry.index(SEL_FIRST)
i2 = self.entry.index(SEL_LAST)
if i1 >= 0 and i2 >= 0 and i2 > i1:
sel = self.entry.get()[i1:i2]
# Add to clipboard on platforms where necessary.
print('selected: "'+sel+'"') | 0.003663 |
def shape_type(self):
"""
Unique integer identifying the type of this shape, like
``MSO_SHAPE_TYPE.TEXT_BOX``.
"""
if self.is_placeholder:
return MSO_SHAPE_TYPE.PLACEHOLDER
if self._sp.has_custom_geometry:
return MSO_SHAPE_TYPE.FREEFORM
if self._sp.is_autoshape:
return MSO_SHAPE_TYPE.AUTO_SHAPE
if self._sp.is_textbox:
return MSO_SHAPE_TYPE.TEXT_BOX
msg = 'Shape instance of unrecognized shape type'
raise NotImplementedError(msg) | 0.003578 |
def replace_blocks(self, blocks):
"""Replace multiple blocks. blocks must be a list of tuples where
each tuple consists of (namespace, offset, key, data, flags)"""
start = 0
bulk_insert = self.bulk_insert
blocks_len = len(blocks)
select = 'SELECT ?,?,?,?,?'
query = 'REPLACE INTO gauged_data (namespace, offset, `key`, ' \
'data, flags) '
execute = self.cursor.execute
while start < blocks_len:
rows = blocks[start:start+bulk_insert]
params = [param for params in rows for param in params]
insert = (select + ' UNION ') * (len(rows) - 1) + select
execute(query + insert, params)
start += bulk_insert | 0.002692 |
def threshold_image(img, bkground_thresh, bkground_value=0.0):
"""
Thresholds a given image at a value or percentile.
Replacement value can be specified too.
Parameters
-----------
img : ndarray
Input image
bkground_thresh : float
a threshold value to identify the background
bkground_value : float
a value to fill the background elements with. Default 0.
Returns
-------
thresholded_image : ndarray
thresholded and/or filled image
"""
if bkground_thresh is None:
return img
if isinstance(bkground_thresh, str):
try:
thresh_perc = float(bkground_thresh.replace('%', ''))
except ValueError:
raise ValueError(
'percentile specified could not be parsed correctly '
' - must be a string of the form "5%", "10%" etc')
else:
thresh_value = np.percentile(img, thresh_perc)
elif isinstance(bkground_thresh, (float, int)):
thresh_value = bkground_thresh
else:
raise ValueError('Invalid specification for background threshold.')
img[img < thresh_value] = bkground_value
return img | 0.001672 |
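A quick illustration of the percentile branch with plain numpy (the array is made up):
import numpy as np

img = np.arange(16, dtype=float).reshape(4, 4)
thresh_value = np.percentile(img, 25.0)   # equivalent of passing "25%"
img[img < thresh_value] = 0.0             # background filled with 0.0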
def to_dict(self, depth=-1, **kwargs):
"""Returns a dict representation of the object."""
out = super(Link, self).to_dict(depth=-1, **kwargs)
out['url'] = self.url
return out | 0.009709 |
def imagedatadict_to_ndarray(imdict):
"""
Converts the ImageData dictionary, imdict, to an nd image.
"""
arr = imdict['Data']
im = None
if isinstance(arr, parse_dm3.array.array):
im = numpy.asarray(arr, dtype=arr.typecode)
elif isinstance(arr, parse_dm3.structarray):
t = tuple(arr.typecodes)
im = numpy.frombuffer(
arr.raw_data,
dtype=structarray_to_np_map[t])
# print "Image has dmimagetype", imdict["DataType"], "numpy type is", im.dtype
assert dm_image_dtypes[imdict["DataType"]][1] == im.dtype
assert imdict['PixelDepth'] == im.dtype.itemsize
im = im.reshape(imdict['Dimensions'][::-1])
if imdict["DataType"] == 23: # RGB
im = im.view(numpy.uint8).reshape(im.shape + (-1, ))[..., :-1] # strip A
# NOTE: RGB -> BGR would be [:, :, ::-1]
return im | 0.003456 |
def make_variant(cls, converters, re_opts=None, compiled=False, strict=True):
"""
Creates a type converter for a number of type converter alternatives.
The first matching type converter is used.
REQUIRES: type_converter.pattern attribute
:param converters: List of type converters as alternatives.
:param re_opts: Regular expression options to use (=default_re_opts).
:param compiled: Use compiled regexp matcher, if true (=False).
:param strict: Enable assertion checks.
:return: Type converter function object.
.. note::
Works only with named fields in :class:`parse.Parser`.
Parser needs group_index delta for unnamed/fixed fields.
This is not supported for user-defined types.
Otherwise, you need to use :class:`parse_type.parse.Parser`
(patched version of the :mod:`parse` module).
"""
# -- NOTE: Uses double-dispatch with regex pattern rematch because
# match is not passed through to primary type converter.
assert converters, "REQUIRE: Non-empty list."
if len(converters) == 1:
return converters[0]
if re_opts is None:
re_opts = cls.default_re_opts
pattern = r")|(".join([tc.pattern for tc in converters])
pattern = r"("+ pattern + ")"
group_count = len(converters)
for converter in converters:
group_count += pattern_group_count(converter.pattern)
if compiled:
convert_variant = cls.__create_convert_variant_compiled(converters,
re_opts, strict)
else:
convert_variant = cls.__create_convert_variant(re_opts, strict)
convert_variant.pattern = pattern
convert_variant.converters = tuple(converters)
# OLD: convert_variant.group_count = group_count
convert_variant.regex_group_count = group_count
return convert_variant | 0.002447 |
def _detect_byteorder(ccp4file):
"""Detect the byteorder of stream `ccp4file` and return format character.
Try all endianness and alignment options until we find
something that looks sensible ("MAP " in the first 4 bytes).
(The ``machst`` field could be used to obtain endianness, but
it does not specify alignment.)
.. SeeAlso::
:mod:`struct`
"""
bsaflag = None
ccp4file.seek(52 * 4)
mapbin = ccp4file.read(4)
for flag in '@=<>':
mapstr = struct.unpack(flag + '4s', mapbin)[0].decode('utf-8')
if mapstr.upper() == 'MAP ':
bsaflag = flag
break # Only possible value according to spec.
else:
raise TypeError(
"Cannot decode header --- corrupted or wrong format?")
ccp4file.seek(0)
return bsaflag | 0.003311 |
def _get_stddevs(self, C, mag, stddev_types, sites):
"""
Return standard deviation as defined on page 29 in
equation 8a,b,c and 9.
"""
num_sites = sites.vs30.size
sigma_intra = np.zeros(num_sites)
# interevent stddev
tau = sigma_intra + C['tau']
# intraevent std (equations 8a-8c page 29)
if mag < 5.0:
sigma_intra += C['sigmaM6'] - C['sigSlope']
elif 5.0 <= mag < 7.0:
sigma_intra += C['sigmaM6'] + C['sigSlope'] * (mag - 6)
else:
sigma_intra += C['sigmaM6'] + C['sigSlope']
std = []
for stddev_type in stddev_types:
if stddev_type == const.StdDev.TOTAL:
# equation 9 page 29
std += [np.sqrt(sigma_intra**2 + tau**2)]
elif stddev_type == const.StdDev.INTRA_EVENT:
std.append(sigma_intra)
elif stddev_type == const.StdDev.INTER_EVENT:
std.append(tau)
return std | 0.001957 |
def age_ratios():
"""Helper to get list of age ratio from the options dialog.
:returns: List of age ratio.
:rtype: list
"""
# FIXME(IS) set a correct parameter container
parameter_container = None
youth_ratio = parameter_container.get_parameter_by_guid(
youth_ratio_field['key']).value
adult_ratio = parameter_container.get_parameter_by_guid(
adult_ratio_field['key']).value
elderly_ratio = parameter_container.get_parameter_by_guid(
elderly_ratio_field['key']).value
ratios = [youth_ratio, adult_ratio, elderly_ratio]
return ratios | 0.003035 |
def indent(text, amount, ch=' '):
"""Indents a string by the given amount of characters."""
padding = amount * ch
return ''.join(padding+line for line in text.splitlines(True)) | 0.005319 |
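For comparison, the standard library's textwrap.indent does the same job with a prefix string:
import textwrap

print(textwrap.indent("first\nsecond\n", "  "))   # two-space indent per line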
def i2c_slave_last_transmit_size(self):
"""Returns the number of bytes transmitted by the slave."""
ret = api.py_aa_i2c_slave_write_stats(self.handle)
_raise_error_if_negative(ret)
return ret | 0.008969 |
async def get(self, key):
"""Decode the value."""
value = await self.conn.get(key)
if self.cfg.jsonpickle:
if isinstance(value, bytes):
return jsonpickle.decode(value.decode('utf-8'))
if isinstance(value, str):
return jsonpickle.decode(value)
return value | 0.005797 |
def export(self, name, columns, points):
"""Write the points to the Cassandra cluster."""
logger.debug("Export {} stats to Cassandra".format(name))
# Remove non number stats and convert all to float (for Boolean)
data = {k: float(v) for (k, v) in dict(zip(columns, points)).iteritems() if isinstance(v, Number)}
# Write input to the Cassandra table
try:
stmt = "INSERT INTO {} (plugin, time, stat) VALUES (?, ?, ?)".format(self.table)
query = self.session.prepare(stmt)
self.session.execute(
query,
(name, uuid_from_time(datetime.now()), data)
)
except Exception as e:
logger.error("Cannot export {} stats to Cassandra ({})".format(name, e)) | 0.006313 |
def add_unique_template_variables(self, options):
"""Update map template variables specific to a raster visual"""
options.update(dict(
tiles_url=self.tiles_url,
tiles_size=self.tiles_size,
tiles_minzoom=self.tiles_minzoom,
tiles_maxzoom=self.tiles_maxzoom,
tiles_bounds=self.tiles_bounds if self.tiles_bounds else 'undefined')) | 0.007444 |
def update(self, puts, deletes):
"""Applies the given puts and deletes atomically.
Args:
puts (:iterable:`tuple`): an iterable of key/value pairs to insert
deletes (:iterable:str:) an iterable of keys to delete
"""
with self._lmdb.begin(write=True, buffers=True) as txn:
cursor = txn.cursor(self._main_db)
# Process deletes first, to handle the case of new items replacing
# old index locations
for key in deletes:
if not cursor.set_key(key.encode()):
# value doesn't exist
continue
value = self._deserializer(bytes(cursor.value()))
cursor.delete()
for (index_db, index_key_fn) in self._indexes.values():
index_keys = index_key_fn(value)
index_cursor = txn.cursor(index_db)
for idx_key in index_keys:
if index_cursor.set_key(idx_key):
index_cursor.delete()
# process all the inserts
for key, value in puts:
packed = self._serializer(value)
cursor.put(key.encode(), packed, overwrite=True)
for (index_db, index_key_fn) in self._indexes.values():
index_keys = index_key_fn(value)
index_cursor = txn.cursor(index_db)
for idx_key in index_keys:
index_cursor.put(idx_key, key.encode())
self.sync() | 0.001263 |
def to_dict(self):
"""
Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object
"""
input_dict = super(EPDTC, self)._save_to_input_dict()
input_dict["class"] = "GPy.inference.latent_function_inference.expectation_propagation.EPDTC"
if self.ga_approx_old is not None:
input_dict["ga_approx_old"] = self.ga_approx_old.to_dict()
if self._ep_approximation is not None:
input_dict["_ep_approximation"] = {}
input_dict["_ep_approximation"]["post_params"] = self._ep_approximation[0].to_dict()
input_dict["_ep_approximation"]["ga_approx"] = self._ep_approximation[1].to_dict()
input_dict["_ep_approximation"]["log_Z_tilde"] = self._ep_approximation[2]
return input_dict | 0.009202 |
def getlanguages(self, event):
"""Compile and return a human readable list of registered translations"""
self.log('Client requests all languages.', lvl=verbose)
result = {
'component': 'hfos.ui.clientmanager',
'action': 'getlanguages',
'data': language_token_to_name(all_languages())
}
self.fireEvent(send(event.client.uuid, result)) | 0.007317 |
def bind(self, queue='', exchange='', routing_key='', virtual_host='/',
arguments=None):
"""Bind a Queue.
:param str queue: Queue name
:param str exchange: Exchange name
:param str routing_key: The routing key to use
:param str virtual_host: Virtual host name
:param dict|None arguments: Bind key/value arguments
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: None
"""
bind_payload = json.dumps({
'destination': queue,
'destination_type': 'q',
'routing_key': routing_key,
'source': exchange,
'arguments': arguments or {},
'vhost': virtual_host
})
virtual_host = quote(virtual_host, '')
return self.http_client.post(API_QUEUE_BIND %
(
virtual_host,
exchange,
queue
),
payload=bind_payload) | 0.002457 |
def to_json(self):
"""
Returns the JSON representation of the webhook.
"""
result = super(Webhook, self).to_json()
result.update({
'name': self.name,
'url': self.url,
'topics': self.topics,
'httpBasicUsername': self.http_basic_username,
'headers': self.headers
})
if self.filters:
result.update({'filters': self.filters})
if self.transformation:
result.update({'transformation': self.transformation})
return result | 0.003484 |
def blacklist(self, term=None):
"""List blacklisted entries.
When no term is given, the method will list the entries that
exist in the blacklist. If 'term' is set, the method will list
only those entries that match with that term.
:param term: term to match
"""
try:
bl = api.blacklist(self.db, term)
self.display('blacklist.tmpl', blacklist=bl)
except NotFoundError as e:
self.error(str(e))
return e.code
return CMD_SUCCESS | 0.003656 |
def run_without_time_limit(self, cmd):
"""Runs docker command without time limit.
Args:
cmd: list with the command line arguments which are passed to docker
binary
Returns:
how long it took to run submission in seconds
Raises:
WorkerError: if error occurred during execution of the submission
"""
cmd = [DOCKER_BINARY, 'run', DOCKER_NVIDIA_RUNTIME] + cmd
logging.info('Docker command: %s', ' '.join(cmd))
start_time = time.time()
retval = subprocess.call(cmd)
elapsed_time_sec = int(time.time() - start_time)
logging.info('Elapsed time of attack: %d', elapsed_time_sec)
logging.info('Docker retval: %d', retval)
if retval != 0:
logging.warning('Docker returned non-zero retval: %d', retval)
raise WorkerError('Docker returned non-zero retval ' + str(retval))
return elapsed_time_sec | 0.003413 |
def scoreatpercentile(inlist, percent):
"""
Returns the score at a given percentile relative to the distribution
given by inlist.
Usage: scoreatpercentile(inlist, percent)
"""
if percent > 1:
print("\nDividing percent>1 by 100 in lscoreatpercentile().\n")
percent = percent / 100.0
targetcf = percent * len(inlist)
h, lrl, binsize, extras = histogram(inlist)
cumhist = cumsum(copy.deepcopy(h))
for i in range(len(cumhist)):
if cumhist[i] >= targetcf:
break
score = binsize * ((targetcf - cumhist[i - 1]) / float(h[i])) + (lrl + binsize * i)
return score | 0.003205 |
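The same idea without the histogram machinery, using numpy's percentile (the sample data is made up; results can differ slightly because the function above interpolates within histogram bins):
import numpy as np

data = [2, 4, 4, 4, 5, 5, 7, 9]
np.percentile(data, 25)   # score at the 25th percentile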
def _compile_interpretation(data):
"""
Compile the interpretation data into a list of multiples, based on the keys provided.
Disassemble the key to figure out how to place the data
:param dict data: Interpretation data (unsorted)
:return dict: Interpretation data (sorted)
"""
# KEY FORMAT : "interpretation1_somekey"
_count = 0
# Determine how many entries we are going to need, by checking the interpretation index in the string
for _key in data.keys():
_key_low = _key.lower()
# Get regex match
m = re.match(re_interpretation, _key_low)
# If regex match was successful..
if m:
# Check if this interpretation count is higher than what we have.
_curr_count = int(m.group(1))
if _curr_count > _count:
# New max count, record it.
_count = _curr_count
# Create the empty list with X entries for the interpretation data
_tmp = [{} for i in range(0, _count)]
# Loop over all the interpretation keys and data
for k, v in data.items():
# Get the resulting regex data.
# EXAMPLE ENTRY: "interpretation1_variable"
# REGEX RESULT: ["1", "variable"]
m = re.match(re_interpretation, k)
# Get the interpretation index number
idx = int(m.group(1))
# Get the field variable
key = m.group(2)
# Place this data in the _tmp array. Remember to adjust given index number for 0-indexing
_tmp[idx-1][key] = v
# Return compiled interpretation data
return _tmp | 0.002511 |
def trocar_codigo_de_ativacao(self, novo_codigo_ativacao,
opcao=constantes.CODIGO_ATIVACAO_REGULAR,
codigo_emergencia=None):
"""Sobrepõe :meth:`~satcfe.base.FuncoesSAT.trocar_codigo_de_ativacao`.
:return: Uma resposta SAT padrão.
:rtype: satcfe.resposta.padrao.RespostaSAT
"""
resp = self._http_post('trocarcodigodeativacao',
novo_codigo_ativacao=novo_codigo_ativacao,
opcao=opcao,
codigo_emergencia=codigo_emergencia)
conteudo = resp.json()
return RespostaSAT.trocar_codigo_de_ativacao(conteudo.get('retorno')) | 0.010938 |
def cyvcf2(context, vcf, include, exclude, chrom, start, end, loglevel, silent,
individual, no_inds):
"""fast vcf parsing with cython + htslib"""
coloredlogs.install(log_level=loglevel)
start_parsing = datetime.now()
log.info("Running cyvcf2 version %s", __version__)
if include and exclude:
log.warning("Can not use include and exclude at the same time")
context.abort()
region = ''
if (chrom or start or end):
if not (chrom and start and end):
log.warning("Please specify chromosome, start and end for region")
context.abort()
else:
region = "{0}:{1}-{2}".format(chrom, start, end)
vcf_obj = VCF(vcf)
for inclusion in include:
if vcf_obj.contains(inclusion):
log.info("Including %s in output", inclusion)
else:
log.warning("%s does not exist in header", inclusion)
context.abort()
for exclusion in exclude:
if vcf_obj.contains(exclusion):
log.info("Excluding %s in output", exclusion)
else:
log.warning("%s does not exist in header", exclusion)
context.abort()
if individual:
# Check if the choosen individuals exists in vcf
test = True
for ind_id in individual:
if ind_id not in vcf_obj.samples:
log.warning("Individual '%s' does not exist in vcf", ind_id)
test = False
if not test:
context.abort()
# Convert individuals to list for VCF.set_individuals
individual = list(individual)
else:
individual = None
# Set individual to be empty list to skip all genotypes
if no_inds:
individual = []
if not silent:
print_header(vcf_obj, include, exclude, individual)
nr_variants = None
try:
for nr_variants, variant in enumerate(vcf_obj(region)):
if not silent:
print_variant(variant, include, exclude)
except Exception as err:
log.warning(err)
context.abort()
if nr_variants is None:
log.info("No variants in vcf")
return
log.info("{0} variants parsed".format(nr_variants+1))
log.info("Time to parse variants: {0}".format(datetime.now() - start_parsing)) | 0.002977 |
def detect(self, color_im, depth_im, cfg, camera_intr,
T_camera_world,
vis_foreground=False, vis_segmentation=False, segmask=None):
"""Detects all relevant objects in an rgbd image pair using foreground masking.
Parameters
----------
color_im : :obj:`ColorImage`
color image for detection
depth_im : :obj:`DepthImage`
depth image for detection (corresponds to color image)
cfg : :obj:`YamlConfig`
parameters of detection function
camera_intr : :obj:`CameraIntrinsics`
intrinsics of the camera
T_camera_world : :obj:`autolab_core.RigidTransform`
registration of the camera to world frame
segmask : :obj:`BinaryImage`
optional segmask of invalid pixels
Returns
-------
:obj:`list` of :obj:`RgbdDetection`
all detections in the image
"""
# read params
min_pt_box = np.array(cfg['min_pt'])
max_pt_box = np.array(cfg['max_pt'])
min_contour_area = cfg['min_contour_area']
max_contour_area = cfg['max_contour_area']
min_box_area = cfg['min_box_area']
max_box_area = cfg['max_box_area']
box_padding_px = cfg['box_padding_px']
crop_height = cfg['image_height']
crop_width = cfg['image_width']
depth_grad_thresh = cfg['depth_grad_thresh']
point_cloud_mask_only = cfg['point_cloud_mask_only']
w = cfg['filter_dim']
half_crop_height = float(crop_height) / 2
half_crop_width = float(crop_width) / 2
half_crop_dims = np.array([half_crop_height, half_crop_width])
fill_depth = np.max(depth_im.data[depth_im.data > 0])
kinect2_denoising = False
if 'kinect2_denoising' in cfg.keys() and cfg['kinect2_denoising']:
kinect2_denoising = True
depth_offset = cfg['kinect2_noise_offset']
max_depth = cfg['kinect2_noise_max_depth']
box = Box(min_pt_box, max_pt_box, 'world')
# project into 3D
point_cloud_cam = camera_intr.deproject(depth_im)
point_cloud_world = T_camera_world * point_cloud_cam
seg_point_cloud_world, _ = point_cloud_world.box_mask(box)
seg_point_cloud_cam = T_camera_world.inverse() * seg_point_cloud_world
depth_im_seg = camera_intr.project_to_image(seg_point_cloud_cam)
# mask image using background detection
bgmodel = color_im.background_model()
binary_im = depth_im_seg.to_binary()
if segmask is not None:
binary_im = binary_im.mask_binary(segmask.inverse())
# filter the image
y, x = np.ogrid[-w/2+1:w/2+1, -w/2+1:w/2+1]
mask = x*x + y*y <= w/2*w/2
filter_struct = np.zeros([w,w]).astype(np.uint8)
filter_struct[mask] = 1
binary_im_filtered_data = snm.binary_dilation(binary_im.data, structure=filter_struct)
binary_im_filtered = BinaryImage(binary_im_filtered_data.astype(np.uint8),
frame=binary_im.frame,
threshold=0)
# find all contours
contours = binary_im_filtered.find_contours(min_area=min_contour_area, max_area=max_contour_area)
if vis_foreground:
plt.figure()
plt.subplot(1,3,1)
plt.imshow(color_im.data)
plt.imshow(segmask.data, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(1,3,2)
plt.imshow(binary_im.data, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(1,3,3)
plt.imshow(binary_im_filtered.data, cmap=plt.cm.gray)
plt.axis('off')
plt.show()
# switch to just return the mean of nonzero_px
if point_cloud_mask_only == 1:
center_px = np.mean(binary_im_filtered.nonzero_pixels(), axis=0)
ci = center_px[0]
cj = center_px[1]
binary_thumbnail = binary_im_filtered.crop(crop_height, crop_width, ci, cj)
color_thumbnail = color_im.crop(crop_height, crop_width, ci, cj)
depth_thumbnail = depth_im.crop(crop_height, crop_width, ci, cj)
thumbnail_intr = camera_intr
if camera_intr is not None:
thumbnail_intr = camera_intr.crop(crop_height, crop_width, ci, cj)
query_box = Box(center_px - half_crop_dims, center_px + half_crop_dims)
return [RgbdDetection(color_thumbnail,
depth_thumbnail,
query_box,
binary_thumbnail=binary_thumbnail,
contour=None,
camera_intr=thumbnail_intr)]
# convert contours to detections
detections = []
for i, contour in enumerate(contours):
orig_box = contour.bounding_box
logging.debug('Orig box %d area: %.3f' %(i, orig_box.area))
if orig_box.area > min_box_area and orig_box.area < max_box_area:
# convert orig bounding box to query bounding box
min_pt = orig_box.center - half_crop_dims
max_pt = orig_box.center + half_crop_dims
query_box = Box(min_pt, max_pt, frame=orig_box.frame)
# segment color to get refined detection
contour_mask = binary_im_filtered.contour_mask(contour)
binary_thumbnail = contour_mask.crop(query_box.height, query_box.width, query_box.ci, query_box.cj)
else:
# otherwise take original bounding box
query_box = Box(contour.bounding_box.min_pt - box_padding_px,
contour.bounding_box.max_pt + box_padding_px,
frame = contour.bounding_box.frame)
binary_thumbnail = binary_im_filtered.crop(query_box.height, query_box.width, query_box.ci, query_box.cj)
# crop to get thumbnails
color_thumbnail = color_im.crop(query_box.height, query_box.width, query_box.ci, query_box.cj)
depth_thumbnail = depth_im.crop(query_box.height, query_box.width, query_box.ci, query_box.cj)
thumbnail_intr = camera_intr
if camera_intr is not None:
thumbnail_intr = camera_intr.crop(query_box.height, query_box.width, query_box.ci, query_box.cj)
# fix depth thumbnail
depth_thumbnail = depth_thumbnail.replace_zeros(fill_depth)
if kinect2_denoising:
depth_data = depth_thumbnail.data
min_depth = np.min(depth_data)
binary_mask_data = binary_thumbnail.data
depth_mask_data = depth_thumbnail.mask_binary(binary_thumbnail).data
depth_mask_data += depth_offset
depth_data[binary_mask_data > 0] = depth_mask_data[binary_mask_data > 0]
depth_thumbnail = DepthImage(depth_data, depth_thumbnail.frame)
# append to detections
detections.append(RgbdDetection(color_thumbnail,
depth_thumbnail,
query_box,
binary_thumbnail=binary_thumbnail,
contour=contour,
camera_intr=thumbnail_intr))
return detections | 0.004089 |
def branch(self, name, desc=None):
"""
Create a branch of this repo at 'name'.
:param name: Name of new branch
:param desc: Repo description.
:return: New Local instance.
"""
return Local.new(path=os.path.join(self.path, name), desc=desc, bare=True) | 0.009772 |
def uint8_3(self, name):
"""parse a tuple of 3 uint8 values"""
self._assert_is_string(name)
frame = self._next_frame()
if len(frame) != 3:
raise MessageParserError("Expected exacty 3 byte for 3 unit8 values")
vals = unpack("BBB", frame)
self.results.__dict__[name] = vals
return self | 0.008547 |
def simxAddStatusbarMessage(clientID, message, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
if (sys.version_info[0] == 3) and (type(message) is str):
message=message.encode('utf-8')
return c_AddStatusbarMessage(clientID, message, operationMode) | 0.008876 |
def thanks(request, redirect_url=settings.LOGIN_REDIRECT_URL):
"""A user gets redirected here after hitting Twitter and authorizing your app to use their data.
This is the view that stores the tokens you want
for querying data. Pay attention to this.
"""
# Now that we've got the magic tokens back from Twitter, we need to exchange
# for permanent ones and store them...
oauth_token = request.session['request_token']['oauth_token']
oauth_token_secret = request.session['request_token']['oauth_token_secret']
twitter = Twython(settings.TWITTER_KEY, settings.TWITTER_SECRET,
oauth_token, oauth_token_secret)
# Retrieve the tokens we want...
authorized_tokens = twitter.get_authorized_tokens(request.GET['oauth_verifier'])
# If they already exist, grab them, login and redirect to a page displaying stuff.
try:
user = User.objects.get(username=authorized_tokens['screen_name'])
except User.DoesNotExist:
# We mock a creation here; no email, password is just the token, etc.
user = User.objects.create_user(authorized_tokens['screen_name'], "[email protected]", authorized_tokens['oauth_token_secret'])
profile = TwitterProfile()
profile.user = user
profile.oauth_token = authorized_tokens['oauth_token']
profile.oauth_secret = authorized_tokens['oauth_token_secret']
profile.save()
user = authenticate(
username=authorized_tokens['screen_name'],
password=authorized_tokens['oauth_token_secret']
)
login(request, user)
redirect_url = request.session.get('next_url', redirect_url)
return HttpResponseRedirect(redirect_url) | 0.003538 |
def _check_preferences(prefs, pref_type=None):
"""Check cipher, digest, and compression preference settings.
MD5 is not allowed. This is `not 1994`__. SHA1 is allowed_ grudgingly_.
__ http://www.cs.colorado.edu/~jrblack/papers/md5e-full.pdf
.. _allowed: http://eprint.iacr.org/2008/469.pdf
.. _grudgingly: https://www.schneier.com/blog/archives/2012/10/when_will_we_se.html
"""
if prefs is None: return
cipher = frozenset(['AES256', 'AES192', 'AES128',
'CAMELLIA256', 'CAMELLIA192',
'TWOFISH', '3DES'])
digest = frozenset(['SHA512', 'SHA384', 'SHA256', 'SHA224', 'RMD160',
'SHA1'])
compress = frozenset(['BZIP2', 'ZLIB', 'ZIP', 'Uncompressed'])
trust = frozenset(['gpg', 'classic', 'direct', 'always', 'auto'])
pinentry = frozenset(['loopback'])
all = cipher | digest | compress | trust | pinentry
if isinstance(prefs, str):
prefs = set(prefs.split())
elif isinstance(prefs, list):
prefs = set(prefs)
else:
msg = "prefs must be list of strings, or space-separated string"
log.error("parsers._check_preferences(): %s" % message)
raise TypeError(message)
if not pref_type:
pref_type = 'all'
allowed = str()
if pref_type == 'cipher':
allowed += ' '.join(prefs.intersection(cipher))
if pref_type == 'digest':
allowed += ' '.join(prefs.intersection(digest))
if pref_type == 'compress':
allowed += ' '.join(prefs.intersection(compress))
if pref_type == 'trust':
allowed += ' '.join(prefs.intersection(trust))
if pref_type == 'pinentry':
allowed += ' '.join(prefs.intersection(pinentry))
if pref_type == 'all':
allowed += ' '.join(prefs.intersection(all))
return allowed | 0.003751 |
def gettext(message):
"""
Translate the 'message' string. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default
_default = _default or translation(DEFAULT_LANGUAGE)
translation_object = getattr(_active, 'value', _default)
result = translation_object.gettext(message)
return result | 0.002193 |
def get_terminal_size(p_getter=None):
"""
Try to determine terminal size at run time. If that is not possible,
returns the default size of 80x24.
By default, the size is determined with the get_terminal_size function
provided by shutil. Sometimes a UI may want to specify the desired width;
in that case it can provide a getter that returns a named tuple
(columns, lines) with the size.
"""
try:
return get_terminal_size.getter()
except AttributeError:
if p_getter:
get_terminal_size.getter = p_getter
else:
def inner():
try:
# shutil.get_terminal_size was added to the standard
# library in Python 3.3
try:
from shutil import get_terminal_size as _get_terminal_size # pylint: disable=no-name-in-module
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size as _get_terminal_size # pylint: disable=import-error
size = _get_terminal_size()
except ValueError:
# This can result from the 'underlying buffer being detached', which
# occurs during running the unittest on Windows (but not on Linux?)
terminal_size = namedtuple('Terminal_Size', 'columns lines')
size = terminal_size(80, 24)
return size
get_terminal_size.getter = inner
return get_terminal_size.getter() | 0.003844 |
def delete_host(zone, name, nameserver='127.0.0.1', timeout=5, port=53,
**kwargs):
'''
Delete the forward and reverse records for a host.
Returns true if any records are deleted.
CLI Example:
.. code-block:: bash
salt ns1 ddns.delete_host example.com host1
'''
fqdn = '{0}.{1}'.format(name, zone)
request = dns.message.make_query(fqdn, 'A')
answer = dns.query.udp(request, nameserver, timeout, port)
try:
ips = [i.address for i in answer.answer[0].items]
except IndexError:
ips = []
res = delete(zone, name, nameserver=nameserver, timeout=timeout, port=port,
**kwargs)
fqdn = fqdn + '.'
for ip in ips:
parts = ip.split('.')[::-1]
popped = []
# Iterate over possible reverse zones
while len(parts) > 1:
p = parts.pop(0)
popped.append(p)
zone = '{0}.{1}'.format('.'.join(parts), 'in-addr.arpa.')
name = '.'.join(popped)
ptr = delete(zone, name, 'PTR', fqdn, nameserver=nameserver,
timeout=timeout, port=port, **kwargs)
if ptr:
res = True
return res | 0.000829 |
def get(self, deviceId):
"""
lists all known active measurements.
"""
measurementsByName = self.measurements.get(deviceId)
if measurementsByName is None:
return []
else:
return list(measurementsByName.values()) | 0.007092 |
def get_instance(self, payload):
"""
Build an instance of WorkerStatisticsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.worker.worker_statistics.WorkerStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.worker.worker_statistics.WorkerStatisticsInstance
"""
return WorkerStatisticsInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
worker_sid=self._solution['worker_sid'],
) | 0.006803 |
def config_mode(self, config_command="config term", pattern=""):
"""
Enter into configuration mode on remote device.
Cisco IOS devices abbreviate the prompt at 20 chars in config mode
"""
if not pattern:
pattern = re.escape(self.base_prompt[:16])
return super(CiscoBaseConnection, self).config_mode(
config_command=config_command, pattern=pattern
) | 0.004662 |
def filedir_lookup(self, p, fd=None):
"""
A helper method for find_file() that looks up a directory for
a file we're trying to find. This only creates the Dir Node if
it exists on-disk, since if the directory doesn't exist we know
we won't find any files in it... :-)
It would be more compact to just use this as a nested function
with a default keyword argument (see the commented-out version
below), but that doesn't work unless you have nested scopes,
so we define it here just so this works under Python 1.5.2.
"""
if fd is None:
fd = self.default_filedir
dir, name = os.path.split(fd)
drive, d = _my_splitdrive(dir)
if not name and d[:1] in ('/', OS_SEP):
#return p.fs.get_root(drive).dir_on_disk(name)
return p.fs.get_root(drive)
if dir:
p = self.filedir_lookup(p, dir)
if not p:
return None
norm_name = _my_normcase(name)
try:
node = p.entries[norm_name]
except KeyError:
return p.dir_on_disk(name)
if isinstance(node, Dir):
return node
if isinstance(node, Entry):
node.must_be_same(Dir)
return node
return None | 0.002266 |
def new_template(template_name: str, ordering: int, formatting: dict=None, **kwargs):
"""
Templates have no unique ID.
:param template_name:
:param ordering:
:param formatting:
:param kwargs:
:return:
"""
if formatting is not None:
kwargs.update(formatting)
template = dict([
('name', template_name),
('qfmt', DEFAULT_TEMPLATE['qfmt']),
('did', None),
('bafmt', DEFAULT_TEMPLATE['bafmt']),
('afmt', DEFAULT_TEMPLATE['afmt']),
('ord', ordering),
('bqfmt', DEFAULT_TEMPLATE['bqfmt'])
])
for k, v in template.items():
if k in kwargs.keys():
template[k] = kwargs[k]
return template | 0.006188 |
def send_metric(self, name, metric):
"""Send metric and its snapshot."""
config = SERIALIZER_CONFIG[class_name(metric)]
mmap(
self._buffered_send_metric,
self.serialize_metric(
metric,
name,
config['keys'],
config['serialized_type']
)
)
if hasattr(metric, 'snapshot') and config.get('snapshot_keys'):
mmap(
self._buffered_send_metric,
self.serialize_metric(
metric.snapshot,
name,
config['snapshot_keys'],
config['serialized_type']
)
) | 0.002743 |
def rtouches(self, span):
"""
Returns true if the start of this span touches the right (ending) side of the given span.
"""
if isinstance(span, list):
return [sp for sp in span if self._rtouches(sp)]
return self._rtouches(span) | 0.010714 |
def parents(self, id, level=None, featuretype=None, order_by=None,
reverse=False, completely_within=False, limit=None):
"""
Return parents of feature `id`.
{_relation_docstring}
"""
return self._relation(
id, join_on='parent', join_to='child', level=level,
featuretype=featuretype, order_by=order_by, reverse=reverse,
limit=limit, completely_within=completely_within) | 0.006536 |
def add_qtl_to_marker(marker, qtls):
"""Add the number of QTLs found for a given marker.
:arg marker, the marker we are looking for QTLs for.
:arg qtls, the list of all QTLs found.
"""
cnt = 0
for qtl in qtls:
if qtl[-1] == marker[0]:
cnt = cnt + 1
marker.append(str(cnt))
return marker | 0.002924 |
def create_hooks(use_tfdbg=False,
use_dbgprofile=False,
dbgprofile_kwargs=None,
use_validation_monitor=False,
validation_monitor_kwargs=None,
use_early_stopping=False,
early_stopping_kwargs=None):
"""Create train and eval hooks for Experiment."""
train_hooks = []
eval_hooks = []
if use_tfdbg:
hook = debug.LocalCLIDebugHook()
train_hooks.append(hook)
eval_hooks.append(hook)
if use_dbgprofile:
# Recorded traces can be visualized with chrome://tracing/
# The memory/tensor lifetime is also profiled
tf.logging.info("Using ProfilerHook")
defaults = dict(save_steps=10, show_dataflow=True, show_memory=True)
defaults.update(dbgprofile_kwargs)
train_hooks.append(tf.train.ProfilerHook(**defaults))
if use_validation_monitor:
tf.logging.info("Using ValidationMonitor")
train_hooks.append(
tf.contrib.learn.monitors.ValidationMonitor(
hooks=eval_hooks, **validation_monitor_kwargs))
if use_early_stopping:
tf.logging.info("Using EarlyStoppingHook")
hook = metrics_hook.EarlyStoppingHook(**early_stopping_kwargs)
# Adding to both training and eval so that eval aborts as well
train_hooks.append(hook)
eval_hooks.append(hook)
return train_hooks, eval_hooks | 0.006632 |
def report_intermediate_result(metric):
"""Reports intermediate result to Assessor.
metric: serializable object.
"""
global _intermediate_seq
assert _params is not None, 'nni.get_next_parameter() needs to be called before report_intermediate_result'
metric = json_tricks.dumps({
'parameter_id': _params['parameter_id'],
'trial_job_id': trial_env_vars.NNI_TRIAL_JOB_ID,
'type': 'PERIODICAL',
'sequence': _intermediate_seq,
'value': metric
})
_intermediate_seq += 1
platform.send_metric(metric) | 0.003521 |
def gamma(self, x, y, kwargs, diff=diff):
"""
computes the shear
:return: gamma1, gamma2
"""
f_xx, f_xy, f_yx, f_yy = self.hessian(x, y, kwargs, diff=diff)
gamma1 = 1./2 * (f_xx - f_yy)
gamma2 = f_xy
return gamma1, gamma2 | 0.007018 |
def Backup(self, duration=0):
'''
method to use when a backup tag is encountered in musicXML. Moves back in the bar by <duration>
:param duration:
:return:
'''
total = 0
duration_total = duration * 4
children = self.GetChildrenIndexes()
notes = 0
for voice in children:
v = self.GetChild(voice)
indexes = v.GetChildrenIndexes()
if len(indexes) > 1:
indexes.reverse()
for index in indexes:
notes += 1
note = v.GetChild(index)
if hasattr(note, "duration"):
total += note.duration
if total >= duration_total:
break
gap = [
v.GetChild(i).duration for i in range(
0,
self.index -
notes) if hasattr(
v.GetChild(i),
"duration")]
previous = 0
for item in gap:
if item == previous:
self.gap -= previous
item = item / 2
self.gap += item
previous = item
#self.gap = sum([])
self.index -= notes | 0.003082 |
def get_location(self):
"""
Return the absolute location of this widget on the Screen, taking into account the
current state of the Frame that is displaying it and any label offsets of the Widget.
:returns: A tuple of the form (<X coordinate>, <Y coordinate>).
"""
origin = self._frame.canvas.origin
return (self._x + origin[0] + self._offset,
self._y + origin[1] - self._frame.canvas.start_line) | 0.008529 |
def _compress_data(self, data, options):
'''Compress data'''
compression_algorithm_id = options['compression_algorithm_id']
if compression_algorithm_id not in self.compression_algorithms:
raise Exception('Unknown compression algorithm id: %d'
% compression_algorithm_id)
compression_algorithm = \
self.compression_algorithms[compression_algorithm_id]
algorithm = self._get_algorithm_info(compression_algorithm)
compressed = self._encode(data, algorithm)
if len(compressed) < len(data):
data = compressed
else:
options['compression_algorithm_id'] = 0
return data | 0.002797 |
def plot_script_validate(self, script):
"""
checks the plottype of the script and plots it accordingly
Args:
script: script to be plotted
"""
script.plot_validate([self.matplotlibwidget_1.figure, self.matplotlibwidget_2.figure])
self.matplotlibwidget_1.draw()
self.matplotlibwidget_2.draw() | 0.008333 |
def unstack(self, dim=None):
"""
Unstack existing dimensions corresponding to MultiIndexes into
multiple new dimensions.
New dimensions will be added at the end.
Parameters
----------
dim : str or sequence of str, optional
Dimension(s) over which to unstack. By default unstacks all
MultiIndexes.
Returns
-------
unstacked : Dataset
Dataset with unstacked data.
See also
--------
Dataset.stack
"""
if dim is None:
dims = [d for d in self.dims if isinstance(self.get_index(d),
pd.MultiIndex)]
else:
dims = [dim] if isinstance(dim, str) else dim
missing_dims = [d for d in dims if d not in self.dims]
if missing_dims:
raise ValueError('Dataset does not contain the dimensions: %s'
% missing_dims)
non_multi_dims = [d for d in dims if not
isinstance(self.get_index(d), pd.MultiIndex)]
if non_multi_dims:
raise ValueError('cannot unstack dimensions that do not '
'have a MultiIndex: %s' % non_multi_dims)
result = self.copy(deep=False)
for dim in dims:
result = result._unstack_once(dim)
return result | 0.00137 |
def resource(url_prefix_or_resource_cls: Union[str, Type[Resource]],
resource_cls: Optional[Type[Resource]] = None,
*,
member_param: Optional[str] = None,
unique_member_param: Optional[str] = None,
rules: Optional[Iterable[Union[Route, RouteGenerator]]] = None,
subresources: Optional[Iterable[RouteGenerator]] = None,
) -> RouteGenerator:
"""
This function is used to register a :class:`Resource`'s routes.
Example usage::
routes = lambda: [
prefix('/api/v1', [
resource('/products', ProductResource),
])
]
Or with the optional prefix argument::
routes = lambda: [
resource('/products', ProductResource),
]
Specify ``rules`` to only include those routes from the resource::
routes = lambda: [
resource('/users', UserResource, rules=[
get('/', UserResource.list),
get('/<int:id>', UserResource.get),
]),
]
Specify ``subresources`` to nest resource routes::
routes = lambda: [
resource('/users', UserResource, subresources=[
resource('/roles', RoleResource)
]),
]
Subresources can be nested as deeply as you want, however it's not recommended
to go more than two or three levels deep at the most, otherwise your URLs will
become unwieldy.
:param url_prefix_or_resource_cls: The resource class, or a url prefix for
all of the rules from the resource class
passed as the second argument.
:param resource_cls: If a url prefix was given as the first argument, then
the resource class must be passed as the second argument.
:param member_param: Optionally override the controller's member_param attribute.
:param rules: An optional list of rules to limit/customize the routes included
from the resource.
:param subresources: An optional list of subresources.
"""
url_prefix, resource_cls = _normalize_args(
url_prefix_or_resource_cls, resource_cls, _is_resource_cls)
member_param = member_param or resource_cls.Meta.member_param
unique_member_param = unique_member_param or resource_cls.Meta.unique_member_param
url_prefix = url_prefix or resource_cls.Meta.url_prefix
routes = getattr(resource_cls, CONTROLLER_ROUTES_ATTR)
if rules is not None:
routes = {method_name: method_routes
for method_name, method_routes in routes.items()
if method_name in resource_cls.resource_methods}
for route in rules:
routes[route.method_name] = route
yield from _normalize_controller_routes(routes.values(), resource_cls,
url_prefix=url_prefix,
member_param=member_param,
unique_member_param=unique_member_param)
for subroute in _reduce_routes(subresources):
subroute._parent_resource_cls = resource_cls
subroute._parent_member_param = member_param
subroute._unique_member_param = unique_member_param
subroute = subroute.copy()
subroute.rule = rename_parent_resource_param_name(
subroute, rule=join(url_prefix, member_param, subroute.rule,
trailing_slash=subroute.rule.endswith('/')))
yield subroute | 0.002223 |
def register_classes():
"""Register these classes with the `LinkFactory` """
CopyBaseROI.register_class()
CopyBaseROI_SG.register_class()
SimulateROI.register_class()
SimulateROI_SG.register_class()
RandomDirGen.register_class()
RandomDirGen_SG.register_class() | 0.00346 |
def load_pickle(file, encoding=None):
"""Load a pickle file.
Args:
file (str): Path to pickle file
Returns:
object: Loaded object from pickle file
"""
# TODO: test set encoding='latin1' for 2/3 incompatibility
if encoding:
with open(file, 'rb') as f:
return pickle.load(f, encoding=encoding)
with open(file, 'rb') as f:
return pickle.load(f) | 0.004808 |
def _parse_status(self, output):
        '''
        Parse Vagrant's machine-readable status output into Status objects.
        Unit testing is so much easier when Vagrant is removed from the
        equation.
        '''
parsed = self._parse_machine_readable_output(output)
statuses = []
# group tuples by target name
# assuming tuples are sorted by target name, this should group all
# the tuples with info for each target.
for target, tuples in itertools.groupby(parsed, lambda tup: tup[1]):
# transform tuples into a dict mapping "type" to "data"
info = {kind: data for timestamp, _, kind, data in tuples}
status = Status(name=target, state=info.get('state'),
provider=info.get('provider-name'))
statuses.append(status)
return statuses | 0.002509 |
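To make the grouping step easier to follow, here is a self-contained sketch with made-up tuples in Vagrant's machine-readable format (timestamp, target, kind, data); the real method builds Status objects instead of printing:
import itertools
parsed = [
    ('1617000000', 'web', 'provider-name', 'virtualbox'),
    ('1617000000', 'web', 'state', 'running'),
    ('1617000001', 'db', 'provider-name', 'virtualbox'),
    ('1617000001', 'db', 'state', 'poweroff'),
]
# groupby only merges *consecutive* keys, hence the assumption that the
# tuples are already sorted by target name.
for target, tuples in itertools.groupby(parsed, lambda tup: tup[1]):
    info = {kind: data for _, _, kind, data in tuples}
    print(target, info.get('state'), info.get('provider-name'))
# web running virtualbox
# db poweroff virtualbox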
def render_cvmfs_sc(cvmfs_volume):
"""Render REANA_CVMFS_SC_TEMPLATE."""
name = CVMFS_REPOSITORIES[cvmfs_volume]
rendered_template = dict(REANA_CVMFS_SC_TEMPLATE)
rendered_template['metadata']['name'] = "csi-cvmfs-{}".format(name)
rendered_template['parameters']['repository'] = cvmfs_volume
return rendered_template | 0.002941 |
def start_order_threading(self):
"""开启查询子线程(实盘中用)
"""
self.if_start_orderthreading = True
self.order_handler.if_start_orderquery = True
self.trade_engine.create_kernel('ORDER', daemon=True)
self.trade_engine.start_kernel('ORDER')
self.sync_order_and_deal() | 0.006369 |
def find_multiline_pattern(self, regexp, cursor, findflag):
"""Reimplement QTextDocument's find method
Add support for *multiline* regular expressions"""
pattern = to_text_string(regexp.pattern())
text = to_text_string(self.toPlainText())
try:
regobj = re.compile(pattern)
except sre_constants.error:
return
if findflag & QTextDocument.FindBackward:
# Find backward
offset = min([cursor.selectionEnd(), cursor.selectionStart()])
text = text[:offset]
matches = [_m for _m in regobj.finditer(text, 0, offset)]
if matches:
match = matches[-1]
else:
return
else:
# Find forward
offset = max([cursor.selectionEnd(), cursor.selectionStart()])
match = regobj.search(text, offset)
if match:
pos1, pos2 = match.span()
fcursor = self.textCursor()
fcursor.setPosition(pos1)
fcursor.setPosition(pos2, QTextCursor.KeepAnchor)
return fcursor | 0.001736 |
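The Qt pieces make this hard to run in isolation, but the backward-search branch can be sketched with plain re (the text and offset are illustrative):
import re
text = "alpha\nbeta\nalpha\ngamma"
offset = 15                              # only search before this position
regobj = re.compile(r"alpha")
matches = list(regobj.finditer(text[:offset]))
if matches:
    pos1, pos2 = matches[-1].span()      # last complete match before offset
    print(pos1, pos2)                    # -> 0 5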
def check_and_order_id_inputs(rid, ridx, cid, cidx, row_meta_df, col_meta_df):
"""
Makes sure that (if entered) id inputs entered are of one type (string id or index)
Input:
- rid (list or None): if not None, a list of rids
- ridx (list or None): if not None, a list of indexes
- cid (list or None): if not None, a list of cids
- cidx (list or None): if not None, a list of indexes
Output:
- a tuple of the ordered ridx and cidx
"""
(row_type, row_ids) = check_id_idx_exclusivity(rid, ridx)
(col_type, col_ids) = check_id_idx_exclusivity(cid, cidx)
row_ids = check_and_convert_ids(row_type, row_ids, row_meta_df)
ordered_ridx = get_ordered_idx(row_type, row_ids, row_meta_df)
col_ids = check_and_convert_ids(col_type, col_ids, col_meta_df)
ordered_cidx = get_ordered_idx(col_type, col_ids, col_meta_df)
return (ordered_ridx, ordered_cidx) | 0.002155 |
def report(
vulnerabilities,
fileobj,
print_sanitised,
):
"""
Prints issues in color-coded text format.
Args:
vulnerabilities: list of vulnerabilities to report
        fileobj: The output file object, which may be sys.stdout
        print_sanitised: whether sanitised vulnerabilities are included in the output
"""
n_vulnerabilities = len(vulnerabilities)
unsanitised_vulnerabilities = [v for v in vulnerabilities if not isinstance(v, SanitisedVulnerability)]
n_unsanitised = len(unsanitised_vulnerabilities)
n_sanitised = n_vulnerabilities - n_unsanitised
heading = "{} vulnerabilit{} found{}.\n".format(
'No' if n_unsanitised == 0 else n_unsanitised,
'y' if n_unsanitised == 1 else 'ies',
" (plus {} sanitised)".format(n_sanitised) if n_sanitised else "",
)
vulnerabilities_to_print = vulnerabilities if print_sanitised else unsanitised_vulnerabilities
with fileobj:
for i, vulnerability in enumerate(vulnerabilities_to_print, start=1):
fileobj.write(vulnerability_to_str(i, vulnerability))
if n_unsanitised == 0:
fileobj.write(color(heading, GOOD))
else:
fileobj.write(color(heading, DANGER)) | 0.002577 |
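A quick, self-contained check of how the heading string is assembled (the counts here are invented):
n_unsanitised, n_sanitised = 2, 1
heading = "{} vulnerabilit{} found{}.\n".format(
    'No' if n_unsanitised == 0 else n_unsanitised,
    'y' if n_unsanitised == 1 else 'ies',
    " (plus {} sanitised)".format(n_sanitised) if n_sanitised else "",
)
print(heading)  # -> 2 vulnerabilities found (plus 1 sanitised).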
def _parse_json_with_fieldnames(self):
""" Parse the raw JSON with all attributes/methods defined in the class, except for the
ones defined starting with '_' or flagged in cls._TO_EXCLUDE.
The final result is stored in self.json
"""
for key in dir(self):
if not key.startswith('_') and key not in self._TO_EXCLUDE:
self.fieldnames.append(key)
value = getattr(self, key)
if value:
self.json[key] = value
# Add OK attribute even if value is "False"
self.json['ok'] = self.ok | 0.004862 |
def get_next_valid_day(self, timestamp):
"""Get next valid day for timerange
:param timestamp: time we compute from
:type timestamp: int
:return: timestamp of the next valid day (midnight) in LOCAL time.
:rtype: int | None
"""
if self.get_next_future_timerange_valid(timestamp) is None:
# this day is finish, we check for next period
(start_time, _) = self.get_start_and_end_time(get_day(timestamp) + 86400)
else:
(start_time, _) = self.get_start_and_end_time(timestamp)
if timestamp <= start_time:
return get_day(start_time)
if self.is_time_day_valid(timestamp):
return get_day(timestamp)
return None | 0.003984 |
def path_to_str(path):
""" Convert pathlib.Path objects to str; return other objects as-is. """
try:
from pathlib import Path as _Path
except ImportError: # Python < 3.4
class _Path:
pass
if isinstance(path, _Path):
return str(path)
return path | 0.003322 |
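Usage sketch, assuming the helper above is in scope:
from pathlib import Path
print(path_to_str(Path('/tmp/data.csv')))  # -> /tmp/data.csv
print(path_to_str('/tmp/data.csv'))        # strings pass through unchanged
print(path_to_str(42))                     # so does any other object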
def cmdline(argv, flags):
"""A cmdopts wrapper that takes a list of flags and builds the
corresponding cmdopts rules to match those flags."""
rules = dict([(flag, {'flags': ["--%s" % flag]}) for flag in flags])
return parse(argv, rules) | 0.003922 |
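The parse() helper is not shown here, but the rules the wrapper builds can be inspected on their own (the flag names are illustrative):
flags = ['host', 'port']
rules = dict([(flag, {'flags': ["--%s" % flag]}) for flag in flags])
print(rules)  # {'host': {'flags': ['--host']}, 'port': {'flags': ['--port']}}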
def _construct_deployment(self, rest_api):
"""Constructs and returns the ApiGateway Deployment.
:param model.apigateway.ApiGatewayRestApi rest_api: the RestApi for this Deployment
:returns: the Deployment to which this SAM Api corresponds
:rtype: model.apigateway.ApiGatewayDeployment
"""
deployment = ApiGatewayDeployment(self.logical_id + 'Deployment',
attributes=self.passthrough_resource_attributes)
deployment.RestApiId = rest_api.get_runtime_attr('rest_api_id')
deployment.StageName = 'Stage'
return deployment | 0.006329 |
def add(self, target, args=None, kwargs=None, **options):
"""Add an Async job to this context.
Takes an Async object or the arguments to construct an Async
object as arguments. Returns the newly added Async object.
"""
from furious.async import Async
from furious.batcher import Message
if self._tasks_inserted:
raise errors.ContextAlreadyStartedError(
"This Context has already had its tasks inserted.")
if not isinstance(target, (Async, Message)):
target = Async(target, args, kwargs, **options)
target.update_options(_context_id=self.id)
if self.persist_async_results:
target.update_options(persist_result=True)
self._tasks.append(target)
self._options['_task_ids'].append(target.id)
return target | 0.003472 |
def addDataToQueue(self, coordinate, reset, sequenceId):
"""
Add the given data item to the sensor's internal queue. Calls to compute
will cause items in the queue to be dequeued in FIFO order.
@param coordinate A list containing the N-dimensional integer coordinate
space to be encoded. This list can be specified in two
ways, as a python list of integers or as a string which
can evaluate to a python list of integers.
@param reset An int or string that is 0 or 1. resetOut will be set to
this value when this item is computed.
@param sequenceId An int or string with an integer ID associated with this
token and its sequence (document).
"""
if type(coordinate) == type(""):
coordinateList = eval(coordinate)
elif type(coordinate) == type([]):
coordinateList = coordinate
else:
raise Exception("CoordinateSensor.addDataToQueue: unknown type for "
"coordinate")
self.queue.appendleft({
"sequenceId": int(sequenceId),
"reset": int(reset),
"coordinate": coordinateList,
}) | 0.005025 |
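A small sketch of the FIFO behaviour implied by appendleft(): items pushed on the left come back off the right in insertion order (the payloads are invented):
from collections import deque
queue = deque()
for item in ({"sequenceId": 0, "reset": 1, "coordinate": [1, 2]},
             {"sequenceId": 0, "reset": 0, "coordinate": [3, 4]}):
    queue.appendleft(item)
print(queue.pop()["coordinate"])  # [1, 2] -- first item in, first out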
def write(self, writer=None, encoding='utf-8', indent=0, newline='',
omit_declaration=False, node_depth=0, quote_char='"'):
"""
Serialize this node and its descendants to text, writing
the output to a given *writer* or to stdout.
:param writer: an object such as a file or stream to which XML text
is sent. If *None* text is sent to :attr:`sys.stdout`.
:type writer: a file, stream, etc or None
:param string encoding: the character encoding for serialized text.
:param indent: indentation prefix to apply to descendent nodes for
pretty-printing. The value can take many forms:
- *int*: the number of spaces to indent. 0 means no indent.
- *string*: a literal prefix for indented nodes, such as ``\\t``.
- *bool*: no indent if *False*, four spaces indent if *True*.
- *None*: no indent
:type indent: string, int, bool, or None
:param newline: the string value used to separate lines of output.
The value can take a number of forms:
- *string*: the literal newline value, such as ``\\n`` or ``\\r``.
An empty string means no newline.
- *bool*: no newline if *False*, ``\\n`` newline if *True*.
- *None*: no newline.
:type newline: string, bool, or None
:param boolean omit_declaration: if *True* the XML declaration header
is omitted, otherwise it is included. Note that the declaration is
only output when serializing an :class:`xml4h.nodes.Document` node.
:param int node_depth: the indentation level to start at, such as 2 to
indent output as if the given *node* has two ancestors.
This parameter will only be useful if you need to output XML text
fragments that can be assembled into a document. This parameter
has no effect unless indentation is applied.
:param string quote_char: the character that delimits quoted content.
You should never need to mess with this.
Delegates to :func:`xml4h.writer.write_node` applied to this node.
"""
xml4h.write_node(self,
writer=writer, encoding=encoding, indent=indent,
newline=newline, omit_declaration=omit_declaration,
node_depth=node_depth, quote_char=quote_char) | 0.002491 |
def _url_base64_encode(msg):
"""
Base64 encodes a string using the URL-safe characters specified by
Amazon.
"""
msg_base64 = base64.b64encode(msg)
msg_base64 = msg_base64.replace('+', '-')
msg_base64 = msg_base64.replace('=', '_')
msg_base64 = msg_base64.replace('/', '~')
return msg_base64 | 0.005525 |
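The snippet above assumes Python 2 str semantics (on Python 3, base64.b64encode returns bytes, so str.replace would fail); a minimal Python 3 sketch of the same substitution, with an invented payload, would be:
import base64
msg = b"example-policy-bytes"
out = base64.b64encode(msg).decode('ascii')
out = out.replace('+', '-').replace('=', '_').replace('/', '~')
print(out)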
def write_area_data(self, file):
""" Writes area data to file.
"""
file.write("%% area data" + "\n")
file.write("%\tno.\tprice_ref_bus" + "\n")
file.write("areas = [" + "\n")
# TODO: Implement areas
file.write("\t1\t1;" + "\n")
file.write("];" + "\n") | 0.006329 |
def setup(self):
"Connect incoming connection to a telnet session"
try:
self.TERM = self.request.term
except:
pass
self.setterm(self.TERM)
self.sock = self.request._sock
for k in self.DOACK.keys():
self.sendcommand(self.DOACK[k], k)
for k in self.WILLACK.keys():
self.sendcommand(self.WILLACK[k], k) | 0.007444 |