text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---|
def matches(self, ldap_filter):
# type: (Union[str, pelix.ldapfilter.LDAPFilter]) -> bool
"""
Tests the properties of this EndpointDescription against the given
filter.
:param ldap_filter: A filter
:return: True if the properties match the filter
"""
return pelix.ldapfilter.get_ldap_filter(ldap_filter).matches(
self.__properties
) | 0.007282 |
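A minimal usage sketch of the matching call above. The property dictionary and the filter string are made-up examples; only pelix.ldapfilter.get_ldap_filter() and LDAPFilter.matches() are taken from the snippet itself.
import pelix.ldapfilter

# Hypothetical endpoint properties and filter string
props = {"endpoint.id": "42", "service.imported": "true"}
ldap_filter = pelix.ldapfilter.get_ldap_filter("(endpoint.id=42)")
print(ldap_filter.matches(props))   # True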
def array(self) -> numpy.ndarray:
"""The aggregated data of all logged |IOSequence| objects contained
in one single |numpy.ndarray| object.
The documentation on |NetCDFVariableAgg.shape| explains how
|NetCDFVariableAgg.array| is structured. This first example
confirms that, under default configuration (`timeaxis=1`),
the first axis corresponds to the location, while the second
one corresponds to time:
>>> from hydpy.core.examples import prepare_io_example_1
>>> nodes, elements = prepare_io_example_1()
>>> from hydpy.core.netcdftools import NetCDFVariableAgg
>>> ncvar = NetCDFVariableAgg('flux_nkor', isolate=False, timeaxis=1)
>>> for element in elements:
... nkor1 = element.model.sequences.fluxes.nkor
... ncvar.log(nkor1, nkor1.average_series())
>>> ncvar.array
array([[ 12. , 13. , 14. , 15. ],
[ 16.5, 18.5, 20.5, 22.5],
[ 25. , 28. , 31. , 34. ]])
When using the first axis as the "timeaxis", the resulting
|NetCDFVariableAgg.array| is transposed:
>>> ncvar = NetCDFVariableAgg('flux_nkor', isolate=False, timeaxis=0)
>>> for element in elements:
... nkor1 = element.model.sequences.fluxes.nkor
... ncvar.log(nkor1, nkor1.average_series())
>>> ncvar.array
array([[ 12. , 16.5, 25. ],
[ 13. , 18.5, 28. ],
[ 14. , 20.5, 31. ],
[ 15. , 22.5, 34. ]])
"""
array = numpy.full(self.shape, fillvalue, dtype=float)
for idx, subarray in enumerate(self.arrays.values()):
array[self.get_timeplaceslice(idx)] = subarray
return array | 0.00112 |
def setup_lldpad_ports(self):
"""Setup the flows for passing LLDP/VDP frames in OVS. """
# Creating the physical bridge and setting up patch ports is done by
# OpenStack
ovs_bridges = ovs_lib.get_bridges(self.root_helper)
if self.ext_br not in ovs_bridges or self.integ_br not in ovs_bridges:
self.uplink_fail_reason = cconstants.bridge_not_cfgd_reason % (
ovs_bridges, self.integ_br, self.ext_br)
LOG.error("%s", self.uplink_fail_reason)
raise dfae.DfaAgentFailed(reason=self.uplink_fail_reason)
br = ovs_lib.OVSBridge(self.ext_br, root_helper=self.root_helper)
self.ext_br_obj = br
int_br = ovs_lib.OVSBridge(self.integ_br, root_helper=self.root_helper)
self.integ_br_obj = int_br
self.phy_peer_port, self.int_peer_port = self.find_interconnect_ports()
if self.phy_peer_port is None or self.int_peer_port is None:
self.uplink_fail_reason = cconstants.veth_not_cfgd_reason % (
self.phy_peer_port, self.int_peer_port)
LOG.error("%s", self.uplink_fail_reason)
raise dfae.DfaAgentFailed(reason=self.uplink_fail_reason)
lldp_ovs_veth_str = constants.LLDPAD_OVS_VETH_PORT + self.uplink
if len(lldp_ovs_veth_str) > constants.MAX_VETH_NAME:
lldp_ovs_veth_str = self.gen_veth_str(
constants.LLDPAD_OVS_VETH_PORT,
self.uplink)
lldp_loc_veth_str = constants.LLDPAD_LOC_VETH_PORT + self.uplink
if len(lldp_loc_veth_str) > constants.MAX_VETH_NAME:
lldp_loc_veth_str = self.gen_veth_str(
constants.LLDPAD_LOC_VETH_PORT,
self.uplink)
ip_wrapper = ip_lib.IPWrapper()
self.delete_vdp_flows()
br.delete_port(lldp_ovs_veth_str)
if ip_lib.device_exists(lldp_ovs_veth_str):
# What about OVS restart cases? fixme(padkrish)
# IMPORTANT: The link delete should be done only for non-restart
# cases. Otherwise, the MAC address of the veth interface changes
# for every delete/create. So, if lldpad has already sent the
# association, retriggering it will make the ASSOC appear as coming
# from another station and more than one VSI instance will appear
# at the Leaf. Deleting the assoc and creating the assoc for the new
# veth is not optimal. fixme(padkrish)
# ip_lib.IPDevice(lldp_ovs_veth_str,self.root_helper).link.delete()
lldp_loc_veth = ip_wrapper.device(lldp_loc_veth_str)
lldp_ovs_veth = ip_wrapper.device(lldp_ovs_veth_str)
else:
# fixme(padkrish) Due to the above reason, do the veth creation below only
# if it doesn't exist and was not deleted.
lldp_loc_veth, lldp_ovs_veth = (
ip_wrapper.add_veth(lldp_loc_veth_str,
lldp_ovs_veth_str))
if not br.port_exists(self.uplink):
phy_port_num = br.add_port(self.uplink)
else:
phy_port_num = br.get_port_ofport(self.uplink)
if phy_port_num == cconstants.INVALID_OFPORT:
self.uplink_fail_reason = cconstants.invalid_uplink_ofport_reason
LOG.error("%s", self.uplink_fail_reason)
return False
if not br.port_exists(lldp_ovs_veth_str):
lldp_ovs_portnum = br.add_port(lldp_ovs_veth)
else:
lldp_ovs_portnum = br.get_port_ofport(lldp_ovs_veth)
if lldp_ovs_portnum == cconstants.INVALID_OFPORT:
self.uplink_fail_reason = cconstants.lldp_ofport_not_detect_reason
LOG.error("%s", self.uplink_fail_reason)
return False
lldp_loc_veth.link.set_up()
lldp_ovs_veth.link.set_up()
# What about OVS restart cases fixme(padkrish)
self.program_vdp_flows(lldp_ovs_portnum, phy_port_num)
self.phy_peer_port_num = br.get_port_ofport(self.phy_peer_port)
self.int_peer_port_num = int_br.get_port_ofport(self.int_peer_port)
if (self.phy_peer_port_num == cconstants.INVALID_OFPORT or
self.int_peer_port_num == cconstants.INVALID_OFPORT):
self.uplink_fail_reason = cconstants.invalid_peer_ofport_reason % (
self.phy_peer_port_num, self.int_peer_port_num)
LOG.error("%s", self.uplink_fail_reason)
return False
self.lldpad_info = (lldpad.LldpadDriver(lldp_loc_veth_str, self.uplink,
self.root_helper))
ret = self.lldpad_info.enable_evb()
if not ret:
self.uplink_fail_reason = cconstants.evb_cfg_fail_reason
LOG.error("%s", self.uplink_fail_reason)
return False
self.lldp_local_veth_port = lldp_loc_veth_str
self.lldp_ovs_veth_port = lldp_ovs_veth_str
LOG.info("Setting up lldpad ports complete")
return True | 0.0004 |
def aes_pad(s, block_size=32, padding='{'):
""" Adds padding to get the correct block sizes for AES encryption
@s: #str being AES encrypted or decrypted
@block_size: the AES block size
@padding: character to pad with
-> padded #str
..
from vital.security import aes_pad
aes_pad("swing")
# -> 'swing{{{{{{{{{{{{{{{{{{{{{{{{{{{'
..
"""
return s + (block_size - len(s) % block_size) * padding | 0.002049 |
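A quick round-trip sketch for the padding helper above, assuming the aes_pad definition (or the vital.security import shown in its docstring) is in scope and that the plaintext itself never ends with the padding character:
padded = aes_pad("swing")             # 5 chars -> padded up to the 32-byte block size
assert len(padded) % 32 == 0
assert padded.rstrip('{') == "swing"  # stripping the pad character recovers the original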
def print_there(x, y, text):
""""
allows display of a game of life on a console via
resetting cursor position to a set point - looks 'ok'
for testing but not production quality.
"""
sys.stdout.write("\x1b7\x1b[%d;%df%s\x1b8" % (x, y, text))
sys.stdout.flush() | 0.003484 |
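A tiny demo of the cursor-positioning trick above, assuming an ANSI-capable terminal and the print_there definition in scope (sys is imported here because print_there itself uses it):
import sys
import time

for tick in range(3):
    print_there(2, 5, "tick %d" % tick)   # repaint the same screen cell each pass
    time.sleep(0.2)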
def string2json(self, string):
"""Convert json into its string representation.
Used for writing outputs to markdown."""
kwargs = {
'cls': BytesEncoder, # use the IPython bytes encoder
'indent': 1,
'sort_keys': True,
'separators': (',', ': '),
}
return cast_unicode(json.dumps(string, **kwargs), 'utf-8') | 0.005102 |
def _load_mol2(self, mol2_lines, mol2_code, columns):
"""Load mol2 contents into assert_raise_message instance"""
if columns is None:
col_names = COLUMN_NAMES
col_types = COLUMN_TYPES
else:
col_names, col_types = [], []
for i in range(len(columns)):
col_names.append(columns[i][0])
col_types.append(columns[i][1])
try:
self.mol2_text = ''.join(mol2_lines)
self.code = mol2_code
except TypeError:
mol2_lines = [m.decode() for m in mol2_lines]
self.mol2_text = ''.join(mol2_lines)
self.code = mol2_code.decode()
self._df = self._construct_df(mol2_lines, col_names, col_types) | 0.002621 |
def add_site(self, site):
"""
Add a site to the render window. The site is displayed as a sphere, the
color of which is determined based on the element. Partially occupied
sites are displayed as a single element color, though the site info
still shows the partial occupancy.
Args:
site: Site to add.
"""
start_angle = 0
radius = 0
total_occu = 0
for specie, occu in site.species.items():
radius += occu * (specie.ionic_radius
if isinstance(specie, Specie)
and specie.ionic_radius
else specie.average_ionic_radius)
total_occu += occu
vis_radius = 0.2 + 0.002 * radius
for specie, occu in site.species.items():
if not specie:
color = (1, 1, 1)
elif specie.symbol in self.el_color_mapping:
color = [i / 255 for i in self.el_color_mapping[specie.symbol]]
mapper = self.add_partial_sphere(site.coords, vis_radius, color,
start_angle, start_angle + 360 * occu)
self.mapper_map[mapper] = [site]
start_angle += 360 * occu
if total_occu < 1:
mapper = self.add_partial_sphere(site.coords, vis_radius, (1,1,1),
start_angle, start_angle + 360 * (1 - total_occu))
self.mapper_map[mapper] = [site] | 0.00456 |
def timer(func):
"""Time a method and print its duration after return
"""
name = func.__name__
@wraps(func)
def timed_func(self, *args, **kwargs): # pylint: disable=missing-docstring
_start = time.time()
out = func(self, *args, **kwargs)
self.log(2, '{0} took {1:.1f} sec'.format(name, time.time() - _start))
return out
return timed_func | 0.002525 |
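A self-contained sketch of how the decorator above is used. The Worker class and its log(level, message) method are hypothetical stand-ins for whatever the real host class provides:
import time
from functools import wraps   # required by the timer decorator above

class Worker:
    def log(self, level, message):
        print("[%d] %s" % (level, message))

    @timer
    def crunch(self, n):
        time.sleep(0.1)
        return sum(range(n))

Worker().crunch(1000)   # logs something like "[2] crunch took 0.1 sec"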
def read_data(header, fh=None, filename=None, index_order='F'):
"""Read data from file into :class:`numpy.ndarray`
The two parameters :obj:`fh` and :obj:`filename` are optional depending on the parameters but it never hurts to
specify both. The file handle (:obj:`fh`) is necessary if the header is attached with the NRRD data. However, if
the NRRD data is detached from the header, then the :obj:`filename` parameter is required to obtain the absolute
path to the data file.
See :ref:`user-guide:Reading NRRD files` for more information on reading NRRD files.
Parameters
----------
header : :class:`dict` (:class:`str`, :obj:`Object`)
Parsed fields/values obtained from :meth:`read_header` function
fh : file-object, optional
File object pointing to first byte of data. Only necessary if data is attached to header.
filename : :class:`str`, optional
Filename of the header file. Only necessary if data is detached from the header. This is used to get the
absolute data path.
index_order : {'C', 'F'}, optional
Specifies the index order of the resulting data array. Either 'C' (C-order) where the dimensions are ordered from
slowest-varying to fastest-varying (e.g. (z, y, x)), or 'F' (Fortran-order) where the dimensions are ordered
from fastest-varying to slowest-varying (e.g. (x, y, z)).
Returns
-------
data : :class:`numpy.ndarray`
Data read from NRRD file
See Also
--------
:meth:`read`, :meth:`read_header`
"""
if index_order not in ['F', 'C']:
raise NRRDError('Invalid index order')
# Check that the required fields are in the header
for field in _NRRD_REQUIRED_FIELDS:
if field not in header:
raise NRRDError('Header is missing required field: "%s".' % field)
if header['dimension'] != len(header['sizes']):
raise NRRDError('Number of elements in sizes does not match dimension. Dimension: %i, len(sizes): %i' % (
header['dimension'], len(header['sizes'])))
# Determine the data type from the header
dtype = _determine_datatype(header)
# Determine the byte skip, line skip and the data file
# These all can be written with or without the space according to the NRRD spec, so we check them both
line_skip = header.get('lineskip', header.get('line skip', 0))
byte_skip = header.get('byteskip', header.get('byte skip', 0))
data_filename = header.get('datafile', header.get('data file', None))
# If the data file is separate from the header file, then open the data file to read from that instead
if data_filename is not None:
# If the pathname is relative, then append the current directory from the filename
if not os.path.isabs(data_filename):
if filename is None:
raise NRRDError('Filename parameter must be specified when a relative data file path is given')
data_filename = os.path.join(os.path.dirname(filename), data_filename)
# Override the fh parameter with the data filename
# Note that this is opened without a "with" block, thus it must be closed manually in all circumstances
fh = open(data_filename, 'rb')
# Get the total number of data points by multiplying the size of each dimension together
total_data_points = header['sizes'].prod()
# Skip the number of lines requested when line_skip >= 0
# Irrespective of the NRRD file having attached/detached header
# Lines are skipped before getting to the beginning of the data
if line_skip >= 0:
for _ in range(line_skip):
fh.readline()
else:
# Must close the file because if the file was opened above from detached filename, there is no "with" block to
# close it for us
fh.close()
raise NRRDError('Invalid lineskip, allowed values are greater than or equal to 0')
# Skip the requested number of bytes or seek backward, and then parse the data using NumPy
if byte_skip < -1:
# Must close the file because if the file was opened above from detached filename, there is no "with" block to
# close it for us
fh.close()
raise NRRDError('Invalid byteskip, allowed values are greater than or equal to -1')
elif byte_skip >= 0:
fh.seek(byte_skip, os.SEEK_CUR)
elif byte_skip == -1 and header['encoding'] not in ['gzip', 'gz', 'bzip2', 'bz2']:
fh.seek(-dtype.itemsize * total_data_points, os.SEEK_END)
else:
# The only case left should be: byte_skip == -1 and header['encoding'] == 'gzip'
byte_skip = -dtype.itemsize * total_data_points
# If a compression encoding is used, then byte skip AFTER decompressing
if header['encoding'] == 'raw':
data = np.fromfile(fh, dtype)
elif header['encoding'] in ['ASCII', 'ascii', 'text', 'txt']:
data = np.fromfile(fh, dtype, sep=' ')
else:
# Handle compressed data now
# Construct the decompression object based on encoding
if header['encoding'] in ['gzip', 'gz']:
decompobj = zlib.decompressobj(zlib.MAX_WBITS | 16)
elif header['encoding'] in ['bzip2', 'bz2']:
decompobj = bz2.BZ2Decompressor()
else:
# Must close the file because if the file was opened above from detached filename, there is no "with" block
# to close it for us
fh.close()
raise NRRDError('Unsupported encoding: "%s"' % header['encoding'])
# Loop through the file and read a chunk at a time (see _READ_CHUNKSIZE why it is read in chunks)
decompressed_data = bytearray()
# Read all of the remaining data from the file
# Obtain the length of the compressed data since we will be using it repeatedly, more efficient
compressed_data = fh.read()
compressed_data_len = len(compressed_data)
start_index = 0
# Loop through data and decompress it chunk by chunk
while start_index < compressed_data_len:
# Calculate the end index = start index plus chunk size
# Set to the string length to read the remaining chunk at the end
end_index = min(start_index + _READ_CHUNKSIZE, compressed_data_len)
# Decompress and append data
decompressed_data += decompobj.decompress(compressed_data[start_index:end_index])
# Update start index
start_index = end_index
# Delete the compressed data since we do not need it anymore
# This could potentially be using a lot of memory
del compressed_data
# Byte skip is applied AFTER the decompression. Skip first x bytes of the decompressed data and parse it using
# NumPy
data = np.frombuffer(decompressed_data[byte_skip:], dtype)
# Close the file, even if opened using "with" block, closing it manually does not hurt
fh.close()
if total_data_points != data.size:
raise NRRDError('Size of the data does not equal the product of all the dimensions: {0}-{1}={2}'
.format(total_data_points, data.size, total_data_points - data.size))
# In the NRRD header, the fields are specified in Fortran order, i.e, the first index is the one that changes
# fastest and last index changes slowest. This needs to be taken into consideration since numpy uses C-order
# indexing.
# The array shape from NRRD (x,y,z) needs to be reversed as numpy expects (z,y,x).
data = np.reshape(data, tuple(header['sizes'][::-1]))
# Transpose data to enable Fortran indexing if requested.
if index_order == 'F':
data = data.T
return data | 0.00465 |
def netconf_config_change_changed_by_server_or_user_by_user_session_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
netconf_config_change = ET.SubElement(config, "netconf-config-change", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-notifications")
changed_by = ET.SubElement(netconf_config_change, "changed-by")
server_or_user = ET.SubElement(changed_by, "server-or-user")
by_user = ET.SubElement(server_or_user, "by-user")
session_id = ET.SubElement(by_user, "session-id")
session_id.text = kwargs.pop('session_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.005626 |
def aboveAt(self, offset=0):
""" Returns point in the center of the region's top side (offset to the top
by negative ``offset``) """
return Location(self.getX() + (self.getW() / 2), self.getY() + offset) | 0.013216 |
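The arithmetic above, worked through for a hypothetical 100-pixel-wide region at (10, 20) with offset=-5:
x, y, w = 10, 20, 100
offset = -5
print((x + (w / 2), y + offset))   # (60.0, 15) -- horizontally centred, 5 px above the top edge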
def dens_floc(ConcAl, ConcClay, DIM_FRACTAL, DiamTarget, coag, material, Temp):
"""Calculate floc density as a function of size."""
WaterDensity = pc.density_water(Temp).magnitude
return ((dens_floc_init(ConcAl, ConcClay, coag, material).magnitude
- WaterDensity
)
* (material.Diameter / DiamTarget)**(3 - DIM_FRACTAL)
+ WaterDensity
) | 0.002445 |
def execute(script, *args, **kwargs):
"""
Executes a command as a subprocess (no shell is invoked). Spaces should break up the args. Usage: execute('grep', 'TODO', '*')
NOTE: Any kwargs will be converted to args in the destination command.
E.g. execute('grep', 'TODO', '*', **{'--before-context': 5}) will be `grep TODO * --before-context=5`
"""
popen_args = [script] + list(args)
if kwargs:
popen_args.extend(_kwargs_to_execute_args(kwargs))
try:
return check_call(popen_args, shell=False)
except CalledProcessError as ex:
_print(ex)
sys.exit(ex.returncode)
except Exception as ex:
_print('Error: {} with script: {} and args {}'.format(ex, script, args))
sys.exit(1) | 0.005442 |
def scaled_pressure3_encode(self, time_boot_ms, press_abs, press_diff, temperature):
'''
Barometer readings for 3rd barometer
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
press_abs : Absolute pressure (hectopascal) (float)
press_diff : Differential pressure 1 (hectopascal) (float)
temperature : Temperature measurement (0.01 degrees celsius) (int16_t)
'''
return MAVLink_scaled_pressure3_message(time_boot_ms, press_abs, press_diff, temperature) | 0.012158 |
def create_post(post_uid, post_data):
'''
create the post.
'''
title = post_data['title'].strip()
if len(title) < 2:
return False
cur_rec = MPost.get_by_uid(post_uid)
if cur_rec:
return False
entry = TabPost.create(
title=title,
date=datetime.now(),
cnt_md=tornado.escape.xhtml_escape(post_data['cnt_md'].strip()),
cnt_html=tools.markdown2html(post_data['cnt_md']),
uid=post_uid,
time_create=post_data.get('time_create', tools.timestamp()),
time_update=post_data.get('time_update', tools.timestamp()),
user_name=post_data['user_name'],
view_count=post_data['view_count'] if 'view_count' in post_data else 1,
logo=post_data['logo'],
memo=post_data['memo'] if 'memo' in post_data else '',
order=post_data['order'] if 'order' in post_data else '',
keywords=post_data['keywords'] if 'keywords' in post_data else '',
extinfo=post_data['extinfo'] if 'extinfo' in post_data else {},
kind=post_data['kind'] if 'kind' in post_data else '1',
valid=post_data.get('valid', 1)
)
return entry.uid | 0.003123 |
def random(self, max_number=None):
""" Return a random integer between min and max (inclusive).
"""
min_number = self.obj
if max_number is None:
min_number = 0
max_number = self.obj
return random.randint(min_number, max_number) | 0.006826 |
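For reference, the inclusive behaviour promised by the docstring maps onto the standard library like this (plain stdlib sketch, independent of the wrapper class):
import random

print(random.randint(0, 10))   # any integer from 0 to 10, inclusive
print(random.randint(5, 10))   # any integer from 5 to 10, inclusive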
def go_standby(self, comment=None):
"""
Executes a Go-Standby operation on the specified node.
To get the status of the current node/s, run :func:`status`
:param str comment: optional comment to audit
:raises NodeCommandFailed: engine cannot go standby
:return: None
"""
self.make_request(
NodeCommandFailed,
method='update',
resource='go_standby',
params={'comment': comment}) | 0.004082 |
def get_profile_dir ():
"""Return path where all profiles of current user are stored."""
basedir = unicode(os.environ["HOME"])
return os.path.join(basedir, u"Library", u"Safari") | 0.010526 |
def offTagDel(self, name, func):
'''
Unregister a callback for tag deletion.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
'''
if '*' in name:
self.ontagdelglobs.rem(name, func)
return
cblist = self.ontagdels.get(name)
if cblist is None:
return
try:
cblist.remove(func)
except ValueError:
pass | 0.003876 |
def value_to_position(self, y):
"""Convert value to position in pixels"""
vsb = self.editor.verticalScrollBar()
return (y-vsb.minimum())*self.get_scale_factor()+self.offset | 0.010204 |
def _set_intrinsics(self):
"""Read the intrinsics matrix from the stream.
"""
strm = self._profile.get_stream(rs.stream.color)
obj = strm.as_video_stream_profile().get_intrinsics()
self._intrinsics[0, 0] = obj.fx
self._intrinsics[1, 1] = obj.fy
self._intrinsics[0, 2] = obj.ppx
self._intrinsics[1, 2] = obj.ppy | 0.005348 |
def get_view_url(self, view_name, user,
url_kwargs=None, context_kwargs=None,
follow_parent=True, check_permissions=True):
"""
Returns the url for a given view_name. If the view isn't
found or the user does not have permission None is returned.
A NoReverseMatch error may be raised if the view was unable
to find the correct keyword arguments for the reverse function
from the given url_kwargs and context_kwargs.
:param view_name: The name of the view that you want.
:param user: The user who is requesting the view
:param url_kwargs: The url keyword arguments that came \
with the request object. The view itself is responsible \
to remove arguments that would not be part of a normal match \
for that view. This is done by calling the `get_url_kwargs` \
method on the view.
:param context_kwargs: Extra arguments that will be passed \
to the view for consideration in the final keyword arguments \
for reverse.
:param follow_parent: If we encounter a parent reference should \
we follow it. Defaults to True.
:param check_permissions: Run permission checks. Defaults to True.
"""
view, url_name = self.get_initialized_view_and_name(view_name,
follow_parent=follow_parent)
if isinstance(view, URLAlias):
view_name = view.get_view_name(view_name)
bundle = view.get_bundle(self, url_kwargs, context_kwargs)
if bundle and isinstance(bundle, Bundle):
return bundle.get_view_url(view_name, user,
url_kwargs=url_kwargs,
context_kwargs=context_kwargs,
follow_parent=follow_parent,
check_permissions=check_permissions)
elif view:
# Get kwargs from view
if not url_kwargs:
url_kwargs = {}
url_kwargs = view.get_url_kwargs(context_kwargs, **url_kwargs)
view.kwargs = url_kwargs
if check_permissions and not view.can_view(user):
return None
url = reverse("admin:%s" % url_name, kwargs=url_kwargs)
return url | 0.002068 |
def copy_memory(self, address, size):
"""
Copy the bytes from address to address+size into Unicorn
Used primarily for copying memory maps
:param address: start of buffer to copy
:param size: How many bytes to copy
"""
start_time = time.time()
map_bytes = self._cpu._raw_read(address, size)
self._emu.mem_write(address, map_bytes)
if time.time() - start_time > 3:
logger.info(f"Copying {hr_size(size)} map at {hex(address)} took {time.time() - start_time} seconds") | 0.005396 |
def _set_italian_leading_zeros_for_phone_number(national_number, numobj):
"""A helper function to set the values related to leading zeros in a
PhoneNumber."""
if len(national_number) > 1 and national_number[0] == U_ZERO:
numobj.italian_leading_zero = True
number_of_leading_zeros = 1
# Note that if the number is all "0"s, the last "0" is not counted as
# a leading zero.
while (number_of_leading_zeros < len(national_number) - 1 and
national_number[number_of_leading_zeros] == U_ZERO):
number_of_leading_zeros += 1
if number_of_leading_zeros != 1:
numobj.number_of_leading_zeros = number_of_leading_zeros | 0.00142 |
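A worked run of the leading-zero loop above on the digit string "0011", using plain strings in place of the PhoneNumber object and the U_ZERO constant:
national_number = "0011"
number_of_leading_zeros = 1
while (number_of_leading_zeros < len(national_number) - 1 and
       national_number[number_of_leading_zeros] == "0"):
    number_of_leading_zeros += 1
print(number_of_leading_zeros)   # 2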
def virtual_machines_list(resource_group, **kwargs):
'''
.. versionadded:: 2019.2.0
List all virtual machines within a resource group.
:param resource_group: The resource group name to list virtual
machines within.
CLI Example:
.. code-block:: bash
salt-call azurearm_compute.virtual_machines_list testgroup
'''
result = {}
compconn = __utils__['azurearm.get_client']('compute', **kwargs)
try:
vms = __utils__['azurearm.paged_object_to_list'](
compconn.virtual_machines.list(
resource_group_name=resource_group
)
)
for vm in vms: # pylint: disable=invalid-name
result[vm['name']] = vm
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
result = {'error': str(exc)}
return result | 0.001129 |
def _run2(self):
"""Workhorse for do_run_2"""
if self.check_update_J():
self.update_J()
else:
if self.check_Broyden_J():
self.update_Broyden_J()
if self.check_update_eig_J():
self.update_eig_J()
#0. Find _last_residuals, _last_error, etc:
_last_residuals = self.calc_residuals().copy()
_last_error = 1*self.error
_last_vals = self.param_vals.copy()
#1. Calculate 2 possible steps
delta_params_1 = self.find_LM_updates(self.calc_grad(),
do_correct_damping=False)
self.decrease_damping()
delta_params_2 = self.find_LM_updates(self.calc_grad(),
do_correct_damping=False)
self.decrease_damping(undo_decrease=True)
#2. Check which step is best:
er1 = self.update_function(self.param_vals + delta_params_1)
er2 = self.update_function(self.param_vals + delta_params_2)
triplet = (self.error, er1, er2)
best_step = find_best_step(triplet)
if best_step == 0:
#Both bad steps, put back & increase damping:
_ = self.update_function(self.param_vals.copy())
grad = self.calc_grad()
CLOG.debug('Bad step, increasing damping')
CLOG.debug('%f\t%f\t%f' % triplet)
for _try in range(self._max_inner_loop):
self.increase_damping()
delta_vals = self.find_LM_updates(grad)
er_new = self.update_function(self.param_vals + delta_vals)
good_step = er_new < self.error
if good_step:
#Update params, error, break:
self.update_param_vals(delta_vals, incremental=True)
self.error = er_new
CLOG.debug('Sufficiently increased damping')
CLOG.debug('%f\t%f' % (triplet[0], self.error))
break
else: #for-break-else
#Throw a warning, put back the parameters
CLOG.warn('Stuck!')
self.error = self.update_function(self.param_vals.copy())
elif best_step == 1:
#er1 <= er2:
good_step = True
CLOG.debug('Good step, same damping')
CLOG.debug('%f\t%f\t%f' % triplet)
#Update to er1 params:
er1_1 = self.update_function(self.param_vals + delta_params_1)
if np.abs(er1_1 - er1) > 1e-6:
raise RuntimeError('Function updates are not exact.')
self.update_param_vals(delta_params_1, incremental=True)
self.error = er1
elif best_step == 2:
#er2 < er1:
good_step = True
self.error = er2
CLOG.debug('Good step, decreasing damping')
CLOG.debug('%f\t%f\t%f' % triplet)
#-we're already at the correct parameters
self.update_param_vals(delta_params_2, incremental=True)
self.decrease_damping()
#3. Run with current J, damping; update what we need to::
if good_step:
self._last_residuals = _last_residuals
self._last_error = _last_error
self._last_vals = _last_vals
self.error
self.do_internal_run(initial_count=1) | 0.005069 |
def setup_auth_paths(app, auth, prefix, params):
"""Add URL rules for auth paths."""
base = urljoin('/', prefix + '/') # Must end in slash
app.add_url_rule(base + 'login', prefix + 'login_handler',
auth.login_handler, defaults=params)
app.add_url_rule(base + 'logout', prefix + 'logout_handler',
auth.logout_handler, defaults=params)
if (auth.client_id_handler):
app.add_url_rule(base + 'client', prefix + 'client_id_handler',
auth.client_id_handler, defaults=params)
app.add_url_rule(base + 'token', prefix + 'access_token_handler',
auth.access_token_handler, defaults=params)
if (auth.home_handler):
app.add_url_rule(base + 'home', prefix + 'home_handler',
auth.home_handler, defaults=params) | 0.001174 |
def _validate(wrapped):
'''
Decorator for common function argument validation
'''
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
container_type = kwargs.get('container_type')
exec_driver = kwargs.get('exec_driver')
valid_driver = {
'docker': ('lxc-attach', 'nsenter', 'docker-exec'),
'lxc': ('lxc-attach',),
'nspawn': ('nsenter',),
}
if container_type not in valid_driver:
raise SaltInvocationError(
'Invalid container type \'{0}\'. Valid types are: {1}'
.format(container_type, ', '.join(sorted(valid_driver)))
)
if exec_driver not in valid_driver[container_type]:
raise SaltInvocationError(
'Invalid command execution driver. Valid drivers are: {0}'
.format(', '.join(valid_driver[container_type]))
)
if exec_driver == 'lxc-attach' and not salt.utils.path.which('lxc-attach'):
raise SaltInvocationError(
'The \'lxc-attach\' execution driver has been chosen, but '
'lxc-attach is not available. LXC may not be installed.'
)
return wrapped(*args, **salt.utils.args.clean_kwargs(**kwargs))
return wrapper | 0.001534 |
def _memory_sized_lists(self,
instances: Iterable[Instance]) -> Iterable[List[Instance]]:
"""
Breaks the dataset into "memory-sized" lists of instances,
which it yields up one at a time until it gets through a full epoch.
For example, if the dataset is already an in-memory list, and each epoch
represents one pass through the dataset, it just yields back the dataset.
Whereas if the dataset is lazily read from disk and we've specified to
load 1000 instances at a time, then it yields lists of 1000 instances each.
"""
lazy = is_lazy(instances)
# Get an iterator over the next epoch worth of instances.
iterator = self._take_instances(instances, self._instances_per_epoch)
# We have four different cases to deal with:
# With lazy instances and no guidance about how many to load into memory,
# we just load ``batch_size`` instances at a time:
if lazy and self._max_instances_in_memory is None:
yield from lazy_groups_of(iterator, self._batch_size)
# If we specified max instances in memory, lazy or not, we just
# load ``max_instances_in_memory`` instances at a time:
elif self._max_instances_in_memory is not None:
yield from lazy_groups_of(iterator, self._max_instances_in_memory)
# If we have non-lazy instances, and we want all instances each epoch,
# then we just yield back the list of instances:
elif self._instances_per_epoch is None:
yield ensure_list(instances)
# In the final case we have non-lazy instances, we want a specific number
# of instances each epoch, and we didn't specify how to many instances to load
# into memory. So we convert the whole iterator to a list:
else:
yield list(iterator) | 0.005288 |
def _sign(self,params):
'''
Generate API sign code
'''
for k, v in params.iteritems():
if type(v) == int: v = str(v)
elif type(v) == float: v = '%.2f'%v
elif type(v) in (list, set):
v = ','.join([str(i) for i in v])
elif type(v) == bool: v = 'true' if v else 'false'
elif type(v) == datetime.datetime: v = v.strftime('%Y-%m-%d %X')
if type(v) == unicode:
params[k] = v.encode('utf-8')
else:
params[k] = v
src = self.APP_SECRET + ''.join(["%s%s" % (k, v) for k, v in sorted(params.iteritems())])
return md5(src).hexdigest().upper() | 0.014065 |
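The same signing scheme written as a self-contained Python 3 sketch (the secret and parameters are made up; the method above targets Python 2, hence iteritems and unicode):
import hashlib

APP_SECRET = "secret"                                        # hypothetical credential
params = {"method": "item.get", "fields": "num_iid,title"}   # hypothetical request params
src = APP_SECRET + "".join("%s%s" % (k, v) for k, v in sorted(params.items()))
print(hashlib.md5(src.encode("utf-8")).hexdigest().upper())  # the API sign code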
def get_resource_data(incoming_request):
"""Return the data from the incoming *request* based on the
Content-type."""
content_type = incoming_request.headers.get('Content-type', '').split(';')[0]
if ('Content-type' not in incoming_request.headers or
content_type in JSON_CONTENT_TYPES):
return incoming_request.json
elif content_type in HTML_CONTENT_TYPES:
if not incoming_request.form:
raise InvalidAPIUsage(400)
return incoming_request.form
else:
# HTTP 415: Unsupported Media Type
raise InvalidAPIUsage(
415,
UNSUPPORTED_CONTENT_TYPE_MESSAGE.format(
types=incoming_request.headers['Content-type'])) | 0.001387 |
def _remove_whitespace(text):
"""Remove excess whitespace from the ends of a given input string."""
# while True:
# old_text = text
# text = text.replace(' ', ' ')
# if text == old_text:
# return text
non_spaces = re.finditer(r'[^ ]', text)
if not non_spaces:
return text
first_non_space = non_spaces.next()
first_non_space = first_non_space.start()
last_non_space = None
for item in non_spaces:
last_non_space = item
if not last_non_space:
return text[first_non_space:]
else:
last_non_space = last_non_space.end()
return text[first_non_space:last_non_space] | 0.002649 |
def dump_nodes(self):
"""
Dump the tag, rule, id and value caches. For debugging.
example::
R = [
#dump_nodes
]
"""
print("DUMP NODE LOCAL INFOS")
try:
print("map Id->node name")
for k, v in self.id_cache.items():
print("[%d]=%s" % (k, v))
print("map tag->capture infos")
for k, v in self.tag_cache.items():
print("[%s]=%s" % (k, v))
print("map nodes->tag resolution")
for k, v in self.rule_nodes.items():
txt = "['%s']=%d" % (k, id(v))
if k in self.tag_cache:
tag = self.tag_cache[k]
txt += " tag <%s>" % tag
k = "%d:%d" % (tag._begin, tag._end)
if k in self._stream.value_cache:
txt += " cache <%s>" % self._stream.value_cache[k]
print(txt)
except Exception as err:
print("RECV Exception %s" % err)
import sys
sys.stdout.flush()
return True | 0.000998 |
def _get_animation_frames(self, all_datasets, shape, fill_value=None,
ignore_missing=False):
"""Create enhanced image frames to save to a file."""
for idx, ds in enumerate(all_datasets):
if ds is None and ignore_missing:
continue
elif ds is None:
log.debug("Missing frame: %d", idx)
data = da.zeros(shape, dtype=np.uint8, chunks=shape)
data = xr.DataArray(data)
else:
img = get_enhanced_image(ds)
data, mode = img.finalize(fill_value=fill_value)
if data.ndim == 3:
# assume all other shapes are (y, x)
# we need arrays grouped by pixel so
# transpose if needed
data = data.transpose('y', 'x', 'bands')
yield data.data | 0.003322 |
def to_query(self, fields=None):
""" Return a Query for this Table.
Args:
fields: the fields to return. If None, all fields will be returned. This can be a string
which will be injected into the Query after SELECT, or a list of field names.
Returns:
A Query object that will return the specified fields from the records in the Table.
"""
# Do import here to avoid top-level circular dependencies.
from . import _query
if fields is None:
fields = '*'
elif isinstance(fields, list):
fields = ','.join(fields)
return _query.Query('SELECT %s FROM %s' % (fields, self._repr_sql_()), context=self._context) | 0.010417 |
def buildCommands(self,files,args):
"""
Given a list of (input) files, buildCommands builds all the commands.
This is one of the two key methods of MapExecutor.
"""
commands = []
count = args.count_from
# For each file, a command is created:
for fileName in files:
commands.append(self.buildCommand(fileName,count,args))
count = count+1
return commands | 0.013393 |
def filename_to_url(filename: str) -> Tuple[str, str]:
"""
Recovers the url from the encoded filename. Returns it and the ETag
(which may be ``None``).
"""
try:
# If there is an etag, it's everything after the first period
decoded, etag = filename.split(".", 1)
except ValueError:
# Otherwise, use None
decoded, etag = filename, None
filename_bytes = decoded.encode('utf-8')
url_bytes = base64.b64decode(filename_bytes)
return url_bytes.decode('utf-8'), etag | 0.001887 |
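A round-trip sketch for the encoding above; the URL and ETag are made up, and it assumes the filename_to_url definition plus its base64/typing imports are in scope:
import base64

url = "https://example.com/model.bin"   # hypothetical cached URL
filename = base64.b64encode(url.encode("utf-8")).decode("utf-8") + ".abc123"
print(filename_to_url(filename))        # ('https://example.com/model.bin', 'abc123')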
def becomeMemberOf(self, groupRole):
"""
Instruct this (user or group) Role to become a member of a group role.
@param groupRole: The role that this group should become a member of.
"""
self.store.findOrCreate(RoleRelationship,
group=groupRole,
member=self) | 0.005525 |
def get_details(self):
"""Build details dictionary"""
body = helpers.req_body(self.manager, 'devicedetail')
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api('/131airpurifier/v1/device/deviceDetail',
method='post', headers=head, json=body)
if r is not None and helpers.check_response(r, 'airpur_detail'):
self.device_status = r.get('deviceStatus', 'unknown')
self.connection_status = r.get('connectionStatus', 'unknown')
self.details['active_time'] = r.get('activeTime', 0)
self.details['filter_life'] = r.get('filterLife', {})
self.details['screeen_status'] = r.get('screenStatus', 'unknown')
self.details['mode'] = r.get('mode', 'unknown')
self.details['level'] = r.get('level', None) | 0.002328 |
def downlad_file(url, fname):
"""Download file from url and save as fname."""
print("Downloading {} as {}".format(url, fname))
response = urlopen(url)
download = response.read()
with open(fname, 'wb') as fh:
fh.write(download) | 0.003937 |
def from_file(cls, vert, frag, **kwargs):
"""
Reads the shader programs, given the vert and frag filenames
Arguments:
- vert (str): The filename of the vertex shader program (ex: 'vertshader.vert')
- frag (str): The filename of the fragment shader program (ex: 'fragshader.frag')
Returns:
- shader (Shader): The Shader using these files.
"""
vert_program = open(vert).read()
frag_program = open(frag).read()
return cls(vert=vert_program, frag=frag_program, **kwargs) | 0.007042 |
def getFragment(self):
"""Return the final fragment"""
# assert self.innerHTML
fragment = self.fragmentClass()
self.openElements[0].reparentChildren(fragment)
return fragment | 0.009346 |
def get_calendar_holidays(self, year):
"""
Take into account the eventual shift to the next MON if any holiday
falls on SUN.
"""
# Unshifted days are here:
days = super(ChineseNewYearCalendar, self).get_calendar_holidays(year)
if self.shift_sunday_holidays:
days_to_inspect = copy(days)
for day_shifted in self.get_shifted_holidays(days_to_inspect):
days.append(day_shifted)
return days | 0.004082 |
def create_dialog(self):
"""Create the dialog."""
bbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
self.idx_ok = bbox.button(QDialogButtonBox.Ok)
self.idx_cancel = bbox.button(QDialogButtonBox.Cancel)
filebutton = QPushButton()
filebutton.setText('Choose')
self.idx_filename = filebutton
self.xp_format = FormMenu(['CSV', 'Brain Vision'])
self.all_types = FormBool('All event types')
self.idx_evt_type = QListWidget()
self.idx_evt_type.setSelectionMode(QAbstractItemView.ExtendedSelection)
filebutton.clicked.connect(self.save_as)
self.all_types.connect(self.toggle_buttons)
bbox.clicked.connect(self.button_clicked)
form = QFormLayout()
form.addRow('Filename', self.idx_filename)
form.addRow('Format', self.xp_format)
form.addRow(self.all_types)
form.addRow('Event type(s)', self.idx_evt_type)
btnlayout = QHBoxLayout()
btnlayout.addStretch(1)
btnlayout.addWidget(bbox)
vlayout = QVBoxLayout()
vlayout.addLayout(form)
vlayout.addStretch(1)
vlayout.addLayout(btnlayout)
self.setLayout(vlayout) | 0.005473 |
def pretty_unique_identifier(inst, identifier):
'''
Create a human-readable representation of a unique identifier.
'''
values = ''
prefix = ''
metaclass = xtuml.get_metaclass(inst)
for name, ty in metaclass.attributes:
if name in metaclass.identifying_attributes:
value = getattr(inst, name)
value = xtuml.serialize_value(value, ty)
values += '%s%s=%s' % (prefix, name, value)
prefix = ', '
return '%s(%s)' % (identifier, values) | 0.005556 |
def angstrom_alpha(aod1, lambda1, aod2, lambda2):
r"""
Calculate Angstrom alpha exponent.
Parameters
----------
aod1 : numeric
first aerosol optical depth
lambda1 : numeric
wavelength in nanometers corresponding to ``aod1``
aod2 : numeric
second aerosol optical depth
lambda2 : numeric
wavelength in nanometers corresponding to ``aod2``
Returns
-------
alpha : numeric
Angstrom :math:`\alpha` exponent for AOD in ``(lambda1, lambda2)``
See also
--------
angstrom_aod_at_lambda
"""
return - np.log(aod1 / aod2) / np.log(lambda1 / lambda2) | 0.001553 |
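A worked instance of the formula above with made-up optical depths at 440 nm and 870 nm:
import numpy as np

aod_440, aod_870 = 0.30, 0.15   # hypothetical aerosol optical depths
alpha = -np.log(aod_440 / aod_870) / np.log(440.0 / 870.0)
print(round(alpha, 3))          # 1.017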
def _convert(self, format):
"""Return a new Image instance with the given format.
Returns self if the format is already the same.
"""
if self.format == format:
return self
else:
image = Image(self.pil_image)
image._format = format
return image | 0.006006 |
def _get_weights(max_length):
"""Get weights for each offset in str of certain max length.
Args:
max_length: max length of the strings.
Returns:
A list of ints as weights.
Example:
If max_length is 2 and alphabet is "ab", then we have order "", "a", "aa",
"ab", "b", "ba", "bb". So the weight for the first char is 3.
"""
weights = [1]
for i in range(1, max_length):
weights.append(weights[i-1] * len(_ALPHABET) + 1)
weights.reverse()
return weights | 0.012295 |
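A worked run of the weight construction above for a hypothetical three-character alphabet (len(_ALPHABET) == 3) and max_length = 3:
alphabet_size = 3          # stands in for len(_ALPHABET)
max_length = 3
weights = [1]
for i in range(1, max_length):
    weights.append(weights[i - 1] * alphabet_size + 1)
weights.reverse()
print(weights)             # [13, 4, 1]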
def get_permissions_for_role(role, brain_or_object):
"""Return the permissions of the role which are granted on the object
Code extracted from `IRoleManager.permissionsOfRole`
:param role: The role to check the permission
:param brain_or_object: Catalog brain or object
:returns: List of permissions of the role
"""
obj = api.get_object(brain_or_object)
# Raise an error if the role is invalid
valid_roles = get_valid_roles_for(obj)
if role not in valid_roles:
raise ValueError("The Role '{}' is invalid.".format(role))
out = []
for item in obj.ac_inherited_permissions(1):
name, value = item[:2]
# Permission maps a named permission to a set of attribute names
permission = Permission(name, value, obj)
if role in permission.getRoles():
out.append(name)
return out | 0.001145 |
def eval(self, edate, etime):
"""Evaluate the schedule according to the provided date and time and
return the appropriate present value, or None if not in the effective
period."""
if _debug: LocalScheduleInterpreter._debug("eval %r %r", edate, etime)
# reference the schedule object
sched_obj = self.sched_obj
if _debug: LocalScheduleInterpreter._debug(" sched_obj: %r", sched_obj)
# verify the date falls in the effective period
if not match_date_range(edate, sched_obj.effectivePeriod):
return None
# the event priority is a list of values that are in effect for
# exception schedules with the special event priority, see 135.1-2013
# clause 7.3.2.23.10.3.8, Revision 4 Event Priority Test
event_priority = [None] * 16
next_day = (24, 0, 0, 0)
next_transition_time = [None] * 16
# check the exception schedule values
if sched_obj.exceptionSchedule:
for special_event in sched_obj.exceptionSchedule:
if _debug: LocalScheduleInterpreter._debug(" - special_event: %r", special_event)
# check the special event period
special_event_period = special_event.period
if special_event_period is None:
raise RuntimeError("special event period required")
match = False
calendar_entry = special_event_period.calendarEntry
if calendar_entry:
if _debug: LocalScheduleInterpreter._debug(" - calendar_entry: %r", calendar_entry)
match = date_in_calendar_entry(edate, calendar_entry)
else:
# get the calendar object from the application
calendar_object = sched_obj._app.get_object_id(special_event_period.calendarReference)
if not calendar_object:
raise RuntimeError("invalid calendar object reference")
if _debug: LocalScheduleInterpreter._debug(" - calendar_object: %r", calendar_object)
for calendar_entry in calendar_object.dateList:
if _debug: LocalScheduleInterpreter._debug(" - calendar_entry: %r", calendar_entry)
match = date_in_calendar_entry(edate, calendar_entry)
if match:
break
# didn't match the period, try the next special event
if not match:
if _debug: LocalScheduleInterpreter._debug(" - no matching calendar entry")
continue
# event priority array index
priority = special_event.eventPriority - 1
if _debug: LocalScheduleInterpreter._debug(" - priority: %r", priority)
# look for all of the possible times
for time_value in special_event.listOfTimeValues:
tval = time_value.time
if tval <= etime:
if isinstance(time_value.value, Null):
if _debug: LocalScheduleInterpreter._debug(" - relinquish exception @ %r", tval)
event_priority[priority] = None
next_transition_time[priority] = None
else:
if _debug: LocalScheduleInterpreter._debug(" - consider exception @ %r", tval)
event_priority[priority] = time_value.value
next_transition_time[priority] = next_day
else:
next_transition_time[priority] = tval
break
# assume the next transition will be at the start of the next day
earliest_transition = next_day
# check if any of the special events came up with something
for priority_value, next_transition in zip(event_priority, next_transition_time):
if next_transition is not None:
earliest_transition = min(earliest_transition, next_transition)
if priority_value is not None:
if _debug: LocalScheduleInterpreter._debug(" - priority_value: %r", priority_value)
return priority_value, earliest_transition
# start out with the default
daily_value = sched_obj.scheduleDefault
# check the daily schedule
if sched_obj.weeklySchedule:
daily_schedule = sched_obj.weeklySchedule[edate[3]]
if _debug: LocalScheduleInterpreter._debug(" - daily_schedule: %r", daily_schedule)
# look for all of the possible times
for time_value in daily_schedule.daySchedule:
if _debug: LocalScheduleInterpreter._debug(" - time_value: %r", time_value)
tval = time_value.time
if tval <= etime:
if isinstance(time_value.value, Null):
if _debug: LocalScheduleInterpreter._debug(" - back to normal @ %r", tval)
daily_value = sched_obj.scheduleDefault
else:
if _debug: LocalScheduleInterpreter._debug(" - new value @ %r", tval)
daily_value = time_value.value
else:
earliest_transition = min(earliest_transition, tval)
break
# return what was matched, if anything
return daily_value, earliest_transition | 0.005874 |
def get_downsample_pct(in_bam, target_counts, data):
"""Retrieve percentage of file to downsample to get to target counts.
Avoids minimal downsample which is not especially useful for
improving QC times; 90& or more of reads.
"""
total = sum(x.aligned for x in idxstats(in_bam, data))
with pysam.Samfile(in_bam, "rb") as work_bam:
n_rgs = max(1, len(work_bam.header.get("RG", [])))
rg_target = n_rgs * target_counts
if total > rg_target:
pct = float(rg_target) / float(total)
if pct < 0.9:
return pct | 0.001757 |
def parameter_distribution(self, parameter, bp, bins=30, merge=False, merge_method='mean', masked=False):
"""To get the parameter distribution of either a specific base-pair/step or a DNA segment over the MD simulation.
Parameters
----------
parameter : str
Name of a base-pair or base-step or helical parameter
For details about accepted keywords, see ``parameter`` in the method
:meth:`DNA.get_parameters`.
bp : 1D list or array
base-pairs to analyze
Example: ::
bp = [6] # merge = False
bp = [4,15] # merge = True
bins : int
Number of bins used to calculate the histogram
merge : bool
``Default=False``: As shown above, if ``True``, bp should be a list of
two values (a range); otherwise a list with a single value. If ``merge = True``, the
parameter for the respective DNA segment could be merged or
calculated by ``merge_method``.
merge_method : str
Method to calculate the parameter of a DNA segment from local
parameters of all base-pairs/steps that are between the range given
through ``bp``.
Currently accepted keywords are as follows:
* ``merge_method = mean``: Average of local parameters
* ``merge_method = sum``: Sum of local parameters
masked : bool
``Default=False``. To skip specific frames/snapshots.
``DNA.mask`` array should be set to use this functionality.
This array contains boolean (either ``True`` or ``False``) value
for each frame to mask the frames. Presently, mask array is
automatically generated during :meth:`DNA.generate_smooth_axis` to
skip those frames where 3D fitting curve was not successful within
the given criteria.
Returns
-------
values : 1D array
Array containing parameter values
density : 1D array
Array containing density for respective parameter values
"""
if not (isinstance(bp, list) or isinstance(bp, np.ndarray)):
raise AssertionError(
"type %s is not list or np.ndarray" % type(bp))
if (len(bp) > 1) and (merge == False):
raise AssertionError(
"bp %s contains more than two values, whereas merge=False. Use either one value in bp or merge=True" % bp)
exit(1)
if len(bp) == 1:
merge = False
if (merge == True) and not ((merge_method == 'mean') or (merge_method == 'sum')):
raise AssertionError(
"merge method %s is not available." % merge_method)
exit(1)
if len(bp) == 1:
param_value, bp_idx = self.get_parameters(
parameter, bp, bp_range=False, masked=masked)
else:
param_value, bp_idx = self.get_parameters(
parameter, bp, bp_range=True, masked=masked)
if (merge == True) and (merge_method == 'mean'):
param_value = np.mean(param_value, axis=0)
elif (merge == True) and (merge_method == 'sum'):
param_value = np.sum(param_value, axis=0)
else:
param_value = param_value[0]
density, bin_edges = np.histogram(param_value, bins=bins, density=True)
bin_width = bin_edges[1] - bin_edges[0]
density = np.insert(density, 0, 0.0)
density = np.append(density, 0.0)
values = []
for i in range(len(bin_edges) - 1):
values.append((bin_edges[i] + bin_edges[i + 1]) / 2)
values = np.asarray(values)
values = np.append(values, values[-1] + bin_width)
values = np.insert(values, 0, values[0] - bin_width)
return np.array(values), density | 0.002784 |
def limitReal(x, max_denominator=1000000):
"""Creates an pysmt Real constant from x.
Args:
x (number): A number to be cast to a pysmt constant.
max_denominator (int, optional): The maximum size of the denominator.
Default 1000000.
Returns:
A Real constant with the given value and the denominator limited.
"""
f = Fraction(x).limit_denominator(max_denominator)
return Real((f.numerator, f.denominator)) | 0.002151 |
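The core of the helper above in isolation: Fraction.limit_denominator bounds the denominator before the (numerator, denominator) pair is handed to pysmt's Real constructor:
import math
from fractions import Fraction

f = Fraction(math.pi).limit_denominator(1000)
print(f.numerator, f.denominator)   # 355 113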
def _check_node_list(path, sample, template, start_enumerate=0):
"""Check a list of nodes, e.g. function body"""
if len(sample) != len(template):
raise ASTNodeListMismatch(path, sample, template)
for i, (sample_node, template_node) in enumerate(zip(sample, template), start=start_enumerate):
if callable(template_node):
# Checker function inside a list
template_node(sample_node, path+[i])
else:
assert_ast_like(sample_node, template_node, path+[i]) | 0.003831 |
def firmware_version(self):
"""Return the firmware version."""
if (self._firmware_version is None) or \
(datetime.now() - timedelta(hours=24) > self._fw_last_read):
self._fw_last_read = datetime.now()
with self._bt_interface.connect(self._mac) as connection:
res_firmware = connection.read_handle(_HANDLE_READ_FIRMWARE_VERSION) # pylint: disable=no-member
_LOGGER.debug('Received result for handle %s: %s',
_HANDLE_READ_FIRMWARE_VERSION, res_firmware)
res_battery = connection.read_handle(_HANDLE_READ_BATTERY_LEVEL) # pylint: disable=no-member
_LOGGER.debug('Received result for handle %s: %d',
_HANDLE_READ_BATTERY_LEVEL, res_battery)
if res_firmware is None:
self._firmware_version = None
else:
self._firmware_version = res_firmware.decode("utf-8")
if res_battery is None:
self.battery = 0
else:
self.battery = int(ord(res_battery))
return self._firmware_version | 0.003422 |
def create(cls, name, dead_interval=40, hello_interval=10,
hello_interval_type='normal', dead_multiplier=1,
mtu_mismatch_detection=True, retransmit_interval=5,
router_priority=1, transmit_delay=1,
authentication_type=None, password=None,
key_chain_ref=None):
"""
Create custom OSPF interface settings profile
:param str name: name of interface settings
:param int dead_interval: in seconds
:param str hello_interval: in seconds
:param str hello_interval_type: \|normal\|fast_hello
:param int dead_multiplier: fast hello packet multiplier
:param bool mtu_mismatch_detection: True|False
:param int retransmit_interval: in seconds
:param int router_priority: set priority
:param int transmit_delay: in seconds
:param str authentication_type: \|password\|message_digest
:param str password: max 8 chars (required when
authentication_type='password')
:param str,Element key_chain_ref: OSPFKeyChain (required when
authentication_type='message_digest')
:raises CreateElementFailed: create failed with reason
:return: instance with meta
:rtype: OSPFInterfaceSetting
"""
json = {'name': name,
'authentication_type': authentication_type,
'password': password,
'key_chain_ref': element_resolver(key_chain_ref),
'dead_interval': dead_interval,
'dead_multiplier': dead_multiplier,
'hello_interval': hello_interval,
'hello_interval_type': hello_interval_type,
'mtu_mismatch_detection': mtu_mismatch_detection,
'retransmit_interval': retransmit_interval,
'router_priority': router_priority,
'transmit_delay': transmit_delay}
return ElementCreator(cls, json) | 0.006549 |
def sfilter(self, source):
"""Filter."""
return self._filter(source.text, source.context, source.encoding) | 0.01626 |
def xarray_var_iter(data, var_names=None, combined=False, skip_dims=None, reverse_selections=False):
"""Convert xarray data to an iterator over vectors.
Iterates over each var_name and all of its coordinates, returning the 1d
data.
Parameters
----------
data : xarray.Dataset
Posterior data in an xarray
var_names : iterator of strings (optional)
Should be a subset of data.data_vars. Defaults to all of them.
combined : bool
Whether to combine chains or leave them separate
skip_dims : set
dimensions to not iterate over
reverse_selections : bool
Whether to reverse selections before iterating.
Returns
-------
Iterator of (str, dict(str, any), np.array)
The string is the variable name, the dictionary are coordinate names to values,
and the array are the values of the variable at those coordinates.
"""
if skip_dims is None:
skip_dims = set()
if combined:
skip_dims = skip_dims.union({"chain", "draw"})
else:
skip_dims.add("draw")
if var_names is None:
if isinstance(data, xr.Dataset):
var_names = list(data.data_vars)
elif isinstance(data, xr.DataArray):
var_names = [data.name]
data = {data.name: data}
for var_name in var_names:
if var_name in data:
new_dims = [dim for dim in data[var_name].dims if dim not in skip_dims]
vals = [purge_duplicates(data[var_name][dim].values) for dim in new_dims]
dims = [{k: v for k, v in zip(new_dims, prod)} for prod in product(*vals)]
if reverse_selections:
dims = reversed(dims)
for selection in dims:
yield var_name, selection, data[var_name].sel(**selection).values | 0.003825 |
def update_campaign_start(self, campaign_id, **kwargs): # noqa: E501
"""Start a campaign. # noqa: E501
This command will begin the process of starting a campaign. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.update_campaign_start(campaign_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str campaign_id: The campaign ID (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.update_campaign_start_with_http_info(campaign_id, **kwargs) # noqa: E501
else:
(data) = self.update_campaign_start_with_http_info(campaign_id, **kwargs) # noqa: E501
return data | 0.001998 |
def get_records(self, zone_id, ttl=None, data=None, host=None,
record_type=None):
"""List, and optionally filter, records within a zone.
:param zone_id: the zone id in which to search.
:param int ttl: time in seconds
:param str data: the records data
:param str host: record's host
:param str record_type: the type of record
:returns: A list of dictionaries representing the matching records
within the specified zone.
"""
_filter = utils.NestedDict()
if ttl:
_filter['resourceRecords']['ttl'] = utils.query_filter(ttl)
if host:
_filter['resourceRecords']['host'] = utils.query_filter(host)
if data:
_filter['resourceRecords']['data'] = utils.query_filter(data)
if record_type:
_filter['resourceRecords']['type'] = utils.query_filter(
record_type.lower())
results = self.service.getResourceRecords(
id=zone_id,
mask='id,expire,domainId,host,minimum,refresh,retry,'
'mxPriority,ttl,type,data,responsiblePerson',
filter=_filter.to_dict(),
)
return results | 0.002423 |
def orchestrate_single(fun, name, test=None, queue=False, pillar=None, **kwargs):
'''
Execute a single state orchestration routine
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt-run state.orchestrate_single fun=salt.wheel name=key.list_all
'''
if pillar is not None and not isinstance(pillar, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary'
)
__opts__['file_client'] = 'local'
minion = salt.minion.MasterMinion(__opts__)
running = minion.functions['state.single'](
fun,
name,
test=test,
queue=queue,
pillar=pillar,
**kwargs)
ret = {minion.opts['id']: running}
__jid_event__.fire_event({'data': ret, 'outputter': 'highstate'}, 'progress')
return ret | 0.003468 |
def get_content(request):
"""Retrieve content using the ident-hash (uuid@version).
Depending on extension or HTTP_ACCEPT header return HTML or JSON.
"""
ext = request.matchdict.get('ext')
accept = request.headers.get('ACCEPT', '')
if not ext:
if ('application/xhtml+xml' in accept):
result, resp = get_content_html(request)
else: # default to json
result, resp = get_content_json(request)
elif ext == '.html':
result, resp = get_content_html(request)
elif ext == '.json':
result, resp = get_content_json(request)
else:
raise httpexceptions.HTTPNotFound()
if result['stateid'] not in [1, 8]:
# state 1 = current, state 8 = fallback
cc = resp.cache_control
cc.prevent_auto = True
cc.no_cache = True
cc.no_store = True
cc.must_revalidate = True
else:
resp.cache_control.public = True
# Build the canonical link
resp.headerlist.append(
('Link', '<{}> ;rel="Canonical"'.format(result['canon_url'])))
return resp | 0.00091 |
def _format_links_fields(self, links):
"""
Format the fields containing links into 4-tuples printable by _print_fields().
"""
fields = list()
for link in links:
linked_model = link['mdl'](super_context)
null = self._marker_true if link['null'] is True else self._marker_false
        # In LinkProxy, if reverse_name is empty, only 'reverse' carries the
        # name of the field on the link_source side.
field_name = link['field'] or link['reverse']
fields.append((self._field_prefix, field_name, '%s()' % linked_model.title, null))
fields.sort(key=lambda f: f[1])
return fields | 0.008708 |
def linop_scale(w, op):
"""Creates weighted `LinOp` from existing `LinOp`."""
# We assume w > 0. (This assumption only relates to the is_* attributes.)
with tf.name_scope("linop_scale"):
# TODO(b/35301104): LinearOperatorComposition doesn't combine operators, so
# special case combinations here. Once it does, this function can be
# replaced by:
# return linop_composition_lib.LinearOperatorComposition([
# scaled_identity(w), op])
def scaled_identity(w):
return tf.linalg.LinearOperatorScaledIdentity(
num_rows=op.range_dimension_tensor(),
multiplier=w,
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
if isinstance(op, tf.linalg.LinearOperatorIdentity):
return scaled_identity(w)
if isinstance(op, tf.linalg.LinearOperatorScaledIdentity):
return scaled_identity(w * op.multiplier)
if isinstance(op, tf.linalg.LinearOperatorDiag):
return tf.linalg.LinearOperatorDiag(
diag=w[..., tf.newaxis] * op.diag_part(),
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
if isinstance(op, tf.linalg.LinearOperatorLowerTriangular):
return tf.linalg.LinearOperatorLowerTriangular(
tril=w[..., tf.newaxis, tf.newaxis] * op.to_dense(),
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
raise NotImplementedError(
"Unsupported Linop type ({})".format(type(op).__name__)) | 0.005316 |
def cmd(send, msg, args):
"""Runs eix with the given arguments.
Syntax: {command} <package>
"""
if not msg:
result = subprocess.run(['eix', '-c'], env={'EIX_LIMIT': '0', 'HOME': os.environ['HOME']}, stdout=subprocess.PIPE, universal_newlines=True)
if result.returncode:
send("eix what?")
return
send(choice(result.stdout.splitlines()))
return
args = ['eix', '-c'] + msg.split()
result = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
if result.returncode:
send("%s isn't important enough for Gentoo." % msg)
else:
send(result.stdout.splitlines()[0].strip()) | 0.004213 |
def search_address(self, address, filters=None, startDate=None, endDate=None, types=None):
''' Perform a catalog search over an address string
Args:
address: any address string
filters: Array of filters. Optional. Example:
[
"(sensorPlatformName = 'WORLDVIEW01' OR sensorPlatformName ='QUICKBIRD02')",
"cloudCover < 10",
"offNadirAngle < 10"
]
startDate: string. Optional. Example: "2004-01-01T00:00:00.000Z"
endDate: string. Optional. Example: "2004-01-01T00:00:00.000Z"
types: Array of types to search for. Optional. Example (and default): ["Acquisition"]
Returns:
catalog search resultset
'''
lat, lng = self.get_address_coords(address)
        return self.search_point(lat, lng, filters=filters, startDate=startDate, endDate=endDate, types=types)
def int2str(self, num):
"""Converts an integer into a string.
:param num: A numeric value to be converted to another base as a
string.
:rtype: string
:raise TypeError: when *num* isn't an integer
:raise ValueError: when *num* isn't positive
"""
if int(num) != num:
raise TypeError('number must be an integer')
if num < 0:
raise ValueError('number must be positive')
radix, alphabet = self.radix, self.alphabet
if radix in (8, 10, 16) and \
alphabet[:radix].lower() == BASE85[:radix].lower():
return ({8: '%o', 10: '%d', 16: '%x'}[radix] % num).upper()
ret = ''
while True:
ret = alphabet[num % radix] + ret
if num < radix:
break
num //= radix
return ret | 0.002262 |
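A self-contained sketch of the repeated-division loop above, assuming a plain hexadecimal alphabet rather than the instance's configurable one; it is a standalone helper, not the method itself.
def to_base(num, alphabet="0123456789ABCDEF"):
    # Prepend the digit for num % radix, then divide, until num < radix.
    if num < 0:
        raise ValueError('number must be positive')
    radix = len(alphabet)
    digits = ''
    while True:
        digits = alphabet[num % radix] + digits
        if num < radix:
            break
        num //= radix
    return digits

assert to_base(255) == 'FF'
assert to_base(0) == '0'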
def DbGetDevicePropertyHist(self, argin):
""" Retrieve device property history
:param argin: Str[0] = Device name
Str[1] = Property name
:type: tango.DevVarStringArray
:return: Str[0] = Property name
Str[1] = date
Str[2] = Property value number (array case)
Str[3] = Property value 1
Str[n] = Property value n
:rtype: tango.DevVarStringArray """
self._log.debug("In DbGetDevicePropertyHist()")
device_name = argin[0]
prop_name = argin[1]
return self.db.get_device_property_hist(device_name, prop_name) | 0.003252 |
def dump_table(data: List[dict], fieldnames: Sequence[str]) -> str:
"""
    :param data: list of row dicts keyed by field name
    :param fieldnames: column names, in display order
:return: Table string
"""
def min3(num: int) -> int:
return 3 if num < 4 else num
width_by_col: Dict[str, int] = {
f: min3(max([string_width(str(d.get(f))) for d in data] + [string_width(f)])) for f in fieldnames
}
def fill_spaces(word: str, width: int, center=False):
""" aaa, 4 => ' aaa ' """
to_fills: int = width - string_width(word)
return f" {' ' * floor(to_fills / 2)}{word}{' ' * ceil(to_fills / 2)} " if center \
else f" {word}{' ' * to_fills} "
def to_record(r: dict) -> str:
return f"|{'|'.join([fill_spaces(str(r.get(f)), width_by_col.get(f)) for f in fieldnames])}|"
return f"""
|{'|'.join([fill_spaces(x, width_by_col.get(x), center=True) for x in fieldnames])}|
|{'|'.join([fill_spaces(width_by_col.get(x) * "-", width_by_col.get(x)) for x in fieldnames])}|
{os.linesep.join([to_record(x) for x in data])}
""".lstrip() | 0.005731 |
def __universal_read(file_path, file_type):
"""
Use a file path to create file metadata and load a file in the appropriate way, according to the provided file type.
:param str file_path: Path to file
:param str file_type: One of approved file types: xls, xlsx, txt, lpd
:return none:
"""
global files, cwd, settings
# check that we are using the correct function to load this file type. (i.e. readNoaa for a .txt file)
correct_ext = load_fn_matches_ext(file_path, file_type)
# Check that this path references a file
valid_path = path_type(file_path, "file")
# is the path a file?
if valid_path and correct_ext:
# get file metadata for one file
file_meta = collect_metadata_file(file_path)
# append to global files, then load in D
if file_type == ".lpd":
# add meta to global file meta
files[".lpd"].append(file_meta)
# append to global files
elif file_type in [".xls", ".xlsx"]:
print("reading: {}".format(print_filename(file_meta["full_path"])))
files[".xls"].append(file_meta)
# append to global files
elif file_type == ".txt":
print("reading: {}".format(print_filename(file_meta["full_path"])))
files[".txt"].append(file_meta)
# we want to move around with the files we load
# change dir into the dir of the target file
cwd = file_meta["dir"]
if cwd:
os.chdir(cwd)
return | 0.002633 |
def get_roles_by_account_sis_id(self, account_sis_id, params={}):
"""
List the roles for an account, for the passed account SIS ID.
"""
return self.get_roles_in_account(self._sis_id(account_sis_id,
sis_field="account"),
params) | 0.00565 |
def get_wsgi_server(self, name=None, defaults=None):
"""
Reads the configuration source and finds and loads a WSGI server
defined by the server entry with the name ``name`` per the PasteDeploy
configuration format and loading mechanism.
:param name: The named WSGI server to find, load and return. Defaults
to ``None`` which becomes ``main`` inside
:func:`paste.deploy.loadserver`.
:param defaults: The ``global_conf`` that will be used during server
instantiation.
:return: A WSGI server runner callable which accepts a WSGI app.
"""
name = self._maybe_get_default_name(name)
defaults = self._get_defaults(defaults)
return loadserver(
self.pastedeploy_spec,
name=name,
relative_to=self.relative_to,
global_conf=defaults,
) | 0.00221 |
def contribute_error_pages(self):
"""Contributes generic static error massage pages to an existing section."""
static_dir = self.settings.STATIC_ROOT
if not static_dir:
# Source static directory is not configured. Use temporary.
import tempfile
static_dir = os.path.join(tempfile.gettempdir(), self.project_name)
self.settings.STATIC_ROOT = static_dir
self.section.routing.set_error_pages(
common_prefix=os.path.join(static_dir, 'uwsgify')) | 0.005607 |
def request(self, *args, **kwargs):
"""
        Define a decorator to be called before a request.
        e.g. @middleware.request
"""
middleware = args[0]
@wraps(middleware)
def register_middleware(*args, **kwargs):
self.request_middleware.append(middleware)
return middleware
return register_middleware() | 0.005291 |
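A tiny self-contained illustration of the registration pattern above: the decorator's only real job is to append the wrapped function to a list on the owner object (the class here is made up for the demo).
from functools import wraps

class MiddlewareRegistry:
    def __init__(self):
        self.request_middleware = []

    def request(self, middleware):
        @wraps(middleware)
        def register_middleware(*args, **kwargs):
            self.request_middleware.append(middleware)
            return middleware
        return register_middleware()

registry = MiddlewareRegistry()

@registry.request
def add_headers(req):
    return req

assert registry.request_middleware == [add_headers]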
def log(*args, **kwargs):
"""Log things with the global logger."""
level = kwargs.pop('level', logging.INFO)
logger.log(level, *args, **kwargs) | 0.006452 |
def annotation_has_expired(event, key, timeout):
"""Check if an event error has expired."""
anns = get_annotations(event, key)
if anns:
return (time.time() - anns[0]["ts"]) > timeout
else:
return False | 0.004292 |
def parse_indicators(parts, ethnicity):
"""
Parses terms that may or may not be "indicators" of trafficking. Some terms are used for
non-trafficking related purposes (e.g. matching or identification problems). Also good to
note is that this list has been growing slowly for about 2 years and some indicators have
been removed/combined. Thus, you may notice that some numeric values are non-existent.
TODO: Move logic from hard-coded into JSON config file(s).
parts -> The backpage ad's posting_body, separated into substrings
ethnicity -> The ethnicity list that we parsed for the ad
"""
ret_val=[]
for part in parts:
part=part.lower()
part = part.replace('virginia', '').replace('fresh pot', '')
part = re.sub(r'virgin ?island', '', part)
part = re.sub(r'no teen', '', part)
if re.compile(r'new ?to ?the ?(usa?|country)').search(part):
ret_val.append(1)
if "natasha" in part or "svetlana" in part:
ret_val.append(2)
if 'young' in part:
ret_val.append(3)
if re.compile(r'just *(hit|turned) *18').search(part):
ret_val.append(5)
if re.compile(r'fresh *meat').search(part):
ret_val.append(6)
if 'virgin' in part:
ret_val.append(7)
if 'foreign' in part:
ret_val.append(8)
if re.compile(r'(just|fresh)( *off *)?( *the *)boat').search(part):
ret_val.append(9)
if re.compile(r'fresh from over ?sea').search(part):
ret_val.append(9)
if re.compile(r'easy *sex').search(part):
ret_val.append(10)
if re.compile(r'come *chat *with *me').search(part):
ret_val.append(11)
if re.compile(r'\b(massage|nuru)\b').search(part):
ret_val.append(12)
if re.compile(r'escort *agency').search(part):
ret_val.append(13)
if re.compile(r'((https?)|www)\.\w{5,30}?.com').search(part):
ret_val.append(14)
if (re.compile(r'world( )*series').search(part) or re.compile(r'grand( )*slam').search(part) or
re.compile(r'base( )?ball').search(part) or re.compile(r'double( )?play').search(part) or
'cws' in part or re.compile(r'home( )?run').search(part) or re.compile(r'batter( )?up').search(part) or
re.compile(r'triple( )?play').search(part) or re.compile(r'strike( )?out').search(part) or
'sports' in part):
ret_val.append(15)
if (re.compile(r'new ?girls').search(part) or re.compile(r'new ?arrivals').search(part) or
re.compile(r'just ?in ?from ? \w{3,15}\W').search(part) or re.compile(r'new \w{3,9} staff').search(part)):
ret_val.append(17)
if re.compile(r'brand *new').search(part):
ret_val.append(18)
if re.compile(r'coll(e|a)ge').search(part) and 15 not in ret_val:
ret_val.append(19)
if 'teen' in part:
ret_val.append(20)
if re.compile(r'high ?school').search(part):
ret_val.append(21)
if re.compile(r'daddy\'?s? ?little ?girl').search(part):
ret_val.append(22)
if 'fresh' in part:
ret_val.append(23)
phrases = [(r'100%' + re.escape(eth)) for eth in ethnicity]
if any(re.compile(phrase).search(part) for phrase in phrases):
ret_val.append(24)
if re.compile(r'speaks? \d\d? language').search(part):
ret_val.append(25)
if re.compile(r'new to the (country|us)').search(part):
ret_val.append(26)
if re.compile(r'massage ?parlou?r').search(part):
ret_val.append(27)
if re.compile(r'come see us at ').search(part):
ret_val.append(28)
if (re.compile(r'420 ?friendly').search(part) or re.compile(r'party ?friendly').search(part) or
re.compile(r'420 ?sp').search(part) or ' 420 ' in part):
ret_val.append(30)
if 'under 35' in part:
ret_val.append(31)
if re.compile(r'\b(avail(able)?|open) *24(/|\\|-)7\b').search(part):
ret_val.append(33)
        if re.compile(r'no ?(indian)').search(part) or re.compile(r'indians? not ((allow)|(welcome))').search(part):
ret_val.append(36)
        if re.compile(r'no ?(hispanic|mexican)').search(part) or re.compile(r'(hispanic|mexican)s? not ((allow)|(welcome))').search(part):
ret_val.append(37)
if 'incall' in part:
ret_val.append(38)
if 'outcall' in part:
ret_val.append(39)
parts = part.split('from ')
parts.pop(0)
for p in parts:
p = re.sub(r' +', ' ', p)
if p.split(' ')[0].lower() in countries:
ret_val.append(27)
break
eastern_euro_countries = ['estonia', 'latvia', 'lithuania', 'armenia', 'russia', 'kazakhstan', 'ukrain', 'belarus',
'moldova', 'czech', 'austria', 'croatia', 'hungary', 'poland', 'slovakia', 'slovenia',
'albania', 'bosnia', 'bulgaria', 'greece', 'macedonia', 'romania']
if any(c in part for c in eastern_euro_countries):
ret_val.append(28)
ret_val = list(set(ret_val))
return ret_val | 0.013837 |
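A small self-contained check of two of the patterns above (the URL check for code 14 and the 24/7 check for code 33), with the dot before 'com' escaped; the sample text is invented.
import re

def tiny_indicator_scan(text):
    # Append an indicator code when its phrase pattern matches the text.
    text = text.lower()
    codes = []
    if re.search(r'((https?)|www)\.\w{5,30}?\.com', text):
        codes.append(14)
    if re.search(r'\b(avail(able)?|open) *24(/|\\|-)7\b', text):
        codes.append(33)
    return codes

assert tiny_indicator_scan("visit www.example.com, available 24/7") == [14, 33]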
def _exception_message(excp):
"""Return the message from an exception as either a str or unicode object. Supports both
Python 2 and Python 3.
>>> msg = "Exception message"
>>> excp = Exception(msg)
>>> msg == _exception_message(excp)
True
>>> msg = u"unicöde"
>>> excp = Exception(msg)
>>> msg == _exception_message(excp)
True
"""
if isinstance(excp, Py4JJavaError):
# 'Py4JJavaError' doesn't contain the stack trace available on the Java side in 'message'
# attribute in Python 2. We should call 'str' function on this exception in general but
# 'Py4JJavaError' has an issue about addressing non-ascii strings. So, here we work
# around by the direct call, '__str__()'. Please see SPARK-23517.
return excp.__str__()
if hasattr(excp, "message"):
return excp.message
return str(excp) | 0.005624 |
def hash(self, value):
"""
Generate a hash of the given iterable.
This is for use in a cache key.
"""
if is_iterable(value):
value = tuple(to_bytestring(v) for v in value)
return hashlib.md5(six.b(':').join(value)).hexdigest() | 0.006969 |
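A minimal Python 3 sketch of the same cache-key idea, without the six/byte-string helpers used above: encode each item, join with ':', and take the MD5 hex digest.
import hashlib

def cache_key(values):
    # Join byte-encoded items with ':' and hash, as in the method above.
    joined = b':'.join(str(v).encode('utf-8') for v in values)
    return hashlib.md5(joined).hexdigest()

assert len(cache_key(['user', 42, 'profile'])) == 32  # MD5 hex digest length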
def deduplicate(
ctx, strategy, time_source, regexp, dry_run, message_id,
size_threshold, content_threshold, show_diff, maildirs):
""" Deduplicate mails from a set of maildir folders.
Run a first pass computing the canonical hash of each encountered mail from
their headers, then a second pass to apply the deletion strategy on each
subset of duplicate mails.
\b
Removal strategies for each subsets of duplicate mails:
- delete-older: Deletes the olders, keeps the newests.
- delete-oldest: Deletes the oldests, keeps the newers.
- delete-newer: Deletes the newers, keeps the oldests.
- delete-newest: Deletes the newests, keeps the olders.
- delete-smaller: Deletes the smallers, keeps the biggests.
- delete-smallest: Deletes the smallests, keeps the biggers.
- delete-bigger: Deletes the biggers, keeps the smallests.
- delete-biggest: Deletes the biggests, keeps the smallers.
- delete-matching-path: Deletes all duplicates whose file path match the
regular expression provided via the --regexp parameter.
- delete-non-matching-path: Deletes all duplicates whose file path
doesn't match the regular expression provided via the --regexp parameter.
    The deletion strategy on a duplicate set only applies if no major differences
    between mails are uncovered during the fine-grained check of differences in
    the second pass. Limits can be set via the threshold options.
"""
# Print help screen and exit if no maildir folder provided.
if not maildirs:
click.echo(ctx.get_help())
ctx.exit()
# Validate exclusive options requirement depending on strategy.
requirements = [
(time_source, '-t/--time-source', [
DELETE_OLDER, DELETE_OLDEST, DELETE_NEWER, DELETE_NEWEST]),
(regexp, '-r/--regexp', [
DELETE_MATCHING_PATH, DELETE_NON_MATCHING_PATH])]
for param_value, param_name, required_strategies in requirements:
if strategy in required_strategies:
if not param_value:
raise click.BadParameter(
'{} strategy requires the {} parameter.'.format(
strategy, param_name))
elif param_value:
raise click.BadParameter(
'{} parameter not allowed in {} strategy.'.format(
param_name, strategy))
conf = Config(
strategy=strategy,
time_source=time_source,
regexp=regexp,
dry_run=dry_run,
show_diff=show_diff,
message_id=message_id,
size_threshold=size_threshold,
content_threshold=content_threshold,
# progress=progress,
)
dedup = Deduplicate(conf)
logger.info('=== Start phase #1: load mails and compute hashes.')
for maildir in maildirs:
dedup.add_maildir(maildir)
logger.info('=== Start phase #2: deduplicate mails.')
dedup.run()
dedup.report() | 0.000332 |
def compose(*decorators):
"""Helper to compose decorators::
@a
@b
def f():
pass
Is equivalent to::
@compose(a, b)
def f():
...
"""
def composed(f):
for decor in reversed(decorators):
f = decor(f)
return f
return composed | 0.002985 |
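A quick runnable check of the helper above (assuming compose is in scope): composing two decorators applies them in the same order as stacking them.
def tag(label):
    def decorator(f):
        def wrapper(*args, **kwargs):
            return label + f(*args, **kwargs)
        return wrapper
    return decorator

@compose(tag('a'), tag('b'))
def greet():
    return 'c'

assert greet() == 'abc'  # same result as stacking @tag('a') above @tag('b')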
def pre_attention(self, segment, query_antecedent, memory_antecedent, bias):
"""Called prior to self-attention, to incorporate memory items.
Args:
segment: an integer Tensor with shape [batch]
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: must be None. Attention normally allows this to be a
Tensor with shape [batch, length_m, channels], but we currently only
support memory for decoder-side self-attention.
bias: bias Tensor (see attention_bias())
Returns:
(data, new_query_antecedent, new_memory_antecedent, new_bias)
"""
assert memory_antecedent is None, "We only support language modeling"
# In eval mode, batch size may be variable
memory_batch_size = tf.shape(self.previous_vals)[0]
current_batch_size = tf.shape(query_antecedent)[0]
amount_to_pad = memory_batch_size - current_batch_size
# If segment id is zero, don't attend back to the memory
previous_bias = self.previous_bias[:current_batch_size, :, :, :] + tf.cast(
tf.equal(segment[:, None, None, None], 0), tf.float32) * -1e9
sliced_previous_vals = self.previous_vals[:current_batch_size, :, :]
new_memory_antecedent = tf.concat(
[tf.stop_gradient(sliced_previous_vals), query_antecedent], 1)
new_bias = tf.concat([
tf.tile(tf.stop_gradient(previous_bias), [1, 1, self.chunk_length, 1]),
tf.tile(bias, [current_batch_size, 1, 1, 1]),
], -1)
remember_segment = tf.pad(segment, [[0, amount_to_pad]])
# TODO(kitaev): The code assumes that we always either increment the chunk
# number or reset it to zero. This assumption will not hold if we re-run the
# model for each token, e.g. for autoregressive greedy/beam/sampling decode.
remember_vals = tf.pad(query_antecedent,
[[0, amount_to_pad], [0, 0], [0, 0]])
# Query position is on axis -2 for bias: as long as a token can be attended
# to from at least one query position (i.e. it's not padding), memorize it.
remember_bias = tf.tile(
tf.reduce_max(bias, -2, keepdims=True), [memory_batch_size, 1, 1, 1])
# Assume that query_antecedent is always a full chunk (i.e. not truncated)
if self.chunk_length < self.tokens_to_cache:
remember_vals = tf.concat([self.previous_vals, remember_vals], 1)
remember_bias = tf.concat([
self.previous_bias - 1e9 * tf.cast(
tf.equal(
tf.pad(segment, [[0, amount_to_pad]])[:, None, None, None],
0), tf.float32),
remember_bias
], -1)
if self.chunk_length != self.tokens_to_cache:
remember_vals = remember_vals[:, -self.tokens_to_cache:, :]
remember_bias = remember_bias[:, :, :, -self.tokens_to_cache:]
token = (remember_segment, remember_vals, remember_bias)
return token, query_antecedent, new_memory_antecedent, new_bias | 0.002384 |
def main(argv: typing.Optional[typing.Sequence] = None) -> typing.NoReturn:
"""Main entry point for the konch CLI."""
args = parse_args(argv)
if args["--debug"]:
logging.basicConfig(
format="%(levelname)s %(filename)s: %(message)s", level=logging.DEBUG
)
logger.debug(args)
config_file: typing.Union[Path, None]
if args["init"]:
config_file = Path(args["<config_file>"] or CONFIG_FILE)
init_config(config_file)
else:
config_file = Path(args["<config_file>"]) if args["<config_file>"] else None
if args["edit"]:
edit_config(config_file)
elif args["allow"]:
allow_config(config_file)
elif args["deny"]:
deny_config(config_file)
mod = use_file(Path(args["--file"]) if args["--file"] else None)
if hasattr(mod, "setup"):
mod.setup() # type: ignore
if args["--name"]:
if args["--name"] not in _config_registry:
print_error(f'Invalid --name: "{args["--name"]}"')
sys.exit(1)
config_dict = _config_registry[args["--name"]]
logger.debug(f'Using named config: "{args["--name"]}"')
logger.debug(config_dict)
else:
config_dict = _cfg
    # Allow default shell to be overridden by command-line argument
shell_name = args["--shell"]
if shell_name:
config_dict["shell"] = SHELL_MAP.get(shell_name.lower(), AutoShell)
logger.debug(f"Starting with config {config_dict}")
start(**config_dict)
if hasattr(mod, "teardown"):
mod.teardown() # type: ignore
sys.exit(0) | 0.001852 |
def _library_path(self):
"""Return full path to the shared library.
        A couple of regular unix paths like ``/usr/lib/`` are searched by
default. If your library is not in one of those, set a
``LD_LIBRARY_PATH`` environment variable to the directory with your
shared library.
If the library cannot be found, a ``RuntimeError`` with debug
information is raised.
"""
# engine is an existing library name
# TODO change add directory to library path
if os.path.isfile(self.engine):
return self.engine
pathname = 'LD_LIBRARY_PATH'
separator = ':'
if platform.system() == 'Darwin':
pathname = 'DYLD_LIBRARY_PATH'
separator = ':'
if platform.system() == 'Windows':
# windows does not separate between dll path's and exe paths
pathname = 'PATH'
separator = ';'
lib_path_from_environment = os.environ.get(pathname, '')
# Expand the paths with the system path if it exists
if lib_path_from_environment:
known_paths = [
path for path in lib_path_from_environment.split(separator)] + self.known_paths
else:
known_paths = self.known_paths
# expand ~
known_paths = [os.path.expanduser(path) for path in known_paths]
possible_libraries = [os.path.join(path, self._libname())
for path in known_paths]
for library in possible_libraries:
if os.path.exists(library):
logger.info("Using model fortran library %s", library)
return library
msg = "Library not found, looked in %s" % ', '.join(possible_libraries)
raise RuntimeError(msg) | 0.001666 |
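A condensed sketch of the same search strategy: pick the platform-specific path variable, split it, and probe each directory for the library file. The library name in the final comment is hypothetical.
import os
import platform

def find_shared_library(libname, extra_dirs=()):
    # Windows mixes DLL and EXE paths in PATH; macOS uses DYLD_LIBRARY_PATH.
    system = platform.system()
    pathvar = {'Darwin': 'DYLD_LIBRARY_PATH', 'Windows': 'PATH'}.get(system, 'LD_LIBRARY_PATH')
    separator = ';' if system == 'Windows' else ':'
    dirs = [d for d in os.environ.get(pathvar, '').split(separator) if d]
    dirs += [os.path.expanduser(d) for d in extra_dirs]
    for directory in dirs:
        candidate = os.path.join(directory, libname)
        if os.path.exists(candidate):
            return candidate
    return None

# e.g. find_shared_library('libexample.so', extra_dirs=['~/lib'])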
def _set_clear_mpls_ldp_neighbor(self, v, load=False):
"""
Setter method for clear_mpls_ldp_neighbor, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_ldp_neighbor (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_clear_mpls_ldp_neighbor is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_clear_mpls_ldp_neighbor() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=clear_mpls_ldp_neighbor.clear_mpls_ldp_neighbor, is_leaf=True, yang_name="clear-mpls-ldp-neighbor", rest_name="clear-mpls-ldp-neighbor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clearMplsLdpNeighbor'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """clear_mpls_ldp_neighbor must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=clear_mpls_ldp_neighbor.clear_mpls_ldp_neighbor, is_leaf=True, yang_name="clear-mpls-ldp-neighbor", rest_name="clear-mpls-ldp-neighbor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clearMplsLdpNeighbor'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""",
})
self.__clear_mpls_ldp_neighbor = t
if hasattr(self, '_set'):
self._set() | 0.006204 |
def find_issues(self, criteria={}, jql=None, order='KEY ASC', verbose=False, changelog=True):
"""Return a list of issues with changelog metadata.
Searches for the `issue_types`, `project`, `valid_resolutions` and
'jql_filter' set in the passed-in `criteria` object.
Pass a JQL string to further qualify the query results.
"""
query = []
if criteria.get('project', False):
query.append('project IN (%s)' % ', '.join(['"%s"' % p for p in criteria['project']]))
if criteria.get('issue_types', False):
query.append('issueType IN (%s)' % ', '.join(['"%s"' % t for t in criteria['issue_types']]))
if criteria.get('valid_resolutions', False):
query.append('(resolution IS EMPTY OR resolution IN (%s))' % ', '.join(['"%s"' % r for r in criteria['valid_resolutions']]))
if criteria.get('jql_filter') is not None:
query.append('(%s)' % criteria['jql_filter'])
if jql is not None:
query.append('(%s)' % jql)
queryString = "%s ORDER BY %s" % (' AND '.join(query), order,)
if verbose:
print("Fetching issues with query:", queryString)
fromRow=0
issues = []
while True:
try:
if changelog:
pageofissues = self.jira.search_issues(queryString, expand='changelog', maxResults=self.settings['max_results'],startAt=fromRow)
else:
pageofissues = self.jira.search_issues(queryString, maxResults=self.settings['max_results'],startAt=fromRow)
fromRow = fromRow + int(self.settings['max_results'])
issues += pageofissues
if verbose:
print("Got %s lines per jira query from result starting at line number %s " % (self.settings['max_results'], fromRow))
if len(pageofissues)==0:
break
except JIRAError as e:
print("Jira query error with: {}\n{}".format(queryString, e))
return []
if verbose:
print("Fetched", len(issues), "issues")
return issues | 0.006381 |
def chi_eff(mass1, mass2, spin1z, spin2z):
"""Returns the effective spin from mass1, mass2, spin1z, and spin2z."""
return (spin1z * mass1 + spin2z * mass2) / (mass1 + mass2) | 0.005525 |
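A quick numeric check of the mass-weighted average above: equal masses with aligned spins 0.2 and 0.6 give an effective spin of 0.4.
def chi_eff_check(mass1, mass2, spin1z, spin2z):
    return (spin1z * mass1 + spin2z * mass2) / (mass1 + mass2)

assert abs(chi_eff_check(10.0, 10.0, 0.2, 0.6) - 0.4) < 1e-12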
def unregister_watch(self, uid):
"""
Unregister the watch with the given UUID.
"""
# Do not raise an error if UUID is
# not present in the watches.
Log.info("Unregister a watch with uid: " + str(uid))
self.watches.pop(uid, None) | 0.003906 |
def __SoInit(self):
        '''The fast_cut function needs thulac.so; the .so file is imported here.'''
if(not self.__user_specified_dict_name):
self.__user_specified_dict_name = ''
return SoExtention(self.__prefix, self.__user_specified_dict_name, self.__useT2S, self.__seg_only) | 0.010989 |
def arg_match(m_arg, arg, comparator=eq, default=False):
"""
:param m_arg: value to match against or callable
:param arg: arg to match
:param comparator: function that returns True if m_arg and arg match
:param default: will be returned if m_arg is None
if m_arg is a callable it will be called with arg
>>> arg_match(1, 1)
True
>>> arg_match(1, 2)
    False
You can match by sub args by passing in a dict
>>> from collections import namedtuple
>>> Msg = namedtuple("msg", ["note", "type"])
>>> m = Msg(note=1, type="note_on")
>>> arg_match(dict(note=1), m)
True
"""
if m_arg is None:
return default
if isinstance(m_arg, dict):
for name, value in m_arg.items():
name, _comparator = arg_comparitor(name)
subarg = getattr(arg, name, InvalidArg)
if subarg is InvalidArg:
return subarg
matched = arg_match(subarg, value, _comparator, default)
if not matched or matched is InvalidArg:
return False
return True
else:
if hasattr(m_arg, "__call__"):
return m_arg(arg)
else:
return comparator(arg, m_arg) | 0.000812 |
def _set_dscp_exp(self, v, load=False):
"""
Setter method for dscp_exp, mapped from YANG variable /qos_mpls/map/dscp_exp (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_dscp_exp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dscp_exp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("dscp_exp_map_name",dscp_exp.dscp_exp, yang_name="dscp-exp", rest_name="dscp-exp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-exp-map-name', extensions={u'tailf-common': {u'info': u'Configure Dscp exp', u'cli-sequence-commands': None, u'callpoint': u'QosMplsCmd3Callpoint', u'cli-mode-name': u'dscp-exp-$(dscp-exp-map-name)'}}), is_container='list', yang_name="dscp-exp", rest_name="dscp-exp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Dscp exp', u'cli-sequence-commands': None, u'callpoint': u'QosMplsCmd3Callpoint', u'cli-mode-name': u'dscp-exp-$(dscp-exp-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mpls', defining_module='brocade-qos-mpls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dscp_exp must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("dscp_exp_map_name",dscp_exp.dscp_exp, yang_name="dscp-exp", rest_name="dscp-exp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-exp-map-name', extensions={u'tailf-common': {u'info': u'Configure Dscp exp', u'cli-sequence-commands': None, u'callpoint': u'QosMplsCmd3Callpoint', u'cli-mode-name': u'dscp-exp-$(dscp-exp-map-name)'}}), is_container='list', yang_name="dscp-exp", rest_name="dscp-exp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Dscp exp', u'cli-sequence-commands': None, u'callpoint': u'QosMplsCmd3Callpoint', u'cli-mode-name': u'dscp-exp-$(dscp-exp-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mpls', defining_module='brocade-qos-mpls', yang_type='list', is_config=True)""",
})
self.__dscp_exp = t
if hasattr(self, '_set'):
self._set() | 0.00436 |
def addlogo(community_id, logo):
"""Add logo to the community."""
# Create the bucket
c = Community.get(community_id)
if not c:
click.secho('Community {0} does not exist.'.format(community_id),
fg='red')
return
ext = save_and_validate_logo(logo, logo.name, c.id)
c.logo_ext = ext
db.session.commit() | 0.002755 |
def norm(self):
""" Returns the norm of the quaternion
            norm = sqrt(w**2 + x**2 + y**2 + z**2)
"""
tmp = self.w**2 + self.x**2 + self.y**2 + self.z**2
return tmp**0.5 | 0.014423 |
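A quick sanity check of the corrected formula: the quaternion (1, 2, 2, 0) has norm sqrt(1 + 4 + 4 + 0) = 3.
def quaternion_norm(w, x, y, z):
    return (w**2 + x**2 + y**2 + z**2) ** 0.5

assert abs(quaternion_norm(1.0, 2.0, 2.0, 0.0) - 3.0) < 1e-12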
def raw(node):
"""
Add some raw html (possibly as a block)
"""
o = nodes.raw(node.literal, node.literal, format='html')
if node.sourcepos is not None:
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o += n
return o | 0.003774 |
def get_output_jsonpath_field(self, sub_output=None):
"""attempts to create an output jsonpath from a particular ouput field"""
if sub_output is not None:
if self.output_fields is None or\
(isinstance(self.output_fields, dict) and not sub_output in self.output_fields.itervalues()) or\
(isinstance(self.output_fields, list) and not sub_output in self.output_fields):
raise ValueError(
"Cannot generate output jsonpath because this ExtractorProcessor will not output {}".format(sub_output))
output_jsonpath_field = sub_output
else:
output_jsonpath_field = self.output_field
return output_jsonpath_field | 0.009447 |
def _parse_region(self, rec, line_iter):
"""Parse a section of an ISA-Tab, assigning information to a supplied record.
"""
had_info = False
keyvals, section = self._parse_keyvals(line_iter)
if keyvals:
rec.metadata = keyvals[0]
while section and section[0] != "STUDY":
had_info = True
keyvals, next_section = self._parse_keyvals(line_iter)
attr_name = self._sections[section[0]]
if attr_name in self._nolist:
try:
keyvals = keyvals[0]
except IndexError:
keyvals = {}
setattr(rec, attr_name, keyvals)
section = next_section
return rec, had_info | 0.003963 |
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
condition = pretty_str(self.condition)
pretty = '{}switch ({}):\n'.format(spaces, condition)
pretty += self.body.pretty_str(indent=indent + 2)
return pretty | 0.004739 |
def LDRD(cpu, dest1, dest2, src, offset=None):
"""Loads double width data from memory."""
assert dest1.type == 'register'
assert dest2.type == 'register'
assert src.type == 'memory'
mem1 = cpu.read_int(src.address(), 32)
mem2 = cpu.read_int(src.address() + 4, 32)
writeback = cpu._compute_writeback(src, offset)
dest1.write(mem1)
dest2.write(mem2)
cpu._cs_hack_ldr_str_writeback(src, offset, writeback) | 0.004149 |
def _actuator_on_off(self, on_off, service_location_id, actuator_id,
duration=None):
"""
Turn actuator on or off
Parameters
----------
on_off : str
'on' or 'off'
service_location_id : int
actuator_id : int
duration : int, optional
300,900,1800 or 3600 , specifying the time in seconds the actuator
should be turned on. Any other value results in turning on for an
undetermined period of time.
Returns
-------
requests.Response
"""
url = urljoin(URLS['servicelocation'], service_location_id,
"actuator", actuator_id, on_off)
headers = {"Authorization": "Bearer {}".format(self.access_token)}
if duration is not None:
data = {"duration": duration}
else:
data = {}
r = requests.post(url, headers=headers, json=data)
r.raise_for_status()
return r | 0.002956 |