text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def anchore_print(msg, do_formatting=False):
"""
Print to stdout using the proper formatting for the command.
:param msg: output to be printed, either an object or a string. Objects will be serialized according to config
:param do_formatting: if True, msg is run through the configured formatter before printing
:return:
"""
if do_formatting:
click.echo(formatter(msg))
else:
click.echo(msg) | 0.005797 |
def _parse_xdot_directive(self, name, new):
""" Handles parsing Xdot drawing directives.
"""
parser = XdotAttrParser()
components = parser.parse_xdot_data(new)
# The absolute coordinate of the drawing container wrt graph origin.
x1 = min( [c.x for c in components] )
y1 = min( [c.y for c in components] )
print("X1/Y1:", name, x1, y1)
# Components are positioned relative to their container. This
# function positions the bottom-left corner of the components at
# their origin rather than relative to the graph.
# move_to_origin( components )
for c in components:
if isinstance(c, Ellipse):
c.x_origin -= x1
c.y_origin -= y1
# c.position = [ c.x - x1, c.y - y1 ]
elif isinstance(c, (Polygon, BSpline)):
print("Points:", c.points)
c.points = [ (t[0] - x1, t[1] - y1) for t in c.points ]
print("Points:", c.points)
elif isinstance(c, Text):
# font = str_to_font( str(c.pen.font) )
c.text_x, c.text_y = c.x - x1, c.y - y1
container = Container(auto_size=True,
position=[ x1, y1 ],
bgcolor="yellow")
container.add( *components )
if name == "_draw_":
self.drawing = container
elif name == "_hdraw_":
self.arrowhead_drawing = container
else:
raise ValueError("Unknown Xdot drawing directive: %r" % name)
def INIT_TLS_SESSION(self):
"""
XXX We should offer the right key according to the client's suites. For
now server_rsa_key is only used for RSAkx, but we should try to replace
every server_key with both server_rsa_key and server_ecdsa_key.
"""
self.cur_session = tlsSession(connection_end="server")
self.cur_session.server_certs = [self.mycert]
self.cur_session.server_key = self.mykey
if isinstance(self.mykey, PrivKeyRSA):
self.cur_session.server_rsa_key = self.mykey
# elif isinstance(self.mykey, PrivKeyECDSA):
# self.cur_session.server_ecdsa_key = self.mykey
raise self.WAITING_CLIENTFLIGHT1() | 0.002821 |
def start(self):
"""Start the app for the engines subcommand."""
self.log.info("IPython cluster: started")
# First see if the cluster is already running
# Now log and daemonize
self.log.info(
'Starting engines with [daemon=%r]' % self.daemonize
)
# TODO: Get daemonize working on Windows or as a Windows Server.
if self.daemonize:
if os.name=='posix':
daemonize()
dc = ioloop.DelayedCallback(self.start_engines, 0, self.loop)
dc.start()
# Now write the new pid file AFTER our new forked pid is active.
# self.write_pid_file()
try:
self.loop.start()
except KeyboardInterrupt:
pass
except zmq.ZMQError as e:
if e.errno == errno.EINTR:
pass
else:
raise | 0.003367 |
def record_absent(name, zone, type, data, profile):
'''
Ensures a record is absent.
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param zone: Zone where the requested record is created, the domain name
:type zone: ``str``
:param type: DNS record type (A, AAAA, ...).
:type type: ``str``
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param profile: The profile key
:type profile: ``str``
'''
zones = __salt__['libcloud_dns.list_zones'](profile)
try:
matching_zone = [z for z in zones if z['domain'] == zone][0]
except IndexError:
return state_result(False, 'Zone could not be found', name)
records = __salt__['libcloud_dns.list_records'](matching_zone['id'], profile)
matching_records = [record for record in records
if record['name'] == name and
record['type'] == type and
record['data'] == data]
if matching_records:
result = []
for record in matching_records:
result.append(__salt__['libcloud_dns.delete_record'](
matching_zone['id'],
record['id'],
profile))
return state_result(all(result), 'Removed {0} records'.format(len(result)), name)
else:
return state_result(True, 'Records already absent', name) | 0.001854 |
def identifier_simple(mesh):
"""
Return a basic identifier for a mesh, consisting of properties
that have been hand tuned to be somewhat robust to rigid
transformations and different tessellations.
Parameters
----------
mesh : Trimesh object
Source geometry
Returns
----------
identifier : (6,) float
Identifying values of the mesh
"""
# verify the cache once
mesh._cache.verify()
# don't check hashes during identifier as we aren't
# changing any data values of the mesh inside block
# if we did change values in cache block things would break
with mesh._cache:
# pre-allocate identifier so indexes of values can't move around
# like they might if we used hstack or something else
identifier = np.zeros(6, dtype=np.float64)
# avoid thrashing the cache unnecessarily
mesh_area = mesh.area
# start with properties that are valid regardless of watertightness
# note that we're going to try to make all parameters relative
# to area so other values don't get blown up at weird scales
identifier[0] = mesh_area
# topological constant and the only thing we can really
# trust in this fallen world
identifier[1] = mesh.euler_number
# if we have a watertight mesh include volume and inertia
if mesh.is_volume:
# side length of a cube ratio
# 1.0 for cubes, different values for other things
identifier[2] = (((mesh_area / 6.0) ** (1.0 / 2.0)) /
(mesh.volume ** (1.0 / 3.0)))
# save vertices for radius calculation
vertices = mesh.vertices - mesh.center_mass
# we are going to special case radially symmetric meshes
# to replace their surface area with ratio of their
# surface area to a primitive sphere or cylinder surface area
# this is because tessellated curved surfaces are really rough
# to reliably hash as they are very sensitive to floating point
# and tessellation error. By making area proportionate to a fit
# primitive area we are able to reliably hash at more sigfigs
if mesh.symmetry == 'radial':
# cylinder height
h = np.dot(vertices, mesh.symmetry_axis).ptp()
# section radius
R2 = (np.dot(vertices, mesh.symmetry_section.T)
** 2).sum(axis=1).max()
# area of a cylinder primitive
area = (2 * np.pi * (R2**.5) * h) + (2 * np.pi * R2)
# replace area in this case with area ratio
identifier[0] = mesh_area / area
elif mesh.symmetry == 'spherical':
# handle a spherically symmetric mesh
R2 = (vertices ** 2).sum(axis=1).max()
area = 4 * np.pi * R2
identifier[0] = mesh_area / area
else:
# if we don't have a watertight mesh add information about the
# convex hull, which is slow to compute and unreliable;
# just what we're looking for in a hash, but hey
identifier[3] = mesh_area / mesh.convex_hull.area
# cube side length ratio for the hull
identifier[4] = (((mesh.convex_hull.area / 6.0) ** (1.0 / 2.0)) /
(mesh.convex_hull.volume ** (1.0 / 3.0)))
vertices = mesh.vertices - mesh.centroid
# add in max radius^2 to area ratio
R2 = (vertices ** 2).sum(axis=1).max()
identifier[5] = R2 / mesh_area
return identifier | 0.000271 |
def duration(self):
"""
Return a timedelta for this build.
Measure the time between this build's start and end time, or "now"
if the build has not yet finished.
:returns: timedelta object
"""
if self.completion_ts:
end = self.completed
else:
end = datetime.utcnow()
return end - self.started | 0.005141 |
def get_fullpath(self, withext=True):
"""Return the filepath with the filename
:param withext: If True, return with the fileextension.
:type withext: bool
:returns: the file path joined with the file name
:rtype: str
:raises: None
"""
p = self.get_path(self._obj)
n = self.get_name(self._obj, withext)
fp = os.path.join(p,n)
return os.path.normpath(fp) | 0.007371 |
def left_indent(text, indent=12, end='\n'):
''' A bit of the ol' ultraviolence :-/ '''
indent = ' ' * indent
lines = [indent + line for line in text.splitlines(True)]
lines.append(end)
return ''.join(lines) | 0.004405 |
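A quick behaviour sketch for left_indent() above: each line of the input is prefixed with the requested number of spaces and the `end` string is appended at the bottom.

print(left_indent("first\nsecond", indent=4))
# gives:
#     first
#     second
# (followed by the extra trailing newline appended via `end`)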
def random_int(self, min=0, max=9999, step=1):
"""
Returns a random integer between two values.
:param min: lower bound value (inclusive; default=0)
:param max: upper bound value (inclusive; default=9999)
:param step: range step (default=1)
:returns: random integer between min and max
"""
return self.generator.random.randrange(min, max + 1, step) | 0.004843 |
def cluster_on_extra_high_voltage(network, busmap, with_time=True):
""" Main function of the EHV-Clustering approach. Creates a new clustered
pypsa.Network given a busmap mapping all bus_ids to other bus_ids of the
same network.
Parameters
----------
network : pypsa.Network
Container for all network components.
busmap : dict
Maps old bus_ids to new bus_ids.
with_time : bool
If true time-varying data will also be aggregated.
Returns
-------
network : pypsa.Network
Container for all network components of the clustered network.
"""
network_c = Network()
buses = aggregatebuses(
network, busmap, {
'x': _leading(
busmap, network.buses), 'y': _leading(
busmap, network.buses)})
# keep attached lines
lines = network.lines.copy()
mask = lines.bus0.isin(buses.index)
lines = lines.loc[mask, :]
# keep attached links
links = network.links.copy()
mask = links.bus0.isin(buses.index)
links = links.loc[mask, :]
# keep attached transformer
transformers = network.transformers.copy()
mask = transformers.bus0.isin(buses.index)
transformers = transformers.loc[mask, :]
io.import_components_from_dataframe(network_c, buses, "Bus")
io.import_components_from_dataframe(network_c, lines, "Line")
io.import_components_from_dataframe(network_c, links, "Link")
io.import_components_from_dataframe(network_c, transformers, "Transformer")
if with_time:
network_c.snapshots = network.snapshots
network_c.set_snapshots(network.snapshots)
network_c.snapshot_weightings = network.snapshot_weightings.copy()
# dealing with generators
network.generators.control = "PV"
network.generators['weight'] = 1
new_df, new_pnl = aggregategenerators(network, busmap, with_time)
io.import_components_from_dataframe(network_c, new_df, 'Generator')
for attr, df in iteritems(new_pnl):
io.import_series_from_dataframe(network_c, df, 'Generator', attr)
# dealing with all other components
aggregate_one_ports = components.one_port_components.copy()
aggregate_one_ports.discard('Generator')
for one_port in aggregate_one_ports:
new_df, new_pnl = aggregateoneport(
network, busmap, component=one_port, with_time=with_time)
io.import_components_from_dataframe(network_c, new_df, one_port)
for attr, df in iteritems(new_pnl):
io.import_series_from_dataframe(network_c, df, one_port, attr)
network_c.determine_network_topology()
return network_c | 0.000377 |
def Emailer(recipients, sender=None):
""" Sends messages as emails to the given list
of recipients. """
import smtplib
hostname = socket.gethostname()
if not sender:
sender = 'lggr@{0}'.format(hostname)
smtp = smtplib.SMTP('localhost')
try:
while True:
logstr = (yield)
try:
smtp.sendmail(sender, recipients, logstr)
except smtplib.SMTPException:
pass
except GeneratorExit:
smtp.quit() | 0.001949 |
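A minimal sketch of driving the Emailer coroutine above. The recipient address and log line are illustrative, and a local SMTP server is assumed to be listening.

emailer = Emailer(['ops@example.com'])   # hypothetical recipient
next(emailer)                            # prime the coroutine up to the first (yield)
emailer.send('CRITICAL: disk almost full on host web-1')
emailer.close()                          # raises GeneratorExit inside -> smtp.quit()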
async def message_fields(self, msg, fields, obj=None):
"""
Load/dump individual message fields
:param msg:
:param fields:
:param obj:
:return:
"""
for field in fields:
await self.message_field(msg, field, obj)
return msg | 0.006579 |
def delete_table(cls):
"""
delete_table
Manually delete a temporary table for the model in the test database.
:return:
"""
schema_editor = getattr(connection, 'schema_editor', None)
if schema_editor:
with connection.schema_editor() as schema_editor:
schema_editor.delete_model(cls)
else:
cursor = connection.cursor()
try:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'unknown table')
cursor.execute('DROP TABLE IF EXISTS {0}'.format(cls._meta.db_table))
finally:
cursor.close() | 0.004335 |
def kubectl(*args, input=None, **flags):
"""Simple wrapper to kubectl."""
# Build command line call.
line = ['kubectl'] + list(args)
line = line + get_flag_args(**flags)
if input is not None:
line = line + ['-f', '-']
# Run subprocess
output = subprocess.run(
line,
input=input,
capture_output=True,
text=True
)
return output | 0.002494 |
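Illustrative calls to the kubectl() wrapper above, assuming the kubectl binary is on PATH and that get_flag_args() (not shown) turns keyword arguments into command-line flags.

result = kubectl('get', 'pods', namespace='default', output='json')
print(result.returncode, result.stdout[:200])

# Piping a manifest through stdin takes the input= path ('-f -'):
with open('deployment.yaml') as fh:      # hypothetical manifest file
    manifest = fh.read()
result = kubectl('apply', input=manifest)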
def estimate_bitstring_probs(results):
"""
Given an array of single shot results estimate the probability distribution over all bitstrings.
:param np.array results: A 2d array where the outer axis iterates over shots
and the inner axis over bits.
:return: An array with as many axes as there are qubits and normalized such that it sums to one.
``p[i,j,...,k]`` gives the estimated probability of bitstring ``ij...k``.
:rtype: np.array
"""
nshots, nq = np.shape(results)
outcomes = np.array([int("".join(map(str, r)), 2) for r in results])
probs = np.histogram(outcomes, bins=np.arange(-.5, 2 ** nq, 1))[0] / float(nshots)
return _bitstring_probs_by_qubit(probs) | 0.008368 |
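A small worked example of the estimation step in estimate_bitstring_probs() above (the final _bitstring_probs_by_qubit reshaping helper is not shown, so only the histogram is reproduced here); numpy is assumed to be imported as np.

import numpy as np

results = np.array([[0, 1],    # four shots over two qubits
                    [0, 1],
                    [1, 0],
                    [1, 1]])
outcomes = np.array([int("".join(map(str, r)), 2) for r in results])   # [1, 1, 2, 3]
probs = np.histogram(outcomes, bins=np.arange(-.5, 2 ** 2, 1))[0] / 4.0
# probs == [0.0, 0.5, 0.25, 0.25] for bitstrings 00, 01, 10, 11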
def group(self, key, condition, initial, reduce, finalize=None, **kwargs):
"""Perform a query similar to an SQL *group by* operation.
**DEPRECATED** - The group command was deprecated in MongoDB 3.4. The
:meth:`~group` method is deprecated and will be removed in PyMongo 4.0.
Use :meth:`~aggregate` with the `$group` stage or :meth:`~map_reduce`
instead.
.. versionchanged:: 3.5
Deprecated the group method.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 2.2
Removed deprecated argument: command
"""
warnings.warn("The group method is deprecated and will be removed in "
"PyMongo 4.0. Use the aggregate method with the $group "
"stage or the map_reduce method instead.",
DeprecationWarning, stacklevel=2)
group = {}
if isinstance(key, string_type):
group["$keyf"] = Code(key)
elif key is not None:
group = {"key": helpers._fields_list_to_dict(key, "key")}
group["ns"] = self.__name
group["$reduce"] = Code(reduce)
group["cond"] = condition
group["initial"] = initial
if finalize is not None:
group["finalize"] = Code(finalize)
cmd = SON([("group", group)])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads(session=None) as (sock_info, slave_ok):
return self._command(sock_info, cmd, slave_ok,
collation=collation,
user_fields={'retval': 1})["retval"] | 0.001151 |
def list_values(hive, key=None, use_32bit_registry=False, include_default=True):
'''
Enumerates the values in a registry key or hive.
Args:
hive (str):
The name of the hive. Can be one of the following:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
key (str):
The key (looks like a path) to the value name. If a key is not
passed, the values under the hive will be returned.
use_32bit_registry (bool):
Accesses the 32bit portion of the registry on 64 bit installations.
On 32bit machines this is ignored.
include_default (bool):
Toggle whether to include the '(Default)' value.
Returns:
list: A list of values under the hive or key.
Usage:
.. code-block:: python
import salt.utils.win_reg
winreg.list_values(hive='HKLM', key='SYSTEM\\CurrentControlSet\\Services\\Tcpip')
'''
local_hive = _to_unicode(hive)
local_key = _to_unicode(key)
registry = Registry()
try:
hkey = registry.hkeys[local_hive]
except KeyError:
raise CommandExecutionError('Invalid Hive: {0}'.format(local_hive))
access_mask = registry.registry_32[use_32bit_registry]
handle = None
values = list()
try:
handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)
for i in range(win32api.RegQueryInfoKey(handle)[1]):
vname, vdata, vtype = win32api.RegEnumValue(handle, i)
if not vname:
if not include_default:
continue
vname = '(Default)'
value = {'hive': local_hive,
'key': local_key,
'vname': _to_mbcs(vname),
'vtype': registry.vtype_reverse[vtype],
'success': True}
# Only convert text types to unicode
if vtype == win32con.REG_MULTI_SZ:
value['vdata'] = [_to_mbcs(i) for i in vdata]
elif vtype in [win32con.REG_SZ, win32con.REG_EXPAND_SZ]:
value['vdata'] = _to_mbcs(vdata)
else:
value['vdata'] = vdata
values.append(value)
except Exception as exc: # pylint: disable=E0602
log.debug(r'Cannot find key: %s\%s', hive, key, exc_info=True)
return False, r'Cannot find key: {0}\{1}'.format(hive, key)
finally:
if handle:
handle.Close()
return values | 0.001122 |
def ensure_dir(path):
"""Ensure directory exists.
Args:
path(str): dir path
"""
dirpath = os.path.dirname(path)
if dirpath and not os.path.exists(dirpath):
os.makedirs(dirpath) | 0.004673 |
def expected_os2_weight(style):
"""The weight name and the expected OS/2 usWeightClass value inferred from
the style part of the font name.
The Google Fonts API which serves the fonts can only serve
the following weight values with the corresponding subfamily styles:
250, Thin
275, ExtraLight
300, Light
400, Regular
500, Medium
600, SemiBold
700, Bold
800, ExtraBold
900, Black
Thin is not set to 100 because of legacy Windows GDI issues:
https://www.adobe.com/devnet/opentype/afdko/topic_font_wt_win.html
"""
if not style:
return None
# Weight name to value mapping:
GF_API_WEIGHTS = {
"Thin": 250,
"ExtraLight": 275,
"Light": 300,
"Regular": 400,
"Medium": 500,
"SemiBold": 600,
"Bold": 700,
"ExtraBold": 800,
"Black": 900
}
if style == "Italic":
weight_name = "Regular"
elif style.endswith("Italic"):
weight_name = style.replace("Italic", "")
else:
weight_name = style
expected = GF_API_WEIGHTS[weight_name]
return weight_name, expected | 0.009381 |
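A few spot checks of expected_os2_weight() above, following its style-to-weight mapping:

assert expected_os2_weight("Bold") == ("Bold", 700)
assert expected_os2_weight("BoldItalic") == ("Bold", 700)   # "Italic" suffix is stripped
assert expected_os2_weight("Italic") == ("Regular", 400)    # a plain Italic maps to Regular
assert expected_os2_weight("") is None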
def print_func_call(ignore_first_arg=False, max_call_number=100):
""" utility function to facilitate debug, it will print input args before
function call, and print return value after function call
usage:
@print_func_call()
def some_func_to_be_debugged():
pass
:param ignore_first_arg: whether to print the first arg or not.
Useful for ignoring the `self` parameter of an object method call.
:param max_call_number: maximum number of calls allowed before an exception is raised.
"""
from functools import wraps
def display(x):
x = to_string(x)
try:
x.decode('ascii')
except BaseException:
return 'NON_PRINTABLE'
return x
local = {'call_number': 0}
def inner(f):
@wraps(f)
def wrapper(*args, **kwargs):
local['call_number'] += 1
tmp_args = args[1:] if ignore_first_arg and len(args) else args
this_call_number = local['call_number']
print(('{0}#{1} args: {2}, {3}'.format(
f.__name__,
this_call_number,
', '.join([display(x) for x in tmp_args]),
', '.join(display(key) + '=' + to_string(value)
for key, value in kwargs.items())
)))
res = f(*args, **kwargs)
print(('{0}#{1} return: {2}'.format(
f.__name__,
this_call_number,
display(res))))
if local['call_number'] > max_call_number:
raise Exception("Touch max call number!")
return res
return wrapper
return inner | 0.000635 |
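An illustrative use of print_func_call() above. Note that it is a decorator factory, so it is applied with parentheses; to_string() is assumed to be a stringifying helper available in the original module.

@print_func_call(max_call_number=10)
def add(a, b):
    return a + b

add(2, 3)
# prints one line with the arguments before the call and one with the
# return value after it, both tagged with the call number (add#1 ...)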
def calc_adev_phase(phase, rate, mj, stride):
""" Main algorithm for adev() (stride=mj) and oadev() (stride=1)
see http://www.leapsecond.com/tools/adev_lib.c
stride = mj for nonoverlapping allan deviation
Parameters
----------
phase: np.array
Phase data in seconds.
rate: float
The sampling rate for phase or frequency, in Hz
mj: int
M index value for stride
stride: int
Size of stride
Returns
-------
(dev, deverr, n): tuple
Array of computed values.
Notes
-----
stride = mj for nonoverlapping Allan deviation
stride = 1 for overlapping Allan deviation
References
----------
* http://en.wikipedia.org/wiki/Allan_variance
* http://www.leapsecond.com/tools/adev_lib.c
NIST SP 1065, eqn (7) and (11) page 16
"""
mj = int(mj)
stride = int(stride)
d2 = phase[2 * mj::stride]
d1 = phase[1 * mj::stride]
d0 = phase[::stride]
n = min(len(d0), len(d1), len(d2))
if n == 0:
warnings.warn("Data array length is too small: %i" % len(phase), RuntimeWarning)
n = 1
v_arr = d2[:n] - 2 * d1[:n] + d0[:n]
s = np.sum(v_arr * v_arr)
dev = np.sqrt(s / (2.0 * n)) / mj * rate
deverr = dev / np.sqrt(n)
return dev, deverr, n | 0.001538 |
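A sketch of calling calc_adev_phase() above on synthetic data, assuming numpy is imported as np: white phase noise sampled at 1 Hz, evaluated at averaging factor mj = 10.

import numpy as np

rate = 1.0                                        # samples per second
phase = np.random.normal(scale=1e-9, size=1000)   # phase data in seconds

# overlapping Allan deviation at tau = mj / rate
dev, deverr, n = calc_adev_phase(phase, rate, mj=10, stride=1)

# non-overlapping variant uses stride = mj
dev_no, deverr_no, n_no = calc_adev_phase(phase, rate, mj=10, stride=10)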
def write_model_to_file(self, mdl, fn):
"""
Write a YANG model that was extracted from a source identifier
(URL or source .txt file) to a .yang destination file
:param mdl: YANG model, as a list of lines
:param fn: Name of the YANG model file
:return:
"""
# Write the model to file
output = ''.join(self.post_process_model(mdl, self.add_line_refs))
if fn:
fqfn = self.dst_dir + '/' + fn
if os.path.isfile(fqfn):
self.error("File '%s' exists" % fqfn)
return
with open(fqfn, 'w') as of:
of.write(output)
self.extracted_models.append(fn)
else:
self.error("Output file name can not be determined; YANG file not created") | 0.003559 |
def remote(func):
"""
Decorator to mark a function as invoking a remote procedure call.
When invoked in server mode, the function will be called; when
invoked in client mode, an RPC will be initiated.
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.mode == 'server':
# In server mode, call the function
return func(self, *args, **kwargs)
# Make sure we're connected
if not self.conn:
self.connect()
# Call the remote function
self.conn.send('CALL', func.__name__, args, kwargs)
# Receive the response
cmd, payload = self.conn.recv()
if cmd == 'ERR':
self.close()
raise Exception("Catastrophic error from server: %s" %
payload[0])
elif cmd == 'EXC':
exc_type = utils.find_entrypoint(None, payload[0])
raise exc_type(payload[1])
elif cmd != 'RES':
self.close()
raise Exception("Invalid command response from server: %s" % cmd)
return payload[0]
# Mark it a callable
wrapper._remote = True
# Return the wrapped function
return wrapper | 0.000814 |
def add_host(self, host_id=None, host='localhost', port=6379,
unix_socket_path=None, db=0, password=None,
ssl=False, ssl_options=None):
"""Adds a new host to the cluster. This is only really useful for
unittests as normally hosts are added through the constructor and
changes after the cluster has been used for the first time are
unlikely to make sense.
"""
if host_id is None:
raise RuntimeError('Host ID is required')
elif not isinstance(host_id, (int, long)):
raise ValueError('The host ID has to be an integer')
host_id = int(host_id)
with self._lock:
if host_id in self.hosts:
raise TypeError('Two hosts share the same host id (%r)' %
(host_id,))
self.hosts[host_id] = HostInfo(host_id=host_id, host=host,
port=port, db=db,
unix_socket_path=unix_socket_path,
password=password, ssl=ssl,
ssl_options=ssl_options)
self._hosts_age += 1 | 0.003265 |
def items(self):
"""Get an iter of VenvDirs and VenvFiles within the directory."""
contents = self.paths
contents = (
VenvFile(path.path) if path.is_file else VenvDir(path.path)
for path in contents
)
return contents | 0.007143 |
def bear_rhumb(ra1, dec1, ra2, dec2):
"""
Calculate the bearing of point 2 from point 1 along a Rhumb line.
The bearing is East of North and is in [0, 360), whereas position angle is also East of North but (-180,180]
Parameters
----------
ra1, dec1, ra2, dec2 : float
The sky coordinates (degrees) of the two points.
Returns
-------
bearing : float
The bearing of point 2 from point 1 along a Rhumb line (degrees).
"""
# verified against website to give correct results
phi1 = np.radians(dec1)
phi2 = np.radians(dec2)
lambda1 = np.radians(ra1)
lambda2 = np.radians(ra2)
dlambda = lambda2 - lambda1
dpsi = np.log(np.tan(np.pi / 4 + phi2 / 2) / np.tan(np.pi / 4 + phi1 / 2))
theta = np.arctan2(dlambda, dpsi)
return np.degrees(theta) | 0.002427 |
def get_nowait_from_queue(queue):
""" Collect all immediately available items from a queue """
data = []
for _ in range(queue.qsize()):
try:
data.append(queue.get_nowait())
except q.Empty:
break
return data | 0.003817 |
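A quick example of get_nowait_from_queue() above; the standard library queue module is assumed to be imported as q, as the except clause suggests.

import queue as q

work = q.Queue()
for i in range(3):
    work.put(i)

assert get_nowait_from_queue(work) == [0, 1, 2]   # drains without blocking
assert get_nowait_from_queue(work) == []          # nothing left to collect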
def get_values(self):
"""
Returns the cpd
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> cpd = TabularCPD('grade', 3, [[0.1, 0.1],
... [0.1, 0.1],
... [0.8, 0.8]],
... evidence='evi1', evidence_card=2)
>>> cpd.get_values()
array([[ 0.1, 0.1],
[ 0.1, 0.1],
[ 0.8, 0.8]])
"""
if self.variable in self.variables:
return self.values.reshape(self.cardinality[0], np.prod(self.cardinality[1:]))
else:
return self.values.reshape(1, np.prod(self.cardinality)) | 0.004149 |
def parse_input_args(input_args):
""" Parses EOWorkflow input arguments provided by user and raises an error if something is wrong. This is
done automatically in the process of workflow execution
"""
input_args = input_args if input_args else {}
for task, args in input_args.items():
if not isinstance(task, EOTask):
raise ValueError('Invalid input argument {}, should be an instance of EOTask'.format(task))
if not isinstance(args, (tuple, dict)):
raise ValueError('Execution input arguments of each task should be a dictionary or a tuple, for task '
'{} got arguments of type {}'.format(task.__class__.__name__, type(args)))
return input_args | 0.007538 |
def copy(self, other):
"""Copy metadata from another :py:class:`Metadata` object.
Returns the :py:class:`Metadata` object, allowing convenient
code like this::
md = Metadata().copy(other_md)
:param Metadata other: The metadata to copy.
:rtype: :py:class:`Metadata`
"""
# copy from other to self
self.data.update(other.data)
if other.comment is not None:
self.comment = other.comment
return self | 0.003976 |
def validate_options(self):
"""
Validate options
"""
for option in self._option_list:
# validate options
if option.required and option.key not in self._results:
raise RuntimeError('Option %s is required.' % option.name) | 0.006873 |
def from_array(cls, array, filename=None, channels=3, scale_to_range=False,
grayscale=False, channel_bitdepth=8,
has_alpha=False, palette=None):
"""
Creates an image by using a provided array. The array may be ready to
be written or still need fine-tuning via set_pixel().
The array should not have more than 3 dimensions or the output might be
unexpected.
"""
if scale_to_range:
amax = array.max()
amin = array.min()
_array = (2**channel_bitdepth - 1) * (array - amin) / (amax - amin)
else:
_array = array
_array = numpy.rint(_array).astype(dtype=PNGWriter.get_dtype(channel_bitdepth)) # proper rounding
return cls(_array, filename, channels=channels,
grayscale=grayscale, channel_bitdepth=channel_bitdepth,
has_alpha=has_alpha, palette=palette) | 0.00516 |
def read_stdout(self):
"""
Reads the current state of the print buffer (if it exists) and returns
a body-ready dom object of those contents without adding them to the
actual report body. This is useful for creating intermediate body
values for display while the method is still executing.
:return:
A dom string for the current state of the print buffer contents
"""
try:
contents = self.stdout_interceptor.read_all()
except Exception as err:
contents = ''
return render_texts.preformatted_text(contents) | 0.003221 |
def createFieldDescription(self):
"""
Provides a field description dict for swarm description.
:return: (dict)
"""
return {
"fieldName": self.getName(),
"fieldType": self._dataType,
"minValue": self._min,
"maxValue": self._max
} | 0.003623 |
def startup_script(self):
"""
Returns the content of the current startup script
"""
script_file = self.script_file
if script_file is None:
return None
try:
with open(script_file, "rb") as f:
return f.read().decode("utf-8", errors="replace")
except OSError as e:
raise VPCSError('Cannot read the startup script file "{}": {}'.format(script_file, e)) | 0.00655 |
def alias(*aliases):
"""
Decorating a class with @alias('FOO', 'BAR', ..) allows the class to
be referenced by each of the names provided as arguments.
"""
def decorator(cls):
# alias must be set in globals from caller's frame
caller = sys._getframe(1)
globals_dict = caller.f_globals
for alias in aliases:
globals_dict[alias] = cls
return cls
return decorator | 0.002294 |
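An example of the alias() decorator above: the extra names are injected into the calling module's globals, so all three names refer to the same class.

@alias('FOO', 'Bar')
class Foo(object): pass

assert FOO is Foo and Bar is Foo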
def bind(self, **fields):
"""
Return a new L{Message} with this message's contents plus the
additional given bindings.
"""
contents = self._contents.copy()
contents.update(fields)
return Message(contents, self._serializer) | 0.007194 |
def string_matches_sans_whitespace(self, str1, str2_fuzzy_whitespace):
"""Check if two strings match, modulo their whitespace."""
str2_fuzzy_whitespace = re.sub(r'\s+', r'\\s*', str2_fuzzy_whitespace)
return re.search(str2_fuzzy_whitespace, str1) is not None | 0.014337 |
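A behaviour sketch of the whitespace-insensitive match above, written here as a free function for illustration (the method's self is not used):

import re

def _matches(str1, str2_fuzzy_whitespace):
    # whitespace runs in the pattern string become "any amount of whitespace"
    str2_fuzzy_whitespace = re.sub(r'\s+', r'\\s*', str2_fuzzy_whitespace)
    return re.search(str2_fuzzy_whitespace, str1) is not None

assert _matches("foo   bar", "foo bar")      # extra whitespace is tolerated
assert _matches("foobar", "foo bar")         # so is missing whitespace
assert not _matches("foo baz", "foo bar")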
def weather(self, latitude=None, longitude=None, date=None):
# type:(float, float, datetime) -> Weather
"""
:param float latitude: Locations latitude
:param float longitude: Locations longitude
:param datetime or str or int date: Date/time for historical weather data
:raises requests.exceptions.HTTPError: Raises on bad http response
:raises TypeError: Raises on invalid param types
:rtype: Weather
Example uses
.. code-block:: python
# DarkSky instantiation
>>> darksky = pydarksky.DarkSky(api_key)
# Pre-define values
>>> darksky.latitude = -34.9285
>>> darksky.longitude = 138.6005
>>> weather = darksky.weather()
# Pass values as params
>>> weather = darksky.weather(latitude=-34.9285, longitude=138.6005)
# Pass values from dict
>>> kwargs = {"longitude": 138.6005, "latitude": -34.9285}
>>> weather = darksky.weather(**kwargs)
"""
# If params are default(None) check if latitude/longitude have already been defined(Not None)
# Otherwise TypeError is raised
if latitude is None:
if self.latitude is None:
raise TypeError("latitude must be type '<class 'str'>' is None")
else:
self.latitude = latitude
if longitude is None:
if self.longitude is None:
raise TypeError("longitude must be type '<class 'str'>' is None")
else:
self.longitude = longitude
self._date = date
url = self.url
log.debug(url)
self._response = requests.get(url, headers={"Accept-Encoding": "gzip"}, timeout=5)
self._response.raise_for_status()
self._weather = Weather(self._response.text)
return self._weather | 0.004727 |
def _ParseShVariables(self, lines):
"""Extract env_var and path values from sh derivative shells.
Iterates over each line, word by word searching for statements that set the
path. These are either variables, or conditions that would allow a variable
to be set later in the line (e.g. export).
Args:
lines: A list of lines, each of which is a list of space separated words.
Returns:
a dictionary of path names and values.
"""
paths = {}
for line in lines:
for entry in line:
if "=" in entry:
# Pad out the list so that it's always 2 elements, even if the split
# failed.
target, vals = (entry.split("=", 1) + [""])[:2]
if vals:
path_vals = vals.split(":")
else:
path_vals = []
self._ExpandPath(target, path_vals, paths)
elif entry not in self._SH_CONTINUATION:
# Stop processing the line unless the entry might allow paths to still
# be set, e.g.
# reserved words: "export"
# conditions: { PATH=VAL } && PATH=:$PATH || PATH=.
break
return paths | 0.01209 |
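An illustrative input for _ParseShVariables() above; each line is a list of space-separated words as produced by the caller's tokenizer, and the exact output shape depends on the _ExpandPath() helper, which is not shown.

lines = [
    ["export", "PATH=/usr/local/bin:/usr/bin"],
    ["LD_LIBRARY_PATH=/opt/lib"],
    ["echo", "PATH=/ignored"],   # stops at 'echo': no '=' and not a continuation word
]
# Passed to _ParseShVariables(), this would return roughly:
#   {"PATH": ["/usr/local/bin", "/usr/bin"], "LD_LIBRARY_PATH": ["/opt/lib"]}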
def get_config_input_source_config_source_running_running(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_config = ET.Element("get_config")
config = get_config
input = ET.SubElement(get_config, "input")
source = ET.SubElement(input, "source")
config_source = ET.SubElement(source, "config-source")
running = ET.SubElement(config_source, "running")
running = ET.SubElement(running, "running")
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003378 |
def maybeStartBuildsForWorker(self, worker_name):
"""
Call this when something suggests that a particular worker may now be
available to start a build.
@param worker_name: the name of the worker
"""
builders = self.getBuildersForWorker(worker_name)
self.brd.maybeStartBuildsOn([b.name for b in builders]) | 0.00554 |
def readOutput(self, directory, projectFileName, session, spatial=False, spatialReferenceID=None):
"""
Read only output files for a GSSHA project to the database.
Use this method to read a project when only post-processing tasks need to be performed.
Args:
directory (str): Directory containing all GSSHA model files. This method assumes that all files are located
in the same directory.
projectFileName (str): Name of the project file for the GSSHA model which will be read (e.g.: 'example.prj').
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects.
Defaults to False.
spatialReferenceID (int, optional): Integer id of spatial reference system for the model. If no id is
provided GsshaPy will attempt to automatically lookup the spatial reference ID. If this process fails,
default srid will be used (4326 for WGS 84).
"""
self.project_directory = directory
with tmp_chdir(directory):
# Add project file to session
session.add(self)
# Read Project File
self.read(directory, projectFileName, session, spatial, spatialReferenceID)
# Get the batch directory for output
batchDirectory = self._getBatchDirectory(directory)
# Read Mask (dependency of some output files)
maskMap = WatershedMaskFile()
maskMapFilename = self.getCard('WATERSHED_MASK').value.strip('"')
maskMap.read(session=session, directory=directory, filename=maskMapFilename, spatial=spatial)
maskMap.projectFile = self
# Automatically derive the spatial reference system, if possible
if spatialReferenceID is None:
spatialReferenceID = self._automaticallyDeriveSpatialReferenceId(directory)
# Read Output Files
self._readXput(self.OUTPUT_FILES, batchDirectory, session, spatial=spatial, spatialReferenceID=spatialReferenceID)
# Read WMS Dataset Files
self._readWMSDatasets(self.WMS_DATASETS, batchDirectory, session, spatial=spatial, spatialReferenceID=spatialReferenceID)
# Commit to database
self._commit(session, self.COMMIT_ERROR_MESSAGE) | 0.006002 |
def _find_xinput(self):
"""Find most recent xinput library."""
for dll in XINPUT_DLL_NAMES:
try:
self.xinput = getattr(ctypes.windll, dll)
except OSError:
pass
else:
# We found an xinput driver
self.xinput_dll = dll
break
else:
# We didn't find an xinput library
warn(
"No xinput driver dll found, gamepads not supported.",
RuntimeWarning) | 0.003738 |
def calculate_file_distances(dicom_files, field_weights=None,
dist_method_cls=None, **kwargs):
"""
Calculates the DicomFileDistance between all files in dicom_files, using an
weighted Levenshtein measure between all field names in field_weights and
their corresponding weights.
Parameters
----------
dicom_files: iterable of str
Dicom file paths
field_weights: dict of str to float
A dict with header field names to float scalar values, that
indicate a distance measure ratio for the levenshtein distance
averaging of all the header field names in it. e.g., {'PatientID': 1}
dist_method_cls: DicomFileDistance class
Distance method object to compare the files.
If None, the default DicomFileDistance method using Levenshtein
distance between the field_weights will be used.
kwargs: DicomFileDistance instantiation named arguments
Apart from the field_weights argument.
Returns
-------
file_dists: np.ndarray or scipy.sparse.lil_matrix of shape NxN
Levenshtein distances between each of the N items in dicom_files.
"""
if dist_method_cls is None:
dist_method = LevenshteinDicomFileDistance(field_weights)
else:
try:
dist_method = dist_method_cls(field_weights=field_weights, **kwargs)
except:
log.exception('Could not instantiate {} object with field_weights '
'and {}'.format(dist_method_cls, kwargs))
dist_dtype = np.float16
n_files = len(dicom_files)
try:
file_dists = np.zeros((n_files, n_files), dtype=dist_dtype)
except MemoryError as mee:
import scipy.sparse
file_dists = scipy.sparse.lil_matrix((n_files, n_files),
dtype=dist_dtype)
for idxi in range(n_files):
dist_method.set_dicom_file1(dicom_files[idxi])
for idxj in range(idxi+1, n_files):
dist_method.set_dicom_file2(dicom_files[idxj])
if idxi != idxj:
file_dists[idxi, idxj] = dist_method.transform()
return file_dists | 0.001376 |
def track(self, event_key, user_id, attributes=None, event_tags=None):
""" Send conversion event to Optimizely.
Args:
event_key: Event key representing the event which needs to be recorded.
user_id: ID for user.
attributes: Dict representing visitor attributes and values which need to be recorded.
event_tags: Dict representing metadata associated with the event.
"""
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('track'))
return
if not validator.is_non_empty_string(event_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('event_key'))
return
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return
if not self._validate_user_inputs(attributes, event_tags):
return
event = self.config.get_event(event_key)
if not event:
self.logger.info('Not tracking user "%s" for event "%s".' % (user_id, event_key))
return
conversion_event = self.event_builder.create_conversion_event(event_key, user_id, attributes, event_tags)
self.logger.info('Tracking event "%s" for user "%s".' % (event_key, user_id))
self.logger.debug('Dispatching conversion event to URL %s with params %s.' % (
conversion_event.url,
conversion_event.params
))
try:
self.event_dispatcher.dispatch_event(conversion_event)
except:
self.logger.exception('Unable to dispatch conversion event!')
self.notification_center.send_notifications(enums.NotificationTypes.TRACK, event_key, user_id,
attributes, event_tags, conversion_event) | 0.011635 |
def OnCellFontUnderline(self, event):
"""Cell font underline event handler"""
with undo.group(_("Underline")):
self.grid.actions.toggle_attr("underline")
self.grid.ForceRefresh()
self.grid.update_attribute_toolbar()
event.Skip() | 0.007042 |
def bb(self,*args,**kwargs):
"""
NAME:
bb
PURPOSE:
return Galactic latitude
INPUT:
t - (optional) time at which to get bb
obs=[X,Y,Z] - (optional) position of observer (in kpc)
(default=Object-wide default)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= distance in kpc corresponding to R=1. (default=Object-wide default)
OUTPUT:
b(t)
HISTORY:
2011-02-23 - Written - Bovy (NYU)
"""
_check_roSet(self,kwargs,'bb')
lbd= self._lbd(*args,**kwargs)
return lbd[:,1] | 0.015228 |
def split(self, grouper):
'''Split the current DenseRunVariable into multiple columns.
Parameters
----------
grouper : :obj:`pandas.DataFrame`
Binary DF specifying the design matrix to use for splitting. Number
of rows must match current ``DenseRunVariable``; a new ``DenseRunVariable``
will be generated for each column in the grouper.
Returns
-------
A list of DenseRunVariables, one per unique value in the grouper.
'''
values = grouper.values * self.values.values
df = pd.DataFrame(values, columns=grouper.columns)
return [DenseRunVariable(name='%s.%s' % (self.name, name),
values=df[name].values,
run_info=self.run_info,
source=self.source,
sampling_rate=self.sampling_rate)
for i, name in enumerate(df.columns)] | 0.003027 |
def __make_request_url(self, teststep_dict, entry_json):
""" parse HAR entry request url and queryString, and make teststep url and params
Args:
entry_json (dict):
{
"request": {
"url": "https://httprunner.top/home?v=1&w=2",
"queryString": [
{"name": "v", "value": "1"},
{"name": "w", "value": "2"}
],
},
"response": {}
}
Returns:
{
"name: "/home",
"request": {
url: "https://httprunner.top/home",
params: {"v": "1", "w": "2"}
}
}
"""
request_params = utils.convert_list_to_dict(
entry_json["request"].get("queryString", [])
)
url = entry_json["request"].get("url")
if not url:
logging.exception("url missed in request.")
sys.exit(1)
parsed_object = urlparse.urlparse(url)
if request_params:
parsed_object = parsed_object._replace(query='')
teststep_dict["request"]["url"] = parsed_object.geturl()
teststep_dict["request"]["params"] = request_params
else:
teststep_dict["request"]["url"] = url
teststep_dict["name"] = parsed_object.path | 0.002046 |
def to_serializable(self, use_bytes=False, bytes_type=bytes):
"""Convert a :class:`SampleSet` to a serializable object.
Note that the contents of the :attr:`.SampleSet.info` field are assumed
to be serializable.
Args:
use_bytes (bool, optional, default=False):
If True, a compact representation representing the biases as bytes is used.
bytes_type (class, optional, default=bytes):
This class will be used to wrap the bytes objects in the
serialization if `use_bytes` is true. Useful for when using
Python 2 and using BSON encoding, which will not accept the raw
`bytes` type, so `bson.Binary` can be used instead.
Returns:
dict: Object that can be serialized.
Examples:
This example encodes using JSON.
>>> import dimod
>>> import json
...
>>> samples = dimod.SampleSet.from_samples([-1, 1, -1], dimod.SPIN, energy=-.5)
>>> s = json.dumps(samples.to_serializable())
See also:
:meth:`~.SampleSet.from_serializable`
"""
schema_version = "2.0.0"
record = {name: array2bytes(vector)
for name, vector in self.data_vectors.items()}
record['sample'] = array2bytes(np.packbits(self.record.sample > 0))
if not use_bytes:
for name in record:
record[name] = base64.b64encode(record[name]).decode("UTF-8")
return {"basetype": "SampleSet",
"type": type(self).__name__,
"record": record,
"sample_dtype": str(self.record.sample.dtype), # need this to unpack
"sample_shape": self.record.sample.shape, # need this to unpack
"variable_type": self.vartype.name,
"info": self.info,
"version": {"dimod": __version__,
"sampleset_schema": schema_version},
"variable_labels": list(self.variables),
"use_bytes": bool(use_bytes)} | 0.002805 |
def answer(request):
"""
Save the answer.
GET parameters:
html:
turn on the HTML version of the API
BODY
json in following format:
{
"answer": #answer, -- for one answer
"answers": [#answer, #answer, #answer ...] -- for multiple answers
}
answer = {
"answer_class": str, -- class of answer to save (e.g., flashcard_answer)
"response_time": int, -- response time in milliseconds
"meta": "str" -- optional information
"time_gap": int -- waiting time in frontend in seconds
... -- other fields depending on answer type
(see from_json method of Django model class)
}
"""
if request.method == 'GET':
return render(request, 'models_answer.html', {}, help_text=answer.__doc__)
elif request.method == 'POST':
practice_filter = get_filter(request)
practice_context = PracticeContext.objects.from_content(practice_filter)
saved_answers = _save_answers(request, practice_context, True)
return render_json(request, saved_answers, status=200, template='models_answer.html')
else:
return HttpResponseBadRequest('method {0} is not allowed'.format(request.method))
def _process_dates(self, timezone_offset, first_date, start, end, title, track_points=False):
''' a helper method to process datetime information for other requests
:param timezone_offset: integer with timezone offset from user profile details
:param first_date: string with ISO date from user profile details firstDate
:param start: float with starting datetime for daily summaries
:param end: float with ending datetime for daily summaries
:param title: string with request method name
:param track_points: [optional] boolean to provide detailed tracking of user movement
:return: dictionary of parameters to add to request
'''
# validate inputs
input_fields = {
'timezone_offset': timezone_offset,
'first_date': first_date,
'start': start,
'end': end
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# validate datetimes
max_time = 30 * 24 * 60 * 60 + 1
max_days = '31'
if track_points:
max_days = '7'
max_time = 6 * 24 * 60 * 60 + 1
from time import time
end_ISO = ''
start_ISO = ''
if end:
if end > time():
raise ValueError('%s(end=%s) datetime must not be in the future.' % (title, end))
end_ISO = self._convert_dt(end, timezone_offset)
if start:
start_ISO = self._convert_dt(start, timezone_offset)
if start_ISO < first_date:
raise ValueError("%s(start=%s) datetime must not precede user signup first date." % (title, start))
if start and end:
if start > end:
raise ValueError('%s(start=%s) must be a datetime before end=%s.' % (title, start, end))
if end - start > max_time:
raise ValueError('%s(start=%s, end=%s) must not be more than %s days apart.' % (title, start, end, max_days))
# construct request parameters
request_parameters = {}
if not start_ISO and not end_ISO:
request_parameters['pastDays'] = max_days
else:
if start_ISO and not end_ISO:
end_dt = start + max_time
current_ISO = self._convert_dt(time(), timezone_offset)
end_ISO = self._convert_dt(end_dt, timezone_offset)
if current_ISO < end_ISO:
end_ISO = current_ISO
elif end_ISO and not start_ISO:
start_dt = end - max_time
start_ISO = self._convert_dt(start_dt, timezone_offset)
if start_ISO < first_date:
start_ISO = first_date
request_parameters['from'] = start_ISO
request_parameters['to'] = end_ISO
return request_parameters | 0.003295 |
def get_uid(brain_or_object):
"""Get the Plone UID for this object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Plone UID
:rtype: string
"""
if is_portal(brain_or_object):
return '0'
if is_brain(brain_or_object) and base_hasattr(brain_or_object, "UID"):
return brain_or_object.UID
return get_object(brain_or_object).UID() | 0.002105 |
def segment_str(text: str, phoneme_inventory: Set[str] = PHONEMES) -> str:
"""
Takes as input a string in Kunwinjku and segments it into phoneme-like
units based on the standard orthographic rules specified at
http://bininjgunwok.org.au/
"""
text = text.lower()
text = segment_into_tokens(text, phoneme_inventory)
return text | 0.002793 |
def getitem(self, index, context=None):
"""Get an item from this node.
:param index: The node to use as a subscript index.
:type index: Const or Slice
:raises AstroidTypeError: When the given index cannot be used as a
subscript index, or if this node is not subscriptable.
:raises AstroidIndexError: If the given index does not exist in the
dictionary.
"""
for key, value in self.items:
# TODO(cpopa): no support for overriding yet, {1:2, **{1: 3}}.
if isinstance(key, DictUnpack):
try:
return value.getitem(index, context)
except (exceptions.AstroidTypeError, exceptions.AstroidIndexError):
continue
for inferredkey in key.infer(context):
if inferredkey is util.Uninferable:
continue
if isinstance(inferredkey, Const) and isinstance(index, Const):
if inferredkey.value == index.value:
return value
raise exceptions.AstroidIndexError(index) | 0.002643 |
def bitdepth(self):
"""The number of bits per sample in the audio encoding (an int).
Only available for certain file formats (zero where
unavailable).
"""
if hasattr(self.mgfile.info, 'bits_per_sample'):
return self.mgfile.info.bits_per_sample
return 0 | 0.00641 |
def _format_operation_list(operation, parameters):
"""Formats parameters in operation in the way BigQuery expects.
The input operation will be a query like ``SELECT %s`` and the output
will be a query like ``SELECT ?``.
:type operation: str
:param operation: A Google BigQuery query string.
:type parameters: Sequence[Any]
:param parameters: Sequence of parameter values.
:rtype: str
:returns: A formatted query string.
:raises: :class:`~google.cloud.bigquery.dbapi.ProgrammingError`
if a parameter used in the operation is not found in the
``parameters`` argument.
"""
formatted_params = ["?" for _ in parameters]
try:
return operation % tuple(formatted_params)
except TypeError as exc:
raise exceptions.ProgrammingError(exc) | 0.001221 |
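A worked example for _format_operation_list() above: positional %s placeholders are rewritten into the ? placeholders BigQuery expects.

op = "SELECT name FROM people WHERE age > %s AND state = %s"
formatted = _format_operation_list(op, [21, "CA"])
# formatted == "SELECT name FROM people WHERE age > ? AND state = ?"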
def bounds(self) -> Tuple[float, float, float, float]:
"""Returns the bounds of the shape.
Bounds are given in the following order in the origin crs:
west, south, east, north
"""
return self.shape.bounds | 0.00823 |
def modifies_known_mutable(obj, attr):
"""This function checks if an attribute on a builtin mutable object
(list, dict, set or deque) would modify it if called. It also supports
the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
with Python 2.6 onwards the abstract base classes `MutableSet`,
`MutableMapping`, and `MutableSequence`.
>>> modifies_known_mutable({}, "clear")
True
>>> modifies_known_mutable({}, "keys")
False
>>> modifies_known_mutable([], "append")
True
>>> modifies_known_mutable([], "index")
False
If called with an unsupported object (such as unicode) `False` is
returned.
>>> modifies_known_mutable("foo", "upper")
False
"""
for typespec, unsafe in _mutable_spec:
if isinstance(obj, typespec):
return attr in unsafe
return False | 0.001147 |
def optimizeAngle(angle):
"""
Because any rotation can be expressed within 360 degrees
of any given number, and since negative angles sometimes
are one character longer than corresponding positive angle,
we shorten the number to one in the range to [-90, 270[.
"""
# First, we put the new angle in the range ]-360, 360[.
# The modulo operator yields results with the sign of the
# divisor, so for negative dividends, we preserve the sign
# of the angle.
if angle < 0:
angle %= -360
else:
angle %= 360
# 720 degrees is unnecessary, as 360 covers all angles.
# As "-x" is shorter than "35x" and "-xxx" one character
# longer than positive angles <= 260, we constrain angle
# range to [-90, 270[ (or, equally valid: ]-100, 260]).
if angle >= 270:
angle -= 360
elif angle < -90:
angle += 360
return angle | 0.001101 |
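A few worked values for optimizeAngle() above, covering each branch:

assert optimizeAngle(720) == 0      # whole turns collapse to 0
assert optimizeAngle(270) == -90    # 270 is shortened to the equivalent -90
assert optimizeAngle(-100) == 260   # below -90 wraps up into [-90, 270[
assert optimizeAngle(-45) == -45    # already in range, left unchanged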
def _sbase_notes_dict(sbase, notes):
"""Set SBase notes based on dictionary.
Parameters
----------
sbase : libsbml.SBase
SBML object to set notes on
notes : notes object
notes information from cobra object
"""
if notes and len(notes) > 0:
tokens = ['<html xmlns = "http://www.w3.org/1999/xhtml" >'] + \
["<p>{}: {}</p>".format(k, v) for (k, v) in notes.items()] + \
["</html>"]
_check(
sbase.setNotes("\n".join(tokens)),
"Setting notes on sbase: {}".format(sbase)
) | 0.001718 |
def lomb_scargle_fast(t, y, dy=1, f0=0, df=None, Nf=None,
center_data=True, fit_offset=True,
use_fft=True, freq_oversampling=5, nyquist_factor=2,
trig_sum_kwds=None):
"""Compute a lomb-scargle periodogram for the given data
This implements either an O[N^2] method if use_fft==False, or an
O[NlogN] method if use_fft==True.
Parameters
----------
t, y, dy : array_like
times, values, and errors of the data points. These should be
broadcastable to the same shape. If dy is not specified, a
constant error will be used.
f0, df, Nf : (float, float, int)
parameters describing the frequency grid, f = f0 + df * arange(Nf).
Defaults, with T = t.max() - t.min():
- f0 = 0
- df is set such that there are ``freq_oversampling`` points per
peak width. ``freq_oversampling`` defaults to 5.
- Nf is set such that the highest frequency is ``nyquist_factor``
times the so-called "average Nyquist frequency".
``nyquist_factor`` defaults to 2.
Note that for unevenly-spaced data, the periodogram can be sensitive
to frequencies far higher than the average Nyquist frequency.
center_data : bool (default=True)
Specify whether to subtract the mean of the data before the fit
fit_offset : bool (default=True)
If True, then compute the floating-mean periodogram; i.e. let the mean
vary with the fit.
use_fft : bool (default=True)
If True, then use the Press & Rybicki O[NlogN] algorithm to compute
the result. Otherwise, use a slower O[N^2] algorithm
Other Parameters
----------------
freq_oversampling : float (default=5)
Oversampling factor for the frequency bins. Only referenced if
``df`` is not specified
nyquist_factor : float (default=2)
Parameter controlling the highest probed frequency. Only referenced
if ``Nf`` is not specified.
trig_sum_kwds : dict or None (optional)
extra keyword arguments to pass to the ``trig_sum`` utility.
Options are ``oversampling`` and ``Mfft``. See documentation
of ``trig_sum`` for details.
Notes
-----
Note that the ``use_fft=True`` algorithm is an approximation to the true
Lomb-Scargle periodogram, and as the number of points grows this
approximation improves. On the other hand, for very small datasets
(<~50 points or so) this approximation may not be useful.
References
----------
.. [1] Press W.H. and Rybicki, G.B, "Fast algorithm for spectral analysis
of unevenly sampled data". ApJ 1:338, p277, 1989
.. [2] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
.. [3] W. Press et al, Numerical Recipies in C (2002)
"""
# Validate and setup input data
t, y, dy = map(np.ravel, np.broadcast_arrays(t, y, dy))
w = 1. / (dy ** 2)
w /= w.sum()
# Validate and setup frequency grid
if df is None:
peak_width = 1. / (t.max() - t.min())
df = peak_width / freq_oversampling
if Nf is None:
avg_Nyquist = 0.5 * len(t) / (t.max() - t.min())
Nf = max(16, (nyquist_factor * avg_Nyquist - f0) / df)
Nf = int(Nf)
assert(df > 0)
assert(Nf > 0)
freq = f0 + df * np.arange(Nf)
# Center the data. Even if we're fitting the offset,
# this step makes the expressions below more succinct
if center_data or fit_offset:
y = y - np.dot(w, y)
# set up arguments to trig_sum
kwargs = dict.copy(trig_sum_kwds or {})
kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf)
#----------------------------------------------------------------------
# 1. compute functions of the time-shift tau at each frequency
Sh, Ch = trig_sum(t, w * y, **kwargs)
S2, C2 = trig_sum(t, w, freq_factor=2, **kwargs)
if fit_offset:
S, C = trig_sum(t, w, **kwargs)
with warnings.catch_warnings():
# Filter "invalid value in divide" warnings for zero-frequency
if f0 == 0:
warnings.simplefilter("ignore")
tan_2omega_tau = (S2 - 2 * S * C) / (C2 - (C * C - S * S))
# fix NaN at zero frequency
if np.isnan(tan_2omega_tau[0]):
tan_2omega_tau[0] = 0
else:
tan_2omega_tau = S2 / C2
# slower/less stable way: we'll use trig identities instead
# omega_tau = 0.5 * np.arctan(tan_2omega_tau)
# S2w, C2w = np.sin(2 * omega_tau), np.cos(2 * omega_tau)
# Sw, Cw = np.sin(omega_tau), np.cos(omega_tau)
S2w = tan_2omega_tau / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
C2w = 1 / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
Cw = np.sqrt(0.5) * np.sqrt(1 + C2w)
Sw = np.sqrt(0.5) * np.sign(S2w) * np.sqrt(1 - C2w)
#----------------------------------------------------------------------
# 2. Compute the periodogram, following Zechmeister & Kurster
# and using tricks from Press & Rybicki.
YY = np.dot(w, y ** 2)
YC = Ch * Cw + Sh * Sw
YS = Sh * Cw - Ch * Sw
CC = 0.5 * (1 + C2 * C2w + S2 * S2w)
SS = 0.5 * (1 - C2 * C2w - S2 * S2w)
if fit_offset:
CC -= (C * Cw + S * Sw) ** 2
SS -= (S * Cw - C * Sw) ** 2
with warnings.catch_warnings():
# Filter "invalid value in divide" warnings for zero-frequency
if fit_offset and f0 == 0:
warnings.simplefilter("ignore")
power = (YC * YC / CC + YS * YS / SS) / YY
# fix NaN and INF at zero frequency
if np.isnan(power[0]) or np.isinf(power[0]):
power[0] = 0
return freq, power | 0.000528 |
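A usage sketch for lomb_scargle_fast() above on an irregularly sampled noisy sinusoid, assuming numpy is imported as np and that the helper routines it relies on (such as trig_sum) are importable alongside it.

import numpy as np

rng = np.random.RandomState(0)
t = 100 * rng.rand(200)                                  # irregular sampling times
y = np.sin(2 * np.pi * t / 7.0) + 0.1 * rng.randn(200)   # period-7 signal plus noise

freq, power = lomb_scargle_fast(t, y, dy=0.1)
best_period = 1.0 / freq[np.argmax(power)]               # should come out close to 7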
def n_atom(self, node):
"""atom ::=
('(' [yield_expr|testlist_gexp] ')'
| '[' [listmaker] ']'
| '{' [dictmaker] '}'
| '`' testlist1 '`'
| NAME | NUMBER | STRING+)
"""
length = len(node)
if length == 1:
self.preorder(node[0])
elif length == 3:
self.preorder(node[0])
self.preorder(node[1])
self.preorder(node[2])
else:
assert False, "Expecting atom to have length 1 or 3"
self.prune() | 0.003448 |
def __remove_redundant_proper_names(self, docs, lemma_set):
""" Remove redundant proper-name analyses based on the given
list (set) of word lemmas;
"""
for doc in docs:
for word in doc[WORDS]:
# Only look at words for which more than one analysis has been proposed:
if len(word[ANALYSIS]) > 1:
# 1) Find the analyses that should be removed according to the lemma list
toDelete = []
for analysis in word[ANALYSIS]:
if analysis[POSTAG] == 'H' and analysis[ROOT] in lemma_set:
toDelete.append( analysis )
# 2) Remove the redundant analyses
if toDelete:
for analysis in toDelete:
word[ANALYSIS].remove(analysis) | 0.005767 |
def smoothing(labels, smoothing_window):
""" Applies a smoothing on VAD"""
if numpy.sum(labels)< smoothing_window:
return labels
segments = []
for k in range(1,len(labels)-1):
if labels[k]==0 and labels[k-1]==1 and labels[k+1]==1 :
labels[k]=1
for k in range(1,len(labels)-1):
if labels[k]==1 and labels[k-1]==0 and labels[k+1]==0 :
labels[k]=0
seg = numpy.array([0,0,labels[0]])
for k in range(1,len(labels)):
if labels[k] != labels[k-1]:
seg[1]=k-1
segments.append(seg)
seg = numpy.array([k,k,labels[k]])
seg[1]=len(labels)-1
segments.append(seg)
if len(segments) < 2:
return labels
curr = segments[0]
next = segments[1]
# Look at the first segment. If it's short enough, just change its labels
if (curr[1]-curr[0]+1) < smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
for k in range(1,len(segments)-1):
prev = segments[k-1]
curr = segments[k]
next = segments[k+1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
prev = segments[-2]
curr = segments[-1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #if curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
return labels | 0.041451 |
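A small example for smoothing() above, assuming the numpy import used by the function. A two-frame speech burst surrounded by long silence segments is flipped to silence because it is shorter than the smoothing window:

import numpy

labels = numpy.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0])
smoothed = smoothing(labels.copy(), smoothing_window=3)
# the isolated burst at indices 9-10 is erased:
# smoothed == [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]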
def generate_enum(self):
"""
Means that only value specified in the enum is valid.
.. code-block:: python
{
'enum': ['a', 'b'],
}
"""
enum = self._definition['enum']
if not isinstance(enum, (list, tuple)):
raise JsonSchemaDefinitionException('enum must be an array')
with self.l('if {variable} not in {enum}:'):
enum = str(enum).replace('"', '\\"')
self.l('raise JsonSchemaException("{name} must be one of {}")', enum) | 0.005435 |
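This generator method looks like part of a fastjsonschema-style validator compiler; the sketch below shows, under that assumption, how a compiled validator behaves for an enum schema (only public fastjsonschema calls are used).

```python
import fastjsonschema

validate = fastjsonschema.compile({'enum': ['a', 'b']})
validate('a')  # accepted: the value is one of the enumerated literals
try:
    validate('c')
except fastjsonschema.JsonSchemaException as exc:
    print(exc)  # message similar to "data must be one of ['a', 'b']" (exact wording may differ)
```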
def get_event_tags_from_dn(dn):
"""
This grabs the event tags from the dn designator. They look like this:
uni/tn-DataDog/ap-DtDg-AP1-EcommerceApp/epg-DtDg-Ecomm/HDl2IngrPktsAg1h
"""
tags = []
node = get_node_from_dn(dn)
if node:
tags.append("node:" + node)
app = get_app_from_dn(dn)
if app:
tags.append("app:" + app)
bd = get_bd_from_dn(dn)
if bd:
tags.append("bd:" + bd)
cep = get_cep_from_dn(dn)
if cep:
tags.append("mac:" + cep)
ip = get_ip_from_dn(dn)
if ip:
tags.append("ip:" + ip)
epg = get_epg_from_dn(dn)
if epg:
tags.append("epg:" + epg)
return tags | 0.001473 |
def _dowork(self, dir1, dir2, copyfunc=None, updatefunc=None):
""" Private attribute for doing work """
if self._verbose:
self.log('Source directory: %s:' % dir1)
self._dcmp = self._compare(dir1, dir2)
# Files & directories only in target directory
if self._purge:
for f2 in self._dcmp.right_only:
fullf2 = os.path.join(self._dir2, f2)
if self._verbose:
self.log('Deleting %s' % fullf2)
try:
if os.path.isfile(fullf2):
try:
os.remove(fullf2)
self._deleted.append(fullf2)
self._numdelfiles += 1
except OSError as e:
self.log(str(e))
self._numdelffld += 1
elif os.path.isdir(fullf2):
try:
shutil.rmtree(fullf2, True)
self._deleted.append(fullf2)
self._numdeldirs += 1
except shutil.Error as e:
self.log(str(e))
self._numdeldfld += 1
except Exception as e: # of any use ?
self.log(str(e))
continue
# Files & directories only in source directory
for f1 in self._dcmp.left_only:
try:
st = os.stat(os.path.join(self._dir1, f1))
except os.error:
continue
if stat.S_ISREG(st.st_mode):
if copyfunc:
copyfunc(f1, self._dir1, self._dir2)
self._added.append(os.path.join(self._dir2, f1))
elif stat.S_ISDIR(st.st_mode):
to_make = os.path.join(self._dir2, f1)
if not os.path.exists(to_make):
os.makedirs(to_make)
self._numnewdirs += 1
self._added.append(to_make)
# common files/directories
for f1 in self._dcmp.common:
try:
st = os.stat(os.path.join(self._dir1, f1))
except os.error:
continue
if stat.S_ISREG(st.st_mode):
if updatefunc:
updatefunc(f1, self._dir1, self._dir2) | 0.00082 |
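This appears to be the private work loop of a directory synchronizer (dirsync's Syncer); below is a hedged sketch of the package's public entry point, where the function name and option names are assumed from dirsync's documented API and the paths are placeholders.

```python
from dirsync import sync

# Mirror 'source_dir' into 'target_dir': copy new/updated files and, because
# purge=True, delete anything that exists only in the target -- the behaviour
# implemented by the method above.
sync('source_dir', 'target_dir', 'sync', purge=True, verbose=True)
```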
def _build_cookie_jar(cls, session: AppSession):
'''Build the cookie jar'''
if not session.args.cookies:
return
if session.args.load_cookies or session.args.save_cookies:
session.factory.set('CookieJar', BetterMozillaCookieJar)
cookie_jar = session.factory.new('CookieJar')
if session.args.load_cookies:
cookie_jar.load(session.args.load_cookies, ignore_discard=True)
else:
cookie_jar = session.factory.new('CookieJar')
policy = session.factory.new('CookiePolicy', cookie_jar=cookie_jar)
cookie_jar.set_policy(policy)
_logger.debug(__('Loaded cookies: {0}', list(cookie_jar)))
cookie_jar_wrapper = session.factory.new(
'CookieJarWrapper',
cookie_jar,
save_filename=session.args.save_cookies,
keep_session_cookies=session.args.keep_session_cookies,
)
return cookie_jar_wrapper | 0.002024 |
def convert_text_w_syntax_to_CONLL( text, feature_generator, layer=LAYER_CONLL ):
    ''' Converts the given estnltk Text object into CONLL format and returns it as a
string.
Uses given *feature_generator* to produce fields ID, FORM, LEMMA, CPOSTAG,
POSTAG, FEATS for each token.
Fills fields to predict (HEAD, DEPREL) with the syntactic information from
given *layer* (default: LAYER_CONLL).
This method is used in preparing training data for MaltParser.
Parameters
-----------
text : estnltk.text.Text
Morphologically analysed text from which the CONLL file is generated;
feature_generator : CONLLFeatGenerator
An instance of CONLLFeatGenerator, which has method *generate_features()*
for generating morphological features for a single token;
layer : str
Name of the *text* layer from which syntactic information is to be taken.
Defaults to LAYER_CONLL.
The aimed format looks something like this:
1 Öö öö S S sg|n 2 @SUBJ _ _
2 oli ole V V s 0 ROOT _ _
3 täiesti täiesti D D _ 4 @ADVL _ _
4 tuuletu tuuletu A A sg|n 2 @PRD _ _
5 . . Z Z _ 4 xxx _ _
'''
from estnltk.text import Text
if not isinstance( text, Text ):
raise Exception('(!) Unexpected type of input argument! Expected EstNLTK\'s Text. ')
assert layer in text, ' (!) The layer "'+layer+'" is missing form the Text object.'
try:
granularity = feature_generator.parseScope
except AttributeError:
granularity = SENTENCES
assert granularity in [SENTENCES, CLAUSES], '(!) Unsupported granularity: "'+str(granularity)+'"!'
sentenceStrs = []
if granularity == CLAUSES:
_create_clause_based_dep_links( text, layer )
for sentence_text in text.split_by( granularity ):
sentence_text[WORDS] = __sort_analyses( sentence_text[WORDS] )
for i in range(len( sentence_text[WORDS] )):
# Generate features ID, FORM, LEMMA, CPOSTAG, POSTAG, FEATS
strForm = feature_generator.generate_features( sentence_text, i )
# Get syntactic analysis of the token
syntaxToken = sentence_text[layer][i]
firstSyntaxRel = syntaxToken[PARSER_OUT][0]
# *** HEAD (syntactic parent)
parentLabel = str( firstSyntaxRel[1] + 1 )
strForm.append( parentLabel )
strForm.append( '\t' )
# *** DEPREL (label of the syntactic relation)
if parentLabel == '0':
strForm.append( 'ROOT' )
strForm.append( '\t' )
else:
strForm.append( firstSyntaxRel[0] )
strForm.append( '\t' )
# *** PHEAD
strForm.append( '_' )
strForm.append( '\t' )
# *** PDEPREL
strForm.append( '_' )
sentenceStrs.append( ''.join( strForm ) )
sentenceStrs.append( '' )
return '\n'.join( sentenceStrs ) | 0.019367 |
def set_weather(self, weather_type):
"""Queue up a set weather command. It will be applied when `tick` or `step` is called next.
By the next tick, the lighting, skysphere, fog, and relevant particle systems will be updated and/or spawned
to the given weather. If there is no skysphere or directional light in the world, the command may not function
properly but will not cause a crash.
        NOTE: Because this command can affect the fog density, any changes made by a change_fog_density command before
        a set_weather command is called will be undone. It is recommended to call change_fog_density after calling
        set_weather.
Args:
weather_type (str): The type of weather, which can be 'Rain' or 'Cloudy'. In all downloadable worlds,
the weather is clear by default. If the given type string is not available, the command will not be sent.
"""
if not SetWeatherCommand.has_type(weather_type.lower()):
raise HolodeckException("Invalid weather type " + weather_type)
self._should_write_to_command_buffer = True
command_to_send = SetWeatherCommand(weather_type.lower())
self._commands.add_command(command_to_send) | 0.007246 |
def _get_resampled(self,rule,how={'ohlc':'last','volume':'sum'},df=None,**kwargs):
"""
Returns a resampled DataFrame
Parameters
----------
rule : str
the offset string or object representing target conversion
for all aliases available see http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
how : str or dict
states the form in which the resampling will be done.
Examples:
how={'volume':'sum'}
how='count'
df : DataFrame
If omitted then the QuantFigure.DataFrame is resampled.
kwargs
For more information see http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.resample.html
"""
df=self.df.copy() if df is None else df
        if rule is None:
return df
else:
if isinstance(how,dict):
if 'ohlc' in how:
v=how.pop('ohlc')
for _ in ['open','high','low','close']:
how[_]=v
_how=how.copy()
for _ in _how:
if _ not in self._d:
del how[_]
return df.resample(rule=rule,**kwargs).apply(how) | 0.054475 |
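A standalone sketch of the underlying pandas resampling pattern; the OHLCV frame, the column names, and the weekly rule are illustrative, and this is not the QuantFigure class itself.

```python
import numpy as np
import pandas as pd

idx = pd.date_range('2023-01-02', periods=10, freq='D')
df = pd.DataFrame({'open': np.arange(10.0),
                   'high': np.arange(10.0) + 1.0,
                   'low': np.arange(10.0) - 1.0,
                   'close': np.arange(10.0) + 0.5,
                   'volume': np.arange(10) * 100}, index=idx)

# how={'ohlc': 'last', 'volume': 'sum'} in the method above expands to the same
# aggregation for each of open/high/low/close; spelled out explicitly here.
how = {'open': 'last', 'high': 'last', 'low': 'last',
       'close': 'last', 'volume': 'sum'}
weekly = df.resample('1W').apply(how)
print(weekly)
```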
def last(self, predicate=None):
'''The last element in a sequence (optionally satisfying a predicate).
If the predicate is omitted or is None this query returns the last
element in the sequence; otherwise, it returns the last element in
the sequence for which the predicate evaluates to True. Exceptions are
raised if there is no such element.
Note: This method uses immediate execution.
Args:
predicate: An optional unary predicate function, the only argument
to which is the element. The return value should be True for
matching elements, otherwise False. If the predicate is
omitted or None the last element of the source sequence will
be returned.
Returns:
The last element of the sequence if predicate is None, otherwise
the last element for which the predicate returns True.
Raises:
ValueError: If the Queryable is closed.
ValueError: If the source sequence is empty.
ValueError: If there are no elements matching the predicate.
TypeError: If the predicate is not callable.
'''
if self.closed():
raise ValueError("Attempt to call last() on a closed Queryable.")
return self._last() if predicate is None else self._last_predicate(predicate) | 0.002131 |
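A plain-Python sketch of the same "last element satisfying a predicate" semantics, written standalone rather than against the Queryable class above.

```python
def last_matching(iterable, predicate=None):
    """Return the last element (optionally the last one matching predicate)."""
    sentinel = object()
    result = sentinel
    for item in iterable:
        if predicate is None or predicate(item):
            result = item
    if result is sentinel:
        raise ValueError("No matching element in the sequence.")
    return result

assert last_matching([1, 2, 3, 4]) == 4
assert last_matching([1, 2, 3, 4], lambda x: x % 2 == 0) == 4
assert last_matching([1, 2, 3, 4], lambda x: x < 3) == 2
```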
def update(self, callback_method=values.unset, callback_url=values.unset,
friendly_name=values.unset):
"""
Update the TriggerInstance
:param unicode callback_method: The HTTP method to use to call callback_url
:param unicode callback_url: The URL we call when the trigger fires
:param unicode friendly_name: A string to describe the resource
:returns: Updated TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
"""
data = values.of({
'CallbackMethod': callback_method,
'CallbackUrl': callback_url,
'FriendlyName': friendly_name,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return TriggerInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
) | 0.004004 |
def ping(self):
"""
Sending ICMP packets.
:return: ``ping`` command execution result.
:rtype: :py:class:`.PingResult`
:raises ValueError: If parameters not valid.
"""
self.__validate_ping_param()
ping_proc = subprocrunner.SubprocessRunner(self.__get_ping_command())
ping_proc.run()
return PingResult(ping_proc.stdout, ping_proc.stderr, ping_proc.returncode) | 0.006803 |
async def clear_state(self, turn_context: TurnContext):
"""
Clears any state currently stored in this state scope.
        NOTE: save_changes must be called in order for the cleared state to be persisted to the underlying store.
:param turn_context: The context object for this turn.
:return: None
"""
        if turn_context is None:
raise TypeError('BotState.clear_state(): turn_context cannot be None.')
# Explicitly setting the hash will mean IsChanged is always true. And that will force a Save.
cache_value = CachedBotState()
cache_value.hash = ''
turn_context.turn_state[self._context_service_key] = cache_value | 0.011019 |
def get_address(self, address_id, **params):
"""https://developers.coinbase.com/api/v2#show-addresss"""
return self.api_client.get_address(self.id, address_id, **params) | 0.010811 |
def DbDeleteDeviceAlias(self, argin):
""" Delete a device alias.
:param argin: device alias name
:type: tango.DevString
:return:
:rtype: tango.DevVoid """
self._log.debug("In DbDeleteDeviceAlias()")
self.db.delete_device_alias(argin) | 0.006897 |
def is_absolute(self):
"""Return True if xllcorner==yllcorner==0 indicating that points
in question are absolute.
"""
# FIXME(Ole): It is unfortunate that decision about whether points
# are absolute or not lies with the georeference object. Ross pointed this out.
# Moreover, this little function is responsible for a large fraction of the time
# using in data fitting (something in like 40 - 50%.
# This was due to the repeated calls to allclose.
# With the flag method fitting is much faster (18 Mar 2009).
# FIXME(Ole): HACK to be able to reuse data already cached (18 Mar 2009).
# Remove at some point
if not hasattr(self, 'absolute'):
self.absolute = num.allclose([self.xllcorner, self.yllcorner], 0)
# Return absolute flag
return self.absolute | 0.005701 |
def get_attribute_data(attr_ids, node_ids, **kwargs):
"""
For a given attribute or set of attributes, return all the resources and
resource scenarios in the network
"""
node_attrs = db.DBSession.query(ResourceAttr).\
options(joinedload_all('attr')).\
filter(ResourceAttr.node_id.in_(node_ids),
ResourceAttr.attr_id.in_(attr_ids)).all()
ra_ids = []
for ra in node_attrs:
ra_ids.append(ra.id)
resource_scenarios = db.DBSession.query(ResourceScenario).filter(ResourceScenario.resource_attr_id.in_(ra_ids)).options(joinedload('resourceattr')).options(joinedload_all('dataset.metadata')).order_by(ResourceScenario.scenario_id).all()
for rs in resource_scenarios:
if rs.dataset.hidden == 'Y':
try:
rs.dataset.check_read_permission(kwargs.get('user_id'))
except:
rs.dataset.value = None
db.DBSession.expunge(rs)
return node_attrs, resource_scenarios | 0.016231 |
def write(self, arg):
""" Write a string or bytes object to the buffer """
if isinstance(arg, str):
arg = arg.encode(self.encoding)
return self._buffer.write(arg) | 0.010101 |
def store(self, filename=None, location=None, path=None, container=None, region=None, access=None, base64decode=None):
"""
        Uploads and stores the current transformation as a Filelink
*returns* [Filestack.Filelink]
```python
filelink = transform.store()
```
"""
if path:
path = '"{}"'.format(path)
filelink_obj = self.add_transform_task('store', locals())
response = utils.make_call(filelink_obj.url, 'get')
if response.ok:
data = json.loads(response.text)
handle = re.match(r'(?:https:\/\/cdn\.filestackcontent\.com\/)(\w+)', data['url']).group(1)
return filestack.models.Filelink(handle, apikey=self.apikey, security=self.security)
else:
raise Exception(response.text) | 0.006039 |
def application_path(path):
"""
Join application project_dir and path
"""
from uliweb import application
return os.path.join(application.project_dir, path) | 0.005714 |
def set_attr_value(self, key, attr, value):
""" set the value of a given attribute for a given key
"""
idx = self._keys[key]
self._attrs[attr][idx].set(value) | 0.010526 |
def vectorize(self, token_list):
'''
        Vectorize the token list.
Args:
            token_list:   The list of tokens.
Returns:
[vector of token, vector of token, vector of token, ...]
'''
vector_list = [self.__collection.tf_idf(token, self.__collection) for token in token_list]
return vector_list | 0.013298 |
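The collection object is assumed here to be an nltk.text.TextCollection (a guess based on the tf_idf call signature); under that assumption, a standalone sketch of the per-token vectorization:

```python
from nltk.text import TextCollection

# Illustrative two-document corpus of pre-tokenized text.
corpus = TextCollection([['the', 'cat', 'sat'], ['the', 'dog', 'ran']])

# Mirrors the list comprehension above: one tf-idf score per token.
vector = [corpus.tf_idf(token, corpus) for token in ['the', 'cat']]
print(vector)  # 'the' occurs in every document, so its score is 0.0; 'cat' scores > 0
```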
def encrypt_stream(mode, in_stream, out_stream, block_size = BLOCK_SIZE, padding = PADDING_DEFAULT):
'Encrypts a stream of bytes from in_stream to out_stream using mode.'
encrypter = Encrypter(mode, padding = padding)
_feed_stream(encrypter, in_stream, out_stream, block_size) | 0.027682 |
def _refresh_state(self):
""" Refresh the job info. """
# DataFlow's DataflowPipelineResult does not refresh state, so we have to do it ourselves
# as a workaround.
self._runner_results._job = (
self._runner_results._runner.dataflow_client.get_job(self._runner_results.job_id()))
self._is_complete = self._runner_results.state in ['STOPPED', 'DONE', 'FAILED', 'CANCELLED']
self._fator_error = getattr(self._runner_results._runner, 'last_error_msg', None) | 0.010267 |
def _check_timeout(start_time, timeout):
    '''
    Raise a TimeoutError if more than ``timeout`` minutes have elapsed
    since ``start_time`` (a timestamp in milliseconds).
    Raises:
        salt.exceptions.TimeoutError: if the timeout has expired.
    '''
timeout_milisec = timeout * 60000
if timeout_milisec < (int(round(time.time() * 1000)) - start_time):
raise salt.exceptions.TimeoutError('Timeout expired.') | 0.004819 |
def knn_initialize(
X,
missing_mask,
verbose=False,
min_dist=1e-6,
max_dist_multiplier=1e6):
"""
Fill X with NaN values if necessary, construct the n_samples x n_samples
distance matrix and set the self-distance of each row to infinity.
Returns contents of X laid out in row-major, the distance matrix,
and an "effective infinity" which is larger than any entry of the
distance matrix.
"""
X_row_major = X.copy("C")
if missing_mask.sum() != np.isnan(X_row_major).sum():
# if the missing values have already been zero-filled need
# to put NaN's back in the data matrix for the distances function
X_row_major[missing_mask] = np.nan
D = all_pairs_normalized_distances(X_row_major)
D_finite_flat = D[np.isfinite(D)]
if len(D_finite_flat) > 0:
max_dist = max_dist_multiplier * max(1, D_finite_flat.max())
else:
max_dist = max_dist_multiplier
# set diagonal of distance matrix to a large value since we don't want
# points considering themselves as neighbors
np.fill_diagonal(D, max_dist)
D[D < min_dist] = min_dist # prevents 0s
D[D > max_dist] = max_dist # prevents infinities
return X_row_major, D, max_dist | 0.000792 |
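A hedged usage sketch, assuming knn_initialize and its all_pairs_normalized_distances helper are importable from the same package; the toy matrix is illustrative.

```python
import numpy as np

X = np.array([[1.0, 2.0, np.nan],
              [1.5, np.nan, 3.0],
              [np.nan, 2.5, 3.5]])
missing_mask = np.isnan(X)
X_zero_filled = np.where(missing_mask, 0.0, X)  # as a caller might pass it

X_row_major, D, effective_inf = knn_initialize(X_zero_filled, missing_mask)

# Every row's self-distance has been set to the "effective infinity".
assert np.all(np.diag(D) == effective_inf)
```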
def is_indexing(working_dir):
"""
Is the blockstack daemon synchronizing with the blockchain?
"""
indexing_path = get_indexing_lockfile(working_dir)
if os.path.exists( indexing_path ):
return True
else:
return False | 0.011765 |
def normalize_pred_string(predstr):
"""
Normalize the predicate string *predstr* to a conventional form.
This makes predicate strings more consistent by removing quotes and
the `_rel` suffix, and by lowercasing them.
Examples:
>>> normalize_pred_string('"_dog_n_1_rel"')
'_dog_n_1'
>>> normalize_pred_string('_dog_n_1')
'_dog_n_1'
"""
tokens = [t for t in split_pred_string(predstr)[:3] if t is not None]
if predstr.lstrip('\'"')[:1] == '_':
tokens = [''] + tokens
return '_'.join(tokens).lower() | 0.001742 |
def _move_consonant(self, letters: list, positions: List[int]) -> List[str]:
"""
Given a list of consonant positions, move the consonants according to certain
consonant syllable behavioral rules for gathering and grouping.
:param letters:
:param positions:
:return:
"""
for pos in positions:
previous_letter = letters[pos - 1]
consonant = letters[pos]
next_letter = letters[pos + 1]
if self._contains_vowels(next_letter) and self._starts_with_vowel(next_letter):
return string_utils.move_consonant_right(letters, [pos])
if self._contains_vowels(previous_letter) and self._ends_with_vowel(
previous_letter) and len(previous_letter) == 1:
return string_utils.move_consonant_left(letters, [pos])
if previous_letter + consonant in self.constants.ASPIRATES:
return string_utils.move_consonant_left(letters, [pos])
if consonant + next_letter in self.constants.ASPIRATES:
return string_utils.move_consonant_right(letters, [pos])
if next_letter[0] == consonant:
return string_utils.move_consonant_left(letters, [pos])
if consonant in self.constants.MUTES and next_letter[0] in self.constants.LIQUIDS:
return string_utils.move_consonant_right(letters, [pos])
if consonant in ['k', 'K'] and next_letter[0] in ['w', 'W']:
return string_utils.move_consonant_right(letters, [pos])
if self._contains_consonants(next_letter[0]) and self._starts_with_vowel(
previous_letter[-1]):
return string_utils.move_consonant_left(letters, [pos])
# fall through case
if self._contains_consonants(next_letter[0]):
return string_utils.move_consonant_right(letters, [pos])
return letters | 0.003553 |
def clear_option_value(self, opt_name):
""" Clear the stored option value (so the default will be used)
:param opt_name: option name
:type opt_name: str
"""
if not self.has_option(opt_name):
raise ValueError("Unknow option name (%s)" % opt_name)
self._options[opt_name].clear() | 0.005917 |
def __insert_wrapper(func):
"""Make sure the arguments given to the insert methods are correct"""
def check_func(self, key, new_item, instance=0):
if key not in self.keys():
raise KeyError("%s not a key in label" % (key))
if not isinstance(new_item, (list, OrderedMultiDict)):
raise TypeError("The new item must be a list or PVLModule")
if isinstance(new_item, OrderedMultiDict):
new_item = list(new_item)
return func(self, key, new_item, instance)
return check_func | 0.003413 |
def lock_online(self, comment=None):
"""
Executes a Lock-Online operation on the specified node
:param str comment: comment for audit
:raises NodeCommandFailed: cannot lock online
:return: None
"""
self.make_request(
NodeCommandFailed,
method='update',
resource='lock_online',
params={'comment': comment}) | 0.004878 |
def value(self):
"""return float Cumulative Distribution Function.
The return value represents a floating point number of the CDF of the
largest eigenvalue of a Wishart(n, p) evaluated at chisq_val.
"""
wishart = self._wishart_cdf
# Prepare variables for integration algorithm
A = self.A
p = self._gammainc_a
g = gamma(wishart.alpha_vec)
q_ind = np.arange(2 * wishart.n_min - 2)
q_vec = 2 * wishart.alpha + q_ind + 2
q = np.float_power(0.5, q_vec) * gamma(q_vec) * gammainc(q_vec, self._chisq_val)
# Perform integration (i.e. calculate Pfaffian CDF)
for i in xrange(wishart.n_min):
# TODO consider index tricks instead of iteration here
b = 0.5 * p[i] * p[i]
for j in xrange(i, wishart.n_min - 1):
b -= q[i + j] / (g[i] * g[j + 1])
A[j + 1, i] = p[i] * p[j + 1] - 2 * b
A[i, j + 1] = -A[j + 1, i]
if np.any(np.isnan(A)):
return 0
return np.sqrt(det(A)) | 0.002778 |
def fetch_items(self, category, **kwargs):
"""Fetch the pages
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
"""
from_date = kwargs['from_date']
reviews_api = kwargs['reviews_api']
mediawiki_version = self.client.get_version()
logger.info("MediaWiki version: %s", mediawiki_version)
if reviews_api:
if ((mediawiki_version[0] == 1 and mediawiki_version[1] >= 27) or mediawiki_version[0] > 1):
fetcher = self.__fetch_1_27(from_date)
else:
logger.warning("Reviews API only available in MediaWiki >= 1.27")
logger.warning("Using the Pages API instead")
fetcher = self.__fetch_pre1_27(from_date)
else:
fetcher = self.__fetch_pre1_27(from_date)
for page_reviews in fetcher:
yield page_reviews | 0.004149 |
def to_bool_alt(value):
'''
Convert python to zfs yes/no value
'''
value = from_bool_alt(value)
if isinstance(value, bool):
value = 'yes' if value else 'no'
elif value is None:
value = 'none'
return value | 0.004016 |