text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---
def mark_log(self, filename='system.log'):
"""
    Returns "a mark" to the current position of this node's Cassandra log.
    This is for use with the from_mark parameter of the watch_log_for_* methods,
    allowing you to watch the log from the position at which this method was called.
"""
log_file = os.path.join(self.get_path(), 'logs', filename)
if not os.path.exists(log_file):
return 0
with open(log_file) as f:
f.seek(0, os.SEEK_END)
return f.tell() | 0.007533 |
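A brief usage sketch for mark_log, assuming a ccm-style node object that also exposes the watch_log_for method referenced in the docstring; the triggered activity and the watched log message are purely illustrative.

# Hedged usage sketch; `node` and the watched log message are illustrative only.
mark = node.mark_log()                    # remember the current end of system.log
node.nodetool('flush')                    # do something that writes to the log
node.watch_log_for('Completed flushing', from_mark=mark)   # only matches lines written after `mark`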
def _smartos_zone_pkgsrc_data():
'''
SmartOS zone pkgsrc information
'''
# Provides:
# pkgsrcversion
# pkgsrcpath
grains = {
'pkgsrcversion': 'Unknown',
'pkgsrcpath': 'Unknown',
}
pkgsrcversion = re.compile('^release:\\s(.+)')
if os.path.isfile('/etc/pkgsrc_version'):
with salt.utils.files.fopen('/etc/pkgsrc_version', 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
match = pkgsrcversion.match(line)
if match:
grains['pkgsrcversion'] = match.group(1)
pkgsrcpath = re.compile('PKG_PATH=(.+)')
if os.path.isfile('/opt/local/etc/pkg_install.conf'):
with salt.utils.files.fopen('/opt/local/etc/pkg_install.conf', 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
match = pkgsrcpath.match(line)
if match:
grains['pkgsrcpath'] = match.group(1)
return grains | 0.001885 |
def toxml(self):
"""
Exports this object into a LEMS XML object
"""
# Probably name should be removed altogether until its usage is decided, see
# https://github.com/LEMS/LEMS/issues/4
# '''(' name = "{0}"'.format(self.name) if self.name else '') +\'''
return '<Unit' +\
(' symbol = "{0}"'.format(self.symbol) if self.symbol else '') +\
(' dimension = "{0}"'.format(self.dimension) if self.dimension else '') +\
(' power = "{0}"'.format(self.power) if self.power else '') +\
(' scale = "{0}"'.format(self.scale) if self.scale else '') +\
(' offset = "{0}"'.format(self.offset) if self.offset else '') +\
(' description = "{0}"'.format(self.description) if self.description else '') +\
'/>' | 0.008557 |
def rsolve(A, y):
"""
Robust solve Ax=y.
"""
from numpy_sugar.linalg import rsolve as _rsolve
try:
beta = _rsolve(A, y)
except LinAlgError:
msg = "Could not converge to solve Ax=y."
msg += " Setting x to zero."
warnings.warn(msg, RuntimeWarning)
beta = zeros(A.shape[0])
return beta | 0.002841 |
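rsolve above depends on numpy_sugar; the try/warn/fall-back-to-zeros pattern it wraps can be sketched with plain numpy as follows. This is a sketch of the same idea, not the library call itself.

import warnings
import numpy as np

def rsolve_fallback(A, y):
    # Same pattern as above, but using numpy's dense solver in place of
    # numpy_sugar.linalg.rsolve.
    try:
        return np.linalg.solve(A, y)
    except np.linalg.LinAlgError:
        warnings.warn("Could not converge to solve Ax=y. Setting x to zero.",
                      RuntimeWarning)
        return np.zeros(A.shape[0])

print(rsolve_fallback(np.eye(2), np.array([1.0, 2.0])))       # [1. 2.]
print(rsolve_fallback(np.array([[1.0, 2.0], [2.0, 4.0]]),     # singular -> warns,
                      np.array([1.0, 2.0])))                  # returns [0. 0.]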
def _getState(self, name, default=None):
"private wrapper around C{self.db.state.getState}"
d = self.getObjectId()
@d.addCallback
def get(objectid):
return self.db.state.getState(objectid, name, default)
return d | 0.007547 |
def wait(*coros_or_futures, limit=0, timeout=None, loop=None,
return_exceptions=False, return_when='ALL_COMPLETED'):
"""
Wait for the Futures and coroutine objects given by the sequence
futures to complete, with optional concurrency limit.
Coroutines will be wrapped in Tasks.
``timeout`` can be used to control the maximum number of seconds to
wait before returning. timeout can be an int or float.
If timeout is not specified or None, there is no limit to the wait time.
If ``return_exceptions`` is True, exceptions in the tasks are treated the
same as successful results, and gathered in the result list; otherwise,
the first raised exception will be immediately propagated to the
returned future.
``return_when`` indicates when this function should return.
It must be one of the following constants of the concurrent.futures module.
All futures must share the same event loop.
    This function is mostly compatible with the Python standard
``asyncio.wait()``.
Arguments:
*coros_or_futures (iter|list):
            an iterable collection yielding coroutines or futures.
limit (int):
optional concurrency execution limit. Use ``0`` for no limit.
timeout (int/float):
maximum number of seconds to wait before returning.
return_exceptions (bool):
exceptions in the tasks are treated the same as successful results,
instead of raising them.
return_when (str):
indicates when this function should return.
loop (asyncio.BaseEventLoop):
optional event loop to use.
*args (mixed):
            optional variadic arguments to pass to the coroutine functions.
Returns:
tuple: Returns two sets of Future: (done, pending).
Raises:
TypeError: in case of invalid coroutine object.
ValueError: in case of empty set of coroutines or futures.
TimeoutError: if execution takes more than expected.
Usage::
async def sum(x, y):
return x + y
done, pending = await paco.wait(
sum(1, 2),
sum(3, 4))
[task.result() for task in done]
# => [3, 7]
"""
# Support iterable as first argument for better interoperability
if len(coros_or_futures) == 1 and isiter(coros_or_futures[0]):
coros_or_futures = coros_or_futures[0]
# If no coroutines to schedule, return empty list
# Mimics asyncio behaviour.
if len(coros_or_futures) == 0:
raise ValueError('paco: set of coroutines/futures is empty')
# Create concurrent executor
pool = ConcurrentExecutor(limit=limit, loop=loop,
coros=coros_or_futures)
# Wait until all the tasks finishes
return (yield from pool.run(timeout=timeout,
return_when=return_when,
return_exceptions=return_exceptions)) | 0.000335 |
def phase_type(self, value):
'''compresses the waveform horizontally; one of
``"normal"``, ``"resync"``, ``"resync2"``'''
self._params.phase_type = value
self._overwrite_lock.disable() | 0.009259 |
def join(cls, splits):
"""
Join an array of ids into a compound id string
"""
segments = []
for split in splits:
segments.append('"{}",'.format(split))
if len(segments) > 0:
segments[-1] = segments[-1][:-1]
jsonString = '[{}]'.format(''.join(segments))
return jsonString | 0.005587 |
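A worked example of the output format (join is presumably bound as a classmethod; since cls is unused by the body, it is passed explicitly here):

print(join(None, ['4', '5', '6']))   # -> ["4","5","6"]
print(join(None, []))                # -> []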
def now(years=0, days=0, hours=0, minutes=0, seconds=0):
"""
:param years: int delta of years from now
:param days: int delta of days from now
:param hours: int delta of hours from now
:param minutes: int delta of minutes from now
:param seconds: float delta of seconds from now
:return: str of the now timestamp
"""
date_time = datetime.utcnow()
date_time += timedelta(days=days + years * 365, hours=hours,
minutes=minutes, seconds=seconds)
return datetime_to_str(date_time) | 0.001786 |
def Decompress(self, compressed_data):
"""Decompresses the compressed data.
Args:
compressed_data (bytes): compressed data.
Returns:
tuple(bytes, bytes): uncompressed data and remaining compressed data.
Raises:
BackEndError: if the zlib compressed stream cannot be decompressed.
"""
try:
uncompressed_data = self._zlib_decompressor.decompress(compressed_data)
remaining_compressed_data = getattr(
self._zlib_decompressor, 'unused_data', b'')
except zlib.error as exception:
raise errors.BackEndError((
'Unable to decompress zlib compressed stream with error: '
'{0!s}.').format(exception))
return uncompressed_data, remaining_compressed_data | 0.005376 |
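The unused_data attribute this wrapper relies on is standard zlib behaviour: once the end of the compressed stream is reached, any trailing bytes are kept there. A minimal standalone illustration using the zlib module directly:

import zlib

decomp = zlib.decompressobj()
data = decomp.decompress(zlib.compress(b'hello world') + b'trailing bytes')
print(data)                 # b'hello world'
print(decomp.unused_data)   # b'trailing bytes' -- what Decompress() returns as "remaining"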
class Page(QLExportable):
    ''' For multi-page files, e.g. a pdf preview '''
def __init__(self, filename, page_id):
self.id = page_id
super(Page, self).__init__(filename)
def export(self, export_format=ExportFormat.PNG):
pass | 0.016598 |
def subdivide(network, pores, shape, labels=[]):
r'''
    It trims the pores and replaces them with cubic networks of the given shape.
Parameters
----------
network : OpenPNM Network Object
pores : array_like
The first group of pores to be replaced
shape : array_like
The shape of cubic networks in the target locations
Notes
-----
- It works only for cubic networks.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 6, 5], spacing=0.001)
>>> pn.Np
150
>>> nano_pores = [2, 13, 14, 15]
>>> op.topotools.subdivide(network=pn, pores=nano_pores, shape=[4, 7, 3],
... labels='nano')
>>> pn.Np
482
'''
mro = network._mro()
if 'Cubic' not in mro:
raise Exception('Subdivide is only supported for Cubic Networks')
from openpnm.network import Cubic
pores = network._parse_indices(pores)
# Checks to find boundary pores in the selected pores
if 'pore.boundary' in network.labels():
if (sp.in1d(pores, network.pores('boundary'))).any():
raise Exception('boundary pores cannot be subdivided!')
if not hasattr(network, '_subdivide_flag'):
network._subdivide_flag = True
else:
raise Exception('The network has subdivided pores, so the method \
does not support another subdivision')
# Assigning right shape and division
if sp.size(shape) != 2 and sp.size(shape) != 3:
raise Exception('Subdivide not implemented for Networks other than 2D \
and 3D')
elif sp.size(shape) == 3 and 1 not in shape:
div = sp.array(shape, ndmin=1)
single_dim = None
else:
single_dim = sp.where(sp.array(network.shape) == 1)[0]
if sp.size(single_dim) == 0:
single_dim = None
if sp.size(shape) == 3:
div = sp.array(shape, ndmin=1)
else:
div = sp.zeros(3, dtype=sp.int32)
if single_dim is None:
dim = 2
else:
dim = single_dim
div[dim] = 1
div[-sp.array(div, ndmin=1, dtype=bool)] = sp.array(shape, ndmin=1)
# Creating small network and handling labels
networkspacing = network.spacing
new_netspacing = networkspacing/div
new_net = Cubic(shape=div, spacing=new_netspacing)
main_labels = ['left', 'right', 'front', 'back', 'top', 'bottom']
if single_dim is not None:
label_groups = sp.array([['front', 'back'],
['left', 'right'],
['top', 'bottom']])
non_single_labels = label_groups[sp.array([0, 1, 2]) != single_dim]
for l in main_labels:
new_net['pore.surface_' + l] = False
network['pore.surface_' + l] = False
if single_dim is None:
new_net['pore.surface_' + l][new_net.pores(labels=l)] = True
else:
for ind in [0, 1]:
loc = (non_single_labels[ind] == l)
temp_pores = new_net.pores(non_single_labels[ind][loc])
new_net['pore.surface_' + l][temp_pores] = True
old_coords = sp.copy(new_net['pore.coords'])
if labels == []:
labels = ['pore.subdivided_' + new_net.name]
for P in pores:
# Shifting the new network to the right location and attaching it to
# the main network
shift = network['pore.coords'][P] - networkspacing/2
new_net['pore.coords'] += shift
Pn = network.find_neighbor_pores(pores=P)
try:
Pn_new_net = network.pores(labels)
except KeyError:
Pn_new_net = []
Pn_old_net = Pn[~sp.in1d(Pn, Pn_new_net)]
Np1 = network.Np
extend(pore_coords=new_net['pore.coords'],
throat_conns=new_net['throat.conns'] + Np1,
labels=labels, network=network)
# Moving the temporary labels to the big network
for l in main_labels:
network['pore.surface_'+l][Np1:] = new_net['pore.surface_'+l]
# Stitching the old pores of the main network to the new extended pores
surf_pores = network.pores('surface_*')
surf_coord = network['pore.coords'][surf_pores]
for neighbor in Pn:
neighbor_coord = network['pore.coords'][neighbor]
dist = [round(sp.inner(neighbor_coord-x, neighbor_coord-x),
20) for x in surf_coord]
nearest_neighbor = surf_pores[dist == sp.amin(dist)]
if neighbor in Pn_old_net:
coplanar_labels = network.labels(pores=nearest_neighbor)
new_neighbors = network.pores(coplanar_labels,
mode='xnor')
# This might happen to the edge of the small network
if sp.size(new_neighbors) == 0:
labels = network.labels(pores=nearest_neighbor,
mode='xnor')
common_label = [l for l in labels if 'surface_' in l]
new_neighbors = network.pores(common_label)
elif neighbor in Pn_new_net:
new_neighbors = nearest_neighbor
connect_pores(network=network, pores1=neighbor,
pores2=new_neighbors, labels=labels)
# Removing temporary labels
for l in main_labels:
network['pore.surface_' + l] = False
new_net['pore.coords'] = sp.copy(old_coords)
label_faces(network=network)
for l in main_labels:
del network['pore.surface_'+l]
trim(network=network, pores=pores)
ws = network.project.workspace
ws.close_project(new_net.project) | 0.001038 |
def do_fish_complete(cli, prog_name):
"""Do the fish completion
Parameters
----------
cli : click.Command
The main click Command of the program
prog_name : str
The program name on the command line
Returns
-------
bool
True if the completion was successful, False otherwise
"""
commandline = os.environ['COMMANDLINE']
args = split_args(commandline)[1:]
if args and not commandline.endswith(' '):
incomplete = args[-1]
args = args[:-1]
else:
incomplete = ''
for item, help in get_choices(cli, prog_name, args, incomplete):
if help:
            echo("%s\t%s" % (item, re.sub(r'\s', ' ', help)))
else:
echo(item)
return True | 0.002635 |
def keyPressEvent(self, event):
"""
Listens for the left/right keys and the escape key to control
the slides.
:param event | <QtCore.Qt.QKeyEvent>
"""
if event.key() == QtCore.Qt.Key_Escape:
self.cancel()
elif event.key() == QtCore.Qt.Key_Left:
self.goBack()
elif event.key() == QtCore.Qt.Key_Right:
self.goForward()
elif event.key() == QtCore.Qt.Key_Home:
self.restart()
super(XWalkthroughWidget, self).keyPressEvent(event) | 0.006768 |
def str_extract(arr, pat, flags=0, expand=True):
r"""
Extract capture groups in the regex `pat` as columns in a DataFrame.
For each subject string in the Series, extract groups from the
first match of regular expression `pat`.
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
Flags from the ``re`` module, e.g. ``re.IGNORECASE``, that
modify regular expression matching for things like case,
spaces, etc. For more details, see :mod:`re`.
expand : bool, default True
If True, return DataFrame with one column per capture group.
If False, return a Series/Index if there is one capture group
or DataFrame if there are multiple capture groups.
.. versionadded:: 0.18.0
Returns
-------
DataFrame or Series or Index
A DataFrame with one row for each subject string, and one
column for each group. Any capture group names in regular
expression pat will be used for column names; otherwise
capture group numbers will be used. The dtype of each result
column is always object, even when no match is found. If
``expand=False`` and pat has only one capture group, then
return a Series (if subject is a Series) or Index (if subject
is an Index).
See Also
--------
extractall : Returns all matches (not just the first match).
Examples
--------
A pattern with two groups will return a DataFrame with two columns.
Non-matches will be NaN.
>>> s = pd.Series(['a1', 'b2', 'c3'])
>>> s.str.extract(r'([ab])(\d)')
0 1
0 a 1
1 b 2
2 NaN NaN
A pattern may contain optional groups.
>>> s.str.extract(r'([ab])?(\d)')
0 1
0 a 1
1 b 2
2 NaN 3
Named groups will become column names in the result.
>>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\d)')
letter digit
0 a 1
1 b 2
2 NaN NaN
A pattern with one group will return a DataFrame with one column
if expand=True.
>>> s.str.extract(r'[ab](\d)', expand=True)
0
0 1
1 2
2 NaN
A pattern with one group will return a Series if expand=False.
>>> s.str.extract(r'[ab](\d)', expand=False)
0 1
1 2
2 NaN
dtype: object
"""
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand:
return _str_extract_frame(arr._orig, pat, flags=flags)
else:
result, name = _str_extract_noexpand(arr._parent, pat, flags=flags)
return arr._wrap_result(result, name=name, expand=expand) | 0.000362 |
def reacher():
"""Configuration for MuJoCo's reacher task."""
locals().update(default())
# Environment
env = 'Reacher-v2'
max_length = 1000
steps = 5e6 # 5M
discount = 0.985
update_every = 60
return locals() | 0.044248 |
def map(cls, x, palette, limits, na_value=None, oob=censor):
"""
Map values to a continuous palette
Parameters
----------
x : array_like
Continuous values to scale
palette : callable ``f(x)``
        palette to use
    limits : array_like
        (min, max) limits of the scale, used to rescale ``x``
    na_value : object
Value to use for missing values.
oob : callable ``f(x)``
Function to deal with values that are
beyond the limits
Returns
-------
out : array_like
Values mapped onto a palette
"""
x = oob(rescale(x, _from=limits))
pal = palette(x)
try:
pal[pd.isnull(x)] = na_value
except TypeError:
pal = [v if not pd.isnull(v) else na_value for v in pal]
return pal | 0.002472 |
def calc_lfp_layer(self):
"""
Calculate the LFP from concatenated subpopulations residing in a
    certain layer, e.g. all L4E pops are summed, according to the `mapping_Yy`
attribute of the `hybridLFPy.Population` objects.
"""
LFPdict = {}
lastY = None
for Y, y in self.mapping_Yy:
if lastY != Y:
try:
LFPdict.update({Y : self.LFPdict[y]})
except KeyError:
pass
else:
try:
LFPdict[Y] += self.LFPdict[y]
except KeyError:
pass
lastY = Y
return LFPdict | 0.005722 |
def is_valid_image_extension(file_path):
"""is_valid_image_extension."""
valid_extensions = ['.jpeg', '.jpg', '.gif', '.png']
_, extension = os.path.splitext(file_path)
return extension.lower() in valid_extensions | 0.004367 |
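Two worked calls, assuming the function above is in scope:

print(is_valid_image_extension('/tmp/photo.JPG'))   # True  (extension check is case-insensitive)
print(is_valid_image_extension('notes.txt'))        # False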
def uncons_term(params, c):
"""
Description:
Computes an additional value for the objective function value
when used in an unconstrained optimization formulation.
Parameters:
params: all parameters for the Plackett-Luce mixture model (numpy ndarray)
c: constant multiplier scaling factor of the returned term
"""
return (c * ((np.sum(params[1:5]) - 1)**2)) + (c * ((np.sum(params[5:]) - 1)**2)) | 0.006652 |
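A small worked example of the penalty, assuming the function above and numpy are in scope; the parameter vector is hypothetical. Each block (params[1:5] and params[5:]) is pushed toward summing to 1, so only the second block contributes here:

import numpy as np

# Hypothetical parameters: params[1:5] sums to 1.0, params[5:] sums to 1.2.
params = np.array([0.5, 0.25, 0.25, 0.25, 0.25, 0.4, 0.4, 0.4])
print(uncons_term(params, c=100))   # 100*0**2 + 100*0.2**2 ~= 4.0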
def load_ds_ids_from_config(self):
"""Get the dataset ids from the config."""
ids = []
for dataset in self.datasets.values():
# xarray doesn't like concatenating attributes that are lists
# https://github.com/pydata/xarray/issues/2060
if 'coordinates' in dataset and \
isinstance(dataset['coordinates'], list):
dataset['coordinates'] = tuple(dataset['coordinates'])
# Build each permutation/product of the dataset
id_kwargs = []
for key in DATASET_KEYS:
val = dataset.get(key)
if key in ["wavelength", "modifiers"] and isinstance(val,
list):
# special case: wavelength can be [min, nominal, max]
# but is still considered 1 option
# it also needs to be a tuple so it can be used in
# a dictionary key (DatasetID)
id_kwargs.append((tuple(val), ))
elif key == "modifiers" and val is None:
# empty modifiers means no modifiers applied
id_kwargs.append((tuple(), ))
elif isinstance(val, (list, tuple, set)):
# this key has multiple choices
# (ex. 250 meter, 500 meter, 1000 meter resolutions)
id_kwargs.append(val)
elif isinstance(val, dict):
id_kwargs.append(val.keys())
else:
# this key only has one choice so make it a one
# item iterable
id_kwargs.append((val, ))
for id_params in itertools.product(*id_kwargs):
dsid = DatasetID(*id_params)
ids.append(dsid)
# create dataset infos specifically for this permutation
ds_info = dataset.copy()
for key in DATASET_KEYS:
if isinstance(ds_info.get(key), dict):
ds_info.update(ds_info[key][getattr(dsid, key)])
# this is important for wavelength which was converted
# to a tuple
ds_info[key] = getattr(dsid, key)
self.ids[dsid] = ds_info
return ids | 0.000837 |
def newTextLen(content, len):
"""Creation of a new text node with an extra parameter for the
content's length """
ret = libxml2mod.xmlNewTextLen(content, len)
if ret is None:raise treeError('xmlNewTextLen() failed')
return xmlNode(_obj=ret) | 0.011407 |
def eaSimpleConverge(population, toolbox, cxpb, mutpb, ngen, stats=None,
halloffame=None, callback=None, verbose=True):
"""This algorithm reproduce the simplest evolutionary algorithm as
presented in chapter 7 of [Back2000]_.
Modified to allow checking if there is no change for ngen, as a simple
rule for convergence. Interface is similar to eaSimple(). However, in
eaSimple, ngen is total number of iterations; in eaSimpleConverge, we
terminate only when the best is NOT updated for ngen iterations.
"""
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in population if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
if halloffame is not None:
halloffame.update(population)
record = stats.compile(population) if stats else {}
# Begin the generational process
gen = 1
best = (0,)
while True:
# Select the next generation individuals
offspring = toolbox.select(population, len(population))
# Vary the pool of individuals
offspring = varAnd(offspring, toolbox, cxpb, mutpb)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Update the hall of fame with the generated individuals
if halloffame is not None:
halloffame.update(offspring)
if callback is not None:
callback(halloffame[0], gen)
# Replace the current population by the offspring
population[:] = offspring
# Append the current generation statistics to the logbook
record = stats.compile(population) if stats else {}
current_best = record['max']
if gen % 20 == 0 and verbose:
print("Current iteration {0}: max_score={1}".
format(gen, current_best), file=sys.stderr)
if current_best > best:
best = current_best
updated = gen
gen += 1
if gen - updated > ngen:
break
return population | 0.000426 |
def find_time_base(self, gps):
'''work out time basis for the log - PX4 native'''
t = gps.GPSTime * 1.0e-6
self.timebase = t - self.px4_timebase | 0.011905 |
def room(self, name, participantIdentity=None, **kwargs):
"""
Create a <Room> element
:param name: Room name
:param participantIdentity: Participant identity when connecting to the Room
:param kwargs: additional attributes
:returns: <Room> element
"""
return self.nest(Room(name, participantIdentity=participantIdentity, **kwargs)) | 0.010076 |
def write_nowait(self, item):
"""
Write in the box in a non-blocking manner.
If the box is full, an exception is thrown. You should always check
for fullness with `full` or `wait_not_full` before calling this method.
:param item: An item.
"""
self._queue.put_nowait(item)
self._can_read.set()
if self._queue.full():
self._can_write.clear() | 0.004695 |
def _choi_to_chi(data, input_dim, output_dim):
"""Transform Choi representation to the Chi representation."""
num_qubits = int(np.log2(input_dim))
return _transform_to_pauli(data, num_qubits) | 0.004926 |
def format_line_context(filename, lineno, context=10):
'''
    Formats the line context for error rendering.
:param filename: the location of the file, within which the error occurred
:param lineno: the offending line number
:param context: number of lines of code to display before and after the
offending line.
'''
with open(filename) as f:
lines = f.readlines()
lineno = lineno - 1 # files are indexed by 1 not 0
if lineno > 0:
start_lineno = max(lineno - context, 0)
end_lineno = lineno + context
lines = [escape(l, True) for l in lines[start_lineno:end_lineno]]
i = lineno - start_lineno
lines[i] = '<strong>%s</strong>' % lines[i]
else:
lines = [escape(l, True) for l in lines[:context]]
msg = '<pre style="background-color:#ccc;padding:2em;">%s</pre>'
return msg % ''.join(lines) | 0.003279 |
def _get_tls_object(self, ssl_params):
"""
Return a TLS object to establish a secure connection to a server
"""
if ssl_params is None:
return None
if not ssl_params["verify"] and ssl_params["ca_certs"]:
self.warning(
"Incorrect configuration: trying to disable server certificate validation, "
"while also specifying a capath. No validation will be performed. Fix your "
"configuration to remove this warning"
)
validate = ssl.CERT_REQUIRED if ssl_params["verify"] else ssl.CERT_NONE
if ssl_params["ca_certs"] is None or os.path.isfile(ssl_params["ca_certs"]):
tls = ldap3.core.tls.Tls(
local_private_key_file=ssl_params["key"],
local_certificate_file=ssl_params["cert"],
ca_certs_file=ssl_params["ca_certs"],
version=ssl.PROTOCOL_SSLv23,
validate=validate,
)
elif os.path.isdir(ssl_params["ca_certs"]):
tls = ldap3.core.tls.Tls(
local_private_key_file=ssl_params["key"],
local_certificate_file=ssl_params["cert"],
ca_certs_path=ssl_params["ca_certs"],
version=ssl.PROTOCOL_SSLv23,
validate=validate,
)
else:
raise ConfigurationError(
'Invalid path {} for ssl_ca_certs: no such file or directory'.format(ssl_params['ca_certs'])
)
return tls | 0.003859 |
def print_tables(xmldoc, output, output_format, tableList = [], columnList = [],
round_floats = True, decimal_places = 2, format_links = True,
title = None, print_table_names = True, unique_rows = False,
row_span_columns = [], rspan_break_columns = []):
"""
Method to print tables in an xml file in other formats.
Input is an xmldoc, output is a file object containing the
tables.
@xmldoc: document to convert
@output: file object to write output to; if None, will write to stdout
@output_format: format to convert to
@tableList: only convert the listed tables. Default is
to convert all the tables found in the xmldoc. Tables
not converted will not be included in the returned file
object.
@columnList: only print the columns listed, in the order given.
This applies to all tables (if a table doesn't have a listed column, it's just
skipped). To specify a column in a specific table, use table_name:column_name.
Default is to print all columns.
    @round_floats: If turned on, will smart_round floats to the specified
number of places.
@format_links: If turned on, will convert any html hyperlinks to specified
output_format.
@decimal_places: If round_floats turned on, will smart_round to this
number of decimal places.
@title: Add a title to this set of tables.
@unique_rows: If two consecutive rows are exactly the same, will condense into
one row.
@print_table_names: If set to True, will print the name of each table
in the caption section.
@row_span_columns: For the columns listed, will
concatenate consecutive cells with the same values
into one cell that spans those rows. Default is to span no rows.
    @rspan_break_columns: Columns listed will prevent all cells
    from rowspanning across two rows in which values in the
    columns are different. Default is to have no break columns.
"""
# get the tables to convert
if tableList == []:
tableList = [tb.getAttribute("Name") for tb in xmldoc.childNodes[0].getElementsByTagName(u'Table')]
# set the output
if output is None:
output = sys.stdout
# get table bits
ttx, xtt, tx, xt, capx, xcap, rx, xr, cx, xc, rspx, xrsp, hlx, hxl, xhl = set_output_format( output_format )
# set the title if desired
if title is not None:
print >> output, "%s%s%s" %(ttx,str(title),xtt)
# cycle over the tables in the xmldoc
for table_name in tableList:
this_table = table.get_table(xmldoc, table_name)
if columnList == []:
col_names = [ col.getAttribute("Name").split(":")[-1]
for col in this_table.getElementsByTagName(u'Column') ]
else:
requested_columns = [col.split(':')[-1] for col in columnList if not (':' in col and col.split(':')[0] != table_name) ]
requested_columns = sorted(set(requested_columns), key=requested_columns.index)
actual_columns = [actual_column.getAttribute("Name").split(":")[-1]
for actual_column in this_table.getElementsByTagName(u'Column') ]
col_names = [col for col in requested_columns if col in actual_columns]
# get the relevant row_span/break column indices
rspan_indices = [ n for n,col in enumerate(col_names) if col in row_span_columns or ':'.join([table_name,col]) in row_span_columns ]
break_indices = [ n for n,col in enumerate(col_names) if col in rspan_break_columns or ':'.join([table_name,col]) in rspan_break_columns ]
# start the table and print table name
print >> output, tx
if print_table_names:
print >> output, "%s%s%s" %(capx, table_name, xcap)
print >> output, "%s%s%s%s%s" %(rx, cx, (xc+cx).join(format_header_cell(val) for val in col_names), xc, xr)
# format the data in the table
out_table = []
last_row = ''
for row in this_table:
out_row = [ str(format_cell( get_row_data(row, col_name),
round_floats = round_floats, decimal_places = decimal_places,
format_links = format_links, hlx = hlx, hxl = hxl, xhl = xhl ))
for col_name in col_names ]
if unique_rows and out_row == last_row:
continue
out_table.append(out_row)
last_row = out_row
rspan_count = {}
for mm, row in enumerate(out_table[::-1]):
this_row_idx = len(out_table) - (mm+1)
next_row_idx = this_row_idx - 1
            # check if it's ok to do row-span
rspan_ok = rspan_indices != [] and this_row_idx != 0
if rspan_ok:
for jj in break_indices:
rspan_ok = out_table[this_row_idx][jj] == out_table[next_row_idx][jj]
if not rspan_ok: break
# cycle over columns in the row setting row span values
for nn, val in enumerate(row):
# check if this cell should be spanned;
# if so, delete it, update rspan_count and go on to next cell
if rspan_ok and nn in rspan_indices:
if val == out_table[next_row_idx][nn]:
out_table[this_row_idx][nn] = ''
if (this_row_idx, nn) in rspan_count:
rspan_count[(next_row_idx,nn)] = rspan_count[(this_row_idx,nn)] + 1
del rspan_count[(this_row_idx,nn)]
else:
rspan_count[(next_row_idx,nn)] = 2
elif (this_row_idx, nn) in rspan_count:
out_table[this_row_idx][nn] = ''.join([rspx, str(rspan_count[(this_row_idx,nn)]), xrsp, str(val), xc])
else:
out_table[this_row_idx][nn] = ''.join([cx, str(val), xc])
continue
# format cell appropriately
if (this_row_idx, nn) in rspan_count:
out_table[this_row_idx][nn] = ''.join([rspx, str(rspan_count[(this_row_idx,nn)]), xrsp, str(val), xc])
else:
out_table[this_row_idx][nn] = ''.join([cx, str(val), xc])
# print the table to output
for row in out_table:
print >> output, "%s%s%s" % (rx, ''.join(row), xr)
# close the table and go on to the next
print >> output, xt | 0.014594 |
def _update_proxy(self, change):
""" An observer which sends the state change to the proxy.
"""
if change['type'] == 'event':
name = 'do_'+change['name']
if hasattr(self.proxy, name):
handler = getattr(self.proxy, name)
handler()
else:
super(WebView, self)._update_proxy(change) | 0.005277 |
def request_path(request):
"""Path component of request-URI, as defined by RFC 2965."""
url = request.get_full_url()
parts = urlsplit(url)
path = escape_path(parts.path)
if not path.startswith("/"):
# fix bad RFC 2396 absoluteURI
path = "/" + path
return path | 0.003344 |
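A hedged usage sketch, assuming escape_path (defined elsewhere in the same module) leaves already-safe characters untouched; the URL is illustrative:

import urllib.request

req = urllib.request.Request('http://example.com/docs/index.html?q=1')
print(request_path(req))   # -> '/docs/index.html'  (query string is not part of the path)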
def node(self, port, hub_address=("localhost", 4444)):
''' java -jar selenium-server.jar -role node -port 5555 -hub http://127.0.0.1:4444/grid/register/
@param port: listen port of selenium node
@param hub_address: hub address which node will connect to
'''
self._ip, self._port = hub_address
self.command = [self._conf["java_path"], "-jar", self._conf["jar_path"], "-port", str(port), "-role", "node", "-hub", "http://%s:%s/grid/register/" %(self._ip, self._port)]
return self | 0.014572 |
def native(self):
"""
The native Python datatype representation of this value
:return:
A byte string or None
"""
if self.contents is None:
return None
if self._parsed is not None:
return self._parsed[0].native
else:
return self.__bytes__() | 0.00578 |
def create_value(cls, prop_name, val, model=None): # @NoSelf
"""This is used to create a value to be assigned to a
property. Depending on the type of the value, different values
are created and returned. For example, for a list, a
ListWrapper is created to wrap it, and returned for the
assignment. model is different from None when the value is
changed (a model exists). Otherwise, during property creation
model is None"""
if isinstance(val, tuple):
# this might be a class instance to be wrapped
# (thanks to Tobias Weber for
# providing a bug fix to avoid TypeError (in 1.99.1)
if len(val) == 3:
try:
wrap_instance = isinstance(val[1], val[0]) and \
(isinstance(val[2], tuple) or
isinstance(val[2], list))
except TypeError:
pass # not recognized, it must be another type of tuple
else:
if wrap_instance:
res = wrappers.ObsUserClassWrapper(val[1], val[2])
if model:
res.__add_model__(model, prop_name)
return res
elif isinstance(val, list):
res = wrappers.ObsListWrapper(val)
if model:
res.__add_model__(model, prop_name)
return res
elif isinstance(val, set):
res = wrappers.ObsSetWrapper(val)
if model:
res.__add_model__(model, prop_name)
return res
elif isinstance(val, dict):
res = wrappers.ObsMapWrapper(val)
if model:
res.__add_model__(model, prop_name)
return res
return val | 0.001083 |
def _transpose(cls, char):
"""Convert unicode char to something similar to it."""
try:
loc = ord(char) - 65
if loc < 0 or loc > 56:
return char
return cls.UNICODE_MAP[loc]
except UnicodeDecodeError:
return char | 0.006711 |
def get_nonmatching_blocks(matching_blocks):
"""Given a list of matching blocks, output the gaps between them.
Non-matches have the format (alo, ahi, blo, bhi). This specifies two index
ranges, one in the A sequence, and one in the B sequence.
"""
i = j = 0
for match in matching_blocks:
a, b, size = match
yield (i, a, j, b)
i = a + size
j = b + size | 0.002451 |
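A short worked example feeding it difflib's matching blocks (get_matching_blocks already appends the terminating zero-length match, so the trailing gap is emitted as well):

import difflib

a, b = 'kitten', 'sitting'
matches = difflib.SequenceMatcher(None, a, b).get_matching_blocks()
for alo, ahi, blo, bhi in get_nonmatching_blocks(matches):
    print(repr(a[alo:ahi]), '<->', repr(b[blo:bhi]))
# 'k' <-> 's'
# 'e' <-> 'i'
# ''  <-> 'g'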
def _create_update_expression():
""" Create the grammar for an update expression """
ine = (
Word("if_not_exists")
+ Suppress("(")
+ var
+ Suppress(",")
+ var_val
+ Suppress(")")
)
list_append = (
Word("list_append")
+ Suppress("(")
+ var_val
+ Suppress(",")
+ var_val
+ Suppress(")")
)
fxn = Group(ine | list_append).setResultsName("set_function")
# value has to come before var to prevent parsing TRUE/FALSE as variables
path = value | fxn | var
set_val = (path + oneOf("+ -") + path) | path
set_cmd = Group(var + Suppress("=") + set_val)
set_expr = (Suppress(upkey("set")) + delimitedList(set_cmd)).setResultsName(
"set_expr"
)
add_expr = (
Suppress(upkey("add")) + delimitedList(Group(var + value))
).setResultsName("add_expr")
delete_expr = (
Suppress(upkey("delete")) + delimitedList(Group(var + value))
).setResultsName("delete_expr")
remove_expr = (Suppress(upkey("remove")) + delimitedList(var)).setResultsName(
"remove_expr"
)
return OneOrMore(set_expr | add_expr | delete_expr | remove_expr).setResultsName(
"update"
) | 0.003213 |
def _convert_from_pandas(self, pdf, schema, timezone):
"""
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
:return list of records
"""
if timezone is not None:
from pyspark.sql.types import _check_series_convert_timestamps_tz_local
copied = False
if isinstance(schema, StructType):
for field in schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
if s is not pdf[field.name]:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[field.name] = s
else:
for column, series in pdf.iteritems():
s = _check_series_convert_timestamps_tz_local(series, timezone)
if s is not series:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[column] = s
# Convert pandas.DataFrame to list of numpy records
np_records = pdf.to_records(index=False)
# Check if any columns need to be fixed for Spark to infer properly
if len(np_records) > 0:
record_dtype = self._get_numpy_record_dtype(np_records[0])
if record_dtype is not None:
return [r.astype(record_dtype).tolist() for r in np_records]
# Convert list of numpy records to python lists
return [r.tolist() for r in np_records] | 0.004247 |
def create_zone(zone, private=False, vpc_id=None, vpc_region=None, region=None,
key=None, keyid=None, profile=None):
'''
Create a Route53 hosted zone.
.. versionadded:: 2015.8.0
zone
DNS zone to create
private
True/False if the zone will be a private zone
vpc_id
VPC ID to associate the zone to (required if private is True)
vpc_region
VPC Region (required if private is True)
region
region endpoint to connect to
key
AWS key
keyid
AWS keyid
profile
AWS pillar profile
CLI Example::
salt myminion boto_route53.create_zone example.org
'''
if region is None:
region = 'universal'
if private:
if not vpc_id or not vpc_region:
msg = 'vpc_id and vpc_region must be specified for a private zone'
raise SaltInvocationError(msg)
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
_zone = conn.get_zone(zone)
if _zone:
return False
conn.create_zone(zone, private_zone=private, vpc_id=vpc_id,
vpc_region=vpc_region)
return True | 0.00084 |
def websocket_safe_read(self):
"""Returns data if available, otherwise ''. Newlines indicate multiple messages """
data = ''
while True:
try:
data += '{0}\n'.format(self.websocket.recv())
except WebSocketException as e:
if isinstance(e, WebSocketConnectionClosedException):
logger.warning('lost websocket connection, try to reconnect now')
else:
logger.warning('websocket exception: %s', e)
self.reconnect()
except Exception as e:
if isinstance(e, SSLError) and e.errno == 2:
pass
else:
logger.warning('Exception in websocket_safe_read: %s', e)
return data.rstrip() | 0.004896 |
def send(self, data):
"""
:param data:
:type data: bytearray | bytes
:return:
:rtype:
"""
data = bytes(data) if type(data) is not bytes else data
self._wa_noiseprotocol.send(data) | 0.00823 |
def to_fastq_apipe_cl(sdf_file, start=None, end=None):
"""Return a command lines to provide streaming fastq input.
For paired end, returns a forward and reverse command line. For
single end returns a single command line and None for the pair.
"""
cmd = ["rtg", "sdf2fastq", "--no-gzip", "-o", "-"]
if start is not None:
cmd += ["--start-id=%s" % start]
if end is not None:
cmd += ["--end-id=%s" % end]
if is_paired(sdf_file):
out = []
for ext in ["left", "right"]:
out.append("<(%s)" % _rtg_cmd(cmd + ["-i", os.path.join(sdf_file, ext)]))
return out
else:
cmd += ["-i", sdf_file]
return ["<(%s)" % _rtg_cmd(cmd), None] | 0.002766 |
def add_or_update(self, app_id, value):
'''
    Adding or updating the evaluation.
:param app_id: the ID of the post.
:param value: the evaluation
:return: in JSON format.
'''
MEvaluation.add_or_update(self.userinfo.uid, app_id, value)
out_dic = {
'eval0': MEvaluation.app_evaluation_count(app_id, 0),
'eval1': MEvaluation.app_evaluation_count(app_id, 1)
}
return json.dump(out_dic, self) | 0.004073 |
def send(self, command):
"Send rcon command to server"
if self.secure_rcon == self.RCON_NOSECURE:
self.sock.send(rcon_nosecure_packet(self.password, command))
elif self.secure_rcon == self.RCON_SECURE_TIME:
self.sock.send(rcon_secure_time_packet(self.password, command))
elif self.secure_rcon == self.RCON_SECURE_CHALLENGE:
challenge = self.getchallenge()
self.sock.send(rcon_secure_challenge_packet(self.password,
challenge, command))
else:
raise ValueError("Bad value of secure_rcon") | 0.003115 |
def prune(self, depth=0):
""" Removes all nodes with less or equal links than depth.
"""
for n in list(self.nodes):
if len(n.links) <= depth:
self.remove_node(n.id) | 0.009259 |
def timing(self, stat, value, tags=None):
"""Report a timing."""
self._log('timing', stat, value, tags) | 0.016807 |
def unpackb(packed, **kwargs):
"""
Unpack an object from `packed`.
Raises `ExtraData` when `packed` contains extra bytes.
See :class:`Unpacker` for options.
"""
unpacker = Unpacker(None, **kwargs)
unpacker.feed(packed)
try:
ret = unpacker._unpack()
except OutOfData:
raise UnpackValueError("Data is not enough.")
if unpacker._got_extradata():
raise ExtraData(ret, unpacker._get_extradata())
return ret | 0.002128 |
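This appears to be the pure-Python fallback bundled with the msgpack package; assuming that package is installed, the public API behaves as follows:

import msgpack

packed = msgpack.packb([1, 2, 3])
print(msgpack.unpackb(packed))         # [1, 2, 3]

try:
    msgpack.unpackb(packed + b'\x00')  # trailing bytes beyond one object
except msgpack.ExtraData as exc:
    print(exc.unpacked, exc.extra)     # [1, 2, 3] b'\x00'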
def expanded_indexer(key, ndim):
"""Given a key for indexing an ndarray, return an equivalent key which is a
tuple with length equal to the number of dimensions.
The expansion is done by replacing all `Ellipsis` items with the right
number of full slices and then padding the key with full slices so that it
reaches the appropriate dimensionality.
"""
if not isinstance(key, tuple):
# numpy treats non-tuple keys equivalent to tuples of length 1
key = (key,)
new_key = []
# handling Ellipsis right is a little tricky, see:
# http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
found_ellipsis = False
for k in key:
if k is Ellipsis:
if not found_ellipsis:
new_key.extend((ndim + 1 - len(key)) * [slice(None)])
found_ellipsis = True
else:
new_key.append(slice(None))
else:
new_key.append(k)
if len(new_key) > ndim:
raise IndexError('too many indices')
new_key.extend((ndim - len(new_key)) * [slice(None)])
return tuple(new_key) | 0.000878 |
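A worked example, assuming the function above is in scope; the expanded key indexes exactly like the original one:

import numpy as np

x = np.zeros((2, 3, 4, 5))
key = expanded_indexer((0, Ellipsis, 1), x.ndim)
print(key)                                # (0, slice(None, None, None), slice(None, None, None), 1)
print(x[key].shape, x[0, ..., 1].shape)   # (3, 4) (3, 4)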
def parse_dict_strings(code):
"""Generator of elements of a dict that is given in the code string
Parsing is shallow, i.e. all content is yielded as strings
Parameters
----------
code: String
\tString that contains a dict
"""
i = 0
level = 0
chunk_start = 0
curr_paren = None
for i, char in enumerate(code):
if char in ["(", "[", "{"] and curr_paren is None:
level += 1
elif char in [")", "]", "}"] and curr_paren is None:
level -= 1
elif char in ['"', "'"]:
if curr_paren == char:
curr_paren = None
elif curr_paren is None:
curr_paren = char
if level == 0 and char in [':', ','] and curr_paren is None:
yield code[chunk_start: i].strip()
chunk_start = i + 1
yield code[chunk_start:i + 1].strip() | 0.001122 |
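A traced example, assuming the generator above is in scope. Splitting happens only on top-level ':' and ',', so nested containers stay intact; the (hypothetical) input here is the body of a dict literal without the enclosing braces, since an outer '{' raises the nesting level above 0 and suppresses all splits:

body = "'a': 1, 'b': [2, 3]"        # dict body without the enclosing braces
print(list(parse_dict_strings(body)))
# ["'a'", '1', "'b'", '[2, 3]']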
def process(self, batch, device=None):
""" Process a list of examples to create a torch.Tensor.
Pad, numericalize, and postprocess a batch and create a tensor.
Args:
batch (list(object)): A list of object from a batch of examples.
Returns:
torch.autograd.Variable: Processed object given the input
and custom postprocessing Pipeline.
"""
padded = self.pad(batch)
tensor = self.numericalize(padded, device=device)
return tensor | 0.003788 |
def _load_general(data, targets):
"""Load a list of arrays into a list of arrays specified by slices."""
for d_src, d_targets in zip(data, targets):
if isinstance(d_targets, nd.NDArray):
d_src.copyto(d_targets)
else:
assert d_targets[-1][0].stop == d_src.shape[0], \
"Batch size miss match. Expected %d, got %d"%( \
d_targets[-1][0].stop, d_src.shape[0])
for slice_idx, d_dst in d_targets:
d_src[slice_idx].copyto(d_dst) | 0.005639 |
def build_from_token_counts(self,
token_counts,
min_count,
num_iterations=4,
reserved_tokens=None,
max_subtoken_length=None):
"""Train a SubwordTextEncoder based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer. how many iterations of refinement.
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
Raises:
ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
is not clear what the space is being reserved for, or when it will be
filled in.
"""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
else:
# There is not complete freedom in replacing RESERVED_TOKENS.
for default, proposed in zip(RESERVED_TOKENS, reserved_tokens):
if default != proposed:
raise ValueError("RESERVED_TOKENS must be a prefix of "
"reserved_tokens.")
# Initialize the alphabet. Note, this must include reserved tokens or it can
# result in encoding failures.
alphabet_tokens = chain(six.iterkeys(token_counts),
[native_to_unicode(t) for t in reserved_tokens])
self._init_alphabet_from_tokens(alphabet_tokens)
# Bootstrap the initial list of subtokens with the characters from the
# alphabet plus the escaping characters.
self._init_subtokens_from_list(list(self._alphabet),
reserved_tokens=reserved_tokens)
# We build iteratively. On each iteration, we segment all the words,
# then count the resulting potential subtokens, keeping the ones
# with high enough counts for our new vocabulary.
if min_count < 1:
min_count = 1
for i in range(num_iterations):
tf.logging.info("Iteration {0}".format(i))
# Collect all substrings of the encoded token that break along current
# subtoken boundaries.
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
iter_start_time = time.time()
escaped_token = _escape_token(token, self._alphabet)
subtokens = self._escaped_token_to_subtoken_strings(escaped_token)
start = 0
for subtoken in subtokens:
last_position = len(escaped_token) + 1
if max_subtoken_length is not None:
last_position = min(last_position, start + max_subtoken_length)
for end in range(start + 1, last_position):
new_subtoken = escaped_token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
iter_time_secs = time.time() - iter_start_time
if iter_time_secs > 0.1:
tf.logging.info(u"Processing token [{0}] took {1} seconds, consider "
"setting Text2TextProblem.max_subtoken_length to a "
"smaller value.".format(token, iter_time_secs))
# Array of sets of candidate subtoken strings, by length.
len_to_subtoken_strings = []
for subtoken_string, count in six.iteritems(subtoken_counts):
lsub = len(subtoken_string)
if count >= min_count:
while len(len_to_subtoken_strings) <= lsub:
len_to_subtoken_strings.append(set())
len_to_subtoken_strings[lsub].add(subtoken_string)
# Consider the candidates longest to shortest, so that if we accept
# a longer subtoken string, we can decrement the counts of its prefixes.
new_subtoken_strings = []
for lsub in range(len(len_to_subtoken_strings) - 1, 0, -1):
subtoken_strings = len_to_subtoken_strings[lsub]
for subtoken_string in subtoken_strings:
count = subtoken_counts[subtoken_string]
if count >= min_count:
# Exclude alphabet tokens here, as they must be included later,
# explicitly, regardless of count.
if subtoken_string not in self._alphabet:
new_subtoken_strings.append((count, subtoken_string))
for l in range(1, lsub):
subtoken_counts[subtoken_string[:l]] -= count
# Include the alphabet explicitly to guarantee all strings are encodable.
new_subtoken_strings.extend((subtoken_counts.get(a, 0), a)
for a in self._alphabet)
new_subtoken_strings.sort(reverse=True)
# Reinitialize to the candidate vocabulary.
new_subtoken_strings = [subtoken for _, subtoken in new_subtoken_strings]
if reserved_tokens:
escaped_reserved_tokens = [
_escape_token(native_to_unicode(t), self._alphabet)
for t in reserved_tokens
]
new_subtoken_strings = escaped_reserved_tokens + new_subtoken_strings
self._init_subtokens_from_list(new_subtoken_strings)
tf.logging.info("vocab_size = %d" % self.vocab_size) | 0.008117 |
def receive_offer(self, pkt):
"""Receive offer on SELECTING state."""
logger.debug("C2. Received OFFER?, in SELECTING state.")
if isoffer(pkt):
logger.debug("C2: T, OFFER received")
self.offers.append(pkt)
if len(self.offers) >= MAX_OFFERS_COLLECTED:
logger.debug("C2.5: T, raise REQUESTING.")
self.select_offer()
raise self.REQUESTING()
logger.debug("C2.5: F, raise SELECTING.")
raise self.SELECTING() | 0.003745 |
def _ep_need_close(self):
"""The remote has closed its end of the endpoint."""
LOG.debug("Connection remotely closed")
if self._handler:
cond = self._pn_connection.remote_condition
with self._callback_lock:
self._handler.connection_remote_closed(self, cond) | 0.006231 |
def prompt_yes_or_no(message):
""" prompt_yes_or_no: Prompt user to reply with a y/n response
        Args: message (str) the prompt to display
        Returns: bool True for a "y" response, False for "n"
"""
user_input = input("{} [y/n]:".format(message)).lower()
if user_input.startswith("y"):
return True
elif user_input.startswith("n"):
return False
else:
return prompt_yes_or_no(message) | 0.002703 |
def stop(self, api=None):
"""
Stop automation run.
:param api: sevenbridges Api instance.
:return: AutomationRun object
"""
api = api or self._API
return api.post(
url=self._URL['actions'].format(
id=self.id, action=AutomationRunActions.STOP
)
).content | 0.005587 |
def print_table(graph, tails, node_id_map):
"""Print out a table of nodes and the blocks they have at each block height
starting with the common ancestor."""
node_count = len(tails)
# Get the width of the table columns
num_col_width = max(
floor(log(max(get_heights(tails)), 10)) + 1,
len("NUM"))
node_col_width = max(
floor(log(node_count, 10)) + 1,
8)
# Construct the output format string
format_str = ''
format_str += '{:<' + str(num_col_width) + '} '
for _ in range(node_count):
format_str += '{:<' + str(node_col_width) + '} '
nodes_header = ["NODE " + str(node_id_map[i]) for i in range(node_count)]
header = format_str.format("NUM", *nodes_header)
print(header)
print('-' * len(header))
prev_block_num = -1
node_list = [''] * node_count
for block_num, _, siblings in graph.walk():
if block_num != prev_block_num:
# Need to skip the first one
if prev_block_num != -1:
print(format_str.format(prev_block_num, *node_list))
node_list.clear()
node_list.extend([''] * node_count)
prev_block_num = block_num
for block_id, node_ids in siblings.items():
for node_id in node_ids:
node_list[node_id] = block_id[:8]
# Print the last one
print(format_str.format(prev_block_num, *node_list)) | 0.000701 |
def reward_proximity(self):
"""
Add a wall proximity reward
"""
if not 'proximity' in self.mode:
return
mode = self.mode['proximity']
# Calculate proximity reward
reward = 0
for sensor in self.player.sensors:
if sensor.sensed_type == 'wall':
reward += sensor.proximity_norm()
else:
reward += 1
reward /= len(self.player.sensors)
#reward = min(1.0, reward * 2)
reward = min(1.0, reward * reward)
# TODO: Configurable bonus reward threshold. Pass extra args to `__test_cond`?
#if mode and mode and reward > 0.75 and self.__test_cond(mode):
if mode and mode and self.__test_cond(mode):
# Apply bonus
reward *= mode['reward']
self.player.stats['reward'] += reward | 0.006865 |
def _extract_next_filename(self):
"""
changes metadata!
"""
self.ensure_metadata()
metadata, body = self[-1]
metadata['section'] = sanitize_section(metadata['section'])
metadata['root'] = root
path = "{root}/Misc/NEWS.d/next/{section}/{date}.bpo-{bpo}.{nonce}.rst".format_map(metadata)
for name in "root section date bpo nonce".split():
del metadata[name]
return path | 0.006536 |
def write_passes(self, outfile, rows, packed=False):
"""
Write a PNG image to the output file.
Most users are expected to find the :meth:`write` or
:meth:`write_array` method more convenient.
The rows should be given to this method in the order that
they appear in the output file. For straightlaced images,
this is the usual top to bottom ordering, but for interlaced
images the rows should have already been interlaced before
passing them to this function.
`rows` should be an iterable that yields each row. When
`packed` is ``False`` the rows should be in boxed row flat pixel
format; when `packed` is ``True`` each row should be a packed
sequence of bytes.
"""
# Try to optimize
if not packed:
if self.greyscale == 'try':
rows1, rows2 = tee(rows)
greyrows = try_greyscale(rows1, self.alpha)
if greyrows is not False:
rows = greyrows
self.greyscale = True
self.planes -= 2
else:
self.greyscale = False
rows = rows2
if not self.palette:
# No palette, check for rescale
targetbitdepth = None
srcbitdepth = self.bitdepth
if self.alpha or not self.greyscale:
if self.bitdepth not in (8, 16):
targetbitdepth = (8, 16)[self.bitdepth > 8]
else:
assert self.greyscale
assert not self.alpha
if self.bitdepth not in (1, 2, 4, 8, 16):
if self.bitdepth > 8:
targetbitdepth = 16
elif self.bitdepth == 3:
targetbitdepth = 4
else:
assert self.bitdepth in (5, 6, 7)
targetbitdepth = 8
if targetbitdepth:
if packed:
raise Error("writing packed pixels not suitable for"
" bit depth %d" % self.bitdepth)
self.bitdepth = targetbitdepth
factor = \
float(2**targetbitdepth - 1) / float(2**srcbitdepth - 1)
def scalerow(inrows):
"""Rescale all pixels"""
for row in inrows:
yield [int(round(factor * x)) for x in row]
rows = scalerow(rows)
self.write_idat(outfile, self.comp_idat(self.idat(rows, packed)))
return self.irows | 0.00075 |
def integer_partition(size: int, nparts: int) -> Iterator[List[List[int]]]:
""" Partition a list of integers into a list of partitions """
for part in algorithm_u(range(size), nparts):
yield part | 0.004739 |
def enable_device(self):
"""
re-enable the connected device and allow user activity in device again
:return: bool
"""
cmd_response = self.__send_command(const.CMD_ENABLEDEVICE)
if cmd_response.get('status'):
self.is_enabled = True
return True
else:
raise ZKErrorResponse("Can't enable device") | 0.005181 |
def sigres_path(self):
"""Absolute path of the SIGRES file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._sigres_path
except AttributeError:
path = self.outdir.has_abiext("SIGRES")
if path: self._sigres_path = path
return path | 0.010724 |
def create(self, phone_number, sms_capability, account_sid=values.unset,
friendly_name=values.unset, unique_name=values.unset,
cc_emails=values.unset, sms_url=values.unset,
sms_method=values.unset, sms_fallback_url=values.unset,
sms_fallback_method=values.unset, status_callback_url=values.unset,
status_callback_method=values.unset,
sms_application_sid=values.unset, address_sid=values.unset,
email=values.unset, verification_type=values.unset,
verification_document_sid=values.unset):
"""
Create a new HostedNumberOrderInstance
:param unicode phone_number: An E164 formatted phone number.
:param bool sms_capability: Specify SMS capability to host.
:param unicode account_sid: Account Sid.
:param unicode friendly_name: A human readable description of this resource.
:param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder.
:param unicode cc_emails: A list of emails.
:param unicode sms_url: SMS URL.
:param unicode sms_method: SMS Method.
:param unicode sms_fallback_url: SMS Fallback URL.
:param unicode sms_fallback_method: SMS Fallback Method.
:param unicode status_callback_url: Status Callback URL.
:param unicode status_callback_method: Status Callback Method.
:param unicode sms_application_sid: SMS Application Sid.
:param unicode address_sid: Address sid.
:param unicode email: Email.
:param HostedNumberOrderInstance.VerificationType verification_type: Verification Type.
:param unicode verification_document_sid: Verification Document Sid
:returns: Newly created HostedNumberOrderInstance
:rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderInstance
"""
data = values.of({
'PhoneNumber': phone_number,
'SmsCapability': sms_capability,
'AccountSid': account_sid,
'FriendlyName': friendly_name,
'UniqueName': unique_name,
'CcEmails': serialize.map(cc_emails, lambda e: e),
'SmsUrl': sms_url,
'SmsMethod': sms_method,
'SmsFallbackUrl': sms_fallback_url,
'SmsFallbackMethod': sms_fallback_method,
'StatusCallbackUrl': status_callback_url,
'StatusCallbackMethod': status_callback_method,
'SmsApplicationSid': sms_application_sid,
'AddressSid': address_sid,
'Email': email,
'VerificationType': verification_type,
'VerificationDocumentSid': verification_document_sid,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return HostedNumberOrderInstance(self._version, payload, ) | 0.005102 |
def delete(domain, key, user=None):
'''
Delete a default from the system
CLI Example:
.. code-block:: bash
salt '*' macdefaults.delete com.apple.CrashReporter DialogType
salt '*' macdefaults.delete NSGlobalDomain ApplePersistence
domain
The name of the domain to delete from
key
The key of the given domain to delete
user
The user to delete the defaults with
'''
cmd = 'defaults delete "{0}" "{1}"'.format(domain, key)
return __salt__['cmd.run_all'](cmd, runas=user, output_loglevel='debug') | 0.001727 |
def clear_to_reset(self, config_vars):
"""Clear all volatile information across a reset."""
self._logger.info("Config vars in sensor log reset: %s", config_vars)
super(SensorLogSubsystem, self).clear_to_reset(config_vars)
self.storage.destroy_all_walkers()
self.dump_walker = None
if config_vars.get('storage_fillstop', False):
self._logger.debug("Marking storage log fill/stop")
self.storage.set_rollover('storage', False)
if config_vars.get('streaming_fillstop', False):
self._logger.debug("Marking streaming log fill/stop")
self.storage.set_rollover('streaming', False) | 0.002941 |
def get_tasks(self, state=Task.ANY_MASK):
"""
Returns a list of Task objects with the given state.
:type state: integer
:param state: A bitmask of states.
:rtype: list[Task]
:returns: A list of tasks.
"""
return [t for t in Task.Iterator(self.task_tree, state)] | 0.006098 |
def update_history(self, it, j=0, M=None, **kwargs):
"""Add the current state for all kwargs to the history
"""
# Create a new entry in the history for new variables (if they don't exist)
if not np.any([k in self.history[j] for k in kwargs]):
for k in kwargs:
if M is None or M == 0:
self.history[j][k] = [[]]
else:
self.history[j][k] = [[] for m in range(M)]
"""
# Check that the variables have been updated once per iteration
elif np.any([[len(h)!=it+self.offset for h in self.history[j][k]] for k in kwargs.keys()]):
for k in kwargs.keys():
for n,h in enumerate(self.history[j][k]):
if len(h) != it+self.offset:
err_str = "At iteration {0}, {1}[{2}] already has {3} entries"
raise Exception(err_str.format(it, k, n, len(h)-self.offset))
"""
# Add the variables to the history
for k,v in kwargs.items():
if M is None or M == 0:
self._store_variable(j, k, 0, v)
else:
for m in range(M):
self._store_variable(j, k, m, v[m]) | 0.00556 |
def reindex(self, kdims=[], force=False):
"""Reindexes object dropping static or supplied kdims
Creates a new object with a reordered or reduced set of key
dimensions. By default drops all non-varying key dimensions.
Reducing the number of key dimensions will discard information
from the keys. All data values are accessible in the newly
created object as the new labels must be sufficient to address
each value uniquely.
Args:
kdims (optional): New list of key dimensions after reindexing
force (bool, optional): Whether to drop non-unique items
Returns:
Reindexed object
"""
old_kdims = [d.name for d in self.kdims]
if not isinstance(kdims, list):
kdims = [kdims]
elif not len(kdims):
kdims = [d for d in old_kdims
if not len(set(self.dimension_values(d))) == 1]
indices = [self.get_dimension_index(el) for el in kdims]
keys = [tuple(k[i] for i in indices) for k in self.data.keys()]
reindexed_items = OrderedDict(
(k, v) for (k, v) in zip(keys, self.data.values()))
reduced_dims = set([d.name for d in self.kdims]).difference(kdims)
dimensions = [self.get_dimension(d) for d in kdims
if d not in reduced_dims]
if len(set(keys)) != len(keys) and not force:
raise Exception("Given dimension labels not sufficient"
"to address all values uniquely")
if len(keys):
cdims = {self.get_dimension(d): self.dimension_values(d)[0] for d in reduced_dims}
else:
cdims = {}
with item_check(indices == sorted(indices)):
return self.clone(reindexed_items, kdims=dimensions,
cdims=cdims) | 0.001597 |
def inverse_dynamics(self, angles, start=0, end=1e100, states=None, max_force=100):
'''Follow a set of angle data, yielding dynamic joint torques.
Parameters
----------
angles : ndarray (num-frames x num-dofs)
Follow angle data provided by this array of angle values.
start : int, optional
Start following angle data after this frame. Defaults to the start
of the angle data.
end : int, optional
Stop following angle data after this frame. Defaults to the end of
the angle data.
states : list of body states, optional
If given, set the states of the skeleton bodies to these values
before starting to follow the marker data.
max_force : float, optional
Allow each degree of freedom in the skeleton to exert at most this
force when attempting to follow the given joint angles. Defaults to
100N. Setting this value to be large results in more accurate
following but can cause oscillations in the PID controllers,
resulting in noisy torques.
Returns
-------
torques : sequence of torque frames
Returns a generator of joint torque data for the skeleton. One set
of joint torques will be generated for each frame of angle data
between `start` and `end`.
'''
if states is not None:
self.skeleton.set_body_states(states)
for frame_no, frame in enumerate(angles):
if frame_no < start:
continue
if frame_no >= end:
break
self.ode_space.collide(None, self.on_collision)
states = self.skeleton.get_body_states()
self.skeleton.set_body_states(states)
# joseph's stability fix: step to compute torques, then reset the
# skeleton to the start of the step, and then step using computed
# torques. thus any numerical errors between the body states after
# stepping using angle constraints will be removed, because we
# will be stepping the model using the computed torques.
self.skeleton.enable_motors(max_force)
self.skeleton.set_target_angles(angles[frame_no])
self.ode_world.step(self.dt)
torques = self.skeleton.joint_torques
self.skeleton.disable_motors()
self.skeleton.set_body_states(states)
self.skeleton.add_torques(torques)
yield torques
self.ode_world.step(self.dt)
self.ode_contactgroup.empty() | 0.001121 |
def try_run(obj, names):
"""Given a list of possible method names, try to run them with the
provided object. Keep going until something works. Used to run
setup/teardown methods for module, package, and function tests.
"""
for name in names:
func = getattr(obj, name, None)
if func is not None:
if type(obj) == types.ModuleType:
# py.test compatibility
try:
args, varargs, varkw, defaults = inspect.getargspec(func)
except TypeError:
# Not a function. If it's callable, call it anyway
if hasattr(func, '__call__'):
func = func.__call__
try:
args, varargs, varkw, defaults = \
inspect.getargspec(func)
args.pop(0) # pop the self off
except TypeError:
raise TypeError("Attribute %s of %r is not a python "
"function. Only functions or callables"
" may be used as fixtures." %
(name, obj))
if len(args):
log.debug("call fixture %s.%s(%s)", obj, name, obj)
return func(obj)
log.debug("call fixture %s.%s", obj, name)
return func() | 0.002075 |
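A small illustration of how try_run above behaves for a non-module object; the Fixture class is invented for this sketch, and running it standalone also assumes the module-level log, types, and inspect that the original module imports.
# Illustrative only: a fixture object exposing one of the candidate method names.
class Fixture(object):
    def setup(self):
        print("setup ran")

try_run(Fixture(), ('setUp', 'setup'))  # 'setUp' is missing, so 'setup' is called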
def makesubatoffset(self, bitoffset, *, _offsetideal=None):
"""Create a copy of this PromiseCollection with an offset applied to each contained promise and register each with their parent.
If this promise's primitive is being merged with another
primitive, a new subpromise may be required to keep track of
the new offset of data coming from the new primitive.
Args:
bitoffset: An integer offset of the data in the new primitive.
_offsetideal: An integer offset to use if the associated primitive supports arbitrary TDO control.
Returns:
A new TDOPromiseCollection registered with this promise
collection, and with the correct offset.
"""
if _offsetideal is None:
_offsetideal = bitoffset
        if bitoffset == 0:
return self
newpromise = TDOPromiseCollection(self._chain)
for promise in self._promises:
newpromise.add(promise, bitoffset, _offsetideal=_offsetideal)
return newpromise | 0.00377 |
def _get_pretty_exception_message(e):
"""
Parses some DatabaseError to provide a better error message
"""
if (hasattr(e, 'message') and
'errorName' in e.message and
'message' in e.message):
return ('{name}: {message}'.format(
name=e.message['errorName'],
message=e.message['message']))
else:
return str(e) | 0.004577 |
def description_of(file, name='stdin'):
"""Return a string describing the probable encoding of a file."""
u = UniversalDetector()
for line in file:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '%s: %s with confidence %s' % (name,
result['encoding'],
result['confidence'])
else:
return '%s: no result' % name | 0.002096 |
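A possible way to call description_of above; it assumes the detector import (e.g. chardet's UniversalDetector) that the surrounding module provides, and the file name is a placeholder.
# Binary mode so raw bytes are fed to the detector; 'sample.bin' is a placeholder.
with open('sample.bin', 'rb') as handle:
    print(description_of(handle, name='sample.bin'))
# prints e.g. "sample.bin: utf-8 with confidence 0.99", or "sample.bin: no result"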
def select_action_key(self, next_action_arr, next_q_arr):
'''
Select action by Q(state, action).
Args:
next_action_arr: `np.ndarray` of actions.
next_q_arr: `np.ndarray` of Q-Values.
        Returns:
`np.ndarray` of keys.
'''
epsilon_greedy_flag = bool(np.random.binomial(n=1, p=self.epsilon_greedy_rate))
if epsilon_greedy_flag is False:
key = np.random.randint(low=0, high=next_action_arr.shape[0])
else:
key = next_q_arr.argmax()
return key | 0.005093 |
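A hedged sketch of calling the epsilon-greedy selection above; `agent` stands in for whatever object defines select_action_key together with an epsilon_greedy_rate attribute, and is not part of the snippet.
import numpy as np

# `agent` is assumed to be an instance with epsilon_greedy_rate already set.
next_action_arr = np.eye(4)                    # four candidate actions
next_q_arr = np.array([0.1, 0.7, 0.3, 0.2])    # their Q-values
key = agent.select_action_key(next_action_arr, next_q_arr)
chosen_action = next_action_arr[key]           # argmax (index 1) when the greedy branch is taken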
def pylint_raw(options):
"""
    Run pylint via subprocess and return its raw stdout.
    Because pylint sets a non-zero exit code based on the code score,
    we call it via Popen and read stdout directly instead of using check_output.
    :param options: list of command-line options passed through to pylint
    :return: pylint's stdout decoded as a string
"""
command = ['pylint']
command.extend(options)
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outs, __ = proc.communicate()
return outs.decode() | 0.004751 |
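A usage sketch for pylint_raw above; it assumes pylint is installed and on PATH, that the module imports subprocess, and that the target file name is a placeholder.
report = pylint_raw(['--score=n', 'my_module.py'])  # 'my_module.py' is a placeholder
print(report)  # raw pylint report text, regardless of pylint's exit code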
def key_exists(self, key, secret=False):
'''
    Check if a given key exists.
:param key: Key ID
:param secret: Check secret key
:rtype: bool
'''
if len(key) < 8:
return False
key = key.upper()
res = self.list_keys(secret)
for fingerprint in res.keys:
if fingerprint.endswith(key):
return True
return False | 0.004662 |
def lookup_folder(event, filesystem):
"""Lookup the parent folder in the filesystem content."""
for dirent in filesystem[event.parent_inode]:
if dirent.type == 'd' and dirent.allocated:
return ntpath.join(dirent.path, event.name) | 0.003891 |
def _root_unhook(self):
"""Change this root console into a normal Console object and
delete the root console from TCOD
"""
global _rootinitialized, _rootConsoleRef
# do we recognise this as the root console?
# if not then assume the console has already been taken care of
if(_rootConsoleRef and _rootConsoleRef() is self):
# turn this console into a regular console
unhooked = _lib.TCOD_console_new(self.width, self.height)
_lib.TCOD_console_blit(self.console_c,
0, 0, self.width, self.height,
unhooked, 0, 0, 1, 1)
# delete root console from TDL and TCOD
_rootinitialized = False
_rootConsoleRef = None
_lib.TCOD_console_delete(self.console_c)
# this Console object is now a regular console
self.console_c = unhooked | 0.002103 |
def QueryAndOwn(self, queue, lease_seconds=10, limit=1):
"""Returns a list of Tasks leased for a certain time.
Args:
queue: The queue to query from.
lease_seconds: The tasks will be leased for this long.
limit: Number of values to fetch.
Returns:
A list of GrrMessage() objects leased.
"""
with self.data_store.GetMutationPool() as mutation_pool:
return mutation_pool.QueueQueryAndOwn(queue, lease_seconds, limit,
self.frozen_timestamp) | 0.003745 |
def run_with_snapshots(self, tsnapstart=0., tsnapint=432000.):
"""Run the model forward, yielding to user code at specified intervals.
Parameters
----------
tsnapstart : int
The timestep at which to begin yielding.
        tsnapint : int
The interval at which to yield.
"""
tsnapints = np.ceil(tsnapint/self.dt)
while(self.t < self.tmax):
self._step_forward()
if self.t>=tsnapstart and (self.tc%tsnapints)==0:
yield self.t
return | 0.008913 |
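A hedged sketch of driving the generator above; `model` stands for an already configured instance of the class that defines run_with_snapshots (dt, tmax, etc. set elsewhere), and the snapshot handling is illustrative.
# `model` is assumed to be a configured instance of the class shown above.
for t in model.run_with_snapshots(tsnapstart=0.0, tsnapint=86400.0):
    print('snapshot at model time', t)
    # inspect or persist model state here before stepping resumes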
def matching_base_url(self, url):
"""
Return True if the initial part of `url` matches the base url
passed to the initialiser of this object, and False otherwise.
"""
n = len(self.baseurl)
return url[0:n] == self.baseurl | 0.007435 |
async def send_tokens(payment_handle: int, tokens: int, address: str) -> str:
"""
Sends tokens to an address
payment_handle is always 0
:param payment_handle: Integer
:param tokens: Integer
:param address: String
Example:
payment_handle = 0
amount = 1000
address = await Wallet.create_payment_address('00000000000000000000000001234567')
await Wallet.send_tokens(payment_handle, amount, address)
:return:
"""
logger = logging.getLogger(__name__)
if not hasattr(Wallet.send_tokens, "cb"):
logger.debug("vcx_wallet_send_tokens: Creating callback")
Wallet.send_tokens.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
c_payment_handle = c_uint32(payment_handle)
c_tokens = c_char_p(str(tokens).encode('utf-8'))
c_address = c_char_p(address.encode('utf-8'))
result = await do_call('vcx_wallet_send_tokens',
c_payment_handle,
c_tokens,
c_address,
Wallet.send_tokens.cb)
logger.debug("vcx_wallet_send_tokens completed")
return result | 0.00318 |
def _expand_address(addy):
'''
Convert the libcloud GCEAddress object into something more serializable.
'''
ret = {}
ret.update(addy.__dict__)
ret['extra']['zone'] = addy.region.name
return ret | 0.004525 |
def type_id(self):
"""
A short string representing the provider implementation id used for
serialization of :class:`.Credentials` and to identify the type of
provider in JavaScript.
The part before hyphen denotes the type of the provider, the part
after hyphen denotes the class id e.g.
``oauth2.Facebook.type_id = '2-5'``,
``oauth1.Twitter.type_id = '1-5'``.
"""
cls = self.__class__
mod = sys.modules.get(cls.__module__)
return str(self.PROVIDER_TYPE_ID) + '-' + \
str(mod.PROVIDER_ID_MAP.index(cls)) | 0.003257 |
def stop_all(self):
"""
Stop all nodes
"""
pool = Pool(concurrency=3)
for node in self.nodes.values():
pool.append(node.stop)
yield from pool.join() | 0.009615 |
def start(self, start_loop=True):
"""Start all the things.
:param start_loop bool: whether to start the ioloop. should be False if
the IOLoop is managed externally
"""
self.start_alerts()
if self.options.get('pidfile'):
with open(self.options.get('pidfile'), 'w') as fpid:
fpid.write(str(os.getpid()))
self.callback.start()
LOGGER.info('Reactor starts')
if start_loop:
self.loop.start() | 0.003817 |
def _get_reference(document_path, reference_map):
"""Get a document reference from a dictionary.
This just wraps a simple dictionary look-up with a helpful error that is
specific to :meth:`~.firestore.client.Client.get_all`, the
**public** caller of this function.
Args:
document_path (str): A fully-qualified document path.
reference_map (Dict[str, .DocumentReference]): A mapping (produced
by :func:`_reference_info`) of fully-qualified document paths to
document references.
Returns:
.DocumentReference: The matching reference.
Raises:
ValueError: If ``document_path`` has not been encountered.
"""
try:
return reference_map[document_path]
except KeyError:
msg = _BAD_DOC_TEMPLATE.format(document_path)
raise ValueError(msg) | 0.001175 |
def tag(iterable, tags=None, key='@tags'):
"""
Add tags to each dict or dict-like object in ``iterable``. Tags are added
to each dict with a key set by ``key``. If a key already exists under the
    key given by ``key``, this function will attempt to ``.extend()`` it, but
will fall back to replacing it in the event of error.
"""
if not tags:
for item in iterable:
yield item
else:
for item in iterable:
yield _tag(item, tags, key) | 0.001992 |
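A small usage sketch for tag above; it assumes the private helper _tag (not shown) merges the tag list into each item under the given key.
records = [{'name': 'alpha'}, {'name': 'beta'}]
for record in tag(records, tags=['nightly', 'ci'], key='@tags'):
    print(record)  # each dict now carries the tags under '@tags' (behaviour of the unshown _tag)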
def _process_human_orthos(self, limit=None):
"""
This table provides ortholog mappings between zebrafish and humans.
ZFIN has their own process of creating orthology mappings,
that we take in addition to other orthology-calling sources
(like PANTHER). We ignore the omim ids, and only use the gene_id.
Triples created:
<zfin gene id> a class
<zfin gene id> rdfs:label gene_symbol
<zfin gene id> dc:description gene_name
<human gene id> a class
<human gene id> rdfs:label gene_symbol
<human gene id> dc:description gene_name
<human gene id> equivalent class <omim id>
<zfin gene id> orthology association <human gene id>
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
LOG.info("Processing human orthos")
line_counter = 0
geno = Genotype(graph)
# model = Model(graph) # unused
raw = '/'.join((self.rawdir, self.files['human_orthos']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(zfin_id, zfin_symbol, zfin_name, human_symbol, human_name,
omim_id, gene_id, hgnc_id, evidence_code, pub_id
# , empty
) = row
if self.test_mode and zfin_id not in self.test_ids['gene']:
continue
# Add the zebrafish gene.
zfin_id = 'ZFIN:' + zfin_id.strip()
geno.addGene(zfin_id, zfin_symbol, None, zfin_name)
# Add the human gene.
gene_id = 'NCBIGene:' + gene_id.strip()
geno.addGene(gene_id, human_symbol, None, human_name)
# make the association
assoc = OrthologyAssoc(graph, self.name, zfin_id, gene_id)
# we don't know anything about the orthology type,
# so we just use the default
if re.match(r'ZDB', pub_id):
assoc.add_source('ZFIN:'+pub_id)
eco_id = self.get_orthology_evidence_code(evidence_code)
if eco_id is not None:
assoc.add_evidence(eco_id)
assoc.add_association_to_graph()
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with human orthos")
return | 0.001508 |
def get_all(self, attr, value, e=0.000001,
sort_by="__name__", reverse=False):
"""Get all nested Constant class that met ``klass.attr == value``.
:param attr: attribute name.
:param value: value.
:param e: used for float value comparison.
:param sort_by: nested class is ordered by <sort_by> attribute.
.. versionchanged:: 0.0.5
"""
matched = list()
for _, klass in self.subclasses(sort_by, reverse):
try:
if getattr(klass, attr) == approx(value, e):
matched.append(klass)
except: # pragma: no cover
pass
return matched | 0.005755 |
def write(
contents: str,
path: Union[str, pathlib.Path],
verbose: bool = False,
logger_func=None,
) -> bool:
"""
Writes ``contents`` to ``path``.
Checks if ``path`` already exists and only write out new contents if the
old contents do not match.
Creates any intermediate missing directories.
:param contents: the file contents to write
:param path: the path to write to
:param verbose: whether to print output
"""
print_func = logger_func or print
path = pathlib.Path(path)
if path.exists():
with path.open("r") as file_pointer:
old_contents = file_pointer.read()
if old_contents == contents:
if verbose:
print_func("preserved {}".format(path))
return False
else:
with path.open("w") as file_pointer:
file_pointer.write(contents)
if verbose:
print_func("rewrote {}".format(path))
return True
elif not path.exists():
if not path.parent.exists():
path.parent.mkdir(parents=True)
with path.open("w") as file_pointer:
file_pointer.write(contents)
if verbose:
print_func("wrote {}".format(path))
return True | 0.00078 |
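A short usage example of write above; the target path is a temporary placeholder.
import pathlib
import tempfile

target = pathlib.Path(tempfile.gettempdir()) / 'demo' / 'settings.txt'
write("retries = 3\n", target, verbose=True)   # creates parents, prints "wrote ...", returns True
write("retries = 3\n", target, verbose=True)   # same contents, prints "preserved ...", returns False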
def process_readme():
""" Function which will process README.md file and divide it into INTRO.md and INSTALL.md, which will be used in
documentation
"""
with open('../../README.md', 'r') as file:
readme = file.read()
readme = readme.replace('# eo-learn', '# Introduction').replace('docs/source/', '')
readme = readme.replace('**`', '**').replace('`**', '**')
chapters = [[]]
for line in readme.split('\n'):
if line.strip().startswith('## '):
chapters.append([])
if line.startswith('<img'):
line = '<p></p>'
chapters[-1].append(line)
chapters = ['\n'.join(chapter) for chapter in chapters]
intro = '\n'.join([chapter for chapter in chapters if not (chapter.startswith('## Install') or
chapter.startswith('## Documentation'))])
install = '\n'.join([chapter for chapter in chapters if chapter.startswith('## Install')])
with open(os.path.join(MARKDOWNS_FOLDER, 'INTRO.md'), 'w') as file:
file.write(intro)
with open(os.path.join(MARKDOWNS_FOLDER, 'INSTALL.md'), 'w') as file:
file.write(install) | 0.005063 |
def bam_conversion(job, samfile, sample_type, univ_options):
"""
This module converts SAMFILE from sam to bam
ARGUMENTS
1. samfile: <JSid for a sam file>
2. sample_type: string of 'tumor_dna' or 'normal_dna'
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
RETURN VALUES
1. output_files: REFER output_files in run_bwa()
"""
job.fileStore.logToMaster('Running sam2bam on %s:%s' % (univ_options['patient'], sample_type))
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'aligned.sam': samfile}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=True)
bamfile = '/'.join([work_dir, 'aligned.bam'])
parameters = ['view',
'-bS',
'-o', docker_path(bamfile),
input_files['aligned.sam']
]
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
output_file = job.fileStore.writeGlobalFile(bamfile)
job.fileStore.deleteGlobalFile(samfile)
reheader_bam = job.wrapJobFn(fix_bam_header, output_file, sample_type, univ_options, disk='60G')
job.addChild(reheader_bam)
return reheader_bam.rv() | 0.002165 |
def _cnv_prioritize(data):
"""Perform confidence interval based prioritization for CNVs.
"""
supported = {"cnvkit": {"inputs": ["call_file", "segmetrics"], "fn": _cnvkit_prioritize}}
pcall = None
priority_files = None
for call in data.get("sv", []):
if call["variantcaller"] in supported:
priority_files = [call.get(x) for x in supported[call["variantcaller"]]["inputs"]]
priority_files = [x for x in priority_files if x is not None and utils.file_exists(x)]
if len(priority_files) == len(supported[call["variantcaller"]]["inputs"]):
pcall = call
break
prioritize_by = tz.get_in(["config", "algorithm", "svprioritize"], data)
if pcall and prioritize_by:
out_file = "%s-prioritize.tsv" % utils.splitext_plus(priority_files[0])[0]
gene_list = _find_gene_list_from_bed(prioritize_by, out_file, data)
if gene_list:
with open(gene_list) as in_handle:
genes = [x.strip() for x in in_handle]
args = [dd.get_sample_name(data), genes] + priority_files
df = supported[pcall["variantcaller"]]["fn"](*args)
with file_transaction(data, out_file) as tx_out_file:
df.to_csv(tx_out_file, sep="\t", index=False)
pcall["priority"] = out_file
return data | 0.004402 |
def get_dweets_for(thing_name, key=None, session=None):
"""Read all the dweets for a dweeter
"""
if key is not None:
params = {'key': key}
else:
params = None
    return _request('get', '/get/dweets/for/{0}'.format(thing_name), params=params, session=session) | 0.006944 |
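A hedged usage sketch for get_dweets_for above; the thing name is a placeholder, and the call requires network access through the module's _request helper.
dweets = get_dweets_for('my-test-thing')  # placeholder thing name
print(dweets)  # exact shape of the result depends on the module's _request helper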
def from_phase(self, phase_name):
"""
Returns the result of a previous phase by its name
Parameters
----------
phase_name: str
The name of a previous phase
Returns
-------
result: Result
The result of that phase
Raises
------
exc.PipelineException
If no phase with the expected result is found
"""
try:
return self.__result_dict[phase_name]
except KeyError:
raise exc.PipelineException("No previous phase named {} found in results ({})".format(phase_name, ", ".join(
self.__result_dict.keys()))) | 0.004373 |
def zip_dicts(left, right, prefix=()):
"""
Modified zip through two dictionaries.
Iterate through all keys of left dictionary, returning:
- A nested path
- A value and parent for both dictionaries
"""
for key, left_value in left.items():
path = prefix + (key, )
right_value = right.get(key)
if isinstance(left_value, dict):
yield from zip_dicts(left_value, right_value or {}, path)
else:
yield path, left, left_value, right, right_value | 0.001887 |
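A self-contained example of zip_dicts above, showing the yielded path/value/parent tuples.
left = {'db': {'host': 'localhost', 'port': 5432}, 'debug': True}
right = {'db': {'host': 'example.org'}, 'debug': True}

for path, l_parent, l_value, r_parent, r_value in zip_dicts(left, right):
    print(path, l_value, r_value)
# ('db', 'host') localhost example.org
# ('db', 'port') 5432 None
# ('debug',) True True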
def __calculate_radius(self, number_neighbors, radius):
"""!
@brief Calculate new connectivity radius.
@param[in] number_neighbors (uint): Average amount of neighbors that should be connected by new radius.
@param[in] radius (double): Current connectivity radius.
@return New connectivity radius.
"""
        if number_neighbors >= len(self._osc_loc):
            return radius * self.__increase_persent + radius
        return average_neighbor_distance(self._osc_loc, number_neighbors) | 0.01675 |