text (string, lengths 78-104k) | score (float64, 0-0.18)
---|---
def _decorate(self, atype, n, o):
"""Decorates the specified object for automatic logging with acorn.
Args:
atype (str): one of the types specified in :attr:`atypes`.
n (str): name of the object in the user namespace.
o: object instance to decorate; no additional type checking is
performed.
"""
typemap = {"function": "functions",
"classobj": "classes",
"staticmethod": "methods",
"type": "classes"}
from acorn.logging.decoration import decorate_obj
try:
otype = typemap[atype]
decorate_obj(self.shell.user_ns, n, o, otype)
#Also create a log in the database for this execution; this allows a
#user to track the changes they make in prototyping function and
#class definitions.
self._logdef(n, o, otype)
msg.okay("Auto-decorated {}: {}.".format(n, o))
except:
msg.err("Error auto-decorating {}: {}.".format(n, o))
raise | 0.00677 |
def add_conversion_step(self, converter: Converter[S, T], inplace: bool = False):
"""
Utility method to add a converter to this chain. If inplace is True, this object is modified and
None is returned. Otherwise, a copy is returned
:param converter: the converter to add
:param inplace: boolean indicating whether to modify this object (True) or return a copy (False)
:return: None or a copy with the converter added
"""
# if both the current chain and the provided converter are generic, raise an error
if self.is_generic() and converter.is_generic():
raise ValueError('Cannot chain this generic converter chain to the provided converter : it is generic too!')
# if the current chain is able to transform its input into a valid input for the new converter
elif converter.can_be_appended_to(self, self.strict):
if inplace:
self._converters_list.append(converter)
# update the current destination type
self.to_type = converter.to_type
return
else:
new = copy(self)
new._converters_list.append(converter)
# update the current destination type
new.to_type = converter.to_type
return new
else:
raise TypeError('Cannot register a converter on this conversion chain : source type \'' +
get_pretty_type_str(converter.from_type)
+ '\' is not compliant with current destination type of the chain : \'' +
get_pretty_type_str(self.to_type) + '\' (this chain performs '
+ ('' if self.strict else 'non-') + 'strict mode matching)') | 0.006162 |
def napalm_validate(
task: Task,
src: Optional[str] = None,
validation_source: Optional[ValidationSourceData] = None,
) -> Result:
"""
Gather information with napalm and validate it:
http://napalm.readthedocs.io/en/develop/validate/index.html
Arguments:
src: file to use as validation source
validation_source: data to validate the device's state against
Returns:
Result object with the following attributes set:
* result (``dict``): dictionary with the result of the validation
* complies (``bool``): Whether the device complies or not
"""
device = task.host.get_connection("napalm", task.nornir.config)
r = device.compliance_report(
validation_file=src, validation_source=validation_source
)
return Result(host=task.host, result=r) | 0.001212 |
def type_object_attrgetter(obj, attr, *defargs):
"""
This implements an improved attrgetter for type objects (i.e. classes)
that can handle class attributes that are implemented as properties on
a metaclass.
Normally `getattr` on a class with a `property` (say, "foo"), would return
the `property` object itself. However, if the class has a metaclass which
*also* defines a `property` named "foo", ``getattr(cls, 'foo')`` will find
the "foo" property on the metaclass and resolve it. For the purposes of
autodoc we just want to document the "foo" property defined on the class,
not on the metaclass.
For example::
>>> class Meta(type):
... @property
... def foo(cls):
... return 'foo'
...
>>> class MyClass(metaclass=Meta):
... @property
... def foo(self):
... \"\"\"Docstring for MyClass.foo property.\"\"\"
... return 'myfoo'
...
>>> getattr(MyClass, 'foo')
'foo'
>>> type_object_attrgetter(MyClass, 'foo')
<property at 0x...>
>>> type_object_attrgetter(MyClass, 'foo').__doc__
'Docstring for MyClass.foo property.'
The last line of the example shows the desired behavior for the purposes
of autodoc.
"""
for base in obj.__mro__:
if attr in base.__dict__:
if isinstance(base.__dict__[attr], property):
# Note, this should only be used for properties--for any other
# type of descriptor (classmethod, for example) this can mess
# up existing expectations of what getattr(cls, ...) returns
return base.__dict__[attr]
break
return getattr(obj, attr, *defargs) | 0.000555 |
def _handle_file_ast(self, node, scope, ctxt, stream):
"""TODO: Docstring for _handle_file_ast.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._root = ctxt = fields.Dom(stream)
ctxt._pfp__scope = scope
self._root._pfp__name = "__root"
self._root._pfp__interp = self
self._dlog("handling file AST with {} children".format(len(node.children())))
for child in node.children():
self._handle_node(child, scope, ctxt, stream)
ctxt._pfp__process_fields_metadata()
return ctxt | 0.004739 |
def optionIsSet(self, name):
"""
Check whether an option with a given name exists and has been set.
:param name: the name of the option to check; can be short or long name.
:return: True if an option matching the given name exists and its value has
been set by the user
"""
name = name.strip()
if not self.hasOption(name):
return False
return self.getOption(name).isSet() | 0.004695 |
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format)) | 0.005236 |
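A minimal standalone sketch of the same validation idea: calling str.format with a dictionary containing every substitutable key raises KeyError (or IndexError for positional fields) when the template references anything unknown. The key names and function name here are illustrative, not part of the original module.
def validate_funcname_format(fmt, known_keys=('stage', 'api', 'resource', 'method')):
    """Return True if fmt only references known substitutable keys."""
    try:
        fmt.format(**{k: '' for k in known_keys})
        return True
    except (KeyError, IndexError) as exc:
        raise ValueError('Invalid format {0!r}: {1}'.format(fmt, exc))

validate_funcname_format('{stage}_{api}_{method}')   # True
# validate_funcname_format('{stage}_{unknown}')      # raises ValueError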
def plot_atacseq_insert_sizes(self, bam, plot, output_csv, max_insert=1500, smallest_insert=30):
"""
Heavy inspiration from here:
https://github.com/dbrg77/ATAC/blob/master/ATAC_seq_read_length_curve_fitting.ipynb
"""
try:
import pysam
import numpy as np
import matplotlib.mlab as mlab
from scipy.optimize import curve_fit
from scipy.integrate import simps
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except:
print("Necessary Python modules couldn't be loaded.")
return
try:
import seaborn as sns
sns.set_style("whitegrid")
except:
pass
def get_fragment_sizes(bam, max_insert=1500):
frag_sizes = list()
bam = pysam.Samfile(bam, 'rb')
for i, read in enumerate(bam):
if read.tlen < max_insert:
frag_sizes.append(read.tlen)
bam.close()
return np.array(frag_sizes)
def mixture_function(x, *p):
"""
Mixture function to model four gaussian (nucleosomal)
and one exponential (nucleosome-free) distributions.
"""
m1, s1, w1, m2, s2, w2, m3, s3, w3, m4, s4, w4, q, r = p
nfr = expo(x, 2.9e-02, 2.8e-02)
nfr[:smallest_insert] = 0
return (mlab.normpdf(x, m1, s1) * w1 +
mlab.normpdf(x, m2, s2) * w2 +
mlab.normpdf(x, m3, s3) * w3 +
mlab.normpdf(x, m4, s4) * w4 +
nfr)
def expo(x, q, r):
"""
Exponential function.
"""
return q * np.exp(-r * x)
# get fragment sizes
frag_sizes = get_fragment_sizes(bam)
# bin
numBins = np.linspace(0, max_insert, max_insert + 1)
y, scatter_x = np.histogram(frag_sizes, numBins, density=1)
# get the mid-point of each bin
x = (scatter_x[:-1] + scatter_x[1:]) / 2
# Parameters are empirical, need to check
paramGuess = [
200, 50, 0.7, # gaussians
400, 50, 0.15,
600, 50, 0.1,
800, 55, 0.045,
2.9e-02, 2.8e-02 # exponential
]
try:
popt3, pcov3 = curve_fit(
mixture_function, x[smallest_insert:], y[smallest_insert:],
p0=paramGuess, maxfev=100000)
except:
print("Nucleosomal fit could not be found.")
return
m1, s1, w1, m2, s2, w2, m3, s3, w3, m4, s4, w4, q, r = popt3
# Plot
plt.figure(figsize=(12, 12))
# Plot distribution
plt.hist(frag_sizes, numBins, histtype="step", ec="k", normed=1, alpha=0.5)
# Plot nucleosomal fits
plt.plot(x, mlab.normpdf(x, m1, s1) * w1, 'r-', lw=1.5, label="1st nucleosome")
plt.plot(x, mlab.normpdf(x, m2, s2) * w2, 'g-', lw=1.5, label="2nd nucleosome")
plt.plot(x, mlab.normpdf(x, m3, s3) * w3, 'b-', lw=1.5, label="3rd nucleosome")
plt.plot(x, mlab.normpdf(x, m4, s4) * w4, 'c-', lw=1.5, label="4th nucleosome")
# Plot nucleosome-free fit
nfr = expo(x, 2.9e-02, 2.8e-02)
nfr[:smallest_insert] = 0
plt.plot(x, nfr, 'k-', lw=1.5, label="nucleosome-free")
# Plot sum of fits
ys = mixture_function(x, *popt3)
plt.plot(x, ys, 'k--', lw=3.5, label="fit sum")
plt.legend()
plt.xlabel("Fragment size (bp)")
plt.ylabel("Density")
plt.savefig(plot, bbox_inches="tight")
# Integrate curves and get areas under curve
areas = [
["fraction", "area under curve", "max density"],
["Nucleosome-free fragments", simps(nfr), max(nfr)],
["1st nucleosome", simps(mlab.normpdf(x, m1, s1) * w1), max(mlab.normpdf(x, m1, s1) * w1)],
["2nd nucleosome", simps(mlab.normpdf(x, m2, s2) * w1), max(mlab.normpdf(x, m2, s2) * w2)],
["3rd nucleosome", simps(mlab.normpdf(x, m3, s3) * w1), max(mlab.normpdf(x, m3, s3) * w3)],
["4th nucleosome", simps(mlab.normpdf(x, m4, s4) * w1), max(mlab.normpdf(x, m4, s4) * w4)]
]
try:
import csv
with open(output_csv, "w") as f:
writer = csv.writer(f)
writer.writerows(areas)
except:
pass | 0.003531 |
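A reduced, self-contained sketch of the fitting step above: one Gaussian plus one exponential fitted to a fragment-size histogram with scipy.optimize.curve_fit. The toy data, starting parameters, and the hand-written normal density (matplotlib.mlab.normpdf is deprecated) are illustrative only.
import numpy as np
from scipy.optimize import curve_fit

def normpdf(x, m, s):
    # normal probability density, written out to avoid the deprecated mlab helper
    return np.exp(-0.5 * ((x - m) / s) ** 2) / (s * np.sqrt(2 * np.pi))

def model(x, m1, s1, w1, q, r):
    # one nucleosomal Gaussian plus one nucleosome-free exponential
    return w1 * normpdf(x, m1, s1) + q * np.exp(-r * x)

rng = np.random.default_rng(0)
frags = np.concatenate([rng.exponential(60, 5000), rng.normal(200, 40, 3000)])
y, edges = np.histogram(frags, bins=np.linspace(0, 800, 801), density=True)
x = (edges[:-1] + edges[1:]) / 2
popt, _ = curve_fit(model, x, y, p0=[200, 50, 0.4, 0.02, 0.02], maxfev=100000)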
def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
"""
model_fields = [
('Number of coefficients', 'num_coefficients'),
('Number of examples', 'num_examples'),
('Number of classes', 'num_classes'),
('Number of feature columns', 'num_features'),
('Number of unpacked features', 'num_unpacked_features')]
hyperparam_fields = [
("L1 penalty", 'l1_penalty'),
("L2 penalty", 'l2_penalty')
]
solver_fields = [
("Solver", 'solver'),
("Solver iterations", 'training_iterations'),
("Solver status", 'training_solver_status'),
("Training time (sec)", 'training_time')]
training_fields = [
("Log-likelihood", 'training_loss')]
coefs = self.coefficients
top_coefs, bottom_coefs = _toolkit_get_topk_bottomk(coefs,k=5)
(coefs_list, titles_list) = _summarize_coefficients(top_coefs, \
bottom_coefs)
return ([ model_fields, hyperparam_fields, \
solver_fields, training_fields ] + coefs_list, \
[ 'Schema', 'Hyperparameters', \
'Training Summary', 'Settings' ] + titles_list ) | 0.009336 |
def options(self, context, module_options):
'''
INJECT If set to true, this allows PowerView to work over 'stealthier' execution methods which have non-interactive contexts (e.g. WMI) (default: True)
'''
self.exec_methods = ['smbexec', 'atexec']
self.inject = True
        if 'INJECT' in module_options:
            # module options arrive as strings, so a truthiness check on the raw
            # value would treat 'False' as True; compare the text instead
            self.inject = str(module_options['INJECT']).lower() in ('true', '1', 'yes')
if self.inject: self.exec_methods = None
self.ps_script1 = obfs_ps_script('cme_powershell_scripts/Invoke-PSInject.ps1')
self.ps_script2 = obfs_ps_script('powersploit/Recon/PowerView.ps1') | 0.009646 |
def register_admin_models(admin_site):
"""Registers dynamically created preferences models for Admin interface.
:param admin.AdminSite admin_site: AdminSite object.
"""
global __MODELS_REGISTRY
prefs = get_prefs()
for app_label, prefs_items in prefs.items():
model_class = get_pref_model_class(app_label, prefs_items, get_app_prefs)
if model_class is not None:
__MODELS_REGISTRY[app_label] = model_class
admin_site.register(model_class, get_pref_model_admin_class(prefs_items)) | 0.005484 |
def find_orfs(fa, seqs):
"""
find orfs and see if they overlap with insertions
# seqs[id] = [gene, model, [[i-gene_pos, i-model_pos, i-length, iseq, [orfs], [introns]], ...]]
"""
faa = '%s.prodigal.faa' % (fa)
fna = '%s.prodigal.fna' % (fa)
gbk = '%s.prodigal.gbk' % (fa)
if os.path.exists(faa) is False:
p = subprocess.Popen('prodigal -q -i %s -a %s -d %s -c -f gbk -m -n -o %s -p meta' \
% (fa, faa, fna, gbk), shell = True)
p.communicate()
for orf in parse_fasta(faa):
if orf[0] == []:
continue
id = orf[0].split('>')[1].split('_', 1)[0]
pos = sorted([int(i) for i in orf[0].split()[2:5] if i != '#'])
if id not in seqs:
continue
for i, ins in enumerate(seqs[id][2]):
if check_overlap(pos, ins, 0.90) is True:
seqs[id][2][i][4].append(orf)
return seqs, faa | 0.007584 |
def run_once(self):
"""Pump events to this App instance and then return.
This works in the way described in :any:`App.run` except it immediately
returns after the first :any:`update` call.
Having multiple :any:`App` instances and selectively calling run_once on
them is a decent way to create a state machine.
"""
if not hasattr(self, '_App__prevTime'):
self.__prevTime = _time.clock() # initiate __prevTime
for event in get():
if event.type: # exclude custom events with a blank type variable
# call the ev_* methods
method = 'ev_%s' % event.type # ev_TYPE
getattr(self, method)(event)
if event.type == 'KEYDOWN':
# call the key_* methods
method = 'key_%s' % event.key # key_KEYNAME
if hasattr(self, method): # silently exclude undefined methods
getattr(self, method)(event)
newTime = _time.clock()
self.update(newTime - self.__prevTime)
self.__prevTime = newTime | 0.006335 |
def spawn(opts, conf):
""" Acts like twistd """
if opts.config is not None:
os.environ["CALLSIGN_CONFIG_FILE"] = opts.config
sys.argv[1:] = [
"-noy", sibpath(__file__, "callsign.tac"),
"--pidfile", conf['pidfile'],
"--logfile", conf['logfile'],
]
twistd.run() | 0.003215 |
def _sprite_file(map, sprite):
"""
Returns the relative path (from the images directory) to the original file
used when constructing the sprite. This is suitable for passing to the
image_width and image_height helpers.
"""
map = StringValue(map).value
sprite_name = StringValue(sprite).value
sprite_map = sprite_maps.get(map)
sprite = sprite_map and sprite_map.get(sprite_name)
if not sprite_map:
log.error("No sprite map found: %s", map)
elif not sprite:
log.error("No sprite found: %s in %s", sprite_name, sprite_map['*n*'])
if sprite:
return QuotedStringValue(sprite[1][0])
return StringValue(None) | 0.001477 |
def pixel_scale_angle_at_skycoord(skycoord, wcs, offset=1. * u.arcsec):
"""
Calculate the pixel scale and WCS rotation angle at the position of
a SkyCoord coordinate.
Parameters
----------
skycoord : `~astropy.coordinates.SkyCoord`
The SkyCoord coordinate.
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
offset : `~astropy.units.Quantity`
A small angular offset to use to compute the pixel scale and
position angle.
Returns
-------
scale : `~astropy.units.Quantity`
The pixel scale in arcsec/pixel.
angle : `~astropy.units.Quantity`
The angle (in degrees) measured counterclockwise from the
positive x axis to the "North" axis of the celestial coordinate
system.
Notes
-----
If distortions are present in the image, the x and y pixel scales
likely differ. This function computes a single pixel scale along
the North/South axis.
"""
# We take a point directly "above" (in latitude) the input position
# and convert it to pixel coordinates, then we use the pixel deltas
# between the input and offset point to calculate the pixel scale and
# angle.
# Find the coordinates as a representation object
coord = skycoord.represent_as('unitspherical')
# Add a small perturbation in the latitude direction (since longitude
# is more difficult because it is not directly an angle)
coord_new = UnitSphericalRepresentation(coord.lon, coord.lat + offset)
coord_offset = skycoord.realize_frame(coord_new)
# Find pixel coordinates of offset coordinates and pixel deltas
x_offset, y_offset = skycoord_to_pixel(coord_offset, wcs, mode='all')
x, y = skycoord_to_pixel(skycoord, wcs, mode='all')
dx = x_offset - x
dy = y_offset - y
scale = offset.to(u.arcsec) / (np.hypot(dx, dy) * u.pixel)
angle = (np.arctan2(dy, dx) * u.radian).to(u.deg)
return scale, angle | 0.000501 |
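The geometry in the last few lines, isolated as a plain numpy/astropy-units sketch: the scale is the angular offset divided by the pixel displacement, and the angle is arctan2 of the displacement. The pixel deltas below are made up.
import numpy as np
import astropy.units as u

offset = 1.0 * u.arcsec
dx, dy = 0.002, 0.003                                 # hypothetical pixel deltas toward "North"
scale = offset / (np.hypot(dx, dy) * u.pixel)         # arcsec / pixel
angle = (np.arctan2(dy, dx) * u.radian).to(u.deg)     # CCW from the +x axis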
def substitute_ref_with_url(self, txt):
"""
In the string `txt`, replace sphinx references with
corresponding links to online docs.
"""
# Find sphinx cross-references
mi = re.finditer(r':([^:]+):`([^`]+)`', txt)
if mi:
# Iterate over match objects in iterator returned by re.finditer
for mo in mi:
# Initialize link label and url for substitution
lbl = None
url = None
# Get components of current match: full matching text, the
# role label in the reference, and the name of the
# referenced type
mtxt = mo.group(0)
role = mo.group(1)
name = mo.group(2)
# If role is 'ref', the name component is in the form
# label <name>
if role == 'ref':
ma = re.match(r'\s*([^\s<]+)\s*<([^>]+)+>', name)
if ma:
name = ma.group(2)
lbl = ma.group(1)
# Try to look up the current cross-reference. Issue a
# warning if the lookup fails, and do the substitution
# if it succeeds.
try:
url = self.get_docs_url(role, name)
if role != 'ref':
lbl = self.get_docs_label(role, name)
except KeyError as ex:
if len(ex.args) == 1 or ex.args[1] != 'role':
print('Warning: %s' % ex.args[0])
else:
# If the cross-reference lookup was successful, replace
# it with an appropriate link to the online docs
rtxt = '[%s](%s)' % (lbl, url)
txt = re.sub(mtxt, rtxt, txt, flags=re.M)
return txt | 0.001046 |
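A stripped-down illustration of the same regex plumbing: find :role:`name` cross-references and rewrite them as Markdown links. The URL scheme is invented for the example; the real method looks URLs up via get_docs_url.
import re

def sub_refs(txt, base='https://example.org/docs'):
    def repl(mo):
        role, name = mo.group(1), mo.group(2)
        return '[{0}]({1}/{2}.html#{3})'.format(name, base, role, name)
    return re.sub(r':([^:]+):`([^`]+)`', repl, txt)

sub_refs('See :func:`foo.bar` for details.')
# 'See [foo.bar](https://example.org/docs/func.html#foo.bar) for details.'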
def findMin(arr):
"""
A simpler and more reliable peak finder than argrelmax().
"""
out = np.zeros(shape=arr.shape, dtype=bool)
_calcMin(arr, out)
return out | 0.005263 |
def strings_to_integers(strings: Iterable[str]) -> Iterable[int]:
"""
Convert a list of strings to a list of integers.
:param strings: a list of string
:return: a list of converted integers
.. doctest::
>>> strings_to_integers(['1', '1.0', '-0.2'])
[1, 1, 0]
"""
return strings_to_(strings, lambda x: int(float(x))) | 0.002762 |
def log_warning(self, msg):
"""
Log a warning if ``logger`` exists.
Args:
msg: Warning to log.
Warning:
Can raise a ``RuntimeError`` if this was asked in the constructor.
"""
if self.__logger:
self.__logger.warning(msg)
if self.__raise_exception_on_warning:
raise RuntimeError(msg) | 0.005141 |
def append_dist_to_stop_times(feed: "Feed", trip_stats: DataFrame) -> "Feed":
"""
Calculate and append the optional ``shape_dist_traveled`` field in
``feed.stop_times`` in terms of the distance units
``feed.dist_units``.
Need trip stats in the form output by
:func:`.trips.compute_trip_stats` for this.
Return the resulting Feed.
Notes
-----
- Does not always give accurate results, as described below.
- The algorithm works as follows.
Compute the ``shape_dist_traveled`` field by using Shapely to
measure the distance of a stop along its trip linestring.
If for a given trip this process produces a non-monotonically
increasing, hence incorrect, list of (cumulative) distances, then
fall back to estimating the distances as follows.
Get the average speed of the trip via ``trip_stats`` and use it to
linearly interpolate distances for stop times, assuming that the
first stop is at shape_dist_traveled = 0 (the start of the shape)
and the last stop is at shape_dist_traveled = the length of the trip
(taken from trip_stats and equal to the length of the shape, unless
``trip_stats`` was called with ``get_dist_from_shapes == False``).
This fallback method usually kicks in on trips with
self-intersecting linestrings.
Unfortunately, this fallback method will produce incorrect results
when the first stop does not start at the start of its shape
(so shape_dist_traveled != 0).
This is the case for several trips in `this Portland feed
<https://transitfeeds.com/p/trimet/43/1400947517>`_, for example.
- Assume the following feed attributes are not ``None``:
* ``feed.stop_times``
* Those used in :func:`.shapes.build_geometry_by_shape`
* Those used in :func:`.stops.build_geometry_by_stop`
"""
feed = feed.copy()
geometry_by_shape = feed.build_geometry_by_shape(use_utm=True)
geometry_by_stop = feed.build_geometry_by_stop(use_utm=True)
# Initialize DataFrame
f = pd.merge(
feed.stop_times,
trip_stats[["trip_id", "shape_id", "distance", "duration"]],
).sort_values(["trip_id", "stop_sequence"])
# Convert departure times to seconds past midnight to ease calculations
f["departure_time"] = f["departure_time"].map(hp.timestr_to_seconds)
dist_by_stop_by_shape = {shape: {} for shape in geometry_by_shape}
m_to_dist = hp.get_convert_dist("m", feed.dist_units)
def compute_dist(group):
# Compute the distances of the stops along this trip
shape = group["shape_id"].iat[0]
if not isinstance(shape, str):
group["shape_dist_traveled"] = np.nan
return group
elif np.isnan(group["distance"].iat[0]):
group["shape_dist_traveled"] = np.nan
return group
linestring = geometry_by_shape[shape]
distances = []
for stop in group["stop_id"].values:
if stop in dist_by_stop_by_shape[shape]:
d = dist_by_stop_by_shape[shape][stop]
else:
d = m_to_dist(
hp.get_segment_length(linestring, geometry_by_stop[stop])
)
dist_by_stop_by_shape[shape][stop] = d
distances.append(d)
s = sorted(distances)
D = linestring.length
distances_are_reasonable = all([d < D + 100 for d in distances])
if distances_are_reasonable and s == distances:
# Good
pass
elif distances_are_reasonable and s == distances[::-1]:
# Reverse. This happens when the direction of a linestring
# opposes the direction of the bus trip.
distances = distances[::-1]
else:
# Totally redo using trip length, first and last stop times,
# and linear interpolation
            dt = group["departure_time"]
            # Get indices of nan departure times and temporarily forward fill
            # them so that np.interp can be applied smoothly
            nan_indices = np.where(dt.isnull())[0]
            dt = dt.fillna(method="ffill")
            times = dt.values  # seconds
            t0, t1 = times[0], times[-1]
            d0, d1 = 0, group["distance"].iat[0]
            # Interpolate
            distances = np.interp(times, [t0, t1], [d0, d1])
# Nullify distances with nan departure times
for i in nan_indices:
distances[i] = np.nan
group["shape_dist_traveled"] = distances
return group
g = f.groupby("trip_id", group_keys=False).apply(compute_dist)
# Convert departure times back to time strings
g["departure_time"] = g["departure_time"].map(
lambda x: hp.timestr_to_seconds(x, inverse=True)
)
g = g.drop(["shape_id", "distance", "duration"], axis=1)
feed.stop_times = g
return feed | 0.000202 |
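The fallback branch above boils down to a single np.interp call: assume distance 0 at the first departure time and the trip length at the last, then interpolate everything in between. A minimal sketch with made-up numbers:
import numpy as np

times = np.array([0, 60, 150, 240], dtype=float)   # seconds past the first departure
trip_length = 12.0                                  # in feed.dist_units
distances = np.interp(times, [times[0], times[-1]], [0.0, trip_length])
# array([ 0. ,  3. ,  7.5, 12. ])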
def find_tracker_url(ticket_url):
"""
Given http://tracker.ceph.com/issues/16673 or
tracker.ceph.com/issues/16673, return "http://tracker.ceph.com".
"""
if ticket_url.startswith('http://') or ticket_url.startswith('https://'):
o = urlparse(ticket_url)
scheme, netloc = o.scheme, o.netloc
else:
scheme = 'http'
(netloc, _) = ticket_url.split('/', 1)
return '%s://%s' % (scheme, netloc) | 0.002252 |
def filter_queryset(self, attrs, queryset):
"""
Filter the queryset to all instances matching the given attributes.
"""
# If this is an update, then any unprovided field should
# have its value set based on the existing instance attribute.
if self.instance is not None:
for field_name in self.fields:
if field_name not in attrs:
attrs[field_name] = getattr(self.instance, field_name)
# Determine the filter keyword arguments and filter the queryset.
filter_kwargs = {
field_name: attrs[field_name]
for field_name in self.fields
}
return queryset.filter(**filter_kwargs) | 0.002766 |
def forward(self, images, features, targets=None):
"""
Arguments:
images (ImageList): images for which we want to compute the predictions
features (list[Tensor]): features computed from the images that are
used for computing the predictions. Each tensor in the list
                corresponds to a different feature level
            targets (list[BoxList]): ground-truth boxes present in the image (optional)
Returns:
boxes (list[BoxList]): the predicted boxes from the RPN, one BoxList per
image.
losses (dict[Tensor]): the losses for the model during training. During
testing, it is an empty dict.
"""
objectness, rpn_box_regression = self.head(features)
anchors = self.anchor_generator(images, features)
if self.training:
return self._forward_train(anchors, objectness, rpn_box_regression, targets)
else:
return self._forward_test(anchors, objectness, rpn_box_regression) | 0.00661 |
def _varslist2axis(cls, fluent: 'TensorFluent', vars_list: List[str]) -> List[int]:
'''Maps the `vars_list` into a list of axis indices
corresponding to the `fluent` scope.
Args:
            fluent: The fluent.
vars_list: The list of variables to be aggregated over.
Returns:
            List[int]: a list of axis indices.
'''
axis = []
for var in vars_list:
if var in fluent.scope.as_list():
ax = fluent.scope.index(var)
if fluent.batch:
ax += 1
axis.append(ax)
return axis | 0.004847 |
def _decode_header(auth_header, client_id, client_secret):
"""
Takes the header and tries to return an active token and decoded
payload.
:param auth_header:
:param client_id:
:param client_secret:
:return: (token, profile)
"""
try:
token = auth_header.split()[1]
payload = jwt.decode(
token,
client_secret,
audience=client_id)
except jwt.ExpiredSignature:
raise exceptions.NotAuthorizedException(
'Token has expired, please log in again.')
# is valid client
except jwt.InvalidAudienceError:
message = 'Incorrect audience, expected: {}'.format(
client_id)
raise exceptions.NotAuthorizedException(message)
# is valid token
except jwt.DecodeError:
raise exceptions.NotAuthorizedException(
'Token signature could not be validated.')
except Exception as e:
raise exceptions.NotAuthorizedException(
            'Token signature was malformed. {}'.format(e))
return token, payload | 0.000932 |
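For context, a hedged PyJWT round trip showing the decode call this helper relies on; the client id, secret, and algorithm are placeholders. Newer PyJWT versions spell the expiry error jwt.ExpiredSignatureError, require an explicit algorithms list, and expose no e.message attribute (use str(e)).
import jwt

client_id, client_secret = 'my-client', 's3cr3t'
token = jwt.encode({'sub': 'user-1', 'aud': client_id}, client_secret, algorithm='HS256')
payload = jwt.decode(token, client_secret, audience=client_id, algorithms=['HS256'])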
def in_reply_to(self) -> Optional[UnstructuredHeader]:
"""The ``In-Reply-To`` header."""
try:
return cast(UnstructuredHeader, self[b'in-reply-to'][0])
except (KeyError, IndexError):
return None | 0.008299 |
def _normalize_basedir(basedir=None):
'''
Takes a basedir argument as a string or a list. If the string or list is
empty, then look up the default from the 'reposdir' option in the yum
config.
Returns a list of directories.
'''
# if we are passed a string (for backward compatibility), convert to a list
if isinstance(basedir, six.string_types):
basedir = [x.strip() for x in basedir.split(',')]
if basedir is None:
basedir = []
# nothing specified, so use the reposdir option as the default
if not basedir:
basedir = _get_yum_config_value('reposdir')
if not isinstance(basedir, list) or not basedir:
raise SaltInvocationError('Could not determine any repo directories')
return basedir | 0.001292 |
def strip_command(self, command_string, output):
"""Strip command_string from output string."""
output_list = output.split(command_string)
return self.RESPONSE_RETURN.join(output_list) | 0.009615 |
def fit_size(min_length: int = 0, max_length: int = None,
message=None) -> Filter_T:
"""
Validate any sized object to ensure the size/length
is in a given range [min_length, max_length].
"""
def validate(value):
length = len(value) if value is not None else 0
if length < min_length or \
(max_length is not None and length > max_length):
_raise_failure(message)
return value
return validate | 0.002079 |
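Usage is straightforward: the factory returns a validator that passes the value through or reports the failure via _raise_failure. A hypothetical call, assuming the surrounding module is imported:
check_nickname = fit_size(1, 16, message='nickname must be 1-16 characters')
check_nickname('alice')   # returns 'alice'
# check_nickname('')      # fails: calls _raise_failure(message)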
def execute_pubsub(self, command, *channels):
"""Executes Redis (p)subscribe/(p)unsubscribe commands.
ConnectionsPool picks separate connection for pub/sub
and uses it until explicitly closed or disconnected
(unsubscribing from all channels/patterns will leave connection
locked for pub/sub use).
There is no auto-reconnect for this PUB/SUB connection.
Returns asyncio.gather coroutine waiting for all channels/patterns
to receive answers.
"""
conn, address = self.get_connection(command)
if conn is not None:
return conn.execute_pubsub(command, *channels)
else:
return self._wait_execute_pubsub(address, command, channels, {}) | 0.002663 |
def resolve_module(module, definitions):
"""Resolve (through indirections) the program contents of a module definition.
The result is a list of program chunks."""
assert module in definitions, "No definition for module '%s'" % module
d = definitions[module]
if type(d) == dict:
if 'filename' in d:
with open(d['filename']) as f:
return [f.read().strip()]
elif 'reference' in d:
return resolve_module(d['reference'], definitions)
elif 'group' in d:
return sum([resolve_module(m,definitions) for m in d['group']],[])
else:
assert False
else:
assert type(d) == str
return [d] | 0.021739 |
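A small worked example of the indirection rules, using an in-memory definitions mapping (no 'filename' entries, so nothing is read from disk):
definitions = {
    'core':  'print("core")',
    'alias': {'reference': 'core'},
    'all':   {'group': ['core', 'alias']},
}
resolve_module('all', definitions)
# ['print("core")', 'print("core")']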
def do_thaw(client, args):
"""Execute the thaw operation, pulling in an actual Vault
    client if necessary"""
vault_client = None
if args.gpg_pass_path:
vault_client = client.connect(args)
aomi.filez.thaw(vault_client, args.icefile, args)
sys.exit(0) | 0.003559 |
def _interpret_contents(contentstream, initial_shorthand=UNIT_SQUARE):
"""Interpret the PDF content stream.
The stack represents the state of the PDF graphics stack. We are only
interested in the current transformation matrix (CTM) so we only track
this object; a full implementation would need to track many other items.
The CTM is initialized to the mapping from user space to device space.
PDF units are 1/72". In a PDF viewer or printer this matrix is initialized
to the transformation to device space. For example if set to
(1/72, 0, 0, 1/72, 0, 0) then all units would be calculated in inches.
Images are always considered to be (0, 0) -> (1, 1). Before drawing an
image there should be a 'cm' that sets up an image coordinate system
where drawing from (0, 0) -> (1, 1) will draw on the desired area of the
page.
PDF units suit our needs so we initialize ctm to the identity matrix.
According to the PDF specification, the maximum stack depth is 32. Other
viewers tolerate some amount beyond this. We issue a warning if the
stack depth exceeds the spec limit and set a hard limit beyond this to
bound our memory requirements. If the stack underflows behavior is
undefined in the spec, but we just pretend nothing happened and leave the
CTM unchanged.
"""
stack = []
ctm = PdfMatrix(initial_shorthand)
xobject_settings = []
inline_images = []
found_vector = False
vector_ops = set('S s f F f* B B* b b*'.split())
image_ops = set('BI ID EI q Q Do cm'.split())
operator_whitelist = ' '.join(vector_ops | image_ops)
for n, graphobj in enumerate(
_normalize_stack(
pikepdf.parse_content_stream(contentstream, operator_whitelist)
)
):
operands, operator = graphobj
if operator == 'q':
stack.append(ctm)
if len(stack) > 32: # See docstring
if len(stack) > 128:
raise RuntimeError(
"PDF graphics stack overflowed hard limit, operator %i" % n
)
warn("PDF graphics stack overflowed spec limit")
elif operator == 'Q':
try:
ctm = stack.pop()
except IndexError:
# Keeping the ctm the same seems to be the only sensible thing
# to do. Just pretend nothing happened, keep calm and carry on.
warn("PDF graphics stack underflowed - PDF may be malformed")
elif operator == 'cm':
ctm = PdfMatrix(operands) @ ctm
elif operator == 'Do':
image_name = operands[0]
settings = XobjectSettings(
name=image_name, shorthand=ctm.shorthand, stack_depth=len(stack)
)
xobject_settings.append(settings)
elif operator == 'INLINE IMAGE': # BI/ID/EI are grouped into this
iimage = operands[0]
inline = InlineSettings(
iimage=iimage, shorthand=ctm.shorthand, stack_depth=len(stack)
)
inline_images.append(inline)
elif operator in vector_ops:
found_vector = True
return ContentsInfo(
xobject_settings=xobject_settings,
inline_images=inline_images,
found_vector=found_vector,
) | 0.000894 |
def _connect(self):
"""Connects via SSH.
"""
ssh = self._ssh_client()
logger.debug("Connecting with %s",
', '.join('%s=%r' % (k, v if k != "password" else "***")
for k, v in iteritems(self.destination)))
ssh.connect(**self.destination)
logger.debug("Connected to %s", self.destination['hostname'])
self._ssh = ssh | 0.004751 |
def session_preparation(self):
"""Prepare the session after the connection has been established."""
# 0 will defer to the global delay factor
delay_factor = self.select_delay_factor(delay_factor=0)
self._test_channel_read()
self.set_base_prompt()
cmd = "{}set cli mode -page OFF{}".format(self.RETURN, self.RETURN)
self.disable_paging(command=cmd)
time.sleep(1 * delay_factor)
self.set_base_prompt()
time.sleep(0.3 * delay_factor)
self.clear_buffer() | 0.003717 |
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
"""Works like :func:`make_line_iter` but accepts a separator
which divides chunks. If you want newline based processing
you should use :func:`make_line_iter` instead as it
supports arbitrary newline markers.
.. versionadded:: 0.8
.. versionadded:: 0.9
added support for iterators as input stream.
    :param stream: the stream or iterable to iterate over.
:param separator: the separator that divides chunks.
:param limit: the limit in bytes for the stream. (Usually
content length. Not necessary if the `stream`
is otherwise already limited).
:param buffer_size: The optional buffer size.
"""
_iter = _make_chunk_iter(stream, limit, buffer_size)
first_item = next(_iter, '')
if not first_item:
return
_iter = chain((first_item,), _iter)
if isinstance(first_item, text_type):
separator = to_unicode(separator)
_split = re.compile(r'(%s)' % re.escape(separator)).split
_join = u''.join
else:
separator = to_bytes(separator)
_split = re.compile(b'(' + re.escape(separator) + b')').split
_join = b''.join
buffer = []
while 1:
new_data = next(_iter, '')
if not new_data:
break
chunks = _split(new_data)
new_buf = []
for item in chain(buffer, chunks):
if item == separator:
yield _join(new_buf)
new_buf = []
else:
new_buf.append(item)
buffer = new_buf
if buffer:
yield _join(buffer) | 0.000597 |
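The buffering pattern in the loop above, isolated into a simplified generator: split each incoming piece on the separator, emit completed chunks, and carry the trailing partial chunk into the next round. This is a sketch, not the werkzeug implementation.
import re
from itertools import chain

def iter_chunks(pieces, separator):
    split = re.compile('(%s)' % re.escape(separator)).split
    buffer = []
    for data in pieces:
        new_buf = []
        for item in chain(buffer, split(data)):
            if item == separator:
                yield ''.join(new_buf)
                new_buf = []
            else:
                new_buf.append(item)
        buffer = new_buf
    if buffer:
        yield ''.join(buffer)

list(iter_chunks(['a;b', ';c'], ';'))   # ['a', 'b', 'c']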
def remove(self, auto_confirm=False):
"""Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True)."""
if not self._can_uninstall():
return
if not self.paths:
logger.info(
"Can't uninstall '%s'. No files were found to uninstall.",
self.dist.project_name,
)
return
logger.info(
'Uninstalling %s-%s:',
self.dist.project_name, self.dist.version
)
with indent_log():
paths = sorted(self.compact(self.paths))
if auto_confirm:
response = 'y'
else:
for path in paths:
logger.info(path)
response = ask('Proceed (y/n)? ', ('y', 'n'))
if self._refuse:
logger.info('Not removing or modifying (outside of prefix):')
for path in self.compact(self._refuse):
logger.info(path)
if response == 'y':
self.save_dir = tempfile.mkdtemp(suffix='-uninstall',
prefix='pip-')
for path in paths:
new_path = self._stash(path)
logger.debug('Removing file or directory %s', path)
self._moved_paths.append(path)
renames(path, new_path)
for pth in self.pth.values():
pth.remove()
logger.info(
'Successfully uninstalled %s-%s',
self.dist.project_name, self.dist.version
) | 0.001194 |
def _createGaVariantAnnotation(self):
"""
Convenience method to set the common fields in a GA VariantAnnotation
object from this variant set.
"""
ret = protocol.VariantAnnotation()
ret.created = self._creationTime
ret.variant_annotation_set_id = self.getId()
return ret | 0.006006 |
def close(self) -> None:
"""Closes all loaded tables."""
self.available_tables.clear()
self.zipinfo.clear()
self.block_age = 0
self.block_cache.clear()
while self.streams:
_, stream = self.streams.popitem()
stream.close() | 0.006757 |
def get_item(self, *key):
"""
The recommended way of retrieving an item by key when extending configmanager's behaviour.
Attribute and dictionary key access is configurable and may not always return items
(see PlainConfig for example), whereas this method will always return the corresponding
Item as long as NOT_FOUND hook callbacks don't break this convention.
Args:
*key
Returns:
item (:class:`.Item`):
"""
item = self._get_item_or_section(key)
if not item.is_item:
raise RuntimeError('{} is a section, not an item'.format(key))
return item | 0.007474 |
def get_farthest_entries(self, type_measurement):
"""!
@brief Find pair of farthest entries of the node.
@param[in] type_measurement (measurement_type): Measurement type that is used for obtaining farthest entries.
@return (list) Pair of farthest entries of the node that are represented by list.
"""
        farthest_entity1 = None
        farthest_entity2 = None
        farthest_distance = 0
        for i in range(0, len(self.entries)):
            candidate1 = self.entries[i]
            for j in range(i + 1, len(self.entries)):
                candidate2 = self.entries[j]
                candidate_distance = candidate1.get_distance(candidate2, type_measurement)
                if candidate_distance > farthest_distance:
                    farthest_distance = candidate_distance
                    farthest_entity1 = candidate1
                    farthest_entity2 = candidate2
        return [farthest_entity1, farthest_entity2] | 0.020852 |
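The same O(n^2) farthest-pair scan as a standalone helper over plain points, with the CF-entry distance replaced by a Euclidean metric purely for illustration:
import math

def farthest_pair(points):
    best, pair = 0.0, (None, None)
    for i, p in enumerate(points):
        for q in points[i + 1:]:
            d = math.dist(p, q)
            if d > best:
                best, pair = d, (p, q)
    return pair

farthest_pair([(0, 0), (1, 1), (5, 0)])   # ((0, 0), (5, 0))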
def to_hsl(self):
        ''' Return a corresponding HSL color for this RGB color.
        Returns:
            :class:`~bokeh.colors.hsl.HSL`
        '''
from .hsl import HSL # prevent circular import
h, l, s = colorsys.rgb_to_hls(float(self.r)/255, float(self.g)/255, float(self.b)/255)
return HSL(round(h*360), s, l, self.a) | 0.011331 |
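The conversion itself is plain colorsys, with the channel-order quirk handled (rgb_to_hls returns hue, lightness, saturation in that order). A bare-bones illustration without the Bokeh classes:
import colorsys

r, g, b = 255, 0, 0
h, l, s = colorsys.rgb_to_hls(r / 255, g / 255, b / 255)
hsl = (round(h * 360), s, l)   # (0, 1.0, 0.5) for pure red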
def delete(self, table_id):
""" Delete a table in Google BigQuery
Parameters
----------
        table_id : str
Name of table to be deleted
"""
from google.api_core.exceptions import NotFound
if not self.exists(table_id):
raise NotFoundException("Table does not exist")
table_ref = self.client.dataset(self.dataset_id).table(table_id)
try:
self.client.delete_table(table_ref)
except NotFound:
# Ignore 404 error which may occur if table already deleted
pass
except self.http_error as ex:
self.process_http_error(ex) | 0.003003 |
def set_alt(self, i, alt, break_alt=None, change_time=True):
'''set rally point altitude(s)'''
if i < 1 or i > self.rally_count():
print("Inavlid rally point number %u" % i)
return
self.rally_points[i-1].alt = int(alt)
        if break_alt is not None:
self.rally_points[i-1].break_alt = break_alt
if change_time:
self.last_change = time.time() | 0.007109 |
def prt_details(self, prt=sys.stdout):
"""Print summary of codes and groups that can be inputs to get_evcodes."""
prt.write('EVIDENCE CODES:\n')
for grp, code2nt in self.grp2code2nt.items():
prt.write(' {GROUP}:\n'.format(GROUP=grp))
for code, ntd in code2nt.items():
prt.write(' {CODE:>3} {NAME}\n'.format(CODE=code, NAME=ntd.name)) | 0.009828 |
def combine_counts(
fns,
define_sample_name=None,
):
"""
Combine featureCounts output files for multiple samples.
Parameters
----------
fns : list of strings
Filenames of featureCounts output files to combine.
define_sample_name : function
A function mapping the featureCounts output filenames to sample names.
If this is not provided, the header of the last column in the
featureCounts output will be used as the sample name.
Returns
-------
combined_counts : pandas.DataFrame
Combined featureCount counts.
"""
counts = []
for fn in fns:
df = pd.read_table(fn, skiprows=1, index_col=0)
counts.append(df[df.columns[-1]])
combined_counts = pd.DataFrame(counts).T
if define_sample_name:
names = [define_sample_name(x) for x in fns]
combined_counts.columns = names
combined_counts.index.name = ''
return combined_counts | 0.003093 |
def shorten_go_name_ptbl3(self, name, dcnt):
"""Shorten GO description for Table 3 in manuscript."""
if self._keep_this(name):
return name
name = name.replace("positive regulation of immune system process",
"+ reg. of immune sys. process")
name = name.replace("positive regulation of immune response",
"+ reg. of immune response")
name = name.replace("positive regulation of cytokine production",
"+ reg. of cytokine production")
if dcnt < 40:
name = name.replace("antigen processing and presentation", "a.p.p.")
if dcnt < 10:
name = name.replace("negative", "-")
name = name.replace("positive", "+")
#name = name.replace("tumor necrosis factor production", "tumor necrosis factor prod.")
name = name.replace("tumor necrosis factor production", "TNF production")
if dcnt < 4:
name = name.replace("regulation", "reg.")
name = name.replace("exogenous ", "")
name = name.replace(" via ", " w/")
name = name.replace("T cell mediated cytotoxicity", "cytotoxicity via T cell")
name = name.replace('involved in', 'in')
name = name.replace('-positive', '+')
return name | 0.005174 |
def writer(path):
"""
    Creates a compressed file writer for a path with a specified
    compression type.
"""
filename, extension = extract_extension(path)
if extension in FILE_WRITERS:
writer_func = FILE_WRITERS[extension]
return writer_func(path)
else:
raise RuntimeError("Output compression {0} not supported. Type {1}"
.format(extension, tuple(FILE_WRITERS.keys()))) | 0.002222 |
def add_event(self, name, subfolder, session):
"""
Add an event
"""
if self._similar_event_exists(subfolder):
subfolder += "_{0}".format(self.next_id(subfolder))
new_event = ProjectFileEvent(name=name, subfolder=subfolder)
session.add(new_event)
self.events.append(new_event)
session.commit()
return new_event | 0.005089 |
def get_values_fix_params(self, exp, rep, tag, which='last', **kwargs):
""" this function uses get_value(..) but returns all values where the
subexperiments match the additional kwargs arguments. if alpha=1.0,
beta=0.01 is given, then only those experiment values are returned,
as a list.
"""
subexps = self.get_exps(exp)
tagvalues = ['%s%s'%(k, convert_param_to_dirname(kwargs[k])) for k in kwargs]
values = [self.get_value(se, rep, tag, which) for se in subexps if all(map(lambda tv: tv in se, tagvalues))]
params = [self.get_params(se) for se in subexps if all(map(lambda tv: tv in se, tagvalues))]
return values, params | 0.012262 |
def get_parent_tags(self, rev=None):
"""
Return the tags for the parent revision (or None if no single
parent can be identified).
"""
try:
parent_rev = one(self.get_parent_revs(rev))
except Exception:
return None
return self.get_tags(parent_rev) | 0.041353 |
def output_error(msg):
"""
Prints the specified string to ``stderr``.
:param msg: the message to print
:type msg: str
"""
click.echo(click.style(msg, fg='red'), err=True) | 0.005102 |
def queryset(self, request, queryset):
"""
Return the filtered queryset based on the value provided in the query string.
source: https://docs.djangoproject.com/en/1.10/ref/contrib/admin/#django.contrib.admin.ModelAdmin.list_filter
"""
filter_args = {self._filter_arg_key: None}
if self.value() == "yes":
return queryset.exclude(**filter_args)
if self.value() == "no":
return queryset.filter(**filter_args) | 0.028103 |
def ownership(self, ownership):
"""
A list of dictionaries in format {'party_id': 'XYZ', 'split': Decimal('0.5')}
:param ownership:
:return:
"""
error_msg = 'ownership must be a list of dictionaries'
if ownership:
if not isinstance(ownership, list):
raise TypeError(error_msg)
if not all([isinstance(item, dict) for item in ownership]):
raise TypeError(error_msg)
if not all([item.get('party_id') for item in ownership]):
raise ValueError('Ownership missing one or more party_ids')
if not all([item.get('split') for item in ownership]):
raise ValueError('Ownership missing one or more splits')
if sum([item.get('split') for item in ownership]) != Decimal('1'):
raise ValueError('Ownership must sum up to 100%')
self._ownership = ownership | 0.003185 |
def __collect_file(self, filename, keep_original=False):
"""
Move or copy single file to artifacts dir
"""
dest = self.artifacts_dir + '/' + os.path.basename(filename)
logger.debug("Collecting file: %s to %s", filename, dest)
if not filename or not os.path.exists(filename):
logger.warning("File not found to collect: %s", filename)
return
if os.path.exists(dest):
# FIXME: 3 find a way to store artifacts anyway
logger.warning("File already exists: %s", dest)
return
if keep_original:
shutil.copy(filename, self.artifacts_dir)
else:
shutil.move(filename, self.artifacts_dir)
os.chmod(dest, 0o644) | 0.002618 |
def sround(x, precision=0):
"""
Round a single number using default non-deterministic generator.
@param x: to round.
@param precision: decimal places to round.
"""
sr = StochasticRound(precision=precision)
return sr.round(x) | 0.011583 |
def wrap_call(self, call_cmd):
"""
"wraps" the call_cmd so it can be executed by subprocess.call (and related flavors) as "args" argument
:param call_cmd: original args like argument (string or sequence)
:return: a sequence with the original command "executed" under trickle
"""
if isinstance(call_cmd, basestring): # FIXME python 3 unsafe
call_cmd = [call_cmd]
return [self._trickle_cmd, "-s"] + self._settings.to_argument_list() + list(call_cmd) | 0.007692 |
def bna_config_cmd_status_input_session_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
bna_config_cmd_status = ET.Element("bna_config_cmd_status")
config = bna_config_cmd_status
input = ET.SubElement(bna_config_cmd_status, "input")
session_id = ET.SubElement(input, "session-id")
session_id.text = kwargs.pop('session_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003945 |
def dedent(text):
"""Equivalent of textwrap.dedent that ignores unindented first line.
This means it will still dedent strings like:
'''foo
is a bar
'''
For use in wrap_paragraphs.
"""
if text.startswith('\n'):
# text starts with blank line, don't ignore the first line
return textwrap.dedent(text)
# split first line
splits = text.split('\n',1)
if len(splits) == 1:
# only one line
return textwrap.dedent(text)
first, rest = splits
# dedent everything but the first line
rest = textwrap.dedent(rest)
return '\n'.join([first, rest]) | 0.00318 |
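The difference from plain textwrap.dedent is easiest to see on a string whose unindented first line would otherwise block any dedenting (assuming the dedent above is importable):
import textwrap

text = '''foo
    is a bar
    '''
textwrap.dedent(text)   # indentation kept: the unindented first line blocks dedenting
dedent(text)            # 'foo\nis a bar\n' -- first line ignored, rest dedented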
def _sigma_pi_loE(self, Tp):
"""
inclusive cross section for Tth < Tp < 2 GeV
Fit from experimental data
"""
m_p = self._m_p
m_pi = self._m_pi
Mres = 1.1883 # GeV
Gres = 0.2264 # GeV
s = 2 * m_p * (Tp + 2 * m_p) # center of mass energy
gamma = np.sqrt(Mres ** 2 * (Mres ** 2 + Gres ** 2))
K = np.sqrt(8) * Mres * Gres * gamma
K /= np.pi * np.sqrt(Mres ** 2 + gamma)
fBW = m_p * K
fBW /= (
(np.sqrt(s) - m_p) ** 2 - Mres ** 2
) ** 2 + Mres ** 2 * Gres ** 2
mu = np.sqrt(
(s - m_pi ** 2 - 4 * m_p ** 2) ** 2 - 16 * m_pi ** 2 * m_p ** 2
)
mu /= 2 * m_pi * np.sqrt(s)
sigma0 = 7.66e-3 # mb
sigma1pi = sigma0 * mu ** 1.95 * (1 + mu + mu ** 5) * fBW ** 1.86
# two pion production
sigma2pi = 5.7 # mb
sigma2pi /= 1 + np.exp(-9.3 * (Tp - 1.4))
E2pith = 0.56 # GeV
sigma2pi[np.where(Tp < E2pith)] = 0.0
return (sigma1pi + sigma2pi) * 1e-27 | 0.00186 |
def render_diagram(out_base):
"""Render a data model diagram
Included in the diagram are all classes from the model registry.
For your project, write a small script that imports all models that you would like to
have included and then calls this function.
.. note:: This function requires the 'dot' executable from the GraphViz package to be installed
and its location configured in your `project_config.py` variable :attr:`DOT_EXECUTABLE`.
Args:
out_base (str): output base path (file endings will be appended)
"""
import codecs
import subprocess
import sadisplay
# generate class descriptions
desc = sadisplay.describe(list(model_registry.values()),
show_methods=False,
show_properties=True,
show_indexes=True,
)
# write description in DOT format
with codecs.open(out_base + '.dot', 'w', encoding='utf-8') as f:
f.write(sadisplay.dot(desc))
# check existence of DOT_EXECUTABLE variable and file
if not hasattr(config, 'DOT_EXECUTABLE'):
raise RuntimeError("Please configure the 'DOT_EXECUTABLE' variable in your 'project_config.py'")
if not os.path.exists(config.DOT_EXECUTABLE):
raise IOError("Could not find file pointed to by 'DOT_EXECUTABLE': " + str(config.DOT_EXECUTABLE))
# render to image using DOT
# noinspection PyUnresolvedReferences
subprocess.check_call([
config.DOT_EXECUTABLE,
'-T', 'png',
'-o', out_base + '.png',
out_base + '.dot'
]) | 0.003643 |
def set_process_type(self, value):
"""
Setter for 'process_type' field.
:param value - a new value of 'process_type' field.
"""
if value is None or not isinstance(value, str):
raise TypeError("ProcessType must be set to a String")
elif value not in Process.__process_type_list:
raise ValueError("ProcessType must be one of specified values: 'None', 'Public', 'Private'")
else:
self.__process_type = value | 0.006048 |
def calc_cagr(prices):
"""
Calculates the `CAGR (compound annual growth rate) <https://www.investopedia.com/terms/c/cagr.asp>`_ for a given price series.
Args:
* prices (pandas.Series): A Series of prices.
Returns:
* float -- cagr.
"""
start = prices.index[0]
end = prices.index[-1]
return (prices.iloc[-1] / prices.iloc[0]) ** (1 / year_frac(start, end)) - 1 | 0.007335 |
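A worked example of the formula, with year_frac approximated as calendar days / 365.25 (the real helper may differ slightly):
import pandas as pd

prices = pd.Series(
    [100.0, 121.0],
    index=pd.to_datetime(['2018-01-01', '2020-01-01']),
)
years = (prices.index[-1] - prices.index[0]).days / 365.25
cagr = (prices.iloc[-1] / prices.iloc[0]) ** (1 / years) - 1   # ~0.10, i.e. ~10% per year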
def range(self, index, *args):
"""
Set the range of each axis, one at a time
args are of the form <start of range>,<end of range>,<interval>
APIPARAM: chxr
"""
self.data['ranges'].append('%s,%s'%(index,
','.join(map(smart_str, args))))
return self.parent | 0.011561 |
def delete_stack(name=None, poll=0, timeout=60, profile=None):
'''
Delete a stack (heat stack-delete)
name
Name of the stack
poll
Poll and report events until stack complete
timeout
Stack creation timeout in minute
profile
Profile to use
CLI Examples:
.. code-block:: bash
salt '*' heat.delete_stack name=mystack poll=5 \\
profile=openstack1
'''
h_client = _auth(profile)
ret = {
'result': True,
'comment': ''
}
if not name:
ret['result'] = False
ret['comment'] = 'Parameter name missing or None'
return ret
try:
h_client.stacks.delete(name)
except heatclient.exc.HTTPNotFound:
ret['result'] = False
ret['comment'] = 'No stack {0}'.format(name)
except heatclient.exc.HTTPForbidden as forbidden:
log.exception(forbidden)
ret['result'] = False
ret['comment'] = six.text_type(forbidden)
if ret['result'] is False:
return ret
if poll > 0:
try:
stack_status, msg = _poll_for_events(h_client, name, action='DELETE',
poll_period=poll, timeout=timeout)
except heatclient.exc.CommandError:
ret['comment'] = 'Deleted stack {0}.'.format(name)
return ret
except Exception as ex: # pylint: disable=W0703
log.exception('Delete failed %s', ex)
ret['result'] = False
ret['comment'] = '{0}'.format(ex)
return ret
if stack_status == 'DELETE_FAILED':
ret['result'] = False
ret['comment'] = 'Deleted stack FAILED\'{0}\'{1}.'.format(name, msg)
else:
ret['comment'] = 'Deleted stack {0}.'.format(name)
return ret | 0.002175 |
def report_import(self, name, filename):
"""report_import Report_Name, filename
Uploads a report template to the current user's reports
UN-DOCUMENTED CALL: This function is not considered stable.
"""
data = self._upload(filename)
return self.raw_query('report', 'import', data={
'filename': data['filename'],
'name': name,
}) | 0.004926 |
def list_data(
self, previous_data=False, prompt=False, console_row=False,
console_row_to_cursor=False, console_row_from_cursor=False
):
""" Return list of strings. Where each string is fitted to windows width. Parameters are the same as
they are in :meth:`.WConsoleWindow.data` method
:return: list of str
"""
return self.split(self.data(
previous_data, prompt, console_row, console_row_to_cursor, console_row_from_cursor
)) | 0.03139 |
def update_reportnumbers(self):
"""Handle reportnumbers. """
rep_088_fields = record_get_field_instances(self.record, '088')
for field in rep_088_fields:
subs = field_get_subfields(field)
if '9' in subs:
for val in subs['9']:
if val.startswith('P0') or val.startswith('CM-P0'):
sf = [('9', 'CERN'), ('b', val)]
record_add_field(self.record, '595', subfields=sf)
for key, val in field[0]:
if key in ['a', '9'] and not val.startswith('SIS-'):
record_add_field(
self.record, '037', subfields=[('a', val)])
record_delete_fields(self.record, "088")
# 037 Externals also...
rep_037_fields = record_get_field_instances(self.record, '037')
for field in rep_037_fields:
subs = field_get_subfields(field)
if 'a' in subs:
for value in subs['a']:
if 'arXiv' in value:
new_subs = [('a', value), ('9', 'arXiv')]
for fld in record_get_field_instances(self.record, '695'):
for key, val in field_get_subfield_instances(fld):
if key == 'a':
new_subs.append(('c', val))
break
nf = create_field(subfields=new_subs)
record_replace_field(self.record, '037', nf, field[4])
for key, val in field[0]:
if key in ['a', '9'] and val.startswith('SIS-'):
record_delete_field(
self.record, '037', field_position_global=field[4]) | 0.001671 |
def removeIterator(self, login, tableName, iterName, scopes):
"""
Parameters:
- login
- tableName
- iterName
- scopes
"""
self.send_removeIterator(login, tableName, iterName, scopes)
self.recv_removeIterator() | 0.004016 |
def cublasSsyr(handle, uplo, n, alpha, x, incx, A, lda):
"""
Rank-1 operation on real symmetric matrix.
"""
status = _libcublas.cublasSsyr_v2(handle,
_CUBLAS_FILL_MODE[uplo], n,
ctypes.byref(ctypes.c_float(alpha)),
int(x), incx, int(A), lda)
cublasCheckStatus(status) | 0.009804 |
def updateDisplayLabel(self, value=None):
"""Update the display label to reflect the value of the parameter."""
if value is None:
value = self.param.value()
opts = self.param.opts
if isinstance(self.widget, QtWidgets.QAbstractSpinBox):
text = asUnicode(self.widget.lineEdit().text())
elif isinstance(self.widget, QtWidgets.QComboBox):
text = self.widget.currentText()
else:
text = asUnicode(value)
self.displayLabel.setText(text) | 0.003752 |
def create_new_dispatch(self, dispatch):
"""
Create a new dispatch
:param dispatch:
is the new dispatch that the client wants to create
"""
self._validate_uuid(dispatch.dispatch_id)
# Create new dispatch
url = "/notification/v1/dispatch"
post_response = NWS_DAO().postURL(
url, self._write_headers(), self._json_body(dispatch.json_data()))
if post_response.status != 200:
raise DataFailureException(
url, post_response.status, post_response.data)
return post_response.status | 0.003311 |
def start(self):
""" Starts the clock from 0.
Uses a separate thread to handle the timing functionalities. """
if not hasattr(self,"thread") or not self.thread.isAlive():
self.thread = threading.Thread(target=self.__run)
self.status = RUNNING
self.reset()
self.thread.start()
else:
print("Clock already running!") | 0.038576 |
def _get_taulny(self, C, mag):
"""
Returns the inter-event random effects coefficient (tau)
Equation 28.
"""
if mag <= 4.5:
return C["tau1"]
elif mag >= 5.5:
return C["tau2"]
else:
return C["tau2"] + (C["tau1"] - C["tau2"]) * (5.5 - mag) | 0.006079 |
def _simple_name(distribution):
"""Infer the original name passed into a distribution constructor.
Distributions typically follow the pattern of
    with tf.name_scope(name) as name:
super(name=name)
so we attempt to reverse the name-scope transformation to allow
addressing of RVs by the distribution's original, user-visible
name kwarg.
Args:
distribution: a tfd.Distribution instance.
Returns:
simple_name: the original name passed into the Distribution.
#### Example
```
d1 = tfd.Normal(0., 1., name='x') # d1.name = 'x/'
d2 = tfd.Normal(0., 1., name='x') # d2.name = 'x_2/'
_simple_name(d2) # returns 'x'
```
"""
simple_name = distribution.name
# turn 'scope/x/' into 'x'
if simple_name.endswith('/'):
simple_name = simple_name.split('/')[-2]
# turn 'x_3' into 'x'
parts = simple_name.split('_')
if parts[-1].isdigit():
simple_name = '_'.join(parts[:-1])
return simple_name | 0.009534 |
def closeEvent(self, event):
"""Closes listening threads and saves GUI data for later use.
Re-implemented from :qtdoc:`QWidget`
"""
self.acqmodel.stop_listening() # close listener threads
self.saveInputs(self.inputsFilename)
# save GUI size
settings = QtCore.QSettings("audiolab")
settings.setValue("geometry", self.saveGeometry())
settings.setValue("windowState", self.saveState())
logger = logging.getLogger('main')
logger.info('All user settings saved')
self.garbage_timer.stop()
gc.enable() | 0.004992 |
def from_file(cls, f, filename=None, includedir='', seenfiles=None):
'''Create a token stream by reading an input file
Read tokens from `f`. If an include directive ('@include "file.cfg"')
is found, read its contents as well.
The `filename` argument is used for error messages and to detect
circular imports. ``includedir`` sets the lookup directory for included
files. ``seenfiles`` is used internally to detect circular includes,
    and should normally not be supplied by users of this function.
'''
if filename is None:
filename = getattr(f, 'name', '<unknown>')
if seenfiles is None:
seenfiles = set()
if filename in seenfiles:
raise ConfigParseError("Circular include: %r" % (filename,))
seenfiles = seenfiles | {filename} # Copy seenfiles, don't alter it.
tokenizer = Tokenizer(filename=filename)
lines = []
tokens = []
for line in f:
m = re.match(r'@include "(.*)"$', line.strip())
if m:
tokens.extend(tokenizer.tokenize(''.join(lines)))
lines = [re.sub(r'\S', ' ', line)]
includefilename = decode_escapes(m.group(1))
includefilename = os.path.join(includedir, includefilename)
try:
includefile = open(includefilename, "r")
except IOError:
raise ConfigParseError("Could not open include file %r" %
(includefilename,))
with includefile:
includestream = cls.from_file(includefile,
filename=includefilename,
includedir=includedir,
seenfiles=seenfiles)
tokens.extend(includestream.tokens)
else:
lines.append(line)
tokens.extend(tokenizer.tokenize(''.join(lines)))
return cls(tokens) | 0.000951 |
def eigh(a, eigvec=True, rcond=None):
""" Eigenvalues and eigenvectors of symmetric matrix ``a``.
Args:
a: Two-dimensional, square Hermitian matrix/array of numbers
and/or :class:`gvar.GVar`\s. Array elements must be
real-valued if `gvar.GVar`\s are involved (i.e., symmetric
matrix).
eigvec (bool): If ``True`` (default), method returns a tuple
of arrays ``(val, vec)`` where ``val[i]`` are the
eigenvalues of ``a`` (in ascending order), and ``vec[:, i]``
are the corresponding eigenvectors of ``a``. Only ``val`` is
returned if ``eigvec=False``.
rcond (float): Eigenvalues whose difference is smaller than
``rcond`` times their sum are assumed to be degenerate
            (and ignored) when computing variances for the eigenvectors.
Default (``rcond=None``) is ``max(M,N)`` times machine precision.
Returns:
Tuple ``(val,vec)`` of eigenvalues and eigenvectors of
matrix ``a`` if parameter ``eigvec==True`` (default).
The eigenvalues ``val[i]`` are in ascending order and
        ``vec[:, i]`` are the corresponding eigenvectors. Only
the eigenvalues ``val`` are returned if ``eigvec=False``.
Raises:
ValueError: If matrix is not square and two-dimensional.
"""
a = numpy.asarray(a)
if a.dtype != object:
val, vec = numpy.linalg.eigh(a)
return (val, vec) if eigvec else val
amean = gvar.mean(a)
if amean.ndim != 2 or amean.shape[0] != amean.shape[1]:
raise ValueError('bad matrix shape: ' + str(a.shape))
if rcond is None:
rcond = numpy.finfo(float).eps * max(a.shape)
da = a - amean
val0, vec0 = numpy.linalg.eigh(amean)
val = val0 + [
vec0[:, i].conjugate().dot(da.dot(vec0[:, i])) for i in range(vec0.shape[1])
]
if eigvec == True:
if vec0.dtype == complex:
raise ValueError('cannot evaluate eigenvectors when a is complex')
vec = numpy.array(vec0, dtype=object)
for i in range(len(val)):
for j in range(len(val)):
dval = val0[i] - val0[j]
if abs(dval) < rcond * abs(val0[j] + val0[i]) or dval == 0.0:
continue
vec[:, i] += vec0[:, j] * (
vec0[:, j].dot(da.dot(vec0[:, i])) / dval
)
return val, vec
else:
return val | 0.002022 |
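The correction added to `val0` above is first-order perturbation theory: each eigenvalue of `a0 + da` shifts by roughly `v_i^T da v_i`. A numpy-only sanity check of that approximation (no gvar objects involved; the matrices are made up):
```
import numpy as np

rng = np.random.default_rng(0)
a0 = rng.normal(size=(4, 4))
a0 = (a0 + a0.T) / 2                   # symmetric base matrix
da = 1e-4 * rng.normal(size=(4, 4))
da = (da + da.T) / 2                   # small symmetric perturbation

val0, vec0 = np.linalg.eigh(a0)
# first-order estimate: val_i ~ val0_i + v_i^T da v_i
approx = val0 + np.array([vec0[:, i] @ da @ vec0[:, i] for i in range(4)])
exact = np.linalg.eigh(a0 + da)[0]
print(np.max(np.abs(approx - exact)))  # error is O(|da|^2), well below 1e-4
```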
def revoke_permissions(self, ctype):
"""
Remove all permissions for the content type to be removed
"""
ContentType = apps.get_model('contenttypes', 'ContentType')
try:
Permission = apps.get_model('auth', 'Permission')
except LookupError:
return
codenames = ['{0}_{1}'.format(perm, ctype) for perm in self.default_permissions]
cascade_element = apps.get_model(self.label, 'cascadeelement')
element_ctype = ContentType.objects.get_for_model(cascade_element)
Permission.objects.filter(content_type=element_ctype, codename__in=codenames).delete() | 0.006182 |
def is_scalar(self):
"""
:return:
:rtype: bool
"""
return \
isinstance(self._element_template, Boolean) or \
isinstance(self._element_template, Float) or \
isinstance(self._element_template, Integer) or \
isinstance(self._element_template, String) | 0.005952 |
def snpsift(self):
"""SnpSift"""
tstart = datetime.now()
# command = 'python %s/snpsift.py -i sanity_check/checked.vcf 2>log/snpsift.log' % (scripts_dir)
# self.shell(command)
ss = snpsift.SnpSift(self.vcf_file)
ss.run()
tend = datetime.now()
execution_time = tend - tstart | 0.008798 |
def cudnnSetTensor(handle, srcDesc, srcData, value):
""""
Set all data points of a tensor to a given value : srcDest = alpha.
Parameters
----------
handle : cudnnHandle
Handle to a previously created cuDNN context.
srcDesc : cudnnTensorDescriptor
Handle to a previously initialized tensor descriptor.
srcData : void_p
Pointer to data of the tensor described by srcDesc descriptor.
value : float
Value that all elements of the tensor will be set to.
"""
dataType, _, _, _, _, _, _, _, _ = cudnnGetTensor4dDescriptor(srcDesc)
if dataType == cudnnDataType['CUDNN_DATA_DOUBLE']:
        valueRef = ctypes.byref(ctypes.c_double(value))
    else:
        valueRef = ctypes.byref(ctypes.c_float(value))
    status = _libcudnn.cudnnSetTensor(handle, srcDesc, srcData, valueRef)
cudnnCheckStatus(status) | 0.00114 |
def _upsampling(lr_array, rescale, reference_shape, interp='linear'):
""" Upsample the low-resolution array to the original high-resolution grid
:param lr_array: Low-resolution array to be upsampled
:param rescale: Rescale factor for rows/columns
:param reference_shape: Original size of high-resolution eopatch. Tuple with dimension for time, height and
width
    :param interp: Interpolation method to be used in upsampling. Default is `'linear'`
:return: Upsampled array. The array has 4 dimensions, the last one being of size 1
"""
hr_shape = reference_shape + (1,)
lr_shape = lr_array.shape + (1,)
if rescale is None:
return lr_array.reshape(lr_shape)
out_array = scipy.ndimage.interpolation.zoom(lr_array.reshape(lr_shape),
(1.0,) + tuple(1 / x for x in rescale) + (1.0,),
output=lr_array.dtype, order=INTERP_METHODS.index(interp),
mode='nearest')
# Padding and cropping might be needed to get to the reference shape
out_shape = out_array.shape
padding = tuple((0, np.max((h-o, 0))) for h, o in zip(hr_shape, out_shape))
hr_array = np.pad(out_array, padding, 'edge')
hr_array = hr_array[:, :hr_shape[1], :hr_shape[2], :]
return hr_array | 0.006748 |
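On a toy 2-D array the zoom / pad / crop sequence looks like this; the shapes and the factor of 2 are invented for illustration, with `scipy.ndimage.zoom` standing in for the `interpolation.zoom` call above.
```
import numpy as np
import scipy.ndimage

lr = np.arange(3 * 4, dtype=float).reshape(3, 4)                  # low-resolution toy array
hr = scipy.ndimage.zoom(lr, (2.0, 2.0), order=1, mode='nearest')  # roughly (6, 8)

reference_shape = (7, 9)                                          # target high-res grid
# pad with edge values where zoom fell short, then crop any overshoot
padding = tuple((0, max(r - s, 0)) for r, s in zip(reference_shape, hr.shape))
hr = np.pad(hr, padding, 'edge')[:reference_shape[0], :reference_shape[1]]
print(hr.shape)                                                   # (7, 9)
```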
def get_pin_and_cookie_name(app):
"""Given an application object this returns a semi-stable 9 digit pin
code and a random key. The hope is that this is stable between
restarts to not make debugging particularly frustrating. If the pin
was forcefully disabled this returns `None`.
Second item in the resulting tuple is the cookie name for remembering.
"""
pin = os.environ.get("WERKZEUG_DEBUG_PIN")
rv = None
num = None
# Pin was explicitly disabled
if pin == "off":
return None, None
# Pin was provided explicitly
if pin is not None and pin.replace("-", "").isdigit():
# If there are separators in the pin, return it directly
if "-" in pin:
rv = pin
else:
num = pin
modname = getattr(app, "__module__", getattr(app.__class__, "__module__"))
try:
# getuser imports the pwd module, which does not exist in Google
# App Engine. It may also raise a KeyError if the UID does not
# have a username, such as in Docker.
username = getpass.getuser()
except (ImportError, KeyError):
username = None
mod = sys.modules.get(modname)
# This information only exists to make the cookie unique on the
# computer, not as a security feature.
probably_public_bits = [
username,
modname,
getattr(app, "__name__", getattr(app.__class__, "__name__")),
getattr(mod, "__file__", None),
]
# This information is here to make it harder for an attacker to
# guess the cookie name. They are unlikely to be contained anywhere
# within the unauthenticated debug page.
private_bits = [str(uuid.getnode()), get_machine_id()]
h = hashlib.md5()
for bit in chain(probably_public_bits, private_bits):
if not bit:
continue
if isinstance(bit, text_type):
bit = bit.encode("utf-8")
h.update(bit)
h.update(b"cookiesalt")
cookie_name = "__wzd" + h.hexdigest()[:20]
# If we need to generate a pin we salt it a bit more so that we don't
# end up with the same value and generate out 9 digits
if num is None:
h.update(b"pinsalt")
num = ("%09d" % int(h.hexdigest(), 16))[:9]
# Format the pincode in groups of digits for easier remembering if
# we don't have a result yet.
if rv is None:
for group_size in 5, 4, 3:
if len(num) % group_size == 0:
rv = "-".join(
num[x : x + group_size].rjust(group_size, "0")
for x in range(0, len(num), group_size)
)
break
else:
rv = num
return rv, cookie_name | 0.000734 |
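The final block only picks a group size (5, then 4, then 3) that divides the nine digits evenly and hyphenates them; extracted as a standalone, hypothetical helper (not part of Werkzeug):
```
def format_pin(num):
    # num is a string of digits, e.g. "123456789"
    for group_size in (5, 4, 3):
        if len(num) % group_size == 0:
            return "-".join(num[x:x + group_size]
                            for x in range(0, len(num), group_size))
    return num

print(format_pin("123456789"))  # "123-456-789"
```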
def split(self):
"""
Returns a pair of CipherState objects for encrypting/decrypting transport messages.
:return: tuple (CipherState, CipherState)
"""
# Sets temp_k1, temp_k2 = HKDF(ck, b'', 2).
temp_k1, temp_k2 = self.noise_protocol.hkdf(self.ck, b'', 2)
# If HASHLEN is 64, then truncates temp_k1 and temp_k2 to 32 bytes.
if self.noise_protocol.hash_fn.hashlen == 64:
temp_k1 = temp_k1[:32]
temp_k2 = temp_k2[:32]
# Creates two new CipherState objects c1 and c2.
# Calls c1.InitializeKey(temp_k1) and c2.InitializeKey(temp_k2).
c1, c2 = CipherState(self.noise_protocol), CipherState(self.noise_protocol)
c1.initialize_key(temp_k1)
c2.initialize_key(temp_k2)
if self.noise_protocol.handshake_state.initiator:
self.noise_protocol.cipher_state_encrypt = c1
self.noise_protocol.cipher_state_decrypt = c2
else:
self.noise_protocol.cipher_state_encrypt = c2
self.noise_protocol.cipher_state_decrypt = c1
self.noise_protocol.handshake_done()
# Returns the pair (c1, c2).
return c1, c2 | 0.003255 |
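The split is a plain two-output HKDF expansion of the chaining key. An illustrative HKDF-SHA256 built from the standard library, standing in for the protocol object's `hkdf` helper (not the project's actual implementation):
```
import hashlib
import hmac

def hkdf_two(chaining_key, input_key_material=b""):
    # Extract-and-expand in the Noise/RFC 5869 style, truncated to two outputs.
    temp_key = hmac.new(chaining_key, input_key_material, hashlib.sha256).digest()
    out1 = hmac.new(temp_key, b"\x01", hashlib.sha256).digest()
    out2 = hmac.new(temp_key, out1 + b"\x02", hashlib.sha256).digest()
    return out1, out2

temp_k1, temp_k2 = hkdf_two(b"\x00" * 32)
print(len(temp_k1), len(temp_k2))  # 32 32
```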
def serializer_for(self, obj):
"""
Searches for a serializer for the provided object
Serializers will be searched in this order;
1-NULL serializer
2-Default serializers, like primitives, arrays, string and some default types
3-Custom registered types by user
4-Global serializer if registered by user
        5-pickle serialization as a fallback
:param obj: input object
:return: Serializer
"""
# 1-NULL serializer
if obj is None:
return self._null_serializer
obj_type = type(obj)
# 2-Default serializers, Dataserializable, Portable, primitives, arrays, String and some helper types(BigInteger etc)
serializer = self.lookup_default_serializer(obj_type, obj)
# 3-Custom registered types by user
if serializer is None:
serializer = self.lookup_custom_serializer(obj_type)
        # 4-Global serializer if registered by user
if serializer is None:
serializer = self.lookup_global_serializer(obj_type)
        # 5-Internal (pickle) serializer as a fallback
if serializer is None:
serializer = self.lookup_python_serializer(obj_type)
if serializer is None:
raise HazelcastSerializationError("There is no suitable serializer for:" + str(obj_type))
return serializer | 0.003569 |
def wrap(self, message):
"""
        NTLM GSS_Wrap()
:param message: The message to be encrypted
:return: The signed and encrypted message
"""
cipher_text = _Ntlm1Session.encrypt(self, message)
signature = _Ntlm1Session.sign(self, message)
return cipher_text, signature | 0.006192 |
def search(self, buffer, freshlen, searchwindowsize=None):
"""This searches 'buffer' for the first occurence of one of the search
strings. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before. It helps to avoid
searching the same, possibly big, buffer over and over again.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, this returns -1. """
absurd_match = len(buffer)
first_match = absurd_match
# 'freshlen' helps a lot here. Further optimizations could
# possibly include:
#
# using something like the Boyer-Moore Fast String Searching
# Algorithm; pre-compiling the search through a list of
# strings into something that can scan the input once to
# search for all N strings; realize that if we search for
# ['bar', 'baz'] and the input is '...foo' we need not bother
# rescanning until we've read three more bytes.
#
# Sadly, I don't know enough about this interesting topic. /grahn
for index, s in self._strings:
if searchwindowsize is None:
# the match, if any, can only be in the fresh data,
# or at the very end of the old data
offset = -(freshlen+len(s))
else:
# better obey searchwindowsize
offset = -searchwindowsize
n = buffer.find(s, offset)
if n >= 0 and n < first_match:
first_match = n
best_index, best_match = index, s
if first_match == absurd_match:
return -1
self.match = best_match
self.start = first_match
self.end = self.start + len(self.match)
return best_index | 0.001034 |
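The `freshlen` trick limits each scan to the newly appended bytes plus a needle-length overlap. A minimal, hypothetical re-implementation over plain strings that keeps the earliest match, as the method above does:
```
def find_first(buffer, needles, freshlen):
    # Only the last `freshlen` characters are new; a match can start at most
    # len(needle) - 1 characters before them, so offset the search accordingly.
    best = None
    for needle in needles:
        offset = -(freshlen + len(needle))
        n = buffer.find(needle, offset)
        if n >= 0 and (best is None or n < best[0]):
            best = (n, needle)
    return best  # (start_index, needle) or None

print(find_first("....hello world", ["world", "lo w"], freshlen=6))  # (7, 'lo w')
```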
def _create_cell(args, cell_body):
"""Implements the pipeline cell create magic used to create Pipeline objects.
The supported syntax is:
%%pipeline create <args>
[<inline YAML>]
Args:
args: the arguments following '%%pipeline create'.
cell_body: the contents of the cell
"""
name = args.get('name')
if name is None:
raise Exception("Pipeline name was not specified.")
pipeline_spec = google.datalab.utils.commands.parse_config(
cell_body, google.datalab.utils.commands.notebook_environment())
airflow_spec = google.datalab.contrib.pipeline._pipeline.PipelineGenerator.generate_airflow_spec(
name, pipeline_spec)
debug = args.get('debug')
if debug is True:
return airflow_spec | 0.012245 |
def _pick_best_quality_score(vrn_file):
"""Flexible quality score selection, picking the best available.
Implementation based on discussion:
https://github.com/bcbio/bcbio-nextgen/commit/a538cecd86c0000d17d3f9d4f8ac9d2da04f9884#commitcomment-14539249
(RTG=AVR/GATK=VQSLOD/MuTect=t_lod_fstar, otherwise GQ, otherwise QUAL, otherwise DP.)
For MuTect, it's not clear how to get t_lod_fstar, the right quality score, into VCF cleanly.
MuTect2 has TLOD in the INFO field.
"""
# pysam fails on checking reference contigs if input is empty
if not vcfutils.vcf_has_variants(vrn_file):
return "DP"
to_check = 25
scores = collections.defaultdict(int)
try:
in_handle = VariantFile(vrn_file)
except ValueError:
raise ValueError("Failed to parse input file in preparation for validation: %s" % vrn_file)
with contextlib.closing(in_handle) as val_in:
for i, rec in enumerate(val_in):
if i > to_check:
break
if "VQSLOD" in rec.info and rec.info.get("VQSLOD") is not None:
scores["INFO=VQSLOD"] += 1
if "TLOD" in rec.info and rec.info.get("TLOD") is not None:
scores["INFO=TLOD"] += 1
for skey in ["AVR", "GQ", "DP"]:
if len(rec.samples) > 0 and rec.samples[0].get(skey) is not None:
scores[skey] += 1
if rec.qual:
scores["QUAL"] += 1
for key in ["AVR", "INFO=VQSLOD", "INFO=TLOD", "GQ", "QUAL", "DP"]:
if scores[key] > 0:
return key
raise ValueError("Did not find quality score for validation from %s" % vrn_file) | 0.003576 |
def fetch(method, uri, params_prefix=None, **params):
"""Fetch the given uri and return the contents of the response."""
params = _prepare_params(params, params_prefix)
if method == "POST" or method == "PUT":
r_data = {"data": params}
else:
r_data = {"params": params}
# build the HTTP request and use basic authentication
url = "https://%s/%s.json" % (CHALLONGE_API_URL, uri)
try:
response = request(
method,
url,
auth=get_credentials(),
**r_data)
response.raise_for_status()
except HTTPError:
if response.status_code != 422:
response.raise_for_status()
# wrap up application-level errors
doc = response.json()
if doc.get("errors"):
raise ChallongeException(*doc['errors'])
return response | 0.001155 |
def ToMicroseconds(self):
"""Converts a Duration to microseconds."""
micros = _RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND)
return self.seconds * _MICROS_PER_SECOND + micros | 0.005208 |
def call(self, command, *args):
"""
Sends call to the function, whose name is specified by command.
Used by Script invocations and normalizes calls using standard
Redis arguments to use the expected redis-py arguments.
"""
command = self._normalize_command_name(command)
args = self._normalize_command_args(command, *args)
redis_function = getattr(self, command)
value = redis_function(*args)
return self._normalize_command_response(command, value) | 0.003774 |
def do_rm(self, line):
"rm [:tablename] [!fieldname:expectedvalue] [-v] {haskkey [rangekey]}"
table, line = self.get_table_params(line)
expected, line = self.get_expected(line)
args = self.getargs(line)
if "-v" in args:
ret = "ALL_OLD"
args.remove("-v")
else:
ret = None
hkey = self.get_typed_key_value(table, args[0], True)
rkey = self.get_typed_key_value(table, args[1], False) if len(args) > 1 else None
item = table.new_item(hash_key=hkey, range_key=rkey)
item = item.delete(expected_value=expected, return_values=ret)
self.pprint(item)
if self.consumed:
print "consumed units:", item.consumed_units | 0.003989 |
def launch_process(self, command):
# type: (Union[bytes,text_type])->None
"""* What you can do
        - It starts the process and keeps it running.
"""
        if self.option is not None:
command_plus_option = self.command + " " + self.option
else:
command_plus_option = self.command
if six.PY3:
if shutil.which(command) is None:
raise Exception("No command at {}".format(command))
else:
self.process_analyzer = pexpect.spawnu(command_plus_option)
self.process_id = self.process_analyzer.pid
else:
doc_command_string = "echo '' | {}".format(command)
command_check = os.system(doc_command_string)
if not command_check == 0:
raise Exception("No command at {}".format(command))
else:
self.process_analyzer = pexpect.spawnu(command_plus_option)
self.process_id = self.process_analyzer.pid | 0.003949 |
def startswith(self, prefix, start=0, end=-1):
"""Return True if string starts with the specified prefix, False otherwise.
With optional start, test beginning at that position. With optional end, stop comparing at that position. prefix
can also be a tuple of strings to try.
:param str prefix: Prefix to search.
:param int start: Beginning position.
:param int end: Stop comparison at this position.
"""
return self.value_no_colors.startswith(prefix, start, end) | 0.007576 |
def set_display_sleep(minutes):
'''
    Set the amount of idle time until the display sleeps. Pass "Never" or "Off"
to never sleep.
:param minutes: Can be an integer between 1 and 180 or "Never" or "Off"
:ptype: int, str
:return: True if successful, False if not
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' power.set_display_sleep 120
salt '*' power.set_display_sleep off
'''
value = _validate_sleep(minutes)
cmd = 'systemsetup -setdisplaysleep {0}'.format(value)
salt.utils.mac_utils.execute_return_success(cmd)
return salt.utils.mac_utils.confirm_updated(
str(value),
get_display_sleep,
) | 0.001437 |
def draw_key(self, surface, key):
"""Default drawing method for key.
        Draw the key according to its type.
:param surface: Surface background should be drawn in.
:param key: Target key to be drawn.
"""
if isinstance(key, VSpaceKey):
self.draw_space_key(surface, key)
elif isinstance(key, VBackKey):
self.draw_back_key(surface, key)
elif isinstance(key, VUppercaseKey):
self.draw_uppercase_key(surface, key)
elif isinstance(key, VSpecialCharKey):
self.draw_special_char_key(surface, key)
else:
self.draw_character_key(surface, key) | 0.004464 |
def verify(self, smessage, signature=None, encoder=encoding.RawEncoder):
"""
Verifies the signature of a signed message, returning the message
if it has not been tampered with else raising
:class:`~nacl.signing.BadSignatureError`.
        :param smessage: [:class:`bytes`] Either the original message or a
            signature and message concatenated together.
:param signature: [:class:`bytes`] If an unsigned message is given for
smessage then the detached signature must be provided.
:param encoder: A class that is able to decode the secret message and
signature.
:rtype: :class:`bytes`
"""
if signature is not None:
# If we were given the message and signature separately, combine
# them.
smessage = signature + encoder.decode(smessage)
else:
# Decode the signed message
smessage = encoder.decode(smessage)
return nacl.bindings.crypto_sign_open(smessage, self._key) | 0.001914 |
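For reference, a typical round trip with PyNaCl's signing API (module and class names as in the standard `nacl.signing` / `nacl.exceptions` packages; the message is just a demo value):
```
from nacl.signing import SigningKey
from nacl.exceptions import BadSignatureError

signing_key = SigningKey.generate()
signed = signing_key.sign(b"attack at dawn")        # signature + message concatenated

verify_key = signing_key.verify_key
print(verify_key.verify(signed))                    # b'attack at dawn'

try:
    verify_key.verify(b"tampered!" + signed[9:])    # corrupt part of the signature
except BadSignatureError:
    print("signature check failed as expected")
```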