def save_mission(aFileName):
"""
Save a mission in the Waypoint file format
(http://qgroundcontrol.org/mavlink/waypoint_protocol#waypoint_file_format).
"""
print("\nSave mission from Vehicle to file: %s" % aFileName)
#Download mission from vehicle
missionlist = download_mission()
#Add file-format information
output='QGC WPL 110\n'
#Add home location as 0th waypoint
home = vehicle.home_location
output+="%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (0,1,0,16,0,0,0,0,home.lat,home.lon,home.alt,1)
#Add commands
for cmd in missionlist:
commandline="%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (cmd.seq,cmd.current,cmd.frame,cmd.command,cmd.param1,cmd.param2,cmd.param3,cmd.param4,cmd.x,cmd.y,cmd.z,cmd.autocontinue)
output+=commandline
with open(aFileName, 'w') as file_:
print(" Write mission to file")
file_.write(output)
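# Usage sketch (assumes a DroneKit-style connection; the connection string and
# filename are hypothetical, and `vehicle` is the module-level global that
# save_mission and download_mission read):
# from dronekit import connect
# vehicle = connect('127.0.0.1:14550', wait_ready=True)
# save_mission('exported_mission.txt')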
def fit(arr, dist='norm'):
"""Fit an array to a univariate distribution along the time dimension.
Parameters
----------
arr : xarray.DataArray
Time series to be fitted along the time dimension.
dist : str
Name of the univariate distribution, such as beta, expon, genextreme, gamma, gumbel_r, lognorm, norm
(see scipy.stats).
Returns
-------
xarray.DataArray
An array of distribution parameters fitted using the method of Maximum Likelihood.
"""
# Get the distribution
dc = get_dist(dist)
# Fit the parameters (lazy computation)
data = dask.array.apply_along_axis(dc.fit, arr.get_axis_num('time'), arr)
# Count the number of values used for the fit.
# n = arr.count(dim='time')
# Create a view to a DataArray with the desired dimensions to copy them over to the parameter array.
mean = arr.mean(dim='time', keep_attrs=True)
# Create coordinate for the distribution parameters
coords = dict(mean.coords.items())
coords['dparams'] = ([] if dc.shapes is None else dc.shapes.split(',')) + ['loc', 'scale']
# TODO: add time and time_bnds coordinates (Low will work on this)
# time.attrs['climatology'] = 'climatology_bounds'
# coords['time'] =
# coords['climatology_bounds'] =
out = xr.DataArray(data=data, coords=coords, dims=(u'dparams',) + mean.dims)
out.attrs = arr.attrs
out.attrs['original_name'] = getattr(arr, 'standard_name', '')
out.attrs['standard_name'] = '{0} distribution parameters'.format(dist)
out.attrs['long_name'] = '{0} distribution parameters for {1}'.format(dist, getattr(arr, 'standard_name', ''))
out.attrs['estimator'] = 'Maximum likelihood'
out.attrs['cell_methods'] = (out.attrs.get('cell_methods', '') + ' time: fit').strip()
out.attrs['units'] = ''
msg = '\nData fitted with {0} statistical distribution using a Maximum Likelihood Estimator'
out.attrs['history'] = out.attrs.get('history', '') + msg.format(dist)
return out
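# Usage sketch (assumes `da` is an xarray.DataArray backed by dask with a
# 'time' dimension; the distribution choice is illustrative):
# params = fit(da, dist='gamma')    # lazy dask computation
# loc = params.sel(dparams='loc')   # pick out the fitted location parameter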
def contains(container, item):
"""Extends ``operator.contains`` by trying very hard to find ``item`` inside container."""
# equality counts as containment and is usually non-destructive
if container == item:
return True
# testing mapping containment is usually non-destructive
if isinstance(container, abc.Mapping) and mapping_contains(container, item):
return True
# standard containment except special cases
if isinstance(container, str):
# str __contains__ includes substring match that we don't count as containment
if strict_contains(container, item):
return True
else:
try:
if item in container:
return True
except Exception:
pass
# search matches in generic instances
return instance_contains(container, item)
def histogram(
data, name, bins='sturges', datarange=(None, None), format='png', suffix='', path='./', rows=1,
columns=1, num=1, last=True, fontmap=None, verbose=1):
"""
Generates histogram from an array of data.
:Arguments:
data: array or list
Usually a trace from an MCMC sample.
name: string
The name of the histogram.
bins: int or string
The number of bins, or a preferred binning method. Available methods include
'doanes', 'sturges' and 'sqrt' (defaults to 'sturges').
datarange: tuple or list
Preferred range of histogram (defaults to (None,None)).
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot.
"""
# Internal histogram specification for handling nested arrays
try:
if fontmap is None:
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
# Stand-alone plot or subplot?
standalone = rows == 1 and columns == 1 and num == 1
if standalone:
if verbose > 0:
print_('Generating histogram of', name)
figure()
subplot(rows, columns, num)
# Specify number of bins
uniquevals = len(unique(data))
if bins == 'sturges':
bins = uniquevals * (uniquevals <= 25) or _sturges(len(data))
elif bins == 'doanes':
bins = uniquevals * (uniquevals <= 25) or _doanes(data, len(data))
elif bins == 'sqrt':
bins = uniquevals * (uniquevals <= 25) or _sqrt_choice(len(data))
elif isinstance(bins, int):
bins = bins
else:
raise ValueError('Invalid bins argument in histogram')
if isnan(bins):
bins = uniquevals * (uniquevals <= 25) or int(
4 + 1.5 * log(len(data)))
print_(
'Bins could not be calculated using selected method. Setting bins to %i.' %
bins)
# Generate histogram
hist(data.tolist(), bins, histtype='stepfilled')
xlim(datarange)
# Plot options
title('\n\n %s hist' % name, x=0., y=1., ha='left', va='top',
fontsize='medium')
ylabel("Frequency", fontsize='x-small')
# Plot vertical lines for median and 95% HPD interval
quant = calc_quantiles(data)
axvline(x=quant[50], linewidth=2, color='black')
for q in calc_hpd(data, 0.05):
axvline(x=q, linewidth=2, color='grey', linestyle='dotted')
# Smaller tick labels
tlabels = gca().get_xticklabels()
setp(tlabels, 'fontsize', fontmap[rows])
tlabels = gca().get_yticklabels()
setp(tlabels, 'fontsize', fontmap[rows])
if standalone:
if not os.path.exists(path):
os.mkdir(path)
if not path.endswith('/'):
path += '/'
# Save to file
savefig("%s%s%s.%s" % (path, name, suffix, format))
# close()
except OverflowError:
print_('... cannot generate histogram')
def oauth_unlink_external_id(external_id):
"""Unlink a user from an external id.
:param external_id: The external id associated with the user.
"""
with db.session.begin_nested():
UserIdentity.query.filter_by(id=external_id['id'],
method=external_id['method']).delete()
def download_file_no_logon(url, filename):
"""
Download a file from a public website with no logon required.
Roughly equivalent to:
output = open(filename, 'wb')
output.write(request.urlopen(url).read())
output.close()
"""
import urllib.request
#url = "http://www.google.com/"
request = urllib.request.Request(url)
try:
response = urllib.request.urlopen(request)
with open(filename,'wb') as f:
#print (response.read().decode('utf-8'))
f.write(response.read())
except Exception as ex:
lg.record_result("Error - can't download " + url + ": " + str(ex))
def build_single_handler_application(path, argv=None):
''' Return a Bokeh application built using a single handler for a script,
notebook, or directory.
In general a Bokeh :class:`~bokeh.application.application.Application` may
have any number of handlers to initialize :class:`~bokeh.document.Document`
objects for new client sessions. However, in many cases only a single
handler is needed. This function examines the ``path`` provided, and
returns an ``Application`` initialized with one of the following handlers:
* :class:`~bokeh.application.handlers.script.ScriptHandler` when ``path``
is to a ``.py`` script.
* :class:`~bokeh.application.handlers.notebook.NotebookHandler` when
``path`` is to an ``.ipynb`` Jupyter notebook.
* :class:`~bokeh.application.handlers.directory.DirectoryHandler` when
``path`` is to a directory containing a ``main.py`` script.
Args:
path (str) : path to a file or directory for creating a Bokeh
application.
argv (seq[str], optional) : command line arguments to pass to the
application handler
Returns:
:class:`~bokeh.application.application.Application`
Raises:
RuntimeError
Notes:
If ``path`` ends with a file ``main.py`` then a warning will be printed
regarding running directory-style apps by passing the directory instead.
'''
argv = argv or []
path = os.path.abspath(path)
# There are certainly race conditions here if the file/directory is deleted
# in between the isdir/isfile tests and subsequent code. But it would be a
# failure if they were not there to begin with, too (just a different error)
if os.path.isdir(path):
handler = DirectoryHandler(filename=path, argv=argv)
elif os.path.isfile(path):
if path.endswith(".ipynb"):
handler = NotebookHandler(filename=path, argv=argv)
elif path.endswith(".py"):
if path.endswith("main.py"):
warnings.warn(DIRSTYLE_MAIN_WARNING)
handler = ScriptHandler(filename=path, argv=argv)
else:
raise ValueError("Expected a '.py' script or '.ipynb' notebook, got: '%s'" % path)
else:
raise ValueError("Path for Bokeh server application does not exist: %s" % path)
if handler.failed:
raise RuntimeError("Error loading %s:\n\n%s\n%s " % (path, handler.error, handler.error_detail))
application = Application(handler)
return application
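# Usage sketch (paths are hypothetical):
# app = build_single_handler_application('viz.py')          # -> ScriptHandler
# app = build_single_handler_application('notebook.ipynb')  # -> NotebookHandler
# app = build_single_handler_application('myapp/')          # -> DirectoryHandler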
def instruction_DEC_register(self, opcode, register):
""" Decrement the given register """
a = register.value
r = self.DEC(a)
# log.debug("$%x DEC %s value $%x -1 = $%x" % (
# self.program_counter,
# register.name, a, r
# ))
register.set(r)
def create(dataset, target, model_name, features=None,
validation_set='auto', distributed='auto',
verbose=True, seed=None, **kwargs):
"""
Create a :class:`~turicreate.toolkits.SupervisedLearningModel`.
This is a generic function that allows you to create any model that
implements SupervisedLearningModel. This function is normally not called
directly; call the specific model's create function instead.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable. The values in this
column must be 0 or 1, of integer type.
model_name : string
Name of the model
features : list[string], optional
List of feature column names used for training the model.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
For each row of the progress table, the chosen metrics are computed
for both the provided training dataset and the validation_set. The
format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. The default value is 'auto'.
distributed: env
The distributed environment
verbose : boolean
Whether to print out messages during training.
seed : int, optional
Seed for random number generation. Set this value to ensure that the
same model is created every time.
kwargs : dict
Additional parameter options that can be passed
"""
# Perform error-checking and trim inputs to specified columns
dataset, validation_set = _validate_data(dataset, target, features,
validation_set)
# Sample a validation set from the training data if requested
if isinstance(validation_set, str):
assert validation_set == 'auto'
if dataset.num_rows() >= 100:
if verbose:
print_validation_track_notification()
dataset, validation_set = dataset.random_split(.95, seed=seed, exact=True)
else:
validation_set = _turicreate.SFrame()
elif validation_set is None:
validation_set = _turicreate.SFrame()
# Sanitize model-specific options
options = {k.lower(): kwargs[k] for k in kwargs}
# Create a model instance and train it
model = _turicreate.extensions.__dict__[model_name]()
with QuietProgress(verbose):
model.train(dataset, target, validation_set, options)
return SupervisedLearningModel(model, model_name)
def collision_integral_Kim_Monroe(Tstar, l=1, s=1):
r'''Calculates Lennard-Jones collision integral for any of 16 values of
(l,j) for the wide range of 0.3 < Tstar < 400. Values are accurate to
0.007 % of actual values, but the calculation of actual values is
computationally intensive and so these simplifications are used, developed
in [1]_.
.. math::
\Omega^{(l,s)*} = A^{(l,s)} + \sum_{k=1}^6 \left[ \frac{B_k^{(l,s)}}
{(T^*)^k} + C_k^{(l,s)} (\ln T^*)^k \right]
Parameters
----------
Tstar : float
Reduced temperature of the fluid [-]
l : int
First index (l) of the collision integral, [-]
s : int
Second index (s) of the collision integral, [-]
Returns
-------
Omega : float
Collision integral for the given (l, s) pair, [-]
Notes
-----
Acceptable pairs of (l,s) are (1, 1), (1, 2), (1, 3), (1, 4), (1, 5),
(1, 6), (1, 7), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (3, 3), (3, 4),
(3, 5), and (4, 4).
.. math::
T^* = \frac{k_b T}{\epsilon}
Examples
--------
>>> collision_integral_Kim_Monroe(400, 1, 1)
0.4141818082392228
References
----------
.. [1] Kim, Sun Ung, and Charles W. Monroe. "High-Accuracy Calculations of
Sixteen Collision Integrals for Lennard-Jones (12-6) Gases and Their
Interpolation to Parameterize Neon, Argon, and Krypton." Journal of
Computational Physics 273 (September 15, 2014): 358-73.
doi:10.1016/j.jcp.2014.05.018.
'''
if (l, s) not in As_collision:
raise Exception('Input values of l and s are not supported')
omega = As_collision[(l, s)]
for ki in range(6):
Bs = Bs_collision[(l, s)]
Cs = Cs_collision[(l, s)]
omega += Bs[ki]/Tstar**(ki+1) + Cs[ki]*log(Tstar)**(ki+1)
return omega
def distribution_from_path(cls, path, name=None):
"""Return a distribution from a path.
If name is provided, find the distribution. If none is found matching the name,
return None. If name is not provided and there is unambiguously a single
distribution, return that distribution; otherwise return None.
"""
# Monkeypatch pkg_resources finders should it not already be so.
register_finders()
if name is None:
distributions = set(find_distributions(path))
if len(distributions) == 1:
return distributions.pop()
else:
for dist in find_distributions(path):
if dist.project_name == name:
return dist
def read_handler(Model, name=None, **kwds):
"""
This factory returns an action handler that responds to read requests
by resolving the payload as a graphql query against the internal schema.
Args:
Model (nautilus.BaseModel): The model to read when the action is
received.
Returns:
function(type, payload): The action handler for this model
"""
async def action_handler(service, action_type, payload, props, **kwds):
# if the payload represents a new instance of `model`
if action_type == get_crud_action('read', name or Model):
# the props of the message
message_props = {}
# if there was a correlation id in the request
if 'correlation_id' in props:
# make sure it ends up in the reply
message_props['correlation_id'] = props['correlation_id']
try:
# resolve the query using the service schema
resolved = service.schema.execute(payload)
# create the string response
response = json.dumps({
'data': {key:value for key,value in resolved.data.items()},
'errors': resolved.errors
})
# publish the success event
await service.event_broker.send(
payload=response,
action_type=change_action_status(action_type, success_status()),
**message_props
)
# if something goes wrong
except Exception as err:
# publish the error as an event
await service.event_broker.send(
payload=str(err),
action_type=change_action_status(action_type, error_status()),
**message_props
)
# return the handler
return action_handler
def _add_junction(item):
'''
Adds a junction to the _current_statement.
'''
type_, channels = _expand_one_key_dictionary(item)
junction = UnnamedStatement(type='junction')
for channel_item in channels:
type_, value = _expand_one_key_dictionary(channel_item)
channel = UnnamedStatement(type='channel')
for val in value:
if _is_reference(val):
_add_reference(val, channel)
elif _is_inline_definition(val):
_add_inline_definition(val, channel)
junction.add_child(channel)
_current_statement.add_child(junction)
def set_expression(self, expression_dict):
"""Set protein expression amounts as initial conditions
Parameters
----------
expression_dict : dict
A dictionary in which the keys are gene names and the
values are numbers representing the absolute amount
(count per cell) of proteins expressed. Proteins that
are not expressed can be represented as nan. Entries
that are not in the dict or are in there but resolve
to None, are set to the default initial amount.
Example: {'EGFR': 12345, 'BRAF': 4567, 'ESR1': nan}
"""
if self.model is None:
return
monomers_found = []
monomers_notfound = []
# Iterate over all the monomers
for m in self.model.monomers:
if (m.name in expression_dict and
expression_dict[m.name] is not None):
# Try to get the expression amount from the dict
init = expression_dict[m.name]
# We interpret nan as not expressed; None was already excluded above
if math.isnan(init):
init = 0
init_round = round(init)
set_base_initial_condition(self.model, m, init_round)
monomers_found.append(m.name)
else:
set_base_initial_condition(self.model, m,
self.default_initial_amount)
monomers_notfound.append(m.name)
logger.info('Monomers set to given context')
logger.info('-----------------------------')
for m in monomers_found:
logger.info('%s' % m)
if monomers_notfound:
logger.info('')
logger.info('Monomers not found in given context')
logger.info('-----------------------------------')
for m in monomers_notfound:
logger.info('%s' % m)
def bs_plot_data(self, zero_to_efermi=True):
"""
Get the data nicely formatted for a plot
Args:
zero_to_efermi: Automatically subtract off the Fermi energy from the
eigenvalues and plot.
Returns:
dict: A dictionary of the following format:
ticks: A dict with the 'distances' at which there is a kpoint (the
x axis) and the labels (None if no label).
energy: A dict storing bands for spin up and spin down data
[{Spin:[band_index][k_point_index]}] as a list (one element
for each branch) of energy for each kpoint. The data is
stored by branch to facilitate the plotting.
vbm: A list of tuples (distance,energy) marking the vbms. The
energies are shifted with respect to the fermi level if the
option has been selected.
cbm: A list of tuples (distance,energy) marking the cbms. The
energies are shifted with respect to the fermi level if the
option has been selected.
lattice: The reciprocal lattice.
zero_energy: This is the energy used as zero for the plot.
band_gap: A string indicating the band gap and its nature (empty if
it's a metal).
is_metal: True if the band structure is metallic (i.e., there is at
least one band crossing the fermi level).
"""
distance = []
energy = []
if self._bs.is_metal():
zero_energy = self._bs.efermi
else:
zero_energy = self._bs.get_vbm()['energy']
if not zero_to_efermi:
zero_energy = 0.0
for b in self._bs.branches:
if self._bs.is_spin_polarized:
energy.append({str(Spin.up): [], str(Spin.down): []})
else:
energy.append({str(Spin.up): []})
distance.append([self._bs.distance[j]
for j in range(b['start_index'],
b['end_index'] + 1)])
ticks = self.get_ticks()
for i in range(self._nb_bands):
energy[-1][str(Spin.up)].append(
[self._bs.bands[Spin.up][i][j] - zero_energy
for j in range(b['start_index'], b['end_index'] + 1)])
if self._bs.is_spin_polarized:
for i in range(self._nb_bands):
energy[-1][str(Spin.down)].append(
[self._bs.bands[Spin.down][i][j] - zero_energy
for j in range(b['start_index'], b['end_index'] + 1)])
vbm = self._bs.get_vbm()
cbm = self._bs.get_cbm()
vbm_plot = []
cbm_plot = []
for index in cbm['kpoint_index']:
cbm_plot.append((self._bs.distance[index],
cbm['energy'] - zero_energy if zero_to_efermi
else cbm['energy']))
for index in vbm['kpoint_index']:
vbm_plot.append((self._bs.distance[index],
vbm['energy'] - zero_energy if zero_to_efermi
else vbm['energy']))
bg = self._bs.get_band_gap()
direct = "Indirect"
if bg['direct']:
direct = "Direct"
return {'ticks': ticks, 'distances': distance, 'energy': energy,
'vbm': vbm_plot, 'cbm': cbm_plot,
'lattice': self._bs.lattice_rec.as_dict(),
'zero_energy': zero_energy, 'is_metal': self._bs.is_metal(),
'band_gap': "{} {} bandgap = {}".format(direct,
bg['transition'],
bg['energy'])
if not self._bs.is_metal() else ""}
def write_length_and_key(fp, value):
"""
Helper to write descriptor key.
"""
written = write_fmt(fp, 'I', 0 if value in _TERMS else len(value))
written += write_bytes(fp, value)
return written
def asList(self):
""" returns a Point value as a list of [x,y,<z>,<m>] """
base = [self._x, self._y]
if self._z is not None:
base.append(self._z)
elif self._m is not None:
base.append(self._m)
return base
def GetHTTPHeaders(self):
"""Returns the HTTP headers required for request authorization.
Returns:
A dictionary containing the required headers.
"""
http_headers = self._adwords_client.oauth2_client.CreateHttpHeader()
if self.enable_compression:
http_headers['accept-encoding'] = 'gzip'
http_headers.update(self.custom_http_headers)
return http_headers
def send_email(self, **kwargs):
"""
Sends an email using Mandrill's API. Returns a
Requests :class:`Response` object.
At a minimum kwargs must contain the keys to, from_email, and text.
Everything passed as kwargs except for the keywords 'key', 'async',
and 'ip_pool' will be sent as key-value pairs in the message object.
Reference https://mandrillapp.com/api/docs/messages.JSON.html#method=send
for all the available options.
"""
endpoint = self.messages_endpoint
data = {
'async': kwargs.pop('async', False),
'ip_pool': kwargs.pop('ip_pool', ''),
'key': kwargs.pop('key', self.api_key),
}
if not data.get('key', None):
raise ValueError('No Mandrill API key has been configured')
# Sending a template through Mandrill requires a couple extra args
# and a different endpoint.
if kwargs.get('template_name', None):
data['template_name'] = kwargs.pop('template_name')
data['template_content'] = kwargs.pop('template_content', [])
endpoint = self.templates_endpoint
data['message'] = kwargs
if self.app:
data['message'].setdefault(
'from_email',
self.app.config.get('MANDRILL_DEFAULT_FROM', None)
)
if endpoint != self.templates_endpoint and not data['message'].get('from_email', None):
raise ValueError(
'No from email was specified and no default was configured')
response = requests.post(endpoint,
data=json.dumps(data),
headers={'Content-Type': 'application/json'})
response.raise_for_status()
return response
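# Usage sketch (addresses and subject are hypothetical; `mandrill` is an
# instance of this class with a configured API key):
# response = mandrill.send_email(
#     to=[{'email': '[email protected]'}],
#     from_email='[email protected]',
#     subject='Hello',
#     text='Plain-text body',
# )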
def get_list(self, name):
"""Returns all values for the given header as a list."""
norm_name = HTTPHeaders._normalize_name(name)
return self._as_list.get(norm_name, [])
def get_connections_by_dest(self, dest):
'''Search for all connections between this and another port.'''
with self._mutex:
res = []
for c in self.connections:
if c.has_port(self) and c.has_port(dest):
res.append(c)
return res
def coerce_tuples(cls, generator):
"""This class method converts a generator of ``(K, V)`` tuples (the
*tuple protocol*), where ``V`` is not yet of the correct type, to a
generator where it is of the correct type (using the ``coerceitem``
class property)
"""
for k, v in generator:
yield k, cls.coerce_value(v)
def block_hash(self, block_number=None, force_recent=True):
"""
Calculates a block's hash
:param block_number: the block number for which to calculate the hash, defaulting to the most recent block
:param force_recent: if True (the default) return zero for any block that is in the future or older than 256 blocks
:return: the block hash
"""
if block_number is None:
block_number = self.block_number() - 1
# We are not maintaining an actual -block-chain- so we just generate
# some hashes for each virtual block
value = sha3.keccak_256((repr(block_number) + 'NONCE').encode()).hexdigest()
value = int(value, 16)
if force_recent:
# 0 is left on the stack if the looked for block number is greater or equal
# than the current block number or more than 256 blocks behind the current
# block. (Current block hash is unknown from inside the tx)
bnmax = Operators.ITEBV(256, self.block_number() > 256, 256, self.block_number())
value = Operators.ITEBV(256, Operators.OR(block_number >= self.block_number(), block_number < bnmax), 0, value)
return value
def _infer_fill_value(val):
"""
Infer the fill value (nan/NaT) from the provided
scalar/ndarray/list-like. If it is a NaT, return the correctly
dtyped element to provide proper block construction.
"""
if not is_list_like(val):
val = [val]
val = np.array(val, copy=False)
if is_datetimelike(val):
return np.array('NaT', dtype=val.dtype)
elif is_object_dtype(val.dtype):
dtype = lib.infer_dtype(ensure_object(val), skipna=False)
if dtype in ['datetime', 'datetime64']:
return np.array('NaT', dtype=_NS_DTYPE)
elif dtype in ['timedelta', 'timedelta64']:
return np.array('NaT', dtype=_TD_DTYPE)
return np.nan
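# Illustrative behavior (a sketch): datetime-like input yields a NaT array of
# the matching dtype, while anything else falls through to np.nan:
# _infer_fill_value(pd.Timestamp('2000-01-01'))  # -> array of NaT (datetime64[ns])
# _infer_fill_value(1.5)                         # -> nan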
async def _send_command(self, command):
"""
This is a private utility method.
The method sends a non-sysex command to Firmata.
:param command: command data
:returns: length of data sent
"""
send_message = ""
for i in command:
send_message += chr(i)
result = None
for data in send_message:
try:
result = await self.write(data)
except Exception:
if self.log_output:
logging.exception('cannot send command')
else:
print('cannot send command')
return result
def strip_xss(html, whitelist=None, replacement="(removed)"):
"""
This function returns a tuple containing:
* *html* with all non-whitelisted HTML tags replaced with *replacement*.
* A `set()` containing the tags that were removed.
Any tags that contain JavaScript, VBScript, or other known XSS/executable
functions will also be removed.
If *whitelist* is not given the following will be used::
whitelist = set([
'a', 'abbr', 'aside', 'audio', 'bdi', 'bdo', 'blockquote', 'canvas',
'caption', 'code', 'col', 'colgroup', 'data', 'dd', 'del',
'details', 'div', 'dl', 'dt', 'em', 'figcaption', 'figure', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins', 'kbd', 'li',
'mark', 'ol', 'p', 'pre', 'q', 'rp', 'rt', 'ruby', 's', 'samp',
'small', 'source', 'span', 'strong', 'sub', 'summary', 'sup',
'table', 'td', 'th', 'time', 'tr', 'track', 'u', 'ul', 'var',
'video', 'wbr'
])
.. note:: To disable the whitelisting simply set `whitelist="off"`.
Example::
>>> html = '<span>Hello, exploit: <img src="javascript:alert(\"pwned!\")"></span>'
>>> html, rejects = strip_xss(html)
>>> print("'%s', Rejected: '%s'" % (html, " ".join(rejects)))
'<span>Hello, exploit: (removed)</span>', Rejected: '<img src="javascript:alert("pwned!")">'
.. note:: The default *replacement* is "(removed)".
If *replacement* is "entities" bad HTML tags will be encoded into HTML
entities. This allows things like <script>'whatever'</script> to be
displayed without execution (which would be much less annoying to users that
were merely trying to share a code example). Here's an example::
>>> html = '<span>Hello, exploit: <img src="javascript:alert(\"pwned!\")"></span>'
>>> html, rejects = strip_xss(html, replacement="entities")
>>> print(html)
<span>Hello, exploit: <img src="javascript:alert("pwned!")"></span>
>>> print("Rejected: '%s'" % ", ".join(rejects))
Rejected: '<img src="javascript:alert("pwned!")">'
**NOTE:** This function should work to protect against *all* `the XSS
examples at OWASP
<https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet>`_. Please
`let us know <https://github.com/LiftoffSoftware/htmltag/issues>`_ if you
find something we missed.
"""
re_html_tag = re.compile(  # This matches HTML tags (if used correctly)
r"(?i)<\/?\w+((\s+\w+(\s*=\s*(?:\".*?\"|'.*?'|[^'\">\s]+))?)+\s*|\s*)\/?>")
# This will match things like 'onmouseover=' ('on<whatever>=')
on_events_re = re.compile(r'.*\s+(on[a-z]+\s*=).*')
if not whitelist:
# These are all pretty safe and covers most of what users would want in
# terms of formatting and sharing media (images, audio, video, etc).
whitelist = set([
'a', 'abbr', 'aside', 'audio', 'bdi', 'bdo', 'blockquote', 'canvas',
'caption', 'code', 'col', 'colgroup', 'data', 'dd', 'del',
'details', 'div', 'dl', 'dt', 'em', 'figcaption', 'figure', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins', 'kbd', 'li',
'mark', 'ol', 'p', 'pre', 'q', 'rp', 'rt', 'ruby', 's', 'samp',
'small', 'source', 'span', 'strong', 'sub', 'summary', 'sup',
'table', 'td', 'th', 'time', 'tr', 'track', 'u', 'ul', 'var',
'video', 'wbr'
])
elif whitelist == "off":
whitelist = None # Disable it altogether
bad_tags = set()
for tag in re_html_tag.finditer(html):
tag = tag.group()
tag_lower = tag.lower()
short_tag = tag_lower.split()[0].lstrip('</').rstrip('>')
if whitelist and short_tag not in whitelist:
bad_tags.add(tag)
continue
# Make sure the tag can't execute any JavaScript
if "javascript:" in tag_lower:
bad_tags.add(tag)
continue
# on<whatever> events are not allowed (just another XSS vuln)
if on_events_re.search(tag_lower):
bad_tags.add(tag)
continue
# Flash sucks
if "fscommand" in tag_lower:
bad_tags.add(tag)
continue
# I'd be impressed if an attacker tried this one (super obscure)
if "seeksegmenttime" in tag_lower:
bad_tags.add(tag)
continue
# Yes we'll protect IE users from themselves...
if "vbscript:" in tag_lower:
bad_tags.add(tag)
continue
if replacement == "entities":
for bad_tag in bad_tags:
escaped = cgi.escape(bad_tag).encode('ascii', 'xmlcharrefreplace')
html = html.replace(bad_tag, escaped.decode('ascii'))
else:
for bad_tag in bad_tags:
html = html.replace(bad_tag, replacement)
return (html, bad_tags)
def by_population_density(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.population_density.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by population density range.
`population density` is `population per square miles on land`
"""
return self.query(
population_density_lower=lower,
population_density_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
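# Usage sketch (thresholds are illustrative; `search` is assumed to be an
# instance of this search class): the ten densest Standard zipcodes with at
# least 1000 people per square mile of land:
# search.by_population_density(lower=1000, returns=10)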
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
if self._tzinfo is None:
return None
name = self._tzinfo.tzname(None)
_check_tzname(name)
return name
def draw(self,N=1.5):
"""compute every node's coordinates after converging to an optimal ordering
in N rounds, and finally perform the edge routing.
"""
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges()
def _get_spacing_conventions(self, use_names):
"""Try to determine the whitespace conventions for parameters.
This will examine the existing parameters and use
:meth:`_select_theory` to determine if there are any preferred styles
for how much whitespace to put before or after the value.
"""
before_theories = defaultdict(lambda: 0)
after_theories = defaultdict(lambda: 0)
for param in self.params:
if not param.showkey:
continue
if use_names:
component = str(param.name)
else:
component = str(param.value)
match = re.search(r"^(\s*).*?(\s*)$", component, FLAGS)
before, after = match.group(1), match.group(2)
if not use_names and component.isspace() and "\n" in before:
# If the value is empty, we expect newlines in the whitespace
# to be after the content, not before it:
before, after = before.split("\n", 1)
after = "\n" + after
before_theories[before] += 1
after_theories[after] += 1
before = self._select_theory(before_theories)
after = self._select_theory(after_theories)
return before, after
def get_relationships(self):
"""Gets all ``Relationships``.
return: (osid.relationship.RelationshipList) - a list of
``Relationships``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
url_path = ('/handcar/services/relationship/families/' +
self._catalog_idstr + '/relationships')
return objects.RelationshipList(self._get_request(url_path))
def estimate_skeleton(indep_test_func, data_matrix, alpha, **kwargs):
"""Estimate a skeleton graph from the statistical information.
Args:
indep_test_func: the function name for a conditional
independency test.
data_matrix: data (as a numpy array).
alpha: the significance level.
kwargs:
'max_reach': maximum value of l (see the code). The
value depends on the underlying distribution.
'method': if 'stable' given, use stable-PC algorithm
(see [Colombo2014]).
'init_graph': initial structure of skeleton graph
(as a networkx.Graph). If not specified,
a complete graph is used.
other parameters may be passed depending on the
indep_test_func()s.
Returns:
g: a skeleton graph (as a networkx.Graph).
sep_set: a separation set (as a 2D-array of set()).
[Colombo2014] Diego Colombo and Marloes H Maathuis. Order-independent
constraint-based causal structure learning. In The Journal of Machine
Learning Research, Vol. 15, pp. 3741-3782, 2014.
"""
def method_stable(kwargs):
return ('method' in kwargs) and kwargs['method'] == "stable"
node_ids = range(data_matrix.shape[1])
node_size = data_matrix.shape[1]
sep_set = [[set() for i in range(node_size)] for j in range(node_size)]
if 'init_graph' in kwargs:
g = kwargs['init_graph']
if not isinstance(g, nx.Graph):
raise ValueError
elif not g.number_of_nodes() == len(node_ids):
raise ValueError('init_graph not matching data_matrix shape')
for (i, j) in combinations(node_ids, 2):
if not g.has_edge(i, j):
sep_set[i][j] = None
sep_set[j][i] = None
else:
g = _create_complete_graph(node_ids)
l = 0
while True:
cont = False
remove_edges = []
for (i, j) in permutations(node_ids, 2):
adj_i = list(g.neighbors(i))
if j not in adj_i:
continue
else:
adj_i.remove(j)
if len(adj_i) >= l:
_logger.debug('testing %s and %s' % (i,j))
_logger.debug('neighbors of %s are %s' % (i, str(adj_i)))
if len(adj_i) < l:
continue
for k in combinations(adj_i, l):
_logger.debug('indep prob of %s and %s with subset %s'
% (i, j, str(k)))
p_val = indep_test_func(data_matrix, i, j, set(k),
**kwargs)
_logger.debug('p_val is %s' % str(p_val))
if p_val > alpha:
if g.has_edge(i, j):
_logger.debug('p: remove edge (%s, %s)' % (i, j))
if method_stable(kwargs):
remove_edges.append((i, j))
else:
g.remove_edge(i, j)
sep_set[i][j] |= set(k)
sep_set[j][i] |= set(k)
break
cont = True
l += 1
if method_stable(kwargs):
g.remove_edges_from(remove_edges)
if cont is False:
break
if ('max_reach' in kwargs) and (l > kwargs['max_reach']):
break
return (g, sep_set)
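# Usage sketch (the conditional-independence test name is hypothetical; any
# callable with the documented indep_test_func signature works):
# g, sep_set = estimate_skeleton(ci_test_gauss, data_matrix, alpha=0.01,
#                                method='stable', max_reach=3)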
async def serve(
app: ASGIFramework,
config: Config,
*,
task_status: trio._core._run._TaskStatus = trio.TASK_STATUS_IGNORED,
) -> None:
"""Serve an ASGI framework app given the config.
This allows for a programmatic way to serve an ASGI framework, it
can be used via,
.. code-block:: python
trio.run(partial(serve, app, config))
It is assumed that the event-loop is configured before calling
this function, therefore configuration values that relate to loop
setup or process setup are ignored.
"""
if config.debug:
warnings.warn("The config `debug` has no effect when using serve", Warning)
if config.workers != 1:
warnings.warn("The config `workers` has no effect when using serve", Warning)
if config.worker_class != "asyncio":
warnings.warn("The config `worker_class` has no effect when using serve", Warning)
await worker_serve(app, config, task_status=task_status)
def get_record_collections(record, matcher):
"""Return list of collections to which record belongs to.
:param record: Record instance.
:param matcher: Function used to check if a record belongs to a collection.
:return: list of collection names.
"""
collections = current_collections.collections
if collections is None:
# build collections cache
collections = current_collections.collections = dict(_build_cache())
output = set()
for matched in matcher(collections, record):
output |= matched
return list(output)
def set_window_settings(self, hexstate, window_size, prefs_dialog_size,
pos, is_maximized, is_fullscreen):
"""Set window settings
Symmetric to the 'get_window_settings' accessor"""
self.setUpdatesEnabled(False)
self.window_size = QSize(window_size[0], window_size[1]) # width,height
self.prefs_dialog_size = QSize(prefs_dialog_size[0],
prefs_dialog_size[1]) # width,height
self.window_position = QPoint(pos[0], pos[1]) # x,y
self.setWindowState(Qt.WindowNoState)
self.resize(self.window_size)
self.move(self.window_position)
# Window layout
if hexstate:
self.restoreState( QByteArray().fromHex(
str(hexstate).encode('utf-8')) )
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow.
for widget in self.children():
if isinstance(widget, QDockWidget) and widget.isFloating():
self.floating_dockwidgets.append(widget)
widget.setFloating(False)
# Is fullscreen?
if is_fullscreen:
self.setWindowState(Qt.WindowFullScreen)
self.__update_fullscreen_action()
# Is maximized?
if is_fullscreen:
self.maximized_flag = is_maximized
elif is_maximized:
self.setWindowState(Qt.WindowMaximized)
self.setUpdatesEnabled(True)
def list(self, path=None, with_metadata=False):
'''get a list of all of the files in the repository'''
path = path.strip('/') if path else ''
if self.upstream:
return self.upstream.list(path, with_metadata=with_metadata)
else:
raise NotImplementedError()
def scan(args):
"""Scan for sensors."""
backend = _get_backend(args)
print('Scanning for 10 seconds...')
devices = miflora_scanner.scan(backend, 10)
print('Found {} devices:'.format(len(devices)))
for device in devices:
print(' {}'.format(device))
def arbitrary_object_to_string(a_thing):
"""Take a Python object of some sort and convert it into a human-readable
string. This function is used extensively to convert things like "subject"
into "subject_key", function -> "function_key", etc."""
# is it None?
if a_thing is None:
return ''
# is it already a string?
if isinstance(a_thing, six.string_types):
return a_thing
if six.PY3 and isinstance(a_thing, six.binary_type):
try:
return a_thing.decode('utf-8')
except UnicodeDecodeError:
pass
# does it have a to_str function?
try:
return a_thing.to_str()
except (AttributeError, KeyError, TypeError):
# AttributeError - no to_str function?
# KeyError - DotDict has no to_str?
# TypeError - problem converting
# nope, no to_str function
pass
# is this a type proxy?
try:
return arbitrary_object_to_string(a_thing.a_type)
except (AttributeError, KeyError, TypeError):
#
# nope, no a_type property
pass
# is it a built in?
try:
return known_mapping_type_to_str[a_thing]
except (KeyError, TypeError):
# nope, not a builtin
pass
# is it something from a loaded module?
try:
if a_thing.__module__ not in ('__builtin__', 'builtins', 'exceptions'):
if a_thing.__module__ == "__main__":
module_name = (
sys.modules['__main__']
.__file__[:-3]
.replace('/', '.')
.strip('.')
)
else:
module_name = a_thing.__module__
return "%s.%s" % (module_name, a_thing.__name__)
except AttributeError:
# nope, not one of these
pass
# maybe it has a __name__ attribute?
try:
return a_thing.__name__
except AttributeError:
# nope, not one of these
pass
# punt and see what happens if we just cast it to string
return str(a_thing)
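# Illustrative behavior (a sketch; the last result assumes a POSIX host and
# that os.path.join is absent from the module's known_mapping_type_to_str):
# arbitrary_object_to_string(None)           # -> ''
# arbitrary_object_to_string('already str')  # -> 'already str'
# arbitrary_object_to_string(os.path.join)   # -> 'posixpath.join'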
def compile_sources(files, CompilerRunner_=None,
destdir=None, cwd=None,
keep_dir_struct=False,
per_file_kwargs=None,
**kwargs):
"""
Compile source code files to object files.
Parameters
----------
files: iterable of path strings
source files, if cwd is given, the paths are taken as relative.
CompilerRunner_: CompilerRunner instance (optional)
could be e.g. pycompilation.FortranCompilerRunner
Will be inferred from filename extensions if missing.
destdir: path string
output directory, if cwd is given, the path is taken as relative
cwd: path string
working directory. Specify to have compiler run in other directory.
also used as root of relative paths.
keep_dir_struct: bool
Reproduce directory structure in `destdir`. default: False
per_file_kwargs: dict
dict mapping instances in `files` to keyword arguments
**kwargs: dict
default keyword arguments to pass to CompilerRunner_
"""
_per_file_kwargs = {}
if per_file_kwargs is not None:
for k, v in per_file_kwargs.items():
if isinstance(k, Glob):
for path in glob.glob(k.pathname):
_per_file_kwargs[path] = v
elif isinstance(k, ArbitraryDepthGlob):
for path in glob_at_depth(k.filename, cwd):
_per_file_kwargs[path] = v
else:
_per_file_kwargs[k] = v
# Set up destination directory
destdir = destdir or '.'
if not os.path.isdir(destdir):
if os.path.exists(destdir):
raise IOError("{} is not a directory".format(destdir))
else:
make_dirs(destdir)
if cwd is None:
cwd = '.'
for f in files:
copy(f, destdir, only_update=True, dest_is_dir=True)
# Compile files and return list of paths to the objects
dstpaths = []
for f in files:
if keep_dir_struct:
name, ext = os.path.splitext(f)
else:
name, ext = os.path.splitext(os.path.basename(f))
file_kwargs = kwargs.copy()
file_kwargs.update(_per_file_kwargs.get(f, {}))
dstpaths.append(src2obj(
f, CompilerRunner_, cwd=cwd,
**file_kwargs
))
return dstpaths
def dctmat(N,K,freqstep,orthogonalize=True):
"""Return the orthogonal DCT-II/DCT-III matrix of size NxK.
For computing or inverting MFCCs, N is the number of
log-power-spectrum bins while K is the number of cepstra."""
cosmat = numpy.zeros((N, K), 'double')
for n in range(0,N):
for k in range(0, K):
cosmat[n,k] = numpy.cos(freqstep * (n + 0.5) * k)
if orthogonalize:
cosmat[:,0] = cosmat[:,0] * 1./numpy.sqrt(2)
return cosmat
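# Usage sketch: the 40x13 DCT-II basis conventionally used to map 40
# log-spectral bins to 13 cepstra takes freqstep = pi/N:
# C = dctmat(40, 13, numpy.pi / 40)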
def set_object_status(self, statusdict):
"""
Set statuses from a dictionary of format ``{name: status}``
"""
for name, value in statusdict.items():
getattr(self.system, name).status = value
return True
def startServer(tikaServerJar, java_path = TikaJava, serverHost = ServerHost, port = Port, classpath=None, config_path=None):
'''
Starts Tika Server
:param tikaServerJar: path to tika server jar
:param serverHost: the host interface address to be used for binding the service
:param port: the host port to be used for binding the service
:param classpath: Class path value to pass to JVM
:return: None
'''
if classpath is None:
classpath = TikaServerClasspath
host = "localhost"
if Windows:
host = "0.0.0.0"
if classpath:
classpath += ":" + tikaServerJar
else:
classpath = tikaServerJar
# setup command string
cmd_string = ""
if not config_path:
cmd_string = '%s -cp %s org.apache.tika.server.TikaServerCli --port %s --host %s &' \
% (java_path, classpath, port, host)
else:
cmd_string = '%s -cp %s org.apache.tika.server.TikaServerCli --port %s --host %s --config %s &' \
% (java_path, classpath, port, host, config_path)
# Check that we can write to log path
try:
tika_log_file_path = os.path.join(TikaServerLogFilePath, 'tika-server.log')
logFile = open(tika_log_file_path, 'w')
except PermissionError as e:
log.error("Unable to create tika-server.log at %s due to permission error." % (TikaServerLogFilePath))
return False
# Check that specified java binary is available on path
try:
_ = Popen(java_path, stdout=open(os.devnull, "w"), stderr=open(os.devnull, "w"))
except FileNotFoundError as e:
log.error("Unable to run java; is it installed?")
return False
# Run java with jar args
cmd = Popen(cmd_string, stdout=logFile, stderr=STDOUT, shell=True)
# Check logs and retry as configured
try_count = 0
is_started = False
while try_count < TikaStartupMaxRetry:
with open(tika_log_file_path, "r") as tika_log_file_tmp:
# check for INFO string to confirm listening endpoint
if "Started Apache Tika server at" in tika_log_file_tmp.read():
is_started = True
else:
log.warning("Failed to see startup log message; retrying...")
time.sleep(TikaStartupSleep)
try_count += 1
if not is_started:
log.error("Tika startup log message not received after %d tries." % (TikaStartupMaxRetry))
return False
else:
return True
def get_gauge(self, name=None):
'''Shortcut for getting a :class:`~statsd.gauge.Gauge` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Gauge)
def wolmatch(tgt, tgt_type='glob', bcast='255.255.255.255', destport=9):
'''
Send a "Magic Packet" to wake up Minions that are matched in the grains cache
CLI Example:
.. code-block:: bash
salt-run network.wolmatch minion_id
salt-run network.wolmatch 192.168.0.0/16 tgt_type='ipcidr' bcast=255.255.255.255 destport=7
'''
ret = []
minions = __salt__['cache.grains'](tgt, tgt_type)
for minion in minions:
for iface, mac in minion['hwaddr_interfaces'].items():
if iface == 'lo':
continue
mac = mac.strip()
wol(mac, bcast, destport)
log.info('Waking up %s', mac)
ret.append(mac)
return ret
def handle_build_cache(
conf: Config, name: str, tag: str, icb: ImageCachingBehavior):
"""Handle Docker image build cache.
Return image ID if image is cached, and there's no need to redo the build.
Return None if need to build the image (whether cached locally or not).
Raise RuntimeError if not allowed to build the image because of state of
local cache.
TODO(itamar): figure out a better name for this function, that reflects
what it returns (e.g. `get_cached_image_id`),
without "surprising" the caller with the potential of long
and non-trivial operations that are not usually expected from functions
with such names.
"""
if icb.pull_if_cached or (icb.pull_if_not_cached and
get_cached_image_id(icb.remote_image) is None):
try:
pull_docker_image(icb.remote_image, conf.docker_pull_cmd)
except CalledProcessError:
pass
local_image = '{}:{}'.format(name, tag)
if (icb.skip_build_if_cached and
get_cached_image_id(icb.remote_image) is not None):
tag_docker_image(icb.remote_image, local_image)
return get_cached_image_id(local_image)
if ((not icb.allow_build_if_not_cached) and
get_cached_image_id(icb.remote_image) is None):
raise RuntimeError('No cached image for {}'.format(local_image))
return None
def label_empty(self, **kwargs):
"Label every item with an `EmptyLabel`."
kwargs['label_cls'] = EmptyLabelList
return self.label_from_func(func=lambda o: 0., **kwargs)
def get_file_contents(self, file_key):
'''Gets file contents
Args:
file_key key for the file
return (status code, ?)
'''
# Does not work yet: the call below raises, so the request code is unreachable
self._raise_unimplemented_error()
uri = '/'.join([self.api_uri,
self.files_suffix,
file_key,
self.file_contents_suffix,
])
return self._req('get', uri)
def _ExtractGoogleDocsSearchQuery(self, url):
"""Extracts a search query from a Google docs URL.
Google Docs: https://docs.google.com/.*/u/0/?q=query
Args:
url (str): URL.
Returns:
str: search query or None if no query was found.
"""
if 'q=' not in url:
return None
line = self._GetBetweenQEqualsAndAmpersand(url)
if not line:
return None
return line.replace('+', ' ')
def remove(self, server_id):
"""remove server and data stuff
Args:
server_id - server identity
"""
server = self._storage.pop(server_id)
server.stop()
server.cleanup()
def compute_transitions(self, density_normalize=True):
"""Compute transition matrix.
Parameters
----------
density_normalize : `bool`
The density rescaling of Coifman and Lafon (2006): Then only the
geometry of the data matters, not the sampled density.
Returns
-------
Makes attributes `.transitions_sym` and `.transitions` available.
"""
W = self._connectivities
# density normalization as of Coifman et al. (2005)
# ensures that kernel matrix is independent of sampling density
if density_normalize:
# q[i] is an estimate for the sampling density at point i
# it's also the degree of the underlying graph
q = np.asarray(W.sum(axis=0))
if not issparse(W):
Q = np.diag(1.0/q)
else:
Q = scipy.sparse.spdiags(1.0/q, 0, W.shape[0], W.shape[0])
K = Q.dot(W).dot(Q)
else:
K = W
# z[i] is the square root of the row sum of K
z = np.sqrt(np.asarray(K.sum(axis=0)))
if not issparse(K):
self.Z = np.diag(1.0/z)
else:
self.Z = scipy.sparse.spdiags(1.0/z, 0, K.shape[0], K.shape[0])
self._transitions_sym = self.Z.dot(K).dot(self.Z)
logg.msg('computed transitions', v=4, time=True)
def fromWeb3(cls, web3, addr=None):
"""
Generate an ENS instance with web3
:param `web3.Web3` web3: to infer connection information
:param hex-string addr: the address of the ENS registry on-chain. If not provided,
ENS.py will default to the mainnet ENS registry address.
"""
return cls(web3.manager.provider, addr=addr)
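# Usage sketch (the provider endpoint is hypothetical; assumes this
# classmethod lives on the ENS class, as the docstring implies):
# from web3 import Web3
# w3 = Web3(Web3.HTTPProvider('http://localhost:8545'))
# ns = ENS.fromWeb3(w3)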
def GetUsername(self, event, default_username='-'):
"""Retrieves the username related to the event.
Args:
event (EventObject): event.
default_username (Optional[str]): default username.
Returns:
str: username.
"""
username = getattr(event, 'username', None)
if username and username != '-':
return username
session_identifier = event.GetSessionIdentifier()
if session_identifier is None:
return default_username
user_sid = getattr(event, 'user_sid', None)
username = self._knowledge_base.GetUsernameByIdentifier(
user_sid, session_identifier=session_identifier)
return username or default_username
def createArchiveExample(fileName):
""" Creates Combine Archive containing the given file.
:param fileName: file to include in the archive
:return: None
"""
print('*' * 80)
print('Create archive')
print('*' * 80)
archive = CombineArchive()
archive.addFile(
fileName, # filename
"./models/model.xml", # target file name
KnownFormats.lookupFormat("sbml"), # look up identifier for SBML models
True # mark file as master
)
# add metadata to the archive itself
description = OmexDescription()
description.setAbout(".")
description.setDescription("Simple test archive including one SBML model")
description.setCreated(OmexDescription.getCurrentDateAndTime())
creator = VCard()
creator.setFamilyName("Bergmann")
creator.setGivenName("Frank")
creator.setEmail("[email protected]")
creator.setOrganization("Caltech")
description.addCreator(creator)
archive.addMetadata(".", description)
# add metadata to the added file
location = "./models/model.xml"
description = OmexDescription()
description.setAbout(location)
description.setDescription("SBML model")
description.setCreated(OmexDescription.getCurrentDateAndTime())
archive.addMetadata(location, description)
# write the archive
out_file = "out.omex"
archive.writeToFile(out_file)
print('Archive created:', out_file)
def previous_week_day(base_date, weekday):
"""
Finds the previous occurrence of the given weekday, strictly before base_date
"""
day = base_date - timedelta(days=1)
while day.weekday() != weekday:
day = day - timedelta(days=1)
return day
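# Worked sketch: the Monday (weekday 0) strictly before Thursday 2023-06-15:
# previous_week_day(datetime(2023, 6, 15), 0)  # -> datetime(2023, 6, 12, 0, 0)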
def drop(self, relation):
"""Drop the named relation and cascade it appropriately to all
dependent relations.
Because dbt proactively does many `drop relation if exists ... cascade`
that are noops, nonexistent relation drops cause a debug log and no
other actions.
:param str schema: The schema of the relation to drop.
:param str identifier: The identifier of the relation to drop.
"""
dropped = _make_key(relation)
logger.debug('Dropping relation: {!s}'.format(dropped))
with self.lock:
self._drop_cascade_relation(dropped)
def _update_with_calls(result_file, cnv_file):
"""Update bounds with calls from CNVkit, inferred copy numbers and p-values from THetA.
"""
results = {}
with open(result_file) as in_handle:
in_handle.readline() # header
_, _, cs, ps = in_handle.readline().strip().split()
for i, (c, p) in enumerate(zip(cs.split(":"), ps.split(","))):
results[i] = (c, p)
cnvs = {}
with open(cnv_file) as in_handle:
for line in in_handle:
chrom, start, end, _, count = line.rstrip().split()[:5]
cnvs[(chrom, start, end)] = count
def update(i, line):
parts = line.rstrip().split("\t")
chrom, start, end = parts[1:4]
parts += cnvs.get((chrom, start, end), ".")
parts += list(results[i])
return "\t".join(parts) + "\n"
return update
def payments(self, virtual_account_id, data={}, **kwargs):
"""
Fetch Payment for Virtual Account Id
Args:
virtual_account_id :
Id for which Virtual Account objects has to be retrieved
Returns:
Payment dict for given Virtual Account Id
"""
url = "{}/{}/payments".format(self.base_url, virtual_account_id)
return self.get_url(url, data, **kwargs)
def _post_zone(self, zone):
"""
Pushes updated zone for current domain to authenticated Hetzner account and
returns a boolean, if update was successful or not. Furthermore, waits until
the zone has been taken over, if it is a Hetzner Robot account.
"""
api = self.api[self.account]['zone']
data = zone['hidden']
data[api['file']] = zone['data'].to_text(relativize=True)
response = self._post(api['POST']['url'], data=data)
if Provider._filter_dom(response.text, api['filter']):
LOGGER.error('Hetzner => Unable to update zone for domain %s: Syntax error\n\n%s',
zone['data'].origin.to_unicode(True),
zone['data'].to_text(relativize=True).decode('UTF-8'))
return False
LOGGER.info('Hetzner => Update zone for domain %s',
zone['data'].origin.to_unicode(True))
if self.account == 'robot':
latency = self._get_provider_option('latency')
LOGGER.info('Hetzner => Wait %ds until Hetzner Robot has taken over zone...',
latency)
time.sleep(latency)
return True
def list_instance_configs(self, page_size=None, page_token=None):
"""List available instance configurations for the client's project.
.. _RPC docs: https://cloud.google.com/spanner/docs/reference/rpc/\
google.spanner.admin.instance.v1#google.spanner.admin.\
instance.v1.InstanceAdmin.ListInstanceConfigs
See `RPC docs`_.
:type page_size: int
:param page_size:
Optional. The maximum number of configs in each page of results
from this request. Non-positive values are ignored. Defaults
to a sensible value set by the API.
:type page_token: str
:param page_token:
Optional. If present, return the next batch of configs, using
the value, which must correspond to the ``nextPageToken`` value
returned in the previous response. Deprecated: use the ``pages``
property of the returned iterator instead of manually passing
the token.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns:
Iterator of
:class:`~google.cloud.spanner_v1.instance.InstanceConfig`
resources within the client's project.
"""
metadata = _metadata_with_prefix(self.project_name)
path = "projects/%s" % (self.project,)
page_iter = self.instance_admin_api.list_instance_configs(
path, page_size=page_size, metadata=metadata
)
page_iter.next_page_token = page_token
page_iter.item_to_value = _item_to_instance_config
return page_iter
def prepare(args):
"""
%prog prepare barcode_key.csv reference.fasta
Prepare TASSEL pipeline.
"""
valid_enzymes = "ApeKI|ApoI|BamHI|EcoT22I|HinP1I|HpaII|MseI|MspI|" \
"NdeI|PasI|PstI|Sau3AI|SbfI|AsiSI-MspI|BssHII-MspI|" \
"FseI-MspI|PaeR7I-HhaI|PstI-ApeKI|PstI-EcoT22I|PstI-MspI|" \
"PstI-TaqI|SalI-MspI|SbfI-MspI".split("|")
p = OptionParser(prepare.__doc__)
p.add_option("--enzyme", default="ApeKI", choices=valid_enzymes,
help="Restriction enzyme used [default: %default]")
p.set_home("tassel")
p.set_aligner(aligner="bwa")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
barcode, reference = args
thome = opts.tassel_home
reference = get_abs_path(reference)
folders = ("fastq", "tagCounts", "mergedTagCounts", "topm",
"tbt", "mergedTBT", "hapmap", "hapmap/raw",
"hapmap/mergedSNPs", "hapmap/filt", "hapmap/bpec")
for f in folders:
mkdir(f)
# Build the pipeline
runsh = []
o = "-i fastq -k {0} -e {1} -o tagCounts".format(barcode, opts.enzyme)
cmd = run_pipeline(thome, "FastqToTagCountPlugin", o)
runsh.append(cmd)
o = "-i tagCounts -o mergedTagCounts/myMasterTags.cnt"
o += " -c 5 -t mergedTagCounts/myMasterTags.cnt.fq"
cmd = run_pipeline(thome, "MergeMultipleTagCountPlugin", o)
runsh.append(cmd)
runsh.append("cd mergedTagCounts")
cmd = "python -m jcvi.apps.{0} align --cpus {1}".\
format(opts.aligner, opts.cpus)
cmd += " {0} myMasterTags.cnt.fq".format(reference)
runsh.append(cmd)
runsh.append("cd ..")
o = "-i mergedTagCounts/*.sam -o topm/myMasterTags.topm"
cmd = run_pipeline(thome, "SAMConverterPlugin", o)
runsh.append(cmd)
o = "-i mergedTBT/myStudy.tbt.byte -y -m topm/myMasterTags.topm"
o += " -mUpd topm/myMasterTagsWithVariants.topm"
o += " -o hapmap/raw/myGBSGenos_chr+.hmp.txt"
o += " -mnF 0.8 -p myPedigreeFile.ped -mnMAF 0.02 -mnMAC 100000"
o += " -ref {0} -sC 1 -eC 10".format(reference)
cmd = run_pipeline(thome, "TagsToSNPByAlignmentPlugin", o)
runsh.append(cmd)
o = "-hmp hapmap/raw/myGBSGenos_chr+.hmp.txt"
o += " -o hapmap/mergedSNPs/myGBSGenos_mergedSNPs_chr+.hmp.txt"
o += " -misMat 0.1 -p myPedigreeFile.ped -callHets -sC 1 -eC 10"
cmd = run_pipeline(thome, "MergeDuplicateSNPsPlugin", o)
runsh.append(cmd)
o = "-hmp hapmap/mergedSNPs/myGBSGenos_mergedSNPs_chr+.hmp.txt"
o += " -o hapmap/filt/myGBSGenos_mergedSNPsFilt_chr+.hmp.txt"
o += " -mnTCov 0.01 -mnSCov 0.2 -mnMAF 0.01 -sC 1 -eC 10"
#o += "-hLD -mnR2 0.2 -mnBonP 0.005"
cmd = run_pipeline(thome, "GBSHapMapFiltersPlugin", o)
runsh.append(cmd)
runfile = "run.sh"
write_file(runfile, "\n".join(runsh))
def get(self, key, value=None):
"x.get(k[,d]) -> x[k] if k in x, else d. d defaults to None."
_key = self._prepare_key(key)
prefix, node = self._get_node_by_key(_key)
if prefix==_key and node.value is not None:
return self._unpickle_value(node.value)
else:
return value | 0.009009 |
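A hedged usage sketch; `Trie` is a hypothetical name for the mapping class this method belongs to:

t = Trie()                          # hypothetical trie-backed mapping
t['apple'] = 1
assert t.get('apple') == 1          # exact key with a stored value: return it
assert t.get('app') is None         # prefix node with no value: default None
assert t.get('pear', 0) == 0        # missing key with an explicit default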
def sections(self):
"""List with tuples of section names and positions.
Positions of section names are measured by cumulative word count.
"""
sections = []
for match in texutils.section_pattern.finditer(self.text):
textbefore = self.text[0:match.start()]
wordsbefore = nlputils.wordify(textbefore)
numwordsbefore = len(wordsbefore)
sections.append((numwordsbefore, match.group(1)))
self._sections = sections
return sections | 0.003781 |
def ping(self):
"""
Ping the broker.
Send a MQTT `PINGREQ <http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718081>`_ message for response.
This method is a *coroutine*.
"""
if self.session.transitions.is_connected():
yield from self._handler.mqtt_ping()
else:
self.logger.warning("MQTT PING request incompatible with current session state '%s'" %
self.session.transitions.state) | 0.007533 |
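A sketch of driving this coroutine, assuming an hbmqtt-style MQTTClient (the class this method appears to belong to) and the same `yield from` era of asyncio; the broker URL is a placeholder:

import asyncio
from hbmqtt.client import MQTTClient

@asyncio.coroutine
def ping_broker():
    client = MQTTClient()
    yield from client.connect('mqtt://broker.example.com/')  # placeholder URL
    yield from client.ping()      # PINGREQ is only sent while connected
    yield from client.disconnect()

asyncio.get_event_loop().run_until_complete(ping_broker())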
def _homogenize_dict(self, frames, intersect=True, dtype=None):
"""
Conform set of _constructor_sliced-like objects to either
an intersection of indices / columns or a union.
Parameters
----------
frames : dict
intersect : boolean, default True
Returns
-------
dict of aligned results & indices
"""
result = dict()
        # the caller may pass a dict or an OrderedDict; preserve that type
if isinstance(frames, OrderedDict):
result = OrderedDict()
adj_frames = OrderedDict()
for k, v in frames.items():
if isinstance(v, dict):
adj_frames[k] = self._constructor_sliced(v)
else:
adj_frames[k] = v
axes = self._AXIS_ORDERS[1:]
axes_dict = {a: ax for a, ax in zip(axes, self._extract_axes(
self, adj_frames, axes, intersect=intersect))}
reindex_dict = {self._AXIS_SLICEMAP[a]: axes_dict[a] for a in axes}
reindex_dict['copy'] = False
for key, frame in adj_frames.items():
if frame is not None:
result[key] = frame.reindex(**reindex_dict)
else:
result[key] = None
axes_dict['data'] = result
axes_dict['dtype'] = dtype
return axes_dict | 0.001489 |
def approx_post(self, xp, yt):
""" approximates the law of X_t|Y_t,X_{t-1}
returns a tuple of size 3: loc, cov, logpyt
"""
xmax, Q = self.approx_likelihood(yt)
G = np.eye(self.dx)
covY = linalg.inv(Q)
pred = kalman.MeanAndCov(mean=self.predmean(xp), cov=self.SigX)
return kalman.filter_step(G, covY, pred, xmax) | 0.007813 |
def populate_target(device_name):
"""! @brief Add targets from cmsis-pack-manager matching the given name.
Targets are added to the `#TARGET` list. A case-insensitive comparison against the
device part number is used to find the target to populate. If multiple packs are installed
that provide the same part numbers, all matching targets will be populated.
"""
device_name = device_name.lower()
targets = ManagedPacks.get_installed_targets()
for dev in targets:
if device_name == dev.part_number.lower():
PackTargets.populate_device(dev) | 0.009524 |
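A minimal sketch under the assumption that at least one installed CMSIS pack provides the (placeholder) part number; matching is case-insensitive per the code above:

populate_target("stm32f103rb")   # hypothetical part number
# Every installed pack device whose part number matches has now been added
# to the TARGET list via PackTargets.populate_device().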
def sort_values(self, by=None, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
"""
Sort by the values along either axis.
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ndarray.np.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted values if inplace=False, None otherwise.
Examples
--------
>>> df = pd.DataFrame({
... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
"""
raise NotImplementedError("sort_values has not been implemented "
"on Panel or Panel4D objects.") | 0.001055 |
def _encode(self, obj, context):
"""Encodes a class to a lower-level object using the class' own
to_construct function.
If no such function is defined, returns the object unchanged.
"""
func = getattr(obj, 'to_construct', None)
if callable(func):
return func(context)
else:
return obj | 0.005495 |
def stMFCC(X, fbank, n_mfcc_feats):
"""
Computes the MFCCs of a frame, given the fft mag
ARGUMENTS:
        X:            fft magnitude abs(FFT)
        fbank:        filter bank (see mfccInitFilterBanks)
        n_mfcc_feats: number of cepstral coefficients to return
    RETURN
        ceps:         MFCCs (n_mfcc_feats element vector)
Note: MFCC calculation is, in general, taken from the
scikits.talkbox library (MIT Licence),
# with a small number of modifications to make it more
compact and suitable for the pyAudioAnalysis Lib
"""
mspec = numpy.log10(numpy.dot(X, fbank.T)+eps)
ceps = dct(mspec, type=2, norm='ortho', axis=-1)[:n_mfcc_feats]
return ceps | 0.004658 |
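A hedged sketch of computing MFCCs for one frame, assuming it runs in the same module as stMFCC (which defines `eps` and imports `dct`) and a pyAudioAnalysis-style `mfccInitFilterBanks(fs, nfft)` returning a filter bank and center frequencies:

import numpy

fs, win = 16000, 512                             # sample rate, frame length
frame = numpy.random.randn(win)                  # stand-in for a windowed audio frame
nfft = win // 2
X = abs(numpy.fft.fft(frame))[:nfft]             # fft magnitude, as the docstring expects
fbank, freqs = mfccInitFilterBanks(fs, nfft)     # assumed helper from the same library
ceps = stMFCC(X, fbank, n_mfcc_feats=13)         # 13 cepstral coefficients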
def is_element_available(self, locator):
"""
Synchronization method for making sure the element we're looking for is not only on the page,
but also visible -- since Se will happily deal with things that aren't visible.
Use this instead of is_element_present most of the time.
"""
        return (self.driver.is_element_present(locator) and
                self.driver.is_visible(locator))
def relabel(self, i):
'''
API: relabel(self, i)
Description:
Used by max_flow_preflowpush() method for relabelling node i.
Input:
i: Node that is being relabelled.
Post:
'distance' attribute of node i is updated.
'''
min_distance = 2*len(self.get_node_list()) + 1
for j in self.get_neighbors(i):
if (self.get_node_attr(j, 'distance') < min_distance and
(self.get_edge_attr(i, j, 'flow') <
self.get_edge_attr(i, j, 'capacity'))):
min_distance = self.get_node_attr(j, 'distance')
for j in self.get_in_neighbors(i):
if (self.get_node_attr(j, 'distance') < min_distance and
self.get_edge_attr(j, i, 'flow') > 0):
min_distance = self.get_node_attr(j, 'distance')
self.set_node_attr(i, 'distance', min_distance + 1) | 0.00324 |
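The same relabel rule as a standalone sketch on a plain adjacency structure: a node's label becomes one more than the smallest label among its residual neighbors. Residual capacity is modeled directly here, a hypothetical simplification of the attribute lookups above:

def relabel(distance, residual, i):
    # d(i) <- 1 + min d(j) over arcs i -> j with remaining residual capacity
    candidates = [distance[j] for j, cap in residual[i].items() if cap > 0]
    distance[i] = min(candidates) + 1

distance = {'s': 3, 'a': 0, 't': 0}
residual = {'a': {'s': 0, 't': 2}}   # only a -> t still has residual capacity
relabel(distance, residual, 'a')
assert distance['a'] == 1            # 1 + d(t)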
def get_property_by_name(pif, name):
"""Get a property by name"""
return next((x for x in pif.properties if x.name == name), None) | 0.007246 |
def _add_or_update_records(cls, conn: Connection, table: Table,
records: List["I2B2CoreWithUploadId"]) -> Tuple[int, int]:
"""Add or update the supplied table as needed to reflect the contents of records
        :param conn: i2b2 sql connection
        :param table: table to add or update
        :param records: records to apply
        :return: tuple of (number of records inserted, number of records updated)
"""
num_updates = 0
num_inserts = 0
inserts = []
# Iterate over the records doing updates
# Note: This is slow as molasses - definitely not optimal for batch work, but hopefully we'll be dealing with
# thousands to tens of thousands of records. May want to move to ORM model if this gets to be an issue
for record in records:
keys = [(table.c[k] == getattr(record, k)) for k in cls.key_fields]
key_filter = I2B2CoreWithUploadId._nested_fcn(and_, keys)
rec_exists = conn.execute(select([table.c.upload_id]).where(key_filter)).rowcount
if rec_exists:
known_values = {k: v for k, v in as_dict(record).items()
if v is not None and k not in cls._no_update_fields and
k not in cls.key_fields}
vals = [table.c[k] != v for k, v in known_values.items()]
val_filter = I2B2CoreWithUploadId._nested_fcn(or_, vals)
known_values['update_date'] = record.update_date
upd = update(table).where(and_(key_filter, val_filter)).values(known_values)
num_updates += conn.execute(upd).rowcount
else:
inserts.append(as_dict(record))
if inserts:
if cls._check_dups:
dups = cls._check_for_dups(inserts)
nprints = 0
if dups:
print("{} duplicate records encountered".format(len(dups)))
for k, vals in dups.items():
if len(vals) == 2 and vals[0] == vals[1]:
inserts.remove(vals[1])
else:
if nprints < 20:
print("Key: {} has a non-identical dup".format(k))
elif nprints == 20:
print(".... more ...")
nprints += 1
for v in vals[1:]:
inserts.remove(v)
# TODO: refactor this to load on a per-resource basis. Temporary fix
for insert in ListChunker(inserts, 500):
num_inserts += conn.execute(table.insert(), insert).rowcount
return num_inserts, num_updates | 0.004372 |
def tweet(ctx, created_at, twtfile, text):
"""Append a new tweet to your twtxt file."""
text = expand_mentions(text)
tweet = Tweet(text, created_at) if created_at else Tweet(text)
pre_tweet_hook = ctx.obj["conf"].pre_tweet_hook
if pre_tweet_hook:
run_pre_tweet_hook(pre_tweet_hook, ctx.obj["conf"].options)
if not add_local_tweet(tweet, twtfile):
click.echo("✗ Couldn’t write to file.")
else:
post_tweet_hook = ctx.obj["conf"].post_tweet_hook
if post_tweet_hook:
run_post_tweet_hook(post_tweet_hook, ctx.obj["conf"].options) | 0.001672 |
def reverse_query(cls, parent_class, relation_key, child):
"""
        Create a new Query object that reverse-queries all parent objects pointing to this Relation.
        :param parent_class: name of the parent class
        :param relation_key: name of the Relation field on the parent class
        :param child: the child object
:return: leancloud.Query
"""
q = leancloud.Query(parent_class)
q.equal_to(relation_key, child._to_pointer())
return q | 0.005181 |
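A hedged usage sketch with the leancloud SDK; the class names and object id are placeholders:

import leancloud

User = leancloud.Object.extend('_User')
user = User.create_without_data('55a39634e4b0ed48f0c1845c')  # placeholder object id
# Find every Post whose 'likers' relation points at this user.
query = leancloud.Relation.reverse_query('Post', 'likers', user)
posts = query.find()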
def explode(self):
"""
Collects all the polygons, holes and points in the Space
packaged in a list. The returned geometries are not in *pyny3d*
form, instead the will be represented as *ndarrays*.
:returns: The polygons, the holes and the points.
:rtype: list
"""
seed = self.get_seed()['places']
points = []
polygons = []
holes = []
for place in seed:
points.append(place['set_of_points'])
polygons += sum(place['polyhedra'], [])
polygons += place['surface']['polygons']
holes += place['surface']['holes']
return [polygons, holes, np.concatenate(points, axis=0)] | 0.007884 |
def strip_dimensions(self, text_lines, location, pid):
"""
        Calculate the strip dimensions
Returns
-------
out : types.SimpleNamespace
A structure with all the coordinates required
to draw the strip text and the background box.
"""
dpi = 72
num_lines = len(text_lines)
get_property = self.theme.themeables.property
ax = self.axs[pid]
bbox = ax.get_window_extent().transformed(
self.figure.dpi_scale_trans.inverted())
ax_width, ax_height = bbox.width, bbox.height # in inches
strip_size = self.strip_size(location, num_lines)
m1, m2 = self.inner_strip_margins(location)
m1, m2 = m1/dpi, m2/dpi
margin = 0 # default
if location == 'right':
box_x = 1
box_y = 0
box_width = strip_size/ax_width
box_height = 1
# y & height properties of the background slide and
# shrink the strip vertically. The y margin slides
# it horizontally.
with suppress(KeyError):
box_y = get_property('strip_background_y', 'y')
with suppress(KeyError):
box_height = get_property('strip_background_y', 'height')
with suppress(KeyError):
margin = get_property('strip_margin_y')
x = 1 + (strip_size-m2+m1) / (2*ax_width)
y = (2*box_y+box_height)/2
# margin adjustment
hslide = 1 + margin*strip_size/ax_width
x *= hslide
box_x *= hslide
else:
box_x = 0
box_y = 1
box_width = 1
box_height = strip_size/ax_height
# x & width properties of the background slide and
# shrink the strip horizontally. The y margin slides
# it vertically.
with suppress(KeyError):
box_x = get_property('strip_background_x', 'x')
with suppress(KeyError):
box_width = get_property('strip_background_x', 'width')
with suppress(KeyError):
margin = get_property('strip_margin_x')
x = (2*box_x+box_width)/2
y = 1 + (strip_size-m1+m2)/(2*ax_height)
# margin adjustment
vslide = 1 + margin*strip_size/ax_height
y *= vslide
box_y *= vslide
dimensions = types.SimpleNamespace(
x=x, y=y, box_x=box_x, box_y=box_y,
box_width=box_width,
box_height=box_height)
return dimensions | 0.000764 |
def _setup_features(self):
""" Setup the advanced widget feature handlers.
"""
features = self._features = self.declaration.features
if not features:
return
if features & Feature.FocusTraversal:
self.hook_focus_traversal()
if features & Feature.FocusEvents:
self.hook_focus_events()
if features & Feature.DragEnabled:
self.hook_drag()
if features & Feature.DropEnabled:
self.hook_drop()
features = self._extra_features
if features & GraphicFeature.WheelEvent:
self.hook_wheel()
if features & GraphicFeature.DrawEvent:
self.hook_draw() | 0.004167 |
def _entity_list_as_bel(entities: Iterable[BaseEntity]) -> str:
"""Stringify a list of BEL entities."""
return ', '.join(
e.as_bel()
for e in entities
) | 0.005556 |
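A hedged sketch with PyBEL DSL nodes; the exact rendering shown in the comment is approximate:

from pybel.dsl import Protein

bel = _entity_list_as_bel([Protein('HGNC', 'AKT1'), Protein('HGNC', 'EGFR')])
# bel is roughly 'p(HGNC:AKT1), p(HGNC:EGFR)'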
def get_oauth_token(oauth_key, oauth_secret, username, password, useragent=_DEFAULT_USERAGENT, script_key=None):
"""
Gets an OAuth token from Reddit or returns a valid locally stored token.
Because the retrieved token is stored on the file system (script_key is used to distinguish between files), this function is safe
to call across multiple instances or runs. The token is renewed after one hour.
This function can be used without PRAW.
Note: Only script-based oauth is supported.
:param oauth_key: Reddit oauth key
:param oauth_secret: Reddit oauth secret
:param username: Reddit username
:param password: Reddit password
:param useragent: Connection useragent (this should be changed, otherwise you'll be heavily rate limited)
:param script_key: Key used to distinguish between local token files
:return: An OAuth token if one could be retrieved, otherwise None.
"""
token = _get_local_token(script_key, username)
if token is None:
token_time = _time_ms()
token = _request_oauth_token(oauth_key, oauth_secret, username, password, useragent=useragent)
write_config(token, token_time, _get_config_file(script_key, username))
return token | 0.027142 |
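A minimal usage sketch; the credentials are placeholders, and the Authorization header format follows Reddit's OAuth convention:

import requests

token = get_oauth_token('client-id', 'client-secret', 'some_user', 'hunter2',
                        useragent='my-script/0.1 by u/some_user', script_key='my_script')
if token is not None:
    resp = requests.get('https://oauth.reddit.com/api/v1/me',
                        headers={'Authorization': 'bearer ' + token,
                                 'User-Agent': 'my-script/0.1 by u/some_user'})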
def remove_parameter(self, parameter_name):
"""Removes the specified parameter from the list."""
if parameter_name in self.paramorder:
index = self.paramorder.index(parameter_name)
del self.paramorder[index]
if parameter_name in self._parameters:
del self._parameters[parameter_name] | 0.005814 |
def delete(self, synchronous=True):
"""Delete the current entity.
Call :meth:`delete_raw` and check for an HTTP 4XX or 5XX response.
Return either the JSON-decoded response or information about a
completed foreman task.
:param synchronous: A boolean. What should happen if the server returns
an HTTP 202 (accepted) status code? Wait for the task to complete
if ``True``. Immediately return a response otherwise.
:returns: A dict. Either the JSON-decoded response or information about
a foreman task.
:raises: ``requests.exceptions.HTTPError`` if the response has an HTTP
4XX or 5XX status code.
:raises: ``ValueError`` If an HTTP 202 response is received and the
response JSON can not be decoded.
:raises nailgun.entity_mixins.TaskTimedOutError: If an HTTP 202
response is received, ``synchronous is True`` and the task times
out.
"""
response = self.delete_raw()
response.raise_for_status()
if (synchronous is True and
response.status_code == http_client.ACCEPTED):
return _poll_task(response.json()['id'], self._server_config)
elif (response.status_code == http_client.NO_CONTENT or
(response.status_code == http_client.OK and
hasattr(response, 'content') and
not response.content.strip())):
# "The server successfully processed the request, but is not
# returning any content. Usually used as a response to a successful
# delete request."
return
return response.json() | 0.001176 |
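A hedged sketch with nailgun; the server URL, credentials, and entity id are placeholders:

from nailgun.config import ServerConfig
from nailgun.entities import Organization

cfg = ServerConfig('https://satellite.example.com', auth=('admin', 'changeme'))
# Blocks until the server-side task finishes if a 202 is returned.
Organization(cfg, id=42).delete(synchronous=True)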
def ssh(self, enable=True, comment=None):
"""
Enable or disable SSH
:param bool enable: enable or disable SSH daemon
:param str comment: optional comment for audit
:raises NodeCommandFailed: cannot enable SSH daemon
:return: None
"""
self.make_request(
NodeCommandFailed,
method='update',
resource='ssh',
params={'enable': enable, 'comment': comment}) | 0.00431 |
def delete_plan(self, plan_code):
"""
Delete an entire subscription plan associated with the merchant.
Args:
plan_code: Plan’s identification code for the merchant.
        Returns:
            The API response for the delete request.
"""
return self.client._delete(self.url + 'plans/{}'.format(plan_code), headers=self.get_headers()) | 0.008929 |
def create_item(self, item):
"""
Create a new item in D4S2 service for item at the specified destination.
:param item: D4S2Item data to use for creating a D4S2 item
:return: requests.Response containing the successful result
"""
item_dict = {
'project_id': item.project_id,
'from_user_id': item.from_user_id,
'to_user_id': item.to_user_id,
'role': item.auth_role,
'user_message': item.user_message
}
if item.share_user_ids:
item_dict['share_user_ids'] = item.share_user_ids
data = json.dumps(item_dict)
resp = requests.post(self.make_url(item.destination), headers=self.json_headers, data=data)
self.check_response(resp)
return resp | 0.005 |
def info(self):
""" retreive metadata and currenct price data """
url = "{}/v7/finance/quote?symbols={}".format(
self._base_url, self.ticker)
r = _requests.get(url=url).json()["quoteResponse"]["result"]
if len(r) > 0:
return r[0]
return {} | 0.006601 |
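A minimal sketch, assuming this method backs a yfinance-style Ticker.info property; the key names in the last line are illustrative:

import yfinance as yf

quote = yf.Ticker("MSFT").info          # {} when the symbol is unknown
print(quote.get("shortName"), quote.get("regularMarketPrice"))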
def _getEngineVersionDetails(self):
"""
Parses the JSON version details for the latest installed version of UE4
"""
versionFile = os.path.join(self.getEngineRoot(), 'Engine', 'Build', 'Build.version')
return json.loads(Utility.readFile(versionFile)) | 0.030888 |
def statistics(self):
"""
Access the statistics
:returns: twilio.rest.taskrouter.v1.workspace.worker.workers_statistics.WorkersStatisticsList
:rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_statistics.WorkersStatisticsList
"""
if self._statistics is None:
self._statistics = WorkersStatisticsList(
self._version,
workspace_sid=self._solution['workspace_sid'],
)
return self._statistics | 0.007859 |
def cget(self, key):
"""
Query widget option.
:param key: option name
:type key: str
:return: value of the option
To get the list of options for this widget, call the method :meth:`~LinkLabel.keys`.
"""
if key is "link":
return self._link
elif key is "hover_color":
return self._hover_color
elif key is "normal_color":
return self._normal_color
elif key is "clicked_color":
return self._clicked_color
else:
return ttk.Label.cget(self, key) | 0.005025 |
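A minimal sketch, assuming a ttkwidgets-style LinkLabel exposing this cget:

import tkinter as tk
from ttkwidgets import LinkLabel   # assumption: widget provided by ttkwidgets

root = tk.Tk()
label = LinkLabel(root, text="Project page", link="https://example.com")
label.pack()
print(label.cget("link"))          # handled by the branch above
print(label.cget("text"))          # anything else falls through to ttk.Label.cget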
def drop(self, *cols):
"""
Drops columns from the main dataframe
:param cols: names of the columns
:type cols: str
:example: ``ds.drop("Col 1", "Col 2")``
"""
try:
index = self.df.columns.values
for col in cols:
if col not in index:
self.warning("Column", col, "not found. Aborting")
return
self.df = self.df.drop(col, axis=1)
except Exception as e:
self.err(e, self.drop, "Can not drop column") | 0.003515 |
def dim_dm(self, pars):
r"""
        :math:`\frac{\partial \hat{\rho}''(\omega)}{\partial m} = - \rho_0
        (\omega \tau)^c \frac{\sin(\frac{c \pi}{2})}{1 + 2 (\omega \tau)^c
        \cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
numerator = -self.otc * np.sin(self.ang)
result = numerator / self.denom
result *= self.rho0
return result | 0.004695 |
def get_extra_data(self, data):
"""Get eventual extra data for this placeholder from the
admin form. This method is called when the Page is
saved in the admin and passed to the placeholder save
method."""
result = {}
for key in list(data.keys()):
if key.startswith(self.ctype + '-'):
new_key = key.replace(self.ctype + '-', '')
result[new_key] = data[key]
return result | 0.004264 |
def get_metadata(self, lcid):
"""Get the parameters derived from the fit for the given id.
This is table 2 of Sesar 2010
"""
if self._metadata is None:
self._metadata = fetch_rrlyrae_lc_params()
i = np.where(self._metadata['id'] == lcid)[0]
if len(i) == 0:
raise ValueError("invalid lcid: {0}".format(lcid))
return self._metadata[i[0]] | 0.004819 |
def _make_association(self, *args, **kwargs):
"""
Delegate _make_association on items
:note: doc in :func:`~hwt.synthesizer.interfaceLevel.propDeclCollector._make_association`
"""
for o in self:
o._make_association(*args, **kwargs) | 0.010563 |
def cross_section_components(data_x, data_y, index='index'):
r"""Obtain the tangential and normal components of a cross-section of a vector field.
Parameters
----------
data_x : `xarray.DataArray`
The input DataArray of the x-component (in terms of data projection) of the vector
field.
    data_y : `xarray.DataArray`
        The input DataArray of the y-component (in terms of data projection) of the vector
        field.
    index : str, optional
        The name of the coordinate dimension along the cross-section, defaults to 'index'.
Returns
-------
component_tangential, component_normal: tuple of `xarray.DataArray`
The components of the vector field in the tangential and normal directions,
respectively.
See Also
--------
tangential_component, normal_component
Notes
-----
The coordinates of `data_x` and `data_y` must match.
"""
# Get the unit vectors
unit_tang, unit_norm = unit_vectors_from_cross_section(data_x, index=index)
# Take the dot products
component_tang = data_x * unit_tang[0] + data_y * unit_tang[1]
component_norm = data_x * unit_norm[0] + data_y * unit_norm[1]
# Reattach units (only reliable attribute after operation)
component_tang.attrs = {'units': data_x.attrs['units']}
component_norm.attrs = {'units': data_x.attrs['units']}
return component_tang, component_norm | 0.003805 |
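A hedged sketch of the surrounding MetPy workflow; the file name and variable names are assumptions:

import xarray as xr
from metpy.interpolate import cross_section

data = xr.open_dataset('narr_example.nc').metpy.parse_cf()   # hypothetical file
cross = cross_section(data, start=(37.0, -105.0), end=(35.5, -65.0))
u_tang, u_norm = cross_section_components(cross['u_wind'], cross['v_wind'])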
def get_resource(collection, key):
"""Return the appropriate *Response* for retrieving a single resource.
:param string collection: a :class:`sandman.model.Model` endpoint
:param string key: the primary key for the :class:`sandman.model.Model`
:rtype: :class:`flask.Response`
"""
resource = retrieve_resource(collection, key)
_validate(endpoint_class(collection), request.method, resource)
return resource_response(resource) | 0.002179 |
def tiles_to_pixels(self, tiles):
"""Convert tile coordinates into pixel coordinates"""
pixel_coords = Vector2()
pixel_coords.X = tiles[0] * self.spritesheet[0].width
pixel_coords.Y = tiles[1] * self.spritesheet[0].height
return pixel_coords | 0.007117 |
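The same conversion as a standalone sketch, assuming fixed-size tiles rather than a spritesheet object:

def tiles_to_pixels(tile_x, tile_y, tile_width, tile_height):
    # Multiply tile indices by the sprite dimensions to get the top-left pixel.
    return tile_x * tile_width, tile_y * tile_height

assert tiles_to_pixels(3, 2, 32, 32) == (96, 64)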
def get_lab_text(lab_slug, language):
"""Gets text description in English or Italian from a single lab from makeinitaly.foundation."""
if language == "English" or language == "english" or language == "EN" or language == "En":
language = "en"
elif language == "Italian" or language == "italian" or language == "IT" or language == "It" or language == "it":
language = "it"
else:
language = "en"
wiki = MediaWiki(makeinitaly__foundation_api_url)
wiki_response = wiki.call(
{'action': 'query',
'titles': lab_slug + "/" + language,
'prop': 'revisions',
'rvprop': 'content'})
    # The pageid is not known in advance, so iterate over the returned pages
    content = ""
    for i in wiki_response["query"]["pages"]:
        if "revisions" in wiki_response["query"]["pages"][i]:
            content = wiki_response["query"]["pages"][i]["revisions"][0]["*"]
# Clean the resulting string/list
newstr01 = content.replace("}}", "")
newstr02 = newstr01.replace("{{", "")
result = newstr02.rstrip("\n|").split("\n|")
return result[0] | 0.005415 |
def list(self, params=None):
''' /v1/sshkey/list
GET - account
List all the SSH keys on the current account
Link: https://www.vultr.com/api/#sshkey_list
'''
params = params if params else dict()
return self.request('/v1/sshkey/list', params, 'GET') | 0.006557 |
def get_xy_range(bbox):
r"""Return x and y ranges in meters based on bounding box.
    Parameters
    ----------
    bbox: dictionary
        dictionary containing coordinates for corners of study area
Returns
-------
x_range: float
Range in meters in x dimension.
y_range: float
Range in meters in y dimension.
"""
x_range = bbox['east'] - bbox['west']
y_range = bbox['north'] - bbox['south']
return x_range, y_range | 0.002257 |
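A quick worked example with an illustrative bounding box in metres:

bbox = {'west': 0.0, 'east': 1500.0, 'south': 0.0, 'north': 2000.0}
x_range, y_range = get_xy_range(bbox)
assert (x_range, y_range) == (1500.0, 2000.0)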