text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def audit_logs_list(self, filter_actor_id=None, filter_created_at=None, filter_ip_address=None, filter_source_type=None, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/audit_logs#listing-audit-logs"
api_path = "/api/v2/audit_logs.json"
api_query = {}
if "query" in kwargs.keys():
api_query.update(kwargs["query"])
del kwargs["query"]
if filter_actor_id:
api_query.update({
"filter[actor_id]": filter_actor_id,
})
if filter_created_at:
api_query.update({
"filter[created_at]": filter_created_at,
})
if filter_ip_address:
api_query.update({
"filter[ip_address]": filter_ip_address,
})
if filter_source_type:
api_query.update({
"filter[source_type]": filter_source_type,
})
return self.call(api_path, query=api_query, **kwargs) | 0.004024 |
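A hedged call sketch for the filters above (the client object and values are illustrative):

```python
# only audit-log entries created by actor 123 are returned; extra query params pass through kwargs
client.audit_logs_list(filter_actor_id=123, query={'page': 2})
```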
def json_output(cls, cs, score_dict, output_filename, ds_loc, limit,
output_type='json'):
'''
Generates JSON output for the compliance score(s)
@param cs Compliance Checker Suite
@param score_dict Dict mapping each dataset to its check results
@param output_filename The file path to output to
@param ds_loc List of source datasets
@param limit The degree of strictness, 1 being the strictest,
and going up from there.
@param output_type Either 'json' or 'json_new'. json_new is the new
json output format that supports multiple datasets
'''
results = {}
# JSON output is keyed at the top level by dataset only in the 'json_new' format
if len(score_dict) > 1 and output_type != 'json_new':
raise ValueError("output_type must be set to 'json_new' if outputting multiple datasets to a single json file or stdout")
if output_type == 'json':
for ds, score_groups in six.iteritems(score_dict):
for checker, rpair in six.iteritems(score_groups):
groups, errors = rpair
results[checker] = cs.dict_output(
checker, groups, ds, limit,
)
elif output_type == 'json_new':
for ds, score_groups in six.iteritems(score_dict):
for checker, rpair in six.iteritems(score_groups):
groups, errors = rpair
results[ds] = {}
results[ds][checker] = cs.dict_output(
checker, groups, ds, limit
)
json_results = json.dumps(results, indent=2, ensure_ascii=False)
if output_filename == '-':
print(json_results)
else:
with io.open(output_filename, 'w', encoding='utf8') as f:
f.write(json_results)
return groups | 0.002528 |
def _add_facl_rules(self):
"""
Apply ACL rules on the directory using setfacl program. Raises CommandDoesNotExistException
if the command is not present on the system.
:return: None
"""
setfacl_command_exists()
# we are not using pylibacl b/c it's only for python 2
if self.facl_rules:
logger.debug("adding ACLs %s to %s", self.facl_rules, self.path)
r = ",".join(self.facl_rules)
run_cmd(["setfacl", "-m", r, self.path]) | 0.005747 |
def inference(self, observed_arr):
'''
Draws samples from the `fake` distribution.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferenced.
'''
if observed_arr.ndim != 2:
observed_arr = observed_arr.reshape((observed_arr.shape[0], -1))
pred_arr = self.__nn.inference(observed_arr)
return pred_arr | 0.008493 |
def _parse_fmadm_config(output):
'''
Parse fmdump/fmadm output
'''
result = []
output = output.split("\n")
# extract header
header = [field for field in output[0].lower().split(" ") if field]
del output[0]
# parse entries
for entry in output:
entry = [item for item in entry.split(" ") if item]
entry = entry[0:3] + [" ".join(entry[3:])]
# prepare component
component = OrderedDict()
for field in header:
component[field] = entry[header.index(field)]
result.append(component)
# keying
keyed_result = OrderedDict()
for component in result:
keyed_result[component['module']] = component
del keyed_result[component['module']]['module']
result = keyed_result
return result | 0.001232 |
def get_contents(self):
"""Fetch the signature contents. This is the main reason this
class exists, so we can compute this once and cache it regardless
of how many target or source Nodes there are.
"""
try:
return self._memo['get_contents']
except KeyError:
pass
env = self.get_build_env()
action_list = self.get_action_list()
all_targets = self.get_all_targets()
all_sources = self.get_all_sources()
result = bytearray("",'utf-8').join([action.get_contents(all_targets,
all_sources,
env)
for action in action_list])
self._memo['get_contents'] = result
return result | 0.003436 |
def jarFlags(target, source, env, for_signature):
"""If we have a manifest, make sure that the 'm'
flag is specified."""
jarflags = env.subst('$JARFLAGS', target=target, source=source)
for src in source:
contents = src.get_text_contents()
if contents[:16] == "Manifest-Version":
if not 'm' in jarflags:
return jarflags + 'm'
break
return jarflags | 0.004739 |
def encode_offset_commit_request_kafka(cls, group, payloads):
"""
Encode an OffsetCommitRequest struct
Arguments:
group: string, the consumer group you are committing offsets for
payloads: list of OffsetCommitRequestPayload
"""
return kafka.protocol.commit.OffsetCommitRequest[2](
consumer_group=group,
consumer_group_generation_id=kafka.protocol.commit.OffsetCommitRequest[2].DEFAULT_GENERATION_ID,
consumer_id='',
retention_time=kafka.protocol.commit.OffsetCommitRequest[2].DEFAULT_RETENTION_TIME,
topics=[(
topic,
[(
partition,
payload.offset,
payload.metadata)
for partition, payload in six.iteritems(topic_payloads)])
for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))]) | 0.005214 |
def get_data_path(package, resource):
# type: (str, str) -> str
"""Return the full file path of a resource of a package."""
loader = pkgutil.get_loader(package)
if loader is None or not hasattr(loader, 'get_data'):
raise PackageResourceError("Failed to load package: '{0}'".format(
package))
mod = sys.modules.get(package) or loader.load_module(package)
if mod is None or not hasattr(mod, '__file__'):
raise PackageResourceError("Failed to load module: '{0}'".format(
package))
parts = resource.split('/')
parts.insert(0, os.path.dirname(mod.__file__))
resource_name = os.path.join(*parts)
return resource_name | 0.001449 |
def IfContainer(cls, ifc: IfContainer, ctx: SerializerCtx):
"""
Serialize IfContainer instance
"""
childCtx = ctx.withIndent()
def asHdl(statements):
return [cls.asHdl(s, childCtx) for s in statements]
try:
cond = cls.condAsHdl(ifc.cond, True, ctx)
except UnsupportedEventOpErr as e:
cond = None
if cond is None:
assert not ifc.elIfs
assert not ifc.ifFalse
stmBuff = [cls.asHdl(s, ctx) for s in ifc.ifTrue]
return "\n".join(stmBuff)
elIfs = []
ifTrue = ifc.ifTrue
ifFalse = ifc.ifFalse
if ifFalse is None:
ifFalse = []
for c, statements in ifc.elIfs:
try:
elIfs.append((cls.condAsHdl(c, True, ctx), asHdl(statements)))
except UnsupportedEventOpErr as e:
if len(ifc.elIfs) == 1 and not ifFalse:
# register expression is in valid format and this
# is just register with asynchronous reset or etc...
ifFalse = statements
else:
raise e
return cls.ifTmpl.render(
indent=getIndent(ctx.indent),
cond=cond,
ifTrue=asHdl(ifTrue),
elIfs=elIfs,
ifFalse=asHdl(ifFalse)) | 0.001447 |
def _read_output(self, command):
""" Read CSV delimited input from Slurm. """
cmd = []
cmd.extend(self._prefix)
cmd.extend([self._path, "-iP"])
cmd.extend(command)
command = cmd
logger.debug("Cmd %s" % command)
null = open('/dev/null', 'w')
process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=null)
null.close()
results = []
reader = csv.reader(_input_csv(process.stdout), delimiter=str("|"))
try:
headers = next(reader)
logger.debug("<-- headers %s" % headers)
except StopIteration:
logger.debug("Cmd %s headers not found" % command)
headers = []
for row in reader:
_output_csv(row)
logger.debug("<-- row %s" % row)
this_row = {}
i = 0
for i in range(0, len(headers)):
key = headers[i]
value = row[i]
this_row[key] = value
results.append(this_row)
process.stdout.close()
retcode = process.wait()
if retcode != 0:
logger.error("<-- Cmd %s returned %d (error)" % (command, retcode))
raise subprocess.CalledProcessError(retcode, command)
if len(headers) == 0:
logger.error("Cmd %s didn't return any headers." % command)
raise RuntimeError("Cmd %s didn't return any headers." % command)
logger.debug("<-- Returned: %d (good)" % retcode)
return results | 0.001274 |
async def setFormName(self, oldn, newn):
'''
Rename a form within all the layers.
'''
logger.info(f'Migrating [{oldn}] to [{newn}]')
async with self.getTempSlab():
i = 0
async for buid, valu in self.getFormTodo(oldn):
await self.editNodeNdef((oldn, valu), (newn, valu))
i = i + 1
if i and i % _progress == 0:
logger.info(f'Migrated {i} buids.') | 0.004167 |
def unpack(self, unpacker):
"""
unpacks an exception handler entry in an exception table. Updates
the internal structure of this instance
"""
(a, b, c, d) = unpacker.unpack_struct(_HHHH)
self.start_pc = a
self.end_pc = b
self.handler_pc = c
self.catch_type_ref = d | 0.005917 |
def _check_if_complete(self, url, json_response):
"""
Check if a request has been completed and return the redirect URL if it has
@type url: str
@type json_response: list or dict
@rtype: str or bool
"""
if '__done' in json_response and isinstance(json_response, list):
mr_parts = list(urlparse(url))
mr_query = parse_qs(mr_parts[4])
mr_query['mr'] = '"' + str(json_response[0]) + '"'
mr_parts[4] = urlencode(mr_query, True)
mr_link = urlunparse(mr_parts)
mr_j, mr_r = self._ajax(mr_link)
self.log.debug('MultipleRedirect link: %s', mr_link)
return super(Installer, self)._check_if_complete(url, mr_j)
return False | 0.003797 |
def run(self, path_dir=None, convergence=True, write_input=True,
clear_dir=False, max_lpfac=150, min_egrid=0.00005):
"""
Write inputs (optional), run BoltzTraP, and ensure
convergence (optional)
Args:
path_dir (str): directory in which to run BoltzTraP
convergence (bool): whether to check convergence and make
corrections if needed
write_input: (bool) whether to write input files before the run
(required for convergence mode)
clear_dir: (bool) whether to remove all files in the path_dir
before starting
max_lpfac: (float) maximum lpfac value to try before reducing egrid
in convergence mode
min_egrid: (float) minimum egrid value to try before giving up in
convergence mode
Returns:
"""
# TODO: consider making this a part of custodian rather than pymatgen
# A lot of this functionality (scratch dirs, handlers, monitors)
# is built into custodian framework
if convergence and not write_input:
raise ValueError("Convergence mode requires write_input to be "
"true")
if self.run_type in ("BANDS", "DOS", "FERMI"):
convergence = False
if self.lpfac > max_lpfac:
max_lpfac = self.lpfac
if self.run_type == "BANDS" and self.bs.is_spin_polarized:
print("Reminder: for run_type " + str(
self.run_type) + ", spin component are not separated! "
"(you have a spin polarized band structure)")
if self.run_type in ("FERMI", "DOS") and self.spin is None:
if self.bs.is_spin_polarized:
raise BoltztrapError(
"Spin parameter must be specified for spin polarized "
"band structures!")
else:
self.spin = 1
dir_bz_name = "boltztrap"
if path_dir is None:
temp_dir = tempfile.mkdtemp()
path_dir = os.path.join(temp_dir, dir_bz_name)
else:
path_dir = os.path.abspath(
os.path.join(path_dir, dir_bz_name))
if not os.path.exists(path_dir):
os.mkdir(path_dir)
elif clear_dir:
for c in os.listdir(path_dir):
os.remove(os.path.join(path_dir, c))
FORMAT = "%(message)s"
logging.basicConfig(level=logging.INFO, format=FORMAT,
filename=os.path.join(path_dir, "../boltztrap.out"))
with cd(path_dir):
lpfac_start = self.lpfac
converged = False
while self.energy_grid >= min_egrid and not converged:
self.lpfac = lpfac_start
if time.time() - self.start_time > self.timeout:
raise BoltztrapError("no doping convergence after timeout "
"of {} s".format(self.timeout))
logging.info("lpfac, energy_grid: {} {}".format(self.lpfac, self.energy_grid))
while self.lpfac <= max_lpfac and not converged:
if time.time() - self.start_time > self.timeout:
raise BoltztrapError("no doping convergence after "
"timeout of {} s".format(self.timeout))
if write_input:
self.write_input(path_dir)
bt_exe = ["x_trans", "BoltzTraP"]
if self._bs.is_spin_polarized or self.soc:
bt_exe.append("-so")
p = subprocess.Popen(bt_exe, stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE)
p.wait()
for c in p.communicate():
logging.info(c.decode())
if "error in factorization" in c.decode():
raise BoltztrapError("error in factorization")
warning = ""
with open(os.path.join(path_dir,
dir_bz_name + ".outputtrans")) as f:
for l in f:
if "Option unknown" in l:
raise BoltztrapError(
"DOS mode needs a custom version of "
"BoltzTraP code is needed")
if "WARNING" in l:
warning = l
break
if "Error - Fermi level was not found" in l:
warning = l
break
if not warning and convergence:
# check convergence for warning
analyzer = BoltztrapAnalyzer.from_files(path_dir)
for doping in ['n', 'p']:
for c in analyzer.mu_doping[doping]:
if len(analyzer.mu_doping[doping][c]) != len(
analyzer.doping[doping]):
warning = "length of mu_doping array is " \
"incorrect"
break
if doping == 'p' and \
sorted(
analyzer.mu_doping[doping][
c], reverse=True) != \
analyzer.mu_doping[doping][c]:
warning = "sorting of mu_doping array " \
"incorrect for p-type"
break
# ensure n-type doping sorted correctly
if doping == 'n' and sorted(
analyzer.mu_doping[doping][c]) != \
analyzer.mu_doping[doping][c]:
warning = "sorting of mu_doping array " \
"incorrect for n-type"
break
if warning:
self.lpfac += 10
logging.warn("Warning detected: {}! Increase lpfac to "
"{}".format(warning, self.lpfac))
else:
converged = True
if not converged:
self.energy_grid /= 10
logging.info("Could not converge with max lpfac; "
"Decrease egrid to {}".format(self.energy_grid))
if not converged:
raise BoltztrapError(
"Doping convergence not reached with lpfac=" + str(
self.lpfac) + ", energy_grid=" + str(self.energy_grid))
return path_dir | 0.001367 |
def convert_args_to_laid_out_tensors(xs):
"""Convert list elements to laid-out-tensors when possible.
Args:
xs: a list
Returns:
a list
"""
ret = []
for x in xs:
if hasattr(x, "to_laid_out_tensor"):
ret.append(x.to_laid_out_tensor())
else:
ret.append(x)
return ret | 0.022876 |
def setup(self):
"""
Called by pytest to set up the collector cells.
Here we start a kernel and setup the sanitize patterns.
"""
if self.parent.config.option.current_env:
kernel_name = CURRENT_ENV_KERNEL_NAME
else:
kernel_name = self.nb.metadata.get(
'kernelspec', {}).get('name', 'python')
self.kernel = RunningKernel(kernel_name, str(self.fspath.dirname))
self.setup_sanitize_files()
if getattr(self.parent.config.option, 'cov_source', None):
setup_coverage(self.parent.config, self.kernel, getattr(self, "fspath", None)) | 0.004608 |
def path(self):
"""Returns the build root for the current workspace."""
if self._root_dir is None:
# This env variable is for testing purpose.
override_buildroot = os.environ.get('PANTS_BUILDROOT_OVERRIDE', None)
if override_buildroot:
self._root_dir = override_buildroot
else:
self._root_dir = os.path.realpath(self.find_buildroot())
if PY2:
self._root_dir = self._root_dir.decode('utf-8')
return self._root_dir | 0.012552 |
def set_top_sentences(self, value):
''' setter '''
if isinstance(value, int) is False:
raise TypeError("The type of __top_sentences must be int.")
self.__top_sentences = value | 0.009479 |
def get_parameter_limits(xval, loglike, cl_limit=0.95, cl_err=0.68269, tol=1E-2,
bounds=None):
"""Compute upper/lower limits, peak position, and 1-sigma errors
from a 1-D likelihood function. This function uses the
delta-loglikelihood method to evaluate parameter limits by
searching for the point at which the change in the log-likelihood
value with respect to the maximum equals a specific value. A
cubic spline fit to the log-likelihood values is used to
improve the accuracy of the calculation.
Parameters
----------
xval : `~numpy.ndarray`
Array of parameter values.
loglike : `~numpy.ndarray`
Array of log-likelihood values.
cl_limit : float
Confidence level to use for limit calculation.
cl_err : float
Confidence level to use for two-sided confidence interval
calculation.
tol : float
Absolute precision of likelihood values.
Returns
-------
x0 : float
Coordinate at maximum of likelihood function.
err_lo : float
Lower error for two-sided confidence interval with CL
``cl_err``. Corresponds to point (x < x0) at which the
log-likelihood falls by a given value with respect to the
maximum (0.5 for 1 sigma). Set to nan if the change in the
log-likelihood function at the lower bound of the ``xval``
input array is less than than the value for the given CL.
err_hi : float
Upper error for two-sided confidence interval with CL
``cl_err``. Corresponds to point (x > x0) at which the
log-likelihood falls by a given value with respect to the
maximum (0.5 for 1 sigma). Set to nan if the change in the
log-likelihood function at the upper bound of the ``xval``
input array is less than the value for the given CL.
err : float
Symmetric 1-sigma error. Average of ``err_lo`` and ``err_hi``
if both are defined.
ll : float
Lower limit evaluated at confidence level ``cl_limit``.
ul : float
Upper limit evaluated at confidence level ``cl_limit``.
lnlmax : float
Log-likelihood value at ``x0``.
"""
dlnl_limit = onesided_cl_to_dlnl(cl_limit)
dlnl_err = twosided_cl_to_dlnl(cl_err)
try:
# Pad the likelihood function
# if len(xval) >= 3 and np.max(loglike) - loglike[-1] < 1.5*dlnl_limit:
# p = np.polyfit(xval[-3:], loglike[-3:], 2)
# x = np.linspace(xval[-1], 10 * xval[-1], 3)[1:]
# y = np.polyval(p, x)
# x = np.concatenate((xval, x))
# y = np.concatenate((loglike, y))
# else:
x, y = xval, loglike
spline = UnivariateSpline(x, y, k=2,
#k=min(len(xval) - 1, 3),
w=(1 / tol) * np.ones(len(x)))
except:
print("Failed to create spline: ", xval, loglike)
return {'x0': np.nan, 'ul': np.nan, 'll': np.nan,
'err_lo': np.nan, 'err_hi': np.nan, 'err': np.nan,
'lnlmax': np.nan}
sd = spline.derivative()
imax = np.argmax(loglike)
ilo = max(imax - 1, 0)
ihi = min(imax + 1, len(xval) - 1)
# Find the peak
x0 = xval[imax]
# Refine the peak position
if np.sign(sd(xval[ilo])) != np.sign(sd(xval[ihi])):
x0 = find_function_root(sd, xval[ilo], xval[ihi])
lnlmax = float(spline(x0))
def fn(t): return spline(t) - lnlmax
fn_val = fn(xval)
if np.any(fn_val[imax:] < -dlnl_limit):
xhi = xval[imax:][fn_val[imax:] < -dlnl_limit][0]
else:
xhi = xval[-1]
# EAC: brute force check that xhi is greater than x0
# The fabs is here in case x0 is negative
if xhi <= x0:
xhi = x0 + np.fabs(x0)
if np.any(fn_val[:imax] < -dlnl_limit):
xlo = xval[:imax][fn_val[:imax] < -dlnl_limit][-1]
else:
xlo = xval[0]
# EAC: brute force check that xlo is less than x0
# The fabs is here in case x0 is negative
if xlo >= x0:
xlo = x0 - 0.5*np.fabs(x0)
ul = find_function_root(fn, x0, xhi, dlnl_limit, bounds=bounds)
ll = find_function_root(fn, x0, xlo, dlnl_limit, bounds=bounds)
err_lo = np.abs(x0 - find_function_root(fn, x0, xlo, dlnl_err,
bounds=bounds))
err_hi = np.abs(x0 - find_function_root(fn, x0, xhi, dlnl_err,
bounds=bounds))
err = np.nan
if np.isfinite(err_lo) and np.isfinite(err_hi):
err = 0.5 * (err_lo + err_hi)
elif np.isfinite(err_hi):
err = err_hi
elif np.isfinite(err_lo):
err = err_lo
o = {'x0': x0, 'ul': ul, 'll': ll,
'err_lo': err_lo, 'err_hi': err_hi, 'err': err,
'lnlmax': lnlmax}
return o | 0.001436 |
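A minimal usage sketch for the limit calculation above (assumes numpy; the likelihood curve is illustrative):

```python
import numpy as np

# parabolic log-likelihood peaked at x0 = 1.0 with a 1-sigma width of 0.2
xval = np.linspace(0.0, 2.0, 101)
loglike = -0.5 * ((xval - 1.0) / 0.2) ** 2
lims = get_parameter_limits(xval, loglike)
# expect lims['x0'] ~ 1.0, lims['err'] ~ 0.2, and lims['ul'] near 1.0 + 1.64 * 0.2
```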
def _do_get(self, uri, **kwargs):
"""
Convenient method for GET requests
Returns the response from a GET request
"""
#TODO:
# Add error handling. Checking the HTTP status here would be much more convenient than in each calling method
scaleioapi_get_headers = {'Content-type':'application/json','Version':'1.0'}
self.logger.debug("_do_get() " + "{}/{}".format(self._api_url,uri))
if kwargs:
for key, value in kwargs.iteritems():
if key == 'headers':
scaleio_get_headersvalue = value
try:
#response = self._im_session.get("{}/{}".format(self._api_url, uri), headers = scaleioapi_get_headers, payload = scaleio_payload).json()
response = self._im_session.get("{}/{}".format(self._api_url, uri), **kwargs).json()
#response = self._session.get(url, headers=scaleioapi_post_headers, **kwargs)
if response.status_code == requests.codes.ok:
return response
else:
raise RuntimeError("_do_get() - HTTP response error" + response.status_code)
except:
raise RuntimeError("_do_get() - Communication error with ScaleIO gateway")
return response | 0.013857 |
def cell_ends_with_code(lines):
"""Is the last line of the cell a line with code?"""
if not lines:
return False
if not lines[-1].strip():
return False
if lines[-1].startswith('#'):
return False
return True | 0.004016 |
def validate_metadata(self, existing):
""" create / validate metadata """
self.metadata = [
c.name for c in self.values_axes if c.metadata is not None] | 0.011173 |
def temporal_split(X, y, test_size=0.25):
'''
Split time series or sequence data along the time axis.
Test data is drawn from the end of each series / sequence
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series, ]
target vector
test_size : float
between 0 and 1, amount to allocate to test
Returns
-------
X_train : array-like, shape [n_series, ]
X_test : array-like, shape [n_series, ]
y_train : array-like, shape [n_series, ]
y_test : array-like, shape [n_series, ]
'''
if test_size <= 0. or test_size >= 1.:
raise ValueError("temporal_split: test_size must be >= 0.0 and <= 1.0"
" (was %.1f)" %test_size)
Ns = len(y) # number of series
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
train_size = 1. - test_size
train_ind = [np.arange(0, int(train_size * len(Xt[i]))) for i in range(Ns)]
test_ind = [np.arange(len(train_ind[i]), len(Xt[i])) for i in range(Ns)]
X_train = [Xt[i][train_ind[i]] for i in range(Ns)]
X_test = [Xt[i][test_ind[i]] for i in range(Ns)]
if Xc is not None:
X_train = TS_Data(X_train, Xc)
X_test = TS_Data(X_test, Xc)
if len(np.atleast_1d(y[0])) == len(Xt[0]):
# y is a time series
y_train = [y[i][train_ind[i]] for i in range(Ns)]
y_test = [y[i][test_ind[i]] for i in range(Ns)]
else:
# y is contextual
y_train = y
y_test = y
return X_train, X_test, y_train, y_test | 0.001228 |
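A short usage sketch (series lengths and targets are illustrative; assumes seglearn-style inputs):

```python
import numpy as np

# three series of different lengths, one contextual target per series
X = [np.arange(10), np.arange(8), np.arange(12)]
y = np.array([0, 1, 0])
X_train, X_test, y_train, y_test = temporal_split(X, y, test_size=0.25)
# the last quarter of each series lands in X_test; y is contextual, so it is passed through unchanged
```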
def _pos_nt(pr, pos, stranded=False):
"""
Given a pileup read and a position, return the base that is covered by the
read at the given position if the position is covered.
Parameters
----------
pr : pysam.calignmentfile.PileupRead
Region of type chrom:start-end, chrom:start-end:strand, or [chrom,
pos : int
Zero-based position of the nucleotide of interest in genomic
coordinates.
stranded : boolean
Boolean indicating whether data is stranded and stranded nucleotide
should be returned. Assumes R1 read on reverse strand implies + strand
coverage etc.
Returns
-------
nt : str or None
If None, then the read did not cover the position. If not None, returns
the nucleotide at that position (with + or - appended to indicate strand
if desired).
"""
nt = None
bases = dict(zip(pr.alignment.get_reference_positions(),
list(pr.alignment.seq.upper())))
if pos in bases.keys():
nt = bases[pos]
if nt and stranded:
strand = None
if pr.alignment.is_read1 and pr.alignment.is_reverse:
strand = '+'
if pr.alignment.is_read2 and not pr.alignment.is_reverse:
strand = '+'
if pr.alignment.is_read1 and not pr.alignment.is_reverse:
nt = str(Seq(nt).reverse_complement())
strand = '-'
if pr.alignment.is_read2 and pr.alignment.is_reverse:
nt = str(Seq(nt).reverse_complement())
strand = '-'
nt = '{}{}'.format(nt, strand)
return nt | 0.002475 |
def fullpath(self):
"""
Full path to the Mackup configuration files.
The full path to the directory when Mackup is storing the configuration
files.
Returns:
str
"""
return str(os.path.join(self.path, self.directory)) | 0.006993 |
def _pre_index_check(handler, host=None, core_name=None):
'''
PRIVATE METHOD - MASTER CALL
Does a pre-check to make sure that all the options are set and that
we can talk to solr before trying to send a command to solr. This
Command should only be issued to masters.
handler : str
The import handler to check the state of
host : str (None):
The solr host to query. __opts__['host'] is default
core_name (None):
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
REQUIRED if you are using cores.
Return: dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
'''
# make sure that it's a master minion
if _get_none_or_value(host) is None and not _is_master():
err = [
'solr.pre_indexing_check can only be called by "master" minions']
return _get_return_dict(False, err)
# solr can run out of memory quickly if the dih is processing multiple
# handlers at the same time, so if it's a multicore setup require a
# core_name param.
if _get_none_or_value(core_name) is None and _check_for_cores():
errors = ['solr.full_import is not safe to run against multiple handlers at once']
return _get_return_dict(False, errors=errors)
# check to make sure that we're not already indexing
resp = import_status(handler, host, core_name)
if resp['success']:
status = resp['data']['status']
if status == 'busy':
warn = ['An indexing process is already running.']
return _get_return_dict(True, warnings=warn)
if status != 'idle':
errors = ['Unknown status: "{0}"'.format(status)]
return _get_return_dict(False, data=resp['data'], errors=errors)
else:
errors = ['Status check failed. Response details: {0}'.format(resp)]
return _get_return_dict(False, data=resp['data'], errors=errors)
return resp | 0.000493 |
def show_metrics(metrics, all_branches=False, all_tags=False):
"""
Args:
metrics (list): Where each element is either a `list`
if an xpath was specified, otherwise a `str`
"""
for branch, val in metrics.items():
if all_branches or all_tags:
logger.info("{branch}:".format(branch=branch))
for fname, metric in val.items():
lines = metric if type(metric) is list else metric.splitlines()
if len(lines) > 1:
logger.info("\t{fname}:".format(fname=fname))
for line in lines:
logger.info("\t\t{content}".format(content=line))
else:
logger.info("\t{}: {}".format(fname, metric)) | 0.001346 |
def check_conflicts(self):
"""Checks for any conflicts between modules configured to be built.
"""
shutit_global.shutit_global_object.yield_to_draw()
cfg = self.cfg
# Now consider conflicts
self.log('PHASE: conflicts', level=logging.DEBUG)
errs = []
self.pause_point('\nNow checking for conflicts between modules', print_input=False, level=3)
for module_id in self.module_ids():
if not cfg[module_id]['shutit.core.module.build']:
continue
conflicter = self.shutit_map[module_id]
for conflictee in conflicter.conflicts_with:
# If the module id isn't there, there's no problem.
conflictee_obj = self.shutit_map.get(conflictee)
if conflictee_obj is None:
continue
if ((cfg[conflicter.module_id]['shutit.core.module.build'] or
self.is_to_be_built_or_is_installed(conflicter)) and
(cfg[conflictee_obj.module_id]['shutit.core.module.build'] or
self.is_to_be_built_or_is_installed(conflictee_obj))):
errs.append(('conflicter module id: ' + conflicter.module_id + ' is configured to be built or is already built but conflicts with module_id: ' + conflictee_obj.module_id,))
return errs | 0.025818 |
async def clean_up_clients_async(self):
"""
Resets the pump swallows all exceptions.
"""
if self.partition_receiver:
if self.eh_client:
await self.eh_client.stop_async()
self.partition_receiver = None
self.partition_receive_handler = None
self.eh_client = None | 0.005435 |
def execute(self, commands, pretty_format=False):
"""
Parse and run a DQL string
Parameters
----------
commands : str
The DQL command string
pretty_format : bool
Pretty-format the return value. (e.g. 4 -> 'Updated 4 items')
"""
tree = parser.parseString(commands)
self.consumed_capacities = []
self._analyzing = False
self._query_rate_limit = None
for statement in tree:
try:
result = self._run(statement)
except ExplainSignal:
return self._format_explain()
if pretty_format:
return self._pretty_format(tree[-1], result)
return result | 0.002703 |
def stop(self, timeout=None, force=False):
"""Stop the worker thread and synchronously wait for it to finish.
Args:
timeout (float): The maximum time to wait for the thread to stop
before raising a TimeoutExpiredError. If force is True, TimeoutExpiredError
is not raised and the thread is just marked as a daemon thread
so that it does not block cleanly exiting the process.
force (bool): If true and the thread does not exit in timeout seconds
no error is raised since the thread is marked as daemon and will
be killed when the process exits.
"""
self.signal_stop()
self.wait_stopped(timeout, force) | 0.006693 |
def add_download_task(self, source_url, remote_path, selected_idx=(), **kwargs):
"""
:param source_url: URL of the offline-download target
:param remote_path: directory on Baidu Cloud to save into; note it must start with /
:param selected_idx: for BT or magnet-link downloads, the indices to download; leave empty for all
Add an offline download task; all types supported by Baidu Cloud are accepted
"""
if source_url.startswith('magnet:?'):
print('Magnet: "%s"' % source_url)
return self.add_magnet_task(source_url, remote_path, selected_idx, **kwargs)
elif source_url.endswith('.torrent'):
print('BitTorrent: "%s"' % source_url)
return self.add_torrent_task(source_url, remote_path, selected_idx, **kwargs)
else:
print('Others: "%s"' % source_url)
data = {
'method': 'add_task',
'source_url': source_url,
'save_path': remote_path,
}
url = 'http://{0}/rest/2.0/services/cloud_dl'.format(BAIDUPAN_SERVER)
return self._request('services/cloud_dl', 'add_task', url=url,
data=data, **kwargs) | 0.005535 |
def not_right(self, num):
"""
WITH SLICES BEING FLAT, WE NEED A SIMPLE WAY TO SLICE FROM THE LEFT [:-num:]
"""
if num is None:
return FlatList([_get_list(self)[:-1:]])
if num <= 0:
return FlatList.EMPTY
return FlatList(_get_list(self)[:-num:]) | 0.012658 |
def get_asset_form_for_create(self, asset_record_types):
"""Gets the asset form for creating new assets.
A new form should be requested for each create transaction.
arg: asset_record_types (osid.type.Type[]): array of asset
record types
return: (osid.repository.AssetForm) - the asset form
raise: NullArgument - ``asset_record_types`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - unable to get form for requested record
types
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.get_resource_form_for_create_template
for arg in asset_record_types:
if not isinstance(arg, ABCType):
raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')
if asset_record_types == []:
obj_form = objects.AssetForm(
repository_id=self._catalog_id,
runtime=self._runtime,
effective_agent_id=self.get_effective_agent_id(),
proxy=self._proxy)
else:
obj_form = objects.AssetForm(
repository_id=self._catalog_id,
record_types=asset_record_types,
runtime=self._runtime,
effective_agent_id=self.get_effective_agent_id(),
proxy=self._proxy)
self._forms[obj_form.get_id().get_identifier()] = not CREATED
return obj_form | 0.001797 |
def jacobian(expr, symbols):
"""
Derive a symbolic expr w.r.t. each symbol in symbols. This returns a symbolic jacobian vector.
:param expr: A sympy Expr.
:param symbols: The symbols w.r.t. which to derive.
"""
jac = []
for symbol in symbols:
# Differentiate to every param
f = sympy.diff(expr, symbol)
jac.append(f)
return jac | 0.005208 |
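A quick illustrative example (assumes sympy is importable):

```python
import sympy

x, y = sympy.symbols('x y')
jacobian(x**2 * sympy.sin(y), [x, y])  # [2*x*sin(y), x**2*cos(y)]
```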
def xyz(self):
"""
:returns: an array of shape (N, 3) with the cartesian coordinates
"""
return geo_utils.spherical_to_cartesian(
self.lons.flat, self.lats.flat, self.depths.flat) | 0.008969 |
def view_get(method_name):
"""
Creates a getter that will drop the current value,
and call the view's method with specified name
using the context's key as first argument.
@param method_name: the name of a method belonging to the view.
@type method_name: str
"""
def view_get(_value, context, **_params):
method = getattr(context["view"], method_name)
return _get(method, context["key"], (), {})
return view_get | 0.002151 |
def price_diff(self):
'Return the first-order difference of DataStruct.price'
res = self.price.groupby(level=1).apply(lambda x: x.diff(1))
res.name = 'price_diff'
return res | 0.011429 |
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts but one of the them has
# an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True) | 0.00428 |
def iter_object_acl(root):
"""Child-first discovery of ACEs for an object.
Walks the ACL graph via ``__acl_bases__`` and yields the ACEs parsed from
``__acl__`` on each object.
"""
for obj in iter_object_graph(root):
for ace in parse_acl(getattr(obj, '__acl__', ())):
yield ace | 0.003125 |
def update_caption(self, mouse):
""" 添加坐标显示 """
caption = "{} x: {}, y: {}".format(self._title, mouse.x, mouse.y)
super().set_caption(caption) | 0.011976 |
def maximization_step(num_words, stanzas, schemes, probs):
"""
Update latent variables t_table, rprobs
"""
t_table = numpy.zeros((num_words, num_words + 1))
rprobs = numpy.ones(schemes.num_schemes)
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
myprob = probs[i, scheme_index]
rprobs[scheme_index] += myprob
scheme = schemes.scheme_list[scheme_index]
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for j, word_index in enumerate(rhymelist):
t_table[word_index, -1] += myprob
for word_index2 in rhymelist[:j] + rhymelist[j + 1:]:
t_table[word_index, word_index2] += myprob
# Normalize t_table
t_table_sums = numpy.sum(t_table, axis=0)
for i, t_table_sum in enumerate(t_table_sums.tolist()):
if t_table_sum != 0:
t_table[:, i] /= t_table_sum
# Normalize rprobs
totrprob = numpy.sum(rprobs)
rprobs /= totrprob
return t_table, rprobs | 0.000855 |
def temp(dev, target):
""" Gets or sets the target temperature."""
click.echo("Current target temp: %s" % dev.target_temperature)
if target:
click.echo("Setting target temp: %s" % target)
dev.target_temperature = target | 0.004049 |
def schedule(self):
"""Initiate distribution of the test collection.
Initiate scheduling of the items across the nodes. If this gets called
again later it behaves the same as calling ``._reschedule()`` on all
nodes so that newly added nodes will start to be used.
If ``.collection_is_completed`` is True, this is called by the hook:
- ``DSession.worker_collectionfinish``.
"""
assert self.collection_is_completed
# Initial distribution already happened, reschedule on all nodes
if self.collection is not None:
for node in self.nodes:
self._reschedule(node)
return
# Check that all nodes collected the same tests
if not self._check_nodes_have_same_collection():
self.log("**Different tests collected, aborting run**")
return
# Collections are identical, create the final list of items
self.collection = list(next(iter(self.registered_collections.values())))
if not self.collection:
return
# Determine chunks of work (scopes)
for nodeid in self.collection:
scope = self._split_scope(nodeid)
work_unit = self.workqueue.setdefault(scope, default=OrderedDict())
work_unit[nodeid] = False
# Avoid having more workers than work
extra_nodes = len(self.nodes) - len(self.workqueue)
if extra_nodes > 0:
self.log("Shuting down {0} nodes".format(extra_nodes))
for _ in range(extra_nodes):
unused_node, assigned = self.assigned_work.popitem(last=True)
self.log("Shuting down unused node {0}".format(unused_node))
unused_node.shutdown()
# Assign initial workload
for node in self.nodes:
self._assign_work_unit(node)
# Ensure nodes start with at least two work units if possible (#277)
for node in self.nodes:
self._reschedule(node)
# Initial distribution sent all tests, start node shutdown
if not self.workqueue:
for node in self.nodes:
node.shutdown() | 0.001365 |
def uninstall(pkg):
'''
Uninstall the specified package.
Args:
pkg (str): The package name.
Returns:
dict: The ``result`` and ``output``.
CLI Example:
.. code-block:: bash
salt '*' flatpak.uninstall org.gimp.GIMP
'''
ret = {'result': None, 'output': ''}
out = __salt__['cmd.run_all'](FLATPAK_BINARY_NAME + ' uninstall ' + pkg)
if out['retcode'] and out['stderr']:
ret['stderr'] = out['stderr'].strip()
ret['result'] = False
else:
ret['stdout'] = out['stdout'].strip()
ret['result'] = True
return ret | 0.001637 |
def write_yaml_report(func):
"""Decorator used in campaign node post-processing
"""
@wraps(func)
def _wrapper(*args, **kwargs):
now = datetime.datetime.now()
with Timer() as timer:
data = func(*args, **kwargs)
if isinstance(data, (SEQUENCES, types.GeneratorType)):
report = dict(children=list(map(str, data)))
elif isinstance(data, MAPPINGS):
report = data
else:
raise Exception('Unexpected data type: %s' % type(data))
report['elapsed'] = timer.elapsed
report['date'] = now.isoformat()
if "no_exec" not in kwargs and report is not None:
with open(YAML_REPORT_FILE, 'w') as ostr:
yaml.dump(report, ostr, default_flow_style=False)
return report
return _wrapper | 0.001174 |
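A hedged sketch of applying the decorator above (the decorated callable is hypothetical):

```python
@write_yaml_report
def list_children():
    # a sequence return value is stored as {'children': [...]} together with 'elapsed' and 'date'
    return ['node-a', 'node-b']
```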
def estimate_pos_and_err_parabolic(tsvals):
"""Solve for the position and uncertainty of source in one dimension
assuming that you are near the maximum and the errors are parabolic
Parameters
----------
tsvals : `~numpy.ndarray`
The TS values at the maximum TS, and for each pixel on either side
Returns
-------
The position and uncertainty of the source, in pixel units
w.r.t. the center of the maximum pixel
"""
a = tsvals[2] - tsvals[0]
bc = 2. * tsvals[1] - tsvals[0] - tsvals[2]
s = a / (2 * bc)
err = np.sqrt(2 / bc)
return s, err | 0.001631 |
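A small numeric check of the parabolic estimate (values are illustrative):

```python
import numpy as np

tsvals = np.array([4.0, 9.0, 6.0])  # TS left of, at, and right of the peak pixel
pos, err = estimate_pos_and_err_parabolic(tsvals)
# a = 6 - 4 = 2, bc = 2*9 - 4 - 6 = 8, so pos = 2 / (2*8) = 0.125 and err = sqrt(2/8) = 0.5
```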
def params_size(num_components, event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
return MixtureSameFamily.params_size(
num_components,
IndependentLogistic.params_size(event_shape, name=name),
name=name) | 0.003546 |
def perturb(model, verbose=False, steady_state=None, eigmax=1.0-1e-6,
solve_steady_state=False, order=1, details=True):
"""Compute first order approximation of optimal controls
Parameters:
-----------
model: NumericModel
Model to be solved
verbose: boolean
If True: displays number of contracting eigenvalues
steady_state: ndarray
Use supplied steady-state value to compute the approximation.
The routine doesn't check whether it is really a solution or not.
solve_steady_state: boolean
Use nonlinear solver to find the steady-state
orders: {1}
Approximation order. (Currently, only first order is supported).
Returns:
--------
TaylorExpansion:
Decision Rule for the optimal controls around the steady-state.
"""
if order > 1:
raise Exception("Not implemented.")
if steady_state is None:
steady_state = model.calibration
G_s, G_x, G_e, F_s, F_x, F_S, F_X = get_derivatives(model, steady_state=steady_state)
C, eigvals = approximate_1st_order(G_s, G_x, G_e, F_s, F_x, F_S, F_X)
m = steady_state['exogenous']
s = steady_state['states']
x = steady_state['controls']
from dolo.numeric.processes import VAR1
from dolo.numeric.processes import MvNormal
process = model.exogenous
if isinstance(process, VAR1):
C_m = C[:,:len(m)]
C_s = C[:,len(m):]
elif isinstance(process, MvNormal):
C_m = None
C_s = C
dr = BivariateTaylor(m,s,x,C_m,C_s)
if not details:
return dr
else:
return PerturbationResult(
dr,
eigvals,
True, # otherwise an Exception should have been raised already
True, # otherwise an Exception should have been raised already
True # otherwise an Exception should have been raised already
) | 0.005694 |
def delete_issue_remote_link_by_id(self, issue_key, link_id):
"""
Deletes Remote Link on Issue
:param issue_key: str
:param link_id: str
"""
url = 'rest/api/2/issue/{issue_key}/remotelink/{link_id}'.format(issue_key=issue_key, link_id=link_id)
return self.delete(url) | 0.009288 |
def setup(self):
""" performs data collection for qpid dispatch router """
options = ""
if self.get_option("port"):
options = (options + " -b " + gethostname() +
":%s" % (self.get_option("port")))
# gethostname() is due to DISPATCH-156
# for either present option, add --option=value to 'options' variable
for option in ["ssl-certificate", "ssl-key", "ssl-trustfile"]:
if self.get_option(option):
options = (options + " --%s=" % (option) +
self.get_option(option))
self.add_cmd_output([
"qdstat -a" + options, # Show Router Addresses
"qdstat -n" + options, # Show Router Nodes
"qdstat -c" + options, # Show Connections
"qdstat -m" + options # Show Broker Memory Stats
])
self.add_copy_spec([
"/etc/qpid-dispatch/qdrouterd.conf"
]) | 0.002066 |
def savebinary(fname, X, savecoloring=True):
"""
Save a tabarray to a numpy binary file or archive.
Save a tabarray to a numpy binary file (``.npy``) or archive
(``.npz``) that can be loaded by :func:`tabular.io.savebinary`.
The ``.npz`` file is a zipped archive created using
:func:`numpy.savez` and containing one or more ``.npy`` files,
which are NumPy binary files created by :func:`numpy.save`.
**Parameters**
**fname** : string or file-like object
File name or open numpy binary file (``.npy``) or archive (``.npz``)
created by :func:`tabular.io.savebinary`.
**X** : tabarray
The actual data in a :class:`tabular.tab.tabarray`:
* if `fname` is a ``.npy`` file, then this is the same as::
numpy.savez(fname, data=X)
* otherwise, if `fname` is a ``.npz`` file, then `X` is zipped
inside of `fname` as ``data.npy``
**savecoloring** : boolean
Whether or not to save the `coloring` attribute of `X`. If
`savecoloring` is `True`, then `fname` must be a ``.npz`` archive
and `X.coloring` is zipped inside of `fname` as ``coloring.npy``
See :func:`tabular.tab.tabarray.__new__` for more information about
coloring.
**See Also:**
:func:`tabular.io.loadbinary`, :func:`numpy.load`,
:func:`numpy.save`, :func:`numpy.savez`
"""
if fname[-4:] == '.npy':
np.save(fname, X)
else:
if savecoloring is True:
np.savez(fname, data=X, coloring=X.coloring)
else:
np.savez(fname, data=X) | 0.005319 |
def transform_multiple(sequence, transformations, iterations):
"""Chains a transformation a given number of times.
Args:
sequence (str): a string or generator onto which transformations are applied
transformations (dict): a dictionary mapping each char to the string that is
substituted for it when the rule is applied
iterations (int): how many times to repeat the transformation
Yields:
str: the next character in the output sequence.
"""
for _ in range(iterations):
sequence = transform_sequence(sequence, transformations)
return sequence | 0.004847 |
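A hedged usage sketch, assuming transform_sequence rewrites the sequence character by character (an L-system style expansion):

```python
rules = {'A': 'AB', 'B': 'A'}
result = ''.join(transform_multiple('A', rules, 3))
# 'A' -> 'AB' -> 'ABA' -> 'ABAAB'
```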
def register_actions(self, shortcut_manager):
"""Register callback methods fot triggered actions.
:param rafcon.gui.shortcut_manager.ShortcutManager shortcut_manager: Shortcut Manager Object holding mappings
between shortcuts and actions.
"""
shortcut_manager.add_callback_for_action('close', self.on_close_shortcut)
# Call register_action of parent in order to register actions for child controllers
super(StateMachinesEditorController, self).register_actions(shortcut_manager) | 0.011111 |
def get_entity(self, table_name, partition_key, row_key, select=None,
accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
property_resolver=None, timeout=None):
'''
Get an entity from the specified table. Throws if the entity does not exist.
:param str table_name:
The name of the table to get the entity from.
:param str partition_key:
The PartitionKey of the entity.
:param str row_key:
The RowKey of the entity.
:param str select:
Returns only the desired properties of an entity from the set.
:param str accept:
Specifies the accepted content type of the response payload. See
:class:`~azure.storage.table.models.TablePayloadFormat` for possible
values.
:param property_resolver:
A function which given the partition key, row key, property name,
property value, and the property EdmType if returned by the service,
returns the EdmType of the property. Generally used if accept is set
to JSON_NO_METADATA.
:type property_resolver: func(pk, rk, prop_name, prop_value, service_edm_type)
:param int timeout:
The server timeout, expressed in seconds.
:return: The retrieved entity.
:rtype: :class:`~azure.storage.table.models.Entity`
'''
_validate_not_none('table_name', table_name)
request = _get_entity(partition_key, row_key, select, accept)
request.host_locations = self._get_host_locations(secondary=True)
request.path = _get_entity_path(table_name, partition_key, row_key)
request.query['timeout'] = _int_to_str(timeout)
return self._perform_request(request, _convert_json_response_to_entity,
[property_resolver, self.require_encryption,
self.key_encryption_key, self.key_resolver_function]) | 0.007964 |
def new(self, key=None, data=None, content_type='application/json',
encoded_data=None):
"""A shortcut for manually instantiating a new
:class:`~riak.riak_object.RiakObject` or a new
:class:`~riak.datatypes.Datatype`, based on the presence and value
of the :attr:`datatype <BucketType.datatype>` bucket property. When
the bucket contains a :class:`~riak.datatypes.Datatype`, all
arguments are ignored except ``key``, otherwise they are used to
initialize the :class:`~riak.riak_object.RiakObject`.
:param key: Name of the key. Leaving this to be None (default)
will make Riak generate the key on store.
:type key: str
:param data: The data to store in a
:class:`~riak.riak_object.RiakObject`, see
:attr:`RiakObject.data <riak.riak_object.RiakObject.data>`.
:type data: object
:param content_type: The media type of the data stored in the
:class:`~riak.riak_object.RiakObject`, see
:attr:`RiakObject.content_type
<riak.riak_object.RiakObject.content_type>`.
:type content_type: str
:param encoded_data: The encoded data to store in a
:class:`~riak.riak_object.RiakObject`, see
:attr:`RiakObject.encoded_data
<riak.riak_object.RiakObject.encoded_data>`.
:type encoded_data: str
:rtype: :class:`~riak.riak_object.RiakObject` or
:class:`~riak.datatypes.Datatype`
"""
from riak import RiakObject
if self.bucket_type.datatype:
return TYPES[self.bucket_type.datatype](bucket=self, key=key)
if PY2:
try:
if isinstance(data, string_types):
data = data.encode('ascii')
except UnicodeError:
raise TypeError('Unicode data values are not supported.')
obj = RiakObject(self._client, self, key)
obj.content_type = content_type
if data is not None:
obj.data = data
if encoded_data is not None:
obj.encoded_data = encoded_data
return obj | 0.001382 |
def _valid_config(self, settings):
"""Scan through the returned settings to ensure they appear sane.
There are time when the returned buffer has the proper information, but
the reading is inaccurate. When this happens, temperatures will swing
or system values will be set to improper values.
:param settings: Configuration derived from the buffer
:type settings: dict
:returns: bool
"""
if ((int(settings['environment_temp']) > self.MAX_BOUND_TEMP or
int(settings['environment_temp']) < self.MIN_BOUND_TEMP) or
(int(settings['bean_temp']) > self.MAX_BOUND_TEMP or
int(settings['bean_temp']) < self.MIN_BOUND_TEMP)):
self._log.error('Temperatures are outside of bounds')
return False
binary = ['drum_motor', 'chaff_tray', 'solenoid', 'cooling_motor']
for item in binary:
if int(settings.get(item)) not in [0, 1]:
self._log.error('Settings show invalid values')
return False
return True | 0.001828 |
def last_location_of_minimum(x):
"""
Returns the last location of the minimal value of x.
The position is calculated relatively to the length of x.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
"""
x = np.asarray(x)
return 1.0 - np.argmin(x[::-1]) / len(x) if len(x) > 0 else np.NaN | 0.002445 |
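A short worked example:

```python
x = [3, 0, 2, 0, 5]
last_location_of_minimum(x)  # last 0 is at index 3: 1 - argmin(x[::-1]) / len(x) = 1 - 1/5 = 0.8
```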
def _run_macro(self, statement: Statement) -> bool:
"""
Resolve a macro and run the resulting string
:param statement: the parsed statement from the command line
:return: a flag indicating whether the interpretation of commands should stop
"""
from itertools import islice
if statement.command not in self.macros.keys():
raise KeyError('{} is not a macro'.format(statement.command))
macro = self.macros[statement.command]
# Make sure enough arguments were passed in
if len(statement.arg_list) < macro.minimum_arg_count:
self.perror("The macro '{}' expects at least {} argument(s)".format(statement.command,
macro.minimum_arg_count),
traceback_war=False)
return False
# Resolve the arguments in reverse and read their values from statement.argv since those
# are unquoted. Macro args should have been quoted when the macro was created.
resolved = macro.value
reverse_arg_list = sorted(macro.arg_list, key=lambda ma: ma.start_index, reverse=True)
for arg in reverse_arg_list:
if arg.is_escaped:
to_replace = '{{' + arg.number_str + '}}'
replacement = '{' + arg.number_str + '}'
else:
to_replace = '{' + arg.number_str + '}'
replacement = statement.argv[int(arg.number_str)]
parts = resolved.rsplit(to_replace, maxsplit=1)
resolved = parts[0] + replacement + parts[1]
# Append extra arguments and use statement.arg_list since these arguments need their quotes preserved
for arg in islice(statement.arg_list, macro.minimum_arg_count, None):
resolved += ' ' + arg
# Run the resolved command
return self.onecmd_plus_hooks(resolved) | 0.004615 |
def git_str(self):
"""
If the distribution is not installed via git, return an empty string.
If the distribution is installed via git and pip recognizes the git
source, return the pip requirement string specifying the git URL and
commit, with an '*' appended if :py:attr:`~.git_is_dirty` is True.
Otherwise, return a string of the form:
url@ref[*]
Where URL is the remote URL, ref is the tag name if the repo is checked
out to a commit that matches a tag or else the commit hex SHA, and '*'
is appended if :py:attr:`~.git_is_dirty` is True.
:return: description of the git repo remote and state
:rtype: str
"""
dirty = '*' if self._git_is_dirty else ''
if 'git' in self._pip_requirement:
return self._pip_requirement + dirty
if self._git_commit is None and self._git_remotes is None:
return ''
ref = self._git_tag if self._git_tag is not None else self._git_commit
return '%s@%s%s' % (self.git_remote, ref, dirty) | 0.001835 |
def consolidate(self,
volume: float,
source: List[Well],
dest: Well,
*args, **kwargs) -> 'InstrumentContext':
"""
Move liquid from multiple wells (sources) to a single well(destination)
:param volume: The amount of volume to consolidate from each source
well.
:param source: List of wells from where liquid will be aspirated.
:param dest: The single well into which liquid will be dispensed.
:param kwargs: See :py:meth:`transfer`.
:returns: This instance
"""
self._log.debug("Consolidate {} from {} to {}"
.format(volume, source, dest))
kwargs['mode'] = 'consolidate'
return self.transfer(volume, source, dest, **kwargs) | 0.007168 |
def get_albums_for_artist(self, artist, full_album_art_uri=False):
"""Get an artist's albums.
Args:
artist (str): an artist's name.
full_album_art_uri: whether the album art URI should be
absolute (i.e. including the IP address). Default `False`.
Returns:
A `SearchResult` instance.
"""
subcategories = [artist]
result = self.get_album_artists(
full_album_art_uri=full_album_art_uri,
subcategories=subcategories,
complete_result=True)
reduced = [item for item in result if item.__class__ == DidlMusicAlbum]
# It is necessary to update the list of items in two places, due to
# a bug in SearchResult
result[:] = reduced
result._metadata.update({
'item_list': reduced,
'search_type': 'albums_for_artist',
'number_returned': len(reduced),
'total_matches': len(reduced)
})
return result | 0.001951 |
def reduce_activities(stmts_in, **kwargs):
"""Reduce the activity types in a list of statements
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to reduce activity types in.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of reduced activity statements.
"""
logger.info('Reducing activities on %d statements...' % len(stmts_in))
stmts_out = [deepcopy(st) for st in stmts_in]
ml = MechLinker(stmts_out)
ml.gather_explicit_activities()
ml.reduce_activities()
stmts_out = ml.statements
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | 0.001208 |
def get(self, table='', start=0, limit=0, order=None, where=None):
"""
Get a list of stat items
:param table: str database table name
:param start: int
:param limit: int
:param order: list|tuple
:param where: list|tuple
:return:
"""
parameters = {}
args = ['start', 'limit', 'order', 'where']
for arg in args:
if arg in locals() and locals()[arg]:
parameters[arg] = locals()[arg]
response = self._client.session.get(
'{url}/{table}'.format(
url=self.endpoint_url, table=table
),
params=parameters
)
return self.process_response(response) | 0.002703 |
def cmd_time(self, args):
'''show autopilot time'''
tusec = self.master.field('SYSTEM_TIME', 'time_unix_usec', 0)
if tusec == 0:
print("No SYSTEM_TIME time available")
return
print("%s (%s)\n" % (time.ctime(tusec * 1.0e-6), time.ctime())) | 0.006803 |
def create_fork(self, repo):
"""
:calls: `POST /repos/:owner/:repo/forks <http://developer.github.com/v3/repos/forks>`_
:param repo: :class:`github.Repository.Repository`
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(repo, github.Repository.Repository), repo
url_parameters = {
"org": self.login,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/repos/" + repo.owner.login + "/" + repo.name + "/forks",
parameters=url_parameters
)
return github.Repository.Repository(self._requester, headers, data, completed=True) | 0.005814 |
def listing(
source: list,
ordered: bool = False,
expand_full: bool = False
):
"""
An unordered or ordered list of the specified *source* iterable where
each element is converted to a string representation for display.
:param source:
The iterable to display as a list.
:param ordered:
Whether or not the list should be ordered. If False, which is the
default, an unordered bulleted list is created.
:param expand_full:
Whether or not the list should expand to fill the screen horizontally.
When defaulted to False, the list is constrained to the center view
area of the screen along with other text. This can be useful to keep
lists aligned with the text flow.
"""
r = _get_report()
r.append_body(render.listing(
source=source,
ordered=ordered,
expand_full=expand_full
))
r.stdout_interceptor.write_source('[ADDED] Listing\n') | 0.001026 |
def mixpanel(parser, token):
"""
Mixpanel tracking template tag.
Renders Javascript code to track page visits. You must supply
your Mixpanel token in the ``MIXPANEL_API_TOKEN`` setting.
"""
bits = token.split_contents()
if len(bits) > 1:
raise TemplateSyntaxError("'%s' takes no arguments" % bits[0])
return MixpanelNode() | 0.002747 |
def set_opcode(self, opcode):
"""Set the opcode.
@param opcode: the opcode
@type opcode: int
"""
self.flags &= 0x87FF
self.flags |= dns.opcode.to_flags(opcode) | 0.009662 |
def stop(self):
"""Stops all session activity.
Blocks until io and writer thread dies
"""
if self._io_thread is not None:
self.log.info("Waiting for I/O thread to stop...")
self.closed = True
self._io_thread.join()
if self._writer_thread is not None:
self.log.info("Waiting for Writer Thread to stop...")
self.closed = True
self._writer_thread.join()
self.log.info("All worker threads stopped.") | 0.003861 |
def get_ip(source='aws'):
''' a method to get current public ip address of machine '''
if source == 'aws':
source_url = 'http://checkip.amazonaws.com/'
else:
raise Exception('get_ip currently only supports queries to aws')
import requests
try:
response = requests.get(url=source_url)
except Exception as err:
from labpack.handlers.requests import handle_requests
from requests import Request
request_object = Request(method='GET', url=source_url)
request_details = handle_requests(request_object)
raise Exception(request_details['error'])
current_ip = response.content.decode()
current_ip = current_ip.strip()
return current_ip | 0.006693 |
def add_predicate(self, name: str, function: Callable, side_arguments: List[str] = None):
"""
Adds a predicate to this domain language. Typically you do this with the ``@predicate``
decorator on the methods in your class. But, if you need to for whatever reason, you can
also call this function yourself with a (type-annotated) function to add it to your
language.
Parameters
----------
name : ``str``
The name that we will use in the induced language for this function.
function : ``Callable``
The function that gets called when executing a predicate with the given name.
side_arguments : ``List[str]``, optional
If given, we will ignore these arguments for the purposes of grammar induction. This
is to allow passing extra arguments from the decoder state that are not explicitly part
of the language the decoder produces, such as the decoder's attention over the question
when a terminal was predicted. If you use this functionality, you also `must` use
``language.execute_action_sequence()`` instead of ``language.execute()``, and you must
pass the additional side arguments needed to that function. See
:func:`execute_action_sequence` for more information.
"""
side_arguments = side_arguments or []
signature = inspect.signature(function)
argument_types = [param.annotation for name, param in signature.parameters.items()
if name not in side_arguments]
return_type = signature.return_annotation
argument_nltk_types: List[PredicateType] = [PredicateType.get_type(arg_type)
for arg_type in argument_types]
return_nltk_type = PredicateType.get_type(return_type)
function_nltk_type = PredicateType.get_function_type(argument_nltk_types, return_nltk_type)
self._functions[name] = function
self._function_types[name].append(function_nltk_type) | 0.008118 |
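A hand-registration sketch following the docstring above; `language` is assumed to be an existing DomainLanguage instance and `multiply` is a hypothetical predicate.
def multiply(x: int, y: int) -> int:
    return x * y

# Register the type-annotated function, then execute a logical form that uses it.
language.add_predicate('multiply', multiply)
print(language.execute('(multiply 2 3)'))   # expected: 6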
def filter_requires_grad(pgroups):
"""Returns parameter groups where parameters
that don't require a gradient are filtered out.
Parameters
----------
pgroups : dict
Parameter groups to be filtered
"""
warnings.warn(
"For filtering gradients, please use skorch.callbacks.Freezer.",
DeprecationWarning)
for pgroup in pgroups:
output = {k: v for k, v in pgroup.items() if k != 'params'}
output['params'] = (p for p in pgroup['params'] if p.requires_grad)
yield output | 0.001835 |
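A sketch assuming PyTorch: freeze one tensor, then build an optimizer only over the parameters that still require gradients (the deprecation warning above points to skorch.callbacks.Freezer as the preferred route).
import torch
from torch import nn

model = nn.Linear(10, 2)
model.weight.requires_grad_(False)   # freeze the weight, keep the bias trainable

pgroups = [{'params': model.parameters(), 'lr': 1e-3}]
optimizer = torch.optim.SGD(list(filter_requires_grad(pgroups)), lr=1e-3)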
def ws_url(self):
"""websocket url matching the current request
turns http[s]://host[:port] into
ws[s]://host[:port]
"""
proto = self.request.protocol.replace('http', 'ws')
host = self.application.ipython_app.websocket_host # default to config value
if host == '':
host = self.request.host # get from request
return "%s://%s" % (proto, host) | 0.011737 |
def teardown_tempdir(dir):
r'''
Cleanup temporary directory.
'''
if ssh_conn:
delete_tree(dir)
assert_valid_dir(dir)
shutil.rmtree(dir) | 0.005917 |
def _store_status(self, section, command, name):
"""
Based on new command execution attempt, update instance's
data structures with information about the success/fail status.
Return the result of the execution test.
"""
succeeded = is_command_callable(command, name)
# Store status regardless of its value in the instance's largest DS.
self.section_to_status_by_command[section][command] = succeeded
if not succeeded:
# Only update the failure-specific structures conditionally.
self.failures_by_section[section].append(command)
self.failures.add(command)
return succeeded | 0.002907 |
def add_team_member(name, team_name, profile="github"):
'''
Adds a team member to a team with team_name.
name
The name of the team member to add.
team_name
The name of the team of which to add the user.
profile
The name of the profile configuration to use. Defaults to ``github``.
CLI Example:
.. code-block:: bash
salt myminion github.add_team_member 'user_name' 'team_name'
.. versionadded:: 2016.11.0
'''
team = get_team(team_name, profile=profile)
if not team:
log.error('Team %s does not exist', team_name)
return False
try:
client = _get_client(profile)
organization = client.get_organization(
_get_config_value(profile, 'org_name')
)
team = organization.get_team(team['id'])
member = client.get_user(name)
except UnknownObjectException:
log.exception('Resource not found: %s', team['id'])
return False
try:
# Can't use team.add_membership due to this bug that hasn't made it into
# a PyGithub release yet https://github.com/PyGithub/PyGithub/issues/363
headers, data = team._requester.requestJsonAndCheck(
"PUT",
team.url + "/memberships/" + member._identity,
input={'role': 'member'},
parameters={'role': 'member'}
)
except github.GithubException:
log.exception('Error in adding a member to a team')
return False
return True | 0.001979 |
def _manage_http_servers(self):
"""
Compares the running services with the current settings configuration
and adjusts the running services to match it if different.
        The services are identified only by their position in the server list,
        so a change that only involves a position move will cause a shuffle
in multiple services. They don't change often so this isn't much
of a problem.
The method attempts to create all services. If any service
creation fails, the slot is set to None, the error is logged and
        self._http_retry is set to the time when another attempt should be made.
The method should be called whenever self._http_retry has expired.
The presumption is that the error was transient (already in use, etc)
and a retry might bring the service up.
"""
log = self._params.get('log', self._discard)
if not self._config_running:
raise Exception('Attempt to create HTTP services before config loaded')
conf = self._config_running
need = self._get_http_services(conf['settings']['http']
if 'settings' in conf and 'http' in conf['settings'] else [])
# If the service count has changed, close all servers and rebuild from scratch.
#
if len(self._http_servers) != len(need):
log.info("HTTP services count changed from %d to %d, reconfiguring all services",
len(self._http_servers), len(need))
pos = 0
for server in self._http_servers:
if server:
if self._pset:
try: self._pset.unregister(server)
except: pass
try: server.close()
except: pass
log.debug("Slot %d service closed", pos)
pos += 1
self._http_servers = []
self._http_retry = None
for pos in range(len(need)):
if len(self._http_servers) > pos:
if self._http_servers[pos]:
if need[pos].cmp(self._http_servers[pos]._http_service):
log.debug("No change in service slot %d: %s", pos, need[pos])
continue
else:
log.debug("Slot %d service changing from %s", pos, self._http_servers[pos]._http_service)
if self._pset:
try: self._pset.unregister(self._http_servers[pos])
except: pass
try: self._http_servers[pos].close()
except: pass
self._http_servers[pos] = None
else:
log.debug("No existing service in slot %d", pos)
else:
log.debug("Increasing services list for slot %d", pos)
self._http_servers.append(None)
# At this point the service slot exists and is empty. We'll attempt to fill it.
try:
server = httpd.server(need[pos], log=log)
# Add our own attribute to retain the service information
#
server._http_service = need[pos]
manage.http(self, server, log=log)
status.http(self, server, log=log)
if self._pset:
self._pset.register(server, poll.POLLIN)
self._http_servers[pos] = server
log.info("Slot %d service is now %s", pos, server._http_service)
except Exception as e:
log.error("Failed to create server slot %d on %s -- %s", pos, need[pos], e)
if not self._http_retry:
self._http_retry = time.time() + service_retry_limit | 0.006472 |
def _geom_points(geom):
"""GeoJSON geometry to a sequence of point tuples
"""
if geom['type'] == 'Point':
yield tuple(geom['coordinates'])
elif geom['type'] in ('MultiPoint', 'LineString'):
for position in geom['coordinates']:
yield tuple(position)
else:
raise InvalidFeatureError(
"Unsupported geometry type:{0}".format(geom['type'])) | 0.002481 |
async def create_vm(self, preset_name: str, image: str, flavor: str, security_groups: List=None,
userdata: Dict=None, key_name: str=None, availability_zone: str=None,
subnets: List=None) -> Any:
"""
Create (boot) a new server.
:arg string preset_name: Name of vm group where vm is created.
:arg string image: Image name.
:arg string flavor: Flavor (or instance_type in AWS) name.
:arg list security_groups: A list of security group names.
:arg dict userdata: A dict of arbitrary key/value metadata to store in grains.
:arg string key_name: (optional extension) name of previously created
keypair to inject into the instance.
:arg string availability_zone: Name of the availability zone for instance
placement.
:arg string subnets: List of the subnets for instance placement.
        Returns the created vm_id.
"""
raise NotImplementedError | 0.017408 |
def _create_dom(data):
"""
    Creates a double-linked DOM from `data`.
Args:
data (str/HTMLElement): Either string or HTML element.
Returns:
obj: HTMLElement containing double linked DOM.
"""
if not isinstance(data, dhtmlparser.HTMLElement):
data = dhtmlparser.parseString(
utils.handle_encodnig(data)
)
dhtmlparser.makeDoubleLinked(data)
return data | 0.002358 |
def parse_metadata(self):
"""Parse the INDEX_JSON file and reorganize it as a dictionary of lists."""
all_models = defaultdict(list)
with open(self.metadata_index_json) as f:
loaded = json.load(f)
for m in loaded['index']:
all_models[m['uniprot_ac']].append(m)
self.all_models = dict(all_models) | 0.008264 |
def configure_upload(graph, ns, mappings, exclude_func=None):
"""
Register Upload endpoints for a resource object.
"""
convention = UploadConvention(graph, exclude_func)
convention.configure(ns, mappings) | 0.004444 |
def _get_selected_item(self):
"""The currently selected item"""
selection = self.get_selection()
if selection.get_mode() != gtk.SELECTION_SINGLE:
raise AttributeError('selected_item not valid for select_multiple')
model, selected = selection.get_selected()
if selected is not None:
return self._object_at_sort_iter(selected) | 0.005155 |
def _empty_except_predicates(xast, node, context):
'''Check if a node is empty (no child nodes or attributes) except
for any predicates defined in the specified xpath.
:param xast: parsed xpath (xpath abstract syntax tree) from
:mod:`eulxml.xpath`
:param node: lxml element to check
:param context: any context required for the xpath (e.g.,
namespace definitions)
:returns: boolean indicating if the element is empty or not
'''
# copy the node, remove predicates, and check for any remaining
# child nodes or attributes
node_c = deepcopy(node)
_remove_predicates(xast, node_c, context)
return bool(len(node_c) == 0 and len(node_c.attrib) == 0) | 0.008584 |
def client(self, name=None):
"""Initialize a backend's client with given name or default."""
name = name or self.default
if not name:
return NullClient(self, None, None)
params = self.backends_hash[name]
ccls = self.backends_schemas.get(params.scheme, TCPClient)
return (yield from ccls(self, params.hostname, params.port).connect()) | 0.005089 |
def template_request(configuration, spec):
"""
Calls the get template request
:param configuration:
:param spec:
:return:
"""
# Template request, nonce will be regenerated.
req = CreateUO.get_template_request(configuration, spec)
# Do the request with retry.
caller = RequestCall(req)
resp = caller.call()
return resp | 0.004819 |
def gen_challenge(self, state):
"""This function generates a challenge for given state. It selects a
random number and sets that as the challenge key. By default, v_max
is set to the prime, and the number of chunks to challenge is the
number of chunks in the file. (this doesn't guarantee that the whole
file will be checked since some chunks could be selected twice and
        some not selected at all.)
        :param state: the state to use. It can be encrypted, as it will
have just been received from the server
"""
state.decrypt(self.key)
chal = Challenge(state.chunks, self.prime, Random.new().read(32))
return chal | 0.002845 |
def current(place):
"""return data as list of dicts with all data filled in."""
lat, lon = place
url = "https://api.forecast.io/forecast/%s/%s,%s?solar" % (APIKEY, lat,
lon)
w_data = json.loads(urllib2.urlopen(url).read())
currently = w_data['currently']
return mangle(currently) | 0.002732 |
def patterson_f2(aca, acb):
"""Unbiased estimator for F2(A, B), the branch length between populations
A and B.
Parameters
----------
aca : array_like, int, shape (n_variants, 2)
Allele counts for population A.
acb : array_like, int, shape (n_variants, 2)
Allele counts for population B.
Returns
-------
f2 : ndarray, float, shape (n_variants,)
Notes
-----
See Patterson (2012), Appendix A.
"""
# check inputs
aca = AlleleCountsArray(aca, copy=False)
assert aca.shape[1] == 2, 'only biallelic variants supported'
acb = AlleleCountsArray(acb, copy=False)
assert acb.shape[1] == 2, 'only biallelic variants supported'
check_dim0_aligned(aca, acb)
# compute allele numbers
sa = aca.sum(axis=1)
sb = acb.sum(axis=1)
# compute heterozygosities
ha = h_hat(aca)
hb = h_hat(acb)
# compute sample frequencies for the alternate allele
a = aca.to_frequencies()[:, 1]
b = acb.to_frequencies()[:, 1]
# compute estimator
x = ((a - b) ** 2) - (ha / sa) - (hb / sb)
return x | 0.000903 |
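A worked sketch assuming scikit-allel, where this estimator is exposed as allel.patterson_f2: build biallelic allele counts for two small populations and compute one estimate per variant.
import allel

# Two variants, two diploid samples per population (toy data).
g_a = allel.GenotypeArray([[[0, 0], [0, 1]],
                           [[0, 1], [1, 1]]])
g_b = allel.GenotypeArray([[[0, 1], [1, 1]],
                           [[0, 0], [0, 0]]])
f2 = allel.patterson_f2(g_a.count_alleles(), g_b.count_alleles())
print(f2)   # one F2 estimate per variant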
def run_scan():
"""
show all available partitions
"""
all_disks = walk_data(sess, oid_hrStorageDescr, helper)[0]
print "All available disks at: " + host
for disk in all_disks:
print "Disk: \t'" + disk + "'"
quit() | 0.011278 |
def get_or_create(cls, mp, part_number):
"""Get or create a part."""
obj = cls.get_or_none(mp, part_number)
if obj:
return obj
return cls.create(mp, part_number) | 0.009756 |
def north_arrow_path(feature, parent):
"""Retrieve the full path of default north arrow logo."""
_ = feature, parent # NOQA
north_arrow_file = setting(inasafe_north_arrow_path['setting_key'])
if os.path.exists(north_arrow_file):
return north_arrow_file
else:
        LOGGER.info(
            'The custom north arrow is not found in {north_arrow_file}. '
            'Default north arrow will be used.'.format(
                north_arrow_file=north_arrow_file))
return inasafe_default_settings['north_arrow_path'] | 0.001828 |
def read_associations(assoc_fn, anno_type='id2gos', **kws):
"""Return associatinos in id2gos format"""
# kws get_objanno: taxids hdr_only prt allow_missing_symbol
obj = get_objanno(assoc_fn, anno_type, **kws)
# kws get_id2gos: ev_include ev_exclude keep_ND keep_NOT b_geneid2gos go2geneids
return obj.get_id2gos(**kws) | 0.005917 |
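A hedged usage sketch; 'data/associations.txt' is a hypothetical id2gos-format file (one line per gene: gene_id<TAB>GO:0000001;GO:0000002;...).
id2gos = read_associations('data/associations.txt', anno_type='id2gos')
for gene_id, go_ids in sorted(id2gos.items())[:3]:
    print(gene_id, sorted(go_ids))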
def find_magic_in_file(filename):
"""
Search the file for any magic incantations.
:param filename: file to search
:returns: a tuple containing the spell and then maybe some extra words (or None if no magic present)
"""
with open(filename, "rt", encoding="utf-8") as f:
for line in f:
if line.startswith("#"):
comment = line[1:].strip()
if comment.startswith("~~~~* ") or comment.startswith("----* ") or comment.startswith("====* "):
spell = comment[5:].strip()
return tuple(spell.split())
else:
break
return None | 0.004539 |
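A sketch showing the magic-comment format the function looks for; the file name and spell are made up.
with open('build_step.py', 'w', encoding='utf-8') as f:
    f.write("#~~~~* cast fireball --power 9\nprint('hello')\n")

print(find_magic_in_file('build_step.py'))   # ('cast', 'fireball', '--power', '9')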
def send(self, to, subject=None, body=None, reply_to=None, template=None, **kwargs):
"""
To send email
:param to: the recipients, list or string
:param subject: the subject
:param body: the body
:param reply_to: reply_to
:param template: template, will use the templates instead
:param kwargs: context args
:return: bool - True if everything is ok
"""
sender = self.config.get("MAIL_SENDER")
recipients = [to] if not isinstance(to, list) else to
kwargs.update({
"subject": subject,
"body": body,
"reply_to": reply_to
})
if not self.validated:
abort("MailmanConfigurationError")
if self.provider == "SES":
kwargs["to"] = recipients
if template:
self.mail.send_template(template=template, **kwargs)
else:
self.mail.send(**kwargs)
elif self.provider == "SMTP":
if template:
data = self._template(template=template, **kwargs)
kwargs["subject"] = data["subject"]
kwargs["body"] = data["body"]
kwargs["recipients"] = recipients
kwargs["sender"] = sender
# Remove invalid Messages keys
_safe_keys = ["recipients", "subject", "body", "html", "alts",
"cc", "bcc", "attachments", "reply_to", "sender",
"date", "charset", "extra_headers", "mail_options",
"rcpt_options"]
for k in kwargs.copy():
if k not in _safe_keys:
del kwargs[k]
message = flask_mail.Message(**kwargs)
self.mail.send(message)
else:
abort("MailmanUnknownProviderError") | 0.002682 |
def _read_data_handler(length, whence, ctx, skip=False, stream_event=ION_STREAM_INCOMPLETE_EVENT):
"""Creates a co-routine for retrieving data up to a requested size.
Args:
length (int): The minimum length requested.
whence (Coroutine): The co-routine to return to after the data is satisfied.
ctx (_HandlerContext): The context for the read.
skip (Optional[bool]): Whether the requested number of bytes should be skipped.
stream_event (Optional[IonEvent]): The stream event to return if no bytes are read or
available.
"""
trans = None
queue = ctx.queue
if length > ctx.remaining:
raise IonException('Length overrun: %d bytes, %d remaining' % (length, ctx.remaining))
# Make sure to check the queue first.
queue_len = len(queue)
if queue_len > 0:
# Any data available means we can only be incomplete.
stream_event = ION_STREAM_INCOMPLETE_EVENT
length -= queue_len
if skip:
# For skipping we need to consume any remnant in the buffer queue.
if length >= 0:
queue.skip(queue_len)
else:
queue.skip(queue_len + length)
while True:
data_event, self = (yield trans)
if data_event is not None and data_event.data is not None:
data = data_event.data
data_len = len(data)
if data_len > 0:
# We got something so we can only be incomplete.
stream_event = ION_STREAM_INCOMPLETE_EVENT
length -= data_len
if not skip:
queue.extend(data)
else:
pos_adjustment = data_len
if length < 0:
pos_adjustment += length
# More data than we need to skip, so make sure to accumulate that remnant.
queue.extend(data[length:])
queue.position += pos_adjustment
if length <= 0:
# We got all the data we need, go back immediately
yield Transition(None, whence)
trans = Transition(stream_event, self) | 0.003286 |
async def get_playback_settings(self) -> List[Setting]:
"""Get playback settings such as shuffle and repeat."""
return [
Setting.make(**x)
for x in await self.services["avContent"]["getPlaybackModeSettings"]({})
] | 0.011494 |
def from_list(cls, l):
"""Return a Point instance from a given list"""
if len(l) == 3:
x, y, z = map(float, l)
return cls(x, y, z)
elif len(l) == 2:
x, y = map(float, l)
return cls(x, y)
else:
raise AttributeError | 0.01278 |
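A quick sketch of the classmethod above; Point is assumed to accept (x, y[, z]) positionally.
p3 = Point.from_list([1, 2, 3])     # 3-D point
p2 = Point.from_list(['4', '5'])    # strings are coerced via float()
# Point.from_list([1]) would raise AttributeError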