text (stringlengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def socket_close(self):
"""Close our socket."""
if self.sock != NC.INVALID_SOCKET:
self.sock.close()
self.sock = NC.INVALID_SOCKET | 0.012048 |
def unpack(self, fmt, length=1):
"""
Unpack the stream contents according to the specified format in `fmt`.
For more information about the `fmt` format see: https://docs.python.org/3/library/struct.html
Args:
fmt (str): format string.
        length (int): number of bytes to read.
Returns:
variable: the result according to the specified format.
"""
try:
info = struct.unpack(fmt, self.stream.read(length))[0]
except struct.error as e:
raise SDKException(ErrorCode.unpack_error(e.args[0]))
return info | 0.004777 |
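A minimal usage sketch of the struct-format convention the docstring above points to, assuming a plain io.BytesIO stream in place of the reader's self.stream; the format strings and byte values are illustrative only.

import io
import struct

# hypothetical byte stream: a little-endian uint32 followed by a little-endian int16
stream = io.BytesIO(b"\x01\x00\x00\x00\xff\xff")

value_a = struct.unpack("<I", stream.read(4))[0]   # 1
value_b = struct.unpack("<h", stream.read(2))[0]   # -1
print(value_a, value_b)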
def login(self, username, password, email, url, config_path):
"""
If username and password are provided, authenticate with the registry.
Otherwise, check the config file for existing authentication data.
"""
if username and password:
try:
self.client.login(username=username, password=password, email=email,
registry=url, reauth=True)
except docker_errors.APIError as exc:
raise exceptions.AnsibleContainerConductorException(
u"Error logging into registry: {}".format(exc)
)
except Exception:
raise
self._update_config_file(username, password, email, url, config_path)
username, password = self._get_registry_auth(url, config_path)
if not username:
raise exceptions.AnsibleContainerConductorException(
u'Please provide login credentials for registry {}.'.format(url))
return username, password | 0.004748 |
def get_params(degrees, translate, scale_ranges, shears, img_size):
"""Get parameters for affine transformation
Returns:
sequence: params to be passed to the affine transformation
"""
angle = random.uniform(degrees[0], degrees[1])
if translate is not None:
max_dx = translate[0] * img_size[0]
max_dy = translate[1] * img_size[1]
translations = (np.round(random.uniform(-max_dx, max_dx)),
np.round(random.uniform(-max_dy, max_dy)))
else:
translations = (0, 0)
if scale_ranges is not None:
scale = random.uniform(scale_ranges[0], scale_ranges[1])
else:
scale = 1.0
if shears is not None:
shear = random.uniform(shears[0], shears[1])
else:
shear = 0.0
return angle, translations, scale, shear | 0.002181 |
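A hedged usage sketch of the helper above, assuming it is callable as a free function with random and numpy imported as it requires; the argument values are made up for illustration.

import random
import numpy as np

# degrees range, translate fractions, scale range, shear range, (width, height)
angle, translations, scale, shear = get_params(
    (-10, 10), (0.1, 0.1), (0.9, 1.1), (-5, 5), (224, 224))
print(angle, translations, scale, shear)
# e.g. 3.7 (-12.0, 8.0) 1.04 -2.1  (values vary per call)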
def log_error(self, error, message, detail=None, strip=4):
"Add an error message and optional user message to the error list"
if message:
msg = message + ": " + error
else:
msg = error
tb = traceback.format_stack()
if sys.version_info >= (3, 0):
tb = tb[:-strip]
else:
tb = tb[strip:]
self.errors.append({
'message': msg,
'traceback': tb,
'detail': detail
}) | 0.003937 |
def optimize(self):
"""
Do an optimization.
"""
jmodel = callJavaFunc(self.value.optimize)
from bigdl.nn.layer import Layer
return Layer.of(jmodel) | 0.010256 |
def simxGetInMessageInfo(clientID, infoType):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
info = ct.c_int()
return c_GetInMessageInfo(clientID, infoType, ct.byref(info)), info.value | 0.007968 |
def write_yum_repo(content, filename='ceph.repo'):
"""add yum repo file in /etc/yum.repos.d/"""
repo_path = os.path.join('/etc/yum.repos.d', filename)
if not isinstance(content, str):
content = content.decode('utf-8')
write_file(repo_path, content.encode('utf-8')) | 0.003472 |
def est_entropy(self):
r"""
Estimates the entropy of the current particle distribution
as :math:`-\sum_i w_i \log w_i` where :math:`\{w_i\}`
is the set of particles with nonzero weight.
"""
nz_weights = self.particle_weights[self.particle_weights > 0]
return -np.sum(np.log(nz_weights) * nz_weights) | 0.005634 |
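A small worked example of the entropy estimate above, using a standalone weight array in place of self.particle_weights.

import numpy as np

w = np.array([0.5, 0.25, 0.25, 0.0])      # the zero-weight particle is dropped
nz = w[w > 0]
entropy = -np.sum(np.log(nz) * nz)
print(entropy)                            # 1.0397..., i.e. 1.5 * ln(2)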
def memory(self):
"""Memory information in bytes
Example:
>>> print(ctx.device(0).memory())
{'total': 4238016512L, 'used': 434831360L, 'free': 3803185152L}
Returns:
total/used/free memory in bytes
"""
class GpuMemoryInfo(Structure):
_fields_ = [
('total', c_ulonglong),
('free', c_ulonglong),
('used', c_ulonglong),
]
c_memory = GpuMemoryInfo()
_check_return(_NVML.get_function(
"nvmlDeviceGetMemoryInfo")(self.hnd, byref(c_memory)))
return {'total': c_memory.total, 'free': c_memory.free, 'used': c_memory.used} | 0.004286 |
def _update_ref(self, ref, delete=False):
"""Update a reference."""
cmd = ['git', 'update-ref']
if delete:
cmd.extend(['-d', ref.refname])
action = 'deleted'
else:
cmd.extend([ref.refname, ref.hash])
action = 'updated to %s' % ref.hash
try:
self._exec(cmd, cwd=self.dirpath, env=self.gitenv)
except RepositoryError as e:
logger.warning("Git %s ref could not be %s during sync process in %s (%s); skipped",
ref.refname, action, self.uri, self.dirpath)
else:
logger.debug("Git %s ref %s in %s (%s)",
ref.refname, action, self.uri, self.dirpath) | 0.004071 |
def params(self):
"""Get the params of response data from the API.
Returns:
- d (dict): Dictionary mapping of all configuration values
"""
payload = self.payload
d = {}
for i, p in enumerate(payload["currentConfiguration"]):
type_name = p["typeName"]
cp = payload["configurationParameters"][i]["message"]
name = cp["parameterName"]
if type_name == "BTMParameterQuantity":
try:
v = q(p["message"]["expression"])
except:
v = q(p["message"]["value"], p["message"]["units"])
elif type_name == "BTMParameterBoolean":
v = p["message"]["value"]
elif type_name == "BTMParameterEnum":
enum = p["message"]["value"]
enum_map = {d['message']['option']: i for i, d in enumerate(cp['options'])}
v = cp['options'][enum_map[enum]]['message']['optionName']
d[name] = v
return d | 0.003813 |
def identify_denonavr_receivers():
"""
Identify DenonAVR using SSDP and SCPD queries.
Returns a list of dictionaries which includes all discovered Denon AVR
devices with keys "host", "modelName", "friendlyName", "presentationURL".
"""
# Sending SSDP broadcast message to get devices
devices = send_ssdp_broadcast()
# Check which responding device is a DenonAVR device and prepare output
receivers = []
for device in devices:
try:
receiver = evaluate_scpd_xml(device["URL"])
except ConnectionError:
continue
if receiver:
receivers.append(receiver)
return receivers | 0.001493 |
def solc_arguments(libraries=None, combined='bin,abi',
optimize=True, extra_args=None):
""" Build the arguments to call the solc binary. """
args = [
'--combined-json', combined,
]
def str_of(address):
"""cast address to string. py2/3 compatability. """
try:
return address.decode('utf8')
except AttributeError:
return address
if optimize:
args.append('--optimize')
if extra_args:
try:
args.extend(shlex.split(extra_args))
except BaseException: # if not a parseable string then treat it as a list
args.extend(extra_args)
if libraries is not None and len(libraries):
addresses = [
'{name}:{address}'.format(
name=name, address=str_of(address))
for name, address in libraries.items()
]
args.extend([
'--libraries',
','.join(addresses),
])
return args | 0.002982 |
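A hedged example of the argument list solc_arguments builds; the library address and the extra flag are placeholders, and the sketch assumes shlex is imported in the surrounding module as the function requires.

args = solc_arguments(
    libraries={'MyLib': '0x1111111111111111111111111111111111111111'},  # placeholder address
    combined='bin,abi',
    optimize=True,
    extra_args='--evm-version istanbul')  # placeholder flag, split by shlex
print(args)
# ['--combined-json', 'bin,abi', '--optimize', '--evm-version', 'istanbul',
#  '--libraries', 'MyLib:0x1111111111111111111111111111111111111111']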
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file, verbose_logging,
version_2_with_negative, null_score_diff_threshold):
"""Write final predictions to the json file and log-odds of null if needed."""
logger.info("Writing predictions to: %s" % (output_prediction_file))
logger.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# if we didn't include the empty option in the n-best, include it
if version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="",
start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could only have single null prediction.
# So we just create a nonce prediction in this case to avoid failure.
if len(nbest)==1:
nbest.insert(0,
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with open(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
with open(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n") | 0.002503 |
def summarize(reintegrate=True, dist_tolerance=3, qranges=None,
samples=None, raw=False, late_radavg=True, graph_ncols=3,
std_multiplier=3, graph_extension='png',
graph_dpi=80, correlmatrix_colormap='coolwarm',
image_colormap='viridis', correlmatrix_logarithmic=True, cormaptest=True):
"""Summarize scattering patterns and curves for all samples defined
by the global `allsamplenames`.
Inputs:
        reintegrate (bool, default=True): if the curves are to be obtained
by reintegrating the patterns. Otherwise 1D curves are loaded.
dist_tolerance (float, default=3): sample-to-detector distances
nearer than this are considered the same
qranges (dict): a dictionary mapping approximate sample-to-detector
distances (within dist_tolerance) to one-dimensional np.ndarrays
of the desired q-range of the reintegration.
samples (list or None): the names of the samples to summarize. If
None, all samples defined by ``allsamplenames`` are used.
        raw (bool, default=False): if raw images are to be treated instead of
            the evaluated ones (default).
late_radavg (bool, default=True): if the scattering curves are to
be calculated from the summarized scattering pattern. If False,
scattering curves are calculated from each pattern and will be
averaged.
graph_ncols: the number of columns in graphs (2D patterns,
correlation matrices)
std_multiplier: if the absolute value of the relative discrepancy
is larger than this limit, the exposure is deemed an outlier.
graph_extension: the extension of the produced hardcopy files.
graph_dpi: resolution of the graphs
correlmatrix_colormap: name of the colormap to be used for the
correlation matrices (resolved by matplotlib.cm.get_cmap())
image_colormap: name of the colormap to be used for the scattering
patterns (resolved by matplotlib.cm.get_cmap())
correlmatrix_logarithmic: if the correlation matrix has to be
calculated from the logarithm of the intensity.
"""
if qranges is None:
qranges = {}
ip = get_ipython()
data2d = {}
data1d = {}
headers_tosave = {}
rowavg = {}
if raw:
writemarkdown('# Summarizing RAW images.')
headers = ip.user_ns['_headers']['raw']
rawpart = '_raw' # this will be added in the filenames saved
else:
writemarkdown('# Summarizing CORRECTED images.')
headers = ip.user_ns['_headers']['processed']
rawpart = '' # nothing will be added in the filenames saved
if samples is None:
samples = sorted(ip.user_ns['allsamplenames'])
for samplename in samples:
writemarkdown('## ' + samplename)
headers_sample = [h for h in headers if h.title == samplename]
data2d[samplename] = {}
rowavg[samplename] = {}
data1d[samplename] = {}
headers_tosave[samplename] = {}
dists = get_different_distances([h for h in headers if h.title == samplename], dist_tolerance)
if not dists:
writemarkdown('No measurements from sample, skipping.')
continue
fig_2d = plt.figure()
fig_curves = plt.figure()
fig_correlmatrices = plt.figure()
distaxes = {}
correlmatrixaxes = {}
ncols = min(len(dists), graph_ncols)
nrows = int(np.ceil(len(dists) / ncols))
onedimaxes = fig_curves.add_axes((0.1, 0.3, 0.8, 0.5))
onedimstdaxes = fig_curves.add_axes((0.1, 0.1, 0.8, 0.2))
for distidx, dist in enumerate(dists):
writemarkdown("### Distance " + str(dist) + " mm")
headers_narrowed = [h for h in headers_sample if abs(float(h.distance) - dist) < dist_tolerance]
distaxes[dist] = fig_2d.add_subplot(
nrows, ncols, distidx + 1)
correlmatrixaxes[dist] = fig_correlmatrices.add_subplot(
nrows, ncols, distidx + 1)
# determine the q-range to be used from the qranges argument.
try:
distkey_min = min([np.abs(k - dist)
for k in qranges if np.abs(k - dist) < dist_tolerance])
except ValueError:
# no matching key in qranges dict
qrange = None # request auto-determination of q-range
else:
distkey = [
k for k in qranges if np.abs(k - dist) == distkey_min][0]
qrange = qranges[distkey]
(data1d[samplename][dist], data2d[samplename][dist], headers_tosave[samplename][dist]) = \
_collect_data_for_summarization(headers_narrowed, raw, reintegrate, qrange)
badfsns, badfsns_datcmp, tab, rowavg[samplename][dist] = _stabilityassessment(
headers_tosave[samplename][dist],
data1d[samplename][dist], dist,
fig_correlmatrices,
correlmatrixaxes[dist], std_multiplier, correlmatrix_colormap,
os.path.join(ip.user_ns['saveto_dir'], 'correlmatrix_%s_%s' % (
samplename,
('%.2f' % dist).replace('.', '_')) +
rawpart + '.npz'),
logarithmic_correlmatrix=correlmatrix_logarithmic,
cormaptest=cormaptest)
            # make sure both caches exist before updating them below
            if 'badfsns' not in ip.user_ns:
                ip.user_ns['badfsns'] = {}
            if 'badfsns_datcmp' not in ip.user_ns:
                ip.user_ns['badfsns_datcmp'] = {}
ip.user_ns['badfsns'] = set(ip.user_ns['badfsns']).union(badfsns)
ip.user_ns['badfsns_datcmp'] = set(ip.user_ns['badfsns_datcmp']).union(badfsns_datcmp)
display(tab)
# Plot the image
try:
data2d[samplename][dist].imshow(axes=distaxes[dist], show_crosshair=False,
norm=matplotlib.colors.LogNorm(),
cmap=matplotlib.cm.get_cmap(image_colormap))
except ValueError:
print('Error plotting 2D image for sample %s, distance %.2f' % (samplename, dist))
distaxes[dist].set_xlabel('q (' + qunit() + ')')
distaxes[dist].set_ylabel('q (' + qunit() + ')')
distaxes[dist].set_title(
'%.2f mm (%d curve%s)' % (dist, len(headers_tosave[samplename][dist]),
['', 's'][len(headers_tosave[samplename][dist]) > 1]))
# Plot the curves
Istd = np.stack([c.Intensity for c in data1d[samplename][dist]], axis=1)
for c, h in zip(data1d[samplename][dist], headers_tosave[samplename][dist]):
color = 'green'
if h.fsn in badfsns_datcmp:
color = 'magenta'
if h.fsn in badfsns:
color = 'red'
c.loglog(axes=onedimaxes, color=color)
if Istd.shape[1] > 1:
onedimstdaxes.loglog(data1d[samplename][dist][0].q, Istd.std(axis=1) / Istd.mean(axis=1) * 100, 'b-')
if not late_radavg:
data1d[samplename][dist] = Curve.average(
*data1d[samplename][dist])
else:
data1d[samplename][dist] = (
data2d[samplename][dist].radial_average(
qrange,
errorpropagation=3,
abscissa_errorpropagation=3, raw_result=False))
data1d[samplename][dist].loglog(
label='Average', lw=2, color='k', axes=onedimaxes)
##Saving image, headers, mask and curve
# data2d[samplename][dist].write(
# os.path.join(ip.user_ns['saveto_dir'],
# samplename + '_'+(
# '%.2f' % dist).replace('.', '_') +
# rawpart + '.npz'), plugin='CREDO Reduced')
# data2d[samplename][dist].header.write(
# os.path.join(ip.user_ns['saveto_dir'],
### samplename + '_'+(
# '%.2f' % dist).replace('.', '_') +
# rawpart +'.log'), plugin='CREDO Reduced')
# data2d[samplename][dist].mask.write_to_mat(
# os.path.join(ip.user_ns['saveto_dir'],
# data2d[samplename][dist].mask.maskid+'.mat'))
data1d[samplename][dist].save(os.path.join(ip.user_ns['saveto_dir'],
samplename + '_' + ('%.2f' % dist).replace('.',
'_') + rawpart + '.txt'))
# Report on qrange and flux
q_ = data1d[samplename][dist].q
qmin = q_[q_ > 0].min()
writemarkdown('#### Q-range & flux')
writemarkdown(
'- $q_{min}$: ' + print_abscissavalue(qmin, headers_tosave[samplename][dist][0].wavelength, dist))
writemarkdown('- $q_{max}$: ' + print_abscissavalue(data1d[samplename][dist].q.max(),
headers_tosave[samplename][dist][0].wavelength, dist))
writemarkdown('- Number of $q$ points: ' + str(len(data1d[samplename][dist])))
meastime = sum([h.exposuretime for h in headers_tosave[samplename][dist]])
writemarkdown("- from %d exposures, total exposure time %.0f sec <=> %.2f hr" % (
len(headers_tosave[samplename][dist]),
meastime, meastime / 3600.))
try:
flux = [h.flux for h in headers_tosave[samplename][dist]]
flux = ErrorValue(np.mean(flux), np.std(flux))
writemarkdown("- beam flux (photon/sec): %s" % flux)
except KeyError:
writemarkdown("- *No information on beam flux: dealing with raw data.*")
onedimaxes.set_xlabel('')
onedimaxes.set_ylabel('$d\\Sigma/d\\Omega$ (cm$^{-1}$ sr$^{-1}$)')
# plt.legend(loc='best')
onedimaxes.grid(True, which='both')
onedimaxes.axis('tight')
onedimaxes.set_title(samplename)
onedimstdaxes.set_xlabel('q (' + qunit() + ')')
onedimstdaxes.set_ylabel('Rel.std.dev. of intensity (%)')
onedimstdaxes.grid(True, which='both')
onedimstdaxes.set_xlim(*onedimaxes.get_xlim())
onedimstdaxes.set_xscale(onedimaxes.get_xscale())
putlogo(fig_curves)
putlogo(fig_2d)
fig_2d.tight_layout()
fig_correlmatrices.suptitle(samplename)
fig_correlmatrices.tight_layout()
fig_2d.savefig(
os.path.join(ip.user_ns['auximages_dir'],
'averaging2D_' +
samplename + rawpart + '.' + graph_extension),
dpi=graph_dpi)
fig_curves.savefig(
os.path.join(ip.user_ns['auximages_dir'],
'averaging1D_' +
samplename + rawpart + '.' + graph_extension),
dpi=graph_dpi)
putlogo(fig_correlmatrices)
fig_correlmatrices.savefig(
os.path.join(ip.user_ns['auximages_dir'],
'correlation_' +
samplename + rawpart + '.' + graph_extension),
dpi=graph_dpi)
writemarkdown("### Collected images from all distances")
plt.show()
writemarkdown("Updated badfsns list:")
writemarkdown('[' + ', '.join(str(f) for f in ip.user_ns['badfsns']) + ']')
writemarkdown("Updated badfsns list using datcmp:")
writemarkdown('[' + ', '.join(str(f) for f in ip.user_ns['badfsns_datcmp']) + ']')
ip.user_ns['_data1d'] = data1d
ip.user_ns['_data2d'] = data2d
ip.user_ns['_headers_sample'] = headers_tosave
ip.user_ns['_rowavg'] = rowavg | 0.002888 |
def node_has_namespace(node: BaseEntity, namespace: str) -> bool:
"""Pass for nodes that have the given namespace."""
ns = node.get(NAMESPACE)
return ns is not None and ns == namespace | 0.005102 |
def resolve_election_tie(self, candidates):
"""
call callback to resolve a tie between candidates
"""
sorted_candidate_ids = list(sorted(candidates, key=self.candidate_order_fn))
return sorted_candidate_ids[self.election_tie_cb(candidates)] | 0.010714 |
def checksum_creation_action(target, source, env):
"""Create a linker command file for patching an application checksum into a firmware image"""
# Important Notes:
# There are apparently many ways to calculate a CRC-32 checksum, we use the following options
# Initial seed value prepended to the input: 0xFFFFFFFF
# Whether the input is fed into the shift register least-significant bit or most-significant bit first: LSB
# Whether each data word is inverted: No
# Whether the final CRC value is inverted: No
# *These settings must agree between the executive and this function*
import crcmod
crc32_func = crcmod.mkCrcFun(0x104C11DB7, initCrc=0xFFFFFFFF, rev=False, xorOut=0)
with open(str(source[0]), 'rb') as f:
data = f.read()
# Ignore the last four bytes of the file since that is where the checksum will go
data = data[:-4]
# Make sure the magic number is correct so that we're dealing with an actual firmware image
magicbin = data[-4:]
magic, = struct.unpack('<L', magicbin)
if magic != 0xBAADDAAD:
raise BuildError("Attempting to patch a file that is not a CDB binary or has the wrong size", reason="invalid magic number found", actual_magic=magic, desired_magic=0xBAADDAAD)
# Calculate CRC32 in the same way as its done in the target microcontroller
checksum = crc32_func(data) & 0xFFFFFFFF
with open(str(target[0]), 'w') as f:
# hex strings end with L on windows and possibly some other systems
checkhex = hex(checksum)
if checkhex[-1] == 'L':
checkhex = checkhex[:-1]
f.write("--defsym=__image_checksum=%s\n" % checkhex) | 0.005248 |
def reboot(self, timeout=None, wait_polling_interval=None):
"""Reboot the device, waiting for the adb connection to become stable.
:param timeout: Maximum time to wait for reboot.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Rebooting device")
self.wait_for_device_ready(timeout,
after_first=lambda:self.command_output(["reboot"])) | 0.007788 |
def ipv6_acl_ipv6_access_list_standard_seq_action(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ipv6_acl = ET.SubElement(config, "ipv6-acl", xmlns="urn:brocade.com:mgmt:brocade-ipv6-access-list")
ipv6 = ET.SubElement(ipv6_acl, "ipv6")
access_list = ET.SubElement(ipv6, "access-list")
standard = ET.SubElement(access_list, "standard")
name_key = ET.SubElement(standard, "name")
name_key.text = kwargs.pop('name')
seq = ET.SubElement(standard, "seq")
seq_id_key = ET.SubElement(seq, "seq-id")
seq_id_key.text = kwargs.pop('seq_id')
action = ET.SubElement(seq, "action")
action.text = kwargs.pop('action')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003601 |
def handle_message(self, msg, host):
"""Processes messages that have been delivered from the listener.
Args:
msg (string): The raw packet data delivered from the listener. This
data will be unserialized and then processed based on the packet's
method.
host (tuple): The (address, host) tuple of the source message.
Returns:
A response that will be sent back to the client via the listener.
"""
response = None
# Unserialize the packet, and decrypt if the host has encryption enabled
if host in self.encrypted_hosts:
msg_data = unserialize_data(msg, self.compression, self.encryption)
else:
msg_data = unserialize_data(msg, self.compression)
logger.debug("Packet received: " + pformat(msg_data))
# If the message data is blank, return none
if not msg_data: return response
# For debug purposes, check if the client is registered or not
if self.is_registered(msg_data["cuuid"], host[0]):
logger.debug("<%s> Client is currently registered" % msg_data["cuuid"])
else:
logger.debug("<%s> Client is not registered" % msg_data["cuuid"])
if "method" in msg_data:
if msg_data["method"] == "REGISTER":
logger.debug("<%s> Register packet received" % msg_data["cuuid"])
response = self.register(msg_data, host)
elif msg_data["method"] == "OHAI":
if not self.discoverable:
return False
logger.debug("<%s> Autodiscover packet received" % msg_data["cuuid"])
response = self.autodiscover(msg_data)
elif msg_data["method"] == "AUTH":
logger.debug("<%s> Authentication packet recieved" % msg_data["cuuid"])
response = self.auth_server.verify_login(msg_data)
if response:
self.registry[host]["authenticated"] = True
else:
if self.auth_server:
if self.registry[host]["authenticated"]:
response = self.handle_message_registered(msg_data, host)
else:
response = self.handle_message_registered(msg_data, host)
logger.debug("Packet processing completed")
return response | 0.004149 |
def update_ssl_termination(self, loadbalancer, securePort=None, enabled=None,
secureTrafficOnly=None):
"""
Updates existing SSL termination information for the load balancer
without affecting the existing certificates/keys.
"""
ssl_info = self.get_ssl_termination(loadbalancer)
if not ssl_info:
raise exc.NoSSLTerminationConfiguration("You must configure SSL "
"termination on this load balancer before attempting "
"to update it.")
if securePort is None:
securePort = ssl_info["securePort"]
if enabled is None:
enabled = ssl_info["enabled"]
if secureTrafficOnly is None:
secureTrafficOnly = ssl_info["secureTrafficOnly"]
uri = "/loadbalancers/%s/ssltermination" % utils.get_id(loadbalancer)
req_body = {"sslTermination": {
"enabled": enabled,
"secureTrafficOnly": secureTrafficOnly,
"securePort": securePort,
}}
resp, body = self.api.method_put(uri, body=req_body)
return body | 0.004367 |
def set_cells(self, cells_location):
"""
Set self.cells to function :cells in file pathname.py
:param cells_location: cells location, format 'pathname.py:cells'
:return:
"""
if ':' in cells_location:
pathname, func_name = cells_location.split(':')
else:
pathname = cells_location
func_name = 'cells'
check_isfile(pathname)
try:
self.cells = get_func(func_name, pathname)
except SyntaxError as e:
fatal(traceback.format_exc(limit=1))
return pathname, func_name | 0.003273 |
def exit(status=0):
"""
Terminate the program with the given status code.
"""
if status == 0:
lab.io.printf(lab.io.Colours.GREEN, "Done.")
else:
lab.io.printf(lab.io.Colours.RED, "Error {0}".format(status))
sys.exit(status) | 0.003802 |
def _get_default_value(self, value):
"""
Format a value so that it can be used in "default" clauses.
"""
if isinstance(value, QueryExpression):
return value
if isinstance(value, bool):
return "'%s'" % int(value)
return "'%s'" % value | 0.006515 |
def _handle_complete_reply(self, rep):
""" Handle replies for tab completion.
"""
self.log.debug("complete: %s", rep.get('content', ''))
cursor = self._get_cursor()
info = self._request_info.get('complete')
if info and info.id == rep['parent_header']['msg_id'] and \
info.pos == cursor.position():
text = '.'.join(self._get_context())
cursor.movePosition(QtGui.QTextCursor.Left, n=len(text))
self._complete_with_items(cursor, rep['content']['matches']) | 0.003623 |
def on_pre_execution(**kwargs):
"""
Calls callbacks before execution.
Note that any exception from callback will be logged but won't be propagated.
:param kwargs:
:return: None
"""
logging.debug("Calling callbacks: %s", __pre_exec_callbacks)
for cb in __pre_exec_callbacks:
try:
cb(**kwargs)
except Exception:
logging.exception('Failed on pre-execution callback using %s', cb) | 0.004464 |
def send_magic_packet(*macs, **kwargs):
"""
Wake up computers having any of the given mac addresses.
Wake on lan must be enabled on the host device.
Args:
        macs (str): One or more MAC addresses of machines to wake.
Keyword Args:
ip_address (str): the ip address of the host to send the magic packet
to (default "255.255.255.255")
port (int): the port of the host to send the magic packet to
(default 9)
"""
packets = []
ip = kwargs.pop('ip_address', BROADCAST_IP)
port = kwargs.pop('port', DEFAULT_PORT)
for k in kwargs:
raise TypeError('send_magic_packet() got an unexpected keyword '
'argument {!r}'.format(k))
for mac in macs:
packet = create_magic_packet(mac)
packets.append(packet)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.connect((ip, port))
for packet in packets:
sock.send(packet)
sock.close() | 0.00094 |
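create_magic_packet is referenced above but not shown; as a hedged sketch under the assumption that it follows the conventional Wake-on-LAN layout (six 0xFF bytes followed by the MAC repeated 16 times, 102 bytes total), a stand-in helper named build_magic_packet could look like this.

import binascii

def build_magic_packet(mac):
    # conventional Wake-on-LAN frame: 6 bytes of 0xFF followed by the MAC repeated 16 times
    # (an assumption about what create_magic_packet produces)
    mac_bytes = binascii.unhexlify(mac.replace(':', '').replace('-', '').replace('.', ''))
    if len(mac_bytes) != 6:
        raise ValueError('incorrect MAC address format')
    return b'\xff' * 6 + mac_bytes * 16

packet = build_magic_packet('aa:bb:cc:dd:ee:ff')   # example MAC, not a real device
print(len(packet))                                 # 102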
def insert(self, task, args=[], kwargs={}, delay_seconds=0):
"""
Insert a task into an existing queue.
"""
body = {
"payload": task.payload(),
"queueName": self._queue_name,
"groupByTag": True,
"tag": task.__class__.__name__
}
def cloud_insertion():
self._api.insert(body, delay_seconds)
self._pool.spawn(cloud_insertion)
return self | 0.005025 |
def json(self):
"""Load response body as json.
:raises: :class:`ContentDecodingError`
"""
try:
return json.loads(self.text)
except Exception as e:
raise ContentDecodingError(e) | 0.008299 |
def ConnectNoSSL(host='localhost', port=443, user='root', pwd='',
service="hostd", adapter="SOAP", namespace=None, path="/sdk",
version=None, keyFile=None, certFile=None, thumbprint=None,
b64token=None, mechanism='userpass'):
"""
Provides a standard method for connecting to a specified server without SSL
verification. Useful when connecting to servers with self-signed certificates
or when you wish to ignore SSL altogether. Will attempt to create an unverified
SSL context and then connect via the Connect method.
"""
if hasattr(ssl, '_create_unverified_context'):
sslContext = ssl._create_unverified_context()
else:
sslContext = None
return Connect(host=host,
port=port,
user=user,
pwd=pwd,
service=service,
adapter=adapter,
namespace=namespace,
path=path,
version=version,
keyFile=keyFile,
certFile=certFile,
thumbprint=thumbprint,
sslContext=sslContext,
b64token=b64token,
mechanism=mechanism) | 0.007241 |
def _process_loaded_object(self, path):
"""process the :paramref:`path`.
:param str path: the path to load an svg from
"""
file_name = os.path.basename(path)
name = os.path.splitext(file_name)[0]
with open(path) as file:
string = file.read()
self._instruction_type_to_file_content[name] = string | 0.005435 |
def csv_column_cleaner(rows):
"""
clean csv columns parsed omitting empty/dirty rows.
"""
# check columns if there was empty columns
result = [[] for x in range(0, len(rows))]
for i_index in range(0, len(rows[0])):
partial_values = []
for x_row in rows:
partial_values.append(
x_row[i_index] if len(x_row) > i_index else '')
colum_rows = exclude_empty_values(partial_values)
if len(colum_rows) > len(rows) / 5: # adjust this value
for index in range(0, len(rows)):
result[index].append(
rows[index][i_index] if len(rows[index]) > i_index else '')
return result | 0.001425 |
def get_tiles(self):
"""Get all TileCoordinates contained in the region"""
for x, y in griditer(self.root_tile.x, self.root_tile.y, ncol=self.tiles_per_row):
yield TileCoordinate(self.root_tile.zoom, x, y) | 0.012876 |
def make_related_protein_fasta_from_dataframe(self, input_df):
    '''
    Write one FASTA file per unique hit_query in input_df. The DataFrame
    must provide the columns hit_query, org_name, prot_acc and
    prot_translation.
    '''
dirname = './group_fastas'
if not os.path.exists(dirname):
os.makedirs(dirname)
unique_hit_queries = set(input_df.hit_query)
for hq in unique_hit_queries:
fasta = []
subdf = input_df[input_df.hit_query==hq].reset_index()
for i in range(0, len(subdf)):
fasta.append('>' + subdf.ix[i].org_name.replace(" ", "-") +
"," + subdf.ix[i].hit_query +
"," + subdf.ix[i].prot_acc +
'\n' + subdf.ix[i].prot_translation + '\n')
faastring = "".join(fasta)
filename = './group_fastas/' + hq + '.fasta'
write_fasta = open(filename, 'w')
write_fasta.write(faastring)
write_fasta.close() | 0.005308 |
def receive(
self,
request: RequestType,
user: UserType = None,
sender_key_fetcher: Callable[[str], str] = None,
skip_author_verification: bool = False) -> Tuple[str, dict]:
"""
Receive a request.
For testing purposes, `skip_author_verification` can be passed. Authorship will not be verified.
"""
self.user = user
self.get_contact_key = sender_key_fetcher
self.payload = json.loads(decode_if_bytes(request.body))
self.request = request
self.extract_actor()
# Verify the message is from who it claims to be
if not skip_author_verification:
self.verify_signature()
return self.actor, self.payload | 0.003927 |
def list_huisnummers_by_perceel(self, perceel, sort=1):
'''
        List all `huisnummers` on a `Perceel`.
Generally there will only be one, but multiples are possible.
:param perceel: The :class:`Perceel` for which the \
`huisnummers` are wanted.
:rtype: A :class: `list` of :class:`Huisnummer`
'''
try:
id = perceel.id
except AttributeError:
id = perceel
def creator():
res = crab_gateway_request(
self.client, 'ListHuisnummersWithStatusByIdentificatorPerceel',
id, sort
)
try:
huisnummers= []
for r in res.HuisnummerWithStatusItem:
h = self.get_huisnummer_by_id(r.HuisnummerId)
h.clear_gateway()
huisnummers.append(h)
return huisnummers
except AttributeError:
return []
if self.caches['short'].is_configured:
key = 'ListHuisnummersWithStatusByIdentificatorPerceel#%s%s' % (id, sort)
huisnummers = self.caches['short'].get_or_create(key, creator)
else:
huisnummers = creator()
for h in huisnummers:
h.set_gateway(self)
return huisnummers | 0.003019 |
def forward(node, analysis):
"""Perform a given analysis on all functions within an AST."""
if not isinstance(analysis, Forward):
raise TypeError('not a valid forward analysis object')
for succ in gast.walk(node):
if isinstance(succ, gast.FunctionDef):
cfg_obj = CFG.build_cfg(succ)
analysis.visit(cfg_obj.entry)
return node | 0.019886 |
def nearest_int(x):
"""
Return nearest long integer to x
"""
if x == 0:
return np.int64(0)
elif x > 0:
return np.int64(x + 0.5)
else:
return np.int64(x - 0.5) | 0.004854 |
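A short usage note for nearest_int: values are rounded half away from zero, which the calls below make concrete (numpy is assumed to be imported as in the function).

print(nearest_int(2.4), nearest_int(2.6), nearest_int(-2.6), nearest_int(0))
# 2 3 -3 0
print(nearest_int(2.5), nearest_int(-2.5))
# 3 -3  (ties round away from zero)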
def closest_point(triangles, points):
"""
Return the closest point on the surface of each triangle for a
list of corresponding points.
Implements the method from "Real Time Collision Detection" and
use the same variable names as "ClosestPtPointTriangle" to avoid
being any more confusing.
Parameters
----------
triangles : (n, 3, 3) float
Triangle vertices in space
points : (n, 3) float
Points in space
Returns
----------
closest : (n, 3) float
Point on each triangle closest to each point
"""
# check input triangles and points
triangles = np.asanyarray(triangles, dtype=np.float64)
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('triangles shape incorrect')
if not util.is_shape(points, (len(triangles), 3)):
raise ValueError('need same number of triangles and points!')
# store the location of the closest point
result = np.zeros_like(points)
# which points still need to be handled
    remain = np.ones(len(points), dtype=bool)
# if we dot product this against a (n, 3)
# it is equivalent but faster than array.sum(axis=1)
ones = [1.0, 1.0, 1.0]
# get the three points of each triangle
# use the same notation as RTCD to avoid confusion
a = triangles[:, 0, :]
b = triangles[:, 1, :]
c = triangles[:, 2, :]
# check if P is in vertex region outside A
ab = b - a
ac = c - a
ap = points - a
# this is a faster equivalent of:
# util.diagonal_dot(ab, ap)
d1 = np.dot(ab * ap, ones)
d2 = np.dot(ac * ap, ones)
# is the point at A
is_a = np.logical_and(d1 < tol.zero, d2 < tol.zero)
if is_a.any():
result[is_a] = a[is_a]
remain[is_a] = False
# check if P in vertex region outside B
bp = points - b
d3 = np.dot(ab * bp, ones)
d4 = np.dot(ac * bp, ones)
# do the logic check
is_b = (d3 > -tol.zero) & (d4 <= d3) & remain
if is_b.any():
result[is_b] = b[is_b]
remain[is_b] = False
# check if P in edge region of AB, if so return projection of P onto A
vc = (d1 * d4) - (d3 * d2)
is_ab = ((vc < tol.zero) &
(d1 > -tol.zero) &
(d3 < tol.zero) & remain)
if is_ab.any():
v = (d1[is_ab] / (d1[is_ab] - d3[is_ab])).reshape((-1, 1))
result[is_ab] = a[is_ab] + (v * ab[is_ab])
remain[is_ab] = False
# check if P in vertex region outside C
cp = points - c
d5 = np.dot(ab * cp, ones)
d6 = np.dot(ac * cp, ones)
is_c = (d6 > -tol.zero) & (d5 <= d6) & remain
if is_c.any():
result[is_c] = c[is_c]
remain[is_c] = False
# check if P in edge region of AC, if so return projection of P onto AC
vb = (d5 * d2) - (d1 * d6)
is_ac = (vb < tol.zero) & (d2 > -tol.zero) & (d6 < tol.zero) & remain
if is_ac.any():
w = (d2[is_ac] / (d2[is_ac] - d6[is_ac])).reshape((-1, 1))
result[is_ac] = a[is_ac] + w * ac[is_ac]
remain[is_ac] = False
# check if P in edge region of BC, if so return projection of P onto BC
va = (d3 * d6) - (d5 * d4)
is_bc = ((va < tol.zero) &
((d4 - d3) > - tol.zero) &
((d5 - d6) > -tol.zero) & remain)
if is_bc.any():
d43 = d4[is_bc] - d3[is_bc]
w = (d43 / (d43 + (d5[is_bc] - d6[is_bc]))).reshape((-1, 1))
result[is_bc] = b[is_bc] + w * (c[is_bc] - b[is_bc])
remain[is_bc] = False
# any remaining points must be inside face region
if remain.any():
# point is inside face region
denom = 1.0 / (va[remain] + vb[remain] + vc[remain])
v = (vb[remain] * denom).reshape((-1, 1))
w = (vc[remain] * denom).reshape((-1, 1))
# compute Q through its barycentric coordinates
result[remain] = a[remain] + (ab[remain] * v) + (ac[remain] * w)
return result | 0.000253 |
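A hedged usage example for closest_point, assuming the surrounding module's util and tol helpers are importable as the function requires; a point hovering above the interior of a right triangle projects straight down onto its face.

import numpy as np

# one right triangle in the z=0 plane and one query point above its interior
triangles = np.array([[[0.0, 0.0, 0.0],
                       [1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]]])
points = np.array([[0.25, 0.25, 1.0]])

print(closest_point(triangles, points))
# [[0.25 0.25 0.  ]]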
def _FindInFileEntry(self, file_entry, find_specs, search_depth):
"""Searches for matching file entries within the file entry.
Args:
file_entry (FileEntry): file entry.
find_specs (list[FindSpec]): find specifications.
search_depth (int): number of location path segments to compare.
Yields:
PathSpec: path specification of a matching file entry.
"""
sub_find_specs = []
for find_spec in find_specs:
match, location_match = find_spec.Matches(file_entry, search_depth)
if match:
yield file_entry.path_spec
# pylint: disable=singleton-comparison
if location_match != False and not find_spec.AtMaximumDepth(search_depth):
sub_find_specs.append(find_spec)
if not sub_find_specs:
return
search_depth += 1
try:
for sub_file_entry in file_entry.sub_file_entries:
for matching_path_spec in self._FindInFileEntry(
sub_file_entry, sub_find_specs, search_depth):
yield matching_path_spec
except errors.AccessError:
pass | 0.011268 |
def map(self, func, axis=(0,), value_shape=None, dtype=None, with_keys=False):
"""
Apply a function across an axis.
Array will be aligned so that the desired set of axes
are in the keys, which may incur a swap.
Parameters
----------
func : function
Function of a single array to apply. If with_keys=True,
function should be of a (tuple, array) pair.
axis : tuple or int, optional, default=(0,)
Axis or multiple axes to apply function along.
value_shape : tuple, optional, default=None
Known shape of values resulting from operation
dtype: numpy.dtype, optional, default=None
Known dtype of values resulting from operation
with_keys : bool, optional, default=False
Include keys as an argument to the function
Returns
-------
BoltArraySpark
"""
axis = tupleize(axis)
swapped = self._align(axis)
if with_keys:
test_func = lambda x: func(((0,), x))
else:
test_func = func
if value_shape is None or dtype is None:
# try to compute the size of each mapped element by applying func to a random array
try:
mapped = test_func(random.randn(*swapped.values.shape).astype(self.dtype))
except Exception:
first = swapped._rdd.first()
if first:
# eval func on the first element
mapped = test_func(first[1])
if value_shape is None:
value_shape = mapped.shape
if dtype is None:
dtype = mapped.dtype
shape = tuple([swapped._shape[ax] for ax in range(len(axis))]) + tupleize(value_shape)
if with_keys:
rdd = swapped._rdd.map(lambda kv: (kv[0], func(kv)))
else:
rdd = swapped._rdd.mapValues(func)
# reshaping will fail if the elements aren't uniformly shaped
def check(v):
if len(v.shape) > 0 and v.shape != tupleize(value_shape):
raise Exception("Map operation did not produce values of uniform shape.")
return v
rdd = rdd.mapValues(lambda v: check(v))
return self._constructor(rdd, shape=shape, dtype=dtype, split=swapped.split).__finalize__(swapped) | 0.003331 |
def _compute_distance(self, rup, dists, C):
"""
Compute the distance function, equation (5).
"""
rval = np.sqrt(dists.repi ** 2 + C['h'] ** 2)
return C['c1'] * np.log10(rval) | 0.009346 |
def subtract_params(param_list_left, param_list_right):
"""Subtract two lists of parameters
:param param_list_left: list of numpy arrays
:param param_list_right: list of numpy arrays
:return: list of numpy arrays
"""
res = []
for x, y in zip(param_list_left, param_list_right):
res.append(x - y)
return res | 0.002882 |
def draw_clusters(data, clusters, noise = [], marker_descr = '.', hide_axes = False, axes = None, display_result = True):
"""!
@brief Displays clusters for data in 2D or 3D.
@param[in] data (list): Points that are described by coordinates represented.
@param[in] clusters (list): Clusters that are represented by lists of indexes where each index corresponds to point in data.
@param[in] noise (list): Points that are regarded to noise.
@param[in] marker_descr (string): Marker for displaying points.
@param[in] hide_axes (bool): If True - axes is not displayed.
@param[in] axes (ax) Matplotlib axes where clusters should be drawn, if it is not specified (None) then new plot will be created.
@param[in] display_result (bool): If specified then matplotlib axes will be used for drawing and plot will not be shown.
@return (ax) Matplotlib axes where drawn clusters are presented.
"""
# Get dimension
dimension = 0;
if ( (data is not None) and (clusters is not None) ):
dimension = len(data[0]);
elif ( (data is None) and (clusters is not None) ):
dimension = len(clusters[0][0]);
else:
raise NameError('Data or clusters should be specified exactly.');
"Draw clusters"
colors = [ 'red', 'blue', 'darkgreen', 'brown', 'violet',
'deepskyblue', 'darkgrey', 'lightsalmon', 'deeppink', 'yellow',
'black', 'mediumspringgreen', 'orange', 'darkviolet', 'darkblue',
'silver', 'lime', 'pink', 'gold', 'bisque' ];
if (len(clusters) > len(colors)):
raise NameError('Impossible to represent clusters due to number of specified colors.');
fig = plt.figure();
if (axes is None):
# Check for dimensions
if ((dimension) == 1 or (dimension == 2)):
axes = fig.add_subplot(111);
elif (dimension == 3):
axes = fig.gca(projection='3d');
else:
raise NameError('Drawer supports only 2d and 3d data representation');
color_index = 0;
for cluster in clusters:
color = colors[color_index];
for item in cluster:
if (dimension == 1):
if (data is None):
axes.plot(item[0], 0.0, color = color, marker = marker_descr);
else:
axes.plot(data[item][0], 0.0, color = color, marker = marker_descr);
if (dimension == 2):
if (data is None):
axes.plot(item[0], item[1], color = color, marker = marker_descr);
else:
axes.plot(data[item][0], data[item][1], color = color, marker = marker_descr);
elif (dimension == 3):
if (data is None):
axes.scatter(item[0], item[1], item[2], c = color, marker = marker_descr);
else:
axes.scatter(data[item][0], data[item][1], data[item][2], c = color, marker = marker_descr);
color_index += 1;
for item in noise:
if (dimension == 1):
if (data is None):
axes.plot(item[0], 0.0, 'w' + marker_descr);
else:
axes.plot(data[item][0], 0.0, 'w' + marker_descr);
if (dimension == 2):
if (data is None):
axes.plot(item[0], item[1], 'w' + marker_descr);
else:
axes.plot(data[item][0], data[item][1], 'w' + marker_descr);
elif (dimension == 3):
if (data is None):
axes.scatter(item[0], item[1], item[2], c = 'w', marker = marker_descr);
else:
axes.scatter(data[item][0], data[item][1], data[item][2], c = 'w', marker = marker_descr);
axes.grid(True);
if (hide_axes is True):
axes.xaxis.set_ticklabels([]);
axes.yaxis.set_ticklabels([]);
if (dimension == 3):
axes.zaxis.set_ticklabels([]);
if (display_result is True):
plt.show();
return axes; | 0.026925 |
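A hedged usage sketch for draw_clusters with clusters given as lists of point indexes into data, assuming matplotlib is available as the function expects; the sample coordinates are arbitrary.

# four 2-D points split into two clusters by index
sample = [[1.0, 1.0], [1.2, 0.9], [5.0, 5.1], [5.2, 4.9]]
clusters = [[0, 1], [2, 3]]

draw_clusters(sample, clusters, marker_descr='o')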
def transfer_matrix(self, superoperator):
"""
Compute the transfer matrix :math:`R_{jk} = \tr[P_j sop(P_k)]`.
:param qutip.Qobj superoperator: The superoperator to transform.
:return: The transfer matrix in sparse form.
:rtype: scipy.sparse.csr_matrix
"""
if not self.is_orthonormal(): # pragma no coverage
raise ValueError("transfer_matrix() only implemented for orthonormal operator bases.")
return self.basis_transform.H * superoperator.data * self.basis_transform | 0.007339 |
def connect(self, protocolFactory):
"""Starts a process and connect a protocol to it.
"""
deferred = self._startProcess()
deferred.addCallback(self._connectRelay, protocolFactory)
deferred.addCallback(self._startRelay)
return deferred | 0.007092 |
def justify(texts, max_len, mode='right'):
"""
Perform ljust, center, rjust against string or list-like
"""
if mode == 'left':
return [x.ljust(max_len) for x in texts]
elif mode == 'center':
return [x.center(max_len) for x in texts]
else:
return [x.rjust(max_len) for x in texts] | 0.003058 |
def _update_linecache(self, path, data):
"""
The Python 2.4 linecache module, used to fetch source code for
tracebacks and :func:`inspect.getsource`, does not support PEP-302,
meaning it needs extra help to for Mitogen-loaded modules. Directly
populate its cache if a loaded module belongs to the Mitogen package.
"""
if PY24 and 'mitogen' in path:
linecache.cache[path] = (
len(data),
0.0,
[line+'\n' for line in data.splitlines()],
path,
) | 0.003419 |
def load_model_class(repo, content_type):
"""
Return a model class for a content type in a repository.
:param Repo repo:
The git repository.
:param str content_type:
The content type to list
:returns: class
"""
schema = get_schema(repo, content_type).to_json()
return deserialize(schema, module_name=schema['namespace']) | 0.00271 |
def generate_version_file(self, schema_filename, binding_filename):
"""Given a DataONE schema, generates a file that contains version information
about the schema."""
version_filename = binding_filename + '_version.txt'
version_path = os.path.join(self.binding_dir, version_filename)
schema_path = os.path.join(self.schema_dir, schema_filename)
try:
tstamp, svnpath, svnrev, version = self.get_version_info_from_svn(
schema_path
)
except TypeError:
pass
else:
self.write_version_file(version_path, tstamp, svnpath, svnrev, version) | 0.00607 |
def manage_initial_host_status_brok(self, b):
"""Prepare the known hosts cache"""
host_name = b.data['host_name']
logger.debug("got initial host status: %s", host_name)
self.hosts_cache[host_name] = {
'realm_name':
sanitize_name(b.data.get('realm_name', b.data.get('realm', 'All'))),
}
if 'customs' in b.data:
self.hosts_cache[host_name]['_GRAPHITE_PRE'] = \
sanitize_name(b.data['customs'].get('_GRAPHITE_PRE', None))
self.hosts_cache[host_name]['_GRAPHITE_GROUP'] = \
sanitize_name(b.data['customs'].get('_GRAPHITE_GROUP', None))
logger.debug("initial host status received: %s", host_name) | 0.004098 |
def sum_by_n(d, w, n):
"""A utility function to summarize a data array into n values
after weighting the array with another weight array w
Parameters
----------
d : array
(t, 1), numerical values
w : array
(t, 1), numerical values for weighting
n : integer
the number of groups
t = c*n (c is a constant)
Returns
-------
: array
(n, 1), an array with summarized values
Examples
--------
Creating an array including four integers.
We will compute weighted means for every two elements.
>>> d = np.array([10, 9, 20, 30])
Here is another array with the weight values for d's elements.
>>> w = np.array([0.5, 0.1, 0.3, 0.8])
We specify the number of groups for which the weighted mean is computed.
>>> n = 2
Applying sum_by_n function
>>> sum_by_n(d, w, n)
array([ 5.9, 30. ])
"""
t = len(d)
h = t // n #must be floor!
d = d * w
return np.array([sum(d[i: i + h]) for i in range(0, t, h)]) | 0.002657 |
def create_module_rst_file(module_name):
"""Function for creating content in each .rst file for a module.
:param module_name: name of the module.
:type module_name: str
:returns: A content for auto module.
:rtype: str
"""
return_text = 'Module: ' + module_name
dash = '=' * len(return_text)
return_text += '\n' + dash + '\n\n'
return_text += '.. automodule:: ' + module_name + '\n'
return_text += ' :members:\n\n'
return return_text | 0.002053 |
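A hedged usage example; the module name and output path are placeholders. The returned string is a 'Module: <name>' title, an '=' underline of matching length, and an automodule directive with a :members: option.

stub = create_module_rst_file('mypackage.utils')    # placeholder module name
with open('mypackage.utils.rst', 'w') as rst_file:  # hypothetical output path
    rst_file.write(stub)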
def fetch(self, max=None):
"""
Fetch all collection by pages of `max` items.
Parameters
----------
max : int, None (optional)
The number of item per page. If None, retrieve all collection.
Returns
-------
self Collection, the fetched collection
"""
if max:
self.max = max
n_pages = 0
while not self._total_pages or n_pages < self._total_pages:
self.fetch_next_page(True)
n_pages += 1
return self
else:
return self._fetch() | 0.003231 |
def set_attr(obj, path, value):
"""
SAME AS object.__setattr__(), BUT USES DOT-DELIMITED path
RETURN OLD VALUE
"""
try:
return _set_attr(obj, split_field(path), value)
except Exception as e:
Log = get_logger()
if PATH_NOT_FOUND in e:
Log.warning(PATH_NOT_FOUND + ": {{path}}", path=path, cause=e)
else:
Log.error("Problem setting value", cause=e) | 0.002347 |
def Record(self, value, fields=None):
"""Records the given observation in a distribution."""
key = _FieldsToKey(fields)
metric_value = self._metric_values.get(key)
if metric_value is None:
metric_value = self._DefaultValue()
self._metric_values[key] = metric_value
metric_value.Record(value) | 0.009288 |
def to_bytes(value):
"""Get a byte array representing the value"""
if isinstance(value, unicode):
return value.encode('utf8')
elif not isinstance(value, str):
return str(value)
return value | 0.004525 |
def _find_local_signals(cls, signals, namespace):
"""Add name info to every "local" (present in the body of this class)
signal and add it to the mapping. Also complete signal
initialization as member of the class by injecting its name.
"""
from . import Signal
signaller = cls._external_signaller_and_handler
for aname, avalue in namespace.items():
if isinstance(avalue, Signal):
if avalue.name:
aname = avalue.name
else:
avalue.name = aname
assert ((aname not in signals) or
(aname in signals and avalue is not signals[aname])), \
("The same signal {name!r} was found "
"two times".format(name=aname))
if signaller:
avalue.external_signaller = signaller
signals[aname] = avalue | 0.003122 |
def _median(imageObjectList, paramDict):
"""Create a median image from the list of image Objects
that has been given.
"""
newmasks = paramDict['median_newmasks']
comb_type = paramDict['combine_type'].lower()
nlow = paramDict['combine_nlow']
nhigh = paramDict['combine_nhigh']
grow = paramDict['combine_grow'] if 'minmed' in comb_type else 0
maskpt = paramDict['combine_maskpt']
proc_units = paramDict['proc_unit']
compress = paramDict['compress']
bufsizeMB = paramDict['combine_bufsize']
sigma = paramDict["combine_nsigma"]
sigmaSplit = sigma.split()
nsigma1 = float(sigmaSplit[0])
nsigma2 = float(sigmaSplit[1])
if paramDict['combine_lthresh'] is None:
lthresh = None
else:
lthresh = float(paramDict['combine_lthresh'])
if paramDict['combine_hthresh'] is None:
hthresh = None
else:
hthresh = float(paramDict['combine_hthresh'])
    # the name of the output median file is defined in the output wcs object and
# stuck in the image.outputValues["outMedian"] dict of every imageObject
medianfile = imageObjectList[0].outputNames["outMedian"]
# Build combined array from single drizzled images.
# Start by removing any previous products...
if os.access(medianfile, os.F_OK):
os.remove(medianfile)
# Define lists for instrument specific parameters, these should be in
# the image objects need to be passed to the minmed routine
readnoiseList = []
exposureTimeList = []
backgroundValueList = [] # list of MDRIZSKY *platescale values
singleDrizList = [] # these are the input images
singleWeightList = [] # pointers to the data arrays
wht_mean = [] # Compute the mean value of each wht image
single_hdr = None
virtual = None
# for each image object
for image in imageObjectList:
if virtual is None:
virtual = image.inmemory
det_gain = image.getGain(1)
img_exptime = image._image['sci', 1]._exptime
native_units = image.native_units
native_units_lc = native_units.lower()
if proc_units.lower() == 'native':
if native_units_lc not in ['counts', 'electrons', 'counts/s',
'electrons/s']:
raise ValueError("Unexpected native units: '{}'"
.format(native_units))
if lthresh is not None:
if native_units_lc.startswith('counts'):
lthresh *= det_gain
if native_units_lc.endswith('/s'):
lthresh *= img_exptime
if hthresh is not None:
if native_units_lc.startswith('counts'):
hthresh *= det_gain
if native_units_lc.endswith('/s'):
hthresh *= img_exptime
singleDriz = image.getOutputName("outSingle")
singleDriz_name = image.outputNames['outSingle']
singleWeight = image.getOutputName("outSWeight")
singleWeight_name = image.outputNames['outSWeight']
# If compression was used, reference ext=1 as CompImageHDU only writes
# out MEF files, not simple FITS.
if compress:
wcs_ext = '[1]'
wcs_extnum = 1
else:
wcs_ext = '[0]'
wcs_extnum = 0
if not virtual:
if isinstance(singleDriz, str):
iter_singleDriz = singleDriz + wcs_ext
iter_singleWeight = singleWeight + wcs_ext
else:
iter_singleDriz = singleDriz[wcs_extnum]
iter_singleWeight = singleWeight[wcs_extnum]
else:
iter_singleDriz = singleDriz_name + wcs_ext
iter_singleWeight = singleWeight_name + wcs_ext
# read in WCS from first single drizzle image to use as WCS for
# median image
if single_hdr is None:
if virtual:
single_hdr = singleDriz[wcs_extnum].header
else:
single_hdr = fits.getheader(singleDriz_name, ext=wcs_extnum,
memmap=False)
single_image = iterfile.IterFitsFile(iter_singleDriz)
if virtual:
single_image.handle = singleDriz
single_image.inmemory = True
singleDrizList.append(single_image) # add to an array for bookkeeping
# If it exists, extract the corresponding weight images
if (not virtual and os.access(singleWeight, os.F_OK)) or (
virtual and singleWeight):
weight_file = iterfile.IterFitsFile(iter_singleWeight)
if virtual:
weight_file.handle = singleWeight
weight_file.inmemory = True
singleWeightList.append(weight_file)
try:
tmp_mean_value = ImageStats(weight_file.data, lower=1e-8,
fields="mean", nclip=0).mean
except ValueError:
tmp_mean_value = 0.0
wht_mean.append(tmp_mean_value * maskpt)
# Extract instrument specific parameters and place in lists
# If an image has zero exposure time we will
# redefine that value as '1'. Although this will cause inaccurate
# scaling of the data to occur in the 'minmed' combination
        # algorithm, this is a necessary evil since it avoids divide by
# zero exceptions. It is more important that the divide by zero
# exceptions not cause AstroDrizzle to crash in the pipeline than
# it is to raise an exception for this obviously bad data even
# though this is not the type of data you would wish to process
# with AstroDrizzle.
#
# Get the exposure time from the InputImage object
#
# MRD 19-May-2011
# Changed exposureTimeList to take exposure time from img_exptime
        # variable instead of the image._exptime attribute, since
# image._exptime was just giving 1.
#
exposureTimeList.append(img_exptime)
# Use only "commanded" chips to extract subtractedSky and rdnoise:
rdnoise = 0.0
nchips = 0
bsky = None # minimum sky across **used** chips
for chip in image.returnAllChips(extname=image.scienceExt):
# compute sky value as sky/pixel using the single_drz
# pixel scale:
if bsky is None or bsky > chip.subtractedSky:
bsky = chip.subtractedSky * chip._conversionFactor
# Extract the readnoise value for the chip
rdnoise += chip._rdnoise**2
nchips += 1
if bsky is None:
bsky = 0.0
if nchips > 0:
rdnoise = math.sqrt(rdnoise/nchips)
backgroundValueList.append(bsky)
readnoiseList.append(rdnoise)
print("reference sky value for image '{}' is {}"
.format(image._filename, backgroundValueList[-1]))
#
# END Loop over input image list
#
# create an array for the median output image, use the size of the first
# image in the list. Store other useful image characteristics:
single_driz_data = singleDrizList[0].data
data_item_size = single_driz_data.itemsize
single_data_dtype = single_driz_data.dtype
imrows, imcols = single_driz_data.shape
medianImageArray = np.zeros_like(single_driz_data)
del single_driz_data
if comb_type == "minmed" and not newmasks:
# Issue a warning if minmed is being run with newmasks turned off.
print('\nWARNING: Creating median image without the application of '
'bad pixel masks!\n')
# The overlap value needs to be set to 2*grow in order to
# avoid edge effects when scrolling down the image, and to
    # ensure that the last section returned from the iterator
# has enough rows to span the kernel used in the boxcar method
# within minmed.
overlap = 2 * grow
buffsize = BUFSIZE if bufsizeMB is None else (BUFSIZE * bufsizeMB)
section_nrows = min(imrows, int(buffsize / (imcols * data_item_size)))
if section_nrows == 0:
buffsize = imcols * data_item_size
        print("WARNING: Buffer size is too small to hold a single row.\n"
              " Buffer size will be increased to the minimal "
"required: {}MB".format(float(buffsize) / 1048576.0))
section_nrows = 1
if section_nrows < overlap + 1:
new_grow = int((section_nrows - 1) / 2)
if section_nrows == imrows:
print("'grow' parameter is too large for actual image size. "
"Reducing 'grow' to {}".format(new_grow))
else:
print("'grow' parameter is too large for requested buffer size. "
"Reducing 'grow' to {}".format(new_grow))
grow = new_grow
overlap = 2 * grow
nbr = section_nrows - overlap
nsec = (imrows - overlap) // nbr
if (imrows - overlap) % nbr > 0:
nsec += 1
for k in range(nsec):
e1 = k * nbr
e2 = e1 + section_nrows
u1 = grow
u2 = u1 + nbr
if k == 0: # first section
u1 = 0
if k == nsec - 1: # last section
e2 = min(e2, imrows)
e1 = min(e1, e2 - overlap - 1)
u2 = e2 - e1
imdrizSectionsList = np.empty(
(len(singleDrizList), e2 - e1, imcols),
dtype=single_data_dtype
)
for i, w in enumerate(singleDrizList):
imdrizSectionsList[i, :, :] = w[e1:e2]
if singleWeightList:
weightSectionsList = np.empty(
(len(singleWeightList), e2 - e1, imcols),
dtype=single_data_dtype
)
for i, w in enumerate(singleWeightList):
weightSectionsList[i, :, :] = w[e1:e2]
else:
weightSectionsList = None
weight_mask_list = None
if newmasks and weightSectionsList is not None:
# Build new masks from single drizzled images.
# Generate new pixel mask file for median step.
# This mask will be created from the single-drizzled
# weight image for this image.
# The mean of the weight array will be computed and all
# pixels with values less than 0.7 of the mean will be flagged
# as bad in this mask. This mask will then be used when
# creating the median image.
# 0 means good, 1 means bad here...
weight_mask_list = np.less(
weightSectionsList,
np.asarray(wht_mean)[:, None, None]
).astype(np.uint8)
if 'minmed' in comb_type: # Do MINMED
# set up use of 'imedian'/'imean' in minmed algorithm
fillval = comb_type.startswith('i')
# Create the combined array object using the minmed algorithm
result = min_med(
imdrizSectionsList,
weightSectionsList,
readnoiseList,
exposureTimeList,
backgroundValueList,
weight_masks=weight_mask_list,
combine_grow=grow,
combine_nsigma1=nsigma1,
combine_nsigma2=nsigma2,
fillval=fillval
)
else: # DO NUMCOMBINE
# Create the combined array object using the numcombine task
result = numcombine.num_combine(
imdrizSectionsList,
masks=weight_mask_list,
combination_type=comb_type,
nlow=nlow,
nhigh=nhigh,
upper=hthresh,
lower=lthresh
)
# Write out the processed image sections to the final output array:
medianImageArray[e1+u1:e1+u2, :] = result[u1:u2, :]
# Write out the combined image
# use the header from the first single drizzled image in the list
pf = _writeImage(medianImageArray, inputHeader=single_hdr)
if virtual:
mediandict = {}
mediandict[medianfile] = pf
for img in imageObjectList:
img.saveVirtualOutputs(mediandict)
else:
try:
print("Saving output median image to: '{}'".format(medianfile))
pf.writeto(medianfile)
except IOError:
msg = "Problem writing file '{}'".format(medianfile)
print(msg)
raise IOError(msg)
# Always close any files opened to produce median image; namely,
# single drizzle images and singly-drizzled weight images
#
for img in singleDrizList:
if not virtual:
img.close()
# Close all singly drizzled weight images used to create median image.
for img in singleWeightList:
if not virtual:
img.close() | 0.000076 |
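A minimal, standalone sketch (an editor's illustration, not part of the row above) of the weight-masking idea described in the comments: pixels whose single-drizzle weight falls below `maskpt` times the mean weight are flagged as bad before the sections are combined. A numpy masked median stands in here for the `min_med`/`num_combine` step; the array shapes and the `maskpt` value are assumptions.

import numpy as np

# Illustrative inputs: 3 input images, 64x64 sections (assumed shapes).
rng = np.random.default_rng(0)
data = rng.random((3, 64, 64)).astype(np.float32)     # science sections
weights = rng.random((3, 64, 64)).astype(np.float32)  # weight sections
maskpt = 0.7                                          # assumed mask threshold

# Per-image threshold: maskpt * mean weight of that image.
wht_mean = weights.reshape(weights.shape[0], -1).mean(axis=1) * maskpt

# True marks a bad pixel whose weight is below the threshold; False marks good.
weight_masks = np.less(weights, wht_mean[:, None, None])

# Stand-in combination step: median over images, ignoring flagged pixels.
median_section = np.ma.median(np.ma.array(data, mask=weight_masks), axis=0).filled(0.0)
print(median_section.shape)  # (64, 64)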
def aliased_as(self, name):
"""
Create an alias of this stream.
Returns an alias of this stream with name `name`.
When invocation of an SPL operator requires an
:py:class:`~streamsx.spl.op.Expression` against
an input port this can be used to ensure expression
matches the input port alias regardless of the name
of the actual stream.
Example use where the filter expression for a ``Filter`` SPL operator
uses ``IN`` to access input tuple attribute ``seq``::
s = ...
s = s.aliased_as('IN')
params = {'filter': op.Expression.expression('IN.seq % 4ul == 0ul')}
f = op.Map('spl.relational::Filter', stream, params = params)
Args:
name(str): Name for returned stream.
Returns:
Stream: Alias of this stream with ``name`` equal to `name`.
.. versionadded:: 1.9
"""
stream = copy.copy(self)
stream._alias = name
return stream | 0.003839 |
def _new_stream(self, idx):
'''Randomly select and create a new stream.
Parameters
----------
idx : int, [0:n_streams - 1]
The stream index to replace
'''
# Don't activate the stream if the weight is 0 or None
if self.stream_weights_[idx]:
self.streams_[idx] = self.streamers[idx].iterate()
else:
self.streams_[idx] = None
# Reset the sample count to zero
self.stream_counts_[idx] = 0 | 0.003984 |
def iteration(self, node_status=True):
"""
Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status)
"""
# One iteration changes the opinion of N agent pairs using the following procedure:
# - first one agent is selected
# - then a second agent is selected based on a probability that decreases with the distance to the first agent
# - if the two agents have a distance smaller than epsilon, then they change their status to the average of
# their previous statuses
self.clean_initial_status(None)
actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(self.status)
if node_status:
return {"iteration": 0, "status": self.status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
# interact with peers
for i in range(0, self.graph.number_of_nodes()):
# select a random node
n1 = list(self.graph.nodes())[np.random.randint(0, self.graph.number_of_nodes())]
# select all of the node's neighbours (no digraph possible)
neighbours = list(self.graph.neighbors(n1))
if len(neighbours) == 0:
continue
# compute probabilities to select a second node among the neighbours
selection_prob = np.array([self.prob(np.abs(actual_status[neighbours[i]]-actual_status[n1]),
self.params['model']['gamma'],0.00001) for i in range(len(neighbours))])
selection_prob = selection_prob/np.sum(selection_prob)
cumulative_selection_probability = np.cumsum(selection_prob)
            # select the second node based on the selection probabilities above
r = np.random.random_sample()
n2 = 0
while cumulative_selection_probability[n2] < r:
n2 = n2+1
n2 = neighbours[n2]
# update status of n1 and n2
diff = np.abs(actual_status[n1]-actual_status[n2])
if diff < self.params['model']['epsilon']:
avg = (actual_status[n1]+actual_status[n2])/2.0
actual_status[n1] = avg
actual_status[n2] = avg
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()} | 0.006266 |
def qualified_note_rate(pianoroll, threshold=2):
    """Return the ratio of the number of qualified notes (notes longer than
    `threshold` time steps) to the total number of notes in a pianoroll."""
_validate_pianoroll(pianoroll)
if np.issubdtype(pianoroll.dtype, np.bool_):
pianoroll = pianoroll.astype(np.uint8)
padded = np.pad(pianoroll, ((1, 1), (0, 0)), 'constant')
diff = np.diff(padded, axis=0).reshape(-1)
onsets = (diff > 0).nonzero()[0]
offsets = (diff < 0).nonzero()[0]
n_qualified_notes = np.count_nonzero(offsets - onsets >= threshold)
return n_qualified_notes / len(onsets) | 0.00157 |
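A small worked example of the onset/offset bookkeeping used above, assuming a toy single-pitch pianoroll: padding with a zero row at each end makes every note contribute exactly one positive and one negative step to the column-wise diff.

import numpy as np

# Toy single-pitch pianoroll: a 3-step note followed by a 1-step note.
pianoroll = np.array([[1], [1], [1], [0], [1], [0]], dtype=np.uint8)

padded = np.pad(pianoroll, ((1, 1), (0, 0)), 'constant')
diff = np.diff(padded, axis=0).reshape(-1)
onsets = (diff > 0).nonzero()[0]    # array([0, 4])
offsets = (diff < 0).nonzero()[0]   # array([3, 5])
# With threshold=2, only the 3-step note qualifies: rate = 1/2.
print(np.count_nonzero(offsets - onsets >= 2) / len(onsets))  # 0.5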
def format_results(self, results):
"""
        Format the LDAP results object into something reasonable
"""
if not results:
return None
userdn = results[0][0]
userobj = results[0][1]
userobj['dn'] = userdn
keymap = self.config.get('KEY_MAP')
if keymap:
return {key:scalar(userobj.get(value)) for key, value in keymap.items() if _is_utf8(scalar(userobj.get(value))) }
else:
return {key:scalar(value) for key, value in userobj.items() if _is_utf8(scalar(value)) } | 0.013865 |
def twopercent(station_code):
"""Two percent high design temperature for a location.
    Degrees in Celsius
Args:
station_code (str): Weather Station Code
Returns:
float degrees Celcius
"""
# (DB=>MWB) 2%, MaxDB=
temp = None
try:
fin = open('%s/%s' % (env.WEATHER_DATA_PATH,
_basename(station_code, 'ddy')))
for line in fin:
value = re.search("""2%, MaxDB=(\\d+\\.\\d*)""", line)
if value:
temp = float(value.groups()[0])
except IOError:
pass
if not temp:
# (DB=>MWB) 2%, MaxDB=
try:
fin = open('%s/%s' % (env.WEATHER_DATA_PATH,
_basename(station_code, 'stat')))
flag = 0
tdata = []
for line in fin:
                if line.find('2%') != -1:
flag = 3
if flag > 0:
tdata.append(line.split('\t'))
flag -= 1
temp = float(tdata[2][5].strip())
except IOError:
pass
if temp:
return temp
else:
raise Exception("Error: 2% High Temperature not found") | 0.000816 |
def overrepresented_sequences (self):
"""Sum the percentages of overrepresented sequences and display them in a bar plot"""
data = dict()
for s_name in self.fastqc_data:
data[s_name] = dict()
try:
max_pcnt = max( [ float(d['percentage']) for d in self.fastqc_data[s_name]['overrepresented_sequences']] )
total_pcnt = sum( [ float(d['percentage']) for d in self.fastqc_data[s_name]['overrepresented_sequences']] )
data[s_name]['total_overrepresented'] = total_pcnt
data[s_name]['top_overrepresented'] = max_pcnt
data[s_name]['remaining_overrepresented'] = total_pcnt - max_pcnt
except KeyError:
if self.fastqc_data[s_name]['statuses']['overrepresented_sequences'] == 'pass':
data[s_name]['total_overrepresented'] = 0
data[s_name]['top_overrepresented'] = 0
data[s_name]['remaining_overrepresented'] = 0
else:
log.debug("Couldn't find data for {}, invalid Key".format(s_name))
cats = OrderedDict()
cats['top_overrepresented'] = { 'name': 'Top over-represented sequence' }
cats['remaining_overrepresented'] = { 'name': 'Sum of remaining over-represented sequences' }
# Config for the plot
pconfig = {
'id': 'fastqc_overrepresented_sequencesi_plot',
'title': 'FastQC: Overrepresented sequences',
'ymin': 0,
'yCeiling': 100,
'yMinRange': 20,
'tt_decimals': 2,
'tt_suffix': '%',
'tt_percentages': False,
'ylab_format': '{value}%',
'cpswitch': False,
'ylab': 'Percentage of Total Sequences'
}
# Check if any samples have more than 1% overrepresented sequences, else don't make plot.
if max([ x['total_overrepresented'] for x in data.values()]) < 1:
plot_html = '<div class="alert alert-info">{} samples had less than 1% of reads made up of overrepresented sequences</div>'.format(len(data))
else:
plot_html = bargraph.plot(data, cats, pconfig)
self.add_section (
name = 'Overrepresented sequences',
anchor = 'fastqc_overrepresented_sequences',
description = 'The total amount of overrepresented sequences found in each library.',
helptext = '''
FastQC calculates and lists overrepresented sequences in FastQ files. It would not be
possible to show this for all samples in a MultiQC report, so instead this plot shows
the _number of sequences_ categorized as over represented.
Sometimes, a single sequence may account for a large number of reads in a dataset.
To show this, the bars are split into two: the first shows the overrepresented reads
that come from the single most common sequence. The second shows the total count
from all remaining overrepresented sequences.
From the [FastQC Help](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/9%20Overrepresented%20Sequences.html):
_A normal high-throughput library will contain a diverse set of sequences, with no
individual sequence making up a tiny fraction of the whole. Finding that a single
sequence is very overrepresented in the set either means that it is highly biologically
significant, or indicates that the library is contaminated, or not as diverse as you expected._
_FastQC lists all of the sequences which make up more than 0.1% of the total.
To conserve memory only sequences which appear in the first 100,000 sequences are tracked
to the end of the file. It is therefore possible that a sequence which is overrepresented
but doesn't appear at the start of the file for some reason could be missed by this module._
''',
plot = plot_html
) | 0.012661 |
def tuple_to_schema(tuple_):
"""Convert a tuple representing an XML data structure into a schema tuple
that can be used in the ``.schema`` property of a sub-class of
PREMISElement.
"""
schema = []
for element in tuple_:
if isinstance(element, (tuple, list)):
try:
if isinstance(element[1], six.string_types):
schema.append((element[0],))
else:
schema.append(tuple_to_schema(element))
except IndexError:
schema.append((element[0],))
else:
schema.append(element)
return tuple(schema) | 0.001541 |
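A hedged usage sketch of the conversion above (assuming `tuple_to_schema` and its `six` dependency are importable): entries whose second element is a string collapse to one-element tuples, while nested tuples recurse. The sample input is invented for illustration.

sample = (
    'objectIdentifier',
    ('objectIdentifierType', 'UUID'),
    ('significantProperties', ('significantPropertiesType', 'type')),
)
print(tuple_to_schema(sample))
# ('objectIdentifier', ('objectIdentifierType',),
#  ('significantProperties', ('significantPropertiesType',)))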
def colors(lang="en"):
"""This resource returns all dyes in the game, including localized names
and their color component information.
:param lang: The language to query the names for.
    The response is a dictionary where color ids are mapped to a dictionary
containing the following properties:
name (string):
The name of the dye.
base_rgb (list):
The base RGB values.
cloth (object):
Detailed information on its appearance when applied on cloth armor.
leather (object):
Detailed information on its appearance when applied on leather armor.
metal (object):
Detailed information on its appearance when applied on metal armor.
The detailed information object contains the following properties:
brightness (number):
The brightness.
contrast (number):
The contrast.
hue (number):
The hue in the HSL colorspace.
saturation (number):
The saturation in the HSL colorspace.
lightness (number):
The lightness in the HSL colorspace.
rgb (list):
A list containing precalculated RGB values.
"""
cache_name = "colors.%s.json" % lang
data = get_cached("colors.json", cache_name, params=dict(lang=lang))
return data["colors"] | 0.000771 |
def get_changelog(project_dir=os.curdir, bugtracker_url='', rpm_format=False):
"""
Retrieves the changelog, from the CHANGELOG file (if in a package) or
generates it from the git history. Optionally in rpm-compatible format.
:param project_dir: Path to the git repo of the project.
:type project_dir: str
:param bugtracker_url: Url to the bug tracker for the issues.
:type bugtracker_url: str
:param rpm_format: if set to True, will make the changelog rpm-compatible
:returns: changelog
:rtype: str
    :raises RuntimeError: If the changelog could not be retrieved
"""
changelog = ''
pkg_info_file = os.path.join(project_dir, 'PKG-INFO')
changelog_file = os.path.join(project_dir, 'CHANGELOG')
if os.path.exists(pkg_info_file) and os.path.exists(changelog_file):
with open(changelog_file) as changelog_fd:
changelog = changelog_fd.read()
else:
changelog = api.get_changelog(
repo_path=project_dir,
bugtracker_url=bugtracker_url,
rpm_format=rpm_format,
)
return changelog | 0.000899 |
def main(command, argument, argument2, paths_to_mutate, backup, runner, tests_dir,
test_time_multiplier, test_time_base,
swallow_output, use_coverage, dict_synonyms, cache_only, version,
suspicious_policy, untested_policy, pre_mutation, post_mutation,
         use_patch_file):
    """Return the exit code after performing a mutation test run.
:return: the exit code from executing the mutation tests
:rtype: int
"""
if version:
print("mutmut version %s" % __version__)
return 0
if use_coverage and use_patch_file:
raise click.BadArgumentUsage("You can't combine --use-coverage and --use-patch")
valid_commands = ['run', 'results', 'apply', 'show', 'junitxml']
if command not in valid_commands:
raise click.BadArgumentUsage('%s is not a valid command, must be one of %s' % (command, ', '.join(valid_commands)))
if command == 'results' and argument:
raise click.BadArgumentUsage('The %s command takes no arguments' % command)
dict_synonyms = [x.strip() for x in dict_synonyms.split(',')]
if command in ('show', 'diff'):
if not argument:
print_result_cache()
return 0
if argument == 'all':
print_result_cache(show_diffs=True, dict_synonyms=dict_synonyms, print_only_filename=argument2)
return 0
print(get_unified_diff(argument, dict_synonyms))
return 0
if use_coverage and not exists('.coverage'):
raise FileNotFoundError('No .coverage file found. You must generate a coverage file to use this feature.')
if command == 'results':
print_result_cache()
return 0
if command == 'junitxml':
print_result_cache_junitxml(dict_synonyms, suspicious_policy, untested_policy)
return 0
if command == 'apply':
do_apply(argument, dict_synonyms, backup)
return 0
paths_to_mutate = get_or_guess_paths_to_mutate(paths_to_mutate)
if not isinstance(paths_to_mutate, (list, tuple)):
paths_to_mutate = [x.strip() for x in paths_to_mutate.split(',')]
if not paths_to_mutate:
raise click.BadOptionUsage('--paths-to-mutate', 'You must specify a list of paths to mutate. Either as a command line argument, or by setting paths_to_mutate under the section [mutmut] in setup.cfg')
tests_dirs = []
for p in tests_dir.split(':'):
tests_dirs.extend(glob(p, recursive=True))
for p in paths_to_mutate:
for pt in tests_dir.split(':'):
tests_dirs.extend(glob(p + '/**/' + pt, recursive=True))
del tests_dir
os.environ['PYTHONDONTWRITEBYTECODE'] = '1' # stop python from creating .pyc files
using_testmon = '--testmon' in runner
print("""
- Mutation testing starting -
These are the steps:
1. A full test suite run will be made to make sure we
can run the tests successfully and we know how long
it takes (to detect infinite loops for example)
2. Mutants will be generated and checked
Results are stored in .mutmut-cache.
Print found mutants with `mutmut results`.
Legend for output:
🎉 Killed mutants. The goal is for everything to end up in this bucket.
⏰ Timeout. The test suite took 10 times as long as the baseline, so these mutants were killed.
🤔 Suspicious. Tests took a long time, but not long enough to be fatal.
🙁 Survived. This means your tests need to be expanded.
""")
baseline_time_elapsed = time_test_suite(
swallow_output=not swallow_output,
test_command=runner,
using_testmon=using_testmon
)
if using_testmon:
copy('.testmondata', '.testmondata-initial')
# if we're running in a mode with externally whitelisted lines
if use_coverage or use_patch_file:
covered_lines_by_filename = {}
if use_coverage:
coverage_data = read_coverage_data()
else:
assert use_patch_file
covered_lines_by_filename = read_patch_data(use_patch_file)
coverage_data = None
def _exclude(context):
try:
covered_lines = covered_lines_by_filename[context.filename]
except KeyError:
if coverage_data is not None:
covered_lines = coverage_data.lines(os.path.abspath(context.filename))
covered_lines_by_filename[context.filename] = covered_lines
else:
covered_lines = None
if covered_lines is None:
return True
current_line = context.current_line_index + 1
if current_line not in covered_lines:
return True
return False
else:
def _exclude(context):
del context
return False
if command != 'run':
raise click.BadArgumentUsage("Invalid command %s" % command)
mutations_by_file = {}
if argument is None:
for path in paths_to_mutate:
for filename in python_source_files(path, tests_dirs):
update_line_numbers(filename)
add_mutations_by_file(mutations_by_file, filename, _exclude, dict_synonyms)
else:
filename, mutation_id = filename_and_mutation_id_from_pk(int(argument))
mutations_by_file[filename] = [mutation_id]
total = sum(len(mutations) for mutations in mutations_by_file.values())
print()
print('2. Checking mutants')
config = Config(
swallow_output=not swallow_output,
test_command=runner,
exclude_callback=_exclude,
baseline_time_elapsed=baseline_time_elapsed,
backup=backup,
dict_synonyms=dict_synonyms,
total=total,
using_testmon=using_testmon,
cache_only=cache_only,
tests_dirs=tests_dirs,
hash_of_tests=hash_of_tests(tests_dirs),
test_time_multiplier=test_time_multiplier,
test_time_base=test_time_base,
pre_mutation=pre_mutation,
post_mutation=post_mutation,
)
try:
run_mutation_tests(config=config, mutations_by_file=mutations_by_file)
except Exception as e:
traceback.print_exc()
return compute_exit_code(config, e)
else:
return compute_exit_code(config)
finally:
print() | 0.002069 |
def live_scores(self, live_scores):
"""Store output of live scores to a CSV file"""
headers = ['League', 'Home Team Name', 'Home Team Goals',
'Away Team Goals', 'Away Team Name']
result = [headers]
result.extend([game['league'], game['homeTeamName'],
game['goalsHomeTeam'], game['goalsAwayTeam'],
game['awayTeamName']] for game in live_scores['games'])
self.generate_output(result) | 0.004115 |
def new_inner_member(self, name=None, params=None):
"""Create a CheckModulation object and add it to items
:param name: CheckModulation name
:type name: str
:param params: parameters to init CheckModulation
:type params: dict
:return: None
        TODO: Remove this default mutable argument. It usually results in unexpected behavior.
"""
if name is None:
name = 'Generated_checkmodulation_%s' % uuid.uuid4()
if params is None:
params = {}
params['checkmodulation_name'] = name
checkmodulation = CheckModulation(params)
self.add_item(checkmodulation) | 0.004491 |
def command(func_or_args=None):
"""Decorator to tell Skal that the method/function is a command.
"""
def decorator(f):
f.__args__ = args
return f
if type(func_or_args) == type(decorator):
args = {}
return decorator(func_or_args)
args = func_or_args
return decorator | 0.003106 |
def data_manipulation_sh(network):
""" Adds missing components to run calculations with SH scenarios.
Parameters
----------
network : :class:`pypsa.Network
Overall container of PyPSA
"""
from shapely.geometry import Point, LineString, MultiLineString
from geoalchemy2.shape import from_shape, to_shape
# add connection from Luebeck to Siems
new_bus = str(network.buses.index.astype(np.int64).max() + 1)
new_trafo = str(network.transformers.index.astype(np.int64).max() + 1)
new_line = str(network.lines.index.astype(np.int64).max() + 1)
network.add("Bus", new_bus, carrier='AC',
v_nom=220, x=10.760835, y=53.909745)
network.add("Transformer", new_trafo, bus0="25536",
bus1=new_bus, x=1.29960, tap_ratio=1, s_nom=1600)
network.add("Line", new_line, bus0="26387",
bus1=new_bus, x=0.0001, s_nom=1600)
network.lines.loc[new_line, 'cables'] = 3.0
# bus geom
point_bus1 = Point(10.760835, 53.909745)
network.buses.set_value(new_bus, 'geom', from_shape(point_bus1, 4326))
# line geom/topo
network.lines.set_value(new_line, 'geom', from_shape(MultiLineString(
[LineString([to_shape(network.
buses.geom['26387']), point_bus1])]), 4326))
network.lines.set_value(new_line, 'topo', from_shape(LineString(
[to_shape(network.buses.geom['26387']), point_bus1]), 4326))
# trafo geom/topo
network.transformers.set_value(new_trafo,
'geom', from_shape(MultiLineString(
[LineString(
[to_shape(network
.buses.geom['25536']),
point_bus1])]), 4326))
network.transformers.set_value(new_trafo, 'topo', from_shape(
LineString([to_shape(network.buses.geom['25536']), point_bus1]), 4326))
return | 0.001494 |
def EnumerateFilesystemsFromClient(args):
"""List all local filesystems mounted on this system."""
del args # Unused.
for fs_struct in client_utils_osx.GetFileSystems():
yield rdf_client_fs.Filesystem(
device=fs_struct.f_mntfromname,
mount_point=fs_struct.f_mntonname,
type=fs_struct.f_fstypename)
drive_re = re.compile("r?disk[0-9].*")
for drive in os.listdir("/dev"):
if not drive_re.match(drive):
continue
path = os.path.join("/dev", drive)
try:
img_inf = pytsk3.Img_Info(path)
# This is a volume or a partition - we send back a TSK device.
yield rdf_client_fs.Filesystem(device=path)
vol_inf = pytsk3.Volume_Info(img_inf)
for volume in vol_inf:
if volume.flags == pytsk3.TSK_VS_PART_FLAG_ALLOC:
offset = volume.start * vol_inf.info.block_size
yield rdf_client_fs.Filesystem(
device="{path}:{offset}".format(path=path, offset=offset),
type="partition")
except (IOError, RuntimeError):
continue | 0.014231 |
def _check_items(items):
'''
Check:
- that the list of items is iterable and finite.
- that the list is not empty
'''
num_items = 0
# Check that the list is iterable and finite
try:
num_items = len(items)
except:
raise TypeError("The item list ({}) is not a finite iterable (has no length)".format(items))
# Check that the list has items
if num_items == 0:
raise ValueError("The item list is empty.") | 0.006383 |
def add_user(self, group, username):
"""
Add a user to the specified LDAP group.
Args:
group: Name of group to update
username: Username of user to add
Raises:
ldap_tools.exceptions.InvalidResult:
Results of the query were invalid. The actual exception raised
inherits from InvalidResult. See #lookup_id for more info.
"""
try:
self.lookup_id(group)
except ldap_tools.exceptions.InvalidResult as err: # pragma: no cover
raise err from None
operation = {'memberUid': [(ldap3.MODIFY_ADD, [username])]}
self.client.modify(self.__distinguished_name(group), operation) | 0.002725 |
def release_node(self, node):
"""
release a single redis node
"""
# use the lua script to release the lock in a safe way
try:
node._release_script(keys=[self.resource], args=[self.lock_key])
except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError):
pass | 0.008798 |
def factorize(self):
""" Factorize s.t. CUR = data
Updated Values
--------------
.C : updated values for C.
.U : updated values for U.
.R : updated values for R.
"""
[prow, pcol] = self.sample_probability()
self._rid = self.sample(self._rrank, prow)
self._cid = self.sample(self._crank, pcol)
self._cmdinit()
self.computeUCR() | 0.004474 |
def delete_existing_policy(self, scaling_policy, server_group):
"""Given a scaling_policy and server_group, deletes the existing scaling_policy.
Scaling policies need to be deleted instead of upserted for consistency.
Args:
scaling_policy (json): the scaling_policy json from Spinnaker that should be deleted
server_group (str): the affected server_group
"""
self.log.info("Deleting policy %s on %s", scaling_policy['policyName'], server_group)
delete_dict = {
"application":
self.app,
"description":
"Delete scaling policy",
"job": [{
"policyName": scaling_policy['policyName'],
"serverGroupName": server_group,
"credentials": self.env,
"region": self.region,
"provider": "aws",
"type": "deleteScalingPolicy",
"user": "foremast-autoscaling-policy"
}]
}
wait_for_task(json.dumps(delete_dict)) | 0.005634 |
def listing(self):
"Return a list of filename entries currently in the archive"
return ['.'.join([f,ext]) if ext else f for (f,ext) in self._files.keys()] | 0.029412 |
def shape(self):
"""Returns the shape of the data."""
# TODO cache
first = self.first().shape
shape = self._rdd.map(lambda x: x.shape[0]).sum()
return (shape,) + first[1:] | 0.009479 |
def mass_2d(self, r, rho0, Ra, Rs):
"""
        mass enclosed within a projected 2d sphere of radius r
:param r:
:param rho0:
:param Ra:
:param Rs:
:return:
"""
Ra, Rs = self._sort_ra_rs(Ra, Rs)
sigma0 = self.rho2sigma(rho0, Ra, Rs)
m_2d = 2 * np.pi * sigma0 * Ra * Rs / (Rs - Ra) * (np.sqrt(Ra ** 2 + r ** 2) - Ra - np.sqrt(Rs ** 2 + r ** 2) + Rs)
return m_2d | 0.006834 |
def debye_temperature(self, volume):
"""
Calculates the debye temperature.
Eq(6) in doi.org/10.1016/j.comphy.2003.12.001. Thanks to Joey.
Eq(6) above is equivalent to Eq(3) in doi.org/10.1103/PhysRevB.37.790
which does not consider anharmonic effects. Eq(20) in the same paper
and Eq(18) in doi.org/10.1016/j.commatsci.2009.12.006 both consider
anharmonic contributions to the Debye temperature through the Gruneisen
parameter at 0K (Gruneisen constant).
The anharmonic contribution is toggled by setting the anharmonic_contribution
to True or False in the QuasiharmonicDebyeApprox constructor.
Args:
volume (float): in Ang^3
Returns:
float: debye temperature in K
"""
term1 = (2./3. * (1. + self.poisson) / (1. - 2. * self.poisson))**1.5
term2 = (1./3. * (1. + self.poisson) / (1. - self.poisson))**1.5
f = (3. / (2. * term1 + term2))**(1. / 3.)
debye = 2.9772e-11 * (volume / self.natoms) ** (-1. / 6.) * f * \
np.sqrt(self.bulk_modulus/self.avg_mass)
if self.anharmonic_contribution:
gamma = self.gruneisen_parameter(0, self.ev_eos_fit.v0) # 0K equilibrium Gruneisen parameter
return debye * (self.ev_eos_fit.v0 / volume) ** (gamma)
else:
return debye | 0.003605 |
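A quick, standalone check of the Poisson-ratio factor used in the expression above, assuming ν = 0.25 (a typical value for an isotropic solid); the remaining factors of the Debye temperature are left out.

poisson = 0.25  # assumed Poisson ratio
term1 = (2. / 3. * (1. + poisson) / (1. - 2. * poisson)) ** 1.5
term2 = (1. / 3. * (1. + poisson) / (1. - poisson)) ** 1.5
f = (3. / (2. * term1 + term2)) ** (1. / 3.)
print(round(f, 3))  # ~0.86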
def list_all_currencies(cls, **kwargs):
"""List Currencies
Return a list of Currencies
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_currencies(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[Currency]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_currencies_with_http_info(**kwargs)
else:
(data) = cls._list_all_currencies_with_http_info(**kwargs)
return data | 0.002307 |
def files(self, absolute=False):
"""Returns an expanded list of all valid paths that were passed in."""
_paths = []
for arg in self.all:
for path in _expand_path(arg):
if os.path.exists(path):
if absolute:
_paths.append(os.path.abspath(path))
else:
_paths.append(path)
return _paths | 0.004619 |
def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
"""Revoke an access or refresh token.
"""
if token_type_hint:
tok = self._tokengetter(**{token_type_hint: token})
else:
tok = self._tokengetter(access_token=token)
if not tok:
tok = self._tokengetter(refresh_token=token)
if tok:
request.client_id = tok.client_id
request.user = tok.user
tok.delete()
return True
msg = 'Invalid token supplied.'
log.debug(msg)
request.error_message = msg
return False | 0.003091 |
def show_lbaas_l7rule(self, l7rule, l7policy, **_params):
"""Fetches information of a certain L7 policy's rule."""
return self.get(self.lbaas_l7rule_path % (l7policy, l7rule),
params=_params) | 0.008658 |
def add_pipers(self, pipers, *args, **kwargs):
"""
Adds a sequence of ``Pipers`` instances to the ``Dagger`` in the
specified order. Takes optional arguments for ``Dagger.add_piper``.
Arguments:
- pipers(sequence of valid ``add_piper`` arguments) Sequence of
``Pipers`` or valid ``Dagger.add_piper`` arguments to be added to
the ``Dagger`` in the left to right order.
"""
for piper in pipers:
self.add_piper(piper, *args, **kwargs) | 0.014311 |
def _flush_message_buffer(self):
"""
Loops through the transport message_buffer until there are no messages
left in the queue. Each response is assigned to the Request object
based on the message_id which are then available in
self.outstanding_requests
"""
while True:
message_bytes = self.transport.receive()
            # there were no messages received, so break from the loop
if message_bytes is None:
break
# check if the message is encrypted and decrypt if necessary
if message_bytes[:4] == b"\xfdSMB":
message = SMB2TransformHeader()
message.unpack(message_bytes)
message_bytes = self._decrypt(message)
# now retrieve message(s) from response
is_last = False
session_id = None
while not is_last:
next_command = struct.unpack("<L", message_bytes[20:24])[0]
header_length = \
next_command if next_command != 0 else len(message_bytes)
header_bytes = message_bytes[:header_length]
message = SMB2HeaderResponse()
message.unpack(header_bytes)
flags = message['flags']
if not flags.has_flag(Smb2Flags.SMB2_FLAGS_RELATED_OPERATIONS):
session_id = message['session_id'].get_value()
self._verify(message, session_id)
message_id = message['message_id'].get_value()
request = self.outstanding_requests.get(message_id, None)
if not request:
error_msg = "Received response with an unknown message " \
"ID: %d" % message_id
raise smbprotocol.exceptions.SMBException(error_msg)
# add the upper credit limit based on the credits granted by
# the server
credit_response = message['credit_response'].get_value()
self.sequence_window['high'] += \
credit_response if credit_response > 0 else 1
request.response = message
self.outstanding_requests[message_id] = request
message_bytes = message_bytes[header_length:]
is_last = next_command == 0 | 0.000838 |
async def get_capability_report(self):
"""
This method retrieves the Firmata capability report.
Refer to http://firmata.org/wiki/Protocol#Capability_Query
The command format is: {"method":"get_capability_report","params":["null"]}
:returns: {"method": "capability_report_reply", "params": [RAW_CAPABILITY_REPORT]}
"""
value = await self.core.get_capability_report()
await asyncio.sleep(.1)
if value:
reply = json.dumps({"method": "capability_report_reply", "params": value})
else:
reply = json.dumps({"method": "capability_report_reply", "params": "None"})
await self.websocket.send(reply) | 0.008523 |
def _generate_citation_dict(graph: BELGraph) -> Mapping[str, Mapping[Tuple[BaseEntity, BaseEntity], str]]:
"""Prepare a citation data dictionary from a graph.
    :return: A dictionary of dictionaries {citation type: {(source, target): citation reference}}
"""
results = defaultdict(lambda: defaultdict(set))
for u, v, data in graph.edges(data=True):
if CITATION not in data:
continue
results[data[CITATION][CITATION_TYPE]][u, v].add(data[CITATION][CITATION_REFERENCE].strip())
return dict(results) | 0.007299 |
def to_short_time_string(self) -> str:
""" Return the iso time string only """
hour = self.time.hour
minute = self.time.minute
return f"{hour:02}:{minute:02}" | 0.010526 |
async def postback_send(msg: BaseMessage, platform: Platform) -> Response:
"""
Injects the POST body into the FSM as a Postback message.
"""
await platform.inject_message(msg)
return json_response({
'status': 'ok',
}) | 0.003984 |
def _defrag_list(lst, defrag, missfrag):
"""Internal usage only. Part of the _defrag_logic"""
p = lst[0]
lastp = lst[-1]
if p.frag > 0 or lastp.flags.MF: # first or last fragment missing
missfrag.append(lst)
return
p = p.copy()
if conf.padding_layer in p:
del(p[conf.padding_layer].underlayer.payload)
ip = p[IP]
if ip.len is None or ip.ihl is None:
clen = len(ip.payload)
else:
clen = ip.len - (ip.ihl << 2)
txt = conf.raw_layer()
for q in lst[1:]:
if clen != q.frag << 3: # Wrong fragmentation offset
if clen > q.frag << 3:
warning("Fragment overlap (%i > %i) %r || %r || %r" % (clen, q.frag << 3, p, txt, q)) # noqa: E501
missfrag.append(lst)
break
if q[IP].len is None or q[IP].ihl is None:
clen += len(q[IP].payload)
else:
clen += q[IP].len - (q[IP].ihl << 2)
if conf.padding_layer in q:
del(q[conf.padding_layer].underlayer.payload)
txt.add_payload(q[IP].payload.copy())
if q.time > p.time:
p.time = q.time
else:
ip.flags.MF = False
del(ip.chksum)
del(ip.len)
p = p / txt
p._defrag_pos = max(x._defrag_pos for x in lst)
defrag.append(p) | 0.000752 |
def search(self, pattern="*", raw=True, search_raw=True,
output=False):
"""Search the database using unix glob-style matching (wildcards
* and ?).
Parameters
----------
pattern : str
The wildcarded pattern to match when searching
search_raw : bool
If True, search the raw input, otherwise, the parsed input
raw, output : bool
See :meth:`get_range`
Returns
-------
Tuples as :meth:`get_range`
"""
tosearch = "source_raw" if search_raw else "source"
if output:
tosearch = "history." + tosearch
self.writeout_cache()
return self._run_sql("WHERE %s GLOB ?" % tosearch, (pattern,),
raw=raw, output=output) | 0.004673 |
def _get_current_context(self):
"""Returns the current ZipkinAttrs and generates new ones if needed.
:returns: (report_root_timestamp, zipkin_attrs)
:rtype: (bool, ZipkinAttrs)
"""
# This check is technically not necessary since only root spans will have
# sample_rate, zipkin_attrs or a transport set. But it helps making the
# code clearer by separating the logic for a root span from the one for a
# child span.
if self._is_local_root_span:
# If sample_rate is set, we need to (re)generate a trace context.
# If zipkin_attrs (trace context) were passed in as argument there are
# 2 possibilities:
# is_sampled = False --> we keep the same trace_id but re-roll the dice
# for is_sampled.
# is_sampled = True --> we don't want to stop sampling halfway through
# a sampled trace, so we do nothing.
# If no zipkin_attrs were passed in, we generate new ones and start a
# new trace.
if self.sample_rate is not None:
# If this trace is not sampled, we re-roll the dice.
if self.zipkin_attrs_override and \
not self.zipkin_attrs_override.is_sampled:
# This will be the root span of the trace, so we should
# set timestamp and duration.
return True, create_attrs_for_span(
sample_rate=self.sample_rate,
trace_id=self.zipkin_attrs_override.trace_id,
)
# If zipkin_attrs_override was not passed in, we simply generate
# new zipkin_attrs to start a new trace.
elif not self.zipkin_attrs_override:
return True, create_attrs_for_span(
sample_rate=self.sample_rate,
use_128bit_trace_id=self.use_128bit_trace_id,
)
if self.firehose_handler and not self.zipkin_attrs_override:
# If it has gotten here, the only thing that is
# causing a trace is the firehose. So we force a trace
# with sample rate of 0
return True, create_attrs_for_span(
sample_rate=0.0,
use_128bit_trace_id=self.use_128bit_trace_id,
)
# If we arrive here it means the sample_rate was not set while
# zipkin_attrs_override was, so let's simply return that.
return False, self.zipkin_attrs_override
else:
# Check if there's already a trace context in _context_stack.
existing_zipkin_attrs = self.get_tracer().get_zipkin_attrs()
# If there's an existing context, let's create new zipkin_attrs
# with that context as parent.
if existing_zipkin_attrs:
return False, ZipkinAttrs(
trace_id=existing_zipkin_attrs.trace_id,
span_id=generate_random_64bit_string(),
parent_span_id=existing_zipkin_attrs.span_id,
flags=existing_zipkin_attrs.flags,
is_sampled=existing_zipkin_attrs.is_sampled,
)
return False, None | 0.002646 |
def long_to_bytes(n, blocksize=0):
"""Convert an integer to a byte string.
In Python 3.2+, use the native method instead::
>>> n.to_bytes(blocksize, 'big')
For instance::
>>> n = 80
>>> n.to_bytes(2, 'big')
b'\x00P'
If the optional :data:`blocksize` is provided and greater than zero,
the byte string is padded with binary zeros (on the front) so that
the total length of the output is a multiple of blocksize.
If :data:`blocksize` is zero or not provided, the byte string will
be of minimal length.
"""
# after much testing, this algorithm was deemed to be the fastest
s = b''
n = int(n)
pack = struct.pack
while n > 0:
s = pack('>I', n & 0xffffffff) + s
n = n >> 32
# strip off leading zeros
for i in range(len(s)):
if s[i] != b'\000'[0]:
break
else:
# only happens when n == 0
s = b'\000'
i = 0
s = s[i:]
# add back some pad bytes. this could be done more efficiently w.r.t. the
# de-padding being done above, but sigh...
if blocksize > 0 and len(s) % blocksize:
s = (blocksize - len(s) % blocksize) * b'\000' + s
return s | 0.00082 |
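A brief usage illustration, assuming `long_to_bytes` above is importable; the second call shows the zero-padding applied when `blocksize` is given.

print(long_to_bytes(80))      # b'P'
print(long_to_bytes(80, 4))   # b'\x00\x00\x00P'
print(long_to_bytes(0))       # b'\x00'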