content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
---|---|---|
from functools import wraps

from flask import jsonify, make_response

def model_handle_check(model_type):
    """
    Checks the model_type and model_handle on the API function.
    model_type is an argument to this decorator; the wrapper extracts model_handle and
    checks that it is present in MODEL_REGISTER, so the API function must accept
    model_handle as a keyword argument.
    Args:
        model_type: the "type" of the model, as specified in the MODEL_REGISTER
    Returns:
        wrapped api function
    """
def decorator(api_func):
@wraps(api_func)
def wrapper(*args, model_handle, **kwargs):
if model_handle not in MODEL_REGISTER:
return make_response(
jsonify(
{"error": f"{model_handle} not found in registered models"}
),
404,
)
if (
model_handle in MODEL_REGISTER
and MODEL_REGISTER[model_handle]["type"] != model_type
):
return make_response(
jsonify({"error": f"{model_handle} model is not an {model_type}"}),
412,
)
return api_func(*args, model_handle=model_handle, **kwargs)
return wrapper
return decorator | 1c2bab3399dff743fd1ca1a37971a4e71f5d5b8f | 15,000 |
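A minimal usage sketch (not part of the dataset row above), assuming the decorator and a MODEL_REGISTER dict live in the same module and that a hypothetical Flask route passes model_handle as a keyword argument:

from flask import Flask, jsonify

app = Flask(__name__)
MODEL_REGISTER = {"sentiment-v1": {"type": "classifier"}}  # hypothetical registry entry

@app.route("/models/<model_handle>/predict")
@model_handle_check("classifier")
def predict(model_handle):
    # Only reached when the handle exists and has the expected type.
    return jsonify({"model": model_handle, "status": "ok"})

Requests with an unknown handle get a 404 response, and a handle registered under a different type gets a 412.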
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

def train_model_mixed_data(type_tweet, split_index, custom_tweet_data=pd.Series([], dtype=object), stop_words="english"):
    """
    Fits the data on a Bayes model. Modified train_model() with custom splitting of data.
    :param type_tweet: DataFrame with 'tweet' and 'class' columns
    :param split_index: index at which the data is split into training and test sets
    :param custom_tweet_data: if provided, this is used instead of test data for prediction
    :param stop_words: name of a stop-word list (currently unused; an empty list is passed to CountVectorizer)
    :return: training_data, testing_data, label_train, label_test
    """
data_train = type_tweet['tweet'][:split_index]
label_train = type_tweet['class'][:split_index]
data_test = type_tweet['tweet'][split_index:]
label_test = type_tweet['class'][split_index:]
    # Probably better to not remove any stop words, so pass an empty list
    # rather than the stop_words argument.
    count_vector = CountVectorizer(stop_words=[])
# Fit training data and return a matrix
training_data = count_vector.fit_transform(data_train)
# Transform testing data and return a matrix.
if not custom_tweet_data.empty:
testing_data = count_vector.transform(custom_tweet_data)
else:
testing_data = count_vector.transform(data_test)
    return training_data, testing_data, label_train, label_test | 4c7d4e29562b63ea53f1832af0841fb112c6596a | 15,001 |
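A small usage sketch with a toy DataFrame, assuming the pandas and scikit-learn imports added above; the 'tweet' and 'class' column names come from the function body:

import pandas as pd

df = pd.DataFrame({"tweet": ["good day", "bad day", "nice work", "awful work"],
                   "class": [1, 0, 1, 0]})
training_data, testing_data, label_train, label_test = train_model_mixed_data(df, split_index=3)
print(training_data.shape, testing_data.shape)  # (3, vocabulary size) and (1, vocabulary size)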
import numpy as np
import scipy.optimize
def _fit_curves(ns, ts):
"""Fit different functional forms of curves to the times.
Parameters:
ns: the value of n for each invocation
ts: the measured run time, as a (len(ns), reps) shape array
Returns:
scores: normalised scores for each function
coeffs: coefficients for each function
names: names of each function
fns: the callable for each function in turn.
"""
# compute stats
med_times = np.median(ts, axis=1)
# fit and score complexities
scores = []
coeffs = []
names = []
fns = []
ns = np.array(ns)
ts = np.array(med_times)
for c_name, c_fn in complexities.items():
res = scipy.optimize.minimize_scalar(
complexity_fit, bracket=[1e-5, 1e5], args=(c_fn, ns, ts)
)
scores.append(res.fun)
coeffs.append(res.x)
names.append(c_name)
fns.append(c_fn)
scores = 1.0 / np.sqrt(np.array(scores))
tot_score = np.sum(scores)
scores = scores / tot_score
return scores, coeffs, names, fns | 9a480869d930e27d9aa988455228e6197f87417a | 15,002 |
def isolate_integers(string):
"""Isolate positive integers from a string, returns as a list of integers."""
return [int(s) for s in string.split() if s.isdigit()] | cc95f7a37e3ae258ffaa54ec59f4630c600e84e1 | 15,003 |
def extractAFlappyTeddyBird(item):
"""
# A Flappy Teddy Bird
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'The Black Knight who was stronger than even the Hero' in item['title']:
return buildReleaseMessageWithType(item, 'The Black Knight Who Was Stronger than Even the Hero', vol, chp, frag=frag, postfix=postfix)
return False | ca382caa9d1d9244424a39d1bc43c141b003691d | 15,004 |
def get_trainable_vars(name):
"""
returns the trainable variables
:param name: (str) the scope
:return: ([TensorFlow Variable])
"""
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name) | c45b075c739e8c86d6f1dadc0b1f4eacfb1d1505 | 15,005 |
def getLambdaFasta():
"""
Returns the filename of the FASTA of the lambda phage reference.
"""
return _getAbsPath('lambdaNEB.fa') | 4cb351d874087da71d8f726802a5bb86438dacd1 | 15,006 |
from heartandsole import Activity
import warnings
def register_field(name):
"""Register a custom accessor on Activity objects.
Based on :func:`pandas.api.extensions.register_dataframe_accessor`.
Args:
name (str): Name under which the accessor should be registered. A warning
is issued if this name conflicts with a preexisting attribute.
Returns:
callable: A class decorator.
See also:
:func:`pandas.api.extensions.register_dataframe_accessor`
Register a custom accessor on DataFrame objects.
`pandas.api.extensions._register_accessor() <https://github.com/pandas-dev/pandas/blob/v1.2.4/pandas/core/accessor.py#L189-L275>`_
Notes:
When accessed, your accessor will be initialized with the Activity object
the user is interacting with. So the signature must be
.. code-block:: python
def __init__(self, activity_obj): # noqa: E999
...
Examples:
In your library code::
import heartandsole as hns
@hns.api.extensions.register_field('running_smoothness')
class SmoothnessAccessor:
def __init__(self, activity_obj):
self._obj = activity_obj
@property
def avg(self):
# return the average of the records
return self._obj.records['running_smoothness'].mean()
Back in an interactive IPython session:
.. code-block:: ipython
        In [1]: act = hns.Activity(pd.DataFrame({'running_smoothness': np.linspace(0, 10)}))
In [2]: act.running_smoothness.avg
Out[2]: 5.0
TODO:
* Consider making this a classmethod of Activity.
"""
def decorator(field):
if hasattr(Activity, name):
warnings.warn(
f"registration of accessor {repr(field)} under name "
f"{repr(name)} for type {repr(Activity)} is overriding a preexisting "
f"attribute with the same name.",
UserWarning,
stacklevel=2,
)
setattr(Activity, name, CachedField(name, field))
Activity._fields.add(name)
return field
return decorator | 115893cbca27c9822f08746f45a5ae0dcf48aadf | 15,007 |
def Rotation_ECL_EQD(time):
"""Calculates a rotation matrix from ecliptic J2000 (ECL) to equatorial of-date (EQD).
This is one of the family of functions that returns a rotation matrix
for converting from one orientation to another.
Source: ECL = ecliptic system, using equator at J2000 epoch.
Target: EQD = equatorial system, using equator of date.
Parameters
----------
time : Time
The date and time of the desired equator.
Returns
-------
RotationMatrix
A rotation matrix that converts ECL to EQD.
"""
rot = Rotation_EQD_ECL(time)
return InverseRotation(rot) | d140e2c03e62fba2168faf9c3599afa6e41bb774 | 15,008 |
def _make_players_away(team_size):
"""Construct away team of `team_size` players."""
away_players = []
for i in range(team_size):
away_players.append(
Player(Team.AWAY, _make_walker("away%d" % i, i, _RGBA_RED)))
return away_players | b0beff6f06fc52f870c143c01c14d18eb77d0cc5 | 15,009 |
import json
def store_barbican_secret_for_coriolis(
barbican, secret_info, name='Coriolis Secret'):
""" Stores secret connection info in Barbican for Coriolis.
:param barbican: barbican_client.Client instance
    :param secret_info: secret info to store
    :param name: name to give the stored Barbican secret
    :return: the HREF (URL) of the newly-created Barbican secret
"""
payload = json.dumps(secret_info)
secret = barbican.secrets.create(
name=name, payload=payload,
payload_content_type='application/json')
secret_ref = secret.store()
return secret_ref | 218bf941203dd12bc78fc7a87d6a2f9f21761d57 | 15,010 |
def wfr2_grad_single(image, sigma, kx, ky, kw, kstep, grad=None):
"""Optimized, single precision version of wfr2_grad.
Single precision might be faster on some hardware.
In addition to returning the
used k-vector and lock-in signal, return the gradient of the lock-in
signal as well, for each pixel computed from the values of the surrounding pixels
of the GPA of the best k-vector. Slightly more accurate, determination of this gradient,
as boundary effects are mitigated.
"""
xx, yy = cp.ogrid[0:image.shape[0],
0:image.shape[1]]
c_image = cp.asarray(image, dtype=np.float32)
g = {'lockin': cp.zeros_like(c_image, dtype=np.complex64),
'grad': cp.zeros(image.shape + (2,), dtype=np.float32),
}
gaussian = cpndi.fourier_gaussian(cp.ones_like(c_image, dtype=np.float32), sigma=sigma)
if grad == 'diff':
def grad_func(phase):
dbdx = cp.diff(phase, axis=0, append=np.nan)
dbdy = cp.diff(phase, axis=1, append=np.nan)
return dbdx, dbdy
elif grad is None:
def grad_func(phase):
return cp.gradient(phase)
else:
grad_func = grad
for wx in np.arange(kx-kw, kx+kw, kstep):
for wy in np.arange(ky-kw, ky+kw, kstep):
multiplier = cp.exp(np.pi*2j * (xx*wx + yy*wy))
X = cp.fft.fft2(c_image * multiplier)
X = X * gaussian
sf = cp.fft.ifft2(X)
t = cp.abs(sf) > cp.abs(g['lockin'])
angle = -cp.angle(sf)
grad = grad_func(angle)
grad = cp.stack(grad, axis=-1)
g['lockin'] = cp.where(t, sf * cp.exp(-2j*np.pi*((wx-kx)*xx + (wy-ky)*yy)), g['lockin'])
# TODO: do outside forloop.
g['grad'] = cp.where(t[..., None], grad + 2*np.pi * cp.array([(wx-kx), (wy-ky)]), g['grad'])
for key in g.keys():
g[key] = g[key].get()
g['grad'] = wrapToPi(2 * g['grad']) / 2
return g | e93a0bde20a151018a07dd138d09691711946458 | 15,011 |
import random
def padding():
"""Return 16-200 random bytes"""
return URANDOM(random.randrange(16, PAD_MAX)) | 65a52c19c3b39344bd1959c58f2cd7950b0a19e4 | 15,012 |
import sys
def listener(phrase_limit: int, timeout: int = None, sound: bool = True) -> str:
"""Function to activate listener, this function will be called by most upcoming functions to listen to user input.
Args:
phrase_limit: Time in seconds for the listener to actively listen to a sound.
timeout: Time in seconds for the overall listener to be active.
sound: Flag whether or not to play the listener indicator sound. Defaults to True unless set to False.
Returns:
str:
- On success, returns recognized statement from the microphone.
- On failure, returns ``SR_ERROR`` as a string which is conditioned to respond appropriately.
"""
try:
sys.stdout.write("\rListener activated..") and playsound('indicators/start.mp3') if sound else \
sys.stdout.write("\rListener activated..")
if timeout and phrase_limit:
listened = recognizer.listen(source, phrase_time_limit=phrase_limit, timeout=timeout)
else:
listened = recognizer.listen(source, phrase_time_limit=phrase_limit)
sys.stdout.write("\r") and playsound('indicators/end.mp3') if sound else sys.stdout.write("\r")
return_val = recognizer.recognize_google(listened)
sys.stdout.write(f'\r{return_val}')
except (UnknownValueError, RequestError, WaitTimeoutError):
return_val = 'SR_ERROR'
return return_val | 87cadd28bc4c924c0db775063d16e6775f4d2381 | 15,013 |
import sys

def find():
    """Prints user message and returns the number of HP-49 connected.
    """
    hps = com.find()
    if len(hps) == 0:
        print("No HP49-compatible devices connected.")
        sys.stdout.flush()
    else:
        print("Number of HP49-compatible devices: %d" % len(hps))
        sys.stdout.flush()
    return len(hps) | 8530fc9d6d904e8c4fe061c237af57f9874a2ea2 | 15,014 |
def get_children_templates(pvc_enabled=False):
"""
Define a list of all resources that should be created.
"""
children_templates = {
"service": "service.yaml",
"ingress": "ingress.yaml",
"statefulset": "statefulset.yaml",
"configmap": "configmap.yaml",
"secret": "secret.yaml",
}
if pvc_enabled:
children_templates["pvc"] = "pvc.yaml"
return children_templates | 25db24b03542b1365529bbf1814e2fb801337022 | 15,015 |
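Quick usage note: the PVC template only appears when persistent storage is requested.

templates = get_children_templates(pvc_enabled=True)
assert "pvc" in templates
assert "pvc" not in get_children_templates()  # pvc_enabled defaults to False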
def sort_as_int(environment, value, reverse=False, attribute=None):
"""Sort collection after converting the attribute value to an int"""
def convert_to_int(x):
val = str(x)
# Test if this is a string representation of a float.
# This is what the copy rig does and it's annoying
if '.' in val:
val = float(val)
return int(val)
key_func = make_attrgetter(
environment, attribute,
postprocess=convert_to_int
)
return sorted(value, key=key_func, reverse=reverse) | 13e7727d1337bbfddec1a0661552c51d7015e58b | 15,016 |
def get_pretty_table_for_item(item, output_fields):
"""
"""
x = PrettyTable(["Attribute", "Value"])
attrs = _filter_attributes(item.get_attributes(), output_fields)
for attr in attrs:
row = []
row.append(attr)
row.append(getattr(item, attr))
x.add_row(row)
return x | 48d7b4c1a53884dc65de8da1167762cbf0143d2c | 15,017 |
def create_t1_based_unwarp(name='unwarp'):
"""
Unwarp an fMRI time series based on non-linear registration to T1.
NOTE: AS IT STANDS THIS METHOD DID NOT PRODUCE ACCEPTABLE RESULTS
IF BRAIN COVERAGE IS NOT COMPLETE ON THE EPI IMAGE.
ALSO: NEED TO ADD AUTOMATIC READING OF EPI RESOLUTION TO GET
"""
unwarpflow = pe.Workflow(name=name)
inputnode = pe.Node(interface=util.IdentityInterface(fields=['epi',
'T1W']),
name='inputspec')
outputnode = pe.Node(interface=util.IdentityInterface(fields=[
'unwarped_func',
'warp_files']),
name='outputspec')
tmedian = pe.Node(interface=ImageMaths(), name='tmedian')
tmedian.inputs.op_string = '-Tmedian'
epi_brain_ext = pe.Node(interface=util.Function(function=epi_brain_extract,
input_names=['in_file'],
output_names=['out_vol',
'out_mask']),
name='epi_brain_ext')
fast_debias = pe.Node(interface=FAST(), name='FAST_debias')
fast_debias.inputs.output_biascorrected = True
robex = pe.Node(interface=util.Function(function=my_robex,
input_names=['in_file'],
output_names=['out_file',
'out_mask']),
name='robex')
downsample_T1 = pe.Node(MRIConvert(), name='downsample_dti')
downsample_T1.inputs.vox_size = (3.438, 3.438, 3.000)
downsample_T1.inputs.out_type = 'niigz'
contrast_invert = pe.Node(interface=util.Function(function=invert_contrast,
input_names=['in_t1_brain',
'in_b0_brain'],
output_names=['out_fn']),
name='contrast_invert')
ants_syn = pe.Node(interface=util.Function(function=my_ants_registration_syn,
input_names=['in_T1W',
'in_epi'],
output_names=['out_transforms']),
name='ants_syn')
ants_warp = pe.Node(interface=WarpTimeSeriesImageMultiTransform(),
name='ants_warp')
'''connections'''
# unwarpflow.connect(inputnode, 'T1W', robex, 'in_file')
unwarpflow.connect(inputnode, 'T1W', fast_debias, 'in_files')
# unwarpflow.connect(robex, 'out_file', fast_debias, 'in_files')
unwarpflow.connect(fast_debias, 'restored_image', robex, 'in_file')
# unwarpflow.connect(fast_debias, 'restored_image', downsample_T1, 'in_file')
unwarpflow.connect(robex, 'out_file', downsample_T1, 'in_file')
unwarpflow.connect(downsample_T1, 'out_file', contrast_invert, 'in_t1_brain')
unwarpflow.connect(inputnode, 'epi', tmedian, 'in_file')
unwarpflow.connect(tmedian, 'out_file', epi_brain_ext, 'in_file')
unwarpflow.connect(epi_brain_ext, 'out_vol', contrast_invert, 'in_b0_brain')
unwarpflow.connect(contrast_invert, 'out_fn', ants_syn, 'in_T1W')
unwarpflow.connect(epi_brain_ext, 'out_vol', ants_syn, 'in_epi')
unwarpflow.connect(ants_syn, 'out_transforms', outputnode, 'out_transforms')
unwarpflow.connect(inputnode, 'epi', ants_warp, 'input_image')
unwarpflow.connect(contrast_invert, 'out_fn', ants_warp, 'reference_image')
unwarpflow.connect(ants_syn, 'out_transforms', ants_warp, 'transformation_series')
unwarpflow.connect(ants_syn, 'out_transforms', outputnode, 'warp_files')
unwarpflow.connect(ants_warp, 'output_image', outputnode, 'unwarped_func')
return unwarpflow | cbf3180e2899ac6314cde3c30ca3619ca4d3e125 | 15,018 |
def get_qnode(caching, diff_method="finite-diff", interface="autograd"):
"""Creates a simple QNode"""
dev = qml.device("default.qubit.autograd", wires=3)
@qnode(dev, caching=caching, diff_method=diff_method, interface=interface)
def qfunc(x, y):
qml.RX(x, wires=0)
qml.RX(y, wires=1)
qml.CNOT(wires=[0, 1])
return expval(qml.PauliZ(wires=1))
return qfunc | 3a8cb0f47e8846338338d21896e59cba475e8351 | 15,019 |
def segment_relative_timestamps(segment_start, segment_end, timestamps):
""" Converts timestamps for a global recording to timestamps in a segment given the segment boundaries
Args:
segment_start (float): segment start time in seconds
segment_end (float): segment end time in seconds
        timestamps (list): List with length the number of labelled classes. Each element of the list is an array of
            start and end times of the labelled portions of the recording.
Returns:
List of the timestamps of labelled portion in the segment , with respect to the segment.
Examples:
>>> timestamps = [np.array([0.0, 1.0, 2.0, 9.0]),
np.array([0.5, 1.5]),
np.array([3.0, 6.0]),
np.array([]),
np.array([7.0, 8.0])]
>>> segment_relative_timestamps(3.3, 6.6, timestamps)
>>> [array([[0. , 3.3]], dtype=float32),
array([], dtype=float32),
array([[0. , 2.7]], dtype=float32),
array([], dtype=float32),
array([], dtype=float32)]
"""
segment_timestamps = []
# loop over the classes
for c_timestamps in timestamps:
if c_timestamps.size > 0: # "if there are timestamps"
inside_timestamps = []
# For all timestamps, look if they fall in the segment. If they do, convert them to segment times.
for (start, end) in zip(c_timestamps[::2], c_timestamps[1::2]):
if start <= segment_end and end >= segment_start:
inside_timestamps.append(
(np.max([segment_start, start]) - segment_start, np.min([end, segment_end]) - segment_start))
segment_timestamps.append(np.asarray(inside_timestamps, dtype=np.float32))
else:
segment_timestamps.append(np.array([], dtype=np.float32))
return segment_timestamps | 743938adfb8ee1450c2140f76dbbdfb88c2a3c7f | 15,020 |
def compare_dataframes_mtmc(gts, ts):
"""Compute ID-based evaluation metrics for MTMCT
Return:
df (pandas.DataFrame): Results of the evaluations in a df with only the 'idf1', 'idp', and 'idr' columns.
"""
gtds = []
tsds = []
gtcams = gts['CameraId'].drop_duplicates().tolist()
tscams = ts['CameraId'].drop_duplicates().tolist()
maxFrameId = 0
for k in sorted(gtcams):
gtd = gts.query('CameraId == %d' % k)
gtd = gtd[['FrameId', 'Id', 'X', 'Y', 'Width', 'Height']]
# max FrameId in gtd only
mfid = gtd['FrameId'].max()
gtd['FrameId'] += maxFrameId
gtd = gtd.set_index(['FrameId', 'Id'])
gtds.append(gtd)
if k in tscams:
tsd = ts.query('CameraId == %d' % k)
tsd = tsd[['FrameId', 'Id', 'X', 'Y', 'Width', 'Height']]
# max FrameId among both gtd and tsd
mfid = max(mfid, tsd['FrameId'].max())
tsd['FrameId'] += maxFrameId
tsd = tsd.set_index(['FrameId', 'Id'])
tsds.append(tsd)
maxFrameId += mfid
# compute multi-camera tracking evaluation stats
multiCamAcc = mm.utils.compare_to_groundtruth(
pd.concat(gtds), pd.concat(tsds), 'iou')
metrics = list(mm.metrics.motchallenge_metrics)
metrics.extend(['num_frames', 'idfp', 'idfn', 'idtp'])
mh = mm.metrics.create()
summary = mh.compute(multiCamAcc, metrics=metrics, name='MultiCam')
return summary | 002333c2be971a453727f43c257b46a99b0451cb | 15,021 |
import os
import subprocess
import time
def launch_corba(
exec_file=None,
run_location=None,
jobname=None,
nproc=None,
verbose=False,
additional_switches="",
start_timeout=60,
):
"""Start MAPDL in AAS mode
Notes
-----
The CORBA interface is likely to fail on computers with multiple
network adapters. The ANSYS RPC isn't smart enough to determine
the right adapter and will likely try to communicate on the wrong
IP.
"""
# Using stored parameters so launch command can be run from a
# cached state (when launching the GUI)
# can't run /BATCH in windows, so we trick it using "-b" and
# provide a dummy input file
if os.name == "nt":
# must be a random filename to avoid conflicts with other
# potential instances
tmp_file = "%s.inp" % random_string(10)
with open(os.path.join(run_location, tmp_file), "w") as f:
f.write("FINISH")
additional_switches += " -b -i %s -o out.txt" % tmp_file
# command must include "aas" flag to start MAPDL server
command = '"%s" -aas -j %s -np %d %s' % (
exec_file,
jobname,
nproc,
additional_switches,
)
# remove any broadcast files
broadcast_file = os.path.join(run_location, "mapdl_broadcasts.txt")
if os.path.isfile(broadcast_file):
os.remove(broadcast_file)
if verbose:
subprocess.Popen(command, shell=True, cwd=run_location)
else:
subprocess.Popen(
command,
shell=True,
cwd=run_location,
stdin=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
# listen for broadcast file
telapsed = 0
tstart = time.time()
started_rpc = False
while telapsed < start_timeout and not started_rpc:
try:
if os.path.isfile(broadcast_file):
broadcast = open(broadcast_file).read()
# see if connection to RPC has been made
rpc_txt = "visited:collaborativecosolverunitior-set:"
started_rpc = rpc_txt in broadcast
time.sleep(0.1)
telapsed = time.time() - tstart
except KeyboardInterrupt:
raise KeyboardInterrupt
# exit if timed out
if not started_rpc:
err_str = "Unable to start ANSYS within %.1f seconds" % start_timeout
if os.path.isfile(broadcast_file):
broadcast = open(broadcast_file).read()
err_str += "\n\nLast broadcast:\n%s" % broadcast
raise TimeoutError(err_str)
# return CORBA key
keyfile = os.path.join(run_location, "aaS_MapdlId.txt")
return open(keyfile).read() | d877aa81aab9722a188dbe61c284564e2da3b835 | 15,022 |
import xml.etree.ElementTree as ET
from urllib.request import urlopen

def fetch_xml(url):
    """
    Fetch a URL and parse it as XML using ElementTree
    """
    resp = urlopen(url)
    tree = ET.parse(resp)
    return tree | d0f4f5b7fe19692675cba1254f6bfa63f07e45a5 | 15,023 |
def update_hirsch_index(depth_node_dict, minimum_hirsch_value, maximum_hirsch_value):
"""
Calculates the Hirsch index for a radial tree.
Note that we have a slightly different definition of the Hirsch index to the one found in:
Gómez, V., Kaltenbrunner, A., & López, V. (2008, April).
Statistical analysis of the social network and discussion threads in slashdot.
In Proceedings of the 17th international conference on World Wide Web (pp. 645-654). ACM.
Inputs: - depth_node_dict: A map from node depth to node ids as a python dictionary.
- minimum_hirsch_value: This is the previous Hirsch value.
- maximum_hirsch_value: This is the depth of the latest node added to the tree.
Output: - hirsch: The Hirsch index.
"""
# This is the previous hirsch index value.
hirsch_index = minimum_hirsch_value
if maximum_hirsch_value > minimum_hirsch_value:
adopters = depth_node_dict[maximum_hirsch_value]
width = len(adopters)
if width >= maximum_hirsch_value:
hirsch_index = maximum_hirsch_value
return hirsch_index | 2fdf5ca6aa216eacb3f18cd2f91875d02e0740ea | 15,024 |
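A worked example with a hypothetical tree: depth 2 already holds two nodes, so once the latest node is added at depth 2 the width (2) is at least the depth (2) and the index rises from 1 to 2.

depth_node_dict = {1: ["a", "b", "c"], 2: ["d", "e"]}
print(update_hirsch_index(depth_node_dict, minimum_hirsch_value=1, maximum_hirsch_value=2))  # 2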
import numpy
import sys
def read_trajectory(filename, matrix=True):
"""
Read a trajectory from a text file.
Input:
filename -- file to be read
matrix -- convert poses to 4x4 matrices
Output:
dictionary of stamped 3D poses
"""
file = open(filename)
data = file.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
list = [[float(v.strip()) for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"]
list_ok = []
for i,l in enumerate(list):
if l[4:8]==[0,0,0,0]:
continue
isnan = False
for v in l:
if numpy.isnan(v):
isnan = True
break
if isnan:
sys.stderr.write("Warning: line %d of file '%s' has NaNs, skipping line\n"%(i,filename))
continue
list_ok.append(l)
if matrix :
traj = dict([(l[0],transform44(l[0:])) for l in list_ok])
else:
traj = dict([(l[0],l[1:8]) for l in list_ok])
return traj | abbcaf44b51adcd7468b8e03d59170adeceff143 | 15,025 |
import json
from typing import List
import asyncio
async def async_start(hass: HomeAssistantType, config_entry=None) -> bool:
"""Start Ampio discovery."""
topics = {}
@callback
async def version_info_received(msg):
"""Process the version info message."""
_LOGGER.debug("Version %s", msg.payload)
try:
data = json.loads(msg.payload)
except json.JSONDecodeError:
_LOGGER.error("Unable to decode Ampio MQTT Server version")
return
version = data.get(ATTR_VERSION, "N/A")
device_registry = await hass.helpers.device_registry.async_get_registry()
device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(CONNECTION_NETWORK_MAC, str("ampio-mqtt"))},
identifiers={(DOMAIN, str("ampio-mqtt"))},
name="Ampio MQTT Server",
manufacturer="Ampio",
model="MQTT Server",
sw_version=version,
)
topics[RESPONSE_AMPIO_VERSION] = {
"topic": RESPONSE_AMPIO_VERSION,
"msg_callback": version_info_received,
"qos": DEFAULT_QOS,
}
@callback
async def device_list_received(msg):
"""Process device list info message."""
try:
payload = json.loads(msg.payload)
except ValueError as err:
_LOGGER.error("Unable to parse JSON module list: %s", err)
return
modules: List[AmpioModuleInfo] = AmpioModuleInfo.from_topic_payload(payload)
for module in modules:
data_modules = hass.data[DATA_AMPIO_MODULES]
await async_setup_device_registry(hass, config_entry, module)
data_modules[module.user_mac] = module
ampio.async_publish(
hass, REQUEST_MODULE_NAMES.format(mac=module.user_mac), "1", 0, False
)
topics[RESPONSE_MODULE_DISCOVERY] = {
"topic": RESPONSE_MODULE_DISCOVERY,
"msg_callback": device_list_received,
"qos": DEFAULT_QOS,
}
async def module_names_received(msg):
"Handle names update." ""
matched = MAC_FROM_TOPIC_RE.match(msg.topic)
if matched:
mac = matched.group("mac").upper()
module = hass.data[DATA_AMPIO_MODULES].get(mac)
if module is None:
return
else:
return
try:
payload = json.loads(msg.payload)
except ValueError as err:
_LOGGER.error("Unable to parse JSON module names: %s", err)
return
module.names = ItemName.from_topic_payload(payload)
module.update_configs()
_LOGGER.info(
"Discovered: %s-%s (%s): %s",
module.code,
module.model,
module.software,
module.name,
)
for component, configs in module.configs.items():
for config in configs:
unique_id = config.get("unique_id")
if unique_id not in hass.data[DATA_AMPIO_UNIQUE_IDS]:
hass.data[DATA_AMPIO][component].append(config)
hass.data[DATA_AMPIO_UNIQUE_IDS].add(unique_id)
else:
_LOGGER.debug("Ignoring: %s", unique_id)
del hass.data[DATA_AMPIO_MODULES][mac]
if len(hass.data[DATA_AMPIO_MODULES]) == 0: # ALL MODULES discovered
_LOGGER.info("All modules discovered")
asyncio.create_task(async_load_entities(hass))
topics[RESPONSE_MODULE_NAMES] = {
"topic": RESPONSE_MODULE_NAMES,
"msg_callback": module_names_received,
"qos": DEFAULT_QOS,
}
hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock()
hass.data[CONFIG_ENTRY_IS_SETUP] = set()
hass.data[DATA_AMPIO_MODULES] = {}
hass.data[DATA_AMPIO_UNIQUE_IDS] = set()
hass.data[DISCOVERY_UNSUBSCRIBE] = await subscription.async_subscribe_topics(
hass, hass.data.get(DISCOVERY_UNSUBSCRIBE), topics
)
ampio.async_publish(hass, REQUEST_AMPIO_VERSION, "", 0, False)
ampio.async_publish(hass, REQUEST_MODULE_DISCOVERY, "1", 0, False)
return True | 1a55c944a9f3099638b5e245283a86f476b9e29b | 15,026 |
def get_E_E_fan_H_d_t(P_fan_rtd_H, V_hs_vent_d_t, V_hs_supply_d_t, V_hs_dsgn_H, q_hs_H_d_t):
"""(37)
Args:
P_fan_rtd_H: 定格暖房能力運転時の送風機の消費電力(W)
V_hs_vent_d_t: 日付dの時刻tにおける熱源機の風量のうちの全般換気分(m3/h)
V_hs_supply_d_t: param V_hs_dsgn_H:暖房時の設計風量(m3/h)
q_hs_H_d_t: 日付dの時刻tにおける1時間当たりの熱源機の平均暖房能力(-)
V_hs_dsgn_H: returns: 日付dの時刻tにおける1時間当たりの送風機の消費電力量のうちの暖房設備への付加分(kWh/h)
Returns:
日付dの時刻tにおける1時間当たりの送風機の消費電力量のうちの暖房設備への付加分(kWh/h)
"""
f_SFP = get_f_SFP()
E_E_fan_H_d_t = np.zeros(24 * 365)
a = (P_fan_rtd_H - f_SFP * V_hs_vent_d_t) \
* ((V_hs_supply_d_t - V_hs_vent_d_t) / (V_hs_dsgn_H - V_hs_vent_d_t)) * 10 ** (-3)
E_E_fan_H_d_t[q_hs_H_d_t > 0] = np.clip(a[q_hs_H_d_t > 0], 0, None)
return E_E_fan_H_d_t | 0e2ceb9f8fbedd95d44f1c307cfb0d9ea17ea370 | 15,027 |
def load_func(func_string):
"""
If the given setting is a string import notation,
then perform the necessary import or imports.
"""
if func_string is None:
return None
elif isinstance(func_string, str):
return import_from_string(func_string)
return func_string | 99fdf6889936c95d7680ed5a70a2095474e02a9b | 15,028 |
def normalize(features):
"""
Scale data in provided series into [0,1] range.
    :param features: pandas Series (or array-like exposing min()/max()) of values to scale
    :return: the values rescaled into the [0, 1] range
"""
return (features - features.min()) / (features.max() - features.min()) | a85d77e37e71c732471d7dcd42ae1aef2181f6dc | 15,029 |
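A quick sanity check with a pandas Series; any array-like exposing min() and max() behaves the same way.

import pandas as pd

s = pd.Series([2.0, 4.0, 6.0])
print(normalize(s).tolist())  # [0.0, 0.5, 1.0]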
def get_gitlab_template_version(response):
"""Return version number of gitlab template."""
return glom(response, 'ref', default=False).replace('refs/tags/', '') | 95e1be93ef6f14d24757e07d0ba644ce89bc0dc9 | 15,030 |
import xml.etree.ElementTree as ET

def getConfigXmlString(version, name, protocol, user, host, port, path):
"""! Arguments -> XML String. """
tag_root = ET.Element(TAG_ROOT)
tag_root.set(ATTR_VERSION, version)
tag_remote = ET.Element(TAG_REMOTE)
tag_remote.set(ATTR_NAME, name)
tag_root.append(tag_remote)
appendElement(tag_remote, TAG_PROTOCOL, protocol)
appendElement(tag_remote, TAG_USER, user)
appendElement(tag_remote, TAG_HOST, host)
appendElement(tag_remote, TAG_PORT, port)
appendElement(tag_remote, TAG_PATH, path)
return ET.tostring(tag_root) | da0546a2e276c16820e09807930c981bf7d5406c | 15,031 |
import numpy as np
from typing import Optional
def phase_angle(A: Entity,
B: Entity,
C: Entity) -> Optional[float]:
"""The orbital phase angle, between A-B-C, of the angle at B.
i.e. the angle between the ref-hab vector and the ref-targ vector."""
# Code from Newton Excel Bach blog, 2014, "the angle between two vectors"
if B.name == C.name:
return None
AB = A.pos - B.pos
CB = C.pos - B.pos
return np.degrees(
np.arctan2(AB[1], AB[0]) -
np.arctan2(CB[1], CB[0])
) % 360 | ddbbc75909977350f89748c0afdb242ed9d741b6 | 15,032 |
def point2geojsongeometry(x, y, z=None):
"""
helper function to generate GeoJSON geometry of point
:param x: x coordinate
:param y: y coordinate
    :param z: z coordinate (default=None)
:returns: `dict` of GeoJSON geometry
"""
if z is None or int(z) == 0:
LOGGER.debug('Point has no z property')
coordinates = [x, y]
else:
LOGGER.debug('Point has z property')
coordinates = [x, y, z]
if None in coordinates:
return None
geometry = {
'type': 'Point',
'coordinates': coordinates
}
return geometry | d825055f7cf9d5c71decce342d384eb8018546d9 | 15,033 |
def upper(string): # pragma: no cover
"""Lower."""
new_string = []
for c in string:
o = ord(c)
new_string.append(chr(o - 32) if LC_A <= o <= LC_Z else c)
return ''.join(new_string) | c13b1cc49a608bcc65a3afa87ca94f73f0deeb0b | 15,034 |
def getid(obj):
"""Return id if argument is a Resource.
Abstracts the common pattern of allowing both an object or an object's ID
(UUID) as a parameter when dealing with relationships.
"""
try:
if obj.uuid:
return obj.uuid
except AttributeError: # nosec(cjschaef): 'obj' doesn't contain attribute
# 'uuid', return attribute 'id' or the 'obj'
pass
try:
return obj.id
except AttributeError:
return obj | 43160e6dd61ddc2e8e0559925bf2a35def79eb3f | 15,035 |
from datetime import datetime
import time
def dateToUsecs(datestring):
"""Convert Date String to Unix Epoc Microseconds"""
dt = datetime.strptime(datestring, "%Y-%m-%d %H:%M:%S")
return int(time.mktime(dt.timetuple())) * 1000000 | cba081ae63523c86572463249b4324f2183fcaaa | 15,036 |
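Example call; time.mktime interprets the string in the local timezone, so the absolute value depends on where it runs.

print(dateToUsecs("2021-01-01 00:00:00"))  # e.g. 1609459200000000 when the local timezone is UTC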
def _compute_applied_axial(R_od, t_wall, m_stack, section_mass):
"""Compute axial stress for spar from z-axis loading
INPUTS:
----------
    R_od : float (scalar/vector), outer radius of the spar section
    t_wall : float (scalar/vector), wall thickness of the spar section
    m_stack : float (scalar), mass stacked on top of the spar sections
    section_mass : float (scalar/vector), mass of each spar section as axial loading increases with spar depth
OUTPUTS:
-------
stress : float (scalar/vector), axial stress
"""
R = R_od - 0.5 * t_wall
# Add in weight of sections above it
axial_load = m_stack + np.r_[0.0, np.cumsum(section_mass[:-1])]
# Divide by shell cross sectional area to get stress
return gravity * axial_load / (2.0 * np.pi * R * t_wall) | 35c9a92b22b3639b6d1236ba45dd797388e25b07 | 15,037 |
import os
def glob(loader, node):
"""Construct glob expressions."""
value = loader.construct_scalar(node)[len('~+/'):]
return os.path.join(
os.path.dirname(loader.name),
value
) | e8976fdac21f8decb85bb05a23bacc929d1d56eb | 15,038 |
def categorical(p, rng=None, size=()):
"""Draws i with probability p[i]"""
if len(p) == 1 and isinstance(p[0], np.ndarray):
p = p[0]
p = np.asarray(p)
if size == ():
size = (1,)
elif isinstance(size, (int, np.number)):
size = (size,)
else:
size = tuple(size)
if size == (0,):
return np.asarray([])
assert len(size)
if p.ndim == 0:
raise NotImplementedError()
elif p.ndim == 1:
n_draws = int(np.prod(size))
sample = rng.multinomial(n=1, pvals=p, size=int(n_draws))
assert sample.shape == size + (len(p),)
rval = np.dot(sample, np.arange(len(p)))
rval.shape = size
return rval
elif p.ndim == 2:
n_draws_, n_choices = p.shape
(n_draws,) = size
assert n_draws == n_draws_
rval = [
np.where(rng.multinomial(pvals=p[ii], n=1))[0][0] for ii in range(n_draws)
]
rval = np.asarray(rval)
rval.shape = size
return rval
else:
raise NotImplementedError() | 1cee8c996206284f36f3bf72f8c6729037489f4d | 15,039 |
import argparse
def parse_args():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser(description='DSNT human pose model info')
parser.add_argument(
'--model', type=str, metavar='PATH', required=True,
help='model state file')
parser.add_argument(
'--gpu', type=int, metavar='N', default=0,
help='index of the GPU to use')
args = parser.parse_args()
return args | 1365cf3b60004baa8fa6f07ae755d79f6d952e95 | 15,040 |
import os
def get_model(theme, corpus_all, dictionary_all, num_topics=15, passes=25, iterations=400,
eval_every=None, update_every=0, alpha='auto', eta='auto'):
"""
Get the LDA model
"""
# Check if a model with the same config already exists.
# If it does, load the model instead of generating a new one
tempfile = TEMP_PATH + '/%s/%s_LDA_model_' % (theme, theme) + '_'.join([str(num_topics), str(passes), str(iterations), str(alpha), str(eta)])
if os.path.exists(tempfile):
lda = gensim.models.LdaModel.load(tempfile)
else:
lda = generate_lda_model(theme, corpus_all, dictionary_all, num_topics, passes,
iterations, eval_every, update_every, alpha, eta)
return lda | d2ab8c32ffc1cef8e620c0288866c0ee5cc4f297 | 15,041 |
from typing import List
def covariance_distance(covariances: List[Covariance],
x: np.ndarray) -> np.ndarray:
"""Euclidean distance of all pairs gp_models.
:param covariances:
:param x:
:return:
"""
# For each pair of kernel matrices, compute Euclidean distance
n_kernels = len(covariances)
dists = np.zeros((n_kernels, n_kernels))
for i in range(n_kernels):
for j in range(i + 1, n_kernels):
dists[i, j] = kernel_l2_dist(covariances[i].raw_kernel, covariances[j].raw_kernel, x)
# Make symmetric
dists = (dists + dists.T) / 2.
return dists | 7bfe0337c89b8476285797d1fdec394ffcd04479 | 15,042 |
def create_inputs(im, im_info, model_arch='YOLO'):
"""generate input for different model type
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
model_arch (str): model type
Returns:
inputs (dict): input of model
"""
inputs = {}
inputs['image'] = im
origin_shape = list(im_info['origin_shape'])
resize_shape = list(im_info['resize_shape'])
scale = im_info['scale']
if 'YOLO' in model_arch:
im_size = np.array([origin_shape]).astype('int32')
inputs['im_size'] = im_size
elif 'RetinaNet' in model_arch:
im_info = np.array([resize_shape + [scale]]).astype('float32')
inputs['im_info'] = im_info
elif 'RCNN' in model_arch:
im_info = np.array([resize_shape + [scale]]).astype('float32')
im_shape = np.array([origin_shape + [1.]]).astype('float32')
inputs['im_info'] = im_info
inputs['im_shape'] = im_shape
return inputs | 940563f6c48cfe54e328339b0efcd44e03ad67d8 | 15,043 |
from queue import Queue
from threading import Thread

def multiplex(n, q, **kwargs):
""" Convert one queue into several equivalent Queues
>>> q1, q2, q3 = multiplex(3, in_q)
"""
out_queues = [Queue(**kwargs) for i in range(n)]
def f():
while True:
x = q.get()
for out_q in out_queues:
out_q.put(x)
t = Thread(target=f)
t.daemon = True
t.start()
return out_queues | ee9dac3506acb5159580a39d64e3cb046c44b204 | 15,044 |
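Usage sketch: every item put on the input queue is copied to each output queue by the daemon thread.

from queue import Queue

in_q = Queue()
q1, q2, q3 = multiplex(3, in_q)
in_q.put("tick")
print(q1.get(), q2.get(), q3.get())  # tick tick tick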
import xml.etree.ElementTree as ET
def select_project(FILENAME):
"""
    Reads the XML file FILENAME and lists its projects so the user
    can choose one of them.
    input
        FILENAME: XML file with the expected structure, located alongside
        the program scripts
    return:
        the project selected by the user, as an XML subtree
"""
tree = ET.parse(FILENAME)
root = tree.getroot()
print('Projects in ' + FILENAME)
projects = []
for i, project in enumerate(root.findall('project')):
projects.append(project)
print(i, end=' ')
print('. ' + project.get('name'))
print('Select project number:')
choice = input()
return projects[int(choice)] | 0ef7ddd4b320e2ca577c253522f512e2802569e1 | 15,045 |
def compute_average(arr):
"""Compute average value for given matrix
Args:
arr (numpy array): a numpy array
Return:
float: average value
"""
val_avg = np.average(arr)
return val_avg | c69d17f53e946f693242cfd9d90877847e7c7cc6 | 15,046 |
def _read_config(rundate, pipeline, *args, **kwargs):
"""Read the configuration of a Where analysis from file
Todo: Add this as a classmethod on Configuration
Args:
rundate: Rundate of analysis.
pipeline: Pipeline used for analysis.
session: Session in analysis.
Returns:
Configuration of Where analysis.
"""
if not has_config(rundate, pipeline, *args, **kwargs):
raise FileNotFoundError(f"No configuration found for {pipeline.upper()} {rundate.strftime(config.FMT_date)}")
cfg = mg_config.Configuration.read_from_file(pipeline, _config_path(rundate, pipeline, *args, **kwargs))
cfg.master_section = pipeline
return cfg | d122887d260044ebf10eb8830b247dbdf2a18274 | 15,047 |
from typing import Callable
def ta_series(func: Callable, *args, **kwargs) -> QFSeries:
"""
Function created to allow using TA-Lib functions with QFSeries.
Parameters
----------
func
talib function: for example talib.MA
args
time series arguments to the function. They are all passed as QFSeries.
for example: 'close' or 'high, low, close' where each argument is a QFSeries.
kwargs
additional arguments to the function. for example: 'timeperiod=10' or 'timeperiod=timeperiod, matype=i'.
All additional arguments have to be passed as keyword arguments.
Returns
-------
QFSeries
Output from the talib function encapsulated in a QFSeries
"""
series_list = list(map(lambda series: series.values, args))
result = func(*series_list, **kwargs)
result = QFSeries(index=args[0].index, data=result)
return result | c3e4e644fd3e6ce7853cbe99441fe6c8a5ca2679 | 15,048 |
def find_trendline(
df_data: pd.DataFrame, y_key: str, high_low: str = "high"
) -> pd.DataFrame:
"""Attempts to find a trend line based on y_key column from a given stock ticker data frame.
Parameters
----------
df_data : DataFrame
The stock ticker data frame with at least date_id, y_key columns.
y_key : str
Column name to base the trend line on.
high_low: str, optional
Either "high" or "low". High is the default.
Returns
-------
DataFrame
If a trend is successfully found,
An updated Panda's data frame with a trend data {y_key}_trend column.
If no trend was found,
An original Panda's data frame
"""
for iteration in [3, 4, 5, 6, 7]:
df_temp = df_data.copy()
while len(df_temp) > iteration:
reg = stats.linregress(
x=df_temp["date_id"],
y=df_temp[y_key],
)
if high_low == "high":
df_temp = df_temp.loc[
df_temp[y_key] > reg[0] * df_temp["date_id"] + reg[1]
]
else:
df_temp = df_temp.loc[
df_temp[y_key] < reg[0] * df_temp["date_id"] + reg[1]
]
if len(df_temp) > 1:
break
if len(df_temp) == 1:
return df_data
reg = stats.linregress(
x=df_temp["date_id"],
y=df_temp[y_key],
)
df_data[f"{y_key}_trend"] = reg[0] * df_data["date_id"] + reg[1]
return df_data | dbe995fab1436a1c212780eebf123dc39f27f234 | 15,049 |
def find_core(read, core, core_position_sum, core_position_count, start = -1):
"""
Find the core sequence, trying "average" position first for efficiency.
"""
if start < 0 and core_position_count > 0:
core_position = round(core_position_sum/core_position_count)
if len(read) > core_position+len(core):
if read[core_position:core_position+len(core)]==core:
return core_position
return read.find(core, start+1) | 3a0de472194db00fac4e65a2b0e15cfa351eb70f | 15,050 |
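A small example of the fast path: with a position sum of 8 over 2 previous observations, the average position 4 is tried first and matches, so no scan is needed.

read = "TTTTACGTGG"
print(find_core(read, "ACGT", core_position_sum=8, core_position_count=2))  # 4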
from functools import reduce
def clambda(n):
"""
clambda(n)
Returns Carmichael's lambda function for positive integer n.
Relies on factoring n
"""
smallvalues=[1,1,2,2,4,2,6,2,6,4,10,2,12,6,4,4,16,6,18,4,6,10,22,2,20,12,18,\
6,28,4,30,8,10,16,12,6,36,18,12,4,40,6,42,10,12,22,46,4,42,20,16,12,52,18,\
20,6,18,28,58,4,60,30,6,16,12,10,66,16,22,12,70,6,72,36,20,18,30,12,78,4,54,\
40,82,6,16,42,28,10,88,12,12,22,30,46,36,8,96,42,30,20]
if n<=100: return smallvalues[n-1]
factors=factor(n)
l1=[]
for p,e in factors:
if p==2 and e>2:
l1.append(2**(e-2))
else:
l1.append((p-1)*p**(e-1))
return reduce(lambda a,b : lcm(a,b), l1) | 0da59a30e6d7376731a868ae81aed8e1cb42e8ce | 15,051 |
def dashboard():
"""
Render the dashboard template on the /dashboard route
"""
return render_template('page/home/dashboard.html', title="Dashboard") | 12e1750a6c0b90aa8fcda29b78a463805abd45f3 | 15,052 |
import json
def get_port_status(cluster, lswitch_id, port_id):
"""Retrieve the operational status of the port"""
try:
r = do_single_request("GET",
"/ws.v1/lswitch/%s/lport/%s/status" %
(lswitch_id, port_id), cluster=cluster)
r = json.loads(r)
except NvpApiClient.ResourceNotFound as e:
LOG.error(_("Port not found, Error: %s"), str(e))
raise exception.PortNotFound(port_id=port_id, net_id=lswitch_id)
except NvpApiClient.NvpApiException as e:
raise exception.QuantumException()
if r['link_status_up'] is True:
return constants.PORT_STATUS_ACTIVE
else:
return constants.PORT_STATUS_DOWN | f5c6fdf7d23fef17f402525cbfe9c3892012e3f0 | 15,053 |
import logging

import numpy as np
import scipy.sparse
from tqdm import tqdm
def make_nearest_neighbors_graph(data, k, n=1000):
"""Build exact k-nearest neighbors graph from numpy data.
Args:
        data: Data to compute nearest neighbors of, each row is one point
k: number of nearest neighbors to compute
n (optional): number of neighbors to compute simultaneously
Returns:
A scipy sparse matrix in LIL format giving the symmetric nn graph.
"""
shape = data.shape
assert shape[0] % n == 0
nbr_graph = scipy.sparse.lil_matrix((shape[0], shape[0]))
norm = np.sum(data**2, axis=1)
cols = np.meshgrid(np.arange(n), np.ones(k+1))[0]
for i in tqdm(range(0, shape[0], n)):
dot = data @ data[i:i+n].T
dists = np.sqrt(np.abs(norm[:, None] - 2*dot + norm[i:i+n][None, :]))
idx = np.argpartition(dists, k, axis=0)[:k+1]
nbrs = idx[np.argsort(dists[idx, cols], axis=0), cols][1:]
for j in range(n):
nbr_graph[i+j, nbrs[:, j]] = 1
# Symmetrize graph
for i in tqdm(range(shape[0])):
for j in nbr_graph.rows[i]:
if nbr_graph[j, i] == 0:
nbr_graph[j, i] = nbr_graph[i, j]
logging.info('Symmetrized neighbor graph')
return nbr_graph | dd99b42c306ac963232aeca4e86ef7e0449126ca | 15,054 |
import multiprocessing
def read_examples(input_files, batch_size, shuffle, num_epochs=None):
"""Creates readers and queues for reading example protos."""
files = []
for e in input_files:
for path in e.split(','):
files.extend(file_io.get_matching_files(path))
thread_count = multiprocessing.cpu_count()
# The minimum number of instances in a queue from which examples are drawn
# randomly. The larger this number, the more randomness at the expense of
# higher memory requirements.
min_after_dequeue = 1000
# When batching data, the queue's capacity will be larger than the batch_size
# by some factor. The recommended formula is (num_threads + a small safety
# margin). For now, we use a single thread for reading, so this can be small.
queue_size_multiplier = thread_count + 3
# Convert num_epochs == 0 -> num_epochs is None, if necessary
num_epochs = num_epochs or None
# Build a queue of the filenames to be read.
filename_queue = tf.train.string_input_producer(files, num_epochs, shuffle)
options = tf.python_io.TFRecordOptions(
compression_type=tf.python_io.TFRecordCompressionType.GZIP)
example_id, encoded_example = tf.TFRecordReader(options=options).read_up_to(
filename_queue, batch_size)
if shuffle:
capacity = min_after_dequeue + queue_size_multiplier * batch_size
return tf.train.shuffle_batch(
[example_id, encoded_example],
batch_size,
capacity,
min_after_dequeue,
enqueue_many=True,
num_threads=thread_count)
else:
capacity = queue_size_multiplier * batch_size
return tf.train.batch(
[example_id, encoded_example],
batch_size,
capacity=capacity,
enqueue_many=True,
num_threads=thread_count) | 5265e14d02b53d7b7c8754f980573b8d8c9667ea | 15,055 |
def gen_context(n=10):
"""
method returns a random matrix which can be used to produce private prices over a bunch of items
"""
return np.random.randint(-3,4,size=(n,n)) | 51b3cf2a64530147eddacf628c8b593b7e923402 | 15,056 |
def _parallel_binning_fit(split_feat, _self, X, y,
weights, support_sample_weight,
bins, loss):
"""Private function to find the best column splittings within a job."""
n_sample, n_feat = X.shape
feval = CRITERIA[_self.criterion]
split_t = None
split_col = None
left_node = (None, None, None, None)
right_node = (None, None, None, None)
largs_left = {'classes': None}
largs_right = {'classes': None}
if n_sample < _self._min_samples_split:
return loss, split_t, split_col, left_node, right_node
for col, _bin in zip(split_feat, bins):
for q in _bin:
# create 1D bool mask for right/left children
mask = (X[:, col] > q)
n_left, n_right = (~mask).sum(), mask.sum()
if n_left < _self._min_samples_leaf or n_right < _self._min_samples_leaf:
continue
# create 2D bool mask for right/left children
left_mesh = np.ix_(~mask, _self._linear_features)
right_mesh = np.ix_(mask, _self._linear_features)
model_left = deepcopy(_self.base_estimator)
model_right = deepcopy(_self.base_estimator)
if hasattr(_self, 'classes_'):
largs_left['classes'] = np.unique(y[~mask])
largs_right['classes'] = np.unique(y[mask])
if len(largs_left['classes']) == 1:
model_left = DummyClassifier(strategy="most_frequent")
if len(largs_right['classes']) == 1:
model_right = DummyClassifier(strategy="most_frequent")
if weights is None:
model_left.fit(X[left_mesh], y[~mask])
loss_left = feval(model_left, X[left_mesh], y[~mask],
**largs_left)
wloss_left = loss_left * (n_left / n_sample)
model_right.fit(X[right_mesh], y[mask])
loss_right = feval(model_right, X[right_mesh], y[mask],
**largs_right)
wloss_right = loss_right * (n_right / n_sample)
else:
if support_sample_weight:
model_left.fit(X[left_mesh], y[~mask],
sample_weight=weights[~mask])
model_right.fit(X[right_mesh], y[mask],
sample_weight=weights[mask])
else:
model_left.fit(X[left_mesh], y[~mask])
model_right.fit(X[right_mesh], y[mask])
loss_left = feval(model_left, X[left_mesh], y[~mask],
weights=weights[~mask], **largs_left)
wloss_left = loss_left * (weights[~mask].sum() / weights.sum())
loss_right = feval(model_right, X[right_mesh], y[mask],
weights=weights[mask], **largs_right)
wloss_right = loss_right * (weights[mask].sum() / weights.sum())
total_loss = wloss_left + wloss_right
# store if best
if total_loss < loss:
split_t = q
split_col = col
loss = total_loss
left_node = (model_left, loss_left, wloss_left,
n_left, largs_left['classes'])
right_node = (model_right, loss_right, wloss_right,
n_right, largs_right['classes'])
return loss, split_t, split_col, left_node, right_node | 5889993b9ad1ca49ac9e8ce541262ff716dea18f | 15,057 |
import subprocess
def _get_latest_template_version_w_git_ssh(template):
"""
Tries to obtain the latest template version using an SSH key
"""
cmd = 'git ls-remote {} | grep HEAD | cut -f1'.format(template)
ret = temple.utils.shell(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stderr = ret.stderr.decode('utf-8').strip()
stdout = ret.stdout.decode('utf-8').strip()
if stderr and not stdout:
raise RuntimeError((
'An unexpected error happened when running "{}". (stderr="{}"'
).format(cmd, stderr))
return stdout | 125bd2250144a96d2e4deeec72bb09ffd23b1317 | 15,058 |
def checkkeywords(keywordsarr, mdtype):
""" Check the keywords
Datasets: for Check 9
Services: for Check 9
    Logic: there must be at least one keyword to get a score = 2. If keywords contain commas (","), then a maximum score of 1 is possible.
"""
score = 0
# keywordsarr is an array of objects, each containing a property "keywords" and info on a thesaurus
# here we join the keywords from all objects to one array
keywordsstr = ""
if keywordsarr != None:
keywords = []
for k in keywordsarr:
for i in k["keywords"]:
i = i.replace("\n", " ")
# exception for 1 keyword of INSPIRE
if i.find(",") > -1 and i != "Gebiedsbeheer, gebieden waar beperkingen gelden, gereguleerde gebieden en rapportage-eenheden":
score = 1
keywords.append(i)
# if the score is already 1, then we know the keywords are not
# correctly set
if len(keywords) > 0 and score != 1:
score = 2
keywordsstr = valuesep.join(keywords)
else:
keywordsstr = ""
# Now fetch the result
if mdtype == "dataset" or mdtype == "series":
# checkid = 9, so the index in the matrix is: 8
result = checksdatasets[8][2][score]
else:
result = checksservices[8][2][score]
return MkmScore(keywordsstr, score, result) | cb165689ce820c1ead3622ed562260ee76558205 | 15,059 |
def compute_presence_ratios(
sorting,
duration_in_frames,
sampling_frequency=None,
unit_ids=None,
**kwargs
):
"""
Computes and returns the presence ratios for the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
duration_in_frames: int
Length of recording (in frames).
sampling_frequency: float
The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
**kwargs: keyword arguments
Keyword arguments among the following:
save_property_or_features: bool
If True, the metric is saved as sorting property
verbose: bool
If True, will be verbose in metric computation
Returns
----------
presence_ratios: np.ndarray
The presence ratios of the sorted units.
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=sampling_frequency, recording=None,
apply_filter=False, freq_min=params_dict["freq_min"],
freq_max=params_dict["freq_max"], unit_ids=unit_ids,
duration_in_frames=duration_in_frames, verbose=params_dict['verbose'])
pr = PresenceRatio(metric_data=md)
presence_ratios = pr.compute_metric(**kwargs)
return presence_ratios | f270ff52c2d60296db25887bcbb7d203bfc23c07 | 15,060 |
def img_box_match(bboxes_gt, bboxes_pre, iou_threshold):
"""
Goal:
Returns info for mAP calculation (Precision recall curve)
Precision = TP / (TP + FP)
Recall = TP / (TP + FN)
Returns:
list of [TP/FP, conf]
num_gt_bboxes : int
Notes:
For each prediction bbox, it finds what ground-truth bbox it belongs to in a descending order of confidence
If iou(pred_box, gt_box) > iou_threshold, this gt_box is assigned to this pred_box.
Then we check if the class is correct or not -> correct: TP
-> incorrect: FP
The rest of prediction bboxes cannot find gt bboxes -> FP
The rest of gt bboxes haven't been assigned to any prediction bboxes -> FN
"""
num_gt_bboxes = len(bboxes_gt)
gt_assign = [0] * num_gt_bboxes
pre_TF = []
for box_pre in bboxes_pre:
max_iou = 0
max_iou_index = -1
for i in range(num_gt_bboxes):
iou_temp = iou_compute(box_pre, bboxes_gt[i])
if gt_assign[i] == 0: # This gt bbox hasn't been assigned
# Find the box_gt with largest iou with this given box_pre
if iou_temp > iou_threshold and iou_temp > max_iou:
max_iou_index = i
max_iou = iou_temp
if max_iou_index != -1: # successfully find a box_gt
            gt_assign[max_iou_index] = 1  # mark the matched gt box as assigned
# TP
pre_TF.append([True, box_pre['conf']])
else:
# FP
pre_TF.append([False, box_pre['conf']])
return pre_TF, num_gt_bboxes | 09a7c9e9739f491777f1b0f48f745858281a0953 | 15,061 |
def random_char():
"""Return a random character."""
return Char(choice(_possible_chars)) | aca30fc1e6b7039cd5187264b89bca4d2899d169 | 15,062 |
def bond(self, atom:Atom, nBonds:int=1, main=False) -> Atom:
"""Like :meth:`__call__`, but returns the atom passed in instead, so you
can form the main loop quickly."""
self(atom, nBonds, main); return atom | e8d065f55110c37b4db06ca394c741d98ffbd446 | 15,063 |
def train_list():
"""
Return a sorted list of all train patients
"""
patients = listdir_no_hidden(INPUT_PATH)
patients.sort()
l = []
for patient in patients:
if labels[patient] != None:
l.append(patient)
return l | 7977e8ea72e826e18b4138391e812c15f3cfb6c0 | 15,064 |
def split_data(X, Y):
"""
    This function splits the features and the target into training and test sets
    Params:
        X - (df containing predictors)
        y - (series containing target)
    Returns:
        X_train, X_test, Y_train, Y_test
"""
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=0.2, random_state=2)
return X_train, X_test, Y_train, Y_test | c8dbc5a6e63f0b24abf3547ba208ad0a24e5594b | 15,065 |
def get_bucket(client=None, **kwargs):
"""
Get bucket object.
:param client: client object to use.
:type client: Google Cloud Storage client
:returns: Bucket object
:rtype: ``object``
"""
bucket = client.lookup_bucket(kwargs['Bucket'])
return bucket | b71891eec3a9f7c8f9b8fad134b2dd02bfb65e51 | 15,066 |
def remove_macros(xml_tree: etree._ElementTree) -> etree._ElementTree:
"""Removes the macros section from the tool tree.
Args:
xml_tree (etree._ElementTree): The tool element tree.
Returns:
etree.ElementTree: The tool element tree without the macros section.
"""
to_remove = []
for macros_el in xml_tree.getroot().findall("macros"):
to_remove.append(macros_el)
for macros_el in to_remove:
xml_tree.getroot().remove(macros_el)
return xml_tree | 77fed7e85dadbe8b2ec7511ad3b4cf7c272807a4 | 15,067 |
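A minimal round trip using lxml, consistent with the etree._ElementTree annotation above:

from lxml import etree

tool = etree.ElementTree(etree.fromstring("<tool><macros/><inputs/></tool>"))
print(etree.tostring(remove_macros(tool).getroot()))  # b'<tool><inputs/></tool>'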
def flight_time_movies_2_binary_search(movie_lengths, flight_length):
"""
Solution: Sort the list of movies, then iterate it, conducting a binary
search on each item for different item, when added together, equals the
flight length.
Complexity:
Time: O(n * lg{n})
Space: O(1)
"""
if len(movie_lengths) < 2:
raise ValueError('movie length list must be at least 2 items long')
# Sort the movies first: Time: O(n * lg{n})
movie_lengths.sort()
# For each movie length
for index, movie_length_first in enumerate(movie_lengths):
# Conduct a binary search on movie_lengths: O(lg{n}) time
target_length = flight_length - movie_length_first
        movie_lengths_sub = movie_lengths[:index] + movie_lengths[index + 1:]
if binary_search(target=target_length, nums=movie_lengths_sub):
return True
return False | ac7e8ad340e677f6c51f1841aab61262d8c4e226 | 15,068 |
def find_vertical_bounds(hp, T):
"""
Finds the upper and lower bounds of the characters' zone on the plate based on threshold value T
:param hp: horizontal projection (axis=1) of the plate image pixel intensities
:param T: Threshold value for bound detection
:return: upper and lower bounds
"""
N = len(hp)
# Find lower bound
i = 0
while ~((hp[i] <= T) & (hp[i+1] > T)) & (i < int(N/2)):
i += 1
lower_bound = 0 if i == int(N/2) else i
    # Find upper bound
i = N-1
while ~((hp[i-1] > T) & (hp[i] <= T)) & (i > int(N/2)):
i -= 1
upper_bound = i
return [lower_bound, upper_bound] | 8520c3b638cafe1cfb2d86cc7ce8c3f28d132512 | 15,069 |
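A toy example; hp should be a numpy array so that the bitwise ~ and & operators act as boolean logic on the elementwise comparisons.

import numpy as np

hp = np.array([0, 0, 5, 9, 8, 7, 3, 0, 0, 0])
print(find_vertical_bounds(hp, T=1))  # [1, 7]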
import base64
def executeCmd(cmd,arg):
""" the meat: how we react to the SNI-based logic and execute the underlying command """
global currentPath
global currentDirList
global currentFileList
global currentFileSizeList
global agentName
commands = initCmd(cmd)
for testedCommand, alias in commands.items():
if testedCommand == cmd == "WHERE":
currentPath = encodeString(cmdHandler(alias))
return cmdHandler(alias)
elif testedCommand == cmd == 'CB':
returnedOutput = cmdHandler(alias)
currentPath = encodeString(returnedOutput)
return returnedOutput
elif testedCommand == cmd == 'ALIVE':
return (str(agentName)).encode('utf-8')
elif testedCommand == cmd == 'LS':
returnedOutput = cmdHandler(alias)
currentFileList,returnedOutput = emptyListCheck(returnedOutput)
return returnedOutput
elif testedCommand == cmd == 'SIZE':
returnedOutput = cmdHandler(alias)
currentFileSizeList = emptyListCheck(returnedOutput)
return returnedOutput
elif testedCommand == cmd == 'CD':
try:
target_dir = ('%s' % currentDirList[int(arg)])
except IndexError:
print("(!) Invalid directory number!")
return
alias = (alias % target_dir).replace("'","")
returnedOutput = (cmdHandler(alias))
currentPath = encodeString(returnedOutput)
return returnedOutput
elif testedCommand == cmd == 'EX':
try:
targetFile = ('%s' % currentFileList[int(arg)])
except IndexError:
print("(!) Invalid file number!")
return
targetFilePath = ('%s/%s' % (currentPath,targetFile))
with open(targetFilePath, 'rb') as f:
content = base64.b32encode(f.read())
return content
elif testedCommand == cmd == "LD":
returnedOutput = cmdHandler(alias)
currentDirList,returnedOutput = emptyListCheck(returnedOutput)
return returnedOutput
elif testedCommand == cmd == "LIST":
returnedOutput = cmdHandler(alias)
return returnedOutput | b257e77c2f7c692d63aa4140cc5ce6ccc2213273 | 15,070 |
from typing import Union
from typing import Tuple
import re
async def text2image(
text: str,
auto_parse: bool = True,
font_size: int = 20,
color: Union[str, Tuple[int, int, int], Tuple[int, int, int, int]] = "white",
font: str = "CJGaoDeGuo.otf",
font_color: Union[str, Tuple[int, int, int]] = "black",
padding: Union[int, Tuple[int, int, int, int]] = 0,
) -> BuildImage:
"""
说明:
解析文本并转为图片
使用标签
<f> </f>
可选配置项
font: str -> 特殊文本字体
fs / font_size: int -> 特殊文本大小
fc / font_color: Union[str, Tuple[int, int, int]] -> 特殊文本颜色
示例
在不在,<f font=YSHaoShenTi-2.ttf font_size=30 font_color=red>HibiKi小姐</f>,
你最近还好吗,<f font_size=15 font_color=black>我非常想你</f>,这段时间我非常不好过,
<f font_size=25>抽卡抽不到金色</f>,这让我很痛苦
参数:
:param text: 文本
:param auto_parse: 是否自动解析,否则原样发送
:param font_size: 普通字体大小
:param color: 背景颜色
:param font: 普通字体
:param font_color: 普通字体颜色
:param padding: 文本外边距,元组类型时为 (上,左,下,右)
"""
pw = ph = top_padding = left_padding = 0
if padding:
if isinstance(padding, int):
pw = padding * 2
ph = padding * 2
top_padding = left_padding = padding
elif isinstance(padding, tuple):
pw = padding[0] + padding[2]
ph = padding[1] + padding[3]
top_padding = padding[0]
left_padding = padding[1]
if auto_parse and re.search(r"<f(.*)>(.*)</f>", text):
_data = []
new_text = ""
placeholder_index = 0
for s in text.split("</f>"):
r = re.search(r"<f(.*)>(.*)", s)
if r:
start, end = r.span()
if start != 0 and (t := s[:start]):
new_text += t
_data.append(
[
(start, end),
f"[placeholder_{placeholder_index}]",
r.group(1).strip(),
r.group(2),
]
)
new_text += f"[placeholder_{placeholder_index}]"
placeholder_index += 1
new_text += text.split("</f>")[-1]
image_list = []
current_placeholder_index = 0
        # split on newlines; each line is rendered as a separate image
for s in new_text.split("\n"):
_tmp_text = s
img_height = BuildImage(0, 0, font_size=font_size).getsize("正")[1]
img_width = 0
_tmp_index = current_placeholder_index
for _ in range(s.count("[placeholder_")):
placeholder = _data[_tmp_index]
if "font_size" in placeholder[2]:
r = re.search(r"font_size=['\"]?(\d+)", placeholder[2])
if r:
w, h = BuildImage(0, 0, font_size=int(r.group(1))).getsize(
placeholder[3]
)
img_height = img_height if img_height > h else h
img_width += w
else:
img_width += BuildImage(0, 0, font_size=font_size).getsize(
placeholder[3]
)[0]
_tmp_text = _tmp_text.replace(f"[placeholder_{_tmp_index}]", "")
_tmp_index += 1
img_width += BuildImage(0, 0, font_size=font_size).getsize(_tmp_text)[0]
# img_width += len(_tmp_text) * font_size
            # start drawing this line
A = BuildImage(
img_width, img_height, color=color, font=font, font_size=font_size
)
basic_font_h = A.getsize("正")[1]
current_width = 0
            # iterate over the placeholders
for _ in range(s.count("[placeholder_")):
if not s.startswith(f"[placeholder_{current_placeholder_index}]"):
slice_ = s.split(f"[placeholder_{current_placeholder_index}]")
await A.atext(
(current_width, A.h - basic_font_h - 1), slice_[0], font_color
)
current_width += A.getsize(slice_[0])[0]
placeholder = _data[current_placeholder_index]
                # parse the tag attributes
_font = font
_font_size = font_size
_font_color = font_color
for e in placeholder[2].split():
if e.startswith("font="):
_font = e.split("=")[-1]
if e.startswith("font_size=") or e.startswith("fs="):
_font_size = int(e.split("=")[-1])
if _font_size > 1000:
_font_size = 1000
if _font_size < 1:
_font_size = 1
if e.startswith("font_color") or e.startswith("fc="):
_font_color = e.split("=")[-1]
text_img = BuildImage(
0,
0,
plain_text=placeholder[3],
font_size=_font_size,
font_color=_font_color,
font=_font,
)
_img_h = (
int(A.h / 2 - text_img.h / 2)
if new_text == "[placeholder_0]"
else A.h - text_img.h
)
await A.apaste(text_img, (current_width, _img_h - 1), True)
current_width += text_img.w
s = s[
s.index(f"[placeholder_{current_placeholder_index}]")
+ len(f"[placeholder_{current_placeholder_index}]") :
]
current_placeholder_index += 1
if s:
slice_ = s.split(f"[placeholder_{current_placeholder_index}]")
await A.atext((current_width, A.h - basic_font_h), slice_[0])
current_width += A.getsize(slice_[0])[0]
A.crop((0, 0, current_width, A.h))
# A.show()
image_list.append(A)
height = 0
width = 0
for img in image_list:
height += img.h
width = width if width > img.w else img.w
width += pw
height += ph
A = BuildImage(width + left_padding, height + top_padding, color=color)
current_height = top_padding
for img in image_list:
await A.apaste(img, (left_padding, current_height), True)
current_height += img.h
else:
width = 0
height = 0
_tmp = BuildImage(0, 0, font_size=font_size)
for x in text.split("\n"):
w, h = _tmp.getsize(x)
height += h
width = width if width > w else w
width += pw
height += ph
A = BuildImage(
width + left_padding,
height + top_padding,
font_size=font_size,
color=color,
font=font,
)
await A.atext((left_padding, top_padding), text, font_color)
# A.show()
return A | dbdc6436c94d57aa2d1eb910dc18e4afeeadf689 | 15,071 |
def get_dosage_ann():
""" Convenience function for getting the dosage and snp annotation
"""
dos = {}
s_ann = {}
dos_path =\
("/export/home/barnarj/CCF_1000G_Aug2013_DatABEL/CCF_1000G_Aug2013_Chr"
"{0}.dose.double.ATB.RNASeq_MEQTL.txt")
SNP_ANNOT =\
("/proj/genetics/Projects/shared/Studies/Impute_CCF_Arrythmia/"
"Projects/CCF/Projects/ATB/Projects/ATB_RNASeq/OutputData/"
"ATB.RNASeq_Variant_Ann.bed.gz")
    return dos, s_ann | 792caa3c9b6326178ca5a706b694c52cf1bddccc | 15,072
import types
import typing
import re
def function_arguments(function_name: str, services_module: types.ModuleType) -> typing.List[str]:
"""Get function arguments for stan::services `function_name`.
This function parses a function's docstring to get argument names. This is
an inferior method to using `inspect.Signature.from_callable(function)`.
Unfortunately, pybind11 does not support this use of `inspect`.
A compiled `services_module` is required for the lookup. Only simple function
arguments are returned. For example, callback writers and var_context
arguments are dropped.
Arguments:
function_name: Name of the function.
services_module (module): Compiled model-specific services extension module.
Returns:
Argument names for `function_name`.
"""
function = getattr(services_module, f"{function_name}_wrapper")
docstring = function.__doc__
# first line look something like this: function_name(arg1: int, arg2: int, ...) -> int
function_name_with_arguments = docstring.split(" -> ", 1).pop(0)
parameters = re.findall(r"(\w+): \w+", function_name_with_arguments)
# remove arguments which are specific to the wrapper
arguments_exclude = {"socket_filename"}
return list(filter(lambda arg: arg not in arguments_exclude, parameters)) | 01a12d97c6b154159c4ba2d142e1374a008befe3 | 15,073 |
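# Usage sketch: function_arguments only needs an object exposing "<name>_wrapper"
# callables whose docstring starts with a pybind11-style signature line, so a
# stand-in built with types.SimpleNamespace is enough here; the service name and
# parameter names below are made up for illustration.
import types

def _fake_wrapper():
    """sample_service_wrapper(socket_filename: str, num_samples: int, delta: float) -> int"""

fake_module = types.SimpleNamespace(sample_service_wrapper=_fake_wrapper)
print(function_arguments("sample_service", fake_module))
# expected ['num_samples', 'delta'] -- socket_filename is filtered out as wrapper-only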
def cost_n_moves(prev_cost: int, weight: int = 1) -> int:
""" 'g(n)' cost function that adds a 'weight' to each move."""
return prev_cost + weight | 77a737d68f2c74eaba484b36191b95064b05e1a9 | 15,074 |
import codecs
import re
import glob
import os
def get_email_dict(txt_dir):
"""
:param txt_dir: the input directory containing all text files.
:return: a dictionary where the key is the publication ID and the value is the list of authors' email addresses.
"""
def chunk(text_file, page_limit=2000):
fin = codecs.open(text_file, encoding='utf-8')
doc = []
n = 0
for line in fin:
line = line.strip().lower()
if line:
doc.append(line)
n += len(line)
if n > page_limit: break
return ' '.join(doc)
    re_email = re.compile(r'[({\[]?\s*([a-z0-9\.\-_]+(?:\s*[,;|]\s*[a-z0-9\.\-_]+)*)\s*[\]})]?\s*@\s*([a-z0-9\.\-_]+\.[a-z]{2,})')
email_dict = {}
for txt_file in glob.glob(os.path.join(txt_dir, '*.txt')):
# print(txt_file)
try: doc = chunk(txt_file)
except UnicodeDecodeError: continue
emails = []
for m in re_email.findall(doc):
ids = m[0].replace(';', ',').replace('|', ',')
domain = m[1]
if ',' in ids:
emails.extend([ID.strip()+'@'+domain for ID in ids.split(',') if ID.strip()])
else:
emails.append(ids+'@'+domain)
if emails:
key = os.path.basename(txt_file)[:-4]
email_dict[key] = emails
return email_dict | b7d70c8ec13bc2350e7291f8bf68026de4638bbc | 15,075 |
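# Usage sketch: builds a throwaway directory with one text file so the e-mail
# regex has something to match; the file name and address are invented.
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp_dir:
    with open(os.path.join(tmp_dir, "P01-1001.txt"), "w", encoding="utf-8") as f:
        f.write("A Sample Paper Title\n")
        f.write("Jane Doe (jane.doe@example.org)\n")
    print(get_email_dict(tmp_dir))
    # expected {'P01-1001': ['jane.doe@example.org']}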
import io
import numpy as np
def get_gaussian_fundamentals(s, nfreq=None):
"""
Parses harmonic and anharmonic frequencies from gaussian
log file.
Input:
s: String containing the log file output.
nfreq : number of vibrational frequencies
Returns:
If successful:
Numpy 2D array of size: nfreq x 2
1st column for harmonic frequencies in cm-1
2nd column for anharmonic frequencies in cm-1
else:
A string showing the error.
Portion of the relevant output:
Fundamental Bands (DE w.r.t. Ground State)
1(1) 3106.899 2957.812 -0.042978 -0.008787 -0.008920
2(1) 3106.845 2959.244 -0.042969 -0.008924 -0.008782
3(1) 3082.636 2934.252 -0.043109 -0.008543 -0.008705
4(1) 3082.581 2935.702 -0.043101 -0.008709 -0.008539
5(1) 3028.430 2918.529 -0.048859 -0.008796 -0.008794
6(1) 3026.064 2926.301 -0.048438 -0.008788 -0.008785
7(1) 1477.085 1438.911 -0.044573 -0.001097 -0.007855
8(1) 1477.063 1439.122 -0.044576 -0.007858 -0.001089
9(1) 1474.346 1432.546 -0.043241 0.000678 -0.007062
10(1) 1474.318 1432.981 -0.043245 -0.007065 0.000691
11(1) 1410.843 1377.548 -0.028060 -0.016937 -0.016944
12(1) 1387.532 1356.818 -0.027083 -0.016001 -0.016001
13(1) 1205.022 1177.335 -0.029813 -0.010333 -0.011188
14(1) 1204.977 1177.775 -0.029806 -0.011191 -0.010328
15(1) 1011.453 988.386 -0.037241 -0.014274 -0.014270
16(1) 821.858 814.503 -0.025712 -0.008603 -0.010446
17(1) 821.847 814.500 -0.025693 -0.010449 -0.008599
18(1) 317.554 296.967 -0.035184 -0.010866 -0.010861
Overtones (DE w.r.t. Ground State)
"""
    if nfreq is None:
nfreq = get_gaussian_nfreq(s)
freqs = np.zeros((nfreq, 2))
lines = s.splitlines()
key = 'Fundamental Bands (DE w.r.t. Ground State)'
iline = io.get_line_number(key, lines=lines)
if iline > 0:
for i in range(nfreq):
iline += 1
line = lines[iline]
cols = line.split()
freqs[i, :] = [float(cols[-5]), float(cols[-4])]
return freqs[freqs[:, 0].argsort()] | 0da2acf3eb1ca0e057da8935ad772a2c65fd251a | 15,076 |
import numpy as np
def uniform_selection_tensor(tensor_data: np.ndarray,
p: int,
n_bits: int,
per_channel: bool = False,
channel_axis: int = 1,
n_iter: int = 10,
min_threshold: float = MIN_THRESHOLD,
quant_error_method: qc.QuantizationErrorMethod = qc.QuantizationErrorMethod.MSE) -> dict:
"""
Compute the optimal quantization range based on the provided QuantizationErrorMethod
to uniformly quantize the tensor.
Different search is applied, depends on the value of the selected QuantizationErrorMethod.
Args:
tensor_data: Tensor content as Numpy array.
p: p-norm to use for the Lp-norm distance.
n_bits: Number of bits to quantize the tensor.
per_channel: Whether the quantization should be per-channel or not.
channel_axis: Output channel index.
n_iter: Number of iterations to search for the optimal threshold (not used for this method).
min_threshold: Minimal threshold to use if threshold is too small (not used for this method).
quant_error_method: an error function to optimize the range parameters' selection accordingly.
Returns:
Optimal quantization range to quantize the tensor uniformly.
"""
tensor_min = get_tensor_min(tensor_data, per_channel, channel_axis)
tensor_max = get_tensor_max(tensor_data, per_channel, channel_axis)
if quant_error_method == qc.QuantizationErrorMethod.NOCLIPPING:
mm = tensor_min, tensor_max
else:
error_function = get_threshold_selection_tensor_error_function(QuantizationMethod.UNIFORM, quant_error_method, p, norm=False)
mm = qparams_uniform_selection_tensor_search(error_function,
tensor_data,
tensor_min,
tensor_max,
n_bits,
per_channel,
channel_axis)
return {RANGE_MIN: mm[0],
RANGE_MAX: mm[1]} | 17f5e13443fc23ce4d0dafc0fb69de226e93fc56 | 15,077 |
import os
from collections import defaultdict
def _get_mock_dataset(root_dir):
"""
root_dir: directory to the mocked dataset
"""
base_dir = os.path.join(root_dir, "PennTreebank")
os.makedirs(base_dir, exist_ok=True)
seed = 1
mocked_data = defaultdict(list)
for file_name in ("ptb.train.txt", "ptb.valid.txt", "ptb.test.txt"):
txt_file = os.path.join(base_dir, file_name)
with open(txt_file, "w", encoding="utf-8") as f:
for i in range(5):
rand_string = get_random_unicode(seed)
dataset_line = f"{rand_string}"
# append line to correct dataset split
split = file_name.replace("ptb.", "").replace(".txt", "")
mocked_data[split].append(dataset_line)
f.write(f"{rand_string}\n")
seed += 1
return mocked_data | e16d05937f743941b1b15324b3d77ddc2b2ccf9c | 15,078 |
import numpy as np
def calculate_bin_P(P, x, cal_type='pes'):
""" Calculate the virtual, binary transition function.
That is, this function is to calculate the transition function which
a state and action pair may visit the virtual state $z$
"""
n, m = x.world_shape
# P_z is defined for the n*m states $s$ and a virtual state $z$
# index 0 - n*m-1: real state
# n*m: virtual state
P_z = np.zeros((5, n*m+1, n*m+1))
ind_a, ind_s, ind_sp = np.where(P)
if cal_type == 'pes':
safe_space = x.S_hat
elif cal_type == 'opt':
safe_space = x.S_bar
for i in range(len(ind_a)):
if safe_space[ind_s[i], ind_a[i]]:
P_z[ind_a[i], ind_s[i], ind_sp[i]] = 1
else:
P_z[ind_a[i], ind_s[i], -1] = 1
# For any action, transition probability from z to z is equal to 1
P_z[:, -1, -1] = 1
return P_z | db7908f2ac0f20d72a70a920c412b895bf4ccef4 | 15,079 |
from numpy import hstack, ones, real
from scipy.sparse import csr_matrix
def makeYbus(baseMVA, bus, branch):
"""Builds the bus admittance matrix and branch admittance matrices.
Returns the full bus admittance matrix (i.e. for all buses) and the
matrices C{Yf} and C{Yt} which, when multiplied by a complex voltage
vector, yield the vector currents injected into each line from the
"from" and "to" buses respectively of each line. Does appropriate
conversions to p.u.
@see: L{makeSbus}
@author: Ray Zimmerman (PSERC Cornell)
@author: Richard Lincoln
"""
## constants
nb = bus.shape[0] ## number of buses
nl = branch.shape[0] ## number of lines
## for each branch, compute the elements of the branch admittance matrix where
##
## | If | | Yff Yft | | Vf |
## | | = | | * | |
## | It | | Ytf Ytt | | Vt |
##
Ytt, Yff, Yft, Ytf = branch_vectors(branch, nl)
## compute shunt admittance
## if Psh is the real power consumed by the shunt at V = 1.0 p.u.
## and Qsh is the reactive power injected by the shunt at V = 1.0 p.u.
## then Psh - j Qsh = V * conj(Ysh * V) = conj(Ysh) = Gs - j Bs,
## i.e. Ysh = Psh + j Qsh, so ...
## vector of shunt admittances
Ysh = (bus[:, GS] + 1j * bus[:, BS]) / baseMVA
## build connection matrices
f = real(branch[:, F_BUS]).astype(int) ## list of "from" buses
t = real(branch[:, T_BUS]).astype(int) ## list of "to" buses
## connection matrix for line & from buses
Cf = csr_matrix((ones(nl), (range(nl), f)), (nl, nb))
## connection matrix for line & to buses
Ct = csr_matrix((ones(nl), (range(nl), t)), (nl, nb))
## build Yf and Yt such that Yf * V is the vector of complex branch currents injected
## at each branch's "from" bus, and Yt is the same for the "to" bus end
i = hstack([range(nl), range(nl)]) ## double set of row indices
Yf = csr_matrix((hstack([Yff, Yft]), (i, hstack([f, t]))), (nl, nb))
Yt = csr_matrix((hstack([Ytf, Ytt]), (i, hstack([f, t]))), (nl, nb))
# Yf = spdiags(Yff, 0, nl, nl) * Cf + spdiags(Yft, 0, nl, nl) * Ct
# Yt = spdiags(Ytf, 0, nl, nl) * Cf + spdiags(Ytt, 0, nl, nl) * Ct
## build Ybus
Ybus = Cf.T * Yf + Ct.T * Yt + \
csr_matrix((Ysh, (range(nb), range(nb))), (nb, nb))
Ybus.sort_indices()
Ybus.eliminate_zeros()
return Ybus, Yf, Yt | 8068d6a17c99f747e8d95b0b3ba1ac65735be382 | 15,080 |
import numpy
def list_blob(math_engine, batch_len, batch_width, list_size, channels, dtype="float32"):
"""Creates a blob with one-dimensional Height * Width * Depth elements.
Parameters
---------
math_engine : object
The math engine that works with this blob.
batch_len : int, > 0
The BatchLength dimension of the new blob.
batch_width : int, > 0
The BatchWidth dimension of the new blob.
list_size : int, > 0
The ListSize dimension of the new blob.
channels : int, > 0
The Channels dimension of the new blob.
dtype : {"float32", "int32"}, default="float32"
The type of data in the blob.
"""
if dtype != "float32" and dtype != "int32":
raise ValueError('The `dtype` must be one of {`float32`, `int32`}.')
if batch_len < 1:
raise ValueError('The `batch_len` must be > 0.')
if batch_width < 1:
raise ValueError('The `batch_width` must be > 0.')
if list_size < 1:
raise ValueError('The `list_size` must be > 0.')
if channels < 1:
raise ValueError('The `channels` must be > 0.')
shape = numpy.array((batch_len, batch_width, list_size, 1, 1, 1, channels), dtype=numpy.int32, copy=False)
return Blob(PythonWrapper.tensor(math_engine._internal, shape, dtype)) | dab3b45173fcca32f2cfc7bfbc585e002ff34f37 | 15,081 |
def skip_on_pypy_because_cache_next_works_differently(func):
"""Not sure what happens there but on PyPy CacheNext doesn't work like on
CPython.
"""
return _skipif_wrapper(func, IS_PYPY,
reason='PyPy works differently with __next__ cache.') | b2f765f1cad292948bb456aa841e92f180222061 | 15,082 |
import random
def get_life_of_brian():
"""
Get lines from test_LifeOfBrian.
"""
count = 0
monty_list = ['coconut']
try:
with open(LIFE_OF_BRIAN_SCRIPT) as f:
lines = f.readlines()
for line in lines:
count += 1
#print(line)
monty_list.append(line)
random_line = random.randrange(0, count)
picked_line = monty_list[random_line]
return picked_line
except:
#print(f"file at : {LIFE_OF_BRIAN_SCRIPT} could not be opened.")
return 'but it has FAAANNNGGsss' | 5b6007888f51b0b2a38eea6381bdaa5187624dda | 15,083 |
import numpy as np
def ackley_func(x):
"""Ackley's objective function.
Has a global minimum at :code:`f(0,0,...,0)` with a search
domain of [-32, 32]
Parameters
----------
x : numpy.ndarray
set of inputs of shape :code:`(n_particles, dimensions)`
Returns
-------
numpy.ndarray
computed cost of size :code:`(n_particles, )`
Raises
------
ValueError
When the input is out of bounds with respect to the function
domain
"""
if not np.logical_and(x >= -32, x <= 32).all():
raise ValueError('Input for Ackley function must be within [-32, 32].')
d = x.shape[1]
j = (-20.0 * np.exp(-0.2 * np.sqrt((1/d) * (x**2).sum(axis=1)))
- np.exp((1/float(d)) * np.cos(2 * np.pi * x).sum(axis=1))
+ 20.0
+ np.exp(1))
return j | f00b729f57fbaa1534bb78589e1a64912b08b4a3 | 15,084 |
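# Usage sketch: evaluates the cost at the known global minimum and away from it;
# the swarm shapes follow the (n_particles, dimensions) convention from the docstring.
import numpy as np

swarm_at_optimum = np.zeros((3, 2))
print(ackley_func(swarm_at_optimum))        # expected approximately [0. 0. 0.]
print(ackley_func(np.full((1, 2), 5.0)))    # a particle away from the optimum costs more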
def validate_listable_type(*atype):
"""Validate a list of atype.
@validate_listable_type(str)
def example_func(a_list):
return a_list
@validate_listable_type(int)
def example_int_func(a_list):
return a_list
"""
if len(atype) != 1:
raise ValueError("Expected one arg. Got {n} args.".format(n=len(atype)))
type_ = atype[0]
def wrap(f):
def wrapped_f(*args, **kw):
for arg in args[0]:
if not isinstance(arg, type_):
raise TypeError("Expected type {t}. Got type {x} for {v}.".format(t=type_, x=type(arg), v=args))
return f(*args)
return wrapped_f
return wrap | 691737184fca8bdcc7f4c3779af86b9a041b71dc | 15,085 |
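# Usage sketch mirroring the docstring: the decorator type-checks every element of
# the first positional argument and raises TypeError on the first mismatch.
@validate_listable_type(str)
def join_words(a_list):
    return " ".join(a_list)

print(join_words(["hello", "world"]))       # expected 'hello world'
try:
    join_words(["hello", 42])
except TypeError as err:
    print(err)                              # mixed element types are rejected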
def meh(captcha):
"""Returns the sum of the digits which match the next one in the captcha
input string.
>>> meh('1122')
3
>>> meh('1111')
4
>>> meh('1234')
0
>>> meh('91212129')
9
"""
result = 0
for n in range(len(captcha)):
if captcha[n] == captcha[(n + 1) % len(captcha)]:
result += int(captcha[n])
return result | 2ff68455b7bb826a81392dba3bc8899374cbcc3e | 15,086 |
def check_cli(module, cli):
"""
This method checks if vRouter exists on the target node.
This method also checks for idempotency using the vrouter-bgp-show command.
If the given vRouter exists, return VROUTER_EXISTS as True else False.
If the given neighbor exists on the given vRouter, return NEIGHBOR_EXISTS as True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
:return Booleans: VROUTER_EXISTS, NEIGHBOR_EXISTS
"""
vrouter_name = module.params['pn_vrouter_name']
neighbor = module.params['pn_neighbor']
# Check for vRouter
check_vrouter = cli + ' vrouter-show format name no-show-headers'
out = run_commands(module, check_vrouter)[1]
if out:
out = out.split()
VROUTER_EXISTS = True if vrouter_name in out else False
if neighbor:
# Check for BGP neighbor
show = cli + ' vrouter-bgp-show vrouter-name %s ' % vrouter_name
show += 'format neighbor no-show-headers'
out = run_commands(module, show)[1]
if out and neighbor in out.split():
NEIGHBOR_EXISTS = True
else:
NEIGHBOR_EXISTS = False
return VROUTER_EXISTS, NEIGHBOR_EXISTS | fdeb4dafad83562a48d0d22871fb6dc5a845fc2b | 15,087 |
def is_prime(n):
""" from
https://stackoverflow.com/questions/15285534/isprime-function-for-python-language
"""
if n == 2 or n == 3: return True
if n < 2 or n%2 == 0: return False
if n < 9: return True
if n%3 == 0: return False
r = int(n**0.5)
f = 5
while f <= r:
if n%f == 0: return False
if n%(f+2) == 0: return False
f +=6
return True | e992badd0648d0896097df71186fee2895d20119 | 15,088 |
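# Usage sketch: quick sanity check of the trial-division primality test.
print([n for n in range(2, 30) if is_prime(n)])
# expected [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]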
def load(file, encoding=None):
"""load(file,encoding=None) -> object
This function reads a tnetstring from a file and parses it into a
python object. The file must support the read() method, and this
function promises not to read more data than necessary.
"""
# Read the length prefix one char at a time.
# Note that the netstring spec explicitly forbids padding zeros.
c = file.read(1)
if not c.isdigit():
raise ValueError("not a tnetstring: missing or invalid length prefix")
datalen = ord(c) - ord("0")
c = file.read(1)
if datalen != 0:
while c.isdigit():
datalen = (10 * datalen) + (ord(c) - ord("0"))
if datalen > 999999999:
errmsg = "not a tnetstring: absurdly large length prefix"
raise ValueError(errmsg)
c = file.read(1)
if c != ":":
raise ValueError("not a tnetstring: missing or invalid length prefix")
# Now we can read and parse the payload.
# This repeats the dispatch logic of pop() so we can avoid
# re-constructing the outermost tnetstring.
data = file.read(datalen)
if len(data) != datalen:
raise ValueError("not a tnetstring: length prefix too big")
type = file.read(1)
if type == ",":
if encoding is not None:
return data.decode(encoding)
return data
if type == "#":
try:
return int(data)
except ValueError:
raise ValueError("not a tnetstring: invalid integer literal")
if type == "^":
try:
return float(data)
except ValueError:
raise ValueError("not a tnetstring: invalid float literal")
if type == "!":
if data == "true":
return True
elif data == "false":
return False
else:
raise ValueError("not a tnetstring: invalid boolean literal")
if type == "~":
if data:
raise ValueError("not a tnetstring: invalid null literal")
return None
if type == "]":
l = []
while data:
(item, data) = pop(data, encoding)
l.append(item)
return l
if type == "}":
d = {}
while data:
(key, data) = pop(data, encoding)
(val, data) = pop(data, encoding)
d[key] = val
return d
raise ValueError("unknown type tag") | 939cc6f7a42daa35552e256a1e2725826d44c01c | 15,089 |
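# Usage sketch: load() reads exactly one tnetstring from any object with a read()
# method, so an in-memory text stream is enough for scalar payloads (lists and
# dicts additionally rely on the pop() helper defined alongside this function).
import io

print(load(io.StringIO("5:hello,")))   # expected 'hello'
print(load(io.StringIO("2:42#")))      # expected 42
print(load(io.StringIO("4:true!")))    # expected True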
import ispyb.model.datacollection
import ispyb.model.processingprogram
import logging
import configparser
import mysql.connector
def enable(configuration_file, section="ispyb"):
"""Enable access to features that are currently under development."""
global _db, _db_cc, _db_config
if _db_config:
if _db_config == configuration_file:
# This database connection is already set up.
return
logging.getLogger("ispyb").warn(
"__future__ configuration file change requested"
)
disable()
logging.getLogger("ispyb").info(
"NOTICE: This code uses __future__ functionality in the ISPyB API. "
"This enables unsupported and potentially unstable code, which may "
"change from version to version without warnings. Here be dragons."
)
cfgparser = configparser.RawConfigParser()
if not cfgparser.read(configuration_file):
raise RuntimeError(
"Could not read from configuration file %s" % configuration_file
)
cfgsection = dict(cfgparser.items(section))
host = cfgsection.get("host")
port = cfgsection.get("port", 3306)
database = cfgsection.get("database", cfgsection.get("db"))
username = cfgsection.get("username", cfgsection.get("user"))
password = cfgsection.get("password", cfgsection.get("pw"))
# Open a direct MySQL connection
_db = mysql.connector.connect(
host=host,
port=port,
user=username,
password=password,
database=database,
use_pure=True,
)
_db_config = configuration_file
_db.autocommit = True
class DictionaryCursorContextManager(object):
"""This class creates dictionary cursors for mysql.connector connections.
By using a context manager it is ensured that cursors are closed
immediately after use.
Cursors created with this context manager return results as a dictionary
and offer a .run() function, which is an alias to .execute that accepts
query parameters as function parameters rather than a list.
"""
def __enter__(cm):
"""Enter context. Ensure the database is alive and return a cursor
with an extra .run() function."""
_db.ping(reconnect=True)
cm.cursor = _db.cursor(dictionary=True)
def flat_execute(stmt, *parameters):
"""Pass all given function parameters as a list to the existing
.execute() function."""
return cm.cursor.execute(stmt, parameters)
setattr(cm.cursor, "run", flat_execute)
return cm.cursor
def __exit__(cm, *args):
"""Leave context. Close cursor. Destroy reference."""
cm.cursor.close()
cm.cursor = None
_db_cc = DictionaryCursorContextManager
ispyb.model.datacollection.DataCollection.integrations = (
_get_linked_autoprocintegration_for_dc
)
ispyb.model.datacollection.DataCollection.pdb = _get_linked_pdb_for_dc
ispyb.model.processingprogram.ProcessingProgram.reload = _get_autoprocprogram | 2aa613694f01c290f4cfeea8d8a470e87000021f | 15,090 |
import time
def MakeMsgCmd(cmdName,argList):
"""
Take a command name and an argList of tuples consisting of
pairs of the form (argName, argValue), and return a string
representing the corresponding dibs command.
"""
body = MakeStartTag(dibs_constants.cmdTagName,{'id':cmdName}) + '\n'
for argPair in argList:
body += (MakeStartTag(dibs_constants.argTagName,{'id':argPair[0]})
+ argPair[1] + MakeEndTag(dibs_constants.argTagName) + '\n')
    body += (MakeStartTag(dibs_constants.argTagName,
                          {'id':dibs_constants.cmdTimeArgName})
             + repr(time.time()) + MakeEndTag(dibs_constants.argTagName) + '\n' +
             MakeEndCmdTag())
return body | 63fd4c2695c005fa7ff465cd1975285d15dd4faf | 15,091 |
def preprocess(tweet):
"""
Substitures urls with the string URL. Removes leading and trailing whitespaces
Removes non latin characters
:param tweet:
:return:
"""
# remove URL
line = remove_url(str(tweet.strip()))
# remove non Latin characters
stripped_text = ''
for c in line:
stripped_text += c if len(c.encode(encoding='utf_8')) == 1 else ''
return stripped_text.translate(table).strip() | 44bc9f9c66c6abc8f95acdf1666f9ded7c6aa610 | 15,092 |
import xml.etree.ElementTree as ET
def read_xml_file(input_file, elem):
"""Reads xml data and extracts specified elements
Parameters
----------
input_file : str
The OTA xml file
elem : str
Specified elements to be extracted
Returns
-------
list
a list of xml seat data
"""
tree = ET.parse(input_file)
root = tree.findall(elem)
return root | 58b8e4b86f1400d0d77856cb57e6823f0c538487 | 15,093 |
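# Usage sketch: writes a tiny OTA-like document to a temporary file and pulls out
# the Seat elements; the tag and attribute names here are illustrative only.
import os
import tempfile

demo_xml = "<Cabin><Seat number='1A'/><Seat number='1B'/></Cabin>"
with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False) as f:
    f.write(demo_xml)
    xml_path = f.name
seats = read_xml_file(xml_path, ".//Seat")
print([seat.get("number") for seat in seats])   # expected ['1A', '1B']
os.unlink(xml_path)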
from .application import Application, ApplicationEnv
from .operator import Operator, OperatorEnv
from typing import Optional
from typing import Union
from typing import List
def env(pip_packages: Optional[Union[str, List[str]]] = None):
"""A decorator that adds an environment specification to either Operator or Application.
Args:
pip_packages Optional[Union[str, List[str]]]: A string that is a path to requirements.txt file
or a list of packages to install.
Returns:
A decorator that adds an environment specification to either Operator or Application.
"""
# Import the classes here to avoid circular import.
def decorator(cls):
if hasattr(cls, "_env") and cls._env:
            raise ItemAlreadyExistsError(f"@env decorator is already specified for {cls}.")
if issubclass(cls, Operator):
environment = OperatorEnv(pip_packages=pip_packages)
elif issubclass(cls, Application):
environment = ApplicationEnv(pip_packages=pip_packages)
else:
raise UnknownTypeError(f"@env decorator cannot be specified for {cls}.")
cls._env = environment
return cls
return decorator | 9404d28e56a0d8824c9f05a0fa601b9ba181c98f | 15,094 |
import time
def test_trace_propagation(
endpoint, transport, encoding, enabled, expect_spans, expect_baggage,
http_patchers, tracer, mock_server, thrift_service,
app, http_server, base_url, http_client):
"""
Main TChannel-OpenTracing integration test, using basictracer as
implementation of OpenTracing API.
The main logic of this test is as follows:
1. Start a new trace with a root span
2. Store a random value in the baggage
3. Call the first service at the endpoint from `endpoint` parameter.
The first service is either tchannel or http, depending on the value
if `transport` parameter.
4. The first service calls the second service using pre-defined logic
that depends on the endpoint invoked on the first service.
5. The second service accesses the tracing span and returns the value
of the baggage item as the response.
6. The first service responds with the value from the second service.
7. The main test validates that the response is equal to the original
random value of the baggage, proving trace & baggage propagation.
8. The test also validates that all spans have been finished and
recorded, and that they all have the same trace ID.
We expect 5 spans to be created from each test run:
* top-level (root) span started in the test
* client span (calling service-1)
* service-1 server span
* service-1 client span (calling service-2)
* service-2 server span
:param endpoint: name of the endpoint to call on the first service
:param transport: type of the first service: tchannel or http
:param enabled: if False, channels are instructed to disable tracing
:param expect_spans: number of spans we expect to be generated
:param http_patchers: monkey-patching of tornado AsyncHTTPClient
:param tracer: a concrete implementation of OpenTracing Tracer
:param mock_server: tchannel server (from conftest.py)
:param thrift_service: fixture that creates a Thrift service from fake IDL
:param app: tornado.web.Application fixture
:param http_server: http server (provided by pytest-tornado)
:param base_url: address of http server (provided by pytest-tornado)
:param http_client: Tornado's AsyncHTTPClient (provided by pytest-tornado)
"""
# mock_server is created as a fixture, so we need to set tracer on it
mock_server.tchannel._dep_tchannel._tracer = tracer
mock_server.tchannel._dep_tchannel._trace = enabled
register(tchannel=mock_server.tchannel, thrift_service=thrift_service,
http_client=http_client, base_url=base_url)
tchannel = TChannel(name='test', tracer=tracer, trace=enabled)
app.add_handlers(".*$", [
(r"/", HttpHandler, {'client_channel': tchannel})
])
with mock.patch('opentracing.tracer', tracer),\
mock.patch.object(tracing.log, 'exception') as log_exception:
assert opentracing.tracer == tracer # sanity check that patch worked
span = tracer.start_span('root')
baggage = 'from handler3 %d' % time.time()
span.set_baggage_item(BAGGAGE_KEY, baggage)
if not enabled:
span.set_tag('sampling.priority', 0)
with span: # use span as context manager so that it's always finished
response_future = None
with tchannel.context_provider.span_in_context(span):
if transport == 'tchannel':
if encoding == 'json':
response_future = tchannel.json(
service='test-client',
endpoint=endpoint,
hostport=mock_server.hostport,
body=mock_server.hostport,
)
elif encoding == 'thrift':
if endpoint == 'thrift1':
response_future = tchannel.thrift(
thrift_service.X.thrift1(mock_server.hostport),
hostport=mock_server.hostport,
)
elif endpoint == 'thrift3':
response_future = tchannel.thrift(
thrift_service.X.thrift3(mock_server.hostport),
hostport=mock_server.hostport,
)
elif endpoint == 'thrift4':
response_future = tchannel.thrift(
thrift_service.X.thrift4(mock_server.hostport),
hostport=mock_server.hostport,
)
else:
raise ValueError('wrong endpoint %s' % endpoint)
else:
raise ValueError('wrong encoding %s' % encoding)
elif transport == 'http':
response_future = http_client.fetch(
request=HTTPRequest(
url='%s%s' % (base_url, endpoint),
method='POST',
body=mock_server.hostport,
)
)
else:
raise NotImplementedError(
'unknown transport %s' % transport)
response = yield response_future
assert log_exception.call_count == 0
body = response.body
if expect_baggage:
assert body == baggage
def get_sampled_spans():
return [s for s in tracer.reporter.get_spans() if s.is_sampled]
# Sometimes the test runs into weird race condition where the
# after_send_response() hook is executed, but the span is not yet
# recorded. To prevent flaky test runs we check and wait until
# all spans are recorded, for up to 1 second.
for i in range(0, 1000):
spans = get_sampled_spans()
if len(spans) >= expect_spans:
break
yield tornado.gen.sleep(0.001) # yield execution and sleep for 1ms
spans = get_sampled_spans()
assert expect_spans == len(spans), 'Unexpected number of spans reported'
# We expect all trace IDs in collected spans to be the same
if expect_spans > 0:
spans = tracer.reporter.get_spans()
assert 1 == len(set([s.trace_id for s in spans])), \
'all spans must have the same trace_id' | afd85ef71b14a263f4480a0c0f81e019dc680e34 | 15,095 |
from jax import jacrev, jacfwd
def cost_stage_grads(x, u, target, lmbda):
"""
x: (n_states, )
u: (n_controls,)
target: (n_states, )
lmbda: penalty on controls
"""
dL = jacrev(cost_stage, (0,1)) #l_x, l_u
d2L = jacfwd(dL, (0,1)) # l_xx etc
l_x, l_u = dL(x, u, target, lmbda)
d2Ldx, d2Ldu = d2L(x, u, target, lmbda)
l_xx, l_xu = d2Ldx
l_ux, l_uu = d2Ldu
return l_x, l_u, l_xx, l_ux, l_uu | a6137653adcb3579775a9bcc9e8ddf03bb6f2cda | 15,096 |
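# Usage sketch with a hypothetical stand-in cost: cost_stage is not included in this
# snippet, so a simple quadratic state/control penalty is assumed here purely to
# demonstrate the shapes of the derivatives returned by cost_stage_grads.
import jax.numpy as jnp

def cost_stage(x, u, target, lmbda):
    return jnp.sum((x - target) ** 2) + lmbda * jnp.sum(u ** 2)

x0 = jnp.zeros(3)
u0 = jnp.ones(2)
goal = jnp.array([1.0, 0.0, -1.0])
l_x, l_u, l_xx, l_ux, l_uu = cost_stage_grads(x0, u0, goal, 0.1)
print(l_xx.shape, l_uu.shape)   # expected (3, 3) (2, 2)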
from sympy import Matrix, cos, sin, symbols
q = symbols('q')  # rotation-angle symbol substituted below via evalf
def create_rotation_matrix(angles):
"""
Returns a rotation matrix that will produce the given Euler angles
:param angles: (roll, pitch, yaw)
"""
R_x = Matrix([[1, 0, 0],
[0, cos(q), -sin(q)],
[0, sin(q), cos(q)]]).evalf(subs={q: angles[0]})
R_y = Matrix([[cos(q), 0, sin(q)],
[0, 1, 0],
[-sin(q), 0, cos(q)]]).evalf(subs={q: angles[1]})
R_z = Matrix([[cos(q), -sin(q), 0],
[sin(q), cos(q), 0],
[0, 0, 1]]).evalf(subs={q: angles[2]})
return R_z * R_y * R_x | 8431dce383a83d431f9951f76624723f5697cf83 | 15,097 |
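# Usage sketch: zero angles should reproduce the identity matrix, and a 90-degree
# yaw maps the x axis onto y (up to small floating-point noise from evalf).
from sympy import pi

print(create_rotation_matrix((0, 0, 0)))        # expected the 3x3 identity
print(create_rotation_matrix((0, 0, pi / 2)))   # first column ~ (0, 1, 0)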
def goodput_for_range(endpoint, first_packet, last_packet):
"""Computes the goodput (in bps) achieved between observing two specific packets"""
if first_packet == last_packet or \
first_packet.timestamp_us == last_packet.timestamp_us:
return 0
byte_count = 0
seen_first = False
for packet in endpoint.packets:
if packet == last_packet:
break
if packet == first_packet:
seen_first = True
if not seen_first:
continue
# Packet contributes to goodput if it was not retransmitted
if not packet.is_lost():
byte_count += packet.data_len
time_us = last_packet.timestamp_us - first_packet.timestamp_us
return byte_count * 8 * 1E6 / time_us | aea56993771c1a250dacdfccf8328c7a0d3ce50b | 15,098 |
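# Usage sketch with minimal stand-in objects: the real packet/endpoint classes are
# not part of this snippet, so lightweight dataclasses exposing only the attributes
# the function touches (packets, timestamp_us, data_len, is_lost) are assumed here.
from dataclasses import dataclass, field
from typing import List

@dataclass
class FakePacket:
    timestamp_us: int
    data_len: int
    lost: bool = False

    def is_lost(self):
        return self.lost

@dataclass
class FakeEndpoint:
    packets: List[FakePacket] = field(default_factory=list)

pkts = [FakePacket(0, 1000), FakePacket(1_000_000, 1000), FakePacket(2_000_000, 1000)]
endpoint = FakeEndpoint(packets=pkts)
print(goodput_for_range(endpoint, pkts[0], pkts[2]))   # expected 8000.0 bits per second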
from typing import Sequence
def validate_scopes(
required_scopes: Sequence[str], token_scopes: Sequence[str]
) -> bool:
"""Validates that all require scopes are present in the token scopes"""
missing_scopes = set(required_scopes) - set(token_scopes)
if missing_scopes:
raise SecurityException(f"Missing required scopes: {missing_scopes}")
return not missing_scopes | e979cdd2eb73c89084f72fd4f70390dfe3109c17 | 15,099 |