content | sha1 | id
---|---|---|
import sympy as S
import sympy.abc  # provides S.abc.x and S.abc.y used below
from sympy.abc import t  # time symbol used in the Taylor expansions
def solve(FLT_MIN, FLT_MAX):
"""Solving cos(x) <= -0.99, dx/dt=1, x(0) = 0
# Basic steps:
# 1. First compute the n terms for each ode
# 2. Next replace the guard with ode(t), so that it is only in t
# 3. Then compute the number of terms needed for g(t)
# 4. Finally, compute g(t) = 0 and g(t)-2g(0) = 0
# 5. Note that computing the number of terms "n" in the Taylor expansion
# essentially guarantees that tᵣ - t is bounded by the floating point
# error specified by the polynomial solver.
"""
# XXX: This is the theta
def test_multivariate():
# LTI is easy to solve
# Xdiff = S.sympify('(5*x(t) + 2*y(t) + 1)')
# Time varying, takes more time in general,
# with increasing power for t^n
# Xdiff = S.sympify('(5*x(t) + 2*y(t) + t**3)')
# Non linear with periodic functions
# Xdiff = S.sympify('sin(sqrt(x(t)+1))')
# import math
# FLT_MIN = 0
# FLT_MAX = 2*math.pi
# More complex ode
# Xdiff = S.sympify('sin(sin(x(t)+1))')
# The angles can only be between 0 and 2π
# import math
# FLT_MIN = -2*math.pi
# FLT_MAX = 2*math.pi
# A sqrt
# Xdiff = S.sympify('sqrt(x(t)+1)')
# The ones below need to have a reduced search space bound for
# continuous variables.
# Another sqrt, does not seem to converge
# Xdiff = S.sympify('x(t)*t') # Does not work
# Now multiplication; does not seem to converge, ever.
Xdiff = S.sympify('exp(2*x(t))') # Does not work either
# Using scaling factor, to reduce the bounds of the maximisation
# problem.
FLT_MIN = -1e1
FLT_MAX = 1e1
return FLT_MIN, FLT_MAX, Xdiff
FLT_MIN, FLT_MAX, tomaximize = test_multivariate()
xt = S.sympify('x(t)')
x = S.abc.x
yt = S.sympify('y(t)')
y = S.abc.y
# Coupled ode example
(tokens, nx) = getN({xt.diff(t): ([tomaximize],
{yt.diff(t): (xt,
# args always in
# same order for
# everyone
[x, y, t])},
# Always list all the replacements
{xt: x, yt: y},
[x, y, t])},
FLT_MIN=FLT_MIN, FLT_MAX=FLT_MAX, epsilon=1e-6)
# print(tokens)
print('required terms for θ satisfying Lipschitz constant:', nx)
# Now make the taylor polynomial
taylorxcoeffs = [5*S.pi/2, 1] + [0]*(nx-2)
# These are the smooth tokens
taylorxpoly = sum([t**i*v for i, v in zip(range(nx), taylorxcoeffs)])
# The theta' taylor polynomial
print('θ(t) = ', taylorxpoly)
# The guard function that needs the lipschitz constant
def guard():
gt = (S.cos(taylorxpoly)+0.99)
return gt.diff(t)
gt = S.sympify('g(t)')
tokens, n = getN({gt.diff(t): ([guard()], dict(), dict(), [t])})
# print(tokens)
print('Number of terms for cos(%s)+0.99: %s' % (taylorxpoly, n))
# Now we do the example of the ode with taylor polynomial
cosseries1 = S.fps(S.cos(taylorxpoly)+0.99, x0=0).polynomial(n=n)
print('Guard taylor polynomial:', cosseries1, '\n')
# print(S.simplify(cosseries1))
root = None
try:
root1 = S.nsolve(cosseries1, t, 0, dict=True)[0][t]
root = root1
except ValueError:
print('No root for g(t)=0')
# Now the second one, this one fails
# g(t) - 2*g(0) = 0
cosseries2 = S.fps(S.cos((5*S.pi/2) + t)-1.98, x0=0).polynomial(n=n)
# print(S.simplify(cosseries2))
try:
root2 = S.nsolve(cosseries2, t, 0, dict=True)[0][t]
root = min(root, root2)
except ValueError:
print('No root for g(t)-2*g(0) = 0')
print('guard Δt:', root) | 90288d73717d02b966beb66396e0be16f68f55f5 | 12,117 |
import tvm
from tvm import relay
from tvm.contrib import graph_executor
def run_tvm_graph(
coreml_model, target, device, input_data, input_name, output_shape, output_dtype="float32"
):
"""Generic function to compile on relay and execute on tvm"""
if isinstance(input_data, list):
shape_dict = {}
dtype_dict = {}
for i, e in enumerate(input_name):
shape_dict[e] = input_data[i].shape
dtype_dict[e] = input_data[i].dtype
else:
shape_dict = {input_name: input_data.shape}
dtype_dict = {input_name: input_data.dtype}
mod, params = relay.frontend.from_coreml(coreml_model, shape_dict)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target, params=params)
m = graph_executor.GraphModule(lib["default"](device))
# set inputs
if isinstance(input_data, list):
for i, e in enumerate(input_name):
m.set_input(e, tvm.nd.array(input_data[i].astype(input_data[i].dtype)))
else:
m.set_input(input_name, tvm.nd.array(input_data.astype(input_data.dtype)))
# execute
m.run()
# get outputs
if isinstance(output_shape, list) and isinstance(output_dtype, list):
tvm_output_list = []
for i, s in enumerate(output_shape):
tvm_output = m.get_output(i, tvm.nd.empty((s), output_dtype[i]))
tvm_output_list.append(tvm_output.numpy())
return tvm_output_list
else:
if not output_shape:
tvm_output = m.get_output(0)
else:
tvm_output = m.get_output(0, tvm.nd.empty((output_shape), output_dtype))
return tvm_output.numpy() | 391b3e0dcca3c1893da69a6ce1ac219f7c56dfa0 | 12,118 |
import math
import numpy as np
def detect_peaks(data, srate):
"""
obtain maximum and minimum values from a blood pressure or pleth waveform
the minlist is always one element shorter than the maxlist
"""
ret = []
if not isinstance(data, np.ndarray):
data = np.array(data)
raw_data = np.copy(data)
raw_srate = srate
# resampling rate to 100Hz
data = resample_hz(data, srate, 100)
srate = 100
# upper and lower bound of the heart rate (Hz = /sec)
# heart rate = hf * 60;
fh = 200 / 60 # 3.3
fl = 30 / 60 # 0.5
# estimate hr
y1 = band_pass(data, srate, 0.5 * fl, 3 * fh)
# Divide the entire x into four regions and use the median of these
# hf = []
# for(var i = 0; i < 4; i++) {
# var subw = new Wav(srate, y1.vals.copy(data.length / 4 * i, data.length / 4 * (i+1)));
# hf[i] = subw.estimate_heart_rate(fl, fh);
# if(hf[i] == 0) {
# console.log("HR estimation failed, assume 75");
# hf[i] = 75 / 60;
# }
# }
# hf = hf.median();
# Whole heart freq estimation
hf = estimate_heart_freq(y1, srate)
if hf == 0:
print("HR estimation failed, assume 75")
hf = 75 / 60
# band pass filter again with heart freq estimation
y2 = band_pass(data, srate, 0.5 * fl, 2.5 * hf)
d2 = np.diff(y2)
# detect peak in gradient
p2 = detect_maxima(d2, 90)
# detect real peak
y3 = band_pass(data, srate, 0.5 * fl, 10 * hf)
p3 = detect_maxima(y3, 60)
# find closest p3 that follows p2
p4 = []
last_p3 = 0
for idx_p2 in p2:
idx_p3 = 0
for idx_p3 in p3:
if idx_p3 > idx_p2:
break
if idx_p3 != 0:
if last_p3 != idx_p3:
p4.append(idx_p3)
last_p3 = idx_p3
# nearest neighbor and inter beat interval correction
# p: location of detected peaks
pc = []
# find all maxima before preprocessing
m = detect_maxima(data, 0)
m = np.array(m)
# correct peaks location error due to preprocessing
last = -1
for idx_p4 in p4:
cand = find_nearest(m, idx_p4)
if cand != last:
pc.append(cand)
last = cand
ht = 1 / hf # beat interval (sec)
# correct false negatives (FN)
# Make sure no r-peak is missing from pc.
i = -1
while i < len(pc):
if i < 0:
idx_from = 0
else:
idx_from = pc[i]
if i >= len(pc) - 1:
idx_to = len(data)-1
else:
idx_to = pc[i+1]
# find false negative and fill it
if idx_to - idx_from < 1.75 * ht * srate:
i += 1
continue
# It cannot be within 0.2 * ht of either side
idx_from += 0.2 * ht * srate
idx_to -= 0.2 * ht * srate
# Find missing peak and add it
# find the maximum value from idx_from to idx_to
idx_max = -1
val_max = 0
for j in range(np.searchsorted(m, idx_from), len(m)):
idx_cand = m[j]
if idx_cand >= idx_to:
break
if idx_max == -1 or val_max < data[idx_cand]:
val_max = data[idx_cand]
idx_max = idx_cand
# If there is no candidate for this FN, skip it
if idx_max != -1:  # add idx_max and restart from there
pc.insert(i+1, idx_max)
i -= 1
i += 1
# correct false positives (FP)
i = 0
while i < len(pc) - 1:
idx1 = pc[i]
idx2 = pc[i+1]
if idx2 - idx1 < 0.75 * ht * srate: # false positive
idx_del = i + 1 # default: delete i+1
if 1 < i < len(pc) - 2:
# minimize heart rate variability
idx_prev = pc[i-1]
idx_next = pc[i+2]
# find center point distance
d1 = abs(idx_next + idx_prev - 2 * idx1)
d2 = abs(idx_next + idx_prev - 2 * idx2)
if d1 > d2:
idx_del = i
else:
idx_del = i+1
elif i == 0:
idx_del = i
elif i == len(pc) - 2:
idx_del = i+1
pc.pop(idx_del)
i -= 1
i += 1
# remove duplicates
i = 0
while i < len(pc) - 1:
    if pc[i] == pc[i+1]:
        pc.pop(i)
    else:
        i += 1
# find nearest peak in real data
# We downsample x to srate to get maxidxs. ex) 1000 Hz -> 100 Hz
# Therefore, the position found by maxidx may differ by raw_srate / srate.
maxlist = []
ratio = math.ceil(raw_srate / srate)
for maxidx in pc:
idx = int(maxidx * raw_srate / srate) # estimated idx -> not precise
maxlist.append(max_idx(raw_data, idx - ratio - 1, idx + ratio + 1))
# get the minlist from maxlist
minlist = []
for i in range(len(maxlist) - 1):
minlist.append(min_idx(raw_data, maxlist[i], maxlist[i+1]))
return [minlist, maxlist] | ad327b10dd6bcecb3036ecfb5cdcf07defecf2ff | 12,119 |
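# detect_peaks relies on several helpers that are not shown here (resample_hz,
# band_pass, estimate_heart_freq, detect_maxima, find_nearest, max_idx, min_idx).
# Below is a minimal sketch of the three index helpers, assuming they operate on
# plain numpy arrays; the real implementations may differ.
import numpy as np

def find_nearest(arr, value):
    """Return the element of the sorted array `arr` closest to `value`."""
    idx = int(np.clip(np.searchsorted(arr, value), 1, len(arr) - 1))
    left, right = arr[idx - 1], arr[idx]
    return left if (value - left) <= (right - value) else right

def max_idx(data, idx_from, idx_to):
    """Index of the maximum of `data` within [idx_from, idx_to)."""
    idx_from, idx_to = max(0, idx_from), min(len(data), idx_to)
    return idx_from + int(np.argmax(data[idx_from:idx_to]))

def min_idx(data, idx_from, idx_to):
    """Index of the minimum of `data` within [idx_from, idx_to)."""
    idx_from, idx_to = max(0, idx_from), min(len(data), idx_to)
    return idx_from + int(np.argmin(data[idx_from:idx_to]))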
from typing import Dict, Tuple
def add_entity_to_watchlist(client: Client, args) -> Tuple[str, Dict, Dict]:
"""Adds an entity to a watchlist.
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
Outputs.
"""
watchlist_name = args.get('watchlist_name')
entity_type = args.get('entity_type')
entity_name = args.get('entity_name')
expiry_days = args.get('expiry_days') if 'expiry_days' in args else '30'
response = client.add_entity_to_watchlist_request(watchlist_name, entity_type, entity_name, expiry_days)
if 'successfull' not in response:
raise Exception(f'Failed to add entity {entity_name} to the watchlist {watchlist_name}.\n'
f'Error from Securonix is: {response}.')
human_readable = f'Successfully added the entity {entity_name} to the watchlist {watchlist_name}.'
return human_readable, {}, response | 28270c3fa0985458a1fc18f5fd4d2c8661eae1dc | 12,120 |
def is_base255(channels):
"""check if a color is in base 01"""
if isinstance(channels, str):
return False
return all(_test_base255(channels).values()) | e8e6176785303f8f1130c7e99f929ec183e145c5 | 12,121 |
def make_unrestricted_prediction(solution: SolverState) -> tuple[Role, ...]:
"""
Uses a list of true/false statements and possible role sets
to return a rushed list of predictions for all roles.
Does not restrict guesses to the possible sets.
"""
all_role_guesses, curr_role_counts = get_basic_guesses(solution)
solved = recurse_assign(solution, all_role_guesses, curr_role_counts, False)
switch_dict = get_switch_dict(solution)
final_guesses = tuple(solved[switch_dict[i]] for i in range(len(solved)))
if len(final_guesses) != const.NUM_ROLES:
raise RuntimeError("Could not find unrestricted assignment of roles.")
return final_guesses | 2662979b0fdca524dcea368daf7b11283906ecbb | 12,122 |
def computeFourteenMeVPoint(xs, E14='14.2 MeV', useCovariance=True, covariance=None):
"""
Compute the value of the cross section at 14.2 MeV.
If the covariance is provided, the uncertainty on the 14.2 MeV point will be computed.
:param xs: reference to the cross section
:param E14: the 14 MeV point to use (in case you want to override the default of 14.2 MeV)
:param useCovariance: use this to override covariance usage
:type useCovariance: bool
:param covariance: covariance to use when computing uncertainty on the spectral average.
If None (the default), no uncertainty is computed.
:type covariance: covariance instance or None
:rtype: PQU
"""
return computeValueAtAPoint(xs, E14, useCovariance=useCovariance, covariance=covariance) | 4d3165518e227f0c1027d45507c6d67e1e27bf0b | 12,124 |
import numpy as np
import xarray as xr
def conn_reshape_directed(da, net=False, sep='-', order=None, rm_missing=False,
fill_value=np.nan, to_dataframe=False,
inplace=False):
"""Reshape a raveled directed array of connectivity.
This function takes a DataArray of shape (n_pairs, n_directions) or
(n_pairs, n_times, n_direction) where n_pairs reflects pairs of roi
(e.g 'roi_1-roi_2') and n_direction usually contains bidirected 'x->y' and
'y->x'. At the end, this function reshapes the input array so that rows
contain the sources and columns the targets, leading to a non-symmetric
DataArray of shape (n_roi, n_roi, n_times). A typical use case for this
function would be after computing the covariance-based Granger causality.
Parameters
----------
da : xarray.DataArray
Xarray DataArray of shape (n_pairs, n_times, n_directions) where
actually the roi dimension contains the pairs (roi_1-roi_2, roi_1-roi_3
etc.). The dimension n_directions should contains the dimensions 'x->y'
and 'y->x'
sep : string | '-'
Separator used to separate the pairs of roi names.
order : list | None
List of roi names to reorder the output.
rm_missing : bool | False
When reordering the connectivity array, choose if you prefer to reindex
even if there's missing regions (rm_missing=False) or if missing
regions should be removed (rm_missing=True)
fill_value : float | np.nan
Value to use for filling missing pairs (e.g diagonal)
to_dataframe : bool | False
Dataframe conversion. Only possible if the da input does not contains
a time axis.
Returns
-------
da_out : xarray.DataArray
DataArray of shape (n_roi, n_roi, n_times)
See also
--------
conn_covgc
"""
assert isinstance(da, xr.DataArray)
if not inplace:
da = da.copy()
assert ('roi' in list(da.dims)) and ('direction' in list(da.dims))
if 'times' not in list(da.dims):
da = da.expand_dims("times")
# get sources, targets names and sorted full list
sources, targets, roi_tot = _untangle_roi(da, sep)
# transpose, reindex and reorder (if needed)
da_xy, da_yx = da.sel(direction='x->y'), da.sel(direction='y->x')
if net:
da = xr.concat((da_xy - da_yx, da_xy - da_yx), 'roi')
else:
da = xr.concat((da_xy, da_yx), 'roi')
da, order = _dataarray_unstack(da, sources, targets, roi_tot, fill_value,
order, rm_missing)
# dataframe conversion
if to_dataframe:
da = _dataframe_conversion(da, order)
return da | bb6747cc47b263545fce219f8357d8773fb428bc | 12,125 |
def create_manager(user):
"""
Return a ManageDNS object associated with user (for history)
"""
if 'REVERSE_ZONE' in app.config:
revzone = app.config['REVERSE_ZONE']
else:
revzone = None
return ManageDNS(nameserver=app.config['SERVER'], forward_zone=app.config['FORWARD_ZONE'],
reverse_zone=revzone, user=user, key_name=key_name,
key_hash=key_hash) | 0832ce4353775a19cc015490f4febf6df6bd8f04 | 12,127 |
def get_psi_part(v, q):
"""Return the harmonic oscillator wavefunction for level v on grid q."""
Hr = make_Hr(v + 1)
return N(v) * Hr[v](q) * np.exp(-q * q / 2.0) | 9d4d6a62b7ee434d5d92a694a4a2491fd8a94f97 | 12,129 |
import numpy as np
def get_pij(d, scale, i, optim = "fast"):
"""
Compute probabilities conditioned on point i from a row of distances
d and a Gaussian scale (scale = 2*sigma^2). Only the vectorized version is
currently implemented; optim="none" falls back to it.
"""
if optim == "none":
#
# TO BE DONE
#
return get_pij(d, scale, i, optim = "fast")
else:
d_scaled = -d/scale
d_scaled -= np.max(d_scaled)
exp_D = np.exp(d_scaled)
exp_D[i] = 0
return exp_D/np.sum(exp_D) | 4a1ee1d91ba949789cc96d2ed1873197afbf4b67 | 12,131 |
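# A small, hypothetical usage sketch of get_pij: squared distances from point i=0
# to three other points and a chosen Gaussian scale. The numbers are illustrative
# only, not taken from a real dataset.
d = np.array([0.0, 1.0, 4.0, 9.0])  # squared distances from point 0
scale = 2.0 * 1.0 ** 2              # 2*sigma^2 with sigma = 1
p = get_pij(d, scale, i=0)
print(p, p.sum())  # p[0] == 0 (a point is not its own neighbour) and p sums to 1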
def get_user_input(prompt: str, current_setting: str):
"""
Get user input
:param prompt: prompt to display
:param current_setting: current value
:return: the value entered, the current setting if the input is empty, or 'q' if the user quits
"""
if current_setting != '':
print(f'-- Current setting: {current_setting}')
use_current = '/return to use current'
else:
use_current = ''
user_ip = ''
while user_ip == '':
user_ip = input(f'{prompt} [q to quit{use_current}]: ')
if user_ip.lower() == 'q':
break
if user_ip == '' and current_setting != '':
user_ip = current_setting
return user_ip | 358bd937db4ae111eb515385f0f61391a7ae665c | 12,134 |
def class_is_u16_len(cls):
"""
Return True if cls names an object which uses an initial uint16 length member
"""
ofclass = loxi_globals.unified.class_by_name(cls)
if not ofclass:
return False
if len(ofclass.members) < 1:
return False
m = ofclass.members[0]
if not isinstance(m, ir.OFLengthMember):
return False
if m.oftype != "uint16_t":
return False
return True | f45a35e98ff0a2cf6d10ac31e5e31b501f7edcfd | 12,135 |
def split(data, train_ids, test_ids, valid_ids=None):
"""Split data into train, test (and validation) subsets."""
datasets = {
"train": (
tuple(map(lambda x: x[train_ids], data[0])),
data[1][train_ids],
),
"test": (tuple(map(lambda x: x[test_ids], data[0])), data[1][test_ids]),
}
if valid_ids is not None:
datasets["valid"] = (
tuple(map(lambda x: x[valid_ids], data[0])),
data[1][valid_ids],
)
else:
datasets["valid"] = None
return datasets | 0156d39a5920c5ba7e3ab05a85358b1a960cf239 | 12,137 |
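# A minimal usage sketch for split(), assuming `data` is a 2-tuple of
# (tuple_of_feature_arrays, label_array) and the id arguments are index arrays.
import numpy as np

features = (np.arange(10).reshape(10, 1), np.arange(10, 20).reshape(10, 1))
labels = np.arange(10)
datasets = split((features, labels),
                 train_ids=np.arange(0, 6),
                 test_ids=np.arange(6, 8),
                 valid_ids=np.arange(8, 10))
print(datasets["train"][1])           # labels 0..5
print(datasets["valid"][0][0].shape)  # (2, 1)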
def parse_value_file(path):
"""return param: [(value type, value)]"""
data = {}
samples = [x.strip("\n").split("\t") for x in open(path)]
for row in samples:
parameter = row[0]
values = [x for x in row[1:] if x != SKIP_VAL]
if values != []:
if parameter not in data:
data[parameter] = []
data[parameter] += values
return data | 7dd51f21877ec8ce4a8a64c288a5e862ea0e2a52 | 12,138 |
def _is_comments_box(shape):
""" Checks if this shape represents a Comments question; RECTANGLE with a green outline """
if shape.get('shapeType') != 'RECTANGLE':
return False
color = get_dict_nested_value(shape, 'shapeProperties', 'outline', 'outlineFill', 'solidFill', 'color', 'rgbColor')
return 'blue' not in color and 'red' not in color and 'green' in color and color.get('green') == 1 | 5fa4abba6cc0db3552e90bd73ea9aa7659665ffe | 12,140 |
def get_config(cfg, name):
"""Given the argument name, read the value from the config file.
The name can be multi-level, like 'optimizer.lr'
"""
name = name.split('.')
suffix = ''
for item in name:
assert item in cfg, f'attribute {item} not in cfg{suffix}'
cfg = cfg[item]
suffix += f'.{item}'
return cfg | 4b0a8eedb057a26d67cd5c9f7698c33754b29249 | 12,142 |
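# A short usage sketch for get_config with a plain nested dict; the keys shown
# here are only an example, not a fixed schema.
cfg = {"optimizer": {"lr": 0.01, "momentum": 0.9}, "epochs": 10}
print(get_config(cfg, "optimizer.lr"))  # 0.01
print(get_config(cfg, "epochs"))        # 10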
def get_current_frame_content_entire_size(driver):
# type: (AnyWebDriver) -> ViewPort
"""
:return: The size of the entire content.
"""
try:
width, height = driver.execute_script(_JS_GET_CONTENT_ENTIRE_SIZE)
except WebDriverException:
raise EyesError('Failed to extract entire size!')
return dict(width=width, height=height) | 6df557ae5f628897a0d178695a13349787532168 | 12,143 |
def conv_slim_capsule(input_tensor,
input_dim,
output_dim,
layer_name,
input_atoms=8,
output_atoms=8,
stride=2,
kernel_size=5,
padding='SAME',
**routing_args):
"""Builds a slim convolutional capsule layer.
This layer performs 2D convolution given 5D input tensor of shape
`[batch, input_dim, input_atoms, input_height, input_width]`. Then refines
the votes with routing and applies the Squash non-linearity for each capsule.
Each capsule in this layer is a convolutional unit and shares its kernel over
the position grid and different capsules of layer below. Therefore, number
of trainable variables in this layer is:
kernel: [kernel_size, kernel_size, input_atoms, output_dim * output_atoms]
bias: [output_dim, output_atoms]
Output of a conv2d layer is a single capsule with channel number of atoms.
Therefore conv_slim_capsule is suitable to be added on top of a conv2d layer
with num_routing=1, input_dim=1 and input_atoms=conv_channels.
Args:
input_tensor: tensor, of rank 5. Last two dimensions represent the height
and width position grid.
input_dim: scalar, number of capsules in the layer below.
output_dim: scalar, number of capsules in this layer.
layer_name: string, Name of this layer.
input_atoms: scalar, number of units in each capsule of input layer.
output_atoms: scalar, number of units in each capsule of output layer.
stride: scalar, stride of the convolutional kernel.
kernel_size: scalar, convolutional kernels are [kernel_size, kernel_size].
padding: 'SAME' or 'VALID', padding mechanism for convolutional kernels.
**routing_args: dictionary {leaky, num_routing}, args to be passed to the
update_routing function.
Returns:
Tensor of activations for this layer of shape
`[batch, output_dim, output_atoms, out_height, out_width]`. If padding is
'SAME', out_height = in_height and out_width = in_width. Otherwise, height
and width is adjusted with same rules as 'VALID' in tf.nn.conv2d.
"""
with tf.variable_scope(layer_name):
# convolution. return [batch_size, 1, 32, 8, 6, 6]
kernel = variables.weight_variable(shape=[
kernel_size, kernel_size, input_atoms, output_dim * output_atoms
])
biases = variables.bias_variable([output_dim, output_atoms, 1, 1])
votes, votes_shape, input_shape = _depthwise_conv3d(
input_tensor, kernel, input_dim, output_dim, input_atoms, output_atoms,
stride, padding)
# convolution End
with tf.name_scope('routing'):
logit_shape = tf.stack([
input_shape[0], input_dim, output_dim, votes_shape[2], votes_shape[3]
])
biases_replicated = tf.tile(biases,
[1, 1, votes_shape[2], votes_shape[3]])
activations = _update_routing(
votes=votes,
biases=biases_replicated,
logit_shape=logit_shape,
num_dims=6,
input_dim=input_dim,
output_dim=output_dim,
**routing_args)
return activations | 626719fa607c7e02e2315d5082e9536b995ab080 | 12,144 |
import numpy as np
def p_op_mean0_update(prev_p_op_mean0: float, p_op_var0: float, op_choice: int):
"""0-ToM updates mean choice probability estimate"""
# Input variable transforms
p_op_var0 = np.exp(p_op_var0)
# Update
new_p_op_mean0 = prev_p_op_mean0 + p_op_var0 * (
op_choice - inv_logit(prev_p_op_mean0)
)
# For numerical purposes, according to the VBA package
new_p_op_mean0 = logit(inv_logit(new_p_op_mean0))
return new_p_op_mean0 | 2a66e0e6089813c8605e658bb68c103d2b07515d | 12,145 |
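# A worked sketch of the 0-ToM update, assuming the usual logistic helpers
# (inv_logit, logit) used by this module; the numbers are illustrative only.
def inv_logit(x):
    return 1.0 / (1.0 + np.exp(-x))

def logit(p):
    return np.log(p / (1.0 - p))

# prior mean 0 (P(opponent chooses 1) = 0.5), log-variance 0, opponent chose 1
new_mean = p_op_mean0_update(prev_p_op_mean0=0.0, p_op_var0=0.0, op_choice=1)
print(new_mean)  # 0.0 + exp(0) * (1 - 0.5) = 0.5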
from aiida.orm import QueryBuilder, Code
from aiida.common.exceptions import NotExistent
def get_last_code(entry_point_name):
"""Return a `Code` node of the latest code executable of the given entry_point_name in the database.
The database will be queried for the existence of an inpgen node.
If it does not exist, a NotExistent error is raised.
:param entry_point_name: string
:return: the uuid of an inpgen `Code` node
:raise: aiida.common.exceptions.NotExistent
"""
filters = {'attributes.input_plugin': {'==': entry_point_name}}
builder = QueryBuilder().append(Code, filters=filters)
builder.order_by({Code: {'ctime': 'asc'}})
results = builder.first()
if not results:
raise NotExistent(f'ERROR: Could not find any Code in the database with entry point: {entry_point_name}!')
else:
inpgen = results[0]
return inpgen.uuid | 801ef01075dccaf2d6d91c8bfcd9f038dc749ba7 | 12,146 |
def digamma(x):
"""Digamma function.
Parameters
----------
x : array-like
Points on the real line
Returns
-------
ndarray
Values of `digamma` at `x`
"""
return _digamma(x) | fb0eb21ee4255851aaa90bea1ad2b8729c7b0137 | 12,147 |
def get_time_index_dataset_from_file(the_file: h5py.File) -> h5py.Dataset:
"""Return the dataset for time indices from the H5 file object."""
return the_file[TIME_INDICES] | dfcd2017285ac252becbb2de7d1b4c4eb178534e | 12,148 |
def is_feature_enabled(feature_name):
"""A short-form method for server-side usage. This method evaluates and
returns the values of the feature flag, using context from the server only.
Args:
feature_name: str. The name of the feature flag that needs to
be evaluated.
Returns:
bool. The value of the feature flag, True if it's enabled.
"""
return _evaluate_feature_flag_value_for_server(feature_name) | 65c3b74988d7eb145352d2835b90a60abedfeba1 | 12,150 |
def str_to_size(size_str):
"""
Receives a human size (i.e. 10GB) and converts to an integer size in
mebibytes.
Args:
size_str (str): human size to be converted to integer
Returns:
int: formatted size in mebibytes
Raises:
ValueError: in case the size provided is invalid
"""
if size_str is None:
return None
# no unit: assume mebibytes as default and convert directly
if size_str.isnumeric():
return int(size_str)
size_str = size_str.upper()
# check if size is non-negative number
if size_str.startswith('-'):
raise ValueError(
'Invalid size format: {}'.format(size_str)) from None
# decimal units are converted to bytes and then to mebibytes
dec_units = ('KB', 'MB', 'GB', 'TB')
for index, unit in enumerate(dec_units):
# unit used is different: try next
if not size_str.endswith(unit):
continue
try:
size_int = int(size_str[:-2]) * pow(1000, index+1)
except ValueError:
raise ValueError(
'Invalid size format: {}'.format(size_str)) from None
# result is returned in mebibytes
return int(size_int / pow(1024, 2))
# binary units are just divided/multiplied by powers of 2
bin_units = ('KIB', 'MIB', 'GIB', 'TIB')
for index, unit in enumerate(bin_units):
# unit used is different: try next
if not size_str.endswith(unit):
continue
try:
size_int = int(int(size_str[:-3]) * pow(1024, index-1))
except ValueError:
raise ValueError(
'Invalid size format: {}'.format(size_str)) from None
return size_int
raise ValueError(
'Invalid size format: {}'.format(size_str)) from None | 0051b7cf55d295a4fffcc41ed5b0d900243ef2da | 12,151 |
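# A brief usage sketch for str_to_size; every result is in mebibytes (MiB).
print(str_to_size('512'))    # 512   (no unit: interpreted as MiB)
print(str_to_size('1GIB'))   # 1024  (binary unit)
print(str_to_size('10GB'))   # 9536  (decimal unit, converted to MiB)
print(str_to_size('500KB'))  # 0     (rounds down below 1 MiB)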
def point_line_distance(point, line):
"""Distance between a point and great circle arc on a sphere."""
start, end = line
if start == end:
dist = great_circle_distance(point, start, r=1)/np.pi*180
else:
dist = cross_track_distance(point, line, r=1)
dist = abs(dist/np.pi*180)
return dist | 472b9134034fb0f0e03ad4964f97c2be7337db56 | 12,152 |
def uniq_by(array, iteratee=None):
"""This method is like :func:`uniq` except that it accepts iteratee which
is invoked for each element in array to generate the criterion by which
uniqueness is computed. The order of result values is determined by the
order they occur in the array. The iteratee is invoked with one argument:
``(value)``.
Args:
array (list): List to process.
iteratee (mixed, optional): Function to transform the elements of the
arrays. Defaults to :func:`.identity`.
Returns:
list: Unique list.
Example:
>>> uniq_by([1, 2, 3, 1, 2, 3], lambda val: val % 2)
[1, 2]
.. versionadded:: 4.0.0
"""
return list(iterunique(array, iteratee=iteratee)) | c4398add1597a447f400bd6c100cc10eda4e63a4 | 12,153 |
def process_lvq_pak(dataset_name='lvq-pak', kind='all', numeric_labels=True, metadata=None):
"""
kind: {'test', 'train', 'all'}, default 'all'
numeric_labels: boolean (default: True)
if set, target is a vector of integers, and label_map is created in the metadata
to reflect the mapping to the string targets
"""
untar_dir = interim_data_path / dataset_name
unpack_dir = untar_dir / 'lvq_pak-3.1'
if kind == 'train':
data, target = read_space_delimited(unpack_dir / 'ex1.dat', skiprows=[0,1])
elif kind == 'test':
data, target = read_space_delimited(unpack_dir / 'ex2.dat', skiprows=[0])
elif kind == 'all':
data1, target1 = read_space_delimited(unpack_dir / 'ex1.dat', skiprows=[0,1])
data2, target2 = read_space_delimited(unpack_dir / 'ex2.dat', skiprows=[0])
data = np.vstack((data1, data2))
target = np.append(target1, target2)
else:
raise Exception(f'Unknown kind: {kind}')
if numeric_labels:
if metadata is None:
metadata = {}
mapped_target, label_map = normalize_labels(target)
metadata['label_map'] = label_map
target = mapped_target
dset_opts = {
'dataset_name': dataset_name,
'data': data,
'target': target,
'metadata': metadata
}
return dset_opts | 7d9aaed88fb20dc151c61d23760e05e77965838c | 12,154 |
def StrToPtkns(path_string):
""" The inverse of PtknsToStr(), this function splits a string like
'/usr/local/../bin/awk' into ['usr','local','..','bin','awk'].
For illustrative purposes only. Use text.split('/') directly instead."""
return path_string.split('/')
def load_key(file, callback=util.passphrase_callback):
# type: (AnyStr, Callable) -> EC
"""
Factory function that instantiates a EC object.
:param file: Names the filename that contains the PEM representation
of the EC key pair.
:param callback: Python callback object that will be invoked
if the EC key pair is passphrase-protected.
"""
with BIO.openfile(file) as bio:
return load_key_bio(bio, callback) | 72d47a88d80141b385f8212134fb682507ce47d4 | 12,156 |
from fnmatch import fnmatch
def glob_path_match(path: str, pattern_list: list) -> bool:
"""
Checks if path is in a list of glob style wildcard paths
:param path: path of file / directory
:param pattern_list: list of wildcard patterns to check for
:return: Boolean
"""
return any(fnmatch(path, pattern) for pattern in pattern_list) | 7c21e8f1c441641990826cf1d6d29d4add40e9ca | 12,157 |
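# A brief usage sketch for glob_path_match with hypothetical paths and patterns.
patterns = ['*.pyc', 'build/*', '.git/*']
print(glob_path_match('module.pyc', patterns))      # True
print(glob_path_match('build/lib/x.so', patterns))  # True
print(glob_path_match('src/main.py', patterns))     # False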
def sample_df(df, col_name='family', n_sample_per_class=120, replace = False):
"""
samples the dataframe per class in `col_name`; a class is kept whole (unsampled)
when it has fewer rows than n_sample_per_class and replace is False
"""
samples = df.groupby(col_name)
list_cls = df[col_name].unique()
df_lst = []
for cls in list_cls:
cls_df = samples.get_group(cls)
if (cls_df.shape[0] < n_sample_per_class) and (replace==False):
cls_sample = cls_df
else:
cls_sample = cls_df.sample(n=n_sample_per_class,replace=replace,random_state=42)
df_lst.append(cls_sample)
df_sampled = pd.concat(df_lst, sort=False)
df_sampled = shuffle(df_sampled)
return df_sampled | cc229e9cbd4a094b9a42893f15d99303f1f14c2d | 12,158 |
import base64
def inventory_user_policies_header(encode):
"""generate output header"""
if encode == 'on':
return misc.format_line((
base64.b64encode(str("Account")),
base64.b64encode(str("UserName")),
base64.b64encode(str("PolicyName")),
base64.b64encode(str("Policy"))
))
else:
return misc.format_line((
str("Account"),
str("UserName"),
str("PolicyName"),
str("Policy")
)) | 80d170505c0f05e48c2854c9f5370d161de953fb | 12,159 |
def fields_dict(cls):
"""
Return an ordered dictionary of ``attrs`` attributes for a class, whose
keys are the attribute names.
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
:rtype: an ordered dict where keys are attribute names and values are
`attr.Attribute`\\ s. This will be a `dict` if it's
naturally ordered like on Python 3.6+ or an
:class:`~collections.OrderedDict` otherwise.
.. versionadded:: 18.1.0
"""
if not isclass(cls):
raise TypeError("Passed object must be a class.")
attrs = getattr(cls, "__attrs_attrs__", None)
if attrs is None:
raise NotAnAttrsClassError(
"{cls!r} is not an attrs-decorated class.".format(cls=cls)
)
return ordered_dict(((a.name, a) for a in attrs)) | 863106132a8ce27c8cb12c8ace8f4204b43484c3 | 12,160 |
import pandas as pd
import talib as t  # technical-analysis library providing the CDL* pattern functions
def cdlxsidegap3methods(
client,
symbol,
timeframe="6m",
opencol="open",
highcol="high",
lowcol="low",
closecol="close",
):
"""This will return a dataframe of upside/downside gap three methods for the given symbol across
the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
opencol (string): column to use to calculate
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
val = t.CDLXSIDEGAP3METHODS(
df[opencol].values.astype(float),
df[highcol].values.astype(float),
df[lowcol].values.astype(float),
df[closecol].values.astype(float),
)
return pd.DataFrame(
{
opencol: df[opencol].values,
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
"cdlxsidegap3methods": val,
}
) | f77f8e7404b2be942919a652facbfea412e962d3 | 12,161 |
def finditer(pattern, string, flags=0):
"""Return an iterator over all non-overlapping matches in the
string. For each match, the iterator returns a match object.
Empty matches are included in the result."""
return _pyre().finditer(pattern, string, flags) | ff84f88a200b469bbea010b04d4b3f36fd340c9c | 12,162 |
def nufft_j(x, y, freq = None, period_max=1., period_min=.5/24, window=False, oversamp=10.):
"""
nufft_j(x, y, period_max=1.,
period_min=.5/24, window=False, oversamp=10.):
Basic STFT algorithm
for evenly sampled data
"""
srt = np.argsort(x)
x = x[srt] # get sorted x, y arrays
y = y[srt]
if freq is None:
# Get a good frequency sampling, based on scargle in IDL
# freq = LombScargle(x,y).autofrequency()
# minimum_frequency=1./period_max,maximum_frequency=1./period_min)
freq = freq_grid(x,fmin=1./period_max,fmax=1./period_min,oversamp=oversamp)
# create array to hold fft results
fft = np.zeros_like(freq)
if window:
np.absolute(nufft.nufft3(x,y/y,freq*np.pi*2),out=fft)
else:
np.absolute(nufft.nufft3(x,y-np.nanmean(y),freq*np.pi*2),out=fft)
return fft,freq | dc690bd294c28d8b70befc1463eaeed018bf98bf | 12,163 |
def getNamespacePermissions(paths):
"""Get L{Namespace}s and L{NamespacePermission}s for the specified paths.
@param paths: A sequence of L{Namespace.path}s to get L{Namespace}s and
L{NamespacePermission}s for.
@return: A C{ResultSet} yielding C{(Namespace, NamespacePermission)}
2-tuples for the specified L{Namespace.path}s.
"""
store = getMainStore()
return store.find((Namespace, NamespacePermission),
NamespacePermission.namespaceID == Namespace.id,
Namespace.path.is_in(paths)) | cf6c9a898bdc08130702d4aeb6570790a9dc8edc | 12,166 |
import numpy as np
import matplotlib.pyplot as plt
def plot(x, y, ey=[], ex=[], frame=[], kind="scatter", marker_option=".",
ls="-", lw=1, label="", color="royalblue", zorder=1, alpha=1.,
output_folder="", filename=""):
"""
Creates a plot (plot, scatter or errorbar).
Parameters
----------
x : array-like
x values
y : array-like
y values
ey : array_like
errors on the y values
ex : array_like
errors on the x values
kind : string
The kind of plot.
Possible values are "plot" (default), "scatter" and "errorbar".
marker_option : string
Sets the marker option for the plot types "plot" and "scatter" as well as
the fmt option for the plot type "errorbar".
ls : string
linestyle
lw : float
linewidth
zorder : int
The "layer" (z-order) of the data to be plotted
returns frame
"""
#error arrays
if len(ex)==1:
ex = np.ones(len(x))*ex[0]
elif ex==[]:
ex = np.zeros(len(x))
if len(ey)==1:
ey = np.ones(len(y))*ey[0]
#plotting
fig, plot = plt.subplots(1,1) if frame == [] else frame
if kind=="plot":
plot.plot(x, y, color=color, marker=marker_option, ls=ls, lw=lw, label=label, zorder=zorder, alpha=alpha)
elif kind=="scatter":
plot.scatter(x, y, color=color, marker=marker_option, lw=lw, label=label, zorder=zorder, alpha=alpha)
elif kind=="errorbar":
plot.errorbar(x, y, ey, ex, color=color, fmt=marker_option, ls="", lw=lw, label=label, zorder=zorder, alpha=alpha)
elif kind=="bar":
plot.bar(x, y, color=color, label=label, zorder=zorder, alpha=alpha)
#saving plot
if filename!="":
fig.savefig(output_folder+filename,bbox_inches='tight',pad_inches=pad_inches)
return [fig,plot] | 038a12ab841a617bf1ca3106d5664f8942c9e259 | 12,167 |
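# A minimal usage sketch for plot(); numpy (np) is assumed to be imported as
# above, no file is written because the module-level pad_inches used when saving
# is not shown here, and the data are illustrative only.
x = np.linspace(0, 2 * np.pi, 50)
y = np.sin(x)
fig, ax = plot(x, y, kind="plot", label="sin", color="crimson")
fig, ax = plot(x, y + 0.1, ey=[0.05], kind="errorbar", frame=[fig, ax], label="shifted")
ax.legend()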
def find_nominal_hv(filename, nominal_gain):
"""
Finds nominal HV of a measured PMT dataset
Parameters
----------
filename: string
nominal_gain: float
gain for which the nominal HV should be found
Returns
-------
nominal_hv: int
nominal HV
"""
f = h5py.File(filename, "r")
gains = []
hvs = []
keys = f.keys()
for key in keys:
gains.append(f[key]["fit_results"]["gain"][()])
hvs.append(int(key))
f.close()
gains = np.array(gains)
hvs = np.array(hvs)
diff = abs(np.array(gains) - nominal_gain)
nominal_hv = int(hvs[diff == np.min(diff)])
return nominal_hv | 122c5c14314e1ad3521a67a7b9287969a471818d | 12,168 |
def parse_match(field, tokens):
"""Parses a match or match_phrase node
:arg field: the field we're querying on
:arg tokens: list of tokens to consume
:returns: list of match clauses
"""
clauses = []
while tokens and tokens[-1] not in (u'OR', u'AND'):
token = tokens.pop()
if token.startswith(u'"'):
clauses.append(build_match_phrase(field, token[1:-1]))
else:
clauses.append(build_match(field, token))
return clauses | ac970f319b74317637c31265981ecebab6ca9611 | 12,169 |
def get_filesystem(namespace):
"""
Returns a patched pyfilesystem for static module storage based on
`DJFS_SETTINGS`. See `patch_fs` documentation for additional details.
The file system will have two additional properties:
1) get_url: A way to get a URL for a static file download
2) expire: A way to expire files (so they are automatically destroyed)
"""
if DJFS_SETTINGS['type'] == 'osfs':
return get_osfs(namespace)
elif DJFS_SETTINGS['type'] == 's3fs':
return get_s3fs(namespace)
else:
raise AttributeError("Bad filesystem: " + str(DJFS_SETTINGS['type'])) | ae00307c0c38a554bebe1bbd940ace0f2d154b47 | 12,170 |
import numpy as np
import scipy.sparse as sp
def sym_normalize_adj(adj):
"""symmetrically normalize adjacency matrix"""
adj = sp.coo_matrix(adj)
degree = np.array(adj.sum(1)).flatten()
d_inv_sqrt = np.power(np.maximum(degree, np.finfo(float).eps), -0.5)
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo() | a172ec18cd88ac8a50356453eb159c001b21d9b1 | 12,171 |
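# A tiny usage sketch of the symmetric normalization D^{-1/2} A D^{-1/2} on a
# 3-node path graph; numpy and scipy.sparse are assumed as imported above.
A = sp.csr_matrix(np.array([[0., 1., 0.],
                            [1., 0., 1.],
                            [0., 1., 0.]]))
A_norm = sym_normalize_adj(A)
print(np.round(A_norm.toarray(), 3))  # off-diagonals become 1/sqrt(deg_i*deg_j) ~= 0.707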
def prepare_label(input_batch, new_size):
"""Resize masks and perform one-hot encoding.
Args:
input_batch: input tensor of shape [batch_size H W 1].
new_size: a tensor with new height and width.
Returns:
Outputs a tensor of shape [batch_size h w 21]
with last dimension comprised of 0's and 1's only.
"""
with tf.name_scope('label_encode'):
input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # as labels are integer numbers, need to use NN interp.
input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) # reducing the channel dimension.
input_batch = tf.one_hot(input_batch, depth=n_classes)
return input_batch | 3cd049b0d610ed2cec79e17464a0b3d18baa0ab2 | 12,172 |
from typing import Optional
def format_autoupdate_jira_msg(
message_body: str, header_body: Optional[str] = None
) -> str:
"""
Format a JIRA message with useful headers.
An "Automated JIRA Update" title will be added,
as well as either a URL link if a ``BUILD_URL`` env variable is present,
or a note indicating a manual run with user id otherwise.
Args:
message_body: the body of the message
header_body: a header to be added with ``h2`` tag
Returns:
a formatted message with headers
"""
message = "h2. {}".format(header_body) if header_body else ""
message += "\n\nAutomated JIRA Update:\n\n{}\n\n{}".format(
_build_source(), message_body
)
return message | 8470987c886c4c696ebd7537369b9baee9883e20 | 12,174 |
def _unescape_token(escaped_token):
"""Inverse of _escape_token().
Args:
escaped_token: a unicode string
Returns:
token: a unicode string
"""
def match(m):
if m.group(1) is None:
return "_" if m.group(0) == "\\u" else "\\"
try:
return chr(int(m.group(1)))
except (ValueError, OverflowError) as _:
return "\u3013" # Unicode for undefined character.
trimmed = escaped_token[:-1] if escaped_token.endswith("_") else escaped_token
return _UNESCAPE_REGEX.sub(match, trimmed) | 1e596373f64d2163e467dddf4851e60dba6faa00 | 12,175 |
def create_option_learner(action_space: Box) -> _OptionLearnerBase:
"""Create an option learner given its name."""
if CFG.option_learner == "no_learning":
return KnownOptionsOptionLearner()
if CFG.option_learner == "oracle":
return _OracleOptionLearner()
if CFG.option_learner == "direct_bc":
return _DirectBehaviorCloningOptionLearner(action_space)
if CFG.option_learner == "implicit_bc":
return _ImplicitBehaviorCloningOptionLearner(action_space)
if CFG.option_learner == "direct_bc_nonparameterized":
return _DirectBehaviorCloningOptionLearner(action_space,
is_parameterized=False)
raise NotImplementedError(f"Unknown option_learner: {CFG.option_learner}") | 5642b5c6713dcf3204a0bf98e4435cfb2874e1c6 | 12,176 |
def parse_foochow_romanized_phrase(phrase, allow_omit_ingbing = True):
"""Parse a dash-separated phrase / word in Foochow Romanized."""
syllables = phrase.strip().split('-')
result = []
for syllable in syllables:
try:
parsed = FoochowRomanizedSyllable.from_string(syllable, allow_omit_ingbing)
result.append(parsed)
except Exception:
    raise ValueError("%s is not a valid Foochow Romanized syllable." % syllable)
return result | d9b5fa15ab11a596e14c7eecff2ce4fc7ef520ae | 12,177 |
def _update(dict_merged: _DepDict, dict_new: _DepDict) -> _DepDict:
"""
Merge a dictionary `dict_new` into `dict_merged` asserting if there are
conflicting (key, value) pair.
"""
for k, v in dict_new.items():
if k in dict_merged:
if v != dict_merged[k]:
raise ValueError(
"Key '%s' is assigned to different values '%s' and '%s'"
% (k, v, dict_merged[k])
)
else:
dict_merged[k] = v
return dict_merged | 8c96256dd96f0a75d8e8cde039c7193699bf763f | 12,178 |
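# A short usage sketch of _update; _DepDict is assumed to be a plain dict alias
# and the package names below are made up.
merged = {"numpy": "1.21", "pandas": "1.3"}
merged = _update(merged, {"pandas": "1.3", "scipy": "1.7"})
print(merged)  # {'numpy': '1.21', 'pandas': '1.3', 'scipy': '1.7'}
# _update(merged, {"pandas": "1.4"}) would raise ValueError because of the conflict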
from datetime import datetime
def date_convert(value):
"""
Convert a date string to the database date type.
:param value: date string in '%Y/%m/%d' format
:return: the parsed date, or today's date if parsing fails
"""
try:
create_date = datetime.strptime(value, '%Y/%m/%d').date()
except Exception as e:
create_date = datetime.now().date()
return create_date | 40d7a213a8aeed692940bbb285fdad1bbb5b65a6 | 12,179 |
def discriminator_txt2img_resnet(input_images, t_txt, is_train=True, reuse=False):
""" 64x64 + (txt) --> real/fake """
# https://github.com/hanzhanggit/StackGAN/blob/master/stageI/model.py
# Discriminator with ResNet : line 197 https://github.com/reedscot/icml2016/blob/master/main_cls.lua
w_init = tf.random_normal_initializer(stddev=0.02)
gamma_init=tf.random_normal_initializer(1., 0.02)
df_dim = 64 # 64 for flower, 196 for MSCOCO
s = 64 # output image size [64]
s2, s4, s8, s16 = int(s / 2), int(s / 4), int(s / 8), int(s / 16)
tl.layers.set_name_reuse(reuse)
net_in = Input(input_images)
net_h0 = Conv2d(df_dim, (4, 4), (2, 2), act=lambda x: tl.act.lrelu(x, 0.2), padding='SAME', W_init=w_init, name='d_h0/conv2d')(net_in)
net_h1 = Conv2d(df_dim * 2, (4, 4), (2, 2), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h1/conv2d')(net_h0)
net_h1 = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h1/batchnorm')(net_h1)
net_h2 = Conv2d(df_dim * 4, (4, 4), (2, 2), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h2/conv2d')(net_h1)
net_h2 = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h2/batchnorm')(net_h2)
net_h3 = Conv2d(df_dim * 8, (4, 4), (2, 2), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h3/conv2d')(net_h2)
net_h3 = BatchNorm(is_train=is_train, gamma_init=gamma_init, name='d_h3/batchnorm')(net_h3)
net = Conv2d(df_dim * 2, (1, 1), (1, 1), act=None, padding='VALID', W_init=w_init, b_init=None, name='d_h4_res/conv2d')(net_h3)
net = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h4_res/batchnorm')(net)
net = Conv2d(df_dim * 2, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h4_res/conv2d2')(net)
net = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h4_res/batchnorm2')(net)
net = Conv2d(df_dim * 8, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h4_res/conv2d3')(net)
net = BatchNorm(is_train=is_train, gamma_init=gamma_init, name='d_h4_res/batchnorm3')(net)
net_h4 = Elementwise(act=lambda x: tl.act.lrelu(x, 0.2), combine_fn=tf.add, name='d_h4/add')([net_h3, net])
# net_h4.outputs = tl.act.lrelu(net_h4.outputs, 0.2)
if t_txt is not None:
net_in2 = Input(t_txt)
#net_txt = Dense(n_units=t_dim, act=lambda x: tl.act.lrelu(x, 0.2), W_init=w_init, name='d_reduce_txt/dense')(net_txt)
net_txt = ExpandDims(1, name='d_txt/expanddim1')(net_in2)
net_txt = ExpandDims(1, name='d_txt/expanddim2')(net_txt)
net_txt = Tile([1, 4, 4, 1], name='d_txt/tile')(net_txt)
net_h4_concat = Concat(concat_dim=3, name='d_h3_concat')([net_h4, net_txt])
# 243 (ndf*8 + 128 or 256) x 4 x 4
net_h4 = Conv2d(df_dim * 8, (1, 1), (1, 1), padding='VALID', W_init=w_init, b_init=None, name='d_h3/conv2d_2')(net_h4_concat)
net_h4 = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h3/batch_norm_2')(net_h4)
net_ho = Conv2d(1, (s16, s16), (s16, s16), act=tf.nn.sigmoid, padding='VALID', W_init=w_init, name='d_ho/conv2d')(net_h4)
# 1 x 1 x 1
net_ho = Flatten()(net_ho)
# logits = net_ho.outputs
# net_ho.outputs = tf.nn.sigmoid(net_ho.outputs)
return tl.models.Model(inputs=[net_in,net_in2], outputs=net_ho) | 200d23ccffe631ea9bea2de5afa82a1794192a7b | 12,180 |
import imghdr
def get_img_content(session,
file_url,
extension=None,
max_retry=3,
req_timeout=5):
"""
Returns:
(data, actual_ext)
"""
retry = max_retry
while retry > 0:
try:
response = session.get(file_url, timeout=req_timeout)
except Exception as e:
print(f'Exception caught when downloading file {file_url}, '
f'error: {e}, remaining retry times: {retry - 1}')
else:
if response.status_code != 200:
print(f'Response status code {response.status_code}, '
f'file {file_url}')
break
# get the response byte
data = response.content
if isinstance(data, str):
print('Converting str to bytes.')
data = data.encode()
actual_ext = imghdr.what(extension, data)
actual_ext = 'jpg' if actual_ext == 'jpeg' else actual_ext
# do not download original gif
if actual_ext == 'gif' or actual_ext is None:
return None, actual_ext
return data, actual_ext
finally:
retry -= 1
return None, None | 156005420ebc1503d5cf7a194051b93d9fccb8ed | 12,181 |
import urllib
from xml.dom import minidom
def nextbus(a, r, c="vehicleLocations", e=0):
"""Returns the most recent latitude and
longitude of the selected bus line using
the NextBus API (nbapi)"""
nbapi = "http://webservices.nextbus.com"
nbapi += "/service/publicXMLFeed?"
nbapi += "command=%s&a=%s&r=%s&t=%s" % (c,a,r,e)
xml = minidom.parse(urllib.urlopen(nbapi))
bus=xml.getElementsByTagName("vehicle")
if bus:
at = bus[0].attributes
return(at["lat"].value, at["lon"].value)
else: return (False, False) | 13d1d26fcda1ad01e145dc1d1d0d4e5377efa576 | 12,182 |
def xml_translate(callback, value):
""" Translate an XML value (string), using `callback` for translating text
appearing in `value`.
"""
if not value:
return value
try:
root = parse_xml(value)
result = translate_xml_node(root, callback, parse_xml, serialize_xml)
return serialize_xml(result)
except etree.ParseError:
# fallback for translated terms: use an HTML parser and wrap the term
root = parse_html(u"<div>%s</div>" % value)
result = translate_xml_node(root, callback, parse_xml, serialize_xml)
# remove tags <div> and </div> from result
return serialize_xml(result)[5:-6] | b95f61fd1f78d567f69bdae3f5d0a1599d7b5cdc | 12,183 |
def check_sc_sa_pairs(tb, pr_sc, pr_sa, ):
"""
Check whether pr_sc, pr_sa are allowed pairs or not.
agg_ops = ['', 'MAX', 'MIN', 'COUNT', 'SUM', 'AVG']
"""
bS = len(pr_sc)
check = [False] * bS
for b, pr_sc1 in enumerate(pr_sc):
pr_sa1 = pr_sa[b]
hd_types1 = tb[b]['types']
hd_types11 = hd_types1[pr_sc1]
if hd_types11 == 'text':
if pr_sa1 == 0 or pr_sa1 == 3: # ''
check[b] = True
else:
check[b] = False
elif hd_types11 == 'real':
check[b] = True
else:
raise Exception("New TYPE!!")
return check | ded05192f26516e54e469bb1fe44ff6170ecea13 | 12,184 |
def translate_resource_args(func):
"""
Decorator that converts Issue and Project resources to their keys when used as arguments.
"""
@wraps(func)
def wrapper(*args, **kwargs):
arg_list = []
for arg in args:
if isinstance(arg, (Issue, Project)):
arg_list.append(arg.key)
else:
arg_list.append(arg)
result = func(*arg_list, **kwargs)
return result
return wrapper | 091c2bbc2875f32c643afe0a88ddb5980ff9f90c | 12,185 |
def nan_jumps_dlc(files, max_jump=200):
"""Nan stretches in between large jumps, assuming most of the trace is correct"""
# copy the data
corrected_trace = files.copy()
# get the column names
column_names = corrected_trace.columns
# run through the columns
for column in column_names:
# skip the index column if it's there
if column == 'index':
continue
# find the jumps
jump_length = np.diff(corrected_trace[column], axis=0)
jump_location = np.argwhere(abs(jump_length) > max_jump)
if jump_location.shape[0] == 0:
continue
jump_location = [el[0] for el in jump_location]
# initialize a flag
pair_flag = True
# go through pairs of jumps
for idx, jump in enumerate(jump_location[:-1]):
# if this is the second member of a pair, skip
if not pair_flag:
# reset the pair flag
pair_flag = True
continue
# if this jump and the next have the same sign, skip
if (jump_length[jump]*jump_length[jump_location[idx+1]]) > 0:
continue
# nan the segment in between
corrected_trace.loc[jump+1:jump_location[idx+1]+1, column] = np.nan
# set the pair flag
pair_flag = False
return corrected_trace | 5c06e6d020c4b85f2749d46f693dc29fd4f8326c | 12,186 |
def index():
"""View: Site Index Page"""
return render_template("pages/index.html") | 045c45082215bbc6888944386ba901af7412b0a6 | 12,187 |
def histogram(x, bins, bandwidth, epsilon=1e-10):
"""
Function that estimates the histogram of the input tensor.
The calculation uses kernel density estimation which requires a bandwidth (smoothing) parameter.
"""
pdf, _ = marginal_pdf(x.unsqueeze(2), bins, bandwidth, epsilon)
return pdf | bee459eb69daf41ccb0f2810d5e88139f57cad87 | 12,189 |
def generate_kam(
kam_path: str
) -> nx.DiGraph:
"""
Generates the knowledge assembly model as a NetworkX graph.
:param kam_path: Path to the file containing the source, relationship and the target nodes of a knowledge
assembly model (KAM).
:return: KAM graph as a NetworkX DiGraph.
"""
# Read the file containing the kam file
kam_df = pd.read_csv(kam_path, sep='\t', header=None)
# Rename the column headers are Source, Relationship and Target
kam_df.columns = ['Source', 'Relationship', 'Target']
# Map relationships between the nodes as either +1 or -1 based on the interaction
rlsp_mapping = {
'activates': 1,
'inhibits': -1
}
# Add the data to a directed graph
kam = nx.DiGraph()
for edge in kam_df.index:
kam.add_edge(
kam_df.at[edge, 'Source'],
kam_df.at[edge, 'Target'],
effect=rlsp_mapping[kam_df.at[edge, 'Relationship']]
)
return kam | 54f6e6cc0440a9b81cb48078030943013e599847 | 12,191 |
def toDrive(collection, folder, namePattern='{id}', scale=30,
dataType="float", region=None, datePattern=None,
extra=None, verbose=False, **kwargs):
""" Upload all images from one collection to Google Drive. You can use
the same arguments as the original function
ee.batch.export.image.toDrive
:param collection: Collection to upload
:type collection: ee.ImageCollection
:param folder: Google Drive folder to export the images to
:type folder: str
:param namePattern: pattern for the name. See make_name function
:type namePattern: str
:param region: area to upload. Defaults to the footprint of the first
image in the collection
:type region: ee.Geometry.Rectangle or ee.Feature
:param scale: scale of the image (side of one pixel). Defaults to 30
(Landsat resolution)
:type scale: int
:param maxImgs: maximum number of images inside the collection
:type maxImgs: int
:param dataType: as downloaded images **must** have the same data type
in all bands, you have to set it here. Can be one of: "float",
"double", "int", "Uint8", "Int8" or a casting function like
*ee.Image.toFloat*
:type dataType: str
:param datePattern: pattern for date if specified in namePattern.
Defaults to 'yyyyMMdd'
:type datePattern: str
:return: list of tasks
:rtype: list
"""
# empty tasks list
tasklist = []
# get region
region = tools.geometry.getRegion(region)
# Make a list of images
img_list = collection.toList(collection.size())
n = 0
while True:
try:
img = ee.Image(img_list.get(n))
name = makeName(img, namePattern, datePattern, extra)
name = name.getInfo()
description = utils.matchDescription(name)
# convert data type
img = utils.convertDataType(dataType)(img)
task = ee.batch.Export.image.toDrive(image=img,
description=description,
folder=folder,
fileNamePrefix=name,
region=region,
scale=scale, **kwargs)
task.start()
if verbose:
print("exporting {} to folder '{}' in GDrive".format(name, folder))
tasklist.append(task)
n += 1
except Exception as e:
error = str(e).split(':')
if error[0] == 'List.get':
break
else:
raise e
return tasklist | 2bf55de8894a063d2e74a462a3959753a1396c0a | 12,192 |
def area(box):
"""Computes area of boxes.
B: batch_size
N: number of boxes
Args:
box: a float Tensor with [N, 4], or [B, N, 4].
Returns:
a float Tensor with [N], or [B, N]
"""
with tf.name_scope('Area'):
y_min, x_min, y_max, x_max = tf.split(
value=box, num_or_size_splits=4, axis=-1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), axis=-1) | 64e7a8e530d28a6c88f8a1c7fd2bb1b6d880617c | 12,193 |
def read(fn):
"""
return a list of the operating systems and a list of the groups in
the given fingerbank config file
"""
cfg = parse_config_with_heredocs(fn)
return create_systems_and_groups(cfg) | 900e70093e469527f20f7be6ed091edf58ff3ace | 12,194 |
def decay_value(base_value, decay_rate, decay_steps, step):
""" decay base_value by decay_rate every decay_steps
:param base_value:
:param decay_rate:
:param decay_steps:
:param step:
:return: decayed value
"""
return base_value*decay_rate**(step/decay_steps) | c593f5e46d7687fbdf9760eb10be06dca3fb6f7b | 12,195 |
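# A quick numeric sketch: decaying a learning rate of 0.1 by a factor of 0.5
# every 1000 steps.
print(decay_value(0.1, 0.5, 1000, 0))     # 0.1
print(decay_value(0.1, 0.5, 1000, 1000))  # 0.05
print(decay_value(0.1, 0.5, 1000, 3000))  # 0.0125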
def setup_flask_app(manager_ip='localhost',
driver='',
hash_salt=None,
secret_key=None):
"""Setup a functioning flask app, when working outside the rest-service
:param manager_ip: The IP of the manager
:param driver: SQLA driver for postgres (e.g. pg8000)
:param hash_salt: The salt to be used when creating user passwords
:param secret_key: Secret key used when hashing flask tokens
:return: A Flask app
"""
app = Flask(__name__)
manager_config.load_configuration(from_db=False)
with app.app_context():
app.config['SQLALCHEMY_DATABASE_URI'] = manager_config.db_url
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['ENV'] = 'production'
set_flask_security_config(app, hash_salt, secret_key)
Security(app=app, datastore=user_datastore)
Migrate(app=app, db=db)
db.init_app(app)
app.app_context().push()
return app | ca71e94dcfcc4949abc4782a74cd67ce1d089d06 | 12,196 |
import shutil
def download_file(file_id, unique_id, credentials):
"""Downloads a file from google drive if user has been authenticated using oauth2
Args:
file_id (str): [The google drive id of the file]
unique_id (str): [The name of the video that is to be used for stored file]
Returns:
bool: [whether the file has been successfully downloaded or not]
"""
http = credentials.authorize(httplib2.Http())
service = discovery.build("drive", "v3", http=http)
request = service.files().get_media(fileId=file_id)
fh = BytesIO()
# Initialise a downloader object to download the file
# Downloads in chunks of 2MB
downloader = MediaIoBaseDownload(fh, request, chunksize=2048000)
done = False
try:
# Download the data in chunks
while not done:
status, done = downloader.next_chunk()
fh.seek(0)
# Write the received data to the file
with open(f"./{videos_dir}/{unique_id}", "wb") as f:
shutil.copyfileobj(fh, f)
print("File Downloaded")
# Return True if file Downloaded successfully
return True
except Exception as e:
print(str(e))
# Return False if something went wrong
print("Something went wrong.")
return False | ae1f0e9648602737a295ac313f3984d31c51fc7e | 12,197 |
def join_lines(new_lines, txt):
"""Joins lines, adding a trailing return if the original text had one."""
return add_ending('\n'.join(new_lines), txt) | 097fccf3ce6a7a5aab9d4f470c35833af3f63836 | 12,198 |
def get_components_with_metrics(config):
"""
:type: config mycroft_holmes.config.Config
"""
storage = MetricsStorage(config=config)
components = []
for feature_name, feature_spec in config.get_features().items():
feature_id = config.get_feature_id(feature_name)
metrics = config.get_metrics_for_feature(feature_name)
try:
score = storage.get(feature_id, feature_metric='score')
except MycroftMetricsStorageError:
score = None
component = {
'id': feature_id,
# feature's metadata
'name': feature_name,
'docs': feature_spec.get('url'),
'repo': feature_spec.get('repo'),
# fetch metrics and calculated score
'metrics': metrics,
'score': score or 0, # always an int, as we sort later on
# link to a feature's dashboard
'url': url_for('dashboard.feature', feature_id=feature_id, _external=True),
}
components.append(component)
# sort components by score (descending)
components = sorted(components, key=lambda item: item['score'], reverse=True)
return components | 478743f29620530d7c4d7ca916ec595fa7a1ab3b | 12,199 |
import typing as types
def parse_container_args(field_type: type) -> types.Union[ParamType, types.Tuple[ParamType]]:
"""Parses the arguments inside a container type (lists, tuples and so on).
Args:
field_type (type): pydantic field type
Returns:
types.Union[ParamType, types.Tuple[ParamType]]: single click-compatible type or a tuple
"""
assert is_container(field_type), "Field type is not a container"
args = types.get_args(field_type)
# Early out for untyped containers: standard lists, tuples, List[Any]
# Use strings when the type is unknown, avoid click's type guessing
if len(args) == 0:
return str
# Early out for homogenous containers: Tuple[int], List[str]
if len(args) == 1:
return parse_single_arg(args[0])
# Early out for homogenous tuples of indefinite length: Tuple[int, ...]
if len(args) == 2 and args[1] is Ellipsis:
return parse_single_arg(args[0])
# Then deal with fixed-length containers: Tuple[str, int, int]
return tuple(parse_single_arg(arg) for arg in args) | 925ceab7886f47c41ed5b3189693db2a28c37db1 | 12,200 |
def decrypt_from_base64(ciphertext_bs64, decrypt_key: str , iv : str) -> str:
"""From base64 ciphertext decrypt to string.
"""
aes: AES_Turbe = AES_Turbe(decrypt_key,iv)
content_str = aes.decrypt_from_base64(ciphertext_bs64)
return content_str | 67c4e1bb610b4a28d6fc563b910e86554779f1f2 | 12,201 |
def _resize_data(image, mask):
"""Resizes images to smaller dimensions."""
image = tf.image.resize_images(image, [480, 640])
mask = tf.image.resize_images(mask, [480, 640])
return image, mask | 47961541bf903c8b84066766529bf28d268eaa36 | 12,202 |
def dist_Mpc_to_geo(dist):
"""convert distance from Mpc to geometric units (i.e., metres)"""
return dist * Mpc | d023b80da73420f5499be0eb14f4d2e515e54627 | 12,203 |
def upload_to_bucket(file_path, filename):
"""
Upload file to S3 bucket
"""
s3_client = boto3.client('s3')
success = False
try:
response = s3_client.upload_file(file_path, AWS_S3_BUCKET_NAME, filename)
success = True
except ClientError as e:
logger.error('Error at %s', 'boto3.exceptions.ClientError', exc_info=e)
return success | 630937591307d9ec55fe606b2e199b1219139bcb | 12,204 |
def downsample_data( data, factor, hdr ):
"""Resample data and update the header appropriately
If factor < 1, this is *upsampling*.
Use this function to just return the data and hdr parts in case you want to do further operations prior to saving.
order=3 appears to crash Python 64-bit on Windows when the image is very large (800x500x500) and the method is trilinear. Order=1 works.
"""
fraction = 1.0 / factor
# ds_data = ndimage.interpolation.zoom(data, zoom=fraction, order=1) # default order=3
# order=3 default
# order=1 for very high-resolution images (default crashes)
# order=0 for nearest neighbour
if len(data.shape) > 3:
print(" Data shape is {0}. Only the first three dimensions will be considered! (The output will be 3D: data[:,:,:,0])".format(data.shape))
ds_data = ndimage.interpolation.zoom(data[:,:,:,0], zoom=fraction, order=0)
else:
ds_data = ndimage.interpolation.zoom(data, zoom=fraction, order=0)
ds_hdr = copy.deepcopy(hdr)
ds_hdr.set_data_shape(ds_data.shape)
new_pixdims = hdr.structarr['pixdim'][1:4] * factor
print("Pixdims old: {0}, new: {1}.".format(hdr.structarr['pixdim'][1:4], new_pixdims))
ds_hdr.structarr['pixdim'][1:4] = new_pixdims
sform_old = hdr.get_sform()
    print(sform_old)
resampling_affine = create_affine(trans=[factor,factor,factor], scale=[factor, factor, factor])
# Create the new sform matrix
sform_new = sform_old.dot(resampling_affine)
# Keep the exact-same translation elements
sform_new[0:3,3] = sform_old[0:3,3]
    print(sform_new)
ds_hdr.set_sform(sform_new)
# hdr_new.set_sform(np.eye(4))
# hdr_new['srow_x'][0] = hdr_new['pixdim'][1]
# hdr_new['srow_y'][1] = hdr_new['pixdim'][2]
# hdr_new['srow_z'][2] = hdr_new['pixdim'][3]
# hdr_new.get_sform()
# hdr_new['srow_x'][3] = hdr_new['pixdim'][1]
# hdr_new['srow_y'][3] = hdr_new['pixdim'][2]
# hdr_new['srow_z'][3] = hdr_new['pixdim'][3]
return ds_data, ds_hdr
# End of downsample_data() definition | 889e698531255b862fe407d5ce64a3ed54b44c44 | 12,207 |
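# Hedged usage sketch with nibabel: downsample a NIfTI volume by a factor of 2.
# "brain.nii.gz" is a placeholder path, and the create_affine() helper used above
# is assumed to be defined elsewhere in the original module.
import nibabel as nib

img = nib.load("brain.nii.gz")
ds_data, ds_hdr = downsample_data(img.get_fdata(), factor=2, hdr=img.header)
nib.save(nib.Nifti1Image(ds_data, ds_hdr.get_sform(), header=ds_hdr), "brain_ds.nii.gz")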
def check_if_present(driver: webdriver.Firefox, selector: str):
""" Checks if element is present on page by css selector """
return bool(driver.find_elements_by_css_selector(selector)) | 9cc4ebf92908ed8ef392cc20697734d553eb6db0 | 12,208 |
def cmi(x, y, z, k=3, base=2):
"""Mutual information of x and y, conditioned on z
x,y,z should be a list of vectors, e.g. x = [[1.3], [3.7], [5.1], [2.4]]
if x is a one-dimensional scalar and we have four samples
"""
assert len(x)==len(y), 'Lists should have same length.'
assert k <= len(x) - 1, 'Set k smaller than num samples - 1.'
intens = 1e-10 # Small noise to break degeneracy, see doc.
x = [list(p + intens*nr.rand(len(x[0]))) for p in x]
y = [list(p + intens*nr.rand(len(y[0]))) for p in y]
z = [list(p + intens*nr.rand(len(z[0]))) for p in z]
points = zip2(x,y,z)
# Find nearest neighbors in joint space, p=inf means max-norm.
tree = ss.cKDTree(points)
dvec = [tree.query(point, k+1, p=float('inf'))[0][k] for point in points]
a = avgdigamma(zip2(x,z), dvec)
b = avgdigamma(zip2(y,z), dvec)
c = avgdigamma(z,dvec)
d = digamma(k)
return (-a-b+c+d) / log(base) | b79a39dbe98202fedf4f572148cdce81c6608e3c | 12,210 |
def connected_plate():
"""Detects which plate from the PMA is connected to the device.
Returns:
FirmwareDeviceID: device ID of the connected plate. None if not detected
"""
for plate_id in (
FirmwareDeviceID.pt4_foundation_plate,
FirmwareDeviceID.pt4_expansion_plate,
):
status = __get_fw_device_status(plate_id)
if status.get("connected") is True:
return plate_id
return None | 4db471df81d63aa3c60f24b50fc25cef452db2d7 | 12,211 |
def islogin(_a=None):
"""
    Check whether the user is already logged in; return the request token if so, otherwise False.
"""
if _a is None:
global a
else:
a = _a
x=a.get(DOMAIN+"/apps/files/desktop/own",o=True)
t=a.b.find("input",{"id":"request_token"})
if t is None:
t = a.b.find("input",{"id":"oc_requesttoken"})
if t is None:
return False
else:
return t["value"] | 235a4662ecca8c496b83aec5bc0408bbd8ae45ec | 12,212 |
def discrete_coons_patch(ab, bc, dc, ad):
"""Creates a coons patch from a set of four or three boundary
polylines (ab, bc, dc, ad).
Parameters
----------
ab : list[[float, float, float] | :class:`~compas.geometry.Point`]
The XYZ coordinates of the vertices of the first polyline.
bc : list[[float, float, float] | :class:`~compas.geometry.Point`]
The XYZ coordinates of the vertices of the second polyline.
dc : list[[float, float, float] | :class:`~compas.geometry.Point`]
The XYZ coordinates of the vertices of the third polyline.
ad : list[[float, float, float] | :class:`~compas.geometry.Point`]
The XYZ coordinates of the vertices of the fourth polyline.
Returns
-------
list[[float, float, float]]
The points of the coons patch.
list[list[int]]
List of faces, with every face a list of indices into the point list.
Notes
-----
The vertices of the polylines are assumed to be in the following order::
b -----> c
^ ^
| |
| |
a -----> d
To create a triangular patch, one of the input polylines should be None.
(Warning! This will result in duplicate vertices.)
For more information see [1]_ and [2]_.
References
----------
.. [1] Wikipedia. *Coons patch*.
Available at: https://en.wikipedia.org/wiki/Coons_patch.
.. [2] Robert Ferreol. *Patch de Coons*.
Available at: https://www.mathcurve.com/surfaces/patchcoons/patchcoons.shtml
Examples
--------
>>>
"""
if not ab:
ab = [ad[0]] * len(dc)
if not bc:
bc = [ab[-1]] * len(ad)
if not dc:
dc = [bc[-1]] * len(ab)
if not ad:
ad = [dc[0]] * len(bc)
n = len(ab)
m = len(bc)
n_norm = normalize_values(range(n))
m_norm = normalize_values(range(m))
array = [[0] * m for i in range(n)]
for i, ki in enumerate(n_norm):
for j, kj in enumerate(m_norm):
# first function: linear interpolation of first two opposite curves
lin_interp_ab_dc = add_vectors(scale_vector(ab[i], (1 - kj)), scale_vector(dc[i], kj))
# second function: linear interpolation of other two opposite curves
lin_interp_bc_ad = add_vectors(scale_vector(ad[j], (1 - ki)), scale_vector(bc[j], ki))
# third function: linear interpolation of four corners resulting a hypar
a = scale_vector(ab[0], (1 - ki) * (1 - kj))
b = scale_vector(bc[0], ki * (1 - kj))
c = scale_vector(dc[-1], ki * kj)
d = scale_vector(ad[-1], (1 - ki) * kj)
lin_interp_a_b_c_d = sum_vectors([a, b, c, d])
# coons patch = first + second - third functions
array[i][j] = subtract_vectors(add_vectors(lin_interp_ab_dc, lin_interp_bc_ad), lin_interp_a_b_c_d)
# create vertex list
vertices = []
for i in range(n):
vertices += array[i]
# create face vertex list
faces = []
for i in range(n - 1):
for j in range(m - 1):
faces.append([i * m + j, i * m + j + 1, (i + 1) * m + j + 1, (i + 1) * m + j])
return vertices, faces | 19393f8eea5164e0f6cf89463dd3d68329c395d5 | 12,213 |
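# Hedged usage sketch: a flat 3x3 Coons patch over the unit square. The boundary
# polylines follow the a->b, b->c, d->c, a->d orientation described in the docstring,
# and the compas.geometry helpers used above are assumed to be importable.
ab = [[0.0, 0.0, 0.0], [0.0, 0.5, 0.0], [0.0, 1.0, 0.0]]  # a -> b
bc = [[0.0, 1.0, 0.0], [0.5, 1.0, 0.0], [1.0, 1.0, 0.0]]  # b -> c
dc = [[1.0, 0.0, 0.0], [1.0, 0.5, 0.0], [1.0, 1.0, 0.0]]  # d -> c
ad = [[0.0, 0.0, 0.0], [0.5, 0.0, 0.0], [1.0, 0.0, 0.0]]  # a -> d

vertices, faces = discrete_coons_patch(ab, bc, dc, ad)
print(len(vertices), len(faces))  # 9 points, 4 quad faces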
def w(shape, stddev=0.01):
"""
@return A weight layer with the given shape and standard deviation. Initialized with a
truncated normal distribution.
"""
return tf.Variable(tf.truncated_normal(shape, stddev=stddev)) | fd3d0bb6fb5565ce4ff5b4aafb80eccf711072db | 12,214 |
def lr_mult(alpha):
"""Decreases the learning rate update by a factor of alpha."""
@tf.custom_gradient
def _lr_mult(x):
def grad(dy):
return dy * alpha * tf.ones_like(x)
return x, grad
return _lr_mult | b5ab1c2b01025aee74c5f4c92b81eabf08c20de1 | 12,215 |
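# Hedged TF2 (eager) sketch: the forward pass is the identity, while the custom
# gradient halves whatever gradient reaches x.
import tensorflow as tf

x = tf.Variable(3.0)
with tf.GradientTape() as tape:
    y = lr_mult(0.5)(x) ** 2      # forward value is unchanged: y == 9.0
grad = tape.gradient(y, x)        # d(x**2)/dx scaled by 0.5 -> 3.0 instead of 6.0
print(float(grad))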
import PIL.Image
import io
def get_png_string(mask_array):
"""Builds PNG string from mask array.
Args:
mask_array (HxW): Mask array to generate PNG string from.
Returns: String of mask encoded as a PNG.
"""
# Convert the new mask back to an image.
image = PIL.Image.fromarray(mask_array.astype('uint8')).convert('RGB')
# Save the new image to a PNG byte string.
byte_buffer = io.BytesIO()
image.save(byte_buffer, format='png')
byte_buffer.seek(0)
return byte_buffer.read() | f44dd5417cb5250587926da939b5923921c62ead | 12,216 |
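# Minimal usage sketch: encode a small synthetic 0/1 mask as a PNG byte string.
import numpy as np

mask = np.zeros((4, 4), dtype=np.uint8)
mask[1:3, 1:3] = 1
png_bytes = get_png_string(mask)
print(png_bytes[:8])  # b'\x89PNG\r\n\x1a\n' -- the PNG signature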
def rotate90ccw(v):
"""Rotate 2d vector 90 degrees counter clockwise
"""
return (-(v[1]), v[0]) | cde43efd01e9fff3002623437f99e521790e33f2 | 12,217 |
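# Quick sanity check: the x unit vector rotates onto y, and four rotations are the identity.
v = (1, 0)
print(rotate90ccw(v))                                          # (0, 1)
print(rotate90ccw(rotate90ccw(rotate90ccw(rotate90ccw(v)))))   # back to (1, 0)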
def GetListOfCellTestPointsNearestListOfPointsV5(inInputFilter, pointList):
"""for each point in the list, find the cell test point (e.g. center of
cell bounding box) which is nearest the test point. Use MPI to work
in parallel"""
thisProcessNearestCellPointList, thisProcDistSqrdList = \
GetCellsClosestToPointsOnThisProcessFromParaViewFilterV5(inInputFilter, pointList)
nearestCellList, distanceList = UseMpiToGetGlobalCellPointsClosestV5(
inInputFilter, thisProcessNearestCellPointList, thisProcDistSqrdList)
return nearestCellList | c4dc0fcdb9d85dbc2ce1654743284400ca11e3d6 | 12,218 |
def _BuildBaseMTTCmd(args, host):
"""Build base MTT cmd."""
remote_mtt_binary = _REMOTE_MTT_BINARY_FORMAT % host.context.user
remote_cmd = [remote_mtt_binary]
if args.very_verbose:
remote_cmd += ['-vv']
elif args.verbose:
remote_cmd += ['-v']
# We copy the mtt binary inside mtt_lab to remote host,
    # there is no need to update the mtt binary on the remote host.
remote_cmd += ['--no_check_update']
return remote_cmd | 7506a39b79c81bc3b279280e69820ccdbb8ed664 | 12,219 |
def get_service(credentials=get_credentials()):
"""Gets GMail service, given credentials"""
return apiclient.discovery.build("gmail", "v1", credentials=credentials) | 7f344f07cc2d78014381bd449d6655658f2e4881 | 12,220 |
def edb_client_server_info(edb: ElectrolyteDB) -> dict:
"""
Perform an operation that ensures that the `edb` fixture has a client that:
- Is able to connect to the server (non-mock), or
- Is a "convincing fake" (mock), so that test functions using `edb` can expect a realistic behavior
Additionally, if this fixture is dispatched before "real" users (here this is done by using a `test__` prefix with two underscores),
    it avoids any first-use inconsistencies, such as the time spent waiting for a connection
being counted as part of the duration of the first test for which the `edb` fixture is instantiated.
"""
return edb._client.server_info() | 83e9d4bb7bf76e7ed5a73c69a4a2a617227526f8 | 12,221 |
def convert_to_int_list(dataframe: pd.Series) -> "list[list[int]]":
"""
Takes a dataframe with a string representation of a list of ints
and converts that into a list of lists of ints
"""
result_list = []
for row in dataframe:
result_list.append([int(x) for x in row[1:-1].split(", ")])
return result_list | 0d0ed69db3af04a21a65d472c53580d8f88f50f0 | 12,222 |
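# Hedged usage sketch with a small synthetic pandas Series; each entry is the string
# form of a list of ints, as the docstring describes (empty lists are not handled).
import pandas as pd

series = pd.Series(["[1, 2, 3]", "[42]", "[7, 8]"])
print(convert_to_int_list(series))  # [[1, 2, 3], [42], [7, 8]]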
def get_prism_daily_single(variable,
date,
return_path=False,
**kwargs):
"""Download data for a single day
Parameters
----------
variable : str
Either tmean, tmax, tmin, or ppt
date : str
The date to download in the format YYYY-MM-DD
dest_path : str, optional
Folder to download to, defaults to the current working directory.
return_path : bool, optional
Returns the full path to the final bil file, default False
keep_zip : bool, optional
Keeps the originally downloaded zip file, default True
"""
daily = PrismDaily(variable=variable,
min_date=date,
max_date=date,
**kwargs)
daily.download()
daily.close()
if return_path:
return daily._local_bil_filename(daily.dates[0]) | 5fe14da937452e040e2fcf7d38d04d1068f2bff8 | 12,223 |
def grid_count(grid, shape=None, interpolation='linear', bound='zero',
extrapolate=False):
"""Splatting weights with respect to a deformation field (pull adjoint).
Notes
-----
{interpolation}
{bound}
Parameters
----------
grid : ([batch], *inshape, dim) tensor
Transformation field.
shape : sequence[int], default=inshape
Output shape
interpolation : int or sequence[int], default=1
Interpolation order.
bound : BoundType, or sequence[BoundType], default='zero'
Boundary conditions.
extrapolate : bool or int, default=True
Extrapolate out-of-bound data.
Returns
-------
output : ([batch], 1, *shape) tensor
Spatting weights.
"""
dim = grid.shape[-1]
grid_no_batch = grid.dim() == dim + 1
if grid_no_batch:
grid = grid[None]
if shape is None:
shape = tuple(grid.shape[1:-1])
out = GridCount.apply(grid, shape, interpolation, bound, extrapolate)
if grid_no_batch:
out = out[0]
return out | 620713f2e234d1a2195fdebba5215a2c1f2493a3 | 12,225 |
def check_spf_record(lookup, spf_record):
"""
Check that all parts of lookup appear somewhere in the given SPF record, resolving
include: directives recursively
"""
not_found_lookup_parts = set(lookup.split(" "))
_check_spf_record(not_found_lookup_parts, spf_record, 0)
return not not_found_lookup_parts | 831c6d07a91484ce6b96bdc507a8f31034193590 | 12,226 |
import pytz
def local_tz2() -> pytz.BaseTzInfo:
"""
Second timezone for the second user
"""
return pytz.timezone("America/Los_Angeles") | d841f3ea06334540b8dca6fd2c2a2e823227fa37 | 12,227 |
def get_chemistry_info(sam_header, input_filenames, fail_on_missing=False):
"""Get chemistry triple information for movies referenced in a SAM
header.
Args:
sam_header: a pysam.Samfile.header, which is a multi-level dictionary.
Movie names are read from RG tags in this header.
input_filenames: a list of bas, bax, or fofn filenames.
fail_on_missing: if True, raise an exception if the chemistry
information for a movie in the header cannot be
found. If False, just log a warning.
Returns:
a list of strings that can be written as DS tags to RG entries in the
header of a new SAM or BAM file. For example,
['BINDINGKIT:xxxx;SEQUENCINGKIT:yyyy;SOFTWAREVERSION:2.0']
Raises:
ChemistryLoadingException if chemistry information cannot be found
for a movie in the header and fail_on_missing is True.
"""
# First get the full list of ba[sx] files, reading through any fofn or xml
# inputs
bas_filenames = []
for filename in input_filenames:
bas_filenames.extend(FofnIO.enumeratePulseFiles(filename))
# Then get the chemistry triple for each movie in the list of bas files
triple_dict = {}
for bas_filename in bas_filenames:
bas_file = BasH5IO.BasH5Reader(bas_filename)
movie_name = bas_file.movieName
chem_triple = bas_file.chemistryBarcodeTriple
triple_dict[movie_name] = chem_triple
# Finally, find the movie names that appear in the header and create CO
# lines with the chemistry triple
if 'RG' not in sam_header:
return []
rgds_entries = {}
for rg_entry in sam_header['RG']:
rg_id = rg_entry['ID']
rg_movie_name = rg_entry[MOVIENAME_TAG]
try:
rg_chem_triple = triple_dict[rg_movie_name]
rgds_entries[rg_id] = rg_chem_triple
except KeyError:
err_msg = ("Cannot find chemistry information for movie {m}."
.format(m=rg_movie_name))
if fail_on_missing:
raise ChemistryLoadingException(err_msg)
else:
log.warning(err_msg)
rgds_strings = format_rgds_entries(rgds_entries)
return rgds_strings | 4bfdd7f09061650c0e010f71cd00cbd44481c40f | 12,228 |
import warnings
from django import http
def json_catalog(request, domain='djangojs', packages=None):
"""
Return the selected language catalog as a JSON object.
Receives the same parameters as javascript_catalog(), but returns
a response with a JSON object of the following format:
{
"catalog": {
# Translations catalog
},
"formats": {
# Language formats for date, time, etc.
},
"plural": '...' # Expression for plural forms, or null.
}
"""
warnings.warn(
"The json_catalog() view is deprecated in favor of the "
"JSONCatalog view.", RemovedInDjango20Warning, stacklevel=2
)
locale = _get_locale(request)
packages = _parse_packages(packages)
catalog, plural = get_javascript_catalog(locale, domain, packages)
data = {
'catalog': catalog,
'formats': get_formats(),
'plural': plural,
}
return http.JsonResponse(data) | f2ac449d11299471184f0ddaeac87d543e90b7a3 | 12,229 |
from typing import Any, Dict, Text
def read_global_config() -> Dict[Text, Any]:
"""Read global Rasa configuration."""
# noinspection PyBroadException
try:
return rasa.utils.io.read_yaml_file(GLOBAL_USER_CONFIG_PATH)
except Exception:
# if things go south we pretend there is no config
return {} | 0287aa03a07b7ce5218f56237cd49d9ea18f8d5f | 12,230 |
def get_uuid_hex(digest_size: int = 10) -> str:
"""Generate hex of uuid4 with the defined size."""
return blake2b(uuid4().bytes, digest_size=digest_size).hexdigest() | 4ec853740b7f17bbf7cb90fd5e25c9d7719440c8 | 12,231 |
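# Minimal usage sketch; the function depends on hashlib.blake2b and uuid.uuid4,
# which are assumed to be imported at module level in the original source.
from hashlib import blake2b
from uuid import uuid4

token = get_uuid_hex()
print(token, len(token))  # 20 hex characters for a 10-byte digest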
def crc16(data):
"""CRC-16-CCITT computation with LSB-first and inversion."""
crc = 0xffff
for byte in data:
crc ^= byte
for bits in range(8):
if crc & 1:
crc = (crc >> 1) ^ 0x8408
else:
crc >>= 1
return crc ^ 0xffff | 2560f53c1f2b597d556a0b63462ef56f0c972db2 | 12,232 |
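# Minimal usage sketch: this reflected CRC-16 (poly 0x8408, init/xorout 0xFFFF)
# matches the CRC-16/X-25 variant, whose published check value for b"123456789" is 0x906E.
print(hex(crc16(b"123456789")))  # expected: 0x906e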
def _unlink_f(filename):
""" Call os.unlink, but don't die if the file isn't there. This is the main
difference between "rm -f" and plain "rm". """
try:
os.unlink(filename)
return True
    except OSError as e:
if e.errno not in (errno.ENOENT, errno.EPERM, errno.EACCES,errno.EROFS):
raise
return False | f0014e7ab0dfb6db519198f1eb052d930542a63f | 12,233 |
from typing import Optional
def get_member_expr_fullname(expr: MemberExpr) -> Optional[str]:
"""Return the qualified name representation of a member expression.
Return a string of form foo.bar, foo.bar.baz, or similar, or None if the
argument cannot be represented in this form.
"""
if isinstance(expr.expr, NameExpr):
initial = expr.expr.name
elif isinstance(expr.expr, MemberExpr):
initial = get_member_expr_fullname(expr.expr)
else:
return None
return '{}.{}'.format(initial, expr.name) | 2859751a977f028f0984e86422f31f388314414a | 12,234 |
def _read_dino_waterlvl_metadata(f, line):
"""read dino waterlevel metadata
Parameters
----------
f : text wrapper
line : str
line with meta dictionary keys
meta_dic : dict (optional)
dictionary with metadata
Returns
-------
meta : dict
dictionary with metadata
"""
meta_keys = line.strip().split(",")
meta_values = f.readline().strip().split(",")
meta = {}
for key, value in zip(meta_keys, meta_values):
key = key.strip()
if key in ["X-coordinaat", "Y-coordinaat"]:
if key == "X-coordinaat":
meta["x"] = float(value)
elif key == "Y-coordinaat":
meta["y"] = float(value)
elif key == "Locatie":
meta["locatie"] = value
meta["name"] = value
return meta | 949535f4fc677a7d0afc70a76e377ccefcc8943f | 12,235 |
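# Hedged usage sketch with an in-memory file; the column names follow the docstring,
# and the sample values are made up.
import io

f = io.StringIO("Locatie,X-coordinaat,Y-coordinaat\nB27D0001,250000,520000\n")
header_line = f.readline()
print(_read_dino_waterlvl_metadata(f, header_line))
# {'locatie': 'B27D0001', 'name': 'B27D0001', 'x': 250000.0, 'y': 520000.0}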