content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
import collections as collec
import numpy as np
def get_dist_genomic(genomic_data,var_or_gene):
    """Get the distribution of characteristics associated with the given genomic data
    Parameters: genomic_data (dict): with UDN ID as key and a list of dictionaries as value,
                    each dict containing characteristics of the considered genomic data
var_or_gene (str): "Var" if variants, "Gen" otherwise
Returns: gene_effects (collec.Counter): distribution of characteristics for selected genomic data
"""
gene_list=[]
for patient in genomic_data:
for i in range(len(genomic_data[patient])):
if var_or_gene=="Var":
if "effect" in list(genomic_data[patient][i].keys()) and "gene" in list(genomic_data[patient][i].keys()):
gene_list.append([genomic_data[patient][i]["gene"],genomic_data[patient][i]["effect"]])
else:
gene_list.append([genomic_data[patient][i]["gene"],"NA"])
elif var_or_gene=="Gen":
if "status" in list(genomic_data[patient][i].keys()) and "gene" in list(genomic_data[patient][i].keys()):
gene_list.append([genomic_data[patient][i]["gene"],genomic_data[patient][i]["status"]])
else:
gene_list.append([genomic_data[patient][i]["gene"],"NA"])
else:
print("var_or_gene must be Var or Gen")
gene_effects=collec.Counter(np.array(gene_list)[:,1])
return gene_effects | ada0b7ecd57ace9799102e97bc9173d888c23565 | 3,648,580 |
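A brief usage sketch with hypothetical UDN patient data (relies on the imports added above); the second variant has no "effect" field and therefore counts as "NA":

genomic_data = {"UDN123": [{"gene": "BRCA1", "effect": "missense"},
                           {"gene": "TP53"}]}
print(get_dist_genomic(genomic_data, "Var"))   # Counter({'missense': 1, 'NA': 1})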
def get_gmb_dataset_train(max_sentence_len):
"""
Returns the train portion of the gmb data-set. See TRAIN_TEST_SPLIT param for split ratio.
:param max_sentence_len:
:return:
"""
tokenized_padded_tag2idx, tokenized_padded_sentences, sentences = get_gmb_dataset(max_sentence_len)
return tokenized_padded_tag2idx[:int(len(tokenized_padded_tag2idx)*TRAIN_TEST_SPLIT)], \
tokenized_padded_sentences[:int(len(tokenized_padded_sentences)*TRAIN_TEST_SPLIT)], \
sentences[:int(len(sentences)*TRAIN_TEST_SPLIT)] | 762800a0b986c74037e79dc1db92d5b2f6cd2e50 | 3,648,581 |
def is_answer_reliable(location_id, land_usage, expansion):
"""
    Before submitting to the DB, we judge whether an answer is reliable and mark the location done if:
    1. The user passes the gold standard test
    2. Another user passes the gold standard test and has submitted the same answer.
Parameters
----------
location_id : int
ID of the location.
land_usage : int
User's answer of judging if the land is a farm or has buildings.
(check the answer table in model.py for the meaning of the values)
expansion : int
        User's answer of judging whether the construction has expanded.
(check the answer table in model.py for the meaning of the values)
Return
------
bool
Result of the checking.
        True : Matches another good answer candidate.
False : No other good answer candidates exist or match.
"""
# If another user passed the gold standard quality test, and submitted an answer to the same location.
good_answer_candidates = Answer.query.filter_by(gold_standard_status=1, location_id=location_id, land_usage=land_usage, expansion=expansion).all()
# If the good answer candidate doesn't exist
if len(good_answer_candidates) == 0:
return False
else:
return True | 3aa510d68115ef519ec2a1318102d302aae81382 | 3,648,582 |
import numpy
from scipy import linalg
def _polyfit_coeffs(spec,specerr,scatter,labelA,return_cov=False):
"""For a given scatter, return the best-fit coefficients"""
Y= spec/(specerr**2.+scatter**2.)
ATY= numpy.dot(labelA.T,Y)
CiA= labelA*numpy.tile(1./(specerr**2.+scatter**2.),(labelA.shape[1],1)).T
ATCiA= numpy.dot(labelA.T,CiA)
ATCiAinv= linalg.inv(ATCiA)
if return_cov:
return (numpy.dot(ATCiAinv,ATY),ATCiAinv)
else:
return numpy.dot(ATCiAinv,ATY) | 9398fa2072625eb66ea8df2f79008577fe6aaabe | 3,648,583 |
import numpy as np
from matplotlib.colors import to_rgba
def colorize(data, colors, display_ranges):
"""Example:
colors = 'white', (0, 1, 0), 'red', 'magenta', 'cyan'
display_ranges = np.array([
[100, 3000],
[700, 5000],
[600, 3000],
[600, 4000],
[600, 3000],
])
rgb = fig4.colorize(data, colors, display_ranges)
plt.imshow(rgb)
"""
color_map = np.array([to_rgba(c)[:3] for c in colors])
dr = display_ranges[..., None, None]
normed = (data - dr[:, 0]) / (dr[:, 1] - dr[:, 0] )
# there's probably a nicer way to do this
rgb = (color_map.T[..., None, None] * normed[None, ...]).sum(axis=1)
return rgb.clip(min=0, max=1).transpose([1, 2, 0]) | efb6ff9c0573da4a11cbfdbf55acaccbb69de216 | 3,648,585 |
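A minimal usage sketch with synthetic data (assumes the numpy/matplotlib imports added above); input arrays are shaped (channels, height, width):

data = np.random.randint(600, 3000, size=(3, 64, 64))
colors = ('red', 'green', 'blue')
display_ranges = np.array([[600, 3000]] * 3)
rgb = colorize(data, colors, display_ranges)   # shape (64, 64, 3), values clipped to [0, 1]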
import itertools
import numpy as np
import pandas as pd
def multi_mdf(S, all_drGs, constraints, ratio_constraints=None, net_rxns=[],
all_directions=False, x_max=0.01, x_min=0.000001,
T=298.15, R=8.31e-3):
"""Run MDF optimization for all condition combinations
ARGUMENTS
S : pandas.DataFrame
Pandas DataFrame that corresponds to the stoichiometric matrix. Column
names are reaction IDs and row indices are compound names.
all_drGs : pandas.DataFrame
Pandas DataFrame with reaction IDs in the first column, condition
identifier strings in the intermediate columns, and reaction standard
Gibbs energies in float format in the last column.
constraints : pandas.DataFrame
Pandas DataFrame with a compound ID column (string), a lower
concentration bound column (float) and an upper concentration bound
        column (float).
ratio_constraints : pandas.DataFrame, optional
Pandas DataFrame with two compound ID columns (string), a lower limit
concentration ratio column (float), an upper limit concentration ratio
column (float) and the concentration ratio range step number (int). The
third column is interpreted as the fixed ratio when the fourth column
contains a None value. The last column indicates the type of spacing to
use for ratio ranges (linear or logarithmic).
net_rxns : list of strings
List with strings referring to the background network reactions for
network-embedded MDF analysis (NEM). The reactions should be in S.
all_directions : bool, optional
Set to True to calculate MDF for all possible reaction direction
combinations. Not recommended for sets of reactions >20.
x_max : float
Maximum default metabolite concentration (M).
x_min : float
Minimum default metabolite concentration (M).
T : float
Temperature (K).
R : float
Universal gas constant (kJ/(mol*K)).
RETURNS
mdf_table : pandas.DataFrame
A Pandas DataFrame containing all MDF results for a single pathway. Each
row corresponds to one individual MDF optimization, with the parameters
described in the columns:
v0 ... : string
Condition identifiers as supplied in all_drGs.
drG_std(rxn_id) : float
The standard reaction Gibbs energy for the reaction 'rxn_id'.
[cpd_id_num]/[cpd_id_den] ... : float
Ratio of concentration between compounds 'cpd_id_num' and
'cpd_id_den'.
dir(rxn_id) ... : int
The direction used for the reaction 'rxn_id'. The order is the same
as the columns in S.
[cpd_id] ... : float
Optimized concentration for compound 'cpd_id' (M).
drG_opt(rxn_id) : float
The optimized reaction Gibbs energy for reaction 'rxn_id' (kJ/mol).
success : int
Indicates optimization success (1) or failure (0).
MDF : float
The Max-min Driving Force determined through linear optimization
(kJ/mol).
"""
# All drGs
# -> All ratio combinations
# -> All directions
# Number of reactions
n_rxn = S.shape[1]
# List the condition identifiers
conditions = list(all_drGs.columns[1:-1])
# Create column labels for output DataFrame
if ratio_constraints is not None:
ratio_labels = [
'ratio_' + ratio_constraints.iloc[row,:]['cpd_id_num'] + \
'_' + ratio_constraints.iloc[row,:]['cpd_id_den'] \
for row in range(ratio_constraints.shape[0])
]
else:
ratio_labels = []
column_labels = [
*conditions,
*['drGstd_' + rxn_id for rxn_id in list(S.columns)],
*ratio_labels,
*['dir_' + rxn_id for rxn_id in list(S.columns)],
*['c_' + cpd_id for cpd_id in list(S.index)],
*['drGopt_' + rxn_id for rxn_id in list(S.columns)],
'success',
'MDF'
]
# Also create labels for sorting (conditions, ratios and directions)
sort_labels = [
*conditions,
*ratio_labels,
*['dir_' + rxn_id for rxn_id in list(S.columns)]
]
# Iterator preparation
def prep_iter():
# Set up conditions iterator
if len(conditions):
cond_iter = all_drGs[conditions].drop_duplicates().iterrows()
else:
cond_iter = [None]
# Set up directions iterator
if not all_directions:
dir_iter = [[1.0]*n_rxn]
else:
dir_iter = itertools.product([1.0,-1.0], repeat=n_rxn)
# Set up ratios iterator
if ratio_constraints is not None:
rats_iter = ratio_iter(ratio_constraints)
else:
rats_iter = [None]
# Set up fixed concentration range constraints iterator
cons_iter = con_iter(constraints)
return itertools.product(cond_iter, dir_iter, rats_iter, cons_iter)
# Set up output DataFrame
mdf_table = pd.DataFrame(columns = column_labels)
# Determine number of rows that will be produced
M = 0
for i in prep_iter():
M += 1
# Iterate over all combinations of conditions, directions and ratios
n = 0
for params in prep_iter():
n += 1
progress = float(n / M * 100)
sWrite("\rPerforming MDF optimization... %0.1f%%" % progress)
# Extract specific condition, direction and ratio constraints
if params[0] is not None:
condition = pd.DataFrame(params[0][1]).T
else:
condition = None
direction = params[1]
rats = params[2]
constraints_mod = params[3]
# Obtain specific standard reaction Gibbs energies with correct sign
if condition is not None:
drGs = pd.merge(condition, all_drGs)
else:
drGs = all_drGs
drGs.is_copy = False
drGs.loc[:,['drG']] = drGs['drG'] * direction
# Modify direction (sign) of reactions in the stoichiometric matrix
S_mod = S * direction
# Set up MDF inputs
c = mdf_c(S_mod)
A = mdf_A(S_mod, net_rxns)
b = mdf_b(S_mod, drGs, constraints_mod, x_max, x_min, T, R)
# Use equality (ratio) constraints if they were specified
if rats is not None:
A_eq = mdf_A_eq(S_mod, rats)
b_eq = mdf_b_eq(rats)
# If the ratio constraints have been filtered out, set to None
if not A_eq.size or not b_eq.size:
A_eq = None
b_eq = None
else:
A_eq = None
b_eq = None
# Perform MDF
mdf_result = mdf(c, A, b, A_eq, b_eq)
# Prepare conditions list
if condition is not None:
conditions_list = list(condition.iloc[0,:])
else:
conditions_list = []
# Prepare ratios list
if rats is not None:
rats_list = list(rats.ratio)
else:
rats_list = []
# Format results row
mdf_row = [
*conditions_list,
*[float(drGs[drGs.rxn_id == rxn_id]['drG']) for rxn_id in S_mod.columns],
*rats_list,
*direction,
]
if mdf_result.success:
mdf_row.extend([
*np.exp(mdf_result.x[:-1]), # Concentrations
*calc_drGs(S_mod, drGs, mdf_result.x[:-1]), # Reaction Gibbs energies
1.0, # Success
mdf_result.x[-1]*R*T # MDF value
])
else:
mdf_row.extend([
*[np.nan]*S_mod.shape[0], # Concentrations
*[np.nan]*S_mod.shape[1], # Reaction Gibbs energies
0.0, # Failure
np.nan # No MDF value
])
# Append row to expected result
mdf_table = mdf_table.append(pd.DataFrame([mdf_row], columns = column_labels))
return mdf_table.sort_values(sort_labels) | 3496105b764bc72b1c52ee28d419617798ad72cc | 3,648,586 |
import numpy as np
def nufft_adjoint(input, coord, oshape=None, oversamp=1.25, width=4.0, n=128):
"""Adjoint non-uniform Fast Fourier Transform.
Args:
input (array): Input Fourier domain array.
coord (array): coordinate array of shape (..., ndim).
ndim determines the number of dimension to apply nufft adjoint.
oshape (tuple of ints): output shape.
oversamp (float): oversampling factor.
width (float): interpolation kernel full-width in terms of oversampled grid.
n (int): number of sampling points of interpolation kernel.
Returns:
array: Transformed array.
See Also:
:func:`sigpy.nufft.nufft`
"""
device = backend.get_device(input)
xp = device.xp
ndim = coord.shape[-1]
beta = np.pi * (((width / oversamp) * (oversamp - 0.5))**2 - 0.8)**0.5
if oshape is None:
oshape = list(input.shape[:-coord.ndim + 1]) + estimate_shape(coord)
else:
oshape = list(oshape)
with device:
coord = _scale_coord(backend.to_device(coord, device), oshape, oversamp)
kernel = backend.to_device(
_kb(np.arange(n, dtype=coord.dtype) / n, width, beta, coord.dtype), device)
os_shape = oshape[:-ndim] + [_get_ugly_number(oversamp * i) for i in oshape[-ndim:]]
output = interp.gridding(input, os_shape, width, kernel, coord)
for a in range(-ndim, 0):
i = oshape[a]
os_i = os_shape[a]
idx = xp.arange(i, dtype=input.dtype)
os_shape[a] = i
# Swap axes
output = output.swapaxes(a, -1)
os_shape[a], os_shape[-1] = os_shape[-1], os_shape[a]
# Oversampled IFFT
output = ifft(output, axes=[-1], norm=None)
output *= os_i / i**0.5
output = util.resize(output, os_shape)
# Calculate apodization
apod = (beta**2 - (np.pi * width * (idx - i // 2) / os_i)**2)**0.5
apod /= xp.sinh(apod)
# Apodize
output *= apod
# Swap back
output = output.swapaxes(a, -1)
os_shape[a], os_shape[-1] = os_shape[-1], os_shape[a]
return output | d3d03ebe3d905cb647fab7d801592e148023709e | 3,648,587 |
def get_bustools_version():
"""Get the provided Bustools version.
This function parses the help text by executing the included Bustools binary.
:return: tuple of major, minor, patch versions
:rtype: tuple
"""
p = run_executable([get_bustools_binary_path()], quiet=True, returncode=1)
match = VERSION_PARSER.match(p.stdout.read())
return tuple(int(ver) for ver in match.groups()) if match else None | 7de14349b9349352c3532fbcc0be58be0f9756c7 | 3,648,588 |
from urllib.parse import urlparse
def request_from_url(url):
"""Parses a gopher URL and returns the corresponding Request instance."""
pu = urlparse(url, scheme='gopher', allow_fragments=False)
t = '1'
s = ''
if len(pu.path) > 2:
t = pu.path[1]
s = pu.path[2:]
if len(pu.query) > 0:
s = s + '?' + pu.query
p = '70'
if pu.port:
p = str(pu.port)
return Request(t, pu.hostname, p, s) | aff334f8358edcae028b65fa1b4cf5727638eaad | 3,648,590 |
def enable_pause_data_button(n, interval_disabled):
"""
Enable the play button when data has been loaded and data *is* currently streaming
"""
if n and n[0] < 1: return True
if interval_disabled:
return True
return False | 4257a2deb9b8be87fe64a54129ae869623c323e8 | 3,648,592 |
import scipy
def dProj(z, dist, input_unit='deg', unit='Mpc'):
"""
Projected distance, physical or angular, depending on the input units (if
input_unit is physical, returns angular, and vice-versa).
The units can be 'cm', 'ly' or 'Mpc' (default units='Mpc').
"""
if input_unit in ('deg', 'arcmin', 'arcsec'):
Da = dA(z, unit=unit)
else:
Da = dA(z, unit=input_unit)
# from angular to physical
if input_unit == 'deg':
dist = Da * scipy.pi * dist / 180
elif input_unit == 'arcmin':
dist = Da * scipy.pi * dist / (180 * 60)
elif input_unit == 'arcsec':
dist = Da * scipy.pi * dist / (180 * 3600)
# from physical to angular
if unit == 'deg':
dist = dist * 180 / (scipy.pi * Da)
elif unit == 'arcmin':
dist = dist * 180 * 60 / (scipy.pi * Da)
elif unit == 'arcsec':
dist = dist * 180 * 3600 / (scipy.pi * Da)
return dist | 13610816dfb94a92d6890d351312661b04e8604f | 3,648,593 |
def savgoldiff(x, dt, params=None, options={}, dxdt_truth=None, tvgamma=1e-2, padding='auto',
optimization_method='Nelder-Mead', optimization_options={'maxiter': 10}, metric='rmse'):
"""
Optimize the parameters for pynumdiff.linear_model.savgoldiff
See pynumdiff.optimize.__optimize__ and pynumdiff.linear_model.savgoldiff for detailed documentation.
"""
# initial condition
if params is None:
orders = [2, 3, 5, 7, 9, 11, 13]
window_sizes = [3, 10, 30, 50, 90, 130, 200, 300]
smoothing_wins = [3, 10, 30, 50, 90, 130, 200, 300]
params = []
for order in orders:
for window_size in window_sizes:
for smoothing_win in smoothing_wins:
params.append([order, window_size, smoothing_win])
# param types and bounds
params_types = [int, int, int]
params_low = [1, 3, 3]
params_high = [12, 1e3, 1e3]
# optimize
func = pynumdiff.linear_model.savgoldiff
args = [func, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric]
opt_params, opt_val = __optimize__(params, args, optimization_method=optimization_method,
optimization_options=optimization_options)
return opt_params, opt_val | cb6da1a5fe3810ea1f481450667e548a9f64dae2 | 3,648,594 |
import base64
def credentials(scope="module"):
"""
Note that these credentials match those mentioned in test.htpasswd
"""
h = Headers()
    h.add('Authorization',
          'Basic ' + base64.b64encode(b"username:password").decode("ascii"))
return h | 4f0b1c17da546bfa655a96cfe5bcf74719dff55d | 3,648,595 |
def _set_bias(clf, X, Y, recall, fpos, tneg):
"""Choose a bias for a classifier such that the classification
rule
clf.decision_function(X) - bias >= 0
has a recall of at least `recall`, and (if possible) a false positive rate
of at most `fpos`
    Parameters
    ----------
clf : Classifier
classifier to use
X : array-like [M-examples x N-dimension]
feature vectors
Y : array [M-exmaples]
Binary classification
recall : float
Minimum fractional recall
fpos : float
Desired Maximum fractional false positive rate
tneg : int
Total number of negative examples (including previously-filtered
examples)
"""
df = clf.decision_function(X).ravel()
r = _recall_bias(df[Y == 1], recall)
f = _fpos_bias(df[Y == 1], fpos, tneg)
return min(r, f) | bbc903752d9abc93f723830e5c6c51459d18d0a5 | 3,648,597 |
from typing import Type
from typing import Dict
from typing import Any
def get_additional_params(model_klass: Type['Model']) -> Dict[str, Any]:
"""
    By default, we don't need additional params for FB API requests. But in some instances (e.g. fetching Comments),
    adding parameters makes fetching data simpler.
"""
assert issubclass(model_klass, abstractcrudobject.AbstractCrudObject)
return _default_additional_params.get(model_klass, {}) | 1b4c934a06870a8ae1f2f999bab94fb286ee6126 | 3,648,598 |
def thread_loop(run):
"""decorator to make the function run in a loop if it is a thread"""
def fct(self, *args, **kwargs):
if self.use_thread:
while True:
run(*args, **kwargs)
else:
run(*args, **kwargs)
return fct | a68eee708bc0a1fe0a3da01e68ec84b6a43d9210 | 3,648,599 |
from typing import Dict
import numpy as np
from scipy.special import comb
def rand_index(pred_cluster: Dict, target_cluster: Dict) -> float:
    """Use the contingency table to get the Rand Index (RI) directly
    RI = Accuracy = (TP+TN)/(TP+TN+FP+FN)
Args:
pred_cluster: Dict element:cluster_id (cluster_id from 0 to max_size)| predicted clusters
target_cluster: Dict element:cluster_id (cluster_id from 0 to max_size) | target clusters
Return:
RI (float)
"""
pred_cluster_ = helper_trans_to_element2clusterid(pred_cluster)
target_cluster_ = helper_trans_to_element2clusterid(target_cluster)
pred_cluster_size = len(pred_cluster_)
target_cluster_size = len(target_cluster_)
contingency_table = np.zeros((pred_cluster_size,target_cluster_size))
for i, p_cluster in enumerate(pred_cluster_):
for j, t_cluster in enumerate(target_cluster_):
#find common element
l = [*p_cluster,*t_cluster]
contingency_table[i][j] = len(l) - len(set(l))
s = comb(np.sum(contingency_table), 2)
a = 0
for i in np.nditer(contingency_table):
a += comb(i,2)
return a/s | 19ccbd6708abe6b3a05dc23843fa21e0f6d804e9 | 3,648,600 |
import re
import time
import datetime
def _strToDateTimeAndStamp(incoming_v, timezone_required=False):
"""Test (and convert) datetime and date timestamp values.
@param incoming_v: the literal string defined as the date and time
@param timezone_required: whether the timezone is required (ie, for date timestamp) or not
@return datetime
@rtype: datetime.datetime
@raise ValueError: invalid datetime or date timestamp
"""
# First, handle the timezone portion, if there is any
(v, tzone) = _returnTimeZone(incoming_v)
# Check on the timezone. For time date stamp object it is required
if timezone_required and tzone is None:
raise ValueError("Invalid datetime %s" % incoming_v)
# The microseconds should be handled here...
final_v = v
milliseconds = 0
    milpattern = r"(.*)(\.)([0-9]*)"
match = re.match(milpattern, v)
if match is not None:
# we have a millisecond portion...
try:
final_v = match.groups()[0]
milliseconds = int(match.groups()[2])
except:
raise ValueError("Invalid datetime %s" % incoming_v)
#
# By now, the pattern should be clear
# This may raise an exception...
try:
tstr = time.strptime(final_v, "%Y-%m-%dT%H:%M:%S")
if tzone is not None:
return datetime.datetime(tstr.tm_year, tstr.tm_mon, tstr.tm_mday, tstr.tm_hour, tstr.tm_min, tstr.tm_sec,
milliseconds, tzone)
else:
return datetime.datetime(tstr.tm_year, tstr.tm_mon, tstr.tm_mday, tstr.tm_hour, tstr.tm_min, tstr.tm_sec,
milliseconds)
except:
raise ValueError("Invalid datetime %s" % incoming_v) | fa0362976c3362e32c4176b4bd4c84ae0c653080 | 3,648,601 |
def get_price_for_market_stateless(result):
"""Returns the price for the symbols that the API doesnt follow the market state (ETF, Index)"""
## It seems that for ETF symbols it uses REGULAR market fields
return {
"current": result['regularMarketPrice']['fmt'],
"previous": result['regularMarketPreviousClose']['fmt'],
"change": result['regularMarketChange']['fmt'],
"percent": result['regularMarketChangePercent']['fmt']
} | 6afb9d443f246bd0db5c320a41c8341953f5dd7a | 3,648,602 |
def jump(current_command):
"""Return Jump Mnemonic of current C-Command"""
#jump exists after ; if ; in string. Always the last part of the command
if ";" in current_command:
command_list = current_command.split(";")
return command_list[-1]
else:
return "" | 2530ae99fcc4864c5e529d783b687bfc00d58156 | 3,648,604 |
import sqlite3 as sql
def get_veterans(uname=None):
"""
    @purpose: Runs SQL commands to query the database for information on veterans.
@args: The username of the veteran. None if the username is not provided.
@returns: A list with one or more veterans.
"""
vet = None
if uname:
command = "SELECT * FROM veterans WHERE username = '{}' ".format(uname)
else:
command = "SELECT * FROM veterans"
with sql.connect(DATABASE) as con:
cur = con.cursor()
cur.execute(command)
if uname:
vet = cur.fetchone()
else:
vet = cur.fetchall()
cur.close()
if vet is not None and len(vet) > 10:
return vet[0:10]
else:
return vet | df97dee334332613b52c745c3f20c4509c0e0cb9 | 3,648,605 |
def mock_movement_handler() -> AsyncMock:
"""Get an asynchronous mock in the shape of an MovementHandler."""
return AsyncMock(spec=MovementHandler) | 85579588dc5d8e6cb37bc85bc652f70d3fca8022 | 3,648,607 |
def compute( op , x , y ):
"""Compute the value of expression 'x op y', where -x and y
are two integers and op is an operator in '+','-','*','/'"""
if (op=='+'):
return x+y
elif op=='-':
return x-y
elif op=='*':
return x*y
elif op=='/':
return x/y
else:
return 0 | dbdf73a91bdb7092d2a18b6245ce6b8d75b5ab33 | 3,648,608 |
def list_arg(raw_value):
"""argparse type for a list of strings"""
return str(raw_value).split(',') | 24adb555037850e8458cde575ed360265a20cea5 | 3,648,609 |
def create_tracking(slug, tracking_number):
"""Create tracking, return tracking ID
"""
tracking = {'slug': slug, 'tracking_number': tracking_number}
result = aftership.tracking.create_tracking(tracking=tracking, timeout=10)
return result['tracking']['id'] | 4f5d645654604787892f1373759e5d40ce01b2fe | 3,648,610 |
def get_indentation(line_):
"""
returns the number of preceding spaces
"""
return len(line_) - len(line_.lstrip()) | 23a65ba620afa3268d4ab364f64713257824340d | 3,648,611 |
from itertools import count
def main():
    """
    Find the 10001st prime.
    :return: the 10001st prime
    """
primes = {2, }
for x in count(3, 2):
if prime(x):
primes.add(x)
if len(primes) >= 10001:
break
return sorted(primes)[-1] | 3d4f492fe3d0d7e4991003020694434145cd5983 | 3,648,612 |
def launch(context, service_id, catalog_packages=""):
""" Initialize the module. """
return EnvManager(context=context, service_id=service_id,
catalog_packages=catalog_packages) | ba22d106efca9014d118daf4a8880bbcfe0c11fa | 3,648,615 |
def handle_verification_token(request, token) -> [404, redirect]:
"""
This is just a reimplementation of what was used previously with OTC
https://github.com/EuroPython/epcon/pull/809/files
"""
token = get_object_or_404(Token, token=token)
logout(request)
user = token.user
user.is_active = True
user.save()
user = authenticate(uid=user.id)
login(request, user)
token.delete()
    messages.success(request, 'Email verification complete')
return redirect('user_panel:dashboard') | f369dc743d875bee09afb6e2ca4a2c313695bcbc | 3,648,616 |
import tensorflow as tf
def multi_perspective_expand_for_2d(in_tensor, weights):
"""Given a 2d input tensor and weights of the appropriate shape,
weight the input tensor by the weights by multiplying them
together.
"""
# Shape: (num_sentence_words, 1, rnn_hidden_dim)
in_tensor_expanded = tf.expand_dims(in_tensor, axis=1)
# Shape: (1, multi_perspective_dims, rnn_hidden_dim)
weights_expanded = tf.expand_dims(weights, axis=0)
# Shape: (num_sentence_words, multi_perspective_dims, rnn_hidden_dim)
return tf.multiply(in_tensor_expanded, weights_expanded) | 3d153e0bd9808dcd080f3d0ba75ee3bdd16123d3 | 3,648,617 |
import base64
def generateBasicAuthHeader(username, password):
"""
Generates a basic auth header
:param username: Username of user
:type username: str
:param password: Password of user
:type password: str
:return: Dict containing basic auth header
:rtype: dict
>>> generateBasicAuthHeader('test','test')
{'Authorization': 'Basic dGVzdDp0ZXN0'}
"""
    base64string = base64.b64encode(('%s:%s' % (username, password)).encode('utf-8')).decode('ascii')
return {'Authorization': 'Basic %s' % base64string} | 835b3541212e05354a5573a5b35e8184231c7a6c | 3,648,619 |
import numpy as np
import matplotlib.pyplot as plt
def correlation(df, target, limit=0, figsize=None, plot=True):
"""
Display Pearson correlation coefficient between target and numerical features
Return a list with low-correlated features if limit is provided
"""
numerical = list(df.select_dtypes(include=[np.number]))
numerical_f = [n for n in numerical if n not in target]
if not numerical_f:
print("There are no numerical features")
return
copy_df = df.copy()
for t in target:
if t not in numerical:
copy_df[t] = copy_df[t].astype(np.float16)
corr = copy_df.corr().loc[numerical_f, target].fillna(0).sort_values(target, ascending=False).round(2)
if not figsize:
figsize = (8, len(numerical_f) // 2 + 1)
corr.plot.barh(figsize=figsize)
plt.gca().invert_yaxis()
if limit>0:
plt.axvline(x=-limit, color='k', linestyle='--', )
plt.axvline(x=limit, color='k', linestyle='--', )
plt.xlabel('Pearson correlation coefficient')
plt.ylabel('feature')
if limit:
return corr.loc[abs(corr[target[0]]) < abs(limit)].index.tolist() | 044f4708ad691ad4d275c58ff6dbd5a57a6a978d | 3,648,620 |
def fasta_to_raw_observations(raw_lines):
"""
Assume that the first line is the header.
@param raw_lines: lines of a fasta file with a single sequence
@return: a single line string
"""
lines = list(gen_nonempty_stripped(raw_lines))
if not lines[0].startswith('>'):
msg = 'expected the first line to start with ">"'
raise ValueError(msg)
data_lines = lines[1:]
return ''.join(data_lines) | e75bd1f08ab68fa5a2a0d45cb23cba087e078d30 | 3,648,621 |
import numpy as np
def pc_proj(data, pc, k):
"""
get the eigenvalues of principal component k
"""
return np.dot(data, pc[k].T) / (np.sqrt(np.sum(data**2, axis=1)) * np.sqrt(np.sum(pc[k]**2))) | 768a4a9eba6427b9afda8c34326c140b360feec3 | 3,648,622 |
import datetime
def compare_time(time_str):
""" Compare timestamp at various hours """
t_format = "%Y-%m-%d %H:%M:%S"
if datetime.datetime.now() - datetime.timedelta(hours=3) <= \
datetime.datetime.strptime(time_str, t_format):
return 3
elif datetime.datetime.now() - datetime.timedelta(hours=6) <= \
datetime.datetime.strptime(time_str, t_format):
return 6
elif datetime.datetime.now() - datetime.timedelta(hours=12) <= \
datetime.datetime.strptime(time_str, t_format):
return 12
elif datetime.datetime.now() - datetime.timedelta(hours=24) <= \
datetime.datetime.strptime(time_str, t_format):
return 24
# Else catch all
return 100 | b3d6d85e4559fa34f412ee81825e4f1214122534 | 3,648,623 |
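A usage sketch (relies on the `import datetime` fix above): a timestamp two hours old falls into the 3-hour bucket.

two_hours_ago = datetime.datetime.now() - datetime.timedelta(hours=2)
assert compare_time(two_hours_ago.strftime("%Y-%m-%d %H:%M:%S")) == 3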
import numpy as np
import matplotlib.pyplot as plt
def trendline(xd, yd, order=1, c='r', alpha=1, Rval=True):
"""Make a line of best fit,
Set Rval=False to print the R^2 value on the plot"""
#Only be sure you are using valid input (not NaN)
idx = np.isfinite(xd) & np.isfinite(yd)
#Calculate trendline
coeffs = np.polyfit(xd[idx], yd[idx], order)
intercept = coeffs[-1]
slope = coeffs[-2]
power = coeffs[0] if order == 2 else 0
minxd = np.min(xd)
maxxd = np.max(xd)
xl = np.array([minxd, maxxd])
yl = power * xl ** 2 + slope * xl + intercept
#Plot trendline
plt.plot(xl, yl, c, alpha=alpha)
#Calculate R Squared
p = np.poly1d(coeffs)
ybar = np.sum(yd) / len(yd)
ssreg = np.sum((p(xd) - ybar) ** 2)
sstot = np.sum((yd - ybar) ** 2)
Rsqr = ssreg / sstot
if not Rval:
#Plot R^2 value
plt.text(0.8 * maxxd + 0.2 * minxd, 0.8 * np.max(yd) + 0.2 * np.min(yd),
'$R^2 = %0.2f$' % Rsqr)
else:
#Return the R^2 value:
return Rsqr | af10643b0d74fd5f7a82f803bcef0bd9e379f086 | 3,648,624 |
import collections
def groupby(key, seq):
""" Group a collection by a key function
>>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']
>>> groupby(len, names) # doctest: +SKIP
{3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}
>>> iseven = lambda x: x % 2 == 0
>>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP
{False: [1, 3, 5, 7], True: [2, 4, 6, 8]}
Non-callable keys imply grouping on a member.
>>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},
... {'name': 'Bob', 'gender': 'M'},
... {'name': 'Charlie', 'gender': 'M'}]) # doctest:+SKIP
{'F': [{'gender': 'F', 'name': 'Alice'}],
'M': [{'gender': 'M', 'name': 'Bob'},
{'gender': 'M', 'name': 'Charlie'}]}
See Also:
countby
"""
if not callable(key):
key = getter(key)
d = collections.defaultdict(lambda: [].append)
for item in seq:
d[key(item)](item)
rv = {}
for k, v in iteritems(d):
rv[k] = v.__self__
return rv | bfbec3f25d1d44c9ff2568045508efbf2a2216d2 | 3,648,625 |
from tempfile import NamedTemporaryFile
import numpy as np
import xarray as xr
def evo():
"""Creates a test evolution xarray file."""
nevo = 20
gen_data = {1: np.arange(nevo),
2: np.sin(np.linspace(0, 2*np.pi, nevo)),
3: np.arange(nevo)**2}
data = {'X1': np.linspace(0.1, 1.7, nevo)*_unit_conversion['AU'],
'X2': np.deg2rad(np.linspace(60, 120, nevo)),
'X3': np.deg2rad(np.linspace(30, 80, nevo)),
'TIME': np.arange(nevo)*60*60*24,
'DT': np.arange(nevo),
'NSTEP': np.arange(nevo),
'D': scale_variable(gen_data[3], 'den')/_unit_conversion['den'],
'T': scale_variable(gen_data[2], 'temp'),
'V1': scale_variable(gen_data[1], 'vel')/_unit_conversion['vel'],
'V2': scale_variable(gen_data[2], 'vel')/_unit_conversion['vel'],
'V3': scale_variable(gen_data[3], 'vel')/_unit_conversion['vel'],
'B1': scale_variable(gen_data[1], 'mag'),
'B2': scale_variable(gen_data[2], 'mag'),
'B3': scale_variable(gen_data[3], 'mag'),
'DP': np.linspace(0, 0.1, nevo),
'BP': np.linspace(-1, 1, nevo)}
# Need to make data Arrays for all of the variables with the single dim
for x in data:
data[x] = xr.DataArray(data[x], dims=['nevo'])
ds = xr.Dataset(data, coords={'nevo': np.arange(nevo)})
ds.attrs = {'label': 'earth',
'rundate_cal': "2010-01-01T00"}
with NamedTemporaryFile(suffix='.nc') as f:
ds.to_netcdf(f.name)
evo = read_evo(f.name)
return evo | 5873a3ee7a66d338a8df6b8cf6d26cf4cfeb41a3 | 3,648,626 |
from typing import List
from typing import Any
from typing import Optional
def jinja_calc_buffer(fields: List[Any], category: Optional[str] = None) -> int:
"""calculate buffer for list of fields based on their length"""
if category:
fields = [f for f in fields if f.category == category]
return max(len(f.to_string()) for f in fields) | c1f619acd8f68a9485026b344ece0c162c6f0fb0 | 3,648,627 |
def get_delete_op(op_name):
""" Determine if we are dealing with a deletion operation.
Normally we just do the logic in the last return. However, we may want
special behavior for some types.
:param op_name: ctx.operation.name.split('.')[-1].
:return: bool
"""
return 'delete' == op_name | 508a9aad3ac6f4d58f5890c1abc138326747ee51 | 3,648,628 |
import numpy as np
def random_radec(nsynths, ra_lim=[0, 360], dec_lim=[-90, 90],
random_state=None, **kwargs):
"""
Generate random ra and dec points within a specified range.
All angles in degrees.
Parameters
----------
nsynths : int
Number of random points to generate.
ra_lim : list-like, optional
ra limits.
dec_lim : list-like, optional
dec limits.
random_state : `None`, int, list of ints, or `numpy.random.RandomState`
If ``seed`` is `None`, return the `~numpy.random.RandomState`
singleton used by ``numpy.random``. If ``seed`` is an `int`,
return a new `~numpy.random.RandomState` instance seeded with
``seed``. If ``seed`` is already a `~numpy.random.RandomState`,
return it. Otherwise raise ``ValueError``.
Returns
-------
points : 2d ndarray
Random ra and dec points in degrees.
"""
rng = check_random_state(random_state)
ra_lim = np.deg2rad(np.asarray(ra_lim))
dec_lim = np.deg2rad(np.asarray(dec_lim))
zlim = np.sin(dec_lim)
z = zlim[0] + zlim.ptp() * rng.uniform(size=int(nsynths))
ra = ra_lim[0] + ra_lim.ptp() * rng.uniform(size=int(nsynths))
dec = np.arcsin(z)
ra, dec = np.rad2deg(ra), np.rad2deg(dec)
points = np.array([ra, dec]).T
return points | 4649d5f8e42ada28ba45c46fac1174fc66976f16 | 3,648,630 |
def warmUp():
"""
Warm up the machine in AppEngine a few minutes before the daily standup
"""
return "ok" | f7c83939d224b06db26570ab8ccc8f04bd69c1d6 | 3,648,631 |
import numpy as np
def _map_dvector_permutation(rd,d,eps):
"""Maps the basis vectors to a permutation.
Args:
rd (array-like): 2D array of the rotated basis vectors.
d (array-like): 2D array of the original basis vectors.
eps (float): Finite precision tolerance.
Returns:
RP (list): The permutation of the basis vectors.
"""
n_d = len(rd) # of d-vectors
found = [False]*n_d
RP = []
for iD in range(n_d):
for jD in range(n_d):
if found[jD]:
continue
if np.allclose(rd[iD],d[jD],atol=eps,rtol=0):
RP.append(jD)
found[jD] = True
break
if len(RP) != len(d): #pragma: no cover
print("d-vector didn't permute in map_dvector_permutation "
"This usually means that the d-set from the input structure and the d-set"
" from the struct_enum.out have a different origin or don't live in the same"
" unit cell. This probably isn't your fault---the code should overcome this."
,RP)
exit()
return(RP) | 3060cb093d769059f2af0635aafc0bd0fe94ad86 | 3,648,633 |
import re
def varPostV(self,name,value):
""" Moving all the data from entry to treeview """
    regex = re.search(r"-[@_!#$%^&*()<>?/\|}{~: ]", name)  # Prevent the user from entering special characters or spaces
print(regex)
if not regex == None:
tk.messagebox.showerror("Forbidden Entry","The variable name for vehicle must not contain special character or space character")
return None
if not name.strip():
tk.messagebox.showerror("Empty entry","The variable name for vehicle is empty")
return None
if not value.strip():
tk.messagebox.showerror("Empty entry","The variable value for vechicle is empty")
return None
if not value.isdigit():
tk.messagebox.showerror("Empty entry","The variable value for vechicle must be number")
return None
self.varVContent = self.varDispV
self.varVContent.insert("",index="end",text=name,value=float(value)) | 07e108df0ff057ee42e39155562b32fa651ba625 | 3,648,635 |
def _mysql_int_length(subtype):
"""Determine smallest field that can hold data with given length."""
try:
length = int(subtype)
except ValueError:
raise ValueError(
'Invalid subtype for Integer column: {}'.format(subtype)
)
if length < 3:
kind = 'TINYINT'
elif length < 4:
kind = 'SMALLINT'
elif length < 7:
kind = 'MEDIUMINT'
elif length <= 10:
kind = 'INT'
else:
kind = 'BIGINT'
return '{}({})'.format(kind, length) | 3a0e84a3ac602bb018ae7056f4ad06fe0dcab53b | 3,648,636 |
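Boundary behaviour of the length-to-type mapping above, as a quick sketch:

assert _mysql_int_length("2") == "TINYINT(2)"
assert _mysql_int_length("6") == "MEDIUMINT(6)"
assert _mysql_int_length("11") == "BIGINT(11)"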
import numpy as np
from scipy import stats
def get_ci(vals, percent=0.95):
"""Confidence interval for `vals` from the Students' t
distribution. Uses `stats.t.interval`.
Parameters
----------
percent : float
Size of the confidence interval. The default is 0.95. The only
requirement is that this be above 0 and at or below 1.
Returns
-------
tuple
        The first member is the lower end of the interval, the second
        the upper end of the interval.
"""
if len(set(vals)) == 1:
return (vals[0], vals[0])
mu = np.mean(vals)
df = len(vals)-1
sigma = np.std(vals) / np.sqrt(len(vals))
return stats.t.interval(percent, df, loc=mu, scale=sigma) | 1d5e311aab4620e070efcf685505af4a072e56eb | 3,648,637 |
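A small usage sketch (assumes the numpy/scipy imports added above): the sample mean lies inside the returned 95% interval.

vals = [9.8, 10.1, 10.0, 9.9, 10.2]
low, high = get_ci(vals)
assert low < np.mean(vals) < high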
from typing import OrderedDict
from datetime import datetime
def kb_overview_rows(mode=None, max=None, locale=None, product=None, category=None):
"""Return the iterable of dicts needed to draw the new KB dashboard
overview"""
if mode is None:
mode = LAST_30_DAYS
docs = Document.objects.filter(locale=settings.WIKI_DEFAULT_LANGUAGE,
is_archived=False,
is_template=False)
docs = docs.exclude(html__startswith=REDIRECT_HTML)
select = OrderedDict([
('num_visits', 'SELECT `wdv`.`visits` '
'FROM `dashboards_wikidocumentvisits` as `wdv` '
'WHERE `wdv`.`period`=%s '
'AND `wdv`.`document_id`=`wiki_document`.`id`'),
])
docs = docs.extra(select=select,
select_params=(mode,))
if product:
docs = docs.filter(products__in=[product])
if category:
docs = docs.filter(category__in=[category])
docs = docs.order_by('-num_visits', 'title')
if max:
docs = docs[:max]
rows = []
if docs.count():
max_visits = docs[0].num_visits
for d in docs:
data = {
'url': reverse('wiki.document', args=[d.slug],
locale=settings.WIKI_DEFAULT_LANGUAGE),
'trans_url': reverse('wiki.show_translations', args=[d.slug],
locale=settings.WIKI_DEFAULT_LANGUAGE),
'title': d.title,
'num_visits': d.num_visits,
'ready_for_l10n': d.revisions.filter(is_approved=True,
is_ready_for_localization=True).exists()
}
if d.current_revision:
data['expiry_date'] = d.current_revision.expires
if d.num_visits:
data['visits_ratio'] = float(d.num_visits) / max_visits
if 'expiry_date' in data and data['expiry_date']:
data['stale'] = data['expiry_date'] < datetime.now()
# Check L10N status
if d.current_revision:
unapproved_revs = d.revisions.filter(
reviewed=None, id__gt=d.current_revision.id)[:1]
else:
unapproved_revs = d.revisions.all()
if unapproved_revs.count():
data['revision_comment'] = unapproved_revs[0].comment
else:
data['latest_revision'] = True
# Get the translated doc
if locale != settings.WIKI_DEFAULT_LANGUAGE:
transdoc = d.translations.filter(
locale=locale,
is_archived=False).first()
if transdoc:
data['needs_update'] = transdoc.is_outdated()
else: # For en-US we show the needs_changes comment.
data['needs_update'] = d.needs_change
data['needs_update_comment'] = d.needs_change_comment
rows.append(data)
return rows | 768d9aacf564e17b26beb53f924575202fea3276 | 3,648,638 |
def test_query_devicecontrolalert_facets(monkeypatch):
"""Test a Device Control alert facet query."""
_was_called = False
def _run_facet_query(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/alerts/devicecontrol/_facet"
assert body == {"query": "Blort", "criteria": {"workflow": ["OPEN"]},
"terms": {"rows": 0, "fields": ["REPUTATION", "STATUS"]},
"rows": 100}
_was_called = True
return StubResponse({"results": [{"field": {},
"values": [{"id": "reputation", "name": "reputationX", "total": 4}]},
{"field": {},
"values": [{"id": "status", "name": "statusX", "total": 9}]}]})
api = CBCloudAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbc_sdk_api(monkeypatch, api, POST=_run_facet_query)
query = api.select(DeviceControlAlert).where("Blort").set_workflows(["OPEN"])
f = query.facets(["REPUTATION", "STATUS"])
assert _was_called
assert f == [{"field": {}, "values": [{"id": "reputation", "name": "reputationX", "total": 4}]},
{"field": {}, "values": [{"id": "status", "name": "statusX", "total": 9}]}] | 6a0182a7b9ad15e5194bcc80aba19ab386e71f35 | 3,648,639 |
def regularity(sequence):
"""
Compute the regularity of a sequence.
The regularity basically measures what percentage of a user's
visits are to a previously visited place.
Parameters
----------
sequence : list
A list of symbols.
Returns
-------
float
1 minus the ratio between unique and total symbols in the sequence.
"""
n = len(sequence)
n_unique = len(set(sequence))
if n_unique <= 1:
return 1.0
if n_unique == n:
return .0
return 1 - (n_unique / n) | e03d38cc3882ea5d0828b1f8942039865a90d49d | 3,648,641 |
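Three illustrative cases for the regularity measure:

assert regularity(["home"] * 5) == 1.0          # one unique place -> fully regular
assert regularity(["a", "b", "c", "d"]) == 0.0  # all places distinct
assert regularity(["a", "a", "b", "a"]) == 0.5  # 2 unique symbols over 4 visits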
from typing import List
def _make_tick_labels(
tick_values: List[float], axis_subtractor: float, tick_divisor_power: int,
) -> List[str]:
"""Given a collection of ticks, return a formatted version.
Args:
tick_values (List[float]): The ticks positions in ascending
order.
tick_divisor_power (int): The power of ten the tick labels will
be divided by.
axis_subtractor (float): The amount to subtract from the tick
values.
Returns:
        List[str]: The generated tick labels.
"""
tick_divisor_prefix = _get_metric_prefix(tick_divisor_power)
return [
f"{(tick - axis_subtractor) / 10 ** tick_divisor_power:0.2f}"
f"{tick_divisor_prefix}"
for tick in tick_values
] | 13aaddc38d5fdc05d38a97c5ca7278e9898a8ed1 | 3,648,642 |
import pickle
import h5py
def load_coco(dataset_file, map_file):
"""
Load preprocessed MSCOCO 2017 dataset
"""
print('\nLoading dataset...')
h5f = h5py.File(dataset_file, 'r')
x = h5f['x'][:]
y = h5f['y'][:]
h5f.close()
split = int(x.shape[0] * 0.8) # 80% of data is assigned to the training set
x_train, y_train = x[:split], y[:split]
x_test, y_test = x[split:], y[split:]
with open(map_file, 'rb') as mapping:
category_id_map = pickle.load(mapping)
id_category = category_id_map['id_category']
print('Done.')
return (x_train, y_train), (x_test, y_test), id_category | b924b13411075f569b8cd73ee6d5414a4f932a17 | 3,648,643 |
import numpy as np
def eval_rule(call_fn, abstract_eval_fn, *args, **kwargs):
"""
Python Evaluation rule for a numba4jax function respecting the
XLA CustomCall interface.
Evaluates `outs = abstract_eval_fn(*args)` to compute the output shape
and preallocate them, then executes `call_fn(*outs, *args)` which is
the Numba kernel.
Args:
call_fn: a (numba.jit) function respecting the calling convention of
XLA CustomCall, taking first the outputs by reference then the
inputs.
abstract_eval_fn: The abstract evaluation function respecting jax
interface
args: The arguments to the `call_fn`
kwargs: Optional keyword arguments for the numba function.
"""
# compute the output shapes
output_shapes = abstract_eval_fn(*args)
# Preallocate the outputs
outputs = tuple(np.empty(shape.shape, dtype=shape.dtype) for shape in output_shapes)
# convert inputs to a tuple
inputs = tuple(np.asarray(arg) for arg in args)
# call the kernel
call_fn(outputs + inputs, **kwargs)
# Return the outputs
return tuple(outputs) | 6e52d9bdeda86c307390b95237589bd9315829ad | 3,648,644 |
def bev_box_overlap(boxes, qboxes, criterion=-1):
"""
Calculate rotated 2D iou.
Args:
boxes:
qboxes:
criterion:
Returns:
"""
riou = rotate_iou_gpu_eval(boxes, qboxes, criterion)
return riou | 4614a3f8c8838fc4f9f32c2ef625274239f36acf | 3,648,645 |
def filter_shape(image):
"""画像にぼかしフィルターを適用。"""
weight = (
(1, 1, 1),
(1, 1, 1),
(1, 1, 1)
)
offset = 0
div = 9
return _filter(image, weight, offset, div) | f16c181c0eb24a35dfc70df25f1977a04dc9b0f5 | 3,648,646 |
def makeTriangularMAFdist(low=0.02, high=0.5, beta=5):
"""Fake a non-uniform maf distribution to make the data
more interesting - more rare alleles """
MAFdistribution = []
    for i in range(int(100*low),int(100*high)+1):
freq = (51 - i)/100.0 # large numbers of small allele freqs
for j in range(beta*i): # or i*i for crude exponential distribution
MAFdistribution.append(freq)
return MAFdistribution | 4210fffbe411364b2de8ffbc1e6487c8d3a87a09 | 3,648,648 |
def contains_whitespace(s : str):
"""
Returns True if any whitespace chars in input string.
"""
return " " in s or "\t" in s | c5dc974988efcfa4fe0ec83d115dfa7508cef798 | 3,648,650 |
import itertools
def get_files_to_check(files, filter_function):
# type: (List[str], Callable[[str], bool]) -> List[str]
"""Get a list of files that need to be checked based on which files are managed by git."""
# Get a list of candidate_files
candidates_nested = [expand_file_string(f) for f in files]
candidates = list(itertools.chain.from_iterable(candidates_nested))
if len(files) > 0 and len(candidates) == 0:
raise ValueError("Globs '%s' did not find any files with glob." % (files))
repos = get_repos()
valid_files = list(
itertools.chain.from_iterable(
[r.get_candidates(candidates, filter_function) for r in repos]))
if len(files) > 0 and len(valid_files) == 0:
raise ValueError("Globs '%s' did not find any files with glob in git." % (files))
return valid_files | 6b508745aa47e51c4b8f4b88d7a2dff782289206 | 3,648,651 |
def main(argv):
"""Parse the argv, verify the args, and call the runner."""
args = arg_parse(argv)
return run(args.top_foods, args.top_food_categories) | bb2da95cec48b1abfa0e1c0064d413e55767aa89 | 3,648,652 |
import requests
def _failover_read_request(request_fn, endpoint, path, body, headers, params, timeout):
""" This function auto-retries read-only requests until they return a 2xx status code. """
try:
return request_fn('GET', endpoint, path, body, headers, params, timeout)
except (requests.exceptions.RequestException, Non200ResponseException) as ex:
raise FailoverException(ex) | 731a14e72ff4fa88f160215f711fcca2199c736c | 3,648,653 |
def GenerateConfig(context):
"""Generates configuration."""
image = ''.join(['https://www.googleapis.com/compute/v1/',
'projects/google-containers/global/images/',
context.properties['containerImage']])
default_network = ''.join(['https://www.googleapis.com/compute/v1/projects/',
context.env['project'],
'/global/networks/default'])
instance_template = {
'name': context.env['name'] + '-it',
'type': 'compute.v1.instanceTemplate',
'properties': {
'properties': {
'metadata': {
'items': [{
'key': 'google-container-manifest',
'value': GenerateManifest(context)
}]
},
'machineType': 'f1-micro',
'disks': [{
'deviceName': 'boot',
'boot': True,
'autoDelete': True,
'mode': 'READ_WRITE',
'type': 'PERSISTENT',
'initializeParams': {'sourceImage': image}
}],
'networkInterfaces': [{
'accessConfigs': [{
'name': 'external-nat',
'type': 'ONE_TO_ONE_NAT'
}],
'network': default_network
}]
}
}
}
outputs = [{'name': 'instanceTemplateSelfLink',
'value': '$(ref.' + instance_template['name'] + '.selfLink)'}]
return {'resources': [instance_template], 'outputs': outputs} | 4dbf579c780b0305b4a0b5dcfb3860a086867cbb | 3,648,654 |
from typing import Optional
from typing import List
async def read_all_orders(
status_order: Optional[str] = None,
priority: Optional[int] = None,
age: Optional[str] = None,
value: Optional[str] = None,
start_date: Optional[str] = None,
end_date: Optional[str] = None,
db: AsyncIOMotorClient = Depends(get_database),
) -> List[OrderSchema]:
"""[summary]
Get all item by ID.
[description]
Endpoint to retrieve an specific item.
[optional]
[ON CREATE] Filter order by status: ['to-do', 'doing', 'done']
"""
filters = {
"status": status_order,
"priority": priority,
"age": age,
"value": value,
"start_date": start_date,
"end_date": end_date,
}
orders_list = await orders.get_all(db, filters)
if not orders_list:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND, detail="Note not found"
)
return list(map(fix_item_id, orders_list)) | 28200a5dd9c508690ad3234c3080f0bb44a425c4 | 3,648,655 |
import numpy as np
def read_log_file(path):
"""
Read the log file for 3D Match's log files
"""
with open(path, "r") as f:
log_lines = f.readlines()
log_lines = [line.strip() for line in log_lines]
num_logs = len(log_lines) // 5
transforms = []
    for i in range(0, num_logs * 5, 5):
meta_data = np.fromstring(log_lines[i], dtype=int, sep=" \t")
transform = np.zeros((4, 4), dtype=float)
for j in range(4):
transform[j] = np.fromstring(log_lines[i + j + 1], dtype=float, sep=" \t")
transforms.append((meta_data, transform))
return transforms | dcd010f27c94d5bcd2287cb83765fe0eca291628 | 3,648,656 |
import math
def divide_list(l, n):
"""Divides list l into n successive chunks."""
length = len(l)
chunk_size = int(math.ceil(length/n))
expected_length = n * chunk_size
chunks = []
for i in range(0, expected_length, chunk_size):
chunks.append(l[i:i+chunk_size])
for i in range(len(chunks), n):
chunks.append([])
return chunks | bad7c118988baebd5712cd496bb087cd8788abb7 | 3,648,657 |
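Two quick examples of the chunking behaviour, including the empty-list padding:

assert divide_list([1, 2, 3, 4, 5, 6, 7], 3) == [[1, 2, 3], [4, 5, 6], [7]]
assert divide_list([1, 2], 4) == [[1], [2], [], []]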
def sigma(n):
"""Calculate the sum of all divisors of N."""
return sum(divisors(n)) | 13dd02c10744ce74b2a89bb4231c9c055eefa065 | 3,648,658 |
import numpy as np
from pandas import DataFrame
from rdkit import Chem
from rdkit.Chem.AtomPairs import Pairs
def ATOMPAIRSfpDataFrame(chempandas,namecol,smicol):
"""
AtomPairs-based fingerprints 2048 bits.
"""
assert chempandas.shape[0] <= MAXLINES
molsmitmp = [Chem.MolFromSmiles(x) for x in chempandas.iloc[:,smicol]]
i = 0
molsmi = []
for x in molsmitmp:
if x is not None:
x.SetProp("_Name",chempandas.iloc[i,namecol])
molsmi.append(x)
i += 1
# ATOMPAIRS Fingerprints.
fps = [Pairs.GetAtomPairFingerprintAsBitVect(x) for x in molsmi]
fpsmat = np.matrix(fps)
df = DataFrame(fpsmat,index = [x.GetProp("_Name") for x in molsmi]) # how to name the col?
df['SMILES'] = [Chem.MolToSmiles(x) for x in molsmi]
df['CHEMBL'] = df.index
return(df) | cb2babbebd60162c4cc9aa4300dba14cc2cf7ce8 | 3,648,659 |
import numpy as np
def filter_by_minimum(X, region):
"""Filter synapses by minimum.
# Arguments:
X (numpy array): A matrix in the NeuroSynapsis matrix format.
# Returns:
numpy array: A matrix in the NeuroSynapsis matrix format.
"""
    vals = np.where((X[:,2] >= region[0])*(X[:,3] >= region[1])*(X[:,4] >= region[2]))[0]
return X[vals,:] | 6af135d7ecc716c957bf44ed17caab4f9dd63215 | 3,648,660 |
import numpy as np
import networkx as nx
from tqdm import tqdm
def gen_graphs(sizes):
"""
Generate community graphs.
"""
A = []
for V in tqdm(sizes):
G = nx.barabasi_albert_graph(V, 3)
G = nx.to_numpy_array(G)
P = np.eye(V)
np.random.shuffle(P)
A.append(P.T @ G @ P)
return np.array(A) | 6decd819a5e2afad9744270c616a60c532f2e6fd | 3,648,661 |
def daemonize(identity: str, kind: str = 'workspace') -> DaemonID:
"""Convert to DaemonID
:param identity: uuid or DaemonID
:param kind: defaults to 'workspace'
:return: DaemonID from identity
"""
try:
return DaemonID(identity)
except TypeError:
return DaemonID(f'j{kind}-{identity}') | 8cd08d9d8a558b9f78de60d2df96db553fbca8bf | 3,648,662 |
from typing import Counter
def count_gender(data_list:list):
"""
    Count the population of each gender
    args:
        data_list (list): List of data items that have the 'Gender' property
    return (list): Returns a list with the total number of 'Male' and 'Female' elements, in that order
    """
genders = column_to_list(data_list, "Gender")
genders_counter = Counter(genders)
male = genders_counter["Male"]
female = genders_counter["Female"]
return [male, female] | 9e8a05067a617ca0606eec8d216e25d3f937e097 | 3,648,663 |
async def card_balance(request: Request):
""" 返回用户校园卡余额 """
cookies = await get_cookies(request)
balance_data = await balance.balance(cookies)
return success(data=balance_data) | 09e2b7e84743a0ed5625a6cc19dda0e97eb6df10 | 3,648,664 |
def _grid_vals(grid, dist_name, scn_save_fs,
mod_thy_info, constraint_dct):
""" efef
"""
# Initialize the lists
locs_lst = []
enes_lst = []
# Build the lists of all the locs for the grid
grid_locs = []
for grid_val_i in grid:
if constraint_dct is None:
grid_locs.append([[dist_name], [grid_val_i]])
else:
grid_locs.append([constraint_dct, [dist_name], [grid_val_i]])
# Get the energies along the grid
for locs in grid_locs:
if scn_save_fs[-1].exists(locs):
scn_path = scn_save_fs[-1].path(locs)
sp_save_fs = autofile.fs.single_point(scn_path)
enes_lst.append(sp_save_fs[-1].file.energy.read(mod_thy_info[1:4]))
locs_lst.append(locs)
return locs_lst, enes_lst | a401632285c9ac48136239fcfa7f3c2eb760734c | 3,648,665 |
import numpy as np
import utool as ut
import vtool as vt
def group_images_by_label(label_arr, gid_arr):
"""
Input: Length N list of labels and ids
    Output: Length M list of unique labels, and length M list of lists of ids
"""
# Reverse the image to cluster index mapping
labels_, groupxs_ = vt.group_indices(label_arr)
sortx = np.array(list(map(len, groupxs_))).argsort()[::-1]
labels = labels_.take(sortx, axis=0)
groupxs = ut.take(groupxs_, sortx)
label_gids = vt.apply_grouping(gid_arr, groupxs)
return labels, label_gids | 9bdc83f2a9a5810b5d3cf443dcd72852dd35c26b | 3,648,666 |
from typing import Optional
def ask_user(prompt: str, default: str = None) -> Optional[str]:
"""
Prompts the user, with a default. Returns user input from ``stdin``.
"""
if default is None:
prompt += ": "
else:
prompt += " [" + default + "]: "
result = input(prompt)
return result if len(result) > 0 else default | 7803d7e71b2cc3864440cd99c276784cebf81f91 | 3,648,667 |
def tensor_index_by_tuple(data, tuple_index):
"""Tensor getitem by tuple of various types with None"""
if not tuple_index:
return data
op_name = const_utils.TENSOR_GETITEM
tuple_index = _transform_ellipsis_to_slice(data, tuple_index, op_name)
data, tuple_index = _expand_data_dims(data, tuple_index)
min_data_dim, max_data_dim = 1, 8
const_utils.judge_data_dim(data.ndim, min_data_dim, max_data_dim)
indexes_types = hyper_map(F.typeof, tuple_index)
contain_type = const_utils.tuple_index_type_cnt(indexes_types, op_name)
if contain_type == const_utils.ALL_BASIC:
return _tensor_getitem_by_tuple_slice(data, tuple_index)
return _tensor_getitem_by_tuple(data, tuple_index, op_name) | 4a32d9f4028f4ac57d1e523c946bb4a4d349b120 | 3,648,668 |
import numpy as np
import scipy.stats
def are_neurons_responsive(spike_times, spike_clusters, stimulus_intervals=None,
spontaneous_period=None, p_value_threshold=.05):
"""
Return which neurons are responsive after specific stimulus events, compared to spontaneous
activity, according to a Wilcoxon test.
:param spike_times: times of spikes, in seconds
:type spike_times: 1D array
:param spike_clusters: spike neurons
:type spike_clusters: 1D array, same length as spike_times
    :param stimulus_intervals: the times of
        the stimulus events onsets and offsets
    :type stimulus_intervals: 2D array
    :param spontaneous_period: the period of spontaneous activity
    :type spontaneous_period: 1D array with 2 elements
:param p_value_threshold: the threshold for the
p value in the Wilcoxon test.
:type p_value_threshold: float
:rtype: 1D boolean array with `n_neurons`
elements (clusters are sorted by increasing cluster
id as appearing in spike_clusters).
"""
stimulus_counts = _get_spike_counts_in_bins(
spike_times, spike_clusters, stimulus_intervals)
# Find spontaneous intervals.
stimulus_durations = np.diff(stimulus_intervals, axis=1).squeeze()
t0, t1 = spontaneous_period
spontaneous_starts = np.linspace(
t0,
t1 - stimulus_durations.max(),
len(stimulus_intervals))
spontaneous_intervals = np.c_[
spontaneous_starts,
spontaneous_starts +
stimulus_durations]
# Count the spontaneous counts.
spontaneous_counts = _get_spike_counts_in_bins(
spike_times, spike_clusters, spontaneous_intervals)
    assert stimulus_counts.shape == spontaneous_counts.shape
# Generate the responsive vector (for every neuron, whether it is
# responsive).
responsive = np.zeros(stimulus_counts.shape[0], dtype=bool)
n_neurons = stimulus_counts.shape[0]
for i in range(n_neurons):
x = stimulus_counts[i, :]
y = spontaneous_counts[i, :]
        try:
            _, p = scipy.stats.wilcoxon(x, y)
        except ValueError:
            # Wilcoxon raises when the two samples are identical; treat as non-responsive.
            continue
        responsive[i] = p < p_value_threshold
return responsive | b5b835113644d7c42e7950cc8fc5699e62d631fa | 3,648,669 |
def _get_book(**keywords):
"""Get an instance of :class:`Book` from an excel source
Where the dictionary should have text as keys and two dimensional
array as values.
"""
source = factory.get_book_source(**keywords)
sheets = source.get_data()
filename, path = source.get_source_info()
return sheets, filename, path | 2e6114c2948272ce2d342d954594a8d626a45635 | 3,648,670 |
def handler(
state_store: StateStore,
hardware_api: HardwareAPI,
movement_handler: MovementHandler,
) -> PipettingHandler:
"""Create a PipettingHandler with its dependencies mocked out."""
return PipettingHandler(
state_store=state_store,
hardware_api=hardware_api,
movement_handler=movement_handler,
) | 099532e3c7d51812548643d10758ec23c8354a00 | 3,648,671 |
def get_domain(domain_name):
"""
Query the Rackspace DNS API to get a domain object for the domain name.
Keyword arguments:
domain_name -- the domain name that needs a challenge record
"""
base_domain_name = get_tld("http://{0}".format(domain_name))
domain = rax_dns.find(name=base_domain_name)
return domain | 5ea1bbe9c73250abf60c5ad9f1d796035ed87654 | 3,648,672 |
import sympy
def lobatto(n):
"""Get Gauss-Lobatto-Legendre points and weights.
Parameters
----------
n : int
Number of points
"""
if n == 2:
return ([0, 1],
[sympy.Rational(1, 2), sympy.Rational(1, 2)])
if n == 3:
return ([0, sympy.Rational(1, 2), 1],
[sympy.Rational(1, 6), sympy.Rational(2, 3), sympy.Rational(1, 6)])
if n == 4:
return ([0, (1 - 1 / sympy.sqrt(5)) / 2, (1 + 1 / sympy.sqrt(5)) / 2, 1],
[sympy.Rational(1, 12), sympy.Rational(5, 12), sympy.Rational(5, 12),
sympy.Rational(1, 12)])
if n == 5:
return ([0, (1 - sympy.sqrt(3) / sympy.sqrt(7)) / 2, sympy.Rational(1, 2),
(1 + sympy.sqrt(3) / sympy.sqrt(7)) / 2, 1],
[sympy.Rational(1, 20), sympy.Rational(49, 180), sympy.Rational(16, 45),
sympy.Rational(49, 180), sympy.Rational(1, 20)])
if n == 6:
return ([0,
(1 - sympy.sqrt(sympy.Rational(1, 3) + (2 * sympy.sqrt(7) / 21))) / 2,
(1 - sympy.sqrt(sympy.Rational(1, 3) - (2 * sympy.sqrt(7) / 21))) / 2,
(1 + sympy.sqrt(sympy.Rational(1, 3) - (2 * sympy.sqrt(7) / 21))) / 2,
(1 + sympy.sqrt(sympy.Rational(1, 3) + (2 * sympy.sqrt(7) / 21))) / 2,
1],
[sympy.Rational(1, 30), (14 - sympy.sqrt(7)) / 60, (14 + sympy.sqrt(7)) / 60,
(14 + sympy.sqrt(7)) / 60, (14 - sympy.sqrt(7)) / 60, sympy.Rational(1, 30)])
if n == 7:
return ([0,
(1 - sympy.sqrt((5 + 2 * sympy.sqrt(5) / sympy.sqrt(3)) / 11)) / 2,
(1 - sympy.sqrt((5 - 2 * sympy.sqrt(5) / sympy.sqrt(3)) / 11)) / 2,
sympy.Rational(1, 2),
(1 + sympy.sqrt((5 - 2 * sympy.sqrt(5) / sympy.sqrt(3)) / 11)) / 2,
(1 + sympy.sqrt((5 + 2 * sympy.sqrt(5) / sympy.sqrt(3)) / 11)) / 2,
1],
[sympy.Rational(1, 42),
(124 - 7 * sympy.sqrt(15)) / 700,
(124 + 7 * sympy.sqrt(15)) / 700,
sympy.Rational(128, 525),
(124 + 7 * sympy.sqrt(15)) / 700,
(124 - 7 * sympy.sqrt(15)) / 700,
sympy.Rational(1, 42)])
raise NotImplementedError() | 595610ec035dd9b059ab085375c017856359b837 | 3,648,673 |
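A quick sanity check of the rule above (a sketch, not part of the original entry, assuming the lobatto function as defined): on [0, 1] the weights of an n-point rule sum to 1 and the rule is exact for polynomials up to degree 2n - 3.

import sympy

x = sympy.Symbol("x")
points, weights = lobatto(3)

# Weights of a rule on [0, 1] sum to the interval length.
assert sum(weights) == 1

# The 3-point rule is exact up to degree 2*3 - 3 = 3, e.g. for x**3.
approx = sum(w * p**3 for p, w in zip(points, weights))
assert approx == sympy.integrate(x**3, (x, 0, 1))  # both equal 1/4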
def login():
"""Login."""
username = request.form.get('username')
password = request.form.get('password')
if not username:
flask.flash('Username is required.', 'warning')
elif password is None:
flask.flash('Password is required.', 'warning')
else:
user = models.User.login_user(username, password)
if user:
session['user'] = user.username
return flask.redirect(flask.url_for('catalog'))
flask.flash('Invalid username/password.', 'danger')
return flask.redirect(flask.url_for('home')) | c54bc743ac305db8f0d74a7bd62f7bb70a952454 | 3,648,674 |
def sha2_384(data: bytes) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.sha2_384(), data) | 7523ca7e2d11e3b686db45a28d09ccdad17ff243 | 3,648,676 |
import string
def cat(arr, match="CAT", upper_bound=None, lower_bound=None):
"""
Basic idea is if a monkey typed randomly, how long would it take for it
to write `CAT`. Practically, we are mapping generated numbers onto the
alphabet.
>"There are 26**3 = 17 576 possible 3-letter words, so the average number of
keystrokes necessary to produce CAT should be around 17 576" [1]
Example
-------
Parameters
----------
    arr: list-type object
        Sequence of randomly generated numbers to map onto the alphabet
match: string or list-type object
The keyword to search for. Other than length, doesn't really matter.
If you pass in a list of strings, it will give you a result for each
passed in string.
upper_bound: int (optional)
Upper bound of random values. If not set, will calculate the minimum
value from the array passed.
lower_bound: int (optional)
Lower bound of random values. If not set, will calculate the maximum
value from the array passed.
Returns
-------
dict
Key is the string passed into match, the value is a list of the
iteration cycles it was found at
Notes
-----
[1]: Marsaglia, G. and Zaman, A., (1995), Monkey tests for random number
generators, Computers & Mathematics with Applications, 9, No. 9, 1–10.
"""
if upper_bound is None:
upper_bound = np.max(arr)
if lower_bound is None:
lower_bound = np.min(arr)
if isinstance(match, str):
match = [match]
match = list(map(str.upper, match))
num_letters = len(match[0])
assert all([len(match_i) == num_letters for match_i in match]), \
"All elements of `match` must have the same number of characters"
n_uppercase = len(string.ascii_uppercase)
# {...number: letter...} mapping
mapping = dict(zip(range(n_uppercase), string.ascii_uppercase))
# Scale the array so that everything is between 0 and 26
arr_norm = np.floor((arr - lower_bound) * (n_uppercase/upper_bound))
# Map the integer component to letters
    letters = [mapping[i] for i in arr_norm.astype(int)]
# Split the array of letters into words
words = chunker(letters, batch_size=num_letters, complete=True)
iter_counts = {match_i: [] for match_i in match}
for i, letter_list in enumerate(words):
word = ''.join(letter_list)
if word in match:
iter_counts[word].append(i)
return iter_counts | 1077bc5c4bc989e416cdaf27427f5a617491210d | 3,648,677 |
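The cat function above relies on a chunker helper that is not included in this entry. A minimal sketch, under the assumption that it yields consecutive batches of batch_size items and that complete=True drops a trailing partial batch:

def chunker(seq, batch_size, complete=False):
    # Yield consecutive slices of length batch_size.
    # Assumption: complete=True drops a trailing slice shorter than batch_size.
    for start in range(0, len(seq), batch_size):
        batch = seq[start:start + batch_size]
        if complete and len(batch) < batch_size:
            break
        yield batch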
from cryptography.fernet import Fernet

def encrypt_data(key: bytes, data: str) -> str:
    """
    Encrypt the data
    :param key: key to encrypt the data
    :param data: data to be encrypted
    :returns: the encrypted token as a UTF-8 string
    """
# instance class
cipher_suite = Fernet(key)
# convert our data into bytes mode
data_to_bytes = bytes(data, "utf-8")
encrypted = cipher_suite.encrypt(data_to_bytes)
return encrypted.decode("utf-8") | 80e69657987956b3a0dc6d87dee79a4dcc5db3f7 | 3,648,678 |
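A usage sketch for encrypt_data, assuming the standard cryptography.fernet API; the decrypt round trip is shown only for illustration and is not part of the original entry.

from cryptography.fernet import Fernet

key = Fernet.generate_key()                  # urlsafe base64-encoded 32-byte key
token = encrypt_data(key, "secret message")  # str token

# Fernet.decrypt expects bytes, so re-encode the str token.
plaintext = Fernet(key).decrypt(token.encode("utf-8")).decode("utf-8")
assert plaintext == "secret message"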
from typing import List
from typing import Dict
def make_doi_table(dataset: ObservatoryDataset) -> List[Dict]:
"""Generate the DOI table from an ObservatoryDataset instance.
:param dataset: the Observatory Dataset.
:return: table rows.
"""
records = []
for paper in dataset.papers:
# Doi, events and grids
doi = paper.doi.upper()
events = make_doi_events(doi, paper.events)
# Affiliations: institutions, countries, regions, subregion, funders, journals, publishers
institutions = make_doi_institutions(paper.authors)
countries = make_doi_countries(paper.authors)
regions = make_doi_regions(paper.authors)
subregions = make_doi_subregions(paper.authors)
funders = make_doi_funders(paper.funders)
journals = make_doi_journals(paper.journal)
publishers = make_doi_publishers(paper.publisher)
# Make final record
records.append(
{
"doi": doi,
"crossref": {
"title": paper.title,
"published_year": paper.published_date.year,
"published_month": paper.published_date.month,
"published_year_month": f"{paper.published_date.year}-{paper.published_date.month}",
"funder": [{"name": funder.name, "DOI": funder.doi} for funder in paper.funders],
},
"unpaywall": {},
"unpaywall_history": {},
"mag": {},
"open_citations": {},
"events": events,
"affiliations": {
"doi": doi,
"institutions": institutions,
"countries": countries,
"subregions": subregions,
"regions": regions,
"groupings": [],
"funders": funders,
"authors": [],
"journals": journals,
"publishers": publishers,
},
}
)
# Sort to match with sorted results
records.sort(key=lambda r: r["doi"])
return records | 6347142546579712574a63048e6d778dfd558249 | 3,648,679 |
def binarySearch(arr, val):
"""
array values must be sorted
"""
left = 0
right = len(arr) - 1
half = (left + right) // 2
    while left <= right and arr[half] != val:
if val < arr[half]:
right = half - 1
else:
left = half + 1
half = (left + right) // 2
if arr[half] == val:
return half
return -1 | 2457e01dee0f3e3dd988471ca708883d2a612066 | 3,648,681 |
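A small usage sketch (assumes the sorted-input requirement stated in the docstring):

values = [2, 3, 5, 7, 11, 13]
assert binarySearch(values, 7) == 3    # present: index returned
assert binarySearch(values, 4) == -1   # absent: -1 returned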
from typing import Union
from re import M
from typing import cast
def foreign_key(
recipe: Union[Recipe[M], str], one_to_one: bool = False
) -> RecipeForeignKey[M]:
"""Return a `RecipeForeignKey`.
Return the callable, so that the associated `_model` will not be created
during the recipe definition.
This resolves recipes supplied as strings from other module paths or from
the calling code's module.
"""
if isinstance(recipe, str):
# Load `Recipe` from string before handing off to `RecipeForeignKey`
try:
# Try to load from another module
recipe = baker._recipe(recipe)
except (AttributeError, ImportError, ValueError):
# Probably not in another module, so load it from calling module
recipe = _load_recipe_from_calling_module(cast(str, recipe))
return RecipeForeignKey(cast(Recipe[M], recipe), one_to_one) | f865bf1c7a91a124ca7518a2a2050371112e820e | 3,648,682 |
def posterize(image, num_bits):
"""Equivalent of PIL Posterize."""
shift = 8 - num_bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift) | 11dc20facfd5ac57e7547036304d192ce21fdb0a | 3,648,683 |
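A usage sketch for posterize; it assumes a uint8 image tensor, since the bitwise shifts require an integer dtype.

import tensorflow as tf

image = tf.constant([[250, 128, 37]], dtype=tf.uint8)
print(posterize(image, num_bits=4))  # keeps the top 4 bits: [[240 128 32]]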
def polyFit(x, y):
"""
Function to fit a straight line to data and estimate slope and
intercept of the line and corresponding errors using first order
polynomial fitting.
Parameters
----------
x : ndarray
X-axis data
y : ndarray
Y-axis data
Returns
-------
ndarray
slope, intercept, SDslope, SDintercept
Reference
---------
"""
# Number of input points
N = x.shape[0]
# Estimate slope and intercept of fitted line
slope, intercept = np.polyfit(x, y, 1)
# Calculate standard deviation of slope and intercept
yhat = intercept + slope * x
residual = y - yhat
Sx2 = np.sum(x**2)
Sxx = np.sum((x - np.mean(x))**2)
Sy_x = np.sqrt(np.sum(residual**2) / (N -2))
SDslope = Sy_x / np.sqrt(Sxx)
SDintercept = Sy_x * np.sqrt(Sx2 / (N * Sxx))
return np.array([[slope, intercept], [SDslope, SDintercept]]) | 0acc032492a63dbb271293bed8cf11c800621a35 | 3,648,685 |
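A usage sketch for polyFit on noise-free data, where the fit should recover the true slope and intercept and the estimated standard deviations should be close to zero.

import numpy as np

x = np.linspace(0.0, 10.0, 20)
y = 2.5 * x + 1.0                 # exact line, no noise
coeffs, errors = polyFit(x, y)
print(coeffs)   # ~[2.5, 1.0]  -> slope, intercept
print(errors)   # ~[0.0, 0.0]  -> SDslope, SDintercept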
def evaluate_error(X, y, w):
"""Returns the mean squared error.
    Parameters
    ----------
    X : numpy.ndarray
Numpy array of data.
y : numpy.ndarray
Numpy array of outputs. Dimensions are n * 1, where n is the number of
rows in `X`.
w : numpy.ndarray
Numpy array with dimensions (m + 1) * 1, where m is the number of
columns in `X`.
Returns
-------
float
The mean squared error
"""
X_b = np.hstack((np.ones((X.shape[0], 1)), X))
y_predict = X_b.dot(w)
dist = (y - y_predict) ** 2
return float(np.sum(dist)) / X.shape[0] | 2e54a2bb64a590e3e35456b5039b4cfce7632c0f | 3,648,687 |
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up ha_reef_pi from a config entry."""
websession = async_get_clientsession(hass)
coordinator = ReefPiDataUpdateCoordinator(hass, websession, entry)
await coordinator.async_config_entry_first_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
undo_listener = entry.add_update_listener(update_listener)
hass.data[DOMAIN][entry.entry_id] = {
"coordinator": coordinator,
"undo_update_listener": undo_listener,
}
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True | 655a1265a77cec38346931a6cf5feaf923c1573b | 3,648,688 |
def get_list(client):
"""
"""
request = client.__getattr__(MODULE).ListIpBlocks()
response, _ = request.result()
return response['results'] | 0836bc58d8108a804c9464a713184ac582bd4e90 | 3,648,689 |
import six
def file_asset(class_obj):
"""
Decorator to annotate the FileAsset class. Registers the decorated class
as the FileAsset known type.
"""
assert isinstance(class_obj, six.class_types), "class_obj is not a Class"
global _file_asset_resource_type
_file_asset_resource_type = class_obj
return class_obj | a21ae9d8ba84d2f6505194db6fd8bd84593f3928 | 3,648,690 |
def scheming_field_by_name(fields, name):
"""
Simple helper to grab a field from a schema field list
based on the field name passed. Returns None when not found.
"""
for f in fields:
if f.get('field_name') == name:
return f | ba4d04585b12ab941db8bc0787b076c32e76cadb | 3,648,692 |
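A small usage sketch, assuming the usual scheming field dictionaries keyed by field_name:

fields = [
    {"field_name": "title", "label": "Title"},
    {"field_name": "notes", "label": "Description"},
]
assert scheming_field_by_name(fields, "notes")["label"] == "Description"
assert scheming_field_by_name(fields, "missing") is None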
def merge_sort(items):
"""Sorts a list of items.
Uses merge sort to sort the list items.
Args:
items: A list of items.
Returns:
The sorted list of items.
"""
n = len(items)
if n < 2:
return items
m = n // 2
left = merge_sort(items[:m])
right = merge_sort(items[m:])
return merge(left, right) | d42c60dda40fc421adef2d47f302426d7c176ba1 | 3,648,694 |
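merge_sort above calls a merge helper that is not part of this entry. A minimal sketch, assuming the conventional two-pointer merge of two sorted lists:

def merge(left, right):
    """Merge two sorted lists into a single sorted list."""
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])   # at most one of these is non-empty
    merged.extend(right[j:])
    return merged

assert merge_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]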
from typing import Optional
def hunk_boundary(
hunk: HunkInfo, operation_type: Optional[str] = None
) -> Optional[HunkBoundary]:
"""
Calculates boundary for the given hunk, returning a tuple of the form:
(<line number of boundary start>, <line number of boundary end>)
If operation_type is provided, it is used to filter down only to lines whose line_type matches
the operation_type. Possible values: "+", "-", None.
If there are no lines of the given type in the hunk, returns None.
"""
line_type_p = lambda line: True
if operation_type is not None:
line_type_p = lambda line: line.line_type == operation_type
admissible_lines = [line for line in hunk.lines if line_type_p(line)]
if not admissible_lines:
return None
return HunkBoundary(
operation_type=operation_type,
start=admissible_lines[0].new_line_number,
end=admissible_lines[-1].new_line_number,
) | c5ec0065e5ab85652a5d0d679a832fdd0dae1629 | 3,648,695 |
from pm4py.statistics.attributes.pandas import get as pd_attributes_filter
from pm4py.statistics.attributes.log import get as log_attributes_filter
def get_activities_list(log, parameters=None):
"""
Gets the activities list from a log object, sorted by activity name
Parameters
--------------
log
Log
parameters
Possible parameters of the algorithm
Returns
-------------
activities_list
List of activities sorted by activity name
"""
if parameters is None:
parameters = {}
activity_key = parameters[
constants.PARAMETER_CONSTANT_ACTIVITY_KEY] if constants.PARAMETER_CONSTANT_ACTIVITY_KEY in parameters else xes.DEFAULT_NAME_KEY
if type(log) is pd.DataFrame:
activities = pd_attributes_filter.get_attribute_values(log, activity_key)
else:
activities = log_attributes_filter.get_attribute_values(log, activity_key)
return sorted(list(activities.keys())) | b0e335dd31cae3fb291317a559c721199217605f | 3,648,696 |
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
source_vocab_size,
encoding_embedding_size):
"""
:return: tuple (RNN output, RNN state)
"""
embed = tf.contrib.layers.embed_sequence(rnn_inputs,
vocab_size=source_vocab_size,
embed_dim=encoding_embedding_size)
stacked_cells = tf.contrib.rnn.MultiRNNCell(
[tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.LSTMCell(rnn_size), keep_prob) for _ in range(num_layers)])
outputs, state = tf.nn.dynamic_rnn(stacked_cells,
embed,
dtype=tf.float32)
return outputs, state | 3179d478478e2c7ca5d415bb23643a836127f6fe | 3,648,697 |
def point_selection(start, end, faces):
""" Calculates the intersection points between a line segment and triangle mesh.
:param start: line segment start point
:type start: Vector3
:param end: line segment end point
:type end: Vector3
:param faces: faces: N x 9 array of triangular face vertices
:type faces: numpy.ndarray
:return: array of intersection points
:rtype: numpy.ndarray
"""
direction = end - start
length = direction.length
if length < eps or faces is None:
return np.array([])
direction /= length
distances = segment_triangle_intersection(start, direction, length, faces)
if not distances:
return np.array([])
distances = np.reshape(distances, (len(distances), 1))
return start + direction * distances | 287295de09a5375118ed050f025fa32b0690a9a9 | 3,648,698 |
def daysBetweenDates(year1, month1, day1, year2, month2, day2):
"""Returns the number of days between year1/month1/day1
and year2/month2/day2. Assumes inputs are valid dates
in Gregorian calendar, and the first date is not after
the second."""
month = month2
year = year2
day = day2 - day1
if (day < 0):
day += 30
month -= 1
month = month - month1
if (month < 0):
month += 12
year -= 1
year = year - year1
return (year * 360) + month * 30 + day | 687a7ff0b29ec2a931d872c18057741d93571ac1 | 3,648,699 |
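A worked check of the 30-day-month / 360-day-year approximation used above (these are not true Gregorian day counts):

assert daysBetweenDates(2012, 1, 1, 2012, 2, 1) == 30    # one "month"
assert daysBetweenDates(2012, 1, 1, 2013, 1, 1) == 360   # one "year"
assert daysBetweenDates(2012, 1, 15, 2012, 3, 10) == 55  # day borrow of 30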
def derive_sender_1pu(epk, sender_sk, recip_pk, alg, apu, apv, keydatalen):
"""Generate two shared secrets (ze, zs)."""
ze = derive_shared_secret(epk, recip_pk)
zs = derive_shared_secret(sender_sk, recip_pk)
key = derive_1pu(ze, zs, alg, apu, apv, keydatalen)
return key | ed2015f683ecdba93288c9a40a9dce35c3d99c37 | 3,648,700 |