| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
from typing import Sequence, Union

import numpy as np
import pandas as pd


def average_false_positive_score(
    y_true: Union[Sequence[int], np.ndarray, pd.Series],
    y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> float:
    """Calculates the average false positive score. Used when there are more than 2 classes
    and we want the model's average performance for each class.
Parameters
----------
y_true: list or array like
The true, or the expected, values of our problem
y_pred: list or array like
The predicted values of our problem
Returns
-------
The average of each false positive score for each group/class
"""
if len(np.unique(y_true)) < 3:
return false_positive_score(y_true, y_pred)
else:
overall_score = 0
unique_classes = np.unique(y_true)
for pos_class in unique_classes:
overall_score += false_positive_score(
y_true, y_pred, problem="multiclass", positive_class=pos_class
)
return overall_score / len(unique_classes) | 4b789381e25efffc0aa811287bab8299edf6b962 | 15,243 |
import html
def display_text_paragraph(text: str):
"""Displays paragraph of text (e.g. explanation, plot interpretation)
Args:
text (str): Informational text
Returns:
        html.P: Wrapper for the text paragraph
"""
return html.P(children=[text],
style={'font-size': '14px',
'white-space': 'pre-wrap'
}) | 8c4ae8f7b606b81726149891fb5db624647ba484 | 15,244 |
from sqlalchemy import Numeric


def is_numeric(_type) -> bool:
    """
    Check if a SQLAlchemy type instance is derived from Numeric
    """
return issubclass(_type.__class__, Numeric) | 1d604873e4043206b50ddc09c691331c4c50c49c | 15,245 |
import numpy as np
import matplotlib.pyplot as plt


def make_generic_time_plotter(
retrieve_data,
label,
dt,
time_unit=None,
title=None,
unit=None,
):
"""Factory function for creating plotters that can plot data over time.
The function returns a function which can be called whenever the plot should be drawn.
This function takes no arguments and will create a new figure and plot the given data when called.
This function doesn't call plt.show() so this must be done by the calling code.
    :param retrieve_data: function that returns data to plot over time when called with no arguments.
:param str label: Label representing the data.
:param number dt: delta time between time steps in data.
:param str time_unit: unit of time, e.g. 'fs'.
:param str title: title of plot.
:param str unit: unit of data, e.g. 'K'.
"""
def plotter():
data = retrieve_data()
t = np.arange(0, len(data)*dt, dt)
fig = plt.figure()
ax = plt.axes()
plt.title(title if title else label)
        plt.xlabel(f"Time [{time_unit}]" if time_unit else "Time")
        plt.ylabel(f"{label} [{unit}]" if unit else label)
        ax.plot(t, data, marker='o')
return plotter | 3fa391a94973e5b98394e684d8e4018fa16811df | 15,246 |
def registration(request):
"""Registration product page
"""
if request.method == 'POST':
user_form = UserRegistrationForm(request.POST)
if user_form.is_valid():
# Create a new user object but avoid saving it yet
new_user = user_form.save(commit=False)
# Set the chosen password
new_user.set_password(user_form.cleaned_data['password'])
# Save the User object
new_user.save()
            return render(request, 'registration/register_done.html', {'new_user': new_user})
else:
user_form = UserRegistrationForm()
return render(request, 'registration/registr.html', {'user_form': user_form}) | d176a5027058124dfd30a247f924776a87f7aba3 | 15,247 |
import numpy as np


def error_measure(predictions, labels):
    """ calculate the mean squared error of predictions """
return np.sum(np.power(predictions - labels, 2)) / (predictions.shape[0]) | 135b3b90047895ecff90aed6f4a37d73ef0ddd17 | 15,248 |
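A quick illustrative check of error_measure with three hand-picked values (the numbers below are made up):

```python
import numpy as np

predictions = np.array([2.0, 3.0, 5.0])
labels = np.array([2.0, 4.0, 4.0])

# (0^2 + 1^2 + 1^2) / 3 = 0.666...
print(error_measure(predictions, labels))
```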
def add3(self, x, y):
"""Celery task: add numbers."""
return x + y | 0d1017953dcdd1a0791afe291ce005247547f198 | 15,249 |
def zscore(dat, mean, sigma):
"""Calculates zscore of a data point in (or outside of) a dataset
zscore: how many sigmas away is a value from the mean of a dataset?
Parameters
----------
dat: float
Data point
mean: float
Mean of dataset
    sigma: float
Sigma of dataset
"""
zsc = (dat-mean)/sigma
return zsc | b11216e50632e2024af0a389184d5e1dba7ed4fd | 15,250 |
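For example, a reading of 12.0 in a dataset with mean 10.0 and sigma 2.0 sits exactly one standard deviation above the mean:

```python
print(zscore(12.0, mean=10.0, sigma=2.0))  # -> 1.0
```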
from collections import OrderedDict

from sympy import S, Tuple
def _create_ast_bilinear_form(terminal_expr, atomic_expr_field,
tests, d_tests,
trials, d_trials,
fields, d_fields, constants,
nderiv, dim, mapping, d_mapping, is_rational_mapping, spaces, mapping_space, mask, tag, is_parallel,
**kwargs):
"""
    This function creates the assembly function of a bilinear form
Parameters
----------
terminal_expr : <Matrix>
atomic representation of the bilinear form
atomic_expr_field: <dict>
dict of atomic expressions of fields
tests : <list>
list of tests functions
d_tests : <dict>
dictionary that contains the symbolic spans and basis values of each test function
trials : <list>
list of trial functions
    d_trials: <dict>
        dictionary that contains the symbolic spans and basis values of each trial function
fields : <list>
list of fields
constants : <list>
list of constants
nderiv : int
the order of the bilinear form
    dim : int
        number of dimensions
mapping : <Mapping>
Sympde Mapping object
d_mapping : <dict>
dictionary that contains the symbolic spans and basis values of the mapping
is_rational_mapping : <bool>
takes the value of True if the mapping is rational
spaces : <list>
list of sympde symbolic test and trial spaces
mask : <int|None>
the masked direction in case of boundary domain
tag : <str>
tag to be added to variable names
is_parallel : <bool>
True if the domain is distributed
Returns
-------
    node : DefNode
        represents the function definition node that computes the assembly
"""
pads = variables(('pad1, pad2, pad3'), dtype='int')[:dim]
b0s = variables(('b01, b02, b03'), dtype='int')[:dim]
e0s = variables(('e01, e02, e03'), dtype='int')[:dim]
g_quad = GlobalTensorQuadrature(False)
l_quad = LocalTensorQuadrature(False)
quad_order = kwargs.pop('quad_order', None)
# ...........................................................................................
g_span = OrderedDict((u,d_tests[u]['span']) for u in tests)
f_span = OrderedDict((f,d_fields[f]['span']) for f in fields)
if mapping_space:
m_span = OrderedDict((f,d_mapping[f]['span']) for f in d_mapping)
else:
m_span = {}
m_trials = OrderedDict((u,d_trials[u]['multiplicity']) for u in trials)
m_tests = OrderedDict((v,d_tests[v]['multiplicity']) for v in tests)
lengths_trials = OrderedDict((u,LengthDofTrial(u)) for u in trials)
lengths_tests = OrderedDict((v,LengthDofTest(v)) for v in tests)
lengths_outer_tests = OrderedDict((v,LengthOuterDofTest(v)) for v in tests)
lengths_inner_tests = OrderedDict((v,LengthInnerDofTest(v)) for v in tests)
lengths_fields = OrderedDict((f,LengthDofTest(f)) for f in fields)
# ...........................................................................................
quad_length = LengthQuadrature()
el_length = LengthElement()
lengths = [el_length, quad_length]
# ...........................................................................................
geo = GeometryExpressions(mapping, nderiv)
g_coeffs = {f:[MatrixGlobalBasis(i,i) for i in expand([f])] for f in fields}
l_mats = BlockStencilMatrixLocalBasis(trials, tests, terminal_expr, dim, tag)
g_mats = BlockStencilMatrixGlobalBasis(trials, tests, pads, m_tests, terminal_expr, l_mats.tag)
# ...........................................................................................
if quad_order is not None:
ind_quad = index_quad.set_range(stop=Tuple(*quad_order))
else:
ind_quad = index_quad.set_range(stop=quad_length)
ind_element = index_element.set_range(stop=el_length)
if mapping_space:
ind_dof_test = index_dof_test.set_range(stop=Tuple(*[d+1 for d in list(d_mapping.values())[0]['degrees']]))
# ...........................................................................................
eval_mapping = EvalMapping(ind_quad, ind_dof_test, list(d_mapping.values())[0]['global'],
mapping, geo, mapping_space, nderiv, mask, is_rational_mapping)
eval_fields = []
for f in fields:
f_ex = expand([f])
coeffs = [CoefficientBasis(i) for i in f_ex]
l_coeffs = [MatrixLocalBasis(i) for i in f_ex]
ind_dof_test = index_dof_test.set_range(stop=lengths_fields[f]+1)
eval_field = EvalField(atomic_expr_field[f], ind_quad, ind_dof_test, d_fields[f]['global'],
coeffs, l_coeffs, g_coeffs[f], [f], mapping, nderiv, mask)
eval_fields += [eval_field]
g_stmts = []
if mapping_space:
g_stmts.append(eval_mapping)
g_stmts += [*eval_fields]
g_stmts_texpr = []
# sort tests and trials by their space type
test_groups = regroup(tests)
trial_groups = regroup(trials)
# expand every VectorFunction into IndexedVectorFunctions
ex_tests = expand(tests)
ex_trials = expand(trials)
#=========================================================begin kernel======================================================
for _, sub_tests in test_groups:
for _, sub_trials in trial_groups:
tests_indices = [ex_tests.index(i) for i in expand(sub_tests)]
trials_indices = [ex_trials.index(i) for i in expand(sub_trials)]
sub_terminal_expr = terminal_expr[tests_indices,trials_indices]
if is_zero(sub_terminal_expr):
continue
q_basis_tests = OrderedDict((v,d_tests[v]['global']) for v in sub_tests)
q_basis_trials = OrderedDict((u,d_trials[u]['global']) for u in sub_trials)
m_tests = OrderedDict((v,d_tests[v]['multiplicity']) for v in sub_tests)
m_trials = OrderedDict((u,d_trials[u]['multiplicity']) for u in sub_trials)
tests_degree = OrderedDict((v,d_tests[v]['degrees']) for v in sub_tests)
trials_degrees = OrderedDict((u,d_trials[u]['degrees']) for u in sub_trials)
bs = OrderedDict()
es = OrderedDict()
for v in sub_tests:
v_str = str(SymbolicExpr(v))
bs[v] = variables(('b_{v}_1, b_{v}_2, b_{v}_3'.format(v=v_str)), dtype='int')[:dim] if is_parallel else [S.Zero]*dim
es[v] = variables(('e_{v}_1, e_{v}_2, e_{v}_3'.format(v=v_str)), dtype='int')[:dim] if is_parallel else [S.Zero]*dim
if all(a==1 for a in m_tests[sub_tests[0]]+m_trials[sub_trials[0]]):
stmts = []
for v in sub_tests+sub_trials:
stmts += construct_logical_expressions(v, nderiv)
l_sub_mats = BlockStencilMatrixLocalBasis(sub_trials, sub_tests, sub_terminal_expr, dim, l_mats.tag,
tests_degree=tests_degree, trials_degree=trials_degrees,
tests_multiplicity=m_tests, trials_multiplicity=m_trials)
# Instructions needed to retrieve the precomputed values of the
# fields (and their derivatives) at a single quadrature point
stmts += flatten([eval_field.inits for eval_field in eval_fields])
loop = Loop((l_quad, *q_basis_tests.values(), *q_basis_trials.values(), geo), ind_quad, stmts=stmts, mask=mask)
loop = Reduce('+', ComputeKernelExpr(sub_terminal_expr, weights=False), ElementOf(l_sub_mats), loop)
# ... loop over trials
length = Tuple(*[d+1 for d in trials_degrees[sub_trials[0]]])
ind_dof_trial = index_dof_trial.set_range(stop=length)
loop1 = Loop((), ind_dof_trial, [loop])
# ... loop over tests
length = Tuple(*[d+1 for d in tests_degree[sub_tests[0]]])
ends = Tuple(*[d+1-e for d,e in zip(tests_degree[sub_tests[0]], es[sub_tests[0]])])
starts = Tuple(*bs[sub_tests[0]])
ind_dof_test = index_dof_test.set_range(start=starts, stop=ends, length=length)
loop = Loop((), ind_dof_test, [loop1])
# ...
body = (Reset(l_sub_mats), loop)
stmts = Block(body)
g_stmts += [stmts]
if is_parallel:
ln = Tuple(*[d-1 for d in tests_degree[sub_tests[0]]])
start_expr = TensorMax(TensorMul(TensorAdd(TensorMul(ind_element, Tuple(*[-1]*dim)), ln), Tuple(*b0s)),Tuple(*[S.Zero]*dim))
start_expr = TensorAssignExpr(Tuple(*bs[sub_tests[0]]), start_expr)
end_expr = TensorMax(TensorMul(TensorAdd(TensorMul(Tuple(*[-1]*dim), ind_element.length), TensorAdd(ind_element, Tuple(*tests_degree[sub_tests[0]]))), Tuple(*e0s)), Tuple(*[S.Zero]*dim))
end_expr = TensorAssignExpr(Tuple(*es[sub_tests[0]]), end_expr)
g_stmts_texpr += [start_expr, end_expr]
else:
l_stmts = []
mask_inner = [[False, True] for i in range(dim)]
for mask_inner_i in product(*mask_inner):
mask_inner_i = Tuple(*mask_inner_i)
not_mask_inner_i = Tuple(*[not i for i in mask_inner_i])
stmts = []
for v in sub_tests+sub_trials:
stmts += construct_logical_expressions(v, nderiv)
# Instructions needed to retrieve the precomputed values of the
# fields (and their derivatives) at a single quadrature point
stmts += flatten([eval_field.inits for eval_field in eval_fields])
multiplicity = Tuple(*m_tests[sub_tests[0]])
length = Tuple(*[(d+1)%m if T else (d+1)//m for d,m,T in zip(tests_degree[sub_tests[0]], multiplicity, mask_inner_i)])
ind_outer_dof_test = index_outer_dof_test.set_range(stop=length)
outer = Tuple(*[d//m for d,m in zip(tests_degree[sub_tests[0]], multiplicity)])
outer = TensorAdd(TensorMul(ind_outer_dof_test, not_mask_inner_i),TensorMul(outer, mask_inner_i))
l_sub_mats = BlockStencilMatrixLocalBasis(sub_trials, sub_tests, sub_terminal_expr, dim, l_mats.tag, outer=outer,
tests_degree=tests_degree, trials_degree=trials_degrees,
tests_multiplicity=m_tests, trials_multiplicity=m_trials)
loop = Loop((l_quad, *q_basis_tests.values(), *q_basis_trials.values(), geo), ind_quad, stmts=stmts, mask=mask)
loop = Reduce('+', ComputeKernelExpr(sub_terminal_expr, weights=False), ElementOf(l_sub_mats), loop)
# ... loop over trials
length_t = Tuple(*[d+1 for d in trials_degrees[sub_trials[0]]])
ind_dof_trial = index_dof_trial.set_range(stop=length_t)
loop = Loop((), ind_dof_trial, [loop])
rem_length = Tuple(*[(d+1)-(d+1)%m for d,m in zip(tests_degree[sub_tests[0]], multiplicity)])
ind_inner_dof_test = index_inner_dof_test.set_range(stop=multiplicity)
expr1 = TensorAdd(TensorMul(ind_outer_dof_test, multiplicity),ind_inner_dof_test)
expr2 = TensorAdd(rem_length, ind_outer_dof_test)
expr = TensorAssignExpr(index_dof_test, TensorAdd(TensorMul(expr1,not_mask_inner_i),TensorMul(expr2, mask_inner_i)))
loop = Loop((expr,), ind_inner_dof_test, [loop], mask=mask_inner_i)
loop = Loop((), ind_outer_dof_test, [loop])
l_stmts += [loop]
g_stmts += [Reset(l_sub_mats), *l_stmts]
#=========================================================end kernel=========================================================
# ... loop over global elements
loop = Loop((g_quad, *g_span.values(), *m_span.values(), *f_span.values(), *g_stmts_texpr),
ind_element, stmts=g_stmts, mask=mask)
body = [Reduce('+', l_mats, g_mats, loop)]
# ...
args = OrderedDict()
args['tests_basis'] = tuple(d_tests[v]['global'] for v in tests)
args['trial_basis'] = tuple(d_trials[u]['global'] for u in trials)
args['spans'] = g_span.values()
args['quads'] = g_quad
args['tests_degrees'] = lengths_tests
args['trials_degrees'] = lengths_trials
args['quads_degree'] = lengths
args['global_pads'] = pads
args['local_pads'] = Pads(tests, trials)
args['mats'] = [l_mats, g_mats]
if mapping_space:
args['mapping'] = eval_mapping.coeffs
args['mapping_degrees'] = LengthDofTest(list(d_mapping.keys())[0])
args['mapping_basis'] = list(d_mapping.values())[0]['global']
args['mapping_spans'] = list(d_mapping.values())[0]['span']
if fields:
args['f_span'] = f_span.values()
args['f_coeffs'] = flatten(list(g_coeffs.values()))
args['field_basis'] = tuple(d_fields[f]['global'] for f in fields)
args['fields_degrees'] = lengths_fields.values()
args['f_pads'] = [f.pads for f in eval_fields]
fields = tuple(f.base if isinstance(f, IndexedVectorFunction) else f for f in fields)
args['fields'] = tuple(dict.fromkeys(fields))
if constants:
args['constants'] = constants
args['starts'] = b0s
args['ends'] = e0s
local_vars = []
node = DefNode('assembly', args, local_vars, body, 'bilinearform')
return node | 0929f83f1cfcc6424b00d5b931017ec5af6ffaee | 15,251 |
import math
def asen(x):
"""
    The arcsine of a number.
    The result is expressed in radians.
    .. math::
        \\arcsin(x)
    Args:
        x (float): Argument.
    Returns:
        The angle expressed in radians.
"""
return math.asin(x) | c52f7fc504c1eb02eb240378b14b19b0752c7299 | 15,253 |
from collections import namedtuple


def get_mock_response(status_code: int, reason: str, text: str):
"""
Return mock response.
:param status_code: An int representing status_code.
:param reason: A string to represent reason.
:param text: A string to represent text.
:return: MockResponse object.
"""
MockResponse = namedtuple("MockResponse", ["status_code", "reason", "text"])
mock_response = MockResponse(status_code, reason, text)
return mock_response | e1743755c64796e5644a00e26414fc16c110c1b6 | 15,254 |
import traceback
from types import TracebackType
def get_user_stack_depth(tb: TracebackType, f: StackFilter) -> int:
"""Determines the depth of the stack within user-code.
Takes a 'StackFilter' function that filters frames by whether
they are in user code or not and returns the number of frames
in the traceback that are within user code.
The return value can be negated for use with the limit argument
to functions in the traceback module.
"""
depth = 0
for s, _ in traceback.walk_tb(tb):
if depth or f(s):
depth += 1
return depth | e02f1ca3ee6aeb765a09806ecded5919a28b5df0 | 15,255 |
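A small sketch of how the negated depth feeds the `limit` argument of the traceback module, assuming a simple illustrative frame filter (`StackFilter` is just a predicate over frames):

```python
import sys
import traceback

def _is_user_frame(frame) -> bool:
    # Illustrative filter: treat frames outside site-packages as user code.
    return "site-packages" not in frame.f_code.co_filename

try:
    1 / 0
except ZeroDivisionError:
    tb = sys.exc_info()[2]
    depth = get_user_stack_depth(tb, _is_user_frame)
    # A negative limit keeps only the innermost `depth` frames.
    print("".join(traceback.format_tb(tb, limit=-depth)))
```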
def unused(attr):
"""
    Checks whether an attribute is not set (i.e. has no value).
"""
if attr is None:
return True
else:
return False | febc225f3924fdb9de6cfbf7eba871cce5b6e374 | 15,256 |
def compute_npipelines_xgbrf_5_6():
"""Compute the total number of XGB/RF pipelines evaluated"""
df = _load_pipelines_df()
npipelines_rf = np.sum(df['pipeline'].str.contains('random_forest'))
npipelines_xgb = np.sum(df['pipeline'].str.contains('xgb'))
total = npipelines_rf + npipelines_xgb
result = pd.DataFrame(
[npipelines_rf, npipelines_xgb, total],
index=['RF', 'XGB', 'total'],
columns=['pipelines']
)
fn = OUTPUT_DIR.joinpath('5_6_npipelines_xgbrf.csv')
result.to_csv(fn)
return result | 7e7b9ea536564b4796dcf9eea6866a8c64ce0c4e | 15,257 |
def get_evaluate_SLA(SLA_terms, topology, evaluate_individual):
"""Generate a function to evaluate if the flow reliability and latency requirements are met
Args:
SLA_terms {SLA} -- an SLA object containing latency and bandwidth requirements
topology {Topology} -- the reference topology object for the flow
evaluate_individual {function}: a cost function, which returns the metric for a given individual
individual {DEAP individual (list)} -- the individual
Returns:
evaluate_SLA {Function}: a function returning True if the requirements are met, False otherwise
"""
def evaluate_SLA(individual):
evaluation = evaluate_individual(individual)
if evaluation[3] > SLA_terms.latency or evaluation[1] > 1:
return False
return True
return evaluate_SLA | 81fdaa07e3fc21066ab734bef0cc71457d40fb5b | 15,258 |
def latest_consent(user, research_study_id):
"""Lookup latest valid consent for user
:param user: subject of query
:param research_study_id: limit query to respective value
If latest consent for user is 'suspended' or 'deleted', this function
will return None. See ``consent_withdrawal_dates()`` for that need.
:returns: the most recent consent based on given criteria, or None
if no match is located
"""
# consents are ordered desc(acceptance_date)
for consent in user.valid_consents:
if consent.research_study_id != research_study_id:
continue
if consent.status == 'consented':
return consent
return None | 2295b592a0c1fdaf3b1ed21e065f39e73a4bb622 | 15,259 |
def microarray():
""" Fake microarray dataframe
"""
data = np.arange(9).reshape(3, 3)
cols = pd.Series(range(3), name='sample_id')
ind = pd.Series([1058685, 1058684, 1058683], name='probe_id')
return pd.DataFrame(data, columns=cols, index=ind) | 7bca3cf21f2942819c62c597af8761ec04fa91ba | 15,260 |
from typing import Tuple
def find_next_tag(template: str, pointer: int, left_delimiter: str) -> Tuple[str, int]:
"""Find the next tag, and the literal between current pointer and that tag"""
split_index = template.find(left_delimiter, pointer)
if split_index == -1:
return (template[pointer:], len(template))
return (template[pointer:split_index], split_index) | 82d091ef6738ffbe93e8ea8a0096161fc359e9cb | 15,261 |
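An illustrative loop that uses find_next_tag to walk a template, printing each literal chunk before a '{{' delimiter (the closing '}}' handling below is an assumption made for the example):

```python
template = "Hello {{name}}, welcome to {{place}}!"
pointer = 0
while pointer < len(template):
    literal, pointer = find_next_tag(template, pointer, "{{")
    print(repr(literal))
    # Skip past the tag body before searching for the next literal.
    end = template.find("}}", pointer)
    pointer = len(template) if end == -1 else end + 2
```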
def hasNLines(N,filestr):
"""returns true if the filestr has at least N lines and N periods (~sentences)"""
lines = 0
periods = 0
for line in filestr:
lines = lines+1
periods = periods + len(line.split('.'))-1
if lines >= N and periods >= N:
            return True
    return False | d75c4d241d7c4364c410f2dbae06f1c4d439b14e | 15,262 |
import numpy as np
import cvxpy as cp


def CAMNS_LP(xs, N, lptol=1e-8, exttol=1e-8, verbose=True):
"""
Solve CAMNS problem via reduction to Linear Programming
Arguments:
----------
xs : np.ndarray of shape (M, L)
Observation matrix consisting of M observations
    N : int
        Number of sources to recover
lptol : float
Tolerance for Linear Programming problem
exttol : float
Tolerance for extreme point check
verbose : bool
Whether to print information about progress
Returns:
--------
np.ndarray of shape (N, L)
Estimated source matrix
"""
M, L = xs.shape # Extract dimensions
xs = xs.T
d = np.mean(xs, axis=1, keepdims=True)
C, _, _ = np.linalg.svd(xs - d, full_matrices=False)
C = C[:, :(N - 1)] # Truncate the redundant one
# Step 1. Preparing variables
B = np.diag(np.ones(L))
l = 0 # Number of extracted sources
S = np.zeros((0, L)) # Source matrix
epoch = 1
while l < N:
if verbose:
print("Epoch {}:".format(epoch))
print("=" * 58)
epoch += 1
# Step 2. Choosing random vector and generating direction r
w = np.random.randn(L)
r = B @ w
# Step 3. Solving linear programming problems using CVXPY
alpha1_star = cp.Variable(C.shape[1])
alpha2_star = cp.Variable(C.shape[1])
problem1 = cp.Problem(cp.Minimize(
r.T @ (C @ alpha1_star)), [C @ alpha1_star + d.flatten() >= 0])
problem2 = cp.Problem(cp.Maximize(
r.T @ (C @ alpha2_star)), [C @ alpha2_star + d.flatten() >= 0])
if verbose:
print("\tLaunching LP solver 1")
p_star = problem1.solve()
if verbose:
print("\tLaunching LP solver 2")
q_star = problem2.solve()
if verbose:
print("\tLP solvers have finished, checking results")
alpha1_star = np.expand_dims(alpha1_star.value, axis=1)
alpha2_star = np.expand_dims(alpha2_star.value, axis=1)
s1 = C @ alpha1_star + d
s2 = C @ alpha2_star + d
# Step 4. Checking results (with augmentations from MATLAB implementation)
if l == 0:
if is_extreme_point(C, alpha1_star, d, exttol):
S = np.append(S, [s1.squeeze()], axis=0)
if is_extreme_point(C, alpha2_star, d, exttol):
S = np.append(S, [s2.squeeze()], axis=0)
else:
if np.abs(p_star) / (np.linalg.norm(r) * np.linalg.norm(s1)) >= lptol:
if is_extreme_point(C, alpha1_star, d, exttol):
S = np.append(S, [s1.squeeze()], axis=0)
if np.abs(q_star) / (np.linalg.norm(r) * np.linalg.norm(s2)) >= lptol:
if is_extreme_point(C, alpha2_star, d, exttol):
S = np.append(S, [s2.squeeze()], axis=0)
# Step 5. Updating l
l = S.shape[0]
if verbose:
print("\tRetrieved {}/{} sources\n".format(l, N))
# Step 6. Updating B
Q1, R1 = np.linalg.qr(S.T)
B = np.diag(np.ones(L)) - Q1 @ Q1.T
# Step 7 is kinda implicit, as it is hidden in the loop condition
# Yay, we're done!
return S | e7f0416e0fa6949e50341b7a0009e574ecf6b0be | 15,263 |
import numpy as np


def hamiltonian_c(n_max, in_w, e, d):
"""apply tridiagonal real Hamiltonian matrix to a complex vector
Parameters
----------
n_max : int
maximum n for cutoff
in_w : np.array(complex)
state in
d : np.array(complex)
diagonal elements of Hamiltonian
e : np.array(complex)
off diagonal elements of Hamiltonian
Returns
-------
out_w : np.array(complex)
application of Hamiltonian to vector
"""
n_max = int(n_max)
out_w = in_w[:n_max]*d[:n_max]
out_w[:(n_max-1)] += e[:(n_max-1)]*in_w[1:n_max]
out_w[1:n_max] += e[:n_max-1] * in_w[:n_max-1]
return out_w | 9b78d86592622100322d7a4ec031c1bd531ca51a | 15,264 |
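A quick illustrative check with a 3x3 tridiagonal Hamiltonian (diagonal d, off-diagonal e) applied to the second basis vector:

```python
import numpy as np

d = np.array([1.0, 2.0, 3.0], dtype=complex)   # diagonal entries
e = np.array([0.5, 0.5], dtype=complex)        # off-diagonal entries
v = np.array([0.0, 1.0, 0.0], dtype=complex)   # second basis vector

# For this matrix, H @ v should be [0.5, 2.0, 0.5].
print(hamiltonian_c(3, v, e, d))
```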
from random import randint


def unique_badge():
""" keep trying until a new random badge number has been found to return """
rando = str(randint(1000000000, 9999999999))
badge = User.query.filter_by(badge=rando).first()
print("rando badge query = {}".format(badge))
    if badge:
        # A user already holds this badge number -- recurse and return the new unique value
        return unique_badge()
    return rando | 64a60dd420516bdc08a8ac2102b83e0cf92086ef | 15,265 |
from talib import MIDPRICE


def mid_price(high, low, timeperiod: int = 14):
    """Midpoint Price over period
:param high:
:param low:
:param timeperiod:
:return:
"""
return MIDPRICE(high, low, timeperiod) | 7092d057da86b12b10da6928367aee705e14569a | 15,266 |
import pickle
def load_pyger_pickle(filename):
""" Load pyger data from pickle file back into object compatible with pyger plotting methods
:param filename: File name of pickled output from calc_constraints()
This is only meant to be used to read in the initial constraints object produced by
    calc_constraints(), not the cooldown data produced by calc_constraints2(). The data produced
by calc_constraints2() should be able to be read in with a simple pickle.load() function.
"""
class saved_pyger_data(object):
def __init__(self, pickled_constraint):
for key in pickled_constraint:
self.__dict__.update({key:pickled_constraint[key]})
rawdata = pickle.load(open(filename,'rb'))
pyger_compatible_data = {}
for name in list(rawdata.keys()):
constraint = saved_pyger_data(rawdata[name])
pyger_compatible_data.update({name:constraint})
return pyger_compatible_data | 23f4d4f2e3cae514ed65d62035277417c9b246a8 | 15,267 |
from collections import OrderedDict
def createitemdict(index, tf2info):
"""Take a TF2 item and return a custom dict with a limited number of
keys that are used for search"""
item = tf2info.items[index]
name = item['item_name']
classes = tf2api.getitemclasses(item)
attributes = tf2api.getitemattributes(item,
tf2info.attributes, tf2info.effects)
storeprice = tf2api.getstoreprice(item, tf2info.storeprices)
backpackprice = tf2api.getmarketprice(item, tf2info.backpackprices)
tags = tf2api.getitemtags(item)
# Sort blueprints by crafting chance
blueprint = sorted(tf2info.blueprints[index],
key=lambda k: k['chance'], reverse=True)
description = ''
if 'bundle' in tags and storeprice:
descriptions = tf2info.bundles[index]['descriptions']
text = []
items = []
for i in range(len(descriptions)):
key = str(i)
value = descriptions[key]['value']
if value in tf2info.itemsbyname:
items.append(value)
else:
text.append(value)
description = '{}---{}'.format('\n'.join(text), '\n'.join(items))
elif 'item_description' in item:
description = item['item_description']
if 'bundle' in tags and name in tf2info.itemsets:
description += '---' + '\n'.join(tf2info.itemsets[name]['items'])
levels = OrderedDict.fromkeys(
str(item[i]) for i in ('min_ilevel', 'max_ilevel'))
level = 'Level {} {}'.format('-'.join(levels), item['item_type_name'])
image, image_large = (url and url.replace(
'http://media.steampowered.com', 'https://steamcdn-a.akamaihd.net'
) for url in (item['image_url'], item['image_url_large']))
itemdict = {'index': index,
'name': name,
'image': image,
'image_large': image_large,
'description': description,
'level': level,
'attributes': attributes,
'classes': classes,
'tags': tags,
'storeprice': storeprice,
'marketprice': {'backpack.tf': backpackprice},
'blueprints': blueprint}
if 'paint' in tags:
paintvalue = item['attributes'][0]['value']
# Ignore Paint Tool
if paintvalue != 0:
itemdict['image'] = itemdict['image_large'] = (
'/images/paints/Paint_Can_{}.png'.format(paintvalue))
return itemdict | 9f9eceb588c7dc031bab633eadc139095806d38a | 15,269 |
def port_list(request, board_id):
"""Get ports attached to a board."""
return iotronicclient(request).port.list() | 0fcf7fc4db60678c7e5ec4606e9b12174966912f | 15,270 |
import numpy as np


def pure_python_npairs_per_object_3d(sample1, sample2, rbins, period=None):
    """Brute-force reference pair counter: for each point in sample1, count the points
    in sample2 within each radial bin, using the minimum-image convention when a
    periodic box size is given.
    """
if period is None:
xperiod, yperiod, zperiod = np.inf, np.inf, np.inf
else:
xperiod, yperiod, zperiod = period, period, period
npts1, npts2, num_rbins = len(sample1), len(sample2), len(rbins)
counts = np.zeros((npts1, num_rbins), dtype=int)
for i in range(npts1):
for j in range(npts2):
dx = sample1[i, 0] - sample2[j, 0]
dy = sample1[i, 1] - sample2[j, 1]
dz = sample1[i, 2] - sample2[j, 2]
if dx > xperiod/2.:
dx = xperiod - dx
elif dx < -xperiod/2.:
dx = -(xperiod + dx)
if dy > yperiod/2.:
dy = yperiod - dy
elif dy < -yperiod/2.:
dy = -(yperiod + dy)
if dz > zperiod/2.:
dz = zperiod - dz
elif dz < -zperiod/2.:
dz = -(zperiod + dz)
d = np.sqrt(dx*dx + dy*dy + dz*dz)
for irbin, r in enumerate(rbins):
if d < r:
counts[i, irbin] += 1
return counts | 98b45bbbf50eea9e4dfa39cfd9093ec6fc0c0459 | 15,272 |
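A small illustrative run counting neighbours of each point in a random unit periodic box:

```python
import numpy as np

rng = np.random.RandomState(0)
sample1 = rng.uniform(0, 1, size=(5, 3))
sample2 = rng.uniform(0, 1, size=(20, 3))
rbins = np.array([0.1, 0.25, 0.5])

# counts[i, j] = number of points in sample2 within rbins[j] of sample1[i].
counts = pure_python_npairs_per_object_3d(sample1, sample2, rbins, period=1.0)
print(counts.shape)  # (5, 3)
```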
import numpy as np


def cal_aic(X, y_pred, centers, weight=None):
"""Ref: https://en.wikipedia.org/wiki/Akaike_information_criterion
"""
if weight is None:
weight = np.ones(X.shape[0], dtype=X.dtype)
para_num = centers.shape[0] * (X.shape[1] + 1)
return cal_log_likelihood(X, y_pred, centers, weight) - para_num | fd6f7019dcd6aec7efb21ff159541cee0e56bdfb | 15,273 |
def get_gid(cfg, groupname):
"""
[description]
gets and returns the GID for a given groupname
[parameter info]
required:
cfg: the config object. useful everywhere
groupname: the name of the group we want to find the GID for
[return value]
returns an integer representing the GID of the group if successful
returns False if unsuccessful
"""
# validate/construct/get the realm.site_id.domain data
fqgn = mothership.validate.v_get_fqn(cfg, name=groupname)
groupname, realm, site_id, domain = mothership.validate.v_split_fqn(cfg, fqgn)
# gather group data
g = cfg.dbsess.query(Groups).\
filter(Groups.groupname==groupname).\
filter(Groups.site_id==site_id).\
filter(Groups.realm==realm).first()
if g:
return g.gid
else:
return False | ee139abfe8904de1983e505db7bf882580768080 | 15,274 |
from typing import Set
from typing import Dict
from typing import Any
def _elements_from_data(
edge_length: float,
edge_width: float,
layers: Set[TemperatureName],
logger: Logger,
portion_covered: float,
pvt_data: Dict[Any, Any],
x_resolution: int,
y_resolution: int,
) -> Any:
"""
Returns mapping from element coordinate to element based on the input data.
:param edge_length:
The maximum length of an edge element along the top and bottom edges of the
panel, measured in meters.
:param edge_width:
The maximum width of an edge element along the side edges of the panel, measured
in meters.
:param layers:
The `set` of layers to include in the system.
:param logger:
The :class:`logging.Logger` logger instance used for the run.
:param portion_covered:
The portion of the PVT absorber that is covered with PV cells. The uncovered
section is mapped as solar absorber only with glazing as appropriate.
:param pvt_data:
The raw PVT data, extracted from the data file.
:param x_resolution:
The x resolution for the run.
:param y_resolution:
The y resolution for the run.
:return:
A mapping between the element coordinates and the element for all elements
within the panel.
"""
    # * If 1x1, warn that 1x1 resolution is deprecated and should not really be used.
if x_resolution == 1 and y_resolution == 1:
        logger.warning(
            "Running the system at a 1x1 resolution is deprecated. Consider running "
            "at a higher resolution."
        )
return {
element.ElementCoordinates(0, 0): element.Element(
TemperatureName.absorber in layers,
TemperatureName.glass in layers,
pvt_data["pvt_collector"]["length"],
True,
TemperatureName.pv in layers,
TemperatureName.upper_glass in layers,
pvt_data["pvt_collector"]["width"],
0,
0,
0,
)
}
# Extract the necessary parameters from the system data.
try:
number_of_pipes = pvt_data["absorber"]["number_of_pipes"]
except KeyError as e:
raise MissingParametersError(
"Element", "The number of pipes attached to the absorber must be supplied."
) from None
try:
panel_length = pvt_data["pvt_collector"]["length"]
except KeyError as e:
raise MissingParametersError(
"Element", "PVT panel length must be supplied."
) from None
try:
panel_width = pvt_data["pvt_collector"]["width"]
except KeyError as e:
raise MissingParametersError(
"Element", "PVT panel width must be supplied."
) from None
try:
bond_width = pvt_data["bond"]["width"]
except KeyError as e:
raise MissingParametersError(
"Element", "Collector-to-pipe bond width must be supplied."
) from None
# * Determine the spacing between the pipes.
pipe_spacing = (x_resolution - number_of_pipes) / (number_of_pipes + 1)
if int(pipe_spacing) != pipe_spacing:
raise InvalidParametersError(
"The resolution supplied results in an uneven pipe distribution.",
"pipe_spcaing",
)
# * Determine the indicies of elements that have pipes attached.
pipe_positions = list(
range(int(pipe_spacing), x_resolution - 2, int(pipe_spacing) + 1)
)
# Determine whether the width of the elements is greater than or less than the edge
# width and adjust accordingly.
nominal_element_width: float = (
panel_width - number_of_pipes * bond_width - 2 * edge_width
) / (x_resolution - number_of_pipes - 2)
if nominal_element_width < edge_width:
nominal_element_width = (panel_width - number_of_pipes * bond_width) / (
x_resolution - number_of_pipes
)
edge_width = nominal_element_width
# Likewise, determine whether the nominal element height is greater than the edge
# height and adjust accordingly.
nominal_element_length: float = (panel_length - 2 * edge_length) / (
y_resolution - 2
)
if nominal_element_length < edge_length:
nominal_element_length = panel_length / y_resolution
edge_length = nominal_element_length
# * Instantiate the array of elements.
# Construct the elemented array based on the arguments.
pv_coordinate_cutoff = int(y_resolution * portion_covered)
try:
elements = {
element.ElementCoordinates(
x_coordinate(element_number, x_resolution),
y_coordinate(element_number, x_resolution),
): element.Element(
absorber=TemperatureName.absorber in layers,
glass=TemperatureName.glass in layers,
length=edge_length
if y_coordinate(element_number, x_resolution) in {0, y_resolution - 1}
else nominal_element_length,
pipe=x_coordinate(element_number, x_resolution) in pipe_positions
if TemperatureName.pipe in layers
else False,
pv=y_coordinate(element_number, x_resolution) <= pv_coordinate_cutoff
if TemperatureName.pv in layers
else False,
upper_glass=TemperatureName.upper_glass in layers,
            # Use the edge width if the element is an edge element.
width=edge_width
if x_coordinate(element_number, x_resolution) in {0, x_resolution - 1}
# Otherwise, use the bond width if the element is a pipe element.
else bond_width
if x_coordinate(element_number, x_resolution) in pipe_positions
# Otherwise, use the nominal element width.
else nominal_element_width,
x_index=x_coordinate(element_number, x_resolution),
y_index=y_coordinate(element_number, x_resolution),
pipe_index=pipe_positions.index(
x_coordinate(element_number, x_resolution)
)
if x_coordinate(element_number, x_resolution) in pipe_positions
else None,
)
for element_number in range(x_resolution * y_resolution)
}
except KeyError as e:
raise MissingParametersError(
"PVT", f"Missing parameters when instantiating the PV-T system: {str(e)}"
) from None
return elements | 80bef4fc80a22da823365fcdc756b6e35d19cdf2 | 15,275 |
def GetControllers(wing_serial):
"""Returns control gain matrices for any kite serial number."""
if wing_serial == m.kWingSerial01:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.209, -0.209, 0.0, 0.0, 0.009, 0.009, -0.005, 0.017]
)
longitudinal_gains_min_airspeed = (
[[0.005, 0.034, -0.716, -0.333, 0.311],
[-9.239, -68.061, 1361.046, 641.777, -589.016]]
)
longitudinal_gains_nominal_airspeed = (
[[0.014, 0.013, -0.509, -0.168, 0.316],
[-6.676, -6.529, 234.939, 80.993, -147.915]]
)
longitudinal_gains_max_airspeed = (
[[0.009, 0.007, -0.401, -0.136, 0.316],
[-1.965, -1.585, 79.966, 28.908, -65.259]]
)
lateral_gains_min_airspeed = (
[[1.477, -1.589, -0.434, 0.296, -0.75, 0.329],
[0.224, 1.045, 0.065, -0.554, -0.429, -0.282],
[-18215.48, -42217.142, -2192.239, 28689.136, 25162.461, 12500.22]]
)
lateral_gains_nominal_airspeed = (
[[1.063, -1.48, -0.234, 0.195, -0.772, 0.317],
[0.38, 1.123, 0.036, -0.386, -0.609, -0.376],
[-6604.64, -11507.484, -340.275, 5156.255, 9047.472, 4427.592]]
)
lateral_gains_max_airspeed = (
[[0.982, -1.395, -0.198, 0.149, -0.786, 0.309],
[0.27, 1.107, 0.027, -0.287, -0.613, -0.391],
[-2275.783, -4917.11, -119.56, 1730.983, 4062.059, 2033.279]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.587, 0.004, 0.11], [-0.03, -6.079, -0.026], [0.243, 0.006, -1.06]]
)
elif wing_serial == m.kWingSerial04Hover:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.211, -0.211, 0.0, 0.0, 0.011, 0.011, 0.005, 0.008]
)
longitudinal_gains_min_airspeed = (
[[-0.002, 0.033, -0.73, -0.357, 0.311],
[4.546, -64.827, 1390.055, 688.515, -589.338]]
)
longitudinal_gains_nominal_airspeed = (
[[0.011, 0.014, -0.548, -0.182, 0.316],
[-5.284, -6.728, 254.985, 87.909, -148.241]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.441, -0.144, 0.316],
[-1.676, -1.659, 89.199, 30.631, -65.417]]
)
lateral_gains_min_airspeed = (
[[1.44, -1.617, -0.409, 0.296, -0.743, 0.333],
[0.254, 1.044, 0.06, -0.551, -0.432, -0.277],
[-19794.459, -43094.943, -1997.427, 28857.76, 25564.594, 12475.401]]
)
lateral_gains_nominal_airspeed = (
[[1.036, -1.502, -0.224, 0.194, -0.768, 0.32],
[0.433, 1.136, 0.033, -0.39, -0.614, -0.374],
[-7324.836, -11932.75, -305.45, 5272.765, 9170.382, 4461.79]]
)
lateral_gains_max_airspeed = (
[[0.956, -1.415, -0.192, 0.148, -0.783, 0.311],
[0.323, 1.123, 0.025, -0.291, -0.617, -0.389],
[-2588.374, -5128.587, -109.03, 1771.109, 4110.266, 2055.664]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.92, -0.002, 0.128],
[-0.027, -5.506, -0.024],
[0.252, 0.005, -1.083]]
)
elif wing_serial == m.kWingSerial04Crosswind:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.206, -0.206, 0.0, 0.0, 0.006, 0.006, 0.004, 0.009]
)
longitudinal_gains_min_airspeed = (
[[-0.004, 0.033, -0.743, -0.369, 0.311],
[6.879, -65.952, 1416.656, 711.871, -589.814]]
)
longitudinal_gains_nominal_airspeed = (
[[0.011, 0.014, -0.562, -0.187, 0.316],
[-5.048, -6.949, 262.084, 90.532, -148.346]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.451, -0.146, 0.316],
[-1.652, -1.714, 91.319, 31.157, -65.442]]
)
lateral_gains_min_airspeed = (
[[1.473, -1.597, -0.421, 0.294, -0.746, 0.331],
[0.237, 1.043, 0.061, -0.559, -0.431, -0.28],
[-19140.086, -42177.964, -1957.253, 29074.862, 25405.542, 12435.884]]
)
lateral_gains_nominal_airspeed = (
[[1.063, -1.486, -0.228, 0.193, -0.769, 0.319],
[0.403, 1.124, 0.033, -0.391, -0.613, -0.374],
[-6972.675, -11547.427, -303.445, 5257.572, 9146.686, 4416.334]]
)
lateral_gains_max_airspeed = (
[[0.983, -1.401, -0.195, 0.148, -0.782, 0.311],
[0.294, 1.108, 0.025, -0.291, -0.617, -0.389],
[-2439.793, -4940.323, -108.619, 1761.451, 4108.271, 2029.644]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.721, -0.001, 0.125],
[-0.028, -5.332, -0.022],
[0.243, 0.005, -1.048]]
)
elif wing_serial == m.kWingSerial05Hover:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.204, -0.204, 0.0, 0.0, 0.004, 0.004, 0.004, 0.007]
)
longitudinal_gains_min_airspeed = (
[[-0.003, 0.033, -0.732, -0.353, 0.311],
[5.756, -65.225, 1393.028, 681.0, -589.458]]
)
longitudinal_gains_nominal_airspeed = (
[[0.011, 0.014, -0.552, -0.181, 0.316],
[-5.157, -6.823, 257.066, 87.46, -148.262]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.445, -0.143, 0.316],
[-1.655, -1.692, 90.117, 30.558, -65.423]]
)
lateral_gains_min_airspeed = (
[[1.438, -1.611, -0.401, 0.289, -0.743, 0.333],
[0.257, 1.041, 0.058, -0.542, -0.432, -0.278],
[-20011.52, -42735.847, -1914.014, 28358.023, 25584.42, 12448.614]]
)
lateral_gains_nominal_airspeed = (
[[1.043, -1.502, -0.221, 0.191, -0.767, 0.32],
[0.428, 1.13, 0.032, -0.383, -0.614, -0.374],
[-7288.823, -11800.514, -296.679, 5172.453, 9185.489, 4445.84]]
)
lateral_gains_max_airspeed = (
[[0.965, -1.415, -0.191, 0.146, -0.782, 0.311],
[0.318, 1.117, 0.024, -0.286, -0.617, -0.389],
[-2567.285, -5064.437, -106.454, 1742.745, 4117.291, 2047.19]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-6.043, -0.014, 0.134],
[-0.027, -5.618, -0.024],
[0.257, 0.005, -1.105]]
)
elif wing_serial == m.kWingSerial05Crosswind:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.198, -0.198, 0.0, 0.0, -0.002, -0.002, 0.003, 0.009]
)
longitudinal_gains_min_airspeed = (
[[-0.004, 0.033, -0.744, -0.364, 0.311],
[7.876, -66.499, 1418.317, 702.426, -589.905]]
)
longitudinal_gains_nominal_airspeed = (
[[0.01, 0.014, -0.565, -0.186, 0.316],
[-4.942, -7.054, 263.793, 89.867, -148.357]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.454, -0.145, 0.316],
[-1.635, -1.75, 92.08, 31.033, -65.443]]
)
lateral_gains_min_airspeed = (
[[1.47, -1.591, -0.412, 0.286, -0.746, 0.331],
[0.24, 1.039, 0.059, -0.549, -0.431, -0.281],
[-19344.869, -41752.487, -1867.667, 28478.098, 25425.604, 12404.153]]
)
lateral_gains_nominal_airspeed = (
[[1.07, -1.485, -0.226, 0.189, -0.768, 0.32],
[0.397, 1.117, 0.033, -0.383, -0.613, -0.374],
[-6919.209, -11394.187, -294.167, 5138.956, 9160.95, 4397.605]]
)
lateral_gains_max_airspeed = (
[[0.993, -1.401, -0.193, 0.145, -0.782, 0.312],
[0.287, 1.101, 0.025, -0.285, -0.618, -0.389],
[-2410.981, -4866.463, -105.87, 1728.008, 4114.679, 2019.74]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.857, -0.012, 0.131],
[-0.03, -5.457, -0.022],
[0.249, 0.005, -1.072]]
)
elif wing_serial == m.kWingSerial06Hover:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.208, -0.208, 0.0, 0.0, 0.008, 0.008, 0.006, 0.007]
)
longitudinal_gains_min_airspeed = (
[[-0.003, 0.032, -0.731, -0.358, 0.311],
[6.453, -64.539, 1392.121, 689.765, -589.371]]
)
longitudinal_gains_nominal_airspeed = (
[[0.011, 0.014, -0.553, -0.183, 0.316],
[-5.088, -6.779, 257.684, 88.435, -148.279]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.447, -0.144, 0.316],
[-1.637, -1.678, 90.467, 30.782, -65.435]]
)
lateral_gains_min_airspeed = (
[[1.438, -1.616, -0.408, 0.296, -0.742, 0.333],
[0.255, 1.044, 0.059, -0.552, -0.432, -0.277],
[-19907.663, -43108.523, -1968.711, 28927.246, 25591.178, 12468.239]]
)
lateral_gains_nominal_airspeed = (
[[1.038, -1.503, -0.224, 0.194, -0.768, 0.32],
[0.435, 1.136, 0.033, -0.391, -0.614, -0.374],
[-7364.944, -11935.606, -300.999, 5287.24, 9178.769, 4462.368]]
)
lateral_gains_max_airspeed = (
[[0.958, -1.416, -0.192, 0.148, -0.783, 0.311],
[0.325, 1.123, 0.024, -0.291, -0.617, -0.389],
[-2605.535, -5129.038, -107.775, 1775.087, 4114.053, 2056.295]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.912, -0.009, 0.13],
[-0.025, -5.494, -0.024],
[0.252, 0.005, -1.081]]
)
elif wing_serial == m.kWingSerial06Crosswind:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.203, -0.203, 0.0, 0.0, 0.003, 0.003, 0.004, 0.008]
)
longitudinal_gains_min_airspeed = (
[[-0.004, 0.033, -0.743, -0.369, 0.311],
[8.412, -65.841, 1417.073, 711.786, -589.819]]
)
longitudinal_gains_nominal_airspeed = (
[[0.01, 0.014, -0.566, -0.188, 0.316],
[-4.888, -7.008, 264.204, 90.884, -148.372]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.455, -0.146, 0.316],
[-1.62, -1.735, 92.313, 31.262, -65.454]]
)
lateral_gains_min_airspeed = (
[[1.471, -1.596, -0.42, 0.293, -0.746, 0.331],
[0.239, 1.043, 0.06, -0.559, -0.431, -0.28],
[-19231.343, -42149.313, -1926.317, 29079.197, 25426.856, 12427.585]]
)
lateral_gains_nominal_airspeed = (
[[1.065, -1.487, -0.228, 0.193, -0.769, 0.319],
[0.404, 1.123, 0.033, -0.391, -0.613, -0.374],
[-6992.628, -11534.142, -299.093, 5258.12, 9152.573, 4415.616]]
)
lateral_gains_max_airspeed = (
[[0.986, -1.402, -0.194, 0.148, -0.782, 0.311],
[0.294, 1.108, 0.025, -0.29, -0.617, -0.389],
[-2447.327, -4933.324, -107.393, 1761.417, 4110.821, 2029.552]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.725, -0.008, 0.128],
[-0.027, -5.331, -0.022],
[0.243, 0.005, -1.048]]
)
elif wing_serial == m.kWingSerial07Hover:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.211, -0.211, 0.0, 0.0, 0.011, 0.011, 0.005, 0.008]
)
longitudinal_gains_min_airspeed = (
[[-0.002, 0.033, -0.73, -0.357, 0.311],
[4.546, -64.827, 1390.055, 688.515, -589.338]]
)
longitudinal_gains_nominal_airspeed = (
[[0.011, 0.014, -0.548, -0.182, 0.316],
[-5.284, -6.728, 254.985, 87.909, -148.241]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.441, -0.144, 0.316],
[-1.676, -1.659, 89.199, 30.631, -65.417]]
)
lateral_gains_min_airspeed = (
[[1.44, -1.617, -0.409, 0.296, -0.743, 0.333],
[0.254, 1.044, 0.06, -0.551, -0.432, -0.277],
[-19794.459, -43094.943, -1997.427, 28857.76, 25564.594, 12475.401]]
)
lateral_gains_nominal_airspeed = (
[[1.036, -1.502, -0.224, 0.194, -0.768, 0.32],
[0.433, 1.136, 0.033, -0.39, -0.614, -0.374],
[-7324.836, -11932.75, -305.45, 5272.765, 9170.382, 4461.79]]
)
lateral_gains_max_airspeed = (
[[0.956, -1.415, -0.192, 0.148, -0.783, 0.311],
[0.323, 1.123, 0.025, -0.291, -0.617, -0.389],
[-2588.374, -5128.587, -109.03, 1771.109, 4110.266, 2055.664]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.92, -0.002, 0.128],
[-0.027, -5.506, -0.024],
[0.252, 0.005, -1.083]]
)
elif wing_serial == m.kWingSerial07Crosswind:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.206, -0.206, 0.0, 0.0, 0.006, 0.006, 0.004, 0.009]
)
longitudinal_gains_min_airspeed = (
[[-0.004, 0.033, -0.743, -0.369, 0.311],
[6.879, -65.952, 1416.656, 711.871, -589.814]]
)
longitudinal_gains_nominal_airspeed = (
[[0.011, 0.014, -0.562, -0.187, 0.316],
[-5.048, -6.949, 262.084, 90.532, -148.346]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.451, -0.146, 0.316],
[-1.652, -1.714, 91.319, 31.157, -65.442]]
)
lateral_gains_min_airspeed = (
[[1.473, -1.597, -0.421, 0.294, -0.746, 0.331],
[0.237, 1.043, 0.061, -0.559, -0.431, -0.28],
[-19140.086, -42177.964, -1957.253, 29074.862, 25405.542, 12435.884]]
)
lateral_gains_nominal_airspeed = (
[[1.063, -1.486, -0.228, 0.193, -0.769, 0.319],
[0.403, 1.124, 0.033, -0.391, -0.613, -0.374],
[-6972.675, -11547.427, -303.445, 5257.572, 9146.686, 4416.334]]
)
lateral_gains_max_airspeed = (
[[0.983, -1.401, -0.195, 0.148, -0.782, 0.311],
[0.294, 1.108, 0.025, -0.291, -0.617, -0.389],
[-2439.793, -4940.323, -108.619, 1761.451, 4108.271, 2029.644]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.721, -0.001, 0.125],
[-0.028, -5.332, -0.022],
[0.243, 0.005, -1.048]]
)
else:
assert False, 'wing_serial %d was not recognized' % wing_serial
return {
'airspeed_table': (
airspeed_table),
'flap_offsets': (
flap_offsets),
'longitudinal_gains_min_airspeed': (
longitudinal_gains_min_airspeed),
'longitudinal_gains_nominal_airspeed': (
longitudinal_gains_nominal_airspeed),
'longitudinal_gains_max_airspeed': (
longitudinal_gains_max_airspeed),
'lateral_gains_min_airspeed': (
lateral_gains_min_airspeed),
'lateral_gains_nominal_airspeed': (
lateral_gains_nominal_airspeed),
'lateral_gains_max_airspeed': (
lateral_gains_max_airspeed),
'B_flaps_to_pqr_min_airspeed': (
B_flaps_to_pqr_min_airspeed),
} | e9e557909cfb9a7e885f14d20948436b653f4f31 | 15,276 |
import cv2


def rotate(mat, degrees):
"""
Rotates the input image by a given number of degrees about its center.
Border pixels are extrapolated by replication.
:param mat: input image
:param degrees: number of degrees to rotate (positive is counter-clockwise)
:return: rotated image
"""
rot_mat = cv2.getRotationMatrix2D((mat.shape[1] / 2, mat.shape[0] / 2), degrees, 1)
return cv2.warpAffine(mat, rot_mat, (mat.shape[1], mat.shape[0]),
borderMode=cv2.BORDER_REPLICATE) | 6de73e2701fdad422497dd53d271accc1f039128 | 15,277 |
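A minimal illustrative call on a synthetic single-channel image (assuming OpenCV is installed):

```python
import numpy as np

img = np.zeros((100, 200), dtype=np.uint8)
img[40:60, 90:110] = 255          # bright square in the middle

rotated = rotate(img, 45)         # 45 degrees counter-clockwise about the centre
print(rotated.shape)              # (100, 200) -- same size, borders replicated
```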
def spec_defaults():
"""
Return a mapping with spec attribute defaults to ensure that the
returned results are the same on RubyGems 1.8 and RubyGems 2.0
"""
return {
'base_dir': None,
'bin_dir': None,
'cache_dir': None,
'doc_dir': None,
'gem_dir': None,
'gems_dir': None,
'ri_dir': None,
'spec_dir': None,
'spec_file': None,
'cache_file': None,
'full_gem_path': None,
'full_name': None,
'metadata': {},
'homepage': '',
'licenses': [],
'loaded_from': None,
} | 5f220168e2cc63c4572c29c17cb4192a7a5d1427 | 15,278 |
def rdict(x):
"""
    Recursive conversion to dictionary: converts objects (including objects inside
    list members) to dictionaries recursively, dropping attributes whose value is None.
"""
if isinstance(x, list):
l = [rdict(_) for _ in x]
return l
elif isinstance(x, dict):
x2 = {}
for k, v in x.items():
x2[k] = rdict(v)
return x2
else:
if hasattr(x, '__dict__'):
d = x.__dict__
toremove = []
for k, v in d.items():
if v is None:
toremove.append(k)
else:
d[k] = rdict(v)
for k in toremove:
del(d[k])
return d
else:
return x | dd09486aa76ee1a27306510a1100502bae482015 | 15,279 |
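A small illustrative round-trip showing how rdict turns nested objects into plain dictionaries and drops None-valued attributes:

```python
class Point:
    def __init__(self, x, y, tag=None):
        self.x = x
        self.y = y
        self.tag = tag   # None values are dropped by rdict

p = Point(1, 2)
print(rdict({"origin": p, "path": [Point(3, 4), Point(5, 6, tag="end")]}))
# -> {'origin': {'x': 1, 'y': 2}, 'path': [{'x': 3, 'y': 4}, {'x': 5, 'y': 6, 'tag': 'end'}]}
```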
import requests
from bs4 import BeautifulSoup
def get_pid(part_no):
"""Extract the PID from the part number page"""
url = 'https://product.tdk.com/en/search/capacitor/ceramic/mlcc/info?part_no=' + part_no
page = requests.get(url)
if (page.status_code != 200):
print('Error getting page({}): {}'.format(page.status_code, url))
return None
soup = BeautifulSoup(page.text, 'html.parser')
pid_input = soup.find(id='pid')
if pid_input is None:
return None
return pid_input['value'] | 8cc01b011e23d3bc972cb5552662b55ab998dba0 | 15,280 |
def verbatim_det_lcs_all(plags, psr, susp_text, src_text, susp_offsets, src_offsets, th_shortest):
"""
    DESCRIPTION: Uses the longest common substring algorithm to classify a pair of documents being compared as a verbatim plagiarism candidate, removing the non-verbatim cases if positive
INPUT: plags <list of list of two tuples [(int, int), (int, int)]> - Have the plagiarism cases represented by min and max sentence index in suspicious and source document respectively
psr <list of list of tuples (int, int)> - Contains the clusters
           susp_text <string> - Suspicious document text
src_text <string> - Source document text
susp_offsets <list of tuples (int, int)> - Contain the char offset and length of each suspicious document sentence
src_offsets <list of tuples (int, int)> - Contain the char offset and length of each source document sentence
th_shortest <int> - Threshold in characters of shortest common substring allowed
OUTPUT: res_plags <list of list of two tuples [(int, int), (int, int)]> - Contains the plagiarism cases as common substrings or the same as the arguments depending on type_plag
res_psr <list of list of tuples (int, int)> - Contains the clusters with seeds present in the common substrings, or the same as the arguments depending on type_plag
type_plag <0 or 1> - 1: verbatim plagiarism case 0: Other plagiarism case
res_long_frag <list> - Contains the lengths of common substrings
"""
#plags [[(susp_ini, susp_end), (src_ini, src_end)], ...]
res_plags = []
res_psr = []
res_long_frag = []
i = 0
type_plag = 0 #0: Unknown, 1: no-obfuscation
#print 'Plags:', len(plags)
while i < len(plags): #For each plagiarism case
#print 'Case',i
#print 'Plag case', plags[i]
#print 'Seeds', psr[i]
#sentences in seeds an those not in seeds
res2 = common_substring_pro_all(susp_text[susp_offsets[plags[i][0][0]][0] : susp_offsets[plags[i][0][1]][0] + susp_offsets[plags[i][0][1]][1]], src_text[src_offsets[plags[i][1][0]][0] : src_offsets[plags[i][1][1]][0] + src_offsets[plags[i][1][1]][1]], th_shortest)
res = []
#Remove overlapping
for tup_i in res2:
flag = 0
for tup_j in res2:
if tup_i != tup_j and tup_i[2] >= tup_j[2] and tup_i[3] <= tup_j[3]:
flag = 1
break
if flag == 0:
res.append(tup_i)
#print 'Res2', res2
#print 'Res', res
#max_len = max([res[1] - res[0], res[3] - res[2]])
#max_len = [(x[1] - x[0], x[3] - x[2]) for x in res]
if len(res) > 0:
if type_plag == 1:
#print max_len, True, 'Removing seeds with lcs shorter than', th_shortest
for sub_case in res:
res_plags.append([(susp_offsets[plags[i][0][0]][0] + sub_case[0], susp_offsets[plags[i][0][0]][0] + sub_case[1]), (src_offsets[plags[i][1][0]][0] + sub_case[2], src_offsets[plags[i][1][0]][0] + sub_case[3])])
res_psr.append(psr[i])
res_long_frag.append(max([sub_case[1] - sub_case[0], sub_case[3] - sub_case[2]]))
else:
#print max_len, 'Type 02-no-obfuscation detected. Starting over!'
#print max_len, 'Type 02-no-obfuscation detected. Removing previously added cases!'
type_plag = 1
res_plags = []
res_psr = []
res_long_frag = []
for sub_case in res:
res_plags.append([(susp_offsets[plags[i][0][0]][0] + sub_case[0], susp_offsets[plags[i][0][0]][0] + sub_case[1]), (src_offsets[plags[i][1][0]][0] + sub_case[2], src_offsets[plags[i][1][0]][0] + sub_case[3])])
res_psr.append(psr[i])
res_long_frag.append(max([sub_case[1] - sub_case[0], sub_case[3] - sub_case[2]]))
#i = -1
else:
if type_plag != 1:
#print max_len, False, 'Adding'
res_plags.append(plags[i])
res_psr.append(psr[i])
res_long_frag.append(-1)
#else:
#print max_len, False, 'Removing case because 02-no-obfuscation was detected'
i += 1
return res_plags, res_psr, type_plag, res_long_frag | d233f3745bdd458fe65cbbdbc056c8cca611d755 | 15,281 |
import logging
import itertools
def autopooler(n,
it,
*a,
chunksize=1,
dummy=False,
return_iter=False,
unordered=False,
**ka):
"""Uses multiprocessing.Pool or multiprocessing.dummy.Pool to run iterator in parallel.
Parameters
------------
n: int
Number of parallel processes. Set to 0 to use auto detected CPU count.
it: iterator of (function,tuple,dict)
Each iteration computes **function**\ (\*\ **tuple**\ ,\*\*\ **dict**\ ). **function** must be picklable, i.e. a base level function in a module or file.
a: tuple
Arguments passed to Pool.
chunksize: int
Number of iterations passed to each process each time.
dummy: bool
Whether to use multiprocessing.dummy instead
return_iter: bool
Not Implemented. Whether to return iterator of results instead. If not, return list of results.
unordered: bool
Whether the order of output matters.
ka: dict
Keyword arguments passed to Pool
Returns
----------
list (or iterator if return_iter) of any
Results returned by function(\*tuple,\*\*dict), in same order of the iterator if not unordered.
"""
    if dummy:
        import multiprocessing.dummy as m
    else:
        import multiprocessing as m
if n == 0:
n = autocount()
logging.info('Using {} threads'.format(n))
if n == 1:
ans = map(autopooler_caller, it)
if not return_iter:
ans = list(ans)
assert len(ans) > 0
else:
# Catches iterator errors (only if occurs at the first), and emptiness
it = itertools.chain([next(it)], it)
with m.Pool(n, *a, **ka) as p:
if unordered:
ans = p.imap_unordered(autopooler_caller, it, chunksize)
else:
ans = p.imap(autopooler_caller, it, chunksize)
if not return_iter:
ans = list(ans)
else:
raise NotImplementedError
return ans | 489426a16977b632dd16fe351eee167c7eb5fb0d | 15,282 |
def grow_population(initial, days_to_grow):
"""
Track the fish population growth from an initial population, growing over days_to_grow number of days.
To make this efficient two optimizations have been made:
    1. Instead of tracking individual fish (which double roughly every 8 days, which would result in O(10^9)
fish over 256 days), we instead compute the sum of fish with the same due date and use the due date
as the offset into the current popluation list. For example, if 5 fish have a timer of 1 and 2 fish
have a timer of 4 the population would be tracked as: [0, 5, 0, 0, 2, 0, 0, 0, 0]
2. Modulo arithmetic is used instead of fully iterating through the entire list to decrement the due
       date of each fish every day. Using modulo arithmetic provides a projection into the fish data that
       looks like it's changing each day without needing O(n) operations, and instead we can update the list
in constant time regardless of the number of different ages for fish.
"""
current = list(initial)
if days_to_grow == 0:
return current
for day in range(0, days_to_grow):
due_index = day % 9
due_count = current[due_index]
current[(day+7)%9] += due_count
current[(day+9)%9] += due_count
current[due_index] = max(0, current[due_index] - due_count)
return current | 88b8283e5c1e6de19acb76278ef16d9d6b94de00 | 15,283 |
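An illustrative run using the compact timer-bucket representation described in the docstring (index = days until spawning, value = number of fish with that timer):

```python
# Five fish with timer 1 and two fish with timer 4, as in the docstring example.
initial = [0, 5, 0, 0, 2, 0, 0, 0, 0]

after_18 = grow_population(initial, 18)
print(sum(after_18))   # total number of fish after 18 days
```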
def get_QBrush():
    """QBrush getter."""
    try:
        from PySide import QtGui
        return QtGui.QBrush
    except ImportError:
        from PyQt5 import QtGui
return QtGui.QBrush | 548226da434077ee1d0d1d2fb4a6762faf5f091d | 15,284 |
def apply_odata_query(query: ClauseElement, odata_query: str) -> ClauseElement:
"""
Shorthand for applying an OData query to a SQLAlchemy query.
Args:
query: SQLAlchemy query to apply the OData query to.
odata_query: OData query string.
Returns:
ClauseElement: The modified query
"""
lexer = ODataLexer()
parser = ODataParser()
model = query.column_descriptions[0]["entity"]
ast = parser.parse(lexer.tokenize(odata_query))
transformer = AstToSqlAlchemyClauseVisitor(model)
where_clause = transformer.visit(ast)
for j in transformer.join_relationships:
if str(j) not in _get_joined_attrs(query):
query = query.join(j)
return query.filter(where_clause) | 666dd05856db79ce90f29e864aeaf4188bd425d0 | 15,285 |
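A hedged usage sketch, assuming the OData lexer/parser/visitor helpers referenced above are importable, and using a hypothetical SQLAlchemy model named Author purely for illustration:

```python
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Author(Base):  # hypothetical model
    __tablename__ = "author"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    year = Column(Integer)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    query = session.query(Author)
    query = apply_odata_query(query, "year gt 1900 and name eq 'Beatrix Potter'")
    print(query.all())
```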
def get_sql(conn, data, did, tid, exid=None, template_path=None):
"""
This function will generate sql from model data.
:param conn: Connection Object
:param data: data
:param did: Database ID
:param tid: Table id
:param exid: Exclusion Constraint ID
:param template_path: Template Path
:return:
"""
name = data['name'] if 'name' in data else None
if exid is not None:
sql = render_template("/".join([template_path, 'properties.sql']),
did=did, tid=tid, cid=exid)
status, res = conn.execute_dict(sql)
if not status:
raise Exception(res)
if len(res['rows']) == 0:
raise ObjectGone(
_('Could not find the exclusion constraint in the table.'))
old_data = res['rows'][0]
if 'name' not in data:
name = data['name'] = old_data['name']
sql = render_template("/".join([template_path, 'update.sql']),
data=data, o_data=old_data)
else:
if 'columns' not in data:
return _('-- definition incomplete'), name
elif isinstance(data['columns'], list) and len(data['columns']) < 1:
return _('-- definition incomplete'), name
sql = render_template("/".join([template_path, 'create.sql']),
data=data, conn=conn)
return sql, name | 45ec23f3e061491ad87ea0a59b7e08e32e5183a2 | 15,286 |
import six
import base64
def bytes_base64(x):
# type: (AnyStr) -> bytes
"""Turn bytes into base64"""
if six.PY2:
return base64.encodestring(x).replace('\n', '') # type: ignore
return base64.encodebytes(bytes_encode(x)).replace(b'\n', b'') | 543b0f1105545cda516890d2d6f4c5a8059c4365 | 15,287 |
def is_planar_enforced(gdf):
"""Test if a geodataframe has any planar enforcement violations
    Parameters
    ----------
    gdf : GeoDataFrame
        The geodataframe to check for planar enforcement violations.
Returns
-------
boolean
"""
if is_overlapping(gdf):
return False
if non_planar_edges(gdf):
return False
_holes = holes(gdf)
if _holes.shape[0] > 0:
return False
return True | 0587cd351fcc7355d0767a404e446d91f8c59d4d | 15,288 |
from uuid import UUID
def bson2uuid(bval: bytes) -> UUID:
    """Decode BSON Binary UUID as UUID."""
    return UUID(bytes=bval) | 6fc81f03b6eabee3496bab6b407d6c665b001667 | 15,289
def ape_insert_new_fex(cookie, in_device_primary_key, in_model, in_serial, in_vendor):
""" Auto-generated UCS XML API Method. """
method = ExternalMethod("ApeInsertNewFex")
method.cookie = cookie
method.in_device_primary_key = in_device_primary_key
method.in_model = in_model
method.in_serial = in_serial
method.in_vendor = in_vendor
xml_request = method.to_xml(option=WriteXmlOption.DIRTY)
return xml_request | 2d10c37f26357ac9714d0dfe91967f4029857cd5 | 15,290 |
import aiohttp
import asyncio
async def get_pool_info(address, api_url="https://rest.stargaze-apis.com/cosmos"):
"""Pool value and current rewards via rest API.
Useful links:
https://api.akash.smartnodes.one/swagger/#/
https://github.com/Smart-Nodes/endpoints
"""
rewards_url = f"{api_url}/distribution/v1beta1/delegators/{ADDRESS}/rewards"
delegated_url = f"{api_url}/staking/v1beta1/delegations/{ADDRESS}"
async with aiohttp.ClientSession() as session:
rewards_data, pool_data = await asyncio.gather(
gather_json(session, rewards_url), gather_json(session, delegated_url)
)
rewards = float(rewards_data["rewards"][0]["reward"][0]["amount"]) / 1_000_000
pool_value = (
float(pool_data["delegation_responses"][0]["balance"]["amount"]) / 1_000_000
)
return pool_value, rewards | 34c54c840ed3a412002b99f798c23f495e1eb75d | 15,291 |
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
# Convert any PNG to JPEG's for consistency.
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width | 111025b6dddcf7380fd912a84154b551df4be5f3 | 15,292 |
def mnist_reader(numbers):
"""
Read MNIST dataset with specific numbers you needed
:param numbers: A list of number from 0 - 9 as you needed
:return: A tuple of a numpy array with specific numbers MNIST training dataset,
labels of the training set and the length of the training dataset.
"""
# Training Data
    f = open('./data/train-images.idx3-ubyte', 'rb')
loaded = np.fromfile(file=f, dtype=np.uint8)
trainX = loaded[16:].reshape((60000, 28, 28, 1)).astype(np.float32) / 127.5 - 1
    f = open('./data/train-labels.idx1-ubyte', 'rb')
loaded = np.fromfile(file=f, dtype=np.uint8)
trainY = loaded[8:].reshape((60000)).astype(np.int32)
_trainX = []
for idx in range(0, len(trainX)):
if trainY[idx] in numbers:
_trainX.append(trainX[idx])
return np.array(_trainX), trainY, len(_trainX) | 627a7fd41047383cd5869fe83efea2c2b0e2d25a | 15,293 |
import six
def _ensure_list(alist): # {{{
"""
Ensure that variables used as a list are actually lists.
"""
# Authors
# -------
# Phillip J. Wolfram, Xylar Asay-Davis
if isinstance(alist, six.string_types):
# print 'Warning, converting %s to a list'%(alist)
alist = [alist]
return alist | bd8115dad627f4553ded17757bfb838cfdb0200b | 15,294 |
def _parse_einsum_input(operands):
"""Parses einsum operands.
This function is based on `numpy.core.einsumfunc._parse_einsum_input`
function in NumPy 1.14.
Returns
-------
input_strings : str
Parsed input strings
output_string : str
Parsed output string
operands : list of array_like
The operands to use in the numpy contraction
Examples
--------
The operand list is simplified to reduce printing:
>>> a = np.random.rand(4, 4)
>>> b = np.random.rand(4, 4, 4)
>>> _parse_einsum_input(('...a,...a->...', a, b))
('@a,@a', '@', [a, b])
>>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
('@a,@a', '@', [a, b])
"""
if not operands:
raise ValueError('No input operands')
if isinstance(operands[0], str):
subscripts = operands[0].replace(' ', '')
operands = operands[1:]
# Ensure all characters are valid
for s in subscripts:
if s in '.,->':
continue
if s not in einsum_symbols:
raise ValueError('Character %s is not a valid symbol.' % s)
# Check for proper "->"
if ('-' in subscripts) or ('>' in subscripts):
if any((
subscripts.count('-') > 1,
subscripts.count('>') > 1,
subscripts.count('->') != 1,
)):
raise ValueError('Subscripts can only contain one \'->\'.')
# Parse "..."
subscripts = subscripts.replace('...', '@')
if '.' in subscripts:
raise ValueError('Invalid Ellipses.')
else:
tmp_operands = list(operands)
operand_list = []
subscript_list = []
for p in range(len(operands) // 2):
operand_list.append(tmp_operands.pop(0))
subscript_list.append(tmp_operands.pop(0))
output_list = tmp_operands[-1] if len(tmp_operands) else None
operands = operand_list
subscripts = ''
last = len(subscript_list) - 1
for num, sub in enumerate(subscript_list):
for s in sub:
if s is Ellipsis:
subscripts += '@'
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError('For this input type lists must contain '
'either int or Ellipsis')
if num != last:
subscripts += ','
if output_list is not None:
subscripts += '->'
for s in output_list:
if s is Ellipsis:
subscripts += '@'
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError('For this input type lists must contain '
'either int or Ellipsis')
# Build output string if does not exist
if '->' in subscripts:
input_subscripts, output_subscript = subscripts.split('->')
# Make sure output subscripts are in the input
for char in output_subscript:
if char not in input_subscripts:
raise ValueError(
'Output character %s did not appear in the input'
% ('...' if char == '@' else char))
else:
input_subscripts = subscripts
# Build output subscripts
tmp_subscripts = subscripts.replace(',', '')
output_subscript = ''
for s in sorted(set(tmp_subscripts)):
if s == '@' or tmp_subscripts.count(s) == 1:
output_subscript += s
# Make sure number operands is equivalent to the number of terms
if len(input_subscripts.split(',')) != len(operands):
raise ValueError('Number of einsum subscripts must be equal to the '
'number of operands.')
return input_subscripts, output_subscript, operands | 8c95c3d842a29fa637e6190e006638420b8a0d83 | 15,295 |
def convert_to_numpy(*args, **kwargs):
"""
Converts all tf tensors in args and kwargs to numpy array
Parameters
----------
*args :
positional arguments of arbitrary number and type
**kwargs :
keyword arguments of arbitrary number and type
Returns
-------
list
converted positional arguments
dict
        converted keyword arguments
"""
args = recursively_convert_elements(args, tf.Tensor,
_single_element_tensor_conversion)
kwargs = recursively_convert_elements(kwargs, tf.Tensor,
_single_element_tensor_conversion)
return convert_to_numpy_identity(*args, **kwargs) | 8059832fc4841b4cb96dcc77e96dd354dba399c2 | 15,296 |
async def delete_contact(
contact_key: int, hash: str, resource: Resource = Depends(get_json_resource)
):
"""
Delete the contact with the given key.
If the record has changed since the hash was obtained, a 409 error is returned.
"""
try:
await resource.delete(contact_key, hash)
except SirixServerError:
return Response(status_code=status.HTTP_409_CONFLICT)
return Response(status_code=status.HTTP_204_NO_CONTENT) | f984c5ece28ac8b58bb2d2137dcc94e2f3a7bf7c | 15,297 |
import jsonschema
def update_model_instance_meta_schema(request, file_type_id, **kwargs):
"""copies the metadata schema from the associated model program aggregation over to the model instance aggregation
"""
# Note: decorator 'authorise_for_aggregation_edit' sets the error_response key in kwargs
if 'error_response' in kwargs and kwargs['error_response']:
error_response = kwargs['error_response']
return JsonResponse(error_response, status=status.HTTP_400_BAD_REQUEST)
# Note: decorator 'authorise_for_aggregation_edit' sets the logical_file key in kwargs
logical_file = kwargs['logical_file']
metadata = logical_file.metadata
if not metadata.executed_by:
msg = "No associated model program was found"
error_response = {"status": "error", "message": msg}
return JsonResponse(error_response, status=status.HTTP_400_BAD_REQUEST)
elif not metadata.executed_by.metadata_schema_json:
msg = "Associated model program has no metadata schema"
error_response = {"status": "error", "message": msg}
return JsonResponse(error_response, status=status.HTTP_400_BAD_REQUEST)
logical_file.metadata_schema_json = metadata.executed_by.metadata_schema_json
if metadata.metadata_json:
# validate json data against metadata schema:
try:
metadata_json_schema = logical_file.metadata_schema_json
jsonschema.Draft4Validator(metadata_json_schema).validate(metadata.metadata_json)
except jsonschema.ValidationError as ex:
# delete existing invalid metadata
metadata.metadata_json = {}
logical_file.save()
metadata.is_dirty = True
metadata.save()
resource = logical_file.resource
resource_modified(resource, request.user, overwrite_bag=False)
ajax_response_data = {'status': 'success', 'logical_file_type': logical_file.type_name(),
'element_name': 'metadata_schema_json', 'message': "Update was successful"}
return JsonResponse(ajax_response_data, status=status.HTTP_200_OK) | c6f67f2f6386065919239f7d868797d97aec6874 | 15,298 |
def _calculate_permutation_scores_per_col(estimator, X, y, sample_weight, col_idx,
random_state, n_repeats, scorer):
"""Calculate score when `col_idx` is permuted."""
random_state = check_random_state(random_state)
    # Work on a copy of X to ensure thread-safety in case of threading based
# parallelism. Furthermore, making a copy is also useful when the joblib
# backend is 'loky' (default) or the old 'multiprocessing': in those cases,
# if X is large it will be automatically be backed by a readonly memory map
# (memmap). X.copy() on the other hand is always guaranteed to return a
# writable data-structure whose columns can be shuffled inplace.
X_permuted = X.copy()
scores = np.zeros(n_repeats)
shuffling_idx = np.arange(X.shape[0])
for n_round in range(n_repeats):
random_state.shuffle(shuffling_idx)
if hasattr(X_permuted, "iloc"):
col = X_permuted.iloc[shuffling_idx, col_idx]
col.index = X_permuted.index
X_permuted.iloc[:, col_idx] = col
else:
X_permuted[:, col_idx] = X_permuted[shuffling_idx, col_idx]
feature_score = _weights_scorer(
scorer, estimator, X_permuted, y, sample_weight
)
scores[n_round] = feature_score
return scores | 52c49ac3e4fd53490af04c9d862b506214e08f95 | 15,299 |
def get_statement_at_line(source: str, lineno: int, checker):
"""Get statements at line *lineno* from a source string.
:param source: The source to get the statements from.
:param lineno: Line number which the statement must include. Counted from 1.
:param checker: A function that checks each statement. It must return *None* if the check
fails. If anything else is returned, that becomes the return value of this function.
    :returns: A list of tuples of strings with the found statements and an offset between the
beginning of the match and *lineno*.
"""
module = ast_utils.cached_parse(source)
for stmt in module.body:
position = ast_utils.get_position(source, stmt)
if position.lineno <= lineno <= position.end_lineno:
res = checker(stmt, source)
if res is not None:
return res
raise RuntimeError('Statement not found.') | d2066f5fafa1c20c4b5276e44d82ae95ffa2f59b | 15,300 |
def ptrace(Q, sel):
"""
Partial trace of the Qobj with selected components remaining.
Parameters
----------
Q : :class:`qutip.Qobj`
Composite quantum object.
sel : int/list
An ``int`` or ``list`` of components to keep after partial trace.
Returns
-------
oper : :class:`qutip.Qobj`
Quantum object representing partial trace with selected components
remaining.
Notes
-----
This function is for legacy compatibility only. It is recommended to use
the ``ptrace()`` Qobj method.
"""
if not isinstance(Q, Qobj):
raise TypeError("Input is not a quantum object")
return Q.ptrace(sel) | a98e7bea41cff00b44534cecac7f86958ef47ebb | 15,301 |
def createConformations(outputfile, forcefield, smiles, sid):
    """Generate the conformations for a molecule and save them to disk."""
    print(f'Generating {sid}: {smiles}')
try:
mol = Molecule.from_smiles(smiles, allow_undefined_stereo=True)
fftop = Topology()
fftop.add_molecule(mol)
mmtop = fftop.to_openmm()
system = forcefield.create_openmm_system(fftop)
except:
print(' failed to parametrize')
return
# Generate 10 diverse starting points. Run MD from each one to generate a total
# of 100 high energy conformations.
mol.generate_conformers(n_conformers=10, rms_cutoff=0*unit.nanometers)
assert len(mol.conformers) == 10
def simulate(pos):
integrator = openmm.LangevinMiddleIntegrator(500*unit.kelvin, 1/unit.picosecond, 0.001*unit.picosecond)
simulation = app.Simulation(mmtop, system, integrator, openmm.Platform.getPlatformByName('Reference'))
simulation.context.setPositions(pos)
simulation.minimizeEnergy()
simulation.context.setVelocitiesToTemperature(500*unit.kelvin)
states = []
for i in range(10):
simulation.step(10000)
state = simulation.context.getState(getPositions=True, getEnergy=True)
if state.getPotentialEnergy() < 1e4*unit.kilojoules_per_mole:
states.append(state)
return states
futures = []
with ThreadPoolExecutor() as executor:
for pos in mol.conformers:
futures.append(executor.submit(simulate, pos))
states = []
for future in futures:
states += future.result()
# Select 25 that are most different from each other.
if len(states) < 25:
print(' failed to generate states')
return
states = filterByRMSD(states, mmtop)
# Create a nearby, lower energy conformation from each one.
integrator = openmm.LangevinMiddleIntegrator(100*unit.kelvin, 1/unit.picosecond, 0.001*unit.picosecond)
simulation = app.Simulation(mmtop, system, integrator, openmm.Platform.getPlatformByName('Reference'))
for state in states[:]:
simulation.context.setState(state)
simulation.minimizeEnergy(maxIterations=5)
simulation.context.setVelocitiesToTemperature(100*unit.kelvin)
simulation.step(1000)
states.append(simulation.context.getState(getPositions=True))
saveToFile(outputfile, mol, states, sid) | fce6cb1c7620b755a500e76822aa3ac27b7a12f4 | 15,302 |
import numpy
import math
def two_angle_circular_correlation_coef(angles1, angles2, mean1, mean2):
"""
Circular correlation measure. SenGupta 2001
"""
centered_a = angles1-mean1
centered_b = angles2-mean2
sin_centered_a = numpy.sin(centered_a)
sin_centered_b = numpy.sin(centered_b)
sin2_a = sin_centered_a*sin_centered_a
sin2_b = sin_centered_b*sin_centered_b
return numpy.dot(sin_centered_a, sin_centered_b) / math.sqrt(numpy.dot(sin2_a, sin2_b)) | 6a95f8726f45105c68b9c0b4f8f13191a88734e2 | 15,303 |
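# A short usage example (not from the original source): correlate two angle
# samples, passing their circular means as the centering values.
from scipy.stats import circmean

angles_a = numpy.array([0.1, 0.5, 1.0, 1.4])
angles_b = numpy.array([0.2, 0.6, 1.1, 1.5])
r = two_angle_circular_correlation_coef(angles_a, angles_b, circmean(angles_a), circmean(angles_b))
print(r)  # close to 1 for strongly correlated angle samples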
from typing import Union
import yaml
def format_data(data: Union[dict, list]) -> str:
"""
:param data: input data
:return: pretty formatted yaml representation of a dictionary
"""
return yaml.dump(data, sort_keys=False, default_flow_style=False) | b4e79a8957995fb8e2eaa549a6a208a48574a598 | 15,304 |
def eval_on_dataset(
model, state, dataset,
pmapped_eval_step):
"""Evaluates the model on the whole dataset.
Args:
model: The model to evaluate.
state: Current state associated with the model (contains the batch norm MA).
dataset: Dataset on which the model should be evaluated. Should already
being batched.
pmapped_eval_step: A pmapped version of the `eval_step` function (see its
documentation for more details).
Returns:
A dictionary containing the loss and error rate on the batch. These metrics
are averaged over the samples.
"""
eval_metrics = []
total_num_samples = 0
for eval_batch in dataset:
# Load and shard the TF batch.
eval_batch = load_and_shard_tf_batch(eval_batch)
# Compute metrics and sum over all observations in the batch.
metrics = pmapped_eval_step(model, state, eval_batch)
eval_metrics.append(metrics)
# Number of samples seen in num_replicas * per_replica_batch_size.
total_num_samples += (
eval_batch['label'].shape[0] * eval_batch['label'].shape[1])
# Metrics are all the same across all replicas (since we applied psum in the
# eval_step). The next line will fetch the metrics on one of them.
eval_metrics = common_utils.get_metrics(eval_metrics)
# Finally, we divide by the number of samples to get the mean error rate and
# cross entropy.
eval_summary = jax.tree_map(lambda x: x.sum() / total_num_samples,
eval_metrics)
return eval_summary | dd2296f80db37687de6fc8a4bcf0046d43cda115 | 15,306 |
def factorize(n):
""" Prime factorises n """
# Loop upto sqrt(n) and check for factors
ret = []
sqRoot = int(n ** 0.5)
    for f in range(2, sqRoot+1):
        if n % f == 0:
            e = 0
            while n % f == 0:
                n, e = n // f, e + 1
ret.append((f, e))
if n > 1:
ret.append((n, 1))
return ret | bc4b4a26010f2f18c9989acd2b7d81615b21f8db | 15,308 |
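# A short usage example (not from the original source): 360 = 2^3 * 3^2 * 5.
assert factorize(360) == [(2, 3), (3, 2), (5, 1)]
assert factorize(13) == [(13, 1)]  # primes factorise to themselves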
import random
def createSimpleDataSet( numOfAttr, numOfObj ):
"""
This creates a simple data base with 3 attributes
The second one is 2 times the first one with some
Gauss noise. The third one is just random noise.
"""
database = []
for i in range(numOfObj):
data = dataObject(numOfAttr)
w=[random.gauss(2.0, 2.0)]
w.append(w[0]*3+random.gauss(0.0, 0.05))
w.append(random.random()*6)
data.setAttributes(w)
database.append(data)
return database | dd4e8005634bd49411a785982fe3112acaf8e544 | 15,309 |
import tqdm
import warnings
def clean_data(
data,
isz=None,
r1=None,
dr=None,
edge=0,
bad_map=None,
add_bad=None,
apod=True,
offx=0,
offy=0,
sky=True,
window=None,
darkfile=None,
f_kernel=3,
verbose=False,
*,
mask=None,
):
"""Clean data.
Parameters:
-----------
`data` {np.array} -- datacube containing the NRM data\n
`isz` {int} -- Size of the cropped image (default: {None})\n
`r1` {int} -- Radius of the rings to compute background sky (default: {None})\n
`dr` {int} -- Outer radius to compute sky (default: {None})\n
`edge` {int} -- Patch the edges of the image (VLT/SPHERE artifact, default: {200}),\n
    `checkrad` {bool} -- If True, check the resizing and sky subtraction parameters (default: {False})\n
Returns:
--------
`cube` {np.array} -- Cleaned datacube.
"""
n_im = data.shape[0]
cube_cleaned = [] # np.zeros([n_im, isz, isz])
l_bad_frame = []
bad_map, add_bad = _get_3d_bad_pixels(bad_map, add_bad, data)
for i in tqdm(range(n_im), ncols=100, desc="Cleaning", leave=False):
img0 = data[i]
img0 = _apply_edge_correction(img0, edge=edge)
if bad_map is not None:
img1 = fix_bad_pixels(img0, bad_map[i], add_bad=add_bad[i])
else:
img1 = img0.copy()
img1 = _remove_dark(img1, darkfile=darkfile, verbose=verbose)
if isz is not None:
# Get expected center for sky correction
filtmed = f_kernel is not None
center = find_max(img1, filtmed=filtmed, f=f_kernel)
else:
center = None
if sky and (r1 is not None or mask is not None):
img_biased = sky_correction(
img1, r1=r1, dr=dr, verbose=verbose, center=center, mask=mask
)[0]
elif sky:
warnings.warn(
"sky is set to True, but r1 and mask are set to None. Skipping sky correction",
RuntimeWarning,
)
img_biased = img1.copy()
else:
img_biased = img1.copy()
img_biased[img_biased < 0] = 0 # Remove negative pixels
if isz is not None:
# Get expected center for sky correction
filtmed = f_kernel is not None
im_rec_max = crop_max(
img_biased, isz, offx=offx, offy=offy, filtmed=filtmed, f=f_kernel
)[0]
else:
im_rec_max = img_biased.copy()
if (
(im_rec_max.shape[0] != im_rec_max.shape[1])
or (isz is not None and im_rec_max.shape[0] != isz)
or (isz is None and im_rec_max.shape[0] != img0.shape[0])
):
l_bad_frame.append(i)
else:
if apod and window is not None:
img = apply_windowing(im_rec_max, window=window)
elif apod:
warnings.warn(
"apod is set to True, but window is None. Skipping apodisation",
RuntimeWarning,
)
img = im_rec_max.copy()
else:
img = im_rec_max.copy()
cube_cleaned.append(img)
if verbose:
print("Bad centering frame number:", l_bad_frame)
cube_cleaned = np.array(cube_cleaned)
return cube_cleaned | d50cb5b723661925c81f215e3bba903b4f9bb56c | 15,310 |
def select_points():
""" Select points (empty) objects.
Parameters:
None
Returns:
list: Empty objects or None.
"""
selected = bpy.context.selected_objects
if selected:
return [object for object in selected if object.type == 'EMPTY']
print('***** Point (empty) objects were not selected *****')
return None | 4134277f427518da188d8bcac4d5023d0b39e55a | 15,311 |
def mask_conv1d1(in_channels,
out_channels,
strides=1,
groups=1,
use_bias=False,
data_format="channels_last",
**kwargs):
"""
Masked 1-dim kernel version of the 1D convolution layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int, default 1
Strides of the convolution.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
return MaskConv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
groups=groups,
use_bias=use_bias,
data_format=data_format,
**kwargs) | 0c06482e36ef55322ed3b52e68f321750843ef01 | 15,313 |
import decimal
def decimal_from_tuple(signed, digits, expo):
"""Build `Decimal` objects from components of decimal tuple.
Parameters
----------
signed : bool
True for negative values.
digits : iterable of ints
digits of value each in [0,10).
expo : int or {'F', 'n', 'N'}
exponent of decimal.
Returns
-------
y : Decimal
corresponding decimal object.
"""
# Get everything in correct type because the Py3 decimal package is anal
signed = int(signed)
digits = ensure_tuple_of_ints(digits)
expo = expo if expo in ("F", "n", "N") else int(expo)
y = decimal.Decimal(decimal.DecimalTuple(signed, digits, expo))
return y | c3b67505440600b5e9f3ce944c9018539b32bbf7 | 15,314 |
from typing import Dict
def metadata_update(
repo_id: str,
metadata: Dict,
*,
repo_type: str = None,
overwrite: bool = False,
token: str = None,
) -> str:
"""
Updates the metadata in the README.md of a repository on the Hugging Face Hub.
Example:
>>> from huggingface_hub import metadata_update
>>> metadata = {'model-index': [{'name': 'RoBERTa fine-tuned on ReactionGIF',
... 'results': [{'dataset': {'name': 'ReactionGIF',
... 'type': 'julien-c/reactiongif'},
... 'metrics': [{'name': 'Recall',
... 'type': 'recall',
... 'value': 0.7762102282047272}],
... 'task': {'name': 'Text Classification',
... 'type': 'text-classification'}}]}]}
    >>> metadata_update("julien-c/reactiongif-roberta", metadata)
Args:
repo_id (`str`):
The name of the repository.
metadata (`dict`):
A dictionary containing the metadata to be updated.
repo_type (`str`, *optional*):
Set to `"dataset"` or `"space"` if updating to a dataset or space,
`None` or `"model"` if updating to a model. Default is `None`.
overwrite (`bool`, *optional*, defaults to `False`):
If set to `True` an existing field can be overwritten, otherwise
attempting to overwrite an existing field will cause an error.
token (`str`, *optional*):
The Hugging Face authentication token.
Returns:
`str`: URL of the commit which updated the card metadata.
"""
filepath = hf_hub_download(
repo_id,
filename=REPOCARD_NAME,
repo_type=repo_type,
use_auth_token=token,
force_download=True,
)
existing_metadata = metadata_load(filepath)
for key in metadata:
# update model index containing the evaluation results
if key == "model-index":
if "model-index" not in existing_metadata:
existing_metadata["model-index"] = metadata["model-index"]
else:
# the model-index contains a list of results as used by PwC but only has one element thus we take the first one
existing_metadata["model-index"][0][
"results"
] = _update_metadata_model_index(
existing_metadata["model-index"][0]["results"],
metadata["model-index"][0]["results"],
overwrite=overwrite,
)
# update all fields except model index
else:
if key in existing_metadata and not overwrite:
if existing_metadata[key] != metadata[key]:
raise ValueError(
f"""You passed a new value for the existing meta data field '{key}'. Set `overwrite=True` to overwrite existing metadata."""
)
else:
existing_metadata[key] = metadata[key]
# save and push to hub
metadata_save(filepath, existing_metadata)
return HfApi().upload_file(
path_or_fileobj=filepath,
path_in_repo=REPOCARD_NAME,
repo_id=repo_id,
repo_type=repo_type,
identical_ok=False,
token=token,
) | 1faf2ae158d598a7538f86ce328ea22b55308507 | 15,315 |
def system_types():
"""
    System types (workspace types).
:return:
"""
return Workspace.sys_types().values() | 968fbf7993d4ad645fe741ac48702440ba01a2e3 | 15,318 |
def get_rnd_simplex(dimension, random_state):
"""
Uniform random point on a simplex, i.e. x_i >= 0 and sum of the coordinates is 1.
Donald B. Rubin, The Bayesian bootstrap Ann. Statist. 9, 1981, 130-134.
https://cs.stackexchange.com/questions/3227/uniform-sampling-from-a-simplex
Parameters
----------
dimension: int
Dimensionality of the simplex
random_state: optional, RandomState object
Returns
-------
numpy array corresponding to random sample in dimension of space
"""
t = random_state.uniform(0, 1, dimension - 1)
t = np.append(t, [0, 1])
t.sort()
return np.array([(t[i + 1] - t[i]) for i in range(len(t) - 1)]) | d5e1105655192fe13bcad5e3dd08a7247461d8bf | 15,319 |
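# A short usage example (not from the original source): draw one point uniformly
# from the 3-dimensional simplex; coordinates are non-negative and sum to 1.
import numpy as np

rs = np.random.RandomState(0)
point = get_rnd_simplex(3, rs)
assert point.shape == (3,) and abs(point.sum() - 1.0) < 1e-12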
def backup_generate_metadata(request, created_at='', secret=''):
"""
Generates metadata code for the backup.
Meant to be called by the local handler only with shared secret (not directly).
"""
if not secret == settings.GAEBAR_SECRET_KEY:
return HttpResponseForbidden()
backup = models.GaebarBackup.all().filter('created_at = ', timestamp_to_datetime(created_at)).get()
if not backup:
raise Http404
context = dict(backup = backup)
response = HttpResponse(loader.render_to_string('gaebar/metadata.py', context), 'text/plain')
response['Content-Disposition'] = 'attachment; filename=metadata.py'
return response | 30ae381be8454a1df6b47fd5fc55af68f10e8b1f | 15,320 |
import torch
def contains_conv(module: torch.nn.Module) -> bool:
""" Returns `True` if given `torch.nn.Module` contains at least one convolution module/op (based on `deepcv.meta.nn.is_conv` for convolution definition) """
    return any(is_conv(m) for m in module.modules()) | 0f9ae25fa1189c9c576089c913a5d7d9e2739c78 | 15,321
def _construct_cell(empty=False):
"""Constructs a test cell."""
cell = scheduler.Cell('top')
if empty:
return cell
rack1 = scheduler.Bucket('rack:rack1', traits=0, level='rack')
rack2 = scheduler.Bucket('rack:rack2', traits=0, level='rack')
cell.add_node(rack1)
cell.add_node(rack2)
srv1 = scheduler.Server('srv1', [10, 20, 30], traits=1,
valid_until=1000, label='part')
srv2 = scheduler.Server('srv2', [10, 20, 30], traits=3,
valid_until=2000, label='part')
srv3 = scheduler.Server('srv3', [10, 20, 30], traits=0,
valid_until=3000, label='_default')
srv4 = scheduler.Server('srv4', [10, 20, 30], traits=0,
valid_until=4000, label='_default')
rack1.add_node(srv1)
rack1.add_node(srv2)
rack2.add_node(srv3)
rack2.add_node(srv4)
tenant1 = scheduler.Allocation()
cell.partitions['_default'].allocation.add_sub_alloc('t1', tenant1)
tenant11 = scheduler.Allocation()
tenant1.add_sub_alloc('t11', tenant11)
alloc1 = scheduler.Allocation([10, 10, 10], rank=100, traits=0)
tenant11.add_sub_alloc('a1', alloc1)
tenant2 = scheduler.Allocation()
cell.partitions['part'].allocation.add_sub_alloc('t2', tenant2)
alloc2 = scheduler.Allocation([10, 10, 10], rank=100, traits=3)
tenant2.add_sub_alloc('a2', alloc2)
return cell | c1b8016b8ff048ab0ecad8c69f960ce3d099bd8c | 15,322 |
def gaussian_kernel(F: np.ndarray) -> np.ndarray:
"""Compute dissimilarity matrix based on a Gaussian kernel."""
D = squared_dists(F)
return np.exp(-D/np.mean(D)) | 62f97009c791213255d8bdb4efc0fcfa60c20bb0 | 15,323 |
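# gaussian_kernel relies on a squared_dists helper that is not shown above; a
# plausible implementation (an assumption, not the original) computes pairwise
# squared Euclidean distances between the rows of F:
import numpy as np

def squared_dists(F: np.ndarray) -> np.ndarray:
    # ||f_i - f_j||^2 = ||f_i||^2 + ||f_j||^2 - 2 * <f_i, f_j>
    sq_norms = np.sum(F * F, axis=1)
    D = sq_norms[:, None] + sq_norms[None, :] - 2.0 * (F @ F.T)
    return np.maximum(D, 0.0)  # clip tiny negatives caused by round-off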
def _parse_yearweek(yearweek):
"""Utility function to convert internal string representations of calender weeks into datetime objects. Uses strings of format `<year>-KW<week>`. Weeks are 1-based."""
year, week = yearweek_regex.search(yearweek).groups()
# datetime.combine(isoweek.Week(int(year), int(week)).wednesday(),time(0))
return isoweek.Week(int(year), int(week)) | 319166595c506a73d125ed53a11433976aa4f106 | 15,324 |
def get_subpixel_indices(galtable, hpix=[], border=0.0, nside=0):
"""
Routine to get subpixel indices from a galaxy table.
Parameters
----------
galtable: `redmapper.Catalog`
A redmapper galaxy table master catalog
hpix: `list`, optional
Healpix number (ring format) of sub-region. Default is [] (full catalog)
border: `float`, optional
Border around hpix (in degrees) to find pixels. Default is 0.0.
Only works if hpix is a single-length list
nside: `int`, optional
Nside of healpix subregion. Default is 0 (full catalog).
Returns
-------
indices: `np.array`
Integer array of indices of galaxy table pixels in the subregion.
"""
if len(hpix) == 0 or nside == 0:
return np.arange(galtable.filenames.size)
theta, phi = hp.pix2ang(galtable.nside, galtable.hpix)
ipring_big = hp.ang2pix(nside, theta, phi)
_, indices = esutil.numpy_util.match(hpix, ipring_big)
# Ignore border if using full catalog
if border > 0.0 and len(hpix) > 0:
if len(hpix) != 1:
raise NotImplementedError("Cannot do boundary around a pixel list.")
# now we need to find the extra boundary...
boundaries = hp.boundaries(nside, hpix[0], step=galtable.nside // nside)
inhpix = galtable.hpix[indices]
for i in range(boundaries.shape[1]):
pixint = hp.query_disc(galtable.nside, boundaries[:, i],
border*np.pi/180., inclusive=True, fact=8)
inhpix = np.append(inhpix, pixint)
inhpix = np.unique(inhpix)
_, indices = esutil.numpy_util.match(inhpix, galtable.hpix)
return indices | 5a2d18f79ef8cc478752ef8059c71a512efced9f | 15,325 |
def is_common_secret_key(key_name: str) -> bool:
"""Return true if the key_name value matches a known secret name or pattern."""
if key_name in COMMON_SECRET_KEYS:
return True
return any(
[
key_name.lower().endswith(key_suffix)
for key_suffix in COMMON_SECRET_KEY_SUFFIXES
]
) | b0250f28638a0ad58a3a45dd8e333610fea378d5 | 15,326 |
def showgraphwidth(context, mapping):
"""Integer. The width of the graph drawn by 'log --graph' or zero."""
# just hosts documentation; should be overridden by template mapping
return 0 | 6e2fad8c80264a1030e5a113d66233c3adc28af8 | 15,328 |
def diff_last_filter(trail, key=lambda x: x['pid']):
""" Filter out trails with last two key different
"""
return trail if key(trail[-1]) != key(trail[-2]) else None | 82e67a98a1b09e11f2f1ebd76f470969b2dd1a51 | 15,329 |
def cpu_times():
"""Return a named tuple representing the following system-wide
CPU times:
(user, nice, system, idle, iowait, irq, softirq [steal, [guest,
[guest_nice]]])
Last 3 fields may not be available on all Linux kernel versions.
"""
procfs_path = get_procfs_path()
set_scputimes_ntuple(procfs_path)
with open_binary('%s/stat' % procfs_path) as f:
values = f.readline().split()
fields = values[1:len(scputimes._fields) + 1]
fields = [float(x) / CLOCK_TICKS for x in fields]
return scputimes(*fields) | 70dd518296bc873add8a7164446e908d80e74174 | 15,330 |
def calc_centeroid(x, network: DTClustering, n_clusters: int):
"""クラスタ中心を計算します.
Notes:
Input x: [batch, sequence, feature, 1]
Output: [n_clusters, hidden sequence, hidden feature, 1]
"""
code = network.encode(x)
feature = code.view(code.shape[0], -1) # [batch, sequence * feature]
feature = feature.detach().cpu().numpy()
km = cluster.KMeans(n_clusters=n_clusters, n_init=10)
km.fit(feature)
centers = km.cluster_centers_.reshape(n_clusters, code.shape[1], code.shape[2], 1)
centers = centers.astype(np.float32)
return centers | cf0a158d86105e34ad476dbfb7bc6ff911a65e52 | 15,331 |
def softXrayMono1(eV, k, m, c, rb_mm, bounce, inOff_deg, outOff_deg, verbose):
"""
# calculate premirror and grating angles for NSLS-II soft xray monos
# eV: energy
# k: central line density in mm-1
# m: diffraction order
# c: cff 0 < cff < infinity
# bounce = 'up' or 'down'
# inOff_deg - input beam angle relative to horizontal, NSLSII sense
# outOff_deg - output beam angle relative to horizontal, NSLSII sense
"""
# correct for several energies for Centurion
# correctly reverses sign of angles if geometry is flipped upside-down
# consider bounce direction
if bounce == "up":
a = -1
elif bounce == "down":
a = +1
else:
a = float("nan")
# calculate angles, no offsets
alpha_deg = ruben2005eqn8m(eV, c, k, m)
beta_deg = getBetaDeg(eV, alpha_deg, k, m)
# include offsets
thetaPMinc_deg = abs(
+0.5 * (outOff_deg - inOff_deg + a * (180.0 - alpha_deg + beta_deg))
)
thetaPM_deg = +0.5 * (outOff_deg + inOff_deg + a * (180.0 - alpha_deg + beta_deg))
thetaGR_deg = a * (90.0 + beta_deg) + outOff_deg
disp = getLinDisp(alpha_deg, beta_deg, k, m, rb_mm)
if verbose:
# alpha, beta both relative to normal and surface
print("eV=", eV, "c=", c)
print("alpha=", alpha_deg, 90.0 - alpha_deg)
print("beta=", beta_deg, (90 + beta_deg))
print("incident angle on pm=", thetaPMinc_deg)
print("dispersion (eV/mm) =", disp)
# grating and premirror rotation angles
print("rotation angles relative to horizontal")
print(" premirror", thetaPM_deg)
print(" grating", thetaGR_deg)
return (thetaPM_deg, thetaGR_deg, alpha_deg, beta_deg, thetaPMinc_deg, disp) | 3309b8ec7e3f5433025c4c676bf2966281df4d02 | 15,333 |
def createHelmholtz3dExteriorCalderonProjector(
context, hminusSpace, hplusSpace, waveNumber,
label=None, useInterpolation=False, interpPtsPerWavelength=5000):
"""
Create and return the exterior Calderon projector for the
Helmholtz equation in 3D.
*Parameters:*
- context (Context)
A Context object to control the assembly of the weak form of the
newly constructed operator.
- hminusSpace (Space)
Function space representing functions in H^{-1/2}.
- hplusSpace (Space)
Function space representing functions in H^{+1/2}.
- waveNumber (float or complex)
Wave number, i.e. the number k in the Helmholtz equation
nabla^2 u + k^2 u = 0.
- label (string)
Textual label of the operator. If set to None (default), a unique
label will be generated automatically.
- useInterpolation (bool)
If set to False (default), the standard exp() function will be used
to evaluate the exponential factor occurring in the kernel. If set
to True, the exponential factor will be evaluated by piecewise-cubic
interpolation of values calculated in advance on a regular
grid. This normally speeds up calculations, but might result in a
loss of accuracy. This is an experimental feature: use it at your
own risk.
- interpPtsPerWavelength (int)
If useInterpolation is set to True, this parameter determines the
number of points per "effective wavelength" (defined as 2 pi /
abs(waveNumber)) used to construct the interpolation grid. The
default value (5000) is normally enough to reduce the relative or
absolute error, *whichever is smaller*, below 100 * machine
precision. If useInterpolation is set to False, this parameter is
ignored.
*Returns* a newly constructed BoundaryOperator_BasisFunctionType_ResultType
object, with BasisFunctionType and ResultType determined automatically from
the context argument and equal to either float32, float64, complex64 or
complex128.
"""
basisFunctionType = context.basisFunctionType()
if (basisFunctionType != hminusSpace.basisFunctionType() or
basisFunctionType != hplusSpace.basisFunctionType()):
raise TypeError("BasisFunctionType of context and all spaces must be the same")
resultType = context.resultType()
# construct object
if not label:
label = ""
result = _constructObjectTemplatedOnBasis(
core, 'helmholtz3dExteriorCalderonProjector', basisFunctionType,
context, hminusSpace, hplusSpace, waveNumber, label,
useInterpolation, interpPtsPerWavelength)
result._context = context
result._hminusSpace = hminusSpace
result._hplusSpace = hplusSpace
return result | bfb6c139787355d07cb8c82475f34dcc349c55f3 | 15,334 |
def prepare_xrf_map(data, chunk_pixels=5000, n_chunks_min=4):
"""
    Convert XRF map from its initial representation to a properly chunked Dask array.
Parameters
----------
data: da.core.Array, np.ndarray or RawHDF5Dataset (this is a custom type)
Raw XRF map represented as Dask array, numpy array or reference to a dataset in
HDF5 file. The XRF map must have dimensions `(ny, nx, ne)`, where `ny` and `nx`
define image size and `ne` is the number of spectrum points
chunk_pixels: int
The number of pixels in a single chunk. The XRF map will be rechunked so that
each block contains approximately `chunk_pixels` pixels and contain all `ne`
spectrum points for each pixel.
n_chunks_min: int
Minimum number of chunks. The algorithm will try to split the map into the number
of chunks equal or greater than `n_chunks_min`. If HDF5 dataset is not chunked,
then the whole map is treated as one chunk. This should happen only to very small
files, so parallelism is not important.
Returns
-------
data: da.core.Array
XRF map represented as Dask array with proper chunk size. The XRF map may be loaded
block by block when processing using `dask.array.map_blocks` and `dask.array.blockwise`
functions with Dask multiprocessing scheduler.
file_obj: h5py.File object
File object that points to HDF5 file. `None` if input parameter `data` is Dask or
numpy array. Note, that `file_obj` must be kept alive until processing is completed.
Closing the file will invalidate references to the dataset in the respective
Dask array.
Raises
------
TypeError if input parameter `data` is not one of supported types.
"""
file_obj = None # It will remain None, unless 'data' is 'RawHDF5Dataset'
if isinstance(data, da.core.Array):
chunk_size = _compute_optimal_chunk_size(
chunk_pixels=chunk_pixels,
data_chunksize=data.chunksize[0:2],
data_shape=data.shape[0:2],
n_chunks_min=n_chunks_min,
)
data = data.rechunk(chunks=(*chunk_size, data.shape[2]))
elif isinstance(data, np.ndarray):
data = _array_numpy_to_dask(data, chunk_pixels=chunk_pixels, n_chunks_min=n_chunks_min)
elif isinstance(data, RawHDF5Dataset):
fpath, dset_name = data.abs_path, data.dset_name
# Note, that the file needs to remain open until the processing is complete !!!
file_obj = h5py.File(fpath, "r")
dset = file_obj[dset_name]
if dset.ndim != 3:
raise TypeError(
f"Dataset '{dset_name}' in file '{fpath}' has {dset.ndim} dimensions: 3D dataset is expected"
)
ny, nx, ne = dset.shape
if dset.chunks:
chunk_size = _compute_optimal_chunk_size(
chunk_pixels=chunk_pixels,
data_chunksize=dset.chunks[0:2],
data_shape=(ny, nx),
n_chunks_min=n_chunks_min,
)
else:
# The data is not chunked. Process data as one chunk.
chunk_size = (ny, nx)
data = da.from_array(dset, chunks=(*chunk_size, ne))
else:
raise TypeError(f"Type of parameter 'data' is not supported: type(data)={type(data)}")
return data, file_obj | a8c4b442f367759237f77571c51e98bd1cc9d53a | 15,335 |
def colorbar_factory(cax, mappable, **kwargs):
"""
Create a colorbar on the given axes for the given mappable.
.. note::
This is a low-level function to turn an existing axes into a colorbar
axes. Typically, you'll want to use `~.Figure.colorbar` instead, which
automatically handles creation and placement of a suitable axes as
well.
Parameters
----------
cax : `~matplotlib.axes.Axes`
The `~.axes.Axes` to turn into a colorbar.
mappable : `~matplotlib.cm.ScalarMappable`
The mappable to be described by the colorbar.
**kwargs
Keyword arguments are passed to the respective colorbar class.
Returns
-------
`.Colorbar`
The created colorbar instance.
"""
return Colorbar(cax, mappable, **kwargs) | 37b0198ea77db887d92ee4fd45e6df73d49f4223 | 15,336 |
import time
def fourth_measurer_I_R(uniquePairsDf):
"""
fourth_measurer_I_R: computes the measure I_R that is based on the minimal number of tuples that should
be removed from the database for the constraints to hold.
The measure is computed via an ILP and the Gurobi optimizer is used to solve the ILP.
- There is a binary variable x for every tuple in the database.
    - The constraints are of the form x + y >= 1 where x and y represent two tuples that jointly violate a constraint.
- The objective function is to minimize the sum of all x's.
Parameters
----------
uniquePairsDf : dataframe
the result of the query that finds all pairs of tuples that jointly violate a constraint.
Returns
-------
list of two int variables:
database_measurer.objVal is the minimal number of tuples that should be removed for the constraints to hold.
end1 - start is the running time of the function.
"""
start = time.time()
rows_violations = uniquePairsDf.values
varsDict2 = {}
database_measurer = gp.Model('Minimal deletions of tuples')
database_measurer.setParam('OutputFlag', 0) # do not show any comments on the screen
# variables
for i in rows_violations :
varsDict2[i[0]] = database_measurer.addVar(vtype=GRB.BINARY, name="x")
varsDict2[i[1]] = database_measurer.addVar(vtype=GRB.BINARY, name="x")
# constraints
for i in rows_violations :
database_measurer.addConstr(varsDict2[i[0]]+varsDict2[i[1]]>=1, name='con')
vars= []
for i in varsDict2:
vars.append(varsDict2[i])
# objective function
database_measurer.setObjective(sum(vars), GRB.MINIMIZE)
opt = database_measurer.optimize()
end1 = time.time()
return database_measurer.objVal , end1 - start | a8e29e0a70dfd2e2a4c151ca25b2f7fd528e25f3 | 15,337 |
def is_sublist_equal(list_one, list_two):
"""
Compare the values of two lists of equal length.
:param list_one: list - A list
:param list_two: list - A different list
:return EQUAL or UNEQUAL - If all values match, or not.
>>> is_sublist_equal([0], [0])
EQUAL
>>> is_sublist_equal([1], [0])
UNEQUAL
Iterate over values in each list and compare them
Assumes lists are of equal sizes
"""
for index, value in enumerate(list_one):
if value != list_two[index]:
return UNEQUAL
# Otherwise, all values matched, so it's equal
return EQUAL | 717b4287e212498ef85719fbf4d8e5437f16db48 | 15,338 |
def black_box_function(x, y):
"""Function with unknown internals we wish to maximize.
This is just serving as an example, for all intents and
purposes think of the internals of this function, i.e.: the process
which generates its output values, as unknown.
"""
return -x ** 2 - (y - 1) ** 2 + 1 | 962c0dd5638ac71ee375f4bb1ba07b2bd241a6e8 | 15,339 |
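# A usage sketch with the bayesian-optimization package, which this example
# function is typically paired with (an assumption; the package must be
# installed separately):
from bayes_opt import BayesianOptimization

optimizer = BayesianOptimization(
    f=black_box_function,
    pbounds={"x": (2, 4), "y": (-3, 3)},  # search bounds for each argument
    random_state=1,
)
optimizer.maximize(init_points=2, n_iter=3)
print(optimizer.max)  # best parameters found and the corresponding target value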
def file_extension(path):
"""Lower case file extension."""
return audeer.file_extension(path).lower() | 264f8afd0a2328d342693b2ec893706760b5c7ae | 15,340 |
import math
def motion(x, u, dt):
"""
motion model
"""
x[2] += u[1] * dt
x[0] += u[0] * math.cos(x[2]) * dt
x[1] += u[0] * math.sin(x[2]) * dt
x[3] = u[0]
x[4] = u[1]
return x | e33adae2a6c5934dc7e0662570c42292eacbfd89 | 15,342 |
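# A short usage example (not from the original source): the state vector is
# [x, y, yaw, v, yaw_rate]; apply a constant control u = [v, yaw_rate] for a
# few steps of 0.1 s each.
state = [0.0, 0.0, 0.0, 0.0, 0.0]
u = [1.0, 0.1]  # 1 m/s forward speed, 0.1 rad/s turn rate
for _ in range(10):
    state = motion(state, u, 0.1)
print(state)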
from typing import Union
from typing import Callable
def sweep(
sweep: Union[dict, Callable], entity: str = None, project: str = None,
) -> str:
"""Initialize a hyperparameter sweep.
To generate hyperparameter suggestions from the sweep and use them
to train a model, call `wandb.agent` with the sweep_id returned by
this command. For command line functionality, see the command line
tool `wandb sweep` (https://docs.wandb.ai/ref/cli/wandb-sweep).
Args:
sweep: dict, SweepConfig, or callable. The sweep configuration
(or configuration generator). If a dict or SweepConfig,
should conform to the W&B sweep config specification
(https://docs.wandb.ai/guides/sweeps/configuration). If a
callable, should take no arguments and return a dict that
conforms to the W&B sweep config spec.
entity: str (optional). An entity is a username or team name
where you're sending runs. This entity must exist before you
can send runs there, so make sure to create your account or
team in the UI before starting to log runs. If you don't
specify an entity, the run will be sent to your default
entity, which is usually your username. Change your default
entity in [Settings](wandb.ai/settings) under "default
location to create new projects".
project: str (optional). The name of the project where you're
sending the new run. If the project is not specified, the
run is put in an "Uncategorized" project.
Returns:
sweep_id: str. A unique identifier for the sweep.
Examples:
Basic usage
<!--yeadoc-test:one-parameter-sweep-->
```python
import wandb
sweep_configuration = {
"name": "my-awesome-sweep",
"metric": {"name": "accuracy", "goal": "maximize"},
"method": "grid",
"parameters": {
"a": {
"values": [1, 2, 3, 4]
}
}
}
def my_train_func():
# read the current value of parameter "a" from wandb.config
wandb.init()
a = wandb.config.a
wandb.log({"a": a, "accuracy": a + 1})
sweep_id = wandb.sweep(sweep_configuration)
# run the sweep
wandb.agent(sweep_id, function=my_train_func)
```
"""
if callable(sweep):
sweep = sweep()
"""Sweep create for controller api and jupyter (eventually for cli)."""
if entity:
env.set_entity(entity)
if project:
env.set_project(project)
# Make sure we are logged in
wandb_login._login(_silent=True)
api = InternalApi()
sweep_id, warnings = api.upsert_sweep(sweep)
handle_sweep_config_violations(warnings)
print("Create sweep with ID:", sweep_id)
sweep_url = _get_sweep_url(api, sweep_id)
if sweep_url:
print("Sweep URL:", sweep_url)
return sweep_id | 50ba0d79a8fca5d5eba08b4e739845b797c0c839 | 15,343 |
def connection_end_point(topo_uuid, node_uuid, nep_uuid, cep_uuid):
    """Retrieve ConnectionEndPoint by ID
    :param topo_uuid: ID of Topology
    :type topo_uuid: str
:param node_uuid: ID of Node
:type node_uuid: str
:param nep_uuid: ID of NodeEdgePoint
:type nep_uuid: str
:param cep_uuid: ID of ConnectionEndPoint
:type cep_uuid: str
:rtype: ConnectionEndPoint
"""
for topo in context.topology_context.topology:
if topo.uuid == topo_uuid:
for node in topo.node:
if node.uuid == node_uuid:
for nep in node.owned_node_edge_point:
if nep.uuid == nep_uuid:
for cep in nep.cep_list.connection_end_point:
if cep.uuid == cep_uuid:
return cep | 76dc345732d3209730b6022ba12cb2ca191e4a40 | 15,344 |
def remove_melt_from_perplex(perplex,melt_percent=-1):
""" Extrapolate high temperature values to remove melt content using sub-solidus values.
The assumption is that alpha and beta are constant and temperature-independent at high temperature."""
Tref = 273
Pref = 0
rho = perplex.rho.reshape( int(perplex.np), int(perplex.nt))
rhoresidual = perplex.rhoresidual.reshape( int(perplex.np), int(perplex.nt))
rhomelt = perplex.rhomelt.reshape( int(perplex.np), int(perplex.nt))
T = perplex.T.reshape( int(perplex.np), int(perplex.nt))
P = perplex.P.reshape( int(perplex.np), int(perplex.nt))
alpha = perplex.alpha.reshape(int(perplex.np), int(perplex.nt))
alpharesidual = perplex.alpharesidual.reshape(int(perplex.np), int(perplex.nt))
alphamelt = perplex.alphamelt.reshape(int(perplex.np), int(perplex.nt))
beta = perplex.beta.reshape( int(perplex.np), int(perplex.nt))
betaresidual = perplex.betaresidual.reshape( int(perplex.np), int(perplex.nt))
betamelt = perplex.betamelt.reshape( int(perplex.np), int(perplex.nt))
cp = perplex.cp.reshape( int(perplex.np), int(perplex.nt))
cpmelt = perplex.cpmelt.reshape( int(perplex.np), int(perplex.nt))
cpresidual = perplex.cpresidual.reshape( int(perplex.np), int(perplex.nt))
melt = perplex.melt.reshape( int(perplex.np), int(perplex.nt))
    # smoothing alpha and beta along the boundaries to avoid unsuitable vertical discontinuities
n_smooth = 3
rho_smooth = []
rhomelt_smooth = []
rhoresidual_smooth = []
alpha_smooth = []
beta_smooth = []
cp_smooth = []
alphamelt_smooth = []
betamelt_smooth = []
cpmelt_smooth = []
alpharesidual_smooth = []
betaresidual_smooth = []
cpresidual_smooth = []
i_smooth = 0
i_int = 0
#alpha_beta_values = False
for j in range(0,int(perplex.np)):
if (melt_percent<0):
are_values = False
for i in range(int(perplex.nt)-1,-1,-1):
#print('None T {} P {} melt {}'.format(T[j,i],P[j,i],melt[j,i]))
if ( melt[j,i] > 0.0e0 ):
#print('None T {} P {}'.format(T[j,i],P[j,i]))
pass
else:
if (i_smooth<n_smooth):
alpha_smooth.append(alpha[j,i])
beta_smooth.append(beta[j,i])
cp_smooth.append(cp[j,i])
cpmelt_smooth.append(cpmelt[j,i])
cpresidual_smooth.append(cpresidual[j,i])
alphamelt_smooth.append(alphamelt[j,i])
betamelt_smooth.append(betamelt[j,i])
alpharesidual_smooth.append(alpharesidual[j,i])
betaresidual_smooth.append(betaresidual[j,i])
rho_smooth.append(rho[j,i])
rhomelt_smooth.append(rhomelt[j,i])
rhoresidual_smooth.append(rhoresidual[j,i])
i_smooth = i_smooth + 1
else:
alpha_smooth[i_int] = alpha[j,i]
beta_smooth[i_int] = beta[j,i]
cp_smooth[i_int] = cp[j,i]
cpmelt_smooth[i_int] = cpmelt[j,i]
cpresidual_smooth[i_int] = cpresidual[j,i]
alphamelt_smooth[i_int] = alphamelt[j,i]
betamelt_smooth[i_int] = betamelt[j,i]
alpharesidual_smooth[i_int] = alpharesidual[j,i]
betaresidual_smooth[i_int] = betaresidual[j,i]
rho_smooth[i_int] = rho[j,i]
rhomelt_smooth[i_int] = rhomelt[j,i]
rhoresidual_smooth[i_int] = rhoresidual[j,i]
i_int = i_int + 1
if (i_int>=n_smooth):
i_int = 0
alpha_used = sum(alpha_smooth)/len(alpha_smooth)
beta_used = sum(beta_smooth)/len(beta_smooth)
cp_used = sum(cp_smooth)/len(cp_smooth)
rho_ref = sum(rho_smooth)/len(rho_smooth) / ( (1+beta_used*(P[j,i]-Pref)) * (1-alpha_used*(T[j,i]-Tref)) )
alpha_used_melt = sum(alphamelt_smooth)/len(alphamelt_smooth)
beta_used_melt = sum(betamelt_smooth)/len(betamelt_smooth)
cp_used_melt = sum(cpmelt_smooth)/len(cpmelt_smooth)
rho_ref_melt = sum(rhomelt_smooth)/len(rhomelt_smooth) / ( (1+beta_used_melt*(P[j,i]-Pref)) * (1-alpha_used_melt*(T[j,i]-Tref)) )
alpha_used_residual = sum(alpharesidual_smooth)/len(alpharesidual_smooth)
beta_used_residual = sum(betaresidual_smooth)/len(betaresidual_smooth)
cp_used_residual = sum(cpresidual_smooth)/len(cpresidual_smooth)
rho_ref_residual = sum(rhoresidual_smooth)/len(rhoresidual_smooth) / ( (1+beta_used_residual*(P[j,i]-Pref)) * (1-alpha_used_residual*(T[j,i]-Tref)) )
#if ( not alpha_beta_values):
# # we use low pressure value for alpha and beta - upper-bound estimation of it then
# alpha_used = alpha[j,i]
# beta_used = beta[j,i]
# alpha_beta_values = True
#rho_ref = rho[j,i] / ( (1+beta_used*(P[j,i]-Pref)) * (1-alpha_used*(T[j,i]-Tref)) )
melt_ref = 0.0e0
are_values = True
break
if (are_values):
for i in range(int(perplex.nt)-1,-1,-1):
if ( melt[j,i] > 0.0e0 ):
# rho[j,i] = rho_ref*(1+beta_used*(P[j,i]-Pref))*(1-alpha_used*(T[j,i]-Tref))
rho[j,i] = rho_ref*(1+betaresidual[j,i]*(P[j,i]-Pref))*(1-alpharesidual[j,i]*(T[j,i]-Tref))
#alpha[j,i] = alpha_used
#beta[j,i] = beta_used
# we do not extrapolate alpha and beta but only rho_ref
# we keep alpha and beta from residual in order to keep them P,T dependant
alpha[j,i] = alpharesidual[j,i]
beta[j,i] = betaresidual[j,i]
cp[j,i] = cpresidual[j,i]
melt[j,i] = melt_ref
rhomelt[j,i] = float('nan')
alphamelt[j,i] = float('nan')
betamelt[j,i] = float('nan')
cpmelt[j,i] = float('nan')
else:
melt[j,i] = melt_ref
rhomelt[j,i] = float('nan')
alphamelt[j,i] = float('nan')
betamelt[j,i] = float('nan')
cpmelt[j,i] = float('nan')
break
else:
for i in range(int(perplex.nt)-1,-1,-1):
# print('melt[j,i] {}'.format(melt[j,i]))
if (melt[j,i]>melt_percent/100.0e0):
melt[j,i] = melt_percent/100.0e0
rho[j,i] = rhoresidual[j,i]*(100.0e0-melt_percent)/100.0e0 + rhomelt[j,i]*melt_percent/100.0e0
alpha[j,i] = alpharesidual[j,i]*(100.0e0-melt_percent)/100.0e0 + alphamelt[j,i]*melt_percent/100.0e0
beta[j,i] = betaresidual[j,i]*(100.0e0-melt_percent)/100.0e0 + betamelt[j,i]*melt_percent/100.0e0
cp[j,i] = cpresidual[j,i]*(100.0e0-melt_percent)/100.0e0 + cpmelt[j,i]*melt_percent/100.0e0
if (np.isnan(rho[j,i])):
print('NaN melt {} rho {} rhoresidual {} rhomelt {} alpha {} beta {}'.format(
melt[j,i],rho[j,i],rhoresidual[j,i], rhomelt[j,i], alpha[j,i], beta[j,i]))
quit()
perplex.rho = rho.reshape(perplex.np*perplex.nt)
perplex.T = T.reshape(perplex.np*perplex.nt)
perplex.P = P.reshape(perplex.np*perplex.nt)
perplex.alpha = alpha.reshape(perplex.np*perplex.nt)
perplex.beta = beta.reshape(perplex.np*perplex.nt)
perplex.cp = cp.reshape(perplex.np*perplex.nt)
perplex.melt = melt.reshape(perplex.np*perplex.nt)
perplex.melt = np.zeros_like(perplex.melt)
perplex.rhomelt = rhomelt.reshape(perplex.np*perplex.nt)
perplex.alphamelt = alphamelt.reshape(perplex.np*perplex.nt)
perplex.betamelt = betamelt.reshape(perplex.np*perplex.nt)
perplex.cpmelt = cpmelt.reshape(perplex.np*perplex.nt)
perplex.rhoresidual = rhoresidual.reshape(perplex.np*perplex.nt)
perplex.alpharesidual = alpharesidual.reshape(perplex.np*perplex.nt)
perplex.betaresidual = betaresidual.reshape(perplex.np*perplex.nt)
perplex.cpresidual = cpresidual.reshape(perplex.np*perplex.nt)
return perplex | 6d2473d7147cdecdcd64cbb7e3beafd3b5df5c6a | 15,345 |
def similarity_score(text_small, text_large, min_small = 10, min_large = 50):
"""
complexity: len(small) * len(large)
@param text_small: the smaller text
(in this case the text which's validity is being checked)
@param text_large: the larger text (in this case the scientific study)
returns: a number (-1 <= n <= 100) representing the similarity
-1 if the data isn't populated enough for reliability
"""
# cleaning text:
filtered_small = clean(text_small)
filtered_large = clean(text_large)
fSmallLen = len(filtered_small)
fLargeLen = len(filtered_large)
if (fSmallLen < min_small) or (fLargeLen < min_large): return -1
max_rating = fLargeLen * fSmallLen
hits = 0
for sm_word in filtered_small:
for big_word in filtered_large:
if sm_word == big_word: hits += 1
return 100. * hits / max_rating | 8449b5273909382225f9de43d8fb936424d1a43e | 15,346 |
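# similarity_score relies on a clean() helper that is not shown above; a plausible
# implementation (an assumption, not the original) lower-cases, tokenises and
# drops common stop words:
import re

_STOP_WORDS = {"the", "a", "an", "and", "or", "of", "to", "in", "is", "it"}

def clean(text):
    words = re.findall(r"[a-z']+", text.lower())
    return [w for w in words if w not in _STOP_WORDS]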
def abline(a_coords, b_coords, ax=None, **kwargs):
"""Draw a line connecting a point `a_coords` with a point `b_coords`.
Parameters
----------
a_coords : array-like, shape (2,)
xy coordinates of the start of the line.
b_coords : array-like, shape(2,)
xy coordiantes of the end of the line.
ax : matplotlib axis
Axe to plot the line
**kwargs : dict
Arguments to pass along to the matplotlib `plot` function.
"""
if ax is None:
ax = plt.gca()
line_start, line_end = list(zip(a_coords, b_coords))
line, = ax.plot(line_start, line_end, **kwargs)
return line | e262b689046ac5dd75152b8472a841c7a1e5db29 | 15,347 |
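# A short usage example (not from the original source): draw a dashed black
# segment from (0, 0) to (1, 2) on a new axes.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
abline((0, 0), (1, 2), ax=ax, linestyle="--", color="k")
plt.show()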
def get_extensions():
"""
Returns supported extensions of the DCC
:return: list(str)
"""
return ['.hip', '.hiplc', '.hipnc', '.hip*'] | 414391db5cd4f8989967100bae347e741ca4b46c | 15,348 |
def calc_spatially_diffusion_factors(
regions,
fuel_disagg,
real_values,
low_congruence_crit,
speed_con_max,
p_outlier
):
"""
Calculate spatial diffusion values
Arguments
---------
regions : dict
Regions
fuel_disagg : dict
Disaggregated fuel per region
real_values : dict
Real values
p_outlier : float
Percentage of min and max outliers are flattened
Returns
-------
f_reg_norm_abs : dict
Diffusion values with normed population. If no value
is larger than 1, the total sum of all shares calculated
for every region is identical to the defined scenario variable.
spatial_diff_values : dict
Spatial diffusion values (not normed, only considering differences
in speed and congruence values)
Explanation
============
(I) Load diffusion values
(II) Calculate diffusion factors
(III) Calculate sigmoid diffusion values for technology
specific enduse service shares for every region
"""
# -----
# I. Diffusion diffusion values
# -----
spatial_diff_values = spatial_diffusion_values(
regions=regions,
real_values=real_values,
speed_con_max=speed_con_max,
low_congruence_crit=low_congruence_crit,
p_outlier=p_outlier)
# -----
# II. Calculation of diffusion factors (Not weighted with demand)
# -----
# Not weighted with demand
max_value_diffusion = max(list(spatial_diff_values.values()))
f_reg = {}
for region in regions:
f_reg[region] = spatial_diff_values[region] / max_value_diffusion
# Weighted with demand
f_reg_norm_abs, f_reg_norm = calc_diffusion_f(
regions,
f_reg,
spatial_diff_values,
[fuel_disagg['residential'], fuel_disagg['service'], fuel_disagg['industry']])
return f_reg, f_reg_norm, f_reg_norm_abs | 95361bb3f8ba5d3d47cd1a4ad065ec857e291f7b | 15,350 |
def get_set(path):
"""Returns a matrix of data given the path to the CSV file. The heading row and NaN values are excluded."""
df = pd.read_csv(path, sep=';', encoding='latin')
return df.dropna(subset=['PMID1', 'PMID2', 'Authorship'], how='any').values | aa701f440a9535d534826a50e8803fa0095bda25 | 15,351 |
def gaussian_target(img_shape, t, MAX_X=0.85, MIN_X=-0.85, MAX_Y=0.85, MIN_Y=-0.85, sigma2=10):
"""
Create a gaussian bivariate tensor for target or robot position.
:param t: (th.Tensor) Target position (or robot position)
"""
X_range = img_shape[1]
Y_range = img_shape[2]
XY_range = np.arange(X_range*Y_range)
for i in range(t.size(0)):
X_t = int((MAX_X+t[i][1])*(img_shape[1]/(MAX_X-MIN_X)))
Y_t = int((MAX_Y-t[i][0])*(img_shape[2]/(MAX_Y-MIN_Y)))
bi_var_gaussian = -0.5 * (((XY_range // X_range)- X_t)**2 + (XY_range - (XY_range//Y_range)*Y_range - Y_t)**2)/sigma2
img_target = th.from_numpy((np.exp(bi_var_gaussian)/(2*np.pi*sigma2)).reshape(X_range, Y_range))
img_target = img_target[None,...][None,...]
if i==0: output = img_target
else: output = th.cat([output,img_target],0)
return output | 47fbb46e2e46b1a4cc2cec3906e9c0dfb5282c0e | 15,354 |
def XMLToPython (pattern):
"""Convert the given pattern to the format required for Python
regular expressions.
@param pattern: A Unicode string defining a pattern consistent
with U{XML regular
expressions<http://www.w3.org/TR/xmlschema-2/index.html#regexs>}.
@return: A Unicode string specifying a Python regular expression
that matches the same language as C{pattern}."""
new_pattern_elts = []
new_pattern_elts.append('^')
position = 0
while position < len(pattern):
cg = MaybeMatchCharacterClass(pattern, position)
if cg is None:
new_pattern_elts.append(pattern[position])
position += 1
else:
(cps, position) = cg
new_pattern_elts.append(cps.asPattern())
new_pattern_elts.append('$')
return ''.join(new_pattern_elts) | 14072879e11ea0425903be314fdba6fb8bfd2538 | 15,355 |
import fcntl
import termios
import struct
def __termios(fd):
"""Try to discover terminal width with fcntl, struct and termios."""
#noinspection PyBroadException
try:
cr = struct.unpack('hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except Exception:
return None
return cr | 78f3450d65a453cfd22c575bbddb77fcfbef1496 | 15,356 |