content (stringlengths 35–762k) | sha1 (stringlengths 40–40) | id (int64 0–3.66M)
---|---|---|
def do_flake8() -> str:
"""
Flake8 Checks
"""
command = "flake8"
check_command_exists(command)
command_text = f"flake8 --config {settings.CONFIG_FOLDER}/.flake8"
command_text = prepinform_simple(command_text)
execute(*(command_text.split(" ")))
return "flake 8 succeeded"
|
1ffaf0ecfd5905f68a9136c597f56c6c86b8d5cb
| 24,577 |
def counter_current_heat_exchange(s0_in, s1_in, s0_out, s1_out,
dT, T_lim0=None, T_lim1=None,
phase0=None, phase1=None,
H_lim0=None, H_lim1=None):
"""
Allow outlet streams to exchange heat until either the given temperature
limits or the pinch temperature and return the total heat transfer
[Q; in kJ/hr].
"""
# Counter current heat exchange setup:
# First find the hot inlet, cold inlet, hot outlet and cold outlet streams
# along with the maximum temperature approaches for the hotside and the
# cold side.
if s0_in.T > s1_in.T:
s_hot_in = s0_in
s_cold_in = s1_in
s_hot_out = s0_out
s_cold_out = s1_out
T_lim_coldside = T_lim0
T_lim_hotside = T_lim1
H_lim_coldside = H_lim0
H_lim_hotside = H_lim1
phase_coldside = phase0
phase_hotside = phase1
else:
s_cold_in = s0_in
s_hot_in = s1_in
s_cold_out = s0_out
s_hot_out = s1_out
T_lim_hotside = T_lim0
T_lim_coldside = T_lim1
H_lim_hotside = H_lim0
H_lim_coldside = H_lim1
phase_hotside = phase0
phase_coldside = phase1
if (s_hot_in.T - s_cold_in.T) <= dT: return 0. # No heat exchange
T_pinch_coldside = s_cold_in.T + dT
if T_lim_coldside:
if T_lim_coldside > s_hot_in.T:
return 0. # No heat exchange
else:
T_lim_coldside = max(T_pinch_coldside, T_lim_coldside)
else:
T_lim_coldside = T_pinch_coldside
T_pinch_hotside = s_hot_in.T - dT
if T_lim_hotside:
if T_lim_hotside < s_cold_in.T:
return 0. # No heat exchange
else:
T_lim_hotside = min(T_pinch_hotside, T_lim_hotside)
else:
T_lim_hotside = T_pinch_hotside
# Find which side reaches the pinch first by selecting the side that needs
# the least heat transfer to reach the pinch.
# Pinch on the cold side
Q_hot_stream = heat_exchange_to_condition(s_hot_in, s_hot_out,
T_lim_coldside, phase_coldside,
H_lim_coldside, heating=False)
# Pinch on the hot side
Q_cold_stream = heat_exchange_to_condition(s_cold_in, s_cold_out,
T_lim_hotside, phase_hotside,
H_lim_hotside, heating=True)
if Q_hot_stream == Q_cold_stream == 0.:
        s0_out.copy_like(s0_in)
        s1_out.copy_like(s1_in)  # reset both outlets to match their inlets; no exchange occurred
return 0.
if Q_hot_stream > 0 or Q_cold_stream < 0:
# Sanity check
if Q_hot_stream / s_hot_in.C < 0.1 or Q_cold_stream / s_cold_in.C > -0.1:
            s0_out.copy_like(s0_in)
            s1_out.copy_like(s1_in)  # reset both outlets to match their inlets; no exchange occurred
return 0.
raise RuntimeError('inlet stream not in vapor-liquid equilibrium')
if Q_cold_stream < -Q_hot_stream:
# Pinch on the hot side
Q = Q_cold_stream
if phase_coldside:
s_hot_out.H = s_hot_in.H - Q
else:
s_hot_out.vle(H=s_hot_in.H - Q, P=s_hot_out.P)
else:
# Pinch on the cold side
Q = Q_hot_stream
if phase_hotside:
s_cold_out.H = s_cold_in.H - Q
else:
s_cold_out.vle(H=s_cold_in.H - Q, P=s_cold_out.P)
return abs(Q)
|
e5654666a56ebd0e32fd3abcde472e138a510d6e
| 24,578 |
import numpy as np
def ReadCOSx1dsumSpectrum(filename):
    """
    filename with full path
    Purpose is to have other variations
    of files and a different way of reading them in.
    """
wave,flux,dfp,dfm = np.loadtxt(filename,unpack=True,usecols=[0,1,4,5])
return np.array([wave,flux,dfp,dfm])
|
a74a76a787ba3f0665c8f73d602e5259fa4828ac
| 24,579 |
import cmath
import math
def op_atanh(x):
"""Returns the inverse hyperbolic tangent of this mathematical object."""
if isinstance(x, list):
return [op_atanh(a) for a in x]
elif isinstance(x, complex):
return cmath.atanh(x)
else:
return math.atanh(x)
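# A small usage sketch with assumed example inputs (not from the original module):
print(op_atanh(0.5))          # 0.549306..., via math.atanh
print(op_atanh([0.5, -0.5]))  # applied elementwise to lists
print(op_atanh(1 + 1j))       # complex inputs are routed through cmath.atanh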
|
515da3d653f9ab4df6d87f5cec7d021ac2c98da9
| 24,581 |
from typing import Mapping
from typing import Any
from typing import Callable
import warnings
def find_intersections(
solutions: Mapping[Any, Callable],
ray_direction: Array,
target_center: Array,
) -> dict:
"""
find intersections between ray_direction and target_center given a mapping
of functions (like output of `solutions.make_ray_sphere_lambdas`)
"""
# suppress irrelevant warnings about imaginary values
    with warnings.catch_warnings():  # must be called to obtain a context manager
warnings.simplefilter("ignore")
return {
coordinate: solution(*ray_direction, *target_center)
for coordinate, solution in solutions.items()
}
|
a738fd0521a853c0be52bbf26d63ef515208b37a
| 24,582 |
import numpy as np
from scipy.spatial.distance import pdist
def Circum_O_R(vertex_pos, tol):
    """
    Function finds the center and the radius of the circumsphere of every tetrahedron.
Reference:
Fiedler, Miroslav. Matrices and graphs in geometry. No. 139. Cambridge University Press, 2011.
Parameters
-----------------
vertex_pos :
The position of vertices of a tetrahedron
tol :
Tolerance defined to identify co-planar tetrahedrons
Returns
----------
circum_center :
The center of the circum-sphere
circum_rad :
The radius of the circum-sphere
"""
dis_ij = pdist(vertex_pos, 'euclidean')
sq_12, sq_13, sq_14, sq_23, sq_24, sq_34 = np.power(dis_ij, 2)
MatrixC = np.array([[0, 1, 1, 1, 1], [1, 0, sq_12, sq_13, sq_14], [1, sq_12, 0, sq_23, sq_24],
[1, sq_13, sq_23, 0, sq_34], [1, sq_14, sq_24, sq_34, 0]])
det_MC = (np.linalg.det(MatrixC))
if (det_MC < tol):
return [0, 0, 0], 0
else:
M = -2*np.linalg.inv(MatrixC)
circum_center = (M[0, 1]*vertex_pos[0, :] + M[0, 2]*vertex_pos[1, :] + M[0, 3]*vertex_pos[2, :] +
M[0, 4] * vertex_pos[3, :]) / (M[0, 1] + M[0, 2] + M[0, 3] + M[0, 4])
circum_rad = np.sqrt(M[0, 0])/2
return circum_center, circum_rad
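# A small usage sketch with an assumed example tetrahedron (not from the original module):
import numpy as np
verts = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
center, radius = Circum_O_R(verts, tol=1e-10)
print(center, radius)  # expected roughly [0.5, 0.5, 0.5] and 0.866 for this tetrahedron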
|
800ee6e56088a1c4df7149e911d4acbc175e2771
| 24,583 |
def reverse_one_hot(image):
"""
Transform a 2D array in one-hot format (depth is num_classes),
to a 2D array with only 1 channel, where each pixel value is
the classified class key.
#Arguments
image: The one-hot format image
#Returns
        A 2D array with the same width and height as the input, but
        with a depth size of 1, where each pixel value is the classified
        class key.
"""
x = np.argmax(image, axis=-1)
return x
|
912d4a5f9fbb3711b1af9dcd9c2092e6d71869bd
| 24,584 |
import numpy as np
import torch
from torch.nn.functional import interpolate
from sklearn.cluster import KMeans
# Note: torch2np is assumed to be a project-level helper that converts tensors to numpy arrays.
def get_feature_clusters(x: torch.Tensor, output_size: int, clusters: int = 8):
""" Applies KMeans across feature maps of an input activations tensor """
if not isinstance(x, torch.Tensor):
raise NotImplementedError(f"Function supports torch input tensors only, but got ({type(x)})")
if x.ndim == 3:
x = x.unsqueeze(0)
b, c, h, w = x.shape
assert h == w, f"image should be square, but got h = {h} and w = {w}"
scale_factor = int(np.ceil(output_size / h))
x = interpolate(x, scale_factor=scale_factor, mode='bilinear', align_corners=True)
x = torch2np(x, squeeze=True).reshape((output_size * output_size), c)
x = KMeans(n_clusters=clusters).fit_predict(x).reshape(output_size, output_size)
return x
|
a43c2b98239f7474bf70747f464e29f4800159d8
| 24,585 |
def get_phone_operator(phonenumber):
"""
Get operator type for a given phonenumber.
>>> get_phone_operator('+959262624625')
<Operator.Mpt: 'MPT'>
>>> get_phone_operator('09970000234')
<Operator.Ooredoo: 'Ooredoo'>
>>> get_phone_operator('123456789')
<Operator.Unknown: 'Unknown'>
"""
phonenumber = str(phonenumber).strip()
if mpt_re.match(phonenumber):
return (Operator.Mpt)
if ooredoo_re.match(phonenumber):
return (Operator.Ooredoo)
if telenor_re.match(phonenumber):
return (Operator.Telenor)
if mytel_re.match(phonenumber):
return (Operator.Mytel)
return (Operator.Unknown)
|
01ec72a935b6fec466ab3113a61959d316d8f4b4
| 24,586 |
def projectpoints(P, X):
""" Apply full projection matrix P to 3D points X in cartesian coordinates.
Args:
P: projection matrix
X: 3d points in cartesian coordinates
Returns:
x: 2d points in cartesian coordinates
"""
X_hom = cart2hom(X)
    X_pro = P.dot(X_hom)  # homogeneous 3D points mapped into the pixel (image) coordinate system
x = hom2cart(X_pro)
return x
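# cart2hom and hom2cart are not shown in this snippet; a minimal sketch consistent
# with the usage above (columns are points, the last row is the homogeneous coordinate):
import numpy as np

def cart2hom(points):
    """Append a row of ones: (d, n) Cartesian -> (d+1, n) homogeneous."""
    return np.vstack((points, np.ones((1, points.shape[1]))))

def hom2cart(points):
    """Divide by the last coordinate and drop it: (d+1, n) -> (d, n)."""
    return points[:-1] / points[-1]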
|
a16df6083a567215b474ec29d2b065c8a200c22c
| 24,587 |
def mdot(a,b):
"""
Computes a contraction of two tensors/vectors. Assumes
the following structure: tensor[m,n,i,j,k] OR vector[m,i,j,k],
where i,j,k are spatial indices and m,n are variable indices.
"""
if (a.ndim == 3 and b.ndim == 3) or (a.ndim == 4 and b.ndim == 4):
c = (a*b).sum(0)
elif a.ndim == 5 and b.ndim == 4:
c = np.empty(np.maximum(a[:,0,:,:,:].shape,b.shape),dtype=b.dtype)
for i in range(a.shape[0]):
c[i,:,:,:] = (a[i,:,:,:,:]*b).sum(0)
elif a.ndim == 4 and b.ndim == 5:
c = np.empty(np.maximum(b[0,:,:,:,:].shape,a.shape),dtype=a.dtype)
for i in range(b.shape[1]):
c[i,:,:,:] = (a*b[:,i,:,:,:]).sum(0)
elif a.ndim == 5 and b.ndim == 5:
c = np.empty((a.shape[0],b.shape[1],a.shape[2],a.shape[3],max(a.shape[4],b.shape[4])),dtype=a.dtype)
for i in range(c.shape[0]):
for j in range(c.shape[1]):
c[i,j,:,:,:] = (a[i,:,:,:,:]*b[:,j,:,:,:]).sum(0)
elif a.ndim == 5 and b.ndim == 6:
c = np.empty((a.shape[0],b.shape[1],b.shape[2],max(a.shape[2],b.shape[3]),max(a.shape[3],b.shape[4]),max(a.shape[4],b.shape[5])),dtype=a.dtype)
for mu in range(c.shape[0]):
for k in range(c.shape[1]):
for l in range(c.shape[2]):
c[mu,k,l,:,:,:] = (a[mu,:,:,:,:]*b[:,k,l,:,:,:]).sum(0)
else:
raise Exception('mdot', 'wrong dimensions')
return c
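# A quick shape check with assumed random inputs (not from the original module):
import numpy as np
a = np.random.rand(4, 4, 8, 8, 8)   # tensor[m, n, i, j, k]
b = np.random.rand(4, 8, 8, 8)      # vector[m, i, j, k]
print(mdot(a, b).shape)             # (4, 8, 8, 8): contracted over the shared variable index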
|
36b8242bf8c643ff35362c4d19f3a222297a1eee
| 24,589 |
def sample_duration(sample):
"""Returns the duration of the sample (in seconds)
:param sample:
:return: number
"""
return sample.duration
|
9aaddb69b106ad941e3d1172c8e789b4969da99d
| 24,590 |
def fetch_commons_memberships(from_date=np.NaN,
to_date=np.NaN,
on_date=np.NaN):
"""Fetch Commons memberships for all MPs.
fetch_commons_memberships fetches data from the data platform showing
Commons memberships for each MP. The memberships are processed to impose
consistent rules on the start and end dates for memberships.
The from_date and to_date arguments can be used to filter the memberships
returned. The on_date argument is a convenience that sets the from_date and
to_date to the same given date. The on_date has priority: if the on_date is
set, the from_date and to_date are ignored.
The filtering is inclusive: a membership is returned if any part
of it falls within the period specified with the from and to dates.
Note that a membership with a NaN end date is still open.
Parameters
----------
from_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is numpy.NaN, which means no records are excluded on the
basis of the from_date.
to_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is np.NaN, which means no records are excluded on the
basis of the to_date.
on_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is np.NaN, which means no records are excluded on the
basis of the on_date.
Returns
-------
out : DataFrame
A pandas dataframe of Commons memberships for each MP, with one row
per Commons membership.
"""
# Set from_date and to_date to on_date if set
if not pd.isna(on_date):
from_date = on_date
to_date = on_date
# Fetch the Commons memberships
commons_memberships = fetch_commons_memberships_raw()
# Get elections and fix the end dates of memberships
end_dates = commons_memberships['seat_incumbency_end_date'].values
general_elections = elections.get_general_elections().values
general_elections_count = len(general_elections)
# If the end date for a membership falls after dissolution adjust it
for i in range(len(end_dates)):
date = end_dates[i]
if pd.isna(date): continue
for j in range(general_elections_count):
dissolution = general_elections[j, 1]
election = general_elections[j, 2]
if date > dissolution and date <= election:
end_dates[i] = dissolution
continue
commons_memberships['seat_incumbency_end_date'] = end_dates
# Filter on dates if requested
if not pd.isna(from_date) or not pd.isna(to_date):
commons_memberships = filter.filter_dates(
commons_memberships,
start_col='seat_incumbency_start_date',
end_col='seat_incumbency_end_date',
from_date=from_date,
to_date=to_date)
# Tidy up and return
commons_memberships.sort_values(
by=['family_name',
'seat_incumbency_start_date'],
inplace=True)
commons_memberships.reset_index(drop=True, inplace=True)
return commons_memberships
|
0c9f72f9b2b1bdc090597a69a598ef638383fcf1
| 24,591 |
import win32com.client as win32
def excel_col_w_fitting(excel_path, sheet_name_list):
"""
    This function makes all column widths of an Excel file auto-fit the column content.
:param excel_path: The Excel file's path.
:param sheet_name_list: The sheet names of the Excel file.
:return: File's column width correctly formatted.
"""
excel = win32.gencache.EnsureDispatch('Excel.Application')
work_book = excel.Workbooks.Open(excel_path)
for sheet_name in sheet_name_list:
work_sheet = work_book.Worksheets(sheet_name)
work_sheet.Columns.AutoFit()
work_book.Save()
excel.Application.Quit()
return None
|
57de5aa63317d4fae4c1f60b607082b8de1f5f91
| 24,592 |
import numpy as np
def padding_reflect(image, pad_size):
    """
    Pad an image by reflecting it at the border
Parameters
----------
image: NDArray
Image to padding. Only support 2D(gray) or 3D(color)
pad_size: tuple
        Padding size for height and width axis respectively
Returns
-------
ret: NDArray
Image after padding
"""
shape = image.shape
assert len(shape) in [2, 3], 'image must be 2D or 3D'
is_3D = True
if len(shape) == 2:
image = np.expand_dims(image, axis=2)
shape = image.shape
is_3D = False
h, w = pad_size
ret = np.zeros((shape[0]+2*h, shape[1]+2*w, shape[2]))
    for i in range(shape[0]+2*h):
        for j in range(shape[1]+2*w):
if i < h:
if j < w:
ret[i, j, :] = image[h-1-i, w-1-j, :]
elif w <= j <= w + shape[1] - 1:
ret[i, j, :] = image[h-1-i, j-w, :]
else:
ret[i, j, :] = image[h-1-i, w+2*shape[1]-1-j, :]
elif h <= i <= h + shape[0] - 1:
if j < w:
ret[i, j, :] = image[i-h, w-1-j, :]
elif w <= j <= w + shape[1] - 1:
ret[i, j, :] = image[i-h, j-w, :]
else:
ret[i, j, :] = image[i-h, w+2*shape[1]-1-j, :]
else:
if j < w:
ret[i, j, :] = image[h+2*shape[0]-1-i, w-1-j, :]
elif w <= j <= w + shape[1] - 1:
ret[i, j, :] = image[h+2*shape[0]-1-i, j-w, :]
else:
ret[i, j, :] = image[h+2*shape[0]-1-i, w+2*shape[1]-1-j, :]
return ret if is_3D else np.squeeze(ret, axis=2)
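# A small usage sketch with an assumed example image (not from the original module):
import numpy as np
img = np.arange(9).reshape(3, 3)
padded = padding_reflect(img, pad_size=(1, 1))
print(padded.shape)  # (5, 5): one reflected row/column added on each side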
|
eb9f00bee89cb9a13fef0aa77e2c3eb0bfc8c92c
| 24,594 |
def check_if_all_elements_have_geometry(geodataframes_list):
"""
Iterates over a list and checks if all members of the list have geometry
information associated with them.
Parameters
----------
geodataframes_list : A list object
A list object that contains one or more geopandas.GeoDataFrame objects
Returns
-------
bool
Returns True if all elements within geodataframes_list have geometry info associated with them
        Returns False if at least one element within geodataframes_list does not have geometry info associated with it
"""
valerror_text = "geodataframes_list must be of list type. Got {}".format(type(geodataframes_list))
if not isinstance(geodataframes_list, list):
raise ValueError(valerror_text)
valerror_text = "Elements of the list should be of type geopandas.GeoDataFrame. Got at least one value that is not."
if check_if_all_elements_are_gdf(geodataframes_list) is False:
raise ValueError(valerror_text)
for geodataframe in geodataframes_list:
if has_geometry(geodataframe) is False:
return False
return True
|
4ca7bcdd405c407a0a15be81876627e88a0d9c80
| 24,595 |
def conference_schedule(parser, token):
"""
{% conference_schedule conference schedule as var %}
"""
contents = token.split_contents()
tag_name = contents[0]
try:
conference = contents[1]
schedule = contents[2]
var_name = contents[4]
except IndexError:
raise template.TemplateSyntaxError("%r tag had invalid arguments" % tag_name)
class ScheduleNode(TNode):
def __init__(self, conference, schedule, var_name):
self.var_name = var_name
self.conference = self._set_var(conference)
self.schedule = self._set_var(schedule)
def render(self, context):
schedule = models.Schedule.objects.get(
conference = self._get_var(self.conference, context),
slug = self._get_var(self.schedule, context),
)
context[self.var_name] = schedule_context(schedule)
return ''
return ScheduleNode(conference, schedule, var_name)
|
037e000488a204a9d0094ccda72067ed70e5aa53
| 24,596 |
from typing import Tuple
import http
def run_delete_process() -> Tuple[str, http.HTTPStatus]:
"""Handles deleting tasks pushed from Task Queue."""
return _run_process(constants.Operation.DELETE)
|
94a8c459ed67695894c28973f4a04faa2f2782aa
| 24,597 |
def annotate(f, expr, ctxt):
"""
f: function argument
expr: expression
ctxt: context
:returns: type of expr
"""
t = f(expr, ctxt)
expr.type = t
return t
|
d8fb524f6ca2fbddef78aa150733e768d0e3da01
| 24,598 |
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
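# A small usage sketch with an assumed example box (not from the original module):
import numpy as np
boxes = np.array([[-5., 10., 700., 400.]])
print(_clip_boxes(boxes, im_shape=(480, 640)))  # clipped to [[0., 10., 639., 400.]]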
|
6b0e412f4aa8d4204530ebeca8a45928213847aa
| 24,599 |
def gen_api_url(endpoint):
"""Construct a Wger API url given the endpoint"""
# type: (str) -> str
return WGER["host_name"] + WGER["api_ext"] + endpoint
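# WGER is a module-level config dict not shown here; the values below are hypothetical:
WGER = {"host_name": "https://wger.example", "api_ext": "/api/v2/"}
print(gen_api_url("exercise"))  # https://wger.example/api/v2/exercise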
|
70623f9130b4dbde277a8c15b3f43a5168e4e487
| 24,600 |
from typing import Literal
def create_inputs(
data: np.ndarray,
input_type_name: Literal[
"data",
"data_one_column",
"one_in_one_out_constant",
"one_in_one_out",
"one_in_batch_out",
"sequentions",
],
input_type_params: dict,
mode: Literal["validate", "in_sample"] = "validate",
predicts: int = 7,
repeatit: int = 10,
predicted_column_index: int = 0,
) -> Inputs:
"""Define configured inputs for various models. For some models use `make_sequences` function => check it's
documentation how it works. For `data` input type name, just return data, if data_one_column, other columns
are deleted, if something else, it create inputs called X and y - same convention as in sklearn plus x_input
- input for predicted values. If constant in used name, it will insert bias 1 to every sample input.
Args:
data (np.ndarray): Time series data.
        input_type_name (str): Name of input. Choices are ['data', 'data_one_column', 'one_in_one_out_constant',
            'one_in_one_out', 'one_in_batch_out', 'sequentions']. If 'sequentions', then input_type_params
            defines the produced inputs.
input_type_params (dict): Dict of params used in make_sequences. E.g. {'n_steps_in': cls.default_n_steps_in,
'n_steps_out': cls.predicts, 'default_other_columns_length': cls.default_other_columns_length, 'constant': 0}.
            Used only if `input_type_name` is 'sequentions'.
mode (Literal["validate", "in_sample"], optional): 'validate' or 'in_sample'. All data are used but if 'in_sample', 'repeatit' number of in-sample
inputs are used for test validation. If 'validate', just one last input (same like predict input is used). Test
output is generated before this function in test / train split. Defaults to 'validate'.
predicts (int, optional): Number of predicted values. Defaults to 7.
repeatit (int, optional): Number of generated sequentions for testing. Defaults to 10.
predicted_column_index (int, optional): Predicted column index. Defaults to 0.
Returns:
Inputs: model_train_input, model_predict_input, model_test_inputs.
Example:
>>> data = np.array(
... [
... [1, 2, 3, 4, 5, 6, 7, 8],
... [9, 10, 11, 12, 13, 14, 15, 16],
... [17, 18, 19, 20, 21, 22, 23, 24],
... ]
... ).T
...
>>> inputs = create_inputs(
... data,
... "sequentions",
... {
... "n_steps_in": 3,
... "n_steps_out": 1,
... "constant": 1,
... },
... )
>>> inputs[0][1]
array([[4],
[5],
[6],
[7],
[8]])
>>> inputs[1]
array([[ 1., 6., 7., 8., 14., 15., 16., 22., 23., 24.]])
"""
# Take one input type, make all derived inputs (save memory, because only slices) and create dictionary of inputs for one iteration
used_sequences = {}
if input_type_name == "data":
used_sequences = data
elif input_type_name == "data_one_column":
used_sequences = data[:, predicted_column_index]
else:
if input_type_name in [
"one_in_one_out_constant",
"one_in_one_out",
"one_in_batch_out",
]:
used_sequences = data[:, predicted_column_index : predicted_column_index + 1]
else:
used_sequences = data
used_sequences = make_sequences(
used_sequences, predicts=predicts, repeatit=repeatit, **input_type_params
)
if isinstance(used_sequences, tuple):
model_train_input = (used_sequences[0], used_sequences[1])
model_predict_input = used_sequences[2]
if mode == "validate":
model_test_inputs = [model_predict_input]
else:
model_test_inputs = used_sequences[3]
else:
model_train_input = model_predict_input = used_sequences
if mode == "validate":
model_test_inputs = [model_predict_input]
else:
model_test_inputs = []
if used_sequences.ndim == 1:
for i in range(repeatit):
model_test_inputs.append(used_sequences[: -predicts - repeatit + i + 1])
else:
for i in range(repeatit):
model_test_inputs.append(used_sequences[:, : -predicts - repeatit + i + 1])
return Inputs(model_train_input, model_predict_input, model_test_inputs)
|
bdb916915d561aa87435c2af5ea2f8b892f3c4b1
| 24,601 |
import talib
def CDLKICKINGBYLENGTH(df):
    """
    Function name: CDLKICKINGBYLENGTH
    Name: Kicking - bull/bear determined by the longer marubozu
    Description: a two-day candlestick pattern, similar to the Kicking pattern; the longer marubozu determines whether the price rises or falls.
    python API
    integer=CDLKICKINGBYLENGTH(open, high, low, close)
    :return:
    """
open = df['open']
high = df['high']
low = df['low']
close = df['close']
return talib.CDLKICKINGBYLENGTH(open, high, low, close)
|
e3a0c62627e8b4866580f232b5199570768c5197
| 24,602 |
def report_count_table_sort(s1, s2):
""" """
# Sort order: Class and scientific name.
columnsortorder = [0, 2, 3, 6] # Class, species, size class and trophy.
#
for index in columnsortorder:
s1item = s1[index]
s2item = s2[index]
# Empty strings should be at the end.
if (s1item != '') and (s2item == ''): return -1
if (s1item == '') and (s2item != ''): return 1
if s1item < s2item: return -1
if s1item > s2item: return 1
#
return 0
|
cf207e4e8f524e48f99422017b17e643b66a9e78
| 24,603 |
import urllib
def serch_handler(msg):
"""
    Handle audio search results
    :param msg: search keyword
:return:
"""
# url = 'https://www.ximalaya.com/revision/search?core=all&kw={0}&spellchecker=true&device=iPhone'
url = 'https://www.ximalaya.com/revision/search?kw={0}&page=1&spellchecker=false&condition=relation&rows=50&device=iPhone&core=track&fq=category_id%3A2&paidFilter=false'
    request_url = url.format(urllib.parse.quote(msg))  # URL-encode the keyword
return get_url_response(request_url)
|
e91620dce4d4b6e7d79ab0e8cbf612322f0248b3
| 24,604 |
def random(start: int, end: int) -> int:
"""Same as `random.randint(start, end)`"""
return randint(start, end)
|
473f27e528d13cdb649b6e6d6e5ba32498a96cc1
| 24,605 |
def zero_check(grid):
"""Take a 2d grid and calculates number of 0 entries."""
zeros = 0
for row in grid:
for element in row:
if element == 0:
zeros += 1
return zeros
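# A small usage sketch (assumed example grid):
print(zero_check([[0, 1, 2], [3, 0, 5]]))  # 2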
|
0d69a948eef96937f8a5033256c3c4d9f22ce14d
| 24,606 |
from typing import List
def get_channel_clips(channel: Channel) -> List[Clip]:
"""
Uses a (blocking) HTTP request to retrieve Clip info for a specific channel.
:param channel: A Channel object.
:returns: A list of Clip objects.
"""
clips = []
pagination = ""
while True:
query = gql.GET_CHANNEL_CLIPS_QUERY.format(
channel_id=channel.login,
after=pagination, first=100
)
resp = gql.gql_query(query=query).json()
resp = resp["data"]["user"]["clips"]
if not resp or not resp["edges"]:
break
pagination = resp["edges"][-1]["cursor"]
for clip in resp["edges"]:
c = clip["node"]
b = c["broadcaster"]
w = c["curator"]
g = c["game"]
v = c["video"]
v_id = "unknown"
if v is not None:
v_id = v["id"]
w_id = b["id"]
w_login = b["login"]
w_name = b["displayName"]
if w is not None:
w_id = w["id"]
w_login = w["login"]
w_name = w["displayName"]
g_id = ""
g_name = ""
if g is not None:
g_id = g["id"]
g_name = g["name"]
clips.append(
Clip(
id=c["id"], slug=c["slug"], created_at=c["createdAt"],
user_id=b["id"], user_login=b["login"], user_name=b["displayName"],
clipper_id=w_id, clipper_login=w_login, clipper_name=w_name,
game_id=g_id, game_name=g_name, title=c["title"],
view_count=c["viewCount"], length=c["durationSeconds"],
offset=c["videoOffsetSeconds"] or 0, video_id=v_id
)
)
if pagination == "" or pagination == None:
break
return clips
|
7b5d5c19b0ac5f7ec665e8e453a529c5556cbadd
| 24,607 |
def tensorflow2xp(tf_tensor: "tf.Tensor") -> ArrayXd: # pragma: no cover
"""Convert a Tensorflow tensor to numpy or cupy tensor."""
assert_tensorflow_installed()
if tf_tensor.device is not None:
_, device_type, device_num = tf_tensor.device.rsplit(":", 2)
else:
device_type = "CPU"
if device_type == "CPU" or not has_cupy:
return tf_tensor.numpy()
else:
dlpack_tensor = tensorflow.experimental.dlpack.to_dlpack(tf_tensor)
return cupy.fromDlpack(dlpack_tensor)
|
81bb8bea01e2e108c21022699005cb17cab12f0e
| 24,608 |
def __str__(self, indent=0, func_role="obj"):
"""
our own __str__
"""
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
out += self._str_param_list('Parameters')
out += self._str_options('Options')
out += self._str_returns()
for param_list in ('Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
|
3e55fccb76f8e200ef7e57366c2ccd9609975959
| 24,609 |
def worker_years_6_download(request):
"""
    Conditional query for textile-category communications, then download the corresponding result file
:param request:
:return:
"""
return download.worker_years_6_download(request)
|
2250828132dff114665c4d6b3c4c6eb2a21840ce
| 24,611 |
def set_autoscaler_location(autoscaler, is_regional, location):
""" Sets location-dependent properties of the autoscaler. """
name = autoscaler['name']
location_prop_name = 'region' if is_regional else 'zone'
autoscaler['type'] = REGIONAL_LOCAL_AUTOSCALER_TYPES[is_regional]
autoscaler['properties'][location_prop_name] = location
location_output = {
'name': location_prop_name,
'value': '$(ref.{}.{})'.format(name, location_prop_name)
}
return location_output
|
2e663856d4b4d9a3a477de9ce330cc5fe42502a1
| 24,612 |
from typing import Dict
from typing import Iterable
def _define_deformation_axes() -> Dict[str, Iterable[str]]:
"""Defines object sets for each axis of deformation."""
rgb_objects_dim = {}
for a in DEFORMATION_AXES:
rgb_objects_dim[a] = []
for v in _DEFORMATION_VALUES:
obj_id = f'{v}{a}'
# There are excluded objects we need to check for.
if obj_id in RGB_OBJECTS_FULL_SET:
rgb_objects_dim[a].append(f'{v}{a}')
return rgb_objects_dim
|
d50384ca1261a312f48f9f6540252fa5f265cf80
| 24,613 |
def ldns_rr2buffer_wire_canonical(*args):
"""LDNS buffer."""
return _ldns.ldns_rr2buffer_wire_canonical(*args)
|
5012bf22889ab0cd8375750bab8f54ca2ecb0da0
| 24,614 |
def get_stretch_factor(folder_name, indices, **kwargs):
""" Computes the stretch factor using the (16-50-84) percentile estimates
of x0 - x1 for each restframe wavelength assuming orthogonality
Parameters:
folder_name: folder containing the individual likelihoods and their
percentile estimates
indices: which restframe wavelengths to use
Returns:
stretch_x0, stretch_x1: the stretch factors along x0 and x1
"""
x0_cen = np.zeros(len(indices))
x0_err = np.zeros(len(indices))
x1_cen = np.zeros(len(indices))
x1_err = np.zeros(len(indices))
for i, index in enumerate(indices):
_, est_x0, est_x1 = np.loadtxt(folder_name + \
'xx_percentile_est_%d.dat' % index)
x0_cen[i] = est_x0[0]
x0_err[i] = (est_x0[1] + est_x0[2]) / 2.
x1_cen[i] = est_x1[0]
x1_err[i] = (est_x1[1] + est_x1[2]) / 2.
res0 = get_corrfunc(x0_cen, x0_err, model=True, est=True,
sfx=folder_name + "x0_corr")
res1 = get_corrfunc(x1_cen, x1_err, model=True, est=True,
sfx=folder_name + "x1_corr")
stretch_x0 = res0[3] / res0[1]
stretch_x1 = res1[3] / res1[1]
return stretch_x0, stretch_x1
|
17d61ba5205aada23b8f6b6a57c1920770a43408
| 24,615 |
def zonal_length(lat, nlon):
""" length of zonal 1/nlon segment at latitude lat"""
return R_earth * 2*np.pi/nlon * np.cos(lat*np.pi/180)
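# A small worked example; R_earth is a module-level constant not shown here, assumed in metres:
import numpy as np
R_earth = 6.371e6
print(zonal_length(lat=0, nlon=360))  # ~111195 m, i.e. about 111 km per degree at the equator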
|
cf539ec73cae803a187d913c84ef7cb739cf8952
| 24,616 |
def create_gw_response(app, wsgi_env):
"""Create an api gw response from a wsgi app and environ.
"""
response = {}
buf = []
result = []
def start_response(status, headers, exc_info=None):
result[:] = [status, headers]
return buf.append
appr = app(wsgi_env, start_response)
close_func = getattr(appr, 'close', None)
try:
buf.extend(list(appr))
finally:
close_func and close_func()
response['body'] = ''.join(buf)
response['statusCode'] = result[0].split(' ', 1)[0]
response['headers'] = {}
for k, v in result[1]:
response['headers'][k] = v
if 'Content-Length' not in response['headers']:
response['headers']['Content-Length'] = str(len(response['body']))
if 'Content-Type' not in response['headers']:
response['headers']['Content-Type'] = 'text/plain'
return response
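# A small usage sketch with an assumed toy WSGI app; it returns str chunks because the
# snippet joins the body with ''.join(...):
def hello_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['Hello, world']

env = {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/'}
print(create_gw_response(hello_app, env))
# {'body': 'Hello, world', 'statusCode': '200',
#  'headers': {'Content-Type': 'text/plain', 'Content-Length': '12'}}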
|
73dd8459cbf9b79655137536ff42195ba62c1372
| 24,618 |
def Squeeze(parent, axis=-1, name=""):
"""\
Dimension of size one is removed at the specified position (batch
dimension is ignored).
:param parent: parent layer
:param axis: squeeze only along this dimension
(default: -1, squeeze along all dimensions)
:param name: name of the output layer
:return: Squeeze layer
"""
return _eddl.Squeeze(parent, axis, name)
|
3c8e6d6292e29d857412db1c74352b02aab99654
| 24,619 |
import attrs
def parse_namespace(tt):
"""
<!ELEMENT NAMESPACE EMPTY>
<!ATTLIST NAMESPACE
%CIMName;>
"""
check_node(tt, 'NAMESPACE', ['NAME'], [], [])
return attrs(tt)['NAME']
|
70a2f4e0ad0f8f98e38fde1892e9d40a34653af1
| 24,620 |
def clean(val, floor, ceiling):
"""Make sure RH values are always sane"""
if val > ceiling or val < floor or pd.isna(val):
return None
if isinstance(val, munits.Quantity):
return float(val.magnitude)
return float(val)
|
6bf2822dc47a0b50cd88f05e2c127d97c5d71c0f
| 24,621 |
from typing import Union
from typing import List
from typing import Tuple
def count_swaps_in_row_order(row_order: Union[List[int], Tuple[int]]) -> int:
"""
Counts the number of swaps in a row order.
Args:
row_order (Union[List[int], Tuple[int]]): A list or tuple
of ints representing the order of rows.
Returns:
int: The minimum number of swaps it takes for a
range(len(row_order)) to reach row_order.
"""
count = 0
for i in range(len(row_order)):
        # Keep swapping until position i holds the value i; a single swap per
        # position undercounts for cycles of length 4 or more. Note: this mutates row_order.
        while row_order[i] != i:
            row_order[row_order[i]], row_order[i] = (
                row_order[i],
                row_order[row_order[i]],
            )
            count += 1
return count
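# A small worked example (the function mutates its argument, so pass fresh lists):
print(count_swaps_in_row_order([1, 0, 3, 2]))     # 2: two disjoint 2-cycles
print(count_swaps_in_row_order([1, 2, 3, 4, 0]))  # 4: a single 5-cycle needs 4 swaps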
|
c96fcb26fac03d252918f7cfa1dd3048eaf22320
| 24,622 |
def evaluate(ast, env):
"""Evaluate an Abstract Syntax Tree in the specified environment."""
print(ast)
if is_boolean(ast):
return ast
if is_integer(ast):
return ast
if is_string(ast):
return ast
if is_symbol(ast):
return env.lookup(ast)
if is_list(ast):
if len(ast) == 0:
raise DiyLangError("Empty list")
if ast[0] == "quote":
if len(ast[1:]) != 1:
raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
return ast[1]
if ast[0] == "atom":
if len(ast[1:]) != 1:
raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
result = evaluate(ast[1], env)
return is_atom(result)
if ast[0] == "eq":
if len(ast[1:]) != 2:
raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
left = evaluate(ast[1], env)
right = evaluate(ast[2], env)
if not is_atom(left) or not is_atom(right):
return False
return left == right
if ast[0] in ["+", "-", "/", "*", "mod", ">"]:
if len(ast[1:]) != 2:
raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
left = evaluate(ast[1], env)
right = evaluate(ast[2], env)
if not is_integer(left) or not is_integer(right):
raise DiyLangError(f"{left} or {right} is not a number")
if ast[0] == "+":
return left + right
if ast[0] == "-":
return left - right
if ast[0] == "/":
return left // right
if ast[0] == "*":
return left * right
if ast[0] == "mod":
return left % right
if ast[0] == ">":
return left > right
if ast[0] == "if":
if len(ast[1:]) != 3:
raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
predicate = evaluate(ast[1], env)
if predicate:
return evaluate(ast[2], env)
else:
return evaluate(ast[3], env)
if ast[0] == "define":
if len(ast[1:]) != 2:
raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
left = ast[1]
if not is_symbol(left):
raise DiyLangError(f"{left} is not a symbol")
right = evaluate(ast[2], env)
env.set(left, right)
return
if ast[0] == "cons":
head = evaluate(ast[1], env)
tail = evaluate(ast[2], env)
if is_list(tail):
return [head] + tail
if is_string(tail):
return String(head.val + tail.val)
raise DiyLangError("Can't use cons on a non list/string")
if ast[0] == "head":
list_ = evaluate(ast[1], env)
if is_list(list_):
if len(list_) == 0:
raise DiyLangError("Can't use head on empty list")
return list_[0]
if is_string(list_):
if len(list_.val) == 0:
raise DiyLangError("Can't use head on empty string")
return String(list_.val[0])
raise DiyLangError("Can't use head on a non list/string")
if ast[0] == "tail":
list_ = evaluate(ast[1], env)
if is_list(list_):
if len(list_) == 0:
raise DiyLangError("Can't use tail on empty list")
return list_[1:]
if is_string(list_):
if len(list_.val) == 0:
raise DiyLangError("Can't use tail on empty string")
return String(list_.val[1:])
raise DiyLangError("Can't use tail on a non list/string")
if ast[0] == "empty":
list_ = evaluate(ast[1], env)
if is_list(list_):
return len(list_) == 0
if is_string(list_):
return len(list_.val) == 0
raise DiyLangError("Can't use empty on a non list/string")
if ast[0] == "cond":
cases = ast[1]
for (condition, value) in cases:
if evaluate(condition, env):
return evaluate(value, env)
return False
if ast[0] == "let":
new_env = env
for (key, value) in ast[1]:
evaluated_value = evaluate(value, new_env)
new_env = new_env.extend({
key: evaluated_value
})
return evaluate(ast[2], new_env)
if ast[0] == "defn":
return evaluate(["define", ast[1], ["lambda", ast[2], ast[3]]], env)
if ast[0] == "lambda":
if len(ast[1:]) != 2:
raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
params = ast[1]
if not is_list(params):
raise DiyLangError(f"{params} is not a list")
for param in params:
if not is_symbol(param):
raise DiyLangError(f"{param} is not a symbol")
body = ast[2]
return Closure(env, params, body)
if is_closure(ast[0]):
closure = ast[0]
args = ast[1:]
return evaluate_closure(closure, args, env)
if is_list(ast[0]):
closure = evaluate(ast[0], env)
args = ast[1:]
return evaluate_closure(closure, args, env)
function_name = ast[0]
if not is_symbol(function_name):
raise DiyLangError(f"{function_name} is not a function")
closure = env.lookup(function_name)
if not is_closure(closure):
raise DiyLangError(f"{closure} is not a function")
args = ast[1:]
return evaluate_closure(closure, args, env)
|
c9e5da8c9b073f72a2b27bfbb84a7939fbcec134
| 24,623 |
from datetime import datetime
import json
def load_data(
assets: tp.Union[None, tp.List[tp.Union[str,dict]]] = None,
min_date: tp.Union[str, datetime.date, None] = None,
max_date: tp.Union[str, datetime.date, None] = None,
dims: tp.Tuple[str, str] = (ds.TIME, ds.ASSET),
forward_order: bool = True,
tail: tp.Union[datetime.timedelta, int, float] = DEFAULT_TAIL,
) -> tp.Union[None, xr.DataArray]:
"""
Loads index time series.
:param assets:
:param min_date:
:param max_date:
:param dims:
:param forward_order:
:param tail:
:return:
"""
track_event("DATA_INDEX_SERIES")
max_date = parse_date(max_date)
if min_date is not None:
min_date = parse_date(min_date)
else:
min_date = max_date - parse_tail(tail)
if assets is not None:
assets = [a['id'] if type(a) == dict else a for a in assets]
if assets is None:
assets_array = load_list(min_date, max_date)
assets_arg = [i['id'] for i in assets_array]
else:
assets_arg = assets
params = {"ids": assets_arg, "min_date": min_date.isoformat(), "max_date": max_date.isoformat()}
params = json.dumps(params)
params = params.encode()
raw = request_with_retry("idx/data", params)
if raw is None or len(raw) < 1:
arr = xr.DataArray(
[[np.nan]],
dims=[ds.TIME, ds.ASSET],
coords={
ds.TIME: pd.DatetimeIndex([max_date]),
ds.ASSET: ['ignore']
}
)[1:,1:]
else:
arr = xr.open_dataarray(raw, cache=True, decode_times=True)
arr = arr.compute()
if forward_order:
arr = arr.sel(**{ds.TIME: slice(None, None, -1)})
if assets is not None:
assets = list(set(assets))
assets = sorted(assets)
assets = xr.DataArray(assets, dims=[ds.ASSET], coords={ds.ASSET:assets})
arr = arr.broadcast_like(assets).sel(asset=assets)
arr = arr.dropna(ds.TIME, 'all')
arr.name = "indexes"
return arr.transpose(*dims)
|
83bd4221b92e2697bc79a416d0d5108fdb79ef27
| 24,624 |
def create_model(values):
"""create the model basing on the calculated values.
Args:
values (dict): values from the get_values_from_path function
Raises:
        ValueError: if the loss function doesn't exist
Returns:
torch.nn.Module: model the network originally was trained with.
"""
pretrain = get_model(
values["model"], num_classes=values["embs"], in_channels=values["num_chan"])
pretrain.output = nn.BatchNorm1d(512)
if values["loss"] == "softmax":
classifier = nn.Linear(values["embs"], values["num_cl"], bias=False)
model = Model(pretrain, classifier)
elif values["loss"] == "arcface":
classifier = AAML(values["embs"], values["num_cl"])
model = ModelArc(pretrain, classifier)
elif values["loss"] == "circle":
classifier = CircleLoss(values["embs"], values["num_cl"])
model = ModelArc(pretrain, classifier)
elif values["loss"] == "rbf":
classifier = RBFClassifier(
values["embs"], values["num_cl"], scale=3, gamma=1)
model = Model(pretrain, classifier)
else:
raise ValueError("That loss function doesn't exist!")
return model
|
9750fa13042572c8ca7a5f81ef73c87134a08431
| 24,625 |
def loss(logits, labels):
"""Calculates the loss from the logits and the labels.
Args:
logits: Logits tensor, float - [batch_size, NUM_CLASSES].
labels: Labels tensor, int32 - [batch_size].
Returns:
loss: Loss tensor of type float.
"""
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, labels),
name='xentropy_mean')
return loss
|
5d86168b8cffba3feb822120fd112835593a8d36
| 24,626 |
import numpy as np
def create_pattern_neighbors_ca2d(width, height, n_states=2):
"""
Returns a list with the weights for 'neighbors' and 'center_idx' parameters
of evodynamic.connection.cellular_automata.create_conn_matrix_ca1d(...).
    The weights are responsible for calculating a unique number for each different
    neighborhood pattern.
Parameters
----------
width : int
Neighborhood width.
height : int
Neighborhood height.
n_states : int
        Number of discrete states in a cell.
Returns
-------
out1 : list
List of weights of the neighbors.
out2 : int
Index of the center of the neighborhood.
"""
return np.array([n_states**p for p in range(width*height)]).reshape(width,height),\
[width//2, height//2]
|
53ce7cd0afb9f9b590754d299ba2d621489bc4e6
| 24,628 |
def by_colname_like(colname, colname_val):
"""
Query to handle the cases in which somebody has the correct
words within their query, but in the incorrect order (likely
to be especially relevant for professors).
"""
def like_clause_constructor(colname, colname_val):
"""
Helper function for constructing like clause.
"""
like_list = colname_val.split(' ')
like_unit = "lower({colname}) like lower('%{word}%') and "
like_clause = ""
for word in like_list:
like_clause += like_unit.format(colname=colname, word=word)
return like_clause
return """
select
title,
section,
instructor,
time,
building,
hours,
interesting,
recommend
from
{table_name}
where
{where_clause}
recommend > 0
limit 3
""".format(where_clause=like_clause_constructor(colname=colname, colname_val=colname_val), table_name=TABLE_NAME)
|
256f5e952fdff02f8de44c45b3283013547d0287
| 24,629 |
import json
def decode_classnames_json(preds, top=5):
"""
Returns class code, class name and probability for each class amongst top=5 for each prediction in preds
e.g.
[[('n01871265', 'tusker', 0.69987053), ('n02504458', 'African_elephant', 0.18252705), ... ]]
"""
if len(preds.shape) != 2 or preds.shape[1] != 1000:
raise ValueError('`decode_classnames_json` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 1000)). '
'Found array with shape: ' + str(preds.shape))
with open('imagenet_class_index.json') as data_file:
data = json.load(data_file)
results = []
for pred in preds:
top_indices = pred.argsort()[-top:][::-1]
result = [tuple(data[str(i)]) + (pred[i],) for i in top_indices]
results.append(result)
return results
|
807bed051300801a5e6a92bbc96324a66050f6c0
| 24,630 |
import math
def prime_decomposition(number):
"""Returns a dictionary with the prime decomposition of n"""
decomposition = {}
number = int(number)
if number < 2:
return decomposition
gen = primes_gen()
break_condition = int(math.sqrt(number))
while number > 1:
current_prime = next(gen)
if current_prime > break_condition:
decomposition[number] = 1
return decomposition
while number % current_prime == 0 or number == current_prime:
if current_prime in decomposition:
decomposition[current_prime] += 1
else:
decomposition[current_prime] = 1
            number //= current_prime  # integer division keeps number an int in Python 3
return decomposition
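# primes_gen is assumed to be a module-level generator yielding primes in increasing
# order; a minimal stand-in plus a usage example (not from the original module):
import itertools

def primes_gen():
    found = []
    for n in itertools.count(2):
        if all(n % p for p in found):
            found.append(n)
            yield n

print(prime_decomposition(360))  # {2: 3, 3: 2, 5: 1}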
|
67062c15676e02747385e64e2dc177ea95d48de1
| 24,632 |
def levenshtein(s1, s2):
"""
Levenstein distance, or edit distance, taken from Wikibooks:
http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python
"""
if len(s1) < len(s2):
return levenshtein(s2, s1)
if not s1:
return len(s2)
    previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer
deletions = current_row[j] + 1 # than s2
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
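# A small usage sketch (classic edit-distance examples):
print(levenshtein("kitten", "sitting"))  # 3
print(levenshtein("flaw", "lawn"))       # 2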
|
58ef88e60e454fda4b1850cc800f75a1d711a9af
| 24,634 |
from typing import Tuple
from typing import Optional
def configure_mpi_node() -> Tuple[RabbitConfig, Celery]:
"""Will configure and return a celery app targetting GPU mode nodes."""
log.info("Initializing celery app...")
app = _celery_app_mpi
# pylint: disable=unused-variable
@app.task(
name="comp.task.mpi",
bind=True,
autoretry_for=(Exception,),
retry_kwargs={"max_retries": 3, "countdown": 2},
on_failure=_on_task_failure_handler,
on_success=_on_task_success_handler,
track_started=True,
)
def pipeline(
self, user_id: str, project_id: str, node_id: Optional[str] = None
) -> None:
shared_task_dispatch(self, user_id, project_id, node_id)
set_boot_mode(BootMode.MPI)
log.info("Initialized celery app in %s", get_boot_mode())
return (_rabbit_config, app)
|
bbc1c04ac8372ff8f5478d39d3f210e14b284c51
| 24,635 |
def customizations(record):
"""
Use some functions delivered by the library
@type record: record
@param record: a record
@rtype: record
@returns: -- customized record
"""
    # record = type(record)
# record = author(record)
record = convert_to_unicode(record)
# record = editor(record)
# record = journal(record)
# record = keyword(record)
# record = link(record)
# record = page_double_hyphen(record)
# record = doi(record)
return record
|
32e47923e194a5fcb0540d9c2953be8d4dab019e
| 24,636 |
def _has_letter(pw):
"""
    Password must contain a letter
:param pw: password string
:return: boolean
"""
return any(character.isalpha() for character in pw)
|
2f8eea521e8ca88001b2ecc3bc2501af8b14bbc8
| 24,638 |
import math
def closest_power_of_two(n):
"""Returns the closest power of two (linearly) to n.
See: http://mccormick.cx/news/entries/nearest-power-of-two
Args:
n: Value to find the closest power of two of.
Returns:
Closest power of two to "n".
"""
return pow(2, int(math.log(n, 2) + 0.5))
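# A small usage sketch showing the rounding in log space:
print(closest_power_of_two(6))  # 8, since log2(6) ~ 2.58 rounds up
print(closest_power_of_two(5))  # 4, since log2(5) ~ 2.32 rounds down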
|
50d78d2a6de4f689ce268a95df97aae72dbd81ac
| 24,639 |
def get_product(barcode, locale='world'):
"""
Return information of a given product.
"""
return utils.fetch(utils.build_url(geography=locale,
service='api',
resource_type='product',
parameters=barcode,
entity="pet"))
|
409cfd2702ee06bab3e02bb446f0ce9d7e284892
| 24,640 |
import numpy as np
from sklearn.metrics import roc_curve
def score(y_true, y_score):
""" Evaluation metric
"""
fpr, tpr, thresholds = roc_curve(y_true, y_score, pos_label = 1)
score = 0.4 * tpr[np.where(fpr>=0.001)[0][0]] + \
0.3 * tpr[np.where(fpr>=0.005)[0][0]] + \
0.3 * tpr[np.where(fpr>=0.01)[0][0]]
return score
|
b561e3cb3bd84d00c78dbd7e906e682c8758859d
| 24,641 |
def divisors(num):
"""
Takes a number and returns all divisors of the number, ordered least to greatest
:param num: int
:return: list (int)
"""
    result = []
    for x in range(1, num + 1):
        if num % x == 0:
            result.append(x)
    return result
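# A small usage sketch (assumed example input):
print(divisors(28))  # [1, 2, 4, 7, 14, 28]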
|
848ed77fa92ae1c55d90a5236f0d9db6ae2f377c
| 24,642 |
def files_page():
"""Displays a table of the user's files."""
user = utils.check_user(token=request.cookies.get("_auth_token"))
if user is None:
return redirect(location="/api/login",
code=303), 303
return render_template(template_name_or_list="home/files.html",
user=user,
files=utils.all(iterable=cache.files,
condition=lambda file: file.owner.id == user.id and not file.deleted))
|
58e627297c2d7b881c18459dc47a012b016cee3d
| 24,643 |
def GetCodepage(language):
""" Returns the codepage for the given |language|. """
lang = _LANGUAGE_MAP[language]
return "%04x" % lang[0]
|
7c84552d6b2f2747ee8365d89ba29bc7843054b7
| 24,644 |
def NumVisTerms(doc):
"""Number of visible terms on the page"""
_, terms = doc
return len(terms)
|
a6b762f314732d90c2371adf9472cf80117adae5
| 24,645 |
def replace_inf_price_nb(prev_close: float, close: float, order: Order) -> Order:
"""Replace infinity price in an order."""
order_price = order.price
if order_price > 0:
order_price = close # upper bound is close
else:
order_price = prev_close # lower bound is prev close
return order_nb(
size=order.size,
price=order_price,
size_type=order.size_type,
direction=order.direction,
fees=order.fees,
fixed_fees=order.fixed_fees,
slippage=order.slippage,
min_size=order.min_size,
max_size=order.max_size,
size_granularity=order.size_granularity,
reject_prob=order.reject_prob,
lock_cash=order.lock_cash,
allow_partial=order.allow_partial,
raise_reject=order.raise_reject,
log=order.log
)
|
6b3581e31d69236c950a3ad812bb95eebbedcf10
| 24,646 |
def get_trend(d):
"""
Calcuate trend for a frame `d`.
"""
dv = d.reset_index(drop=True)
dv["minutes"] = np.arange(dv.shape[0], dtype=np.float64)
covariance = dv.cov()
return (((covariance["minutes"]) / covariance.loc["minutes", "minutes"])[d.columns]
.rename(lambda cl: "_".join([cl, "trend"])))
|
b649e60b8ef74b0a64ec935fde35271b68b0dad7
| 24,647 |
def getIpAddress():
"""Returns the IP address of the computer the client is running on,
as it appears to the client.
See also: system.net.getExternalIpAddress().
Returns:
str: Returns the IP address of the local machine, as it sees it.
"""
return "127.0.0.1"
|
d6aefaa4027a899344c762bc7df5ce40a5dbde4e
| 24,648 |
def line(x, y, weights=None, clip=0.25):
"""Fit a line
Args:
x (numpy.array): x-values
y (numpy.array): y-values
clip (float, optional): Fit only first part. Defaults to 0.25.
Returns:
pandas.Series: fit parameters
"""
if 0 < clip < 1:
clip_int = int(len(x) * clip) - 1
else:
        clip_int = int(clip)  # clip was given as an absolute number of points
# clip data for fit to only use first part
X = x[:clip_int]
Y = y[:clip_int]
    if weights is not None:
W = 1 / weights[:clip_int]
else:
W = np.ones((len(X)))
# weighted LS
X = sm.add_constant(X)
wls_model = sm.WLS(Y, X, weights=W)
fit_params = wls_model.fit().params
fit_params["diffusion_constant"] = fit_params["tau"] / 2 / 2
return fit_params
|
cfc794095e1b60f608b94d44480cb79ece0af653
| 24,649 |
def one_hot(data):
"""
    Using pandas to convert the 'data' into a one-hot encoding format.
"""
one_hot_table = pd.get_dummies(data.unique())
one_hot = data.apply(lambda x: one_hot_table[x] == 1).astype(int)
return one_hot
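# A small usage sketch with assumed example data (not from the original module):
import pandas as pd
data = pd.Series(["cat", "dog", "cat", "bird"])
print(one_hot(data))  # one indicator column per unique value, columns follow first-appearance order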
|
fed9c171ae5b3bcdb78311afa47017c21e1c4b59
| 24,650 |
def update_set(j, n):
"""Computes the update set of the j-th orbital in n modes
Args:
j (int) : the orbital index
n (int) : the total number of modes
Returns:
Array of mode indexes
"""
indexes = np.array([])
if n % 2 == 0:
if j < n / 2:
indexes = np.append(indexes, np.append(
n - 1, update_set(j, n / 2)))
else:
indexes = np.append(indexes, update_set(j - n / 2, n / 2) + n / 2)
return indexes
|
c1d91f245710b1e11aa7178db490f050827d5683
| 24,651 |
def chunklist(inlist: list, chunksize: int) -> list:
"""Split a list into chucks of determined size.
Keyword arguments:
inList -- list to chunk
chunkSize -- number of elements in each chunk
"""
if not isinstance(inlist, list):
raise TypeError
def __chunkyield() -> list:
# https://www.geeksforgeeks.org/break-list-chunks-size-n-python/
for i in range(0, len(inlist), chunksize):
yield inlist[i:i + chunksize]
return list(__chunkyield())
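# A small usage sketch (assumed example input):
print(chunklist([1, 2, 3, 4, 5, 6, 7], 3))  # [[1, 2, 3], [4, 5, 6], [7]]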
|
1351f0fa2ca208095a35ac0806a625f3227b24ef
| 24,652 |
def getLocation(seq, meifile, zones):
""" Given a sequence of notes and the corresponding MEI Document, calculates and returns the json formatted list of
locations (box coordinates) to be stored for an instance of a pitch sequence in our CouchDB.
If the sequence is contained in a single system, only one location will be stored. If the sequence
spans two systems, a list of two locations will be stored.
"""
ulys = []
lrys = []
twosystems = 0
endofsystem = len(seq)-1
if seq[0].getId() not in systemcache:
systemcache[seq[0].getId()] = meifile.lookBack(seq[0], "sb")
# systemcache[seq[0]] = meifile.get_system(seq[0])
if seq[endofsystem].getId() not in systemcache:
systemcache[seq[endofsystem].getId()] = meifile.lookBack(seq[endofsystem], "sb")
# systemcache[seq[endofsystem]] = meifile.get_system(seq[endofsystem])
    if systemcache[seq[0].getId()] != systemcache[seq[endofsystem].getId()]: # then the sequence spans two systems and we must store two separate locations to highlight
twosystems = 1
for i in range(1 , len(seq)):
if seq[i-1].getId() not in systemcache:
systemcache[seq[i-1].getId()] = meifile.lookBack(seq[i-1], "sb")
            if seq[i].getId() not in systemcache:
systemcache[seq[i].getId()] = meifile.lookBack(seq[i], "sb")
# find the last note on the first system and the first note on the second system
if systemcache[seq[i-1].getId()] != systemcache[seq[i].getId()]:
endofsystem = i # this will be the index of the first note on second system
# ulx1 = int(meifile.get_by_facs(seq[0].parent.parent.facs)[0].ulx)
# lrx1 = int(meifile.get_by_facs(seq[i-1].parent.parent.facs)[0].lrx)
# ulx2 = int(meifile.get_by_facs(seq[i].parent.parent.facs)[0].ulx)
# lrx2 = int(meifile.get_by_facs(seq[-1].parent.parent.facs)[0].lrx)
ulx1 = int(findbyID(zones, seq[0].parent.parent.getAttribute("facs").value, meifile).getAttribute("ulx").value)
lrx1 = int(findbyID(zones, seq[i-1].parent.parent.getAttribute("facs").value, meifile).getAttribute("lrx").value)
ulx2 = int(findbyID(zones, seq[i].parent.parent.getAttribute("facs").value, meifile).getAttribute("ulx").value)
lrx2 = int(findbyID(zones, seq[-1].parent.parent.getAttribute("facs").value, meifile).getAttribute("lrx").value)
else: # the sequence is contained in one system and only one box needs to be highlighted
ulx = int(findbyID(zones, seq[0].parent.parent.getAttribute("facs").value, meifile).getAttribute("ulx").value)
lrx = int(findbyID(zones, seq[-1].parent.parent.getAttribute("facs").value, meifile).getAttribute("lrx").value)
# ulx = int(meifile.get_by_facs(seq[0].parent.parent.facs)[0].ulx)
# lrx = int(meifile.get_by_facs(seq[-1].parent.parent.facs)[0].lrx)
for note in seq:
ulys.append(int(findbyID(zones, note.parent.parent.getAttribute("facs").value, meifile).getAttribute("uly").value))
lrys.append(int(findbyID(zones, note.parent.parent.getAttribute("facs").value, meifile).getAttribute("lry").value))
if twosystems:
uly1 = min(ulys[:endofsystem])
uly2 = min(ulys[endofsystem:])
lry1 = max(lrys[:endofsystem])
lry2 = max(lrys[endofsystem:])
return [{"ulx": int(ulx1), "uly": int(uly1), "height": abs(uly1 - lry1), "width": abs(ulx1 - lrx1)}, {"ulx": int(ulx2), "uly": int(uly2), "height": abs(uly2 - lry2), "width": abs(ulx2 - lrx2)}]
else:
uly = min(ulys)
lry = max(lrys)
return [{"ulx": int(ulx), "uly": int(uly), "height": abs(uly - lry), "width": abs(ulx - lrx)}]
|
18297c74cb867e018e7a4f3147cdd50ba1eb8225
| 24,653 |
import warnings
def calculate_A0_moving_LE(psi_baseline, psi_goal_0, Au_baseline, Au_goal, deltaz,
c_baseline, l_LE, eps_LE):
"""Find the value for A_P0^c that has the same arc length for the first bay
as for the parent."""
def integrand(psi_baseline, Al, deltaz, c ):
return c*np.sqrt(1 + dxi_u(psi_baseline, Al, deltaz/c)**2)
def equation(A0, L_baseline, Au_goal, deltaz):
Au_goal[0] = A0
c = calculate_c_baseline(c_P, Au_goal, Au_baseline, deltaz/c_P, l_LE, eps_LE, psi_spars[0])
y, err = quad(integrand, 0, psi_goal_0, args=(Au_goal, deltaz, c))
print('y', y, y - (1-eps_LE)*L_baseline, A0, c)
return y - (1-eps_LE)*(L_baseline - c*l_LE)
L_baseline, err = quad(integrand, 0, psi_baseline[0], args=(Au_baseline, deltaz,
c_baseline))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
y = fsolve(equation, Au_goal[0], args=(L_baseline, Au_goal, deltaz))
return y[0]
|
9a01a8c02aa51db0a675e94bf32d6a4b26a13206
| 24,654 |
def create_train_val_set(x_train, one_hot_train_labels):
"""[summary]
Parameters
----------
x_train : [type]
[description]
y_train : [type]
[description]
"""
x_val = x_train[:1000]
partial_x_train = x_train[1000:]
y_val = one_hot_train_labels[:1000]
partial_y_train = one_hot_train_labels[1000:]
return (x_val, partial_x_train, y_val, partial_y_train)
|
dd0b3ca06b3d8bdae8e75284b17eb60ba6bbe36b
| 24,655 |
def update_dataset_temporal_attrs(dataset: xr.Dataset,
update_existing: bool = False,
in_place: bool = False) -> xr.Dataset:
"""
Update temporal CF/THREDDS attributes of given *dataset*.
:param dataset: The dataset.
:param update_existing: If ``True``, any existing attributes will be updated.
:param in_place: If ``True``, *dataset* will be modified in place and returned.
:return: A new dataset, if *in_place* is ``False`` (default), else the passed and modified *dataset*.
"""
return _update_dataset_attrs(dataset, [_TIME_ATTRS_DATA],
update_existing=update_existing, in_place=in_place)
|
99bb1a638e275a4788e2bd99122b67d3f0d5b536
| 24,658 |
def py_cpu_nms(dets, thresh):
"""Pure Python NMS baseline."""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
## index for dets
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
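# A small usage sketch with assumed example detections (x1, y1, x2, y2, score):
import numpy as np
dets = np.array([
    [10, 10, 50, 50, 0.9],
    [12, 12, 52, 52, 0.8],    # heavily overlaps the first box
    [100, 100, 150, 150, 0.7],
])
print(py_cpu_nms(dets, thresh=0.5))  # indices 0 and 2 kept; the overlapping lower-score box is suppressed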
|
78d5b3c672142dc1861a2df11e6f1a4671467fd4
| 24,659 |
import itertools
def _split_iterators(iterator, n=None):
"""Split itererator of tuples into multiple iterators.
:param iterator: Iterator to be split.
:param n: Amount of iterators it will be split in. toolz.peak can be used to determine this value, but that is not lazy.
This is basically the same as x, y, z = zip(*a), however,
this function is lazy.
"""
#if n is None:
# item, iterator = cytoolz.peek(iterator)
# n = len(item)
iterators = itertools.tee(iterator, n)
#iterators = ((sample[i] for sample in iterator) for i, iterator in enumerate(iterators))
# Above does not work?!
out = list()
out.append(s[0] for s in iterators[0])
out.append(s[1] for s in iterators[1])
out.append(s[2] for s in iterators[2])
iterators = out
return iterators
|
25b9409941eaf958aef755c0124d4aee4a3a67e5
| 24,660 |
def get_league_listing(**kwargs):
"""
Get a list of leagues
"""
return make_request("GetLeaguelisting", **kwargs)
|
5b5fb9ee4f06a0ede684f6584fd3af0638c807a4
| 24,661 |
async def perhaps_this_is_it(
disc_channel: disnake.TextChannel = commands.Param(lambda i: i.channel),
large: int = commands.Param(0, large=True),
) -> PerhapsThis:
"""This description should not be shown
Parameters
----------
disc_channel: A channel which should default to the current one - uses the id
large: A large number which defaults to 0 - divided by 2
"""
return PerhapsThis(disc_channel.id, large / 2)
|
158d9d6b278c5142d13e392becd9df35c2844961
| 24,662 |
def maybe_setup_moe_params(model_p: InstantiableParams):
"""Convert a FeedforwardLayer to a MoE Layer for StackedTransformer."""
if model_p.cls == layers.StackedTransformerRepeated:
model_p = model_p.block
if model_p.num_experts == 0:
return model_p
ff_p = model_p.transformer_layer_params_tpl.tr_fflayer_tpl
assert issubclass(ff_p.cls, layers.TransformerFeedForward)
moe_p = model_p.moe_layer_tpl
# Copy over the base params.
base_layer.BaseLayer.copy_base_params(ff_p, moe_p)
  # Copy over other params.
moe_p.name = ff_p.name
moe_p.input_dims = ff_p.input_dims
moe_p.hidden_dims = ff_p.hidden_dims
moe_p.ln_tpl = ff_p.ln_tpl.Copy()
moe_p.activation = ff_p.activation
moe_p.relu_dropout_tpl = ff_p.relu_dropout_tpl.Copy()
moe_p.relu_dropout_prob = ff_p.relu_dropout_prob
moe_p.residual_dropout_tpl = ff_p.residual_dropout_tpl.Copy()
moe_p.residual_dropout_prob = ff_p.residual_dropout_prob
moe_p.add_skip_connection = ff_p.add_skip_connection
moe_p.norm_policy = ff_p.norm_policy
|
2ba82eb85ca85f8c16b3ee5de2d8ac3edb90275a
| 24,663 |
import re
def verify_message( message ):
"""Verifies that a message is valid. i.e. it's similar to: 'daily-0400/20140207041736'"""
r = re.compile( "^[a-z]+(-[0-9])?-([a-z]{3})?[0-9]+/[0-9]+" )
return r.match( message )
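# A quick usage sketch with the message format from the docstring and one
# invalid message.
if __name__ == "__main__":
    print(bool(verify_message("daily-0400/20140207041736")))  # True
    print(bool(verify_message("not a valid message")))        # False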
|
f25a37a5e3f076a647c0a03c26d8f2d2a8fd7b2e
| 24,664 |
def get_all_unicode_chars():
"""Get all unicode characters."""
all_unicode_chars = []
i = 0
while True:
try:
all_unicode_chars.append(chr(i))
except ValueError:
break
i += 1
return all_unicode_chars
|
da63b26dd082987937b17fdfffb1219726d9d2c6
| 24,665 |
def get_east_asian_width_property(value, binary=False):
"""Get `EAST ASIAN WIDTH` property."""
obj = unidata.ascii_east_asian_width if binary else unidata.unicode_east_asian_width
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['eastasianwidth'].get(negated, negated)
else:
value = unidata.unicode_alias['eastasianwidth'].get(value, value)
return obj[value]
|
9eb8f70229a6d53faae071b641a39b79d8807941
| 24,666 |
def ModifyListRequest(instance_ref, args, req):
"""Parse arguments and construct list backups request."""
req.parent = instance_ref.RelativeName()
if args.database:
database = instance_ref.RelativeName() + '/databases/' + args.database
req.filter = 'database="{}"'.format(database)
return req
|
fde5a06cde30ed1cf163299dc8ae5f0e826e3f9d
| 24,667 |
from datetime import datetime
def day_start(src_time):
"""Return the beginning of the day of the specified datetime"""
return datetime(src_time.year, src_time.month, src_time.day)
|
2bcc7b136e5cb1e7929e6655daf67b07dbbaa542
| 24,669 |
from importlib import resources
import wrangling.fluorimeter.corrections
def _construct_corrections_dict(file):
"""Construct a dictionary of corrections.
Given the name of a .ifa corrections file, construct a dictionary
where the keys are wavelengths (represented as integers) and the values
are measures of the instrument sensitivity (represented as floats).
Intensity data should be divided by the correction value corresponding
to the wavelength at which it was collected.
"""
str_file = resources.read_text(wrangling.fluorimeter.corrections, file)
data = str_file[str_file.find("[Data]") + 6 :]
data = [x for x in data.split("\n") if x != ""]
corrections = {}
for entry in data:
wavelength, correction = [
x.strip() for x in entry.split("\t") if x != ""
]
corrections.update({int(wavelength[:-3]) : float(correction)})
return corrections
|
972db28c3609357a0ce742bf6679fbb46b86ef7c
| 24,670 |
import tensorflow as tf
def get_activation_func(activation_label):
""" Returns the activation function given the label
Args:
activation_label: Name of the function
"""
if activation_label == 'sigmoid':
return tf.nn.sigmoid
elif activation_label == 'identity':
return tf.identity
elif activation_label == 'relu':
return tf.nn.relu
elif activation_label == 'tanh':
return tf.nn.tanh
else:
raise ValueError('Unknown activation function %s'
% activation_label)
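# A short usage sketch: look up an activation by name and apply it to a
# constant tensor (assumes TensorFlow 2.x eager execution).
if __name__ == "__main__":
    act = get_activation_func('relu')
    print(act(tf.constant([-1.0, 0.0, 2.0])).numpy())  # [0. 0. 2.]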
|
cfa1a46b9c40fe2680bfa49831ed09e222ef3335
| 24,671 |
def sim_share(
df1, df2, group_pop_var1, total_pop_var1, group_pop_var2, total_pop_var2,
):
"""Simulate the spatial population distribution of a region using the CDF of a comparison region.
    For each spatial unit i in region 1, take the unit's percentile in the distribution, and swap the group share
    with the value of the corresponding percentile in region 2. The share is the minority population of unit i
    divided by the total minority population of the region. This approach will shift the total population of
    each unit without changing the regional proportion of each group.
Parameters
----------
df1 : pandas.DataFrame or geopandas.GeoDataFrame
dataframe for first dataset with columns holding group and total population counts
df2 : pandas.DataFrame or geopandas.GeoDataFrame
dataframe for second dataset with columns holding group and total population counts
group_pop_var1 : str
column holding population counts for group of interest on input df1
total_pop_var1 : str
column holding total population counts on input df1
group_pop_var2 : str
column holding population counts for group of interest on input df2
total_pop_var2 : str
column holding total population counts on input df2
Returns
-------
two pandas.DataFrame
dataframes with simulated population columns appended
"""
df1, df2 = _prepare_comparative_data(df1, df2, group_pop_var1, group_pop_var2, total_pop_var1, total_pop_var2)
df1["compl_pop_var"] = df1[total_pop_var1] - df1[group_pop_var1]
df2["compl_pop_var"] = df2[total_pop_var2] - df2[group_pop_var2]
df1["share"] = (df1[group_pop_var1] / df1[group_pop_var1].sum()).fillna(0)
df2["share"] = (df2[group_pop_var2] / df2[group_pop_var2].sum()).fillna(0)
df1["compl_share"] = (df1["compl_pop_var"] / df1["compl_pop_var"].sum()).fillna(0)
df2["compl_share"] = (df2["compl_pop_var"] / df2["compl_pop_var"].sum()).fillna(0)
    # Rescale due to the possibility of the summation of the counterfactual share values being greater or lower than 1
# CT stands for Correction Term
CT1_2_group = df1["share"].rank(pct=True).apply(df2["share"].quantile).sum()
CT2_1_group = df2["share"].rank(pct=True).apply(df1["share"].quantile).sum()
df1["counterfactual_group_pop"] = (
df1["share"].rank(pct=True).apply(df2["share"].quantile)
/ CT1_2_group
* df1[group_pop_var1].sum()
)
df2["counterfactual_group_pop"] = (
df2["share"].rank(pct=True).apply(df1["share"].quantile)
/ CT2_1_group
* df2[group_pop_var2].sum()
)
    # Rescale due to the possibility of the summation of the counterfactual share values being greater or lower than 1
# CT stands for Correction Term
CT1_2_compl = (
df1["compl_share"].rank(pct=True).apply(df2["compl_share"].quantile).sum()
)
CT2_1_compl = (
df2["compl_share"].rank(pct=True).apply(df1["compl_share"].quantile).sum()
)
df1["counterfactual_compl_pop"] = (
df1["compl_share"].rank(pct=True).apply(df2["compl_share"].quantile)
/ CT1_2_compl
* df1["compl_pop_var"].sum()
)
df2["counterfactual_compl_pop"] = (
df2["compl_share"].rank(pct=True).apply(df1["compl_share"].quantile)
/ CT2_1_compl
* df2["compl_pop_var"].sum()
)
df1["counterfactual_total_pop"] = (
df1["counterfactual_group_pop"] + df1["counterfactual_compl_pop"]
)
df2["counterfactual_total_pop"] = (
df2["counterfactual_group_pop"] + df2["counterfactual_compl_pop"]
)
return df1.fillna(0), df2.fillna(0)
|
cc7857d6deb81e7224e4e21fe6908376c963169a
| 24,672 |
def fixextensions(peeps, picmap, basedir="."):
"""replaces image names with ones that actually exist in picmap"""
fixed = [peeps[0].copy()]
missing = []
for i in range(1, len(peeps)):
name, ext = peeps[i][2].split(".", 1)
if (name in picmap):
fixed.append(peeps[i].copy())
            # index the entry just appended; indexing with i breaks once items are missing
            fixed[-1][2] = picmap[name]
else:
missing.append(i)
return fixed, missing
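# A small usage sketch with made-up data: rows are [id, name, image] lists,
# the first row is a header, and picmap maps base names to on-disk filenames.
if __name__ == "__main__":
    peeps = [["id", "name", "image"],
             [1, "Ada", "ada.jpg"],
             [2, "Bob", "bob.jpg"],
             [3, "Cy", "cy.jpg"]]
    picmap = {"ada": "ada.png", "cy": "cy.jpeg"}
    fixed, missing = fixextensions(peeps, picmap)
    print(fixed)    # header row, Ada -> ada.png, Cy -> cy.jpeg
    print(missing)  # [2] (Bob's picture is not in picmap)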
|
d2af911aacea80f7e25cbdde0f5dfad0f1757aee
| 24,673 |
def do_divide(data, interval):
"""
    Use a greedy algorithm to obtain an (approximately) "optimal" segmentation.
"""
category = []
p_value, chi2, index = divide_data(data, interval[0], interval[1])
if chi2 < 15:
category.append(interval)
else:
category += do_divide(data, [interval[0], index])
category += do_divide(data, [index, interval[1]])
return category
|
2e25f913c664dd1cc3d60e4c3f89146b81476e3b
| 24,674 |
import json
def get_config(key_path='/'):
"""
Return (sub-)configuration stored in config file.
Note that values may differ from the current ``CONFIG`` variable if it was
manipulated directly.
Parameters
----------
key_path : str, optional
``'/'``-separated path to sub-configuration. Default is ``'/'``, which
returns the full configuration dict.
Returns
-------
sub_config
(sub-)configuration, either a dict or a value
"""
keys = [k for k in key_path.split('/') if k != '']
with open(CONFIG_FILENAME, 'r') as config_fp:
config = json.load(config_fp)
sub_config = config
for k in keys:
sub_config = sub_config[k]
return sub_config
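# A brief usage sketch; assumes CONFIG_FILENAME points at an existing JSON
# file such as {"plot": {"dpi": 150}}.
if __name__ == "__main__":
    print(get_config())            # the whole configuration dict
    print(get_config('plot'))      # {'dpi': 150}
    print(get_config('plot/dpi'))  # 150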
|
075b3cd021be67c5c2f23203236f839fc47a678b
| 24,675 |
def _get_options():
"""
Function that aggregates the configs for sumo and returns them as a list of dicts.
"""
if __mods__['config.get']('hubblestack:returner:sumo'):
sumo_opts = []
returner_opts = __mods__['config.get']('hubblestack:returner:sumo')
if not isinstance(returner_opts, list):
returner_opts = [returner_opts]
for opt in returner_opts:
processed = {'sumo_nebula_return': opt.get('sumo_nebula_return'),
'proxy': opt.get('proxy', {}),
'timeout': opt.get('timeout', 9.05)}
sumo_opts.append(processed)
return sumo_opts
try:
sumo_nebula_return = __mods__['config.get']('hubblestack:returner:sumo:sumo_nebula_return')
except Exception:
return None
sumo_opts = {'sumo_nebula_return': sumo_nebula_return,
'proxy': __mods__['config.get']('hubblestack:nebula:returner:sumo:proxy', {}),
'timeout': __mods__['config.get']('hubblestack:nebula:returner:sumo:timeout',
9.05)}
return [sumo_opts]
|
3d4d491b12f89501e7f5cdadda1d983676027367
| 24,678 |
from typing import Any
from typing import Optional
def convert_boolean(value: Any) -> Optional[bool]:
"""Convert a value from the ToonAPI to a boolean."""
if value is None:
return None
return bool(value)
|
d479898afe1bb8eaba3615a9e69a7a38637c6ec6
| 24,679 |
def Split4(thisBrep, cutters, normal, planView, intersectionTolerance, multiple=False):
"""
Splits a Brep into pieces using a combination of curves, to be extruded, and Breps as cutters.
Args:
cutters (IEnumerable<GeometryBase>): The curves, surfaces, faces and Breps to be used as cutters. Any other geometry is ignored.
normal (Vector3d): A construction plane normal, used in deciding how to extrude a curve into a cutter.
        planView (bool): Set True if the assumed view is a plan, or parallel projection, view.
intersectionTolerance (double): The tolerance with which to compute intersections.
Returns:
Brep[]: A new array of Breps. This array can be empty.
"""
url = "rhino/geometry/brep/split-brep_geometrybasearray_vector3d_bool_double"
if multiple: url += "?multiple=true"
args = [thisBrep, cutters, normal, planView, intersectionTolerance]
if multiple: args = list(zip(thisBrep, cutters, normal, planView, intersectionTolerance))
response = Util.ComputeFetch(url, args)
response = Util.DecodeToCommonObject(response)
return response
|
74009e33a3da88c7d096e6835952014bc8b40ef9
| 24,680 |
def generate_passwords_brute_force(state):
"""
    Generate the next candidate password by counting in base ``len(alphabet)``.
    Relies on module-level ``alphabet`` (the ordered character set) and ``base`` (its length).
    :param state: resumable state as [current_length, counter], or None to start from scratch
    :return: the generated password and the updated state
"""
if state is None:
state = [0, 0]
k, counter = state
password = ''
i = counter
while i > 0:
r = i % base
password = alphabet[r] + password
i = i // base
password = alphabet[0] * (k - len(password)) + password
counter += 1
if password == alphabet[-1] * k:
k += 1
counter = 0
return password, [k, counter]
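# A minimal usage sketch. The function relies on module-level ``alphabet`` and
# ``base`` globals; they are defined here only for the demo.
if __name__ == "__main__":
    alphabet = "abc"
    base = len(alphabet)
    state = None
    for _ in range(5):
        password, state = generate_passwords_brute_force(state)
        print(repr(password))  # '', 'a', 'b', 'c', 'aa'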
|
11e503e53903c884545c2324e721ebcbad1eb7c2
| 24,681 |
from typing import Tuple
import ctypes
def tparse(instring: str, lenout: int = _default_len_out) -> Tuple[float, str]:
"""
Parse a time string and return seconds past the J2000
epoch on a formal calendar.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tparse_c.html
:param instring: Input time string, UTC.
:param lenout: Available space in output error message string.
:return: Equivalent UTC seconds past J2000, Descriptive error message.
"""
errmsg = stypes.string_to_char_p(lenout)
lenout = ctypes.c_int(lenout)
instring = stypes.string_to_char_p(instring)
sp2000 = ctypes.c_double()
libspice.tparse_c(instring, lenout, ctypes.byref(sp2000), errmsg)
return sp2000.value, stypes.to_python_string(errmsg)
|
33b322062e7756d9bc1d728d9655adafd1c4d989
| 24,682 |
def align_decision_ref(id_human, title):
""" In German, decisions are either referred to as 'Beschluss' or
'Entscheidung'. This function shall align the term used in the
title with the term used in id_human.
"""
if 'Beschluss' in title:
return id_human
return id_human.replace('Beschluss ', 'Entscheidung ')
|
ac4f584b8e008576816d9a49dba58bc9c9a6dbc4
| 24,683 |
def get_headers(soup):
"""get nutrient headers from the soup"""
headers = {'captions': [], 'units': []}
footer = soup.find('tfoot')
for cell in footer.findAll('td', {'class': 'nutrient-column'}):
div = cell.find('div')
headers['units'].append(div.text)
headers['captions'].append(div.previous_sibling.strip())
return headers
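# A short usage sketch with a minimal, made-up HTML fragment shaped like the
# nutrition table footer this parser expects (requires beautifulsoup4).
if __name__ == "__main__":
    from bs4 import BeautifulSoup
    html = ('<table><tfoot><tr>'
            '<td class="nutrient-column">Energy<div>kcal</div></td>'
            '<td class="nutrient-column">Protein<div>g</div></td>'
            '</tr></tfoot></table>')
    print(get_headers(BeautifulSoup(html, 'html.parser')))
    # {'captions': ['Energy', 'Protein'], 'units': ['kcal', 'g']}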
|
5e7772a8830271f800791c75ef7ceecc98aba2bb
| 24,684 |
import numpy as np
import pandas as pd
def table2rank(table, transpose=False, is_large_value_high_performance=True, add_averaged_rank=False):
"""
transform a performance value table to a rank table
:param table: pandas DataFrame or numpy array, the table with performance values
:param transpose: bool, whether to transpose table (default: False; the method is column and data set is row)
:param is_large_value_high_performance: bool, whether a larger value has higher performance
:param add_averaged_rank: bool, whether add averaged ranks after the last row/column
:return: a rank table (numpy.array or pd.DataFrame)
"""
table = table.copy()
if isinstance(table, pd.DataFrame):
column_name = table.columns.values
if table.iloc[:, 0].dtype == 'object':
index_name = table.iloc[:, 0].values
table = table.iloc[:, 1:]
else:
index_name = None
data = table.values
else:
data = table
if transpose:
data = data.transpose()
# rank each row
rank_table = list()
for row in data:
if is_large_value_high_performance:
index = np.argsort(-row)
else:
index = np.argsort(row)
rank = np.zeros(len(index))
for i, value in enumerate(index):
if i > 0:
if row[value] == row[index[i - 1]]:
rank[value] = i - 1
continue
rank[value] = i
rank += 1
rank_table.append(rank)
rank_table = np.asarray(rank_table)
if add_averaged_rank:
averaged_rank = [np.mean(rank_table[:, i]) for i in range(rank_table.shape[1])]
rank_table = np.concatenate([rank_table, np.asarray([averaged_rank])])
if transpose:
rank_table = rank_table.transpose()
if isinstance(table, pd.DataFrame): # reconstruct the pandas table
if index_name is not None:
if add_averaged_rank:
if not transpose:
index_name = np.concatenate([index_name, np.array(['AR'])])
else:
column_name = np.concatenate([column_name, np.asarray(['AR'])])
rank_table = np.concatenate([index_name[:, np.newaxis], rank_table], axis=1)
rank_table = pd.DataFrame(data=rank_table, columns=column_name)
return rank_table
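# A compact usage sketch: three methods (columns A, B, C) evaluated on two
# data sets (rows), where larger values mean better performance.
if __name__ == "__main__":
    scores = pd.DataFrame({'dataset': ['d1', 'd2'],
                           'A': [0.90, 0.70],
                           'B': [0.85, 0.80],
                           'C': [0.85, 0.60]})
    print(table2rank(scores, add_averaged_rank=True))
    # per-dataset ranks plus an 'AR' row holding the averaged rank of each method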
|
36b313b8c19f6767690b4ef6d76fcc4b5633865c
| 24,685 |
import numpy as np
def F_z_i(z, t, r1, r2, A):
""" Function F for Newton's method
:param z:
:param t:
:param r1:
:param r2:
:param A:
:return:
F: function
"""
mu = mu_Earth
C_z_i = c2(z)
S_z_i = c3(z)
y_z = r1 + r2 + A * (z * S_z_i - 1.0) / np.sqrt(C_z_i)
F = (y_z / C_z_i) ** 1.5 * S_z_i + A * np.sqrt(np.abs(y_z)) - np.sqrt(mu) * t
return F
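# The function above assumes Stumpff functions c2(z), c3(z) and the constant
# mu_Earth are defined elsewhere. A common formulation (used in universal-
# variable Lambert solvers) is sketched below as an assumption, not as the
# original author's definitions.
mu_Earth = 398600.4418  # km^3 / s^2
def c2(z):
    """Stumpff function C(z)."""
    if z > 1e-6:
        return (1.0 - np.cos(np.sqrt(z))) / z
    if z < -1e-6:
        return (np.cosh(np.sqrt(-z)) - 1.0) / (-z)
    return 0.5
def c3(z):
    """Stumpff function S(z)."""
    if z > 1e-6:
        sz = np.sqrt(z)
        return (sz - np.sin(sz)) / sz ** 3
    if z < -1e-6:
        sz = np.sqrt(-z)
        return (np.sinh(sz) - sz) / sz ** 3
    return 1.0 / 6.0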
|
ca4af99f8722d8e932f58896120883f09e73fb1a
| 24,686 |
from tayph.vartests import typetest
import numpy as np
from astropy.stats import mad_std
def sigma_clip(array,nsigma=3.0,MAD=False):
"""This returns the n-sigma boundaries of an array, mainly used for scaling plots.
Parameters
----------
array : list, np.ndarray
The array from which the n-sigma boundaries are required.
nsigma : int, float
The number of sigma's away from the mean that need to be provided.
MAD : bool
Use the true standard deviation or MAD estimator of the standard deviation
(works better in the presence of outliers).
Returns
-------
vmin,vmax : float
The bottom and top n-sigma boundaries of the input array.
"""
typetest(array,[list,np.ndarray],'array in fun.sigma_clip()')
typetest(nsigma,[int,float],'nsigma in fun.sigma_clip()')
typetest(MAD,bool,'MAD in fun.sigma_clip()')
m = np.nanmedian(array)
if MAD:
s = mad_std(array,ignore_nan=True)
else:
s = np.nanstd(array)
vmin = m-nsigma*s
vmax = m+nsigma*s
return vmin,vmax
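# A quick usage sketch: robust plot limits for noisy data with one large outlier.
if __name__ == "__main__":
    data = np.concatenate([np.random.normal(0.0, 1.0, 1000), [50.0]])
    vmin, vmax = sigma_clip(data, nsigma=3.0, MAD=True)
    print(vmin, vmax)  # roughly -3 and +3; the outlier barely shifts the limits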
|
e62e76c0a92dde4de324a31ecc03968da18de7d3
| 24,687 |
import numpy as np
def points_distance(xyz_1, xyz_2):
    """
    Euclidean distance between two points or between two arrays of points.
    :param xyz_1: point as a 1-D array, or an (N, 3) array of points
    :param xyz_2: point or array of points with the same shape as xyz_1
    :return: scalar distance, or an array of per-row distances
    """
if len(xyz_1.shape) >= 2:
distance = np.sqrt(np.sum((xyz_1 - xyz_2)**2, axis=1))
else:
distance = np.sqrt(np.sum((xyz_1 - xyz_2)**2))
return distance
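# A tiny usage sketch: distance between two points, then row-wise distances
# between two arrays of points.
if __name__ == "__main__":
    print(points_distance(np.array([0.0, 0.0, 0.0]), np.array([3.0, 4.0, 0.0])))  # 5.0
    a = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
    b = np.array([[1.0, 0.0, 0.0], [1.0, 1.0, 2.0]])
    print(points_distance(a, b))  # [1. 1.]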
|
0acc6bf45c03ed554cb13c4375095871dee482fb
| 24,688 |
from typing import Optional
def ensure_society(sess: SQLASession, name: str, description: str,
role_email: Optional[str] = None) -> Collect[Society]:
"""
Register or update a society in the database.
For existing societies, this will synchronise member relations with the given list of admins.
"""
try:
society = get_society(name, sess)
except KeyError:
res_record = yield from _create_society(sess, name, description, role_email)
society = res_record.value
else:
yield _update_society(sess, society, description, role_email)
return society
|
49700a80ab23b0f4211c8bf5f0bc2c1d68c2f1cb
| 24,689 |
def odd_numbers_list(n):
""" Returns the list of n first odd numbers """
return [2 * k - 1 for k in range(1, n + 1)]
|
2066cf07e926e41d358be0012a7f2a248c5987a7
| 24,690 |