content (string, length 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---|
import colorsys
import numpy as np
def hsv_to_rgb(image):
"""
    Convert an HSV image to an RGB image.
    Args:
        image (numpy.ndarray): NumPy HSV image array of shape (H, W, C) to be converted.
    Returns:
        numpy.ndarray, NumPy RGB image with the same shape as image.
"""
h, s, v = image[:, :, 0], image[:, :, 1], image[:, :, 2]
to_rgb = np.vectorize(colorsys.hsv_to_rgb)
r, g, b = to_rgb(h, s, v)
return np.stack((r, g, b), axis=2) | 4e356beb6e9579c96cea3b050d6cc9b863723554 | 3,657,900 |
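A minimal illustrative call (assuming numpy is imported as np, as the function requires): a single pure-red HSV pixel maps to RGB (1, 0, 0).

hsv = np.array([[[0.0, 1.0, 1.0]]])  # hue 0, full saturation and value
rgb = hsv_to_rgb(hsv)                # -> array([[[1., 0., 0.]]])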
def dom_to_tupletree(node):
"""Convert a DOM object to a pyRXP-style tuple tree.
Each element is a 4-tuple of (NAME, ATTRS, CONTENTS, None).
Very nice for processing complex nested trees.
"""
if node.nodeType == node.DOCUMENT_NODE:
# boring; pop down one level
return dom_to_tupletree(node.firstChild)
assert node.nodeType == node.ELEMENT_NODE
name = node.nodeName
attrs = {}
contents = []
for child in node.childNodes:
if child.nodeType == child.ELEMENT_NODE:
contents.append(dom_to_tupletree(child))
elif child.nodeType == child.TEXT_NODE:
assert is_text(child.nodeValue), \
"text node %s is not a string" % repr(child)
contents.append(child.nodeValue)
elif child.nodeType == child.CDATA_SECTION_NODE:
contents.append(child.nodeValue)
else:
raise RuntimeError("can't handle %s" % child)
for i in range(node.attributes.length):
attr_node = node.attributes.item(i)
attrs[attr_node.nodeName] = attr_node.nodeValue
    # XXX: Cannot yet handle comments, processing instructions and other
    # exotic XML constructs.
# it's so easy in retrospect!
return name, attrs, contents, None | a3df44ff17c1a36eb30a57bb1e327f8c633c51fc | 3,657,901 |
def import_bom_rf3(filename, **kwargs):
"""Import a NetCDF radar rainfall product from the BoM Rainfields3.
Parameters
----------
filename : str
Name of the file to import.
Returns
-------
out : tuple
A three-element tuple containing the rainfall field in mm/h imported
from the Bureau RF3 netcdf, the quality field and the metadata. The
quality field is currently set to None.
"""
if not netcdf4_imported:
raise MissingOptionalDependency(
"netCDF4 package is required to import BoM Rainfields3 products "
"but it is not installed"
)
R = _import_bom_rf3_data(filename)
geodata = _import_bom_rf3_geodata(filename)
metadata = geodata
# TODO(import_bom_rf3): Add missing georeferencing data.
metadata["transform"] = None
metadata["zerovalue"] = np.nanmin(R)
if np.any(np.isfinite(R)):
metadata["threshold"] = np.nanmin(R[R > np.nanmin(R)])
else:
metadata["threshold"] = np.nan
return R, None, metadata | 1763a35ba3d46f1c584c53b9bce8dd91f55cfd20 | 3,657,902 |
def pipe_collapse(fq, outdir, gzipped=True):
"""
Collapse, by sequence
"""
fname = filename(fq)
check_path(outdir)
    fq_out = collapse_fx(fq, outdir, gzipped=gzipped)
stat_fq(fq_out)
# 1U 10A
fq_list = split_fq_1u10a(fq_out)
for f in fq_list:
stat_fq(f)
# wrap stat
df = wrap_stat_fq(outdir)
return [fq_out, df] | 5f510894b0eecc5d0c7e35a0b5e4e3550cfc49f9 | 3,657,903 |
import numpy as np
from copy import deepcopy
try:
    from tqdm.auto import tqdm  # if they have it, let users have a progress bar
except ImportError:
    def tqdm(x, desc=''):  # otherwise, just pass through the range
        return x
def multi_bw(init, y, X, n, k, family, tol, max_iter, rss_score, gwr_func,
bw_func, sel_func, multi_bw_min, multi_bw_max, bws_same_times,
verbose=False):
"""
Multiscale GWR bandwidth search procedure using iterative GAM backfitting
"""
if init is None:
bw = sel_func(bw_func(y, X))
optim_model = gwr_func(y, X, bw)
else:
bw = init
optim_model = gwr_func(y, X, init)
bw_gwr = bw
err = optim_model.resid_response.reshape((-1, 1))
param = optim_model.params
XB = np.multiply(param, X)
if rss_score:
rss = np.sum((err)**2)
iters = 0
scores = []
delta = 1e6
BWs = []
bw_stable_counter = 0
bws = np.empty(k)
gwr_sel_hist = []
for iters in tqdm(range(1, max_iter + 1), desc='Backfitting'):
new_XB = np.zeros_like(X)
params = np.zeros_like(X)
for j in range(k):
temp_y = XB[:, j].reshape((-1, 1))
temp_y = temp_y + err
temp_X = X[:, j].reshape((-1, 1))
bw_class = bw_func(temp_y, temp_X)
if bw_stable_counter >= bws_same_times:
#If in backfitting, all bws not changing in bws_same_times (default 5) iterations
bw = bws[j]
else:
bw = sel_func(bw_class, multi_bw_min[j], multi_bw_max[j])
gwr_sel_hist.append(deepcopy(bw_class.sel_hist))
optim_model = gwr_func(temp_y, temp_X, bw)
err = optim_model.resid_response.reshape((-1, 1))
param = optim_model.params.reshape((-1, ))
new_XB[:, j] = optim_model.predy.reshape(-1)
params[:, j] = param
bws[j] = bw
#If bws remain the same as from previous iteration
if (iters > 1) and np.all(BWs[-1] == bws):
bw_stable_counter += 1
else:
bw_stable_counter = 0
num = np.sum((new_XB - XB)**2) / n
den = np.sum(np.sum(new_XB, axis=1)**2)
score = (num / den)**0.5
XB = new_XB
if rss_score:
predy = np.sum(np.multiply(params, X), axis=1).reshape((-1, 1))
new_rss = np.sum((y - predy)**2)
score = np.abs((new_rss - rss) / new_rss)
rss = new_rss
scores.append(deepcopy(score))
delta = score
BWs.append(deepcopy(bws))
if verbose:
print("Current iteration:", iters, ",SOC:", np.round(score, 7))
print("Bandwidths:", ', '.join([str(bw) for bw in bws]))
if delta < tol:
break
opt_bws = BWs[-1]
return (opt_bws, np.array(BWs), np.array(scores), params, err, gwr_sel_hist, bw_gwr) | f1631a7c9d511fa4d6e95002042f846c484f30f5 | 3,657,904 |
import urlparse  # Python 2 standard-library module (urllib.parse on Python 3)
def _auth_url(url):
"""Returns the authentication URL based on the URL originally requested.
Args:
url: String, the original request.url
Returns:
String, the authentication URL.
"""
parsed_url = urlparse.urlparse(url)
parsed_auth_url = urlparse.ParseResult(parsed_url.scheme,
parsed_url.netloc,
'/_auth',
parsed_url.params,
parsed_url.query,
parsed_url.fragment)
return parsed_auth_url.geturl() | ede4b75e10e605b1ae36970ffdf3f6f31d70b810 | 3,657,905 |
import os
import errno
import stat
def ismount(path):
"""
Test whether a path is a mount point.
This is code hijacked from C Python 2.6.8, adapted to remove the extra
lstat() system call.
"""
try:
s1 = os.lstat(path)
except os.error as err:
if err.errno == errno.ENOENT:
# It doesn't exist -- so not a mount point :-)
return False
raise
if stat.S_ISLNK(s1.st_mode):
# A symlink can never be a mount point
return False
s2 = os.lstat(os.path.join(path, '..'))
dev1 = s1.st_dev
dev2 = s2.st_dev
if dev1 != dev2:
# path/.. on a different device as path
return True
ino1 = s1.st_ino
ino2 = s2.st_ino
if ino1 == ino2:
# path/.. is the same i-node as path
return True
return False | d1d18af449c720ed0b616436d905c28313ed88d1 | 3,657,906 |
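Illustrative behaviour on a POSIX filesystem: the root directory is reported as a mount point, and a nonexistent path is not.

print(ismount('/'))              # True: '/' and '/..' share the same device and inode
print(ismount('/no/such/path'))  # False: lstat() fails with ENOENT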
def getSupportedPrintTypes(mainControl, guiParent=None):
"""
Returns dictionary {printTypeName: (printObject, printTypeName,
humanReadableName, addOptPanel)}
addOptPanel is the additional options GUI panel and is always None if
guiParent is None
"""
return groupOptPanelPlugins(mainControl,
getPrintTypeDict(mainControl), guiParent=guiParent) | 468eb8a0aa404ac701f574b7ae2af276e2fd6136 | 3,657,907 |
import numpy as np
from PIL import Image
def read_image(file_name: str) -> np.array:
    """
    Helper function for loading an image.
    :param file_name: path to the file
    :return: a numpy array, prepared for editing with our other functions
    """
return np.asarray(Image.open(file_name), dtype=np.int32) | 1241049c6cb2dff2a467cadea15cc92f4f8be958 | 3,657,908 |
import cv2
def weighted_img(img, initial_img, α=0.8, β=1.0, γ=0.0):
"""
`img` is the output of the hough_lines(), An image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + γ
NOTE: initial_img and img must be the same shape!
"""
return cv2.addWeighted(initial_img, α, img, β, γ) | 635870b037dabaa02ea7d2acb8726d98b604c288 | 3,657,909 |
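A short usage sketch, assuming cv2 and numpy (as np) are imported: blend an image of drawn lane lines onto the original frame.

frame = np.zeros((540, 960, 3), dtype=np.uint8)  # stand-in for the original image
lines = np.zeros_like(frame)                     # stand-in for the hough_lines() output
overlay = weighted_img(lines, frame)             # frame*0.8 + lines*1.0 + 0.0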
def md5SessionKey(params, password):
"""
If the "algorithm" directive's value is "MD5-sess", then A1
[the session key] is calculated only once - on the first request by the
client following receipt of a WWW-Authenticate challenge from the server.
This creates a 'session key' for the authentication of subsequent
requests and responses which is different for each "authentication
session", thus limiting the amount of material hashed with any one
key.
Because the server need only use the hash of the user
credentials in order to create the A1 value, this construction could
be used in conjunction with a third party authentication service so
that the web server would not need the actual password value. The
specification of such a protocol is beyond the scope of this
specification.
"""
keys = ("username", "realm", "nonce", "cnonce")
params_copy = {}
for key in keys:
params_copy[key] = params[key]
params_copy["algorithm"] = MD5_SESS
return _A1(params_copy, password) | 2df5a7ce449553b55155618e1c93621a306eb6c8 | 3,657,910 |
def all_state_action(buffer: RolloutBuffer, learner: BaseAlgorithm, state_only: bool = False):
""" Equivalent of state_action on the whole RolloutBuffer."""
o_shape = get_obs_shape(learner.observation_space)
t = lambda x, shape=[-1]: buffer.to_torch(x).view(buffer.buffer_size*buffer.n_envs, *shape)
if isinstance(buffer.observations, dict):
observations = {k: t(v, o_shape[k]) for k, v in buffer.observations.items()} # OrderedDict?
else:
observations = t(buffer.observations, o_shape)
actions = t(buffer.actions)
return state_action(observations, actions, learner, state_only) | 89c236ac32893b7bab41cb2a00e7484dd0f15b1f | 3,657,911 |
def write_results(conn, cursor, mag_dict, position_dict):
"""
Write star truth results to the truth table
Parameters
----------
conn is a sqlite3 connection to the database
cursor is a sqlite3.conneciton.cursor() object
mag_dict is a dict of mags. It is keyed on the pid of the
Process used to process a chunk of magnitudes. Each value
is a 2-D numpy array of shape (n_obj, n_bandpasses). It is
produced by calculate_magnitudes.
position_dict is a dict keyed on pid of the Process used to
process a chunk of stars. The values are also dicts, these
keyed on 'healpix', 'ra', 'dec', 'id' with the values being
arrays of those quantities for the corresponding chunk of
stars.
Returns
-------
None
Just writes to the database
"""
assert len(mag_dict) == len(position_dict)
row_ct = 0
for k in mag_dict.keys():
mm = mag_dict[k]
pp = position_dict[k]
row_ct += len(pp['ra'])
if len(mm) != len(pp['ra']):
raise RuntimeError('%d mm %d pp' % (len(mm), len(pp['ra'])))
values = ((int(pp['healpix'][i_obj]),
int(pp['id'][i_obj]), 1, 0, 0,
pp['ra'][i_obj], pp['dec'][i_obj], 0.0,
mm[i_obj][0], mm[i_obj][1], mm[i_obj][2],
mm[i_obj][3], mm[i_obj][4], mm[i_obj][5])
for i_obj in range(len(pp['ra'])))
cursor.executemany('''INSERT INTO truth
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', values)
conn.commit()
return row_ct | 0b0c9234a32050277a7e70fee3ab7ba1be5931bb | 3,657,912 |
def get_sparameters(sim: td.Simulation) -> np.ndarray:
"""Adapted from tidy3d examples.
Returns full Smatrix for a component
https://support.lumerical.com/hc/en-us/articles/360042095873-Metamaterial-S-parameter-extraction
"""
sim = run_simulation(sim).result()
def get_amplitude(monitor):
f, b = sim.data(monitor)["mode_amps"]
return np.squeeze(f), np.squeeze(b)
monitors = sim.monitors
n = len(monitors) - 1
S = np.zeros((n, n), dtype=np.complex128)
# for i, monitor_i in enumerate(monitors):
# for j, monitor_j in enumerate(monitors):
# if i > 0 and j > 0:
# if monitor_i.name.startswith("W"):
# ai, bi = get_amplitude(monitor_i)
# else:
# bi, ai = get_amplitude(monitor_i)
# if monitor_j.name.startswith("W"):
# aj, bj = get_amplitude(monitor_j)
# else:
# bj, aj = get_amplitude(monitor_j)
# S[i - i, j - 1] = bi / aj
if len(monitors) == 5:
_, incident, reflect, top, bot = monitors
S[0, 0] = get_amplitude(incident)[-1]
S[1, 0] = get_amplitude(reflect)[-1]
S[0, 1] = get_amplitude(top)[0]
S[1, 1] = get_amplitude(bot)[0]
elif len(monitors) == 3:
_, incident, reflect = monitors
S[0, 0] = S[1, 1] = get_amplitude(incident)[-1]
S[1, 0] = S[0, 1] = get_amplitude(reflect)[-1]
return S | 6577fac645e195c4e30406c6252c9b55831343a0 | 3,657,913 |
def mapmri_STU_reg_matrices(radial_order):
""" Generates the static portions of the Laplacian regularization matrix
according to [1]_ eq. (11, 12, 13).
Parameters
----------
radial_order : unsigned int,
an even integer that represent the order of the basis
Returns
-------
S, T, U : Matrices, shape (N_coef,N_coef)
Regularization submatrices
References
----------
.. [1]_ Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP data."
NeuroImage (2016).
"""
S = np.zeros((radial_order + 1, radial_order + 1))
for i in range(radial_order + 1):
for j in range(radial_order + 1):
S[i, j] = map_laplace_s(i, j)
T = np.zeros((radial_order + 1, radial_order + 1))
for i in range(radial_order + 1):
for j in range(radial_order + 1):
T[i, j] = map_laplace_t(i, j)
U = np.zeros((radial_order + 1, radial_order + 1))
for i in range(radial_order + 1):
for j in range(radial_order + 1):
U[i, j] = map_laplace_u(i, j)
return S, T, U | 40cb1159f04d1291e06146dabd89380936c407a0 | 3,657,914 |
def _checker(word: dict):
"""checks if the 'word' dictionary is fine
:param word: the node in the list of the text
:type word: dict
:return: if "f", "ref" and "sig" in word, returns true, else, returns false
:rtype: bool
"""
if "f" in word and "ref" in word and "sig" in word:
return True
return False | ee6ec5a7ee393ddcbc97b13f6c09cdd9019fb1a6 | 3,657,915 |
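Illustrative checks (the key values are hypothetical): a node passes only when all three keys are present.

print(_checker({"f": "word", "ref": "1.2", "sig": "N"}))  # True
print(_checker({"f": "word", "ref": "1.2"}))              # False (missing "sig")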
def construc_prob(history, window, note_set, model, datafilename):
"""
    This function constructs the probabilities of seeing each next note
    Inputs:
    history, A list of strings, the note history in chronological order
    window, an integer for how far back we are looking
note_set, the set of notes to be considered
model, the model used to construct probabilities
datafilename, a string, the name of the file containing the information to convert strings of notes to interaction dummies
Outputs:
A list of probabilities of len(note_set)
"""
recent_history = history[len(history)-window + 1:len(history)]
like_prob = [] # Initialize a empty list of probabilities of liking a certain sequence
for note in note_set:
potential_hist = recent_history + [note]
X = create_X(potential_hist, datafilename)
# print(potential_hist)
# print(model(X))
like_prob.append(model(X))
return selection_prob(like_prob) | 92e75d386c5fce984302ca60f80b2dc1891fc873 | 3,657,916 |
def renderPybullet(envs, config, tensor=True):
"""Provides as much images as envs"""
if type(envs) is list:
obs = [
env_.render(
mode="rgb_array",
image_size=config["image_size"],
color=config["color"],
fpv=config["fpv"],
camera_id=0,
)
for env_ in envs
]
obs = np.array(obs).transpose(0, 3, 1, 2) / 255.0
else:
obs = envs.render(
mode="rgb_array",
image_size=config["image_size"],
color=config["color"],
fpv=config["fpv"],
camera_id=0,
)
obs = obs.transpose(2, 0, 1) / 255.0
if tensor:
obs = obs[None]
return obs | fb04ecda7e0dbfbe7899d4684979828b3fcd83c6 | 3,657,917 |
def wifi(request):
"""Collect status information for wifi and return HTML response."""
context = {
'refresh': 5,
'item': '- Wifi',
'timestamp': timestamp(),
'wifi': sorted(Wifi().aps),
}
return render(request, 'ulm.html', context) | 0a5412c2912eaeae192dd6d5fe85d336dec1b169 | 3,657,918 |
from keras.models import Sequential
from keras.layers import LSTM, Dense, Activation, TimeDistributed
def genModel( nChars, nHidden, numLayers = 1, dropout = 0.5, recurrent_dropout = 0.5 ):
"""Generates the RNN model with nChars characters and numLayers hidden units with
dimension nHidden."""
model = Sequential()
model.add( LSTM( nHidden, input_shape = (None, nChars), return_sequences = True,
dropout = dropout, recurrent_dropout = recurrent_dropout ) )
for _ in range( numLayers - 1 ):
model.add( LSTM( nHidden, return_sequences = True,
dropout = dropout, recurrent_dropout = recurrent_dropout ) )
model.add( TimeDistributed( Dense(nChars) ) )
model.add( Activation('softmax') )
model.compile( loss = "categorical_crossentropy", optimizer = "adam" )
return model | 4aeef47b8a4948e37eaa2ea07ac22ecee167df51 | 3,657,919 |
def rotate_system(shape_list, angle, center_point = None):
"""Rotates a set of shapes around a given point
If no center point is given, assume the center of mass of the shape
Args:
shape_list (list): A list of list of (x,y) vertices
angle (float): Angle in radians to rotate counterclockwise
center_point ([float, float]): (x,y) point to rotate around
Returns:
A new shape list with rotated vertices
"""
if center_point is None:
center_point = centroid_for_uncomputed_shapes(shape_list)
return [rotate_polygon(s, angle, center_point) for s in shape_list] | 64c4ff717fd432a187d2616263405ae89a0d89f8 | 3,657,920 |
import tensorflow as tf
def _large_compatible_negative(tensor_type):
"""Large negative number as Tensor.
This function is necessary because the standard value for epsilon
in this module (-1e9) cannot be represented using tf.float16
Args:
tensor_type: a dtype to determine the type.
Returns:
a large negative number.
"""
if tensor_type == tf.float16:
return tf.float16.min
return -1e9 | c73a9e2de341d771ec07ecf2b2a178911ecc27bd | 3,657,921 |
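Illustrative use (tensorflow imported as tf): pick a masking fill value that stays representable in the tensor's dtype.

print(_large_compatible_negative(tf.float16))  # tf.float16.min (about -65504)
print(_large_compatible_negative(tf.float32))  # -1e9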
def classified_unread_counts():
"""
    Unread counts returned by the
    helper.classify_unread_counts function.
"""
return {
'all_msg': 12,
'all_pms': 8,
'unread_topics': {
(1000, 'Some general unread topic'): 3,
(99, 'Some private unread topic'): 1
},
'unread_pms': {
1: 2,
2: 1,
},
'unread_huddles': {
frozenset({1001, 11, 12}): 3,
frozenset({1001, 11, 12, 13}): 2
},
'streams': {
1000: 3,
99: 1
}
} | 4d5e984641de88fd497b6c78891b7e6478bb8385 | 3,657,922 |
from google.appengine.ext import ndb  # App Engine NDB datastore API
def company_key(company_name=DEFAULT_COMPANY_NAME):
"""Constructs a Datastore key for a Company entity with company_name."""
return ndb.Key('Company', company_name) | f9387ef2ee33ea87a4a9fd721f14c35ca60ac482 | 3,657,923 |
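Illustrative use (the company name is hypothetical): build an ancestor key for Datastore queries.

ancestor = company_key('Acme Corp')  # ndb.Key('Company', 'Acme Corp')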
def to_n_class(digit_lst, data, labels):
"""to make a subset of MNIST dataset, which has particular digits
Parameters
----------
digit_lst : list
for example, [0,1,2] or [1, 5, 8]
data : numpy.array, shape (n_samples, n_features)
labels : numpy.array or list of str
Returns
-------
numpy.array, list of int
"""
if not set(digit_lst) <= set(range(10)):
raise ValueError
indices = []
new_labels = []
for i, x in enumerate(data):
for digit in digit_lst:
if labels[i] == str(digit):
indices.append(i)
new_labels.append(digit)
return data[indices], new_labels | 79652687ec0670ec00d67681711903ae01f4cc87 | 3,657,924 |
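Illustrative call, assuming MNIST-style inputs: data is an (n_samples, n_features) array and labels are digit strings.

binary_data, binary_labels = to_n_class([0, 1], data, labels)
# binary_data keeps only samples labelled '0' or '1'; binary_labels holds the matching ints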
import numpy
import numexpr as ne  # ne.evaluate below is numexpr's string-expression evaluator
from . import typedef as T  # assumption: the backend's typedef module provides Tensor and EPSILON
def acosh(x: T.Tensor) -> T.Tensor:
"""
Elementwise inverse hyperbolic cosine of a tensor.
Args:
x (greater than 1): A tensor.
Returns:
tensor: Elementwise inverse hyperbolic cosine.
"""
y = numpy.clip(x,1+T.EPSILON, numpy.inf)
return ne.evaluate('arccosh(y)') | c5566c9b67b8be57be47c96762ce7371e1d4d988 | 3,657,925 |
import argparse
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Rock the Casbah',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('replace',
metavar='str',
help='The string that will be inserted')
return parser.parse_args() | 3b7211aea79cf67e4b4b4e8e6c29d37cbf65bac6 | 3,657,926 |
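Illustrative invocation with one positional argument.

# $ python prog.py foobar
args = get_args()
print(args.replace)  # -> 'foobar'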
def run_unit_tests():
""" Run unit tests against installed tools rpms """
# At the time of this writing, no unit tests exist.
# A unit tests script will be run so that unit tests can easily be modified
print "Running unit tests..."
success, output = run_cli_cmd(["/bin/sh", UNIT_TEST_SCRIPT], False)
return success, output | fd2241bd471b7de61bac922f3da485cb954fbe06 | 3,657,927 |
def encode_input_descr(prm):
""" Encode process description input."""
elem = NIL("Input", *_encode_param_common(prm))
elem.attrib["minOccurs"] = ("1", "0")[bool(prm.is_optional)]
elem.attrib["maxOccurs"] = "1"
if isinstance(prm, LiteralData):
elem.append(_encode_literal(prm, True))
elif isinstance(prm, ComplexData):
elem.append(_encode_complex(prm, True))
elif isinstance(prm, BoundingBoxData):
elem.append(_encode_bbox(prm, True))
return elem | 9d5db979f5da325595501a50c2031f56fd438b47 | 3,657,928 |
def poly_quo(f, g, *symbols):
"""Returns polynomial quotient. """
return poly_div(f, g, *symbols)[0] | 2a4b04b053189db9bd5cb946b6399257b49a8afb | 3,657,929 |
import random
from collections import OrderedDict
import numpy as np
def preprocess_data(dataset, encoder, config):
"""
Function to perform 4 preprocessing steps:
1. Exclude classes below minimum threshold defined in config.threshold
2. Exclude all classes that are not referenced in encoder.classes
3. Encode and normalize data into (path: str, label: int) tuples
4. Partition data samples into fractional splits defined in config.data_splits_meta
Parameters
----------
dataset : BaseDataset
Any instance of BaseDataset or its subclasses
encoder : LabelEncoder
Description of parameter `encoder`.
config : Namespace or stuf.stuf
Config object containing the attributes/properties:
config.threshold
config.data_splits_meta
Returns
-------
dict
Dictionary mapping from keys defined in config.data_splits_meta.keys(), to lists of tuples representing each sample.
Examples
-------
Examples should be written in doctest format, and
should illustrate how to use the function/class.
>>> dataset = LeavesDataset()
... encoder = LabelEncoder(dataset.data.family)
... data_splits = preprocess_data(dataset, encoder, config)
"""
dataset.exclude_rare_classes(threshold=config.threshold)
encoder.encoder = dataset.classes
dataset, _ = dataset.enforce_class_whitelist(class_names=encoder.classes)
x = list(dataset.data['path'].values)#.reshape((-1,1))
y = np.array(encoder.encode(dataset.data['family']))
# import pdb;pdb.set_trace()
shuffled_data = list(zip(x,y))
random.shuffle(shuffled_data)
partitioned_data = partition_data(data=shuffled_data,
partitions=OrderedDict(config.data_splits_meta)
)
return {k:v for k,v in partitioned_data.items() if len(v)>0} | ed7f7382c4d1c8bc6ce718605b9d64cc2cb6ff6e | 3,657,930 |
from dronekit.mavlink import MAVConnection
def connect(ip,
_initialize=True,
wait_ready=None,
timeout=30,
still_waiting_callback=default_still_waiting_callback,
still_waiting_interval=1,
status_printer=None,
vehicle_class=None,
rate=4,
baud=115200,
heartbeat_timeout=30,
source_system=255,
source_component=0,
use_native=False):
"""
Returns a :py:class:`Vehicle` object connected to the address specified by string parameter ``ip``.
Connection string parameters (``ip``) for different targets are listed in the :ref:`getting started guide <get_started_connecting>`.
The method is usually called with ``wait_ready=True`` to ensure that vehicle parameters and (most) attributes are
available when ``connect()`` returns.
.. code:: python
from dronekit import connect
# Connect to the Vehicle using "connection string" (in this case an address on network)
vehicle = connect('127.0.0.1:14550', wait_ready=True)
:param String ip: :ref:`Connection string <get_started_connecting>` for target address - e.g. 127.0.0.1:14550.
:param Bool/Array wait_ready: If ``True`` wait until all default attributes have downloaded before
the method returns (default is ``None``).
The default attributes to wait on are: :py:attr:`parameters`, :py:attr:`gps_0`,
:py:attr:`armed`, :py:attr:`mode`, and :py:attr:`attitude`.
You can also specify a named set of parameters to wait on (e.g. ``wait_ready=['system_status','mode']``).
For more information see :py:func:`Vehicle.wait_ready <Vehicle.wait_ready>`.
:param status_printer: (deprecated) method of signature ``def status_printer(txt)`` that prints
STATUS_TEXT messages from the Vehicle and other diagnostic information.
By default the status information is handled by the ``autopilot`` logger.
:param Vehicle vehicle_class: The class that will be instantiated by the ``connect()`` method.
This can be any sub-class of ``Vehicle`` (and defaults to ``Vehicle``).
:param int rate: Data stream refresh rate. The default is 4Hz (4 updates per second).
:param int baud: The baud rate for the connection. The default is 115200.
:param int heartbeat_timeout: Connection timeout value in seconds (default is 30s).
If a heartbeat is not detected within this time an exception will be raised.
:param int source_system: The MAVLink ID of the :py:class:`Vehicle` object returned by this method (by default 255).
    :param int source_component: The MAVLink Component ID of the :py:class:`Vehicle` object returned by this method (by default 0).
:param bool use_native: Use precompiled MAVLink parser.
.. note::
The returned :py:class:`Vehicle` object acts as a ground control station from the
perspective of the connected "real" vehicle. It will process/receive messages from the real vehicle
if they are addressed to this ``source_system`` id. Messages sent to the real vehicle are
automatically updated to use the vehicle's ``target_system`` id.
It is *good practice* to assign a unique id for every system on the MAVLink network.
It is possible to configure the autopilot to only respond to guided-mode commands from a specified GCS ID.
The ``status_printer`` argument is deprecated. To redirect the logging from the library and from the
autopilot, configure the ``dronekit`` and ``autopilot`` loggers using the Python ``logging`` module.
:returns: A connected vehicle of the type defined in ``vehicle_class`` (a superclass of :py:class:`Vehicle`).
"""
if not vehicle_class:
vehicle_class = Vehicle
handler = MAVConnection(ip, baud=baud, source_system=source_system, source_component=source_component, use_native=use_native)
vehicle = vehicle_class(handler)
if status_printer:
vehicle._autopilot_logger.addHandler(ErrprinterHandler(status_printer))
if _initialize:
vehicle.initialize(rate=rate, heartbeat_timeout=heartbeat_timeout)
if wait_ready:
if wait_ready is True:
vehicle.wait_ready(still_waiting_interval=still_waiting_interval,
still_waiting_callback=still_waiting_callback,
timeout=timeout)
else:
vehicle.wait_ready(*wait_ready)
return vehicle | 3cd30bcc35b308913a5f54f39f2e0fb7a5583032 | 3,657,931 |
import os
import sys
import importlib.util
from os.path import exists
def load_test_environment(skill):
"""Load skill's test environment if present
Arguments:
skill (str): path to skill root folder
Returns:
Module if a valid test environment module was found else None
"""
test_env = None
test_env_path = os.path.join(skill, 'test/__init__.py')
if exists(test_env_path):
skill_env = skill + '.test_env'
spec = importlib.util.spec_from_file_location(skill_env, test_env_path)
module = importlib.util.module_from_spec(spec)
sys.modules[skill_env] = module
spec.loader.exec_module(module)
if (hasattr(module, 'test_runner') and
callable(module.test_runner) or
hasattr(module, 'test_setup') and
callable(module.test_setup)):
test_env = module
return test_env | 333d769ea59455afe73e20df00eedd88dd0a3f88 | 3,657,932 |
import datetime
from sanic.response import json  # assumption: this is a Sanic handler and json(...) builds a JSON response
async def ready(request):
"""
For Kubernetes readiness probe,
"""
try:
# check redis valid.
if app.redis_pool:
await app.redis_pool.save('health', 'ok', 1)
# check mysql valid.
if app.mysql_pool:
sql = "SELECT 666"
result = await app.mysql_pool.fetchone(sql)
if result is None:
raise ServerError(error='内部错误', code='10500', message="msg")
except Exception as e:
raise ServerError(error='内部错误', code='10500', message="msg")
return json({
'pong': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'version': app.config['API_VERSION']
}) | f776787f65609fa341eb360c801cf8ebdc16a2eb | 3,657,933 |
def surface_area(polygon_mesh):
""" Computes the surface area for a polygon mesh.
Parameters
----------
polygon_mesh : ``PolygonMesh`` object
Returns
-------
result : surface area
"""
if isinstance(polygon_mesh, polygonmesh.FaceVertexMesh):
print("A FaceVertex Mesh")
result = 0.0
for face in polygon_mesh.faces:
v1, v2, v3 = face
result += 0.5 * abs(np.linalg.norm(
np.cross(
polygon_mesh.vertices[v2]-polygon_mesh.vertices[v1],
polygon_mesh.vertices[v3]-polygon_mesh.vertices[v1] )))
return result
return None | 587740d493ef5762c85f75f81d98e141121b5d7d | 3,657,934 |
from scipy.optimize import fsolve # non-linear solver
import numpy as np
def gas_zfactor(T_pr, P_pr):
"""
Calculate Gas Compressibility Factor
For range: 0.2 < P_pr < 30; 1 < T_pr < 3 (error 0.486%)
(Dranchuk and Aboukassem, 1975)
"""
# T_pr : calculated pseudoreduced temperature
# P_pr : calculated pseudoreduced pressure
if T_pr > 1 and T_pr < 3 and P_pr > 0.2 and P_pr < 30:
a1 = 0.3265; a2 = -1.0700; a3 = -0.5339; a4 = 0.01569; a5 = -0.05165; a6 = 0.5475
a7 = -0.7361; a8 = 0.1844; a9 = 0.1056; a10 = 0.6134; a11 = 0.7210
def f(y):
rho_pr, z = y
c1 = a1 + (a2/T_pr) + (a3/(T_pr**3))+ (a4/(T_pr**4))+ (a5/(T_pr**5))
c2 = a6 + (a7/T_pr) + (a8/(T_pr**2))
c3 = a9*((a7/T_pr) + (a8/(T_pr**2)))
c4 = (a10)*(1+(a11*(rho_pr**2)))*((rho_pr**2)/(T_pr**3))*(np.exp(-a11*(rho_pr**2)))
f1 = z + (c3*(rho_pr**5)) - (c2*(rho_pr**2)) - (c1*(rho_pr**1)) - c4 - 1
f2 = rho_pr - ((0.27 * P_pr) / (z * T_pr))
return[f1, f2]
pseudo_rho, z_factor = fsolve(f, [1, 1]) # initial guess
else:
pseudo_rho, z_factor = np.nan, np.nan
return(pseudo_rho, z_factor) | b9b1d770483737da8277a89b3f1100ea0c49c1c0 | 3,657,935 |
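A quick illustrative call inside the valid range (1 < T_pr < 3, 0.2 < P_pr < 30):

rho_pr, z = gas_zfactor(1.5, 2.0)  # pseudo-reduced density and z-factor solved by fsolve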
def format_value_with_percentage(original_value):
"""
Return a value in percentage format from
an input argument, the original value
"""
percentage_value = "{0:.2%}".format(original_value)
return percentage_value | 78bfb753b974bc7cbe3ac96f58ee49251063d2e7 | 3,657,936 |
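Illustrative call:

print(format_value_with_percentage(0.2567))  # -> '25.67%'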
import numpy
def get_Z_and_extent(topofile):
"""Get data from an ESRI ASCII file."""
f = open(topofile, "r")
ncols = int(f.readline().split()[1])
nrows = int(f.readline().split()[1])
xllcorner = float(f.readline().split()[1])
yllcorner = float(f.readline().split()[1])
cellsize = float(f.readline().split()[1])
nodatavalue = float(f.readline().split()[1])
data = numpy.zeros((nrows, ncols), dtype=numpy.float64)
for i in range(nrows):
data[i, :] = f.readline().strip().split()
f.close()
extent = [xllcorner, xllcorner+ncols*cellsize,
yllcorner, yllcorner+nrows*cellsize]
return data, extent | e96db5c2ae4a0d6c94654d7ad29598c3231ec186 | 3,657,937 |
from typing import Sequence
from typing import MutableMapping
import copy
def modified_config(
file_config: submanager.models.config.ConfigPaths,
request: pytest.FixtureRequest,
) -> submanager.models.config.ConfigPaths:
"""Modify an existing config file and return the path."""
# Get and check request params
request_param = getattr(request, PARAM_ATTR, None)
if request_param is None:
raise ValueError("Update dict must be passed via request param")
if isinstance(request_param, Sequence):
update_dict, disable_all = request_param
else:
update_dict = request_param
disable_all = False
if not isinstance(update_dict, MutableMapping):
raise TypeError(
f"Update dict {update_dict!r} must be a mapping, "
f"not {type(update_dict)!r}",
)
# Disable all items if requested
config_data = submanager.config.utils.load_config(file_config.static)
if disable_all:
config_data_modified = (
submanager.utils.dicthelpers.process_items_recursive(
dict(config_data),
fn_torun=lambda value: False,
keys_match={"enabled"},
inplace=False,
)
)
if isinstance(disable_all, str):
config_data_level = config_data_modified
for key in disable_all.split("."):
config_data_level = config_data_level[key]
if config_data_level.get("enabled", None) is not None:
config_data_level["enabled"] = True
else:
config_data_modified = copy.deepcopy(dict(config_data))
# Modify config and write it back
config_data_modified = submanager.utils.dicthelpers.update_recursive(
base=config_data_modified,
update=dict(update_dict),
inplace=False,
)
submanager.config.utils.write_config(
config_data_modified,
config_path=file_config.static,
)
return file_config | 8a453233b6340b50fdcbc4d3bf7b2f1f1e7e15ce | 3,657,938 |
import torch
def train_discrim(discrim, state_features, actions, optim, demostrations,
settings):
"""demostractions: [state_features|actions]
"""
criterion = torch.nn.BCELoss()
for _ in range(settings.VDB_UPDATE_NUM):
learner = discrim(torch.cat([state_features, actions], dim=-1))
expert = discrim(demostrations)
discrim_loss = criterion(learner, torch.ones(
[len(state_features), 1])) + criterion(
expert, torch.zeros(len(demostrations), 1))
optim.zero_grad()
discrim_loss.backward()
optim.step()
expert_acc = ((discrim(demostrations) < 0.5).float()).mean()
learner_acc = ((discrim(torch.cat([state_features, actions], dim=1)) >
0.5).float()).mean()
return expert_acc, learner_acc | 7e6c16fc396b371e92d3a04179eacb9cae63659c | 3,657,939 |
import sys
from pathlib import Path
def task_install():
"""install the packages into the sys.packages"""
def install(pip):
if pip:
name = get_name()
assert not doit.tools.CmdAction(
f"python -m pip install --find-links=dist --no-index --ignore-installed --no-deps {name}"
).execute(sys.stdout, sys.stderr)
elif PYPROJECT_TOML.exists():
backend = build_backend()
if backend == "flit_core":
needs("flit")
assert not doit.tools.CmdAction("flit install").execute(
sys.stdout, sys.stderr
)
elif backend == "poetry":
needs("poetry")
assert not doit.tools.CmdAction("poetry install").execute(
sys.stdout, sys.stderr
)
else:
assert not doit.tools.CmdAction("pip install . --no-deps").execute(
sys.stdout, sys.stderr
)
name, version = get_name(), get_version()
return Task(
file_dep=[
PYPROJECT_TOML,
to_whl(Path(), name, version),
to_sdist(Path(), name, version),
],
actions=[install],
task_dep=["build"],
params=[_DEVELOP, _PIP],
) | 48c413f3b478a9fdba22ba5780efa982158f0fc9 | 3,657,940 |
def filter_column(text, column, start=0, sep=None, **kwargs):
""" Filters (like grep) lines of text according to a specified column and operator/value
:param text: a string
:param column: integer >=0
:param sep: optional separator between words (default is arbitrary number of blanks)
:param kwargs: operator=value eg eq='exact match', contains='substring', startswith='prefix' etc...
:return:
"""
if len(kwargs) != 1:
raise TypeError("Missing or too many keyword parameter in filter_column")
op, value = kwargs.items()[0]
if op in ('eq', 'equals'):
op = '__eq__'
elif op in ('contains', 'includes'):
op = '__contains__'
elif not op in ('startswith', 'endswith'):
raise ValueError("Unknown filter_column operator: {}".format(op))
lines = text.splitlines() if isinstance(text, basestring) else text
if start:
lines = lines[start:]
values = []
for line in lines:
elts = line.split(sep) if sep else line.split()
if elts and column < len(elts):
elt = elts[column]
if getattr(elt, op)(value):
values.append(line.strip())
return values | f7a788d2d79dba33961213c6bc469d41a0151812 | 3,657,941 |
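An illustrative call (note this snippet is Python 2: it relies on basestring and on dict.items() being indexable); keep lines whose second column equals 'ACTIVE'.

text = "svc1 ACTIVE 12\nsvc2 FAILED 3\nsvc3 ACTIVE 7"
print(filter_column(text, 1, eq='ACTIVE'))  # -> ['svc1 ACTIVE 12', 'svc3 ACTIVE 7']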
def max_tb(collection): # pragma: no cover
"""Returns the maximum number of TB recorded in the collection"""
max_TB = 0
for doc in collection.find({}).sort([('total_TB',-1)]).limit(1):
max_TB = doc['total_TB']
return max_TB | bde417de0b38de7a7b5e4e3db8c05e87fa6c55ca | 3,657,942 |
import cv2
import numpy as np
def prep_im_for_blob(im, pixel_means, target_size_1, target_size_2, max_size_1, max_size_2):
"""Mean subtract and scale an image for use in a blob."""
im = im.astype(np.float32, copy=False)
im -= pixel_means
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale_1 = float(target_size_1) / float(im_size_min)
im_scale_2 = float(target_size_2) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale_1 * im_size_max) > max_size_1:
im_scale_1 = float(max_size_1) / float(im_size_max)
if np.round(im_scale_2 * im_size_max) > max_size_2:
im_scale_2 = float(max_size_2) / float(im_size_max)
im_1 = cv2.resize(im, None, None, fx=im_scale_1, fy=im_scale_1,
interpolation=cv2.INTER_LINEAR)
im_2 = cv2.resize(im, None, None, fx=im_scale_2, fy=im_scale_2,
interpolation=cv2.INTER_LINEAR)
return im_1, im_2, im_scale_1, im_scale_2 | a1842d918149f5d1ccc52e04cc499005570b72ea | 3,657,943 |
import os
import logging
def bandit_run_bandit_scan(attr_dict, path_package, package_name, path_sdk_settings=None, **__):
"""
Run Bandit Scan on whole package using the settings defined in ``constants.BANDIT_DEFAULT_ARGS``.
Raises a SDKException if ``bandit`` isn't installed. In use with ``validate``, this method should
only be called after successfully calling ``bandit_validate_bandit_installed``. If a call to that
method returns a failing SDKValidateIssue, this method shouldn't be called
The default severity level on which the bandit scan fails is "medium" (defined as command line arg "-ll")
The user can overwrite the default settings using an SDK Settings JSON file either in the default
location or by using the --settings flag to pass in a path. The settings file should have a "bandit"
attribute which is a list of bandit command line options. Example (to change level to "low" and
give 5 context lines):
.. code-block:: json
{
"bandit": [
"-l", "-n", "5"
]
}
NOTE: that you can include more than just the severity level in the list. Any valid bandit command
line args will be parsed (as seen above with the "-n" arg added in).
More info here: https://github.com/PyCQA/bandit#readme or by running ``bandit -h``
The user can run the scan in ``verbose`` mode using the ``-v`` flag for the SDK to get output live as
the scan is running.
:param attr_dict: dictionary of attributes for the bandit scan defined in ``bandit_attributes``
:type attr_dict: dict
:param path_package: path to package
:type path_package: str
:param package_name: name of the package (i.e. fn_my_package)
:type package_name: str
:param path_sdk_settings: (optional) path to a sdk settings JSON file
:type path_sdk_settings: str
:param __: (unused) other unused named args
:type __: dict
:return: 1 or 0 and a SDKValidateIssue with details about the bandit scan
:rtype: (int, SDKValidateIssue)
"""
# Because this method requires importing bandit, it must be installed in the env
if not sdk_helpers.get_package_version(constants.BANDIT_PACKAGE_NAME):
raise SDKException("Cannot call {0} without bandit installed".format(bandit_run_bandit_scan.__name__))
bandit_args = [constants.BANDIT_PACKAGE_NAME, "-r", os.path.join(path_package, package_name)]
bandit_args.extend(constants.BANDIT_DEFAULT_ARGS)
if LOG.isEnabledFor(logging.DEBUG):
# if running validate in verbose, append verbose flag to bandit args
bandit_args.extend(constants.BANDIT_VERBOSE_FLAG)
# grab bandit settings from sdk settings file if given and exists
# if either file doesn't exist or file doesn't have "bandit" section
# append on default severity level
if path_sdk_settings and os.path.exists(path_sdk_settings):
# if a settings file exists, check if it has a bandit section
settings_file_contents = sdk_helpers.read_json_file(path_sdk_settings)
# grab the bandit section (should be a list)
settings_bandit_section = settings_file_contents.get(constants.SDK_SETTINGS_BANDIT_SECTION_NAME)
if settings_bandit_section and isinstance(settings_bandit_section, list):
LOG.debug("Reading bandit command line args from sdk settings JSON file {0}".format(path_sdk_settings))
LOG.debug("Bandit settings found in settings file: {0}".format(settings_bandit_section))
bandit_args.extend(settings_bandit_section)
else:
bandit_args.extend(constants.BANDIT_DEFAULT_SEVERITY_LEVEL)
else:
bandit_args.extend(constants.BANDIT_DEFAULT_SEVERITY_LEVEL)
# run bandit as a subprocess
exit_code, details = sdk_helpers.run_subprocess(bandit_args, cmd_name="bandit scan")
# bandit will return a non-zero exit code if an issue of minimum severity level or higher
# is found.
# Example: if "-ll" (our default level which is called "medium") is passed, the process
# will only return a non-zero code if there are "medium" or "high" issues.
# if only "low" or "uncategorized" issues are found, it will return 0
if exit_code != 0:
# all information above the "Test results" are not really relevant
        # but in case that string is not found, we just take the whole details
details_start_string = "Test results"
if details.index(details_start_string) != -1:
details = details[details.index(details_start_string):]
details = details.replace("\n", "\n\t\t")
return 0, SDKValidateIssue(
name=attr_dict.get("name"),
description=attr_dict.get("fail_msg").format(details),
severity=attr_dict.get("severity"),
solution=attr_dict.get("fail_solution") if not LOG.isEnabledFor(logging.DEBUG) else ""
)
else:
# success
return 1, SDKValidateIssue(
name=attr_dict.get("name"),
description=attr_dict.get("pass_msg"),
severity=SDKValidateIssue.SEVERITY_LEVEL_INFO,
solution=attr_dict.get("pass_solution")
) | 0229a0ca9980232d5c9ff4b52f7ae4d4ec2c737c | 3,657,944 |
import numpy as np
import matplotlib.pyplot as plt
def plotann(annotation, title = None, timeunits = 'samples', returnfig = False):
""" Plot sample locations of an Annotation object.
Usage: plotann(annotation, title = None, timeunits = 'samples', returnfig = False)
Input arguments:
- annotation (required): An Annotation object. The sample attribute locations will be overlaid on the signal.
- title (default=None): A string containing the title of the graph.
- timeunits (default='samples'): String specifying the x axis unit.
Allowed options are: 'samples', 'seconds', 'minutes', and 'hours'.
- returnfig (default=False): Specifies whether the figure is to be returned as an output argument
Output argument:
- figure: The matplotlib figure generated. Only returned if the 'returnfig' option is set to True.
Note: The plotrec function is useful for plotting annotations on top of signal waveforms.
Example Usage:
import wfdb
annotation = wfdb.rdann('sampledata/100', 'atr', sampfrom = 100000, sampto = 110000)
annotation.fs = 360
wfdb.plotann(annotation, timeunits = 'minutes')
"""
# Check the validity of items used to make the plot
# Get the x axis annotation values to plot
plotvals = checkannplotitems(annotation, title, timeunits)
# Create the plot
fig=plt.figure()
plt.plot(plotvals, np.zeros(len(plotvals)), 'r+')
if title is not None:
plt.title(title)
# Axis Labels
if timeunits == 'samples':
plt.xlabel('index/sample')
else:
plt.xlabel('time/'+timeunits[:-1])
plt.show(fig)
# Return the figure if requested
if returnfig:
return fig | 2159c1ffed52ef6524990f861d7e986b7aa00c25 | 3,657,945 |
def match_assignments(nb_assignments, course_id):
"""
Check sqlalchemy table for match with nbgrader assignments from a specified course. Creates a dictionary with nbgrader
assignments as the key
If match is found, query the entry from the table and set as the value.
Else, set the value to None
"""
nb_matches = {assignment.name:AssignmentMatch.query.filter_by(nbgrader_assign_name=assignment.name, course_id=course_id).first()
for assignment in nb_assignments}
return nb_matches | 22158bc0d3655a78b8e5b6cb245b781e187f1481 | 3,657,946 |
def tan(input):
"""Computes tangent of values in ``input``.
:rtype: TensorList of tan(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("tan", input) | 27e6487591ff4d207baea094293be83ef22a4099 | 3,657,947 |
import numpy as np
def recall_from_IoU(IoU, samples=500):
"""
plot recall_vs_IoU_threshold
"""
if not (isinstance(IoU, list) or IoU.ndim == 1):
raise ValueError('IoU needs to be a list or 1-D')
iou = np.float32(IoU)
# Plot intersection over union
IoU_thresholds = np.linspace(0.0, 1.0, samples)
recall = np.zeros_like(IoU_thresholds)
for idx, IoU_th in enumerate(IoU_thresholds):
tp, relevant = 0, 0
inds, = np.where(iou >= IoU_th)
recall[idx] = len(inds) * 1.0 / len(IoU)
return recall, IoU_thresholds | 9c24a4e546a76998339ce85e02fae6fec3adb00d | 3,657,948 |
import math
def _GetImage(options):
"""Returns the ndvi regression image for the given options.
Args:
options: a dict created by _ReadOptions() containing the request options
Returns:
An ee.Image with the coefficients of the regression and a band called "rmse" containing the
Root Mean Square Error for the ndvi value calculated by the regression or None if collection is empty.
"""
# renaming the used options
regression = options["regression"]
start = options["start"]
collection = _GetCollection(options)
# _GetCollection() returns None if collection is empty
if collection is None:
return None
# Function to calculate the values needed for a regression with a polynomial of degree 1
def makePoly1Variables(img):
date = img.date()
doy = date.getRelative("day", "year")
x1 = doy
x0 = 1
return (img.select()
.addBands(ee.Image.constant(x0)) # 0. a0 constant term
.addBands(ee.Image.constant(x1)) # 1. a1*x
.addBands(img.normalizedDifference(["NIR","RED"])) # 2. response variable (NDVI)
.toFloat())
# Function to calculate the values needed for a regression with a polynomial of degree 2
def makePoly2Variables(img):
date = img.date()
doy = date.getRelative("day", "year")
x2 = doy.pow(2)
x1 = doy
x0 = 1
return (img.select()
.addBands(ee.Image.constant(x0)) # 0. a0 constant term
.addBands(ee.Image.constant(x1)) # 1. a1*x
.addBands(ee.Image.constant(x2)) # 2. a2*x^2
.addBands(img.normalizedDifference(["NIR","RED"])) # 4. response variable (NDVI)
.toFloat())
# Function to calculate the values needed for a regression with a polynomial of degree 3
def makePoly3Variables(img):
date = img.date()
doy = date.getRelative("day", "year")
x3 = doy.pow(3)
x2 = doy.pow(2)
x1 = doy
x0 = 1
return (img.select()
.addBands(ee.Image.constant(x0)) # 0. a0 constant term
.addBands(ee.Image.constant(x1)) # 1. a1*x
.addBands(ee.Image.constant(x2)) # 2. a2*x^2
.addBands(ee.Image.constant(x3)) # 3. a3*x^3
.addBands(img.normalizedDifference(["NIR","RED"])) # 4. response variable (NDVI)
.toFloat())
# Function to calculate the values needed for a regression with the model after Zhu & Woodcock
def makeZhuWoodVariables(img):
seconds = img.date().millis().divide(1000).floor()
seconds_start = ee.Date("%s-01-01" % start).millis().divide(1000).floor()
seconds_offset = seconds.subtract(seconds_start)
sin_intra = ee.Number(2).multiply(math.pi).divide(365*24*60*60).multiply(seconds_offset).sin()
cos_intra = ee.Number(2).multiply(math.pi).divide(365*24*60*60).multiply(seconds_offset).cos()
inter = seconds_offset
return (img.select()
.addBands(ee.Image.constant(1)) # 0. constant term
.addBands(ee.Image.constant(cos_intra)) # 1. cos intra-annual
.addBands(ee.Image.constant(sin_intra)) # 2. sin intra-annual
.addBands(ee.Image.constant(inter)) # 3. inter-annual
.addBands(img.normalizedDifference(["NIR","RED"])) # 5. response variable (NDVI)
.toFloat())
makeVariables = {"poly1": makePoly1Variables,"poly2": makePoly2Variables, "poly3": makePoly3Variables, "zhuWood": makeZhuWoodVariables}
# calculate the needed values for the regression
collection_prepared = collection.map(makeVariables[regression])
predictorsCount = {"poly1": 2,"poly2": 3, "poly3": 4, "zhuWood": 4}
# counts the ndvi values per pixel
countValues = collection_prepared.select("nd").reduce(ee.Reducer.count())
# masks pixels with less than 2 * number of predictors, to deliver better results
def countMask(img):
return img.updateMask(countValues.gt(predictorsCount[regression]*2-1))
# use the countMask
collection_prepared = collection_prepared.map(countMask)
# doing the regression
coefficients = collection_prepared.reduce(ee.Reducer.linearRegression(predictorsCount[regression], 1))
# flattens regression coefficients to one image with multiple bands
flattenPattern = {"poly1": ["a0", "a1"], "poly2": ["a0", "a1", "a2"], "poly3": ["a0", "a1", "a2", "a3"], "zhuWood": ["a0", "a1", "a2", "a3"]}
renamePattern = {"poly1": "doy", "poly2": "doy", "poly3": "doy", "zhuWood": "sec"}
coefficientsImage = coefficients.select(["coefficients"]).arrayFlatten([flattenPattern[regression],[renamePattern[regression]]])
# flattens the root mean square of the predicted ndvi values
rmse = coefficients.select("residuals").arrayFlatten([["rmse"]])
# combines coefficients and rmse and returns them a one ee.Image
return coefficientsImage.addBands(rmse) | 00b4bd82e772a8afa8c4f92c3dd9afa880af79f2 | 3,657,949 |
def get_registered_plugins(registry, as_instances=False, sort_items=True):
"""Get registered plugins.
Get a list of registered plugins in a form if tuple (plugin name, plugin
description). If not yet auto-discovered, auto-discovers them.
:param registry:
:param bool as_instances:
:param bool sort_items:
:return list:
"""
ensure_autodiscover()
if as_instances:
return registry._registry
registered_plugins = []
for uid, plugin in registry._registry.items():
plugin_name = safe_text(plugin.name)
registered_plugins.append((uid, plugin_name))
if sort_items:
registered_plugins.sort()
return registered_plugins | 68b695ebe3de95a86d37831fe38ce934bcced16c | 3,657,950 |
import time
def datetime_to_timestamp(d):
"""convert a datetime object to seconds since Epoch.
Args:
d: a naive datetime object in default timezone
Return:
int, timestamp in seconds
"""
return int(time.mktime(d.timetuple())) | 356ac090b0827d49e9929a7ef26041b26c6cc690 | 3,657,951 |
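Illustrative call: convert a naive local-time datetime to an integer Unix timestamp.

import datetime
ts = datetime_to_timestamp(datetime.datetime(2020, 1, 1))  # seconds since the Epoch, local timezone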
import torch
def gumbel_softmax(logits, temperature):
"""From https://gist.github.com/yzh119/fd2146d2aeb329d067568a493b20172f
logits: a tensor of shape (*, n_class)
returns an one-hot vector of shape (*, n_class)
"""
y = gumbel_softmax_sample(logits, temperature)
shape = y.size()
_, ind = y.max(dim=-1)
y_hard = torch.zeros_like(y).view(-1, shape[-1])
y_hard.scatter_(1, ind.view(-1, 1), 1)
y_hard = y_hard.view(*shape)
return (y_hard - y).detach() + y | 49a79bf5955cfc01fd27f0a56c23d001e3ef65cc | 3,657,952 |
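Illustrative use, assuming gumbel_softmax_sample is defined alongside this helper (as in the referenced gist): draw differentiable one-hot samples from a batch of logits.

logits = torch.randn(4, 10)
one_hot = gumbel_softmax(logits, temperature=0.5)  # shape (4, 10); each row is one-hot
# gradients flow through the soft sample via the straight-through trick above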
def in_whitelist(address):
"""
Test if the given email address is contained in the list of allowed addressees.
"""
if WHITELIST is None:
return True
else:
return any(regex.search(address) for regex in WHITELIST) | ed552f16a2cd4b9d5e97033e47d5ec8950841164 | 3,657,953 |
def decomposePath(path):
"""
:example:
>>> decomposePath(None)
>>> decomposePath("")
>>> decomposePath(1)
>>> decomposePath("truc")
('', 'truc', '', 'truc')
>>> decomposePath("truc.txt")
('', 'truc', 'txt', 'truc.txt')
>>> decomposePath("/home/truc.txt")
('/home/', 'truc', 'txt', 'truc.txt')
>>> decomposePath("/home/truc.txt.bz2")
('/home/', 'truc.txt', 'bz2', 'truc.txt.bz2')
>>> decomposePath("/truc.txt.bz2")
('/', 'truc.txt', 'bz2', 'truc.txt.bz2')
>>> decomposePath("./truc.txt.bz2")
('./', 'truc.txt', 'bz2', 'truc.txt.bz2')
>>> decomposePath(".truc.txt.bz2")
('', '.truc.txt', 'bz2', '.truc.txt.bz2')
"""
if path is None or type(path) is not str or len(path) == 0:
return None
filenameExt = path.split("/")[-1]
dir = path[0:-len(filenameExt)]
filename = ".".join(filenameExt.split(".")[0:-1])
ext = filenameExt.split(".")[-1]
if len(filename) == 0 and len(ext) > 0:
filename, ext = ext, filename
return (dir, filename, ext, filenameExt) | 7b45cfe64f631912fc56246f404ddbea51b9f1ec | 3,657,954 |
from math import log
def BSCLLR(c, p):
"""
c: A list of ones and zeros representing a codeword received over a BSC.
p: Flip probability of the BSC.
Returns log-likelihood ratios for c.
"""
N = len(c)
evidence = [0]*N
for i in range(N):
if (c[i]):
evidence[i] = log(p/(1-p))
else:
evidence[i] = log((1-p)/p)
return evidence | 2ee6f4a72a8c2aa3257ae00e8374511f74edcbdb | 3,657,955 |
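Illustrative call for a BSC with flip probability 0.1: received zeros get log(0.9/0.1) ≈ +2.197 and received ones get -2.197.

print(BSCLLR([0, 1, 1, 0], 0.1))  # -> [2.197..., -2.197..., -2.197..., 2.197...]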
import torch
def _res_dynamics_fwd(
real_input, imag_input,
sin_decay, cos_decay,
real_state, imag_state,
threshold, w_scale, dtype=torch.int32
):
""" """
dtype = torch.int64
device = real_state.device
real_old = (real_state * w_scale).clone().detach().to(dtype).to(device)
imag_old = (imag_state * w_scale).clone().detach().to(dtype).to(device)
sin_decay_int = (sin_decay).clone().detach().to(dtype).to(device)
cos_decay_int = (cos_decay).clone().detach().to(dtype).to(device)
real = torch.zeros_like(real_input)
imag = torch.zeros_like(imag_input)
threshold *= w_scale
num_steps = real_input.shape[-1]
for n in range(num_steps):
real_new = right_shift_to_zero(cos_decay_int * real_old, 12) \
- right_shift_to_zero(sin_decay_int * imag_old, 12) \
+ (w_scale * real_input[..., n]).to(dtype)
imag_new = right_shift_to_zero(sin_decay_int * real_old, 12) \
+ right_shift_to_zero(cos_decay_int * imag_old, 12) \
+ (w_scale * imag_input[..., n]).to(dtype)
if threshold >= 0:
spike_new = (imag_new >= threshold).to(dtype)
real_old = ((1 - spike_new) * real_new).to(dtype)
imag_old = (
spike_new * (threshold - 1) + (1 - spike_new) * imag_new
).to(dtype)
else:
real_old = real_new
imag_old = imag_new
real[..., n] = real_new / w_scale
imag[..., n] = imag_new / w_scale
return real, imag | 259b520c9ba4491931726b02ff51bc1c69283cdd | 3,657,956 |
def make_json_error(error):
"""
    Handle errors by logging and returning a serialized JSON error response.
"""
message = extract_error_message(error)
status_code = extract_status_code(error)
context = extract_context(error)
retryable = extract_retryable(error)
headers = extract_headers(error)
# Flask will not log user exception (fortunately), but will log an error
# for exceptions that escape out of the application entirely (e.g. if the
# error handler raises an error)
error_logger.debug("Handling {} error: {}".format(
status_code,
message,
))
# Serialize into JSON response
response_data = {
"code": status_code,
"context": context,
"message": message,
"retryable": retryable,
}
# Don't pass in the error schema because it will suppress any extra fields
return dump_response_data(None, response_data, status_code, headers) | ea249272428cdab765ef21cc3cef8d899c9edb19 | 3,657,957 |
def tokenize_finding(finding):
"""Turn the finding into multiple findings split by whitespace."""
tokenized = set()
tokens = finding.text.split()
cursor = 0
# Note that finding.start and finding.end refer to the location in the overall
# text, but finding.text is just the text for this finding.
for token in tokens:
start = finding.text.find(token, cursor)
cursor = end = start + len(token)
tokenized.add(Finding(
finding.category, start + finding.start, end + finding.start, token,
finding.context_start, finding.raw_context))
return tokenized | 28974a87bdb006bbdf37fff68345a9df81ea0962 | 3,657,958 |
import numpy as np
import scipy.ndimage
import scipy.spatial
def gaussian_filter_density(gt):
"""generate ground truth density map
Args:
gt: (height, width), object center is 1.0, otherwise 0.0
Returns:
density map
"""
density = np.zeros(gt.shape, dtype=np.float32)
gt_count = np.count_nonzero(gt)
if gt_count == 0:
return density
pts = np.array(list(zip(np.nonzero(gt)[1], np.nonzero(gt)[0]))) # (x,y)
leaf_size = 2048
# build kd tree
tree = scipy.spatial.KDTree(pts.copy(), leafsize=leaf_size)
# query kd tree
distances, locations = tree.query(pts, k=4)
for i, pt in enumerate(pts):
pt2d = np.zeros(gt.shape, dtype=np.float32)
pt2d[pt[1], pt[0]] = 1.
if gt_count > 1:
sigma = (distances[i][1] + distances[i][2] + distances[i][3]) * 0.085
sigma = min(sigma, 999) # avoid inf
else:
raise NotImplementedError('should not be here!!')
density += scipy.ndimage.filters.gaussian_filter(pt2d, sigma, mode='constant')
return density | 9a51de844a08af18e5d1f72d368dbd6b05d24d34 | 3,657,959 |
def RGBfactorstoBaseandRange(
lumrange: list[int, int],
rgbfactors: list[float,
float,
float]):
"""Get base color luminosity and
luminosity range from color
expressed as r, g, b float
values and min and max byte
luminosity values
Args:
lumrange: [minval: byte
maxval: byte]
rgbfactors: color as
[r: float,
g: float,
b: float]
Returns:
base luminosity as
[r: byte, g: byte, b: byte]
luminosity range as
[r: byte, g: byte, b: byte]
"""
baselum = intscalarmulvect(
rgbfactors,
lumrange[0])
lumrange = subvect(scalarmulvect(
rgbfactors,
lumrange[1]),
baselum)
return baselum, lumrange | 47fba5a98b324fc27869fee8b03903f844ef2c38 | 3,657,960 |
def mean_by_orbit(inst, data_label):
"""Mean of data_label by orbit over Instrument.bounds
Parameters
----------
data_label : string
string identifying data product to be averaged
Returns
-------
mean : pandas Series
simple mean of data_label indexed by start of each orbit
"""
return _core_mean(inst, data_label, by_orbit=True) | 55e3edac3231d4c42428cd87ee758f1b27d959b9 | 3,657,961 |
from typing import Callable
from typing import Optional
def quantile_constraint(
column: str,
quantile: float,
assertion: Callable[[float], bool],
where: Optional[str] = None,
hint: Optional[str] = None,
) -> Constraint:
"""
Runs quantile analysis on the given column and executes the assertion
column:
Column to run the assertion on
quantile:
Which quantile to assert on
    assertion:
Callable that receives a float input parameter (the computed quantile)
and returns a boolean
hint:
A hint to provide additional context why a constraint could have failed
"""
quant = Quantile(column, quantile, where)
constraint = AnalysisBasedConstraint[float](
quant, assertion, hint=hint # type: ignore[arg-type]
)
return NamedConstraint(constraint, f"QuantileConstraint({quant})") | b3e3924a830ec7fd47de981e1ae9eb3f1810c2a1 | 3,657,962 |
from typing import Tuple
import torch
def _compute_rank(
kg_embedding_model,
pos_triple,
corrupted_subject_based,
corrupted_object_based,
device,
) -> Tuple[int, int]:
"""
:param kg_embedding_model:
:param pos_triple:
:param corrupted_subject_based:
:param corrupted_object_based:
:param device:
    :param all_pos_triples_hashed: This parameter isn't used but is necessary for compatibility
"""
corrupted_subject_based = torch.tensor(
corrupted_subject_based,
dtype=torch.long,
device=device
)
corrupted_object_based = torch.tensor(corrupted_object_based, dtype=torch.long, device=device)
scores_of_corrupted_subjects = kg_embedding_model.predict(corrupted_subject_based)
scores_of_corrupted_objects = kg_embedding_model.predict(corrupted_object_based)
pos_triple = np.array(pos_triple)
pos_triple = np.expand_dims(a=pos_triple, axis=0)
pos_triple = torch.tensor(pos_triple, dtype=torch.long, device=device)
score_of_positive = kg_embedding_model.predict(pos_triple)
scores_subject_based = np.append(arr=scores_of_corrupted_subjects, values=score_of_positive)
indice_of_pos_subject_based = scores_subject_based.size - 1
scores_object_based = np.append(arr=scores_of_corrupted_objects, values=score_of_positive)
indice_of_pos_object_based = scores_object_based.size - 1
_, sorted_score_indices_subject_based = torch.sort(
torch.tensor(scores_subject_based, dtype=torch.float),
descending=False)
sorted_score_indices_subject_based = sorted_score_indices_subject_based.cpu().numpy()
_, sorted_score_indices_object_based = torch.sort(
torch.tensor(scores_object_based, dtype=torch.float),
descending=False)
sorted_score_indices_object_based = sorted_score_indices_object_based.cpu().numpy()
# Get index of first occurrence that fulfills the condition
rank_of_positive_subject_based = np.where(sorted_score_indices_subject_based == \
indice_of_pos_subject_based)[0][0]
rank_of_positive_object_based = np.where(sorted_score_indices_object_based == \
indice_of_pos_object_based)[0][0]
return (
rank_of_positive_subject_based,
rank_of_positive_object_based,
) | 2b5043dfed43907563c473141257626bb93027b7 | 3,657,963 |
def _get_bool_argument(ctx: ClassDefContext, expr: CallExpr,
name: str, default: bool) -> bool:
"""Return the boolean value for an argument to a call or the
default if it's not found.
"""
attr_value = _get_argument(expr, name)
if attr_value:
ret = ctx.api.parse_bool(attr_value)
if ret is None:
ctx.api.fail('"{}" argument must be True or False.'.format(name), expr)
return default
return ret
return default | 7f903f884edcb4af328207a0b7d2569cefce0a93 | 3,657,964 |
import json
def validate_filter_parameter(string):
""" Extracts a single filter parameter in name[=value] format """
result = ()
if string:
comps = string.split('=', 1)
if comps[0]:
if len(comps) > 1:
# In the portal, if value textbox is blank we store the value as empty string.
# In CLI, we should allow inputs like 'name=', which correspond to empty string value.
# But there is no way to differentiate between CLI inputs 'name=' and 'name=""'.
# So even though "" is invalid JSON escaped string, we will accept it and set the value as empty string.
filter_param_value = '\"\"' if comps[1] == "" else comps[1]
try:
# Ensure that provided value of this filter parameter is valid JSON. Error out if value is invalid JSON.
filter_param_value = json.loads(filter_param_value)
except ValueError:
raise CLIError('Filter parameter value must be a JSON escaped string. "{}" is not a valid JSON object.'.format(filter_param_value))
result = (comps[0], filter_param_value)
else:
result = (string, '')
else:
# Error out on invalid arguments like '=value' or '='
raise CLIError('Invalid filter parameter "{}". Parameter name cannot be empty.'.format(string))
return result | 8258cff656889a57aaeb24644ea4efc9a60a6997 | 3,657,965 |
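A few illustrative calls showing how the name[=value] parser behaves; the parameter names are made up, and values must be JSON-escaped as the docstring notes.

validate_filter_parameter('count=5')       # -> ('count', 5)
validate_filter_parameter('enabled=true')  # -> ('enabled', True)
validate_filter_parameter('label="prod"')  # -> ('label', 'prod')
validate_filter_parameter('name=')         # -> ('name', '') (empty value allowed)
validate_filter_parameter('flag')          # -> ('flag', '')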
def ones(distribution, dtype=float):
"""Create a LocalArray filled with ones."""
la = LocalArray(distribution=distribution, dtype=dtype)
la.fill(1)
return la | d3caa46b76932a44d441574c78ebbd9c4e8d29f9 | 3,657,966 |
def update_podcast_url(video):
"""Query the DDB table for this video. If found, it means
we have a podcast m4a stored in S3. Otherwise, return no
podcast.
"""
try:
response = PODCAST_TABLE_CLIENT.query(
KeyConditionExpression=Key('session').eq(video.session_id) & Key('year').eq(video.get_published_year())
)
except ClientError as error:
print('Problem getting data from DynamoDB: {}'.format(error))
return False
else:
if response['Count'] == 1:
video.podcast_url = response['Items'][0]['url']
return True | 50a39aceaba7980dff90043bf444b01607b258ae | 3,657,967 |
def translate(filename):
"""
File editing handler
"""
if request.method == 'POST':
return save_translation(app, request, filename)
else:
return open_editor_form(app, request, filename) | 5f9419db30ebd76e17f9f5c6efd746b3ddc1d8b0 | 3,657,968 |
def read_fileset(fileset):
"""
Extract required data from the sdoss fileset.
"""
feat_data = {
'DATE_OBS': [],
'FEAT_HG_LONG_DEG': [],
'FEAT_HG_LAT_DEG': [],
'FEAT_X_PIX': [],
'FEAT_Y_PIX': [],
'FEAT_AREA_DEG2': [],
'FEAT_FILENAME': []}
for current_file in fileset:
current_date = get_date_obs(current_file)
current_data = read_csv(current_file)
if (len(current_data) == 0):
LOG.error("Empty file: %s!", current_file)
return None
for cd in current_data:
feat_data['DATE_OBS'].append(current_date)
feat_data['FEAT_HG_LONG_DEG'].append(float(cd['FEAT_HG_LONG_DEG']))
feat_data['FEAT_HG_LAT_DEG'].append(float(cd['FEAT_HG_LAT_DEG']))
feat_data['FEAT_X_PIX'].append(int(cd['FEAT_X_PIX']))
feat_data['FEAT_Y_PIX'].append(int(cd['FEAT_Y_PIX']))
feat_data['FEAT_AREA_DEG2'].append(float(cd['FEAT_AREA_DEG2']))
feat_data['FEAT_FILENAME'].append(current_file)
return feat_data | 3c1c9018444af04ca8cc7d95176032ad92c42928 | 3,657,969 |
import numpy as np
def get_branch_index(edge_index, edge_degree, branch_cutting_frequency=1000):
"""Finds the branch indexes for each branch in the MST.
Parameters
----------
edge_index : array
The node index of the ends of each edge.
edge_degree : array
The degree for the ends of each edge.
branch_cutting_frequency : int, optional
An optimisation parameter, used to remove edges that have already been placed into a branch.
This significantly improves the speed of the algorithm as branches that are already constructed
are now removed from the branch finder.
Returns
-------
branch_index : list
A list of branches where each branch is a list of the edge index of edges contained in each branch.
branch_index_rejected : list
A list of branches that have not been completed. This will occur only if a subset of the edge indexes
of the full tree is provided.
"""
degree1 = edge_degree[0]
degree2 = edge_degree[1]
index1 = edge_index[0]
index2 = edge_index[1]
condition = np.where((degree1 == 2.) & (degree2 == 2.))[0]
index_branch_mid = condition
index_branch_mid1 = index1[index_branch_mid]
index_branch_mid2 = index2[index_branch_mid]
condition = np.where(((degree1 == 2.) & (degree2 != 2.)) | ((degree1 != 2.) & (degree2 == 2.)))[0]
index_branch_end = condition
index_branch_end1 = index1[index_branch_end]
index_branch_end2 = index2[index_branch_end]
degree_branch_end1 = degree1[index_branch_end]
degree_branch_end2 = degree2[index_branch_end]
check_mid = np.ones(len(index_branch_mid))
check_end = np.ones(len(index_branch_end))
branch_index = []
branch_index_rejected = []
    mask_end = np.ones(index_branch_end.shape, dtype=bool)
    mask_mid = np.ones(index_branch_mid.shape, dtype=bool)
count = 0
item = 0
while item < len(index_branch_end):
if check_end[item] == 1.:
check_end[item] = 0.
done = 0.
_twig = []
_twig.append(index_branch_end[item])
if degree_branch_end1[item] == 2.:
node_index = index_branch_end1[item]
elif degree_branch_end2[item] == 2.:
node_index = index_branch_end2[item]
else:
assert ValueError("branch edge incorrect.")
mask_end[item] = False
while done == 0.:
condition = np.where(((check_mid == 1.) & (index_branch_mid1 == node_index)) |
((check_mid == 1.) & (index_branch_mid2 == node_index)))[0]
if len(condition) == 0:
condition = np.where(((check_end == 1.) & (index_branch_end1 == node_index)) |
((check_end == 1.) & (index_branch_end2 == node_index)))[0]
if len(condition) == 0:
branch_index_rejected = branch_index_rejected + \
np.ndarray.tolist(np.ndarray.flatten(np.array(_twig)))
done = 1.
else:
check_end[condition] = 0.
_twig.append(index_branch_end[condition])
done = 1.
mask_end[condition] = False
branch_index.append(np.ndarray.tolist(np.ndarray.flatten(np.array(_twig))))
else:
if len(condition) == 1:
check_mid[condition] = 0.
_twig.append(index_branch_mid[condition])
if index_branch_mid1[condition] == node_index:
node_index = index_branch_mid2[condition]
elif index_branch_mid2[condition] == node_index:
node_index = index_branch_mid1[condition]
else:
assert ValueError("Identification error.")
mask_mid[condition] = False
else:
assert ValueError("Found more than one vertex.")
else:
pass
if count % branch_cutting_frequency == 0 and count != 0:
index_branch_end = index_branch_end[mask_end]
check_end = check_end[mask_end]
index_branch_end1 = index_branch_end1[mask_end]
index_branch_end2 = index_branch_end2[mask_end]
degree_branch_end1 = degree_branch_end1[mask_end]
degree_branch_end2 = degree_branch_end2[mask_end]
index_branch_mid = index_branch_mid[mask_mid]
check_mid = check_mid[mask_mid]
index_branch_mid1 = index_branch_mid1[mask_mid]
index_branch_mid2 = index_branch_mid2[mask_mid]
mask_end = mask_end[mask_end]
mask_mid = mask_mid[mask_mid]
count = count + 1
item = 0
elif count % 1001 == 0:
count = count + 1
item = item + 1
elif item == len(index_branch_end) - 1:
index_branch_end = index_branch_end[mask_end]
check_end = check_end[mask_end]
index_branch_end1 = index_branch_end1[mask_end]
index_branch_end2 = index_branch_end2[mask_end]
degree_branch_end1 = degree_branch_end1[mask_end]
degree_branch_end2 = degree_branch_end2[mask_end]
index_branch_mid = index_branch_mid[mask_mid]
check_mid = check_mid[mask_mid]
index_branch_mid1 = index_branch_mid1[mask_mid]
index_branch_mid2 = index_branch_mid2[mask_mid]
mask_end = mask_end[mask_end]
mask_mid = mask_mid[mask_mid]
count = count + 1
item = 0
else:
count = count + 1
item = item + 1
branch_index_rejected = branch_index_rejected + np.ndarray.tolist(np.ndarray.flatten(np.array(index_branch_mid)))
branch_index = [np.ndarray.tolist(np.hstack(np.array(branch_index[i]))) for i in range(0, len(branch_index))]
if len(branch_index_rejected) != 0:
branch_index_rejected = np.ndarray.tolist(np.hstack(np.array(branch_index_rejected)))
return branch_index, branch_index_rejected | 3ac24625f9c67cdb60759e840b06b21f260733c9 | 3,657,970 |
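A small sketch on an assumed toy graph: a path 0-1-2-3-4 whose two outer edges touch degree-1 nodes and whose interior edges join degree-2 nodes, so the whole path is expected to come back as a single branch.

import numpy as np

# Toy edge list for the path 0-1-2-3-4 (not a real MST).
edge_index = np.array([[0, 1, 2, 3],
                       [1, 2, 3, 4]])
edge_degree = np.array([[1, 2, 2, 2],
                        [2, 2, 2, 1]])
branches, rejected = get_branch_index(edge_index, edge_degree)
# Expected (on the legacy NumPy this code assumes): branches == [[0, 1, 2, 3]], rejected == []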
def update_coverage(coverage, path, func, line, status):
"""Add to coverage the coverage status of a single line"""
coverage[path] = coverage.get(path, {})
coverage[path][func] = coverage[path].get(func, {})
coverage[path][func][line] = coverage[path][func].get(line, status)
coverage[path][func][line] = coverage[path][func][line].combine(status)
return coverage | 46e5a1e5c4ebba3a9483f90ada96a0f7f94d8c1d | 3,657,971 |
def cross_product(v1, v2):
"""Calculate the cross product of 2 vectors as (x1 * y2 - x2 * y1)."""
return v1.x * v2.y - v2.x * v1.y | 871d803ef687bf80facf036549b4b2062f713994 | 3,657,972 |
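A quick sketch; any objects exposing .x and .y attributes work, a namedtuple is used here purely for illustration.

from collections import namedtuple

Vec2 = namedtuple("Vec2", ["x", "y"])
cross_product(Vec2(1, 0), Vec2(0, 1))  # -> 1 (unit x crossed with unit y)
cross_product(Vec2(2, 3), Vec2(4, 6))  # -> 0 (parallel vectors)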
def loadData(fname='Unstra.out2.00008.athdf'):
"""load 3d bfield and calc the current density"""
#data=ath.athdf(fname,quantities=['B1','B2','B3'])
time,data=ath.athdf(fname,quantities=['vel1'])
vx = data['vel1']
time,data=ath.athdf(fname,quantities=['vel2'])
vy = data['vel2']
time,data=ath.athdf(fname,quantities=['vel3'])
vz = data['vel3']
x = data['x1f']
y = data['x2f']
z = data['x3f']
# ---
def curl(vx,vy,vz,dx,dy,dz):
[dzvx,dyvx,dxvx] = np.gradient(vx)
[dzvy,dyvy,dxvy] = np.gradient(vy)
[dzvz,dyvz,dxvz] = np.gradient(vz)
cx = dyvz/dy-dzvy/dz
cy = dzvx/dz-dxvz/dx
cz = dxvy/dx-dyvx/dy
        # No need to delete the intermediate references manually;
        # Python garbage-collects them after the function returns cx, cy, cz.
#del dzvx
#del dzvy
#del dzvz
return cx,cy,cz
# ---
dx = dz = x[1]-x[0]
dy = y[1]-y[0]
jx,jy,jz = curl(vx,vy,vz,dx,dy,dz)
w2 = jx**2+jy**2+jz**2
del jx,jy,jz,vx,vy,vz
return w2 | 121768232fe71ce8ce3714aea70b5bf2c7493907 | 3,657,973 |
def text_iou(ground_truth: Text, prediction: Text) -> ScalarMetricValue:
"""
Calculates agreement between ground truth and predicted text
"""
return float(prediction.answer == ground_truth.answer) | 5ea135b30ba93da45fb1ecd624fe7dc556f01cf5 | 3,657,974 |
def divisors(num):
"""
Takes a number and returns all divisors of the number, ordered least to greatest
:param num: int
:return: list (int)
"""
    # Fill in the function and change the return statement.
return 0 | f15169b2672847294a219207f6022ad3e49338d2 | 3,657,975 |
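The body above is an exercise stub; one possible completion (an illustrative sketch under a different name, not the author's intended solution) is:

def divisors_sketch(num):
    """Possible completion of the stub above: all divisors of num, least to greatest."""
    return [d for d in range(1, abs(num) + 1) if num % d == 0]

divisors_sketch(12)  # -> [1, 2, 3, 4, 6, 12]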
def space_oem(*argv):
"""Handle oem files
Usage:
space-oem get <selector>...
space-oem insert (- | <file>)
space-oem compute (- | <selector>...) [options]
space-oem list <selector>... [options]
space-oem purge <selector>... [--until <until>]
space-oem list-tags <selector>...
space-oem tag <selector> <tag> [options]
Options:
get Retrieve an existing OEM from the database
insert Insert an OEM into the database
compute Compute OEM from an other OPM, OEM or TLE
list List existing ephemerides
purge Remove old OEMs. Use --last option
list-tags List available tags for ephems of the selected objects
tag Create a tag for a particular ephem
<selector> Selector of the satellite (see help of the "sat" command)
-f, --frame <frame> Frame in which to write the file to
-d, --date <date> Start date of the ephem [default: midnight]
(format %Y-%m-%dT%H:%M:%S)
-r, --range <days> Duration of extrapolation [default: 3d]
-s, --step <step> Step size of the OEM [default: 180s]
-i, --interp <inter> Interpolation method (linear, lagrange) [default: lagrange]
-l, --last <last> When listing print the last N OEM [default: 10]
-I, --insert Insert the computed OEM into the database
-F, --force Force insertion
--until <until> When purging, remove all file older than this date [default: 4w]
May be a duration, or a date
"""
return _generic_cmd("oem", space_oem.__doc__, *argv) | 54c479f7008f475f778f491c7b6c5574390fd38c | 3,657,976 |
def compare_distance(tree,target):
"""
Checks tree edit distance. Since every node has a unique position, we know that the node is the
same when the positions are the same. Hence, a simple method of counting the number of edits
one needs to do to create the target tree out of a given tree is equal to the number of positional
differences.
"""
# check for positional overlap
edit_value = 0
for node in target:
node.found = False
for node in tree:
same_node = False
for t_node in target:
if node.pos[0] == t_node.pos[0] and node.pos[1] == t_node.pos[1]:
same_node = True
t_node.found = True
if same_node == False:
edit_value += 1
# count found
for node in target:
if not node.found:
edit_value += 1
return edit_value | 96b57e88b8e70dbb43231b56cbe7e9b7ebcfd10f | 3,657,977 |
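A tiny sketch with stand-in nodes; SimpleNamespace is used only to provide the pos attribute the function expects (it also lets the function set .found on target nodes).

from types import SimpleNamespace

tree   = [SimpleNamespace(pos=(0, 0)), SimpleNamespace(pos=(1, 0))]
target = [SimpleNamespace(pos=(0, 0)), SimpleNamespace(pos=(2, 1))]
compare_distance(tree, target)  # -> 2: one tree node has no match, one target node is never found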
def header(name='peptide'):
"""
Parameters
----------
    name : str
        base name of the PDB file to read ('{name}.pdb')
    Returns
    -------
    str
        the header block: everything before the first MODEL/ATOM record
"""
with open('{}.pdb'.format(name), 'r') as f:
file = f.read()
model = file.find('\nMODEL')
atom = file.find('\nATOM')
if atom < 0:
raise ValueError('no ATOM entries found in PDB')
if model < 0:
index = atom
else:
index = min(model, atom)
return file[:index] + '\n' | 84e75e34771b7c395ee36611c8d055ca1fdf67dc | 3,657,978 |
import datetime
def isoUTC2datetime(iso):
"""Convert and ISO8601 (UTC only) like string date/time value to a
:obj:`datetime.datetime` object.
:param str iso: ISO8061 string
:rtype: datetime.datetime
"""
formats = ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M:%S.%f"]
if 'T' in iso:
formats = ["%Y-%m-%dT%H:%M:%S", "%Y-%m-%dT%H:%M:%S.%f",
"%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%dT%H:%M:%S.%fZ"]
for fmt in formats:
try:
return datetime.datetime.strptime(iso, fmt)
except ValueError:
continue
raise ValueError("Couldn't parse ISO8061 string '{}'".format(iso)) | 0dae4fb7828f7319afa7190deca6ae4fda5ffd1d | 3,657,979 |
from typing import Optional
from typing import Dict
from typing import Union
def groupstatus(aid: int, state: int = 0) -> EndpointResult:
"""Retrieve anime release status for different groups.
:param aid: anidb anime id
:type aid: int
:param state: release state. int 1 to 6. Example: zenchi.mappings.group_status.ONGOING
:type state: int, optional
:return: a tuple (data, code). data is a dictionary with the keys:
        if code in (325, 330):
:message str: NO SUCH GROUPS FOUND, NO SUCH ANIME
if code == 225:
:status: List of dictionaries with the following keys:
:group_id int:
:group_name str:
:completion_state int:
:last_episode_number int:
:rating int:
:votes int:
:episode_range str:
:truncated bool: if the response was truncated because it didn't fit the UDP packet, this will be True.
:rtype: EndpointResult
"""
def cb(code: int, response: str) -> Optional[EndpointDict]:
if code in (325, 330):
return dict(message=response_message[code])
if code == 225:
result = []
groups_data = response.splitlines()[1:]
truncated = False
for group_data in groups_data:
parts = group_data.split("|")
if len(parts) < 7:
logger.warning(
"Response was truncated, too much data for UDP packet."
)
truncated = True
break
result.append(
{
"group_id": int(parts[0]),
"group_name": parts[1],
"completion_state": int(parts[2]),
"last_episode_number": int(parts[3]),
"rating": int(parts[4]),
"votes": int(parts[5]),
"episode_range": parts[6],
}
)
return dict(status_list=result, truncated=truncated)
return None
params: Dict[str, Union[str, int]] = dict(aid=aid)
if state:
params["state"] = state
return send("GROUPSTATUS", params, cb) | f81ab06c8d47b9660cac9bde76978a722a13f49f | 3,657,980 |
def get_communities_codes(communities, fields=None, community_field='Community'):
"""From the postal code conversion file, select entries for the `communities`.
This function is similar to get_community_codes, but works if
`communities` and `fields` are strings or lists of strings.
"""
if not isinstance(communities, pd.DataFrame) and not isinstance(communities, pd.Series):
communities = pd.Series(communities, name=community_field)
df = _pccf_df.merge(communities, on=community_field)
return df if fields is None else df[ensure_list(fields) + [community_field]].drop_duplicates() | 552fef722cd138f1a935755349116c89e0df3e3b | 3,657,981 |
from vba import VBA
from dataFrame import DF
def GLMFit_(file, designMatrix, mask, outputVBA, outputCon,
fit="Kalman_AR1"):
"""
    Call the GLM Fit function with appropriate arguments
Parameters
----------
file
designmatrix
mask
outputVBA
outputCon
fit='Kalman_AR1'
Returns
-------
glm, a vba.VBA instance representing the GLM
"""
if fit == "Kalman_AR1":
model = "ar1"
method = "kalman"
elif fit == "Ordinary Least Squares":
method = "ols"
model="spherical"
elif fit == "Kalman":
method = "kalman"
model = "spherical"
s = dict()
s["GlmDumpFile"] = outputVBA
s["ConfigFilePath"] = outputCon
s["DesignFilePath"] = designMatrix
tab = DF.read(designMatrix)
glm = VBA(tab, mask_url=mask, create_design_mat = False, mri_names = file,
model = model, method = method)
glm.fit()
glm.save(s)
return glm | 25ced91bc6c865faaffab30278d59aad6a475d4f | 3,657,982 |
from typing import Iterable
def get_stoch_rsi(quotes: Iterable[Quote], rsi_periods: int, stoch_periods: int, signal_periods: int, smooth_periods: int = 1):
"""Get Stochastic RSI calculated.
Stochastic RSI is a Stochastic interpretation of the Relative Strength Index.
Parameters:
`quotes` : Iterable[Quote]
Historical price quotes.
`rsi_periods` : int
Number of periods for the RSI.
`stoch_periods` : int
Number of periods for the Stochastic.
`signal_periods` : int
Number of periods for the Stochastic RSI SMA signal line.
`smooth_periods` : int, defaults 1
Number of periods for Stochastic Smoothing. Use 1 for Fast or 3 for Slow.
Returns:
`StochRSIResults[StochRSIResult]`
StochRSIResults is list of StochRSIResult with providing useful helper methods.
See more:
- [Stochastic RSI Reference](https://daveskender.github.io/Stock.Indicators.Python/indicators/StochRsi/#content)
- [Helper Methods](https://daveskender.github.io/Stock.Indicators.Python/utilities/#content)
"""
stoch_rsi_results = CsIndicator.GetStochRsi[Quote](CsList(Quote, quotes), rsi_periods, stoch_periods, signal_periods, smooth_periods)
return StochRSIResults(stoch_rsi_results, StochRSIResult) | b548a620ef3b3bc4cb37049d1dfb29aac442b394 | 3,657,983 |
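A usage sketch with conventional parameter choices (14/14/3 with smoothing 1 is the common "fast" setting, not something mandated by this wrapper); quotes is assumed to already hold historical Quote objects obtained elsewhere.

results = get_stoch_rsi(quotes, rsi_periods=14, stoch_periods=14,
                        signal_periods=3, smooth_periods=1)
# results behaves like a list of StochRSIResult entries, one per input quote.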
def PUtilHann (inUV, outUV, err, scratch=False):
""" Hanning smooth a UV data set
returns smoothed UV data object
inUV = Python UV object to smooth
Any selection editing and calibration applied before average.
outUV = Predefined UV data if scratch is False, ignored if
scratch is True.
err = Python Obit Error/message stack
scratch = True if this is to be a scratch file (same type as inUV)
"""
################################################################
if inUV.myClass=='AIPSUVData':
raise TypeError("Function unavailable for "+inUV.myClass)
# Checks
if not inUV.UVIsA():
raise TypeError("inUV MUST be a Python Obit UV")
if ((not scratch) and (not outUV.UVIsA())):
raise TypeError("outUV MUST be a Python Obit UV")
if not OErr.OErrIsA(err):
raise TypeError("err MUST be an OErr")
#
# Create output for scratch
if scratch:
outUV = UV("None")
outUV.me = Obit.UVUtilHann(inUV.me, scratch, outUV.me, err.me)
if err.isErr:
OErr.printErrMsg(err, "Error Hanning UV data")
# Get scratch file info
if scratch:
PUVInfo (outUV, err)
return outUV
# end PUtilHann | a53f8d442055b2d575b36f49a96b68f6c6eff7ed | 3,657,984 |
def str2bytes(seq):
""" Converts an string to a list of integers """
    return list(map(ord, str(seq))) | 7afe8e40cd4133c59be673b537f2717591b093cf | 3,657,985
def __downloadFilings(cik: str) -> list:
"""Function to download the XML text of listings pages for a given CIK
from the EDGAR database.
Arguments:
cik {str} -- Target CIK.
Returns:
list -- List of page XML, comprising full listing metadata for CIK.
"""
idx = 0 # Current page index
end = False # Flags for loop
count = 100 # Number of results per page (limited by SEC)
# Text indicating next page exists
next_page_text = 'rel="next" type="application/atom+xml" />'
pages = []
while not end:
# Making request
page_text = __makeRequest(cik=cik, start_idx=idx, count=count)
end = (page_text.find(next_page_text) == -1) # Update end flag
idx += count # Increment index for next page
pages.append(page_text) # Save page text
return pages | c98996d3607076ed0328a5e0621ef015037ddc2e | 3,657,986 |
def KK_RC43_fit(params, w, t_values):
"""
Kramers-Kronig Function: -RC-
Kristian B. Knudsen ([email protected] / [email protected])
"""
Rs = params["Rs"]
R1 = params["R1"]
R2 = params["R2"]
R3 = params["R3"]
R4 = params["R4"]
R5 = params["R5"]
R6 = params["R6"]
R7 = params["R7"]
R8 = params["R8"]
R9 = params["R9"]
R10 = params["R10"]
R11 = params["R11"]
R12 = params["R12"]
R13 = params["R13"]
R14 = params["R14"]
R15 = params["R15"]
R16 = params["R16"]
R17 = params["R17"]
R18 = params["R18"]
R19 = params["R19"]
R20 = params["R20"]
R21 = params["R21"]
R22 = params["R22"]
R23 = params["R23"]
R24 = params["R24"]
R25 = params["R25"]
R26 = params["R26"]
R27 = params["R27"]
R28 = params["R28"]
R29 = params["R29"]
R30 = params["R30"]
R31 = params["R31"]
R32 = params["R32"]
R33 = params["R33"]
R34 = params["R34"]
R35 = params["R35"]
R36 = params["R36"]
R37 = params["R37"]
R38 = params["R38"]
R39 = params["R39"]
R40 = params["R40"]
R41 = params["R41"]
R42 = params["R42"]
R43 = params["R43"]
return (
Rs
+ (R1 / (1 + w * 1j * t_values[0]))
+ (R2 / (1 + w * 1j * t_values[1]))
+ (R3 / (1 + w * 1j * t_values[2]))
+ (R4 / (1 + w * 1j * t_values[3]))
+ (R5 / (1 + w * 1j * t_values[4]))
+ (R6 / (1 + w * 1j * t_values[5]))
+ (R7 / (1 + w * 1j * t_values[6]))
+ (R8 / (1 + w * 1j * t_values[7]))
+ (R9 / (1 + w * 1j * t_values[8]))
+ (R10 / (1 + w * 1j * t_values[9]))
+ (R11 / (1 + w * 1j * t_values[10]))
+ (R12 / (1 + w * 1j * t_values[11]))
+ (R13 / (1 + w * 1j * t_values[12]))
+ (R14 / (1 + w * 1j * t_values[13]))
+ (R15 / (1 + w * 1j * t_values[14]))
+ (R16 / (1 + w * 1j * t_values[15]))
+ (R17 / (1 + w * 1j * t_values[16]))
+ (R18 / (1 + w * 1j * t_values[17]))
+ (R19 / (1 + w * 1j * t_values[18]))
+ (R20 / (1 + w * 1j * t_values[19]))
+ (R21 / (1 + w * 1j * t_values[20]))
+ (R22 / (1 + w * 1j * t_values[21]))
+ (R23 / (1 + w * 1j * t_values[22]))
+ (R24 / (1 + w * 1j * t_values[23]))
+ (R25 / (1 + w * 1j * t_values[24]))
+ (R26 / (1 + w * 1j * t_values[25]))
+ (R27 / (1 + w * 1j * t_values[26]))
+ (R28 / (1 + w * 1j * t_values[27]))
+ (R29 / (1 + w * 1j * t_values[28]))
+ (R30 / (1 + w * 1j * t_values[29]))
+ (R31 / (1 + w * 1j * t_values[30]))
+ (R32 / (1 + w * 1j * t_values[31]))
+ (R33 / (1 + w * 1j * t_values[32]))
+ (R34 / (1 + w * 1j * t_values[33]))
+ (R35 / (1 + w * 1j * t_values[34]))
+ (R36 / (1 + w * 1j * t_values[35]))
+ (R37 / (1 + w * 1j * t_values[36]))
+ (R38 / (1 + w * 1j * t_values[37]))
+ (R39 / (1 + w * 1j * t_values[38]))
+ (R40 / (1 + w * 1j * t_values[39]))
+ (R41 / (1 + w * 1j * t_values[40]))
+ (R42 / (1 + w * 1j * t_values[41]))
+ (R43 / (1 + w * 1j * t_values[42]))
) | 9f88b73ac5da422069e67af28c15c2846178169b | 3,657,987 |
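The return value is simply Rs plus 43 parallel-RC elements evaluated at the time constants in t_values; an equivalent compact formulation (a sketch for readability, not part of the project's API) makes that pattern explicit:

def kk_rc_fit_sketch(params, w, t_values, n=43):
    """Equivalent compact form: Rs + sum over k of R_k / (1 + j*w*tau_k)."""
    return params["Rs"] + sum(
        params["R%d" % (k + 1)] / (1 + w * 1j * t_values[k]) for k in range(n)
    )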
from .column import ColumnVirtualConstant
def vconstant(value, length, dtype=None, chunk_size=1024):
"""Creates a virtual column with constant values, which uses 0 memory.
:param value: The value with which to fill the column
:param length: The length of the column, i.e. the number of rows it should contain.
:param dtype: The preferred dtype for the column.
:param chunk_size: Could be used to optimize the performance (evaluation) of this column.
"""
return ColumnVirtualConstant(value=value, length=length, dtype=dtype, chunk_size=chunk_size) | b712ec9f1aea2f65f1f992cd3b23ab671339f97a | 3,657,988 |
import os
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
return os.stat(filename).st_size | f21bd048bf1fdbc80cdcbd4f14dba8f390439f74 | 3,657,989 |
def get_all_nsds_of_node(logger, instance):
"""
This function performs "mmlsnsd -X -Y".
Args:
        instance (str): instance for which disks are used by the filesystem.
    Returns:
        all_nsd_names (list): NSD names in list format.
Ex: [nsd_1a_1_0, nsd_1c_1_0, nsd_1c_d_1]
"""
logger.debug("Function Entry: get_all_nsds_of_node. "
"Args: instance={0}".format(instance))
nsd_list = []
nsd_list = SpectrumScaleNSD.get_all_nsd_info()
all_nsd_names = []
for nsd in nsd_list:
if nsd.get_remarks() == 'server node' and instance in nsd.get_server_list():
all_nsd_names.append(nsd.get_name())
logger.debug("Function Exit: get_all_nsds_of_node(). "
"Return Params: all_nsd_names={0} ".format(all_nsd_names))
return all_nsd_names | 648bb6706c2dd01d448044b98425642319a75eca | 3,657,990 |
def gen_color_palette(n: int):
""" Generates a hex color palette of size n, without repeats
and only light colors (easily visible on dark background).
Adapted from code by 3630 TAs Binit Shah and Jerred Chen
Args:
n (int): number of clouds, each cloud gets a unique color
"""
palette = []
do_replace = False if len(COLOR_OPTIONS) >= n else True
for i in np.random.choice(len(COLOR_OPTIONS), n, replace=do_replace):
palette.append(COLOR_OPTIONS[i])
return palette | 6b1004674d1448cdcca8c3500b149b1602e0045f | 3,657,991 |
def absolute_vorticity(u, v, dx, dy, lats, dim_order='yx'):
"""Calculate the absolute vorticity of the horizontal wind.
Parameters
----------
u : (M, N) ndarray
x component of the wind
v : (M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
lats : (M, N) ndarray
latitudes of the wind data
Returns
-------
(M, N) ndarray
absolute vorticity
Notes
-----
If inputs have more than two dimensions, they are assumed to have either leading dimensions
of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
"""
f = coriolis_parameter(lats)
relative_vorticity = vorticity(u, v, dx, dy, dim_order=dim_order)
return relative_vorticity + f | 9ae200b3a8b8415f67fc640b0702bc5272c77d3a | 3,657,992 |
def drop_path(input, drop_prob=0.0, training=False, scale_by_keep=True):
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = input.new_empty(shape).bernoulli_(keep_prob)
if keep_prob > 0.0 and scale_by_keep:
random_tensor.div_(keep_prob)
return input * random_tensor | 289ae545fa184bb459275685d3a2894e5219db2e | 3,657,993 |
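A quick sketch of applying it to a random batch during training (shapes and drop probability are arbitrary):

import torch

x = torch.randn(8, 64, 32, 32)                          # batch of 8 feature maps
out = drop_path(x, drop_prob=0.1, training=True)        # ~10% of samples zeroed, rest rescaled
out_eval = drop_path(x, drop_prob=0.1, training=False)  # identity at eval time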
from typing import Dict
from typing import Any
from typing import List
import secrets
def ask_user_config() -> Dict[str, Any]:
"""
Ask user a few questions to build the configuration.
Interactive questions built using https://github.com/tmbo/questionary
:returns: Dict with keys to put into template
"""
questions: List[Dict[str, Any]] = [
{
"type": "confirm",
"name": "dry_run",
"message": "Do you want to enable Dry-run (simulated trades)?",
"default": True,
},
{
"type": "text",
"name": "stake_currency",
"message": "Please insert your stake currency:",
"default": 'USDT',
},
{
"type": "text",
"name": "stake_amount",
"message": f"Please insert your stake amount (Number or '{UNLIMITED_STAKE_AMOUNT}'):",
"default": "100",
"validate": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_float(val),
"filter": lambda val: '"' + UNLIMITED_STAKE_AMOUNT + '"'
if val == UNLIMITED_STAKE_AMOUNT
else val
},
{
"type": "text",
"name": "max_open_trades",
"message": f"Please insert max_open_trades (Integer or '{UNLIMITED_STAKE_AMOUNT}'):",
"default": "3",
"validate": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_int(val),
"filter": lambda val: '"' + UNLIMITED_STAKE_AMOUNT + '"'
if val == UNLIMITED_STAKE_AMOUNT
else val
},
{
"type": "text",
"name": "timeframe",
"message": "Please insert your desired timeframe (e.g. 5m):",
"default": "5m",
},
{
"type": "text",
"name": "fiat_display_currency",
"message": "Please insert your display Currency (for reporting):",
"default": 'USD',
},
{
"type": "select",
"name": "exchange_name",
"message": "Select exchange",
"choices": [
"binance",
"binanceus",
"bittrex",
"kraken",
"ftx",
"kucoin",
"gateio",
Separator(),
"other",
],
},
{
"type": "autocomplete",
"name": "exchange_name",
"message": "Type your exchange name (Must be supported by ccxt)",
"choices": available_exchanges(),
"when": lambda x: x["exchange_name"] == 'other'
},
{
"type": "password",
"name": "exchange_key",
"message": "Insert Exchange Key",
"when": lambda x: not x['dry_run']
},
{
"type": "password",
"name": "exchange_secret",
"message": "Insert Exchange Secret",
"when": lambda x: not x['dry_run']
},
{
"type": "password",
"name": "exchange_key_password",
"message": "Insert Exchange API Key password",
"when": lambda x: not x['dry_run'] and x['exchange_name'] == 'kucoin'
},
{
"type": "confirm",
"name": "telegram",
"message": "Do you want to enable Telegram?",
"default": False,
},
{
"type": "password",
"name": "telegram_token",
"message": "Insert Telegram token",
"when": lambda x: x['telegram']
},
{
"type": "text",
"name": "telegram_chat_id",
"message": "Insert Telegram chat id",
"when": lambda x: x['telegram']
},
{
"type": "confirm",
"name": "api_server",
"message": "Do you want to enable the Rest API (includes FreqUI)?",
"default": False,
},
{
"type": "text",
"name": "api_server_listen_addr",
"message": "Insert Api server Listen Address (best left untouched default!)",
"default": "127.0.0.1",
"when": lambda x: x['api_server']
},
{
"type": "text",
"name": "api_server_username",
"message": "Insert api-server username",
"default": "freqtrader",
"when": lambda x: x['api_server']
},
{
"type": "text",
"name": "api_server_password",
"message": "Insert api-server password",
"when": lambda x: x['api_server']
},
]
answers = prompt(questions)
if not answers:
# Interrupted questionary sessions return an empty dict.
raise OperationalException("User interrupted interactive questions.")
# Force JWT token to be a random string
answers['api_server_jwt_key'] = secrets.token_hex()
return answers | 7697ba65c7ba7f73b81af3ae3575beb0eb9b30b8 | 3,657,994 |
def generate_menusystem():
""" Generate Top-level Menu Structure (cached for specified timeout) """
return '[%s] Top-level Menu System' % timestamp() | eb3575835889af768887f3071816d0f22f867568 | 3,657,995 |
import time
def main():
""" main loop """
    #TODO: enable parallelization to use multiple cores
N = int(1e18)
def sum(N):
start = time.time()
result = 0
for i in range(N):
if i % int(1e9) == 0:
print("** step %i **" % int(i/1e9))
result += i
end = time.time()
print("Sum till %N yields %s, took %s seconds" %(N,result, end-start))
return result, end-start
def count_friend_numbers(N, base=10):
"""
Project Euler problem 612 - https://projecteuler.net/problem=612
Let's call two numbers friend numbers if their representation in base 10 has at least one common digit.
E.g. 1123 and 3981 are friend numbers.
Let f(N) be the number of pairs (p,q) with 1≤p<q<N such that p and q are friend numbers.
This function will calculate f(N)
:param N: ceiling number to calculate the number of friend numbers
:return: number of friend number (p,q) s.t. 1≤p<q<N
"""
start = time.time()
result = 0
for i in range(1,base):
# 1. till N=100=1e2
# single digit: friends XX
result += 2*base-2
# double digits: friends XX
result += int( ((base-1)+(base-i))*i/2 )
result += int(( 1 + base-i-2)*(base-i-2)/2 )
result += (i+1)*2*(base-i-1)
result += (base-i-1)*(base-2 + 2*(base-i-1))
# 2. till N=1000=1e3
# single digits: extra friends XXX
            result += base**2 + (base-2)*(2*base-1)
            # double digits: extra friends XXX
            result += (base-2)*( 2*base**2 + (base-2)*(2*base-1) + (base-3)*(2*base-1) )
            result += 2*( base**2 + (base-2)*base + 2*(base-2)*(base-1) )
# triple digits: friends XXX
# 3. till N=10000=1e4
# single digits: extra friends XXXX
        # result += base**3 + (base-2)*( base**2 + (base-1)*(2*base-1) )
end = time.time()
print("Counting friend numbers till %s yields %s, took %0.5f seconds" %(N,result, end-start))
return result, end-start
# sum(N)
count_friend_numbers(100) | 1327fcff07e706624cfb5fa9f270bbac25f340bb | 3,657,996 |
import json
import aiohttp
import asyncio
from loguru import logger
from typing import Optional
async def simple_post(session, url: str, data: dict, timeout: int = 10) -> Optional[dict]:
"""
A simple post function with exception feedback
Args:
session (CommandSession): current session
url (str): post url
data (dict): post data
timeout (int): timeout threshold
Returns:
Json response in dict if no exception occurred
Otherwise, return None and send feedback to user.
"""
try:
logger.debug(f"Start posting {data} to {url} ...")
async with aiohttp.ClientSession() as client:
async with client.post(url, data=data, timeout=timeout, proxy=get_local_proxy()) as response:
if response.status != 200:
logger.error(f"Cannot connect to {url}, Status: {response.status}")
                        await session.send("Unable to connect to the server")
return None
r = json.loads(await response.text())
logger.debug(f"Response: {r}")
return r
except asyncio.TimeoutError:
logger.error(f"Cannot connect to {url}, Error: Timeout")
        await session.send("Request timed out") | 2a0b6a26154f322f42850106a21b167161fe7cc0 | 3,657,997
from numpy import sin, cos
def gnomonic_proj(lon, lat, lon0=0, lat0=0):
"""
lon, lat : arrays of the same shape; longitude and latitude
of points to be projected
lon0, lat0: floats, longitude and latitude in radians for
the tangency point
---------------------------
Returns the gnomonic projection, x, y
https://mathworld.wolfram.com/GnomonicProjection.html
"""
cosc = sin(lat0)*sin(lat) + cos(lat0)*cos(lat)*cos(lon-lon0)
x = cos(lat)*sin(lon-lon0)/cosc
y = (cos(lat0)*sin(lat) - sin(lat0)*cos(lat)*cos(lon-lon0))/cosc
return x, y | 61daaee7bc0ca5dd901582adc03ec6c36ddf2ef2 | 3,657,998 |
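A small sketch projecting one point about a tangency point at 45°N (the coordinates are arbitrary):

import numpy as np

lon, lat = np.radians(10.0), np.radians(47.0)
x, y = gnomonic_proj(lon, lat, lon0=0.0, lat0=np.radians(45.0))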
def local_pluggables(pluggable_type):
"""
Accesses pluggable names
Args:
pluggable_type (Union(PluggableType,str)): The pluggable type
Returns:
list[str]: pluggable names
Raises:
AquaError: if the type is not registered
"""
_discover_on_demand()
if isinstance(pluggable_type, str):
for ptype in PluggableType:
if ptype.value == pluggable_type:
pluggable_type = ptype
break
if not isinstance(pluggable_type, PluggableType):
raise AquaError(
'Invalid pluggable type {}'.format(pluggable_type))
if pluggable_type not in _REGISTRY_PLUGGABLE.registry:
raise AquaError('{} not registered'.format(pluggable_type))
return [pluggable.name for pluggable in _REGISTRY_PLUGGABLE.registry[pluggable_type].values()] | 8626e931da1fd33d76cef4ed85b6ea1a7d7e907d | 3,657,999 |