content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
import traceback
import itertools
import operator
def build_missing_wheels(
packages_and_envts,
build_remotely=False,
with_deps=False,
dest_dir=THIRDPARTY_DIR,
):
"""
    Build all wheels for a list of (Package, Environment) tuples and save them in
    `dest_dir`. Return a list of (Package, Environment) tuples that could not be
    built, and a list of built wheel filenames.
"""
not_built = []
built_filenames = []
packages_and_envts = itertools.groupby(
sorted(packages_and_envts), key=operator.itemgetter(0))
for package, pkg_envts in packages_and_envts:
envts = [envt for _pkg, envt in pkg_envts]
python_versions = sorted(set(e.python_version for e in envts))
operating_systems = sorted(set(e.operating_system for e in envts))
built = None
try:
built = build_wheels(
requirements_specifier=package.specifier,
with_deps=with_deps,
build_remotely=build_remotely,
python_versions=python_versions,
operating_systems=operating_systems,
verbose=False,
dest_dir=dest_dir,
)
print('.')
except Exception as e:
print('#############################################################')
print('############# WHEEL BUILD FAILED ######################')
traceback.print_exc()
print()
print('#############################################################')
if not built:
            for envt in envts:
not_built.append((package, envt))
else:
for bfn in built:
print(f' --> Built wheel: {bfn}')
built_filenames.append(bfn)
return not_built, built_filenames
|
261433deb7bc691f92d995d606e108a807201b97
| 24,809 |
def str_to_bool(string):
"""
Parses string into boolean
"""
    string = string.lower()
    return string in ("true", "yes")
|
e7c1645ab3ba59fc4721872df76f406c571cab8f
| 24,812 |
def rerotateExtremaPoints(minSepPoints_x, minSepPoints_y, maxSepPoints_x, maxSepPoints_y,\
lminSepPoints_x, lminSepPoints_y, lmaxSepPoints_x, lmaxSepPoints_y,\
Phi, Op, yrealAllRealInds):
""" Rotate the extrema points from (the projected ellipse centered at the origin
and x-axis aligned with semi-major axis) to the original projected ellipse
Args:
minSepPoints_x (numpy array):
the first quadrant x-coordinates of the minimum separations (with length n)
minSepPoints_y (numpy array):
the first quadrant y-coordinates of the minimum separations (with length n)
maxSepPoints_x (numpy array):
the first quadrant x-coordinates of the maximum separations (with length n)
maxSepPoints_y (numpy array):
the first quadrant y-coordinates of the maximum separations (with length n)
lminSepPoints_x (numpy array):
the first quadrant x-coordinates of the local minimum separations (with same length as yrealImagInds)
lminSepPoints_y (numpy array):
the first quadrant y-coordinates of the local minimum separations (with same length as yrealImagInds)
lmaxSepPoints_x (numpy array):
the first quadrant x-coordinates of the local maximum separations (with same length as yrealImagInds)
lmaxSepPoints_y (numpy array):
the first quadrant y-coordinates of the local maximum separations (with same length as yrealImagInds)
        Phi (numpy array):
angle from X-axis to semi-minor axis of projected ellipse
Op (numpy array):
the geometric center of the projected ellipse
yrealAllRealInds (numpy array):
            an array of integers acting as indices of planets which have min, max, local min, local max
Returns:
minSepPoints_x_dr (numpy array):
derotated minSepPoints_x
minSepPoints_y_dr (numpy array):
derotated minSepPoints_y
maxSepPoints_x_dr (numpy array):
derotated maxSepPoints_x
maxSepPoints_y_dr (numpy array):
derotated maxSepPoints_y
lminSepPoints_x_dr (numpy array):
derotated lminSepPoints_x
lminSepPoints_y_dr (numpy array):
derotated lminSepPoints_y
lmaxSepPoints_x_dr (numpy array):
derotated lmaxSepPoints_x
lmaxSepPoints_y_dr (numpy array):
derotated lmaxSepPoints_y
"""
minSepPoints_x_dr = np.zeros(len(minSepPoints_x))
minSepPoints_y_dr = np.zeros(len(minSepPoints_y))
maxSepPoints_x_dr = np.zeros(len(maxSepPoints_x))
maxSepPoints_y_dr = np.zeros(len(maxSepPoints_y))
lminSepPoints_x_dr = np.zeros(len(lminSepPoints_x))
lminSepPoints_y_dr = np.zeros(len(lminSepPoints_y))
lmaxSepPoints_x_dr = np.zeros(len(lmaxSepPoints_x))
lmaxSepPoints_y_dr = np.zeros(len(lmaxSepPoints_y))
minSepPoints_x_dr, minSepPoints_y_dr = rerotateEllipsePoints(minSepPoints_x, minSepPoints_y,Phi,Op[0],Op[1])
maxSepPoints_x_dr, maxSepPoints_y_dr = rerotateEllipsePoints(maxSepPoints_x, maxSepPoints_y,Phi,Op[0],Op[1])
lminSepPoints_x_dr, lminSepPoints_y_dr = rerotateEllipsePoints(lminSepPoints_x, lminSepPoints_y,Phi[yrealAllRealInds],Op[0][yrealAllRealInds],Op[1][yrealAllRealInds])
lmaxSepPoints_x_dr, lmaxSepPoints_y_dr = rerotateEllipsePoints(lmaxSepPoints_x, lmaxSepPoints_y,Phi[yrealAllRealInds],Op[0][yrealAllRealInds],Op[1][yrealAllRealInds])
return minSepPoints_x_dr, minSepPoints_y_dr, maxSepPoints_x_dr, maxSepPoints_y_dr,\
lminSepPoints_x_dr, lminSepPoints_y_dr, lmaxSepPoints_x_dr, lmaxSepPoints_y_dr
|
b656116d73cd98903fae0f54e3d575a59ae4b102
| 24,813 |
import glob
def does_name_exist(name):
""" check if a file with that name already exists """
return len(glob.glob('./photos/'+name+'.*')) > 0
|
c377f5fdb15d1d88ba6082c9be0e0400f5a8094d
| 24,814 |
def cont_hires(npoints, elecs, start_timestamp=0):
"""
Retrieve hires data (sampled at 2 kHz).
Parameters and outputs are the same as the `cont_raw` function.
Args:
npoints: number of datapoints to retrieve
elecs: list of electrodes to sample
start_timestamp: NIP timestamp to start data at, or most recent if 0
Returns:
"""
return _cont_base(_c.xl_cont_hires, npoints, elecs, start_timestamp)
|
6d420e19e0de94f83992c0945e0f9a994b1e5483
| 24,815 |
import torch
import MinkowskiEngine as ME
def batch_grid_subsampling_kpconv_gpu(points, batches_len, features=None, labels=None, sampleDl=0.1, max_p=0):
"""
    Same as batch_grid_subsampling, but implemented on the GPU. This is a hack that uses
    the Minkowski Engine's sparse quantization functions.
    Note: This function is not deterministic and may return the subsampled points
    in a different ordering, which will cause the subsequent steps to differ slightly.
"""
if labels is not None or features is not None:
raise NotImplementedError('subsampling not implemented for features and labels')
if max_p != 0:
raise NotImplementedError('subsampling only implemented by considering all points')
B = len(batches_len)
batch_start_end = torch.nn.functional.pad(torch.cumsum(batches_len, 0), (1, 0))
device = points[0].device
coord_batched = ME.utils.batched_coordinates(
[points[batch_start_end[b]:batch_start_end[b + 1]] / sampleDl for b in range(B)], device=device)
sparse_tensor = ME.SparseTensor(
features=points,
coordinates=coord_batched,
quantization_mode=ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE
)
s_points = sparse_tensor.features
s_len = torch.tensor([f.shape[0] for f in sparse_tensor.decomposed_features], device=device)
return s_points, s_len
|
9d4eb2b0d5ad7d36199cc6ff6ba567c49dccff4b
| 24,816 |
import configparser
import re
import speech_recognition as sr
def hot_word_detection(lang='en'):
"""
Hot word (wake word / background listen) detection
    What is hot word detection?
    ANSWER: A hot word listener waits for specific keywords chosen to activate the voice interface (e.g. "OK Google").
    Voice interfaces use speech recognition technologies to allow user input through spoken commands.
    You can set a custom hot word by calling setup(); your bot_name is your hot word.
:param lang: str
default 'en'
:return: Bool, str
status, command
"""
try:
config = configparser.ConfigParser()
config.read('config/config.ini')
bot_name = config['default']['bot_name']
except Exception as e:
raise DefaultFileNotFound
try:
r = sr.Recognizer()
with sr.Microphone() as source:
print("Background listening")
r.pause_threshold = 1
r.adjust_for_ambient_noise(source, duration=1)
audio = r.listen(source)
command = r.recognize_google(audio, language=lang).lower()
if re.search(bot_name, command):
print("Waking up...")
return True, command
else:
return False, False
except Exception:
return False, None
|
d982c33dc9b8af0e1592a88438664342fb25b8cc
| 24,817 |
def parse_ph5_length(length):
"""
Method for parsing length argument.
:param length: length
:type: str, numeric, or None
:returns: length value as a float
:type: float or None
"""
err_msg = "Invalid length value. %s" % (length)
return str_to_pos_float(length, err_msg)
|
f5f669bdcd28611e45bbefa80fce6f2bf16a663f
| 24,818 |
from typing import Tuple
def check_proper_torsion(
torsion: Tuple[int, int, int, int], molecule: "Ligand"
) -> bool:
"""
Check that the given torsion is valid for the molecule graph.
"""
for i in range(3):
try:
_ = molecule.get_bond_between(
atom1_index=torsion[i], atom2_index=torsion[i + 1]
)
except TopologyMismatch:
return False
return True
|
7b43e4838bf65ebb4505d1660819ace98bdbd038
| 24,820 |
def find_all_occurrences_and_indexes(seq):
"""
seq: array-like of pretty_midi Note
Finds all patterns and indexes of those patterns.
"""
list_patterns = list()
list_indexes = list()
res = list()
seq_x = seq
    while res is not None:
        seq_x, res, indexes = find_occurrences_and_indexes(seq_x)
        if res is not None:
            list_patterns.append(res)
            list_indexes.append(indexes)
    for i in range(len(seq_x)):
        # special case for non-recurring patterns: notes that appear only once
        if seq_x[i] is not None:
list_patterns.append([seq_x[i]])
list_indexes.append([i])
return list_patterns,list_indexes
|
ab85dca7f30768d75e28ab76b974e50364e8746a
| 24,821 |
def get_uas_volume_admin(volume_id):
"""Get volume info for volume ID
Get volume info for volume_id
:param volume_id:
:type volume_id: str
:rtype: AdminVolume
"""
if not volume_id:
return "Must provide volume_id to get."
return UasManager().get_volume(volume_id=volume_id)
|
55cd59c8e7c116f8a975ef4f14f808c07700d955
| 24,823 |
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
def cyclic_learning_rate(global_step,
learning_rate=0.01,
max_lr=0.1,
step_size=50000.,
gamma=0.99994,
max_steps=100000.,
scale_rate=0.9,
mode='triangular',
policy=None,
name=None):
"""Cyclic learning rate (CLR).
This method is revised from [TensorFlow pull request: Add support for Cyclic Learning Rate](https://github.com/tensorflow/tensorflow/pull/20758)
From the paper:
Smith, Leslie N. "Cyclical learning
rates for training neural networks." 2017.
[https://arxiv.org/pdf/1506.01186.pdf]
This method lets the learning rate cyclically
vary between reasonable boundary values
achieving improved classification accuracy and
often in fewer iterations.
This code varies the learning rate linearly between the
minimum (learning_rate) and the maximum (max_lr).
It returns the cyclic learning rate. It is computed as:
```python
cycle = floor( 1 + global_step / ( 2 * step_size ) )
      x = abs( global_step / step_size - 2 * cycle + 1 )
      clr = learning_rate + ( max_lr - learning_rate ) * max( 0 , 1 - x )
```
Modes:
'triangular':
Default, linearly increasing then linearly decreasing the
learning rate at each cycle.
'triangular2':
The same as the triangular policy except the learning
rate difference is cut in half at the end of each cycle.
This means the learning rate difference drops after each cycle.
'exp_range':
The learning rate varies between the minimum and maximum
boundaries and each boundary value declines by an exponential
factor of: gamma^global_step.
Args:
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the cyclic computation. Must not be negative.
learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate which is the lower bound
      of the cycle (default = 0.01).
max_lr: A scalar. The maximum learning rate boundary.
step_size: A scalar. The number of iterations in half a cycle.
      The paper suggests setting step_size to 2 to 8 times the number of training iterations in an epoch.
gamma: constant in 'exp_range' mode:
gamma**(global_step)
max_steps: A scalar. The number of total iterations.
scale_rate: A scale factor for decreasing the learning rate after the completion of one cycle.
Must be between 0 and 1.
mode: one of {triangular, triangular2, exp_range}.
Default 'triangular'.
Values correspond to policies detailed above.
    policy: one of {None, 'one_cycle'}.
      Default None.
name: String. Optional name of the operation. Defaults to
'CyclicLearningRate'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The cyclic
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
"""
if global_step is None:
raise ValueError("global_step is required for cyclic_learning_rate.")
with ops.name_scope(name, "CyclicLearningRate",
[learning_rate, global_step]) as name:
learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
dtype = learning_rate.dtype
global_step = math_ops.cast(global_step, dtype)
step_size = math_ops.cast(step_size, dtype)
max_steps = math_ops.cast(max_steps, dtype)
def cyclic_lr():
"""Helper to recompute learning rate; most helpful in eager-mode."""
# computing: cycle = floor( 1 + global_step / ( 2 * step_size ) )
double_step = math_ops.multiply(2., step_size)
global_div_double_step = math_ops.divide(global_step, double_step)
cycle = math_ops.floor(math_ops.add(1., global_div_double_step))
      # computing: x = abs( global_step / step_size - 2 * cycle + 1 )
double_cycle = math_ops.multiply(2., cycle)
global_div_step = math_ops.divide(global_step, step_size)
tmp = math_ops.subtract(global_div_step, double_cycle)
x = math_ops.abs(math_ops.add(1., tmp))
      # computing: clr = learning_rate + ( max_lr - learning_rate ) * max( 0, 1 - x )
a1 = math_ops.maximum(0., math_ops.subtract(1., x))
a2 = math_ops.subtract(max_lr, learning_rate)
clr = math_ops.multiply(a1, a2)
if mode == 'triangular2':
clr = math_ops.divide(clr, math_ops.cast(math_ops.pow(2, math_ops.cast(
cycle-1, tf.int32)), tf.float32))
if mode == 'exp_range':
clr = math_ops.multiply(math_ops.pow(gamma, global_step), clr)
return math_ops.add(clr, learning_rate, name=name)
def after_cycle():
gap = math_ops.subtract(global_step, math_ops.multiply(2., step_size))
cur_percent = math_ops.divide(gap, math_ops.subtract(max_steps, math_ops.multiply(2., step_size)))
temp = math_ops.add(1., math_ops.multiply(cur_percent, -0.99))
next_lr = math_ops.multiply(learning_rate, math_ops.multiply(temp, scale_rate))
return next_lr
if policy == 'one_cycle':
cyclic_lr = tf.cond(tf.less(global_step, 2*step_size), cyclic_lr , after_cycle)
else:
cyclic_lr = cyclic_lr()
return cyclic_lr
|
cae33d6b167c4356dec52c0511d36fd23ca68434
| 24,824 |
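A minimal NumPy sketch of the triangular schedule described in the docstring above, using illustrative values for learning_rate, max_lr and step_size (this is not the TensorFlow graph version from the record):
import numpy as np

def triangular_clr(global_step, learning_rate=0.01, max_lr=0.1, step_size=2000.):
    # cycle = floor(1 + global_step / (2 * step_size))
    cycle = np.floor(1. + global_step / (2. * step_size))
    # x = abs(global_step / step_size - 2 * cycle + 1)
    x = np.abs(global_step / step_size - 2. * cycle + 1.)
    # clr = learning_rate + (max_lr - learning_rate) * max(0, 1 - x)
    return learning_rate + (max_lr - learning_rate) * np.maximum(0., 1. - x)

print(triangular_clr(np.arange(0, 8001, 2000)))  # [0.01 0.1 0.01 0.1 0.01]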
def getElementsOnFirstLevelExceptTag(parent, element):
"""Return all elements below *parent* except for the ones tagged *element*.
:param parent: the parent dom object
    :param element: the tag-name of elements **not** to return
"""
elements = []
children = getElements(parent)
for c in children:
if c.parentNode == parent and c.tagName.lower() != element.lower():
elements.append(c)
return elements
|
7ce5b578090d7079cf6bc3905d0d25fecf06461a
| 24,825 |
def get_first_child_element(node, tag_name):
"""Get the first child element node with a given tag name.
:param node: Parent node.
:type node: xml.dom.Node
:returns: the first child element node with the given tag name.
:rtype: xml.dom.Node
:raises NodeNotFoundError:
if no child node with the given tag name was found.
"""
for child in node.childNodes:
if child.nodeType == node.ELEMENT_NODE and \
child.tagName == tag_name:
return child
raise NodeNotFoundError('no child element node with tag %s was found' %
(tag_name))
|
479b311ec52814b9276e401361bbb1b040527d23
| 24,826 |
import re
def parse_iso(filename='iso.log'):
""" parse the isotropy output file
Args:
filename: the isotropy output file name
Returns:
lname: list of irreps
lpt: list of atom coordinate
lpv: list of distortion vectors, might be multi-dimensional
"""
#read in the isotropy output
try:
with open(filename,'r') as f:
read_data = f.read()
except BaseException:
print('the output of isotropy is required here')
return
#parse the isotropy output
#pt - atom coordinates (kind of weird definition, pt = original reduced coordinate * supercell matrix)
#pv - distortion vectors
#lpt, lpv - list of wy, pt, pv
#lname - name of modes
#nmode - number of modes
    nmode = 0
    npv = 0  # initialized here so it is defined before the first coordinate line is parsed
lname = []
lpt = []
lpv = []
pattern_name = re.compile(r"^[A-Z0-9\+\-]+(?=\s)")
pattern_coor = re.compile(r"(?<=\().*?(?=\))")
pattern_vec = re.compile(r"(?<=\()[0-9,\.\-]*(?=\))")
for line in read_data.split('\n'):
if pattern_name.search(line):
if nmode>0:
lpt.append(pt)
lpv.append(pv)
pt = []
pv = []
nmode += 1
lname.append(pattern_name.search(line).group())
if nmode==0:
continue
if re.search(r"Irrep|Enter", line):
continue
find = pattern_coor.findall(line)
find2 = pattern_vec.findall(line)
if (len(find)!=len(find2)):
npv = 0
for element in find:
coor = list(map(float, element.split(',')))
if npv==0:
pt.append(coor)
if npv==1:
pv.append([coor])
if npv>1:
pv[-1].append(coor)
npv += 1
else:
for element in find:
coor = list(map(float, element.split(',')))
if npv==1:
pv.append([coor])
if npv>1:
pv[-1].append(coor)
npv += 1
lpt.append(pt)
lpv.append(pv)
return lname, lpt, lpv
|
f0331c7a0c962d9763f1b3a15c997dda5a3c951c
| 24,827 |
def revoke_database(cursor: Cursor, user: str, db: str) -> Result:
"""
Remove any permissions for the user to create, manage and delete this database.
"""
db = db.replace("%", "%%")
return Result(_truthy(query(cursor, _format("REVOKE ALL ON {}.* FROM %s@'%%'", db), user)))
|
9cb496ffde12fbbed4750a9442572a6bbd74497a
| 24,828 |
def get_ex1():
"""Loads array A for example 1 and its TruncatedSVD with top 10 components
Uk, Sk, Vk = argmin || A - Uk*diag(Sk)*Vk||
Over;
Uk, Sk, Vk
Where;
Uk is a Orthonormal Matrix of size (20000, 10)
Sk is a 10 dimensional non-negative vector
Vk is a Orthonormal Matrix of size (10, 8000)
Returns
-------
A : numpy.ndarray
array of size (20000, 8000)
Uk : numpy.ndarray
orthonormal array of size (20000, 10)
Top 10 Left Singular Vectors of `A`
Sk : numpy.ndarray
array of size (10, )
Top 10 Singular Values of `A`
Vk : numpy.ndarray
transposed orthonormal array of size (10, 8000)
Top 10 Right Singular Vectors of `A`
"""
try:
Uk = load_np_file('ex1_Uk.npy')
Sk = load_np_file('ex1_Sk.npy')
Vk = load_np_file('ex1_Vk.npy')
ex1 = _make_a_ex1()
return ex1, Uk, Sk, Vk
except FileNotFoundError:
raise FileNotFoundError("A, Uk, Sk, Vk cannot be loaded. Try make_ex1()")
|
9afab6220acd28eedcfed1eee9920da97c1ff207
| 24,829 |
from typing import Any
def render_variable(context: 'Context', raw: Any):
"""
Render the raw input. Does recursion with dict and list inputs, otherwise renders
string.
:param raw: The value to be rendered.
:return: The rendered value as literal type.
"""
if raw is None:
return None
    elif isinstance(raw, str):
        return render_string(context, raw)
elif isinstance(raw, dict):
return {
render_string(context, k): render_variable(context, v)
for k, v in raw.items()
}
elif isinstance(raw, list):
return [render_variable(context, v) for v in raw]
    else:
        return raw
|
36b6148589a447c8c9397f2b199a0c9da025fd50
| 24,830 |
def is_p2wpkh_output(cscript: CScript) -> bool:
"""Checks if the output script if of the form:
OP_0 <pubkey hash>
    :param cscript: Script to be analyzed.
    :type cscript: CScript
:return: True if the passed in bitcoin CScript is a p2wpkh output script.
:rtype: bool
"""
if len(cscript) != 22:
return False
return cscript[0] == script.OP_0
|
1efec498daa89c1b345538d4e976aaa9ac9dd6cd
| 24,833 |
def check_update_needed(db_table_object, repository_name, pushed_at):
"""
Returns True if there is a need to clone the github repository
"""
logger.info(f"This is the repo name from check_update <<{repository_name}>> and db_table <<{db_table_object}>>")
result = get_single_repository(db_table_object, repository_name)
logger.info(result)
if not result:
logger.info("result not found")
return True
else:
logger.info("result found")
logger.info(f"This is the result {result}")
        epoch = date_parse(pushed_at).timestamp()  # the pushed_at timestamp currently available in the repo
logger.info(f"Comparing {int(epoch)} and {result['downloaded_at']} for {repository_name}")
if int(epoch) > int(result["downloaded_at"]):
return True
return False
    # Check whether an update is needed based on the database record
|
1ab5b3e29e504b60deb928f634a0e8d11cf71d3b
| 24,834 |
def return_one(result):
"""return one statement"""
return " return " + result
|
94298fd5811877fa9e6a84cb061fc6244f3fda3b
| 24,835 |
def inv(a):
"""The inverse rotation"""
return -a
|
dbf88fc5f8f2f289f0132a19e0d0af0e82f232bd
| 24,836 |
from App import Proxys
def wrapCopy(object):
"""Wrap a copy of the object."""
return eval( serialize(object), Proxys.__dict__ )
|
ece99c49d9e5cd3603ee91f6614e5f63bc122751
| 24,837 |
def pi_eq_func(ylag,pilag,v,s,slag,alpha,h,b,phi,gamma):
""" equilibrium value for inflation
Args:
ylag (float): lagged output
pilag (float): lagged inflation
v (float): demand disturbance
s (float): supply disturbance
slag (float): lagged supply disturbance
alpha (float): sensitivity of demand to real interest rate
h (float): coefficient on inflation in Taylor rule
b (float): coefficient on output in Taylor rule
        phi (float): degree of stickiness in inflation expectations
gamma (float): effect of output on inflation in SRAS
Returns:
(float): equilibrium value for inflation
"""
return 1/(alpha*h)*(v-1/(alpha*b+alpha*gamma*h+1)*(alpha*b+1)*(-pilag*alpha*h+alpha*gamma*h*phi*ylag+alpha*h*phi*slag-alpha*h*s+v))
|
fca249f970e2d97b32d0f8a8b03602370c19b36d
| 24,839 |
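An illustrative call of the function above; the parameter values below are made-up assumptions for demonstration, not values from the source:
# steady-state lags, a positive demand shock v = 0.1, no supply shocks
pi_eq = pi_eq_func(ylag=0.0, pilag=0.02, v=0.1, s=0.0, slag=0.0,
                   alpha=1.0, h=0.5, b=0.5, phi=1.0, gamma=0.075)
print(pi_eq)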
import time
def set_mode(vehicle, mode):
"""
Set the vehicle's flight modes. 200ms period state validation.
Args:
vehicle(dronekit.Vehicle): the vehicle to be controlled.
mode(str): flight mode string, supported by the firmware.
Returns:
bool: True if success, False if failed.
Failure will set shared.status['abort'].
"""
util.log_info("Setting %s." % mode)
shared.status['manual_mode'] = mode
vehicle.mode = VehicleMode(mode)
wait_count = 0
while True:
time.sleep(.2)
wait_count = wait_count + 1
if vehicle.mode.name == mode :
return True
elif wait_count >= 45:
util.log_warning("Unable to set %s. Assume link lost." % mode)
shared.status['abort'] = True
return False
elif wait_count % 15 == 0 :
util.log_warning("Retry setting %s" % mode)
vehicle.mode = VehicleMode(mode)
|
7c8590989ce7d7c0ffbc9910c8f8cf7018090e95
| 24,840 |
def _setdoc(super): # @ReservedAssignment
"""This inherits the docs on the current class. Not really needed for Python 3.5,
due to new behavior of inspect.getdoc, but still doesn't hurt."""
def deco(func):
func.__doc__ = getattr(getattr(super, func.__name__, None), "__doc__", None)
return func
return deco
|
47da03ae9951e18fccaaa2cf891d39dfdcc324c9
| 24,842 |
def test_module(client: Client) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param Client: client to use
:return: 'ok' if test passed.
:rtype: ``str``
"""
# This should validate all the inputs given in the integration configuration panel,
# either manually or by using an API that uses them.
if client.client_credentials:
raise DemistoException("When using a self-deployed configuration, run the !microsoft-365-defender-auth-test"
"command in order to test the connection")
test_connection(client)
return "ok"
|
d3bd13ee0b928d9ffb14a93efb4738f484979dfc
| 24,843 |
import functools
import warnings
def warns(message, category=None):
"""警告装饰器
:param message: 警告信息
:param category: 警告类型:默认是None
:return: 装饰函数的对象
"""
def _(func):
@functools.wraps(func)
def warp(*args, **kwargs):
warnings.warn(message, category, stacklevel=2)
return func(*args, **kwargs)
return warp
return _
|
4c481dc7eeb42751aef07d87ab9da34b04c573f4
| 24,844 |
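A minimal usage sketch of the decorator above (old_api is a hypothetical function name):
@warns("old_api() is deprecated, use new_api() instead", DeprecationWarning)
def old_api():
    return 42

old_api()  # emits a DeprecationWarning attributed to the caller (stacklevel=2)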
import urllib.error
import pytube.exceptions
def handle_exceptions(func) -> object:
"""
    This is needed since the current version of pytube is
    quite unstable and can raise some unexpected errors.
"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except KeyError as e:
            window.s_append('An error with the cipher has occurred. '
'See documentation in GitHub to resolve: '
'https://github.com/f4ll-py/ytdownloader.')
except pytube.exceptions.RegexMatchError:
window.s_append('Could not find any YouTube videos with that URL.')
except urllib.error.HTTPError:
window.s_append('This video is not available. Try again later.')
except PermissionError:
window.s_append('Permission denied for the current path.')
return wrapper
|
ef162302186b5da5c86cec67286dcfecefd1ddd0
| 24,845 |
def templated_sequence_component(location_descriptor_tpm3):
"""Create test fixture for templated sequence component"""
params = {
"component_type": "templated_sequence",
"region": location_descriptor_tpm3.dict(exclude_none=True),
"strand": "+"
}
return TemplatedSequenceComponent(**params)
|
c92cfd2e0691c097898d82d7ff2d9eb16e5e2023
| 24,846 |
def joint_probability(people, one_gene, two_genes, have_trait):
"""
Compute and return a joint probability.
The probability returned should be the probability that
* everyone in set `one_gene` has one copy of the gene, and
* everyone in set `two_genes` has two copies of the gene, and
* everyone not in `one_gene` or `two_gene` does not have the gene, and
* everyone in set `have_trait` has the trait, and
* everyone not in set` have_trait` does not have the trait.
"""
joint_p = 1
# zero_genes = set(people.keys()) - two_genes - one_gene
for person in people:
# Calculate probability to have the genes of interest
this_genes = get_nbr_genes(person, one_gene, two_genes)
if people[person]['mother'] is None: # Assumes both parents info, or nothing
gene_prob = PROBS['gene'][this_genes]
else: # If there is parent's info
prob_mother = get_parent_prob(people[person]['mother'], one_gene, two_genes)
prob_father = get_parent_prob(people[person]['father'], one_gene, two_genes)
if this_genes == 0:
gene_prob = (1 - prob_mother) * (1 - prob_father) # None can transmit
elif this_genes == 1:
gene_prob = (1 - prob_mother) * prob_father + prob_mother * (1 - prob_father) # Two possibilities
else:
gene_prob = prob_father * prob_mother # Both need to transmit
# Calculate probability to have trait, given genes of interest
trait = get_trait(person, have_trait) # Trait for this person
trait_prob = PROBS['trait'][this_genes][trait]
joint_p *= gene_prob * trait_prob # Accumulates joint probability of all people
return joint_p
|
c2e8d5d617220d44f625c80f9474ab7327800b6f
| 24,847 |
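A small numeric sketch of the parent-transmission arithmetic used above; prob_mother and prob_father stand in for the per-parent transmission probabilities that get_parent_prob would return:
prob_mother, prob_father = 0.5, 0.01  # e.g. a one-copy mother and a low-probability father
zero_copies = (1 - prob_mother) * (1 - prob_father)                            # neither parent transmits
one_copy = (1 - prob_mother) * prob_father + prob_mother * (1 - prob_father)   # exactly one transmits
two_copies = prob_mother * prob_father                                         # both parents transmit
print(zero_copies, one_copy, two_copies)  # the three cases sum to 1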
import numpy as np
def rf_predict_img_win(win_arr, trained_classifier, prob=True):
"""Predict image window using input trained classifier.
Args:
win_arr (numpy.arr): In rasterio order (channels, y, x)
trained_classifier (sklearn.model): Trained sklearn model to use for predictions.
prob (bool, optional): Generate probability of prediction or binary prediction. Defaults to True.
Returns:
numpy.arr: Array of predictions.
"""
# Get dims
b, y, x = win_arr.shape
segment_idx = b - 1
# Reshape for classifier
win_arr = np.transpose(win_arr.reshape(b, -1))
img_bnds = [i for i in range(0, b) if i != segment_idx]
win_arr = win_arr[:, img_bnds]
# No data rows
no_data = np.any(win_arr, axis=1).astype("uint8")
# Calc ndvi
# win_arr = calc_ndvi(win_arr, 2, 3)
# Prob predictions
if prob:
pred_arr = trained_classifier.predict_proba(win_arr)
# subset just the positive (forest) class probaility for all pixels
pred_arr = pred_arr[:, 1:]
# Or class predictions
else:
pred_arr = trained_classifier.predict(win_arr)
# Reshape back to image
pred_arr = pred_arr.reshape(y, x)
no_data = no_data.reshape(y, x)
# Apply no data mask so not positive prediction
pred_arr = pred_arr * no_data
return pred_arr
|
29b79fb4bcc909cce18889bdd624d6db17eb2d29
| 24,849 |
def ShowIPC(cmd_args=None):
""" Routine to print data for the given IPC space
Usage: showipc <address of ipc space>
"""
if not cmd_args:
print "No arguments passed"
print ShowIPC.__doc__
return False
ipc = kern.GetValueFromAddress(cmd_args[0], 'ipc_space *')
if not ipc:
print "unknown arguments:", str(cmd_args)
return False
print PrintIPCInformation.header
PrintIPCInformation(ipc, False, False)
|
4511c0aa1315fe594ee8e1209b6bc2e26d633ad9
| 24,851 |
def get_mesh_faces(edge_array):
"""
Uses an edge array of mesh to generate the faces of the mesh. For each triangle in the mesh this returns the list of indices
contained in it as a tuple (index1, index2, index3)
"""
triangles = []
neibs = neibs_from_edges(edge_array)
for edge in edge_array:
for vert in get_opposite_verts(neibs, edge):
            triangle = sorted([edge[0], edge[1], vert])
            if triangle not in triangles:
                triangles.append(triangle)
return triangles
|
e1f555985e3e55c2d0fbc3d0fd92befc6eb2c878
| 24,853 |
def _make_attribution_from_nodes(mol: Mol, nodes: np.ndarray,
global_vec: np.ndarray) -> GraphsTuple:
"""Makes an attribution from node information."""
senders, receivers = _get_mol_sender_receivers(mol)
data_dict = {
'nodes': nodes.astype(np.float32),
'senders': senders,
'receivers': receivers,
'globals': global_vec.astype(np.float32)
}
return graph_nets.utils_np.data_dicts_to_graphs_tuple([data_dict])
|
dcf3f82c0634afa8ba6ea97694ea36e9fd62c563
| 24,854 |
def subrepo(repo, subset, x):
"""Changesets that add, modify or remove the given subrepo. If no subrepo
pattern is named, any subrepo changes are returned.
"""
# i18n: "subrepo" is a keyword
args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
pat = None
if len(args) != 0:
pat = getstring(args[0], _("subrepo requires a pattern"))
m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
def submatches(names):
k, p, m = util.stringmatcher(pat)
for name in names:
if m(name):
yield name
def matches(x):
c = repo[x]
s = repo.status(c.p1().node(), c.node(), match=m)
if pat is None:
return s.added or s.modified or s.removed
if s.added:
return any(submatches(c.substate.keys()))
if s.modified:
subs = set(c.p1().substate.keys())
subs.update(c.substate.keys())
for path in submatches(subs):
if c.p1().substate.get(path) != c.substate.get(path):
return True
if s.removed:
return any(submatches(c.p1().substate.keys()))
return False
return subset.filter(matches, condrepr=('<subrepo %r>', pat))
|
c95cdb08671ca1800ffbc0df94833cdb29ba534a
| 24,855 |
def format_output(item, show_url=False):
""" takes a voat post and returns a formatted string """
if not item["Title"]:
item["Title"] = formatting.truncate(item["Linkdescription"], 70)
else:
item["Title"] = formatting.truncate(item["Title"], 70)
item["link"] = voat_fill_url.format(item["Subverse"], item["Id"])
raw_time = isodate.parse_date(item['Date'])
item["timesince"] = timeformat.time_since(raw_time, count=1, simple=True)
item["comments"] = formatting.pluralize(item["CommentCount"], 'comment')
item["points"] = formatting.pluralize(item["Likes"], 'point')
if item["Type"] == 2:
item["warning"] = " \x02Link\x02"
else:
item["warning"] = ""
if show_url:
return "\x02{Title} : {Subverse}\x02 - {comments}, {points}" \
" - \x02{Name}\x02 {timesince} ago - {link}{warning}".format(**item)
else:
return "\x02{Title} : {Subverse}\x02 - {comments}, {points}" \
" - \x02{Name}\x02, {timesince} ago{warning}".format(**item)
|
1b730507fbff1ab2deadeaaefcfbcd23356ee437
| 24,856 |
def get_compiled_table_name(engine, schema, table_name):
"""Returns a table name quoted in the manner that SQLAlchemy would use to query the table
Args:
engine (sqlalchemy.engine.Engine):
schema (str, optional): The schema name for the table
table_name (str): The name of the table
Returns:
str: The compiled table name
Examples:
>>> from sqlalchemy import create_engine
>>> get_compiled_table_name(create_engine('greenplum://u:p@s'), 'a_schema', 'a_table') == six.text_type('a_schema.a_table')
True
>>> get_compiled_table_name(create_engine('greenplum://u:p@s'), 'a_schema-1', 'a_table-1') == six.text_type('"a_schema-1"."a_table-1"')
True
>>> get_compiled_table_name(create_engine('greenplum://u:p@s'), None, 'a_table-1') == six.text_type('"a_table-1"')
True
>>> get_compiled_table_name(create_engine('greenplum://u:p@s'), '', 'a_table-1') == six.text_type('"a_table-1"')
True
"""
target = sqlalchemy.Table(table_name, sqlalchemy.MetaData(), schema=schema)
return engine.dialect.identifier_preparer.format_table(target)
|
91234bbfea2ff55d9d3e14b1ad70eb81ff09fc5d
| 24,857 |
def build(filepath):
"""Returns the window with the popup content."""
ttitlebar = titlebar.build()
hheading = heading.build(HEADING_TITLE)
top_txt_filler = fillers.horizontal_filler(2, colors.BACKGROUND)
message = sg.Text(
text=MESSAGE_TEXT + filepath,
font=MESSAGE_FONT,
text_color=colors.BLACK,
background_color=colors.BACKGROUND,
justification='c',
pad=(10, None) # adds space between l/r borders.
)
# adds space between message and button
bottom_txt_filler = fillers.horizontal_filler(1, colors.BACKGROUND)
# the key is not needed
done = button.build(BUTTON_TEXT, '', BUTTON_FONT, BUTTON_SIZE)
bottom_sep = fillers.horizontal_filler(2, colors.BACKGROUND)
return sg.Window(
title='',
no_titlebar=True,
keep_on_top=True,
layout=[
[ttitlebar],
[hheading],
[top_txt_filler],
[message],
[bottom_txt_filler],
[done],
[bottom_sep]
],
element_justification='c'
)
|
b5bfd7f46955351a94763360fb44cc59eeccbb39
| 24,858 |
def format_float(digit=0, is_pct=False):
"""
Number display format for pandas
Args:
digit: number of digits to keep
if negative, add one space in front of positive pct
is_pct: % display
Returns:
lambda function to format floats
Examples:
>>> format_float(0)(1e5)
'100,000'
>>> format_float(1)(1e5)
'100,000.0'
>>> format_float(-1, True)(.2)
' 20.0%'
>>> format_float(-1, True)(-.2)
'-20.0%'
>>> pd.options.display.float_format = format_float(2)
"""
if is_pct:
space = ' ' if digit < 0 else ''
fmt = f'{{:{space}.{abs(int(digit))}%}}'
return lambda vv: 'NaN' if np.isnan(vv) else fmt.format(vv)
return lambda vv: 'NaN' if np.isnan(vv) else (
f'{{:,.{digit}f}}'.format(vv) if vv else '-' + ' ' * abs(digit)
)
|
9f719b7e1609744d673226eb32f0395b50b34f51
| 24,859 |
def get_overexpressed_genes(
matrix: ExpMatrix, cell_labels: pd.Series,
exp_thresh: float = 0.05, ignore_outliers: bool = True,
num_genes: int = 20) -> pd.DataFrame:
"""Determine most over-expressed genes for each cluster."""
# make sure matrix and cell_labels are aligned
matrix = matrix.loc[:, cell_labels.index]
if ignore_outliers:
# ignore the cluster named "Outliers", if it exists
sel = (cell_labels != 'Outliers')
matrix = matrix.loc[:, sel]
cell_labels = cell_labels.loc[sel]
_LOGGER.info('Ignoring mean expression values below %.3f', exp_thresh)
data = []
# scale matrix
matrix = matrix.scale()
# determine fold-changes for all clusters
vc = cell_labels.value_counts()
clusters = vc.index.tolist()
X = np.zeros((len(clusters), matrix.num_genes), dtype=np.float32)
cluster_mean = ExpMatrix(genes=matrix.genes, cells=clusters, data=X.T)
for l in clusters:
sel = (cell_labels == l)
cluster_mean.loc[:, l] = matrix.loc[:, sel].mean(axis=1)
# in calculation of fold change,
# ignore all expression values below exp_thresh
thresh_cluster_mean = cluster_mean.copy()
thresh_cluster_mean[thresh_cluster_mean < exp_thresh] = exp_thresh
# calculate fold change relative to average of other clusters
X = np.ones((len(clusters), matrix.num_genes), dtype=np.float32)
fold_change = ExpMatrix(genes=matrix.genes, cells=clusters, data=X.T)
for l in clusters:
sel = (thresh_cluster_mean.cells != l)
fold_change.loc[:, l] = thresh_cluster_mean.loc[:, l] / \
(thresh_cluster_mean.loc[:, sel].mean(axis=1))
markers = []
for l in clusters:
change = fold_change.loc[:, l].sort_values(ascending=False)
change = change[:num_genes]
# scale mean expression values to 10K transcripts
mean = cluster_mean.loc[change.index, l]
mean = (10000 / cluster_mean.loc[:, l].sum()) * mean
cluster_index = [l] * num_genes
gene_index = change.index
index = pd.MultiIndex.from_arrays(
[cluster_index, gene_index], names=['cluster', 'gene'])
data = np.c_[change.values, mean.values]
markers.append(
pd.DataFrame(
index=index,
columns=['Fold change', 'Mean expression (TP10K)'],
data=data))
markers = pd.concat(markers, axis=0)
#markers = markers.swaplevel(0, 1).sort_index(
# level=1, sort_remaining=False).swaplevel(0, 1)
return markers
|
44c02d2a9be936cee582c750c680925024604f64
| 24,860 |
import pandas as pd
def heat_degree_day(Tcolumn):
"""
    Returns a pandas Series of daily heating degree day values computed from outdoor temperatures
    params:
    Tcolumn -- a pandas Series with a datetime index containing outdoor temperature in Fahrenheit
    (the base temperature for the heating degree day value, e.g. 65 for 65 degrees Fahrenheit,
    is taken from the module-level BASE constant)
    Returns:
    hdd -- pandas Series of heating degree day values arranged by day
    This function provides the heating degree day value of outdoor temperature data
    (in Fahrenheit) indexed by datetime, as needed for the definition of a heating degree day (https://www.weather.gov/key/climate_heat_cool).
"""
Temp = Tcolumn.groupby(pd.Grouper(freq = 'D')).mean()
hdd = BASE - Temp
hdd.name='hdd'
return hdd
|
60cb58520f5451b25b3e8a42bb35c262018bb902
| 24,861 |
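A usage sketch, assuming the function above is defined in the same module with BASE = 65 (degrees Fahrenheit):
import pandas as pd

BASE = 65  # assumed module-level base temperature in Fahrenheit
hourly = pd.Series(
    [60, 58, 55, 62],
    index=pd.to_datetime(["2021-01-01 00:00", "2021-01-01 12:00",
                          "2021-01-02 00:00", "2021-01-02 12:00"]),
)
print(heat_degree_day(hourly))  # daily HDD = 65 - daily mean: 6.0 and 6.5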
def quantile_loss(y_true, y_pred, taus):
"""
The quantiles loss for a list of quantiles. Sums up the error contribution
from the each of the quantile loss functions.
"""
e = skewed_absolute_error(
K.flatten(y_true), K.flatten(y_pred[:, 0]), taus[0])
for i, tau in enumerate(taus[1:]):
e += skewed_absolute_error(K.flatten(y_true),
K.flatten(y_pred[:, i + 1]),
tau)
return e
|
1d06085b0939cf8307d1ceb2bd65a8f7bbde53e0
| 24,862 |
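The per-quantile term above (skewed_absolute_error) is not included in this record; a NumPy sketch of the standard pinball loss it presumably implements:
import numpy as np

def pinball_loss(y_true, y_pred, tau):
    # under-prediction is weighted by tau, over-prediction by (1 - tau)
    err = y_true - y_pred
    return np.mean(np.maximum(tau * err, (tau - 1) * err))

y_true = np.array([1.0, 2.0, 3.0])
y_pred = np.array([1.5, 1.5, 1.5])
print(pinball_loss(y_true, y_pred, 0.9))  # under-predictions dominate at tau = 0.9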
from typing import Tuple
from typing import Sequence
import string
def parse_a3m(a3m_string: str) -> Tuple[Sequence[str], DeletionMatrix]:
"""Parses sequences and deletion matrix from a3m format alignment.
Args:
a3m_string: The string contents of a a3m file. The first sequence in the
file should be the query sequence.
Returns:
A tuple of:
* A list of sequences that have been aligned to the query. These
might contain duplicates.
* The deletion matrix for the alignment as a list of lists. The element
at `deletion_matrix[i][j]` is the number of residues deleted from
the aligned sequence i at residue position j.
"""
sequences, _ = parse_fasta(a3m_string)
deletion_matrix = []
for msa_sequence in sequences:
deletion_vec = []
deletion_count = 0
for j in msa_sequence:
if j.islower():
deletion_count += 1
else:
deletion_vec.append(deletion_count)
deletion_count = 0
deletion_matrix.append(deletion_vec)
# Make the MSA matrix out of aligned (deletion-free) sequences.
deletion_table = str.maketrans('', '', string.ascii_lowercase)
aligned_sequences = [s.translate(deletion_table) for s in sequences]
return aligned_sequences, deletion_matrix
|
5b1f5f9cfc54cd55602e1d73b92460fbc99b3594
| 24,863 |
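A minimal illustration of the deletion bookkeeping above for a single aligned a3m sequence, where lowercase letters are insertions relative to the query:
import string

msa_sequence = "MKvvLS"  # 'vv' are two lowercase residues before the aligned 'L'
deletion_vec, deletion_count = [], 0
for j in msa_sequence:
    if j.islower():
        deletion_count += 1
    else:
        deletion_vec.append(deletion_count)
        deletion_count = 0

aligned = msa_sequence.translate(str.maketrans('', '', string.ascii_lowercase))
print(aligned, deletion_vec)  # MKLS [0, 0, 2, 0]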
def build_sub_lattice(lattice, symbol):
"""Generate a sub-lattice of the lattice based on equivalent atomic species.
Args:
lattice (ASE crystal class): Input lattice
symbol (string): Symbol of species identifying sub-lattice
Returns:
list of lists:
sub_lattice: Cartesian coordinates of the sub-lattice of symbol
"""
sub_lattice = []
i = 0
atomic_labels = lattice.get_chemical_symbols()
positions = lattice.get_scaled_positions()
for atom in atomic_labels:
if atom == symbol:
sub_lattice.append(positions[i])
i = i + 1
return sub_lattice
|
7e7748c31f7f082b2e5ec6f21d0a56f60d5ec06c
| 24,864 |
def make_url(connection_str):
""" """
return _parse_rfc1738_args(connection_str)
|
2927b541399df8ab134688ae3a3a7274e0efb648
| 24,865 |
from typing import Union
from typing import Tuple
def get_graphs_within_cutoff(structure: Union[Structure, MEGNetMolecule, Molecule],
                             cutoff: float = 5.0, numerical_tol: float = 1e-8) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Get graph representations from structure within cutoff
Args:
structure: (pymatgen Structure)
cutoff: (float) cutoff radius
numerical_tol: (float) numerical tolerance
Returns:
center_indices, neighbor_indices, images, distances
"""
if isinstance(structure, Structure):
lattice_matrix = np.ascontiguousarray(np.array(structure.lattice.matrix), dtype=float)
pbc = np.array([1, 1, 1], dtype=int)
    elif isinstance(structure, (MEGNetMolecule, Molecule)):
lattice_matrix = np.array([[1000.0, 0., 0.], [0., 1000., 0.], [0., 0., 1000.]], dtype=float)
pbc = np.array([0, 0, 0], dtype=int)
else:
raise ValueError('structure type not supported')
r = float(cutoff)
cart_coords = np.ascontiguousarray(np.array(structure.cart_coords), dtype=float)
center_indices, neighbor_indices, images, distances = \
find_points_in_spheres(cart_coords, cart_coords, r=r, pbc=pbc,
lattice=lattice_matrix, tol=numerical_tol)
exclude_self = (center_indices != neighbor_indices) | (distances > numerical_tol)
return center_indices[exclude_self], neighbor_indices[exclude_self], images[exclude_self], distances[exclude_self]
|
a745808938160148ddaa345f0e5f8aa11b4a3a5f
| 24,866 |
def add_cals1():
"""
Add nutrients to daily intake for products.
"""
if 'username' in session:
food = request.form.get("keyword")
pr = Product(food)
lst = pr.get_products()
for i in lst:
lyst.append(i)
if len(lst) != 0:
return render_template('productsearch.html', username=escape(session['username']), vars=lst)
else:
return render_template("failure.html")
else:
return render_template("failure.html")
|
acdda46ad1fdce23baee8bae2018cf5e6510895f
| 24,867 |
def format_percent(percentage, pos):
"""
Formats percentages for the 'x' axis of a plot.
:param percentage: The fraction between 0.0 and 1.0
:type percentage: float
:param pos: The position argument
:type pos: int
:return: A formatted percentage string
:rtype: str
"""
# pylint: disable=unused-argument
return '{:.0f}%'.format(percentage * 100.)
|
d8566ce36b21adb351141ac72413b927e0f02c11
| 24,868 |
import inspect
def simple_repr(obj, attrs: tp.Optional[tp.Sequence[str]] = None,
overrides: dict = {}):
"""
Return a simple representation string for `obj`.
If `attrs` is not None, it should be a list of attributes to include.
"""
params = inspect.signature(obj.__class__).parameters
attrs_repr = []
if attrs is None:
attrs = list(params.keys())
for attr in attrs:
display = False
if attr in overrides:
value = overrides[attr]
elif hasattr(obj, attr):
value = getattr(obj, attr)
else:
continue
if attr in params:
param = params[attr]
if param.default is inspect._empty or value != param.default: # type: ignore
display = True
else:
display = True
if display:
attrs_repr.append(f"{attr}={value}")
return f"{obj.__class__.__name__}({','.join(attrs_repr)})"
|
4aaa3090a2a0fbb282cfc8403d365c562ae6c5d9
| 24,869 |
import torch
def Gaussian_RadialBasis(basis_size: int, max_radius: float, min_radius=0.,
num_layers: int = 0, num_units: int = 0, activation_function='relu'):
"""
Note: based on e3nn.radial.GaussianRadialModel.
:param basis_size:
:param max_radius:
:param min_radius:
:param num_layers:
:param num_units:
:param activation_function:
:return:
"""
activation_function = get_scalar_non_linearity(activation_function)
"""exp(-x^2 /spacing)"""
spacing = (max_radius - min_radius) / (basis_size - 1)
reference_points = torch.linspace(min_radius, max_radius, basis_size)
sigma = 0.8 * spacing
basis = partial(gaussian_basis_fn, sigma=sigma)
return FiniteElement_RadialBasis(reference_points, radial_basis_fn=basis,
radial_basis_type_name='φ_gauss',
num_layers=num_layers, num_units=num_units,
activation_function=activation_function)
|
f518e2706dcb672bf65f0ed9299c1579fec411a3
| 24,870 |
import numpy as np
def _get_column_outliers_std(column, m=3):
"""
    given a pandas Series representing a column in a dataframe,
    returns the index of the values that lie further than m*std from the mean
    :param column: pandas Series representing a column in a dataframe
    :param m: number of standard deviations used to flag outliers
    :return: pandas Index of the values which exceed m*std from the mean
"""
outliers = column[abs(column - np.mean(column)) > m * np.std(column)].index
return outliers
|
b55dd119ce36cdae7f17bb91aae4257b2dfca29e
| 24,871 |
def set_filters(request, query, result, static_items=None):
"""
Sets filters in the query
"""
query_filters = query['filter']['and']['filters']
used_filters = {}
if static_items is None:
static_items = []
# Get query string items plus any static items, then extract all the fields
qs_items = list(request.params.items())
total_items = qs_items + static_items
qs_fields = [item[0] for item in qs_items]
fields = [item[0] for item in total_items]
# Now make lists of terms indexed by field
all_terms = {}
for item in total_items:
if item[0] in all_terms:
all_terms[item[0]].append(item[1])
else:
all_terms[item[0]] = [item[1]]
for field in fields:
if field in used_filters:
continue
terms = all_terms[field]
if field in ['type', 'limit', 'y.limit', 'x.limit', 'mode', 'annotation',
'format', 'frame', 'datastore', 'field', 'region', 'genome',
'sort', 'from', 'referrer']:
continue
# Add filter to result
if field in qs_fields:
for term in terms:
qs = urlencode([
(k.encode('utf-8'), v.encode('utf-8'))
for k, v in qs_items
if '{}={}'.format(k, v) != '{}={}'.format(field, term)
])
result['filters'].append({
'field': field,
'term': term,
'remove': '{}?{}'.format(request.path, qs)
})
if field == 'searchTerm':
continue
# Add to list of active filters
used_filters[field] = terms
# Add filter to query
query_filters.append(build_terms_filter(field, terms))
return used_filters
|
fcd4fdb6b804fdcf0dce6dac3d19f1945d858a12
| 24,873 |
def generate_random_initial_population(population_size, n_nodes, al):
"""
Randomly create an initial population
:param population_size: population size
:type population_size: int
:param n_nodes: number of nodes
:type n_nodes: int
:param al: adjacency list
:type al: list of lists
:return: random population
:rtype: list of World_Map
"""
input_population = []
# Generate random initial population
for _ in range(population_size):
color_list = np.random.choice(['r', 'b', 'g'], n_nodes, replace=True)
color_string = "".join(color_list)
input_population.append(World_Map(color_string, al))
print('A random population of ' + str(population_size) + ' people was created')
return input_population
|
0a219ee6de88f97fa099d5fbc0698cc6712c525f
| 24,874 |
def import_mlp_args(hyperparameters):
"""
Returns parsed config for MultiLayerPerceptron classifier from provided settings
*Grid-search friendly
"""
types = {
'hidden_layer_sizes': make_tuple,
'activation': str,
'solver': str,
'alpha': float,
'batch_size': int,
'learning_rate': str,
'learning_rate_init': float,
'max_iter': int,
'tol': float,
}
args = {
'hidden_layer_sizes': hyperparameters.get('hidden_layer_sizes', fallback='(100,)'), # Formatting matters!
'activation': hyperparameters.get('activation', fallback='relu'),
'solver': hyperparameters.get('solver', fallback='adam'),
'alpha': hyperparameters.get('alpha', fallback='0.0001'),
'batch_size': hyperparameters.get('batch_size', fallback='200'),
'learning_rate': hyperparameters.get('learning_rate', fallback='constant'),
'learning_rate_init': hyperparameters.get('learning_rate_init', fallback='0.001'),
'max_iter': hyperparameters.get('max_iter', fallback='200'),
'tol': hyperparameters.get('tol', fallback='1e-4'),
}
for key in args.keys():
args[key] = cast_to_typed_list(args[key], types[key])
return args
|
0bfe7cdd4d85b8cfeee32d82f3dd189d60359690
| 24,875 |
import torch
def getLayers(model):
"""
get each layer's name and its module
:param model:
:return: each layer's name and its module
"""
layers = []
def unfoldLayer(model):
"""
unfold each layer
:param model: the given model or a single layer
:param root: root name
:return:
"""
# get all layers of the model
layer_list = list(model.named_children())
for item in layer_list:
module = item[1]
sublayer = list(module.named_children())
sublayer_num = len(sublayer)
            # if the current layer has no sublayers, it is a leaf module: collect it
            if sublayer_num == 0:
layers.append(module)
# if current layer contains sublayers, unfold them
elif isinstance(module, torch.nn.Module):
unfoldLayer(module)
unfoldLayer(model)
return layers
|
22565e786eb95e2b8996fad99778068ba15273ea
| 24,877 |
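A usage sketch for the helper above with a small nested torch model:
import torch

model = torch.nn.Sequential(
    torch.nn.Linear(4, 8),
    torch.nn.Sequential(torch.nn.ReLU(), torch.nn.Linear(8, 2)),
)
for layer in getLayers(model):
    print(layer)  # Linear(4, 8), ReLU(), Linear(8, 2): nested containers are unfolded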
def get_config_pdf_version(config_version: str, max_input_version: str) -> str:
"""
From the PDF version as set in the configuration and the maximum version of all input files, checks for
the best PDF output version. Logs a warning, if the version set in the configuration is lower than any of the
input files.
>>> get_config_pdf_version('auto', '1.6')
'1.6'
>>> get_config_pdf_version('1.3', '1.5')
'1.3'
>>> get_config_pdf_version('1.x', '1.5')
Traceback (most recent call last):
...
ValueError: ('Invalid PDF version in configuration', '1.x')
:param config_version: Version string from the configuration. Set to ``auto`` to just use ``max_input_version``.
:param max_input_version: Maximum version from all input files.
:return: ``config_version``, unless set to ``auto``, then ``max_input_version``. However, the automatic version
setting will never be lower than ``1.3``.
:raises ValueError: If the configuration-set version is an invalid pattern.
"""
if config_version == 'auto':
return max(max_input_version, '1.3')
if not PDF_VERSION_PATTERN.fullmatch(config_version):
raise ValueError("Invalid PDF version in configuration", config_version)
if max_input_version > config_version:
log.warning("PDF version specified in config (%s) is lower than at least one of the input documents (%s). "
"The resulting PDF may not be displayed correctly in all viewers.",
config_version, max_input_version)
return config_version
|
6bb98c455a2b701d89c90576a958348f84344cb8
| 24,878 |
import numpy as np
def threshold_otsu(hist):
"""Return threshold value based on Otsu's method.
    hist : 2-tuple of arrays
        Histogram from which to determine the threshold: the bin counts and
        the corresponding array of bin edges (as returned by ``numpy.histogram``).
Returns
-------
threshold : float
Upper threshold value. All pixels with an intensity higher than
this value are assumed to be foreground.
References
----------
.. [1] Wikipedia, https://en.wikipedia.org/wiki/Otsu's_Method
"""
counts, bin_centers = hist
bin_centers = bin_centers[:-1]
# class probabilities for all possible thresholds
weight1 = np.cumsum(counts)
weight2 = np.cumsum(counts[::-1])[::-1]
# class means for all possible thresholds
mean1 = np.cumsum(counts * bin_centers) / weight1
mean2 = (np.cumsum((counts * bin_centers)[::-1]) / weight2[::-1])[::-1]
# Clip ends to align class 1 and class 2 variables:
# The last value of ``weight1``/``mean1`` should pair with zero values in
# ``weight2``/``mean2``, which do not exist.
variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
if len(variance12) == 0:
return 0
idx = np.nanargmax(variance12)
threshold = bin_centers[idx]
return threshold
|
ea55dd483b0b60f8428240a62720fb53d4e0e80c
| 24,879 |
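A usage sketch for the function above with a synthetic bimodal sample; the histogram is passed as the (counts, bin_edges) pair returned by numpy.histogram:
import numpy as np

rng = np.random.default_rng(0)
sample = np.concatenate([rng.normal(30, 5, 5000), rng.normal(120, 10, 5000)])
hist = np.histogram(sample, bins=256)  # (counts, bin_edges)
print(threshold_otsu(hist))  # a threshold between the two modes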
def filter_options(v):
"""Disable option v"""
iris = dataframe()
return [
{"label": col, "value": col, "disabled": col == v}
for col in iris.columns
]
|
54277b38d30389b302f1f962667bbb91f0999b4f
| 24,880 |
import numpy as np
def scatter(x):
"""
    Scatter matrix x x^T (outer product) of a 1-D vector x.
"""
x1 = np.atleast_2d(x)
xt = np.transpose(x1)
s = np.dot(xt,x1)
assert np.array_equal( np.shape(s), [len(x),len(x)] )
return s
|
9d68eba6d3ffde7fb15d21a5a0d09a775bef96e7
| 24,881 |
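A quick check that the function above reproduces the outer product x x^T (assuming the scatter function above is in scope):
import numpy as np

x = np.array([1.0, 2.0, 3.0])
assert np.allclose(scatter(x), np.outer(x, x))
print(scatter(x))  # 3x3 scatter matrix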
def get_owned_object_or_40x(klass, owner, include_staff=False,
include_superuser=True, *args, **kwargs):
"""
Returns an object if it can be found (using get_object_or_404).
If the object is not owned by the supplied owner a 403 will be raised.
"""
obj = get_object_or_404(klass, *args, **kwargs)
if obj.is_not_owned_by(owner, include_staff, include_superuser):
raise PermissionDenied()
return obj
|
7535aa0ce7c77c41823f45c89885b5e2c32ed252
| 24,882 |
def ampMeritFunction2(voltages,**kwargs):
"""Simple merit function calculator.
voltages is 1D array of weights for the influence functions
distortion is 2D array of distortion map
ifuncs is 4D array of influence functions
shade is 2D array shade mask
Simply compute sum(ifuncs*voltages-distortion)**2)
"""
#Numpy way
distortion = kwargs['inp'][0]
ifuncs = kwargs['inp'][1]
res = np.mean((np.dot(ifuncs,voltages)-distortion)**2)
return res, [], 0
|
4443369f5424f536e839439c8deb479d09339e90
| 24,883 |
def get_transpose_graph(graph):
"""Get the transpose graph"""
transpose = {node: set() for node in graph.keys()}
for node, target_nodes in graph.items():
for target_node in target_nodes:
transpose[target_node].add(node)
return transpose
|
f7f8e083659e4214d79472961c7240778f37268d
| 24,884 |
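A small usage example for the adjacency-set representation expected by the function above:
graph = {'a': {'b', 'c'}, 'b': {'c'}, 'c': set()}
print(get_transpose_graph(graph))
# {'a': set(), 'b': {'a'}, 'c': {'a', 'b'}}  (set ordering may vary)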
def inventory_to_kml_string(
inventory,
icon_url="https://maps.google.com/mapfiles/kml/shapes/triangle.png",
icon_size=1.5, label_size=1.0, cmap="Paired", encoding="UTF-8",
timespans=True, strip_far_future_end_times=True):
"""
Convert an :class:`~obspy.core.inventory.inventory.Inventory` to a KML
string representation.
:type inventory: :class:`~obspy.core.inventory.inventory.Inventory`
:param inventory: Input station metadata.
:type icon_url: str
:param icon_url: Internet URL of icon to use for station (e.g. PNG image).
:type icon_size: float
:param icon_size: Icon size.
:type label_size: float
:param label_size: Label size.
:type encoding: str
:param encoding: Encoding used for XML string.
:type timespans: bool
:param timespans: Whether to add timespan information to the single station
elements in the KML or not. If timespans are used, the displayed
information in e.g. Google Earth will represent a snapshot in time,
such that using the time slider different states of the inventory in
time can be visualized. If timespans are not used, any station active
at any point in time is always shown.
:type strip_far_future_end_times: bool
:param strip_far_future_end_times: Leave out likely fictitious end times of
stations (more than twenty years after current time). Far future end
times may produce time sliders with bad overall time span in third
party applications viewing the KML file.
:rtype: byte string
:return: Encoded byte string containing KML information of the station
metadata.
"""
twenty_years_from_now = UTCDateTime() + 3600 * 24 * 365 * 20
# construct the KML file
kml = Element("kml")
kml.set("xmlns", "http://www.opengis.net/kml/2.2")
document = SubElement(kml, "Document")
SubElement(document, "name").text = "Inventory"
# style definition
cmap = get_cmap(name=cmap, lut=len(inventory.networks))
for i in range(len(inventory.networks)):
color = _rgba_tuple_to_kml_color_code(cmap(i))
style = SubElement(document, "Style")
style.set("id", "station_%i" % i)
iconstyle = SubElement(style, "IconStyle")
SubElement(iconstyle, "color").text = color
SubElement(iconstyle, "scale").text = str(icon_size)
icon = SubElement(iconstyle, "Icon")
SubElement(icon, "href").text = icon_url
hotspot = SubElement(iconstyle, "hotSpot")
hotspot.set("x", "0.5")
hotspot.set("y", "0.5")
hotspot.set("xunits", "fraction")
hotspot.set("yunits", "fraction")
labelstyle = SubElement(style, "LabelStyle")
SubElement(labelstyle, "color").text = color
SubElement(labelstyle, "scale").text = str(label_size)
for i, net in enumerate(inventory):
folder = SubElement(document, "Folder")
SubElement(folder, "name").text = str(net.code)
SubElement(folder, "open").text = "1"
SubElement(folder, "description").text = str(net)
style = SubElement(folder, "Style")
liststyle = SubElement(style, "ListStyle")
SubElement(liststyle, "listItemType").text = "check"
SubElement(liststyle, "bgColor").text = "00ffff"
SubElement(liststyle, "maxSnippetLines").text = "5"
# add one marker per station code
for sta in net:
placemark = SubElement(folder, "Placemark")
SubElement(placemark, "name").text = ".".join((net.code, sta.code))
SubElement(placemark, "styleUrl").text = "#station_%i" % i
SubElement(placemark, "color").text = color
if sta.longitude is not None and sta.latitude is not None:
point = SubElement(placemark, "Point")
SubElement(point, "coordinates").text = "%.6f,%.6f,0" % \
(sta.longitude, sta.latitude)
SubElement(placemark, "description").text = str(sta)
if timespans:
start = sta.start_date
end = sta.end_date
if start is not None or end is not None:
timespan = SubElement(placemark, "TimeSpan")
if start is not None:
SubElement(timespan, "begin").text = str(start)
if end is not None:
if not strip_far_future_end_times or \
end < twenty_years_from_now:
SubElement(timespan, "end").text = str(end)
if timespans:
start = net.start_date
end = net.end_date
if start is not None or end is not None:
timespan = SubElement(folder, "TimeSpan")
if start is not None:
SubElement(timespan, "begin").text = str(start)
if end is not None:
if not strip_far_future_end_times or \
end < twenty_years_from_now:
SubElement(timespan, "end").text = str(end)
# generate and return KML string
return tostring(kml, pretty_print=True, xml_declaration=True,
encoding=encoding)
|
3b10decafe34b006a41be01e44a5073c2d2d13fb
| 24,887 |
def extract_yelp_data(term, categories, price, location,
limit, sort_by, attributes, yelp_api_key=yelp):
"""
This function takes search results (a dictionary) and obtains the
name, zip code, address of the possible restaurant matches in the
form of a pandas dataframe.
Inputs:
- yelp_api_key: a string of the Yelp API Key
- term: a string of search terms input by the user
        - categories: a string of Yelp category aliases to filter on
        - price: a string of price levels (e.g. "1,2,3") to filter on
        - location: a string of the user's current or desired location
        - attributes: a string of additional Yelp attributes to filter on
- limit: an integer of maximum number of Yelp results that
will be returned from the query
- sort_by: string representing a user's sorting preference
(options are: distance, best_match, rating,
review_count)
Outputs:
- yelp_results: a pandas dataframe containing the zip code,
name, address, of each potential result.
"""
yelp_api = YelpAPI(yelp_api_key)
search_results = yelp_api.search_query(term=term,
categories=categories,
price=price,
location=location,
limit=limit,
sort_by=sort_by,
attributes=attributes)
# If Yelp query returns nothing, return None
if not search_results:
return None
# Initialize lists for each planned column in Yelp results dataframe;
# these are characteristics of each business that get returned to user
addresses = []
names = []
zip_code = []
latitude = []
longitude = []
phone = []
price = []
# obtain business information
businesses = search_results['businesses']
for i in businesses:
# In case a Yelp business is missing a field:
try:
a_address = i['location']['display_address'][0]
a_name = i['name']
a_zip = i['location']['zip_code']
a_latitude = i['coordinates']['latitude']
a_longitude = i['coordinates']['longitude']
a_phone = i['phone']
a_price = i['price']
if all([a_address != "", a_name != "", a_zip != "",
a_latitude != "", a_longitude != "",
a_phone != "", a_price != ""]):
addresses.append(a_address)
names.append(a_name)
zip_code.append(a_zip)
latitude.append(a_latitude)
longitude.append(a_longitude)
phone.append(a_phone)
price.append(a_price)
except KeyError:
print("Key Error, some missing field from the Yelp return!")
# cast Yelp results lists into pandas dataframe
yelp_results = pd.DataFrame()
yelp_results['zip_code'] = zip_code
yelp_results['name'] = names
yelp_results['addr'] = addresses
yelp_results['phone'] = phone
yelp_results['price'] = price
yelp_results['latitude'] = latitude
yelp_results['longitude'] = longitude
# change zip code column to appropriate data type
yelp_results['zip_code'] = pd.to_numeric(yelp_results['zip_code'])
return yelp_results
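# Usage sketch (added for illustration, not part of the original source): the argument
# values and the API key below are placeholders; a real call needs network access and a
# valid Yelp Fusion key.
# example_results = extract_yelp_data(term="tacos", categories="mexican", price="1,2",
#                                     location="Chicago, IL", limit=20, sort_by="rating",
#                                     attributes="", yelp_api_key="YOUR_YELP_API_KEY")
# print(example_results[["name", "zip_code", "price"]].head())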
|
e57ca05265944a3971dfb0af7715e9764dd3112e
| 24,889 |
import requests
def collect_photo_info(api_key, tag, max_count):
"""Collects some interesting info about some photos from Flickr.com for a given tag """
photo_collection = []
url = "http://api.flickr.com/services/rest/?method=flickr.photos.search&tags=%s&format=json&nojsoncallback=1&api_key=%s" %(tag, api_key)
resp = requests.get(url)
results = resp.json()
count = 0
for p in results['photos']['photo']:
if count >= max_count:
return photo_collection
        print('Processing photo: "%s"' % p['title'])
photo = {}
url = "http://api.flickr.com/services/rest/?method=flickr.photos.getInfo&photo_id=" + p['id'] + "&format=json&nojsoncallback=1&api_key=" + api_key
info = requests.get(url).json()
photo["flickrid"] = p['id']
photo["title"] = info['photo']['title']['_content']
photo["description"] = info['photo']['description']['_content']
photo["page_url"] = info['photo']['urls']['url'][0]['_content']
photo["farm"] = info['photo']['farm']
photo["server"] = info['photo']['server']
photo["secret"] = info['photo']['secret']
# comments
numcomments = int(info['photo']['comments']['_content'])
if numcomments:
#print " Now reading comments (%d)..." % numcomments
url = "http://api.flickr.com/services/rest/?method=flickr.photos.comments.getList&photo_id=" + p['id'] + "&format=json&nojsoncallback=1&api_key=" + api_key
comments = requests.get(url).json()
photo["comment"] = []
for c in comments['comments']['comment']:
comment = {}
comment["body"] = c['_content']
comment["authorid"] = c['author']
comment["authorname"] = c['authorname']
photo["comment"].append(comment)
photo_collection.append(photo)
count = count + 1
return photo_collection
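# Usage sketch (added; the API key, tag and count are placeholders -- running it needs a
# valid Flickr API key and network access):
# photos = collect_photo_info("YOUR_FLICKR_API_KEY", tag="sunset", max_count=3)
# for p in photos:
#     print(p["title"], p["page_url"])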
|
26e9525639da658f9c9920b5356dd9af4753a1c5
| 24,890 |
def yolo_eval(yolo_outputs, image_shape=(720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5):
"""
Converts the output of YOLO encoding (a lot of boxes) to your predicted boxes along with their scores, box coordinates and classes.
Arguments:
yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors:
box_confidence: tensor of shape (None, 19, 19, 5, 1)
box_xy: tensor of shape (None, 19, 19, 5, 2)
box_wh: tensor of shape (None, 19, 19, 5, 2)
box_class_probs: tensor of shape (None, 19, 19, 5, 80)
image_shape -- tensor of shape (2,) containing the input shape, in this notebook we use (608., 608.) (has to be float32 dtype)
max_boxes -- integer, maximum number of predicted boxes you'd like
score_threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box
iou_threshold -- real value, "intersection over union" threshold used for NMS filtering
Returns:
scores -- tensor of shape (None, ), predicted score for each box
boxes -- tensor of shape (None, 4), predicted box coordinates
classes -- tensor of shape (None,), predicted class for each box
"""
### START CODE HERE ###
# Retrieve outputs of the YOLO model (≈1 line)
box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs
# Convert boxes to be ready for filtering functions
boxes = yolo_boxes_to_corners(box_xy, box_wh)
# Use one of the functions you've implemented to perform Score-filtering with a threshold of score_threshold (≈1 line)
scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=score_threshold)
# Scale boxes back to original image shape.
boxes = scale_boxes(boxes, image_shape)
# Use one of the functions you've implemented to perform Non-max suppression with a threshold of iou_threshold (≈1 line)
scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_boxes=max_boxes,iou_threshold=iou_threshold)
### END CODE HERE ###
return scores, boxes, classes
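# Usage sketch (added): `yolo_model_outputs` stands for the four tensors produced by the
# YOLO encoding model described in the docstring; the thresholds restate the defaults.
# scores, boxes, classes = yolo_eval(yolo_model_outputs, image_shape=(720., 1280.),
#                                    max_boxes=10, score_threshold=.6, iou_threshold=.5)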
|
119524a2f850abba7ff9e1c1c1ca669e44f0a181
| 24,891 |
import re
def check_date_mention(tweet):
"""Check the tweet to see if there is a valid date mention for the
three dates of pyconopenspaces: 5/11, 5/12, 5/13. Quick fix to override
SUTime defaulting to today's date and missing numeric info about event's date
"""
    date_pat = re.compile(r"([5]{1}\/\d{2})")
valid_dates = ["5/11", "5/12", "5/13"]
dates = [d for d in tweet.split() if date_pat.match(d) and d in valid_dates]
return dates if len(dates) == 1 else False
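# Quick illustration (added): exactly one valid date mention is accepted.
assert check_date_mention("Join the open space on 5/12 at 3pm!") == ["5/12"]
assert check_date_mention("Maybe 5/11 or 5/12") is False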
|
67c0de3beac5036d8b7aefa161b82a15257da04f
| 24,894 |
import numpy as np
def make_nointer_beta():
"""Make two random non-intersecting triangles in R^3 that pass the beta test."""
# Corners of triangle B.
b1, b2, b3 = np.random.random(3), np.random.random(3), np.random.random(3)
# Two edges of B.
p1 = b2 - b1
p2 = b3 - b1
n = np.cross(p1, p2)
n /= np.linalg.norm(n)
T = b1 + (0.5 + 0.5 * np.random.random()) * p1 + (0.5 + 0.5 * np.random.random()) * p2
a1 = T + np.random.random() * n
a2 = T - np.random.random() * n
a3 = b1 + (1.5 + 0.5 * np.random.random()) * p1 + (1.5 + 0.5 * np.random.random()) * p2
A, B = np.array([a1, a2, a3]), np.array([b1, b2, b3])
# More fuzzing.
if np.random.randint(2) == 1:
A, B = B, A
return A, B
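# Minimal sanity check (added): each triangle comes back as a 3x3 array of corner coordinates.
A, B = make_nointer_beta()
assert A.shape == (3, 3) and B.shape == (3, 3)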
|
6502e5992f4fe959ec1fe87f3c5e849bfc428d30
| 24,896 |
def get_all_with_given_response(rdd, response='404'):
"""
Return a rdd only with those requests
that received the response code entered.
Default set to '404'.
return type: pyspark.rdd.PipelinedRDD
"""
def status_iterator(ln):
try:
status = ln.split(' ')[-2]
return True if status == response else False
        except Exception:
            return False
return rdd.filter(status_iterator)
|
8268095938bbc35a6418f557af033a458f041c89
| 24,897 |
def get_refl_weight(value, source_node):
"""Returns the reflection weight for Redshift Material
:param value:
:param source_node:
:return:
"""
refl_color_map = source_node.ParameterBlock.texmap_reflection.Value
refl_color_map_name = None
try:
refl_color_map_name = refl_color_map.GetName()
except RuntimeError:
pass
if value.GetIntensity() > 0.0 or refl_color_map_name is not None:
return 1.0
else:
return 0.0
|
601f4be49c536e9efdac4873dddbc76726dc63ba
| 24,899 |
import numpy as np
def createSMAbasis(delta, pistonMode, pistonProj):
"""
Input args:
<delta> is the geometric covariance matrix of actuators, it is computed elsewhere.
It is a square, symmetric matrix 60x60
<pistonMode> : piston mode (will be used in sparta)
This will create a basis orthogonal to piston
with last modes having large voltages and only small phase variance.
"""
m = filterOutPiston( np.identity(60), pistonMode, pistonProj )
lam, mo = diagonalisation( np.dot(m.T, np.dot(delta,m)) )
mo = np.dot(m, mo)
SMAbasis = np.zeros(delta.shape)
SMAbasis[:,0] = pistonMode
SMAbasis[:,1:] = mo[:,:-1]
return SMAbasis
|
216e0cd5e36ab9ea437f47b349ccffc670d3a898
| 24,900 |
def s3_put_bucket_website(s3_obj, bucketname, website_config):
"""
Boto3 client based Put bucket website function
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
website_config (dict): Website configuration info
Returns:
dict : PutBucketWebsite response
"""
return s3_obj.s3_client.put_bucket_website(
Bucket=bucketname, WebsiteConfiguration=website_config
)
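# Example configuration (added): this dict follows the boto3 PutBucketWebsite shape; the
# bucket name and document keys below are placeholders.
example_website_config = {
    "IndexDocument": {"Suffix": "index.html"},
    "ErrorDocument": {"Key": "error.html"},
}
# response = s3_put_bucket_website(s3_obj, "my-example-bucket", example_website_config)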
|
a60d95ef43e5a3643edeb6dacb2b149fef1892d9
| 24,901 |
from typing import List
from typing import Tuple
def check_assignment(tokenlist : List[str], current_line : int) -> Tuple[bool, List[Token.Token]]:
"""Checks if the given construction is of the type 'assignment'. If it is, the first value will return True and the second value will return a list of tokens.
    If it isn't of the type 'assignment', the first value will return False and the second value will return None or an error token.
Args:
tokenlist (List[str]): A list of strings consisting of an instruction and their parameters
Returns(either):
bool, List[Token.Token]: Returns a bool(whether the token is of this type) and a list of tokens, which is the instruction and the parameters.
bool, None : Returns a bool(whether the token is of this type) and None
"""
variable_keywords = {
"int": int
}
assignment_operators = ['=']
variable_keyword,tokenlist = tokenlist.next()
if variable_keyword not in variable_keywords:
return False, [Token.Token('ERROR', "Token is not of type 'location'", current_line)]
name,tokenlist = tokenlist.next()
assignment_operator,tokenlist = tokenlist.next()
if assignment_operator not in assignment_operators:
return True, [Token.Token('ERROR', "Unknown assignment operator", current_line)]
value,tokenlist = tokenlist.next()
if type(eval(value)) != variable_keywords[variable_keyword]:
return True, [Token.Token('ERROR', 'Error: Value does not match type', current_line)]
tokens = [Token.Token('TYPE', variable_keyword, current_line), Token.Token('IDENTIFIER', name, current_line),
Token.Token('ASSIGNMENT', assignment_operator, current_line), Token.Token('VALUE', value, current_line)]
return True, tokens
|
2faa56afe89c7d89ff4ec6f4443d8542073bcdaa
| 24,903 |
import geopandas
import pandas as pd
def load_nifc_fires():
"""load nifc data for 2020/2021 fire season
NB this is a bit of an undocumented NIFC feature -- the data supposedly only cover 2021
but there are definitely 2020 fires included at the endpoint.
This might not be true in the future.
https://data-nifc.opendata.arcgis.com/datasets/nifc::wfigs-wildland-fire-perimeters-full-history/about
"""
nifc_uri = "https://storage.googleapis.com/carbonplan-data/raw/nifc/WFIGS_-_Wildland_Fire_Perimeters_Full_History.geojson"
fires = geopandas.read_file(nifc_uri)
nifc_colnames = {"poly_IncidentName": "name", "poly_Acres_AutoCalc": "acres"}
fires = fires.rename(columns=nifc_colnames)
fires = fires[fires["irwin_FireDiscoveryDateTime"].str[:4].isin(["2020", "2021"])]
fires["ignite_at"] = (
fires["irwin_FireDiscoveryDateTime"]
.apply(pd.Timestamp)
.apply(lambda x: pd.Timestamp(x.date()))
)
return fires.to_crs(crs)[["name", "acres", "ignite_at", "geometry"]]
|
97767eb2bf850e7753cab5fc945efa7b4e235b85
| 24,904 |
from datetime import datetime
from unittest.mock import MagicMock
from unittest.mock import call
from unittest.mock import patch
def test_api_query_paginated_trades_pagination(mock_bitstamp):
"""Test pagination logic for trades works as expected.
First request: 2 results, 1 valid trade (id 2)
Second request: 2 results, no trades
Third request: 2 results, 1 valid trade (id 5) and 1 invalid trade (id 6)
Trades with id 2 and 5 are expected to be returned.
"""
# Not a trade
user_transaction_1 = """
{
"id": 1,
"type": -1,
"datetime": "2020-12-02 09:00:00"
}
"""
# First trade, buy BTC with USD, within timestamp range
user_transaction_2 = """
{
"id": 2,
"type": 2,
"datetime": "2020-12-02 09:30:00",
"btc": "0.50000000",
"usd": "-10000.00000000",
"btc_usd": "0.00005000",
"fee": "20.00000000",
"order_id": 2
}
"""
# Not a trade
user_transaction_3 = """
{
"id": 3,
"type": -1,
"datetime": "2020-12-02 18:00:00"
}
"""
# Not a trade
user_transaction_4 = """
{
"id": 4,
"type": -1,
"datetime": "2020-12-03 9:00:00"
}
"""
# Second trade, sell EUR for USD, within timestamp range
user_transaction_5 = """
{
"id": 5,
"type": 2,
"datetime": "2020-12-03 11:30:00",
"eur": "-1.00000000",
"usd": "1.22000000",
"eur_usd": "0.81967213",
"fee": "0.00610000",
"order_id": 3
}
"""
# Third trade, buy ETH with USDC, out of timestamp range
user_transaction_6 = """
{
"id": 6,
"type": 2,
"datetime": "2020-12-03 12:00:01",
"eth": "1.00000000",
"usdc": "-750.00000000",
"eth_usdc": "0.00133333",
"fee": "3.75000000",
"order_id": 1
}
"""
api_limit = 2
now = datetime.now()
now_ts = int(now.timestamp())
options = {
'since_id': USER_TRANSACTION_MIN_SINCE_ID,
'limit': api_limit,
'sort': USER_TRANSACTION_SORTING_MODE,
'offset': 0,
}
expected_calls = [
call(
endpoint='user_transactions',
method='post',
options={
'since_id': 1,
'limit': 2,
'sort': 'asc',
'offset': 0,
},
),
call(
endpoint='user_transactions',
method='post',
options={
'since_id': 3,
'limit': 2,
'sort': 'asc',
'offset': 0,
},
),
call(
endpoint='user_transactions',
method='post',
options={
'since_id': 3,
'limit': 2,
'sort': 'asc',
'offset': 2,
},
),
]
def get_paginated_response():
results = [
f'[{user_transaction_1},{user_transaction_2}]',
f'[{user_transaction_3},{user_transaction_4}]',
f'[{user_transaction_5},{user_transaction_6}]',
]
for result_ in results:
yield result_
def mock_api_query_response(endpoint, method, options): # pylint: disable=unused-argument
return MockResponse(HTTPStatus.OK, next(get_response))
get_response = get_paginated_response()
with patch(
'rotkehlchen.exchanges.bitstamp.API_MAX_LIMIT',
new_callable=MagicMock(return_value=api_limit),
):
with patch.object(
mock_bitstamp,
'_api_query',
side_effect=mock_api_query_response,
) as mock_api_query:
result = mock_bitstamp._api_query_paginated(
start_ts=Timestamp(0),
end_ts=Timestamp(now_ts),
options=options,
case='trades',
)
assert mock_api_query.call_args_list == expected_calls
expected_result = [
Trade(
timestamp=1606901400,
location=Location.BITSTAMP,
pair=TradePair('BTC_USD'),
trade_type=TradeType.BUY,
amount=FVal("0.50000000"),
rate=FVal("0.00005000"),
fee=FVal("20.00000000"),
fee_currency=Asset('USD'),
link='2',
notes='',
),
Trade(
timestamp=1606995000,
location=Location.BITSTAMP,
pair=TradePair('EUR_USD'),
trade_type=TradeType.SELL,
amount=FVal("1.22000000"),
rate=FVal("0.81967213"),
fee=FVal("0.00610000"),
fee_currency=Asset('EUR'),
link='5',
notes='',
),
]
assert result == expected_result
|
f1bb9cd15c0b595bb9fc1bbfb7e6ce87042ff087
| 24,905 |
def eigenvalue_nonunitary_entanglement_infidelity(a, b, mx_basis):
"""
Returns (d^2 - 1)/d^2 * (1 - sqrt(U)), where U is the eigenvalue-unitarity of a*b^{-1}
Parameters
----------
a : numpy.ndarray
The first process (transfer) matrix.
b : numpy.ndarray
The second process (transfer) matrix.
mx_basis : Basis or {'pp', 'gm', 'std'}
the basis that `a` and `b` are in.
Returns
-------
float
"""
d2 = a.shape[0]; U = eigenvalue_unitarity(a, b)
return (d2 - 1.0) / d2 * (1.0 - _np.sqrt(U))
|
754717a951868bdc498f4f3a7bc0013c9ffe662f
| 24,906 |
def morph(word, rootlist, Indo = False, n = 5):
"""
    For a given word ("word"), return the n most likely morphological analyses based on
    the root list ("rootlist").
    Output format: root, word, proclitic/prefix, suffix/enclitic, circumfix, reduplication
    @param Indo: If True, the prefix N- and the suffix -in are also included in the analysis.
    @param n: Number of candidates to return.
"""
cand = set()
check = set()
cand1 = NyahApitan(word, rootlist)
cand2 = NyahAwalan(word, rootlist)
cand3 = NyahAkhiran(word, rootlist)
if Indo:
cand1 = NyahApitan(word, rootlist, Indo = True)
cand2 = NyahAwalan(word, rootlist, Indo = True)
cand3 = NyahAkhiran(word, rootlist, Indo = True)
    # No affixes
for (c1, c2, c3) in [(c1, c2, c3) for c1 in cand1 for c2 in cand2 for c3 in cand3]:
if c1[0] == c2[0] == c3[0] and (c1[4], c2[4], c3[4]) == ("0", "0", "0"):
cand.add((c1[0], c1[1], "0", "0", "0", c1[5]))
    # With affixes
else:
for c1 in cand1:
            # No prefix, no suffix
if not c1[2] and not c1[3]:
cand.add((c1[0], c1[1], "0", "0", c1[4], c1[5]))
            # No prefix
elif not c1[2]:
                temp = c1[1] + c1[3]  # form without the circumfix letters
cand3c = NyahAkhiran(temp, rootlist)
if Indo:
cand3c = NyahAkhiran(temp, rootlist, Indo = True)
for c3 in cand3c:
if c1[1] == c3[0][0] and c1[3] == c3[0][2] and not c3[3]:
cand.add((c1[0], c1[1], "0", c3[4], c1[4], c1[5]))
            # No suffix
elif not c1[3]:
                temp = c1[2] + c1[1]  # form without the circumfix letters
cand2c = NyahAwalan(temp, rootlist)
if Indo:
cand2c = NyahAwalan(temp, rootlist, Indo = True)
for c2 in cand2c:
if c1[1] == c2[0][0] and c1[2] == c2[0][1] and not c2[2]:
cand.add((c1[0], c1[1], c2[4], "0", c1[4], c1[5]))
            # With prefix and suffix
else:
                temp = c1[2] + c1[1] + c1[3]  # form without the circumfix letters
cand2c = NyahAwalan(temp, rootlist)
cand3c = NyahAkhiran(temp, rootlist)
if Indo:
cand2c = NyahAwalan(temp, rootlist, Indo = True)
cand3c = NyahAkhiran(temp, rootlist, Indo = True)
for c2 in cand2c:
if c1[1] == c2[0][0] and c1[2] == c2[0][1] and not c2[2]:# and c1[3] == c2[0][2]:
for c3 in cand3c:
if c1[1] == c3[0][0] and c1[3] == c3[0][2] and not c3[3]:
cand.add((c1[0], c1[1], c2[4], c3[4], c1[4], c1[5]))
    # Prefer analyses whose root already appears in the root list
cand4 = set([c for c in cand if c[1] in rootlist])
if cand4:
cand = cand4
    # If no analysis is found, try again with the word in lowercase
if not cand:
if not word.islower():
kecil = morph(word.lower(), rootlist)
for k in kecil:
check.add((k[0], word, k[2], k[3], k[4], k[5]))
else:
check.add((word, word, "0", "0", "0", c1[5]))
    # Sort by syllable count (2 > 3 > 1 > 4 ...) and root length
cand = sorted(cand, key = lambda x: SylCount(x[1], root = True, mono = True) + len(x[1])/100)
    # Add the n most likely results to the check set
for c in cand[:n]:
check.add((c[1], word, c[2], c[3], c[4], c[5]))
return check
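# Usage sketch (added; the word and root list are illustrative -- a real call needs the
# NyahApitan/NyahAwalan/NyahAkhiran helpers and a populated root list):
# analyses = morph("pembacaan", rootlist, n=5)
# for root, word, prefix, suffix, circumfix, redup in analyses:
#     print(root, word, prefix, suffix, circumfix, redup)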
|
320ee4767b87ee336df4c132fb282d2a6a987412
| 24,908 |
def get_logger(name):
"""
Returns a logger from the registry
Parameters
----------
name : str
the name indicating the logger to return
Returns
-------
:class:`delira.logging.base_logger.Logger`
the specified logger object
"""
return _AVAILABLE_LOGGERS[name]
|
3228e2e0bff57795c590868a06276a0ec57ea985
| 24,909 |
from typing import Union
from pathlib import Path
import yaml
from easydict import EasyDict as edict
def load_yaml(path: Union[str, Path], pure: bool = False) -> dict:
"""config.yaml file loader.
This function converts the config.yaml file to `dict` object.
Args:
path: .yaml configuration filepath
pure: If True, just load the .yaml without converting to EasyDict
and exclude extra info.
Returns:
`dict` object containing configuration parameters.
Example:
.. code-block:: python
from dlp import CNF_PATH
config = load_yaml(CNF_PATH)
print(config["project_name"])
"""
path = str(Path(path).absolute().resolve())
# * Load config file
with open(path) as file:
        config = yaml.load(file, Loader=yaml.FullLoader)
    if not pure:  # Add extra features
# Convert dict to easydict
config = edict(config)
return config
|
163f48dc48e8dff998ce35dd5f9f1dcfce94eeee
| 24,910 |
def InterpolatedCurveOnSurfaceUV1(thisSurface, points, tolerance, closed, closedSurfaceHandling, multiple=False):
"""
Returns a curve that interpolates points on a surface. The interpolant lies on the surface.
Args:
points (System.Collections.Generic.IEnumerable<Point2d>): List of at least two UV parameter locations on the surface.
tolerance (double): Tolerance used for the fit of the push-up curve. Generally, the resulting interpolating curve will be within tolerance of the surface.
closed (bool): If false, the interpolating curve is not closed. If true, the interpolating curve is closed, and the last point and first point should generally not be equal.
closedSurfaceHandling (int): If 0, all points must be in the rectangular domain of the surface. If the surface is closed in some direction,
then this routine will interpret each point and place it at an appropriate location in the covering space.
This is the simplest option and should give good results.
If 1, then more options for more control of handling curves going across seams are available.
If the surface is closed in some direction, then the points are taken as points in the covering space.
Example, if srf.IsClosed(0)=True and srf.IsClosed(1)=False and srf.Domain(0)=srf.Domain(1)=Interval(0,1)
then if closedSurfaceHandling=1 a point(u, v) in points can have any value for the u coordinate, but must have 0<=v<=1.
In particular, if points = { (0.0,0.5), (2.0,0.5) } then the interpolating curve will wrap around the surface two times in the closed direction before ending at start of the curve.
If closed=True the last point should equal the first point plus an integer multiple of the period on a closed direction.
Returns:
NurbsCurve: A new NURBS curve if successful, or None on error.
"""
url = "rhino/geometry/surface/interpolatedcurveonsurfaceuv-surface_point2darray_double_bool_int"
if multiple: url += "?multiple=true"
args = [thisSurface, points, tolerance, closed, closedSurfaceHandling]
if multiple: args = list(zip(thisSurface, points, tolerance, closed, closedSurfaceHandling))
response = Util.ComputeFetch(url, args)
response = Util.DecodeToCommonObject(response)
return response
|
84ef7894b7d2f3aba43d494212f900ddb683bb92
| 24,911 |
def show(request, url, alias_model, template):
"""List all vouched users with this group."""
group_alias = get_object_or_404(alias_model, url=url)
if group_alias.alias.url != url:
return redirect('groups:show_group', url=group_alias.alias.url)
group = group_alias.alias
in_group = group.members.filter(user=request.user).exists()
profiles = group.members.vouched()
page = request.GET.get('page', 1)
paginator = Paginator(profiles, settings.ITEMS_PER_PAGE)
try:
people = paginator.page(page)
except PageNotAnInteger:
people = paginator.page(1)
except EmptyPage:
people = paginator.page(paginator.num_pages)
show_pagination = paginator.count > settings.ITEMS_PER_PAGE
profile = request.user.userprofile
hide_leave_group_button = (hasattr(group, 'steward') and
profile == group.steward)
data = dict(people=people,
group=group,
in_group=in_group,
show_pagination=show_pagination,
hide_leave_group_button=hide_leave_group_button)
if isinstance(group, Group) and group.steward:
""" Get the most globally popular skills that appear in the group
Sort them with most members first
"""
skills = (Skill.objects
.filter(members__in=profiles)
.annotate(no_users=Count('members'))
.order_by('-no_users'))
data.update(skills=skills)
data.update(irc_channels=group.irc_channel.split(' '))
data.update(members=profiles.count())
return render(request, template, data)
|
f2fccbc267ac8ce589182ff9bdf520c0d91cf294
| 24,912 |
import re
def index():
""" Home page. Displays subscription info and smart-sorted episodes. """
client = JsonClient(session["username"], session["password"])
subs = get_subscriptions(client, session["username"])
recent_episodes = smart_sort(client, session["username"])
for ep in recent_episodes:
ep['description'] = re.sub(r'http\S+', '', ep['description'])
ep['released'] = ep['released'].split('T', 1)[0]
if request.method == 'POST':
if request.form['submit'] == 'fetch':
if not request.form['queryvalue']:
return render_template('index.html', subs=subs)
else:
return redirect(url_for('searchresults', query=request.form['queryvalue']))
elif request.form['submit'] == 'advanced':
return redirect(url_for('advancedsearch'))
elif request.form['submit'] == 'sugg':
return redirect(url_for('suggestions'))
return render_template('index.html', subs=subs, recent_episodes=recent_episodes)
|
5596198aa8e8f257f0f2531dd4e76d4f3ec9d23a
| 24,913 |
from typing import Optional
def range(
lower: int, upper: int, step: Optional[int] = None, name: Optional[str] = None
) -> Series:
"""
Create a Series that ranges from lower bound to upper bound.
Parameters
----------
lower
Lower bound value.
upper
Upper bound value.
step
Optional step size. If none given, the step size will be 1.
name
Name of the Series
"""
if name is None:
name = ""
return Series(name, np.arange(lower, upper, step), nullable=False)
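# Example (added): a Series named "idx" holding the values 0, 2, 4, 6, 8.
# s = range(0, 10, step=2, name="idx")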
|
849cb808495d89294768d8d98d7444f03eade593
| 24,914 |
import bisect
def ticks_lt(exact_price):
"""
Returns a generator for all the ticks below the given price.
>>> list(ticks_lt(Decimal('0.35')))
[Decimal('0.34'), Decimal('0.33'), Decimal('0.20'), Decimal('0.10'), Decimal('0.01')]
>>> list(ticks_lt(Decimal('0.20')))
[Decimal('0.10'), Decimal('0.01')]
>>> list(ticks_lt(Decimal('0.0001')))
[]
"""
first_viable = bisect.bisect_left(_ALL_TICKS, exact_price) - 1
first_invalid_index, step = -1, -1
return (_ALL_TICKS[i] for i in range(first_viable, first_invalid_index, step))
|
aa291f00021e4b3bfe78c7fb406aa81beb9d3467
| 24,916 |
def _decode_to_string(to_decode):
"""
This function is needed for Python 3,
because a subprocess can return bytes instead of a string.
"""
try:
return to_decode.decode("utf-8")
except AttributeError: # bytesToDecode was of type string before
return to_decode
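# Examples (added): bytes are decoded, plain strings pass through unchanged.
assert _decode_to_string(b"done\n") == "done\n"
assert _decode_to_string("done\n") == "done\n"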
|
3a9f4ef2719f74e259e119dc1e43a9cbdd655dd5
| 24,917 |
import tensorflow as tf
def nn(x_dict):
""" Implementation of a shallow neural network."""
# Extract Input.
x = x_dict["images"]
# First Hidden Layer.
layer_1 = tf.layers.dense(x, 256)
# Second Hidden Layer.
layer_2 = tf.layers.dense(layer_1, 256)
# Output Layer.
output_layer = tf.layers.dense(layer_2, 10)
return output_layer
|
6e47efcd03c335137f0ce30665978a9d38c7df3f
| 24,918 |
def find_negamax_move_alphabeta(game_state, valid_moves, depth, alpha, beta, turn_multiplier):
"""
NegaMax algorithm with alpha beta pruning.
Alpha beta pruning eliminates the need to check all moves within the game_state tree when
a better branch has been found or a branch has too low of a score.
alpha: upper bound (max possible); beta: lower bound (min possible)
If max score is greater than alpha, that becomes the new alpha value.
If alpha becomes >= beta, break out of branch.
White is always trying to maximise score and black is always
trying to minimise score. Once the possibility of a higher max or lower min
has been eliminated, there is no need to check further branches.
"""
global next_move
if depth == 0:
return turn_multiplier * score_board(game_state)
max_score = -checkmate_points
for move in valid_moves:
game_state.make_move(move)
next_moves = game_state.get_valid_moves()
score = -find_negamax_move_alphabeta(game_state, next_moves, depth - 1, -beta, -alpha, -turn_multiplier)
if score > max_score:
max_score = score
if depth == set_depth:
next_move = move
game_state.undo_move()
# Pruning
if max_score > alpha:
alpha = max_score
if alpha >= beta:
break
return max_score
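# Usage sketch (added; `game_state`, `set_depth` and `checkmate_points` come from the
# surrounding chess engine, and the turn multiplier is +1 for white, -1 for black):
# find_negamax_move_alphabeta(game_state, game_state.get_valid_moves(), set_depth,
#                             -checkmate_points, checkmate_points, turn_multiplier)
# The chosen move is left in the module-level variable `next_move`.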
|
de245eaa7a675af7348348d84e61972138663270
| 24,919 |
def sum_kernel(X, Y, kernels = None):
"""
Meta Kernel for summing multiple kernels.
"""
_sum = 0
for kernel in kernels:
print("Doing", kernel["class"], "with parameters:", kernel["parameters"])
_sum = _sum + globals()[kernel["class"]](X, Y, **kernel["parameters"])
return _sum
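# Illustrative kernel specification (added): each entry names a kernel function defined in
# this module plus its keyword parameters; the kernel names and parameters below are assumptions.
# example_kernels = [
#     {"class": "rbf_kernel", "parameters": {"gamma": 0.5}},
#     {"class": "linear_kernel", "parameters": {}},
# ]
# K = sum_kernel(X, Y, kernels=example_kernels)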
|
a2b042b08026e4c87f028687c4521cc1e81c4af5
| 24,920 |
def pretvori_v_sekunde(niz):
"""
    Convert a string representing a track duration in hh:mm:ss format into the number of seconds.
"""
h, m, s = map(int, niz.split(":"))
return s + m*60 + h*3600
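# Example (added): "01:02:03" is 1*3600 + 2*60 + 3 = 3723 seconds.
assert pretvori_v_sekunde("01:02:03") == 3723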
|
db0cc5872109b15e635b2b1e8731a5343d63f518
| 24,921 |
import logging
def _get_profiling_data(filename):
"""Read a given file and parse its content for profiling data."""
data, timestamps = [], []
try:
with open(filename, "r") as f:
file_data = f.readlines()
except Exception:
logging.error("Could not read profiling data.", exc_info=True)
raise SystemExit(1)
for line in file_data:
if line == "\n":
continue
line = line.strip()
line_data = line.split(" ")
if len(line_data) != 3:
continue
_, mem_usage, timestamp = line.split(" ")
data.append(float(mem_usage))
timestamps.append(float(timestamp))
if not data:
logging.error("No samples to parse in {}.".format(filename))
raise SystemExit(1)
return {"data": data, "timestamp": timestamps}
|
85f434c9aa22d60bae06205162623cde83e5a716
| 24,922 |
def parse_dataset_name(dataset_name: str) -> (str, str):
"""
Split the string of the dataset name into two parts: dataset source name (e.g., cnc_in_domain)
and dataset part (e.g., train).
:param dataset_name:
:return: dataset source name (e.g., cnc_in_domain) and dataset part (e.g., train).
"""
name_parts = dataset_name.rsplit('_', 1)
dataset_source = name_parts[0]
dataset_part = DatasetPart[name_parts[1].upper()]
return dataset_source, dataset_part
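# Example (added; assumes DatasetPart defines a TRAIN member, as implied by the docstring):
# parse_dataset_name("cnc_in_domain_train") -> ("cnc_in_domain", DatasetPart.TRAIN)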
|
e308d3f29e37b5453d47a36ef2baf94454ac90d3
| 24,923 |
import matplotlib.pyplot as plt
def plot_pq(df_pq, df_pq_std=None, columns=('mae', 'r2s'),
title='Performance-Quantile'):
"""Plot the quantile performance plot from the prepared metrics table.
Args:
df_pq (pd.DataFrame): The QP table information with mean values.
df_pq_std (pd.DataFrame): The QP table information with std values.
columns (tuple): Which column of the qp table to be plotted, limited
to 2 items.
title (str): An optional name of the figure.
Returns:
plt.Figure: A figure of the resulting QP plot.
"""
fig, ax1 = plt.subplots(figsize=(5, 3))
if len(columns) == 1:
ax1.plot(df_pq['quantile'], df_pq[columns[0]], 'r', label=columns[0])
ax1.set_ylabel(columns[0].upper())
ax1.legend(loc=1)
if df_pq_std is not None:
ax1.fill_between(df_pq['quantile'],
df_pq[columns[0]] - df_pq_std[columns[0]],
df_pq[columns[0]] + df_pq_std[columns[0]],
color='r',
alpha=0.5
)
elif len(columns) == 2:
_ = ax1.plot(df_pq['quantile'], df_pq[columns[0]], 'r',
label=columns[0])
ax1.set_ylabel(columns[0].upper())
ax2 = ax1.twinx()
_ = ax2.plot(df_pq['quantile'], df_pq[columns[1]], 'g',
label=columns[1])
ax2.set_ylabel(columns[1].upper())
ax1.legend(loc=1)
ax2.legend(loc=4)
if df_pq_std is not None:
ax1.fill_between(df_pq['quantile'],
df_pq[columns[0]] - df_pq_std[columns[0]],
df_pq[columns[0]] + df_pq_std[columns[0]],
color='r',
alpha=0.5
)
ax2.fill_between(df_pq['quantile'],
df_pq[columns[1]] - df_pq_std[columns[1]],
df_pq[columns[1]] + df_pq_std[columns[1]],
color='g',
alpha=0.5
)
else:
raise ValueError('Too many columns. Currently only two are allowed.')
ax1.set_xlabel('Quantile')
ax1.set_title(title)
plt.show()
return fig
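# Usage sketch (added): a tiny illustrative table with the expected 'quantile', 'mae' and
# 'r2s' columns; the values are made up.
# df = pd.DataFrame({"quantile": [0.25, 0.5, 0.75, 1.0],
#                    "mae": [0.10, 0.15, 0.20, 0.30],
#                    "r2s": [0.95, 0.90, 0.85, 0.80]})
# fig = plot_pq(df, columns=("mae", "r2s"), title="Performance-Quantile")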
|
3bd02080c74b1bf05f9f6a8cda3b0d22ac847e9f
| 24,925 |
def protoToOpenAPISchemaRecursive(lines, schemas, schemaPrefix, basename):
"""
Recursively create a schema from lines read from a proto file.
This method is recursive because proto messages can contain internal messages and enums.
If this is the case the method will call itself recursively.
:param lines: list of lines read from a proto file.
:param schemas: dictionary of schemas to which the new definitions will be added.
:param basename: basename respectively prefix which is added before the name of a schema.
This is used to prefix internal messages/enums with the name of the message containing it.
    :return: the filled schemas dictionary and the current processing index. The return value should not be used
because it deals with parameters only required for the recursion.
"""
# create a new schema
schema = {}
# save the current name for the schema
name = ""
# index for the current line parsed
    i = 0
# iterate till end of file
while (i < len(lines)):
# get current line and remove whitespaces at front and end
line = lines[i].strip()
# replace multiple whitepaces with a single one, see https://stackoverflow.com/questions/2077897/substitute-multiple-whitespace-with-single-whitespace-in-python
line = ' '.join(line.split())
# increase index
i += 1
# if the line is irrelevant for parsing, continue the loop
if skipLine(line):
continue
# closing curly brackets indicate that a message/enum definition has ended
if line.startswith('}'):
# return schemas and current index so that loop which recursively called this can resume at the correct location
return schemas, i
# test if line indicates an internal message/enum
if name != "" and (line.startswith('message') or line.startswith('enum')):
# name is already specified but there is a message/enum, so it is internal
# recursively call this method but splice the lines to begin at the definition of the internal type
_, processedLines = protoToOpenAPISchemaRecursive(lines[(i-1):len(lines)-1], schemas, schemaPrefix, basename=(name + '.'))
# move the index of this iteration after the definition of the internal type
i += processedLines
continue
# type is a message
if line.startswith('message'):
# set message flag
isMessage = True
# extract name
name = basename + line.split(' ')[1]
if basename == '':
name = schemaPrefix + name
# create schema and add to schemas
schemas[name] = schema
schema['type'] = 'object'
schema['properties'] = {}
continue
# type is an enum
if line.startswith('enum'):
# set message flag to false
isMessage = False
# extract name
name = basename + line.split(' ')[1]
if basename == '':
name = schemaPrefix + name
# create schema for enum and add to schemas
schemas[name] = schema
schema['type'] = 'string'
schema['enum'] = []
continue
# if item is an enum, parse lines as its values
if not isMessage:
enumValue = line.split('=')[0].strip()
# ignore values called unknown
if enumValue == "UNKNOWN":
continue
else:
schema['enum'].append(enumValue)
continue
# extract information for field
split = line.split(' ')
option = split[0] # option is repeated, optional, ...
fieldType = split[1] # fieldType is string, uint64, reference to another type, ...
fieldName = split[2] # the name of the field
# create a property for the field
prop = {}
# if the field option is repeated add the property as an array, else normally
if option == "repeated":
properties = schema['properties']
properties[fieldName] = {}
properties[fieldName]['type'] = 'array'
properties[fieldName]['items'] = prop
else:
schema['properties'][fieldName] = prop
# add property fields based on field type and print an error if it could not be done
if not addTypeToProp(fieldType, prop, schemaPrefix, schemas):
            print('Could not parse fieldType[' + fieldType + '] into an openAPI property')
return schemas, i
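# Usage sketch (added; the .proto path and schema prefix are placeholders): the top-level
# call passes an empty basename, matching how the recursion prefixes internal types.
# with open("service.proto") as proto_file:
#     proto_lines = proto_file.readlines()
# schemas = {}
# protoToOpenAPISchemaRecursive(proto_lines, schemas, "mypackage.", "")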
|
c011a37ddc3fa9fea7c141f24f60a178ac0f7032
| 24,926 |
import typing
def to_binary(s: typing.Union[str, bytes], encoding='utf8') -> bytes:
"""Cast function.
:param s: object to be converted to bytes.
"""
return s if isinstance(s, bytes) else bytes(s, encoding=encoding)
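# Examples (added): str input is encoded, bytes input passes through unchanged.
assert to_binary("héllo") == "héllo".encode("utf8")
assert to_binary(b"raw") == b"raw"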
|
ddc442a8124b7d55618cdc06081e496930d292a5
| 24,927 |
import gzip
def load_numpy(data_path, save_disk_flag=True):
"""Load numpy."""
if save_disk_flag:
# Save space but slow
f_data = gzip.GzipFile(f'{data_path}.gz', "r")
data = np.load(f_data)
else:
data = np.load(data_path)
return data
|
9979e2e232fcc96d5fe865ba01a4ba5d36fd1b11
| 24,928 |
def mock_weather_for_coordinates(*args, **kwargs): # noqa: F841
"""Return mock data for request weather product type."""
if args[2] == aiohere.WeatherProductType[MODE_ASTRONOMY]:
return astronomy_response
if args[2] == aiohere.WeatherProductType[MODE_HOURLY]:
return hourly_response
if args[2] == aiohere.WeatherProductType[MODE_DAILY]:
return daily_response
if args[2] == aiohere.WeatherProductType[MODE_DAILY_SIMPLE]:
return daily_simple_forecasts_response
if args[2] == aiohere.WeatherProductType[MODE_OBSERVATION]:
return observation_response
|
527bd91866984cc966ff6ad2e1b438591bc7f9d2
| 24,929 |
import json
import logging
def get_user(request, project_key):
"""Return the ID of the current user for the given project"""
projects = request.cookies.get('projects')
if projects is None:
return None
try:
projects = json.loads(projects)
except (ValueError, KeyError, TypeError):
print "JSON format error"
logging.exception("Cookie json could not be decoded")
return None
user_id = projects.get(project_key)
if user_id is not None:
return int(user_id)
else:
return None
|
4edb40eb0ccece32bd1c0fc3f44ab42e97b9770c
| 24,930 |
import iris
import iris.coord_categorisation
def extract_month(cube, month):
"""
Slice cube to get only the data belonging to a specific month.
Parameters
----------
cube: iris.cube.Cube
Original data
month: int
Month to extract as a number from 1 to 12
Returns
-------
iris.cube.Cube
data cube for specified month.
"""
if month not in range(1, 13):
raise ValueError('Please provide a month number between 1 and 12.')
if not cube.coords('month_number'):
iris.coord_categorisation.add_month_number(cube, 'time',
name='month_number')
return cube.extract(iris.Constraint(month_number=month))
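# Usage sketch (added; `cube` stands for any iris cube with a 'time' coordinate):
# july_cube = extract_month(cube, 7)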
|
31e51654875abb08f727ecf6eb226a3b3b008657
| 24,931 |
def is_insert_grad_of_statement(node):
"""Check whether a context manager calls `insert_grad_of`.
Args:
node: The context manager node.
Returns:
Whether or not this node contains `insert_grad_of` calls.
Raises:
ValueError: If the `insert_grad_of` calls are mixed with other calls.
"""
tangent_calls = [anno.getanno(item.context_expr, 'func', None)
is utils.insert_grad_of for item in node.items]
if all(tangent_calls):
return True
elif any(tangent_calls):
raise ValueError
else:
return False
|
f1a8494716577f349b780880210d80cc4a941c1e
| 24,932 |