content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---|
def uniformCostSearch(problem):
"""Search the node of least total cost first."""
"*** YOUR CODE HERE ***"
startState = problem.getStartState()
if problem.isGoalState(startState):
return []
# Each element in the fringe stores the state and the cost to reach it.
fringe = util.PriorityQueue()
fringe.push(startState, 0)
# Each pair in itemsInFringe stores a state and the list of actions
# required to reach it. States are added in itemsInFringe when they are
# added to the fringe. The states are removed from itemsInFringe when
# they get removed from the fringe.
itemsInFringe = {startState: []}
visitedStates = set()  # built-in set; the Python 2 'sets' module no longer exists
while not fringe.isEmpty():
currState = fringe.pop()
actionsToCurrState = itemsInFringe[currState]
del itemsInFringe[currState]
costOfActionsToCurrState = problem.getCostOfActions(actionsToCurrState)
if problem.isGoalState(currState):
return actionsToCurrState
visitedStates.add(currState)
for successor, action, stepCost in problem.getSuccessors(currState):
newCostToSuccessor = costOfActionsToCurrState + stepCost
newActionsToSuccessor = actionsToCurrState + [action]
if successor not in visitedStates:
fringe.update(successor, newCostToSuccessor)
if successor in itemsInFringe and \
problem.getCostOfActions(itemsInFringe[successor]) <= \
newCostToSuccessor:
# If successor is already in itemsInFringe, only update the
# cost if the current cost is greater than the new cost.
continue
itemsInFringe[successor] = newActionsToSuccessor
# Goal not found, so no action.
return []
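A hedged usage sketch (not part of the original snippet): it assumes the Berkeley-Pacman-style util.PriorityQueue (push/pop/update/isEmpty) imported by the code above is available, and it defines a hypothetical ToyGraphProblem that only mimics the SearchProblem interface (getStartState, isGoalState, getSuccessors, getCostOfActions).

class ToyGraphProblem:
    """Weighted toy graph: A -> B -> C costs 1 + 1, while A -> C directly costs 4."""
    _edges = {'A': [('B', 'AB', 1), ('C', 'AC', 4)],
              'B': [('C', 'BC', 1)],
              'C': []}
    _action_costs = {'AB': 1, 'AC': 4, 'BC': 1}

    def getStartState(self):
        return 'A'

    def isGoalState(self, state):
        return state == 'C'

    def getSuccessors(self, state):
        # (successor, action, stepCost) triples, as the search expects
        return self._edges[state]

    def getCostOfActions(self, actions):
        return sum(self._action_costs[a] for a in actions)

# Expected: uniformCostSearch(ToyGraphProblem()) returns ['AB', 'BC'], the
# cheaper two-step route (total cost 2) instead of the direct ['AC'] (cost 4).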
|
fd338afde09f48f73e30aace7e926c26adc2e977
| 30,998 |
def create_new_dataset(data, num_schedules):
"""
Creates a dataset where each row is one of the twenty timesteps of a schedule alongside the chosen task.
Also creates a schedule array that tracks where each schedule starts and ends.
:return: X, Y, schedule_array
"""
X = []
Y = []
schedule_array = []
for i in range(0, num_schedules):
rand_schedule = i
timesteps_where_things_scheduled = find_nums_with_task_scheduled_pkl(data, rand_schedule) # should be of size 20
if len(timesteps_where_things_scheduled) != 20:
print('schedule wrong size, WHY?')
continue
if i == 0:
start = 0
else:
start = schedule_array[-1][1] + 1 # end of previous list + 1
end = start + len(timesteps_where_things_scheduled) - 1
schedule_array.append([start, end])
for each_timestep in timesteps_where_things_scheduled:
input_nn, output = rebuild_input_output_from_pickle(data, i, each_timestep)
X.append(input_nn)
Y.append(output)
return X, Y, schedule_array
|
35f6bbc5b5e9968e7890af17ae77a6c3cbd5d0a5
| 31,001 |
def init_language():
"""
Default language setting; falls back to English (empty string) if none is configured. Global.
"""
yaml_data = read_yaml("config.yaml", '!default')
if yaml_data:
lang = yaml_data.get('LANGUAGE')
return lang + "\\" if lang else ""
else: return ""
|
03ee90ead37060d843ebc71b6d857a719a6d9c7b
| 31,003 |
def __clean_term__(term, convert_letter = True, w_space = True, is_url=True):
"""
Prepares an input term to be queried in a url
Input
----------------------------------------------------------------
term : str
term to clean
convert_letter : bool (default True)
Whether or not to convert letter to greek representation
w_space : bool (default True)
keep space in term, removes spaces if false
is_url : bool (default True)
Replaces spaces with %20 if true
Returns
----------------------------------------------------------------
term : str
same term as input after cleaning process
"""
term = term.lower().strip()
term = __greek_letter_converter__(term, convert_letter=convert_letter)
# Keeps spaces in string
if w_space:
if is_url:
term = term.replace(' ', '%20') # To replace ' ' in request
else:
pass
else:
term = term.replace(' ', '')
return term
|
e07f0f828227176665e85532b0f755084d0b294f
| 31,005 |
from typing import Counter
def merge_vocabs(vocabs, vocab_size=None):
"""
Merge individual vocabularies (assumed to be generated from disjoint
documents) into a larger vocabulary.
Args:
vocabs: `torchtext.vocab.Vocab` vocabularies to be merged
vocab_size: `int` the final vocabulary size. `None` for no limit.
Return:
`torchtext.vocab.Vocab`
"""
merged = sum([vocab.freqs for vocab in vocabs], Counter())
return torchtext.vocab.Vocab(merged,
specials=[PAD_WORD, BOS_WORD, EOS_WORD],
max_size=vocab_size)
|
3926e4717317ca9fd897b98d2db28bfdced74487
| 31,006 |
def find_local_minimum(data, threshold=None):
"""
Find local minimum in data.
:param data: input data.
:param threshold: (optional) local minima whose values are not less than this threshold are discarded.
:return: a 1-D array.
"""
local_min_idx = argrelextrema(data, np.less)
local_min_idx = local_min_idx[0]
if threshold is not None:  # explicit None check so a threshold of 0 is still honored
local_min_idx = [idx for idx in local_min_idx if data[idx] < threshold]
return local_min_idx
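A hedged usage sketch, assuming the snippet's argrelextrema is scipy.signal.argrelextrema and np is numpy.

import numpy as np
from scipy.signal import argrelextrema

data = np.array([3.0, 1.0, 2.0, 0.5, 2.5, 4.0, 3.5])
print(find_local_minimum(data))                  # indices 1 and 3 (values 1.0 and 0.5)
print(find_local_minimum(data, threshold=0.8))   # only index 3 (0.5 is below the threshold)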
|
5b240627d1f5d1203e0ab592ec29be3ab71ad967
| 31,007 |
def mouse_annotations(mouse_file):
"""
Updates and get JSON file for mouse annotations
"""
zipped_rows = get_rows_from_file(mouse_file, '\n')
# Too many processes causes the http requests causes the remote to respond with error
pool = mp.Pool(processes=1)
annotations = pool.map(mouse_single_annotation, zipped_rows)
return prepare_for_bulk_indexing(annotations)
|
ddd89a7ea9a02a6aa646691c36968d66eaa93ff3
| 31,008 |
def thomsen_parameters(vp, vs, rho, lb, dz):
"""
Liner, C, and T Fei (2006). Layer-induced seismic anisotropy from
full-wave sonic logs: Theory, application, and validation.
Geophysics 71 (6), p D183–D190. DOI:10.1190/1.2356997
Args:
vp (ndarray): P-wave interval velocity.
vs (ndarray): S-wave interval velocity.
rho (ndarray): Bulk density.
lb (float): The Backus averaging length in m.
dz (float): The depth sample interval in m.
Returns:
namedtuple: delta, epsilon and gamma.
"""
A, C, F, L, M = backus_parameters(vp, vs, rho, lb, dz)
delta = ((F + L)**2.0 - (C - L)**2.0) / (2.0 * C * (C - L))
epsilon = (A - C) / (2.0 * C)
gamma = (M - L) / (2.0 * L)
ThomsenParameters = namedtuple('ThomsenParameters', ['δ', 'ε', 'γ'])
return ThomsenParameters(delta, epsilon, gamma)
|
9fb2418608d154deb2bf362a5966e32d420d9c74
| 31,009 |
def replace_last(source_string, replace_what, replace_with):
""" Function that replaces the last ocurrence of a string in a word
:param source_string: the source string
:type source_string: str
:param replace_what: the substring to be replaced
:type replace_what: str
:param replace_with: the string to be inserted
:type replace_with: str
:returns: string with the replacement
:rtype: str
:Example:
>>> import chana.lemmatizer
>>> chana.lemmatizer.replace_last('piati','ti','ra')
'piara'
"""
head, _sep, tail = source_string.rpartition(replace_what)
return head + replace_with + tail
|
6fbc36824b960fb125b722101f21b5de732194c5
| 31,010 |
def unescape(text):
"""Unescapes text
>>> unescape(u'abc')
u'abc'
>>> unescape(u'\\abc')
u'abc'
>>> unescape(u'\\\\abc')
u'\\abc'
"""
# Note: We can ditch this and do it in tokenizing if tokenizing
# returned typed tokens rather than a list of strings.
new_text = []
escape = False
for c in text:
if not escape and c == u'\\':
escape = True
continue
new_text.append(c)
escape = False
return u''.join(new_text)
|
7db9fa5bb786ea5c1f988ee26eed07abe66a2942
| 31,011 |
def patched_novoed_tasks(mocker):
"""Patched novoed-related tasks"""
return mocker.patch("applications.models.novoed_tasks")
|
06d2e825bc9407e8e8bdd27170d457f046f8193c
| 31,012 |
def cart2spherical(x, y, z):
"""
Converts to spherical coordinates
:param x: x-component of the vector
:param y: y-component of the vector
:param z: z-component of the vector
:return: tuple with (r, phi, theta)-coordinates
"""
vectors = np.array([x, y, z])
r = np.sqrt(np.sum(vectors ** 2, 0))
theta = np.arccos(vectors[2] / r)
phi = np.arctan2(vectors[1], vectors[0])
if vectors.ndim == 1:
if r == 0:
phi = 0
theta = 0
else:
phi[r == 0] = 0
theta[r == 0] = 0
return r, phi, theta
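A hedged example, assuming np is numpy: a unit vector along +z, then two vectors in the x-y plane.

import numpy as np

r, phi, theta = cart2spherical(0.0, 0.0, 1.0)
# r = 1.0, phi = 0.0 (arctan2(0, 0) is defined as 0), theta = 0.0 (along +z)

r, phi, theta = cart2spherical(np.array([1.0, 0.0]), np.array([0.0, 1.0]), np.array([0.0, 0.0]))
# r = [1, 1], phi = [0, pi/2], theta = [pi/2, pi/2]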
|
702b0fa13f21cbee1fc7fe63219ef1bc8d398269
| 31,013 |
def get_JD(year=None, month=None, day=None, hour=None, min=None, sec=None,
string=None, format='yyyy-mm-dd hh:mm:ss', rtn='jd'):
"""compute the current Julian Date based on the given time input
:param year: given year between 1901 and 2099
:param month: month 1-12
:param day: days
:param hour: hours
:param min: minutes
:param sec: seconds
:param string: date string with format referencing "format" input
:param format: format of string input; currently accepts:
'yyyy-mm-dd hh:mm:ss'
'dd mmm yyyy hh:mm:ss'
:param rtn: optional return parameter; jd or mjd (modified julian)
default=jd
:return jd: Julian date
:return mjd: modified julian date
"""
if string:
if format == 'yyyy-mm-dd hh:mm:ss':
year = float(string[:4])
month = float(string[5:7])
day = float(string[8:10])
hour = float(string[11:13])
min = float(string[14:16])
sec = float(string[17:19])
elif format == 'dd mmm yyyy hh:mm:ss':
months = {'Jan':1, 'Feb':2, 'Mar':3, 'Apr':4, 'May':5,
'Jun':6, 'Jul':7, 'Aug':8, 'Sep':9, 'Oct':10,
'Nov':11, 'Dec':12}
year = float(string[7:11])
month = float(months[f'{string[3:6]}'])
day = float(string[:2])
hour = float(string[12:14])
min = float(string[15:17])
sec = float(string[18:20])
# compute julian date
jd = 1721013.5 + 367*year - int(7/4*(year+int((month+9)/12))) \
+ int(275*month/9) + day + (60*hour + min + sec/60)/1440
if rtn == 'mjd':
# compute mod julian
mjd = jd - 2400000.5
return mjd
else:
return jd
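A quick sanity check (not from the original code): the standard epoch J2000.0, 2000-01-01 12:00:00, corresponds to JD 2451545.0 and MJD 51544.5, which is exactly what the formula above produces.

jd = get_JD(string='2000-01-01 12:00:00')               # 2451545.0
mjd = get_JD(string='2000-01-01 12:00:00', rtn='mjd')   # 51544.5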
|
4350177350d731ea2c98d84f5f4cb56d5c67fb07
| 31,014 |
def get_final_feature(feature_1, feature_2, metric_list):
"""Get the difference between two features.
:param feature_1: the first feature
:type feature_1: numpy array
:param feature_2: the second feature
:type feature_2: numpy array
:param metric_list: the metrics which will be used to compare two feature vectors
:type metric_list: list
:return: the difference between two features
:rtype: numpy array
"""
if feature_1 is None or feature_2 is None:
return None
if metric_list is None:
return np.abs(feature_1 - feature_2)
final_feature_list = []
for metric in metric_list:
distance_matrix = pairwise_distances(np.vstack((feature_1, feature_2)),
metric=metric)
final_feature_list.append(distance_matrix[0, 1])
return np.array(final_feature_list)
|
a67d7da73a10393f119fa549991c277004b25beb
| 31,015 |
def removeZeros(infile, outfile, prop=0.5, genecols=2):
"""Remove lines from `infile' in which the proportion of zeros is equal to or higher than `prop'. `genecols' is the number of columns containing gene identifiers at the beginning of each row. Writes filtered lines to `outfile'."""
nin = 0
nout = 0
with open(infile, "r") as f:
hdr = f.readline()
columns = hdr.split("\t")
ncols = len(columns)-genecols
maxzeros = ncols*prop
with open(outfile, "w") as out:
out.write(hdr)
while True:
line = f.readline()
if line == '':
break
nin += 1
pline = line.rstrip("\r\n").split("\t")
nzeros = 0
for v in pline[genecols:]:
if float(v) == 0:
nzeros += 1
if nzeros < maxzeros:
out.write(line)
nout += 1
return (nin, nout)
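A hedged usage sketch: a tiny tab-separated matrix with two gene-identifier columns; a row is dropped when at least half of its numeric cells are zero (the default prop=0.5).

with open('expr.tsv', 'w') as f:
    f.write('gene\tsymbol\ts1\ts2\ts3\ts4\n')
    f.write('g1\tA\t0\t0\t0\t5\n')   # 3 of 4 values are zero -> dropped
    f.write('g2\tB\t1\t2\t0\t4\n')   # 1 of 4 values is zero  -> kept
print(removeZeros('expr.tsv', 'filtered.tsv'))  # (2, 1): 2 rows read, 1 written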
|
43feba21513be4a8292c08918e16b3e34a73c341
| 31,016 |
def cuboid(origin, bounds, direction, color=Vec4(1), normal_as_color=NAC):
"""
Return GeomNode of the cuboid,
Args:
origin: center of the cuboid
bounds: 3-Tuple of length, width and height
direction: normal vector of the up face
color: Vec4
normal_as_color: whether to use vertex normal as color
"""
dfl = Vec3(-bounds[0], -bounds[1], -bounds[2])
dfr = Vec3(bounds[0], -bounds[1], -bounds[2])
dbr = Vec3(bounds[0], bounds[1], -bounds[2])
dbl = Vec3(-bounds[0], bounds[1], -bounds[2])
ufl = Vec3(-bounds[0], -bounds[1], bounds[2])
ufr = Vec3(bounds[0], -bounds[1], bounds[2])
ubr = Vec3(bounds[0], bounds[1], bounds[2])
ubl = Vec3(-bounds[0], bounds[1], bounds[2])
faces = [
(ufl, ufr, ubr, ubl), # Up
(dfl, dbl, dbr, dfr), # Down
(dfr, dbr, ubr, ufr), # Right
(dfl, ufl, ubl, dbl), # Left
(dfl, dfr, ufr, ufl), # Front
(dbl, ubl, ubr, dbr), # Back
]
D.setup(origin, direction)
m = mesh.Mesh('cuboid')
for f in faces:
pts = []
for p in f:
D.set_pos_hp_d(p.x, p.y, p.z, 0, 0, 0)
pts.append(m.add_vertex(D.pos, color))
m.add_triangle(*reversed(pts[:3]))
m.add_triangle(pts[0], *reversed(pts[2:]))
return m.export(normal_as_color=normal_as_color)
|
78578db28a0fb0e18a36f2f8de4a3bd3fc69f3ac
| 31,017 |
def fireball_get_HS_dat(cd, fname='HS.dat'):
""" """
f = open(cd+'/'+fname, "r")
nlines = int(f.readline())
#print(nlines)
s = f.readlines()
assert nlines==len(s)
i2aoao = np.zeros((nlines,4), dtype=int)
i2h = np.zeros((nlines))
i2s = np.zeros((nlines))
i2x = np.zeros((nlines,3))
for i,line in enumerate(s):
lspl = line.split()
i2aoao[i] = list(map(int, lspl[0:4]))
i2h[i] = float(lspl[4])
i2s[i] = float(lspl[5])
i2x[i] = list(map(float, lspl[6:]))
f.close()
return i2aoao,i2h,i2s,i2x
|
fd63b0def17f80c88b150087f749aa421aa45d48
| 31,018 |
def create(db: Session, request: UsersRequestSchema):
""" Creates a new user.
:return: UserModel
"""
user = UserModel(
username=request.username,
group=request.group
)
db.add(user)
db.commit()
db.refresh(user)
return user
|
f43a220fdfb654bc554f970ad01196ad5b754920
| 31,019 |
def format_timedelta(time):
"""Format a timedelta for use in a columnar format. This just
tweaks stuff like ``'3 days, 9:00:00'`` to line up with
``'3 days, 10:00:00'``
"""
result = str(strip_microseconds(time))
parts = result.split()
if len(parts) == 3 and len(parts[-1]) == 7:
# pad the 7-character time (e.g. '9:00:00') with an extra space so it lines
# up with an 8-character time (e.g. '10:00:00')
return '%s %s  %s' % tuple(parts)
else:
return result
|
1913b4492bfee4541dc7266dd72989d2b38b4dc4
| 31,020 |
def describe_protein(s1, s2, codon_table=1):
"""
"""
codons = util.codon_table_string(codon_table)
description = ProteinAllele()
s1_swig = util.swig_str(s1)
s2_swig = util.swig_str(s2)
codons_swig = util.swig_str(codons)
extracted = extractor.extract(s1_swig[0], s1_swig[1],
s2_swig[0], s2_swig[1], extractor.TYPE_PROTEIN, codons_swig[0])
variants = extracted.variants
#for variant in variants:
# print_var(variant)
#print()
index = 0
while index < len(variants):
if variants[index].type != extractor.IDENTITY:
variant = variants[index]
index += 1
seq_list = AISeqList()
# NOTE: This is for filling.
if index < len(variants):
last_end = variants[index].reference_start
else:
last_end = 1000000
while (index < len(variants) and
variants[index].type & extractor.FRAME_SHIFT):
if last_end < variants[index].sample_start:
seq_list.append(AISeq(
s2[last_end:variants[index].sample_start]))
last_end = variants[index].sample_end
seq_list.append(AISeq(
s2[variants[index].sample_start:
variants[index].sample_end],
start=variants[index].reference_start + 1,
end=variants[index].reference_end,
sample_start=variants[index].sample_start + 1,
sample_end=variants[index].sample_end,
frames=get_frames(variants[index].type)))
# NOTE: Perhaps use trans_open, trans_close to ...
index += 1
if last_end < variant.sample_end:
seq_list.append(AISeq(s2[last_end:variant.sample_end]))
var = var_to_protein_var(s1, s2, variant, seq_list,
weight_position=extracted.weight_position)
description.append(var)
else:
index += 1
if not description:
return ProteinAllele([ProteinVar()])
return description
|
12da2ca688b35324fadc2d897fa076daeb14b91f
| 31,021 |
from typing import Any
from typing import Mapping
import logging
import itertools
import inspect
def _convert_gradient_function(
proto: tf.compat.v1.NodeDef,
graph: Any,
library: Mapping[str, _LibraryFunction],
) -> Mapping[str, _LibraryFunction]:
"""Convert a custom_gradient function."""
op = graph.as_graph_element(proto.name)
input_specs = tuple([tf.TensorSpec.from_tensor(v) for v in op.inputs])
grad_fn_name = str(proto.attr["_gradient_op_type"].s, "utf-8")
if grad_fn_name in library:
return {}
@tf.function
def tf_grad_fn(*grad_args, **grad_kwargs):
fn = tf_ops.gradient_registry.lookup(grad_fn_name)
return fn(None, *grad_args, **grad_kwargs)
concrete_tf_grad_fn = tf_grad_fn.get_concrete_function(*input_specs)
grad_lib = _convert_all_gradient_functions(concrete_tf_grad_fn.graph, library)
logging.info("Converting gradient function %s", grad_fn_name)
grad_inputs = concrete_tf_grad_fn.inputs
grad_captured_inputs = concrete_tf_grad_fn.captured_inputs
num_flat_args = len(grad_inputs) - len(grad_captured_inputs)
func_variables = {v.handle.ref(): v for v in concrete_tf_grad_fn.variables}
# Gradient function can capture tensors in the outer function. Move them
# into the arguments of the gradient function for conversion to JAX.
variable_map = {}
constant_map = {}
external_capture_specs = []
internal_capture_names = []
for inp, cap in zip(grad_inputs[num_flat_args:], grad_captured_inputs):
if cap.dtype == tf.resource:
variable_map[inp.op.name] = func_variables[cap.ref()]
internal_capture_names.append(inp.op.name)
elif hasattr(cap, "numpy"):
constant_map[inp.op.name] = cap.numpy()
internal_capture_names.append(inp.op.name)
else:
external_capture_specs.append(tf.TensorSpec.from_tensor(cap))
structured_grad_input_specs = tree.map_structure(tf.TensorSpec.from_tensor,
concrete_tf_grad_fn.inputs)
structured_grad_input_specs = (structured_grad_input_specs, {})
grad_input_specs = input_specs + tuple(external_capture_specs)
grad_structured_outputs = tuple(
itertools.dropwhile(lambda x: x is None,
concrete_tf_grad_fn.structured_outputs))
grad_output_specs = tuple([
tf.TensorSpec.from_tensor(x) for x in grad_structured_outputs
])
# Nones correspond to the outputs of the original function.
num_fn_outputs = (
len(concrete_tf_grad_fn.structured_outputs) -
len(grad_structured_outputs))
signature = inspect.Signature(
(inspect.Parameter("grad_args", inspect.Parameter.VAR_POSITIONAL),))
jax_grad_fn, jax_grad_params = _convert(
concrete_tf_grad_fn.graph.as_graph_def(),
signature,
structured_grad_input_specs,
grad_output_specs,
captured_input_names=tuple(internal_capture_names),
variable_map=variable_map,
constants=constant_map,
# Note that dict(**a, **b) will raise TypeError on dupliates, unlike {}.
library=dict(**library, **grad_lib),
)
grad_fn = _LibraryFunction(jax_grad_fn, False, jax_grad_params,
grad_input_specs, grad_output_specs,
grad_output_specs[:num_fn_outputs])
return dict(**grad_lib, **{grad_fn_name: grad_fn})
|
0cdfbe6c6a302e3b454be77e1cd03a0419dcd64f
| 31,022 |
def generate_warp_function(chromatic_consts=None,
drift=None,
n_dim=3,
verbose=True):
"""Function to generate a spot translating function"""
## check inputs
if chromatic_consts is None:
_ch_consts = np.zeros([n_dim,1])
else:
_ch_consts = chromatic_consts
if drift is None:
_drift = np.zeros(n_dim)
else:
_drift = drift[:n_dim]
def _shift_function(coords, _ch_consts=_ch_consts,
_drift=_drift):
if np.shape(coords)[1] == n_dim:
_coords = np.array(coords).copy()
elif np.shape(coords)[1] == 11: # this means 3d fitting result
_coords = np.array(coords).copy()[:,4-n_dim:4]
else:
raise ValueError(f"Wrong input coords")
_shifts = []
for _idim in range(n_dim):
_consts = np.array(_ch_consts[_idim])
_ft_order = int(np.sqrt(len(_consts)*2+0.25)-1.5) # only dependent on 2d
_corr_data = []
for _order in range(_ft_order+1):
for _p in range(_order+1):
_corr_data.append(_coords[:,-2]**_p \
* _coords[:,-1]**(_order-_p))
_shifts.append(np.dot(np.array(_corr_data).transpose(), _consts))
# generate corrected coordinates
_corr_coords = _coords - np.stack(_shifts).transpose() - _drift
# return as input
if np.shape(coords)[1] == n_dim:
_output_coords = _corr_coords
elif np.shape(coords)[1] == 11: # this means 3d fitting result
_output_coords = np.array(coords).copy()
_output_coords[:,4-n_dim:4] = _corr_coords
return _output_coords
# return function
return _shift_function
|
38ebf2303494017f2e401a034fa800d04627a790
| 31,023 |
import pdb
def Dfunc(sign,k,N,dphi,si,sd,xF=[],F=[],beta=np.pi/2):
"""
Parameters
----------
sign : int
+1 | -1
k : wave number
N : wedge parameter
dphi : phi-phi0 or phi+phi0
si : distance source-D
sd : distance D-observation
beta : skew incidence angle
xF : array
support of Fresnel function.
F : array
Values of Fresnel function in regard of support
if F =[], fresnel function is computed
otherwise the passed interpolation F is used.
Reference
---------
[1] KOUYOUMJIAN-PATHAK a uniform geometrical theory of diffraction for an edge
in a perfectly conducting surface" IEEE AP nov 74 vol 62 N11
Notes
-----
Di = exp(-j*np.pi/4) / (2*n*sqrt(2*np.pi*k)) * 1 / (np.tan(dphi/n) * sin(beta)) * F(kLa)   ([1] eq 25)
"""
cste = (1.0-1.0*1j)*(1.0/(4.0*N*np.sqrt(k*np.pi)*np.sin(beta)))
rnn = (dphi+np.pi*sign)/(2.0*N*np.pi)
nn = np.zeros(np.shape(rnn))
nn[rnn>0.5] = 1
nn[rnn>1.5] = 2
nn[rnn<-0.5] = -1
nn[rnn<-1.5] = -2
# KLA ref[1] eq 27
L = ((si*sd)*np.sin(beta)**2)/(1.*(si+sd))
AC = np.cos( (2.0*N*nn*np.pi-dphi) / 2.0 )
A = 2*AC**2
KLA = k * L * A
epsi = AC*2.0
angle = (np.pi+sign*dphi)/(2.0*N)
tan = np.tan(angle)
Di = np.empty(KLA.shape)
if len(F) == 0:
Fkla,ys,yL = FreF(KLA)
else :
#pxF = 10**xF
#uF = (np.abs(KLA[:,:]-pxF[:,None,None])).argmin(axis=0)
val = np.maximum(np.log10(np.abs(KLA))-xF[0,None,None],0)
uF2 = (len(F)-1)*(val)/(xF[-1,None,None]-xF[0,None,None])
uF2_int = np.floor(uF2).astype('int')
Fkla = F[uF2_int]
#if np.max(Fkla) > 1:
# Warning('diffRT : Fkla tab probably wrong')
# 4.56 Mac Namara
try:
Di = -cste*Fkla/tan
except:
print('tan=0 : It can happen')
pdb.set_trace()
c5 = np.where(np.abs(tan)<1e-9)
BL = np.ones(Di.shape)*L
Di[:,c5] = 0.5*np.sqrt(BL[c5])
# if np.isinf(Di).any():
# pdb.set_trace()
return(Di)
|
6e85c3f708d6333307fc80c82910361c09dc892c
| 31,024 |
def deprecated(since=nicos_version, comment=''):
"""This is a decorator which can be used to mark functions as deprecated.
It will result in a warning being emitted when the function is used.
The parameter ``since`` should contain the NICOS version number on which
the deprecation starts.
The ``comment`` should contain a hint to the user, what should be used
instead.
"""
def deco(f):
msg = '%r is deprecated since version %r.' % (f.__name__, since)
@wraps(f)
def new_func(*args, **options):
for l in [msg, comment]:
session.log.warning(l)
return f(*args, **options)
new_func.__doc__ += ' %s %s' % (msg, comment)
return new_func
return deco
|
04b77c3daf7aa92cab8dfdaf8fc2afca6eda0d24
| 31,025 |
import resource
def create_rlimits():
"""
Create a list of resource limits for our jailed processes.
"""
rlimits = []
# Allow a small number of subprocess and threads. One limit controls both,
# and at least OpenBLAS (imported by numpy) requires threads.
nproc = LIMITS["NPROC"]
if nproc:
rlimits.append((resource.RLIMIT_NPROC, (nproc, nproc)))
# CPU seconds, not wall clock time.
cpu = LIMITS["CPU"]
if cpu:
# Set the soft limit and the hard limit differently. When the process
# reaches the soft limit, a SIGXCPU will be sent, which should kill the
# process. If you set the soft and hard limits the same, then the hard
# limit is reached, and a SIGKILL is sent, which is less distinctive.
rlimits.append((resource.RLIMIT_CPU, (cpu, cpu+1)))
# Total process virtual memory.
vmem = LIMITS["VMEM"]
if vmem:
rlimits.append((resource.RLIMIT_AS, (vmem, vmem)))
# Size of written files. Can be zero (nothing can be written).
fsize = LIMITS["FSIZE"]
rlimits.append((resource.RLIMIT_FSIZE, (fsize, fsize)))
return rlimits
|
ac8fbfeeae471068ef75cc80520a016902f5d887
| 31,026 |
def save_processed_image_uuid(username, uuid):
"""Updates existing user by adding the uuid of the processed image
:param username: user email as string type which serves as user id
:param uuid: UUID4 of processed image
:returns: adds uuid of processed image to mongo database
"""
try:
user = User.objects.raw({'_id': username}).first()
user.processed_image = uuid
user.save()
except DoesNotExist:
return None
|
f1a063d417a66c5436efe65bbb024478d11ec05c
| 31,028 |
def web_request():
"""Mock web request for views testing."""
return web_request_func()
|
a8327e14fd793181f4b3e669d69e7ccc8edd8213
| 31,029 |
def _NormalizeDiscoveryUrls(discovery_url):
"""Expands a few abbreviations into full discovery urls."""
if discovery_url.startswith('http'):
return [discovery_url]
elif '.' not in discovery_url:
raise ValueError('Unrecognized value "%s" for discovery url' % discovery_url)
api_name, _, api_version = discovery_url.partition('.')
return [
'https://www.googleapis.com/discovery/v1/apis/%s/%s/rest' % (
api_name, api_version),
'https://%s.googleapis.com/$discovery/rest?version=%s' % (
api_name, api_version),
]
|
f361d01006a6e7f7487e06db375ae703ffde0021
| 31,030 |
def computeAirmass(dec, ha, lat=config['observatory']['latitude'],
correct=[75., 10.]):
"""Calculates the airmass for a given declination and HA (in degrees).
By default, assumes that the latitude of the observation is the one set
in the configuration file. If correct is defined, abs(HA) angles greater
than correct[0] are given a flat value correct[1].
"""
dec = np.atleast_1d(dec)
ha = np.atleast_1d(ha) % 360.
ha[ha > 180] -= 360  # element-wise wrap to (-180, 180] so array inputs work too
airmass = (np.sin(lat * np.pi / 180.) * np.sin(dec * np.pi / 180.) +
np.cos(lat * np.pi / 180.) * np.cos(dec * np.pi / 180.) *
np.cos(ha * np.pi / 180.)) ** (-1)
if correct is not None:
airmass[np.abs(ha) > correct[0]] = correct[1]
if len(airmass) == 1:
return airmass[0]
else:
return airmass
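A hedged example: passing the latitude explicitly avoids depending on the config default. An object crossing the zenith (dec equal to the latitude, HA = 0) has airmass exactly 1, and |HA| above 75 degrees is clipped to the flat value 10.

lat = 32.78  # hypothetical observatory latitude in degrees
print(computeAirmass(lat, 0.0, lat=lat))           # ~1.0 (zenith)
print(computeAirmass(lat, [0.0, 80.0], lat=lat))   # [~1.0, 10.0]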
|
df9513e23932fe646bf13df85283380eff5bb871
| 31,031 |
def metric_op(metric):
"""Converts Keras metrics into a metric op tuple.
NOTE: If this method is called in for loop, the runtime is O(n^2). However
the number of eval metrics at any given time should be small enough that
this does not affect performance. Any impact is only during graph construction
time, and therefore has no effect on steps/s.
Args:
metric: Either a `tf.keras.metric.Metric` instance or a tuple of Tensor
value and update op.
Returns:
A tuple of metric Tensor value and update op.
"""
if not isinstance(metric, tf.keras.metrics.Metric):
return metric
vars_to_add = set()
vars_to_add.update(metric.variables)
metric = (metric.result(), metric.updates[0])
_update_variable_collection(tf.GraphKeys.LOCAL_VARIABLES, vars_to_add)
_update_variable_collection(tf.GraphKeys.METRIC_VARIABLES, vars_to_add)
return metric
|
52ad2f5092aa5203e1e850eae628d8e804447847
| 31,032 |
def get_geostore(geostore_id, format='esri'):
""" make request to geostore microservice for user given geostore ID """
config = {
'uri': '/geostore/{}?format={}'.format(geostore_id, format),
'method': 'GET',
}
return request_to_microservice(config)
|
1e5ebdd04f62930de40942efd4894d79d7a7cfd4
| 31,033 |
def srmi(df, n=9):
"""
SRMI modified indicator, srmi(9).
If the close is higher than the close N days ago, SRMI = (close - close N days ago) / close.
If the close is lower than the close N days ago, SRMI = (close - close N days ago) / (close N days ago).
If the close equals the close N days ago, SRMI = 0.
"""
_srmi = pd.DataFrame()
_srmi['date'] = df.date
_m = pd.DataFrame()
_m['close'] = df.close
_m['cp'] = df.close.shift(n)
_m['cs'] = df.close - df.close.shift(n)
_srmi['srmi'] = _m.apply(lambda x: x.cs/x.close if x.cs > 0 else (x.cs/x.cp if x.cs < 0 else 0), axis=1)
return _srmi
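A hedged usage sketch, assuming pd is pandas and the input frame has the 'date' and 'close' columns the function expects.

import pandas as pd

df = pd.DataFrame({'date': pd.date_range('2020-01-01', periods=12),
                   'close': [10, 11, 12, 11, 10, 9, 10, 12, 13, 12, 11, 10.0]})
out = srmi(df, n=9)
# The first 9 rows have no close from 9 days earlier and fall through to 0;
# afterwards each value follows the three cases described in the docstring.
print(out.tail(3))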
|
28bd0e715b34707b742e13230d93a4a104fd80ba
| 31,034 |
def define_pairs(grid: 'np.ndarray'):
"""Take a sequence grid and return all pairs of neighbours.
Returns a list of dictionaries containing the indices of the pairs
(neighbouring only), and the corresponding sequence numbers
(corresponding to the image array)
"""
nx, ny = grid.shape
footprint = np.array([[0, 1, 0],
[1, 0, 1],
[0, 1, 0]])
shape = np.array(footprint.shape)
assert shape[0] == shape[1], 'Axes must be equal'
assert shape[0] % 2 == 1, 'Axis length must be odd'
center = shape // 2
connected = np.argwhere(footprint == 1) - center
pairs = []
for idx0, i0 in np.ndenumerate(grid):
neighbours = connected + idx0
for neighbour in neighbours:
neighbour = tuple(neighbour)
if neighbour[0] < 0 or neighbour[0] >= nx:
pass
elif neighbour[1] < 0 or neighbour[1] >= ny:
pass
else:
assert i0 == grid[idx0]
d = {
'seq0': grid[idx0],
'seq1': grid[neighbour],
'idx0': idx0,
'idx1': neighbour,
}
pairs.append(d)
return pairs
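A hedged example, assuming np is numpy: in a 2x2 grid every cell has two in-bounds neighbours, so 8 ordered pairs are produced.

import numpy as np

grid = np.array([[0, 1],
                 [2, 3]])
pairs = define_pairs(grid)
print(len(pairs))                          # 8
print(pairs[0]['seq0'], pairs[0]['seq1'])  # 0 1 (cell 0 paired with its right neighbour)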
|
6c4aa1fcc22641f07054a3bcfe11a7edc21a3c56
| 31,035 |
def _length_normalization(length_norm_power, length, dtype=tf.float32):
"""Returns length normalization factor."""
return tf.pow(((5. + tf.cast(length, dtype)) / 6.), length_norm_power)
|
d012f3c5c24165e7f529ec55976f969edbcca6e6
| 31,036 |
def unregistercls(self, schemacls=None, data_types=None):
"""Unregister schema class or associated data_types.
:param type schemacls: sub class of Schema.
:param list data_types: data_types to unregister.
"""
return _REGISTRY.unregistercls(schemacls=schemacls, data_types=data_types)
|
50744b0f9fbf96b5f1e0213c216e4c3e6419f0d0
| 31,037 |
def get_relation_mapper() -> RelationMapper:
"""Get the relation mapper. Create and load if necessary."""
global _relation_mapper
if _relation_mapper is not None:
return _relation_mapper
path = mapping_root.joinpath("reltoid")
if not path.is_file():
create_mapping()
_relation_mapper = RelationMapper(__load_mapping(path))
return _relation_mapper
|
5a487c37dbca6b197b781ac8cab3b1f91bcbaf3e
| 31,038 |
def griffin_lim(stftm_matrix, max_iter=100):
""""Iterative method to 'build' phases for magnitudes."""
stft_matrix = np.random.random(stftm_matrix.shape)
y = librosa.core.istft(stft_matrix, hop_size, window_size)
for i in range(max_iter):
stft_matrix = librosa.core.stft(y, fft_size, hop_size, window_size)
stft_matrix = stftm_matrix * stft_matrix / np.abs(stft_matrix)
y = librosa.core.istft(stft_matrix, hop_size, window_size)
return y
|
ada8e7442d1d2c8ed1224d50be42b274fe1229fe
| 31,039 |
from typing import Union
from typing import Optional
def parse_subtitle_stream_id(input_file: str, input_sid: Union[int, str, None]) -> Optional[int]:
"""Translate the CLI `-s` parameter into a stream index suitable for subtitle_options()."""
subtitle_streams = tuple(list_subtitle_streams(input_file))
external_sub_file = find_sub_file(input_file)
if input_sid is None:
return 0 if subtitle_streams or external_sub_file else None
try:
stream_index = int(input_sid)
except ValueError:
pass
else:
return stream_index if stream_index >= 0 else None
language = str(input_sid)
if external_sub_file:
# external subtitles don't have the necessary metadata
raise ValueError("matching external subtitles to a language code is not supported")
for index, stream in enumerate(sorted(subtitle_streams, key=itemgetter("index"))):
if stream_matches_language(stream, language):
return index
raise ValueError("no subtitles found for language: %s" % language)
|
93d62eae4e3080eab7d94bb4fd9a5ed3aabbce0e
| 31,041 |
from typing import Optional
from typing import Iterable
from typing import Dict
import json
def routes_to_geojson(
feed: "Feed",
route_ids: Optional[Iterable[str]] = None,
*,
split_directions: bool = False,
include_stops: bool = False,
) -> Dict:
"""
Return a GeoJSON FeatureCollection of MultiLineString features representing this Feed's routes.
The coordinates reference system is the default one for GeoJSON,
namely WGS84.
If ``include_stops``, then include the route stops as Point features.
If an iterable of route IDs is given, then subset to those routes.
If the subset is empty, then return a FeatureCollection with an empty list of
features.
If the Feed has no shapes, then raise a ValueError.
If any of the given route IDs are not found in the feed, then raise a ValueError.
"""
if route_ids is not None:
D = set(route_ids) - set(feed.routes.route_id)
if D:
raise ValueError(f"Route IDs {D} not found in feed.")
# Get routes
g = geometrize_routes(feed, route_ids=route_ids, split_directions=split_directions)
if g.empty:
collection = {"type": "FeatureCollection", "features": []}
else:
collection = json.loads(g.to_json())
# Get stops if desired
if include_stops:
if route_ids is not None:
stop_ids = (
feed.stop_times.merge(feed.trips.filter(["trip_id", "route_id"]))
.loc[lambda x: x.route_id.isin(route_ids), "stop_id"]
.unique()
)
else:
stop_ids = None
stops_gj = feed.stops_to_geojson(stop_ids=stop_ids)
collection["features"].extend(stops_gj["features"])
return hp.drop_feature_ids(collection)
|
f74bb2f1b533a8c3ae84da59d77daf2e96fb573b
| 31,043 |
def read_input():
"""
Read user input and return state of running the game.
If user press Esc or exit game window stop game main loop.
Returns:
bool: Should game still be running?
"""
# Should we still run game after parsing all inputs?
running = True
# Look at every event in the queue
for event in pygame.event.get():
# Did the user hit a key?
if event.type == pygame.KEYDOWN:
# Was it the Escape key? If so, stop the loop.
if event.key == K_ESCAPE:
running = False
# Did the user click the window close button? If so, stop the loop.
elif event.type == QUIT:
running = False
return running
|
983661f2a63d0f68ac073ff52d49afe1c98c5ef3
| 31,044 |
from dateutil.relativedelta import relativedelta
def add_to_date(date, years=0, months=0, weeks=0, days=0, hours=0, minutes=0, seconds=0, as_string=False, as_datetime=False):
"""Adds `days` to the given date"""
if date is None:
date = now_datetime()
if hours:
as_datetime = True
if isinstance(date, string_types):
as_string = True
if " " in date:
as_datetime = True
try:
date = parser.parse(date)
except ParserError:
frappe.throw(frappe._("Please select a valid date filter"), title=frappe._("Invalid Date"))
date = date + relativedelta(years=years, months=months, weeks=weeks, days=days, hours=hours, minutes=minutes, seconds=seconds)
if as_string:
if as_datetime:
return date.strftime(DATETIME_FORMAT)
else:
return date.strftime(DATE_FORMAT)
else:
return date
|
eec228e54f90a0dfc41be802ba0120906550a007
| 31,045 |
def mandel(x, y, max_iters):
"""
Given the real and imaginary parts of a complex number,
determine if it is a candidate for membership in the Mandelbrot
set given a fixed number of iterations.
"""
c = complex(x, y)
z = 0.0j
for i in range(max_iters):
z = z*z + c
if (z.real*z.real + z.imag*z.imag) >= 4:
return i
return max_iters
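A small sanity check (not from the original code): a point inside the Mandelbrot set never escapes and hits the iteration cap, while a point far outside escapes immediately.

print(mandel(0.0, 0.0, max_iters=50))   # 50 (z stays at 0 and never escapes)
print(mandel(2.0, 2.0, max_iters=50))   # 0  (|z|^2 >= 4 after the first iteration)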
|
d04e2a94470ca540fced5fc3b1b0a9321421de22
| 31,046 |
def IntegerHeap(i):
"""Return an integer heap for 2^i-bit integers.
We use a BitVectorHeap for small i and a FlatHeap for large i.
Timing tests indicate that the cutoff i <= 3 is slightly
faster than the also-plausible cutoff i <= 2, and that both
are much faster than the way-too-large cutoff i <= 4.
The resulting IntegerHeap objects will use 255-bit long integers,
still small compared to the overhead of a FlatHeap."""
if i <= 3:
return BitVectorHeap()
return FlatHeap(i)
|
61a7f44cfd37b38c91a8bcdf3b83b1dc2af98b5b
| 31,048 |
def list_sensors(name_pattern=Sensor.SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
"""
This is a generator function that enumerates all sensors that match the
provided arguments.
Parameters:
name_pattern: pattern that device name should match.
For example, 'sensor*'. Default value: '*'.
keyword arguments: used for matching the corresponding device
attributes. For example, driver_name='lego-ev3-touch', or
address=['in1', 'in3']. When argument value is a list,
then a match against any entry of the list is enough.
"""
class_path = abspath(Device.DEVICE_ROOT_PATH + '/' + Sensor.SYSTEM_CLASS_NAME)
return (Sensor(name_pattern=name, name_exact=True)
for name in list_device_names(class_path, name_pattern, **kwargs))
|
b4ddc4508988e8c93d1388ce6857e736da7fc624
| 31,049 |
def preprocessBoilerPower(df2, key):
"""
calculates the average boiler power, because the boiler is often switched off/on at timestamps that do not follow the capture period
preprocessing is done as well (set index, interpolation)
:param df2: dataframe of boiler
:param key: sensorname
:returns: new dataframe, in a 5 min grid
"""
toProcessDates = [pd.to_datetime('2017-10-15 02:15:02')]
toProcessValues =[0]
dataDict = {'DateTime' : [], key:[]}
firstTime = True
for idx, row in df2.iterrows():
ceiledTime = row["DateTime"].ceil("300s")
if not (firstTime or ceiledTime == recentTime):
#calculate integral
accumVal = 0
for i in range(1, len(toProcessDates)):
timeDel = (toProcessDates[i] - toProcessDates[i-1]).seconds
prevVal = toProcessValues[i-1]
accumVal += prevVal*timeDel
endInterval = toProcessDates[-1].ceil("300s")
timeDel = (endInterval - toProcessDates[-1]).seconds
accumVal += toProcessValues[-1]*timeDel
accumVal /= 300 # P = W /t divide by capture period
#append to dict for new dataframe
dataDict["DateTime"].append(endInterval)
dataDict[key].append(accumVal)
toProcessDates = [endInterval]
toProcessValues = [toProcessValues[-1]]
recentTime = ceiledTime
firstTime = False
toProcessDates.append(row["DateTime"])
toProcessValues.append(row[key])
recentTime = ceiledTime
dfBoil = pd.DataFrame(dataDict)
dfBoil = dfBoil.set_index("DateTime")
dfBoil5min = dfBoil.asfreq("300s")
dfBoil5min[key] = dfBoil5min[key].interpolate(method='linear').where(mask_knans(dfBoil5min[key], int(interpolationTime*60/300)))
return dfBoil5min
|
1deaab79bd82b9df2b91fc296f87deebfb1c03cd
| 31,050 |
def ProbLate(pmf):
"""Computes the probability of a birth in Week 41 or later.
Args:
pmf: Pmf object
Returns:
float probability
"""
return ProbRange(pmf, 41, 50)
|
c04f1047eeff6336975c490a66c736a7519f70b1
| 31,051 |
def inrange(inval, minimum=-1., maximum=1.):
"""
Make sure values are within min/max
"""
inval = np.array(inval)
below = np.where(inval < minimum)
inval[below] = minimum
above = np.where(inval > maximum)
inval[above] = maximum
return inval
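A hedged example, assuming np is numpy: values are clamped to [-1, 1] by default, or to any explicit bounds.

import numpy as np

print(inrange([-2.0, 0.5, 3.0]))                       # [-1., 0.5, 1.]
print(inrange([5.0, 15.0], minimum=0., maximum=10.))   # [5., 10.]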
|
3277ed0d780217713f22f5ca27e7fd15b6758d1c
| 31,052 |
def default_error(exception=None):
"""Render simple error page. This should be overidden in applications."""
# pylint: disable=unused-argument
return HttpResponse("There was an LTI communication error: {}".format(exception), status=500)
|
bb2df5f1fe38c6d72dd383b12713ac0ccc6d9f20
| 31,053 |
def index_for_shop(shop_id, page):
"""List orders for that shop."""
shop = _get_shop_or_404(shop_id)
brand = brand_service.get_brand(shop.brand_id)
per_page = request.args.get('per_page', type=int, default=15)
search_term = request.args.get('search_term', default='').strip()
only_payment_state = request.args.get(
'only_payment_state', type=PaymentState.__members__.get
)
def _str_to_bool(value):
valid_values = {
'true': True,
'false': False,
}
return valid_values.get(value, False)
only_overdue = request.args.get('only_overdue', type=_str_to_bool)
only_processed = request.args.get('only_processed', type=_str_to_bool)
order_state_filter = OrderStateFilter.find(
only_payment_state, only_overdue, only_processed
)
orders = order_service.get_orders_for_shop_paginated(
shop.id,
page,
per_page,
search_term=search_term,
only_payment_state=only_payment_state,
only_overdue=only_overdue,
only_processed=only_processed,
)
orders.items = list(service.extend_order_tuples_with_orderer(orders.items))
return {
'shop': shop,
'brand': brand,
'per_page': per_page,
'search_term': search_term,
'PaymentState': PaymentState,
'only_payment_state': only_payment_state,
'only_overdue': only_overdue,
'only_processed': only_processed,
'OrderStateFilter': OrderStateFilter,
'order_state_filter': order_state_filter,
'orders': orders,
'render_order_payment_method': _find_order_payment_method_label,
}
|
d9be41ba01991aaa08f74bf9546f18b565d9ac8e
| 31,054 |
from typing import Tuple
import requests
def create_token(author: str, password: str, token_name: str) -> Tuple[int, str]:
"""
Create an account verification token.
This token allows for avoiding HttpBasicAuth for subsequent calls.
Args:
author (`str`):
The account name.
password (`str`):
The account password.
token_name (`str`):
The name to be given to the token.
Returns:
(`Tuple[int, str]`)
Return the token id and the sha-1.
Raises:
(`HTTPError`)
Raise the error in request.
"""
url = f'https://hub.towhee.io/api/v1/users/{author}/tokens'
data = {'name': token_name}
try:
r = requests.post(url, data=data, auth=HTTPBasicAuth(author, password))
r.raise_for_status()
except HTTPError as e:
raise e
res = r.json()
token_id = str(res['id'])
token_sha1 = str(res['sha1'])
return token_id, token_sha1
|
ec0d5d4e208fb7fd6c77bfec554c1ea51d0838f4
| 31,055 |
def getPositionPdf(i):
"""Return the position of the square on the pdf page"""
return [int(i/5), i%5]
|
859fd00c1475cfcb4cd93800299181b77fdd6e93
| 31,056 |
def plm_colombo(LMAX, x, ASTYPE=np.float64):
"""
Computes fully-normalized associated Legendre Polynomials and
their first derivative using a Standard forward column method
Arguments
---------
LMAX: Upper bound of Spherical Harmonic Degrees
x: elements ranging from -1 to 1
Keyword arguments
-----------------
ASTYPE: output variable data type
Returns
-------
plms: fully-normalized Legendre polynomials
dplms: first differentials of Legendre polynomials
"""
#-- removing singleton dimensions of x
x = np.atleast_1d(x).flatten().astype(ASTYPE)
#-- length of the x array
jm = len(x)
#-- verify data type of spherical harmonic truncation
LMAX = int(LMAX)
#-- allocating for the plm matrix and differentials
plm = np.zeros((LMAX+1,LMAX+1,jm))
dplm = np.zeros((LMAX+1,LMAX+1,jm))
#-- u is sine of colatitude (cosine of latitude) so that 0 <= s <= 1
#-- for x=cos(th): u=sin(th)
u = np.sqrt(1.0 - x**2)
#-- update where u==0 to eps of data type to prevent invalid divisions
u[u == 0] = np.finfo(u.dtype).eps
#-- Calculating the initial polynomials for the recursion
plm[0,0,:] = 1.0
plm[1,0,:] = np.sqrt(3.0)*x
plm[1,1,:] = np.sqrt(3.0)*u
#-- calculating first derivatives for harmonics of degree 1
dplm[1,0,:] = (1.0/u)*(x*plm[1,0,:] - np.sqrt(3)*plm[0,0,:])
dplm[1,1,:] = (x/u)*plm[1,1,:]
for l in range(2, LMAX+1):
for m in range(0, l):#-- Zonal and Tesseral harmonics (non-sectorial)
#-- Computes the non-sectorial terms from previously computed
#-- sectorial terms.
alm = np.sqrt(((2.0*l-1.0)*(2.0*l+1.0))/((l-m)*(l+m)))
blm = np.sqrt(((2.0*l+1.0)*(l+m-1.0)*(l-m-1.0))/((l-m)*(l+m)*(2.0*l-3.0)))
#-- if (m == l-1): plm[l-2,m,:] will be 0
plm[l,m,:] = alm*x*plm[l-1,m,:] - blm*plm[l-2,m,:]
#-- calculate first derivatives
flm = np.sqrt(((l**2.0 - m**2.0)*(2.0*l + 1.0))/(2.0*l - 1.0))
dplm[l,m,:] = (1.0/u)*(l*x*plm[l,m,:] - flm*plm[l-1,m,:])
#-- Sectorial harmonics
#-- The sectorial harmonics serve as seed values for the recursion
#-- starting with P00 and P11 (outside the loop)
plm[l,l,:] = u*np.sqrt((2.0*l+1.0)/(2.0*l))*np.squeeze(plm[l-1,l-1,:])
#-- calculate first derivatives for sectorial harmonics
dplm[l,l,:] = np.float128(l)*(x/u)*plm[l,l,:]
#-- return the legendre polynomials and their first derivative
return plm,dplm
|
b54f5802fa5d0989c5813f85e3b0e37c653ca5aa
| 31,057 |
import logging
def describe_entity_recognizer(
describe_entity_recognizer_request: EntityRecognizer,
):
"""[Describe a Entity Recognizer Router]
Args:
describe_entity_recognizer_request (EntityRecognizer): [Based on Input Schema]
Raises:
error: [Error]
Returns:
[type]: [Based on Response Model]
"""
try:
logging.info(
f"Describe Entity Recognizer Router: {describe_entity_recognizer_request}"
)
response = ComprehendController().describe_entity_recognizer_controller(
describe_entity_recognizer_request
)
return response
except Exception as error:
logging.error(f"{error=}")
raise error
|
80ee32329b5393d18d4689caeeb50a2332660f20
| 31,058 |
def tool_factory(clsname, command_name, base=CommandBase):
""" Factory for WESTPA commands."""
clsdict = {
'command_name': command_name,
}
return type(clsname, (base,), clsdict)
|
f4fe71f0938dfa17399e81d15233839a547844be
| 31,059 |
from typing import Union
from typing import Sequence
from typing import Any
from typing import Tuple
from typing import cast
from typing import Dict
def plot_single_sd(
sd: SpectralDistribution,
cmfs: Union[
MultiSpectralDistributions,
str,
Sequence[Union[MultiSpectralDistributions, str]],
] = "CIE 1931 2 Degree Standard Observer",
out_of_gamut_clipping: Boolean = True,
modulate_colours_with_sd_amplitude: Boolean = False,
equalize_sd_amplitude: Boolean = False,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot given spectral distribution.
Parameters
----------
sd
Spectral distribution to plot.
cmfs
Standard observer colour matching functions used for computing the
spectrum domain and colours. ``cmfs`` can be of any type or form
supported by the :func:`colour.plotting.filter_cmfs` definition.
out_of_gamut_clipping
Whether to clip out of gamut colours otherwise, the colours will be
offset by the absolute minimal colour leading to a rendering on
gray background, less saturated and smoother.
modulate_colours_with_sd_amplitude
Whether to modulate the colours with the spectral distribution
amplitude.
equalize_sd_amplitude
Whether to equalize the spectral distribution amplitude.
Equalization occurs after the colours modulation thus setting both
arguments to *True* will generate a spectrum strip where each
wavelength colour is modulated by the spectral distribution amplitude.
The usual 5% margin above the spectral distribution is also omitted.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`, :func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
References
----------
:cite:`Spiker2015a`
Examples
--------
>>> from colour import SpectralDistribution
>>> data = {
... 500: 0.0651,
... 520: 0.0705,
... 540: 0.0772,
... 560: 0.0870,
... 580: 0.1128,
... 600: 0.1360
... }
>>> sd = SpectralDistribution(data, name='Custom')
>>> plot_single_sd(sd) # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...AxesSubplot...>)
.. image:: ../_static/Plotting_Plot_Single_SD.png
:align: center
:alt: plot_single_sd
"""
_figure, axes = artist(**kwargs)
cmfs = cast(
MultiSpectralDistributions, first_item(filter_cmfs(cmfs).values())
)
sd = cast(SpectralDistribution, sd.copy())
sd.interpolator = LinearInterpolator
wavelengths = cmfs.wavelengths[
np.logical_and(
cmfs.wavelengths
>= max(min(cmfs.wavelengths), min(sd.wavelengths)),
cmfs.wavelengths
<= min(max(cmfs.wavelengths), max(sd.wavelengths)),
)
]
values = as_float_array(sd[wavelengths])
RGB = XYZ_to_plotting_colourspace(
wavelength_to_XYZ(wavelengths, cmfs),
CCS_ILLUMINANTS["CIE 1931 2 Degree Standard Observer"]["E"],
apply_cctf_encoding=False,
)
if not out_of_gamut_clipping:
RGB += np.abs(np.min(RGB))
RGB = normalise_maximum(RGB)
if modulate_colours_with_sd_amplitude:
with sdiv_mode():
RGB *= cast(NDArray, sdiv(values, np.max(values)))[..., np.newaxis]
RGB = CONSTANTS_COLOUR_STYLE.colour.colourspace.cctf_encoding(RGB)
if equalize_sd_amplitude:
values = ones(values.shape)
margin = 0 if equalize_sd_amplitude else 0.05
x_min, x_max = min(wavelengths), max(wavelengths)
y_min, y_max = 0, max(values) + max(values) * margin
polygon = Polygon(
np.vstack(
[
(x_min, 0),
tstack([wavelengths, values]),
(x_max, 0),
]
),
facecolor="none",
edgecolor="none",
zorder=CONSTANTS_COLOUR_STYLE.zorder.background_polygon,
)
axes.add_patch(polygon)
padding = 0.1
axes.bar(
x=wavelengths - padding,
height=max(values),
width=1 + padding,
color=RGB,
align="edge",
clip_path=polygon,
zorder=CONSTANTS_COLOUR_STYLE.zorder.background_polygon,
)
axes.plot(
wavelengths,
values,
color=CONSTANTS_COLOUR_STYLE.colour.dark,
zorder=CONSTANTS_COLOUR_STYLE.zorder.midground_line,
)
settings: Dict[str, Any] = {
"axes": axes,
"bounding_box": (x_min, x_max, y_min, y_max),
"title": f"{sd.strict_name} - {cmfs.strict_name}",
"x_label": "Wavelength $\\lambda$ (nm)",
"y_label": "Spectral Distribution",
}
settings.update(kwargs)
return render(**settings)
|
b5523c7f278c5bd5fdd91d73f855af3b17c6a63e
| 31,060 |
def dur_attributes_to_dur(d_half, d_semiqvr):
"""
Convert arrays of d_hlf and d_sqv to d.
- See eq. (2) of the paper.
"""
def d_hlf_dur_sqv_to_d(d_hlf, d_sqv):
return 8 * d_hlf + d_sqv
d = d_hlf_dur_sqv_to_d(d_half, d_semiqvr)
return d
|
aeea74f929ef94d94178444df66a30d0d017fd4e
| 31,061 |
import shutil
import traceback
def copy_dir(source, destination):
"""
Copy a directory tree and returns destination path.
Parameters:
source (string): source containing root directory path
destination (string): target root directory path
Returns:
destination (string): copied destination path
"""
try:
shutil.copytree(
source, destination, ignore=shutil.ignore_patterns('.svn'))
return destination
except Exception:
print(traceback.format_exc())
|
5751da6232a64902f0030271671f3e74ecda97e0
| 31,062 |
def polyder_vec(p, m):
"""Vectorized version of polyder for differentiating multiple polynomials of the same degree
Parameters
----------
p : ndarray, shape(N,M)
polynomial coefficients. Each row is 1 polynomial, in descending powers of x,
each column is a power of x
m : int >=0
order of derivative
Returns
-------
der : ndarray, shape(N,M)
polynomial coefficients for derivative in descending order
"""
m = jnp.asarray(m, dtype=int) # order of derivative
p = jnp.atleast_2d(p)
n = p.shape[1] - 1 # order of polynomials
D = jnp.arange(n, -1, -1)
D = factorial(D) / factorial(D-m)
p = jnp.roll(D*p, m, axis=1)
idx = jnp.arange(p.shape[1])
p = jnp.where(idx < m, 0, p)
return p
|
25d0455c4649f0986ea592ec32c49ded921f73e9
| 31,063 |
def normalize_data(data:np.ndarray) -> np.ndarray:
"""
Subtracts the zero point of the time array and removes nans and infs
:param data: Dataset
:return: zeroed in dataset
"""
x = data[0]
y = data[1]
# equality checks cannot detect NaN, so keep only the finite samples instead
finite = np.isfinite(y)
x = x[finite]
y = y[finite]
return np.array(((x - x[0]), y))
|
26f334e5cebabf6d66cee79a47159bb34dd6e18f
| 31,064 |
def _scale_fct_fixed(*args, scale=0):
"""
This is a helper function that is necessary because multiprocessing requires
a picklable (i.e. top-level) object for parallel computation.
"""
return scale
|
75eb728f37466aee8664d5fe435d379cf5d7c6f2
| 31,065 |
def authorize(vendor_api_key, user_api_key, client_class=Client):
"""Authorize use of the Leaf Data Systems API
using an API key and MME (licensee) code.
This is a shortcut function which
instantiates `client_class`.
By default :class:`cannlytics.traceability.leaf.Client` is used.
Returns: `client_class` instance.
"""
client = client_class(vendor_api_key, user_api_key)
return client
|
bf005a58d0063f5171a5b884da59bce361d9df98
| 31,066 |
import six
def isclass(obj):
# type: (Any) -> bool
"""
Evaluate an object for :class:`class` type (ie: class definition, not an instance nor any other type).
"""
return isinstance(obj, (type, six.class_types))
|
a1116d44513c05407368517dc031f023f86d64a7
| 31,067 |
def filter_score_grouped_pair(post_pair):
"""
Filter posts with a positive score.
:param post_pair: pair of post_id, dict with score, text blocks, and comments
:return: boolean indicating whether post has a positive score
"""
_, post_dict = post_pair
post_score = post_dict['score']
return post_score and int(post_score) > 0
|
c824eacd43b44c85fc7acf102fdde2413a7c4d0e
| 31,068 |
def post_nodes():
"""
.. :quickref: Dev API; Update cluster nodes
**Developer documentation**
*Requires admin user.*
Update the status of cluster nodes specified in the request. The endpoint can be used to notify CC-Server after a
dead cluster node has been repaired.
**Example request**
.. sourcecode:: http
POST /nodes HTTP/1.1
{
"nodes": [{
"name": "cc-node2"
}]
}
**Example response**
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{}
"""
return request_handler.post_nodes()
|
5fc105ffa236a798c1646282aa5221bec223179f
| 31,069 |
import torch
def to_one_hot(y_tensor, n_dims=None):
"""
Take integer y (tensor or variable) with n dims &
convert it to 1-hot representation with n+1 dims.
"""
if(n_dims is None):
n_dims = int(y_tensor.max()+ 1)
_,h,w = y_tensor.size()
y_tensor = y_tensor.type(torch.LongTensor).view(-1, 1)
n_dims = n_dims if n_dims is not None else int(torch.max(y_tensor)) + 1
y_one_hot = torch.zeros(y_tensor.size()[0], n_dims).scatter_(1, y_tensor, 1)
y_one_hot = y_one_hot.view(h,w,n_dims)
return y_one_hot.permute(2,0,1).unsqueeze(0)
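A hedged example, assuming torch is PyTorch: a 1 x 2 x 2 label map with classes {0, 1, 2} becomes a 1 x 3 x 2 x 2 one-hot tensor.

import torch

labels = torch.tensor([[[0, 1],
                        [2, 0]]])     # shape (1, 2, 2)
one_hot = to_one_hot(labels)          # shape (1, 3, 2, 2)
print(one_hot.shape)
print(one_hot[0, :, 0, 1])            # tensor([0., 1., 0.]) -- the pixel labelled 1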
|
f70fd5ab95386c6e471019801d9fe0a5dc0dcbda
| 31,070 |
def names():
"""List the names of the available satellites
Returns:
List: List of strings with the names of the available satellites
"""
return sorted(satellites().keys())
|
e5627ef1e6981f529b0cf1cf547a0364beb9f498
| 31,071 |
import re
def read_exechours(filename, verbose = False):
"""
Read exechours_SEMESTER.txt file and return columns as '~astropy.table.Table'.
Parameters
----------
filename : string
program exec hours text file name.
Returns
-------
progtable : '~astropy.table.Table'
Program data table
Columns
-------
'prog_ref' : str
program references
'alloc_time' : str
number of hours allocated to program
'elaps_time' : str
number of hours of elapsed time
'notcharged_time' : str
number of hours not charged
'partner_time' : str
number of hours charged to partner
'prog_time' : str
number of hours charged to program
"""
filetext = []
with open(filename, 'r') as readtext: # read file into memory.
# Split lines where commas ',' are found. Remove newline characters '\n'.
[filetext.append(re.sub('\n', '', line).split(',')) for line in readtext]
readtext.close()
# # For testing, set times elapsed, non-charged, partner, and program to zero.
# for i in range(len(filetext)):
# for j in range(2, len(filetext[i])):
# filetext[i][j] = '0.00'
if verbose:
[print(line) for line in filetext]
rows = np.array(filetext[3:])
columns = ['prog_ref', 'alloc_time', 'elaps_time', 'notcharged_time', 'partner_time', 'prog_time']
exechourstable = Table()
for i in range(len(columns)):
exechourstable[columns[i]] = rows[:, i]
if verbose:
print(exechourstable)
return exechourstable
|
24409895c3d9bef6149a84ecb9eded576fae75fd
| 31,072 |
def is_cond_comment(soup):
"""test whether an element is a conditional comment, return a
boolean.
:param soup: a BeautifulSoup of the code to reduce
:type soup: bs4.BeautifulSoup
"""
return isinstance(soup, bs4.element.Comment) \
and re_cond_comment.search(soup.string)
|
8976343f96fdaf144a324fe76709816fbe500b4d
| 31,073 |
def sequences_end_with_value(sequences, value, axis=-1):
"""Tests if `sequences` and with `value` along `axis`.
Args:
sequences: A matrix of integer-encoded sequences.
value: An integer value.
axis: Axis of `sequences` to test.
Returns:
A boolean `np.nadarray` that indicates for each sequences if it ends with
`value` along `axis`.
"""
sequences = np.asarray(sequences)
return np.all(np.diff((sequences == value).astype(np.int8), axis=axis) >= 0,
axis)
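A hedged example, assuming np is numpy: row 0 keeps the value 0 through to the end, row 1 does not.

import numpy as np

seqs = np.array([[1, 2, 0, 0],
                 [1, 0, 2, 2]])
print(sequences_end_with_value(seqs, value=0))   # [ True False]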
|
fb4b8091ede9c9f06cd8e1419b6a8f973811e923
| 31,074 |
def shortest_path_search(start, successors, is_goal):
"""Find the shortest path from start state to a state
such that is_goal(state) is true."""
# your code here
if is_goal(start):
return [start]
explored = set() # set of states we have visited
frontier = [ [start] ] # ordered list of paths we have blazed
while frontier:
path = frontier.pop(0)
s = path[-1]
for (state, action) in successors(s).items():
if state not in explored:
explored.add(state)
path2 = path + [action, state]
if is_goal(state):
return path2
else:
frontier.append(path2)
return []  # failure: no path found
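A hedged example (not from the original code): successors maps a state to a {next_state: action} dict, and the search returns the path with the fewest actions.

def successors(x):
    return {x + 1: 'add1', x * 3: 'times3'}

print(shortest_path_search(1, successors, lambda s: s == 9))
# -> [1, 'times3', 3, 'times3', 9]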
|
9bd14c1d848f00dec27a88ec25f3062055f5967b
| 31,076 |
from typing import Union
from typing import Optional
def to_tensor(X: Union[np.ndarray, tf.Tensor], **kwargs) -> Optional[tf.Tensor]:
"""
Converts tensor to tf.Tensor
Returns
-------
tf.Tensor conversion.
"""
if X is not None:
if isinstance(X, tf.Tensor):
return X
return tf.constant(X)
return None
|
77e6fc1e52101717ba3671e3a357803c0a13630d
| 31,077 |
def get_explicit_positions(parsed_str_format):
"""
>>> parsed = parse_str_format("all/{}/is/{2}/position/{except}{this}{0}")
>>> get_explicit_positions(parsed)
{0, 2}
"""
return set(
map(
int,
filter(
lambda x: isinstance(x, str) and str.isnumeric(x),
(x[1] for x in parsed_str_format),
),
)
)
|
f6f3720443385f5d514de15d3d63d45cd4ef3408
| 31,078 |
def msg_parser_disable(id):
"""
Disable a Parser
- Disconnect a Parser from a Channel
CLI API for shell scripts & to be called by S3Method
"""
db = current.db
s3db = current.s3db
table = s3db.msg_parser
record = db(table.id == id).select(table.id, # needed for update_record
table.enabled,
table.channel_id,
table.function_name,
limitby = (0, 1)
).first()
if record.enabled:
# Flag it as disabled
record.update_record(enabled = False)
# Do we have an existing Task?
ttable = db.scheduler_task
args = '[%s, "%s"]' % (record.channel_id, record.function_name)
query = ((ttable.function_name == "msg_parse") & \
(ttable.args == args) & \
(ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
exists = db(query).select(ttable.id,
limitby = (0, 1)
).first()
if exists:
# Disable all
db(query).update(status="STOPPED")
return "Parser disabled"
else:
return "Parser already disabled"
|
0e3ff7006fba205eec47fd92e8b15bb4f52ae454
| 31,080 |
def filter_is(field, target):
"""
Check if a log field is the specified value.
A boolean is returned.
Case-insensitive checks are made.
"""
lowfield = field.lower()
retval = lowfield == target
# print "is:\t<%s>\t<%s>\t" % (field, target), retval
return(retval)
|
2975560c54736362f8986d5a9f92af351c4e40fe
| 31,081 |
async def get_server_port(node: Node) -> int:
"""
Returns the port which the WebSocket server is running on
"""
client = node.create_client(GetParameters, "/rosbridge_websocket/get_parameters")
try:
if not client.wait_for_service(5):
raise RuntimeError("GetParameters service not available")
port_param = await client.call_async(GetParameters.Request(names=["actual_port"]))
return port_param.values[0].integer_value
finally:
node.destroy_client(client)
|
5a409440312e6c0b0c01be53741f4182a6dc8f70
| 31,082 |
import sqlite3
def search_vac(search_phrase):
"""Get vacancies with search phrase in JSON"""
con = sqlite3.connect("testdb.db")
cur = con.cursor()
sql = 'SELECT * FROM vacancies WHERE json LIKE ? ORDER BY id DESC LIMIT 100;'
cur.execute(sql, ('%{}%'.format(search_phrase),))  # parameterized to avoid SQL injection
vac = cur.fetchall()
con.close()
data_list = []
for i in vac:
data_list.append('<a href="https://hh.ru/vacancy/' + str(i[0]) + '">' + str(i[0]) + '</a>')
return str(data_list)
|
8669e4ecd50b47a385929536f5cf0faf62577361
| 31,083 |
from dso.task.regression.regression import RegressionTask
from dso.task.control.control import ControlTask
def make_task(task_type, **config_task):
"""
Factory function for Task object.
Parameters
----------
task_type : str
Type of task:
"regression" : Symbolic regression task.
"control" : Episodic reinforcement learning task.
config_task : kwargs
Task-specific arguments. See specifications of task_dict.
Returns
-------
task : Task
Task object.
"""
# Lazy import of task factory functions
if task_type == "regression":
task_class = RegressionTask
elif task_type == "control":
task_class = ControlTask
else:
# Custom task import
task_class = import_custom_source(task_type)
assert issubclass(task_class, Task), \
"Custom task {} must subclass dso.task.Task.".format(task_class)
task = task_class(**config_task)
return task
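
# Usage sketch (illustrative): the kwargs below are assumptions about what
# RegressionTask accepts (names taken from typical dso regression configs),
# not confirmed by this snippet.
# task = make_task("regression",
#                  function_set=["add", "sub", "mul", "div", "sin", "cos"],
#                  dataset="Nguyen-1")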
|
6ebee30330750ab607626b4aa52701b208d70ade
| 31,084 |
def detectar_lenguaje(texto, devolver_proba=False):
"""
    Identifies the language in which the input text is written.
    :param texto: Input text.
    :type texto: str
    :param devolver_proba: Whether to return the confidence score of the \
        identified language. Defaults to `False`.
    :type devolver_proba: bool, optional
    :return: (str) Code of the identified language following the \
        `ISO 639-1 <https://es.wikipedia.org/wiki/ISO_639-1>`_ standard. \
        If `devolver_proba = True`, returns a tuple.
"""
identificador = LanguageIdentifier.from_modelstring(model, norm_probs=True)
if devolver_proba:
return identificador.classify(texto)
else:
return identificador.classify(texto)[0]
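
# Usage sketch (illustrative): assumes `model` and `LanguageIdentifier` come
# from langid.py, e.g. `from langid.langid import LanguageIdentifier, model`.
# detectar_lenguaje("Hola, ¿cómo estás?")        -> 'es'
# detectar_lenguaje("Hello, how are you?", True) -> ('en', <probability>)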
|
1382cc2fb41d53fbc286a425f46171ece102f0aa
| 31,085 |
def get_tablenames():
"""get table names from database. """
con = get_db()
cursor = con.cursor()
select_tn_cmd = "SELECT name FROM sqlite_master WHERE type='table';"
tablenames, res = [], cursor.execute(select_tn_cmd).fetchall()
for tn in res:
tablenames += [tn['name']]
return tablenames
|
1fd55b54dfd06d5735bb56ff0287954132ba6a94
| 31,086 |
def expando_distinct_values(model_class, field_name):
""" Returns all possible values for a specific expando field.
Useful for search forms widgets.
"""
ct = ContentType.objects.get_for_model(model_class)
qs = Expando.objects.filter(content_type=ct, key=field_name)
return qs.distinct().values_list('value', flat=True)
|
0883022b25bf72f13b2eee98b5ddd98deb8b5991
| 31,087 |
def oconner(w, r):
"""
Optimistic for low reps. Between Lombardi and Brzycki for high reps.
"""
return w * (1 + r/40)
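
# Worked example (illustrative): 100 kg lifted for 10 reps estimates a one-rep
# max of 100 * (1 + 10/40) = 125.0.
# oconner(100, 10) -> 125.0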
|
cdcdc44a06e44910361c55217366e3891e76b6a5
| 31,088 |
def unionPart(* partsList):
"""Finds the union of several partitions"""
mat = part2graph(util.concat(* partsList))
parts = graph.connectedComponents(mat.keys(), lambda x: mat[x].keys())
    # remove partition ids from the partitioning
parts = map(lambda part: filter(lambda x: type(x) != int, part), parts)
return parts
|
1aa54b80d37d51e9f75c80a0d95fed24e4c57853
| 31,089 |
def add_upper_log_level(logger, method_name, event_dict):
"""
Add the log level to the event dict.
"""
event_dict["level"] = method_name.upper()
return event_dict
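
# Usage sketch (illustrative): this matches structlog's processor signature
# (logger, method_name, event_dict), so it can be registered like:
# import structlog
# structlog.configure(processors=[add_upper_log_level,
#                                 structlog.processors.JSONRenderer()])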
|
36ccdf335473136fe8188ff99ed539920ee39fa7
| 31,090 |
import pyopencl.clmath
def tanh_cl(a):
""" Hyperbolic tangent of GPUArray elements.
Parameters
----------
a : gpuarray
GPUArray with elements to be operated on.
Returns
-------
gpuarray
tanh(GPUArray)
Examples
--------
>>> a = tanh_cl(give_cl(queue, [0, pi/4]))
[ 0., 0.6557942]
>>> type(a)
<class 'pyopencl.array.Array'>
"""
return pyopencl.clmath.tanh(a)
|
d4fbb13033f48773bd20539837c69d6c66528f7f
| 31,091 |
import time
import numpy as np
def sweep_depth(sketch_class):
"""Return a table of shape (len(depth_values), 2) counting number of times the sketch correctly found the heaviest key."""
# 1st column is for the median, 2nd is for the sign alignment estimator
print(sketch_class.__name__)
num_success = np.zeros((len(depth_values), 2))
max_depth = np.max(depth_values)
print("Depth values", depth_values)
for t in range(num_trials):
seed = rnd_seed + 53 * t
start = time.perf_counter()
sketch = sketch_class(seed, width, max_depth, num_keys)
sweep_depth_trial(sketch, num_success)
# sweep_depth_trial(sketch_class, seed, num_success)
print("Depth trial #", t, "time", time.perf_counter() - start)
return num_success
|
9c034ac6ef0c93488b30bbb6aef3b693489d6490
| 31,094 |
def plot_ensemble_results(model, ensemble, expts = None,
style='errorbars',
show_legend = True, loc = 'upper left',
plot_data = True, plot_trajectories = True):
"""
Plot the fits to the given experiments over an ensemble.
Note that this recalculates the cost for every member of the ensemble, so
it may be very slow. Filtering correlated members from the ensemble is
strongly recommended.
Inputs:
model: Model whose results to plot
ensemble: Parameter ensemble
expts: List of experiment IDs to plot, if None is specified, all
experiments are plotted
style: Style of plot. Currently supported options are:
'errorbars': Plots points and bars for each data point
'lines': Plots a continuous line for the data
show_legend: Boolean that control whether or not to show the legend
loc: Location of the legend. See help(Plotting.legend) for options.
plot_data: Boolean that controls whether the data is plotted
plot_trajectories: Boolean that controls whether the trajectories are
plotted
"""
exptColl = model.get_expts()
nets = model.get_calcs()
if expts is None:
expts = exptColl.keys()
lines, labels = [], []
cW = ColorWheel()
Network_mod.Network.pretty_plotting()
model.cost(ensemble[0])
timepoints = {}
for netId, net in nets.items():
traj = getattr(net, 'trajectory', None)
if traj is not None:
net.times_to_add = scipy.linspace(traj.timepoints[0],
traj.timepoints[-1], 1000)
Network_mod.Network.full_speed()
results = {}
for params in ensemble:
model.cost(params)
for exptId in expts:
expt = exptColl[exptId]
results.setdefault(exptId, {})
dataByCalc = expt.GetData()
for netId in dataByCalc.keys():
results[exptId].setdefault(netId, {})
# Pull the trajectory from that calculation, defaulting to None
# if it doesn't exist.
net = nets.get(netId)
traj = net.trajectory
for dataId in dataByCalc[netId].keys():
results[exptId][netId].setdefault(dataId, [])
scaleFactor = model.GetScaleFactors()[exptId][dataId]
result = scaleFactor*traj.get_var_traj(dataId)
results[exptId][netId][dataId].append(result)
for exptId in expts:
expt = exptColl[exptId]
dataByCalc = expt.GetData()
# We sort the calculation names for easier comparison across plots
sortedCalcIds = dataByCalc.keys()
sortedCalcIds.sort()
for netId in sortedCalcIds:
for dataId, dataDict in dataByCalc[netId].items():
color, sym, dash = cW.next()
if plot_data:
# Pull the data out of the dictionary and into an array
d = scipy.array([[t, v, e] for (t, (v, e))
in dataDict.items()])
                if style == 'errorbars':
l = errorbar(d[:,0], d[:,1], yerr=d[:,2], fmt='o',
color=color, markerfacecolor=color,
marker=sym, ecolor='k', capsize=6)[0]
                elif style == 'lines':
# Make sure we order the data before plotting
order = scipy.argsort(d[:,0], 0)
d = scipy.take(d, order, 0)
l = plot(d[:,0], d[:,1], color=color,
linestyle=dash)
lines.append(l)
if plot_trajectories:
times = model.get_calcs().get(netId).trajectory.get_times()
mean_vals = scipy.mean(results[exptId][netId][dataId], 0)
std_vals = scipy.std(results[exptId][netId][dataId], 0)
lower_vals = mean_vals - std_vals
upper_vals = mean_vals + std_vals
# Plot the polygon
xpts = scipy.concatenate((times, times[::-1]))
ypts = scipy.concatenate((lower_vals, upper_vals[::-1]))
fill(xpts, ypts, fc=color, alpha=0.4)
# Let's print the pretty name for our variable if we can.
name = net.get_component_name(dataId)
labels.append('%s in %s for %s' % (name, netId, exptId))
for netId, net in nets.items():
del net.times_to_add
if show_legend:
legend(lines, labels, loc=loc)
for net in nets.values():
net.times_to_add = None
return lines, labels
|
38d36373dde4959cce8f51d2c52a2c404ee73e4c
| 31,095 |
import math
from multiprocessing import Pool, cpu_count
import numpy as np
from tqdm import tqdm
def generate_synthetic_gaps(
mean: np.ndarray,
covariance: np.ndarray,
size: int,
chunk_size: int,
threshold: int,
seed: int
) -> np.ndarray:
"""Return numpy array with the coordinates of gaps in matrix mask.
Parameters
------------------
mean: np.ndarray,
Mean of biological gaps in considered windows.
covariance: np.ndarray,
Covariance of biological gaps in considered windows.
size: int,
Total number of rows to generate.
chunk_size: int,
Size of the chunk to process per sub-process step.
threshold: int,
Threshold used to convert the multivariate gaussian
distribution to a multivariate binomial.
seed: int,
The initial seed to use to render the gaps.
Returns
------------------
Return numpy array with shape(size, 2) containing
the coordinates of the gaps in the generated matrix mask.
"""
tasks = [
{
"mean": mean,
"covariance": covariance,
"size": min(chunk_size, size-chunk_size*i),
"offset": chunk_size*i,
"threshold": threshold,
"seed": i+seed
}
for i in range(math.ceil(size/chunk_size))
]
with Pool(cpu_count()) as p:
indices = np.vstack(list(tqdm(
p.imap(_generate_synthetic_gaps_wrapper, tasks),
total=len(tasks),
desc="Generating synthetic gaps",
leave=False,
dynamic_ncols=True
)))
p.close()
p.join()
return indices
|
fcfaa5c0dc7a3b1dc47d7d4947504c8a7794d6d1
| 31,096 |
def ulmfit_document_classifier(*, model_type, pretrained_encoder_weights, num_classes,
spm_model_args=None, fixed_seq_len=None,
with_batch_normalization=False, activation='softmax'):
"""
Document classification head as per the ULMFiT paper:
- AvgPool + MaxPool + Last hidden state
- BatchNorm
- 2 FC layers
"""
######## VERSION 1: ULMFiT last state built from Python code - pass the path to a weights directory
if model_type == 'from_cp':
ulmfit_rnn_encoder = ulmfit_rnn_encoder_native(pretrained_weights=pretrained_encoder_weights,
spm_model_args=spm_model_args,
fixed_seq_len=fixed_seq_len,
also_return_spm_encoder=False)
hub_object=None
######## VERSION 2: ULMFiT last state built from a serialized SavedModel - pass the path to a directory containing 'saved_model.pb'
elif model_type == 'from_hub':
ulmfit_rnn_encoder, hub_object = ulmfit_rnn_encoder_hub(pretrained_weights=pretrained_encoder_weights,
spm_model_args=None,
fixed_seq_len=fixed_seq_len,
also_return_spm_encoder=False)
else:
raise ValueError(f"Unknown model type {args['model_type']}")
if fixed_seq_len is None:
rpooler = RaggedConcatPooler(name="RaggedConcatPooler")(ulmfit_rnn_encoder.output)
else:
rpooler = ConcatPooler(name="ConcatPooler")(ulmfit_rnn_encoder.output)
if with_batch_normalization is True:
bnorm1 = tf.keras.layers.BatchNormalization(epsilon=1e-05, momentum=0.1, scale=False, center=False)(rpooler)
drop1 = tf.keras.layers.Dropout(0.4)(bnorm1)
else:
drop1 = tf.keras.layers.Dropout(0.4)(rpooler)
fc1 = tf.keras.layers.Dense(50, activation='relu')(drop1)
if with_batch_normalization is True:
bnorm2 = tf.keras.layers.BatchNormalization(epsilon=1e-05, momentum=0.1, scale=False, center=False)(fc1)
drop2 = tf.keras.layers.Dropout(0.1)(bnorm2)
else:
drop2 = tf.keras.layers.Dropout(0.1)(fc1)
fc_final = tf.keras.layers.Dense(num_classes, activation=activation)(drop2)
document_classifier_model = tf.keras.models.Model(inputs=ulmfit_rnn_encoder.inputs, outputs=fc_final)
return document_classifier_model, hub_object
|
639c7b2b94722a92fda94344529563b05fa2adbc
| 31,098 |
def is_zero_dict( dict ):
"""
Identifies empty feature vectors
"""
has_any_features = False
for key in dict:
has_any_features = has_any_features or dict[key]
return not has_any_features
|
eefb3df1547917fbc11751bbf57212f95388e8b2
| 31,099 |
from typing import Dict
from typing import Callable
def register_scale(
convert_to: Dict[str, Callable] = None, convert_from: Dict[str, Callable] = None
) -> Callable[[Callable], Callable]:
"""Decorator used to register new time scales
The scale name is read from the .scale attribute of the Time class.
Args:
convert_to: Functions used to convert to other scales.
convert_from: Functions used to convert from other scales.
Returns:
Decorator registering scale.
"""
def wrapper(cls: Callable) -> Callable:
name = cls.scale
_SCALES[cls.cls_name][name] = cls
conversions = _CONVERSIONS.setdefault(cls.cls_name, dict())
if convert_to:
for to_scale, converter in convert_to.items():
conversions[(name, to_scale)] = converter
if convert_from:
for from_scale, converter in convert_from.items():
conversions[(from_scale, name)] = converter
return cls
return wrapper
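
# Usage sketch (illustrative): registering a hypothetical "tai" scale. The names
# TaiTime, TimeBase and tai2utc are placeholders; the decorated class must expose
# `cls_name` and `scale` attributes, as the registry lookups above require.
# @register_scale(convert_to={"utc": tai2utc})
# class TaiTime(TimeBase):
#     cls_name = "TimeBase"
#     scale = "tai"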
|
3adcd0d53a3c1622f81a1c444da19c4f2b332d14
| 31,100 |
def read_project(config_path, timesheet_path=None, replicon_options=None):
"""
Read a project dictionary from a YAML file located at the path ``config_path``, and read a project timesheet from the path ``timesheet_path``.
Parse these files, check them, and, if successful, return a corresponding Project instance.
"""
project_dict = read_config(config_path)
if timesheet_path is not None:
project_dict['timesheet_df'] = read_timesheet(timesheet_path,
replicon_options=replicon_options)
return Project(**project_dict)
|
b3b13bd1594f3d1ba0ba3b53af5f8546c59f39ee
| 31,102 |
from typing import Union
from typing import Tuple
def _parse_query_location(
location: Union[Tuple[float, float], Point, MultiPoint, Polygon]
) -> str:
"""Convert given locations into WKT representations.
Args:
location (QueryLocation): Provided location definition.
Raises:
ValueError: Raised for when unable to parse location.
Returns:
str: WKT representation of location.
"""
if isinstance(location, (tuple, list)):
# Assume this is [lon lat] following wkt format
location = Point(location[0], location[1])
if isinstance(location, (Point, MultiPoint, Polygon)):
wkt = location.wkt
else:
raise ValueError("Location is not in correct format.")
return wkt
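
# Usage sketch (illustrative, assumes Point/MultiPoint/Polygon come from
# shapely, as the .wkt attribute access suggests):
# from shapely.geometry import Point
# _parse_query_location((12.5, 41.9))       -> 'POINT (12.5 41.9)'
# _parse_query_location(Point(12.5, 41.9))  -> 'POINT (12.5 41.9)'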
|
17e57b9521144c61c6069d52a8ebcad6101882aa
| 31,103 |
def getuniqueitems(userchoices):
"""return a list of unique items given a bunch of userchoices"""
items = []
for userchoice in userchoices:
if userchoice.item not in items:
items.append(userchoice.item)
return items
|
a7885556604153cf756fb6a29c2e870c27d47337
| 31,104 |
def _as_array(arr, dtype=None):
"""Convert an object to a numerical NumPy array.
Avoid a copy if possible.
"""
if arr is None:
return None
if isinstance(arr, np.ndarray) and dtype is None:
return arr
if isinstance(arr, integer_types + (float,)):
arr = [arr]
out = np.asarray(arr)
if dtype is not None:
if out.dtype != dtype:
out = out.astype(dtype)
if out.dtype not in _ACCEPTED_ARRAY_DTYPES:
raise ValueError("'arr' seems to have an invalid dtype: "
"{0:s}".format(str(out.dtype)))
return out
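
# Usage sketch (illustrative): scalars are wrapped into 1-element arrays, and
# arrays with an acceptable dtype pass through without copying.
# _as_array(3)                       -> array([3])
# _as_array([1.0, 2.0], np.float64)  -> array([1., 2.])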
|
d0336a8cedcd324d5dd26a7b98f59d70f691cf1d
| 31,105 |
def add_region():
""" add a region page function """
# init variables
entry = Region() # creates a model.py instance, instance only has a name right now
error_msg = {}
form_is_valid = True
country_list = Country.query.all()
if request.method == 'GET':
return render_template('regions/add.html', entry=entry, \
country_list=country_list, \
error_msg=error_msg)
if request.method == 'POST':
# validate input
[entry, form_is_valid, error_msg] = form_validate_region(entry)
# check if the form is valid
if not form_is_valid:
# current_app.logger.info('invalid add region')
return render_template('regions/add.html', entry=entry, \
country_list=country_list, \
error_msg=error_msg)
# the data is valid, save it
db.session.add(entry)
db.session.commit()
return redirect(url_for('regions.view_one_region', \
region_id=entry.region_id))
# current_app.logger.error("unsupported method")
|
0798a7bec6474cf83dd870b6529e8f57b69bc882
| 31,107 |
import six
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
This function was excerpted from Django project,
modifications have been applied.
The original license is as follows:
```
Copyright (c) Django Software Foundation and individual contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Django nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if issubclass(type(s), six.text_type):
return s.encode(encoding)
try:
if not issubclass(type(s), six.string_types):
if six.PY3:
if isinstance(s, bytes):
s = six.text_type(s, encoding, errors)
else:
s = six.text_type(s)
elif hasattr(s, '__unicode__'):
s = six.text_type(s)
else:
s = six.text_type(bytes(s), encoding, errors)
else:
# Note: We use decode() here, instead of six.text_type(s, encoding,
# errors), so that if s is a SafeBytes, it ends up being a
# SafeText at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if not isinstance(s, Exception):
raise e
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = ' '.join(smart_text(arg, encoding, strings_only, errors)
for arg in s)
return s.encode(encoding)
|
e4040ea31669acbdbfee5db9f6ce30acdc40bae1
| 31,108 |
def get_host(hostname):
""" Get information about backups for a particular host """
print(f"\nSearching for host '{ hostname }'...\n")
backuppc_data = get_backup_data()
for host in backuppc_data:
if host['hostname'] == hostname:
return host
|
5b017be557d9b5340e44905a49ea6ee2b1ea30b3
| 31,109 |
def gazebo_get_gravity() -> Vector3:
"""
Function to get the current gravity vector for gazebo.
:return: the gravity vector.
:rtype: Vector3
"""
rospy.wait_for_service("/gazebo/get_physics_properties")
client_get_physics = rospy.ServiceProxy("/gazebo/get_physics_properties", GetPhysicsProperties)
physics = client_get_physics()
return physics.gravity
|
8ec560e1fd276be9d2677f94dc5d9b5017e81d23
| 31,110 |
def make_mock_device():
"""
Create a mock host device
"""
def _make_mock_device(xml_def):
mocked_conn = virt.libvirt.openAuth.return_value
if not isinstance(mocked_conn.nodeDeviceLookupByName, MappedResultMock):
mocked_conn.nodeDeviceLookupByName = MappedResultMock()
doc = ET.fromstring(xml_def)
name = doc.find("./name").text
mocked_conn.nodeDeviceLookupByName.add(name)
mocked_device = mocked_conn.nodeDeviceLookupByName(name)
mocked_device.name.return_value = name
mocked_device.XMLDesc.return_value = xml_def
mocked_device.listCaps.return_value = [
cap.get("type") for cap in doc.findall("./capability")
]
return mocked_device
return _make_mock_device
|
80b9b51f586c2b373478020b2a08276e97a6d5ba
| 31,111 |
from typing import Union
def load_file(file_name: str, mode: str = "rb") -> Union[str, bytes]:
"""Loads files from resources."""
file_path = get_resource_path(file_name)
with open(file_path, mode) as file:
return file.read()
|
ed535b091f3ae853e60610022ef01bb38c67b41e
| 31,112 |