content | sha1 | id |
---|---|---|
stringlengths 35-762k | stringlengths 40-40 | int64 0-3.66M |
def getclasesbyid_(numid):
"""
Returns all defined classes by id [number]
"""
data = get_info_token()
user_ = data['User']
rol_ = data['Rol']
data_response = ''
if rol_ == 'Professor':
try:
bool_, data_response = getclassbyid_(data['User'],numid)
if bool_:
code = 200
else:
data_response = 'Forbidden'
code = 403
except Exception as e:
print(e)
data_response = 'Internal Error'
code = 500
elif rol_ == 'Student':
try:
bool_, data_response = getclassbyid_(data['User'],numid)
if bool_:
code = 200
else:
data_response = 'Forbidden'
code = 403
except Exception as e:
print(e)
data_response = 'Internal Error'
code = 500
insert_general_record('getclassbyid/[id]',
{'data': data_response,
'code': code}
,user_)
return jsonify({'data': data_response, 'code': code})
|
75603f40621f51313863aa8977b71241a31c3d84
| 30,067 |
import json
def load_versions():
"""Load Bioversions data."""
with open(VERSIONS_PATH) as file:
return json.load(file)
|
e5e3b2a3dd4ae17fe6cf6b00700b33e9bc55e6b5
| 30,068 |
from bson import ObjectId
async def retrieve_document(document_id: str, collection: str) -> dict:
"""
:param document_id: string form of the document's ObjectId
:param collection: name of the MongoDB collection to search
:return: the matching document, passed through document_id_helper
"""
document_filter = {"_id": ObjectId(document_id)}
if document := await greens.app.state.mongo_collection[collection].find_one(document_filter):
return await document_id_helper(document)
else:
raise ValueError(f"No document found for {document_id=} in {collection=}")
|
4865acd4e553f651a68d694171c76d609eceff98
| 30,069 |
import requests
def get_kalliope_poststukken_uit(path, session, from_,
to=None,
dossier_types=None):
"""
Perform the API-call to get all poststukken-uit that are ready to be processed.
:param path: url of the api endpoint that we want to fetch
:param session: a Kalliope session, as returned by open_kalliope_api_session()
:param from_: start boundary of timerange for which messages are requested
:param to: end boundary of timerange for which messages are requested
:param dossier_types: Only return messages associated to these types of dossier
:returns: tuple of poststukken
"""
params = {
'vanaf': from_.replace(microsecond=0).isoformat(),
'aantal': MAX_REQ_CHUNK_SIZE
}
if to:
params['tot'] = to.replace(microsecond=0).isoformat()
if dossier_types:
params['dossierTypes'] = ','.join(dossier_types)
poststukken = []
req_url = requests.Request('GET', path, params=params).prepare().url
while req_url:
helpers.log("literally requesting: {}".format(req_url))
r = session.get(req_url)
if r.status_code == requests.codes.ok:
r_content = r.json()
poststukken += r_content['poststukken']
req_url = r_content['volgende']
else:
try:
errorDescription = r.json()
except Exception as e:
errorDescription = r
raise requests.exceptions.HTTPError('Failed to get Kalliope poststuk uit (statuscode {}): {}'.format(r.status_code,
errorDescription))
return poststukken
|
2976979bfcccd64939e56c2d0874f6d419028b62
| 30,070 |
import scipy
import numpy as np
def lstsq_cholesky(
coefs: np.ndarray,
result: np.ndarray,
) -> np.ndarray:
"""Solve OLS problem using a Cholesky decomposition."""
left = coefs.T @ coefs
right = coefs.T @ result
return scipy.linalg.solve(left, right, assume_a="pos")
|
08ec0988062daef04b55852d6673fb21031f9a87
| 30,072 |
import warnings
import numpy as np
def _standardize(signals, demean=True, normalize=True, inplace=True,
verbose=False):
""" Center and norm a given signal (time is along first axis)
Attention: this will not center constant signals
but will replace these with columns of ones
Parameters
==========
signals: numpy.ndarray
Timeseries to standardize
demean: bool
if demeaning is required
normalize: bool
if True, shift timeseries to zero mean value and scale
to unit energy (sum of squares).
Returns
=======
std_signals: numpy.ndarray
copy of signals, normalized.
"""
if not inplace:
signals = signals.copy()
std = signals.std(axis=0)
if demean:
not_to_demean = std < TINY
signals -= signals.mean(axis=0)
shape_constant_signals = (signals.shape[0], not_to_demean.sum())
signals[:, not_to_demean] = np.ones(shape_constant_signals)
if verbose: print('not to demean nb of col: ', not_to_demean.sum())
if verbose: print('signals.mean() ', signals.mean())
if signals.shape[0] == 1:
warnings.warn('Standardization of 3D signal has been requested but '
'would lead to zero values. Skipping.')
return signals
if normalize:
if not demean:
# remove mean if not already detrended
signals -= signals.mean(axis=0)
if verbose: print(signals.mean())
#std = np.sqrt((signals ** 2).sum(axis=0))
std[std < TINY] = 1. # avoid divide by 0
# np.finfo(np.float).eps or TINY?
if verbose: print('(std < TINY).sum() = ',(std < TINY).sum())
signals /= std
return signals
|
f207af4e0e18f6f9f544a18ae89d0e86fd8ae493
| 30,075 |
import numpy as np
def bbox_to_poly(bboxes: np.ndarray) -> np.ndarray:
"""
Expects bboxes in xyxy format. Turns each into a 1D array with 8 entries,
every consecutive pair being for one vertex (starting from top left and
going around clockwise)
Works with single bboxes (shape is (4, )) or multiple bboxes (shape is
(N, 4)).
"""
bboxes = np.atleast_2d(bboxes)  # accept single boxes of shape (4,) as documented
polys = np.concatenate([bboxes[:, :2], bboxes[:, 0:1], bboxes[:, 3:4],
bboxes[:, 2:], bboxes[:, 2:3], bboxes[:, 1:2]],
axis=1)
return polys
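# A minimal usage sketch (assumes numpy is available as np): two xyxy boxes
# are expanded into 8-value polygons, vertex order as produced by the
# concatenation above.
example_boxes = np.array([[0, 0, 2, 3], [1, 1, 4, 5]])
example_polys = bbox_to_poly(example_boxes)
# example_polys == [[0, 0, 0, 3, 2, 3, 2, 0],
#                   [1, 1, 1, 5, 4, 5, 4, 1]]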
|
12a06d343ac5a1f4bd16168bf04dc7e9dfaff4ec
| 30,076 |
import numpy as np
from scipy import interpolate
import matplotlib.pyplot as plt
def waypts_2_pwsplines(wp_traj, dt, degree=1, plot=False):
"""
Convert a sequence of multi-dimensional sparse waypoints
to a sequence of interpolated multi-dimensional waypoints via splines.
Parameters
----------
wp_traj: horizon * n_s, a sequence of waypoints.
dt: duration of 1 time step of wp_traj.
degree: the degree of the spline fit.
plot: bool, whether to plot or not.
Returns
----------
fs: list with length = n_s, one spline interpolated trajectory per state dimension.
dts: list with length = horizon, time steps throughout the trajectory.
"""
# The degree of the spline fit.
# It is recommended to use cubic splines.
# Even values of k should be avoided especially with small s values.
# 1 <= k <= 5
assert 1 <= degree <= 5
n_s = wp_traj.shape[1]
# wp_traj = 0, ..., end_time, where end_time=horizon*dt.
horizon = wp_traj.shape[0] - 1
end_time = horizon * dt
dts, step = np.linspace(0.0, end_time, num=horizon + 1,
endpoint=True, retstep=True)
# print("horizon={}, end_time={}, dts={}, step={}".format(
# horizon, end_time, dts, step))
assert abs(step - dt) < 1e-5, "step={}, dt={}".format(step, dt)
assert dts.shape[0] == wp_traj.shape[0]
fs = []
for i in range(n_s):
spl = interpolate.splrep(x=dts, y=wp_traj[:, i].T, k=degree)
fs.append(spl)
if plot:
dts2, _ = np.linspace(0.0, end_time, num=1000,
endpoint=True, retstep=True)
fig, ax = plt.subplots()
ax.plot(dts, wp_traj, 'o', label='data')
pHs_spl = np.zeros((len(dts2), n_s), dtype=np.float32)
for i in range(n_s):
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.splev.html#scipy.interpolate.splev
# When x is not in the interval defined by the knot sequence.
# if ext=2, raise a ValueError
pHs_spl[:, i] = interpolate.splev(x=dts2, tck=fs[i], ext=2)
for i in range(n_s):
ax.plot(dts2, pHs_spl[:, i], label="pwspline")
ax.legend(loc='upper right', ncol=2)
plt.show()
return fs, dts
|
b133159e19513fa80a282a71786e5976cad1ab9a
| 30,077 |
import numpy as np
def _bin_data(aa, bb, bins=10, verbose=False):
"""
If unbinned data has come in, do something smart
with it here.
Uses numpy.histogram for binning.
bins can be:
- int: number of bins
- list or array: bin boundaries, from min to max, half open on right,
like numpy, when bins=[1, 2, 3, 4], the bin edges will be [1,2), [2,3)
and [3,4]. Note that min and max of data can fall out of this!
- str: name of binning method recognized by np.histogram_bin_edges, one of:
auto, fd, doane, scott, stone, rice, sturges, sqrt,
see docs of numpy.histogram_bin_edges
- True: binning will be determined by np.hist
The bins will be the same for both populations.
"""
data = np.array(list(aa) + list(bb))
# First determine bin edges on all data if necessary, then bin.
_, bin_edges = np.histogram(data, bins)
bin_a, _ = np.histogram(aa, bin_edges)
bin_b, _ = np.histogram(bb, bin_edges)
if verbose:
print(f"Bin edges that will be used: {np.round(bin_edges, decimals=2)}")
print("Bin values for population1:", bin_a)
print("Bin values for population2:", bin_b)
return bin_a, bin_b
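# Hypothetical usage sketch: two small populations binned on shared edges
# computed from the pooled data.
pop_a = [1, 2, 2, 3]
pop_b = [2, 3, 3, 4]
binned_a, binned_b = _bin_data(pop_a, pop_b, bins=3)
# Shared edges are [1., 2., 3., 4.], so binned_a == [1, 2, 1] and
# binned_b == [0, 1, 3] (the right-most bin is closed, catching the 4).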
|
a938cabaa2678a89cb1402d553041d409bfa4967
| 30,078 |
import logging
def initialize_logger(logger, logger_id, progress_bar=None, log_queue=None):
"""
Initialize logger for the :class:`pyro.infer.mcmc` module.
:param logger: logger instance.
:param str logger_id: identifier for the log record,
e.g. chain id in case of multiple samplers.
:param progress_bar: a :class:`tqdm.tqdm` instance.
"""
# Reset handler with new `progress_bar`.
logger.handlers = []
logger.propagate = False
if log_queue:
handler = QueueHandler(log_queue)
format = "[%(levelname)s %(msg_type)s %(logger_id)s]%(message)s"
progress_bar = None
elif progress_bar:
format = "%(levelname).1s \t %(message)s"
handler = TqdmHandler()
else:
raise ValueError("Logger cannot be initialized without a "
"valid handler.")
handler.setFormatter(logging.Formatter(format))
logging_handler = MCMCLoggingHandler(handler, progress_bar)
logging_handler.addFilter(MetadataFilter(logger_id))
logger.addHandler(logging_handler)
return logger
|
4ea94d0bc1d6d9943cce2097f19256e3524d9521
| 30,079 |
def test_meta_plus_classmethod(namespaceable, namespace):
"""Test using a classmethod in a Namespace, while messing with metaclasses.
This might have been purely for coverage of some kind? I forget.
"""
class Meta(namespaceable, type(namespaceable)):
"""A throwaway test metaclass."""
with namespace() as namespace_:
pass
class Test(namespaceable, metaclass=Meta):
"""A throwaway test class, for testing classmethods."""
with namespace() as namespace_:
@classmethod
def cls_mthd(cls):
"""Return that a call occurred."""
return 'called'
assert Test().namespace_.cls_mthd() == 'called'
assert Test.namespace_.cls_mthd() == 'called'
|
48ed58e8b4a0c68700ee8941087d015b76596c57
| 30,080 |
import numpy as np
import tvm
def EncodeConstants(const_dict):
"""the NPU requires that weights are compressed and bias/scales are 'encoded', both
of which are performed by this pass.
This pass modifies both the constant dict to contain the post-encoding values of the
constants and the IR to adjust buffer types/sizes/accesses so they align with the
encoded constants. Calls to the Vela API are made to perform the actual compression/
encoding.
"""
new_const_dict = {}
def collect_encoding_definitions(stmt, old_buffer_to_const):
# Map from copy destination to copy source.
copy_map = {}
# List of buffer copies that occurred
copied_buffers = []
# List of encoded buffer information
constant_buffer_replacements = []
def _align_scale_bias(tir_extern_call, bias):
"""Align the scale_bias to 16 bytes."""
value_bytes = bytearray()
value_bytes.extend(bias.tobytes())
# Align to 16
remainder = (len(value_bytes)) % 16
if remainder > 0:
value_bytes.extend(bytearray(16 - remainder))
value = np.frombuffer(value_bytes, dtype="uint8")
return value
accel_config = vela_api.get_accelerator_config()
def _encode_weights(tir_extern_call, weights):
"""Encode the weights for a TIR extern call."""
value_bytes = vela_api.encode_weights(tir_extern_call, weights, accel_config)
value = np.frombuffer(value_bytes, dtype="uint8")
return value
def _declare_constant_buffer(old_buffer, encoded_constants, split_idx):
"""Create a new buffer and add the old buffer and its pointer to the
rewriting maps."""
new_buffer = tvm.tir.decl_buffer(
shape=[len(encoded_constants)],
dtype=str(encoded_constants.dtype),
name=old_buffer.name + "_encoded",
scope=old_buffer.scope(),
)
constant_buffer_replacements.append(
{
"old_buffer": old_buffer,
"new_buffer": new_buffer,
"encoded_constants": encoded_constants,
"split_idx": split_idx,
}
)
def _encode_weights_or_bias(buffer1, buffer2, stmt, encode_func):
"""Encode the weights or align the bias either for one or two cores,
depending on the variant."""
constant = old_buffer_to_const[buffer1]
# If we have just one core, encode the whole constant
if buffer2 is None:
new_const = encode_func(stmt, constant)
return new_const, None
# Assume that the constant tensor has not been flattened yet
assert len(constant.shape) != 1
channels = constant.shape[0]
split_const = np.split(constant, channels, axis=0)
const_list = [split_const[i] for i in range(channels) if i % 2 == 0]
const_to_encode = np.concatenate(const_list, axis=0)
new_const = encode_func(stmt, const_to_encode)
split_idx = len(new_const)
# Encode half of the constant separately for the other core if it exists
assert buffer1.same_as(buffer2)
const2_list = [split_const[i] for i in range(channels) if i % 2 == 1]
const2_to_encode = np.concatenate(const2_list, axis=0)
new_const2 = encode_func(stmt, const2_to_encode)
new_const = np.append(new_const, new_const2).astype("uint8")
return new_const, split_idx
def _visit(stmt):
if isinstance(stmt, tvm.tir.Call):
op = str(stmt.args[0].value)
# Handle copies as a special-case by propagating the buffer information
# from the read to the write pointer.
if op == "ethosu_copy":
read_buffer = stmt.args[1].buffer
write_buffer = stmt.args[3].buffer
# Assert writing to the base of the write_var (pre-StorageRewrite)
assert list(stmt.args[3].indices) == [0]
assert list(stmt.args[1].indices) == [0]
copied_buffers.append({"source": read_buffer, "dest": write_buffer})
copy_map[write_buffer] = read_buffer
ops_with_weights = {
"ethosu_conv2d": tirtocs.translate_ethosu_conv2d,
"ethosu_depthwise_conv2d": tirtocs.translate_ethosu_depthwise_conv2d,
}
if op in ops_with_weights:
npu_op, _ = ops_with_weights[op](stmt)
# Encode the weights
weights_buffer = npu_op.weights[0].address.buffer
if weights_buffer in copy_map:
weights_buffer = copy_map[weights_buffer]
# In case of U65 512 mac variant the weights are split across two cores
# and need to be encoded separately
weights2_buffer = (
npu_op.weights[1].address.buffer
if accel_config == vapi.NpuAccelerator.Ethos_U65_512
else None
)
if weights2_buffer in copy_map:
weights2_buffer = copy_map[weights2_buffer]
new_weights, split_idx = _encode_weights_or_bias(
weights_buffer, weights2_buffer, stmt, _encode_weights
)
_declare_constant_buffer(weights_buffer, new_weights, split_idx)
# Align the scale_bias to 16 bytes
scale_bias_buffer = npu_op.biases[0].address.buffer
if scale_bias_buffer in copy_map:
scale_bias_buffer = copy_map[scale_bias_buffer]
scale_bias2_buffer = (
npu_op.biases[1].address.buffer
if accel_config == vapi.NpuAccelerator.Ethos_U65_512
else None
)
if scale_bias2_buffer in copy_map:
scale_bias2_buffer = copy_map[scale_bias2_buffer]
new_scale_bias, split_idx = _encode_weights_or_bias(
scale_bias_buffer, scale_bias2_buffer, stmt, _align_scale_bias
)
_declare_constant_buffer(scale_bias_buffer, new_scale_bias, split_idx)
tvm.tir.stmt_functor.post_order_visit(stmt, _visit)
return {
"copied_buffers": copied_buffers,
"constant_buffer_replacements": constant_buffer_replacements,
}
def transform_stmt(
stmt, buf_remap, var_remap, pointer_to_buffer, new_buffer_to_const, new_buffer_to_split_idx
):
def _visit_rewrite(stmt):
if isinstance(stmt, tvm.tir.Call):
# For extern calls, we need to rewrite pairs of arguments corresponding to
# base address load and the length of the load.
old_args = list(stmt.args)
new_args = [stmt.args[0]]
for prev_arg, arg in zip(old_args[:-1], old_args[1:]):
# If the previous argument was a load from an
# encoded buffer, the current should be a length.
if (
isinstance(prev_arg, tvm.tir.BufferLoad)
and prev_arg.buffer in new_buffer_to_const
):
buffer_size = np.prod(list(prev_arg.buffer.shape))
arg = buffer_size
# We have to check for split weights/bias for conv2d and depthwise_conv2d
if old_args[0] in ("ethosu_conv2d", "depthwise_conv2d"):
# We have split weights/bias
if prev_arg.buffer in new_buffer_to_split_idx:
split_idx = new_buffer_to_split_idx[prev_arg.buffer]
# The first half of the split buffer
if prev_arg.indices[0] == 0:
arg = split_idx
# the second half of the split buffer
else:
arg = buffer_size - split_idx
new_args.append(arg)
return tvm.tir.Call(stmt.dtype, stmt.op, new_args, stmt.span)
if isinstance(stmt, tvm.tir.Allocate):
# Where a pointer needs rewriting, the allocate for it must be rewritten
allocate_pointer = stmt.buffer_var
if allocate_pointer in var_remap:
new_allocate_pointer = var_remap[allocate_pointer]
new_buffer = pointer_to_buffer[new_allocate_pointer]
return tvm.tir.Allocate(
new_buffer.data,
new_buffer.dtype,
new_buffer.shape,
stmt.condition,
stmt.body,
stmt.span,
)
# The following rewrites would be better expressed by just
# rewriting the Buffers. However ir_transform doesn't
# visit Buffers, so instead we do the next best thing and
# rewrite the nodes which contain the Buffers.
if isinstance(stmt, tvm.tir.BufferLoad):
if stmt.buffer in buf_remap:
new_buffer = buf_remap[stmt.buffer]
new_indices = stmt.indices
offset = new_indices[0]
if offset != 0 and new_buffer in new_buffer_to_split_idx:
offset = new_buffer_to_split_idx[new_buffer]
return tvm.tir.BufferLoad(buf_remap[stmt.buffer], [offset], stmt.span)
if isinstance(stmt, tvm.tir.AttrStmt):
node_pointer = stmt.node
if node_pointer in var_remap:
return tvm.tir.AttrStmt(
var_remap[node_pointer],
stmt.attr_key,
stmt.value,
stmt.body,
stmt.span,
)
return None
return tvm.tir.stmt_functor.ir_transform(
stmt,
None,
_visit_rewrite,
["tir.Call", "tir.Allocate", "tir.BufferLoad", "tir.AttrStmt"],
)
def _ftransform(f, mod, ctx):
# Step 0: Unpack the constant dictionary in terms of the
# functions buffers.
old_buffer_to_const = {}
for i, param in enumerate(f.params):
if i in const_dict:
old_buffer_to_const[f.buffer_map[param]] = const_dict[i]
# Step 1: Collect information on the buffers that will be
# replaced by encodings.
buffer_information = collect_encoding_definitions(f.body, old_buffer_to_const)
# Step 2: Generate variable/buffer remaps, based on the
# collected information.
buf_remap = {}
new_buffer_to_const = {}
new_buffer_to_split_idx = {}
# Any encoded buffers must be replaced
for info in buffer_information["constant_buffer_replacements"]:
buf_remap[info["old_buffer"]] = info["new_buffer"]
new_buffer_to_const[info["new_buffer"]] = info["encoded_constants"]
if info["split_idx"]:
new_buffer_to_split_idx[info["new_buffer"]] = info["split_idx"]
# Any buffers that are copied into from an encoded buffer must
# be replaced.
for info in buffer_information["copied_buffers"]:
copy_source = info["source"]
while copy_source in buf_remap:
copy_source = buf_remap[copy_source]
copy_dest = info["dest"]
if copy_source.shape != copy_dest.shape or copy_source.dtype != copy_dest.dtype:
new_dest = tvm.tir.decl_buffer(
shape=copy_source.shape,
dtype=copy_source.dtype,
name=copy_dest.name,
scope=copy_dest.scope(),
)
buf_remap[copy_dest] = new_dest
if copy_source in new_buffer_to_const:
new_buffer_to_const[new_dest] = new_buffer_to_const[copy_source]
if copy_source in new_buffer_to_split_idx:
new_buffer_to_split_idx[new_dest] = new_buffer_to_split_idx[copy_source]
# Define additional dependent lookup tables.
var_remap = {old.data: new.data for (old, new) in buf_remap.items()}
pointer_to_buffer = {
buf.data: buf for (old, new) in buf_remap.items() for buf in [old, new]
}
# Step 3: Then perform the rewrites
new_body = transform_stmt(
f.body,
buf_remap,
var_remap,
pointer_to_buffer,
new_buffer_to_const,
new_buffer_to_split_idx,
)
# Step 4: Rewrite the buffer map and const dict to instead use the encoded versions
new_buffer_map = {}
for i, param in enumerate(f.params):
buffer = f.buffer_map[param]
if buffer in buf_remap:
buffer = buf_remap[buffer]
if buffer in new_buffer_to_const:
new_const_dict[i] = new_buffer_to_const[buffer].flatten()
elif buffer in old_buffer_to_const:
new_const_dict[i] = old_buffer_to_const[buffer].flatten()
new_buffer_map[param] = buffer
new_f = tvm.tir.PrimFunc(
f.params,
new_body,
f.ret_type,
new_buffer_map,
f.preflattened_buffer_map,
f.attrs,
f.span,
)
return new_f
def _encode_constants(mod):
mod, divided_const_dict = DivideConstants(const_dict)(mod)
const_dict.clear()
for key, value in divided_const_dict.items():
const_dict[key] = value
transform_func = tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.contrib.ethos-u.encode_constants"
)
new_func = transform_func(mod)
return new_func, new_const_dict
return _encode_constants
|
851e080bdf44e6de890fb87a1d2df1c0aefc0bf6
| 30,081 |
import collections
def count_tweet_shed_words_freq(tweet_text, ind_shed_word_dict, shed_word_ind_dict, shed_words_set):
"""
Count the frequency of selected Hedonometer words in tweet text.
:param tweet_text: String of text field of tweet
:return: dict of shed_word_ind to shed_word_freq mapping
"""
'''
Tokenize and count words in tweet text
Ref
- 'We defined a word as any contiguous set of characters bounded by white space and/or a small set of punctuation characters.'
- 'We therefore included all misspellings, words from any language used on Twitter, hyperlinks, etc.'
- 'All pattern matches we made were case-insensitive, and we did not perform stemming.'
'''
tweet_text_words = tweet_text.lower().split()
counter = collections.Counter(tweet_text_words)
tweet_shed_words_freq_dict = {int(shed_word_ind_dict[tweet_text_word]): int(tweet_text_word_freq)
for tweet_text_word, tweet_text_word_freq in list(counter.items()) if tweet_text_word in shed_words_set}
return tweet_shed_words_freq_dict
|
129130f5b9def7320c6e3dd2d8ef82493d21eb8a
| 30,082 |
import dateparser
def parse_date(text):
"""Return POSIX timestamp obtained from parsing date and time from given
date string.
Return None if no text given.
"""
if text:
return dateparser.parse(text).timestamp()
|
6f089096cdd43eb2d0af1db6066e75a6ec6efb09
| 30,083 |
def format(table, field, fmt, **kwargs):
"""
Convenience function to format all values in the given `field` using the
`fmt` format string.
The ``where`` keyword argument can be given with a callable or expression
which is evaluated on each row and which should return True if the
conversion should be applied on that row, else False.
"""
conv = lambda v: fmt.format(v)
return convert(table, field, conv, **kwargs)
|
a66e351bca42f8e385d8859db720e86c7e6fac7c
| 30,084 |
import numpy as np
import skimage.color
def colorbias(img, refcolor=np.array([1.,0,0])):
""" Compute Color Bias """
img_hsv = skimage.color.rgb2hsv(img)
refcolor = skimage.color.rgb2hsv(refcolor.reshape(1,1,3)) # to make it compatible
#dH = np.abs(np.sin((img_hsv[...,0] - refcolor[...,0])))
#dS = np.abs(img_hsv[...,1] - refcolor[...,1])
#dV = np.abs(img_hsv[...,2] - refcolor[...,2])
hsv2xyz = lambda h,s,v : np.stack([s*np.sin(h*2*np.pi), s*np.cos(h*2*np.pi), v], axis=-1)
xyz_ref = hsv2xyz(*refcolor.transpose((2,0,1)))
xyz_img = hsv2xyz(*img_hsv.transpose((2,0,1)))
return 1 - ((xyz_ref - xyz_img)**2).sum(axis=-1, keepdims=True)**.5
|
5ab089fd7a72fe647e5da5c62380544b87c41739
| 30,085 |
import six
import numbers
import collections
def walk_json(e, dict_fct=i, list_fct=i, num_fct=i, str_fct=i, bool_fct=i, null_fct=i, not_found=not_found_default):
"""
Go throught a json and call each function accordingly of the element type
for each element, the value returned is used for the json output
This doesn't change the input json, but re-create a new json object.
(calling it without any function return a copy of a json for example)
The calling is deep-first.
ex : ['a', {'b':3}] will call :
- str_fct('a')
- num_fct(3)
- dict_fct({'b':3})
- list_fct(['a', {'b':3}])
and if every function is set to return None
ex : ['a', {'b':3}] will call :
- str_fct('a')
- num_fct(3)
- dict_fct({'b':None})
- list_fct([None, None])
:param e:
:param dict_fct:
:param list_fct:
:param num_fct:
:param str_fct:
:param bool_fct:
:param null_fct:
:param not_found:
:return:
"""
if e is None:
return null_fct(e)
if isinstance(e, six.string_types):
return str_fct(e)
# bool must be checked before Number, since bool is a subclass of int
if isinstance(e, bool):
return bool_fct(e)
if isinstance(e, numbers.Number):
return num_fct(e)
param = { # only create it when needed
'dict_fct': dict_fct, 'list_fct': list_fct, 'num_fct': num_fct,
'str_fct': str_fct, 'bool_fct': bool_fct, 'null_fct': null_fct,
'not_found': not_found,
}
if isinstance(e, collections.Mapping):
return dict_fct({k: walk_json(v, **param) for k, v in e.items()})
if isinstance(e, collections.Iterable):
return list_fct([walk_json(v, **param) for v in e])
return not_found(e)
|
d0c9f57180327b8fca218f3ba4f413b410c2a2da
| 30,086 |
import numpy as np
def colIm(z):
"""Returns a colour where log(Im(z)) is represented by hue.
This makes it easy to see where Im(z) converges to 0"""
h = np.log(z.imag)*pi
l = np.clip(0.5+0.05*z.real,0.1,0.9)
s = 1
c = hsl2rgb(h,s,l)
return c
|
0ebefac4c7c5355ba735bfa46177b6f267f74cb9
| 30,087 |
def gsl_blas_zdotc(*args, **kwargs):
"""gsl_blas_zdotc(gsl_vector_complex const * X, gsl_vector_complex const * Y, gsl_complex * dotc) -> int"""
return _gslwrap.gsl_blas_zdotc(*args, **kwargs)
|
953a9cd06d0f7a948d625acad9fd8ec8ce31249e
| 30,088 |
import numpy as np
def random_neighbour(vec,myid,n):
"""Generates a random binary vector that is 1-bit away (a unit Hamming distance)
Args:
vec (list or numpy.ndarray): An input vector
myid (int): An id of an agent of interest
n (int): Number of tasks allocated to a single agent
Returns:
list: A vector with one bit flipped for agent myid
"""
rnd = np.random.choice(range(myid*n,(myid+1)*n))
vec[rnd] = 1- vec[rnd]
output = vec
return output
|
816115c335e556815ff8ee20ae50ac9b9c9d6f22
| 30,089 |
import torch
import numpy as np
def _degree_of_endstopping(model, block, image, weight_id0, weight_id1, weight_id2):
"""Passes image to model and records the activations of block. The
activations are normalized to be in [0, 1] and then summed over using
different weighted masks.
Parameters
----------
model : nn.Module
the network the test image is passed through
block : nn.Module
the sub-module whose activations are recorded
image : np.array
test image to compute the degree of endstopping
weight_id0 : np.array
mask for intrinsic dimension 0
weight_id1 : np.array
mask for intrinsic dimension 1
weight_id2 : np.array
mask for intrinsic dimension 2
Returns
-------
id0 : list of float
For each feature map: the intrinsic dimension 0 value
id1 : list of float
For each feature map: the intrinsic dimension 1 value
id2 : list of float
For each feature map: the intrinsic dimension 2 value
activations : np.array
actual activations of block when using image as input
"""
act_getter = ActivationGetter(block)
image = torch.Tensor(image[np.newaxis, :, :])
image = torch.cat([image] * 3, 0).unsqueeze(0).to(get_device())
# zero mean and standard deviation of one
# this is the easiest way to have a proper normalization
image = (image - image.mean()) / image.std()
model(image)
activations = act_getter.out
activations = activations.detach().cpu().numpy()
activations = normalize_act(activations)
id0 = []
id1 = []
id2 = []
for i in range(activations.shape[1]):
tmp = activations[0, i, ...]
id0.append((tmp.copy() * weight_id0).sum())
id1.append((tmp.copy() * weight_id1).sum())
id2.append((tmp.copy() * weight_id2).sum())
return id0, id1, id2, activations
|
830e8fd5b008fb8d2a852f1f365d3da1ddc24075
| 30,090 |
def posts(parsed):
"""Calculates number of every type of post"""
num_t_post = 0
num_corner_post = 0
num_line_post = 0
num_end_post = 0
num_gate_posts = 0
for post in parsed.posts():
if not post.isRemoval:
if post.postType == 'tPost':
num_t_post += 1
if post.postType == 'cornerPost':
num_corner_post += 1
if post.postType == 'endPost':
num_end_post += 1
if post.postType == 'gatePost':
num_gate_posts += 1
for fence in parsed.fences:
if not fence.isRemoval:
if (fence.length/12) % 8 == 0:
num_line_post += (fence.length/12) // 8 - 1
else:
num_line_post += (fence.length/12) // 8
num_steel_post = num_t_post + num_corner_post + num_line_post + num_end_post + num_gate_posts
return num_t_post, num_corner_post, num_line_post, num_end_post, num_gate_posts, num_steel_post
|
e8c5905a38ab560f0dba595eecf67865efc27121
| 30,091 |
import pandas as pd
def _compute_populations(mvts: pd.DataFrame, label_col_name) -> dict:
"""
A private method that computes the population corresponding to each class label.
:param mvts: The dataframe whose class population is of interest.
:param label_col_name: The column-name corresponding to the class labels in `mvts`.
:return: A dictionary of class labels (as keys) and class populations (as values).
"""
class_labels: list = _extract_labels(mvts, label_col_name)
decomposed_mvts = _decompose_mvts(mvts, class_labels, label_col_name)
pop_dict = {label: len(decomposed_mvts[label]) for label in decomposed_mvts.keys()}
return pop_dict
|
d47b78d8f30f6cb15c9b98cb13d9fb7c883d62f1
| 30,092 |
import pandas as pd
import plotly.graph_objects as go
def plot_avg_sum_capacity_comparison(
df: pd.DataFrame, port1: str, port2: str, vessel_type: str
) -> go.Figure:
"""
Returns a figure for the first chart on the Compare tab. It shows per day comparison between
average sum of capacity by applied conditions.
:param df: Pandas DataFrame, input data
:param port1: str, a port to compare
:param port2: str, a port to compare
:param vessel_type: str, vessel type of interest
:return: Plotly figure
"""
data = helpers.filter_by_vessel_and_port(
df=df, port1=port1, port2=port2, vessel_type=vessel_type
)
data_port_1 = (
data[data["port"] == port1]
.groupby(by="date")
.sum()
.reset_index()[["date", "sum_dwt"]]
)
data_port_2 = (
data[data["port"] == port2]
.groupby(by="date")
.sum()
.reset_index()[["date", "sum_dwt"]]
)
fig_data = [
go.Bar(
x=data_port_1["date"].tolist(),
y=data_port_1["sum_dwt"].tolist(),
name=port1,
marker_color=styles.COLOR_APPSILON_1,
),
go.Bar(
x=data_port_2["date"].tolist(),
y=data_port_2["sum_dwt"].tolist(),
name=port2,
marker_color=styles.COLOR_APPSILON_8,
),
]
return go.Figure(
data=fig_data,
layout=styles.generate_plot_layout(
x_title=strings.CHART_COMPARE_CAPACITY_X,
y_title=strings.CHART_COMPARE_CAPACITY_Y,
bar_mode="group",
),
)
|
8551fac8720c3d8433a5242c8ea099626a5b6e0c
| 30,093 |
def num_neighbours(lag=1):
"""
Calculate the number of neighbour pixels for a given lag.
Parameters
----------
lag : int
Lag distance, defaults to 1.
Returns
-------
int
Number of neighbours
"""
win_size = 2*lag + 1
neighbours = win_size**2 - (2*(lag-1) + 1)**2
return neighbours
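# Quick sanity check of the formula: a lag of 1 is the usual 8-connected
# neighbourhood, and each further lag adds the next ring of pixels.
assert num_neighbours(lag=1) == 8
assert num_neighbours(lag=2) == 16
assert num_neighbours(lag=3) == 24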
|
aca8c4e1fdac14cde111a7db2dd274767fc53d5a
| 30,096 |
import requests
def get_solr_data_recommend(function, reader, rows=5, sort='entry_date', cutoff_days=5, top_n_reads=10):
"""
:param function: Solr function wrapped around the topn reader query
:param reader: id of the reader whose history seeds the recommendation
:param rows: number of rows to return
:param sort: field used to rank the reader's records
:param cutoff_days: only consider records entered within this many days
:param top_n_reads: number of the reader's top reads fed into the query
:return: tuple of (result, query, status_code)
"""
query = '({function}(topn({topn}, reader:{reader}, {sort} desc)) entdate:[NOW-{cutoff_days}DAYS TO *])'.format(
function=function, topn=top_n_reads, reader=reader, sort=sort, cutoff_days=cutoff_days)
try:
result, status_code = get_solr_data(rows, query, fl='bibcode')
except requests.exceptions.HTTPError as e:
current_app.logger.error(e)
result = {'error from solr':'%d: %s'%(e.response.status_code, e.response.reason)}
status_code = e.response.status_code
return result, query, status_code
|
b7dbf5fc2cd8772532ab98115369199e87e80a3c
| 30,097 |
def normalize_key(key):
"""
Formats the key to be used in the JSON.
Args:
key (string): Field collected while scraping the MEC data.
Returns:
Returns the formatted string to be used in the JSON.
"""
aux = key.strip(' :').replace(' ', '_').lower()
aux = aux.replace('_-_sigla', '')
return normalize('NFKD', aux).encode('ASCII','ignore')
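# Usage sketch, assuming the module-level `normalize` used above is
# unicodedata.normalize: accents are stripped, spaces become underscores,
# and the result is a bytes object.
#
# >>> normalize_key(' Código da IES :')
# b'codigo_da_ies'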
|
1065bbbd4d6c435fe9db477ee0f7a047692eaf63
| 30,098 |
def line(p0=(0,0), p1=(1,0)):
"""
p0 p1
o-----------o
+--> u
"""
p0 = np.asarray(p0, dtype='d')
p1 = np.asarray(p1, dtype='d')
points = np.zeros((2,3), dtype='d')
points[0,:p0.size] = p0
points[1,:p1.size] = p1
knots = [0,0,1,1]
return NURBS([knots], points)
|
0abf0688a2e7f84322f56b35796d75497f6f65c2
| 30,099 |
def superior():
"""a fixture for lake superior"""
superior = LakeFactory(lake_name="Lake Superior", abbrev="SU")
return superior
|
db21ff1ffbaf6be91dd8f0907083ee87bc4541de
| 30,100 |
import numpy as np
def hsv_mask(img, hue_mask, sat_mask, val_mask):
"""
Returns a binary image based on the mask thresholds
:param img: The image to mask
:param hue_mask: Tuple of (hue_min, hue_max)
:param sat_mask: Tuple of (sat_min, sat_max)
:param val_mask: Tuple of (val_min, val_max)
:return: Binary image mask
"""
hue_mask = h_binary(img, hue_mask[0], hue_mask[1])
sat_mask = s_binary(img, sat_mask[0], sat_mask[1])
val_mask = v_binary(img, val_mask[0], val_mask[1])
mask = np.zeros_like(hue_mask)
mask[(hue_mask == 1) & (sat_mask == 1) & (val_mask == 1)] = 1
return mask
|
194cb97b42850244b601653551d359b2c42caacd
| 30,101 |
import numpy as np
def convert_parameter_dict_to_presamples(parameters):
"""Convert a dictionary of named parameters to the form needed for ``parameter_presamples``.
``parameters`` should be a dictionary with names (as strings) as keys and Numpy arrays as values. All Numpy arrays should have the same shape.
Returns (list of names, numpy samples array).
"""
names = sorted(parameters.keys())
shapes = {obj.shape for obj in parameters.values()}
if len(shapes) != 1:
raise ValueError(
"Hetergeneous array shapes ({}) not allowed".format(shapes)
)
return names, np.vstack([parameters[key].reshape((1, -1)) for key in names])
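# Usage sketch (assumes numpy is available as np): names come back sorted
# and the samples array stacks one row per parameter in that order.
example_names, example_samples = convert_parameter_dict_to_presamples({
"beta": np.array([1.0, 2.0, 3.0]),
"alpha": np.array([4.0, 5.0, 6.0]),
})
# example_names == ['alpha', 'beta']
# example_samples == [[4., 5., 6.], [1., 2., 3.]]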
|
f136c9c795ab4c7023e774866c061b19488cc81f
| 30,102 |
import numpy as np
import skimage.draw
import pycocotools.mask
from PIL import Image
def convert_to_rle(annotation, width, height):
"""Convert complex polygons to COCO RLE format.
Arguments:
annotation: a dictionary for an individual annotation in Darwin's format
Returns: an annotation in encoded RLE format and a bounding box
@author Dinis Gokaydin <[email protected]>
"""
# complex polygons have multiple "paths" (polygons)
polygons = annotation['complex_polygon']['path']
mask = np.zeros([height, width, len(polygons)], dtype=np.uint8)
for ind_pol, pol in enumerate(polygons):
pol = fix_polygon(pol, width, height)# not sure whether assignment is necessary here
all_points_y = []; all_points_x = [];
for pt in pol:
all_points_y.append(pt['y'])
all_points_x.append(pt['x'])
# Get indexes of pixels inside the polygon and set them to 1
rr, cc = skimage.draw.polygon(all_points_y, all_points_x)
mask[rr, cc, ind_pol] = 1
# once we sum all the polygons any even values are holes (this should allow for "ring" holes, but it is not tested)
mask = ((np.sum(mask, axis=2)%2) == 1).astype(np.uint8)
# Return mask, and array of class IDs of each instance
return pycocotools.mask.encode(np.asarray(mask, order="F")), Image.fromarray(mask).getbbox()
|
a9562e95817585798164a91ef793841143329dd7
| 30,103 |
import socket
def getfqdn(name=None):
"""return (a) local IPv4 or v6 FQDN (Fully Qualified Domain Name)
if name is not given, returns local hostname
may raise socket.gaierror"""
return _getfqdn(socket.AF_UNSPEC, name)
|
cbebf1e3deda3a095996034b559af8f2ae4692c3
| 30,104 |
def create_answer_dict(elem, restrict_elem=None, checkbox=False):
"""
Construct dict with choices to fulfil form's div attribute
:param elem: ElemntTree element
:param restrict_elem: name of element which is not included in choice text
:param checkbox: boolean flag to work return data for checkbox problem
:return: tuple, (constructed dict, correct_answer)
"""
answer_dict = {}
correct_answer = []
for index, choice in enumerate(elem.iter('choice')):
answer_dict['choice{}'.format(index)] = parse_text_field(choice, tagwrap=False, restrict_elem=restrict_elem)
if choice.attrib['correct'] == 'true':
correct_answer.append(index)
return answer_dict, str(correct_answer if checkbox else correct_answer[0])
|
c87d5d22b3f779f4645263ae18febaa95984d614
| 30,105 |
def fasta(file_allname: str):
"""
Takes the path of file_allname.
:param file_allname: path of the FASTA file to read
:return: list of sequences in FASTA format
"""
try:
# file_allname = input("Enter the file you want to analyze, including the extension\n")
f = open(file_allname).read()
fasts = f.split(">")
fast_seq = []
index = 0
for fast in fasts:
if fast:
fast = ">" + fast
fast_seq.append(fast)
index = index + 1
return fast_seq
except:
print("请正确输入文件名称。")
|
bbd03531a7d311c322fdbd66e401788fb6526120
| 30,106 |
def ask_version(version):
""" interact with user to determine what to do"""
upgrades = get_upgrades()
latest = get_latest(version, upgrades)
answer = False
if latest > version:
msg = "a new version (%s) is available. You have %s. Upgrade?" % (latest, version)
answer = True if input("%s (y/N) " % msg).lower() == 'y' else False
if answer:
path = get_path(version, latest, upgrades)
else:
print "you already have the latest revision (%s)" % latest
if version == latest or not answer:
while True:
msg = "do you want to up/down grade to a different revision? If so, which version?"
answer = input("%s (rev no) " % msg)
if not answer.isdigit():
print("please enter a version NUMBER")
continue
answer = int(answer)
path = get_path(version, answer, upgrades)
break
return path
|
1e6c7c87eeb4e222efd2b952e9d23b7c95275f85
| 30,107 |
def format_size(size):
"""
:param float size:
:rtype: str
"""
size = float(size)
unit = 'TB'
for current_unit in ['bytes', 'KB', 'MB', 'GB']:
if size < 1024:
unit = current_unit
break
size /= 1024
return '{0:.2f}'.format(size).rstrip('0').rstrip('.') + ' ' + unit
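# A couple of illustrative values; trailing zeros and dots are stripped.
assert format_size(500) == '500 bytes'
assert format_size(1536) == '1.5 KB'
assert format_size(1024 ** 2) == '1 MB'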
|
95470360fcc34df5a51a7cf354138413b41940aa
| 30,108 |
def make_full_block_header_list(block_header):
"""Order all block header fields into a list."""
return make_short_block_header_list(block_header) + [
block_header.timestamp,
block_header.extraData,
]
|
59bcfdd3cefd3a1b7a8dcaf063964eb27dbafd67
| 30,109 |
def rc_seq(seq=""):
"""Returns the reverse compliment sequence."""
rc_nt_ls = []
rc_dict = {
"a": "t",
"c": "g",
"t": "a",
"g": "c",
"n": "n",
"A": "T",
"C": "G",
"T": "A",
"G": "C",
"N": "N"
}
rc_nt_ls = [rc_dict[seq[i]] for i in range(len(seq)-1, -1, -1)]
rc_seq_ = "".join(rc_nt_ls)
return rc_seq_
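# Quick check: the sequence is reversed and complemented, preserving case
# and passing 'n'/'N' through unchanged.
assert rc_seq("ATGC") == "GCAT"
assert rc_seq("acgTN") == "NAcgt"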
|
827877a76d4ffbe61e40e4f00641afa4277f3ff5
| 30,111 |
def descriptions(path, values):
"""Transform descriptions."""
if not values:
return
root = E.descriptions()
for value in values:
elem = E.description(
value['description'], descriptionType=value['descriptionType']
)
set_non_empty_attr(elem, '{xml}lang', value.get('lang'))
root.append(elem)
return root
|
34d570f0c2a97616833af5432ed5607413e2af9a
| 30,112 |
from typing import Mapping
from typing import Union
from typing import Sequence
from typing import Optional
from typing import Any
def build_default_region_dataset(
metrics: Mapping[FieldName, Union[Sequence[float], TimeseriesLiteral]],
*,
region=DEFAULT_REGION,
start_date="2020-04-01",
static: Optional[Mapping[FieldName, Any]] = None,
) -> timeseries.MultiRegionDataset:
"""Returns a `MultiRegionDataset` containing metrics in one region"""
return build_dataset(
{region: metrics},
start_date=start_date,
static_by_region_then_field_name=({region: static} if static else None),
)
|
4c50876817b80ae412a193ba078b7948b7603322
| 30,113 |
def load_espnet_model(model_path):
"""Load an end-to-end model from ESPnet.
:param model_path: Path to the model.
:type model_path: str
:return: The model itself, mapping from subword to index,
and training arguments used.
:rtype: (torch.nn.Module, dict, dict)
"""
model, train_args = load_trained_model(model_path)
char_dict = {v: k for k, v in enumerate(train_args.char_list)}
model.eval()
return model, char_dict, train_args
|
d9f001a64465547cf27c6d600939e57e9b8f1a19
| 30,115 |
from typing import Iterator
from typing import Union
from typing import Match
def full_match(nfa: NFA, text: Iterator[str]) -> Union[Match, None]:
"""
:param nfa: a NFA
:param text: a text to match against
:return: match or ``None``
"""
text_it = _peek(text, sof='', eof='')
curr_states_set = StatesSet()
next_states_set = StatesSet()
curr_states_set.extend(curr_states(
state=nfa.state,
captured=None,
chars=next(text_it)))
for char, next_char in text_it:
if not curr_states_set:
break
for curr_state, captured in curr_states_set:
if char != curr_state.char:
continue
if curr_state.is_captured:
captured = captures.capture(
char=char,
prev=captured)
next_states_set.extend(next_states(
state=curr_state,
captured=captured,
chars=(char, next_char)))
curr_states_set, next_states_set = (
next_states_set, curr_states_set)
next_states_set.clear()
try:
captured = _get_match(curr_states_set)
except exceptions.MatchError:
return None
return Match(
captures=captures.matched(captured, nfa.groups_count),
named_groups=nfa.named_groups)
|
9cbb30633f648405e193f61f46b5e2dd80fffde0
| 30,116 |
def smiles_tokenizer(line, atoms=None):
"""
Tokenizes SMILES string atom-wise using regular expressions. While this
method is fast, it may lead to some mistakes: Sn may be considered as Tin
or as Sulfur with Nitrogen in aromatic cycle. Because of this, you should
specify a set of two-letter atoms explicitly.
Parameters:
atoms: set of two-letter atoms for tokenization
"""
if atoms is not None:
reg = get_tokenizer_re(atoms)
else:
reg = _atoms_re
return reg.split(line)[1::2]
|
c31916558fdbeda345a0667b43364f8bff504840
| 30,117 |
from typing import Set
def merge_parameter_sets(first: Set[ParameterDefinition], second: Set[ParameterDefinition]) -> Set[ParameterDefinition]:
"""
Given two sets of parameter definitions, coming from different dependencies for example, merge them into a single set
"""
result: Set[ParameterDefinition] = first.intersection(second)
difference = first.symmetric_difference(second)
for param in difference:
# add the param if it's either required or no other param in difference is the same but required
if param.is_required or not any(p.field_alias == param.field_alias and p.is_required for p in difference):
result.add(param)
return result
|
4b60ae17eb6e8b1ccd5149517c9d0ae809c33411
| 30,118 |
import struct
def build_udp_header(src_port, dst_port, length):
"""Builds a valid UDP header and returns it
Parameters:
- src_port: A uint16 which will be used as source port for the UDP
header
- dst_port: A uint16 which will be used as destination port for the
UDP header
- length: Length of the data that will be sent in the UDP package.
The actual length field in the UDP package will be 8 bytes
longer to make room for the UDP header itself
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source port | Destination port |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length | Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
"""
if (src_port == DEFAULT_SRC_PORT):
src_port = dst_port
# error-checking of header AND data. If no checksum is generated set the
# value all-zeros
checksum = 0
udp_header = struct.pack('!HHHH',
src_port, dst_port, (length + 8), checksum)
return udp_header
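# Usage sketch; DEFAULT_SRC_PORT is a module-level constant not shown in
# this snippet, so 12345 is assumed to differ from it.
example_header = build_udp_header(12345, 53, 100)
# len(example_header) == 8
# struct.unpack('!HHHH', example_header) == (12345, 53, 108, 0)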
|
d110c19ff38f88bc892ecb52c8203e356a930bab
| 30,120 |
import pandas as pd
from sklearn import metrics
def plot_confus_mat(y_true, y_pred, classes_on=None,
normalize='true',
linewidths=0.02, linecolor='grey',
figsize: tuple = (4, 3),
ax=None, fp=None,
**kwargs):
""" by default, normalized by row (true classes)
"""
if classes_on is None:
classes_on = list(set(y_true).union(y_pred))
mat = metrics.confusion_matrix(y_true, y_pred, labels=classes_on,
normalize=normalize)
# return sns.heatmap(mat, linewidths=linewidths, linecolor=linecolor,
# xticklabels=classes_on, yticklabels=classes_on,
# **kwargs)
mat = pd.DataFrame(data=mat, index=classes_on, columns=classes_on)
ax = heatmap(mat, figsize=figsize, ax=ax, fp=fp,
linewidths=linewidths, linecolor=linecolor,
**kwargs)
return ax, mat
|
59ef04547b4829d7c3c1049c93fab69faaa3b23d
| 30,121 |
from fastapi.responses import RedirectResponse
async def home():
"""
Home endpoint to redirect to docs.
"""
return RedirectResponse("/docs")
|
1ebece9db1a86f54ec101037279087065aaa2f0a
| 30,123 |
import numpy as np
import matplotlib.pyplot as plt
def robust_hist(x, ax=None, **kwargs):
"""
Wrapper function to `plt.hist` dropping values that are not finite
Returns:
Axes
"""
mask = np.isfinite(x)
ax = ax or plt.gca()
ax.hist(x[mask], **kwargs)
return ax
|
32165e3e5cb796fe941bc0f177606dbc502c61ef
| 30,124 |
def base36encode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
"""Convert positive integer to a base36 string."""
if not isinstance(number, int):
raise TypeError('number must be an integer')
# Special case for zero
if number == 0:
return alphabet[0]
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = - number
while number != 0:
number, i = divmod(number, len(alphabet))
base36 = alphabet[i] + base36
return sign + base36
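# Round-trip check against int(), which also accepts base 36:
# 12345 == 9*36**2 + 18*36 + 33 -> '9ix'
assert base36encode(12345) == '9ix'
assert int(base36encode(12345), 36) == 12345
assert base36encode(-42) == '-16'
assert base36encode(0) == '0'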
|
d670a047d210f1d452d2acde76dc47208be2f4bf
| 30,126 |
def pipe(*args, **kwargs):
"""A source that builds a url.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. Must contain the key 'base'. May
contain the keys 'params' or 'path'.
base (str): the sever name
path (str): the resource path
params (dict): can be either a dict or list of dicts. Must contain
the keys 'key' and 'value'.
key (str): the parameter name
value (str): the parameter value
Yields:
dict: a url item
Examples:
>>> params = {'key': 's', 'value': 'gm'}
>>> path = [{'value': 'rss'}, {'value': 'headline'}]
>>> base = 'http://finance.yahoo.com'
>>> conf = {'base': base, 'path': path, 'params': params}
>>> result = next(pipe(conf=conf))
>>> sorted(result.keys()) == [
... 'fragment', 'netloc', 'params', 'path', 'query', 'scheme',
... 'url']
True
>>> result['url'] == 'http://finance.yahoo.com/rss/headline?s=gm'
True
"""
return parser(*args, **kwargs)
|
a9fca4149bca2ee50ffe5efcbb67c3066523cdf8
| 30,127 |
import six
def get_rotation(rotation):
"""
Return the text angle as float. The returned
angle is between 0 and 360 deg.
*rotation* may be 'horizontal', 'vertical', or a numeric value in degrees.
"""
try:
angle = float(rotation)
except (ValueError, TypeError):
isString = isinstance(rotation, six.string_types)
if ((isString and rotation == 'horizontal') or rotation is None):
angle = 0.
elif (isString and rotation == 'vertical'):
angle = 90.
else:
raise ValueError("rotation is {0} expected either 'horizontal'"
" 'vertical', numeric value or"
"None".format(rotation))
return angle % 360
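# Behaviour summary: strings map to 0/90 degrees, numbers are wrapped into
# [0, 360), and None is treated as horizontal.
assert get_rotation('horizontal') == 0.0
assert get_rotation('vertical') == 90.0
assert get_rotation(450) == 90.0
assert get_rotation(None) == 0.0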
|
7ed0fd31f9a90ddb5743faa8e45e46f0d5cc08bd
| 30,128 |
def checkWrite(request):
"""Check write"""
try:
_path = request.query_params.get("path")
_file = open(_path + "test.txt", "w")
_file.write("engine write test")
_file.close()
return HttpResponse(_path + "test.txt")
except ValueError as e:
return genericApiException(e)
|
c3d196126c67cc9b8ba5482a4ebb7df778cd1d5e
| 30,129 |
def most_seen_creators(event_kind=None, num=10):
"""
Returns a QuerySet of the Creators that are associated with the most Events.
"""
return Creator.objects.by_events(kind=event_kind)[:num]
|
60d4865b56ea2d2ede8cad5123fbaa3f49e72bcd
| 30,130 |
def read_lexicon():
"""
Returns the dict of {'word': string, 'score': int} represented by lexicon.txt
"""
return read_dict('resources/lexicon.txt')
|
69cdf729aabfd42d4e02690cabcd91b1162598aa
| 30,133 |
import numpy as np
import brewer2mpl as b2mpl
from tqdm import tqdm
def show_erps(Ds, align_window, labels=None, show_sem=True, co_data=None,
**kwargs):
"""
Use plot ERPs on electrode_grid
Parameters
----------
Ds: list
list of D tensors (electrodes x time x trials)
align_window: tuple
time before and after stim in seconds
labels: tuple
Optional. labels for data legend
show_sem: bool
co_data: list
List of RGB (0<x<1) values for the data colors. Default: cbrewer Set1
kwargs: see electrode_grid
Returns
-------
fig: plt.Figure
axs: list(plt.Axes)
"""
if co_data is None:
co_data = b2mpl.get_map('Set1', 'Qualitative', 4).mpl_colors[1:]
fig, axs, elects_to_plot = electrode_grid(xlims=align_window, **kwargs)
h_lines = []
for D, color in zip(Ds, co_data):
D = D[np.array(elects_to_plot).astype(int)]
mean_erp = np.ma.mean(D, axis=2)
tt = np.linspace(align_window[0], align_window[1], mean_erp.shape[1])
for ax, data in tqdm(zip(axs, mean_erp), desc='Drawing data'):
h_line, = ax.plot(tt, data, color=color, linewidth=1)
h_lines.append(h_line)
if show_sem:
sem_erp = np.ma.std(D, axis=2) / np.sqrt(D.shape[2])
for ax, data, err in tqdm(zip(axs, mean_erp, sem_erp),
desc='Drawing sem'):
ax.fill_between(tt, data - err, data + err,
alpha=.4, facecolor=color, edgecolor=color)
for ax in axs:
yl = ax.get_ylim()
ax.set_yticks((yl[0], 0, yl[1]))
ax.grid(True)
if labels is not None:
fig.legend(h_lines, labels, loc='upper right', ncol=2)
return fig, axs
|
988a89af259387796e3735ce9526304591c09131
| 30,135 |
def insert_with_key_enumeration(agent, agent_data: list, results: dict):
"""
Checks if agent with the same name has stored data already in the given dict and enumerates in that case
:param agent: agent that produced data
:param agent_data: simulated data
:param results: dict to store data into
:return: dict with inserted data/name pair
"""
# add to results dict and don't double agent names
if agent.get_name() not in results:
results[agent.get_name()] = agent_data
else:
# add index to agent name if another agent of same type was simulated before
new_name = agent.get_name() + "_" + str(
sum([agent.get_name() in s for s in list(results.keys())]))
results[new_name] = agent_data
return results
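# Illustrative sketch: anything exposing get_name() works, so a
# SimpleNamespace stands in for a real agent here.
from types import SimpleNamespace
_results = {}
_a1 = SimpleNamespace(get_name=lambda: "seller")
_a2 = SimpleNamespace(get_name=lambda: "seller")
insert_with_key_enumeration(_a1, [1, 2], _results)   # stored as "seller"
insert_with_key_enumeration(_a2, [3, 4], _results)   # stored as "seller_1"
# _results == {"seller": [1, 2], "seller_1": [3, 4]}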
|
d2d653dcff20836c4eaf8cf55b31b1a1209a4ddd
| 30,136 |
def parse_condition_code(value, is_day: bool) -> str:
"""Convert WeatherAPI condition code to standard weather condition."""
if value is None:
return None
try:
condition_code = int(value)
if condition_code == 1000:
return ATTR_CONDITION_SUNNY if is_day else ATTR_CONDITION_CLEAR_NIGHT
matches = [k for k, v in CONDITION_MAP.items() if condition_code in v]
condition = matches[0]
except: # noqa: E722 pylint: disable=bare-except
condition = None
return condition
|
cd650a27b907f6d0ced7c05bd8aec5a316bf3b42
| 30,138 |
def min__to__s():
"""Convert minute to second"""
return '6.0E+1{kind}*{var}'
|
2730af2cc79a6c4af6d1b18f79326623c0fd0289
| 30,139 |
from dash import html
def home():
"""Home tab."""
icon = html.I(className="fas fa-home fa-lg", title="Home")
return html.Li(html.Span(icon), id="view-info", className="active")
|
f1771b014b3d0332965b4bb0d74038dfddda8c21
| 30,140 |
def score_per_term(base_t, mis_t, special_t, metric):
"""Computes three distinct similarity scores for each list of terms.
Parameters
----------
base_t, mismatch_t special_t: list of str
Lists of toponym terms identified as base, mismatch or frequent (special) respectively.
metric: str
Indicates the metric to utilize in order to calculate the similarity score by comparing individually the
three lists.
Returns
-------
tuple of (float, float, float)
A similarity score for every list of terms. Each score is normalized in range [0,1].
"""
scores = [0, 0, 0] # base, mis, special
for idx, (term_a, term_b) in enumerate(zip(
[base_t['a'], mis_t['a'], special_t['a']],
[base_t['b'], mis_t['b'], special_t['b']]
)):
if term_a or term_b: scores[idx] = globals()[metric](u' '.join(term_a), u' '.join(term_b))
return scores[0], scores[1], scores[2]
|
55e5b9b0d9feaa359ab0907b399eb37514dcfacd
| 30,141 |
import bisect
import numpy as np
from math import inf
def _eliminationOrder_OLD(gm, orderMethod=None, nExtra=-1, cutoff=inf, priority=None, target=None):
"""Find an elimination order for a graphical model
Args:
gm (GraphModel): A graphical model object
method (str): Heuristic method; one of {'minfill','wtminfill','minwidth','wtminwidth','random'}
nExtra (int): Randomly select eliminated variable from among the best plus nExtra; this adds
randomness to the order selection process. 0 => randomly from best; -1 => no randomness (default)
cutoff (float): Quit early if ``score`` exceeds a user-supplied cutoff value (returning ``target, cutoff``)
target (list): If the identified order is better than cutoff, write it directly into passed ``target`` list
priority (list, optional): Optional list of variable priorities; lowest priority variables are
eliminated first. Useful for mixed elimination models, such as marginal MAP inference tasks.
Returns:
list: The identified elimination order
float: The "score" of this ordering
Using ``target`` and ``cutoff`` one can easily search for better orderings by repeated calls:
>>> ord, score = eliminationOrder(model, 'minfill', nExtra=2, cutoff=score, target=ord)
"""
orderMethod = 'minfill' if orderMethod is None else orderMethod.lower()
priority = [1 for x in gm.X] if priority is None else priority
if orderMethod == 'minfill': score = lambda adj,Xj: sum([0.5*len(adj[Xj]-adj[Xk]) for Xk in adj[Xj]])
elif orderMethod == 'wtminfill': score = lambda adj,Xj: sum([(adj[Xj]-adj[Xk]).nrStatesDouble() for Xk in adj[Xj]])
elif orderMethod == 'minwidth': score = lambda adj,Xj: len(adj[Xj])
elif orderMethod == 'wtminwidth': score = lambda adj,Xj: adj[Xj].nrStatesDouble()
elif orderMethod == 'random': score = lambda adj,Xj: np.random.rand()
else: raise ValueError('Unknown ordering method: {}'.format(orderMethod))
adj = [ VarSet([Xi]) for Xi in gm.X ]
for Xi in gm.X:
for f in gm.factorsWith(Xi, copy=False):
adj[Xi] |= f.vars
# initialize priority queue of scores using e.g. heapq or sort
scores = [ (priority[Xi],score(adj,Xi),Xi) for Xi in gm.X ]
reverse = scores[:]
scores.sort()
totalSize = 0.0
_order = [0 for Xi in gm.X]
for idx in range(gm.nvar):
pick = 0
Pi,Si,Xi = scores[pick]
if nExtra >= 0:
mx = bisect.bisect_right(scores, (Pi,Si,gm.X[-1])) # get one past last equal-priority & score vars
pick = min(mx+nExtra, len(scores)) # then pick a random "near-best" variable
pick = np.random.randint(pick)
Pi,Si,Xi = scores[pick]
del scores[pick]
_order[idx] = Xi.label # write into order[idx] = Xi
totalSize += adj[Xi].nrStatesDouble()
if totalSize > cutoff: return target,cutoff # if worse than cutoff, quit with no changes to "target"
fix = VarSet()
for Xj in adj[Xi]:
adj[Xj] |= adj[Xi]
adj[Xj] -= [Xi]
fix |= adj[Xj] # shouldn't need to fix as much for min-width?
for Xj in fix:
Pj,Sj,Xj = reverse[Xj]
jPos = bisect.bisect_left(scores, (Pj,Sj,Xj))
del scores[jPos] # erase (Pj,Sj,Xj) from heap
reverse[Xj] = (Pj,score(adj,Xj),Xj)
bisect.insort_left(scores, reverse[Xj]) # add (Pj,score(adj,Xj),Xj) to heap & update reverse lookup
if not (target is None):
target.extend([None for i in range(len(target),len(_order))]) # make sure order is the right size
for idx in range(gm.nvar): target[idx]=_order[idx] # copy result if completed without quitting
return _order,totalSize
|
dfe770db099dc65bcba1afb8c2706005dd7bb81d
| 30,142 |
import torch
def get_pretrain_data_loader(mode, pretrain_data_setting):
"""Get pre-training loader.
Args:
mode (str): either "train" or "valid".
pretrain_data_setting (dict, optional): pretrain dataset setting.
Returns:
loader (torch.dataloader): a PyTorch dataloader with all input
datasets.
"""
is_train = mode == "train"
use_aug = pretrain_data_setting["use augmentation"]
batch_size = pretrain_data_setting["batch size"]
if use_aug and is_train:
transform = train_transform
else:
transform = valid_transform
dataset_arr = [] # hold all datasets.
for dataset_info in pretrain_data_setting["datasets"]:
dataset_ = SignalDataset(signal_length=dataset_info["signal length"],
is_train=is_train,
folder_name=dataset_info["path"],
aug_transform=transform)
dataset_arr.append(dataset_)
dataset = torch.utils.data.ConcatDataset(dataset_arr)
sampler = _ConcatBatchSampler(dataset,
batch_size=batch_size,
drop_last=False,
shuffle=is_train)
loader = torch.utils.data.DataLoader(dataset,
pin_memory=True,
batch_sampler=sampler)
assert len(loader) > 0, "empty data loader from %s" % pretrain_data_setting
return loader
|
dc894eb5fb41cf49910568d01a749ebc93aded6d
| 30,144 |
def do_simple_math(number1, number2, operator):
"""
Does simple math between two numbers and an operator
:param number1: The first number
:param number2: The second number
:param operator: The operator (string)
:return: Float
"""
ans = 0
if operator is "*":
ans = number1 * number2
elif operator is "/":
ans = number1 / number2
elif operator is "+":
ans = number1 + number2
elif operator is "-":
ans = number1 - number2
elif operator is "^":
ans = number1 ** number2
elif operator is "%":
ans = number1 % number2
return ans
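# Spot checks of the operator dispatch (the "/" result is always a float):
assert do_simple_math(2, 3, "^") == 8
assert do_simple_math(7, 2, "%") == 1
assert do_simple_math(9, 3, "/") == 3.0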
|
eb745f9c3f3c1e18de30cbe6c564d68c29e39ff4
| 30,145 |
import pytest
from time import sleep
from dask import delayed
def test_global_settings_data():
"""Ensure that GlobalSettingsData objects are properly initialized
per-thread"""
def check_initialized(index):
if index == 0:
sleep(0.1)
with pytest.raises(AttributeError):
_global_settings_data.testing_index # pylint: disable=W0104
_global_settings_data.testing_index = index
sleep(0.5)
return (
test_global_settings_data_obj.shared_state['_output_type'] is None
and test_global_settings_data_obj.shared_state['root_cm'] is None
and _global_settings_data.testing_index == index
)
results = [
delayed(check_initialized)(index)
for index in range(5)
]
assert (delayed(all)(results)).compute()
|
bd1229bb9150b25c88be621d5af0f8da9cf7327d
| 30,146 |
def set_client(client):
"""
Set the global HTTP client for sdk.
Returns previous client.
"""
global _global_client
previous = _global_client
_global_client = client
return previous
|
9f29f5491cee42581fb2b0a22edd36a2297754b4
| 30,147 |
def readNetAddress(b, hasStamp):
"""
Reads an encoded NetAddress from b depending on the protocol version and
whether or not the timestamp is included per hasStamp. Some messages like
version do not include the timestamp.
Args:
b (ByteArray): The encoded NetAddress.
hasStamp (bool): Whether or not the NetAddress has a timestamp.
Returns:
NetAddress: The decoded NetAddress.
"""
expLen = 30 if hasStamp else 26
if len(b) != expLen:
raise DecredError(
f"readNetAddress wrong length (hasStamp={hasStamp}) expected {expLen}, got {len(b)}"
)
# NOTE: The Decred protocol uses a uint32 for the timestamp so it will
# stop working somewhere around 2106. Also timestamp wasn't added until
# protocol version >= NetAddressTimeVersion
stamp = b.pop(4).unLittle().int() if hasStamp else 0
services = b.pop(8).unLittle().int()
ip = b.pop(16)
if ip[:12] == ipv4to16prefix:
ip = ip[12:]
# Sigh. Decred protocol mixes little and big endian.
port = b.pop(2).int()
    return NetAddress(ip=ip, port=port, services=services, stamp=stamp)
|
7d523c0465039008e0015c075e8282a1aacea000
| 30,148 |
def get_all_clouds(session, return_type=None, **kwargs):
"""
Retrieves details for all available storage clouds.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
path = '/api/v2/clouds.json'
return session.get_api(path=path, return_type=return_type, **kwargs)
|
61029884408733398d8e2c3bb52c18ef4e9f83fc
| 30,149 |
def _get_account_balances_by_regid(user_regid):
"""
returns uw_sws.models.Finance object for a given regid
"""
if user_regid is None:
return None
return get_account_balances_by_regid(user_regid)
|
6c81ca23411a415d3551d856a44c44f6377ec1b9
| 30,150 |
def make_embed(msg_type='', title=None, icon=None, content=None,
msg_colour=None, guild=None, title_url=None,
thumbnail='', image='', fields=None, footer=None,
footer_icon=None, inline=False):
"""Returns a formatted discord embed object.
Define either a type or a colour.
Types are:
error, warning, info, success, help.
"""
embed_types = {
'error':{
'icon':'https://i.imgur.com/juhq2uJ.png',
'colour':'red'
},
'warning':{
'icon':'https://i.imgur.com/4JuaNt9.png',
'colour':'gold'
},
'info':{
'icon':'https://i.imgur.com/wzryVaS.png',
'colour':'blue'
},
'success':{
'icon':'https://i.imgur.com/ZTKc3mr.png',
'colour':'green'
},
'help':{
'icon':'https://i.imgur.com/kTTIZzR.png',
'colour':'blue'
}
}
if msg_type in embed_types.keys():
msg_colour = embed_types[msg_type]['colour']
icon = embed_types[msg_type]['icon']
if guild and not msg_colour:
msg_colour = colour(guild)
else:
if not isinstance(msg_colour, discord.Colour):
msg_colour = colour(msg_colour)
embed = discord.Embed(description=content, colour=msg_colour)
if not title_url:
title_url = discord.Embed.Empty
if not icon:
icon = discord.Embed.Empty
if title:
embed.set_author(name=title, icon_url=icon, url=title_url)
if thumbnail:
embed.set_thumbnail(url=thumbnail)
if image:
embed.set_image(url=image)
if fields:
for key, value in fields.items():
ilf = inline
if not isinstance(value, str):
ilf = value[0]
value = value[1]
embed.add_field(name=key, value=value, inline=ilf)
if footer:
footer = {'text':footer}
if footer_icon:
footer['icon_url'] = footer_icon
embed.set_footer(**footer)
return embed
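A hypothetical call, assuming discord.py (pre-2.0, which still has Embed.Empty) and the module's own colour() helper; it shows the string and (inline, value) tuple forms accepted for fields.

embed = make_embed(
    msg_type='warning',
    title='Rate limit approaching',
    content='Slow down requests for the next minute.',
    fields={
        'Endpoint': '/messages',          # plain string: uses the default inline flag
        'Remaining': (True, '12 calls'),  # (inline, value) tuple form
    },
    footer='bot status',
)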
|
5cdeb5862ffc525160361f760b5530e15d3258c1
| 30,153 |
import torch
def dynamic_stitch(indices, data):
"""
Args
indices: A list of at least 1 Tensor objects with type int32.
data: A list with the same length as indices of Tensor objects with
the same type.
Returns
A Tensor. Has the same type as data.
"""
dim_0 = int(max([torch.max(idx) if idx.shape[0] != 0
else 0 for idx in indices]) + 1)
shape = torch.Size([dim_0] + list(data[0].shape[indices[0].ndim:]))
tensor = torch.empty(shape, dtype=data[0].dtype)
for i in range(len(indices)):
tensor[indices[i]] = data[i]
return tensor
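A quick sanity check mirroring tf.dynamic_stitch semantics (not part of the original snippet): value j of data[i] is written to position indices[i][j] of the output.

import torch

indices = [torch.tensor([0, 2]), torch.tensor([1, 3])]
data = [torch.tensor([10.0, 30.0]), torch.tensor([20.0, 40.0])]
out = dynamic_stitch(indices, data)
# out -> tensor([10., 20., 30., 40.])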
|
6988b400ca1110187643eba932f00103f5f393b6
| 30,154 |
def snake(string):
"""snake_case"""
return "_".join(string.split())
|
6bf99dede918937ad59ec9be14ffade8fadb5794
| 30,155 |
def parse_standard_metadata():
"""
Gather the standard metadata information from Jenkins and the DBMS.
Returns
-------
The metadata obtained from Jenkins and the DBMS.
Warnings
--------
Underlying implementation is hacky right now.
"""
return {**_parse_jenkins_env_vars(), **_parse_db_metadata()}
|
535bc56eabbdc2d178b448951127adf37af217eb
| 30,156 |
def match_all_args(ctx, node, func, args):
"""Call match_args multiple times to find all type errors.
Args:
ctx: The abstract context.
node: The current CFG node.
func: An abstract function
args: An Args object to match against func
Returns:
A tuple of (new_args, errors)
where new_args = args with all incorrectly typed values set to Any
errors = a list of [(type mismatch error, arg name, value)]
Reraises any error that is not function.InvalidParameters
"""
positional_names = func.get_positional_names()
needs_checking = True
errors = []
while needs_checking:
try:
func.match_args(node, args)
except FailedFunctionCall as e:
if isinstance(e, WrongKeywordArgs):
errors.append((e, e.extra_keywords[0], None))
for i in e.extra_keywords:
args = args.delete_namedarg(i)
elif isinstance(e, DuplicateKeyword):
errors.append((e, e.duplicate, None))
args = args.delete_namedarg(e.duplicate)
elif isinstance(e, MissingParameter):
errors.append((e, e.missing_parameter, None))
args = args.replace_namedarg(
e.missing_parameter, ctx.new_unsolvable(node))
elif isinstance(e, WrongArgTypes):
arg_name = e.bad_call.bad_param.name
for name, value in e.bad_call.passed_args:
if name != arg_name:
continue
errors.append((e, name, value))
try:
pos = positional_names.index(name)
except ValueError:
args = args.replace_namedarg(name, ctx.new_unsolvable(node))
else:
args = args.replace_posarg(pos, ctx.new_unsolvable(node))
break
else:
raise AssertionError(
"Mismatched parameter %s not found in passed_args" %
arg_name) from e
else:
# This is not an InvalidParameters error.
raise
else:
needs_checking = False
return args, errors
|
88bd473876dd3a286c02330023555dab211336df
| 30,158 |
import torch
def samples_from_cpprb(npsamples, device=None):
"""
Convert samples generated by cpprb.ReplayBuffer.sample() into
State, Action, rewards, State.
Return Samples object.
Args:
npsamples (dict of nparrays):
Samples generated by cpprb.ReplayBuffer.sample()
device (optional): The device where the outputs are loaded.
Returns:
Samples(State, Action, torch.FloatTensor, State)
"""
# device = self.device if device is None else device
states = npsamples["obs"]
actions = npsamples["act"]
rewards = torch.tensor(npsamples["rew"], dtype=torch.float32).squeeze()
next_states = npsamples["next_obs"], npsamples["done"]
return Samples(states, actions, rewards, next_states)
|
6775f0eee7544f35e04e6e6fd3096516411dc0e8
| 30,159 |
def generateKeys():
"""
generates and returns a dictionary containing the original columns names from the
LIDAR file as values and the currently used column names as corresponding keys
ws_1 : Speed Value.1
dir_1 : Direction Value.1
h_1 : Node RT01 Lidar Height
"""
keys = {"ws_0" : "Speed Value", "dir_0" : "Direction Value", "h_0" : "Node RT00 Lidar Height"}
for i in range(1, 11):
keys.update({"ws_{}".format(i) : "Speed Value.{}".format(i),
"dir_{}".format(i) : "Direction Value.{}".format(i),
"h_{}".format(i) : "Node RT{:02d} Lidar Height".format(i+1),
})
return keys
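A brief illustration of the mapping produced for the speed and direction columns (the height columns follow the i+1 offset visible in the loop above).

keys = generateKeys()
print(keys["ws_0"])    # 'Speed Value'
print(keys["ws_3"])    # 'Speed Value.3'
print(keys["dir_10"])  # 'Direction Value.10'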
|
9d0d55c3fdc32ddda46da4a9e876d4ce1ecde25d
| 30,160 |
def process_line(line):
"""Return the syntax error points of line."""
stack = []
for c in line:
if c in '([{<':
stack.append(c)
elif c != closings[stack.pop()]:
return points[c]
return 0
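The snippet relies on module-level closings and points tables that are not shown; a plausible pair, matching the Advent of Code 2021 "Syntax Scoring" puzzle this appears to implement, is sketched below as an assumption.

# Assumed lookup tables (not in the original snippet).
closings = {'(': ')', '[': ']', '{': '}', '<': '>'}
points = {')': 3, ']': 57, '}': 1197, '>': 25137}

print(process_line("{([(<{}[<>[]}>{[]{[(<()>"))  # -> 1197 (first illegal char is '}')
print(process_line("[({(<(())[]>[[{[]{<()<>>"))  # -> 0 (incomplete line, no corruption)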
|
4ab64c74d89f950cc6c87b7a91addeb29717d74a
| 30,161 |
def get_uniprot_homologs(rev=False):
"""As above, but exclusively uniprot => mouse uniprot"""
homologs = {}
with open('data/corum_mouse_homologs.txt') as infile:
data = [line.strip().split('\t') for line in infile]
for line in data:
original = line[1].split('|')[1]
uniprot = line[0]
# Picks first, and subsequently best. Seqid must be in desc order!
if original not in homologs:
homologs[original] = uniprot
if rev:
homologs = {value: key for key, value in homologs.items()}
return homologs
|
969085375265b90b5501b4b86eaaed3e1c48795f
| 30,162 |
import typing
def flatten(
value: list,
levels: typing.Optional[int] = None
) -> list:
"""Flatten a list.
.. code-block:: yaml
- vars:
new_list: "{{ [1, 2, [3, [4, 5, [6]], 7]] | flatten }}"
# -> [1, 2, 3, 4, 5, 6, 7]
To flatten only the top level, use the ``levels`` argument:
.. code-block:: yaml
- vars:
new_list: "{{ [1, 2, [3, [4, 5, [6]], 7]] | flatten(levels=1) }}"
# -> [1, 2, 3, [4, 5, [6]], 7]
.. versionadded:: 1.1
:param levels: Number of levels to flatten. If `None` - flatten everything.
"""
return list(_utils.flatten(value, levels=levels))
|
569ccb15f140a517792bc6b5ea962537db0b31f8
| 30,163 |
def GetStage(messages):
"""Returns corresponding GoogleCloudFunctionsV2(alpha|beta)Stage."""
if messages is apis.GetMessagesModule(_API_NAME, _V2_ALPHA):
return messages.GoogleCloudFunctionsV2alphaStage
elif messages is apis.GetMessagesModule(_API_NAME, _V2_BETA):
return messages.GoogleCloudFunctionsV2betaStage
else:
return messages.GoogleCloudFunctionsV2Stage
|
3bdb130cf78694b223bd555f6db20e1c687b5552
| 30,164 |
def get_general_case_info(adapter, institute_id=None, slice_query=None):
"""Return general information about cases
Args:
adapter(adapter.MongoAdapter)
institute_id(str)
slice_query(str): Query to filter cases to obtain statistics for.
Returns:
general(dict)
"""
general = {}
# Potentially sensitive slice queries are assumed allowed if we have got this far
name_query = slice_query
cases = adapter.cases(owner=institute_id, name_query=name_query)
phenotype_cases = 0
causative_cases = 0
pinned_cases = 0
cohort_cases = 0
pedigree = {
1: {
'title': 'Single',
'count': 0
},
2: {
'title': 'Duo',
'count': 0
},
3: {
'title': 'Trio',
'count': 0
},
'many': {
'title': 'Many',
'count': 0
},
}
case_ids = set()
total_cases = 0
for total_cases,case in enumerate(cases,1):
# If only looking at one institute we need to save the case ids
if institute_id:
case_ids.add(case['_id'])
if case.get('phenotype_terms'):
phenotype_cases += 1
if case.get('causatives'):
causative_cases += 1
if case.get('suspects'):
pinned_cases += 1
if case.get('cohorts'):
cohort_cases += 1
nr_individuals = len(case.get('individuals',[]))
if nr_individuals == 0:
continue
if nr_individuals > 3:
pedigree['many']['count'] += 1
else:
pedigree[nr_individuals]['count'] += 1
general['total_cases'] = total_cases
general['phenotype_cases'] = phenotype_cases
general['causative_cases'] = causative_cases
general['pinned_cases'] = pinned_cases
general['cohort_cases'] = cohort_cases
general['pedigree'] = pedigree
general['case_ids'] = case_ids
return general
|
a5afc2244db59f7a3dd0da55dd4759a57af641a4
| 30,165 |
def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
""" Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.
Various implementations were tested on the corpus of all browse() calls
performed during a full crawler run (after having installed all website_*
modules) and this one was the most efficient overall.
A possible bit of correctness was sacrificed by not doing any test on
Iterable and just assuming that any non-atomic type was an iterable of
some kind.
:rtype: tuple
"""
# much of the corpus is falsy objects (empty list, tuple or set, None)
if not arg:
return ()
# `type in set` is significantly faster (because more restrictive) than
# isinstance(arg, set) or issubclass(type, set); and for new-style classes
# obj.__class__ is equivalent to but faster than type(obj). Not relevant
# (and looks much worse) in most cases, but over millions of calls it
# does have a very minor effect.
if arg.__class__ in atoms:
return arg,
return tuple(arg)
|
1a7b930896a046357474000b8ebc598f70fbba76
| 30,166 |
def is_intersection(g, n):
"""
Determine if a node is an intersection
graph: 1 -->-- 2 -->-- 3
>>> is_intersection(g, 2)
False
graph:
1 -- 2 -- 3
|
4
>>> is_intersection(g, 2)
True
Parameters
----------
g : networkx DiGraph
n : node id
Returns
-------
bool
"""
    # Union of predecessors and successors; building sets directly also works
    # when networkx >= 2.0 returns iterators instead of lists.
    return len(set(g.predecessors(n)) | set(g.successors(n))) > 2
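A tiny check of the docstring cases, assuming networkx is installed: a node with three distinct neighbours is an intersection, a pass-through node is not.

import networkx as nx

g = nx.DiGraph()
g.add_edges_from([(1, 2), (2, 3), (2, 4)])
print(is_intersection(g, 2))  # True  (neighbours {1, 3, 4})

h = nx.DiGraph()
h.add_edges_from([(1, 2), (2, 3)])
print(is_intersection(h, 2))  # False (neighbours {1, 3})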
|
415e5154095cd78112ef029b6c4d62c36da0b3b8
| 30,167 |
def AxisRotation(p, ang, inplace=False, deg=True, axis='z'):
""" Rotates points p angle ang (in deg) about an axis """
axis = axis.lower()
# Copy original array to if not inplace
if not inplace:
p = p.copy()
# Convert angle to radians
if deg:
ang *= np.pi / 180
if axis == 'x':
y = p[:, 1] * np.cos(ang) - p[:, 2] * np.sin(ang)
z = p[:, 1] * np.sin(ang) + p[:, 2] * np.cos(ang)
p[:, 1] = y
p[:, 2] = z
elif axis == 'y':
x = p[:, 0] * np.cos(ang) + p[:, 2] * np.sin(ang)
z = - p[:, 0] * np.sin(ang) + p[:, 2] * np.cos(ang)
p[:, 0] = x
p[:, 2] = z
elif axis == 'z':
x = p[:, 0] * np.cos(ang) - p[:, 1] * np.sin(ang)
y = p[:, 0] * np.sin(ang) + p[:, 1] * np.cos(ang)
p[:, 0] = x
p[:, 1] = y
else:
raise Exception('invalid axis. Must be either "x", "y", or "z"')
if not inplace:
return p
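A small worked example (not from the original source): rotating the point (1, 0, 0) by 90 degrees about the z axis should land on (0, 1, 0).

import numpy as np

pts = np.array([[1.0, 0.0, 0.0]])
rotated = AxisRotation(pts, 90, axis='z')
# rotated -> approximately [[0., 1., 0.]] (cos/sin introduce ~1e-16 rounding error)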
|
1df385b98edb69134849cb052380fb99261f96b2
| 30,168 |
from pathlib import Path
from typing import List
def get_dir_list(path: Path)->List[str]:
"""
Return directory list
"""
dir_list = []
paths = Path(path).glob("**/*")
for p in paths:
if p.is_dir():
dir_list.append(str(p))
return dir_list
|
a0fe0659ad0175364048be6ef96026584fa6f3ef
| 30,169 |
import typing
def tokenize(data: typing.Union[str, typing.Sequence[str]]) -> list[str]:
"""break up string into tokens, tokens can be separated by commas or spaces
creates separate tokens for:
- "(" or "[" at beginning
- ")" or "]" at end
"""
# break into tokens
if isinstance(data, str):
data = [data]
tokens = []
for datum in data:
datum = datum.replace(',', ' ')
subtokens = datum.split(' ')
for token in subtokens:
if len(token) == 0:
continue
elif len(token) == 1:
tokens.append(token)
else:
start_interval = token[0] in ['(', '[']
end_interval = token[-1] in [')', ']']
# convert token based on contained intervals
if start_interval and end_interval:
tokens.append(token[0])
if len(token) > 2:
tokens.append(token[1:-1])
tokens.append(token[-1])
elif start_interval:
tokens.append(token[0])
tokens.append(token[1:])
elif end_interval:
tokens.append(token[:-1])
tokens.append(token[-1])
else:
tokens.append(token)
return tokens
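Illustrative calls (not from the original source): interval brackets are split off into their own tokens and commas behave like spaces.

print(tokenize("[1, 2)"))         # -> ['[', '1', '2', ')']
print(tokenize(["(0,10]", "x"]))  # -> ['(', '0', '10', ']', 'x']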
|
832343067c8777aa386c0c87c2c4e8202a7cb88f
| 30,170 |
def de_comma(string):
"""Remove any trailing commas
>>> de_comma(',fred,,') == ',fred'
True
"""
return string.rstrip(',')
|
453d615c1fbbef5139d05d6e4510731c969d6a86
| 30,171 |
def MakeData(ea, flags, size, tid):
"""
Create a data item at the specified address
@param ea: linear address
@param flags: FF_BYTE..FF_PACKREAL
@param size: size of item in bytes
@param tid: for FF_STRU the structure id
@return: 1-ok, 0-failure
"""
return idaapi.do_data_ex(ea, flags, size, tid)
|
ab890848784407bf0ee2864469a5c8874346c5ec
| 30,172 |
def get_node_network_receive(cluster_id, ip, start, end, bk_biz_id=None):
"""获取网络数据
start, end单位为毫秒,和数据平台保持一致
数据单位KB/s
"""
step = (end - start) // 60
prom_query = f"""
max(rate(node_network_receive_bytes_total{{cluster_id="{cluster_id}",job="node-exporter", instance=~"{ ip }:9100"}}[5m]))
""" # noqa
resp = query_range(prom_query, start, end, step)
return resp.get("data") or {}
|
9ba68d19c6ca959fd92020f50498d4aa14dfeb58
| 30,173 |
def verify_vrrpv3_summary(dut,**kwargs):
"""
Author: Raghukumar Rampur
email : [email protected]
:param dut:
:param interface:
:type string or list
:param vrid:
:type string or list
:param vip:
:type virtual-ip in string or list
:param state:
:type vrrp state as string or list
:param config_prio:
:type configured vrrp priority as list or string
:param current_prio:
:type Current vrrp priority as list or string
:return:
Usage
verify_vrrpv3_summary(dut1,vrid=['49','85'],state=['Master','Backup'],
interface=['Vlan2996','Vlan2998'],vip=['73.73.73.66','85.85.85.71'],
config_prio=[222,97],current_prio=[222,99])
verify_vrrpv3_summary(dut1,vrid='49',state='Master')
"""
ret_val = True
    cli_type = kwargs.pop("cli_type", st.get_ui_type(dut))  # pop so it is not treated as a match key below
if cli_type in ['rest-patch', 'rest-put']: cli_type = 'klish'
output = st.show(dut,'show vrrp6',type=cli_type)
if len(output) == 0:
st.error("Output is Empty")
return False
if 'return_output' in kwargs:
return output
#Converting all kwargs to list type to handle single or list of vrrp instances
for key in kwargs:
if type(kwargs[key]) is list:
kwargs[key] = list(kwargs[key])
else:
kwargs[key] = [kwargs[key]]
#convert kwargs into list of dictionary
input_dict_list =[]
    for i in range(len(kwargs[list(kwargs.keys())[0]])):
temp_dict = {}
for key in kwargs.keys():
temp_dict[key] = kwargs[key][i]
input_dict_list.append(temp_dict)
for input_dict in input_dict_list:
entries = filter_and_select(output,None,match=input_dict)
if not entries:
st.error("DUT {} -> Match Not Found {}".format(dut,input_dict))
ret_val = False
return ret_val
|
b5d9ae54fc316cadfd8c4d067439b19ecac4c371
| 30,174 |
def attributes_restore(node):
"""Restore previously unlocked attributes to their default state.
Args:
node (str): Node to restore attributes
Returns:
bool: False if attribute doesn't exists else True
"""
attr_name = "attributes_state"
base_attr = "{}.{}".format(node, attr_name)
if not cmds.objExists(base_attr):
return False
attr_data = literal_eval(cmds.getAttr(base_attr) or "{}")
    for _attr, values in attr_data.items():
node_attr = "{}.{}".format(node, _attr)
cmds.setAttr(node_attr, **values)
cmds.deleteAttr(base_attr)
return True
|
8c598518d7df1bcc88cbbb3c48d34fecd41b0487
| 30,175 |
import pickle
def get_actual_data(base, n_run, log_path, subfolders):
"""
:param base: the sub folder name right before the _DATE_InstanceNumber
:param n_run: the INSTANCE number in the subfolder name
    :param log_path: complete path to the main log folder containing all the runs of an experiment (e.g. ../data/CH6-14S1G1TNSV/)
    :param subfolders: the list of all the sub folders contained in log_path
:return:
"""
for subfolder in subfolders:
splitted = subfolder.split('_')
# get basename, compare to base; compare n_run with experiment instance
if splitted[0] == base and str(n_run).zfill(3) == splitted[2]:
filepath = log_path + '/' + subfolder + '/global_save.txt'
            try:
                with open(filepath, "rb") as save_file:
                    data = pickle.load(save_file)
            except Exception:
                print('Make sure your parameters are right!')
                exit()
            return data
|
b9f76b14b90e3c187e19bcd0b8bbbfe865518fe7
| 30,176 |
def secs_to_str(secs):
"""Given number of seconds returns, e.g., `02h 29m 39s`"""
units = (('s', 60), ('m', 60), ('h', 24), ('d', 7))
out = []
rem = secs
for (unit, cycle) in units:
out.append((rem % cycle, unit))
rem = int(rem / cycle)
if not rem:
break
if rem: # leftover = weeks
out.append((rem, 'w'))
return ' '.join(["%02d%s" % tup for tup in out[::-1]])
|
0918fd72fbaaa0adf8fe75bcb1ef39b4e9aba75b
| 30,177 |
def shuffle(xsets, ysets, seed=None):
"""Shuffle two datasets harmonically
Args:
        xsets, ysets: datasets; both must have the same length
Return:
(shuffled_x, shuffled_y): tuple including shuffled x and y
"""
if len(xsets) != len(ysets):
        raise ValueError("xsets and ysets must have the same length")
np.random.seed(seed=seed)
shuffled_indexes = np.random.permutation(len(xsets))
shuffled_x = xsets[shuffled_indexes]
shuffled_y = ysets[shuffled_indexes]
return (shuffled_x, shuffled_y)
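Example usage (illustrative data): the x/y pairing is preserved after shuffling, and a fixed seed makes the permutation reproducible.

import numpy as np

x = np.arange(5)
y = x * 10
sx, sy = shuffle(x, y, seed=0)
assert np.all(sy == sx * 10)  # rows stay paired after the shared permutation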
|
0d07fa7b1d556a5af0bb4f3d174326c756d3d6a7
| 30,178 |
import math
def get_CL_parameters(file_pointer, class_10_100_1000):
""" Function to predict cluster count and mean size by means of clustering
Args:
file_pointer: string with a file path
Returns
tuple with(
clusters: predicted number of clusters
log_mean_radius: predicted mean radius in log (pixels)
"""
#Constant parameters
# =========================================================================
# optimal parameters
# =========================================================================
    # for image class 1000
binarize_list=[70]
eps_list=[2]
min_samples=[2]
filter_boundary_list=[100]
remove_large_clusters_list=[200]
remove_small_clusters_list=[10]
max_filter_list=[3]
eps_grain_boundary_list=[2]
min_sample_grain_boundary_list=[5]
binarize_bdr_coord_list=[100]
binarize_grain_coord_list=[100]
param_list_1000=DL_helper_functions.make_param_list_11(
binarize_list,
eps_list,
min_samples,
filter_boundary_list,
remove_large_clusters_list,
remove_small_clusters_list,
max_filter_list,
eps_grain_boundary_list,
min_sample_grain_boundary_list,
binarize_bdr_coord_list,
binarize_grain_coord_list,
)
# for image class 100 objects
binarize_list=[15]
eps_list=[2]
min_samples=[2]
filter_boundary_list=[100]
remove_large_clusters_list=[20]
remove_small_clusters_list=[100]
max_filter_list=[3]
eps_grain_boundary_list=[2]
min_sample_grain_boundary_list=[5]
binarize_bdr_coord_list=[100]
binarize_grain_coord_list=[100]
param_list_100=DL_helper_functions.make_param_list_11(
binarize_list,
eps_list,
min_samples,
filter_boundary_list,
remove_large_clusters_list,
remove_small_clusters_list,
max_filter_list,
eps_grain_boundary_list,
min_sample_grain_boundary_list,
binarize_bdr_coord_list,
binarize_grain_coord_list,
)
# for image class 10 objects
binarize_list=[30]
eps_list=[2]
min_samples=[5]
filter_boundary_list=[100]
remove_large_clusters_list=[0]
remove_small_clusters_list=[800]
max_filter_list=[3]
eps_grain_boundary_list=[2]
min_sample_grain_boundary_list=[5]
binarize_bdr_coord_list=[100]
binarize_grain_coord_list=[100]
param_list_10_1=DL_helper_functions.make_param_list_11(
binarize_list,
eps_list,
min_samples,
filter_boundary_list,
remove_large_clusters_list,
remove_small_clusters_list,
max_filter_list,
eps_grain_boundary_list,
min_sample_grain_boundary_list,
binarize_bdr_coord_list,
binarize_grain_coord_list,
)
binarize_list=[5]
eps_list=[2]
min_samples=[5]
filter_boundary_list=[100]
remove_large_clusters_list=[0]
remove_small_clusters_list=[800]
max_filter_list=[3]
eps_grain_boundary_list=[2]
min_sample_grain_boundary_list=[5]
binarize_bdr_coord_list=[100]
binarize_grain_coord_list=[100]
param_list_10_2=DL_helper_functions.make_param_list_11(
binarize_list,
eps_list,
min_samples,
filter_boundary_list,
remove_large_clusters_list,
remove_small_clusters_list,
max_filter_list,
eps_grain_boundary_list,
min_sample_grain_boundary_list,
binarize_bdr_coord_list,
binarize_grain_coord_list,
)
# =========================================================================
# optimal parameters
# =========================================================================
    if class_10_100_1000 == '10_1':
        param = param_list_10_1[0]
    elif class_10_100_1000 == '10_2':
        param = param_list_10_2[0]
    elif class_10_100_1000 == '100':
        param = param_list_100[0]
    elif class_10_100_1000 == '1000':
        param = param_list_1000[0]
    else:
        raise ValueError("unknown image class: %s" % class_10_100_1000)
#define parameters
binarize=param[0]
eps=param[1]
min_sample=param[2]
filter_boundary=param[3]
remove_large_clusters=param[4]
remove_small_clusters=param[5]
max_filter=param[6]
eps_grain_boundary=param[7]
min_sample_grain_boundary=param[8]
binarize_bdr_coord=param[9]
binarize_grain_coord=param[10]
(image_X, m_real, s_real)=CL_load.load_func(file_pointer, threshold=binarize, max_filter=3)
try:
print('Clustering image')
(m_CL, s_CL, clusters) = CL_DBscan.fit_DBscan(image_X,
eps,
eps_grain_boundary,
min_sample,
min_sample_grain_boundary,
filter_boundary,
remove_large_clusters,
remove_small_clusters,
binarize_bdr_coord,
binarize_grain_coord,
)
if math.isnan(m_CL):
(m_CL,clusters)=(0,0)
except:
print('fit went wrong', str(param))
(m_CL,clusters)=(0,0)
log_mean_radius=m_CL
#print(m_CL)
return(clusters, log_mean_radius)
|
498bf2e3b6a1e70808b159e2b630d9cdb8cebc40
| 30,179 |
def _nanclean(cube, rejectratio=0.25, boxsz=1):
"""
Detects NaN values in cube and removes them by replacing them with an
interpolation of the nearest neighbors in the data cube. The positions in
the cube are retained in nancube for later remasking.
"""
logger.info('Cleaning NaN values in the cube')
cleancube = cube.copy()
badcube = np.logical_not(np.isfinite(cleancube)) # find NaNs
badmap = badcube.sum(axis=0) # map of total nans in a spaxel
# choose some maximum number of bad pixels in the spaxel and extract
# positions
badmask = badmap > (rejectratio * cleancube.shape[0])
logger.info('Rejected %d spaxels with more than %.1f%% NaN pixels',
np.count_nonzero(badmask), rejectratio * 100)
# make cube mask of bad spaxels
badcube &= (~badmask[np.newaxis, :, :])
z, y, x = np.where(badcube)
neighbor = np.zeros((z.size, (2 * boxsz + 1)**3))
icounter = 0
logger.info("Fixing %d remaining NaN pixels", len(z))
# loop over samplecubes
nz, ny, nx = cleancube.shape
for j in range(-boxsz, boxsz + 1, 1):
for k in range(-boxsz, boxsz + 1, 1):
for l in range(-boxsz, boxsz + 1, 1):
iz, iy, ix = z + l, y + k, x + j
outsider = ((ix <= 0) | (ix >= nx - 1) |
(iy <= 0) | (iy >= ny - 1) |
(iz <= 0) | (iz >= nz - 1))
ins = ~outsider
neighbor[ins, icounter] = cleancube[iz[ins], iy[ins], ix[ins]]
neighbor[outsider, icounter] = np.nan
icounter = icounter + 1
cleancube[z, y, x] = np.nanmean(neighbor, axis=1)
return cleancube, badcube
|
154bf994161a932505101ccbe921792e2d3c9f3b
| 30,180 |
import json
def parseData(filePath):
"""
    Tries to import a JSON JobShop PRO file into the program
    :return: machinesList, itinerariesList
"""
machinesList = []
itinerariesList = []
with open(filePath, 'r', encoding="utf8") as inputfile: # read file from path
importedData = json.loads(inputfile.read())
if list(importedData.keys()) == ["itineraries", "machines"]:
            imMachines = importedData['machines'] # if the first-level structure is correct, then split
imItineraries = importedData['itineraries']
if len(list(imMachines)) > 0 and len(list(imItineraries)) > 0:
for index, dictMachine in enumerate(imMachines):
machinesList.append(Machine(imMachines[index]['machineName']))
for _, dictItinerary in enumerate(imItineraries): # for each itinerary check structure
tmpItinerary = Itinerary()
tmpItinerary.name = dictItinerary['itineraryName']
tmpItineraryTasks = dictItinerary['tasksList']
for i, taskDict in enumerate(tmpItineraryTasks): # check structure of each task in itinerary
if list(tmpItineraryTasks[i].keys()) == ['taskName', 'taskMachine', 'taskDuration']:
taskMachine = tmpItineraryTasks[i]['taskMachine']
if list(taskMachine.keys()) == ["machineName"]: # check correctness of elements
tmpItinerary.tasksList.append(Task(tmpItineraryTasks[i]['taskName'],
float(tmpItineraryTasks[i]['taskDuration']),
# parse values to taskList
[ mac for mac in taskMachine["machineName"]]))
                    # add itinerary to the global list because parsing of this itinerary is finished
                    itinerariesList.append(tmpItinerary)
return machinesList, itinerariesList
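A hypothetical JobShop PRO document with the exact key layout parseData() checks for; note that "itineraries" must precede "machines" because the key order is compared literally. Names and durations are placeholders.

example_doc = {
    "itineraries": [
        {
            "itineraryName": "Itinerary 1",
            "tasksList": [
                {
                    "taskName": "Task 1",
                    "taskMachine": {"machineName": ["M1", "M2"]},
                    "taskDuration": 2.5,
                },
            ],
        },
    ],
    "machines": [
        {"machineName": "M1"},
        {"machineName": "M2"},
    ],
}
# Writing this with json.dump() and passing the file path to parseData() yields
# one Machine per entry and one Itinerary with a single Task, given the
# module's Machine/Itinerary/Task classes.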
|
b02471737e320eb35c4c9626c11737952455f18e
| 30,181 |
def curve_fit_log(xdata, ydata, sigma):
"""Fit data to a power law with weights according to a log scale"""
# Weights according to a log scale
# Apply fscalex
xdata_log = np.log10(xdata)
# Apply fscaley
ydata_log = np.log10(ydata)
sigma_log = np.log10(sigma)
# Fit linear
popt_log, pcov_log = curve_fit(linlaw, xdata_log, ydata_log,
sigma=sigma_log)
#print(popt_log, pcov_log)
# Apply fscaley^-1 to fitted data
ydatafit_log = np.power(10, linlaw(xdata_log, *popt_log))
# There is no need to apply fscalex^-1 as original data is already available
return (popt_log, pcov_log, ydatafit_log)
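A minimal usage sketch, assuming linlaw is the straight-line model in log space (the snippet references it but does not define it); the synthetic data and uncertainties below are illustrative.

import numpy as np
from scipy.optimize import curve_fit

def linlaw(x, a, b):
    """Assumed model: straight line in log-log space, log10(y) = a*log10(x) + b."""
    return a * x + b

xdata = np.linspace(1, 100, 50)
ydata = 2.0 * xdata ** 1.5            # exact power law y = 2 * x^1.5
sigma = np.full_like(ydata, 0.01)     # constant uncertainties (uniform weights)
popt_log, pcov_log, yfit = curve_fit_log(xdata, ydata, sigma)
# popt_log ≈ [1.5, log10(2)]: the power-law exponent and log-amplitude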
|
f00484c2e520e8060d7cb29ea503170c2e6ff07d
| 30,182 |
def get_computed_response_text_value(response):
"""
extract the text message from the Dialogflow response, fallback: None
"""
try:
if len(response.query_result.fulfillment_text):
return response.query_result.fulfillment_text
elif len(response.query_result.fulfillment_messages[0].text.text[0]):
return response.query_result.fulfillment_messages[0].text.text[0]
else:
return None
    except Exception:
return None
|
fa7410ac4b0ef2c0dea59b0e9d001a7893a56479
| 30,183 |
def tmpdir_factory(request):
"""Return a :class:`_pytest.tmpdir.TempdirFactory` instance for the test session.
"""
return request.config._tmpdirhandler
|
cb506efaef55275d30755fc010d130f61b331215
| 30,184 |