content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
from typing import Iterator
from typing import Tuple
from typing import Any
import itertools
def _nonnull_powerset(iterable) -> Iterator[Tuple[Any, ...]]:
"""Returns powerset of iterable, minus the empty set."""
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(1, len(s) + 1)) | ad02ab8ac02004adb54310bc639c6e2d84f19b02 | 11,300 |
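A quick, hypothetical usage check for _nonnull_powerset above (not part of the original record); the expected output follows directly from itertools.combinations:
# every non-empty subset is yielded, shortest first
# assert list(_nonnull_powerset([1, 2, 3])) == [
#     (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]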
import yaml
def _parse_obs_status_file(filename):
"""
Parse a yaml file and return a dictionary.
    The dictionary will be of the form: {'obs': [], 'bad': [], 'mags': []}
:param filename:
:return:
"""
with open(filename) as fh:
status = yaml.load(fh, Loader=yaml.SafeLoader)
if 'obs' not in status:
status['obs'] = []
if 'bad' not in status:
status['bad'] = []
if 'mags' not in status:
status['mags'] = []
if hasattr(status['bad'], 'items'):
status['bad'] = list(status['bad'].items())
return status | 389fc921867367964001e5fc2f56a7fa7defd7c8 | 11,301 |
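A sketch of how _parse_obs_status_file behaves on a small, hypothetical YAML file (file name and contents are made up here); note that a 'bad' mapping is flattened into (key, value) pairs:
# a file status.yml containing "obs: [1, 2]" and "bad: {3: hot pixel}" would parse as
# _parse_obs_status_file("status.yml")
# -> {'obs': [1, 2], 'bad': [(3, 'hot pixel')], 'mags': []}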
def extract_optional_suffix(r):
"""
a | a b -> a b?
"""
modified = False
    def match_replace_fn(o):
        nonlocal modified
if isinstance(o, Antlr4Selection):
potential_prefix = None
potential_prefix_i = None
to_remove = []
for i, c in enumerate(o):
if potential_prefix is None:
potential_prefix = c
potential_prefix_i = i
else:
# check if the potential_prefix is really a prefix of this rule
is_prefix, suffix = is_prefix_of_elem(potential_prefix, c)
if is_prefix:
# put suffix as a optional to a prefix
if list(iter_non_visuals(suffix)):
if not isinstance(potential_prefix, Antlr4Sequence):
assert o[potential_prefix_i] is potential_prefix
potential_prefix = Antlr4Sequence([potential_prefix, ])
o[potential_prefix_i] = potential_prefix
if len(suffix) == 1:
suffix = suffix[0]
else:
suffix = Antlr4Sequence(suffix)
potential_prefix.append(Antlr4Option(suffix))
to_remove.append(c)
potential_prefix = None
potential_prefix_i = None
modified = True
else:
potential_prefix = c
potential_prefix_i = i
for c in to_remove:
o.remove(c)
if len(o) == 1:
return Antlr4Sequence([o[0], ])
replace_item_by_sequence(r, match_replace_fn)
return modified | a1d4e6702ed1b23e0f94a44e5bea6fae16b47e17 | 11,302 |
def _heading_index(config, info, token, stack, level, blockquote_depth):
"""Get the next heading level, adjusting `stack` as a side effect."""
# Treat chapter titles specially.
if level == 1:
return tuple(str(i) for i in stack)
# Moving up
if level > len(stack):
if (level > len(stack) + 1) and (blockquote_depth == 0):
err(f"Heading {level} out of place", info, token)
while len(stack) < level:
stack.append(1)
# Same level
elif level == len(stack):
stack[-1] += 1
# Going down
else:
while len(stack) > level:
stack.pop()
stack[-1] += 1
# Report.
return tuple(str(i) for i in stack) | f42dd5c6aae942da687310d7bef81f70bfadad83 | 11,303 |
from typing import Dict, List, Union
Data = Dict[str, Dict[str, Union[str, float]]]  # type alias implied by the docstring below
def sin_salida_naive(vuelos: Data) -> List[str]:
"""Retorna una lista de aeropuertos a los cuales hayan llegado
vuelos pero no hayan salido vuelos de este.
:param vuelos: Información de los vuelos.
:vuelos type: Dict[str, Dict[str, Union[str, float]]]
:return: Lista de aeropuertos
:rtype: List[str]
"""
salidas, llegadas, aeropuertos = [], [], []
for vuelo in vuelos.values():
salidas.append(vuelo['origen'])
llegadas.append(vuelo['destino'])
for aeropuerto in llegadas:
if aeropuerto not in salidas:
aeropuertos.append(aeropuerto)
return aeropuertos | 136b7c1e3428cecee5d3bc7046fac815276288e5 | 11,304 |
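A hypothetical call to sin_salida_naive; the flight-dict layout follows the docstring above and the airport codes are made up:
# vuelos_demo = {
#     "AV205": {"origen": "BOG", "destino": "MDE"},
#     "AV206": {"origen": "MDE", "destino": "BOG"},
#     "AV301": {"origen": "BOG", "destino": "CTG"},
# }
# sin_salida_naive(vuelos_demo) -> ["CTG"]   # CTG receives a flight but none depart from it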
def converter(doc):
"""
This is a function for converting various kinds of objects we see
inside a graffle document.
"""
if doc.nodeName == "#text":
return str(doc.data)
elif doc.nodeName == "string":
return str(doc.firstChild.data)
elif doc.nodeName == 'integer':
return int(doc.firstChild.data)
elif doc.nodeName == 'real':
return float(doc.firstChild.data)
elif doc.nodeName == 'dict':
return convert_dict(doc)
elif doc.nodeName == 'array':
return convert_list(doc)
elif doc.nodeName == 'plist':
return convert_list(doc)
else:
return 'unknown:' + doc.nodeName | 8820aa739f4b96251033c191ea405157cfe1e9fb | 11,305 |
def printable_cmd(c):
"""Converts a `list` of `str`s representing a shell command to a printable
`str`."""
return " ".join(map(lambda e: '"' + str(e) + '"', c)) | b5e8a68fc535c186fdbadc8a669ed3dec0da3aee | 11,306 |
def details_from_params(
params: QueryParams,
items_per_page: int,
items_per_page_async: int = -1,
) -> common.Details:
"""Create details from request params."""
try:
page = int(params.get('page', 1))
except (ValueError, TypeError):
page = 1
try:
anchor = int(params.get('anchor', 1))
except (ValueError, TypeError):
anchor = -1
return common.Details(
page=max(1, page),
anchor=anchor,
items_per_page=items_per_page,
items_per_page_async=items_per_page_async,
) | 50e20619bc4f32af6811a3416b2d2f93820ba44a | 11,307 |
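A sketch of details_from_params, assuming QueryParams behaves like a mapping with .get and that common.Details simply stores the four keyword arguments:
# details_from_params({'page': '3', 'anchor': 'oops'}, items_per_page=25)
# -> common.Details(page=3, anchor=-1, items_per_page=25, items_per_page_async=-1)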
import glob
import os
import numpy as np
import pandas as pd
import scipy.io as sio
def run_nuclei_type_stat(
pred_dir, true_dir, nuclei_type_dict, type_uid_list=None, exhaustive=True, rad=12, verbose=False
):
"""
rad = 12 if x40
rad = 6 if x20
"""
def _get_type_name(uid, ntd=nuclei_type_dict):
for name,v in ntd.items():
if v == uid:
return name
def calc_type_metrics(paired_true, paired_pred, unpaired_true, unpaired_pred, type_id, w):
type_samples = (paired_true == type_id) | (paired_pred == type_id)
paired_true = paired_true[type_samples]
paired_pred = paired_pred[type_samples]
# unpaired_pred_t = unpaired_pred[unpaired_pred == type_id] # (unpaired_pred == type_id).sum()
# unpaired_true_t = unpaired_true[unpaired_true == type_id]
# Original
tp_dt = ((paired_true == type_id) & (paired_pred == type_id)).sum()
tn_dt = ((paired_true != type_id) & (paired_pred != type_id)).sum()
fp_dt = ((paired_true != type_id) & (paired_pred == type_id)).sum()
fn_dt = ((paired_true == type_id) & (paired_pred != type_id)).sum()
# Classification
# TP - detected cell with GT label t, classified as t
tp_dtc = ((paired_true == type_id) & (paired_pred == type_id)).sum()
# TN - detected cell with GT label other than t, classified as other than t
tn_dtc = ((paired_true != type_id) & (paired_pred != type_id)).sum()
# FP - detected cell with GT label other than t classified as t
fp_dtc = ((paired_true != type_id) & (paired_pred == type_id)).sum()
# FN - detected cell with GT label t classified as other than t
fn_dtc = ((paired_true == type_id) & (paired_pred != type_id)).sum()
# Integrated classification
# TP - detected cell with GT label t, classified as t
tp_dtic = ((paired_true == type_id) & (paired_pred == type_id)).sum()
# TN - detected or falsely detected cell with GT label other than t, classified as other than t
tn_dtic = np.concatenate((
((paired_true != type_id) & (paired_pred != type_id)),
(unpaired_pred != type_id)
# np.concatenate(
# ((unpaired_true != type_id), (unpaired_pred != type_id))
# )
)).sum()
# FP - detected or falsely detected cell with GT label other than t, classified as t
fp_dtic = np.concatenate((
((paired_true != type_id) & (paired_pred == type_id)),
(unpaired_pred == type_id)
# np.concatenate(
# ((unpaired_true != type_id), (unpaired_pred == type_id))
# )
)).sum()
# FN - detected cell with GT label t, classified as other than t and all cells with GT label t not detected
fn_dtic = np.concatenate((
((paired_true == type_id) & (paired_pred != type_id)),
(unpaired_true == type_id)
)).sum()
if not exhaustive:
ignore = (paired_true == -1).sum()
fp_dt -= ignore
tp_d = (paired_pred == type_id).sum()
# tn_d = (paired_true == type_id).sum()
fp_d = (unpaired_pred == type_id).sum()
fn_d = (unpaired_true == type_id).sum()
rec_dt = tp_d / (tp_d + fn_d)
def __internal_metrics(tp, tn, fp, fn):
# print (f"tp: {tp}, \ntn: {tn}, \nfp:{fp}, fn: {fn}\n")
acc = (tp + tn) / (tp + fp + fn + tn)
prec = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * (prec * recall) / (prec + recall)
# print (f"Accuracy: {acc}, \nPrecision: {prec}, \nRecall:{recall}, F1: {f1}\n")
return acc, prec, recall, f1
res_class = __internal_metrics(tp_dtc, tn_dtc, fp_dtc, fn_dtc)
dtc_tptnfpfn = (tp_dtc, tn_dtc, fp_dtc, fn_dtc)
res_i_class = __internal_metrics(tp_dtic, tn_dtic, fp_dtic, fn_dtic)
dtic_tptnfpfn = (tp_dtic, tn_dtic, fp_dtic, fn_dtic)
# print (f"tp_dt: {tp_dt}") # TPc
# print (f"tn_dt: {tn_dt}") # TNc
# print (f"fp_dt: {fp_dt}") # FPc
# print (f"fn_dt: {fn_dt}") # FNc
# print (f"fp_d: {fp_d}")
# print (f"fn_d: {fn_d}")
tp_w = tp_dt + tn_dt
fp_w = 2 * fp_dt + fp_d
fn_w = 2 * fn_dt + fn_d
w_f1_type = (2 * (tp_dt + tn_dt)) / (
2 * (tp_dt + tn_dt)
+ w[0] * fp_dt
+ w[1] * fn_dt
+ w[2] * fp_d
+ w[3] * fn_d
)
w_acc_type = (tp_w) / (tp_w + fp_w + fn_w) ## check
w_precision_type = tp_w / (tp_w + fp_w)
w_recall_type = tp_w / (tp_w + fn_w)
weighted = (w_acc_type, w_precision_type, w_recall_type, w_f1_type)
cls_r = (dtc_tptnfpfn, res_class)
icls_r = (dtic_tptnfpfn, res_i_class)
#return f1_type, precision_type, recall_type
return (
rec_dt, ### Segmentation recall
cls_r, ### Classification
icls_r, ### Integrated classification
weighted ### Weighted
)
######################################################
types = sorted([f"{v}:{k}" for k, v in nuclei_type_dict.items()])
if verbose: print(types)
file_list = glob.glob(os.path.join(pred_dir, "*.mat"))
file_list.sort() # ensure same order [1]
paired_all = [] # unique matched index pair
unpaired_true_all = [] # the index must exist in `true_inst_type_all` and unique
unpaired_pred_all = [] # the index must exist in `pred_inst_type_all` and unique
true_inst_type_all = [] # each index is 1 independent data point
pred_inst_type_all = [] # each index is 1 independent data point
for file_idx, filename in enumerate(file_list[:]):
filename = os.path.basename(filename)
basename = filename.split(".")[0]
# print (basename)
# true_info = sio.loadmat(os.path.join(true_dir, '{}.mat'.format(basename)))
# # dont squeeze, may be 1 instance exist
# true_centroid = (true_info['inst_centroid']).astype('float32')
# true_inst_type = (true_info['inst_type']).astype('int32')
true_info = np.load(
os.path.join(true_dir, "{}.npy".format(basename)), allow_pickle=True
)
# dont squeeze, may be 1 instance exist
true_centroid = (true_info.item().get("inst_centroid")).astype("float32")
true_inst_type = (true_info.item().get("inst_type")).astype("int32")
if true_centroid.shape[0] != 0:
true_inst_type = true_inst_type[:, 0]
else: # no instance at all
pass
true_centroid = np.array([[0, 0]])
true_inst_type = np.array([0])
pred_info = sio.loadmat(os.path.join(pred_dir, "{}.mat".format(basename)))
# dont squeeze, may be 1 instance exist
pred_centroid = (pred_info["inst_centroid"]).astype("float32")
pred_inst_type = (pred_info["inst_type"]).astype("int32")
if pred_centroid.shape[0] != 0:
pred_inst_type = pred_inst_type[:, 0]
else: # no instance at all
pass
pred_centroid = np.array([[0, 0]])
pred_inst_type = np.array([0])
# ! if take longer than 1min for 1000 vs 1000 pairing, sthg is wrong with coord
paired, unpaired_true, unpaired_pred = pair_coordinates(
true_centroid, pred_centroid, rad
)
# * Aggreate information
# get the offset as each index represent 1 independent instance
true_idx_offset = (
true_idx_offset + true_inst_type_all[-1].shape[0] if file_idx != 0 else 0
)
pred_idx_offset = (
pred_idx_offset + pred_inst_type_all[-1].shape[0] if file_idx != 0 else 0
)
true_inst_type_all.append(true_inst_type)
pred_inst_type_all.append(pred_inst_type)
# increment the pairing index statistic
if paired.shape[0] != 0: # ! sanity
paired[:, 0] += true_idx_offset
paired[:, 1] += pred_idx_offset
paired_all.append(paired)
unpaired_true += true_idx_offset
unpaired_pred += pred_idx_offset
unpaired_true_all.append(unpaired_true)
unpaired_pred_all.append(unpaired_pred)
paired_all = np.concatenate(paired_all, axis=0) # (x, 2) # paired ids (found in GT and pred)
unpaired_true_all = np.concatenate(unpaired_true_all, axis=0) # (x,) # unpaired ids (found in GT and NOT in pred)
unpaired_pred_all = np.concatenate(unpaired_pred_all, axis=0) # (x,) # unpaired ids (NOT found in GT and found in pred)
true_inst_type_all = np.concatenate(true_inst_type_all, axis=0) # all type ids in true [3,3,3...1,1,1]
paired_true_type = true_inst_type_all[paired_all[:, 0]] # paired true type ids [3,3,3...1,1,1]
unpaired_true_type = true_inst_type_all[unpaired_true_all]
pred_inst_type_all = np.concatenate(pred_inst_type_all, axis=0) # all type ids in pred [3,3,3...1,1,1]
paired_pred_type = pred_inst_type_all[paired_all[:, 1]]
unpaired_pred_type = pred_inst_type_all[unpaired_pred_all]
# true_inst_type_all = paired_true_type + unpaired_true_type
###
# overall
# * quite meaningless for not exhaustive annotated dataset
tp_d = paired_pred_type.shape[0]
fp_d = unpaired_pred_type.shape[0]
fn_d = unpaired_true_type.shape[0]
tp_tn_dt = (paired_pred_type == paired_true_type).sum()
fp_fn_dt = (paired_pred_type != paired_true_type).sum()
if not exhaustive:
ignore = (paired_true_type == -1).sum()
fp_fn_dt -= ignore
w = [1, 1]
acc_type = tp_tn_dt / (tp_tn_dt + fp_fn_dt)
precision = tp_d / (tp_d + w[0] * fp_d)
recall = tp_d / (tp_d + w[0] * fn_d)
f1_d = 2 * tp_d / (2 * tp_d + w[0] * fp_d + w[1] * fn_d)
# results_list = [acc_type, precision, recall, f1_d]
results_all_types = [[acc_type], [precision], [recall], [f1_d]]
w = [2, 2, 1, 1]
if type_uid_list is None:
type_uid_list = np.unique(true_inst_type_all).tolist()
if 0 in type_uid_list:
type_uid_list.remove(0)
pred_type_uid_list = np.unique(pred_inst_type_all).tolist()
if 0 in pred_type_uid_list:
pred_type_uid_list.remove(0)
if verbose:
print(f"True type_uid_list: {type_uid_list}")
print(f"Pred type_uid_list: {pred_type_uid_list}")
res_all = {}
for type_uid in type_uid_list:
res = calc_type_metrics(
paired_true_type,
paired_pred_type,
unpaired_true_type,
unpaired_pred_type,
type_uid,
w,
)
result_uid_metrics = [
[res[0]], # rec_dt ### Segmentation recall
[res[1][1][0]], [res[1][1][1]], [res[1][1][2]], [res[1][1][3]], # (dtc_tptnfpfn, res_class), ### Classification
[res[2][1][0]], [res[2][1][1]], [res[2][1][2]], [res[2][1][3]], # (dtic_tptnfpfn, res_i_class), ### Integrated classification
[res[3][0]], [res[3][1]], [res[3][2]], [res[3][3]] # weighted ### Weighted
]
res_all[f"{type_uid}:{_get_type_name(type_uid)}"] = result_uid_metrics
### I - integrated, W - weighted, Type - across all types
cols_uid = ["Recall_dt", "Cls_acc", "Cls_precision", "Cls_recall", "Cls_F1", "ICls_acc", "ICls_precision", "ICls_recall", "ICls_F1", "WCls_acc", "WCls_precision", "WCls_recall", "WCls_F1"] # result_uid_metrics
cols_all_types = ["Type_acc", "Type_precision", "Type_recall", "Type_F1"] # results_all_types
df_all_types = pd.DataFrame(np.transpose(np.array(results_all_types)), columns=cols_all_types)
df_uid = pd.DataFrame(np.squeeze(np.array(list(res_all.values()))), columns=cols_uid)
df_uid["Type"] = list(res_all.keys())
df_uid = df_uid[["Type", *cols_uid]]
if verbose:
print()
print(df_all_types.to_markdown(index=False))
print()
print(df_uid.to_markdown(index=False))
return df_uid, df_all_types | 9b24343a05c5111b80239c66f1500a934ea3ba33 | 11,308 |
from typing import List
from typing import Mapping
from typing import Text
from typing import Tuple
def _signature_pre_process_predict(
    signature: _SignatureDef) -> Tuple[List[Text], Mapping[Text, Text], Mapping[Text, Text]]:
  """Returns input tensor names, input tensor types and output alias tensor
  names from signature.
  Args:
    signature: SignatureDef
  Returns:
    A tuple of input tensor names, input tensor types and output alias
    tensor names.
"""
input_tensor_names = [value.name for value in signature.inputs.values()]
input_tensor_types = dict([
(key, value.dtype) for key, value in signature.inputs.items()
])
output_alias_tensor_names = dict([
(key, output.name) for key, output in signature.outputs.items()
])
return input_tensor_names, input_tensor_types, output_alias_tensor_names | a53af746f7cebc7c3baaa316458dd3e7b88c2c38 | 11,309 |
def style_95_read_mode(line, patterns):
"""Style the EAC 95 read mode line."""
# Burst mode doesn't have multiple settings in one line
if ',' not in line:
return style_setting(line, 'bad')
split_line = line.split(':', 1)
read_mode = split_line[0].rstrip()
line = line.replace(read_mode, '<span class="log5">{}</span>'.format(read_mode), 1)
parts = split_line[1].lstrip().split(' ', 1)
parts[1:] = [part.strip() for part in parts[1].split(',')]
num = 0
p = patterns['95 settings']
for setting in [
p['Read mode'],
p['C2 pointers'],
p['Accurate stream'],
p['Audio cache'],
]:
if num == len(parts):
break
class_ = 'good' if setting in line else 'bad'
line = line.replace(
parts[num], '<span class="{}">{}</span>'.format(class_, parts[num]), 1
)
num += 1
return line | 25c347acb87702f19ebcea85a3b4f0257df101ae | 11,310 |
def trange(
client, symbol, timeframe="6m", highcol="high", lowcol="low", closecol="close"
):
"""This will return a dataframe of true range for the given symbol across
the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
trange = t.TRANGE(df[highcol].values, df[lowcol].values, df[closecol].values)
return pd.DataFrame(
{
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
"trange": trange,
}
) | 94cf95eb86575a66e015e46fd81a5a8085277255 | 11,311 |
def ConvolveUsingAlm(map_in, psf_alm):
"""Convolve a map using a set of pre-computed ALM
Parameters
----------
map_in : array_like
HEALPix map to be convolved
psf_alm : array_like
        The ALM representation of the PSF
Returns
-------
map_out : array_like
The smeared map
"""
norm = map_in.sum()
nside = hp.pixelfunc.npix2nside(map_in.size)
almmap = hp.sphtfunc.map2alm(map_in)
almmap *= psf_alm
outmap = hp.sphtfunc.alm2map(almmap, nside)
outmap *= norm / outmap.sum()
return outmap | 0ebfa49c605f57ea2cc59b2686da8995dc01881f | 11,312 |
import pandas as pd
def load_summary_data():
    """Load the summary data CSV into a pandas DataFrame.
    :return: pandas dataframe
    """
DATA_URL = 'data/summary_df.csv'
data = pd.read_csv(DATA_URL)
return data | b5f09e845e1379fd00a03fd11b0174e3114eb7d3 | 11,313 |
import itertools
def _enumerate_trees_w_leaves(n_leaves):
"""Construct all rooted trees with n leaves."""
def enumtree(*args):
n_args = len(args)
# trivial cases:
if n_args == 0:
return []
if n_args == 1:
return args
# general case of 2 or more args:
# build index array
idxs = range(0, n_args)
trees = []
# we consider all possible subsets of size n_set to gather
for n_set in range(2, n_args+1):
idxsets = list(itertools.combinations(idxs, n_set))
for idxset in idxsets:
# recurse by joining all subtrees with
# n_set leaves and (n_args - n_set) leaves
arg_set = tuple(args[i] for i in idxs if i in idxset)
arg_coset = tuple(args[i] for i in idxs if i not in idxset)
if arg_coset:
trees.extend(tuple(itertools.product(enumtree(*arg_set),
enumtree(*arg_coset))))
else:
# trivial case where arg_set is entire set
trees.append(arg_set)
return trees
# return enumerated trees with integers as leaves
return enumtree(*range(n_leaves)) | 574a2d3ec63d3aeeb06292ec361b83aebba0ff84 | 11,314 |
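A small worked example for _enumerate_trees_w_leaves: with three leaves the enumeration yields the three "cherry" trees plus the star tree where every leaf hangs off the root.
# _enumerate_trees_w_leaves(3)
# -> [((0, 1), 2), ((0, 2), 1), ((1, 2), 0), (0, 1, 2)]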
from typing import get_args
import sys
import re
def main():
"""Make a jazz noise here"""
args = get_args()
sub = args.substring
word = args.word
# Sanity check
if sub not in word:
sys.exit(f'Substring "{sub}" does not appear in word "{word}"')
# Create a pattern that replaces the length of the sub with any letters
pattern = word.replace(sub, '[a-z]{' + str(len(sub)) + '}')
regex = re.compile('^' + pattern + '$')
# Find matches but exclude the original word
def match(check):
return check != word and regex.match(check)
if words := list(filter(match, args.wordlist.read().lower().split())):
print('\n'.join(words))
else:
print('Womp womp') | 0c2149e1f107967a0bc948fde0e7a25844664f24 | 11,315 |
def gen_tfidf(tokens, idf_dict):
"""
Given a segmented string and idf dict, return a dict of tfidf.
"""
# tokens = text.split()
total = len(tokens)
tfidf_dict = {}
for w in tokens:
tfidf_dict[w] = tfidf_dict.get(w, 0.0) + 1.0
for k in tfidf_dict:
tfidf_dict[k] *= idf_dict.get(k, 0.0) / total
return tfidf_dict | 9217867b3661a8070cc1b2d577918c95d1ff7755 | 11,316 |
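A hypothetical example of gen_tfidf; tokens missing from idf_dict get an idf of 0:
# gen_tfidf(['cat', 'cat', 'dog', 'fish'], {'cat': 1.0, 'dog': 2.0})
# -> {'cat': 0.5, 'dog': 0.5, 'fish': 0.0}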
def timestamp_to_seconds(timestamp):
"""Convert timestamp to python (POSIX) time in seconds.
:param timestamp: The timestamp.
:return: The python time in float seconds.
"""
return (timestamp / 2**30) + EPOCH | 3d5ca5f5ec93b54e1d1a6c53cefba1d49f8ebac2 | 11,317 |
import io
import subprocess
import time
from threading import Thread
def launch_dpf(ansys_path, ip=LOCALHOST, port=DPF_DEFAULT_PORT, timeout=10, docker_name=None):
"""Launch Ansys DPF.
Parameters
----------
ansys_path : str, optional
Root path for the Ansys installation directory. For example, ``"/ansys_inc/v212/"``.
The default is the latest Ansys installation.
ip : str, optional
IP address of the remote or local instance to connect to. The
default is ``"LOCALHOST"``.
port : int
Port to connect to the remote instance on. The default is
``"DPF_DEFAULT_PORT"``, which is 50054.
timeout : float, optional
Maximum number of seconds for the initialization attempt.
The default is ``10``. Once the specified number of seconds
passes, the connection fails.
docker_name : str, optional
To start DPF server as a docker, specify the docker name here.
Returns
-------
process : subprocess.Popen
DPF Process.
"""
process = _run_launch_server_process(ansys_path, ip, port, docker_name)
# check to see if the service started
lines = []
docker_id = []
def read_stdout():
for line in io.TextIOWrapper(process.stdout, encoding="utf-8"):
LOG.debug(line)
lines.append(line)
if docker_name:
docker_id.append(lines[0].replace("\n", ""))
docker_process = subprocess.Popen(f"docker logs {docker_id[0]}",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for line in io.TextIOWrapper(docker_process.stdout, encoding="utf-8"):
LOG.debug(line)
lines.append(line)
errors = []
def read_stderr():
for line in io.TextIOWrapper(process.stderr, encoding="utf-8"):
LOG.error(line)
errors.append(line)
# must be in the background since the process reader is blocking
Thread(target=read_stdout, daemon=True).start()
Thread(target=read_stderr, daemon=True).start()
t_timeout = time.time() + timeout
started = False
while not started:
started = any("server started" in line for line in lines)
if time.time() > t_timeout:
raise TimeoutError(f"Server did not start in {timeout} seconds")
# verify there were no errors
time.sleep(0.1)
if errors:
try:
process.kill()
except PermissionError:
pass
errstr = "\n".join(errors)
if "Only one usage of each socket address" in errstr:
raise errors.InvalidPortError(f"Port {port} in use")
raise RuntimeError(errstr)
if len(docker_id) > 0:
return docker_id[0] | 6d9d4039b702c3499ed256254f156f28eeb809a6 | 11,318 |
def fit_lowmass_mstar_mpeak_relation(mpeak_orig, mstar_orig,
mpeak_mstar_fit_low_mpeak=default_mpeak_mstar_fit_low_mpeak,
mpeak_mstar_fit_high_mpeak=default_mpeak_mstar_fit_high_mpeak):
"""
"""
mid = 0.5*(mpeak_mstar_fit_low_mpeak + mpeak_mstar_fit_high_mpeak)
mask = (mpeak_orig >= 10**mpeak_mstar_fit_low_mpeak)
mask &= (mpeak_orig < 10**mpeak_mstar_fit_high_mpeak)
# Add noise to mpeak to avoid particle discreteness effects in the fit
_x = np.random.normal(loc=np.log10(mpeak_orig[mask])-mid, scale=0.002)
_y = np.log10(mstar_orig[mask])
c1, c0 = np.polyfit(_x, _y, deg=1)
return c0, c1, mid | 620275ad18173bb00d38f3d468be132d150fc1fa | 11,319 |
def load_ref_system():
""" Returns benzaldehyde as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
C 0.3179 1.0449 -0.0067
C 1.6965 0.8596 -0.0102
C 2.2283 -0.4253 -0.0050
C 1.3808 -1.5297 0.0037
C 0.0035 -1.3492 0.0073
C -0.5347 -0.0596 0.0021
C -2.0103 0.0989 0.0061
O -2.5724 1.1709 0.0021
H 2.3631 1.7283 -0.0171
H 3.3139 -0.5693 -0.0078
H 1.8000 -2.5413 0.0078
H -0.6626 -2.2203 0.0142
H -2.6021 -0.8324 0.0131
H -0.1030 2.0579 -0.0108
""") | 518ca10a84befa07fefa3c2f646e40095318d63c | 11,320 |
def get_department_level_grade_data_completed(request_ctx, account_id, **request_kwargs):
"""
Returns the distribution of grades for students in courses in the
department. Each data point is one student's current grade in one course;
if a student is in multiple courses, he contributes one value per course,
but if he's enrolled multiple times in the same course (e.g. a lecture
    section and a lab section), he only contributes one value for that course.
Grades are binned to the nearest integer score; anomalous grades outside
the 0 to 100 range are ignored. The raw counts are returned, not yet
normalized by the total count.
Shares the same variations on endpoint as the participation data.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param account_id: (required) ID
:type account_id: string
:return: Get department-level grade data
:rtype: requests.Response (with void data)
"""
path = '/v1/accounts/{account_id}/analytics/completed/grades'
url = request_ctx.base_api_url + path.format(account_id=account_id)
response = client.get(request_ctx, url, **request_kwargs)
return response | 8dd40c7b7c7a734aa66d4f808224424c0c0df81d | 11,321 |
import numpy as np
def allocate_samples_to_bins(n_samples, ideal_bin_count=100):
    """Pick a number of bins and per-bin sample counts that, as closely
    as possible, achieve a given total number of samples.
Parameters
----------
Returns
----------
number of bins, list of samples per bin
"""
if n_samples <= ideal_bin_count:
n_bins = n_samples
samples_per_bin = [1 for _ in range(n_bins)]
else:
n_bins = ideal_bin_count
remainer = n_samples % ideal_bin_count
samples_per_bin = np.array([(n_samples - remainer) / ideal_bin_count for _ in range(n_bins)])
if remainer != 0:
additional_samples_per_bin = distribute_samples(remainer, n_bins)
samples_per_bin = samples_per_bin + additional_samples_per_bin
return n_bins, np.array(samples_per_bin).astype(int) | 66d5fe32a89478b543818d63c65f2745fe242b33 | 11,322 |
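Hypothetical calls to allocate_samples_to_bins (the uneven-remainder case additionally relies on the distribute_samples helper, which is not part of this record):
# allocate_samples_to_bins(5)   -> (5, array([1, 1, 1, 1, 1]))
# allocate_samples_to_bins(200) -> (100, array of one hundred 2s)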
from typing import Any
def create_algo(name: str, discrete: bool, **params: Any) -> AlgoBase:
"""Returns algorithm object from its name.
Args:
name (str): algorithm name in snake_case.
discrete (bool): flag to use discrete action-space algorithm.
params (any): arguments for algorithm.
Returns:
d3rlpy.algos.base.AlgoBase: algorithm.
"""
return get_algo(name, discrete)(**params) | 4fab0f5581eb6036efba6074ab6e3b232bcf5679 | 11,323 |
import numpy as np
def tf_inv(T):
""" Invert 4x4 homogeneous transform """
assert T.shape == (4, 4)
return np.linalg.inv(T) | 5bf7d54456198c25029956a7aebe118d7ee4fa87 | 11,324 |
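A quick check of tf_inv on a pure translation, whose inverse simply negates the offset:
# T = np.eye(4); T[:3, 3] = [1.0, 2.0, 3.0]
# tf_inv(T)[:3, 3] -> array([-1., -2., -3.])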
def send_reset_password_email(token, to, username):
"""
send email to user for reset password
:param token: token
:param to: email address
:param username: user.username
:return:
"""
url_to = current_app.config["WEB_BASE_URL"] + "/auth/reset-password?token=" + token
response = _send_email(
subject="请重置密码",
to=to,
html_body=render_template(
"emails/reset_password.html", username=username, url_to=url_to
),
)
return response.status_code | dddcb66425de79a1a736bbbcc5cbc3f5855e7db9 | 11,325 |
def part1(data):
"""Solve part 1"""
countIncreased = 0
prevItem = None
for row in data:
        if prevItem is None:
prevItem = row
continue
if prevItem < row:
            countIncreased += 1
prevItem = row
return countIncreased | e01b5edc9d9ac63a31189160d09b5e6e0f11e522 | 11,326 |
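A hypothetical example for part1, counting measurements that are larger than the previous one:
# part1([199, 200, 208, 210, 200, 207]) -> 4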
import numpy as np
def yzrotation(theta=np.pi*3/20.0):
"""
Returns a simple planar rotation matrix that rotates
vectors around the x-axis.
args:
theta: The angle by which we will perform the rotation.
"""
r = np.eye(3)
r[1,1] = np.cos(theta)
r[1,2] = -np.sin(theta)
r[2,1] = np.sin(theta)
r[2,2] = np.cos(theta)
return r | 59a2a251f8e8aa77548f749f49871536de29b0bb | 11,327 |
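A sanity check for yzrotation: rotating the y unit vector by 90 degrees about the x-axis sends it to the z axis (up to floating point error):
# np.round(yzrotation(np.pi / 2) @ np.array([0.0, 1.0, 0.0]), 6) -> array([0., 0., 1.])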
def is_compiled_release(data):
"""
Returns whether the data is a compiled release (embedded or linked).
"""
return 'tag' in data and isinstance(data['tag'], list) and 'compiled' in data['tag'] | ea8c8ae4f1ccdedbcc145bd57bde3b6040e5cab5 | 11,328 |
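Hypothetical examples for is_compiled_release; the tag field must be a list containing 'compiled':
# is_compiled_release({'tag': ['compiled'], 'ocid': 'ocds-213czf-1'}) -> True
# is_compiled_release({'tag': 'compiled'}) -> False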
import numpy
from PIL import Image
def resize_frame(
frame: numpy.ndarray, width: int, height: int, mode: str = "RGB"
) -> numpy.ndarray:
"""
Use PIL to resize an RGB frame to an specified height and width.
Args:
frame: Target numpy array representing the image that will be resized.
width: Width of the resized image.
height: Height of the resized image.
mode: Passed to Image.convert.
Returns:
The resized frame that matches the provided width and height.
"""
frame = Image.fromarray(frame)
frame = frame.convert(mode).resize(size=(width, height))
return numpy.array(frame) | 941eb73961843e46b4e67d48439a09c0223c2af0 | 11,329 |
def get_proxies(host, user, password, database, port=3306, unix_socket=None):
""""Connect to a mysql database using pymysql and retrieve proxies for the scraping job.
Args:
host: The mysql database host
user: The mysql user
password: The database password
port: The mysql port, by default 3306
unix_socket: Sometimes you need to specify the mysql socket file when mysql doesn't reside
in a standard location.
Returns;
A list of proxies obtained from the database
Raisese:
An Exception when connecting to the database fails.
"""
try:
conn = pymysql.connect(host=host, port=port, user=user, passwd=password, unix_socket=unix_socket)
conn.select_db(database)
cur = conn.cursor(pymysql.cursors.DictCursor)
# Adapt this code for you to make it retrieving the proxies in the right format.
cur.execute('SELECT host, port, username, password, protocol FROM proxies')
proxies = [Proxy(proto=s['protocol'], host=s['host'], port=s['port'],
username=s['username'], password=s['password']) for s in cur.fetchall()]
return proxies
except Exception as e:
logger.error(e)
raise | d4595440c9d4d07a7d5e27740bf7049176dbe432 | 11,330 |
def APIRevision():
"""Gets the current API revision to use.
Returns:
str, The revision to use.
"""
return 'v1beta3' | c748e1917befe76da449e1f435540e10ee433444 | 11,331 |
def pretty_string_value_error(value, error, error_digits=2, use_unicode=True):
"""
Returns a value/error combination of numbers in a scientifically
'pretty' format.
Scientific quantities often come as a *value* (the actual
quantity) and the *error* (the uncertainty in the value).
Given two floats, value and error, return the two in a
'pretty' formatted string: where the value and error are truncated
at the correct precision.
Parameters
----------
value : float
The quantity in question
error : float
The uncertainty of the quantity
error_digits : int, default 2
How many significant figures the error has. Scientific
convention holds that errors have 1 or (at most) 2 significant
figures. The larger number of digits is chosen here by default.
Returns
-------
new_string : str
A new list of strings sorted numerically
Examples
--------
>>> pretty_string_value_error(1.23456789e8, 4.5678e5,
error_digits=2)
"1.2346 +/- 0.0046 * 10^+08"
>>> pretty_string_value_error(5.6e-2, 2.0e-3, error_digits=1)
"5.6 +/- 0.2 * 10^-02"
"""
if value is None:
return "None"
if error is None or not np.isfinite(error):
if use_unicode:
new_string = "{:.6E} \u00B1 UNKNOWN ERROR MARGIN".format(value)
else:
new_string = "{:.6E} +/- UNKNOWN ERROR MARGIN".format(value)
else:
if not np.isfinite(value):
return str(value)
assert "e" in "{:e}".format(value), "Cannot convert into scientific "\
"notation: {1}".format(value)
value_mantissa_str, value_exponent_str = \
"{:e}".format(value).strip().split('e')
value_mantissa = float(value_mantissa_str)
value_exponent = int(value_exponent_str)
error_mantissa_str, error_exponent_str = \
"{:e}".format(error).strip().split('e')
error_mantissa = float(error_mantissa_str)
error_exponent = int(error_exponent_str)
padding = value_exponent - error_exponent + error_digits - 1
if padding < 1: padding = 1
exp_diff = error_exponent - value_exponent
string_for_formatting = "{:.%df}" % padding
new_value_mantissa = string_for_formatting.format(value_mantissa)
new_error_mantissa = string_for_formatting.format(
error_mantissa*10**exp_diff)
if use_unicode:
new_string = "%s \u00B1 %s * 10^%s" % (
new_value_mantissa, new_error_mantissa, value_exponent_str)
else:
new_string = "%s +/- %s * 10^%s" % (
new_value_mantissa, new_error_mantissa, value_exponent_str)
return new_string | bd7b1496880e7d1cb4ffd04d23df20d679ac8ade | 11,332 |
def update_resnet(model, debug=False):
"""
Update a ResNet model to use :class:`EltwiseSum` for the skip
connection.
Args:
model (:class:`torchvision.models.resnet.ResNet`): ResNet model.
debug (bool): If True, print debug statements.
Returns:
model (:class:`torchvision.models.resnet.ResNet`): ResNet model
that uses :class:`EltwiseSum` for the skip connections. The forward
functions of :class:`torchvision.models.resnet.BasicBlock` and
:class:`torch.models.resnet.Bottleneck` are modified.
"""
assert isinstance(model, ResNet)
def bottleneck_forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = self.skip(out, identity)
out = self.relu(out)
return out
def basicblock_forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out = self.skip(out, identity)
out = self.relu(out)
return out
for module_name, module in model.named_modules():
if isinstance(module, Bottleneck):
module.skip = EltwiseSum()
module.forward = bottleneck_forward.__get__(module)
elif isinstance(module, BasicBlock):
module.skip = EltwiseSum()
module.forward = basicblock_forward.__get__(module)
else:
continue
if debug:
print('Adding EltwiseSum as skip connection in {}.'.format(
module_name))
return model | a6baf031ff89b82b4e063795555c55885548f61e | 11,333 |
def sameSize(arguments) -> bool:
"""Checks whether given vectors are the same size or not"""
sameLength = True
initialSize = len(vectors[arguments[0]])
for vector in arguments:
if len(vectors[vector]) != initialSize:
sameLength = False
return sameLength | 0840adcb0f6a84c56ff3b0ce3aa23892e45d942e | 11,334 |
def db_read(src_path, read_type=set, read_int=False):
"""Read string data from a file into a variable of given type.
Read from the file at 'src_path', line by line, skipping certain lines and
removing trailing whitespace.
If 'read_int' is True, convert the resulting string to int.
Return read data as an object of the desired type specified by 'read_type'.
"""
def skip(s):
"""Bool func. for skipping a line. "#%# " is chosen as a comment
indicator. """
return s == "\n" or s.startswith("#%# ")
if read_type is list:
result = list()
with open(src_path, "r") as f:
for i in f.readlines():
if not skip(i):
result.append(int(i.strip()) if read_int else i.strip())
elif read_type is set:
result = set()
with open(src_path, "r") as f:
for i in f.readlines():
if not skip(i):
result.add(int(i.strip()) if read_int else i.strip())
elif read_type is dict:
# Process the lines in pairs: First the key, then the corresponding
# value, and then the next key... and so on.
result = dict()
with open(src_path, "r") as f:
key_temp = ""
for i in f.readlines():
if not skip(i):
if key_temp:
result[key_temp] = (
int(i.strip()) if read_int else i.strip()
)
key_temp = ""
else:
key_temp = (int(i.strip()) if read_int else i.strip())
elif read_type is str:
# Only read the first line of the file, strip and return it:
with open(src_path, "r") as f:
result = f.readline().rstrip()
else:
logger.error("db_read: read_type is not list, str, set or dict.")
return None
return result | 811c6efb83d134d695c6dec2e34d3405818b8a48 | 11,335 |
import re
def get_config_keys():
"""Parses Keys.java to extract keys to be used in configuration files
Args: None
Returns:
list: A list of dict containing the following keys -
'key': A dot separated name of the config key
'description': A list of str
"""
desc_re = re.compile(r"(/\*\*\n|\s+\*/|\s+\*)")
key_match_re = re.compile(r"\(\n(.+)\);", re.DOTALL)
key_split_re = re.compile(r",\s+", re.DOTALL)
keys = []
with open(_KEYS_FILE, "r") as f:
config = re.findall(
r"(/\*\*.*?\*/)\n\s+(public static final Config.*?;)", f.read(), re.DOTALL
)
for i in config:
try:
key_match = key_match_re.search(i[1])
if key_match:
terms = [x.strip() for x in key_split_re.split(key_match.group(1))]
key = terms[0].replace('"', "")
description = [
x.strip().replace("\n", "")
for x in desc_re.sub("\n", i[0]).strip().split("\n\n")
]
if len(terms) == 3:
description.append(f"Default: {terms[2]}")
keys.append(
{
"key": key,
"description": description,
}
)
except IndexError:
# will continue if key_match.group(1) or terms[0] does not exist
# for some reason
pass
return keys | 8c04fcac2d05579ce47f5436999f0fe86fb1bdbd | 11,336 |
def new():
"""Create a new community."""
return render_template('invenio_communities/new.html') | 60ee1560f749d94833f57b6a34e2d514e3e04ccb | 11,337 |
import torch
def interpolate(results_t, results_tp1, dt, K, c2w, img_wh):
"""
Interpolate between two results t and t+1 to produce t+dt, dt in (0, 1).
For each sample on the ray (the sample points lie on the same distances, so they
actually form planes), compute the optical flow on this plane, then use softsplat
to splat the flows. Finally use MPI technique to compute the composite image.
Used in test time only.
Inputs:
results_t, results_tp1: dictionaries of the @render_rays function.
dt: float in (0, 1)
K: (3, 3) intrinsics matrix (MUST BE THE SAME for results_t and results_tp1!)
c2w: (3, 4) current pose (MUST BE THE SAME for results_t and results_tp1!)
img_wh: image width and height
Outputs:
(img_wh[1], img_wh[0], 3) rgb interpolation result
(img_wh[1], img_wh[0]) depth of the interpolation (in NDC)
"""
device = results_t['xyzs_fine'].device
N_rays, N_samples = results_t['xyzs_fine'].shape[:2]
w, h = img_wh
rgba = torch.zeros((h, w, 4), device=device)
depth = torch.zeros((h, w), device=device)
c2w_ = torch.eye(4)
c2w_[:3] = c2w
w2c = torch.inverse(c2w_)[:3]
w2c[1:] *= -1 # "right up back" to "right down forward" for cam projection
P = K @ w2c # (3, 4) projection matrix
grid = create_meshgrid(h, w, False, device) # (1, h, w, 2)
xyzs = results_t['xyzs_fine'] # equals results_tp1['xyzs_fine']
zs = rearrange(results_t['zs_fine'], '(h w) n2 -> h w n2', w=w, h=h)
# static buffers
static_rgb = rearrange(results_t['static_rgbs_fine'],
'(h w) n2 c -> h w n2 c', w=w, h=h, c=3)
static_a = rearrange(results_t['static_alphas_fine'], '(h w) n2 -> h w n2 1', w=w, h=h)
# compute forward buffers
xyzs_w = ray_utils.ndc2world(rearrange(xyzs, 'n1 n2 c -> (n1 n2) c'), K)
xyzs_fw_w = ray_utils.ndc2world(
rearrange(xyzs+results_t['transient_flows_fw'],
'n1 n2 c -> (n1 n2) c'), K) # fw points with full flow
xyzs_fw_w = xyzs_w + dt*(xyzs_fw_w-xyzs_w) # scale the flow with dt
uvds_fw = P[:3, :3] @ rearrange(xyzs_fw_w, 'n c -> c n') + P[:3, 3:]
uvs_fw = uvds_fw[:2] / uvds_fw[2]
uvs_fw = rearrange(uvs_fw, 'c (n1 n2) -> c n1 n2', n1=N_rays, n2=N_samples)
uvs_fw = rearrange(uvs_fw, 'c (h w) n2 -> n2 h w c', w=w, h=h)
of_fw = rearrange(uvs_fw-grid, 'n2 h w c -> n2 c h w', c=2)
transient_rgb_t = rearrange(results_t['transient_rgbs_fine'],
'(h w) n2 c -> n2 c h w', w=w, h=h, c=3)
transient_a_t = rearrange(results_t['transient_alphas_fine'],
'(h w) n2 -> n2 1 h w', w=w, h=h)
transient_rgba_t = torch.cat([transient_rgb_t, transient_a_t], 1)
# compute backward buffers
xyzs_bw_w = ray_utils.ndc2world(
rearrange(xyzs+results_tp1['transient_flows_bw'],
'n1 n2 c -> (n1 n2) c'), K) # bw points with full flow
xyzs_bw_w = xyzs_w + (1-dt)*(xyzs_bw_w-xyzs_w) # scale the flow with 1-dt
uvds_bw = P[:3, :3] @ rearrange(xyzs_bw_w, 'n c -> c n') + P[:3, 3:]
uvs_bw = uvds_bw[:2] / uvds_bw[2]
uvs_bw = rearrange(uvs_bw, 'c (n1 n2) -> c n1 n2', n1=N_rays, n2=N_samples)
uvs_bw = rearrange(uvs_bw, 'c (h w) n2 -> n2 h w c', w=w, h=h)
of_bw = rearrange(uvs_bw-grid, 'n2 h w c -> n2 c h w', c=2)
transient_rgb_tp1 = rearrange(results_tp1['transient_rgbs_fine'],
'(h w) n2 c -> n2 c h w', w=w, h=h, c=3)
transient_a_tp1 = rearrange(results_tp1['transient_alphas_fine'],
'(h w) n2 -> n2 1 h w', w=w, h=h)
transient_rgba_tp1 = torch.cat([transient_rgb_tp1, transient_a_tp1], 1)
for s in range(N_samples): # compute MPI planes (front to back composition)
transient_rgba_fw = FunctionSoftsplat(tenInput=transient_rgba_t[s:s+1].cuda(),
tenFlow=of_fw[s:s+1].cuda(),
tenMetric=None,
strType='average').cpu()
transient_rgba_fw = rearrange(transient_rgba_fw, '1 c h w -> h w c')
transient_rgba_bw = FunctionSoftsplat(tenInput=transient_rgba_tp1[s:s+1].cuda(),
tenFlow=of_bw[s:s+1].cuda(),
tenMetric=None,
strType='average').cpu()
transient_rgba_bw = rearrange(transient_rgba_bw, '1 c h w -> h w c')
composed_rgb = transient_rgba_fw[..., :3]*transient_rgba_fw[..., 3:]*(1-dt) + \
transient_rgba_bw[..., :3]*transient_rgba_bw[..., 3:]*dt + \
static_rgb[:, :, s]*static_a[:, :, s]
composed_a = 1 - (1-(transient_rgba_fw[..., 3:]*(1-dt)+
transient_rgba_bw[..., 3:]*dt)) * \
(1-static_a[:, :, s])
rgba[..., :3] += (1-rgba[..., 3:])*composed_rgb
depth += (1-rgba[..., 3])*composed_a[..., 0]*zs[..., s]
rgba[..., 3:] += (1-rgba[..., 3:])*composed_a
return rgba[..., :3], depth | d5cdae22a3fb324e9bdfdedabe0b69cb5d40ebdb | 11,338 |
def divisor(baudrate):
"""Calculate the divisor for generating a given baudrate"""
CLOCK_HZ = 50e6
return round(CLOCK_HZ / baudrate) | a09eee716889ee6950f8c5bba0f31cdd2b311ada | 11,339 |
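Worked examples for divisor, assuming the 50 MHz clock defined above:
# divisor(115200) -> 434    (50e6 / 115200 ≈ 434.03)
# divisor(9600)   -> 5208   (50e6 / 9600 ≈ 5208.33)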
def scm_get_active_branch(*args, **kwargs):
"""
Get the active named branch of an existing SCM repository.
:param str path: Path on the file system where the repository resides. If not specified, it defaults to the
current work directory.
:return: Name of the active branch
:rtype: str
"""
if not _scm_handler:
_load_scm_handler()
return _scm_handler.get_active_branch(*args, **kwargs) | 6c18454548732cd8db4ba85b45cdc9a8d9b47fce | 11,340 |
def search_evaluations(campus, **kwargs):
"""
year (required)
term_name (required): Winter|Spring|Summer|Autumn
curriculum_abbreviation
course_number
section_id
student_id (student number)
"""
url = "%s?%s" % (IAS_PREFIX, urlencode(kwargs))
data = get_resource_by_campus(url, campus)
evaluations = _json_to_evaluation(data)
return evaluations | d7069c2e9135b350141b0053e3ec1202650b7c28 | 11,341 |
from typing import Optional
from sqlalchemy import select
async def get_user(username: str, session: AsyncSession) -> Optional[User]:
"""
Returns a user with the given username
"""
return (
(await session.execute(select(User).where(User.name == username)))
.scalars()
.first()
) | 0975c069d76414fbf57f4b8f7370d0ada40e39f5 | 11,342 |
import os
import time
import stat
import datetime
def convert_modtime_to_date(path):
"""
Formats last modification date of a file into m/d/y form.
Params:
path (file path): the file to be documented
Example:
convert_modtime_to_date(/users/.../last_minute_submission.pdf)
"""
fileStatsObj = os.stat(path)
modificationTime = time.ctime(fileStatsObj[stat.ST_MTIME])
return datetime.datetime.strptime(modificationTime,'%a %b %d %H:%M:%S %Y').strftime('%m/%d/%y') | 7377e6c46e477a8af9fa1c500517e2a9b2d7df49 | 11,343 |
import pandas
def compute_balances(flows):
"""
Balances by currency.
:param flows:
:return:
"""
flows = flows.set_index('date')
flows_by_asset = flows.pivot(columns='asset', values='amount').apply(pandas.to_numeric)
balances = flows_by_asset.fillna(0).cumsum()
return balances | 98728c2c687df60194eb11b479c08fc90502807a | 11,344 |
import json
def unjsonify(json_data):
"""
Converts the inputted JSON data to Python format.
:param json_data | <variant>
"""
return json.loads(json_data, object_hook=json2py) | 93a59f8a2ef96cbe25e89c2970969b0132b1a892 | 11,345 |
from typing import Tuple
from typing import List
import numpy as np
def comp_state_dist(table: np.ndarray) -> Tuple[np.ndarray, List[str]]:
"""Compute the distribution of distinct states/diagnoses from a table of
individual diagnoses detailing the patterns of lymphatic progression per
patient.
Args:
table: Rows of patients and columns of LNLs, reporting which LNL was
involved for which patient.
Returns:
A histogram of unique states and a list of the corresponding state
labels.
Note:
This function cannot deal with parts of the diagnose being unknown. So
if, e.g., one level isn't reported for a patient, that row will just be
ignored.
"""
_, num_cols = table.shape
table = table.astype(float)
state_dist = np.zeros(shape=2**num_cols, dtype=int)
for row in table:
if not np.any(np.isnan(row)):
idx = int(np.sum([n * 2**i for i,n in enumerate(row[::-1])]))
state_dist[idx] += 1
state_labels = []
for i in range(2**num_cols):
state_labels.append(change_base(i, 2, length=num_cols))
return state_dist, state_labels | 1a2edacd40d4fea3ff3cc5ddd57d76bffc60c7bc | 11,346 |
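A worked illustration of the binary indexing used in comp_state_dist: a row [1, 0, 1] over three LNLs maps to index 1*4 + 0*2 + 1*1 = 5, so a table of two patients with that pattern produces a histogram with state_dist[5] == 2 (building the label list additionally needs the change_base helper, which is not part of this record).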
import numpy as N
def polyConvert(coeffs, trans=(0, 1), backward=False):
"""
Converts polynomial coeffs for x (P = a0 + a1*x + a2*x**2 + ...) in
polynomial coeffs for x~:=a+b*x (P~ = a0~ + a1~*x~ + a2~*x~**2 +
...). Therefore, (a,b)=(0,1) makes nothing. If backward, makes the
opposite transformation.
Note: backward transformation could be done using more general
polynomial composition `polyval`, but forward transformation is a
long standing issue in the general case (look for functional
decomposition of univariate polynomial).
"""
a, b = trans
if not backward:
a = -float(a) / float(b)
b = 1 / float(b)
return N.dot(polyConvMatrix(len(coeffs), (a, b)), coeffs) | 1a2607b28046a8dc67315726957a87a5d5c9a435 | 11,347 |
import random
def uniform(_data, weights):
"""
Randomly initialize the weights with values between 0 and 1.
Parameters
----------
_data: ndarray
Data to pick to initialize weights.
weights: ndarray
Previous weight values.
Returns
-------
weights: ndarray
New weight values
"""
return random.rand(*weights.shape) | fbf7e853f11a888ee01dc840c6ffcb214560c5a8 | 11,348 |
def ingresar_datos():
"""Ingresa los datos de las secciones"""
datos = {}
while True:
codigo = int_input('Ingrese el código de la sección: ')
if codigo < 0:
break
cantidad = int_input(
'Ingrese la cantidad de alumnos: ', min=MIN, max=MAX
)
datos[codigo] = cantidad
return datos | 3bacb0e5d6b234b2f90564c44a25d151a640fd1f | 11,349 |
def fetch_credentials() -> Credentials:
"""Produces a Credentials object based on the contents of the
CONFIG_FILE or, alternatively, interactively.
"""
if CONFIG_FILE_EXISTS:
return parse_config_file(CONFIG_FILE)
else:
return get_credentials_interactively() | 0b882c8c4c8066a1898771c66db6ccbe7cb09c37 | 11,350 |
def pool_adjacency_mat_reference_wrapper(
adj: sparse.spmatrix, kernel_size=4, stride=2, padding=1
) -> sparse.spmatrix:
"""Wraps `pool_adjacency_mat_reference` to provide the same API as `pool_adjacency_mat`"""
adj = Variable(to_sparse_tensor(adj).to_dense())
adj_conv = pool_adjacency_mat_reference(adj, kernel_size, stride, padding)
return sparse.coo_matrix(adj_conv.data.numpy(), dtype=np.int16) | e72cb1e50bf7542d4175b9b3b3989e70a8812373 | 11,351 |
def send(socket, obj, flags=0, protocol=-1):
"""stringify an object, and then send it"""
s = str(obj)
return socket.send_string(s) | a89165565837ad4a984905d5b5fdd73e398b35fd | 11,352 |
def arraystr(A: Array) -> str:
"""Pretty print array"""
B = np.asarray(A).ravel()
if len(B) <= 3:
return " ".join([itemstr(v) for v in B])
return " ".join([itemstr(B[0]), itemstr(B[1]), "...", itemstr(B[-1])]) | 9cceed63c83812a7fd87dba833fc4d5b5a75088c | 11,353 |
def dist2_test(v1, v2, idx1, idx2, len2):
"""Square of distance equal"""
return (v1-v2).mag2() == len2 | 3a268a3ba704a91f83345766245a952fe5d943dd | 11,354 |
def extract_grid_cells(browser, grid_id):
"""
Given the ID of a legistar table, returns a list of dictionaries
for each row mapping column headers to td elements.
"""
table = browser.find_element_by_id(grid_id)
header_cells = table.find_elements_by_css_selector(
'thead:nth-child(2) > tr:nth-child(2) > th'
)
headers = [extract_text(cell) for cell in header_cells]
tbody = table.find_element_by_css_selector('tbody:nth-child(4)')
rows = tbody.find_elements_by_tag_name('tr')
result_rows = []
for row in rows:
cells = {}
td_elements = row.find_elements_by_tag_name('td')
for header, cell in zip(headers, td_elements):
cells[header] = cell
result_rows.append(cells)
return (headers, result_rows) | bee4265a18cfd428f25e3fdf3202fb5bfad820df | 11,355 |
import ast
def gatherAllParameters(a, keep_orig=True):
"""Gather all parameters in the tree. Names are returned along
with their original names (which are used in variable mapping)"""
if type(a) == list:
allIds = set()
for line in a:
            allIds |= gatherAllParameters(line)
return allIds
if not isinstance(a, ast.AST):
return set()
allIds = set()
for node in ast.walk(a):
if type(node) == ast.arg:
origName = node.originalId if (keep_orig and hasattr(node, "originalId")) else None
allIds |= set([(node.arg, origName)])
return allIds | e899e60d818750a4ff1656b039a6dc4413f8f181 | 11,356 |
def average_link_euclidian(X,verbose=0):
"""
Average link clustering based on data matrix.
Parameters
----------
X array of shape (nbitem,dim): data matrix
from which an Euclidian distance matrix is computed
verbose=0, verbosity level
Returns
-------
t a weightForest structure that represents the dendrogram of the data
Note
----
this method has not been optimized
"""
if X.shape[0]==np.size(X):
X = np.reshape(X,(np.size(X),1))
if np.size(X)<10000:
D = Euclidian_distance(X)
else:
        raise ValueError("The distance matrix is too large")
t = average_link_distance(D,verbose)
return t | 17aae1e7f802f82765bcda8b403598a2c5a9f822 | 11,357 |
import functools
def cached(func):
"""Decorator cached makes the function to cache its result and return it in duplicate calls."""
prop_name = '__cached_' + func.__name__
@functools.wraps(func)
def _cached_func(self):
try:
return getattr(self, prop_name)
except AttributeError:
val = func(self)
setattr(self, prop_name, val)
return val
return _cached_func | 5b23c251c03160ba2c4e87848201be46ba2f34fb | 11,358 |
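A hypothetical usage sketch for the cached decorator: the wrapped method runs once per instance and later calls return the stored value.
# class Report:
#     @cached
#     def total(self):
#         print("computing...")
#         return 42
# r = Report()
# r.total(); r.total()   # prints "computing..." only once; both calls return 42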
def SX_inf(*args):
"""
create a matrix with all inf
inf(int nrow, int ncol) -> SX
inf((int,int) rc) -> SX
inf(Sparsity sp) -> SX
"""
return _casadi.SX_inf(*args) | b11fba9e9b60eadb983d1203b1dd852abca9a2b7 | 11,359 |
def aes_encrypt(mode, aes_key, aes_iv, *data):
"""
Encrypt data with AES in specified mode.
:param aes_key: aes_key to use
:param aes_iv: initialization vector
"""
encryptor = Cipher(algorithms.AES(aes_key), mode(aes_iv), backend=default_backend()).encryptor()
result = None
for value in data:
result = encryptor.update(value)
encryptor.finalize()
return result, None if not hasattr(encryptor, 'tag') else encryptor.tag | 94a39ddabe3ea186463808e79e86bec171fbaeda | 11,360 |
def _ebpm_gamma_update_a(init, b, plm, step=1, c=0.5, tau=0.5, max_iters=30):
"""Backtracking line search to select step size for Newton-Raphson update of
a"""
def loss(a):
return -(a * np.log(b) + a * plm - sp.gammaln(a)).sum()
obj = loss(init)
d = (np.log(b) - sp.digamma(init) + plm).mean() / sp.polygamma(1, init)
update = loss(init + step * d)
while (not np.isfinite(update) or update > obj + c * step * d) and max_iters > 0:
step *= tau
update = loss(init + step * d)
max_iters -= 1
if max_iters == 0:
# Step size is small enough that update can be skipped
return init
else:
return init + step * d | 038fb28824b3429b03887299af7a7feeec16b689 | 11,361 |
def edge_distance_mapping(graph : Graph,
iterations : int,
lrgen : LearningRateGen,
verbose : bool = True,
reset_locations : bool = True):
"""
Stochastic Gradient Descent algorithm for performing graph vertex laoyout
optimization using the path distances as target distance in the layout.
The algorihm is adapted from the paper https://arxiv.org/pdf/1710.04626.pdf
Args:
graph : The graph to arrange
iterations : number of iteration rounds
lrgen : learning rate function that takes iteration round as input
verbose : boolean, set True to print progress status information
Returns:
Vertex location stress value list that contains one summary stress
value per iteration.
"""
# Create temporary lists of vertex list indices
n_vertex = graph.vertex_count
vertex_idx_list_a = np.arange(n_vertex)
vertex_idx_list_b = np.arange(n_vertex)
stress_list = []
# Calculate distance look-up table
dist_arr, keys = __edge_distance_lut(graph)
if reset_locations:
__reset_locations(graph)
# Main iteration loop
for iter_round in range(iterations):
stress = 0
lr = lrgen.get_lr(iter_round)
if verbose:
progress_print = ProgressPrint(n_vertex)
a_loop = 0
np.random.shuffle(vertex_idx_list_a)
for idx_a in vertex_idx_list_a:
np.random.shuffle(vertex_idx_list_b)
for idx_b in vertex_idx_list_b:
if idx_a == idx_b:
continue
# Get path distance from vertex a to b.
# Value -1 means there is no path.
dist_target = dist_arr[idx_a, idx_b]
if dist_target == np.inf:
continue
                # Update the locations and get the stress for the path
key_a = keys[idx_a]
key_b = keys[idx_b]
edge_stress = __coord_update(graph, key_a, key_b, dist_target, lr)
stress += edge_stress
# Progress monitoring
if verbose:
a_loop += 1
progress_print.print_update(iter_round, a_loop, stress)
stress_list.append(stress)
return stress_list | f5c93cf83a7cd7892936246eb6c90562030ad819 | 11,362 |
def strip_extension(name: str) -> str:
"""
Remove a single extension from a file name, if present.
"""
last_dot = name.rfind(".")
if last_dot > -1:
return name[:last_dot]
else:
return name | 9dc1e3a3c9ad3251aba8a1b61f73de9f79f9a8be | 11,363 |
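Hypothetical examples for strip_extension; only the last extension is removed:
# strip_extension("archive.tar.gz") -> "archive.tar"
# strip_extension("README")         -> "README"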
import os
def next_joystick_device():
"""Finds the next available js device name."""
for i in range(100):
dev = "/dev/input/js{0}".format(i)
if not os.path.exists(dev):
return dev | 56e1b859fd26e546e7a63cbf2764b78c2cd41990 | 11,364 |
def validatePullRequest(data):
"""Validate pull request by action."""
if 'action' not in data:
raise BadRequest('no event supplied')
if 'pull_request' not in data or 'html_url' not in data.get('pull_request'):
raise BadRequest('payload.pull_request.html_url missing')
return True | a4577a1b719b11f1ea845fff436a78178ca9e370 | 11,365 |
def __adjust_data_for_log_scale(dataframe: pd.DataFrame) -> pd.DataFrame:
"""
This will clean and adjust some of the data so that Altair can plot it using a logarithmic scale. Altair does not
allow zero values on the Y axis when plotting with a logarithmic scale, as log(0) is undefined.
Args:
dataframe: The data to plot on the chart.
Returns: A new data frame with the appropriate adjustments for plotting on a log scale.
"""
return dataframe.replace(0, float('nan')) | 30d7a73f2f0d564f6e52e1a2fa4b521fa1265c3d | 11,366 |
import spacy
import torch
def predict_sentence(model,vocab,sentence):
"""Predicts the section value of a given sentence
INPUT: Trained model, Model vocab, Sentence to predict
OUTPUT: Assigned section to the sentence"""
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
nlp=spacy.load('en_core_sci_md')
model=model.to(device)
tokens=[t.text for t in nlp.tokenizer(sentence)]
indexed = [vocab[t] for t in tokens]
tensor_to_predict=torch.LongTensor(indexed).to(device)
tensor_to_predict=tensor_to_predict.unsqueeze(1).T
length_tensor= torch.LongTensor([len(indexed)]).to(device)
prediction=model(tensor_to_predict,length_tensor)
return prediction.argmax(1).item() | f8ef02bd92dfc3dfcea0f5a2e9d5da99050fe367 | 11,367 |
import spacy
import re
def ne_offsets_by_sent(
text_nest_list=[],
model='de_core_news_sm',
):
""" extracts offsets of NEs and the NE-type grouped by sents
:param text_nest_list: A list of list with following structure:\
[{"text": "Wien ist schön", "ner_dicts": [{"text": "Wien", "ne_type": "LOC"}]}]
:param model: The name of the spacy model which should be used for sentence splitting.
:return: A list of spacy-like NER Tuples [('some text'), entities{[(15, 19, 'place')]}]
"""
nlp = spacy.load(model)
text_nes = text_nest_list
results = []
for entry in text_nes:
ner_dicts = entry['ner_dicts']
in_text = entry['text']
doc = nlp(in_text)
for sent in doc.sents:
entities = []
if sent.text != "":
plain_text = sent.text
for x in ner_dicts:
for m in re.finditer(x['text'], plain_text):
entities.append([m.start(), m.end(), x['ne_type']])
entities = [item for item in set(tuple(row) for row in entities)]
entities = sorted(entities, key=lambda x: x[0])
ents = []
next_item_index = 1
for x in entities:
cur_start = x[0]
try:
next_start = entities[next_item_index][0]
except IndexError:
next_start = 9999999999999999999999
if cur_start == next_start:
pass
else:
ents.append(x)
next_item_index = next_item_index + 1
train_data = (
plain_text,
{
"entities": ents
}
)
results.append(train_data)
return results | 1e4fdaba07bf562b1d91b5f2376955efa9974c56 | 11,368 |
from typing import Optional
def clone_repo(
url: str,
path: str,
branch: Optional[str] = None,
) -> bool:
"""Clone repo from URL (at branch if specified) to given path."""
cmd = ['git', 'clone', url, path]
if branch:
cmd += ['--branch', branch]
return run(cmd)[0].returncode == 0 | 56bc8641c3418216f1da5f0c87d33478888775c7 | 11,369 |
def get_inputtype(name, object_type):
"""Get an input type based on the object type"""
if object_type in _input_registry:
return _input_registry[object_type]
inputtype = type(
name,
(graphene.InputObjectType,),
_get_input_attrs(object_type),
)
_input_registry[object_type] = inputtype
return inputtype | aee2a84c8aaf0d66554f022ac0fec0aaef808160 | 11,370 |
from src.common import lang
import os
def res_ex_response(e, original=False):
"""异常响应,结果必须可以作json序列化
参数: e # 原始错误信息
original # 是否返回原始错误信息,默认flase
"""
if os.getenv('ENV') != 'UnitTest':
current_app.logger.error(e)
msg = lang.resp('L_OPER_FAILED')
if original:
msg = str(e)
return jsonify({"msg":msg, "code":4001, "status":False}), 200 | 810edae8d2081e208b27e19589d8c9c09d669a72 | 11,371 |
def get_engine_status(engine=None):
"""Return a report of the current engine status"""
if engine is None:
engine = crawler.engine
global_tests = [
"time()-engine.start_time",
"engine.is_idle()",
"engine.has_capacity()",
"engine.scheduler.is_idle()",
"len(engine.scheduler.pending_requests)",
"engine.downloader.is_idle()",
"len(engine.downloader.sites)",
"engine.scraper.is_idle()",
"len(engine.scraper.sites)",
]
spider_tests = [
"engine.spider_is_idle(spider)",
"engine.closing.get(spider)",
"engine.scheduler.spider_has_pending_requests(spider)",
"len(engine.scheduler.pending_requests[spider])",
"len(engine.downloader.sites[spider].queue)",
"len(engine.downloader.sites[spider].active)",
"len(engine.downloader.sites[spider].transferring)",
"engine.downloader.sites[spider].closing",
"engine.downloader.sites[spider].lastseen",
"len(engine.scraper.sites[spider].queue)",
"len(engine.scraper.sites[spider].active)",
"engine.scraper.sites[spider].active_size",
"engine.scraper.sites[spider].itemproc_size",
"engine.scraper.sites[spider].needs_backout()",
]
status = {'global': {}, 'spiders': {}}
for test in global_tests:
try:
status['global'][test] = eval(test)
        except Exception as e:
status['global'][test] = "%s (exception)" % type(e).__name__
for spider in engine.downloader.sites:
x = {}
for test in spider_tests:
try:
x[test] = eval(test)
            except Exception as e:
x[test] = "%s (exception)" % type(e).__name__
status['spiders'][spider] = x
return status | 0d87692a991965c8b72204d241964a27a9499014 | 11,372 |
import json
import pandas as pd
import requests
from tqdm import tqdm
def stock_em_jgdy_tj():
"""
    Eastmoney - Data Center - Featured Data - Institutional Research - Institutional Research Statistics
http://data.eastmoney.com/jgdy/tj.html
:return: pandas.DataFrame
"""
url = "http://data.eastmoney.com/DataCenter_V3/jgdy/gsjsdy.ashx"
page_num = _get_page_num_tj()
temp_df = pd.DataFrame()
for page in tqdm(range(1, page_num+1)):
params = {
"pagesize": "5000",
"page": str(page),
"js": "var sGrabtEb",
"param": "",
"sortRule": "-1",
"sortType": "0",
"rt": "52581365",
}
res = requests.get(url, params=params)
data_json = json.loads(res.text[res.text.find("={")+1:])
temp_df = temp_df.append(pd.DataFrame(data_json["data"]), ignore_index=True)
return temp_df | 1841702e6fb5c677245a2d213f489d95a789d68b | 11,373 |
import numpy as np
def hpdi(x, prob=0.90, axis=0):
"""
Computes "highest posterior density interval" (HPDI) which is the narrowest
interval with probability mass ``prob``.
:param numpy.ndarray x: the input array.
:param float prob: the probability mass of samples within the interval.
:param int axis: the dimension to calculate hpdi.
:return: quantiles of ``x`` at ``(1 - prob) / 2`` and
``(1 + prob) / 2``.
:rtype: numpy.ndarray
"""
x = np.swapaxes(x, axis, 0)
sorted_x = np.sort(x, axis=0)
mass = x.shape[0]
index_length = int(prob * mass)
intervals_left = sorted_x[:(mass - index_length)]
intervals_right = sorted_x[index_length:]
intervals_length = intervals_right - intervals_left
index_start = intervals_length.argmin(axis=0)
index_end = index_start + index_length
hpd_left = np.take_along_axis(sorted_x, index_start[None, ...], axis=0)
hpd_left = np.swapaxes(hpd_left, axis, 0)
hpd_right = np.take_along_axis(sorted_x, index_end[None, ...], axis=0)
hpd_right = np.swapaxes(hpd_right, axis, 0)
return np.concatenate([hpd_left, hpd_right], axis=axis) | 579515ebb6d28c2a1578c85eab8cbff1b67bd5ee | 11,374 |
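# Hedged usage sketch for hpdi: a 90% highest-density interval for standard
# normal samples (assumes the function above is in scope).
import numpy as np

samples = np.random.default_rng(0).normal(size=10_000)
lower, upper = hpdi(samples, prob=0.90)
print(lower, upper)  # roughly -1.64 and 1.64 for a standard normal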
from math import floor, log
def a(n, k):
"""calculates maximum power of p(n) needed
>>> a(0, 20)
4
>>> a(1, 20)
2
>>> a(2, 20)
1
"""
return floor(log(k) / log(p(n))) | 581e2a23a3dc069fc457ed5a6fe7d5a355353242 | 11,375 |
import platform
def is_windows():
""" détermine si le système actuel est windows """
return platform.system().lower() == "windows" | fc9e2ca948f7cc5dc6b6cc9afb52ba701222bb7a | 11,376 |
import numpy as np
import pywt
def WTfilt_1d(sig):
    """
    Filter a single-lead ECG signal using the discrete wavelet transform.
    Reference: Martis R J, Acharya U R, Min L C. ECG beat classification using PCA, LDA, ICA and discrete
    wavelet transform[J]. Biomedical Signal Processing and Control, 2013, 8(5): 437-448.
    :param sig: 1-D numpy array, the single-lead ECG signal
    :return: 1-D numpy array, the filtered signal
    """
coeffs = pywt.wavedec(sig, 'db6', level=9)
coeffs[-1] = np.zeros(len(coeffs[-1]))
coeffs[-2] = np.zeros(len(coeffs[-2]))
coeffs[0] = np.zeros(len(coeffs[0]))
sig_filt = pywt.waverec(coeffs, 'db6')
return sig_filt | 8a3c65b35ac347b247a36e7d70705f76f41010d5 | 11,377 |
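# Hedged usage sketch for WTfilt_1d on a synthetic signal standing in for a
# single-lead ECG; the 360 Hz rate and 30 s duration are assumptions chosen so
# that a level-9 'db6' decomposition is valid.
import numpy as np

fs = 360
t = np.arange(0, 30, 1 / fs)
noisy = np.sin(2 * np.pi * 1.2 * t) + 0.3 * np.random.default_rng(0).normal(size=t.size)
filtered = WTfilt_1d(noisy)
print(noisy.shape, filtered.shape)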
import numpy as np
def discount_rewards(r):
""" take 1D float array of rewards and compute discounted reward """
gamma = 0.99
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(range(0, r.size)):
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r | b093c0d82ef82824c08d08ce4da1b840318bd7ed | 11,378 |
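# Hedged usage sketch for discount_rewards (assumes the function above is in
# scope): a sparse reward at the end of a 3-step episode.
import numpy as np

episode_rewards = np.array([0.0, 0.0, 1.0])
print(discount_rewards(episode_rewards))  # [0.9801, 0.99, 1.0] with gamma = 0.99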
def mvn(tensor):
"""Per row mean-variance normalization."""
epsilon = 1e-6
mean = K.mean(tensor, axis=1, keepdims=True)
std = K.std(tensor, axis=1, keepdims=True)
mvn = (tensor - mean) / (std + epsilon)
return mvn | c205712d3a1a53450de0e0b9af0abe1b9d51f269 | 11,379 |
import re
def grapheme_to_phoneme(text, g2p, lexicon=None):
"""Converts grapheme to phoneme"""
phones = []
words = filter(None, re.split(r"(['(),:;.\-\?\!\s+])", text))
for w in words:
if lexicon is not None and w.lower() in lexicon:
phones += lexicon[w.lower()]
else:
phones += list(filter(lambda p: p != " ", g2p(w)))
return phones | 2bb5195a323aa712b2725851fdde64b8e38856f0 | 11,380 |
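# Hedged usage sketch for grapheme_to_phoneme. The g2p_en package is an
# assumption here (any callable mapping a word to a phone list would do), and
# the lexicon entry below is hypothetical.
from g2p_en import G2p

g2p = G2p()
lexicon = {"hello": ["HH", "AH0", "L", "OW1"]}
print(grapheme_to_phoneme("Hello there, world!", g2p, lexicon))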
import numpy as np
def mean_log_cosh_error(pred, target):
"""
Determine mean log cosh error.
f(y_t, y) = sum(log(cosh(y_t-y)))/n
where, y_t = predicted value
y = target value
n = number of values
:param pred: {array}, shape(n_samples,)
predicted values.
:param target: {array}, shape(n_samples,)
target values.
:return: mean log cosh error.
"""
error = pred - target
return np.mean(np.log(np.cosh(error))) | 85fd6c3d582e7bc41271e3212d43c5cfea8bcf7e | 11,381 |
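# Hedged usage sketch for mean_log_cosh_error (assumes the function above is in
# scope); a perfect prediction gives 0.0.
import numpy as np

pred = np.array([2.5, 0.0, 2.0, 8.0])
target = np.array([3.0, -0.5, 2.0, 7.0])
print(mean_log_cosh_error(pred, target))    # small positive value
print(mean_log_cosh_error(target, target))  # 0.0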
import corner
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def plot_abctraces(pools, surveypath=''):
""" Input: a list of pools in the abc format
Generates trace plots of the thetas,eps and metrics """
sns.set_style("white")
matplotlib.rc("font", size=30)
""" Plot Metric-Distances """
distances = np.array([pool.dists for pool in pools])
print(distances.shape)
f, ax = plt.subplots()
for ii in range(distances.shape[2]):
ax.errorbar(np.arange(len(distances)), np.mean(distances, axis=1)[:, ii], np.std(distances, axis=1)[:, ii], label='$\\Delta_%i$' % (ii+1))
# sns.distplot(np.asarray(distances)[:, ii], axlabel="distances", label='M%i' % (ii))
#plt.title("Development of Metric Distances")
plt.xlabel('Iteration')
plt.ylabel('Distance $\Delta$ in metric')
plt.legend()
plt.savefig('%s/Metrics.png' % (surveypath))
""" Plot Variables """
thetas = np.array([pool.thetas for pool in pools])
print(thetas.shape)
f, ax = plt.subplots()
for ii in range(thetas.shape[2]):
ax.errorbar(np.arange(len(thetas)), np.mean(thetas, axis=1)[:, ii], np.std(thetas, axis=1)[:, ii], label='$\\theta_%i$' % (ii+1))
#plt.title("Development of Parameters")
plt.xlabel('Iteration')
plt.ylabel('$\\theta_i$')
plt.legend()
plt.savefig('%s/Thetas.png' % (surveypath))
""" Plot Variables """
#TODO: Fix bug ... you need to call pools or pool?
for ii, pool, in enumerate(pools):
thetas = pool.thetas
figure = corner.corner(thetas)
plt.savefig('%s/CornerThetas_%02i.png' % (surveypath,ii))
"""
corner.corner(distances)
plots the various distances over each other to show if they are uncorrelated.
This is not super important, you could also use correlated distances with this approach. On the other hand it is interesting to see
if both metrices are independent, often this is a sign that they are good features!
"""
""" Plot Epsilon"""
fig, ax = plt.subplots()
eps_values = np.array([pool.eps for pool in pools])
for ii in range(distances.shape[2]):
ax.plot(eps_values[:, ii], label='$\epsilon_%i$' % (ii))
ax.set_xlabel("Iteration")
ax.set_ylabel(r"$\epsilon$", fontsize=15)
ax.legend(loc="best")
#ax.set_title("Thresholds $\epsilon$")
plt.savefig('%s/Thresholds.png' % (surveypath))
""" Violin Plots """
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(9, 4))
# generate some random test data
all_data = [pool.thetas[:,0] for pool in pools]
len_pools = [pool.thetas.shape[0]/pool.ratio for pool in pools]
print('len_pools:', len_pools, sum(len_pools))
mod_data = [np.concatenate((pool.thetas[:,0]+0.2, pool.thetas[:,0]), axis=0) for pool in pools]
# plot violin plot
#background = axes.violinplot(mod_data,
# showmeans=False,
# showmedians=False, showextrema=False)
axes.violinplot(all_data,
showmeans=False,
showmedians=True)
#for pc in background['bodies']:
# pc.set_facecolor('grey')
# pc.set_edgecolor('black')
# pc.set_alpha(0.4)
#for pc in foreground['bodies']:
# pc.set_facecolor('cornflowerblue')
# pc.set_edgecolor('black')
# pc.set_alpha(1)
# axes.set_title('violin plot')
# adding horizontal grid lines
axes.yaxis.grid(True)
axes.set_xticks([y+1 for y in range(len(all_data))])
axes.set_xlabel('Iteration')
axes.set_ylabel('$\\log_{10}(\\xi_e)$')
# add x-tick labels
plt.setp(axes, xticks=[y+1 for y in range(len(all_data))])
plt.savefig('%s/Violin.png' % (surveypath))
plt.savefig('%s/Violin.pdf' % (surveypath))
plt.clf()
""" Plot Parameters
pools[ii].thetas[:, 0] is a numpy array
"""
for ii, nouse in enumerate(pools):
if thetas.shape[1] > 1:
jg = sns.jointplot(pools[ii].thetas[:, 0],
pools[ii].thetas[:, 1],
#kind="kde", # BUG: creates an error
)
jg.ax_joint.set_xlabel('var1')
jg.ax_joint.set_ylabel('var2')
plt.savefig('%s/FirstThetas_%i.png' % (surveypath, ii))
return 0 | 9fef0b6bf382648d4d9d0cdfc175ce239aaf1aff | 11,382 |
def check_columns(board: list):
"""
    Check column-wise compliance of the board for uniqueness (buildings of unique height)
    and visibility (top-bottom and vice versa).
    Same checks as the horizontal (row) case, but applied to the board's columns.
>>> check_columns(['***21**', '412453*', '423145*', '*543215', \
'*35214*', '*41532*', '*2*1***'])
False
>>> check_columns(['***21**', '412453*', '423145*', '*543215', \
'*35214*', '*41232*', '*2*1***'])
False
>>> check_columns(['***21**', '412553*', '423145*', '*543215', \
'*35214*', '*41532*', '*2*1***'])
False
>>> check_columns(['***22**', '412453*', '423145*', '*543215', \
'*35214*', '*41532*', '*2*1***'])
False
>>> check_columns(['***21**', '412453*', '423145*', '*543215', \
'*35214*', '*41532*', '*2*4***'])
False
"""
return check_uniqueness_in_rows(get_board_columns(board), False) and \
check_horizontal_visibility(get_board_columns(board), False) | 85fd1f02b392c6b6219ce989f7439ec0140b9fa2 | 11,383 |
import numpy
import pathlib
import tqdm
import pandas
import warnings
def extract_header(mjds, path, keywords, dtypes=None, split_dbs=False, is_range=False):
"""Returns a `~pandas.DataFrame` with header information.
For a list or range of MJDs, collects a series of header keywords for
database files and organises them in a `~pandas.DataFrame` sorted by
MJD and frame number.
Parameters
----------
mjds : list
        The list of MJDs to extract. If the length of ``mjds`` is two and
``is_range=True``, all the MJDs between both values will be extracted.
path : str
The path to the database file.
keywords : list
A list of strings with the header keywords to extract.
dtypes : list, optional
A list of types to cast the keyword values.
split_dbs : bool, optional
If True, assumes that the DB is split into multiple files, one for each
MJD. In that case, the path for each file is assumed to be ``path``
with the ``_{MJD}`` suffix.
is_range : bool, optional
If True, assumes that ``mjds`` are the extremes of a range of MJDs.
"""
mjds = numpy.atleast_1d(mjds)
path = pathlib.Path(path)
keywords = [key.lower() for key in keywords]
if dtypes:
        assert len(dtypes) == len(keywords), 'inconsistent lengths of keywords and dtypes'
assert mjds.ndim == 1, 'invalid number of dimensions in mjds'
if is_range:
        assert len(mjds) == 2, 'when is_range=True, mjds must be a list of length 2'
mjds = numpy.arange(mjds[0], mjds[1] + 1)
if not split_dbs:
assert path.exists()
database.init(str(path))
assert database.connect(), 'cannot connect to database'
dataframes = []
with tqdm.trange(len(mjds)) as tt:
for mjd in mjds:
tt.set_description(str(mjd))
if split_dbs:
suffix = path.suffix
database_mjd = str(path).replace(suffix, f'_{mjd}{suffix}')
if not pathlib.Path(database_mjd).exists():
tt.update()
continue
database.init(str(database_mjd))
assert database.connect(), 'cannot connect to database'
Header = playhouse.reflection.Introspector.from_database(
database).generate_models()['header']
fields = [Frame.mjd, Frame.frame]
failed = any([not hasattr(Header, keyword) for keyword in keywords])
if failed:
tt.update()
continue
for keyword in keywords:
fields.append(getattr(Header, keyword))
data = Header.select(*fields).join(Frame, on=(Frame.pk == Header.frame_pk)).tuples()
dataframes.append(pandas.DataFrame(list(data), columns=(['mjd', 'frame'] + keywords)))
tt.update()
dataframe = pandas.concat(dataframes)
if dtypes:
failed = False
for ii, key in enumerate(keywords):
try:
dataframe[key] = dataframe[key].astype(dtypes[ii])
except ValueError as ee:
warnings.warn(f'failed to apply astype: {ee!r}', exceptions.GuiderQAUserWarning)
failed = True
if not failed:
dataframe = dataframe[dataframe > -999.]
dataframe = dataframe.set_index(['mjd', 'frame'])
dataframe.sort_index(inplace=True)
return dataframe | 65934d9b5e9c1e6eb639709a16e6e93c145b57e7 | 11,384 |
import pytz
from datetime import datetime
async def get_time():
"""获取服务器时间
"""
tz = pytz.timezone('Asia/Shanghai')
return {
'nowtime': datetime.now(),
'utctime': datetime.utcnow(),
'localtime': datetime.now(tz)
} | 282eb1136713df8045c6ad5f659042484fe4ec8b | 11,385 |
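# Hedged usage sketch for driving the get_time coroutine outside a web
# framework (assumes pytz is installed, as imported above).
import asyncio

times = asyncio.run(get_time())
print(times['nowtime'], times['utctime'], times['localtime'])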
def health_check():
"""Attempt to ping the database and respond with a status code 200.
    This endpoint verifies that the server is running and that the database is
    accessible.
"""
response = {"service": "OK"}
try:
postgres.session.query(text("1")).from_statement(text("SELECT 1")).all()
response["database"] = "OK"
except Exception as e:
app.logger.error(e)
response["database"] = "ERROR"
try:
mongo.connection.server_info()
response["document_store"] = "OK"
except Exception as e:
app.logger.error(e)
response["document_store"] = "ERROR"
return response | cd47815ada53281f2b13542dd8cad93398be5203 | 11,386 |
import dbus
def find_ad_adapter(bus):
"""Find the advertising manager interface.
:param bus: D-Bus bus object that is searched.
"""
remote_om = dbus.Interface(
bus.get_object(constants.BLUEZ_SERVICE_NAME, '/'),
constants.DBUS_OM_IFACE)
objects = remote_om.GetManagedObjects()
for o, props in objects.items():
if constants.LE_ADVERTISING_MANAGER_IFACE in props:
return o
return None | 6b5f49a5908948a54f99438a38865293fca51cc7 | 11,387 |
def leaky_relu(x, slope=0.2):
"""Leaky Rectified Linear Unit function.
This function is expressed as :math:`f(x) = \max(x, ax)`, where :math:`a`
is a configurable slope value.
Args:
x (~chainer.Variable): Input variable.
slope (float): Slope value :math:`a`.
Returns:
~chainer.Variable: Output variable.
"""
return LeakyReLU(slope)(x) | cf7624309543e24c70832249116b74d56c26d1f9 | 11,388 |
def GetConfig(user_config):
"""Decide number of vms needed to run oldisim."""
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
config['vm_groups']['default']['vm_count'] = (FLAGS.oldisim_num_leaves
+ NUM_DRIVERS + NUM_ROOTS)
return config | b04380fe6dbc84ef4c353dd34354581fa69aac89 | 11,389 |
import os
import shutil
def build_tstuser_dir(username):
"""
    Create a directory with files and return the expected timestamp
    and snapshot of its contents.
:param username: str
:return: tuple
"""
# md5("foo") = "acbd18db4cc2f85cedef654fccc4a4d8"
# md5("bar") = "37b51d194a7513e45b56f6524f2d51f2"
# md5("spam") = "e09f6a7593f8ae3994ea57e1117f67ec"
file_contents = [
('spamfile', 'spam', 'e09f6a7593f8ae3994ea57e1117f67ec'),
(os.path.join('subdir', 'foofile.txt'), 'foo', 'acbd18db4cc2f85cedef654fccc4a4d8'),
(os.path.join('subdir', 'barfile.md'), 'bar', '37b51d194a7513e45b56f6524f2d51f2'),
]
user_root = userpath2serverpath(username)
# If directory already exists, destroy it
if os.path.isdir(user_root):
shutil.rmtree(user_root)
os.mkdir(user_root)
expected_timestamp = None
expected_snapshot = {}
for user_filepath, content, md5 in file_contents:
expected_timestamp = int(_create_file(username, user_filepath, content))
expected_snapshot[user_filepath] = [expected_timestamp, unicode(md5)]
return expected_timestamp, expected_snapshot | bb6b62a11a778f83cd8161077dbcab5afb40d64c | 11,390 |
import re
import numpy as np
def gen_answer(question, passages):
"""由于是MLM模型,所以可以直接argmax解码。
"""
all_p_token_ids, token_ids, segment_ids = [], [], []
for passage in passages:
passage = re.sub(u' |、|;|,', ',', passage)
p_token_ids, _ = tokenizer.encode(passage, maxlen=max_p_len + 1)
q_token_ids, _ = tokenizer.encode(question, maxlen=max_q_len + 1)
all_p_token_ids.append(p_token_ids[1:])
token_ids.append([tokenizer._token_start_id])
token_ids[-1] += ([tokenizer._token_mask_id] * max_a_len)
token_ids[-1] += [tokenizer._token_end_id]
token_ids[-1] += (q_token_ids[1:] + p_token_ids[1:])
segment_ids.append([0] * len(token_ids[-1]))
token_ids = sequence_padding(token_ids)
segment_ids = sequence_padding(segment_ids)
probas = model.predict([token_ids, segment_ids])
results = {}
for t, p in zip(all_p_token_ids, probas):
a, score = tuple(), 0.
for i in range(max_a_len):
idxs = list(get_ngram_set(t, i + 1)[a])
if tokenizer._token_end_id not in idxs:
idxs.append(tokenizer._token_end_id)
            # pi zeroes out the probabilities of tokens that do not appear in the passage
pi = np.zeros_like(p[i])
pi[idxs] = p[i, idxs]
a = a + (pi.argmax(),)
score += pi.max()
if a[-1] == tokenizer._token_end_id:
break
score = score / (i + 1)
a = tokenizer.decode(a)
if a:
results[a] = results.get(a, []) + [score]
results = {
k: (np.array(v)**2).sum() / (sum(v) + 1)
for k, v in results.items()
}
return results | 536880c1318cc193be19561183e652c7668eb09b | 11,391 |
def compile(function_or_sdfg, *args, **kwargs):
""" Obtain a runnable binary from a Python (@dace.program) function. """
if isinstance(function_or_sdfg, dace.frontend.python.parser.DaceProgram):
sdfg = dace.frontend.python.parser.parse_from_function(
function_or_sdfg, *args, **kwargs)
elif isinstance(function_or_sdfg, SDFG):
sdfg = function_or_sdfg
else:
raise TypeError("Unsupported function type")
return sdfg.compile(**kwargs) | 7504344e8e9df5a395e51af1211db286188f3fcb | 11,392 |
import re
def is_untweeable(html):
"""
    I'm not sure at the moment what constitutes untweeable HTML, but if we don't
    find the storeArea DIVs that TiddlyWiki uses, that is a blocker
"""
# the same regex used in tiddlywiki
divs_re = re.compile(
r'<div id="storeArea"(.*)</html>',
re.DOTALL
)
return bool(divs_re.search(html)) | face6c6d30b6e26ffa3344ed8e42ed7d44cf2ea5 | 11,393 |
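# Hedged usage sketch for is_untweeable with minimal illustrative HTML.
wiki_html = '<html><body><div id="storeArea"><div>a tiddler</div></div></body></html>'
plain_html = '<html><body><p>no store area here</p></body></html>'
print(is_untweeable(wiki_html))   # True: the storeArea div is present
print(is_untweeable(plain_html))  # False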
from typing import Optional
import tensorflow as tf
def create_1m_cnn_model(only_digits: bool = False, seed: Optional[int] = 0):
"""A CNN model with slightly under 2^20 (roughly 1 million) params.
A simple CNN model for the EMNIST character recognition task that is very
similar to the default recommended model from `create_conv_dropout_model`
but has slightly under 2^20 parameters. This is useful if the downstream task
involves randomized Hadamard transform, which requires the model weights /
gradients / deltas concatednated as a single vector to be padded to the
nearest power-of-2 dimensions.
This model is used in https://arxiv.org/abs/2102.06387.
When `only_digits=False`, the returned model has 1,018,174 trainable
parameters. For `only_digits=True`, the last dense layer is slightly smaller.
Args:
only_digits: If True, uses a final layer with 10 outputs, for use with the
digits only EMNIST dataset. If False, uses 62 outputs for the larger
dataset.
seed: A random seed governing the model initialization and layer randomness.
Returns:
A `tf.keras.Model`.
"""
data_format = 'channels_last'
initializer = tf.keras.initializers.GlorotUniform(seed=seed)
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(
32,
kernel_size=(3, 3),
activation='relu',
data_format=data_format,
input_shape=(28, 28, 1),
kernel_initializer=initializer),
tf.keras.layers.MaxPool2D(pool_size=(2, 2), data_format=data_format),
tf.keras.layers.Conv2D(
64,
kernel_size=(3, 3),
activation='relu',
data_format=data_format,
kernel_initializer=initializer),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(
128, activation='relu', kernel_initializer=initializer),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(
10 if only_digits else 62,
activation=tf.nn.softmax,
kernel_initializer=initializer),
])
return model | 87353f8bd8e3b3d7602ad3dcd92b717b2590285b | 11,394 |
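# Hedged usage sketch for create_1m_cnn_model (assumes TensorFlow 2.x is
# available); the parameter count quoted comes from the docstring above.
model = create_1m_cnn_model(only_digits=False, seed=42)
model.summary()  # about 1,018,174 trainable parameters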
def _check_index(target_expr, index_expr):
"""
helper function for making sure that an index is valid
:param target_expr: the target tensor
:param index_expr: the index
:return: the index, wrapped as an expression if necessary
"""
if issubclass(index_expr.__class__, _Expression):
index = index_expr
else:
index = _ConstScalar(index_expr)
if index.proto_expr.dtype is lang.UNDEFINED_TYPE:
raise TypeError('Can only index with a scalar.')
if type(index) is _ConstScalar:
if target_expr.size <= index.value() or index.value() < 0:
raise IndexError('Index out of bounds.')
return index | 96d5bf6d6d19bfca0de30ea9915a38237cf9c80f | 11,395 |
def create_access_token(user: UserModel, expires_delta: timedelta = None) -> str:
"""
Create an access token for a user
    :param user: UserModel -> The user
:param expires_delta: timedelta -> The expiration of the token. If not given a default will be used
:return: str -> A token
"""
load_all_config()
to_encode = user.dict()
if not expires_delta:
expires_delta = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
return __generate_jwt_token(to_encode, expires_delta) | d5ba53f0ecc7e7755988ad2540e4cd4c520b30dd | 11,396 |
def is_async_mode():
"""Tests if we're in the async part of the code or not."""
async def f():
"""Unasync transforms async functions in sync functions."""
return None
obj = f()
if obj is None:
return False
obj.close() # prevent unawaited coroutine warning
return True | 8e515efc767f75c4b90486089f0d8a7203da59d7 | 11,397 |
def remove_arm(frame):
"""
Removes the human arm portion from the image.
"""
##print("Removing arm...")
# Cropping 15 pixels from the bottom.
height, width = frame.shape[:2]
frame = frame[:height - 15, :]
##print("Done!")
return frame | 99b998da87f1aa2eca0a02b67fc5adc411603ee4 | 11,398 |
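# Hedged usage sketch for remove_arm on a synthetic OpenCV-style frame; the
# 240x320 size is illustrative.
import numpy as np

frame = np.zeros((240, 320, 3), dtype=np.uint8)
cropped = remove_arm(frame)
print(frame.shape, cropped.shape)  # (240, 320, 3) (225, 320, 3)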
import numpy as np
def cumulative_spread(array, x):
"""
>>> import numpy as np
>>> a = np.array([1., 2., 3., 4.])
>>> cumulative_spread(a, 0.)
array([0., 0., 0., 0.])
>>> cumulative_spread(a, 5.)
array([1., 2., 2., 0.])
>>> cumulative_spread(a, 6.)
array([1., 2., 3., 0.])
>>> cumulative_spread(a, 12.)
array([1., 2., 3., 4.])
"""
# This is probably inefficient.
cumulative_effect = np.cumsum(array) - array
b = x - cumulative_effect
return np.fmin(array, np.fmax(0, b)) | c6966a97945f30cce6a794325091a31716a36e54 | 11,399 |