content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
def askfont():
"""
Opens a :class:`FontChooser` toplevel to allow the user to select a font
:return: font tuple (family_name, size, \*options), :class:`~font.Font` object
"""
chooser = FontChooser()
chooser.wait_window()
return chooser.font | 8bce830a24d92be38c23ba09b6754f2e6cc6d161 | 4,100 |
import pandas as pd
def load_data(train_file, test_file):
    """
    Read the train and test data from their dataset files, then split the
    train data into features and labels.
    Parameters
    ----------
    train_file: pair of paths (features file, labels file) for the training set
    test_file: path to the file containing the test set
    """
x_tra = pd.read_csv(train_file[0]).drop(columns=["ID"])
y_tra = pd.read_csv(train_file[1]).drop(columns=["ID"])
x_tst = pd.read_csv(test_file).drop(columns=["ID"])
return x_tra, y_tra, x_tst | d830f4bcd3efe467a23cab0dfa4a3cdb4694559e | 4,101 |
import os
from sklearn.model_selection import train_test_split  # assumed source of train_test_split
def split_dataset_random(data_path, test_size=0.2):
    """Split the dataset into train and test sets with a repartition given by test_size.
    data_path is a string
    test_size is a float between 0 and 1"""
    # Collect the names of the files that make up the dataset
    sample_name = []
    for filename in os.listdir(data_path):
        sample_name.append(filename)
    # Split the file names into train and test subsets (a test_size fraction goes to test)
    train_sample, test_sample = train_test_split(sample_name, test_size=test_size)
return (train_sample, test_sample) | 8c24f4a0dda3b3da8c637b624655dd98c6f9ff0a | 4,102 |
def guide(batch_X, batch_y=None, num_obs_total=None):
"""Defines the probabilistic guide for z (variational approximation to posterior): q(z) ~ p(z|x)
"""
# we are interested in the posterior of w and intercept
# since this is a fairly simple model, we just initialize them according
    # to our prior belief and let the optimization handle the rest
assert(jnp.ndim(batch_X) == 2)
d = jnp.shape(batch_X)[1]
z_w_loc = param("w_loc", jnp.zeros((d,)))
z_w_std = jnp.exp(param("w_std_log", jnp.zeros((d,))))
z_w = sample('w', dist.Normal(z_w_loc, z_w_std))
z_intercept_loc = param("intercept_loc", 0.)
    z_intercept_std = jnp.exp(param("intercept_std_log", 0.))
    z_intercept = sample('intercept', dist.Normal(z_intercept_loc, z_intercept_std))
return (z_w, z_intercept) | 889f3224424496a4f001d81b046e1279ba0efe77 | 4,103 |
import os
def downloads_dir():
"""
:returns string: default downloads directory path.
"""
return os.path.expanduser('~') + "/Downloads/" | f8c328a3176a664387059ebf6af567d018bcd57e | 4,104 |
def get_reddit_tables():
"""Returns 12 reddit tables corresponding to 2016"""
reddit_2016_tables = []
temp = '`fh-bigquery.reddit_posts.2016_{}`'
for i in range(1, 10):
reddit_2016_tables.append(temp.format('0' + str(i)))
for i in range(10, 13):
reddit_2016_tables.append(temp.format(str(i)))
return reddit_2016_tables | e590ab35becbe46aa220257f6629e54f720b3a13 | 4,105 |
def first_empty():
"""Return the lowest numbered workspace that is empty."""
workspaces = sorted(get_workspace_numbers(get_workspaces().keys()))
for i in range(len(workspaces)):
if workspaces[i] != i + 1:
return str(i + 1)
return str(len(workspaces) + 1) | f9c9f868570bbcc15a28097930d304b308ddf452 | 4,106 |
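The gap-finding loop above is easiest to see on a plain list of workspace numbers. A minimal sketch of the same logic, assuming the i3-specific helpers get_workspaces()/get_workspace_numbers() (not shown here) simply yield the occupied workspace numbers:
def first_gap(numbers):
    # Scan the sorted numbers for the first position where the sequence 1, 2, 3, ... breaks.
    numbers = sorted(numbers)
    for i, n in enumerate(numbers):
        if n != i + 1:
            return i + 1
    return len(numbers) + 1  # no gap: use the next number after the highest

assert first_gap([1, 2, 4, 5]) == 3   # workspace 3 is free
assert first_gap([2, 3]) == 1         # workspace 1 is free
assert first_gap([1, 2, 3]) == 4      # no gap, append at the end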
import argparse
def get_args():
"""get command-line arguments"""
parser = argparse.ArgumentParser(
        description='Demonstrate effect on SVM of removing a support vector',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-o',
'--outfile',
help='File to write figure',
metavar='FILE',
type=str,
default=None)
parser.add_argument(
'-r',
'--random_seed',
help='Random seed value',
metavar='int',
type=int,
default=None)
return parser.parse_args() | ab279b344be0c8b632eafe9e1f86a6638792eb8f | 4,107 |
from time import altzone, localtime, timezone
def get_local_tzone():
    """Get the current time zone on the local host as a signed HHMM string"""
    # Use the DST offset (altzone) when DST is in effect, the standard offset otherwise.
    offset = altzone if localtime().tm_isdst else timezone
    # Offsets are seconds west of UTC, so negative values are east of UTC ('+').
    sign = '+' if altzone < 0 else '-'
    hours = str(int(float(offset) / 60 // 60)).rjust(2, '0')
    minutes = str(int(float(offset) / 60 % 60)).ljust(2, '0')
    tzone = sign + hours + minutes
return tzone | dec1d9f9c5ecf937779de55a33397436841913bc | 4,108 |
def subscribers_tables_merge(tablename1: Tablename, tablename2: Tablename, csv_path=csvpath, verbose=True):
"""
    Merges tables produced by the loader into a single one. Accepts a pandas.DataFrame or a group name; in the
    latter case the group must be in the group list and the corresponding file must be in <csv_path>.
"""
if isinstance(tablename1, pd.DataFrame):
table1 = tablename1
else:
table1 = pd.read_csv(csv_path + tablename1 + '.csv', sep=";", header=0, dtype=str)
if isinstance(tablename2, pd.DataFrame):
table2 = tablename2
else:
table2 = pd.read_csv(csv_path + tablename2 + '.csv', sep=";", header=0, dtype=str)
concatenated = table1.append(table2, ignore_index=True)
    # Identify users subscribed to more than one group
    # The condition gs_x != gs_x checks whether the value is NaN
    outer_joined = pd.merge(table1[['id', 'group_subscribed']],
                            table2[['id', 'group_subscribed']],
                            on='id', how='outer')
outer_joined['groups'] = outer_joined['group_subscribed_x'] + ',' + outer_joined['group_subscribed_y']
outer_joined.loc[ outer_joined.group_subscribed_x != outer_joined.group_subscribed_x,
'groups'] = outer_joined.group_subscribed_y
outer_joined.loc[ outer_joined.group_subscribed_y != outer_joined.group_subscribed_y,
'groups'] = outer_joined.group_subscribed_x
    # Merge everything back together and clean up
    left_joined = pd.merge(concatenated, outer_joined[['id', 'groups']], on='id', how='left')
left_joined['group_subscribed'] = left_joined['groups']
L = left_joined.drop_duplicates('id')
if verbose:
print("{0} и {1} обработаны".format(str(tablename1), str(tablename2)))
return L[L.columns[0:6]] | 56c5c80b57aa4103f1836f8b9a5ca7bbb67e25bc | 4,109 |
def get_current_offset(session):
"""
    For backfilling only: this function works with the init container to look up
    its job_id so it can line that up with its consumer group and offset, so that
we can backfill up to a given point and then kill the worker afterwards.
"""
if settings.JOB_ID is None:
return settings.CONSUMER_GROUP, None
output = {}
while True:
logger.info(f"Getting kafka job with job_id = {settings.JOB_ID}")
sql = f"select * from kafka_jobs WHERE job_id='{settings.JOB_ID}';"
result = session.execute(sql).fetchall()
session.commit()
if len(result) == 0:
logger.info(f"Did not find job_id={settings.JOB_ID} - sleeping")
sleep(2)
continue
for r in result:
# Keyed on tuple of topic, partition to look up the stop_offset
output[(r[2], r[3])] = r[4]
return r[1], output | 97d0b0485005a709a047582667f56a79f636388d | 4,110 |
import numpy as np
from scipy import optimize
def get_params(p1, p2, L):
"""
Return the curve parameters 'a', 'p', 'q' as well as the integration
constant 'c', given the input parameters.
"""
hv = p2 - p1
m = p1 + p2
def f_bind(a): return f(a, *hv, L)
def fprime_bind(a): return fprime(a, hv[0])
# Newton-Raphson algorithm to find a value for 'a'
a0 = nr_first_guess(f_bind, 0.1, 0.01, 1.8)
a = optimize.newton(f_bind, a0, fprime_bind)
# Use our formulas to compute the rest
p = 0.5 * (m[0] - a * np.log((L+hv[1])/(L-hv[1])))
q = 0.5 * (m[1] - L / np.tanh(0.5 * hv[0]/a))
c = -a * np.sinh((p1[0]-p)/a)
return a, p, q, c | eae7d942b4272b3addc6c3f3912abc564e93f339 | 4,111 |
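The helpers f, fprime and nr_first_guess are not included in this snippet. A minimal sketch of the first two, assuming they encode the standard catenary arc-length relation sqrt(L^2 - v^2) = 2a*sinh(h/(2a)) (the signatures are chosen to match the f(a, *hv, L) and fprime(a, hv[0]) calls above); nr_first_guess would then just scan for a reasonable starting value for Newton's method:
import numpy as np

def f(a, h, v, L):
    # Zero of this function in 'a' gives the catenary parameter for a chain of
    # length L spanning horizontal gap h and vertical gap v.
    return 2.0 * a * np.sinh(h / (2.0 * a)) - np.sqrt(L**2 - v**2)

def fprime(a, h):
    # d/da of 2a*sinh(h/(2a)); the sqrt term does not depend on 'a'.
    return 2.0 * np.sinh(h / (2.0 * a)) - (h / a) * np.cosh(h / (2.0 * a))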
def _unpack(f):
"""to unpack arguments"""
def decorated(input):
if not isinstance(input, tuple):
input = (input,)
return f(*input)
return decorated | 245d425b45d9d7ef90239b791d6d65bcbd0433d5 | 4,112 |
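A short usage sketch: the decorator lets a callback written for several positional arguments also accept a single packed tuple.
@_unpack
def add(a, b=10):
    return a + b

print(add((1, 2)))   # tuple input is unpacked -> add(1, 2) -> 3
print(add(5))        # bare value is wrapped   -> add(5)    -> 15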
from .ops.classes import WriteRichOp
from typing import Iterable
from functools import reduce
def chain_rich(iterable: Iterable['WriteRichOp']) -> 'WriteRichOp':
"""Take an `iterable` of `WriteRich` segments and combine them to produce a single WriteRich operation."""
return reduce(WriteRichOp.then, iterable) | fa75ab929fb01b9c68e58938aa04aebddc26f245 | 4,113 |
def sum_plot_chi2_curve(bin_num, sum_bin, r_mpc, ax=None, cov_type='bt', label=None,
xlabel=True, ylabel=True, show_bin=True, ref_sig=None):
"""Plot the chi2 curve."""
if ax is None:
fig = plt.figure(figsize=(6, 6))
fig.subplots_adjust(
left=0.165, bottom=0.13, right=0.995, top=0.99, wspace=None, hspace=None)
ax = fig.add_subplot(111)
ax.axhline(1.0, linewidth=3.0, alpha=.4, c='k')
# Reduced chi2 curves
rchi2 = sum_bin['chi2_' + cov_type] / (len(sum_bin['dsigma']) - 1)
# Best-fit scatter and its uncertainty
ax.axvline(sum_bin['sig_med_' + cov_type], linewidth=2.0, alpha=0.4,
linestyle='--', color='k')
ax.fill_between(
[sum_bin['sig_low_' + cov_type], sum_bin['sig_upp_' + cov_type]],
[0, 0], [np.max(rchi2) * 1.2, np.max(rchi2) * 1.2],
color=color_bins[bin_num], alpha=0.2)
if ref_sig is not None:
ax.axvline(ref_sig, linewidth=3.0, alpha=0.5, linestyle='-.', color='k')
# Reduced chi2 curves
sims = sum_bin['simulation']
markers = cycle(['o', 's', 'h', '8', '+'])
for sim in np.unique(sims):
mask = sims == sim
ax.scatter(
sum_bin['sigma'][mask], rchi2[mask], marker=next(markers),
s=60, alpha=0.8, facecolor=color_bins[bin_num], edgecolor='grey',
linewidth=1.0, label=label)
ax.scatter(sum_bin['sigma'][sum_bin['idx_med_' + cov_type]],
rchi2[sum_bin['idx_med_' + cov_type]], marker='o',
s=100, alpha=1.0, facecolor=color_bins[bin_num], edgecolor='k',
linewidth=1.0, label=r'__no_label__')
ax.set_xlim(0.00, np.max(sum_bin['sigma']) * 1.09)
ax.set_ylim(0.01, np.max(rchi2) * 1.19)
sig_best = sum_bin['sig_med_' + cov_type]
sig_upp = sum_bin['sig_upp_' + cov_type]
sig_low = sum_bin['sig_low_' + cov_type]
if sig_best <= 0.65:
_ = ax.text(
sig_best + 0.05, np.max(rchi2) * 0.95,
r'$\sigma={:4.2f}^{{+{:4.2f}}}_{{-{:4.2f}}}$'.format(
sig_best, sig_upp - sig_best, sig_best - sig_low), fontsize=25)
else:
_ = ax.text(
sig_best - 0.45, np.max(rchi2) * 0.95,
r'$\sigma={:4.2f}^{{+{:4.2f}}}_{{-{:4.2f}}}$'.format(
sig_best, sig_upp - sig_best, sig_best - sig_low), fontsize=25)
if show_bin:
_ = ax.text(0.07, 0.87, r'$\rm Bin\ {:1d}$'.format(bin_num + 1), fontsize=35,
transform=ax.transAxes)
if xlabel:
_ = ax.set_xlabel(r'$\sigma_{\mathcal{M} | \mathcal{O}}$', fontsize=30)
else:
_ = ax.set_xticklabels([])
if ylabel:
_ = ax.set_ylabel(r'$\rm Reduced\ \chi^2$', fontsize=30)
else:
_ = ax.set_yticklabels([])
if ax is None:
return fig
return ax | 6f0b7adf2daa98ecac9ff722eab9f6b748ef188b | 4,114 |
import dfim
import dfim.util
def compute_importance(model, sequences, tasks,
score_type='gradient_input',
find_scores_layer_idx=0,
target_layer_idx=-2,
reference_gc=0.46,
reference_shuffle_type=None,
num_refs_per_seq=10):
"""
reference_shuffle_type in ['random', 'dinuc']
reference_gc = 0 will return numpy array of 0s
reference_gc < 1 will assign each G and C reference_gc/2
"""
### Compute Importance scores
print('Calculating Importance Scores')
importance_method = {
"deeplift": deeplift.blobs.NonlinearMxtsMode.DeepLIFT_GenomicsDefault,
"rescale_all_layers": deeplift.blobs.NonlinearMxtsMode.Rescale,
"revealcancel_all_layers": deeplift.blobs.NonlinearMxtsMode.RevealCancel,
"gradient_input": deeplift.blobs.NonlinearMxtsMode.Gradient,
"guided_backprop": deeplift.blobs.NonlinearMxtsMode.GuidedBackprop,
"deconv": deeplift.blobs.NonlinearMxtsMode.DeconvNet
}
importance_model = kc.convert_sequential_model(model,
nonlinear_mxts_mode=importance_method[score_type])
importance_func = importance_model.get_target_contribs_func(
find_scores_layer_idx=find_scores_layer_idx,
target_layer_idx=target_layer_idx)
(reference, new_importance_func) = get_reference(sequences, importance_func,
gc_fraction=reference_gc,
shuffle=reference_shuffle_type,
seed=1)
importance_score_dict = {}
for task in tasks:
if reference is None:
reload(dfim.util)
seq_fastas = dfim.util.convert_one_hot_to_fasta(sequences)
scores = np.array(new_importance_func(task_idx=task, # was 0
input_data_sequences=seq_fastas,
num_refs_per_seq=num_refs_per_seq,
batch_size=10,
progress_update=1000))
else:
scores = np.array(new_importance_func(task_idx=task,
input_data_list=[sequences],
batch_size=10,
progress_update=1000,
input_references_list=[reference]))
importance_score_dict[task] = scores * sequences
return importance_score_dict | a7ebe928f4e3b50d5c8735d438d28c034d5dfeb9 | 4,115 |
from typing import Iterable
def test_check_non_existing() -> None:
"""Test a check on a non-existing column."""
class Schema(pa.SchemaModel):
a: Series[int]
@pa.check("nope")
@classmethod
def int_column_lt_100(cls, series: pd.Series) -> Iterable[bool]:
return series < 100
err_msg = (
"Check int_column_lt_100 is assigned to a non-existing field 'nope'"
)
with pytest.raises(pa.errors.SchemaInitError, match=err_msg):
Schema.to_schema() | 473b0e1c4b4c785970bdc648e4290426524882c7 | 4,116 |
import requests
def fetch_url(url):
"""Fetches the specified URL.
:param url: The URL to fetch
:type url: string
:returns: The response object
"""
return requests.get(url) | 26198dbc4f7af306e7a09c86b59a7da1a4802241 | 4,117 |
def _nw_score_(s1, s2, insert=lambda c: -2,
delete=lambda c: -2,
substitute=lambda c1, c2: 2 if c1 == c2 else -1):
"""Compute Needleman Wunsch score for aligning two strings.
This algorithm basically performs the same operations as Needleman Wunsch
alignment, but is made more memory efficient by storing only two columns of
the optimal alignment matrix. As a consequence, no reconstruction is
possible.
Args:
s1 (iterable): iterable to which we should align
s2 (iterable): iterable to be aligned
insert (lambda): function returning penalty for insertion (default -2)
delete (lambda): function returning penalty for deletion (default -2)
substitute (lambda): function returning penalty for substitution
(default -1)
Returns:
: last column of optimal matching matrix
"""
# lengths of two strings are further used for ranges, therefore 1 is added
# to every length
m = len(s1) + 1
n = len(s2) + 1
# score will be a two dimensional matrix
    score = [[0 for i in range(n)], [0 for i in range(n)]]
# character of first and second string, respectively
c1 = c2 = ''
# iterator over the second string
    s2_it = range(1, n)
# indices of current and previous column in the error matrix (will be
# swapped along the way)
crnt = 0
prev = 1
prev_j = 0
# base case when the first string is shorter than second
for j in s2_it:
prev_j = j - 1
score[crnt][j] = score[crnt][prev_j] + insert(s2[prev_j])
# iterate over the first string
    for i in range(1, m):
# swap current and previous columns
prev, crnt = crnt, prev
# get current character of the first string
c1 = s1[i - 1]
# calculate the base case when len = 0
score[crnt][0] = score[prev][0] + delete(c1)
for j in s2_it:
prev_j = j - 1
c2 = s2[prev_j]
# current cell will be the maximum over insertions, deletions, and
# substitutions applied to adjacent cells
# substitution (covers cases when both chars are equal)
score[crnt][j] = max(score[prev][prev_j] + substitute(c1, c2),
# deletion
score[prev][j] + delete(c1),
# insertion
score[crnt][prev_j] + insert(c2))
# return last computed column of scores
return score[crnt] | 009c9eb4afec828adde53bddfd2a8b4d2a952c24 | 4,118 |
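A usage sketch (Python 3): only the last column of the score matrix is returned, so its final entry is the optimal global alignment score, but no alignment can be reconstructed from it.
last_column = _nw_score_("GATTACA", "GCATGCU")
print(last_column[-1])   # optimal Needleman-Wunsch score of the two strings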
import pickle
import torch
import re
import warnings
from typing import Counter
from typing import OrderedDict
def load_gisaid_data(
*,
device="cpu",
min_region_size=50,
include={},
exclude={},
end_day=None,
columns_filename="results/usher.columns.pkl",
features_filename="results/usher.features.pt",
feature_type="aa",
) -> dict:
"""
Loads the two files columns_filename and features_filename,
converts the input to PyTorch tensors and truncates the data according to
``include`` and ``exclude``.
:param str device: torch device to use
:param dict include: filters of data to include
:param dict exclude: filters of data to exclude
:param end_day: last day to include
:param str columns_filename:
:param str features_filename:
:param str feature_type: Either "aa" for amino acid features or "nuc" for
nucleotide features.
:returns: A dataset dict
:rtype: dict
"""
logger.info("Loading data")
include = include.copy()
exclude = exclude.copy()
if end_day:
logger.info(f"Load gisaid data end_day: {end_day}")
# Load column data.
with open(columns_filename, "rb") as f:
columns = pickle.load(f)
# Clean up location ids (temporary; this should be done in preprocess_gisaid.py).
columns["location"] = list(map(pyrocov.geo.gisaid_normalize, columns["location"]))
logger.info(f"Training on {len(columns['day'])} rows with columns:")
logger.info(", ".join(columns.keys()))
# Aggregate regions smaller than min_region_size to country level.
fine_regions = get_fine_regions(columns, min_region_size)
# Filter features into numbers of mutations and possibly genes.
usher_features = torch.load(features_filename)
mutations = usher_features[f"{feature_type}_mutations"]
features = usher_features[f"{feature_type}_features"].to(
device=device, dtype=torch.get_default_dtype()
)
keep = [m.count(",") == 0 for m in mutations] # restrict to single mutations
if include.get("gene"):
re_gene = re.compile(include.pop("gene"))
keep = [k and bool(re_gene.search(m)) for k, m in zip(keep, mutations)]
if exclude.get("gene"):
re_gene = re.compile(exclude.pop("gene"))
keep = [k and not re_gene.search(m) for k, m in zip(keep, mutations)]
if include.get("region"):
gene, region = include.pop("region")
lb, ub = sarscov2.GENE_STRUCTURE[gene][region]
for i, m in enumerate(mutations):
g, m = m.split(":")
if g != gene:
keep[i] = False
continue
match = re.search("[0-9]+", m)
assert match is not None
pos = int(match.group())
if not (lb < pos <= ub):
keep[i] = False
mutations = [m for k, m in zip(keep, mutations) if k]
if mutations:
features = features[:, keep]
else:
warnings.warn("No mutations selected; using empty features")
mutations = ["S:D614G"] # bogus
features = features[:, :1] * 0
logger.info("Loaded {} feature matrix".format(" x ".join(map(str, features.shape))))
# Construct the list of clades.
clade_id_inv = usher_features["clades"]
clade_id = {k: i for i, k in enumerate(clade_id_inv)}
clades = columns["clade"]
# Generate sparse_data.
sparse_data: dict = Counter()
countries = set()
states = set()
state_to_country_dict = {}
location_id: dict = OrderedDict()
skipped_clades = set()
num_obs = 0
for day, location, clade in zip(columns["day"], columns["location"], clades):
if clade not in clade_id:
if clade not in skipped_clades:
skipped_clades.add(clade)
if not clade.startswith("fine"):
logger.warning(f"WARNING skipping unsampled clade {clade}")
continue
# Filter by include/exclude
row = {
"location": location,
"day": day,
"clade": clade,
}
if not all(re.search(v, row[k]) for k, v in include.items()):
continue
if any(re.search(v, row[k]) for k, v in exclude.items()):
continue
# Filter by day
if end_day is not None:
if day > end_day:
continue
# preprocess parts
parts = location.split("/")
if len(parts) < 2:
continue
parts = tuple(p.strip() for p in parts[:3])
if len(parts) == 3 and parts not in fine_regions:
parts = parts[:2]
location = " / ".join(parts)
# Populate countries on the left and states on the right.
if len(parts) == 2: # country only
countries.add(location)
p = location_id.setdefault(location, len(countries) - 1)
else: # state and country
country = " / ".join(parts[:2])
countries.add(country)
c = location_id.setdefault(country, len(countries) - 1)
states.add(location)
p = location_id.setdefault(location, -len(states))
state_to_country_dict[p] = c
# Save sparse data.
num_obs += 1
t = day // TIMESTEP
c = clade_id[clade]
sparse_data[t, p, c] += 1
logger.warning(f"WARNING skipped {len(skipped_clades)} unsampled clades")
state_to_country = torch.full((len(states),), 999999, dtype=torch.long)
for s, c in state_to_country_dict.items():
state_to_country[s] = c
logger.info(f"Found {len(states)} states in {len(countries)} countries")
location_id_inv = [None] * len(location_id)
for k, i in location_id.items():
location_id_inv[i] = k
assert all(location_id_inv)
# Generate weekly_clades tensor from sparse_data.
if end_day is not None:
T = 1 + end_day // TIMESTEP
else:
T = 1 + max(columns["day"]) // TIMESTEP
P = len(location_id)
C = len(clade_id)
weekly_clades = torch.zeros(T, P, C)
for tps, n in sparse_data.items():
weekly_clades[tps] = n
logger.info(f"Dataset size [T x P x C] {T} x {P} x {C}")
logger.info(
f"Keeping {num_obs}/{len(clades)} rows "
f"(dropped {len(clades) - int(num_obs)})"
)
# Construct sparse representation.
pc_index = weekly_clades.ne(0).any(0).reshape(-1).nonzero(as_tuple=True)[0]
sparse_counts = dense_to_sparse(weekly_clades)
# Construct time scales centered around observations.
time = torch.arange(float(T)) * TIMESTEP / GENERATION_TIME
time -= time.mean()
# Construct lineage <-> clade mappings.
lineage_to_clade = usher_features["lineage_to_clade"]
clade_to_lineage = usher_features["clade_to_lineage"]
lineage_id_inv = sorted(lineage_to_clade)
lineage_id = {k: i for i, k in enumerate(lineage_id_inv)}
clade_id_to_lineage_id = torch.zeros(len(clade_to_lineage), dtype=torch.long)
for c, l in clade_to_lineage.items():
clade_id_to_lineage_id[clade_id[c]] = lineage_id[l]
lineage_id_to_clade_id = torch.zeros(len(lineage_to_clade), dtype=torch.long)
for l, c in lineage_to_clade.items():
lineage_id_to_clade_id[lineage_id[l]] = clade_id[c]
dataset = {
"clade_id": clade_id,
"clade_id_inv": clade_id_inv,
"clade_id_to_lineage_id": clade_id_to_lineage_id,
"clade_to_lineage": usher_features["clade_to_lineage"],
"features": features,
"lineage_id": lineage_id,
"lineage_id_inv": lineage_id_inv,
"lineage_id_to_clade_id": lineage_id_to_clade_id,
"lineage_to_clade": usher_features["lineage_to_clade"],
"location_id": location_id,
"location_id_inv": location_id_inv,
"mutations": mutations,
"pc_index": pc_index,
"sparse_counts": sparse_counts,
"state_to_country": state_to_country,
"time": time,
"weekly_clades": weekly_clades,
}
return dataset | eaa9c5b3735f291706ea783272b3372ad9e7937c | 4,119 |
def get_symbol_size(sym):
"""Get the size of a symbol"""
return sym["st_size"] | b2d39afe39542e7a4e1b4fed60acfc83e6a58677 | 4,120 |
import argparse
import functools
import os
def parse_args(argv):
"""Parse and validate command line flags"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--base-image',
type=functools.partial(
validation_utils.validate_arg_regex, flag_regex=IMAGE_REGEX),
default='gcr.io/google-appengine/python:latest',
help='Name of Docker image to use as base')
# In some cases, gcloud sets an environment variable to indicate
# the location of the application configuration file, rather than
# using the --config flag. The order of precedence from highest
# to lowest is:
#
# 1) --config flag
# 2) $GAE_APPLICATION_YAML_PATH environment variable
# 3) a file named "app.yaml" in the current working directory
parser.add_argument(
'--config',
type=functools.partial(
validation_utils.validate_arg_regex, flag_regex=PRINTABLE_REGEX),
default=(os.environ.get(GAE_APPLICATION_YAML_PATH) or 'app.yaml'),
help='Path to application configuration file'
)
parser.add_argument(
'--source-dir',
type=functools.partial(
validation_utils.validate_arg_regex, flag_regex=PRINTABLE_REGEX),
default='.',
help=('Application source and output directory'))
args = parser.parse_args(argv[1:])
return args | afc558b05d247d1096c4c276edc4a74a34f93827 | 4,121 |
def to_unnamed_recursive(sexpr, scheme):
"""Convert all named column references to unnamed column references."""
def convert(n):
if isinstance(n, NamedAttributeRef):
n = toUnnamed(n, scheme)
n.apply(convert)
return n
return convert(sexpr) | ffb58acb1cfbef654c5c936880961b8cc982ec01 | 4,122 |
def login_process():
"""Process login."""
email_address = request.form.get("email")
password = request.form.get("password")
user = User.query.filter_by(email_address=email_address).first()
if not user:
flash("Please try again!")
return redirect('/')
if user.password != password:
flash("Incorrect password")
return redirect('/')
session["user_id"] = user.user_id
flash("Logged in")
return redirect('/dashboard') | afee068b653e5f759329658e3614b0ce7ee2d405 | 4,123 |
def get_doc_translations(doctype, name):
"""
Returns a dict custom tailored for the document.
- Translations with the following contexts are handled:
- doctype:name:docfield
- doctype:name
- doctype:docfield (Select fields only)
- 'Select' docfields will have a values dict which will have
translations for each option
document(doctype, name) {
[lang_code_1]: {
title: lang_1_title,
status: {
value: lang_1_status,
values: {
option_1: lang_1_option_1,
...
}
}
},
[lang_code_2]: {
title: lang_2_title,
}
}
"""
context = f"{doctype}:"
translations = frappe.db.sql("""
SELECT
t.language,
t.source_text,
t.context,
t.translated_text
FROM `tabTranslation` t
WHERE
t.context LIKE %(context)s
""", {
"context": f"{context}%"
}, as_dict=1)
tr_dict = frappe._dict()
if not len(translations):
return tr_dict
doc = frappe.get_cached_doc(doctype, name)
value_fieldname_dict = None
def get_value_fieldname_dict():
nonlocal value_fieldname_dict
if value_fieldname_dict is not None:
return value_fieldname_dict
d = frappe._dict()
for fieldname in frappe.get_meta(doctype).get_valid_columns():
v = doc.get(fieldname)
if not v:
continue
if v not in d:
d[v] = []
d[v].append(fieldname)
value_fieldname_dict = d
return value_fieldname_dict
for t in translations:
if t.language not in tr_dict:
tr_dict[t.language] = frappe._dict()
ctx = t.context.split(":")
if len(ctx) == 3 and ctx[1] == name:
# Docfield translation
# doctype:name:docfield
fieldname = t.context.split(":")[2]
if t.source_text == "*" or doc.get(fieldname) == t.source_text:
tr_dict[t.language][fieldname] = t.translated_text
elif len(ctx) == 2 and ctx[1] != name:
# Select DocField
select_df = ctx[1]
if select_df not in [x.fieldname for x in frappe.get_meta(doctype).get_select_fields()]:
continue
select_tr = tr_dict[t.language].setdefault(
select_df, frappe._dict(value=None, values=frappe._dict()))
select_tr.get("values")[t.source_text] = t.translated_text
if doc.get(select_df) == t.source_text:
select_tr.value = t.translated_text
elif len(ctx) == 2:
# Document Translation
# doctype:name
d = get_value_fieldname_dict()
if t.source_text in d:
for fieldname in d[t.source_text]:
if tr_dict[t.language].get(fieldname, None):
continue
tr_dict[t.language][fieldname] = t.translated_text
return tr_dict | e7fd896de3162452a77ab989670e61b01e8e35a2 | 4,124 |
import datetime
def fetch_newer_version(
installed_version=scancode_version,
new_version_url='https://pypi.org/pypi/scancode-toolkit/json',
force=False,
):
"""
Return a version string if there is an updated version of scancode-toolkit
newer than the installed version and available on PyPI. Return None
otherwise.
Limit the frequency of update checks to once per week.
State is stored in the scancode_cache_dir.
If `force` is True, redo a PyPI remote check.
"""
installed_version = packaging_version.parse(installed_version)
try:
state = VersionCheckState()
current_time = datetime.datetime.utcnow()
# Determine if we need to refresh the state
if ('last_check' in state.state and 'latest_version' in state.state):
last_check = datetime.datetime.strptime(
state.state['last_check'],
SELFCHECK_DATE_FMT
)
seconds_since_last_check = total_seconds(current_time - last_check)
one_week = 7 * 24 * 60 * 60
if seconds_since_last_check < one_week:
latest_version = state.state['latest_version']
if force:
latest_version = None
# Refresh the version if we need to or just see if we need to warn
if latest_version is None:
try:
latest_version = fetch_latest_version(new_version_url)
state.save(latest_version, current_time)
except Exception:
# save an empty version to avoid checking more than once a week
state.save(None, current_time)
raise
latest_version = packaging_version.parse(latest_version)
# Our git version string is not PEP 440 compliant, and thus improperly
# parsed via most 3rd party version parsers. We handle this case by
# pulling out the "base" release version by split()-ting on "post".
#
# For example, "3.1.2.post351.850399ba3" becomes "3.1.2"
if isinstance(installed_version, packaging_version.LegacyVersion):
installed_version = installed_version.split('post')
installed_version = installed_version[0]
installed_version = packaging_version.parse(installed_version)
# Determine if our latest_version is older
if (installed_version < latest_version
and installed_version.base_version != latest_version.base_version):
return str(latest_version)
except Exception:
msg = 'There was an error while checking for the latest version of ScanCode'
logger.debug(msg, exc_info=True) | efd0e92219efd8fb54064200acbdc3e512071f37 | 4,125 |
def app(request):
"""
Default view for Person Authority App
"""
return direct_to_template(request,
'person_authority/app.html',
{'app':APP}) | 9e75c9cf381c69b19bfdf08c74b2e0dc2106822b | 4,126 |
def is_xbar(top, name):
"""Check if the given name is crossbar
"""
xbars = list(filter(lambda node: node["name"] == name, top["xbar"]))
if len(xbars) == 0:
return False, None
if len(xbars) > 1:
log.error("Matching crossbar {} is more than one.".format(name))
raise SystemExit()
return True, xbars[0] | 435b84a30f3f749f07d0cc6dfdd5e7f0c5343c4f | 4,127 |
def index():
""" Root URL response """
return "Reminder: return some useful information in json format about the service here", status.HTTP_200_OK | d8128c8ba8976238c1d68376eaa64a77d09ce525 | 4,128 |
import numpy as np
def backproject(depth, K):
    """Backproject a depth map to an organized cloud map.
    depth: (H, W) depth map
    K: 3x3 camera intrinsics matrix
    ----
    organized cloud map: (H, W, 3)
    """
H, W = depth.shape
X, Y = np.meshgrid(np.asarray(range(W)) - K[0, 2], np.asarray(range(H)) - K[1, 2])
return np.stack((X * depth / K[0, 0], Y * depth / K[1, 1], depth), axis=2) | 5433fd408932f48c238cad7e5e8d7b14ee7b00de | 4,129 |
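A usage sketch with a flat synthetic depth map and a hypothetical pinhole intrinsics matrix K = [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]:
import numpy as np

K = np.array([[500.0, 0.0, 320.0],
              [0.0, 500.0, 240.0],
              [0.0, 0.0, 1.0]])
depth = np.ones((480, 640), dtype=np.float32)   # every pixel 1 m from the camera
cloud = backproject(depth, K)
print(cloud.shape)   # (480, 640, 3): per-pixel (X, Y, Z) in camera coordinates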
from pathlib import Path
def get_parent_dir(os_path: str) -> str:
"""
    Return the second-level parent of os_path (i.e. Path(os_path).parents[1]).
"""
return str(Path(os_path).parents[1]) | 3a6e518119e39bfbdb9381bc570ac772b88b1334 | 4,130 |
import os
import ctypes
def test_is_admin():
"""Returns True if the program is ran as administrator.
Returns False if not ran as administrator.
"""
try:
is_admin = (os.getuid() == 0)
except AttributeError:
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == 1:
return 1
else:
return 0 | 6ace6a49a40ded8df9dd065bfd2d8f8359850b68 | 4,131 |
def parse_work_url(work_url):
"""Extract work id from work url
Args:
work_url (str): work url
Returns:
str: bdrc work id
"""
work_id = ""
if work_url:
work_url_parts = work_url.split("/")
work_id = work_url_parts[-1]
return work_id | 1e7f5e222a2f6c7d01cbcb7df556adf6dd33f7cf | 4,132 |
def room():
"""Create a Room instance for all tests to share."""
return Room({"x": 4, "y": 4, "z": 4}, savable=False) | f031faa1bf654ff32868b678f79c2af040926b44 | 4,133 |
import re
def searchLiteralLocation(a_string, patterns):
"""assumes a_string is a string, being searched in
    assumes patterns is a list of strings, to be searched for in a_string
    returns a list of (match object, span) tuples, one per pattern found in a_string,
    else returns an empty list"""
results = []
for pattern in patterns:
regex = pattern
match = re.search(regex, a_string)
if match:
results.append((match, match.span()))
return results | 0f751bae801eaee594216688551919ed61784187 | 4,134 |
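A quick usage sketch: each hit comes back as a (match object, (start, end)) pair, and patterns with no match are simply omitted.
hits = searchLiteralLocation("the quick brown fox", ["quick", "fox", "cat"])
for match, span in hits:
    print(match.group(), span)   # quick (4, 9) and fox (16, 19); "cat" produces no entry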
def UIOSelector_Highlight(inUIOSelector):
"""
Highlight (draw outline) the element (in app) by the UIO selector.
:param inUIOSelector: UIOSelector - List of items, which contains condition attributes
:return:
"""
# Check the bitness
lSafeOtherProcess = UIOSelector_SafeOtherGet_Process(inUIOSelector)
if lSafeOtherProcess is None:
UIO_Highlight(UIOSelector_Get_UIO(inUIOSelector))
else:
# Run function from other process with help of PIPE
lPIPEResuestDict = {"ModuleName": "UIDesktop", "ActivityName": "UIOSelector_Highlight",
"ArgumentList": [inUIOSelector],
"ArgumentDict": {}}
        # Send the request to the child process that is responsible for handling Windows windows
        ProcessCommunicator.ProcessChildSendObject(lSafeOtherProcess, lPIPERequestDict)
# Get answer from child process
lPIPEResponseDict = ProcessCommunicator.ProcessChildReadWaitObject(lSafeOtherProcess)
if lPIPEResponseDict["ErrorFlag"]:
raise Exception(
f"Exception was occured in child process (message): {lPIPEResponseDict['ErrorMessage']}, (traceback): {lPIPEResponseDict['ErrorTraceback']}")
else:
return lPIPEResponseDict["Result"]
return True | 9ab5930396aa9813f09d858c4bb94adc2170f312 | 4,135 |
import torch
def completeMessage_BERT(mod, tok, ind, max_length=50):
"""
Sentence Completion of the secret text from BERT
"""
tokens_tensor = torch.tensor([ind])
    outInd = mod.generate(tokens_tensor, max_length=max_length)
outText=tok.decode(outInd[0].tolist())
newText=outText[len(tok.decode(ind)):]
newText=newText.split(sep=".", maxsplit=1)[0]
newText="".join((newText, "."))
outInd=ind+tok.encode(newText)
return outInd | c2a47bbe90a9e5d222af0bbe5959c82d2ebd2cd3 | 4,136 |
import copy
import json
def _select_train_and_seat_type(train_names, seat_types, query_trains):
"""
    Select the train and seat type for booking.
    :param train_names: list of preferred train numbers
    :param seat_types: list of preferred seat types
    :param query_trains: list of trains returned by the query
    :return: select_train, select_seat_type
"""
def _select_trains(query_trains, train_names=None):
if train_names:
select_trains = []
            # Pick trains following the order of the preferred train list
for train_name in train_names:
for train in query_trains:
if train['train_name'] == train_name:
select_trains.append(copy.deepcopy(train))
return select_trains
else:
return query_trains
def _select_types(trains, seat_types):
select_train = None
select_seat_type = None
for train in trains:
for seat_type in seat_types:
seat_type_left_ticket = train.get(seat_type, '')
if _check_seat_type_is_booking(seat_type_left_ticket):
select_seat_type = seat_type
select_train = copy.deepcopy(train)
return select_train, select_seat_type
else:
return None, None
_logger.debug('train_names:%s seat_types:%s' % (json.dumps(train_names, ensure_ascii=False),
json.dumps(seat_types, ensure_ascii=False)))
trains = _select_trains(query_trains, train_names)
# debug trains
for i in range(min(len(trains), len(train_names or ['']))):
_logger.debug('query left tickets train info. %s' % json.dumps(trains[i], ensure_ascii=False))
return _select_types(trains, seat_types) | 519cba93eca3a676734f05d196a7f125917da88a | 4,137 |
import scipy.io as sio
def load_real_tcs():
""" Load real timecourses after djICA preprocessing """
try:
return sio.loadmat(REAL_TC_DIR)['Shat'][0]
except KeyError:
try:
return sio.loadmat(REAL_TC_DIR)['Shat_'][0]
except KeyError:
print("Incorrect key")
pass | 68b148e6fc6088d8ef9f90daf25e07609010d9fe | 4,138 |
def create_fsaverage_forward(epoch, **kwargs):
"""
A forward model is an estimation of the potential or field distribution for a known source
and for a known model of the head. Returns EEG forward operator with a downloaded template
MRI (fsaverage).
Parameters:
epoch: mne.epochs.Epochs
MNE epoch object containing portions of raw EEG data built around specified timestamp(s).
kwargs: arguments
        Specify any of the following arguments for the mne.make_forward_solution() function. These include mindist=5.0, n_jobs=1.
Returns:
mne.forward.forward.Forward:
Forward operator built from the user_input epoch and the fsaverage brain.
"""
defaultKwargs = { 'n_jobs': 1, 'mindist': 5.0 }
kwargs = { **defaultKwargs, **kwargs }
# Download fsaverage brain files (to use as 3D MRI brain for model)
fs_dir = fetch_fsaverage(verbose=True)
subjects_dir = op.dirname(fs_dir)
subject = 'fsaverage'
trans = 'fsaverage' # MNE has a built-in fsaverage transformation
src = op.join(fs_dir, 'bem', 'fsaverage-ico-5-src.fif')
bem = op.join(fs_dir, 'bem', 'fsaverage-5120-5120-5120-bem-sol.fif')
# Make forward
fwd = mne.make_forward_solution(epoch.info,
trans=trans,
src=src,
bem=bem,
eeg=True,
**kwargs)
return fwd | 34d72211babf23e41927ebe7df13c58bf6876e4d | 4,139 |
import os
def make_file(path):
"""
Factory function for File strategies
:param str path: A local relative path or s3://, file:// protocol urls
:return:
"""
try:
if not is_valid_url(path):
return LocalFile(os.path.abspath(path))
url_obj = urlparse(path)
if url_obj.scheme == 'file':
return LocalFile(url_obj.path)
if url_obj.scheme == 's3':
return S3File(url_obj.path, url_obj.netloc, boto3.resource('s3'))
raise Exception()
except Exception:
raise ValueError('Path %s is not a valid file or s3 url' % path) | 29163e04c676e81a8f01456e29529b219e9ad2a8 | 4,140 |
import numpy as np
def midi_to_hz(notes):
    """Convert MIDI note number(s) to frequency in Hz.
    Uses the equal-tempered mapping f = 440 * 2**((n - 69) / 12), so MIDI
    note 69 (A4) maps to 440 Hz; accepts scalars or array-likes.
    """
return 440.0 * (2.0 ** ((np.asanyarray(notes) - 69.0) / 12.0)) | 7215126d25ff969a8aa187c7f49216ec7743a9e9 | 4,141 |
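A couple of worked values of the formula above:
print(midi_to_hz(69))            # 440.0 Hz (A4, the reference pitch)
print(midi_to_hz(81))            # 880.0 Hz, one octave (12 semitones) higher
print(midi_to_hz([60, 62, 64]))  # C4, D4, E4 as a numpy array of frequencies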
def bond_stereo_parities(chi, one_indexed=False):
""" Parse the bond stereo parities from the stereochemistry layers.
:param chi: ChI string
:type chi: str
:param one_indexed: Return indices in one-indexing?
:type one_indexed: bool
:returns: A dictionary mapping bond keys onto parities
:rtype: dict[frozenset[int]: bool]
"""
ste_lyr_dct = stereo_layers(chi)
bnd_ste_dct = _bond_stereo_parities(ste_lyr_dct, one_indexed=one_indexed)
return bnd_ste_dct | 02729db6888899a91e69a25dae81c06777b89182 | 4,142 |
import numpy as np
def filter_camera_angle(places):
    """Filter points by camera viewing angle for the KITTI dataset"""
bool_in = np.logical_and((places[:, 1] < places[:, 0] - 0.27), (-places[:, 1] < places[:, 0] - 0.27))
# bool_in = np.logical_and((places[:, 1] < places[:, 0]), (-places[:, 1] < places[:, 0]))
return places[bool_in] | 417fccfbb240c5defc36b4ce465fe14333922b94 | 4,143 |
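A quick sketch of the geometric condition: a point (x, y, z) is kept only when |y| < x - 0.27, i.e. it lies inside the forward-facing camera wedge.
import numpy as np

pts = np.array([[10.0,  2.0, 0.0],    # well inside the wedge -> kept
                [10.0, 11.0, 0.0],    # too far to the side   -> dropped
                [ 0.1,  0.0, 0.0]])   # behind the 0.27 m margin -> dropped
print(filter_camera_angle(pts))       # only the first row survives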
def neural_log_literal_function(identifier):
"""
A decorator for NeuralLog literal functions.
:param identifier: the identifier of the function
:type identifier: str
:return: the decorated function
:rtype: function
"""
return lambda x: registry(x, identifier, literal_functions) | 84651d58b7da677ee213d1ff4667dc3be702f243 | 4,144 |
def get_factors(n: int) -> list:
"""Returns the factors of a given integer.
"""
return [i for i in range(1, n+1) if n % i == 0] | c15a0e30e58597daf439facd3900c214831687f2 | 4,145 |
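For example:
print(get_factors(12))   # [1, 2, 3, 4, 6, 12]
print(get_factors(7))    # [1, 7] -- a prime has only the trivial factors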
def fetch_tables():
""" Used by the frontend, returns a JSON list of all the tables including metadata. """
return jsonify([
{
"tab": "animeTables",
"name": "Anime",
"tables": [
{
"id": "englishAnimeSites",
"title": "English Streaming Sites",
"type": "anime"
},
{
"id": "foreignAnimeSites",
"title": "Foreign Streaming Sites",
"type": "anime"
},
{
"id": "downloadSites",
"title": "Download Only Sites",
"type": "animeDownload"
}
]
},
{
"tab": "mangaTables",
"name": "Manga",
"tables": [
{
"id": "englishMangaAggregators",
"title": "Aggregators",
"type": "manga"
},
{
"id": "foreignMangaAggregators",
"title": "Non-English Aggregators",
"type": "manga"
},
{
"id": "englishMangaScans",
"title": "Scans",
"type": "manga"
},
{
"id": "foreignMangaScans",
"title": "Non-English Scans",
"type": "manga"
}
]
},
{
"tab": "lightNovelTables",
"name": "Novels",
"tables": [
{
"id": "lightNovels",
"title": "Light Novels",
"type": "novel"
},
{
"id": "visualNovels",
"title": "Visual Novels",
"type": "novel"
}
]
},
{
"tab": "applicationsTables",
"name": "Applications",
"tables": [
{
"id": "iosApplications",
"title": "iOS",
"type": "application"
},
{
"id": "androidApplications",
"title": "Android",
"type": "application"
},
{
"id": "windowsApplications",
"title": "Windows",
"type": "application"
},
{
"id": "macOSApplications",
"title": "macOS",
"type": "application"
},
{
"id": "browserExtensions",
"title": "Browser Extensions",
"type": "application"
}
]
},
{
"tab": "hentaiTables",
"name": "Hentai",
"tables": [
{
"id": "hentaiAnimeSites",
"title": "Hentai Anime Streaming Sites",
"type": "anime"
},
{
"id": "hentaiDoujinshiSites",
"title": "Hentai Manga/Image Boards/LN sites",
"type": "novel"
},
{
"id": "hentaiDownloadSites",
"title": "Hentai Download",
"type": "animeDownload"
},
{
"id": "hentaiApplications",
"title": "Hentai Applications",
"type": "application"
}
]
},
{
"tab": "vpnTables",
"name": "VPN",
"tables": [
{
"id": "vpnServices",
"title": "VPNs",
"type": "vpn"
}
]
}
]) | 5c07e7bc9f3366bc72e21dd5468edf57b6c448b3 | 4,146 |
def base_positive_warps():
"""
Get warp functions associated with domain (0,inf), scale 1.0
Warp function is defined as f(x) = log(exp(x)-1)
Returns
-------
Callable[torch.Tensor,torch.Tensor],
Callable[torch.Tensor,torch.Tensor],
Callable[torch.Tensor,torch.Tensor]
Function from (0,inf) to R, from R to (0,inf),
and log of derivative of function from (0,inf) to R
"""
warpf = utils.invsoftplus
iwarpf = utils.softplus
logdwarpf = lambda x: x - utils.invsoftplus(x)
return warpf, iwarpf, logdwarpf | 389db769f55f7542a45e6acbbccf5760dc7b8c26 | 4,147 |
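A numerical sanity check of the log-derivative shortcut, assuming utils.softplus / utils.invsoftplus follow the usual definitions softplus(x) = log(1 + exp(x)) and invsoftplus(y) = log(exp(y) - 1): the derivative of log(exp(x) - 1) is exp(x) / (exp(x) - 1), and its log simplifies to x - log(exp(x) - 1).
import torch

x = torch.linspace(0.1, 5.0, 50)
invsoftplus = lambda y: torch.log(torch.exp(y) - 1.0)   # assumed definition
lhs = torch.log(torch.exp(x) / (torch.exp(x) - 1.0))    # log of the exact derivative
rhs = x - invsoftplus(x)                                # shortcut used by logdwarpf
assert torch.allclose(lhs, rhs, atol=1e-4)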
import re
import json
from datetime import datetime
def dev_work_create():
"""
Create work order.
:return:
"""
db_ins = current_user.dbs
audits = User.query.filter(User.role == 'audit')
form = WorkForm()
if form.validate_on_submit():
sql_content = form.sql_content.data
db_ins = form.db_ins.data
shard = form.shard.data
if form.backup.data:
is_backup = True
else:
is_backup = False
sql_content = sql_content.rstrip().replace("\n", " ")
# Only Create and Alter can be used with table shard
shard_create = re.search('\s*create\s+', sql_content, flags=re.IGNORECASE)
shard_alter = re.search('\s*alter\s+', sql_content, flags=re.IGNORECASE)
shard_judge = shard_create or shard_alter
if shard != '0' and not shard_judge:
flash('Only Create and Alter sql can be used when using table shard!')
return redirect(url_for('.dev_work_create'))
# split joint sql with shard numbers
if shard != '0' and shard_judge:
split_sql = sqlparse.split(sql_content)
format_table = re.sub(" +", " ", split_sql[1])
sql_content = ''
for count in range(int(shard)):
format_table_list = format_table.split(' ')
shard_name = '`' + str(format_table_list[2].strip('`')) + '_' + str(count) + '`'
format_table_list[2] = shard_name
sql_content += ' '.join(format_table_list)
sql_content = split_sql[0] + sql_content
if sql_content[-1] == ';':
work = Work()
work.name = form.name.data
work.db_name = form.db_ins.data
work.shard = form.shard.data
work.backup = is_backup
work.dev_name = current_user.name
work.audit_name = form.audit.data
work.sql_content = sql_content
result = sql_auto_review(sql_content, db_ins)
if result or len(result) != 0:
json_result = json.dumps(result)
work.status = 1
for row in result:
if row[2] == 2:
work.status = 2
break
elif re.match(r"\w*comments\w*", row[4]):
work.status = 2
break
work.auto_review = json_result
work.create_time = datetime.now()
db.session.add(work)
db.session.commit()
if current_app.config['MAIL_ON_OFF'] == 'ON':
auditor = User.query.filter(User.name == work.audit_name).first()
mail_content = "<p>Proposer:" + work.dev_name + "</p>" + "<p>Sql Content:" + work.sql_content + \
"</p>" + "<p>A new work sheet.</p>"
send_mail.delay('【inception_mysql】New work sheet', mail_content, auditor.email)
return redirect(url_for('.dev_work'))
else:
                flash('The return of Inception is null. There may be something wrong with the SQL statement.')
return redirect(url_for('.dev_work_create'))
else:
            flash("SQL statement does not end with ';'. Please check!")
return redirect(url_for('.dev_work_create'))
return render_template('dev/work_create.html', form=form, db_ins=db_ins, audits=audits) | b11f840bbc6428696afabe7f2fe00b5d0b6ad7d1 | 4,148 |
def blur(x, mean=0.0, stddev=1.0):
"""
Resize to smaller size (AREA) and then resize to original size (BILINEAR)
"""
size = tf.shape(x)[:2]
downsample_factor = 1 + tf.math.abs(tf.random.normal([], mean=mean, stddev=stddev))
small_size = tf.to_int32(tf.to_float(size)/downsample_factor)
x = tf.image.resize_images(x, small_size, method=tf.image.ResizeMethod.AREA)
x = tf.image.resize_images(x, size, method=tf.image.ResizeMethod.BILINEAR)
return x | b0101a4b820beb84c627bef048bbafeb1d11cdea | 4,149 |
def improve(update, close, guess=1, max_updates=100):
"""Iteratively improve guess with update until close(guess) is true or
max_updates have been applied."""
k = 0
while not close(guess) and k < max_updates:
guess = update(guess)
k = k + 1
return guess | 3475c07a3e9a674661d90e116bfb91fa12344d63 | 4,150 |
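A usage sketch: Newton's method for sqrt(2) expressed as an update/close pair.
sqrt2 = improve(update=lambda g: (g + 2.0 / g) / 2.0,
                close=lambda g: abs(g * g - 2.0) < 1e-9,
                guess=1.0)
print(sqrt2)   # ~1.4142135624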
def images_to_sequence(tensor):
"""Convert a batch of images into a batch of sequences.
Args:
tensor: a (num_images, height, width, depth) tensor
Returns:
(width, num_images*height, depth) sequence tensor
"""
num_image_batches, height, width, depth = _shape(tensor)
transposed = tf.transpose(tensor, [2, 0, 1, 3])
return tf.reshape(transposed, [width, num_image_batches * height, depth]) | cc89ce931239b5335d5788bc6e9007e5186648bf | 4,151 |
from typing import Any
import logging
def transform_regions(regions: list[dict[str, Any]]) -> list[dict[str, Any]]:
"""
Transform aggregated region data for map
regions -- aggregated data from region pipeline
"""
records = []
for record in regions:
if "latitude" in record["_id"].keys():
if record["admin3"]:
id = record["admin3"]
search_term = "admin3"
elif record["admin2"]:
id = record["admin2"]
search_term = "admin2"
elif record["admin1"]:
id = record["admin1"]
search_term = "admin1"
else:
id = country_name(record["country"])
if id is None:
continue
search_term = "country"
new_record = {
"_id": id,
"casecount": record["casecount"],
"country": country_name(record["country"]),
"country_code": record["country"],
"admin1": record["admin1"],
"admin2": record["admin2"],
"admin3": record["admin3"],
"lat": record["_id"]["latitude"],
"long": record["_id"]["longitude"],
"search": search_term,
}
logging.info(new_record)
records.append(new_record)
return records | 599e58e3bd66159114d0dbf27b339c47134c29c3 | 4,152 |
def _file_to_import_exists(storage_client: storage.client.Client,
bucket_name: str, filename: str) -> bool:
"""Helper function that returns whether the given GCS file exists or not."""
storage_bucket = storage_client.get_bucket(bucket_name)
return storage.Blob(
bucket=storage_bucket, name=filename).exists(storage_client) | cb051aba0d5e787e85dbc0283aa439e3c17e819c | 4,153 |
import sys
def run(args, options):
""" Compile a file and output a Program object.
If options.merge_opens is set to True, will attempt to merge any
parallelisable open instructions. """
prog = Program(args, options)
VARS['program'] = prog
if options.binary:
VARS['sint'] = GC_types.sbitintvec.get_type(int(options.binary))
VARS['sfix'] = GC_types.sbitfixvec
for i in 'cint', 'cfix', 'cgf2n', 'sintbit', 'sgf2n', 'sgf2nint', \
'sgf2nuint', 'sgf2nuint32', 'sgf2nfloat', 'sfloat', 'cfloat', \
'squant':
del VARS[i]
print('Compiling file', prog.infile)
# make compiler modules directly accessible
sys.path.insert(0, 'Compiler')
# create the tapes
exec(compile(open(prog.infile).read(), prog.infile, 'exec'), VARS)
prog.finalize()
if prog.req_num:
print('Program requires:')
for x in prog.req_num.pretty():
print(x)
if prog.verbose:
print('Program requires:', repr(prog.req_num))
print('Cost:', 0 if prog.req_num is None else prog.req_num.cost())
print('Memory size:', dict(prog.allocated_mem))
return prog | bdaf9327a94c38b9e6ba3e8206ca6ea3664b1073 | 4,154 |
from typing import Optional
from typing import List
from typing import Tuple
def get_relative_poses(
num_frames: int,
frames: np.ndarray,
selected_track_id: Optional[int],
agents: List[np.ndarray],
agent_from_world: np.ndarray,
current_agent_yaw: float,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Internal function that creates the targets and availability masks for deep prediction-type models.
    The future/history offsets (in meters) are computed. When no info is available (e.g. the agent is not in the frame)
a 0 is set in the availability array (1 otherwise).
Note: return dtype is float32, even if the provided args are float64. Still, the transformation
between space is performed in float64 to ensure precision
Args:
num_frames (int): number of offset we want in the future/history
frames (np.ndarray): available frames. This may be less than num_frames
selected_track_id (Optional[int]): agent track_id or AV (None)
agents (List[np.ndarray]): list of agents arrays (same len of frames)
agent_from_world (np.ndarray): local from world matrix
current_agent_yaw (float): angle of the agent at timestep 0
Returns:
Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: position offsets, angle offsets, extent, availabilities
"""
# How much the coordinates differ from the current state in meters.
positions_m = np.zeros((num_frames, 2), dtype=agent_from_world.dtype)
yaws_rad = np.zeros((num_frames, 1), dtype=np.float32)
extents_m = np.zeros((num_frames, 2), dtype=np.float32)
availabilities = np.zeros((num_frames,), dtype=np.float32)
for i, (frame, frame_agents) in enumerate(zip(frames, agents)):
if selected_track_id is None:
agent_centroid_m = frame["ego_translation"][:2]
agent_yaw_rad = rotation33_as_yaw(frame["ego_rotation"])
agent_extent = (EGO_EXTENT_LENGTH, EGO_EXTENT_WIDTH)
else:
# it's not guaranteed the target will be in every frame
try:
agent = filter_agents_by_track_id(frame_agents, selected_track_id)[0]
agent_centroid_m = agent["centroid"]
agent_yaw_rad = agent["yaw"]
agent_extent = agent["extent"][:2]
except IndexError:
availabilities[i] = 0.0 # keep track of invalid futures/history
continue
positions_m[i] = agent_centroid_m
yaws_rad[i] = agent_yaw_rad
extents_m[i] = agent_extent
availabilities[i] = 1.0
# batch transform to speed up
positions_m = transform_points(positions_m, agent_from_world) * availabilities[:, np.newaxis]
yaws_rad = angular_distance(yaws_rad, current_agent_yaw) * availabilities[:, np.newaxis]
return positions_m.astype(np.float32), yaws_rad, extents_m, availabilities | e1dad983e5070310ce239615c98f85d8b09b9c45 | 4,155 |
import numpy
def read_mat_cplx_bin(fname):
"""
Reads a .bin file containing floating-point values (complex) saved by Koala
Parameters
----------
fname : string
Path to the file
Returns
-------
buffer : ndarray
An array containing the complex floating-point values read from the file
See Also
--------
write_mat_cplx_bin
Example
-------
>>> buf = read_mat_cplx_bin('test/file_cplx.bin')
>>> buf
array([[ 0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j, ...,
0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j],
[ 0.00000000e+00 +0.00000000e+00j,
4.97599517e-09 +9.14632536e-10j,
5.99623329e-09 -1.52811275e-08j, ...,
1.17636354e-07 -1.01500063e-07j,
6.33714581e-10 +5.61812996e-09j,
0.00000000e+00 +0.00000000e+00j],
...,
[ 0.00000000e+00 +0.00000000e+00j,
-1.26479121e-08 -2.92324431e-09j,
-4.59448168e-09 +9.28236474e-08j, ...,
-4.15031316e-08 +1.48466597e-07j,
4.41099779e-08 -1.27046489e-08j,
0.00000000e+00 +0.00000000e+00j],
[ -0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j, ...,
0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j]], dtype=complex64)
"""
kcplx_header_dtype = numpy.dtype([
("width", "i4"),
("height", "i4")
])
f = open(fname, 'rb')
kcplx_header = numpy.fromfile(f, dtype=kcplx_header_dtype, count=1)
shape = (kcplx_header['height'], kcplx_header['width'])
#print kcplx_header
tmp = numpy.fromfile(f, dtype='float32')
f.close()
real_tmp = (tmp[0:kcplx_header['height']*kcplx_header['width']]).reshape(shape)
imag_tmp = (tmp[kcplx_header['height']*kcplx_header['width']:]).reshape(shape)
#print tmp
#print 'array = {}'.format(len(tmp))
return real_tmp + 1j*imag_tmp | f2761f4cd7031dc16cb2f9903fd431bc7b4212d8 | 4,156 |
def DeleteDataBundle(**kwargs):
"""
Deletes a Data Bundle by ID.
:param kwargs:
:return:
"""
data_bundle_id = kwargs['data_bundle_id']
del data_bundles[data_bundle_id]
return(kwargs, 200) | 88ded979e45beebe885eeb1890ce66ae367b1fd6 | 4,157 |
def determineactions(repo, deficiencies, sourcereqs, destreqs):
"""Determine upgrade actions that will be performed.
Given a list of improvements as returned by ``finddeficiencies`` and
``findoptimizations``, determine the list of upgrade actions that
will be performed.
The role of this function is to filter improvements if needed, apply
recommended optimizations from the improvements list that make sense,
etc.
Returns a list of action names.
"""
newactions = []
knownreqs = supporteddestrequirements(repo)
for d in deficiencies:
name = d.name
# If the action is a requirement that doesn't show up in the
# destination requirements, prune the action.
if name in knownreqs and name not in destreqs:
continue
newactions.append(d)
# FUTURE consider adding some optimizations here for certain transitions.
# e.g. adding generaldelta could schedule parent redeltas.
return newactions | 0ec771565560607e839ce87a65426e01d0276f36 | 4,158 |
def filter_ccfs(ccfs, sc_thresh, min_ccf):
"""
Remove noisy ccfs from irrelevant experiments
:param ccfs: 2d array
:param sc_thresh: int
number of sign changes expected
:param min_ccf: float
cutoff value for a ccf to be above the noise threshold
:return:
"""
if sc_thresh is None:
sc_thresh = np.inf
asign = np.sign(ccfs)
signchange = ((np.roll(asign, 1) - asign) != 0).astype(int)
signchange[:, 0] = 0
# (np.sum(signchange, axis=1) <= sc_thresh) &
### Do not cross correlate with a lag greater than 1/2 of the dataset when the timeseries is short
### throw out these cross correlations in filtered time-series
max_lag = ccfs.shape[1]
# if max_lag < 10:
# max_lag = int(np.ceil(ccfs.shape[1]/2.0))
filtered_ccf = ccfs[(np.sum(signchange, axis=1) <= sc_thresh) & (np.max(np.abs(ccfs), axis=1) > min_ccf),
:max_lag + 1]
return filtered_ccf | 06941eaea7bc5dc25f261669532c66ac37cbb9ab | 4,159 |
def market_data(symbol, expirationDate, strike, optionType, info=None):
"""Gets option market data from information. Takes time to load pages."""
assert all(isinstance(i, str) for i in [symbol, expirationDate, strike, optionType])
    return robin_stocks.options.get_option_market_data(symbol, expirationDate, strike, optionType, info=info) | 153d15af1030be22fa6c97b8d68fdf2049ebc416 | 4,160 |
def get_documents_embeddings (y, embedder, column):
"""
Given a Dataframe containing study_id and a text column, return a numpy array of embeddings
    The idea of this function is to avoid embedding the same text twice (for computational efficiency)
Parameters:
-----------
y: Dataframe containing study_id, and a text column
embedder: Object of embedding creator containing a transform function
column: column containing the text to Embed
Output:
-------
Numpy array of size (n, embedding_size)
"""
# Getting reports DF
reports_df = y[["study_id", column]].fillna("").drop_duplicates("study_id").reset_index(drop=True)
reports_list = reports_df[column].astype(str).values
# Getting BERT embeddings
reports_embeddings = embedder.fit_transform(reports_list)
output = pd.merge(
y[["study_id"]],
reports_df[["study_id"]].join(
pd.DataFrame(reports_embeddings)
),
left_on="study_id",
right_on="study_id",
how="left"
).iloc[:,1:].values
return output | 9a748ef8b276d68a61d78c6fa567a40aae4fc222 | 4,161 |
def index(request):
"""view fonction de la page d'accueil
Render templates de la page d'accueil
"""
return render(request, "t_myapp/index.html") | b3cf3be5d3c2a286d5705281e35042ad19d0a050 | 4,162 |
def cidr_mask_to_subnet_mask(mask_num):
"""
    Convert a CIDR prefix length to a dotted-decimal subnet mask.
    :param mask_num: prefix length, e.g. 16
    :return: dotted-decimal IPv4 netmask string
"""
return convert_to_ipv4(cidr_mask_to_ip_int(mask_num), stype='int') | 83556c856f68e82824fa1f3a34b4d629361081af | 4,163 |
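The helpers cidr_mask_to_ip_int and convert_to_ipv4 are not shown in this snippet; a self-contained equivalent of the whole conversion looks like this:
def cidr_to_netmask(prefix_len):
    # Set the top prefix_len bits of a 32-bit value, then render it as a dotted quad.
    mask = (0xFFFFFFFF << (32 - prefix_len)) & 0xFFFFFFFF
    return ".".join(str((mask >> shift) & 0xFF) for shift in (24, 16, 8, 0))

print(cidr_to_netmask(16))   # 255.255.0.0
print(cidr_to_netmask(24))   # 255.255.255.0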
def correlate(A,B,
rows=None,columns=None, mode_row='zero', mode_column='zero'):
"""Correlate A and B.
Input:
------
A,B : array
Input data.
columns : int
Do correlation at columns 0..columns, defaults to the number of columns in A.
rows : int
Do correlation at columns 0..rows, defaults to the number of rows in A.
mode_row, mode_column : string
How values outside boundaries are handled ('zero' or 'mirror').
Output:
-------
Y : array
Rows-by-columns array of correlation values.
"""
A,B = atype([A,B],[np.double,np.double])
assert A.ndim == 2 and B.ndim == 2, "Input arrays must be two dimensional"
A_r,A_c = A.shape
B_r,B_c = B.shape
columns = columns or A_c
rows = rows or A_r
assert rows <= A_r and columns <= A_c, \
"columns and rows cannot be larger than dimensions of A"
modes = {'zero': 0,
'mirror': 1}
output = np.empty((rows,columns),dtype=np.double)
_lib.correlate(A_r, A_c, A,
B_r, B_c, B,
rows, columns,
modes[mode_row], modes[mode_column],
output)
return output | 88bfec52c318aaf119a6fac5cff731855f0a0d81 | 4,164 |
def getChrLenList(chrLenDict, c):
""" Given a chromosome length dictionary keyed on chromosome names and
a chromosome name (c) this returns a list of all the runtimes for a given
chromosome across all Step names.
"""
l = []
if c not in chrLenDict:
return l
for n in chrLenDict[c]:
l.append(chrLenDict[c][n])
return l | aedf613484262ac5bd31baf384ade2eb35f3e1eb | 4,165 |
import argparse
def build_arg_parser():
"""
Build an argument parser using argparse. Use it when python version is 2.7 or later.
"""
parser = argparse.ArgumentParser(description="Smatch table calculator -- arguments")
parser.add_argument("--fl", type=argparse.FileType('r'), help='AMR ID list file')
parser.add_argument('-f', nargs='+', help='AMR IDs (at least one)')
parser.add_argument("-p", nargs='*', help="User list (can be none)")
parser.add_argument("--fd", default=isi_dir_pre, help="AMR File directory. Default=location on isi machine")
parser.add_argument('-r', type=int, default=4, help='Restart number (Default:4)')
parser.add_argument('-v', action='store_true', help='Verbose output (Default:False)')
return parser | 199332d5c6c6811ba4c11437c0cad8387bb8dd60 | 4,166 |
from typing import List as TList, Optional  # TList assumed to alias typing.List, as used in the return annotation
def query_sessions(user_id: Optional[int]) -> TList[Session]:
"""
Return all user's sessions
:param user_id: current user ID (None if user auth is disabled)
:return: list of session objects
"""
adb = get_data_file_db(user_id)
return [Session(db_session) for db_session in adb.query(DbSession)] | c7449c7805f1ba0c425140603952215b67e3ce0e | 4,167 |
import torch
import math
def positionalencoding3d(d_model, dx, dy, dz):
"""
:param d_model: dimension of the model
:param height: height of the positions
:param width: width of the positions
:return: d_model*height*width position matrix
"""
# if d_model % 6 != 0:
# raise ValueError("Cannot use sin/cos positional encoding with "
# "odd dimension (got dim={:d})".format(d_model))
pe = torch.zeros(d_model, dx, dy, dz)
# Each dimension use half of d_model
interval = int(d_model // 6) * 2
div_term = torch.exp(torch.arange(0., interval, 2) * -(math.log(10000.0) / interval))
pos_x = torch.arange(0., dx).unsqueeze(1) * div_term
pos_y = torch.arange(0., dy).unsqueeze(1) * div_term
pos_z = torch.arange(0., dz).unsqueeze(1) * div_term
pe[0:interval:2, :, :, :] = torch.sin(pos_x).T.unsqueeze(2).unsqueeze(3).repeat(1, 1, dy, dz)
pe[1:interval:2, :, :, :] = torch.cos(pos_x).T.unsqueeze(2).unsqueeze(3).repeat(1, 1, dy, dz)
pe[interval:int(interval * 2):2, :, :] = torch.sin(pos_y).T.unsqueeze(1).unsqueeze(3).repeat(1, dx, 1, dz)
pe[interval + 1:int(interval * 2):2, :, :] = torch.cos(pos_y).T.unsqueeze(1).unsqueeze(3).repeat(1, dx, 1, dz)
pe[int(interval * 2):int(interval * 3):2, :, :] = torch.sin(pos_z).T.unsqueeze(1).unsqueeze(2).repeat(1, dx, dy, 1)
pe[int(interval * 2) + 1:int(interval * 3):2, :, :] = torch.cos(pos_z).T.unsqueeze(1).unsqueeze(2).repeat(1, dx, dy, 1)
return pe | 178dc3b86e3be0c9e799f5f0c658808f541f1eca | 4,168 |
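A quick shape check for positionalencoding3d, with d_model chosen divisible by 6 as the commented-out guard suggests.

import torch

pe = positionalencoding3d(d_model=12, dx=4, dy=5, dz=6)
print(pe.shape)        # torch.Size([12, 4, 5, 6])
print(pe[:, 0, 0, 0])  # the 12 channel values encoding position (0, 0, 0)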
def make_headers(context: TraceContext) -> Headers:
"""Creates dict with zipkin headers from supplied trace context.
"""
headers = {
TRACE_ID_HEADER: context.trace_id,
SPAN_ID_HEADER: context.span_id,
FLAGS_HEADER: '0',
SAMPLED_ID_HEADER: '1' if context.sampled else '0',
}
if context.parent_id is not None:
headers[PARENT_ID_HEADER] = context.parent_id
return headers | 474e3a57af1bda99585f7d140fbd0bb1d9bd18b2 | 4,169 |
def shiftRightUnsigned(col, numBits):
"""Unsigned shift the given value numBits right.
>>> df = spark.createDataFrame([(-42,)], ['a'])
>>> df.select(shiftRightUnsigned('a', 1).alias('r')).collect()
[Row(r=9223372036854775787)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRightUnsigned(_to_java_column(col), numBits)
return Column(jc) | 342d08644c56c2cce5e02f0d3d0ddd9df0b2f173 | 4,170 |
def scalar_sub(x: Number, y: Number) -> Number:
"""Implement `scalar_sub`."""
_assert_scalar(x, y)
return x - y | 74c9d44eaaabb1bfeea012b4ec1503e37d7c9f8b | 4,171 |
def predict_attack(h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11,h12,h13):
"""
Parameters:
-name:h1
in:query
type:number
required=True
-name:h5
in:query
type:number
required:True
-name:h4
in:query
type:number
required:True
-name:h8
in:query
type:number
required:True
-name:h9
in:query
type:number
required:True
-name:h10
in:query
type:number
required:True
-name:h11
in:query
type:number
required:True
-name:h12
in:query
type:number
required:True
DESCRIPTION:output varaibles
"""
if h2=='male':
h2=0
else:
h2=1
if h3=='angina':
h3=0
elif h3=='atypical anigna':
h3=1
elif h3=='non-anignal pain':
h3=2
else:
h3=3
if h6=='greater than 120':
h6=1
else:
h6=0
if h7=='normal':
h7=0
elif h7=='ST-t normal':
h7=1
else:
h7=2
if h13=='yes':
h13=1
else:
h13=0
res=classifier.predict([[h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11,h12,h13]])
return res | 907f6b52c3b1c24a409b8b7ebc157412bd67777d | 4,172 |
def _check_varrlist_integrity(vlist):
"""Return true if shapes and datatypes are the same"""
shape = vlist[0].data.shape
datatype = vlist[0].data.dtype
for v in vlist:
        if v.data.shape != shape:
            raise Exception("Data shapes don't match")
        if v.data.dtype != datatype:
            raise Exception("Data types don't match")
return True | 1b6fedd1222757c0bc92490be85d8030ee877842 | 4,173 |
from functools import wraps

def subclassfactory(fact_method):
    """fact_method takes the same args as __init__ and returns the subclass appropriate to those args.
    That subclass may in turn override the same factory method and choose among its subclasses.
    If this factory method isn't overridden in the subclass, an object of that class is initialized.
    fact_method is made into a classmethod and must take at least a cls argument.
    """
@wraps(fact_method)
@classmethod
def wrapper(cls, *args, **kwargs):
subclass = fact_method(cls, *args, **kwargs)
submeth = getattr(subclass, fact_method.__name__)
curmeth = getattr(cls, fact_method.__name__)
if (submeth.__func__ == curmeth.__func__):
return subclass(*args, **kwargs)
else:
return submeth(*args, **kwargs)
return wrapper | eb0b8227276ed7499d21d9998ec08fb830d89642 | 4,174 |
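An illustrative use of subclassfactory with hypothetical Shape/Circle/Square classes: the decorated factory returns the subclass to build, and the wrapper instantiates it with the same arguments.

class Shape:
    def __init__(self, kind):
        self.kind = kind

    @subclassfactory
    def from_kind(cls, kind):
        # Pick the subclass from the constructor arguments.
        return {"circle": Circle, "square": Square}[kind]

class Circle(Shape):
    pass

class Square(Shape):
    pass

obj = Shape.from_kind("circle")
print(type(obj).__name__, obj.kind)  # Circle circle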
import numpy as np

def simulate_var1(x_tnow, b, mu, sigma2, m_, *, j_=1000, nu=10**9,
                  init_value=True):
"""For details, see here.
Parameters
----------
x_tnow : array, shape(n_, )
b : array, shape(n_,n_)
mu : array, shape(n_, )
sigma2 : array, shape(n_,n_)
m_ : int
nu: int
j_ : int
init_value : boolean
Returns
-------
x_tnow_thor : array, shape(j_, m_+1, n_)
"""
n_ = np.shape(sigma2)[0]
# Step 1: Monte Carlo scenarios of projected paths of the risk drivers
x_tnow_thor = np.zeros((j_, m_, n_))
for m in range(0, m_):
epsi = simulate_t(mu, sigma2, nu, j_).reshape((j_, -1))
if m > 0:
x_prec = x_tnow_thor[:, m-1, :]
else:
x_prec = np.tile(x_tnow, (j_, 1))
x_tnow_thor[:, m, :] = x_prec @ b.T + epsi
# Step 2: Include the initial value as starting node, if selected
if init_value:
x_tnow = np.tile(x_tnow, (j_, 1))
x_tnow = np.expand_dims(x_tnow, axis=1)
x_tnow_thor = np.concatenate((x_tnow, x_tnow_thor), axis=1)
return x_tnow_thor | 66bf82052e933e14d16e82738d36a4c96b51ca43 | 4,175 |
from typing import Optional
def is_drom(insee_city: Optional[str] = None, insee_region: Optional[str] = None) -> bool:
"""
Est-ce que le code INSEE de la ville ou de la région correspond à un DROM ?
Args:
insee_city: Code INSEE de la ville
insee_region: Code INSEE de la région
Returns:
Vrai ssi le code INSE est un DROM
"""
if insee_city is not None:
return insee_city[:2] in {'97', '98'}
    elif insee_region is not None:  # Region codes do not follow the department code nomenclature
return insee_region in {'01', '02', '03', '04', '06'} | 7a33516eb31c5ff7800eb6dc663d76d5e2c445cb | 4,176 |
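A few quick calls, using real INSEE codes for illustration; overseas (DROM) city codes start with 97 or 98.

print(is_drom(insee_city="97411"))   # True  (Saint-Denis, La Réunion)
print(is_drom(insee_city="75056"))   # False (Paris)
print(is_drom(insee_region="04"))    # True  (La Réunion region code)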
import math
def pack_rows(rows, bitdepth):
"""Yield packed rows that are a byte array.
Each byte is packed with the values from several pixels.
"""
assert bitdepth < 8
assert 8 % bitdepth == 0
# samples per byte
spb = int(8 / bitdepth)
def make_byte(block):
"""Take a block of (2, 4, or 8) values,
and pack them into a single byte.
"""
res = 0
for v in block:
res = (res << bitdepth) + v
return res
for row in rows:
a = bytearray(row)
# Adding padding bytes so we can group into a whole
# number of spb-tuples.
n = float(len(a))
extra = math.ceil(n / spb) * spb - n
a.extend([0] * int(extra))
# Pack into bytes.
# Each block is the samples for one byte.
blocks = group(a, spb)
yield bytearray(make_byte(block) for block in blocks) | e0b8a4701adf1757a558475e2ea5830a3d53ab2a | 4,177 |
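A worked example of the per-byte packing arithmetic (the group helper used above is not shown, so this sketch packs a single block by hand).

bitdepth = 1
block = [1, 0, 1, 1, 0, 0, 1, 0]   # 8 one-bit samples -> one packed byte
res = 0
for v in block:
    res = (res << bitdepth) + v
print(res, bin(res))  # 178 0b10110010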
def reset_user_pwd(username: str) -> int:
"""
    :param username: the username
    :return: result code: 1 on success, 0 on failure
"""
return update_user_info(username=username, args={
'password': '12345678'
}) | a9703bb82913b47e9b59ba36cd9257323cbfeec2 | 4,178 |
import pandas as pd

def location_engineering(df: pd.DataFrame) -> pd.DataFrame:
"""Call the `location_dict()` function to get the location dictionary and the
`location_dataframe()` one to add the location dictionary info to the DataFrame.
Parameters
----------
df :
The dataframe to work with.
Returns
-------
The DataFrame with location info added.
"""
# Call `location_dict` function to get a dictionary with location info
location_dictionary = location_dict(df)
# Call `location_dataframe` function to add the `location_dict` to a df
df = location_dataframe(df, location_dictionary)
return df | cca3e1724da08ffcb895aa9fc323ebaf380760e4 | 4,179 |
import re
def extract_energyxtb(logfile=None):
"""
Extracts xtb energies from xtb logfile using regex matching.
Args:
logfile (str): Specifies logfile to pull energy from
Returns:
energy (list[float]): List of floats containing the energy in each step
"""
re_energy = re.compile("energy: (-\\d+\\.\\d+)")
energy = []
with logfile.open() as f:
for line in f:
if "energy" in line:
energy.append(float(re_energy.search(line).groups()[0]))
return energy | 075f9d48d3bcc9f6bd12aa791cc4d0444299dd74 | 4,180 |
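The regex in isolation, applied to an illustrative xtb-style log line (the exact line format shown here is an assumption).

import re

re_energy = re.compile("energy: (-\\d+\\.\\d+)")
line = "   4   scc iter.  energy: -42.123456  gnorm: 0.000123"
match = re_energy.search(line)
print(float(match.groups()[0]))  # -42.123456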
import os
def GetPID():
"""Returns the PID of the shell."""
return os.getppid() | 28e56a9d0c1c6c1d005c58f5c9fffeb3857d8877 | 4,181 |
import pandas as pd

def make_transaction_frame(transactions):
"""
Formats a transaction DataFrame.
Parameters
----------
transactions : pd.DataFrame
Contains improperly formatted transactional data.
Returns
-------
df : pd.DataFrame
        Daily transaction volume and dollar amount.
- See full explanation in tears.create_full_tear_sheet.
"""
transaction_list = []
for dt in transactions.index:
txns = transactions.loc[dt]
if len(txns) == 0:
continue
for txn in txns:
txn = map_transaction(txn)
transaction_list.append(txn)
df = pd.DataFrame(sorted(transaction_list, key=lambda x: x['dt']))
df['txn_dollars'] = -df['amount'] * df['price']
df.index = list(map(pd.Timestamp, df.dt.values))
return df | ab8feafb1a441fddf574ebd12a7720a7c4d7398b | 4,182 |
def find_or_create_role(name, desc):
""" Find existing role or create new role """
    role = Role.query.filter(Role.name == name).first()
    if not role:
        role = Role(name=name, desc=desc)
    return role | 414b960488d55ea6c2cc41121132f06f0d677abd | 4,183
import os
from osgeo import ogr

def enumerate_shapefile_fields(shapefile_uri):
    """Enumerate all the fields in a shapefile.
Inputs:
-shapefile_uri: uri to the shapefile which fields have to be
enumerated
Returns a nested list of the field names in the order they are stored
    in the layer, and grouped per layer in the order the layers appear.
"""
message = shapefile_uri + "' doesn't point to a file."
assert os.path.isfile(shapefile_uri), message
shapefile = ogr.Open(shapefile_uri)
message = "OGR can't open " + shapefile_uri
assert shapefile is not None, message
layer_count = shapefile.GetLayerCount()
names = [] # names are organized by layer
for l in range(layer_count):
names.append([])
layer = shapefile.GetLayer(l)
feature = layer.GetFeature(0)
field_count = feature.GetFieldCount()
for f in range(field_count):
field_defn = feature.GetFieldDefnRef(f)
names[l].append(field_defn.GetNameRef())
return names | 1a1a128daa991854629894b7e23e90253761a1c8 | 4,184 |
def parse_nrrdvector(inp):
"""Parse a vector from a nrrd header, return a list."""
assert inp[0] == '(', "Vector should be enclosed by parenthesis."
assert inp[-1] == ')', "Vector should be enclosed by parenthesis."
return [_to_reproducible_float(x) for x in inp[1:-1].split(',')] | 3e3c793d3ee53198c4cdb01832062be4f0c02876 | 4,185 |
def _estimate_gaussian_covariances_spherical(resp, X, nk, means, reg_covar):
"""Estimate the spherical variance values.
Parameters
----------
    resp : array-like of shape (n_samples, n_components)
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
means : array-like of shape (n_components, n_features)
reg_covar : float
Returns
-------
variances : array, shape (n_components,)
The variance values of each components.
"""
return _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar).mean(1) | 6f08d04528f5e515d5ae75d4dc47753cc4cebc7b | 4,186 |
import os
import pandas as pd
def parsed_codebook_importer(codebook):
"""
Import the parsed CPS codebook
Parameters:
codebook (str): the filename of the parsed codebook
Returns:
dataframe
"""
path_finder('codebooks')
skip = row_skipper(codebook)
codebook = pd.read_csv(codebook, sep='\t', skiprows=skip).dropna()
os.chdir('..')
return codebook | abb0b261ab894f6ea5111be07eaa00d256bfa3c9 | 4,187 |
def get_html(url):
"""Returns html content of the url. Retries until successful without overloading the server."""
while True:
# Retry until succesful
try:
sleep(2)
debug('Crawling %s' % url)
html = urllib2.urlopen(url).read()
return html
except urllib2.HTTPError, e:
warn('HTTP error %s while crawling %s. Trying again.' % (e, url))
sleep(5)
continue
except urllib2.URLError, e:
warn('URL error %s while crawling %s. Trying again.' % (e, url))
sleep(5)
continue | a444151add46273c6e72ead585d04aa65e7e7734 | 4,188 |
def map_min(process):
"""
"""
param_dict = {'ignore_nodata': 'bool'}
return map_default(process, 'min', 'reduce', param_dict) | 33dcc2192fd8b979e7238c1fdbe5e9bec551dd3f | 4,189 |
from django.db.models import Q
from django.http import JsonResponse

def geoname_exhaustive_search(request, searchstring):
    """
    Search geonames whose English or alternate names start with the given string, optionally filtered by a list of feature codes
    """
if request.query_params.get('fcode'):
fcodes = [ s.upper() for s in request.query_params.get('fcode').split(',')]
else:
fcodes = []
limit = request.query_params.get('limit') or 50
if request.method == 'GET':
geonames = Geoname.objects \
.filter(
Q(englishname__startswith=searchstring) |
Q(alternatenames__alternatename__startswith=searchstring,
alternatenames__iscolloquial=0
)
) \
.order_by('-population','-fcode__searchorder_detail').distinct()
if len(fcodes) > 0:
geonames = geonames.filter(fcode__code__in=fcodes)
if limit:
geonames = geonames[:limit]
serializer = GeonameSearchSerializer(geonames,many=True)
return JsonResponse(serializer.data, safe=False) | 5a04a158a146e7e0ad3265d89520774b65c3780a | 4,190 |
import sys
from os import listdir
def guess_temperature_sensor():
"""
Try guessing the location of the installed temperature sensor
"""
devices = listdir(DEVICE_FOLDER)
devices = [device for device in devices if device.startswith('28-')]
if devices:
# print "Found", len(devices), "devices which maybe temperature sensors."
return DEVICE_FOLDER + devices[0] + DEVICE_SUFFIX
else:
sys.exit("Sorry, no temperature sensors found") | d1a37d34eedb1e9a99e481ac3ffb6f5777fcdb7a | 4,191 |
def count_reads(regions_list, params):
""" Count reads from bam within regions (counts position of cutsite to prevent double-counting) """
bam_f = params.bam
read_shift = params.read_shift
bam_obj = pysam.AlignmentFile(bam_f, "rb")
log_q = params.log_q
logger = TobiasLogger("", params.verbosity, log_q) #sending all logger calls to log_q
#Count per region
read_count = 0
logger.spam("Started counting region_chunk ({0} -> {1})".format("_".join([str(element) for element in regions_list[0]]), "_".join([str(element) for element in regions_list[-1]])))
for region in regions_list:
read_lst = ReadList().from_bam(bam_obj, region)
for read in read_lst:
read.get_cutsite(read_shift)
if read.cutsite > region.start and read.cutsite < region.end: #only reads within borders
read_count += 1
logger.spam("Finished counting region_chunk ({0} -> {1})".format("_".join([str(element) for element in regions_list[0]]), "_".join([str(element) for element in regions_list[-1]])))
bam_obj.close()
return(read_count) | ffd8cc6afc6c0b5b92d82292ab9d4a54ef918641 | 4,192 |
def rgbImage2grayVector(img):
""" Turns a row and column rgb image into a 1D grayscale vector """
gray = []
for row_index in range(0, len(img)):
for pixel_index, pixel in enumerate(img[row_index]):
gray.append(rgbPixel2grayscaleValue(pixel))
return gray | a93bbb2dfa29cb3d4013334226e77f6beb526a13 | 4,193 |
def compute_MSE(predicted, observed):
""" predicted is scalar and observed as array"""
if len(observed) == 0:
return 0
err = 0
for o in observed:
err += (predicted - o)**2/predicted
return err/len(observed) | e2cc326dde2ece551f78cd842d1bf44707bfb6db | 4,194 |
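A worked example; note that each squared error is divided by predicted, so this is a scaled squared-error statistic rather than the textbook MSE.

print(compute_MSE(2.0, [1.0, 3.0]))  # ((2-1)**2/2 + (2-3)**2/2) / 2 = 0.5
print(compute_MSE(2.0, []))          # 0 when there are no observations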
def log_sum(log_u):
"""Compute `log(sum(exp(log_u)))`"""
if len(log_u) == 0:
return NEG_INF
maxi = np.argmax(log_u)
max = log_u[maxi]
if max == NEG_INF:
return max
else:
exp = log_u - max
np.exp(exp, out = exp)
return np.log1p(np.sum(exp[:maxi]) + np.sum(exp[maxi + 1:])) + max | f2c7917bc806dc7ec3fbbb1404725f590a82e194 | 4,195 |
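A quick comparison with the naive formula, assuming NEG_INF is defined as -np.inf (it is not shown above).

import numpy as np

NEG_INF = -np.inf
log_u = np.array([-1000.0, -1000.5, -1001.0])
naive = np.log(np.sum(np.exp(log_u)))   # exp underflows to 0, so this is -inf
stable = log_sum(log_u)                 # about -999.32, computed without underflow
print(naive, stable)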
import os
import numpy as np
from datetime import datetime, timezone
def gather_basic_file_info(filename: str):
"""
Build out the basic file metadata that can be gathered from any file on the file system.
Parameters
----------
filename
full file path to a file
Returns
-------
dict
basic file attributes as dict
"""
if not os.path.exists(filename):
raise EnvironmentError('{} does not exist'.format(filename))
elif not os.path.isfile(filename):
raise EnvironmentError('{} is not a file'.format(filename))
last_modified_time = None
created_time = None
filesize = None
time_added = None
try:
stat_blob = os.stat(filename)
last_modified_time = datetime.fromtimestamp(stat_blob.st_mtime, tz=timezone.utc)
created_time = datetime.fromtimestamp(stat_blob.st_ctime, tz=timezone.utc)
filesize = np.around(stat_blob.st_size / 1024, 3) # size in kB
time_added = datetime.now(tz=timezone.utc)
except FileNotFoundError:
print('Unable to read from {}'.format(filename))
return {'file_path': filename, 'last_modified_time_utc': last_modified_time,
'created_time_utc': created_time, 'file_size_kb': filesize, 'time_added': time_added} | cb18c5213ce7a7d4f1355e84e6f6debe2052490b | 4,196 |
def special_value_sub(lhs, rhs):
""" Subtraction between special values or between special values and
numbers """
if is_nan(lhs):
return FP_QNaN(lhs.precision)
elif is_nan(rhs):
return FP_QNaN(rhs.precision)
elif (is_plus_infty(lhs) and is_plus_infty(rhs)) or \
(is_minus_infty(lhs) and is_minus_infty(rhs)):
return FP_QNaN(lhs.precision)
elif is_plus_infty(lhs) and is_minus_infty(rhs):
return lhs
elif is_minus_infty(lhs) and is_plus_infty(rhs):
return lhs
elif is_infty(lhs) and is_zero(rhs):
return lhs
elif is_infty(lhs):
        # the invalid inf - inf case was excluded above
return lhs
elif is_infty(rhs):
return -rhs
else:
return lhs + (-rhs) | df64cf6c306c3192ba28d08e878add7ce0f27a2c | 4,197 |
import urlparse  # Python 2 module; urllib.parse in Python 3

def parse_git_repo(git_repo):
"""Parse a git repository URL.
git-clone(1) lists these as examples of supported URLs:
- ssh://[user@]host.xz[:port]/path/to/repo.git/
- git://host.xz[:port]/path/to/repo.git/
- http[s]://host.xz[:port]/path/to/repo.git/
- ftp[s]://host.xz[:port]/path/to/repo.git/
- rsync://host.xz/path/to/repo.git/
- [user@]host.xz:path/to/repo.git/
- ssh://[user@]host.xz[:port]/~[user]/path/to/repo.git/
- git://host.xz[:port]/~[user]/path/to/repo.git/
- [user@]host.xz:/~[user]/path/to/repo.git/
- /path/to/repo.git/
- file:///path/to/repo.git/
This function doesn't support the <transport>::<address> syntax, and it
doesn't understand insteadOf shortcuts from ~/.gitconfig.
"""
if '://' in git_repo:
return urlparse.urlparse(git_repo)
if ':' in git_repo:
netloc, colon, path = git_repo.partition(':')
return urlparse.ParseResult('ssh', netloc, path, '', '', '')
else:
return urlparse.ParseResult('file', '', git_repo, '', '', '') | 5eddf3aa9016996fb8aa1720b506c2f86b2e9c14 | 4,198 |
import cirq

def make_wavefunction_list(circuit, include_initial_wavefunction=True):
    """Simulate the circuit, keeping track of the state vector at each step."""
wavefunctions = []
simulator = cirq.Simulator()
for i, step in enumerate(simulator.simulate_moment_steps(circuit)):
wavefunction_scrambled = step.state_vector()
wavefunction = unscramble_wavefunction(wavefunction_scrambled)
wavefunctions.append(wavefunction)
if include_initial_wavefunction:
initial_wavefunction = wavefunctions[0]*0 # create a blank vector
initial_wavefunction[0] = 1
wavefunctions = [initial_wavefunction]+wavefunctions
return wavefunctions | af33d4a7be58ccfa7737deb289cbf5d581246e86 | 4,199 |