content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
def prepare_comparator(comparator_path):
""" Processes the comparator path from the benchmark specification. Imports the object
dynamically.
Parameters
----------
comparator_path : str
Path to the python script file containing the comparator definition.
Returns
-------
ccobra.CCobraComparator
Comparator object.
"""
comp = None
with contextmanager.dir_context(comparator_path):
imp = modelimporter.ModelImporter(comparator_path, superclass=CCobraComparator)
comp = imp.instantiate()
if not comp:
raise ValueError('Failed to instantiate comparator class.')
return comp | 63b9f863a3d68a6fc7bb36c9da909004859e469b | 11,200 |
import csv
def get_genes(path):
"""Returns a list of genes from a DE results table"""
with open(path) as gene_list:
gene_list = csv.reader(gene_list)
gene_list = [row[0] for row in gene_list if row[0].startswith('P')]
return gene_list | 9deed781edc0514348b7f6c2f6ac2d302f30295d | 11,201 |
import copy
import os
def process(path, ignore=[]):
"""calculate SET1 directory stats for given path, skipping
directories mentioned in ignore (e.g. '.hg', '.svn', ...)
"""
if not PY3K:
        # unicode is critical for non-English locale names on Windows
path = unicode(path)
s = copy.copy(SET1)
s['totalsize'] = 0
for root, dirs, files in os.walk(path):
# filter directories
for ig in ignore:
if ig in dirs:
dirs.remove(ig)
for f in files:
s['totalsize'] += os.path.getsize(os.path.join(root, f))
s['filesnum'] += len(files)
s['dirsnum'] += len(dirs)
return s | b786dad9684f23f1ebe2db7172b21de2ceb34751 | 11,202 |
import types
def getNoncaptureMovesForRegularPiece(theGame, pieceLocation):
""" This returns a GameNode for every legal move of a regular piece """
moveList = []
xBoard = pieceLocation.get_x_board()
yBoard = pieceLocation.get_y_board()
pieceDestinationLeft = None
pieceDestinationRight = None
if theGame.getState(pieceLocation) is types.PLAYER_A_REGULAR:
# Player A moves in positive Y increments
moveDelta = 1
elif theGame.getState(pieceLocation) is types.PLAYER_B_REGULAR:
# Player B moves in negative Y increments
moveDelta = -1
pieceDestinationLeft = getCoordinateHelper(xBoard - 1, yBoard + moveDelta)
pieceDestinationRight = getCoordinateHelper(xBoard + 1, yBoard + moveDelta)
if (pieceDestinationLeft and
destinationIsEmpty(theGame, pieceDestinationLeft)):
moveList.append(makePieceMove(theGame,
pieceDestinationLeft,
pieceLocation))
if (pieceDestinationRight and
destinationIsEmpty(theGame, pieceDestinationRight)):
moveList.append(makePieceMove(theGame,
pieceDestinationRight,
pieceLocation))
return moveList | 13ad1e5cc4fcbd17b55b66703b41a49e5283efb8 | 11,203 |
import pathlib
def _top_level_package_filenames(tarball_paths):
"""Transform the iterable of npm tarball paths to the top-level files contained within the package."""
paths = []
for path in tarball_paths:
parts = pathlib.PurePath(path).parts
if parts[0] == "package" and len(parts) == 2:
paths.append(parts[1])
return frozenset(paths) | 6b9b825eff14fe2e40f33c2caac104cf9869b277 | 11,204 |
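# A quick usage sketch of the helper above, assuming it is in scope; npm tarballs
# place package files under a top-level "package/" directory, so only direct
# children of that directory survive. The paths below are made up.
example_paths = [
    "package/package.json",
    "package/README.md",
    "package/lib/index.js",  # nested one level deeper, so excluded
    "other/file.txt",        # not under "package/", so excluded
]
print(_top_level_package_filenames(example_paths))
# frozenset({'package.json', 'README.md'})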
from sklearn.preprocessing import MinMaxScaler, StandardScaler
def scale(X_train, X_test, type='MinMaxScaler', tuning_mode=True):
    """
    This function applies Min-Max or Standard scaling to a set of features split into train and test data
    Args:
        X_train: a pandas dataframe with features of the training window
        X_test: a pandas dataframe with features of the test window
        type: which scaler to use, either 'MinMaxScaler' (default) or 'StandardScaler'
        tuning_mode: a boolean flag used when tuning; when True (the default), the shapes of the scaled sets are printed
    Return:
        Two arrays derived from the original dataframes after applying StandardScaler() or MinMaxScaler(), where the scaler is fit using the X_train features only
    """
# Create an Scaler instance
scaler = MinMaxScaler()
if type=='StandardScaler':
scaler = StandardScaler()
# Apply the scaler model to fit the X_train data
X_scaler = scaler.fit(X_train)
# Transform the X_train and X_test DataFrames using the X_scaler
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
if tuning_mode == True:
print(f"X_train_scaled shape: {X_train_scaled.shape}")
print(f"X_test_scaled shape: {X_test_scaled.shape}")
return X_train_scaled, X_test_scaled | 58d283856d789158847138c002e9f1f19d4beb9f | 11,205 |
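# A minimal sketch of how scale() above might be called, assuming scikit-learn
# and numpy are installed; the feature matrices here are made up.
import numpy as np
X_train_demo = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
X_test_demo = np.array([[1.5, 15.0], [4.0, 40.0]])
train_scaled, test_scaled = scale(X_train_demo, X_test_demo, type='MinMaxScaler', tuning_mode=False)
# The scaler is fit on X_train only, so test values can fall outside [0, 1].
print(train_scaled.min(), train_scaled.max())  # 0.0 1.0
print(test_scaled)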
def calc_positions(pl, pr, region1, region3, w, xi, t, gamma, dustFrac=0.):
"""
:return: tuple of positions in the following order ->
Head of Rarefaction: xhd, Foot of Rarefaction: xft,
Contact Discontinuity: xcd, Shock: xsh
"""
p1, rho1 = region1[:2] # don't need velocity
p3, rho3, u3 = region3
c1 = sound_speed(gamma, p1, rho1, dustFrac)
c3 = sound_speed(gamma, p3, rho3, dustFrac)
if pl > pr:
xsh = xi + w * t
xcd = xi + u3 * t
xft = xi + (u3 - c3) * t
xhd = xi - c1 * t
else:
# pr > pl
xsh = xi - w * t
xcd = xi - u3 * t
xft = xi - (u3 - c3) * t
xhd = xi + c1 * t
return xhd, xft, xcd, xsh | 8034d327c4d9c9c771137e3eff49f8627315b10a | 11,206 |
import inspect
import numpy as np
from numpy import pi
import scipy.fftpack
def spectrum_correlation_fft(tlist, y):
"""
Calculate the power spectrum corresponding to a two-time correlation
function using FFT.
Parameters
----------
tlist : *list* / *array*
list/array of times :math:`t` which the correlation function is given.
y : *list* / *array*
list/array of correlations corresponding to time delays :math:`t`.
Returns
-------
w, S : *tuple*
Returns an array of angular frequencies 'w' and the corresponding
one-sided power spectrum 'S(w)'.
"""
if debug:
print(inspect.stack()[0][3])
N = len(tlist)
dt = tlist[1] - tlist[0]
F = scipy.fftpack.fft(y)
# calculate the frequencies for the components in F
f = scipy.fftpack.fftfreq(N, dt)
# select only indices for elements that corresponds
# to positive frequencies
indices = np.where(f > 0.0)
return 2 * pi * f[indices], 2 * dt * np.real(F[indices]) | e9c8b01fb2944c7fb0ac16070710c751502cc836 | 11,207 |
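# A hedged usage sketch, assuming the function above is defined in the current
# module (it reads a module-level `debug` flag, set here) and numpy/scipy are
# installed. For an exponentially decaying correlation y(t) = exp(-t), the
# returned one-sided spectrum should approximate the Lorentzian 2 / (1 + w**2).
debug = False
tlist_demo = np.linspace(0, 20, 2000)
y_demo = np.exp(-tlist_demo)
w_demo, S_demo = spectrum_correlation_fft(tlist_demo, y_demo)
print(S_demo[:3])                     # numerical spectrum at low frequencies
print(2.0 / (1.0 + w_demo[:3] ** 2))  # analytic Lorentzian for comparison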
import tensorflow as tf
def accuracy(targets, predictions, weights=None):
"""Computes the categorical accuracy.
Given a set of ground truth values and a set of predicted labels as tensors of
    the same shape, it returns a tensor of the same shape with 1.0 in those positions
    where the ground truth value and the predicted one are equal, 0.0 otherwise.
    So, if the ground truth is [[1, 2, 3], [0, 9, 23]] and the predicted labels
    are [[1, 2, 4], [9, 0, 23]] the result will be: [[1, 1, 0], [0, 0, 1]].
Arguments:
      targets: the ground truth values `Tensor`, with `tf.int32` as `dtype`. It has rank
`[d_0, d_1, ..., d_{r-1}]` and the last value is supposed to range between
`0` and `num_classes - 1`, where `num_classes` is the number of possible classes.
predictions: the predicted values `Tensor` with `tf.float32` as `dtype`. It can
have shape `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float32` and
represents the probability distribution across the output classes generated by
the model -- so that the predicted label is the one coming from argmax over the
        last dimension. Alternatively it can be of the same shape, `dtype` and format as
        `targets`, and it will be considered as the predicted labels.
weights: coefficients for the metric. This must be scalar or of same rank as `target`.
Returns:
      values: a `Tensor` of `dtype=tf.float32` and of the same shape as `targets`
representing the accuracy, weighted according to the input argument `weights`.
weights: a `Tensor` of `dtype=tf.float32` and of the same shape of `values`
representing the weighted scheme for the streaming average on `values`, which
is the same tensor of the input `weights` argument.
"""
trank = targets.get_shape().ndims
prank = predictions.get_shape().ndims
if prank > trank:
diff = prank - trank
if diff > 1:
            raise ValueError(
                """Rank of `predictions` must be equal to rank of `targets` """
                """or greater by 1, found %d and %d instead.""" % (prank, trank))
predictions = tf.argmax(predictions, axis=-1) # tf.int64!!!
predictions = tf.cast(predictions, tf.int32)
is_equal = tf.equal(targets, predictions)
is_equal = tf.cast(is_equal, tf.float32)
if weights is not None:
is_equal = tf.multiply(is_equal, weights)
return is_equal, weights | 08dba6fab0f09c8d1507d00a8501192b2f008499 | 11,208 |
import re
def jp_author_name_normalized(name):
"""Construct the author name as P. Szekely."""
clean = name.replace('.',' ').replace(',',' ').replace(';', ' ')
clean = asciiChars(clean, '')
names = re.sub(r'\s+', ' ', clean.strip()).split(' ');
last_word = names[-1]
if len(last_word) == 1:
# The last word is an initial, so we accumulate all words before it that are not initials
# that will be our last name
i = 0;
index = -1 # index of last word that is not an initial
for n in names:
if len(n)>1:
index = i
else:
names[i] = n + '.'
i = i + 1;
if index == -1 or index == len(names) - 1:
return ' '.join(names).title();
last = names[index]
first = ' '.join(names[0:index]) + ' '.join(names[index + 1:])
return (first + ' ' + last).title()
else:
i = 0
for n in names:
if len(n) == 1:
names[i] = n + '.'
elif i < len(names) - 1:
names[i] = n[0] + '.'
i = i + 1
return ' '.join(names).title(); | 61e5aa4290611266d4c96c67a870ce4c375a291c | 11,209 |
import math
import tensorflow as tf
def palette(tensor, shape, name=None, time=0.0, speed=1.0):
"""
Another approach to image coloration
https://iquilezles.org/www/articles/palettes/palettes.htm
"""
if not name:
return tensor
channel_shape = [shape[0], shape[1], 3]
p = palettes[name]
offset = p["offset"] * tf.ones(channel_shape)
amp = p["amp"] * tf.ones(channel_shape)
freq = p["freq"] * tf.ones(channel_shape)
phase = p["phase"] * tf.ones(channel_shape) + time
# Multiply value_map's result x .875, in case the image is just black and white (0 == 1, we don't want a solid color image)
return offset + amp * tf.math.cos(math.tau * (freq * value.value_map(tensor, shape, keepdims=True, with_normalize=False) * .875 + phase)) | 5717a405a68f54257082f15ce54ccb1fe3d52bd1 | 11,210 |
import pandas as pd
def binarize_categorical_columns(
    input_train_df, input_test_df, categorical_columns):
  """Function for converting categorical features to one-hot encodings."""
# Binarize categorical columns.
binarized_train_df = pd.get_dummies(
input_train_df, columns=categorical_columns)
binarized_test_df = pd.get_dummies(
input_test_df, columns=categorical_columns)
# Make sure the train and test dataframes have the same binarized columns.
# Identify columns in train set not in test set and fill them in test set.
test_df_missing_cols = set(binarized_train_df.columns) - set(
binarized_test_df.columns)
for c in test_df_missing_cols:
binarized_test_df[c] = 0
# Identify columns in test set not in train set and fill them in train set.
train_df_missing_cols = set(binarized_test_df.columns) - set(
binarized_train_df.columns)
for c in train_df_missing_cols:
binarized_train_df[c] = 0
  # Just to be sure that both train and test dfs have the same columns.
binarized_train_df = binarized_train_df[binarized_test_df.columns]
return binarized_train_df, binarized_test_df | 537b5271298ee00b60fd16039ba63d02c61189b9 | 11,211 |
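# A small, self-contained illustration of the column-alignment idea above,
# assuming pandas is installed and the function is in scope; the frames are made up.
train_demo = pd.DataFrame({"color": ["red", "blue"], "size": [1, 2]})
test_demo = pd.DataFrame({"color": ["green", "red"], "size": [3, 4]})
train_bin, test_bin = binarize_categorical_columns(train_demo, test_demo, categorical_columns=["color"])
# Both frames now share the same one-hot columns, with zeros filled in for
# categories that appear in only one of the two splits.
print(sorted(train_bin.columns) == sorted(test_bin.columns))  # True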
import numpy as np
from scipy.sparse.linalg import svds
def smallest_sval(X, solver='lobpcg', **kws):
"""
Computes the smallest singular value of a matrix using
scipy.sparse.linalg.svds
Parameters
----------
X: array-like
solver: str
Which solver to use. Must be one of ['lobpcg', 'arpack']
**kws
Kws for svds
Output
------
smallest_sval: float
The smallest singular value of X
"""
# for 1d arrays return the frobenius norm
if min(X.shape) == 1:
return np.sqrt((X.reshape(-1) ** 2).sum())
return svds(X, k=1, which='SM', solver=solver, **kws)[1].item() | ba5436fbc347eb050cc69c3b7dec7df414d31403 | 11,212 |
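# A quick comparison against a dense SVD, assuming numpy/scipy are installed and
# the helper above is in scope; exact agreement depends on the iterative
# solver's tolerance, so the values are printed rather than asserted.
rng = np.random.RandomState(0)
X_demo = rng.randn(50, 8)
print(smallest_sval(X_demo))
print(np.linalg.svd(X_demo, compute_uv=False).min())  # should agree closely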
import numpy as np
def mean_ndcg_score(u_scores, u_labels, wtype="max"):
"""Mean Normalize Discounted cumulative gain (NDCG) for all users.
Parameters
----------
    u_scores : array of arrays, shape = [num_users]
        Each array is the predicted scores, shape = [n_samples[u]]
    u_labels : array of arrays, shape = [num_users]
Each array is the ground truth label, shape = [n_samples[u]]
wtype : 'log' or 'max'
type for discounts
Returns
-------
mean_ndcg : array, shape = [num_users]
mean ndcg for each user (averaged among all rank)
avg_ndcg : array, shape = [max(n_samples)], averaged ndcg at each
position (averaged among all users for given rank)
"""
num_users = len(u_scores)
n_samples = [len(scores) for scores in u_scores]
max_sample = max(n_samples)
count = np.zeros(max_sample)
mean_ndcg = np.zeros(num_users)
avg_ndcg = np.zeros(max_sample)
for u in range(num_users):
ndcg = ndcg_score(u_scores[u], u_labels[u], wtype)
avg_ndcg[: n_samples[u]] += ndcg
count[: n_samples[u]] += 1
mean_ndcg[u] = ndcg.mean()
return mean_ndcg, avg_ndcg / count | 6a6e60182ec8bf2e3677779a5397d76fe492740c | 11,213 |
from typing import List
from typing import Optional
import re
def load_genbank_features(
        genbank_record: SeqRecord.SeqRecord,
        terminus: Optional[SeqFeature.FeatureLocation] = None
) -> List[GENBANK_FEATURE]:
"""
Parses a GenBank record and generates Bitome knowledgebase objects based on the features within the record
Currently set up to create the following feature types:
- Gene
- Protein
- TRNA
- MobileElement
- RepeatRegion
- Origin
:param SeqRecord.SeqRecord genbank_record: the Genbank record to parse
    :param Optional[SeqFeature.FeatureLocation] terminus: the location of the terminus region for this genome; used to
determine whether a GenBank feature is on the leading or lagging strand
:return List[GENBANK_FEATURE] genbank_features: the GenBank-based knowledgebase objects for genomic features
"""
# some functionality is limited to the E. coli K-12 MG1655 genome annotation currently; set a flag for that
is_k12 = genbank_record.id == 'NC_000913.3'
if is_k12:
locus_tag_cog_lookup = load_locus_tag_cogs_lookup()
locus_tag_yome_lookup = load_locus_tag_yome_lookup()
essential_locus_tags = load_essential_genes()
else:
locus_tag_cog_lookup = {}
locus_tag_yome_lookup = {}
essential_locus_tags = []
genome_seq = genbank_record.seq
# separate the gene SeqFeatures and non-gene SeqFeatures from the GenBank record
gene_seqfeatures = select_seqfeatures(genbank_record.features, 'gene')
non_gene_seqfeatures = list(set(genbank_record.features).difference(set(gene_seqfeatures)))
origin_seqfeatures = select_seqfeatures(non_gene_seqfeatures, 'rep_origin')
origins: list = []
for origin_seqfeature in origin_seqfeatures:
origins.append(Origin(
origin_seqfeature.location,
genome_seq,
name=get_seqfeature_qualifier(origin_seqfeature, 'note')
))
genes: list = []
proteins: list = []
trnas: list = []
for gene_seqfeature in gene_seqfeatures:
locus_tag = get_seqfeature_qualifier(gene_seqfeature, 'locus_tag')
gene_name = get_seqfeature_qualifier(gene_seqfeature, 'gene')
gene_location = gene_seqfeature.location
# pseudogenes have a 'pseudo' key (with empty value) in their qualifiers dictionary
is_pseudo = 'pseudo' in gene_seqfeature.qualifiers
# determine if feature is leading, lagging, or in terminus region (if we have that region provided)
# assumes the origin is at a "higher" position in the linear numbering of the chromosome than terminus
replication_strand = None
origin_distance = None
terminus_distance = None
if len(origins) == 1 and terminus is not None:
origin = origins[0]
gene_start = gene_location.start.position
gene_end = gene_location.end.position
gene_strand = gene_location.strand
# all below descriptions of conditional cases assume we're looking at the genome with origin at the top,
# and the absolute position numbering goes clockwise; so the origin is 12:00, terminus is 5:30 - 6:30
# the gene is in the 12:00 - 5:30 region; note, we're assuming that the wraparound point is here (i.e.
# the spot where base 4.6M and base 1 are adjacent; also assuming that terminus region is 180 degrees
# from origin, so that the clockwise direction will definitely be shorter
if gene_start > origin.location.end.position or gene_start < terminus.start.position:
if gene_strand == 1:
replication_strand = 'leading'
else:
replication_strand = 'lagging'
if gene_start > origin.location.end.position:
origin_distance = gene_start - origin.location.end.position
terminus_distance = terminus.start.position + (len(genome_seq) - gene_end)
else:
origin_distance = (len(genome_seq) - origin.location.end.position) + gene_start
terminus_distance = terminus.start.position - gene_end
# the gene is in the terminus region between 5:30 and 6:30; can't guarantee if it's leading or lagging
# also don't assume which direction to origin is closer; distance to terminus is 0 since it's in there
elif terminus.start.position < gene_start < terminus.end.position:
replication_strand = 'terminus'
origin_distance_1 = (len(genome_seq) - origin.location.end.position) + gene_start
origin_distance_2 = origin.location.start.position - gene_end
origin_distance = min(origin_distance_1, origin_distance_2)
terminus_distance = 0
# the gene is on the left of the clock (6:30 - 12:00)
elif terminus.end.position < gene_start < origin.location.start.position:
if gene_strand == 1:
replication_strand = 'lagging'
else:
replication_strand = 'leading'
origin_distance = origin.location.start.position - gene_end
terminus_distance = gene_start - terminus.end.position
# isolate the feature that this gene codes; GenBank record separates these; e.g. a gene and its 'CDS' (coding
# sequence) will be distinct SeqFeature objects when parsed from the GenBank record
# for coronavirus, we want to ignore mat_peptide for now
if genbank_record.id == 'NC_045512.2':
coded_seqfeature = find_locus_tag(locus_tag, non_gene_seqfeatures, ignore_types=['mat_peptide'])
else:
coded_seqfeature = find_locus_tag(locus_tag, non_gene_seqfeatures)
if is_pseudo:
gene_type = 'pseudo'
# note; this ignores ONE gene in NC_000913.3; ralA, antisense toxin; TODO don't ignore this
elif coded_seqfeature is None:
warn(f'No coded feature found for {locus_tag}; no Gene object created')
continue
elif coded_seqfeature.type == 'ncRNA':
ncrna_class = get_seqfeature_qualifier(coded_seqfeature, 'ncRNA_class')
if ncrna_class == 'antisense_RNA':
gene_type = 'antisense_RNA'
else:
gene_type = 'ncRNA'
elif coded_seqfeature.type == 'mat_peptide':
gene_type = 'CDS'
# TODO don't ignore variation and mRNA features for lambda phage genome
elif coded_seqfeature.type in ['variation', 'mRNA']:
continue
else:
gene_type = coded_seqfeature.type
# use the CDS location if the coded feature is a CDS; TODO this glosses over genes whose mRNA are altered to
# make the CDS (see lambdap57 for an example)
if gene_type == 'CDS':
gene_location = coded_seqfeature.location
gene = Gene(
locus_tag,
gene_type,
gene_location,
gene_name,
genome_seq,
# these lookups are non-empty only for GenBank record NC_000913.3 (E. coli K-12 MG1655)
cog=locus_tag_cog_lookup.get(locus_tag, None),
y_ome=locus_tag_yome_lookup.get(locus_tag, None),
essential=(locus_tag in essential_locus_tags),
replication_strand=replication_strand,
origin_distance=origin_distance,
terminus_distance=terminus_distance
)
genes.append(gene)
if gene_type == 'CDS':
protein_name = get_seqfeature_qualifier(coded_seqfeature, 'product')
protein = protein_from_gene(gene, include_gempro=is_k12, name=protein_name)
proteins.append(protein)
gene.link_protein(protein)
# if we have a gene coding for a tRNA, generate a TRNA object
if gene_type == 'tRNA':
trna_name = get_seqfeature_qualifier(coded_seqfeature, 'product')
trna_note = get_seqfeature_qualifier(coded_seqfeature, 'note')
if trna_note is None:
trna_anticodon = None
else:
# assumes an anticodon will be somewhere in the note
trna_anticodon = re.findall(r'[AUCGTaugct]{3}', trna_note)
trna = TRNA(
locus_tag,
gene_location,
gene.reading_frame,
genome_seq,
name=trna_name,
anticodon=trna_anticodon
)
trnas.append(trna)
gene.link_trna(trna)
# add CAI for protein-coding genes
cds_genes = [gene for gene in genes if gene.gene_type == 'CDS']
calculate_and_add_cai(cds_genes)
# load mobile element, repeat region, and origin of replication features
mobile_element_seqfeatures = select_seqfeatures(non_gene_seqfeatures, 'mobile_element')
mobile_elements: list = []
for mobile_element_seqfeature in mobile_element_seqfeatures:
mobile_elements.append(MobileElement(
mobile_element_seqfeature.location,
genome_seq,
name=get_seqfeature_qualifier(mobile_element_seqfeature, 'mobile_element_type')
))
repeat_region_seqfeatures = select_seqfeatures(non_gene_seqfeatures, 'repeat_region')
repeat_regions: list = []
for repeat_region_seqfeature in repeat_region_seqfeatures:
repeat_regions.append(RepeatRegion(
repeat_region_seqfeature.location,
genome_seq,
name=get_seqfeature_qualifier(repeat_region_seqfeature, 'note')
))
all_genbank_features = genes + proteins + trnas + mobile_elements + repeat_regions + origins
return all_genbank_features | 2c2dc8d6454eb1c059ae0668e8fa6ea834792f69 | 11,214 |
from typing import List
import torch
def average_precision(predictions: List, targets: List,
iou_threshold: float = 0.5) -> torch.Tensor:
"""Calculates average precision for given inputs
Args:
predictions (List): [Ni,5 dimensional as xmin,ymin,xmax,ymax,conf]
targets (List): [Mi,4 dimensional as xmin,ymin,xmax,ymax]
iou_threshold (float, optional): iou threshold for ap score. Defaults to 0.5.
    Raises:
        AssertionError: if `predictions` and `targets` differ in length, or if the input lists are empty
Returns:
torch.Tensor: average precision score
"""
    assert len(predictions) == len(targets), "prediction and ground truths must be equal in length"
    assert len(predictions) > 0, "given input list length must be greater than 0"
device = predictions[0].device
sorted_table, M = _generate_prediction_table(predictions, targets, device=device)
N = sorted_table.size(0)
if N == 0:
# pylint: disable=not-callable
return torch.tensor([0], dtype=torch.float32, device=device)
accumulated_tp = torch.zeros(sorted_table.size(0), dtype=torch.float32, device=device)
accumulated_fp = torch.zeros(sorted_table.size(0), dtype=torch.float32, device=device)
sorted_table[sorted_table[:, 0] < iou_threshold, 1] = 0.
tp = 0
fp = 0
for i, row in enumerate(sorted_table):
# row : 3 as iou,tp,confidence
if row[1] == 1.:
tp += 1
else:
fp += 1
accumulated_tp[i] = tp
accumulated_fp[i] = fp
precision = accumulated_tp / torch.arange(1, N+1, dtype=torch.float32, device=device)
recall = accumulated_tp / (M + 1e-16)
unique_recalls = recall.unique_consecutive()
auc = torch.empty(unique_recalls.size(0), dtype=torch.float32, device=device)
# pylint: disable=not-callable
last_value = torch.tensor(0, dtype=torch.float32, device=device)
for i, recall_value in enumerate(unique_recalls):
mask = recall == recall_value # N,
p_mul = precision[mask].max() # get max p
auc[i] = p_mul * (recall_value-last_value)
last_value = recall_value
return auc.sum() | e32efe5501fee9140f9b10d2a29fa779af4be18b | 11,215 |
def new_post(update: Update, context: CallbackContext) -> int:
"""Start the conversation, display any stored data and ask user for input."""
# init empty list to store dicts w/ info about each uploaded photo
context.user_data['photos'] = []
reply_text = "Initiate conversation: new post "
# if context.user_data:
# reply_text += (
# f"Current data: {', '.join(context.user_data.keys())}."
# )
# else:
reply_text += (
"Enter title"
)
update.message.reply_text(reply_text, reply_markup=markup)
return ENTER_TITLE | 1b49d20628aaafdf6d61f1cb92cc2f123e742fac | 11,216 |
from typing import List
from typing import Union
from typing import Tuple
from typing import Sequence
def _get_geometry_type_from_list(
features: List, allowed_features: List[Union[Tuple, Sequence]]
) -> Tuple[str]:
"""
    Gets the Geometry type from a List, otherwise it raises an exception.
    :param features: input feature as a list
    :param allowed_features: tuple of allowed feature classes and allowed geometry type names
    :return: tuple with extracted geometry types
"""
geometry_type = tuple()
n_dim = get_input_dimensions(features)
if n_dim == 1 and all(
isinstance(el, (dict, *allowed_features[0])) for el in features
):
return tuple(
map(
lambda geom: _get_geometry_type_from_feature(geom, allowed_features),
features,
)
)
elif all(isinstance(el, (list, tuple, int, float)) for el in features):
feature_type = [
k for k, v in dimensions.items() if v == n_dim and k in allowed_features[1]
]
if len(feature_type) == 1:
geometry_type += (feature_type[0],)
else:
raise InvalidInput(
error_code_messages["InvalidGeometry"](allowed_features[1])
)
else:
raise InvalidInput(error_code_messages["InvalidGeometry"](allowed_features[1]))
return geometry_type | c6ec2e68e3f5667a9fa31ab72b76b0257e4a7221 | 11,217 |
def save_network_to_path(interactions, path):
"""Save dataframe to a tab-separated file at path."""
return interactions.to_csv(path, sep='\t', index=False, na_rep=str(None)) | f189c6e8f7791f1f97c32847f03e0cc2e167ae90 | 11,218 |
from typing import Optional
from typing import Dict
from typing import Tuple
from typing import Union
def apply_sql(
query: str,
output_name: Optional[str],
found: Dict[str, beam.PCollection],
run: bool = True) -> Tuple[str, Union[PValue, SqlNode], SqlChain]:
"""Applies a SqlTransform with the given sql and queried PCollections.
Args:
query: The SQL query executed in the magic.
output_name: (optional) The output variable name in __main__ module.
found: The PCollections with variable names found to be used in the query.
run: Whether to prepare the SQL pipeline for a local run or not.
Returns:
A tuple of values. First str value is the output variable name in
__main__ module, auto-generated if not provided. Second value: if run,
it's a PValue; otherwise, a SqlNode tracks the SQL without applying it or
executing it. Third value: SqlChain is a chain of SqlNodes that have been
applied.
"""
output_name = _generate_output_name(output_name, query, found)
query, sql_source, chain = _build_query_components(
query, found, output_name, run)
if run:
try:
output = sql_source | SqlTransform(query)
# Declare a variable with the output_name and output value in the
# __main__ module so that the user can use the output smoothly.
output_name, output = create_var_in_main(output_name, output)
_LOGGER.info(
"The output PCollection variable is %s with element_type %s",
output_name,
pformat_namedtuple(output.element_type))
return output_name, output, chain
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
on_error('Error when applying the Beam SQL: %s', e)
else:
return output_name, chain.current, chain | 26541c4e946a3d7b8dd1783d8b429bceb0b7b6bf | 11,219 |
from typing import Tuple
from datetime import datetime
def check_course_time_conflict(current_course: Course,
user: NaturalPerson) -> Tuple[bool, str]:
"""
    Check whether the class times of the currently selected course conflict with those of already-selected courses
"""
selected_courses = Course.objects.activated().filter(
participant_set__person=user,
participant_set__status__in=[
CourseParticipant.Status.SELECT,
CourseParticipant.Status.SUCCESS,
]).prefetch_related("time_set")
def time_hash(time: datetime):
return time.weekday() * 1440 + time.hour * 60 + time.minute
    # Since at most 6 courses can be selected, a brute-force check is used for now
for current_course_time in current_course.time_set.all():
        # class times of the currently selected course
current_start_time = current_course_time.start
current_end_time = current_course_time.end
for course in selected_courses:
for course_time in course.time_set.all():
start_time = course_time.start
end_time = course_time.end
                # not very efficient; could be improved
if not (time_hash(current_start_time) >= time_hash(end_time) or
time_hash(current_end_time) <= time_hash(start_time)):
                    # conflict found
                    return True, \
                        f"The class times of 《{current_course.name}》 and 《{course.name}》 conflict!"
    # no conflict
return False, ""
'''
    # A version with fewer loops
from django.db.models import Q
conflict_course_names = set()
for current_course_time in current_course.time_set.all():
        # conflicting time slots
conflict_times = CourseTime.objects.filter(
            # already-selected courses
Q(course__in=selected_courses),
            # starts earlier than the current end time
(Q(start__week_day=current_course_time.end.weekday() + 1,
start__time__lte=current_course_time.end.time())
| Q(start__week_day__lt=current_course_time.end.weekday() + 1))
            # ends later than the current start time
& (Q(end__week_day=current_course_time.start.weekday() + 1,
end__time__gte=current_course_time.start.time())
| Q(end__week_day__gt=current_course_time.start.weekday() + 1))
)
if conflict_times.exists():
# return True, f'《{conflict_times.first().course.name}》'
conflict_course_names.union(
conflict_times.values_list('course__name', flat=True))
conflict_count = len(conflict_course_names)
    # conflicts exist
if conflict_count:
return conflict_count, f'《{"》《".join(conflict_course_names)}》'
    # no conflict
return conflict_count, ""
''' | 2b5a6d621463018b64d0d2d44ac8827b687cee67 | 11,220 |
def or_func(a, b):
"""Creates a new list out of the two supplied by applying the function to each
equally-positioned pair in the lists. The returned list is truncated to the
length of the shorter of the two input lists"""
return a or b | 0f90173e05910ebc7e81079d99bfdbbb1c0ee66b | 11,221 |
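# A short usage sketch, assuming or_func is in scope; zipping it over two lists
# combines them element-wise with Python's `or` semantics.
xs_demo = [0, 1, False, "a"]
ys_demo = [2, 0, True, ""]
print([or_func(a, b) for a, b in zip(xs_demo, ys_demo)])  # [2, 1, True, 'a']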
import typing
def json_loads(
value: typing.Union[bytes, bytearray, str]
) -> typing.Union[
typing.List[typing.Dict[str, typing.Any]], typing.Dict[str, typing.Any]
]:
"""Practical json dumps helper function, bytes, bytearray, and
str input are accepted. supports for ``orjson``, ``simplejson`.
In case of orjson, if the input exists as bytes (was read directly from a source),
it is recommended to pass bytes. This has lower memory usage and lower latency.
The input must be valid UTF-8."""
if json_mod.__name__ != "orjson" and isinstance(value, (bytes, bytearray)):
value = value.decode("utf8", "strict")
return json_mod.loads(value) | 9fe8664df512b52b565d68f3433abc346112c974 | 11,222 |
import json
def execute_create_payment(client, create_payment_request):
"""
Create a payment. Automatically creates an NR for use.
:param client:
:param create_payment_request:
:return:
"""
headers = get_test_headers()
draft_nr = setup_draft_nr(client)
nr_id = draft_nr.get('id')
payment_action = 'COMPLETE'
# POST /api/v1/payments/<int:nr_id>/<string:payment_action>
request_uri = API_BASE_URI + str(nr_id) + '/' + payment_action
path = request_uri
body = json.dumps(create_payment_request)
log_request_path(path)
response = client.post(path, data=body, headers=headers)
assert response.status_code == 201
payload = json.loads(response.data)
verify_payment_payload(payload)
assert payload.get('statusCode') == 'CREATED'
return payload | 56ff5e99f577d64cd71c451f9502220585b1d920 | 11,223 |
from typing import List
def collect_contrib_features(
project: 'ballet.project.Project'
) -> List[Feature]:
"""Collect contributed features for a project at project_root
For a project ``foo``, walks modules within the ``foo.features.contrib``
subpackage. A single object that is an instance of ``ballet.Feature`` is
imported if present in each module. The resulting ``Feature`` objects are
collected.
Args:
project: project object
Returns:
collected features
"""
contrib = project.resolve('features.contrib')
return _collect_contrib_features(contrib) | 8c76f968d7fc75bba2eb6fee9447e49de2d53694 | 11,224 |
import logging
import urllib
import json
import re
def callwebservice(omdb_api_key, dvd_title, year=""):
""" Queries OMDbapi.org for title information and parses if it's a movie
or a tv series """
logging.debug("***Calling webservice with Title: " + dvd_title + " and Year: " + year)
try:
strurl = "http://www.omdbapi.com/?t={1}&y={2}&plot=short&r=json&apikey={0}".format(omdb_api_key, dvd_title, year)
logging.debug("http://www.omdbapi.com/?t={1}&y={2}&plot=short&r=json&apikey={0}".format("key_hidden", dvd_title, year))
dvd_title_info_json = urllib.request.urlopen(strurl).read()
except Exception:
logging.debug("Webservice failed")
return "fail", None
else:
doc = json.loads(dvd_title_info_json.decode())
if doc['Response'] == "False":
logging.debug("Webservice failed with error: " + doc['Error'])
return "fail", None
else:
media_type = doc['Type']
year = re.sub(r'[^\x00-\x7f]',r'', doc['Year'])
logging.debug("Webservice successful. Document returned is: " + json.dumps(doc))
return (media_type, year) | 46dd9f525c89af0b92c2e5c2cfe38f53aab6b5b9 | 11,225 |
import numpy as np
def rect_bevel_2d(width, height, bevel) -> Verts2D:
"""get a rib, parameterized by the height and width of the opening"""
# TODO: there's a generic bevel somewhere in here
width_half = width * 0.5
height_half = height * 0.5
return np.array([
(width_half - bevel, -height_half),
(width_half, -height_half + bevel),
(width_half, height_half - bevel),
(width_half - bevel, height_half),
# TODO: function for this - flip sign and reverse order
(-width_half + bevel, height_half),
(-width_half, height_half - bevel),
(-width_half, -height_half + bevel),
(-width_half + bevel, -height_half)
]) | 6186a0a65c969ed2d89a4b4fd3af4307f5c3fb31 | 11,226 |
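# A quick sanity check, assuming numpy is imported and the Verts2D return-type
# alias from the original module is defined; the 8 vertices trace a rectangle
# with its corners cut off by the bevel.
verts_demo = rect_bevel_2d(width=4.0, height=2.0, bevel=0.5)
print(verts_demo.shape)        # (8, 2)
print(verts_demo.max(axis=0))  # [2. 1.]  -> half-width, half-height
print(verts_demo.min(axis=0))  # [-2. -1.]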
def verify_token(token):
""" Basic auth method """
curr_user = User.check_token(token) if token else None
return curr_user is not None | 95027a9d0235521819b5ca262082b29fe252dc1b | 11,227 |
from typing import Sequence
import random
def generate_test_cases(n_tests: int, min_len: int, max_len: int, min_dim: int, max_dim: int) \
-> Sequence[Sequence[int]]:
"""
:param n_tests: number of test to generate
:param min_len: minimum number of matrices for each test case
:param max_len: maximum number of matrices for each test case
:param min_dim: minimum dimension for each matrix (applies both for rows and columns)
:param max_dim: maximum dimension for each matrix (applies both for rows and columns)
    :return: a list of ``[dims, solution]`` pairs, one per generated test case
"""
solutions = []
for n in range(n_tests):
test_len = random.randint(min_len, max_len)
dims = tuple([random.randint(min_dim, max_dim) for _ in range(test_len)])
solution = memoized_mcm(dims=dims)[0]
solutions.append([dims, solution])
return solutions | 63a20aa5d94456597c29523019e2699aedf4ab5f | 11,228 |
import numpy as np
from anndata import AnnData
from numba.typed import List
from scipy import spatial
def calc_neighbours(
    adata: AnnData,
    distance: float = None,
    index: bool = True,
    verbose: bool = True,
) -> List:
"""Calculate the proportion of known ligand-receptor co-expression among the neighbouring spots or within spots
Parameters
----------
adata: AnnData The data object to scan
distance: float Distance to determine the neighbours (default: closest), distance=0 means within spot
index: bool Indicates whether to return neighbours as indices to other spots or names of other spots.
Returns
-------
neighbours: numba.typed.List List of np.array's indicating neighbours by indices for each spot.
"""
if verbose:
print("Calculating neighbours...")
# get neighbour spots for each spot according to the specified distance
coor = adata.obs[["imagerow", "imagecol"]]
point_tree = spatial.cKDTree(coor)
neighbours = []
for i, spot in enumerate(adata.obs_names):
if distance == 0:
neighbours.append(np.array([i if index else spot]))
else:
n_index = point_tree.query_ball_point(
np.array(
[adata.obs["imagerow"].loc[spot], adata.obs["imagecol"].loc[spot]]
),
distance,
)
if index:
n_index = np.array(n_index, dtype=np.int_)
neighbours.append(n_index[n_index != i])
else:
n_spots = adata.obs_names[n_index]
neighbours.append(n_spots[n_spots != spot])
typed_neighs = List()
[typed_neighs.append(neigh) for neigh in neighbours]
n_neighs = np.array([len(neigh) for neigh in neighbours])
if verbose:
print(
f"{len(np.where(n_neighs==0)[0])} spots with no neighbours, "
f"{int(np.median(n_neighs))} median spot neighbours."
)
if np.all(n_neighs == 0):
raise Exception(
"All spots have no neighbours at current distance,"
" set distance to higher value, or distance=0 for "
"within-spot mode."
)
return typed_neighs | 1fdbc372f2249115b0ace55bf5d89c54a1143523 | 11,229 |
from typing import Optional
from typing import Tuple
from typing import List
def _build_tree_string(
root: Optional[Node],
curr_index: int,
include_index: bool = False,
delimiter: str = "-",
) -> Tuple[List[str], int, int, int]:
"""Recursively walk down the binary tree and build a pretty-print string.
In each recursive call, a "box" of characters visually representing the
current (sub)tree is constructed line by line. Each line is padded with
whitespaces to ensure all lines in the box have the same length. Then the
box, its width, and start-end positions of its root node value repr string
(required for drawing branches) are sent up to the parent call. The parent
call then combines its left and right sub-boxes to build a larger box etc.
:param root: Root node of the binary tree.
:type root: binarytree.Node | None
:param curr_index: Level-order_ index of the current node (root node is 0).
:type curr_index: int
:param include_index: If set to True, include the level-order_ node indexes using
the following format: ``{index}{delimiter}{value}`` (default: False).
:type include_index: bool
:param delimiter: Delimiter character between the node index and the node
value (default: '-').
:type delimiter:
:return: Box of characters visually representing the current subtree, width
of the box, and start-end positions of the repr string of the new root
node value.
:rtype: ([str], int, int, int)
.. _Level-order:
https://en.wikipedia.org/wiki/Tree_traversal#Breadth-first_search
"""
if root is None:
return [], 0, 0, 0
line1 = []
line2 = []
if include_index:
node_repr = "{}{}{}".format(curr_index, delimiter, root.val)
else:
node_repr = str(root.val)
new_root_width = gap_size = len(node_repr)
# Get the left and right sub-boxes, their widths, and root repr positions
l_box, l_box_width, l_root_start, l_root_end = _build_tree_string(
root.left, 2 * curr_index + 1, include_index, delimiter
)
r_box, r_box_width, r_root_start, r_root_end = _build_tree_string(
root.right, 2 * curr_index + 2, include_index, delimiter
)
# Draw the branch connecting the current root node to the left sub-box
# Pad the line with whitespaces where necessary
if l_box_width > 0:
l_root = (l_root_start + l_root_end) // 2 + 1
line1.append(" " * (l_root + 1))
line1.append("_" * (l_box_width - l_root))
line2.append(" " * l_root + "/")
line2.append(" " * (l_box_width - l_root))
new_root_start = l_box_width + 1
gap_size += 1
else:
new_root_start = 0
# Draw the representation of the current root node
line1.append(node_repr)
line2.append(" " * new_root_width)
# Draw the branch connecting the current root node to the right sub-box
# Pad the line with whitespaces where necessary
if r_box_width > 0:
r_root = (r_root_start + r_root_end) // 2
line1.append("_" * r_root)
line1.append(" " * (r_box_width - r_root + 1))
line2.append(" " * r_root + "\\")
line2.append(" " * (r_box_width - r_root))
gap_size += 1
new_root_end = new_root_start + new_root_width - 1
# Combine the left and right sub-boxes with the branches drawn above
gap = " " * gap_size
new_box = ["".join(line1), "".join(line2)]
for i in range(max(len(l_box), len(r_box))):
l_line = l_box[i] if i < len(l_box) else " " * l_box_width
r_line = r_box[i] if i < len(r_box) else " " * r_box_width
new_box.append(l_line + gap + r_line)
# Return the new box, its width and its root repr positions
return new_box, len(new_box[0]), new_root_start, new_root_end | 988a5816647ca31b19c25a3017061ec71cdabc85 | 11,230 |
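# A hedged usage sketch with a throwaway node class; the real code expects
# binarytree.Node (which the function's own module defines for the annotation),
# but at runtime only .val, .left and .right are read, so a minimal stand-in
# is enough to see the ASCII rendering.
class _DemoNode:
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

demo_root = _DemoNode(1, _DemoNode(2, _DemoNode(4)), _DemoNode(3))
demo_box, _, _, _ = _build_tree_string(demo_root, 0)
print("\n".join(line.rstrip() for line in demo_box))  # pretty-printed tree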
import numpy as np
def growth(xs, ys, x):
    """
    growth function
    pre:
        xs, ys are arrays of known x and y values. x is a scalar or np.array
        of values to calculate new y values for
    post:
        return new y values
    """
xs = np.array(xs)
ys = np.log(np.array(ys))
xy_bar = np.average(xs*ys)
x_bar = np.average(xs)
y_bar = np.average(ys)
x_sq_bar = np.average(xs**2)
beta = (xy_bar - x_bar*y_bar)/(x_sq_bar- x_bar**2)
alpha = y_bar - beta* x_bar
return np.exp(alpha + beta * x) | 5a47077ebfcca5284e29a02c467ba059d6350182 | 11,231 |
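# A worked check, assuming numpy is imported as above: the function fits
# log(y) = alpha + beta * x by least squares, so data sampled from y = exp(x)
# should be reproduced almost exactly.
xs_demo = [0.0, 1.0, 2.0, 3.0]
ys_demo = [np.exp(v) for v in xs_demo]
print(growth(xs_demo, ys_demo, 4.0))                   # ~54.598 == e**4
print(growth(xs_demo, ys_demo, np.array([4.0, 5.0])))  # vectorised evaluation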
def remove_short_transition(transition_sites,thresh=11):
"""
    removes transitions that are too close to others.
"""
if len(transition_sites) < 4:
return transition_sites
for i in range(len(transition_sites) - 1):
forward_difference = transition_sites[i+1] - transition_sites[i]
if forward_difference <= thresh:
transition_sites[i] = transition_sites[-1]
transition_sites.append(0)
transition_sites = list(set(transition_sites))
transition_sites = sorted(transition_sites)
return transition_sites | 5ae188ef1314f4416b3baba07f45cceb41dc4c7a | 11,232 |
import numpy as np
from PIL import Image
def load_image(file_path):
"""
Load data from an image.
Parameters
----------
file_path : str
Path to the file.
Returns
-------
float
2D array.
"""
if "\\" in file_path:
raise ValueError(
"Please use a file path following the Unix convention")
mat = None
try:
mat = np.asarray(Image.open(file_path), dtype=np.float32)
except IOError:
print(("No such file or directory: {}").format(file_path))
raise
if len(mat.shape) > 2:
axis_m = np.argmin(mat.shape)
mat = np.mean(mat, axis=axis_m)
return mat | 1399e3299e8697097a0c92f39c122735ab63c633 | 11,233 |
import numpy as np
from glad.util import argmin_datetime,haversine
def absolute_dispersion(drifters,starttime,time):
"""
Calculates absolute dispersion A^2, given desired current and
initial time.
Parameters
----------
drifters : GladDrifter instance, list, ndarray
A list or numpy array of GladDrifter instances.
starttime : datetime instance
Start time.
time : datetime instance
Time at which to compute absolute dispersion.
Returns
-------
A2 : float
Absolute dispersion in km^2.
"""
if not isinstance(drifters,list):
drifters = [drifters]
dist_squared = []
for d in drifters:
if not (d.has_time(starttime) and d.has_time(time)):
continue
n1 = argmin_datetime(time,d.time)
n0 = argmin_datetime(starttime,d.time)
dist_squared.append(haversine(d.lon[n1],d.lat[n1],\
d.lon[n0],d.lat[n0])**2)
A2 = np.mean(dist_squared)
return A2 | 1cf22d344994c913f294d005558afd73eb21cd2a | 11,234 |
import logging
import geopandas as gpd
def add_xgis_url(df: gpd.geodataframe.GeoDataFrame) -> gpd.geodataframe.GeoDataFrame:
""" Adding x-gis URL which will let the user check the result
:param df: gdf to use
"""
    # Generating URL from string
df.reset_index(inplace=True) # resetting index
xy_tog = df[c.X].astype(str) + "," + df[c.Y].astype(str)
kinnistu_str = df[c.kinnistu_nr].astype(str)
# Final URL
x_gis_url = c.X_GIS_URL_yua + "?" + "punkt=" + xy_tog + "&moot=500" + "&tooltip=Kinnistu nr: " + kinnistu_str
# Adding new column
df[c.URL] = x_gis_url
logging.info("\tAdded URL to dataframe")
return df | a220169ba79b452b8cbb0d0b2aee11229c09fdae | 11,235 |
from typing import Optional
from typing import Sequence
def get_compute_capacity_reservation_instance_shapes(availability_domain: Optional[str] = None,
compartment_id: Optional[str] = None,
display_name: Optional[str] = None,
filters: Optional[Sequence[pulumi.InputType['GetComputeCapacityReservationInstanceShapesFilterArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetComputeCapacityReservationInstanceShapesResult:
"""
This data source provides the list of Compute Capacity Reservation Instance Shapes in Oracle Cloud Infrastructure Core service.
Lists the shapes that can be reserved within the specified compartment.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_compute_capacity_reservation_instance_shapes = oci.core.get_compute_capacity_reservation_instance_shapes(compartment_id=var["compartment_id"],
availability_domain=var["compute_capacity_reservation_instance_shape_availability_domain"],
display_name=var["compute_capacity_reservation_instance_shape_display_name"])
```
:param str availability_domain: The name of the availability domain. Example: `Uocm:PHX-AD-1`
:param str compartment_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment.
:param str display_name: A filter to return only resources that match the given display name exactly.
"""
__args__ = dict()
__args__['availabilityDomain'] = availability_domain
__args__['compartmentId'] = compartment_id
__args__['displayName'] = display_name
__args__['filters'] = filters
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:core/getComputeCapacityReservationInstanceShapes:getComputeCapacityReservationInstanceShapes', __args__, opts=opts, typ=GetComputeCapacityReservationInstanceShapesResult).value
return AwaitableGetComputeCapacityReservationInstanceShapesResult(
availability_domain=__ret__.availability_domain,
compartment_id=__ret__.compartment_id,
compute_capacity_reservation_instance_shapes=__ret__.compute_capacity_reservation_instance_shapes,
display_name=__ret__.display_name,
filters=__ret__.filters,
id=__ret__.id) | d294586d5aac79bb452d93bf3424daa403923188 | 11,236 |
import re
def is_branch_or_version(string):
"""Tries to figure out if passed argument is branch or version.
Returns 'branch', 'version', or False if deduction failed.
Branch is either 'master' or something like 3.12.x;
version is something like 3.12.5,
optionally followed by letter (3.12.5b) for aplha/beta/gamma...zeta,
optionally followed by release (3.12.5-2).
"""
if string == "master" or re.match("3\.\\d+\.x$", string):
return "branch"
if re.match("3\\.\\d+\\.\\d+[a-z]?(-\\d+)?$", string):
return "version"
return None | 6a5ad7cb7af29b6ce0e39ff86171f0f230929fb3 | 11,237 |
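# A few quick checks of the deduction rules above, assuming the function is in scope.
for s in ("master", "3.12.x", "3.12.5", "3.12.5b", "3.12.5-2", "3.12", "foo"):
    print(s, "->", is_branch_or_version(s))
# master and 3.12.x -> 'branch'; 3.12.5, 3.12.5b and 3.12.5-2 -> 'version';
# 3.12 and foo -> None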
def eval_regressors(regressor_factories, gen_one_data, batch_size=1, names=None):
"""Evaluates an iterable of regressors on some test data of size
:batch_size: generated from :gen_one_data:.
"""
X, y = dg.BatchData.batch(gen_one_data, batch_size)
return _eval_regressors(regressor_factories, X, y, names=names) | 89c63d5c8c4697370ff2e63baf96ec456565fe42 | 11,238 |
import matplotlib as mpl
import matplotlib.cm
import matplotlib.colors
def beautify():
    """Set reasonable defaults for matplotlib.
    This method replaces matplotlib's default rgb/cmyk colors with the
    solarized colors. It also does:
* re-orders the default color cycle
* sets the default linewidth
    * replaces the default 'RdBu' cmap
* sets the default cmap to 'RdBu'
Examples
--------
You can safely call ``beautify`` right after you've imported the
``plot`` module.
>>> from wyrm import plot
>>> plot.beautify()
"""
def to_mpl_format(r, g, b):
"""Convert 0..255 t0 0..1."""
return r / 256, g / 256, b / 256
# The solarized color palette
base03 = to_mpl_format( 0, 43, 54)
base02 = to_mpl_format( 7, 54, 66)
base01 = to_mpl_format( 88, 110, 117)
base00 = to_mpl_format(101, 123, 131)
base0 = to_mpl_format(131, 148, 150)
base1 = to_mpl_format(147, 161, 161)
base2 = to_mpl_format(238, 232, 213)
base3 = to_mpl_format(253, 246, 227)
yellow = to_mpl_format(181, 137, 0)
orange = to_mpl_format(203, 75, 22)
red = to_mpl_format(220, 50, 47)
magenta = to_mpl_format(211, 54, 130)
violet = to_mpl_format(108, 113, 196)
blue = to_mpl_format( 38, 139, 210)
cyan = to_mpl_format( 42, 161, 152)
green = to_mpl_format(133, 153, 0)
white = (1, 1, 1)#base3
black = base03
    # Overwrite the default color values with our new ones. Those
# single-letter colors are used all over the place in matplotlib, so
# this setting has a huge effect.
mpl.colors.ColorConverter.colors = {
'b': blue,
'c': cyan,
'g': green,
'k': black,
'm': magenta,
'r': red,
'w': white,
'y': yellow
}
# Redefine the existing 'RdBu' (Red-Blue) colormap, with our new
# colors for red and blue
cdict = {
'red' : ((0., blue[0], blue[0]), (0.5, white[0], white[0]), (1., magenta[0], magenta[0])),
'green': ((0., blue[1], blue[1]), (0.5, white[1], white[1]), (1., magenta[1], magenta[1])),
'blue' : ((0., blue[2], blue[2]), (0.5, white[2], white[2]), (1., magenta[2], magenta[2]))
}
mpl.cm.register_cmap('RdBu', data=cdict)
# Reorder the default color cycle
mpl.rcParams['axes.color_cycle'] = ['b', 'm', 'g', 'r', 'c', 'y', 'k']
# Set linewidth in plots to 2
mpl.rcParams['lines.linewidth'] = 2
# Set default cmap
mpl.rcParams['image.cmap'] = 'RdBu' | da08523ed69b2bb97af06cc1e51f5bdf2e412faa | 11,239 |
import xarray as xr
def filter_xr_by_month(data: xr.DataArray, month: str) -> xr.DataArray:
"""
filtering xr.DataArray by input string of season
:param data:
:param month: such as 'JJA', 'DJF', et 'NDJF'
:return:
"""
if isinstance(data, xr.DataArray):
month = value_month_from_str(month)
mask = [True if x in month else False for x in data.time.dt.month]
lookup = xr.DataArray(mask, dims=data.dims[0])
data_to_return = data.where(lookup, drop=True)
if isinstance(data, xr.Dataset):
# TODO: to be updated:
print(f'function to update')
return data_to_return | 5fa9e8af51afb0f84a19d5b7fe87e47f226e289d | 11,240 |
import yaml
from typing import OrderedDict
def ordered_load(stream, loader=yaml.SafeLoader, object_pairs_hook=OrderedDict):
"""Load YAML, preserving the ordering of all data."""
class OrderedLoader(loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader) | d3b6a9c84e895a63a85de39dbb5a849d6c224ecd | 11,241 |
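# A short usage sketch, assuming PyYAML is installed and the function above is
# in scope; key order from the YAML source is preserved in the OrderedDict.
yaml_doc = """
second: 2
first: 1
nested:
  b: true
  a: false
"""
data_demo = ordered_load(yaml_doc)
print(list(data_demo))            # ['second', 'first', 'nested']
print(list(data_demo["nested"]))  # ['b', 'a']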
def user_unions_clear(*args):
"""
user_unions_clear(map)
Clear user_unions_t.
@param map (C++: user_unions_t *)
"""
return _ida_hexrays.user_unions_clear(*args) | f1e7b1a8cf966f3d28966919cf2e4c5e82cfb45c | 11,242 |
def heur_best_from_now(state):
"""
    This heuristic computes the cost assuming all remaining weight is put on the launch with the lowest variable cost.
@param state: state to compute the cost.
@return: cost
"""
try:
return min([launch.compute_variable_cost(state.left_weight()) for launch in state.launches[state.launch_nr:]])
except ValueError:
return 0 | 050c7c718ad849e8e7fc6892de7097c3bd0f83dd | 11,243 |
def get_ngram(text, ns=[1]):
"""
    Extract n-gram (and similar) features from the text
:param text: str
:return: list
"""
if type(ns) != list:
raise RuntimeError("ns of function get_ngram() must be list!")
for n in ns:
if n < 1:
raise RuntimeError("enum of ns must '>1'!")
len_text = len(text)
ngrams = []
for n in ns:
ngram_n = []
for i in range(len_text):
if i + n <= len_text:
ngram_n.append(text[i:i + n])
else:
break
if not ngram_n:
ngram_n.append(text)
ngrams += ngram_n
return ngrams | 3826fcdce46b455762417528ac9f31a0552b5a04 | 11,244 |
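# A quick example of the n-gram extraction above.
print(get_ngram("北京大学", ns=[1, 2]))
# ['北', '京', '大', '学', '北京', '京大', '大学']
print(get_ngram("ab", ns=[3]))  # n longer than the text falls back to ['ab']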
from scipy.stats import entropy as scipy_entropy
def entropy(df):
"""Return Shannon Entropy for purchases of each user."""
mask = df.credit_debit.eq('debit')
df = df[mask]
num_cats = df.auto_tag.nunique()
def calc_entropy(user, num_cats):
total_purchases = len(user)
cat_purchases = user.groupby('auto_tag').size()
probs = (cat_purchases + 1) / (total_purchases + num_cats)
        return scipy_entropy(probs, base=2)
g = df.groupby('user_id')
return g.apply(calc_entropy, num_cats).rename('entropy') | dd3f0e1d3865de151ce4a451c2def148d58da9da | 11,245 |
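# A minimal check of the Laplace-smoothed probabilities used above, assuming
# pandas and scipy are installed and entropy() is in scope; the frame is made up.
import pandas as pd
from scipy.stats import entropy as scipy_entropy
df_demo = pd.DataFrame({
    "user_id": [1, 1, 1, 1],
    "credit_debit": ["debit"] * 4,
    "auto_tag": ["food", "food", "rent", "travel"],
})
# Smoothed probabilities: (count + 1) / (total purchases + number of categories)
probs_demo = (df_demo.groupby("auto_tag").size() + 1) / (len(df_demo) + df_demo.auto_tag.nunique())
print(scipy_entropy(probs_demo, base=2))  # same value as entropy(df_demo).loc[1]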
def splitter(h):
""" Splits dictionary numbers by the decimal point."""
if type(h) is dict:
for k, i in h.items():
h[k] = str(i).split('.');
if type(h) is list:
for n in range(0, len(h)):
h[n] = splitter(h[n])
return h | 1eb5e38a02ce310a068d8c1c9df2790658722662 | 11,246 |
def listall_comments():
"""Lists rule-based labels
Returns:
list: A list of FileTypeComments
"""
return listall('comment') | 5f26a0632497309e13d624437767467811d8faa3 | 11,247 |
import random
import hashlib
import time
def get_random_string(length=12,
allowed_chars='abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
"""
Returns a securely generated random string.
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(
hashlib.sha256(
"%s%s%s" % (
random.getstate(),
time.time(),
UNSECURE_RANDOM_STRING)
).digest())
return ''.join([random.choice(allowed_chars) for i in range(length)]) | 9a8df9402b8ccde30ae289eb759708f506ef6087 | 11,248 |
import html  # note: the html(...) calls below expect an FMText-to-HTML formatter from the surrounding module, not this stdlib module
def make_html_doc(body, root, resource_dir=None, title=None, meta=None):
"""Generate HTML document
Parameters
----------
body : fmtxt-object
FMTXT object which should be formatted into an HTML document.
root : str
Path to the directory in which the HTML file is going to be located.
resource_dir : None | str
Name for the directory containing resources like images, relative to
root. If None, images are embedded.
title : None | FMText
Document title. The default is to try to infer the title from the body
or use "Untitled".
meta : dict
Meta-information for document head.
Returns
-------
html : str
HTML document.
"""
if title is None:
if hasattr(body, '_site_title') and body._site_title is not None:
title = html(body._site_title)
elif hasattr(body, '_heading'):
title = html(body._heading)
else:
title = "Untitled"
if meta:
meta = '<meta %s>\n' % ' '.join('%s=%r' % x for x in meta.items())
else:
meta = ''
style = '\n'.join(('', '<style>', STYLE, '</style>'))
env = {'root': root, 'resource_dir': resource_dir}
txt_body = html(body, env)
return _html_doc_template.format(meta=meta, title=title, style=style,
body=txt_body) | 6794050593440cafafa080e46b435675fbdad648 | 11,249 |
def blur(img):
"""
:param img:
:return:
"""
blank_img = SimpleImage.blank(img.width, img.height)
for x in range(1, img.width-1):
for y in range(1, img.height-1):
left1_pixel = img.get_pixel(x-1, y-1)
left2_pixel = img.get_pixel(x-1, y)
left3_pixel = img.get_pixel(x-1, y+1)
center1_pixel = img.get_pixel(x, y-1)
center2_pixel = img.get_pixel(x, y)
center3_pixel = img.get_pixel(x, y+1)
right1_pixel = img.get_pixel(x+1, y-1)
right2_pixel = img.get_pixel(x+1, y)
right3_pixel = img.get_pixel(x+1, y+1)
new_pixel = blank_img.get_pixel(x, y)
new_pixel.red = (left1_pixel.red + left2_pixel.red + left3_pixel.red + center1_pixel.red + center2_pixel.red
+ center3_pixel.red + right1_pixel.red + right2_pixel.red + right3_pixel.red) // 9
new_pixel.green = (left1_pixel.green + left2_pixel.green + left3_pixel.green + center1_pixel.green +
center2_pixel.green + center3_pixel.green + right1_pixel.green + right2_pixel.green +
right3_pixel.green) // 9
new_pixel.blue = (left1_pixel.blue + left2_pixel.blue + left3_pixel.blue + center1_pixel.blue +
center2_pixel.blue + center3_pixel.blue + right1_pixel.blue + right2_pixel.blue +
right3_pixel.blue) // 9
for x in range(1):
for y in range(1, img.height-1):
"""edge x=0"""
edge1_pixel = img.get_pixel(x, y-1)
edge2_pixel = img.get_pixel(x, y)
edge3_pixel = img.get_pixel(x, y+1)
edge4_pixel = img.get_pixel(x+1, y-1)
edge5_pixel = img.get_pixel(x+1, y)
edge6_pixel = img.get_pixel(x+1, y+1)
new_pixel = blank_img.get_pixel(x, y)
new_pixel.red = (edge1_pixel.red + edge2_pixel.red + edge3_pixel.red + edge4_pixel.red + edge5_pixel.red +
edge6_pixel.red) // 6
new_pixel.green = (edge1_pixel.green + edge2_pixel.green + edge3_pixel.green + edge4_pixel.green +
edge5_pixel.green + edge6_pixel.green) // 6
new_pixel.blue = (edge1_pixel.blue + edge2_pixel.blue + edge3_pixel.blue + edge4_pixel.blue +
edge5_pixel.blue + edge6_pixel.blue) // 6
for x in range(img.width-1, img.width):
for y in range(1, img.height-1):
"""edge x=width-1"""
edge1_pixel = img.get_pixel(x-1, y-1)
edge2_pixel = img.get_pixel(x-1, y)
edge3_pixel = img.get_pixel(x-1, y+1)
edge4_pixel = img.get_pixel(x, y-1)
edge5_pixel = img.get_pixel(x, y)
edge6_pixel = img.get_pixel(x, y+1)
            new_pixel = blank_img.get_pixel(x, y)
new_pixel.red = (edge1_pixel.red + edge2_pixel.red + edge3_pixel.red + edge4_pixel.red + edge5_pixel.red +
edge6_pixel.red) // 6
new_pixel.green = (edge1_pixel.green + edge2_pixel.green + edge3_pixel.green + edge4_pixel.green +
edge5_pixel.green + edge6_pixel.green) // 6
new_pixel.blue = (edge1_pixel.blue + edge2_pixel.blue + edge3_pixel.blue + edge4_pixel.blue +
edge5_pixel.blue + edge6_pixel.blue) // 6
for x in range(1, img.width-1):
for y in range(1):
"""edge y=0"""
edge1_pixel = img.get_pixel(x-1, y)
edge2_pixel = img.get_pixel(x, y)
edge3_pixel = img.get_pixel(x+1, y)
edge4_pixel = img.get_pixel(x-1, y+1)
edge5_pixel = img.get_pixel(x, y+1)
edge6_pixel = img.get_pixel(x+1, y+1)
            new_pixel = blank_img.get_pixel(x, y)
new_pixel.red = (edge1_pixel.red + edge2_pixel.red + edge3_pixel.red + edge4_pixel.red + edge5_pixel.red +
edge6_pixel.red) // 6
new_pixel.green = (edge1_pixel.green + edge2_pixel.green + edge3_pixel.green + edge4_pixel.green +
edge5_pixel.green + edge6_pixel.green) // 6
new_pixel.blue = (edge1_pixel.blue + edge2_pixel.blue + edge3_pixel.blue + edge4_pixel.blue +
edge5_pixel.blue + edge6_pixel.blue) // 6
for x in range(1, img.width-1):
for y in range(img.height-1, img.height):
"""edge y=height-1"""
edge1_pixel = img.get_pixel(x-1, y-1)
edge2_pixel = img.get_pixel(x, y-1)
edge3_pixel = img.get_pixel(x+1, y-1)
edge4_pixel = img.get_pixel(x-1, y)
edge5_pixel = img.get_pixel(x, y)
edge6_pixel = img.get_pixel(x+1, y)
            new_pixel = blank_img.get_pixel(x, y)
new_pixel.red = (edge1_pixel.red + edge2_pixel.red + edge3_pixel.red + edge4_pixel.red + edge5_pixel.red +
edge6_pixel.red) // 6
new_pixel.green = (edge1_pixel.green + edge2_pixel.green + edge3_pixel.green + edge4_pixel.green +
edge5_pixel.green + edge6_pixel.green) // 6
new_pixel.blue = (edge1_pixel.blue + edge2_pixel.blue + edge3_pixel.blue + edge4_pixel.blue +
edge5_pixel.blue + edge6_pixel.blue) // 6
return blank_img | b2e04d8134b1d295e497b28a43f93223bcaf881f | 11,250 |
import numpy as np
def generalized_euler_solver(descr, coefs, rho0, v0, t, x, bc="periodic", num_integrator_steps=1, fix_vvx_term=True):
"""Solver for Euler hydro system.
Builds RHS of the Euler equation Dv_t = f(...) from symbolic description.
"""
t_pde = np.linspace(t[0], t[-1], len(t)*num_integrator_steps) # Create a refined t-grid
nt, nx = len(t), len(x)
rho_ev, v_ev = np.zeros((len(t), nx)), np.zeros((len(t), nx))
rho, v = rho0, v0
dt = t_pde[1] - t_pde[0]
dx = x[1] - x[0]
for it, t in enumerate(t_pde):
rhox = FiniteDiff(rho, dx, 1, bc)
vx = FiniteDiff(v, dx, 1, bc)
rho_t = -(rhox*v + vx*rho)
rho_next = rho + dt*rho_t
# Add RHS terms to Dt(v) = v_t+v*v_x = f (...)
f = np.sum([coefs[i]*get_euler_term_from_descr(descr_i, rho, v, x) \
for i, descr_i in enumerate(descr)], axis=0)
v_t = f
if fix_vvx_term:
v_t -= v*vx
v_next = v + dt*v_t # D_t(v) = f(rho, v, ...)
step = it // num_integrator_steps
if it % num_integrator_steps == 0:
rho_ev[step, :] = rho.copy()
v_ev[step, :] = v.copy()
rho = rho_next.copy()
v = v_next.copy()
if np.isnan(np.sum(rho)):
# Solution exploded, interrupt
return np.array([np.nan]), np.array([np.nan])
return rho_ev, v_ev | 5bc1fefbb3c1e13da37d2e37fdefa1be44812a36 | 11,251 |
import csv
from collections import defaultdict
def generate_rnr_features(in_query_stream, outfile, collection_id, cluster_id, num_rows=30, config=load_config()):
"""
Iterates over a labelled query stream and generates a feature file with the columns:
<query_num>,<answer_id>,<fea_0>,<fea_1>,...,<fea_n>,<relevance_label>
:param rnr_debug_helpers.queries.LabelledQueryStream in_query_stream:
:param File outfile: where the feature file contents will be written to
:param str collection_id: the RnR solr collection to use for finding search results
:param str cluster_id: the RnR solr cluster id to use for finding search results
:param int or None num_rows: The number of search results that will be retrieved for each query. Defaults to 30
similar to RnR Web UI/Tooling
:param ConfigParser config: A config loaded with the credentials to use
"""
rnr_cluster = RetrieveAndRankProxy(solr_cluster_id=cluster_id, config=config)
writer = csv.writer(outfile)
# Iterate over queries and generate feature vectors
stats = defaultdict(int)
is_first_row = True
for qid, query in enumerate(in_query_stream):
labels_for_relevant_answer_ids = _parse_correct_answer_ids_from_query(query)
_collect_stats(stats, labels_for_relevant_answer_ids)
LOGGER.debug("Getting feature vectors for query:<<%s>>" % query.get_qid())
rnr_search_results = rnr_cluster.get_fcselect_features(query_text=query.get_qid(), collection_id=collection_id,
generate_header=is_first_row,
num_results_to_return=num_rows)
if len(rnr_search_results) == 0:
stats["num_queries_with_zero_rnr_results"] += 1
else:
if is_first_row:
writer.writerow([_QID_COLUMN_NAME] + rnr_search_results.pop(0) + [_GT_COLUMN_NAME])
is_first_row = False
stats["num_queries_with_atleast_one_search_result"] += 1
stats['num_search_results_retrieved'] += len(rnr_search_results)
num_possible_correct, num_correct_answers_in_search_results = \
_print_feature_vectors_and_check_for_correct_answers(writer, rnr_search_results, '%d' % (qid + 1),
labels_for_relevant_answer_ids)
if num_possible_correct != num_correct_answers_in_search_results:
stats['num_queries_where_at_least_correct_answer_didnt_appear_in_rnr'] += 1
stats["num_correct_in_search_result"] += num_correct_answers_in_search_results
if stats["num_queries"] % 100 == 0:
LOGGER.info("Processed %d queries from input file" % stats['num_queries'])
_average_stats_across_collection(stats)
LOGGER.info("Finished processing %d queries from input file" % stats['num_queries'])
return stats | 71bf07ee5422c67ac5c1693a91151543436a9b9e | 11,252 |
import os
from docutils.writers import html5_polyglot  # assumed source of the html5_polyglot writer referenced below
def get_theme_settings(theme):
"""
docutils writer will load css file.
"""
stylesheet = {}
search_paths = [
os.path.abspath(os.path.dirname(os.path.dirname(html5_polyglot.__file__))),
]
docutils_theme_path = ''
for path in search_paths:
if os.path.exists(os.path.join(path, 'html5_polyglot', 'template.txt')):
docutils_theme_path = path
break
logger.debug('docutils theme path: %s' % docutils_theme_path)
stylesheet['stylesheet_dirs'] = [
os.path.join(docutils_theme_path, 'html4css1'),
os.path.join(docutils_theme_path, 'html5_polyglot'),
]
pygments_path = os.path.join(__home_data_path__, 'themes', 'reStructuredText', 'pygments.css')
if os.path.exists(pygments_path):
stylesheet['stylesheet_path'] = pygments_path
stylesheet['syntax_highlight'] = 'short'
# docutils default theme
if not theme or theme == 'default':
return stylesheet
# third part theme
themes = get_rst_themes()
styles = themes.get(theme)
# stylesheet_path : css file path
# syntax_highlight: short
# template: template file path
stylesheet['stylesheet_dirs'].extend(styles['stylesheet_dirs'])
if 'syntax_highlight' in styles:
stylesheet['syntax_highlight'] = styles['syntax_highlight']
if 'stylesheet_path' in styles:
css_paths = styles['stylesheet_path'].split(',')
if 'stylesheet_path' in stylesheet:
css_paths += stylesheet['stylesheet_path'].split(',')
stylesheet['stylesheet_path'] = ','.join(css_paths)
if 'template' in styles:
old_path = styles['template']
new_path = os.path.abspath(
os.path.join(__home_data_path__,
'themes', 'reStructuredText',
theme,
old_path))
stylesheet['template'] = new_path
return stylesheet | a259248bba4b8107829abac6aec6909c38cd923e | 11,253 |
def pick_unassigned_variable(board, strategy, unassigned_heap):
"""
:returns: (row_index, col_index)
"""
if strategy == Strategies.FIRST_FOUND:
return __pick_unassigned_variable_first_found(board)
elif strategy == Strategies.MIN_ROW:
return __pick_unassigned_variable_min_row(board)
else:
(rowi, coli) = (-1, -1)
if strategy == Strategies.MIN_HEAP:
(rowi, coli) = __pick_unassigned_variable_heap(board, unassigned_heap)
else:
(rowi, coli) = __pick_unassigned_variable_heap_2(board, unassigned_heap)
# update the heap
unassigned_heap["row"][rowi] -= 1
unassigned_heap["col"][coli] -= 1
ssi = get_subsquare_index((rowi, coli))
unassigned_heap["subsquare"][ssi] -= 1
return (rowi, coli) | a96bb4d5e047f44fb79dd2eae68532c8e54296d4 | 11,254 |
def CONTAINS_INTS_FILTER(arg_value):
"""Only keeps int sequences or int tensors."""
return arg_value.elem_type is int or arg_value.has_int_dtypes() | c4452c5e6bbd9ead32359d8638a6bf1e49b600ba | 11,255 |
def pad_renderable(renderable, offset):
"""
Pad a renderable, subject to a particular truncation offset.
"""
if offset < 0:
        raise ValueError("invalid offset!")
if offset == 0:
return RenderGroup(_RULE, Padding(renderable, 1))
if offset == 1:
return Padding(renderable, 1)
else:
return Padding(renderable, (0, 1, 1, 1)) | eed05f632e00f8cb8a2539f59402c4c200159f4c | 11,256 |
import frappe.modules
def reload_doc(module, dt=None, dn=None, force=False, reset_permissions=False):
"""Reload Document from model (`[module]/[doctype]/[name]/[name].json`) files.
:param module: Module name.
:param dt: DocType name.
:param dn: Document name.
:param force: Reload even if `modified` timestamp matches.
"""
return frappe.modules.reload_doc(module, dt, dn, force=force, reset_permissions=reset_permissions) | c6f68a63433010f2a0270fdc0f373e96e63001c5 | 11,257 |
def transform_box_coord_pseudo(H, W, box_vertices, dataset_name):
"""
Transform box_vertices to match the coordinate system of the attributions
:param H: Desired height of image
:param W: Desired width of image
    :param box_vertices: Box vertices in the source coordinate frame
    :param dataset_name: One of 'CadcDataset', 'KittiDataset' or 'WaymoDataset'
    :return: transformed box_vertices
"""
y_range = None
if dataset_name == 'CadcDataset':
y_range = 100.0
elif dataset_name == 'KittiDataset':
'''Note: the range for Kitti is different now'''
y_range = 79.36
elif dataset_name == 'WaymoDataset':
y_range = 168.96
new_scale = H / y_range
# print('H: {}'.format(H))
# TODO: verify the following for waymo
if dataset_name == 'KittiDataset':
for vertex in box_vertices:
vertex[0] = vertex[0] * new_scale
vertex[0] = H - vertex[0]
vertex[1] = vertex[1] * new_scale
else:
# print("\n")
for vertex in box_vertices:
vertex[0] = vertex[0] * new_scale
vertex[0] = H - vertex[0]
vertex[1] = vertex[1] * new_scale
# print("vertex: {}".format(vertex))
return box_vertices | ee4738ea01b493e4854b0d163459b47c98fb082b | 11,258 |
def create_api_app(global_conf, **local_conf):
"""Creates MainAPI application"""
controllers = {}
api_version = global_conf.get('api_version')
if api_version == 'v2.0':
controllers.update({
'/log/single': v2_logs.Logs()
})
elif api_version == 'v3.0':
controllers.update({
'/logs': v3_logs.Logs()
})
wsgi_app = falcon.API(
request_type=request.Request
)
for route, ctrl in controllers.items():
wsgi_app.add_route(route, ctrl)
error_handlers.register_error_handlers(wsgi_app)
return wsgi_app | 1ec0f155dfdc482fa6b1b41afb18d037636fc9ba | 11,259 |
import numpy as np
def gauss_4deg(x, b, ampl, cent, sigm):
    """ Simple 4-parameter Gaussian
    Args:
        x (float or ndarray): Points at which to evaluate
        b (float): Floor
        ampl (float): Amplitude
        cent (float): Centroid
        sigm (float): sigma
    Returns:
        float or ndarray: Evaluated Gaussian
    """
return b + ampl*np.exp(-1.*(cent-x)**2/2/sigm**2) | 42f58844c91423220176b0f93ead4a4fd6dbd608 | 11,260 |
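A quick check of the Gaussian above (assuming the function and numpy are in scope): at x == cent the exponent vanishes, so the value is exactly b + ampl.

import numpy as np

x = np.linspace(-5.0, 5.0, 11)
y = gauss_4deg(x, b=1.0, ampl=2.0, cent=0.0, sigm=1.5)       # vectorised evaluation
assert np.isclose(gauss_4deg(0.0, 1.0, 2.0, 0.0, 1.5), 3.0)  # floor + amplitude at the centroid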
import requests
def get_spreads(pair, since):
"""Returns last recent spreads"""
api_command = API_LINK + f'Spreads?pair={pair}&since={since}'
resp = requests.get(api_command).json()
if not resp['error']: # empty
return resp
return resp['error'] | 7e32b1abf2988bd5eeb439d9308a1a6320ba8b87 | 11,261 |
import os
def token_bytes(nbytes=None):
"""Return a random byte string containing *nbytes* bytes.
If *nbytes* is ``None`` or not supplied, a reasonable
default is used.
>>> token_bytes(16) #doctest:+SKIP
b'\\xebr\\x17D*t\\xae\\xd4\\xe3S\\xb6\\xe2\\xebP1\\x8b'
"""
    if nbytes is None:
        nbytes = 32  # reasonable default entropy, matching the docstring
    return os.urandom(nbytes) | 3750fb9ae0be2bc3f9b52a9c3caa3dc67a3a91d0 | 11,262
import numpy as np
def quat2expmap(q):
"""
Converts a quaternion to an exponential map
Matlab port to python for evaluation purposes
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/quat2expmap.m#L1
Args
q: 1x4 quaternion, w, x, y, z
Returns
r: 1x3 exponential map
Raises
ValueError if the l2 norm of the quaternion is not close to 1
"""
    if np.abs(np.linalg.norm(q) - 1) > 1e-3:
        raise ValueError("quat2expmap: input quaternion is not norm 1")
    sinhalftheta = np.linalg.norm(q[1:])
    coshalftheta = q[0]
    r0 = np.divide(q[1:], (np.linalg.norm(q[1:]) + np.finfo(np.float32).eps))
theta = 2 * np.arctan2(sinhalftheta, coshalftheta)
theta = np.mod(theta + 2 * np.pi, 2 * np.pi)
if theta > np.pi:
theta = 2 * np.pi - theta
r0 = -r0
r = r0 * theta
return r | 1b68e2ad62f46402f06c9ba2c459c33d464d84e4 | 11,263 |
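A small sanity check for the conversion above, assuming the function and numpy are in scope: a 90-degree rotation about the z axis should map to the axis (0, 0, 1) scaled by pi/2.

import numpy as np

q = np.array([np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])  # w, x, y, z
r = quat2expmap(q)
assert np.allclose(r, [0.0, 0.0, np.pi / 2])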
import logging
def log_to_file(filename, level=None, formatter=None):
""" Output logs to a file
Causes logs to be additionally directed to a file, if you call this twice you will
get duplicated logging. This does not disable or invalidate other logging options , it
adds to them.
Supported Logging levels are CRITICAL, ERROR, WARNING, INFO and DEBUG
Logging formatters are documented here , they control the format of the logs.
https://docs.python.org/3/library/logging.html#formatter-objects
Example:
Selecting DEBUG will show all other levels
Selecting ERROR will show CRITICAL and ERROR only
Args:
filename (str) : Filename to log to.
level (str) : Display logs tagged below this level.
formatter (Formatter) : The python logging formatter you want to use.
Returns:
(logger) : A logging handle that you don't have to use.
"""
filehandler = logging.FileHandler(filename)
if level:
filehandler.setLevel(level)
else:
filehandler.setLevel(logging.DEBUG)
    if formatter:
        formatter = logging.Formatter(formatter)
else:
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
filehandler.setFormatter(formatter)
if len(logger.handlers) < 2:
logger.addHandler(filehandler)
return logger | 5eba020f5481ec14532bd3752540b45ab11c34c0 | 11,264 |
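Typical usage of the helper above — a sketch; the log file name is made up, and the returned handle is the module-level logger the function appends to.

log = log_to_file("monitor.log", level="DEBUG")
log.debug("this record also lands in monitor.log")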
def candidate_results_for_race_type(result_form, race_type, num_results=None):
"""Return the candidates and results for a result form and race type.
:param result_form: The result form to return data for.
:param race_type: The race type to get results for, get component results
if this is None.
:param num_results: Enforce a particular number of results, default None.
:returns: A list of tuples containing the candidate and all results for
that candidate.
"""
return get_candidates(get_results_for_race_type(result_form, race_type),
num_results) | 22bff04bd0b59b6c566f792794119929913efd42 | 11,265 |
def sort_2metals(metals):
"""
Handles iterable or string of 2 metals and returns them
in alphabetical order
Args:
metals (str || iterable): two metal element names
Returns:
(tuple): element names in alphabetical order
"""
# return None's if metals is None
if metals is None:
return None, None
if isinstance(metals, str):
if len(metals) != 4:
raise ValueError('str can only have two elements.')
metal1, metal2 = sorted([metals[:2], metals[2:]])
else:
metal1, metal2 = sorted(metals)
return metal1.title(), metal2.title() | dab922797a6c7b94d6489d8fc4d9c1d99f3ee35c | 11,266 |
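For example, the helper accepts either a four-character string or any two-element iterable:

assert sort_2metals("CuAg") == ("Ag", "Cu")
assert sort_2metals(["fe", "co"]) == ("Co", "Fe")
assert sort_2metals(None) == (None, None)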
def parse_join(tables, operation, left, right):
"""
Parses a join from the where clause
"""
# Verify Left
table_name = left['column']['table']
column_name = left['column']['name']
# If table and column, check that table for presense
if table_name is not None and not tables[table_name].has_column(column_name):
write_error("ERROR: Column reference \"{}\" does not exist in table \"{}\"", column_name, table_name)
# If no table, check for ambiguous column
    present_in_tables = {key for (key, value) in tables.items() if value.has_column(column_name)}
if table_name is None and len(present_in_tables) > 1:
write_error("ERROR: Column reference \"{}\" is ambiguous; present in multiple tables: {}.", column_name, ", ".join(present_in_tables))
if len(present_in_tables) == 1:
table_name = present_in_tables.pop()
column_location = tables[table_name].column_location(column_name)
left = (table_name, column_name, column_location)
# Verify Right
table_name = right['column']['table']
column_name = right['column']['name']
# If table and column, check that table for presense
if table_name is not None and not tables[table_name].has_column(column_name):
write_error("ERROR: Column reference \"{}\" does not exist in table \"{}\"", column_name, table_name)
# If no table, check for ambiguous column
    present_in_tables = {key for (key, value) in tables.items() if value.has_column(column_name)}
if table_name is None and len(present_in_tables) > 1:
write_error("ERROR: Column reference \"{}\" is ambiguous; present in multiple tables: {}.", column_name, ", ".join(present_in_tables))
if len(present_in_tables) == 1:
table_name = present_in_tables.pop()
column_location = tables[table_name].column_location(column_name)
right = (table_name, column_name, column_location)
# Are join types compatible
if tables[left[0]].column_type(left[1]) != tables[right[0]].column_type(right[1]):
write_error("ERROR: Column join types are incompatible.")
return Join(operation, left, right) | 1d5f912dfe634c21dea7640ad12271e4ac34b277 | 11,267 |
def address_id_handler(id):
"""
GET - called as /addresses/25
PUT - called to update as /addresses/25?address='abc'&lat=25&lon=89
DELETE - called as /addresses/25
:param id:
:return:
"""
if request.method == 'GET':
return jsonify(read_address(session, address_id=id))
elif request.method == 'PUT':
address = request.form.get('address','dummy')
lat = request.form.get('lat',0.1)
lon = request.form.get('lon',0.1)
update_address(session, address_id=id, search_string=address, lat=lat, lon=lon)
return jsonify({'success': True})
elif request.method == 'DELETE':
delete_address(session, id) | 9bb3fd813842aac0262417dcb59e4b0cffadc1f0 | 11,268 |
from neptune_mlflow.sync import sync as run_sync
def sync(path, project):
"""Upload mlflow runs data to Neptune.
PATH is a directory where Neptune will look for `mlruns` directory with mlflow data.
Examples:
neptune mlflow .
neptune mlflow /path
neptune mlflow /path --project username/sandbox
"""
# We do not want to import anything if process was executed for autocompletion purposes.
return run_sync(path=path, project=project) | 5fb258969ea02a93774b2fb350c3d3fc3d436752 | 11,269 |
def prefix_search_heuristic_split(mat: np.ndarray, chars: str) -> str:
"""Prefix search decoding with heuristic to speed up the algorithm.
Speed up prefix computation by splitting sequence into subsequences as described by Graves (p66).
Args:
mat: Output of neural network of shape TxC.
chars: The set of characters the neural network can recognize, excluding the CTC-blank.
Returns:
The decoded text.
"""
blank_idx = len(chars)
max_T, _ = mat.shape
# split sequence into 3 subsequences, splitting points should be roughly placed at 1/3 and 2/3
split_targets = [int(max_T * 1 / 3), int(max_T * 2 / 3)]
best = [{'target': s, 'bestDist': max_T, 'bestIdx': s} for s in split_targets]
# find good splitting points (blanks above threshold)
thres = 0.9
for t in range(max_T):
for b in best:
if mat[t, blank_idx] > thres and abs(t - b['target']) < b['bestDist']:
b['bestDist'] = abs(t - b['target'])
b['bestIdx'] = t
break
# splitting points plus begin and end of sequence
ranges = [0] + [b['bestIdx'] for b in best] + [max_T]
# do prefix search for each subsequence and concatenate results
res = ''
for i in range(len(ranges) - 1):
beg = ranges[i]
end = ranges[i + 1]
res += prefix_search(mat[beg: end, :], chars)
return res | c7894012ff1fe4d2ef0baab6bb0375dc2167ea58 | 11,270 |
def _make_set_permissions_url(calendar_id, userid, level):
"""
:return: the URL string for GET request call
to Trumba SetPermissions method
"""
return "{0}?CalendarID={1}&Email={2}@uw.edu&Level={3}".format(
set_permission_url_prefix, calendar_id, userid, level) | 669a64f8a17d87777dea3d007d690b132d8cb266 | 11,271 |
def pack_inputs(inputs):
"""Pack a list of `inputs` tensors to a tuple.
Args:
inputs: a list of tensors.
Returns:
    A tuple of tensors. If any input is None, it is replaced with a special constant
tensor.
"""
inputs = tf.nest.flatten(inputs)
outputs = []
for x in inputs:
if x is None:
outputs.append(tf.constant(0, shape=[], dtype=tf.int32))
else:
outputs.append(x)
return tuple(outputs) | dcb26dd286a88288cf9afb824a69d468188436be | 11,272 |
from datetime import datetime
def datetime_without_seconds(date: datetime) -> datetime:
"""
Returns given datetime with seconds and microseconds set to 0
"""
return date.replace(second=0, microsecond=0) | de30c7770d84751b555c78e045f37783030d8970 | 11,273 |
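For example:

from datetime import datetime

stamp = datetime(2021, 5, 4, 10, 37, 25, 123456)
assert datetime_without_seconds(stamp) == datetime(2021, 5, 4, 10, 37)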
import copy
def update_br(request):
"""
更新会议室
:param request:
:return:
"""
if request.method == 'POST':
dbs = request.dbsession
app_path = request.registry.settings['app_path']
br = dbs.query(HasBoardroom).filter(HasBoardroom.id == request.POST.get('br_id', 0)).first()
old_br = copy.deepcopy(br)
new_name = request.POST.get('br_name', '')
if old_br.name != new_name:
msg = check_brm_name(dbs, room_name=request.POST.get('br_name', ''), org_id=request.POST.get('org_id', 0))
if not msg:
br.name = new_name
else:
return {
'resultFlag': 'failed',
'error_msg': msg
}
br.org_id = request.POST.get('org_id', 0)
br.config = request.POST.get('br_config', '')
br.description = request.POST.get('br_desc', '')
room_pic = request.POST.get('room_pic', '')
if room_pic:
room_pic = request.session['#room_pic']
br.picture = IMG_RPATH + str(br.org_id) + '/' + room_pic
room_logo1 = request.POST.get('room_logo1', '')
if room_logo1:
room_logo1 = request.session['#room_logo1']
br.logo1 = IMG_RPATH + str(br.org_id) + '/' + room_logo1
room_logo2 = request.POST.get('room_logo2', '')
if room_logo2:
room_logo2 = request.session['#room_logo2']
br.logo2 = IMG_RPATH + str(br.org_id) + '/' + room_logo2
room_btn = request.POST.get('room_btn', '')
if room_btn:
room_btn = request.session['#room_btn']
br.button_img = IMG_RPATH + str(br.org_id) + '/' + room_btn
room_bgd = request.POST.get('room_bgd', '')
if room_bgd:
room_bgd = request.session['#room_bgd']
br.background = IMG_RPATH + str(br.org_id) + '/' + room_bgd
br.state = request.POST.get('state', 1)
org_id = br.org_id
if old_br.org_id != int(org_id):
update_pic(old_br, br)
new_br = copy.deepcopy(br)
msg = update(dbs, br)
if not msg:
if room_pic:
delete_pic(old_br.picture, app_path)
move_pic(room_pic, org_id, app_path)
elif old_br.org_id != int(org_id):
move_piv_org(old_br.picture, new_br.picture, app_path)
if room_logo1:
delete_pic(old_br.logo1, app_path)
move_pic(room_logo1, org_id, app_path)
elif old_br.org_id != int(org_id):
move_piv_org(old_br.logo1, new_br.logo1, app_path)
if room_logo2:
delete_pic(old_br.logo2, app_path)
move_pic(room_logo2, org_id, app_path)
elif old_br.org_id != int(org_id):
move_piv_org(old_br.logo2, new_br.logo2, app_path)
if room_btn:
delete_pic(old_br.button_img, app_path)
move_pic(room_btn, org_id, app_path)
elif old_br.org_id != int(org_id):
move_piv_org(old_br.button_img, new_br.button_img, app_path)
if room_bgd:
delete_pic(old_br.background, app_path)
move_pic(room_bgd, org_id, app_path)
elif old_br.org_id != int(org_id):
move_piv_org(old_br.background, new_br.background, app_path)
json_str = {
'resultFlag': 'failed' if msg else 'success',
'error_msg': msg
}
HyLog.log_update(request.client_addr, request.session['userAccount'],
request.POST.get('br_id') + ' failed' if msg else 'success', 'boardroom')
return json_str
return {} | 46fa2c9cb20f7335b539f2a4e77f566e93e6bf7c | 11,274 |
def find_north_pole(valid_rooms):
"""
Decode the room names and find the north pole.
Args:
valid_rooms (list): A list of valid rooms to decode/search.
Returns:
tuple
"""
global NORTH_POLE_NAME
for room in valid_rooms:
room_name, sector_id, checksum = room
decoded_name = decode_room_name(room_name, sector_id)
if decoded_name == NORTH_POLE_NAME:
return decoded_name, sector_id | b3da9a66838c024bb6ae68be8104b3e80f79d0d7 | 11,275 |
def cli(gene1, gene2, gene3, frameness, keep_exon, fusion_fraction,
add_insertion, total_coverage, output, common_filename):
"""[Simulator] Fusion generator."""
normal_coverage = total_coverage * (1. - fusion_fraction)
fusion_coverage = total_coverage * fusion_fraction
normal_ref = generate_normal_reference([gene1, gene2, gene3], output, common_filename)
fusion_ref = generate_fusion_reference([gene1, gene2, gene3], output,
keep_exon, frameness, add_insertion, common_filename)
normal_fastq = generate_fastq(normal_ref, output, 'normal', normal_coverage)
fusion_fastq = generate_fastq(fusion_ref, output, 'fusion', fusion_coverage)
merged1, merged2 = merge_fastq(normal_fastq, fusion_fastq, output, common_filename)
# chimerascan_bedpe = run_chimerascan(merged1, merged2, output)
# print chimerascan_bedpe
# generate_manifest(merged1, merged2)
# run_detango(merged1, merged2, output)
return merged1, merged2 | ed52c518bf3c88623510fa07996fdec205936f4a | 11,276 |
def build_upsample_layer(cfg, *args, **kwargs):
"""Build upsample layer.
Args:
cfg (dict): The upsample layer config, which should contain:
- type (str): Layer type.
- scale_factor (int): Upsample ratio, which is not applicable to
deconv.
- layer args: Args needed to instantiate a upsample layer.
args (argument list): Arguments passed to the `__init__`
method of the corresponding conv layer.
kwargs (keyword arguments): Keyword arguments passed to the `__init__`
method of the corresponding conv layer.
Returns:
nn.Module: Created upsample layer.
"""
if not isinstance(cfg, dict):
raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
if 'typename' not in cfg:
raise KeyError(
f'the cfg dict must contain the key "typename", but got {cfg}')
cfg_ = cfg.copy()
layer_type = cfg_.pop('typename')
upsample = registry.get(layer_type, 'upsample')
if upsample is nn.Upsample:
cfg_['mode'] = layer_type
layer = upsample(*args, **kwargs, **cfg_)
return layer | 30121657746d3b5ec1f374d339a9d33b48a24724 | 11,277 |
def ctime_ticks(t):
"""This is for backwards compatibility and should not be used."""
return tsc_time.TSC_from_ticks(t).ctime() | 841f879db7e8fa7aa436ceddeb69c76c5d707f17 | 11,278 |
def vitruvian_loss(input, mask, dataset):
"""Vitruvian loss implementation"""
if dataset == "itop":
        # 1 - 2 and 1 - 3 -> neck-shoulder
        # 2 - 4 and 3 - 5 -> shoulder-elbow
        # 4 - 6 and 5 - 7 -> elbow-hand
        # 9 - 11 and 10 - 12 -> hip-knee
        # 11 - 13 and 12 - 14 -> knee-foot
loss = _vitruvian_calculate(input, [1, 2, 1, 3], mask)
loss += _vitruvian_calculate(input, [2, 4, 3, 5], mask)
loss += _vitruvian_calculate(input, [4, 6, 5, 7], mask)
loss += _vitruvian_calculate(input, [9, 11, 10, 12], mask)
loss += _vitruvian_calculate(input, [11, 13, 12, 14], mask)
elif dataset in ("watch_n_patch", "wnp", "watch-n-patch"):
        # 20 - 4 and 20 - 8 -> spine shoulder to shoulders
        # 4 - 5 and 8 - 9 -> shoulder-elbow
        # 5 - 6 and 9 - 10 -> elbow-wrist
        # 6 - 7 and 10 - 11 -> wrist-hand
        # 12 - 0 and 0 - 16 -> hips to spine base
        # 12 - 13 and 16 - 17 -> hip-knee
        # 13 - 14 and 17 - 18 -> knee-ankle
        # 14 - 15 and 18 - 19 -> ankle-foot
limbs = [
[20, 4, 20, 8],
[4, 5, 8, 9],
[5, 6, 9, 10],
[6, 7, 10, 11],
[0, 12, 0, 16],
[12, 13, 16, 17],
[13, 14, 17, 18],
[14, 15, 18, 19],
]
loss = 0.0
for limb in limbs:
loss += _vitruvian_calculate(input, limb, mask)
return loss | b609b28e15a1b53bccfd69d3ac0938be1f6ccee6 | 11,279 |
def LoadElement(href, only_etag=False):
"""
Return an instance of a element as a ElementCache dict
used as a cache.
:rtype ElementCache
"""
request = SMCRequest(href=href)
request.exception = FetchElementFailed
result = request.read()
if only_etag:
return result.etag
return ElementCache(
result.json, etag=result.etag) | 8e47fbc2aa745c97c9b93401302ff13435e8e9e7 | 11,280 |
import os
import sys
def load_config(config_file: str) -> EnvironmentAwareConfigParser:
"""Load the main configuration and return a config object."""
config = EnvironmentAwareConfigParser()
if not os.path.exists(config_file):
main_logger.critical('Configuration file "%s" does not exist!', config_file)
sys.exit(1)
try:
config.read(config_file)
except Exception as e:
main_logger.critical("Unable to read configuration file")
main_logger.critical(e)
sys.exit(1)
return config | f55c7bbd64094d1075fb2daa4c90bd9fbcf2b3ed | 11,281 |
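Typical usage — a sketch with a hypothetical config path; a missing or unreadable file exits the process as shown above.

config = load_config("monitor.ini")
print(config.sections())   # the returned object behaves like a standard ConfigParser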
from typing import Iterable
def xreplace_indices(exprs, mapper, candidates=None, only_rhs=False):
"""
Create new expressions from ``exprs``, by replacing all index variables
specified in mapper appearing as a tensor index. Only tensors whose symbolic
name appears in ``candidates`` are considered if ``candidates`` is not None.
"""
get = lambda i: i.rhs if only_rhs is True else i
handle = flatten(retrieve_indexed(get(i)) for i in as_tuple(exprs))
if candidates is not None:
handle = [i for i in handle if i.base.label in candidates]
mapper = dict(zip(handle, [i.xreplace(mapper) for i in handle]))
replaced = [i.xreplace(mapper) for i in as_tuple(exprs)]
return replaced if isinstance(exprs, Iterable) else replaced[0] | 808918b8852ff23e40f34aa26cbc6433a9bdf102 | 11,282 |
import six
def format_ratio(in_str, separator='/'):
""" Convert a string representing a rational value to a decimal value.
Args:
in_str (str): Input string.
separator (str): Separator character used to extract numerator and
denominator, if not found in ``in_str`` whitespace is used.
Returns:
An integer or float value with 2 digits precision or ``in_str`` if
formating has failed.
>>> format_ratio('48000/1')
48000
>>> format_ratio('24000 1000')
24
>>> format_ratio('24000 1001')
23.98
>>> format_ratio('1,77')
'1,77'
>>> format_ratio(1.77)
1.77
"""
if not isinstance(in_str, six.string_types):
return in_str
try:
sep = separator if separator in in_str else ' '
ratio = in_str.split(sep)
if len(ratio) == 2:
ratio = round(float(ratio[0]) / float(ratio[1]), 2)
else:
ratio = float(ratio[0])
if ratio.is_integer():
ratio = int(ratio)
return ratio
except ValueError:
return in_str | 308ec972df6e57e87e24c26e769311d652118aee | 11,283 |
def fill_tidal_data(da,fill_time=True):
"""
Extract tidal harmonics from an incomplete xarray DataArray, use
those to fill in the gaps and return a complete DataArray.
Uses all 37 of the standard NOAA harmonics, may not be stable
with short time series.
A 5-day lowpass is removed from the harmonic decomposition, and added
back in afterwards.
Assumes that the DataArray has a 'time' coordinate with datetime64 values.
The time dimension must be dense enough to extract an exact time step
If fill_time is True, holes in the time coordinate will be filled, too.
"""
diffs=np.diff(da.time)
dt=np.median(diffs)
if fill_time:
gaps=np.nonzero(diffs>1.5*dt)[0]
pieces=[]
last=0
for gap_i in gaps:
# gap_i=10 means that the 10th diff was too big
# that means the jump from 10 to 11 was too big
# the preceding piece should go through 9, so
# exclusive of gap_i
pieces.append(da.time.values[last:gap_i])
pieces.append(np.arange( da.time.values[gap_i],
da.time.values[gap_i+1],
dt))
last=gap_i+1
pieces.append(da.time.values[last:])
dense_times=np.concatenate(pieces)
dense_values=np.nan*np.zeros(len(dense_times),np.float64)
dense_values[ np.searchsorted(dense_times,da.time.values) ] = da.values
da=xr.DataArray(dense_values,
dims=['time'],coords=[dense_times])
else:
pass
dnums=utils.to_dnum(da.time)
data=da.values
# lowpass at about 5 days, splitting out low/high components
winsize=int( np.timedelta64(5,'D') / dt )
data_lp=filters.lowpass_fir(data,winsize)
data_hp=data - data_lp
valid=np.isfinite(data_hp)
omegas=harm_decomp.noaa_37_omegas() # as rad/sec
harmonics=harm_decomp.decompose(dnums[valid]*86400,data_hp[valid],omegas)
dense=harm_decomp.recompose(dnums*86400,harmonics,omegas)
data_recon=utils.fill_invalid(data_lp) + dense
data_filled=data.copy()
missing=np.isnan(data_filled)
data_filled[missing] = data_recon[missing]
fda=xr.DataArray(data_filled,coords=[da.time],dims=['time'])
return fda | fbb881ca3a47778c8c6ee8b3804953fb5b806b4e | 11,284 |
def wrapper_unit_scaling(x, T, s_ref, n_gt, *args, **kwargs):
"""Normalize segments to unit-length and use center-duration format
"""
xc = segment_format(x, 'b2c')
init_ref = np.repeat(s_ref[:, 0], n_gt)
return segment_unit_scaling(xc, T, init_ref) | 28f29fd81143c1bb12d4e062e680de2643662bd1 | 11,285 |
import copy
import time
from tqdm import tqdm
import torch
def fairseq_generate(data_lines, args, models, task, batch_size, beam_size, device):
"""beam search | greedy decoding implemented by fairseq"""
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
gen_args = copy.copy(args)
with open_dict(gen_args):
gen_args.beam = beam_size
generator = task.build_generator(models, gen_args)
data_size = len(data_lines)
all_results = []
logger.info(f'Fairseq generate batch {batch_size}, beam {beam_size}')
start = time.perf_counter()
for start_idx in tqdm(range(0, data_size, batch_size)):
batch_lines = [line for line in data_lines[start_idx: min(start_idx + batch_size, data_size)]]
batch_ids = [src_dict.encode_line(sentence, add_if_not_exist=False).long() for sentence in batch_lines]
lengths = torch.LongTensor([t.numel() for t in batch_ids])
batch_dataset = task.build_dataset_for_inference(batch_ids, lengths)
batch_dataset.left_pad_source = True
batch = batch_dataset.collater(batch_dataset)
batch = utils.apply_to_sample(lambda t: t.to(device), batch)
translations = generator.generate(models, batch, prefix_tokens=None)
results = []
for id, hypos in zip(batch["id"].tolist(), translations):
results.append((id, hypos))
batched_hypos = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]
all_results.extend([tgt_dict.string(hypos[0]['tokens']) for hypos in batched_hypos])
delta = time.perf_counter() - start
remove_bpe_results = [line.replace('@@ ', '') for line in all_results]
return remove_bpe_results, delta | a412f619f79f39f77d6df83cefbbe38170d77109 | 11,286 |
def retrieve_features(dataframe):
"""
Retrieves features (X) from dataframe
:param dataframe:
:return:
"""
return list(dataframe["tweet"]) | 69118d6d0b9503500f6fa4b24fb844af4ff25644 | 11,287 |
def atarzia_short_MD_settings():
"""My default settings for short, crude cage optimizations in stk.
Modified on 26/04/19.
"""
Settings = {
'output_dir': None,
'timeout': None,
'force_field': 16,
'temperature': 700, # K
'conformers': 50,
'time_step': 1, # fs
'eq_time': 50, # ps
'simulation_time': 1000, # ps -- 1 ns
'maximum_iterations': 2500,
'minimum_gradient': 0.05,
'use_cache': False
}
return Settings | 490a59e9551a324b571d4f67c4e7885144f2fe77 | 11,288 |
import pandas as pd
def result_by_score_from_csv(f, score, ascending=True):
    """Return the [pdb_code, score] pair with the best (first after sorting) score"""
    df = pd.read_csv(f)
    df = df.sort_values(score, ascending=ascending).reset_index(drop=True)
    return df.loc[0, ["pdb_code", score]].tolist() | ba41c6cfc26d830685c43265eaa21c496173311e | 11,289
import datetime
def create_column_dnn(
predict_feature='close',
ticker='',
debug=False,
use_epochs=10,
use_batch_size=10,
use_test_size=0.1,
use_random_state=1,
use_seed=7,
use_shuffle=False,
model_verbose=True,
fit_verbose=True,
use_scalers=True,
df=[],
dnn_config={},
compile_config={},
s3_bucket='',
s3_key='',
send_plots_to_slack=False):
"""create_column_dnn
For scaler-normalized datasets this will
compile numeric columns and ignore string/non-numeric
columns as training and test feature columns
:param predict_feature: Column to create DNN with
:param ticker: Ticker being used
:param debug: Debug mode
:param use_epochs: Epochs times to use
:param use_batch_size: Batch size to use
:param use_test_size: Test size to use
:param use_random_state: Random state to train with
:param use_seed: Seed used to build scalar datasets
:param use_shuffle: To shuffle the regression estimator or not
:param model_verbose: To use a verbose Keras regression model or not
:param fit_verbose: To use a verbose fitting of the regression estimator
:param use_scalers: To build using scalars or not
:param df: Ticker dataset
:param dnn_config: Deep Neural Net keras model json to build the model
:param compile_config: Deep Neural Net dictionary of compile options
:param s3_bucket: S3 Bucket
:param s3_key: S3 Key
"""
df_filter = (df[f'{predict_feature}'] >= 0.1)
first_date = df[df_filter]['date'].iloc[0]
end_date = df[df_filter]['date'].iloc[-1]
if 'minute' in df:
found_valid_minute = df['minute'].iloc[0]
if found_valid_minute:
first_date = df[df_filter]['minute'].iloc[0]
end_date = df[df_filter]['minute'].iloc[-1]
num_rows = len(df.index)
log.info(
f'prepared training data from '
f'history {s3_bucket}@{s3_key} '
f'rows={num_rows} '
f'dates: {first_date} to {end_date}')
if debug:
for i, r in df.iterrows():
log.info(
f'{r["minute"]} - {r["{}".format(predict_feature)]}')
# end of for loop
log.info(
f'columns: {df.columns.values}')
log.info(
f'rows: {len(df.index)}')
# end of debug
use_all_features = use_scalers
all_features = []
train_features = []
if use_all_features:
for c in df.columns.values:
if (
pandas_types.is_numeric_dtype(df[c]) and
c not in train_features):
if c != predict_feature:
train_features.append(c)
if c not in all_features:
all_features.append(c)
dnn_config['layers'][-1]['activation'] = (
'sigmoid')
else:
temp_choices = choices[:]
temp_choices.remove(predict_feature)
train_features = ['open']
train_features.extend(temp_choices)
all_features = [
f'{predict_feature}'
] + train_features
num_features = len(train_features)
features_and_minute = [
'minute'
] + all_features
log.info(
'converting columns to floats')
timeseries_df = df[df_filter][features_and_minute].fillna(-10000.0)
converted_df = timeseries_df[all_features].astype('float32')
train_df = None
test_df = None
scaler_predictions = None
if use_all_features:
scaler_res = build_scaler_datasets.build_datasets_using_scalers(
train_features=train_features,
test_feature=predict_feature,
df=converted_df,
test_size=use_test_size,
seed=use_seed)
if scaler_res['status'] != ae_consts.SUCCESS:
log.error(
'failed to build scaler train and test datasets')
return
train_df = scaler_res['scaled_train_df']
test_df = scaler_res['scaled_test_df']
x_train = scaler_res['x_train']
x_test = scaler_res['x_test']
y_train = scaler_res['y_train']
y_test = scaler_res['y_test']
scaler_predictions = scaler_res['scaler_test']
else:
log.info(
'building train and test dfs from subset of features')
train_df = converted_df[train_features]
test_df = converted_df[[predict_feature]]
log.info(
f'splitting {num_rows} into test and training '
f'size={use_test_size}')
(x_train,
x_test,
y_train,
y_test) = tt_split.train_test_split(
train_df,
test_df,
test_size=use_test_size,
random_state=use_random_state)
log.info(
f'split breakdown - '
f'x_train={len(x_train)} '
f'x_test={len(x_test)} '
f'y_train={len(y_train)} '
f'y_test={len(y_test)}')
def set_model():
return build_dnn.build_regression_dnn(
num_features=num_features,
compile_config=compile_config,
model_config=dnn_config)
estimator = keras_scikit.KerasRegressor(
build_fn=set_model,
epochs=use_epochs,
batch_size=use_batch_size,
verbose=model_verbose)
log.info(
f'fitting estimator - '
f'predicting={predict_feature} '
f'epochs={use_epochs} '
f'batch={use_batch_size} '
f'test_size={use_test_size} '
f'seed={use_seed}')
history = estimator.fit(
x_train,
y_train,
validation_data=(
x_train,
y_train),
epochs=use_epochs,
batch_size=use_batch_size,
shuffle=use_shuffle,
verbose=fit_verbose)
created_on = (
datetime.datetime.now().strftime(
ae_consts.COMMON_TICK_DATE_FORMAT))
plot_fit_history.plot_dnn_fit_history(
df=history.history,
title=(
f'DNN Errors Over Training Epochs\n'
f'Training Data: s3://{s3_bucket}/{s3_key}\n'
f'Created: {created_on}'),
red='mean_squared_error',
blue='mean_absolute_error',
green='acc',
orange='cosine_proximity',
send_plots_to_slack=send_plots_to_slack)
# on production use newly fetched pricing data
# not the training data
predict_records = []
if use_all_features:
prediction_res = build_scaler_df.build_scaler_dataset_from_df(
df=converted_df[train_features])
if prediction_res['status'] == ae_consts.SUCCESS:
predict_records = prediction_res['df']
else:
predict_records = converted_df[train_features]
log.info(
f'making predictions: {len(predict_records)}')
predictions = estimator.model.predict(
predict_records,
verbose=True)
    np.set_printoptions(threshold=np.inf)
indexes = tf.argmax(predictions, axis=1)
data = {}
data['indexes'] = indexes
price_predictions = []
if use_all_features and scaler_predictions:
price_predictions = [
ae_consts.to_f(x) for x in
scaler_predictions.inverse_transform(
predictions.reshape(-1, 1)).reshape(-1)]
else:
price_predictions = [ae_consts.to_f(x[0]) for x in predictions]
timeseries_df[f'predicted_{predict_feature}'] = price_predictions
timeseries_df['error'] = (
timeseries_df[f'{predict_feature}'] -
timeseries_df[f'predicted_{predict_feature}'])
output_features = [
'minute',
f'{predict_feature}',
f'predicted_{predict_feature}',
'error'
]
date_str = (
f'Dates: {timeseries_df["minute"].iloc[0]} '
f'to '
f'{timeseries_df["minute"].iloc[-1]}')
log.info(
f'historical {predict_feature} with predicted {predict_feature}: '
f'{timeseries_df[output_features]}')
log.info(
date_str)
log.info(
f'Columns: {output_features}')
average_error = ae_consts.to_f(
timeseries_df['error'].sum() / len(timeseries_df.index))
log.info(
f'Average historical {predict_feature} '
f'vs predicted {predict_feature} error: '
f'{average_error}')
log.info(
f'plotting historical {predict_feature} vs predicted {predict_feature}'
f' from training with columns={num_features}')
ts_filter = (timeseries_df[f'{predict_feature}'] > 0.1)
latest_feature = (
timeseries_df[ts_filter][f'{predict_feature}'].iloc[-1])
latest_predicted_feature = (
timeseries_df[ts_filter][f'predicted_{predict_feature}'].iloc[-1])
log.info(
f'{end_date} {predict_feature}={latest_feature} '
f'with '
f'predicted_{predict_feature}={latest_predicted_feature}')
plot_trading_history.plot_trading_history(
title=(
f'{ticker} - Historical {predict_feature.title()} vs '
f'Predicted {predict_feature.title()}\n'
f'Number of Training Features: {num_features}\n'
f'{date_str}'),
df=timeseries_df,
red=f'{predict_feature}',
blue=f'predicted_{predict_feature}',
green=None,
orange=None,
date_col='minute',
date_format='%d %H:%M:%S\n%b',
xlabel='minute',
ylabel=(
f'Historical {predict_feature.title()} vs '
f'Predicted {predict_feature.title()}'),
df_filter=ts_filter,
width=8.0,
height=8.0,
show_plot=True,
dropna_for_all=False,
send_plots_to_slack=send_plots_to_slack) | acd3fcddf8f0e7d931f8419113b5243180f67363 | 11,290 |
from typing import Mapping
from typing import Tuple
def get_default_hand_connection_style(
) -> Mapping[Tuple[int, int], DrawingSpec]:
"""Returns the default hand connection drawing style.
Returns:
A mapping from each hand connection to the default drawing spec.
"""
hand_connection_style = {}
for k, v in _HAND_CONNECTION_STYLE.items():
for connection in k:
hand_connection_style[connection] = v
return hand_connection_style | 7cbc020f746e2dacd31664f9ddbe71fb98fc1942 | 11,291 |
def lists_to_html_table(a_list):
"""
Converts a list of lists to a HTML table. First list becomes the header of the table.
Useful while sending email from the code
:param list(list) a_list: values in the form of list of lists
:return: HTML table representation corresponding to the values in the lists
:rtype: str
"""
header = "<tr><th>%s</th></tr>" % ("</th><th>".join(a_list[0]))
body = ""
if len(a_list) > 1:
for sub_list in a_list[1:]:
body += "<tr><td>%s</td></tr>\n" % ("</td><td>".join(sub_list))
return "<table>%s\n%s</table>" % (header, body) | cc244ec7f0bccedba2bb3bd66e29b3f43160f8c1 | 11,292 |
import numpy as np
def from_matrix_vector(matrix, vector):
"""Combine a matrix and vector into a homogeneous transform.
Combine a rotation matrix and translation vector into a transform
in homogeneous coordinates.
Parameters
----------
matrix : ndarray
        An NxM array representing the linear part of a transform from an
        M-dimensional space to an N-dimensional space.
vector : ndarray
A 1xN array representing the translation.
Returns
-------
xform : ndarray
An N+1xM+1 transform matrix.
See Also
--------
to_matrix_vector
"""
nin, nout = matrix.shape
t = np.zeros((nin+1,nout+1), matrix.dtype)
t[0:nin, 0:nout] = matrix
t[nin, nout] = 1.
t[0:nin, nout] = vector
return t | d4d49a4217d82c93b77aa1b50fc7a8d70875d11a | 11,293 |
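A minimal check with a planar rotation and a translation (assuming the function and numpy are in scope):

import numpy as np

rot = np.array([[0.0, -1.0],
                [1.0, 0.0]])      # 90-degree rotation in the plane
shift = np.array([2.0, 3.0])
xform = from_matrix_vector(rot, shift)
# [[ 0. -1.  2.]
#  [ 1.  0.  3.]
#  [ 0.  0.  1.]]
print(xform)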
from bisect import bisect_left
def take_closest(myList, myNumber):
"""
Assumes myList is sorted. Returns closest value to myNumber.
If two numbers are equally close, return the smallest number.
"""
pos = bisect_left(myList, myNumber)
if pos == 0:
return myList[0]
if pos == len(myList):
return myList[-1]
before = myList[pos - 1]
after = myList[pos]
if after - myNumber < myNumber - before:
return after
else:
return before | 6dea2edfb83bb8d78e7140d9bcc9b8f30441a3bf | 11,294 |
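Example behaviour, including the tie-breaking rule from the docstring:

values = [1, 3, 5, 8]
assert take_closest(values, 4) == 3     # tie between 3 and 5 -> smaller wins
assert take_closest(values, 7) == 8
assert take_closest(values, -10) == 1   # below the range -> first element
assert take_closest(values, 99) == 8    # above the range -> last element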
def check_host(host):
""" Helper function to get the hostname in desired format """
if not ('http' in host and '//' in host) and host[len(host) - 1] == '/':
return ''.join(['http://', host[:len(host) - 1]])
elif not ('http' in host and '//' in host):
return ''.join(['http://', host])
elif host[len(host) - 1] == '/':
return host[:len(host) - 1]
else:
return host | 0d035f616ce539f0a822aa1426cf3cd0eb766d04 | 11,295 |
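A few representative inputs and the normalised output:

assert check_host("example.com") == "http://example.com"
assert check_host("example.com/") == "http://example.com"
assert check_host("http://example.com/") == "http://example.com"
assert check_host("https://example.com") == "https://example.com"   # already qualified, returned as-is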
from datetime import datetime
def task_time_slot_add(request, task_id, response_format='html'):
"""Time slot add to preselected task"""
task = get_object_or_404(Task, pk=task_id)
if not request.user.profile.has_permission(task, mode='x'):
return user_denied(request, message="You don't have access to this Task")
if request.POST:
task_time_slot = TaskTimeSlot(
task=task, time_to=datetime.now(), user=request.user.profile)
form = TaskTimeSlotForm(
request.user.profile, task_id, request.POST, instance=task_time_slot)
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
elif form.is_valid():
task_time_slot = form.save()
task_time_slot.set_user_from_request(request)
return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
else:
form = TaskTimeSlotForm(request.user.profile, task_id)
subtasks = Object.filter_by_request(
request, Task.objects.filter(parent=task))
time_slots = Object.filter_by_request(
request, TaskTimeSlot.objects.filter(task=task))
context = _get_default_context(request)
context.update({'form': form,
'task': task,
'subtasks': subtasks,
'time_slots': time_slots})
return render_to_response('projects/task_time_add', context,
context_instance=RequestContext(request), response_format=response_format) | 250fbb87a80f2c2431b5e95b7c437f0b9562a1bc | 11,296 |
from typing import Any
def get_artist_names(res: dict[str, Any]) -> str:
"""
Retrieves all artist names for a given input to the "album" key of a response.
"""
artists = []
for artist in res["artists"]:
artists.append(artist["name"])
artists_str = ", ".join(artists)
return artists_str | 2913c813e7e6097cb2cb3d3dfb84f831bbc0a6e7 | 11,297 |
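For example, with a response fragment shaped like the album payload the function expects (the values below are made up):

album = {"artists": [{"name": "Artist A"}, {"name": "Artist B"}]}
assert get_artist_names(album) == "Artist A, Artist B"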
def json_response(func):
"""
View decorator function that converts the dictionary response
returned by a view function to django JsonResponse.
"""
@wraps(func)
def func_wrapper(request, *args, **kwargs):
func_response = func(request, *args, **kwargs)
status_code = func_response.get('status_code', 200)
return JsonResponse(func_response, status=status_code)
return func_wrapper | ad4a304b9e1434d7d0832fc9b535e2fd37228ad8 | 11,298 |
def clean_df(df):
"""return : pandas.core.frame.DataFrame"""
df.index = pd.DatetimeIndex(df.comm_time)
df = df.sort_index()
    df = df[~(np.abs(df.com_per-df.com_per.mean())>(3*df.com_per.std()))]  # drop rows more than three standard deviations from the mean; per-capita calculations use df2
df = df.drop('_id',1)
df = df.drop_duplicates()
return df | 055f0efcc79e0e551620a602cb8d9e8244d58a7e | 11,299 |