content (string, length 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---|
from tvm.relay.testing import mlp
def mlp_net():
"""The MLP test from Relay.
"""
return mlp.get_net(1) | 4e48dfb04bab1434bd581b7fc6cd5e2257c88022 | 16,862 |
def groupByX(grp_fn, messages, ignore_incomplete=True):
    """
    Returns a dictionary keyed by the group that grp_fn assigns to each message.
    If ignore_incomplete is True, messages with incomplete timing are skipped.
    """
m_grp = {}
for msg in getIterable(messages):
# Ignore messages that we don't have all the timing for.
if msg.isComplete() or not ignore_incomplete:
m_type = grp_fn(msg)
if m_type in m_grp:
m_grp[m_type].append(msg)
else:
m_grp[m_type] = [msg]
return m_grp | 9ebf63cfe81e8c8b6f19d90c725415cafdbcd636 | 16,864 |
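A minimal usage sketch for groupByX above; the Message class and the getIterable helper below are hypothetical stand-ins for whatever the original module provides, shown only to illustrate the grouping behaviour.

class Message:
    """Hypothetical message type with the interface groupByX expects."""
    def __init__(self, m_type, complete=True):
        self.m_type = m_type
        self._complete = complete
    def isComplete(self):
        return self._complete

def getIterable(messages):
    # Stand-in: assume messages is already an iterable of Message objects.
    return messages

msgs = [Message("ack"), Message("data"), Message("ack", complete=False)]
grouped = groupByX(lambda m: m.m_type, msgs)
print(sorted(grouped))      # ['ack', 'data']
print(len(grouped["ack"]))  # 1 -- the incomplete 'ack' message is skipped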
import math
def regular_poly_circ_rad_to_side_length(n_sides, rad):
"""Find side length that gives regular polygon with `n_sides` sides an
equivalent area to a circle with radius `rad`."""
p_n = math.pi / n_sides
return 2 * rad * math.sqrt(p_n * math.tan(p_n)) | 939ff5de399d7f0a31750aa03562791ee83ee744 | 16,865 |
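A quick sanity check for regular_poly_circ_rad_to_side_length: for a hexagon, the area computed from the returned side length should match the circle's area (standard regular-polygon area formula, standard library only).

import math

n_sides, rad = 6, 2.0
side = regular_poly_circ_rad_to_side_length(n_sides, rad)
poly_area = n_sides * side ** 2 / (4 * math.tan(math.pi / n_sides))
circle_area = math.pi * rad ** 2
assert abs(poly_area - circle_area) < 1e-9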
def dbl_colour(days):
"""
Return a colour corresponding to the number of days to double
:param days: int
:return: str
"""
if days >= 28:
return "orange"
elif 0 < days < 28:
return "red"
elif days < -28:
return "green"
else:
return "yellow" | 46af7d57487f17b937ad5b7332879878cbf84220 | 16,866 |
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPool2D, Reshape

def create_model(data_format):
"""Model to recognize digits in the MNIST data set.
Network structure is equivalent to:
https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/examples/tutorials/mnist/mnist_deep.py
and
https://github.com/tensorflow/models/blob/master/tutorials/image/mnist/convolutional.py
But uses the tf.keras API.
Args:
data_format: Either 'channels_first' or 'channels_last'. 'channels_first' is
typically faster on GPUs while 'channels_last' is typically faster on
CPUs. See
https://www.tensorflow.org/performance/performance_guide#data_formats
Returns:
A tf.keras.Model. """
# pylint: disable=no-member
if data_format == 'channels_first':
input_shape = [1, 28, 28]
else:
assert data_format == 'channels_last'
input_shape = [28, 28, 1]
return Sequential(
[
Reshape(target_shape=input_shape, input_shape=(28 * 28,)),
Conv2D(32, 5, padding='same', data_format=data_format, activation=tf.nn.relu,
kernel_initializer='random_uniform'),
MaxPool2D((2, 2), (2, 2), padding='same', data_format=data_format),
Conv2D(64, 5, padding='same', data_format=data_format, activation=tf.nn.relu,
kernel_initializer='random_uniform'),
MaxPool2D((2, 2), (2, 2), padding='same', data_format=data_format),
Flatten(),
Dense(1024, activation=tf.nn.relu, kernel_initializer='random_uniform'),
Dropout(0.4),
Dense(10, kernel_initializer='random_uniform')
]) | d6fe45e5cfef5246a220600b67e24cddceeebd3a | 16,867 |
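A short usage sketch for create_model, relying on the TensorFlow imports added above; it builds the model and checks the logits shape on a random batch (shapes and values here are illustrative only).

import numpy as np

model = create_model('channels_last')
logits = model(np.random.rand(8, 28 * 28).astype('float32'))
print(logits.shape)  # (8, 10)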
def run_noncentered_hmc(model_config,
num_samples=2000,
burnin=1000,
num_leapfrog_steps=4,
num_adaptation_steps=500,
num_optimization_steps=2000):
"""Given a (centred) model, this function transforms it to a fully non-centred
one, and runs HMC on the reparametrised model.
"""
tf.reset_default_graph()
return run_parametrised_hmc(
model_config=model_config,
interceptor=ed_transforms.ncp,
num_samples=num_samples,
burnin=burnin,
num_leapfrog_steps=num_leapfrog_steps,
num_adaptation_steps=num_adaptation_steps,
num_optimization_steps=num_optimization_steps) | 95065fb8c8ee778f0d300b46f285f8f3bb026aed | 16,868 |
import collections
def get_project_apps(in_app_list):
""" Application definitions for app name.
Args:
in_app_list: (list) - names of applications
Returns:
tuple (list, dictionary) - list of dictionaries with apps definitions
dictionary of warnings
"""
apps = []
warnings = collections.defaultdict(list)
if not in_app_list:
return apps, warnings
missing_app_msg = "Missing definition of application"
application_manager = ApplicationManager()
for app_name in in_app_list:
if application_manager.applications.get(app_name):
apps.append({"name": app_name})
else:
warnings[missing_app_msg].append(app_name)
return apps, warnings | 4e9be8ffddf44aba740414a8ee020376eda3a761 | 16,869 |
def read(G):
""" Wrap a NetworkX graph class by an ILPGraph class
    The wrapper class is used to store the graph and the related variables of an optimisation problem
in a single entity.
:param G: a `NetworkX graph <https://networkx.org/documentation/stable/reference/introduction.html#graphs>`__
:return: an :py:class:`~graphilp.imports.ilpgraph.ILPGraph`
"""
result = ILPGraph()
result.set_nx_graph(G)
return result | cb5db29d210d944047dbdf806ecfaaa274d517e8 | 16,870 |
import numpy as np
import autograd.numpy as anp

def slog_det(obs, **kwargs):
    """Computes the determinant of a matrix of Obs via np.linalg.slogdet."""
def _mat(x):
dim = int(np.sqrt(len(x)))
if np.sqrt(len(x)) != dim:
raise Exception('Input has to have dim**2 entries')
mat = []
for i in range(dim):
row = []
for j in range(dim):
row.append(x[j + dim * i])
mat.append(row)
(sign, logdet) = anp.linalg.slogdet(np.array(mat))
return sign * anp.exp(logdet)
if isinstance(obs, np.ndarray):
return derived_observable(_mat, (1 * (obs.ravel())).tolist(), **kwargs)
elif isinstance(obs, list):
return derived_observable(_mat, obs, **kwargs)
else:
        raise TypeError('Improper type of input.') | 20b4016653d83303ac671a5d2641d4c344393b0a | 16,871 |
def make_optimiser_form(optimiser):
"""Make a child form for the optimisation settings.
:param optimiser: the Optimiser instance
:returns: a subclass of FlaskForm; NB not an instance!
"""
# This sets up the initial form with the optimiser's parameters
OptimiserForm = make_component_form(optimiser)
# Now add options for specifying objectives
OptimiserForm.obj_min_A = BooleanField('Minimise A', default=True)
OptimiserForm.obj_min_sigma_varA = BooleanField('Minimise variance in A')
OptimiserForm.obj_min_B = BooleanField('Minimise B')
OptimiserForm.obj_max_C = BooleanField('Maximise C')
# Options saying which variables to optimise
OptimiserForm.var_bool_param = BooleanField(
'Optimise the choice of a binary option',
default=True)
OptimiserForm.var_int_param = BooleanField('Optimise the range of an integer',
default=True)
return OptimiserForm | 745c4a9c4268d31687215f4acec709a5eacfcbf0 | 16,872 |
import numpy as np
from random import randint
from sklearn.utils import shuffle

def prepare_for_evaluate(test_images, test_label):
    """
    Preprocess and return the images and labels for testing.
    :param test_images: original images for testing
    :param test_label: original labels for testing
    :return: preprocessed images and labels
    """
test_d = np.stack([preprocessing_for_testing(test_images[i]) for i in range(10000)], axis=0)
test_new_image, test_new_label = test_d, test_label
# Shuffle for 20 times
for time in range(20):
test_new_image, test_new_label = shuffle(test_d, test_label,
random_state=randint(0, test_images.shape[0]))
return test_new_image, test_new_label | abe60fc558c6cc2c951a4efee758d2746608d8d1 | 16,873 |
async def async_setup(hass, config):
"""Set up the PEVC modbus component."""
hass.data[DOMAIN] = {}
return True | cde898e2904f8e9cfcf60d260ee2476326877dd9 | 16,875 |
from typing import Any
def deserialize_value(val: str) -> Any:
"""Deserialize a json encoded string in to its original value"""
return _unpack_value(
seven.json.loads(check.str_param(val, "val")),
whitelist_map=_WHITELIST_MAP,
descent_path="",
) | d01ce83488ea743aae298b15d1fe5f4faac6adbc | 16,876 |
import six
import collections
def stringify(value):
"""
PHPCS uses a , separated strings in many places
because of how it handles options we have to do bad things
with string concatenation.
"""
if isinstance(value, six.string_types):
return value
    if isinstance(value, collections.abc.Iterable):  # collections.Iterable was removed in Python 3.10
return ','.join(value)
return str(value) | 1ca24ff986f3cd02c845ad0e11b8b1cfd3c7f779 | 16,878 |
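A few illustrative calls for stringify:

print(stringify('warning'))             # 'warning'
print(stringify(['warning', 'error']))  # 'warning,error'
print(stringify(3))                     # '3'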
def read_requirements_file(path):
""" reads requirements.txt file """
with open(path) as f:
requires = []
for line in f.readlines():
            if not line.strip():
                continue
requires.append(line.strip())
return requires | ab224bd3adac7adef76a2974a9244042f9aedf84 | 16,879 |
def vsa_get_all(context):
"""
Get all Virtual Storage Array records.
"""
session = get_session()
return session.query(models.VirtualStorageArray).\
options(joinedload('vsa_instance_type')).\
filter_by(deleted=can_read_deleted(context)).\
all() | 3568997b060fbeab115ec79c2c0cba77f78c6cba | 16,880 |
import threading
def thread_it(obj, timeout = 10):
""" General function to handle threading for the physical components of the system. """
    thread = threading.Thread(target=obj.run)
thread.start()
# Run the 'run' function in the obj
obj.ready.wait(timeout = timeout)
# Clean up
thread.join()
obj.ready.clear()
return None | 02ed60a560ffa65f0364aa7414b1fda0d3e62ac5 | 16,882 |
def _subsize_sub_pixel_align_cy_ims(pixel_aligned_cy_ims, subsize, n_samples):
"""
The inner loop of _sub_pixel_align_cy_ims() that executes on a "subsize"
region of the larger image.
    If subsize is None then it uses the entire image.
"""
n_max_failures = n_samples * 2
sub_pixel_offsets = np.zeros((n_samples, pixel_aligned_cy_ims.shape[0], 2))
pixel_aligned_cy0_im = pixel_aligned_cy_ims[0]
im_mea = pixel_aligned_cy_ims.shape[-1]
assert pixel_aligned_cy_ims.shape[-2] == im_mea
def _subregion(im, pos):
if subsize is None:
return im
else:
return imops.crop(im, off=pos, dim=WH(subsize, subsize), center=False)
sample_i = 0
n_failures = 0
while sample_i < n_samples and n_failures < n_max_failures:
try:
if subsize is None:
pos = XY(0, 0)
else:
pos = XY(
np.random.randint(0, im_mea - subsize - 16),
np.random.randint(0, im_mea - subsize - 16),
)
subregion_pixel_aligned_cy0_im = _subregion(pixel_aligned_cy0_im, pos)
for cy_i, pixel_aligned_cy_im in enumerate(pixel_aligned_cy_ims):
if cy_i == 0:
continue
# Use a small region to improve speed
subregion_pixel_aligned_cy_im = _subregion(pixel_aligned_cy_im, pos)
try:
_dy, _dx = _subpixel_align_one_im(
subregion_pixel_aligned_cy0_im, subregion_pixel_aligned_cy_im,
)
sub_pixel_offsets[sample_i, cy_i, :] = (_dy, _dx)
except Exception:
# This is a general exception handler because there
# are a number of ways that the _subpixel_align_one_im
                    # can fail including linear algebra, etc. All
# of which end up with a skip and a retry.
n_failures += 1
raise AlignmentError
sample_i += 1
except AlignmentError:
# Try again with a new pos
if n_failures >= n_max_failures:
raise AlignmentError
return np.mean(sub_pixel_offsets, axis=0) | f96a2bc9b4c55976fd4c49da3f59afa991c53ff1 | 16,883 |
def obj_setclass(this, klass):
"""
set Class for `this`!!
"""
return this.setclass(klass) | 4447df2f3055f21c9066a254290cdd037e812b64 | 16,884 |
def format(number, separator=' ', format=None, add_check_digit=False):
"""Reformat the number to the standard presentation format. The separator
used can be provided. If the format is specified (either 'hex' or 'dec')
the number is reformatted in that format, otherwise the current
representation is kept. If add_check_digit is True a check digit will be
added if it is not present yet."""
# first parse the number
number, cd = _parse(number)
# format conversions if needed
if format == 'dec' and len(number) == 14:
# convert to decimal
number = '%010d%08d' % (int(number[0:8], 16), int(number[8:14], 16))
if cd:
cd = calc_check_digit(number)
elif format == 'hex' and len(number) == 18:
# convert to hex
number = '%08X%06X' % (int(number[0:10]), int(number[10:18]))
if cd:
cd = calc_check_digit(number)
# see if we need to add a check digit
if add_check_digit and not cd:
cd = calc_check_digit(number)
# split number according to format
if len(number) == 14:
number = [number[i * 2:i * 2 + 2]
for i in range(7)] + [cd]
else:
number = (number[:5], number[5:10], number[10:14], number[14:], cd)
return separator.join(x for x in number if x) | 6890ed398eb7c173540c0392b9ed8ef66f8d170b | 16,885 |
def parse_equal_statement(line):
"""Parse super-sequence statements"""
seq_names = line.split()[1:]
return seq_names | ee0de00a990ac10c365af16dccf491b7ea8ed785 | 16,886 |
import numpy as np

def B5(n):
"""Factor Variables B5."""
return np.maximum(0, c4(n) - 3 * np.sqrt(1 - c4(n) ** 2)) | bc2fbd91e337310fe5d4326d55440ce2055da650 | 16,887 |
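B5 relies on the unbiasing constant c4, which is not part of this entry; below is a sketch under the usual definition c4(n) = sqrt(2/(n-1))·Γ(n/2)/Γ((n-1)/2), followed by a call.

import numpy as np
from scipy.special import gammaln

def c4(n):
    # Standard control-chart unbiasing constant, computed via log-gamma for stability.
    n = np.asarray(n, dtype=float)
    return np.sqrt(2.0 / (n - 1)) * np.exp(gammaln(n / 2) - gammaln((n - 1) / 2))

print(round(float(B5(10)), 3))  # approximately 0.276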
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

def y_yhat_plots(y, yh, title="y and y_score", y_thresh=0.5):
"""Output plots showing how y and y_hat are related:
the "confusion dots" plot is analogous to the confusion table,
and the standard ROC plot with its AOC value.
The y=1 threshold can be changed with the y_thresh parameter.
"""
# The predicted y value with threshold = y_thresh
y_pred = 1.0 * (yh > y_thresh)
# Show table of actual and predicted counts
crosstab = pd.crosstab(y, y_pred, rownames=[
'Actual'], colnames=[' Predicted'])
print("\nConfusion matrix (y_thresh={:.3f}):\n\n".format(y_thresh),
crosstab)
# Calculate the various metrics and rates
tn = crosstab[0][0]
fp = crosstab[1][0]
fn = crosstab[0][1]
tp = crosstab[1][1]
##print(" tn =",tn)
##print(" fp =",fp)
##print(" fn =",fn)
##print(" tp =",tp)
this_fpr = fp / (fp + tn)
this_fnr = fn / (fn + tp)
this_recall = tp / (tp + fn)
this_precision = tp / (tp + fp)
this_accur = (tp + tn) / (tp + fn + fp + tn)
this_posfrac = (tp + fn) / (tp + fn + fp + tn)
print("\nResults:\n")
print(" False Pos = ", 100.0 * this_fpr, "%")
print(" False Neg = ", 100.0 * this_fnr, "%")
print(" Recall = ", 100.0 * this_recall, "%")
print(" Precision = ", 100.0 * this_precision, "%")
print("\n Accuracy = ", 100.0 * this_accur, "%")
print(" Pos. fract. = ", 100.0 * this_posfrac, "%")
# Put them in a dataframe
ysframe = pd.DataFrame([y, yh, y_pred], index=[
'y', 'y-hat', 'y-pred']).transpose()
# If the yh is discrete (0 and 1s only) then blur it a bit
# for a better visual dots plot
if min(abs(yh - 0.5)) > 0.49:
ysframe["y-hat"] = (0.51 * ysframe["y-hat"]
+ 0.49 * np.random.rand(len(yh)))
# Make a "confusion dots" plot
# Add a blurred y column
ysframe['y (blurred)'] = y + 0.1 * np.random.randn(len(y))
# Plot the real y (blurred) vs the predicted probability
# Note the flipped ylim values.
ysframe.plot.scatter('y-hat', 'y (blurred)', figsize=(12, 5),
s=2, xlim=(0.0, 1.0), ylim=(1.8, -0.8))
# show the "correct" locations on the plot
plt.plot([0.0, y_thresh], [0.0, 0.0], '-',
color='green', linewidth=5)
plt.plot([y_thresh, y_thresh], [0.0, 1.0], '-',
color='gray', linewidth=2)
plt.plot([y_thresh, 1.0], [1.0, 1.0], '-',
color='green', linewidth=5)
plt.title("Confusion-dots Plot: " + title, fontsize=16)
# some labels
ythr2 = y_thresh/2.0
plt.text(ythr2 - 0.03, 1.52, "FN", fontsize=16, color='red')
plt.text(ythr2 + 0.5 - 0.03, 1.52, "TP", fontsize=16, color='green')
plt.text(ythr2 - 0.03, -0.50, "TN", fontsize=16, color='green')
plt.text(ythr2 + 0.5 - 0.03, -0.50, "FP", fontsize=16, color='red')
plt.show()
# Make the ROC curve
# Set the y-hat as the index and sort on it
ysframe = ysframe.set_index('y-hat').sort_index()
# Put y-hat back as a column (but the sorting remains)
ysframe = ysframe.reset_index()
# Initialize the counts for threshold = 0
p_thresh = 0
FN = 0
TN = 0
TP = sum(ysframe['y'])
FP = len(ysframe) - TP
# Assemble the fpr and recall values
recall = []
fpr = []
# Go through each sample in y-hat order,
# advancing the threshold and adjusting the counts
for iprob in range(len(ysframe['y-hat'])):
p_thresh = ysframe.iloc[iprob]['y-hat']
if ysframe.iloc[iprob]['y'] == 0:
FP -= 1
TN += 1
else:
TP -= 1
FN += 1
# Recall and FPR:
recall.append(TP / (TP + FN))
fpr.append(FP / (FP + TN))
# Put recall and fpr in the dataframe
ysframe['Recall'] = recall
ysframe['FPR'] = fpr
# - - - ROC - - - could be separate routine
zoom_in = False
# Calculate the area under the ROC
roc_area = 0.0
for ifpr in range(1, len(fpr)):
# add on the bit of area (note sign change, going from high fpr to low)
roc_area += 0.5 * (recall[ifpr] + recall[ifpr - 1]
) * (fpr[ifpr - 1] - fpr[ifpr])
plt.figure(figsize=(8, 8))
plt.title("ROC: " + title, size=16)
plt.plot(fpr, recall, '-b')
# Set the scales
if zoom_in:
plt.xlim(0.0, 0.10)
plt.ylim(0.0, 0.50)
else:
# full range:
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
# The reference line
plt.plot([0., 1.], [0., 1.], '--', color='orange')
# The point at the y_hat = y_tresh threshold
if True:
plt.plot([this_fpr], [this_recall], 'o', c='blue', markersize=15)
        plt.xlabel('False Positive Rate', size=16)
plt.ylabel('Recall', size=16)
plt.annotate('y_hat = {:.2f}'.format(y_thresh),
xy=(this_fpr + 0.015,
this_recall), size=14, color='blue')
plt.annotate(' Pos.Fraction = ' +
' {:.0f}%'.format(100 * this_posfrac),
xy=(this_fpr + 0.02, this_recall - 0.03),
size=14, color='blue')
# Show the ROC area (shows on zoomed-out plot)
plt.annotate('ROC Area = ' + str(roc_area)
[:5], xy=(0.4, 0.1), size=16, color='blue')
# Show the plot
plt.show()
return ysframe | 82a8154bd618cc1451a44b2b42fca0407e9979cb | 16,888 |
def _derive_scores(model, txt_file, base_words):
"""
Takes a model, a text file, and a list of base words.
Returns a dict of {base_word: score}, where score is an integer between 0
and 100 which represents the average similarity of the text to the given
word.
"""
with open(txt_file, 'r') as f:
text = f.read()
words = sample_words(text)
# This is a list of dicts of the form {base_word: score}.
raw_scores = [_single_word_score(model, base_words, word) for word in words]
summed_scores = {}
for base_word in base_words:
summed_scores[base_word] = sum([item[base_word] for item in raw_scores])
summed_scores[base_word] = round(
100 * summed_scores[base_word] / len(words)
)
return summed_scores | 9c377b5dec742f5ba174f780252fd78d162a8713 | 16,889 |
def features_ids_argument_parser() -> ArgumentParser:
"""
Creates a parser suitable to parse the argument describing features ids in different subparsers
"""
parser = ArgumentParser(add_help=False, parents=[collection_option_parser()])
parser.add_argument(FEATURES_IDS_ARGNAME, nargs='+',
help='features identifiers or features UUIDs')
return parser | df24ebaff182c88c7ad6cf38e2e4a5784d54a48b | 16,891 |
def isolate_blue_blocks(image, area_min=10, side_ratio=0.5):
"""Return a sequence of masks on the original area showing significant blocks of blue."""
contours, _ = cv2.findContours(
blue(image).astype(np.uint8) * 255, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
)
rects = []
for c in contours:
x, y, w, h = cv2.boundingRect(c)
if min(w, h) / max(w, h) > side_ratio and cv2.contourArea(c) > area_min:
rects.append((x, y, w, h))
    # Build one binary mask per retained rectangle, matching the docstring's
    # promise of a sequence of masks on the original image area.
    masks = []
    for x, y, w, h in rects:
        mask = np.zeros(image.shape[:2], dtype=np.uint8)
        mask[y:y + h, x:x + w] = 255
        masks.append(mask)
    return masks | 7cd6e895750f208e18c60a48d2bc0e8d1710d6c0 | 16,892 |
from typing import Union
from io import StringIO
from pathlib import Path
from typing import Optional
from typing import Dict
from typing import Callable
from typing import List
from typing import Tuple
def read_gtf(
filepath_or_buffer: Union[str, StringIO, Path],
expand_attribute_column: bool = True,
infer_biotype_column: bool = False,
column_converters: Optional[Dict[str, Callable[..., str]]] = None,
usecols: Optional[List[str]] = None,
features: Optional[Tuple[str]] = None,
chunksize: int = 1024 * 1024,
) -> pd.DataFrame:
"""
Parse a GTF into a dictionary mapping column names to sequences of values.
Parameters
----------
filepath_or_buffer : str or buffer object
Path to GTF file (may be gzip compressed) or buffer object
such as StringIO
expand_attribute_column : bool
Replace strings of semi-colon separated key-value values in the
'attribute' column with one column per distinct key, with a list of
values for each row (using None for rows where key didn't occur).
infer_biotype_column : bool
Due to the annoying ambiguity of the second GTF column across multiple
Ensembl releases, figure out if an older GTF's source column is actually
the gene_biotype or transcript_biotype.
column_converters : dict, optional
Dictionary mapping column names to conversion functions. Will replace
empty strings with None and otherwise passes them to given conversion
function.
usecols : list of str or None
Restrict which columns are loaded to the give set. If None, then
load all columns.
features : set of str or None
Drop rows which aren't one of the features in the supplied set
chunksize : int
"""
if isinstance(filepath_or_buffer, str):
filepath_or_buffer = Path(filepath_or_buffer)
if isinstance(filepath_or_buffer, Path) and not filepath_or_buffer.exists():
logger.exception(f"GTF file does not exist: {filepath_or_buffer}")
raise FileNotFoundError
if expand_attribute_column:
result_df = parse_gtf_and_expand_attributes(
filepath_or_buffer, chunksize=chunksize, restrict_attribute_columns=usecols
)
else:
result_df = parse_gtf(
filepath_or_buffer, chunksize=chunksize, features=features
)
if column_converters:
for column_name in column_converters:
result_df[column_name] = result_df[column_name].astype(
column_converters[column_name], errors="ignore"
)
# Hackishly infer whether the values in the 'source' column of this GTF
# are actually representing a biotype by checking for the most common
# gene_biotype and transcript_biotype value 'protein_coding'
if infer_biotype_column:
unique_source_values = result_df["source"].unique()
if "protein_coding" in unique_source_values:
column_names = result_df.columns.unique()
# Disambiguate between the two biotypes by checking if
# gene_biotype is already present in another column. If it is,
# the 2nd column is the transcript_biotype (otherwise, it's the
# gene_biotype)
if "gene_biotype" not in column_names:
logger.info("Using column 'source' to replace missing 'gene_biotype'")
result_df["gene_biotype"] = result_df["source"]
if "transcript_biotype" not in column_names:
logger.info(
"Using column 'source' to replace missing 'transcript_biotype'"
)
result_df["transcript_biotype"] = result_df["source"]
if usecols is not None:
column_names = result_df.columns.unique()
valid_columns = [c for c in usecols if c in column_names]
result_df = result_df[valid_columns]
return result_df | c7478d88ce5d6d6e823bf28c965f366b9b8d1522 | 16,893 |
def trimAlphaNum(value):
"""
Trims alpha numeric characters from start and ending of a given value
>>> trimAlphaNum(u'AND 1>(2+3)-- foobar')
u' 1>(2+3)-- '
"""
while value and value[-1].isalnum():
value = value[:-1]
while value and value[0].isalnum():
value = value[1:]
return value | e9d44ea5dbe0948b9db0c71a5ffcdd5c80e95746 | 16,895 |
def hrm_job_title_represent(id, row=None):
""" FK representation """
if row:
return row.name
elif not id:
return current.messages.NONE
db = current.db
table = db.hrm_job_title
r = db(table.id == id).select(table.name,
limitby = (0, 1)).first()
try:
return r.name
except:
return current.messages.UNKNOWN_OPT | ca3d2bfb4056b28b712f4cbf37c8c91d840c0161 | 16,896 |
def is_empty_array_expr(ir: irast.Base) -> bool:
"""Return True if the given *ir* expression is an empty array expression.
"""
return (
isinstance(ir, irast.Array)
and not ir.elements
) | dcf3775e7544ad64e9a533238a9549ed21dc3393 | 16,897 |
def get_raw_entity_names_from_annotations(annotations):
"""
    Args:
        annotations: annotations of the utterance
Returns:
Wikidata entities we received from annotations
"""
raw_el_output = annotations.get("entity_linking", [{}])
entities = []
try:
if raw_el_output:
if isinstance(raw_el_output[0], dict):
entities = raw_el_output[0].get("entity_ids", [])
if isinstance(raw_el_output[0], list):
entities = raw_el_output[0][0]
except Exception as e:
error_message = f"Wrong entity linking output format {raw_el_output} : {e}"
sentry_sdk.capture_exception(e)
logger.exception(error_message)
return entities | 482be69ef5fec52b70ade4839b48ae2f4155033b | 16,898 |
def nextPara(file, line):
"""Go forward one paragraph from the specified line and return the line
number of the first line of that paragraph.
Paragraphs are delimited by blank lines. It is assumed that the
current line is standalone (which is bogus).
- file is an array of strings
- line is the starting point (zero-based)"""
maxLine = len(file) - 1
# Skip over current paragraph
while (line != maxLine and not isempty(file[line])):
line = line + 1
# Skip over white space
while (line != maxLine and isempty(file[line])):
line = line + 1
return line | a104042225bc9404a5b5fe8f5410a3021e45f64d | 16,899 |
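A small usage sketch for nextPara; the isempty helper is assumed to test for a blank line and is a stand-in here, not taken from the original source.

def isempty(line):
    # Stand-in: treat whitespace-only lines as paragraph delimiters.
    return line.strip() == ""

doc = ["First paragraph, line 1", "line 2", "", "Second paragraph", ""]
print(nextPara(doc, 0))  # 3 -> first line of the following paragraph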
def build_asignar_anexos_query(filters, request):
"""
    Builds the search query from the filters.
"""
return filters.buildQuery().filter(ambito__path__istartswith=request.get_perfil().ambito.path).order_by('nombre') | 1a176182fa0559bac56df17f32345d2c4c22b1f1 | 16,900 |
def _median(data):
"""Return the median (middle value) of numeric data.
When the number of data points is odd, return the middle data point.
When the number of data points is even, the median is interpolated by
taking the average of the two middle values:
    >>> _median([1, 3, 5])
    3
    >>> _median([1, 3, 5, 7])
    4.0
"""
data = sorted(data)
n = len(data)
if n == 0:
raise ValueError("no median for empty data")
if n % 2 == 1:
return data[n // 2]
else:
i = n // 2
return (data[i - 1] + data[i]) / 2 | f05a6b067f95fc9e3fc9350b163b3e89c0792814 | 16,902 |
def num_translate(value: str) -> str:
"""переводит числительное с английского на русский """
str_out = NUM_DICT.get(value)
return str_out | 8555556843ea5235f462dbb7b092eaa09168ab0e | 16,903 |
def get_patch_shape(corpus_file):
"""Gets the patch shape (height, width) from the corpus file.
Args:
corpus_file: Path to a TFRecords file.
Returns:
A tuple (height, width), extracted from the first record.
Raises:
ValueError: if the corpus_file is empty.
"""
example = tf.train.Example()
try:
example.ParseFromString(next(tf.python_io.tf_record_iterator(corpus_file)))
except StopIteration as e:
raise ValueError('corpus_file cannot be empty: %s' % e)
return (example.features.feature['height'].int64_list.value[0],
example.features.feature['width'].int64_list.value[0]) | 054a43d1aa7809b55c57fa0e7574dd43273f4bae | 16,904 |
import re
def _TopologicallySortedEnvVarKeys(env):
"""Takes a dict |env| whose values are strings that can refer to other keys,
for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
env such that key2 is after key1 in L if env[key2] refers to env[key1].
Throws an Exception in case of dependency cycles.
"""
# Since environment variables can refer to other variables, the evaluation
# order is important. Below is the logic to compute the dependency graph
# and sort it.
regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
def GetEdges(node):
        # Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
# We can then reverse the result of the topological sort at the end.
# Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
matches = set([v for v in regex.findall(env[node]) if v in env])
for dependee in matches:
assert '${' not in dependee, 'Nested variables not supported: ' + dependee
return matches
try:
# Topologically sort, and then reverse, because we used an edge definition
# that's inverted from the expected result of this function (see comment
# above).
order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
order.reverse()
return order
except gyp.common.CycleError as e:
raise GypError(
'Xcode environment variables are cyclically dependent: ' + str(e.nodes)) | 78ee345ed67d61994245efff8b103d908aa1a7a4 | 16,905 |
def get_children_as_dict(parent):
"""For a given parent object, return all children as a dictionary with the childs tag as key"""
child_list = getChildElementsListWithSpecificXpath(parent, "*")
child_dict = {}
for child in child_list:
value = get_children_as_dict(child)
if child.tag not in child_dict:
child_dict[child.tag] = [value] if value != {} else [child.text]
else:
child_dict[child.tag].append(value if value != {} else child.text)
return child_dict | 054d3591a34536c79e0e5b3715dad6e414d29d46 | 16,906 |
import itertools
import six
def get_multi_tower_fn(num_gpus, variable_strategy,
model_fn, device_setter_fn, lr_provider):
"""Returns a function that will build the resnet model.
Args:
num_gpus: number of GPUs to use (obviously)
variable_strategy: "GPU" or "CPU"
model_fn: The function providing the model as in
loss, gradvars, preds = model_fn(is_training,
features,
labels,
data_format, params)
lr_provider: a function that takes a tf.train.get_global_step() and returns
a learning rate value for that step
device_setter_fn: A device setter
"""
def _multi_tower_model_fn(features, labels, mode, params):
"""A model function that distributes models amongst towers.
Support single host, one or more GPU training. Parameter distribution can
be either one of the following scheme.
1. CPU is the parameter server and manages gradient updates.
2. Parameters are distributed evenly across all GPUs, and the first GPU
manages gradient updates.
Args:
features: a list of tensors, one for each tower
labels: a list of tensors, one for each tower
mode: ModeKeys.TRAIN or EVAL
params: Hyperparameters suitable for tuning
Returns:
A EstimatorSpec object.
"""
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
momentum = params.momentum
tower_features = features
tower_labels = labels
tower_losses = []
tower_gradvars = []
tower_preds = []
# channels first (NCHW) is normally optimal on GPU and channels last (NHWC)
# on CPU. The exception is Intel MKL on CPU which is optimal with
# channels_last.
data_format = params.data_format
if not data_format:
if num_gpus == 0:
data_format = 'channels_last'
else:
data_format = 'channels_first'
if num_gpus == 0:
num_devices = 1
device_type = 'cpu'
else:
num_devices = num_gpus
device_type = 'gpu'
for i in range(num_devices):
worker_device = '/{}:{}'.format(device_type, i)
device_setter = device_setter_fn(
variable_strategy, worker_device, num_gpus)
with tf.variable_scope('neural_network', reuse=bool(i != 0)):
with tf.name_scope('tower_%d' % i) as name_scope:
with tf.device(device_setter):
loss, gradvars, preds = \
model_fn(is_training,
tower_features[i],
tower_labels[i],
data_format, params)
tower_losses.append(loss)
tower_gradvars.append(gradvars)
tower_preds.append(preds)
if i == 0:
# Only trigger batch_norm moving mean and variance update from
# the 1st tower. Ideally, we should grab the updates from all
# towers but these stats accumulate extremely fast so we can
# ignore the other stats from the other towers without
# significant detriment.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
name_scope)
# Now compute global loss and gradients.
gradvars = []
with tf.name_scope('gradient_averaging'):
all_grads = {}
for grad, var in itertools.chain(*tower_gradvars):
if grad is not None:
all_grads.setdefault(var, []).append(grad)
for var, grads in six.iteritems(all_grads):
# Average gradients on the same device as the variables
# to which they apply.
with tf.device(var.device):
if len(grads) == 1:
avg_grad = grads[0]
else:
avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))
gradvars.append((avg_grad, var))
# Device that runs the ops to apply global gradient updates.
consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'
with tf.device(consolidation_device):
learning_rate = lr_provider(tf.train.get_global_step())
loss = tf.reduce_mean(tower_losses, name='loss')
examples_sec_hook = reporting_utils.ExamplesPerSecondHook(
params.train_batch_size, every_n_steps=10)
tensors_to_log = {'learning_rate': learning_rate, 'loss': loss}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=100)
train_hooks = [logging_hook, examples_sec_hook]
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate, momentum=momentum)
if params.sync:
raise ValueError("We don't support parallel processing at the moment.")
# optimizer = tf.train.SyncReplicasOptimizer(
# optimizer, replicas_to_aggregate=num_workers)
# sync_replicas_hook = optimizer.make_session_run_hook(params.is_chief)
# train_hooks.append(sync_replicas_hook)
train_op = [
optimizer.apply_gradients(
gradvars, global_step=tf.train.get_global_step())
]
# noinspection PyUnboundLocalVariable
train_op.extend(update_ops)
train_op = tf.group(*train_op)
predictions = {
'classes':
tf.concat([p['classes'] for p in tower_preds], axis=0),
'probabilities':
tf.concat([p['probabilities'] for p in tower_preds], axis=0)
}
stacked_labels = tf.concat(labels, axis=0)
metrics = {
'accuracy':
tf.metrics.accuracy(stacked_labels, predictions['classes'])
}
# noinspection PyUnboundLocalVariable
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
training_hooks=train_hooks,
eval_metric_ops=metrics)
return _multi_tower_model_fn | 72e149d25cc6c80a8ea4c5f35f9215cb71aa2a65 | 16,908 |
from typing import Union
def extract_by_css(
content: str, selector: str, *, first: bool = True
) -> Union[str, list]:
"""Extract values from HTML content using CSS selector.
:param content: HTML content
:param selector: CSS selector
:param first: (optional) return first found element or all of them
    :return: value of the 1st found element or empty string if not found; or a list of all found elements
"""
extracted = ScrapySelector(text=content).css(selector).extract()
if first:
result = extracted[0] if len(extracted) > 0 else ""
else:
result = extracted
return result | b03d76c893c0f23da332c9978bedc5c9c408e840 | 16,909 |
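Assuming ScrapySelector above is an alias for scrapy.selector.Selector, a quick call against an inline HTML string looks like this.

html = "<html><body><p class='greeting'>hello</p><p class='greeting'>world</p></body></html>"
print(extract_by_css(html, "p.greeting::text"))               # 'hello'
print(extract_by_css(html, "p.greeting::text", first=False))  # ['hello', 'world']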
def generate_styles():
""" Create custom style rules """
# Set navbar so it's always at the top
css_string = "#navbar-top{background-color: white; z-index: 100;}"
# Set glossdef tip
css_string += "a.tip{text-decoration:none; font-weight:bold; cursor:pointer; color:#2196F3;}"
css_string += "a.tip:hover{position: relative;border-bottom: 1px dashed #2196F3;}"
# Set glossdef span
css_string += "a.tip span{display: none;background-color: white;font-weight: normal;border:1px solid gray;width: 250px;}"
css_string += "a.tip:hover span{display: block;position: absolute;z-index: 100;padding: 5px 15px;}"
return css_string | 44b36321dedba352c6aa30a352c7cd65cca1f79a | 16,910 |
from pathlib import Path
def get_config_file() -> Path:
"""
Get default config file.
"""
return get_project_root()/'data/config/config.yaml' | 92bc2af7e55b424bcf10355790f377c90a73cf9b | 16,911 |
def kilometers_to_miles(dist_km):
"""Converts km distance to miles
PARAMETERS
----------
dist_km : float
Scalar distance in kilometers
RETURNS
-------
dist_mi : float
        Scalar distance in miles
"""
return dist_km / 1.609344 | 61707d483961e92dcd290c7b0cd8ba8f650c7b5b | 16,912 |
def _objc_provider_framework_name(path):
"""Returns the name of the framework from an `objc` provider path.
Args:
path: A path that came from an `objc` provider.
Returns:
A string containing the name of the framework (e.g., `Foo` for `Foo.framework`).
"""
return path.rpartition("/")[2].partition(".")[0] | 607c040a9a9c56a793473ffcba779fc7d7a64ed5 | 16,913 |
def sample_publisher(name='EA'):
"""Create and return a sample publisher"""
return Publisher.objects.create(name=name) | da3d859897c9c3a6f98aa9a7950d77d1390a7527 | 16,915 |
def AddGlobalFile(gfile):
"""
Add a global file to the cmd string.
@return string containing knob
"""
string = ''
if gfile:
string = ' --global_file ' + gfile
return string | 70c4bee610766bbea4faf4e463f88ee65f8804f5 | 16,916 |
def read_file(filename):
"""Opens the file with the given filename and creates the puzzle in it.
Returns a pair consisting of the puzzle grid and the list of clues. Assumes
that the first line gives the size. Afterwards, the rows and clues are given.
The description of the rows and clues may interleave arbitrarily.
"""
size = 0
out_list = []
rows = []
clues = []
with open(filename, 'r') as file:
for line in file:
line = line.replace('\n', '')
line = split_type(line)
if line[0] == 'SIZE':
size = int(line[1])
elif line[0] == 'ROW':
rows.append(read_row(line[1]))
else:
clues.append(read_clue(line[1]))
return (rows, clues) | 332ea941aef3b484e2083bfcc734d4bc9fd7f62c | 16,918 |
def login_user(request):
"""View to login a new user"""
user = authenticate(username=request.POST['EMail'][:30], password=request.POST['Password'])
if user is not None:
if user.is_active:
login(request, user)
send_email("ROCK ON!!!", "User login - " + user.first_name + " " + user.last_name)
# Redirect to a success page.
return HttpResponse('success')
else:
# Return a 'disabled account' error message
return HttpResponse('Account disabled')
else:
# Return an 'invalid login' error message.
return HttpResponse('Invalid username or password') | ce3c126192df1aeab171438587cdc78a51ebda77 | 16,919 |
def gdf_convex_hull(gdf):
"""
Creates a convex hull around the total extent of a GeoDataFrame.
Used to define a polygon for retrieving geometries within. When calculating
densities for urban blocks we need to retrieve the full extent of e.g.
buildings within the blocks, not crop them to an arbitrary bounding box.
Parameters
----------
gdf : geodataframe
currently accepts a projected gdf
Returns
-------
shapely polygon
"""
### INSERT CHECK FOR CRS HERE?
# project gdf back to geographic coordinates as footprints_from_polygon
# requires it
gdf_temp = ox.projection.project_gdf(gdf, to_latlong=True)
# determine the boundary polygon to fetch buildings within
# buffer originally 0.000225, buffer actually needs to go whole block away
# to get complete highways therefor trying 0.001
    boundary = gdf_temp.unary_union.convex_hull.buffer(0.001)  # unary_union replaces the deprecated cascaded_union
# NOTE - maybe more efficient to generate boundary first then reproject second?
return boundary | c636e67b77ed312a952e7f4af3b3535c983417e3 | 16,920 |
from typing import Dict
from typing import Any
def room_operating_mode(mode: str) -> Dict[str, Any]:
"""Payload to set operating mode for
:class:`~pymultimatic.model.component.Room`.
"""
return {"operationMode": mode} | df5d1434d5994eca266a3fd06b6a742710bad0eb | 16,921 |
from typing import Dict
from typing import Any
def _validate_options(data: Dict[str, Any]) -> Dict[str, Any]:
"""
Looks up the exporter_type from the data, selects the correct export
options serializer based on the exporter_type and finally validates the data using
that serializer.
:param data: A dict of data to serialize using an exporter options serializer.
:return: validated export options data
"""
option_serializers = table_exporter_registry.get_option_serializer_map()
validated_exporter_type = validate_data(BaseExporterOptionsSerializer, data)
serializer = option_serializers[validated_exporter_type["exporter_type"]]
return validate_data(serializer, data) | 8d380d3052c3e1cd4d859fa46829034ba1cf6860 | 16,922 |
def householder_name (name, rank):
"""Returns if the name conforms to Householder notation.
>>> householder_name('A_1', 2)
True
>>> householder_name('foobar', 1)
False
"""
base, _, _ = split_name(name)
if base in ['0', '1']:
return True
elif rank == 0:
if base in GREEK_ALPHA:
return True
elif rank == 1:
if len(base) == 1 and base.isalpha() and base.islower():
return True
elif rank == 2:
if len(base) == 1 and base.isupper() and base.isalpha():
return True
return False | 63ff3395e065a79b4d5ee76fb3092efa0cb32b2b | 16,923 |
def calculateDerivatives(x,t,id):
"""
dxdt, x0, id_, x_mean = calculateDerivatives(x,t,id)
Missing data is assumed to be encoded as np.nan
"""
nm = ~np.isnan(t) & ~np.isnan(x) # not missing
id_u = np.unique(id)
id_ = []
dxdt = []
x0 = []
x_mean = []
for k in range(0,len(id_u)):
rowz = id==id_u[k]
rowz = rowz & nm
t_k = t[rowz]
x_k = x[rowz]
if np.sum(rowz)>1:
# Gradient via linear regression
lm = np.polyfit(t_k,x_k,1)
id_.append(id_u[k])
dxdt.append(lm[0])
x0.append(lm[1])
x_mean.append(np.nanmean(x_k))
print('k = {0} \n * n = {1}\n * dx/dt = {2} | x0 = {3} | mean(x) = {4}'.format(k,sum(rowz),dxdt[-1],x0[-1],x_mean[-1]))
#plt.plot(t[rowz],x[rowz],'x')
#plt.plot([min(t[rowz]),max(t[rowz])],[min(t[rowz]),max(t[rowz])]*dxdt[-1] + x0[-1],'-')
#plt.show()
# Remove any nan
dxdt_isnan = np.isnan(dxdt)
x0_isnan = np.isnan(x0)
dxdt = np.delete(dxdt,np.where(dxdt_isnan | x0_isnan)[0])
x0 = np.delete(x0,np.where(dxdt_isnan | x0_isnan)[0])
id_u = np.delete(id_u,np.where(dxdt_isnan | x0_isnan)[0])
return dxdt, x0, id_, x_mean | 86a6e2fc3e50e65ffd162728b79b50ab6ee09a81 | 16,924 |
import requests
import time
def SolveCaptcha(api_key, site_key, url):
"""
Uses the 2Captcha service to solve Captcha's for you.
Captcha's are held in iframes; to solve the captcha, you need a part of the url of the iframe. The iframe is usually
inside a div with id=gRecaptcha. The part of the url we need is the query parameter k, this is called the site_key:
www.google.com/recaptcha/api2/anchor?ar=1&k=6LcleDIUAAAAANqkex-vX88sMHw8FXuJQ3A4JKK9&co=aHR0cHM6Ly93d3cuZGljZS5jb206NDQz&hl=en&v=oqtdXEs9TE9ZUAIhXNz5JBt_&size=normal&cb=rpcg9w84syix
k=6LcleDIUAAAAANqkex-vX88sMHw8FXuJQ3A4JKK9
Here the site_key is 6LcleDIUAAAAANqkex-vX88sMHw8FXuJQ3A4JKK9
You also need to supply the url of the current page you're on.
This function will return a string with the response key from captcha validating the test. This needs to be inserted
into an input field with the id=g-recaptcha-response.
:param api_key: The 2Captcha API key.
:param site_key: The site_key extracted from the Captcha iframe url
:param url: url of the site you're on
:return: The response from captcha validating the test
"""
print("Solving Captcha...")
print("Sending Request...")
request_response = requests.get("https://2captcha.com/in.php?", params={
"googlekey": site_key,
"method": "userrecaptcha",
"pageurl": url,
"key": api_key,
"json": 1,
"invisible": 0,
})
request_response.raise_for_status()
print("Waiting for Response...")
time.sleep(30)
answer_response_json = {'status': 0, 'request': 'CAPCHA_NOT_READY'}
while answer_response_json['request'] == 'CAPCHA_NOT_READY':
answer_response = requests.get("https://2captcha.com/res.php", params={
"key": api_key,
"action": "get",
"id": request_response.json()['request'],
"json": 1
})
answer_response_json = answer_response.json()
print(answer_response_json)
time.sleep(5)
if answer_response_json['status'] == 1:
print("Solved!")
return answer_response_json['request']
elif answer_response_json['request'] == 'ERROR_CAPTCHA_UNSOLVABLE':
raise TimeoutError("ERROR_CAPTCHA_UNSOLVABLE")
else:
raise Exception(answer_response_json['request']) | e610a265d03be65bfd6321a266776a8102c227d0 | 16,925 |
def clean_crn(crn, duplicates = True, trivial = True, inter = None):
"""Takes a crn and removes trivial / duplicate reactions. """
new = []
seen = set()
for [R, P] in crn:
lR = sorted(interpret(R, inter)) if inter else sorted(R)
lP = sorted(interpret(P, inter)) if inter else sorted(P)
tR = tuple(lR)
tP = tuple(lP)
if trivial and tR == tP:
continue
if duplicates and (tR, tP) in seen:
continue
new.append([lR, lP])
seen.add((tR, tP))
return new | 28f4e8eac7b6aea0505491ef55ce54d8d05f0069 | 16,927 |
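A tiny example for clean_crn with inter=None, dropping one trivial and one order-permuted duplicate reaction.

crn = [
    [['A'], ['A']],        # trivial: reactants equal products
    [['A', 'B'], ['C']],
    [['B', 'A'], ['C']],   # duplicate of the previous reaction after sorting
]
print(clean_crn(crn))      # [[['A', 'B'], ['C']]]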
def get_db_mapping(mesh_id):
"""Return mapping to another name space for a MeSH ID, if it exists.
Parameters
----------
mesh_id : str
The MeSH ID whose mappings is to be returned.
Returns
-------
tuple or None
A tuple consisting of a DB namespace and ID for the mapping or None
if not available.
"""
return mesh_to_db.get(mesh_id) | ae3f8de5c93ab0230a7c87edfa2d3996a9c8667b | 16,928 |
import numpy as np
from numpy import exp, log
from scipy.special import eval_laguerre, jv

def MC_dBESQ_gateway(N = 10**6, t = 0, n0 = 0, test = 'laguerre', method = 'laguerre', args = [], num_decimal = 4):
    """
    Monte Carlo estimator of expected dBESQ using birth-death simulation, exact BESQ solution, dLaguerre simulation
    or PDE systems.
    :param N: int, Number of simulations
    :param t: positive float, Simulation horizon
    :param n0: initial value of X
    :param method: simulation method, currently support {'birth-death', 'exact-besq', 'laguerre', 'pde'}
    :param test: defines test function
    :param args: arguments to define test function
    """
if method == 'birth-death':
if test == 'laguerre':
f = lambda n : eval_laguerre(n, 1)
xt_array = bd_simulator(t, x0=n0, num_paths=N, method='bessel', num_threads=4)
return np.mean(f(xt_array)).round(num_decimal)
elif method == 'exact-besq':
if test == 'laguerre':
return np.mean(exp(-t+1)*jv(0, 2*np.sqrt(np.random.gamma(n0+1)))).round(num_decimal)
elif method == 'laguerre':
if test == 'laguerre':
f = lambda n : eval_laguerre(n, 1)
s = log(t / 2)
def poisson_x0():
return np.random.poisson(np.random.gamma(n0+1))
xt_array = bd_simulator(s, x0=poisson_x0, num_paths=N, method='laguerre', num_threads=4)
return np.mean(f(np.random.poisson(t/2 *np.random.gamma(xt_array+1)))).round(num_decimal) | 6d09ca8ef2f772e194c7ae656ec4bf9e8a2b6948 | 16,929 |
def resolve_image(image):
""" Resolve an informal image tag into a full Docker image tag. Any tag
available on Docker Hub for Neo4j can be used, and if no 'neo4j:' prefix
exists, this will be added automatically. The default edition is
Community, unless a cluster is being created in which case Enterprise
edition is selected instead. Explicit selection of Enterprise edition can
be made by adding an '-enterprise' suffix to the image tag.
If a 'file:' URI is passed in here instead of an image tag, the Docker
image will be loaded from that file instead.
Examples of valid tags:
- 3.4.6
- neo4j:3.4.6
- latest
- file:/home/me/image.tar
"""
if image.startswith("file:"):
return load_image_from_file(image[5:])
elif ":" in image:
return image
else:
return "neo4j:" + image | 7d03b936f90c459a2dade179d9e38bd17c8c1af8 | 16,931 |
def _cifar_meanstd_normalize(image):
"""Mean + stddev whitening for CIFAR-10 used in ResNets.
Args:
image: Numpy array or TF Tensor, with values in [0, 255]
Returns:
image: Numpy array or TF Tensor, shifted and scaled by mean/stdev on
CIFAR-10 dataset.
"""
# Channel-wise means and std devs calculated from the CIFAR-10 training set
cifar_means = [125.3, 123.0, 113.9]
cifar_devs = [63.0, 62.1, 66.7]
rescaled_means = [x / 255. for x in cifar_means]
rescaled_devs = [x / 255. for x in cifar_devs]
image = (image - rescaled_means) / rescaled_devs
return image | 286ab555d30fd779c093e3b8801821f8370e1ca8 | 16,932 |
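A quick numpy check of the whitening above. Note that the means and stddevs inside are rescaled to the [0, 1] range, so the input here is scaled the same way; the [0, 255] wording in the docstring presumably refers to the pixel range before that rescaling.

import numpy as np

image = np.random.rand(32, 32, 3)  # already rescaled to [0, 1]
normalized = _cifar_meanstd_normalize(image)
print(normalized.shape)                       # (32, 32, 3)
print(normalized.mean(axis=(0, 1)).round(2))  # per-channel means after whitening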
def get_value_counts_and_frequencies(elem: Variable, data: pd.DataFrame) -> Categories:
"""Call function to generate frequencies depending on the variable type
Input:
elem: dict
data: pandas DataFrame
Output:
statistics: OrderedDict
"""
statistics: Categories = Categories()
_scale = elem["scale"]
statistics.update(get_categorical_frequencies(elem, data))
return statistics | 7afe35dc605c1eb25158c8a948eeabcfb0027dc6 | 16,933 |
import numpy as np
from scipy.stats import linregress

def determineLinearRegions(data, minLength=.1, minR2=.96, maxSlopeInterceptDiff=.75):
"""
Determine regions of a plot that are approximately linear by performing
linear least-squares on a rolling window.
Parameters
----------
data : array_like
Data within which linear regions are to be identified
minLength : int or float
The minimum length of a linear segment, either as an
integer number of indices, or as a float fraction of the
overall data length.
minR2 : float
The minimum r-squared value for a region to be
considered linear.
maxSlopeInterceptDiff : float
The float percentage difference allowed between slopes
and intercepts of adjacent slices for them to be
considered the same region.
Returns
-------
regionIndices : np.ndarray[N,2]
The start and end indices for the N detected regions.
slopes : np.ndarray[N]
The slope of each region.
intercepts : np.ndarray[N]
The intercept of each region.
"""
if minLength < 1:
minLinSteps = int(len(data)*minLength)
else:
minLinSteps = int(minLength)
inLinearRegion = False
linearRegions = []
slopes = []
intercepts = []
# Perform least squares on a rolling window
i = 0
while i < len(data) - minLinSteps:
xArr = np.arange(i, i+minLinSteps)
slope, intercept, r2, p_value, std_err = linregress(xArr, data[i:i+minLinSteps])
if np.abs(r2) > minR2:
if inLinearRegion:
# Calculate how different new slope is from old one
if np.abs((np.mean(slopes[-1]) - slope) / np.mean(slopes[-1])) < maxSlopeInterceptDiff and np.abs((np.mean(intercepts[-1]) - intercept) / np.mean(intercepts[-1])) < maxSlopeInterceptDiff:
# This is still the same linear region, so we extend the bounds
linearRegions[-1][1] = i+minLinSteps
# And average in the slopes and intercepts
slopes[-1] += [slope]
intercepts[-1] += [intercept]
else:
# Otherwise, we have a new linear region, which we start
# at the end of the other one
i = linearRegions[-1][1]
inLinearRegion = False
continue
else:
# New linear region
linearRegions.append([i, i+minLinSteps])
slopes.append([slope])
intercepts.append([intercept])
inLinearRegion = True
else:
inLinearRegion = False
i += 1
slopes = np.array([np.mean(s) for s in slopes])
intercepts = np.array([np.mean(inter) for inter in intercepts])
return np.array(linearRegions), slopes, intercepts | 318672634082ae87b18f087e8aee65efc1da3f59 | 16,934 |
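A synthetic check for determineLinearRegions, using the numpy and scipy imports added at the top of this entry; the slopes of the detected regions should cluster around 2 and -1 (exact boundaries depend on the rolling-window parameters).

import numpy as np

# Piecewise-linear data: slope 2 on the first half, slope -1 on the second half.
data = np.concatenate([2.0 * np.arange(100), 200.0 - np.arange(100)])
regions, slopes, intercepts = determineLinearRegions(data)
print(regions)  # index ranges of the detected linear segments
print(slopes)   # slopes, approximately 2 and -1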
import numpy as np

def compute_dispersion(aperture, beam, dispersion_type, dispersion_start,
mean_dispersion_delta, num_pixels, redshift, aperture_low, aperture_high,
weight=1, offset=0, function_type=None, order=None, Pmin=None, Pmax=None,
*coefficients):
"""
Compute a dispersion mapping from a IRAF multi-spec description.
:param aperture:
The aperture number.
:param beam:
The beam number.
:param dispersion_type:
An integer representing the dispersion type:
0: linear dispersion
1: log-linear dispersion
2: non-linear dispersion
:param dispersion_start:
The value of the dispersion at the first physical pixel.
:param mean_dispersion_delta:
The mean difference between dispersion pixels.
:param num_pixels:
The number of pixels.
:param redshift:
The redshift of the object. This is accounted for by adjusting the
dispersion scale without rebinning:
>> dispersion_adjusted = dispersion / (1 + redshift)
:param aperture_low:
The lower limit of the spatial axis used to compute the dispersion.
:param aperture_high:
The upper limit of the spatial axis used to compute the dispersion.
:param weight: [optional]
A multiplier to apply to all dispersion values.
:param offset: [optional]
A zero-point offset to be applied to all the dispersion values.
:param function_type: [optional]
An integer representing the function type to use when a non-linear
dispersion mapping (i.e. `dispersion_type = 2`) has been specified:
1: Chebyshev polynomial
2: Legendre polynomial
3: Cubic spline
4: Linear spline
5: Pixel coordinate array
6: Sampled coordinate array
:param order: [optional]
The order of the Legendre or Chebyshev function supplied.
:param Pmin: [optional]
The minimum pixel value, or lower limit of the range of physical pixel
coordinates.
:param Pmax: [optional]
The maximum pixel value, or upper limit of the range of physical pixel
coordinates.
:param coefficients: [optional]
The `order` number of coefficients that define the Legendre or Chebyshev
polynomial functions.
:returns:
An array containing the computed dispersion values.
"""
if dispersion_type in (0, 1):
# Simple linear or logarithmic spacing
dispersion = \
dispersion_start + np.arange(num_pixels) * mean_dispersion_delta
        if dispersion_type == 1:
dispersion = 10.**dispersion
elif dispersion_type == 2:
# Non-linear mapping.
if function_type is None:
raise ValueError("function type required for non-linear mapping")
elif function_type not in range(1, 7):
raise ValueError(
"function type {0} not recognised".format(function_type))
if function_type == 1:
order = int(order)
n = np.linspace(-1, 1, Pmax - Pmin + 1)
temp = np.zeros((Pmax - Pmin + 1, order), dtype=float)
temp[:, 0] = 1
temp[:, 1] = n
for i in range(2, order):
temp[:, i] = 2 * n * temp[:, i-1] - temp[:, i-2]
for i in range(0, order):
temp[:, i] *= coefficients[i]
dispersion = temp.sum(axis=1)
elif function_type == 2:
# Legendre polynomial.
if None in (order, Pmin, Pmax, coefficients):
raise TypeError("order, Pmin, Pmax and coefficients required "
"for a Chebyshev or Legendre polynomial")
Pmean = (Pmax + Pmin)/2
Pptp = Pmax - Pmin
x = (np.arange(num_pixels) + 1 - Pmean)/(Pptp/2)
p0 = np.ones(num_pixels)
p1 = mean_dispersion_delta
dispersion = coefficients[0] * p0 + coefficients[1] * p1
for i in range(2, int(order)):
if function_type == 1:
# Chebyshev
p2 = 2 * x * p1 - p0
else:
# Legendre
p2 = ((2*i - 1)*x*p1 - (i - 1)*p0) / i
dispersion += p2 * coefficients[i]
p0, p1 = (p1, p2)
elif function_type == 3:
# Cubic spline.
if None in (order, Pmin, Pmax, coefficients):
raise TypeError("order, Pmin, Pmax and coefficients required "
"for a cubic spline mapping")
s = (np.arange(num_pixels, dtype=float) + 1 - Pmin)/(Pmax - Pmin) \
* order
j = s.astype(int).clip(0, order - 1)
a, b = (j + 1 - s, s - j)
x = np.array([
a**3,
1 + 3*a*(1 + a*b),
1 + 3*b*(1 + a*b),
b**3])
dispersion = np.dot(np.array(coefficients), x.T)
else:
raise NotImplementedError("function type not implemented yet")
else:
raise ValueError(
"dispersion type {0} not recognised".format(dispersion_type))
# Apply redshift correction.
dispersion = weight * (dispersion + offset) / (1 + redshift)
return dispersion | 94fcb70652bad0f2fa26cf73981129f3ae949d8b | 16,935 |
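A minimal call for the simple linear case (dispersion_type = 0) with zero redshift and unit weight.

disp = compute_dispersion(
    aperture=1, beam=1, dispersion_type=0, dispersion_start=4000.0,
    mean_dispersion_delta=1.5, num_pixels=5, redshift=0.0,
    aperture_low=1, aperture_high=1)
print(disp)  # [4000.  4001.5 4003.  4004.5 4006. ]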
def normalize_pcp_area(pcp):
"""
Normalizes a pcp so that the sum of its content is 1,
outputting a pcp with up to 3 decimal points.
"""
pcp = np.divide(pcp, np.sum(pcp))
new_format = []
for item in pcp:
new_format.append(item)
return np.array(new_format) | ea0feeda3f8515b538ae62b08aad09a16ddb2a73 | 16,936 |
def calc_line_flux(spec, ws, ivar, w0, w1, u_flux):
""" calculate the flux and flux error of the line within the range w0 and w1 using trapz rule"""
u_spec = spec.unit
u_ws = ws.unit
ivar = ivar.to(1./(u_spec**2))
spec_uless = np.array(spec)
ws_uless = np.array(ws)
ivar_uless = np.array(ivar)
if ivar.unit != (1./(spec.unit**2)):
raise Exception("[spector] spec and ivar units inconsistent")
# select region to integrate
select_ws = (ws_uless > w0) & (ws_uless < w1)
ws_sel = ws_uless[select_ws]
spec_sel = spec_uless[select_ws]
ivar_sel = ivar_uless[select_ws]
var_sel = 1./ivar_sel
# integrate
f, fvar = trapz_var(x=ws_sel, y=spec_sel, yvar=var_sel)
f = (f*u_spec*u_ws).to(u_flux)
ferr = (np.sqrt(fvar)*u_spec*u_ws).to(u_flux)
return f, ferr | 78206ce98025ead64e207bd69a8adf1a31178744 | 16,937 |
def boolean(entry, option_key="True/False", **kwargs):
"""
Simplest check in computer logic, right? This will take user input to flick the switch on or off
Args:
entry (str): A value such as True, On, Enabled, Disabled, False, 0, or 1.
option_key (str): What kind of Boolean we are setting. What Option is this for?
Returns:
Boolean
"""
error = f"Must enter 0 (false) or 1 (true) for {option_key}. Also accepts True, False, On, Off, Yes, No, Enabled, and Disabled"
if not isinstance(entry, str):
raise ValueError(error)
entry = entry.upper()
if entry in ("1", "TRUE", "ON", "ENABLED", "ENABLE", "YES"):
return True
if entry in ("0", "FALSE", "OFF", "DISABLED", "DISABLE", "NO"):
return False
raise ValueError(error) | d62b36d08651d02719b5866b7798c36efd2a018f | 16,938 |
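A few calls showing the accepted spellings for the boolean option parser.

print(boolean("yes"))      # True
print(boolean("Off"))      # False
print(boolean("ENABLED"))  # True
boolean("maybe")           # raises ValueError listing the accepted values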
import numpy as np
from copy import copy
from itertools import product
from math import ceil, floor
from tqdm import tqdm

def edge_preserving_filter(ref_map: np.ndarray, guided_image: np.ndarray,
window_size: int, epsilon: float = 1e-10) -> np.ndarray:
"""
Perform edge - preserving filtering on the newly created reference map.
:param ref_map: Classification reference map.
:param guided_image: Guided image as a mean over all bands from hyperspectral data.
:param window_size: Size of the convolving window.
:param epsilon: Regularizer constant.
:return: Improved classification map.
"""
print("Window size = {}".format(window_size))
col_indexes, row_indexes = \
range(0, ref_map.shape[ROW_AXIS], window_size), range(0, ref_map.shape[COLUMNS_AXIS], window_size)
print("Calculating coefficients:")
a_k_map, b_k_map = np.empty(shape=ref_map.shape), np.empty(shape=ref_map.shape)
for i in tqdm(range(ref_map.shape[SPECTRAL_AXIS]), total=ref_map.shape[SPECTRAL_AXIS]):
for row, col in product(col_indexes, row_indexes):
p_k = copy(ref_map[row:row + window_size, col:col + window_size, i])
i_k = copy(guided_image[row:row + window_size, col:col + window_size])
sum_ = np.sum(i_k * p_k - np.mean(i_k) * np.mean(p_k)) / (window_size ** 2)
a_k = sum_ / (np.var(i_k) + epsilon)
b_k = np.mean(p_k) - a_k * np.mean(i_k)
a_k_map[row:row + window_size, col:col + window_size, i] = a_k
b_k_map[row:row + window_size, col:col + window_size, i] = b_k
output_image = np.empty(shape=ref_map.shape)
print("Calculating new \"improved\" classification map:")
for i in tqdm(range(ref_map.shape[SPECTRAL_AXIS]), total=ref_map.shape[SPECTRAL_AXIS]):
for row_index, col_index in product(range(ref_map.shape[ROW_AXIS]), range(ref_map.shape[COLUMNS_AXIS])):
a_k_sum, b_k_sum = 0, 0
row_sub_indexes, col_sub_indexes = \
list(filter(lambda x: 0 <= x < ref_map.shape[ROW_AXIS],
list(range(row_index - floor(window_size / 2),
row_index + ceil(window_size / 2))))), \
list(filter(lambda x: 0 <= x < ref_map.shape[COLUMNS_AXIS],
list(range(col_index - floor(window_size / 2),
col_index + ceil(window_size / 2)))))
for sub_row_idx, sub_col_idx in product(row_sub_indexes, col_sub_indexes):
a_k_sum += a_k_map[sub_row_idx, sub_col_idx, i]
b_k_sum += b_k_map[sub_row_idx, sub_col_idx, i]
a_k_sum, b_k_sum = a_k_sum / (row_sub_indexes.__len__() * col_sub_indexes.__len__()), \
b_k_sum / (row_sub_indexes.__len__() * col_sub_indexes.__len__())
output_image[row_index, col_index, i] = a_k_sum * guided_image[row_index, col_index] + b_k_sum
output_image = np.argmax(output_image, axis=-1) + BG_CLASS
return output_image | 1ca88240c26fd4eae67f869bc95a8b0ce885260b | 16,939 |
from typing import List
def preprocess_annotated_utterance(
annotated_utterance: str,
not_entity: str = NOT_ENTITY,
) -> List[str]:
"""Character Level Entity Label Producer
Named-entity of each character is extracted by XML-like annotation.
Also, they would be collected in a list conform to the order of characters
in the sentence.
Args:
annotated_utterance (a string):
An utterance with annotations looks like <a>blabla</a>.
It is a special format for labeling named-entity in an utterance.
not_entity (a string, default = "DONT_CARE"):
A representation of words that we don't care about.
Returns:
entities (a list of string):
A list of named-entity labels in character level.
Examples:
>>> from ynlu.sdk.evaluation.utils import preprocess_annotated_utterance
>>> preprocess_annotated_utterance(
annotated_utterance="<drink>Coffee</drink>, please.",
not_entity="n",
)
>>> ["drink", "drink", "drink", "drink", "drink", "drink", "n",
"n", "n", "n", "n", "n", "n", "n", "n"]
"""
clean_utterance = remove_annotation(annotated_utterance)
entity_word_pair = FINDALL_PROG.findall(annotated_utterance)
entities = [not_entity] * len(clean_utterance)
begin_index = 0
for entity, word in entity_word_pair:
start_idx = clean_utterance.find(word, begin_index)
if start_idx == -1:
raise ValueError(
"Word {} can not be found in {}".format(word, clean_utterance),
)
entities[start_idx: start_idx + len(word)] = [entity] * len(word)
begin_index = start_idx + len(word)
return entities | b148f19017b97a0f4859abc129cdca50fd187c15 | 16,940 |
from typing import List
import jinja2
def generate_constant_table(
name: str,
constants: List[Constant],
*,
data_type: str = "LREAL",
guid: str = "",
lookup_by_key: bool = False,
**kwargs
) -> str:
"""
Generate a GVL constant table, with no interpolation.
Parameters
----------
name : str
The code block name.
constants : list of Constant
Dictionary of name to dataframe.
data_type : str, optional
The data type. Defaults to LREAL.
guid : str, optional
The function block globally unique identifier / GUID.
table_prefix : str, optional
The name with which to prefix all table arrays.
lookup_input : str, optional
The function block input variable name - the indexed parameter which
you're looking up in the table.
lookup_index : int, optional
The per-row array index of the lookup value. Not fully supported
        just yet; leave this at 0 for now.
row_delta_variable : str, optional
The auto-generated code delta variable. Not necessary to set, unless
you really want to customize the output.
**kwargs :
Additional keyword arguments to pass to or override in the template.
Returns
-------
code : str
The constant table source code.
"""
template_kw = dict(
name=name,
guid=guid or guid_from_string(name),
data_type=data_type,
constants=constants,
)
template_kw.update(kwargs)
template_fn = (
CONSTANT_GVL_LOOKUP_TEMPLATE
if lookup_by_key
else CONSTANT_GVL_TEMPLATE
)
    with open(template_fn, "rt") as template_file:
        template = jinja2.Template(template_file.read())
return template.render(template_kw) | 54b491ac3673c68a0e7ef819389e393e921d841f | 16,941 |
def filter_stop_words(text):
"""
Filter all stop words from a string to reduce headline size.
:param text: text to filter
:return: shortened headline
"""
    # `s` is the module-level stop-word set; keep at most the first 20 remaining words.
    words = filter(lambda w: w not in s, text.split())
    line = ""
    word_count = 0
    for w in words:
        if word_count < 20:
            line += w + " "
            word_count += 1
        else:
            return line.strip()
    return line.strip() | d27b63018fa8f7b2d072e768c54ce4a056c58ff1 | 16,942
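# Usage sketch: `s` is the module-level stop-word set the function reads; a small
# stand-in set is assumed here for illustration.
s = {"the", "a", "an", "of", "and", "in", "to", "over"}
print(filter_stop_words("the quick brown fox jumps over the lazy dog in the garden"))
# -> "quick brown fox jumps lazy dog garden"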
from Crypto.PublicKey import RSA
def importPublicKey(publickey):
    """ Import a public key: takes the exported public key (e.g. PEM bytes)
    and returns the corresponding RSA key object. """
    return RSA.importKey(publickey) | b744efc95fc154edcf4149134b7b307e75a0bb17 | 16,943
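# Usage sketch with PyCryptodome: generate a throwaway key pair, export the public
# half as PEM bytes, and re-import it through the helper above.
_key = RSA.generate(2048)
_public_pem = _key.publickey().export_key()
_imported = importPublicKey(_public_pem)
print(_imported.has_private())  # -> False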
def _format_contact(resource, key):
"""
Return the contact field with the correct values.
This is mainly stripping out the unecessary fields from the telecom part of
the response.
"""
contacts = resource.pop(key)
resource[key] = []
for contact in contacts:
contact["telecom"] = _format_telecom(
contact,
"telecom",
add_textphone_extension=False,
whitelist=["id", "use", "period", "extension"]
)
resource[key].append(contact)
return resource[key] | 57291dfdf2a724df2cd2342891aa96309648a9c1 | 16,944 |
import datetime
def get_day_type(date):
"""
Returns if a date is a weeday or weekend
:param date datetime:
:return string:
"""
# check if date is a datetime.date
if not isinstance(date, datetime.date):
raise TypeError('date is not a datetime.date')
day_type = ""
if date.weekday() in (0, 1, 2, 3, 4):
day_type = c.WEEKDAY
else:
day_type = c.WEEKEND
return day_type | 72d74746a7782e0f45b3b7d0292b4cbd4ad9f167 | 16,945 |
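# Usage sketch: `c` is the module-level constants namespace providing WEEKDAY/WEEKEND
# labels; a SimpleNamespace stand-in is assumed here for illustration.
from types import SimpleNamespace
c = SimpleNamespace(WEEKDAY="weekday", WEEKEND="weekend")
print(get_day_type(datetime.date(2023, 6, 5)))   # Monday   -> "weekday"
print(get_day_type(datetime.date(2023, 6, 10)))  # Saturday -> "weekend"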
def case_insensitive_equals(name1: str, name2: str) -> bool:
"""
Convenience method to check whether two strings match, irrespective of their case and any surrounding whitespace.
"""
return name1.strip().lower() == name2.strip().lower() | 28b7e5bfb5e69cf425e1e8983895f1ad42b59342 | 16,946 |
def get_access_token(cmd, subscription=None, resource=None, scopes=None, resource_type=None, tenant=None):
"""
get AAD token to access to a specified resource.
Use 'az cloud show' command for other Azure resources
"""
if resource is None and resource_type:
endpoints_attr_name = cloud_resource_type_mappings[resource_type]
resource = getattr(cmd.cli_ctx.cloud.endpoints, endpoints_attr_name)
profile = Profile(cli_ctx=cmd.cli_ctx)
creds, subscription, tenant = profile.get_raw_token(subscription=subscription, resource=resource, scopes=scopes,
tenant=tenant)
result = {
'tokenType': creds[0],
'accessToken': creds[1],
# 'expires_on': creds[2].get('expires_on', None),
'expiresOn': creds[2].get('expiresOn', None),
'tenant': tenant
}
if subscription:
result['subscription'] = subscription
return result | 9a5190db41e4061698ead3846a6e53f42e64deed | 16,947 |
def ensure_listable(obj):
"""Ensures obj is a list-like container type"""
return obj if isinstance(obj, (list, tuple, set)) else [obj] | bdc5dbe7e06c1cc13afde28762043ac3fb65e5ac | 16,948 |
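# Quick demo: scalars get wrapped, list-like containers pass through untouched.
print(ensure_listable("abc"))   # -> ['abc']
print(ensure_listable((1, 2)))  # -> (1, 2)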
def merge_dicts(*dicts: dict) -> dict:
"""Merge dictionaries into first one."""
merged_dict = dicts[0].copy()
for dict_to_merge in dicts[1:]:
for key, value in dict_to_merge.items():
if key not in merged_dict or value == merged_dict[key]:
merged_dict[key] = value
else:
raise ValueError(
f"Test {key} already has a mark we don't want to overwrite: \n"
f"- existing: {merged_dict[key]} "
f"- new value: {value}"
)
return merged_dict | b32a9f4bed149144a3f75b43ed45c8de4351f3d1 | 16,949 |
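# Usage sketch: identical values merge cleanly, conflicting values raise.
marks_a = {"test_io": "slow", "test_gpu": "gpu"}
marks_b = {"test_gpu": "gpu", "test_net": "network"}
print(merge_dicts(marks_a, marks_b))  # -> {'test_io': 'slow', 'test_gpu': 'gpu', 'test_net': 'network'}
try:
    merge_dicts(marks_a, {"test_io": "fast"})
except ValueError as err:
    print(err)  # explains which test already carries a conflicting mark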
# X and O are module-level player markers (e.g. "X" and "O") defined alongside the board helpers.
def winner(board):
"""
Returns the winner of the game, if there is one.
"""
for moves in _winner_moves():
if all(board[i][j] is X for (i, j) in moves):
return X
elif all(board[i][j] is O for (i, j) in moves):
return O
return None | c6e3b35b2cf37ff3da4fe5cd306f7a6f78603f16 | 16,950 |
from hypothesis.strategies import booleans
def specified_kwargs(draw, *keys_values_defaults: KVD):
"""Generates valid kwargs given expected defaults.
When we can't realistically use hh.kwargs() and thus test whether xp infact
defaults correctly, this strategy lets us remove generated arguments if they
are of the default value anyway.
"""
kw = {}
for keyword, value, default in keys_values_defaults:
if value is not default or draw(booleans()):
kw[keyword] = value
return kw | bd3dfdcbb084a87b0c60d9221692f8fbd3e70333 | 16,951 |
def add_image():
"""User uploads a new landmark image, and inserts into db."""
imageURL = request.form.get("imageURL")
landmark_id = request.form.get("landmark_id")
new_image = LandmarkImage(landmark_id=landmark_id,
imageurl=imageURL)
db.session.add(new_image)
db.session.commit()
return "Success" | dce5f9c21daef67b1a13b1d590fe027213c408e0 | 16,952 |
def merge(link1: Node, link2: Node) -> Node:
"""
    Merge two sorted linked lists.
Parameters
-----------
link1: Node
link2: Node
Returns
---------
out: Node
    """
    # Dummy head node simplifies edge handling; existing nodes are relinked, not copied.
    link = Node(None)
    ptr = link
    while link1 and link2:
        if link1.val <= link2.val:
            ptr.next = link1
            link1 = link1.next
        else:
            ptr.next = link2
            link2 = link2.next
        ptr = ptr.next
    # Attach whichever list still has remaining nodes.
    ptr.next = link1 if link1 else link2
    return link.next | 5d40acbd1ffb595a7f4605c3181ede19fb4adbb3 | 16,953
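# Usage sketch: `Node` is the module's singly linked list node (val/next); a minimal
# stand-in is assumed here and must be defined before `merge` so its annotations resolve.
class Node:
    def __init__(self, val, next=None):
        self.val = val
        self.next = next

def _from_list(values):
    head = None
    for v in reversed(values):
        head = Node(v, head)
    return head

_merged = merge(_from_list([1, 3, 5]), _from_list([2, 4, 6]))
_out = []
while _merged:
    _out.append(_merged.val)
    _merged = _merged.next
print(_out)  # -> [1, 2, 3, 4, 5, 6]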
import numpy as np
def l1_norm_optimization(a_i, b_i, c_i, w_i=None):
    """Solve the (optionally weighted) l1-norm optimization problem min sum_i |a_i*x + b_i*y + c_i|."""
    # `cvx` is the module-level cvxopt wrapper (including its `l1` example solver);
    # CVX_SUPRESS_PRINT is a module-level flag.
    cvx.solvers.options['show_progress'] = not CVX_SUPRESS_PRINT
    # Weighted optimization: scale each residual by its weight.
    if w_i is not None:
        a_i, b_i, c_i = np.multiply(a_i, w_i), np.multiply(b_i, w_i), np.multiply(c_i, w_i)
    # Problem must be formulated as sum |P*x - q|
    P = cvx.matrix([[cvx.matrix(a_i)], [cvx.matrix(b_i)]])
    q = cvx.matrix(c_i * -1)
    # Solve the l1-norm problem
    u = cvx.l1.l1(P, q)
    # Return the resulting point
    x0, y0 = u[0], u[1]
    return (x0, y0) | 0966516185c99b936a1fcc8b3d4c74e67587bc63 | 16,954
def set_order(market, order_type, amount, price, keys, stop_price=None):
"""
Create an order
Arguments:
market (str) : market name,
order_type (str) : may be "limit", "market", "market_by_quote",
"limit_stop_loss"
amount (float) : positive if BUY order, and negative for SELL
price (float) : price of 1 ask currency in a quoted currency. Necessary only
when type is "limit"
keys (dict): {
"private" : "",
"public" : ""
}
Optional arguments:
stop_price (float) : price when activates "limit_stop_loss" type order. If
None then the same as price
Returns:
(list) [
[0] (int) order ID,
[1] (NoneType) not in use,
[2] (NoneType) not in use,
[3] (str) name of the market,
[4] (int) time stamp of the creation in ms,
[5] (int) time stamp of the update in ms,
[6] (str) initial volume,
[7] (str) order volume,
[8] (str) order type ("LIMIT" or "MARKET"),
[9] (NoneType) not in use,
[10] (NoneType) not in use,
[11] (NoneType) not in use,
[12] (NoneType) not in use,
[13] (str) order status,
[14] (NoneType) not in use,
[15] (NoneType) not in use,
[16] (str) order price,
[17] (str) average price of deals in order,
[18] (NoneType) not is use,
[19] (str) for stop price but None for other orders,
[20] (NoneType) not in use,
[21] (NoneType) not in use,
[22] (NoneType) not in use,
[23] (NoneType) not in use,
[24] (NoneType) not in use,
[25] (NoneType) not in use,
[26] (NoneType) not in use,
[27] (NoneType) not in use,
[28] (NoneType) not in use,
[29] (NoneType) not in use,
[30] (NoneType) not in use,
[31] (NoneType) not in use,
]
"""
body = {
"symbol": market,
"type": order_type,
"amount": amount,
"price": price,
"stop_price": price,
}
return _request("auth/w/order/submit", body=body, keys=keys) | 8f9823be8a39d404062114432604c8480aed20c6 | 16,955 |
import calendar
def convert_ts(tt):
"""
tt: time.struct_time(tm_year=2012, tm_mon=10, tm_mday=23, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=1, tm_yday=297, tm_isdst=-1)
>>> tt = time.strptime("23.10.2012", "%d.%m.%Y")
>>> convert_ts(tt)
1350950400
tt: time.struct_time(tm_year=1513, tm_mon=1, tm_mday=1, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=2, tm_yday=1, tm_isdst=0)
>>> tt = time.strptime("1.1.1513", "%d.%m.%Y")
>>> convert_ts(tt)
0
>>> tt = 12
>>> convert_ts(tt)
"""
try:
ts = calendar.timegm(tt)
"""
As from the github issue https://github.com/prashanthellina/rsslurp/issues/680,
there are some cases where we might get timestamp in negative values, so consider
0 if the converted timestamp is negative value.
"""
if ts < 0:
ts = 0
except TypeError:
ts = None
return ts | a3c2f5ae3d556290b6124d60fd4f84c1c2685195 | 16,956 |
def data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_name_post(uuid, tapi_common_name_and_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_name_post
creates tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list
:type tapi_common_name_and_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!' | fef62699a5a16385ffeeb47f252c1a3142fa9c96 | 16,957 |
import re
import json
def receive_github_hook(request):
"""a hook is sent on some set of events, specifically:
push/deploy: indicates that the content for the repository changed
pull_request: there is an update to a pull request.
This function checks that (globally) the event is valid, and if
so, runs a function depending on the event.
"""
# We do these checks again for sanity
if request.method == "POST":
if DISABLE_WEBHOOKS:
return JsonResponseMessage(message="Webhooks disabled")
if not re.search("GitHub-Hookshot", request.META["HTTP_USER_AGENT"]):
return JsonResponseMessage(message="Agent not allowed")
# Only allow application/json content type
if request.META["CONTENT_TYPE"] != "application/json":
return JsonResponseMessage(message="Incorrect content type")
# Check that it's coming from the right place
required_headers = ["HTTP_X_GITHUB_DELIVERY", "HTTP_X_GITHUB_EVENT"]
if not check_headers(request, required_headers):
return JsonResponseMessage(message="Agent not allowed")
# Has to be a push, deployment, or pull_request
event = request.META["HTTP_X_GITHUB_EVENT"]
# Ping happens on setup
if event == "ping":
return JsonResponseMessage(
message="Ping received, no action taken.", status=200
)
# But don't allow types beyond push, deploy, pr
if event not in ["push", "deployment", "pull_request", "repository"]:
return JsonResponseMessage(message="Incorrect delivery method.")
# A signature is also required
signature = request.META.get("HTTP_X_HUB_SIGNATURE")
if not signature:
return JsonResponseMessage(message="Missing credentials.")
# Parse the body
payload = load_body(request)
repo = payload.get("repository")
repo_name = repo["full_name"]
# If it's a repository event, might be transferred or renamed
if event == "repository":
if payload.get("action") == "transferred":
owner = payload["changes"]["owner"]["from"]["user"]["login"]
repo_name = "%s/%s" % (owner, repo.get("name"))
# Retrieve the article
try:
article = Article.objects.get(repo__full_name=repo_name)
except Article.DoesNotExist:
return JsonResponseMessage(message="Article not found", status=404)
# Don't continue if the repository is archived (this shouldn't happen)
if article.archived:
return JsonResponseMessage(message="Repository is archived.")
# Validate the payload with the collection secret
status = validate_payload(
secret=str(article.secret),
payload=request.body,
request_signature=signature,
)
if not status:
return JsonResponseMessage(message="Invalid credentials.")
# Branch must be master
branch = payload.get("ref", "refs/heads/master").replace("refs/heads/", "")
# Update repo metadata that might change
article.repo = repo
article.save()
# Submit job with django_rq to update article
if event == "pull_request":
against_branch = payload["pull_request"]["base"]["ref"]
branch = payload["pull_request"]["head"]["ref"]
if not branch.startswith("update/term") or against_branch != "master":
return JsonResponseMessage(message="Ignoring branch.", status=200)
# Requesting user is derived from branch
user = branch.replace("update/term-", "").split("-")[0]
res = django_rq.enqueue(
update_pullrequest,
article_uuid=article.uuid,
user=user,
action=payload["action"],
url=payload["pull_request"]["html_url"],
number=payload["number"],
merged_at=payload["pull_request"]["merged_at"],
)
elif event in ["push", "deployment"]:
if branch != "master":
return JsonResponseMessage(message="Ignoring branch.", status=200)
article.commit = payload["after"]
article.save()
res = django_rq.enqueue(update_article, article_uuid=article.uuid)
elif event == "repository":
res = django_rq.enqueue(
repository_change,
article_uuid=article.uuid,
action=payload["action"],
repo=json.dumps(payload["repository"]),
)
return JsonResponseMessage(
message="Hook received and parsing.", status=200, status_message="Received"
)
return JsonResponseMessage(message="Invalid request.") | 874a71c3c9c002f3714ce0b4bb80586e8d67d7e8 | 16,958 |
import torch
def dice(y, t, normalize=True, class_weight=None,
ignore_label=-1, reduce='mean', eps=1e-08):
""" Differentable Dice coefficient.
See: https://arxiv.org/pdf/1606.04797.pdf
Args:
y (~torch.Tensor): Probability
t (~torch.Tensor): Ground-truth label
normalize (bool, optional): If True, calculate the dice coefficients for each class and take the average. Defaults to True.
class_weight (list or ndarray, optional): Defaults to None.
ignore_label (int, optional): Defaults to -1.
reduce (str, optional): Defaults to 'mean'.
eps (float, optional): Defaults to 1e-08.
"""
_check_type_forward(y, t)
device = y.device
dtype = y.dtype
if class_weight is not None:
class_weight = torch.as_tensor(class_weight, dtype=dtype, device=device)
b, c = y.shape[:2]
t_onehot = to_onehot(t, n_class=c)
y = y.view(b, c, -1)
t_onehot = t_onehot.view(b, c, -1)
if ignore_label != -1:
t_onehot = torch.cat( (t_onehot[:, :ignore_label], t_onehot[:, ignore_label + 1:]), dim=1)
y = torch.cat( (y[:, :ignore_label], y[:, ignore_label + 1:]), dim=1)
intersection = y * t_onehot
cardinality = y + t_onehot
if normalize: # NOTE: channel-wise
intersection = torch.sum(intersection, dim=-1)
cardinality = torch.sum(cardinality, dim=-1)
ret = (2. * intersection / (cardinality + eps))
if class_weight is not None:
ret *= class_weight
ret = torch.mean(ret, dim=1)
else:
intersection = torch.sum(intersection, dim=(0, 2))
cardinality = torch.sum(cardinality, dim=(0, 2))
ret = (2. * intersection / (cardinality + eps))
if class_weight is not None:
ret *= class_weight
if reduce == 'mean':
ret = torch.mean(ret)
else:
raise NotImplementedError('unsupported reduce type..')
return ret | dd0b6fb75688ed0579a3bf9a513f73d0b785e57e | 16,959 |
def read_start_params(path_or_database):
"""Load the start parameters DataFrame.
Args:
path_or_database (pathlib.Path, str or sqlalchemy.MetaData)
Returns:
params (pd.DataFrame): see :ref:`params`.
"""
database = load_database(**_process_path_or_database(path_or_database))
optimization_problem = read_last_rows(
database=database,
table_name="optimization_problem",
n_rows=1,
return_type="dict_of_lists",
)
start_params = optimization_problem["params"][0]
return start_params | 31cc6d5f538a8616f9eda676e4bf8757f02f1cb3 | 16,960 |
import numpy as np
def calcCovariance(modes):
"""Return covariance matrix calculated for given *modes*."""
if isinstance(modes, Mode):
array = modes._getArray()
return np.outer(array, array) * modes.getVariance()
elif isinstance(modes, ModeSet):
array = modes._getArray()
return np.dot(array, np.dot(np.diag(modes.getVariances()), array.T))
elif isinstance(modes, NMA):
return modes.getCovariance()
else:
raise TypeError('modes must be a Mode, NMA, or ModeSet instance') | 7803e765dcf4ad40158040013691bd0f3d7775be | 16,962 |
def sparse_tensor_value_to_texts(value):
"""
Given a :class:`tf.SparseTensor` ``value``, return an array of Python strings
representing its values.
This function has been modified from Mozilla DeepSpeech:
https://github.com/mozilla/DeepSpeech/blob/master/util/text.py
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
return sparse_tuple_to_texts((value.indices, value.values, value.dense_shape)) | e1133532ecd88478d9a5a96773e413992e6566f8 | 16,963 |
def coding_problem_45(rand5):
"""
    Using a function rand5() that returns an integer from 1 to 5 (inclusive) with uniform probability, implement a
    function rand7() that returns an integer from 1 to 7 (inclusive). This implementation uses the zero-based
    convention instead: rand5 yields 0..4 and the result lies in 0..6, as the doctest below shows.
    Note: 24 draws of rand5() can be read as a uniform base-5 number in [0, 5**24). Since 5**24 % 7 == 1, reducing
    that number modulo 7 is uniform up to a negligible bias of about one part in 5**24, giving an effectively
    unbiased rand7(). To avoid relying on big integer libraries, we reduce modulo 7 after every digit, using the
    property (a * c + b) % n == ((a % n) * c + b) % n.
>>> from random import randint
>>> rand5 = lambda: randint(0, 4)
>>> rand7 = coding_problem_45(rand5)
>>> 0 <= rand7 < 7
True
"""
rand7 = 0
for _ in range(24):
rand7 = (rand7 * 5 + rand5()) % 7
return rand7 | 8e26c6f95d953e0a8b3d8a33232c886742a535ce | 16,964 |
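# Quick empirical check: with a 0-based rand5 the outputs 0..6 should be close to uniform.
from collections import Counter
from random import randint
_rand5 = lambda: randint(0, 4)
_counts = Counter(coding_problem_45(_rand5) for _ in range(70000))
print(sorted(_counts.items()))  # each of 0..6 appears roughly 10000 times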
def handle_rss_api(output, kwargs):
""" Special handler for API-call 'set_config' [rss] """
name = kwargs.get('keyword')
if not name:
name = kwargs.get('name')
if not name:
return None
feed = config.get_config('rss', name)
if feed:
feed.set_dict(kwargs)
else:
config.ConfigRSS(name, kwargs)
action = kwargs.get('filter_action')
if action in ('add', 'update'):
# Use the general function, but catch the redirect-raise
try:
kwargs['feed'] = name
sabnzbd.interface.ConfigRss('/').internal_upd_rss_filter(**kwargs)
except cherrypy.HTTPRedirect:
pass
elif action == 'delete':
# Use the general function, but catch the redirect-raise
try:
kwargs['feed'] = name
sabnzbd.interface.ConfigRss('/').internal_del_rss_filter(**kwargs)
except cherrypy.HTTPRedirect:
pass
return name | 73bd10dc2a40cc1648423372e8fcae065e83dbce | 16,965 |
def progress(job_id, user: User = Depends(auth_user), db: Session = Depends(get_db)):
"""
Get a user's progress on a specific job.
"""
job = _job(db, job_id)
check_job_user(db, user, job)
progress = rules.get_progress_report(db, job, user)
return progress | fc581297463bc46ce461811d7f675cc99ee63b65 | 16,966 |
import json
def getRoom(borough):
"""Return a JSON dataset for property type of airbnb listing"""
prpt = db.session.query(data.Borough,
data.Room_Type, data.Price, data.Review_Rating, data.review_scores_cleanliness,
data.review_scores_value, data.host_response_rate).statement
df = pd.read_sql_query(prpt, db.session.bind)
df = df[df['Borough'] == borough]
df["host_response_rate"] = df["host_response_rate"].str.replace("%", "").astype(float)
df["review_scores_cleanliness"] = df["review_scores_cleanliness"].str.replace(".", "").astype(float)
df["review_scores_value"] = df["review_scores_value"].str.replace(".", "").astype(float)
df1 = df.groupby('Room_Type').count().reset_index()
df2 = df.groupby('Room_Type').mean().reset_index().round(2)
df = pd.merge(df1, df2, on='Room_Type')
df = df[['Room_Type', 'Borough', 'Price_y', 'Review_Rating_y', 'review_scores_cleanliness_y', 'review_scores_value_y', 'host_response_rate_y']].rename(
columns={'Price_y': 'Avg_price', 'Review_Rating_y':'RRate', 'review_scores_cleanliness_y':'RClean', 'review_scores_value_y':'RValue', 'host_response_rate_y':'HostResponseR' })
df['percent'] = round((df.Borough/df.Borough.sum())*100, 2)
d = df.to_dict('records')
return json.dumps(d) | 2ae05f1f5a501b0a8e25dfc7b212cb0aeecbc0f1 | 16,967 |
def get_info(name_file, what='V', parent_folder='txt_files'):
"""Get data from txt file and convert to data list
:param name_file : name of the file, without txt extension
:param what : V = vertices, E = edges, R = pose
:param parent_folder"""
file_path = get_file_path(name_file, parent_folder)
if what == 'V' or what == 'R':
my_type = 'float'
else:
my_type = 'int'
data_dict = read_data_txt(file_path, my_type)
data = as_list(data_dict)
return data | f06608340622c7173dffabb6c08f178b9e887e73 | 16,968 |
def receive_message(
sock, operation, request_id, max_message_size=MAX_MESSAGE_SIZE):
"""Receive a raw BSON message or raise socket.error."""
header = _receive_data_on_socket(sock, 16)
length = _UNPACK_INT(header[:4])[0]
actual_op = _UNPACK_INT(header[12:])[0]
if operation != actual_op:
raise ProtocolError("Got opcode %r but expected "
"%r" % (actual_op, operation))
# No request_id for exhaust cursor "getMore".
if request_id is not None:
response_id = _UNPACK_INT(header[8:12])[0]
if request_id != response_id:
raise ProtocolError("Got response id %r but expected "
"%r" % (response_id, request_id))
if length <= 16:
raise ProtocolError("Message length (%r) not longer than standard "
"message header size (16)" % (length,))
if length > max_message_size:
raise ProtocolError("Message length (%r) is larger than server max "
"message size (%r)" % (length, max_message_size))
return _receive_data_on_socket(sock, length - 16) | 0c1cd762a2a0889d2894993e0f5362e0acdaee36 | 16,969 |
import numpy as np
def cycle_interval(starting_value, num_frames, min_val, max_val):
"""Cycles through the state space in a single cycle."""
starting_in_01 = (starting_value - min_val) / (max_val - min_val)
grid = np.linspace(starting_in_01, starting_in_01 + 2.,
num=num_frames, endpoint=False)
grid -= np.maximum(0, 2 * grid - 2)
grid += np.maximum(0, -2 * grid)
return grid * (max_val - min_val) + min_val | 34fa0d60b9d5d99eee9666d70c77e4375d37ace8 | 16,970 |
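# Usage sketch: starting at 0.25 inside [0, 1], eight frames sweep up to the max,
# back down to the min, and return toward the start without leaving the interval.
_frames = cycle_interval(0.25, num_frames=8, min_val=0.0, max_val=1.0)
print(np.round(_frames, 3))  # -> [0.25 0.5  0.75 1.   0.75 0.5  0.25 0.  ]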
def commit_ref_info(repos, skip_invalid=False):
"""
Returns a dict of information about what commit should be tagged in each repo.
If the information in the passed-in dictionary is invalid in any way,
this function will throw an error unless `skip_invalid` is set to True,
in which case the invalid information will simply be logged and ignored.
Arguments:
repos (dict): A dict mapping Repository objects to openedx.yaml data.
skip_invalid (bool): if true, log invalid data in `repos`, but keep going.
Returns:
A dict mapping Repositories to a dict about the ref to tag, like this::
{
Repository(<full_repo_name>): {
"ref": "name of tag or branch"
"ref_type": "tag", # or "branch"
"sha": "1234566789abcdef",
"message": "The commit message"
"author": {
"name": "author's name",
"email": "author's email"
}
"committer": {
"name": "committer's name",
"email": "committer's email",
}
},
Repository(<next_repo_name>): {...},
...
}
"""
ref_info = {}
for repo, repo_data in nice_tqdm(repos.items(), desc='Find commits'):
# are we specifying a ref?
ref = repo_data["openedx-release"].get("ref")
if ref:
try:
ref_info[repo] = get_latest_commit_for_ref(repo, ref)
except (GitHubError, ValueError):
if skip_invalid:
msg = "Invalid ref {ref} in repo {repo}".format(
ref=ref,
repo=repo.full_name
)
log.error(msg)
continue
else:
raise
return ref_info | 86425248cd4a90aa75d03c2965d63aba3a38e81d | 16,971 |
import numpy as np
def function(x, axis=0, fast=False):
    """
    Estimate the autocorrelation function of a time series using the FFT.
    :param x:
        The time series. If multidimensional, set the time axis using the
        ``axis`` keyword argument and the function will be computed for every
        other axis.
    :param axis: (optional)
        The time axis of ``x``. Assumed to be the first axis if not specified.
    :param fast: (optional)
        If ``True``, only use the largest ``2^n`` entries for efficiency.
        (default: False)
    """
    x = np.atleast_1d(x)
    m = [slice(None), ] * len(x.shape)
    # For computational efficiency, crop the chain to the largest power of
    # two if requested.
    if fast:
        n = int(2**np.floor(np.log2(x.shape[axis])))
        m[axis] = slice(0, n)
        x = x[tuple(m)]
    else:
        n = x.shape[axis]
    # Compute the FFT and then (from that) the auto-correlation function.
    f = np.fft.fft(x - np.mean(x, axis=axis), n=2*n, axis=axis)
    m[axis] = slice(0, n)
    acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
    # Normalise by the zero-lag value so that acf[0] == 1.
    m[axis] = 0
    return acf / acf[tuple(m)] | cb71d63ee35adde701eba91e068b0f6898005f04 | 16,972
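# Usage sketch: for an AR(1) series the estimated autocorrelation starts at exactly 1
# and the lag-1 value sits near the AR coefficient.
np.random.seed(0)
_phi, _n = 0.9, 4096
_x = np.zeros(_n)
for _i in range(1, _n):
    _x[_i] = _phi * _x[_i - 1] + np.random.randn()
_acf = function(_x, fast=True)
print(_acf[0], round(_acf[1], 2))  # -> 1.0 and a value close to 0.9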
def find_binaries(*args, **kwargs):
"""Given images data, return a list of dicts containing details of
all binaries in the image which can be identified with image_id or
image_tag.
One of image_id or image_tag must be specified.
:params: See `find_image`
:exception: exceptions.ImageNotFound
:exception: exceptions.ParameterError
:exception: exceptions.NoPackages
:return: A list of dicts:
As per the Twistlock API, each dict takes the form:
{
name: 'binary name',
path: 'full path to the binary including the name'
md5: 'md5 hash for the binary'
cveCount: 'Number of CVEs reported for the binary'
}
"""
image = find_image(*args, **kwargs)
return image['data']['binaries'] | 0bc4345279f11cc751f4aee62a7773f0fa21643a | 16,973 |
import copy
def solve_version(d):
""" solve version difference,
argument map d is deepcopied.
"""
# make copy
d = copy.deepcopy(d)
v = d.get('version', 0)
# functions in _update
for f in _update_chain[v:]:
d = f(d)
return d | ce6a412cc2350a3f6c97c7e1f649a537c35f6722 | 16,974 |
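# Usage sketch: `_update_chain` is the module-level list of migration functions indexed
# by version; two toy migrations are assumed here for illustration.
def _v0_to_v1(d):
    d["renamed"] = d.pop("old_name", None)
    d["version"] = 1
    return d

def _v1_to_v2(d):
    d.setdefault("new_field", 0)
    d["version"] = 2
    return d

_update_chain = [_v0_to_v1, _v1_to_v2]
print(solve_version({"old_name": "x"}))  # -> {'renamed': 'x', 'version': 2, 'new_field': 0}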