content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---|
import numpy as np
import matplotlib.pyplot as plt
def plot_feature_importance(feature_keys, feature_importances, ax=None, **kwargs):
    """
    Plot feature importances after model training (typically from scikit-learn)
Parameters
----------
feature_keys: list of string
feature_importances: `numpy.ndarray`
ax: `matplotlib.pyplot.axes`
Returns
-------
ax
"""
ax = plt.gca() if ax is None else ax
sort_mask = np.argsort(feature_importances)[::-1]
ax.bar(np.array(feature_keys)[sort_mask], np.array(feature_importances)[sort_mask], **kwargs)
for t in ax.get_xticklabels():
t.set_rotation(45)
ax.set_title("Feature importances")
return ax | aa3a747002d7c82f91de52e011b269b105c4bb70 | 9,080 |
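A minimal usage sketch for plot_feature_importance above; the RandomForestClassifier and the toy data are illustrative assumptions, not part of the original snippet.

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

# train a small model on synthetic data and plot its feature importances
X, y = make_classification(n_samples=200, n_features=5, random_state=0)
model = RandomForestClassifier(n_estimators=50, random_state=0).fit(X, y)
ax = plot_feature_importance([f"f{i}" for i in range(5)], model.feature_importances_)
plt.tight_layout()
plt.show()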
import numpy as np
from numpy.random import default_rng
def simulate_timestamps_till_horizon(mu, alpha, beta, Thorizon=60,
                                     seed=None, node=None, output_rejected_data=False):
    """
    Simulate event timestamps of a Hawkes process (HP) up to time Thorizon
    via thinning (rejection sampling).
    Inputs:
    mu, alpha, beta are parameters of the intensity function of the HP
    """
#################
# Initialisation
#################
rng = default_rng(seed) # get instance of random generator
t = 0 # initialise current time to be 0
i = 0 # set event counter to be 0
    epsilon = 10**(-10) # small offset commonly used in Hawkes-process implementations
    lambda_star = mu # upper bound on the intensity at current time t = 0
    ts = np.array([]); accepted_event_intensity = [lambda_star]
    # container for rejected time points and their corresponding intensities
rejected_points = []; rpy = []
# M_y stores upper bound of current times while M_x stores their x-values
M_x = []; M_y = []
#################
# Begin loop
#################
while(t < Thorizon):
previous_lambda_star = lambda_star; previous_t = t
# compute upper bound of intensity using intensity function
lambda_star = intensity_func(t+epsilon, ts, mu, alpha, beta)
u = rng.uniform(0,1) # draw a uniform random number between (0,1)
tau = -np.log(u)/lambda_star # sample inter-arrival time
t = t + tau # update current time by adding tau to current time
M_x += [previous_t,t]
M_y += [previous_lambda_star]
s = rng.uniform(0,1)# draw another standard uniform random number
# compute intensity function at current time t
lambda_t = intensity_func(t, ts, mu, alpha, beta)
if (t >= Thorizon):
break
##########################
## Rejection Sampling test
if s <= lambda_t/lambda_star:
ts = np.append(ts, float(t))
            if node is not None:
ts = np.append(ts, [float(t), np.array([node])])
accepted_event_intensity.append(lambda_t)
i += 1
else:
rejected_points += [t]
rpy += [lambda_t]
if output_rejected_data:
return ts, i, accepted_event_intensity, rejected_points, rpy
return ts | 6d9e7a7c747c7a07fe94069017b32a47e3d35ac2 | 9,081 |
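The simulator above relies on an external intensity_func(t, ts, mu, alpha, beta); a minimal sketch of a call, with an assumed exponential-kernel Hawkes intensity (an assumption, not part of the original snippet), might look like this.

import numpy as np

def intensity_func(t, ts, mu, alpha, beta):
    # assumed exponential-kernel Hawkes intensity: mu + alpha * sum(exp(-beta * (t - t_i)))
    past = ts[ts < t]
    return mu + alpha * np.sum(np.exp(-beta * (t - past)))

events = simulate_timestamps_till_horizon(mu=0.5, alpha=0.8, beta=1.2, Thorizon=60, seed=42)
print(len(events), "events simulated up to the horizon")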
import logging
import time
import torch
import datetime
def jp_inference_on_dataset(model, data_loader, evaluator):
"""
Run model on the data_loader and evaluate the metrics with evaluator.
Also benchmark the inference speed of `model.forward` accurately.
The model will be used in eval mode.
Args:
model (nn.Module): a module which accepts an object from
`data_loader` and returns some outputs. It will be temporarily set to `eval` mode.
If you wish to evaluate a model in `training` mode instead, you can
wrap the given model and override its behavior of `.eval()` and `.train()`.
data_loader: an iterable object with a length.
The elements it generates will be the inputs to the model.
evaluator (DatasetEvaluator): the evaluator to run. Use `None` if you only want
to benchmark, but don't want to do any evaluation.
Returns:
The return value of `evaluator.evaluate()`
"""
num_devices = get_world_size()
logger = logging.getLogger(__name__)
logger.info("Start inference on {} images".format(len(data_loader)))
total = len(data_loader) # inference data loader must have a fixed length
if evaluator is None:
# create a no-op evaluator
evaluator = DatasetEvaluators([])
evaluator.reset()
num_warmup = min(5, total - 1)
start_time = time.perf_counter()
total_compute_time = 0
with inference_context(model), torch.no_grad():
for idx, inputs in enumerate(data_loader):
print("rank",comm.get_rank(),"is processing batch",idx)
if idx == num_warmup:
start_time = time.perf_counter()
total_compute_time = 0
start_compute_time = time.perf_counter()
            outputs = model(inputs)  # run the model forward pass
if torch.cuda.is_available():
torch.cuda.synchronize()
total_compute_time += time.perf_counter() - start_compute_time
evaluator.process(inputs, outputs)
iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
seconds_per_img = total_compute_time / iters_after_start
if idx >= num_warmup * 2 or seconds_per_img > 5:
total_seconds_per_img = (time.perf_counter() - start_time) / iters_after_start
eta = datetime.timedelta(seconds=int(total_seconds_per_img * (total - idx - 1)))
log_every_n_seconds(
logging.INFO,
"Inference done {}/{}. {:.4f} s / img. ETA={}".format(
idx + 1, total, seconds_per_img, str(eta)
),
n=5,
)
# Measure the time only for this worker (before the synchronization barrier)
total_time = time.perf_counter() - start_time
total_time_str = str(datetime.timedelta(seconds=total_time))
# NOTE this format is parsed by grep
logger.info(
"Total inference time: {} ({:.6f} s / img per device, on {} devices)".format(
total_time_str, total_time / (total - num_warmup), num_devices
)
)
total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time)))
logger.info(
"Total inference pure compute time: {} ({:.6f} s / img per device, on {} devices)".format(
total_compute_time_str, total_compute_time / (total - num_warmup), num_devices
)
)
results = evaluator.evaluate()
# An evaluator may return None when not in main process.
# Replace it by an empty dict instead to make it easier for downstream code to handle
if results is None:
results = {}
return results | e18113b4fc47bf48562bdee8dc8e4a2bdbe4c884 | 9,082 |
def boolToYes(b):
"""Convert a Boolean input into 'yes' or 'no'
Args:
b (bool): The Boolean value to be converted
Returns:
str: 'yes' if b is True, and 'no' otherwise.
"""
if b:
return "yes"
else:
return "no" | ff94b66b5a166592062bf1d5b286b425e7997304 | 9,083 |
import plotly.graph_objects as go
def top_symptoms(dic, title):
"""Find and plot top symptoms in the dictionary based on count
Args:
dic (dict): Dictionary containing text-count pair
Returns:
        list: The top 5 symptom keys, sorted by count (descending)
"""
assert isinstance(dic, dict) and len(dic) > 0, "dic is not a nonempty dictionary"
labels = []
sizes = []
counts = 0
top5 = sorted(dic, key=dic.get, reverse=True)[:5]
others = sorted(dic, key=dic.get, reverse=True)[5:]
for i in others:
counts += dic[i]
for i in top5:
labels.append(i)
sizes.append(dic[i])
labels.append("OTHER")
sizes.append(counts)
fig = go.Figure(data=[go.Pie(labels=labels, values=sizes, hole=0.3)])
fig.update_layout(
title=title,
template=None,
title_x=0.5,
width=1000,
height=900,
margin=dict(l=20, r=20, t=50, b=20),
legend=dict(font=dict(size=25, color="black")),
)
fig.show()
return top5 | 1acfcec04d2a5c11f7f1a4e90eb9142de042c875 | 9,084 |
def _calc_z(h: DataArray, zice: DataArray, zeta: DataArray,
s: DataArray, Cs: DataArray,
hc: float, Vtransform: int) -> DataArray:
"""
Calculate grid z-coord depth given water depth (h), iceshelf depth (zice),
sea surface (zeta), and vertical grid transformation parameters.
Inputs:
h, zice, zeta - bathymetry extracted by set_hzz
s, Cs, hc, Vtransform - ROMS grid transformation parameters
Output:
z - depth of rho/w points
"""
if Vtransform == 1:
z0 = hc*s + (h-zice-hc)*Cs
z = zeta*(1.0+z0/(h-zice)) + z0 - zice
elif Vtransform == 2:
z0 = (hc*s + (h-zice)*Cs) / (hc+h-zice)
z = zeta + (zeta+h-zice)*z0 - zice
return z | 6580d3c2825cbea0bba33d03b2c0ad62bbd5b227 | 9,085 |
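A small worked sketch of _calc_z with plain numpy values standing in for the DataArray inputs (the original presumably operates on xarray DataArrays); the grid parameters and stretching curve here are illustrative assumptions.

import numpy as np

# illustrative ROMS-style vertical grid: 5 s-levels, 100 m water column, no ice shelf, flat sea surface
s = np.linspace(-1.0, 0.0, 5)   # s-coordinate levels
Cs = s**2 * (s + 1) + s         # toy stretching curve with Cs(-1) = -1 and Cs(0) = 0 (assumption, not a real ROMS Cs)
z = _calc_z(h=100.0, zice=0.0, zeta=0.0, s=s, Cs=Cs, hc=20.0, Vtransform=2)
print(z)  # depths from -100 m at the bottom up to 0 m at the surface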
def gap_loss(preds, D, A):
"""
This module implement the loss function in paper [Azada Zazi, Will Hang. et al, 2019] Nazi, Azade & Hang, Will & Goldie, Anna & Ravi, Sujith & Mirhoseini, Azalia. (2019). GAP: Generalizable Approximate Graph Partitioning Framework.
Args:
preds (tensor(float)): output predited value, have size n x g
D (tensor(float)): degree of nodes, have size n x 1
A (tensor(bool)): adjacent matrix of graph, have size n x n
Returns:
float: the results of the loss function
"""
temp = tf.matmul(tf.transpose(preds), D)
temp = tf.div(preds, temp)
temp = tf.matmul(temp, tf.transpose(1-preds))
temp = tf.multiply(temp, A)
return tf.reduce_sum(temp) | 9418ee8bda3e7b1a5284c36412fefa158eec0f91 | 9,086 |
def number_of_hole(img, hole_img, hole_counter):
""" 判斷hole的數量去執行相對應的函式
0個hole執行zero_of_hole
1個hole執行one_of_hole
2個hole執行my_text.set("Answer : 8")
大於2個hole則執行my_text.set("Error : holes number = " + str(hole_counter) + "( > 2 )")) """
switcher = {
0:zero_of_hole,
1:one_of_hole,
2:lambda x1, x2:my_text.set("Answer : 8") #參數x1, x2從未使用, 為了return function
}
func = switcher.get(hole_counter, lambda x1, x2:my_text.set("Error : holes number = " + str(hole_counter) + "( > 2 )")) #參數x1, x2從未使用, 為了return function
return func(img, hole_img) | 583fd05b0f10e3ea1c7cee11bd416b8d41d7f840 | 9,087 |
def get_merged_by_value_coords(spans_value, digits=None):
"""returns adjacent spans merged if they have the same value. Assumes
[(start, end, val), ..] structure and that spans_value is sorted in
ascending order.
Arguments:
- digits: if None, any data can be handled and exact values are
compared. Otherwise values are rounded to that many digits.
"""
assert len(spans_value[0]) == 3, 'spans_value must have 3 records per row'
starts, ends, vals = zip(*spans_value)
indices_distinct_vals = get_run_start_indices(vals, digits=digits)
data = []
i = 0
for index, val in indices_distinct_vals:
start = starts[index]
end = ends[index]
prev_index = max(index-1, 0)
try:
data[-1][1] = ends[prev_index]
except IndexError:
pass
data.append([start, end, val])
if index < len(ends):
data[-1][1] = ends[-1]
return data | c186c503972b4b48e627c14df77bd5a780b59f5b | 9,088 |
def vint_mask_for_length(length):
"""
Returns the bitmask for the first byte of a variable-length integer (used for element ID and size descriptors).
:arg length: the length of the variable-length integer
:type length: int
:returns: the bitmask for the first byte of the variable-length integer
:rtype: int
"""
return 0b10000000 >> (length - 1) | 92fe3cb0fa09713ff4b650349294a2b241bb3918 | 9,089 |
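A quick check of the mask values produced for variable-length integers of lengths 1 to 4.

for length in (1, 2, 3, 4):
    print(length, bin(vint_mask_for_length(length)))
# 1 0b10000000
# 2 0b1000000
# 3 0b100000
# 4 0b10000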
from itertools import tee
def parse(tokens):
"""
S-expr ::= ( S-expr* ) | AtomSymbol | ' S-expr
' S-expr = (quote S-expr)
"""
def _parse(tokens):
while True:
token = next(tokens)
if token == "(":
s_expr = []
while True:
tokens, tokens_clone = tee(tokens)
if next(tokens_clone) == ")":
next(tokens)
return tuple(s_expr)
s_expr.append(_parse(tokens))
elif token == ")" or token == "":
raise Exception("parse error")
elif token == "'":
return "quote", _parse(tokens)
else:
return token
s_expr = _parse(tokens)
if next(tokens) != "":
raise Exception("parse error")
return s_expr | 90c8e3cd8482899749d30d5344390cfd5f24989f | 9,090 |
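parse expects an iterator of tokens terminated by an empty string; the tokenize helper below is a hypothetical companion written for this sketch, not part of the original snippet.

def tokenize(source):
    # hypothetical helper: pad parentheses/quotes with spaces, split, and terminate with ""
    source = source.replace("(", " ( ").replace(")", " ) ").replace("'", " ' ")
    return iter(source.split() + [""])

print(parse(tokenize("(+ 1 (quote a))")))  # expected: ('+', '1', ('quote', 'a'))
print(parse(tokenize("'a")))               # expected: ('quote', 'a')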
import numpy
import warnings
def preproc(raw,
dark=None,
flat=None,
solidangle=None,
polarization=None,
absorption=None,
mask=None,
dummy=None,
delta_dummy=None,
normalization_factor=1.0,
empty=None,
split_result=False,
variance=None,
dark_variance=None,
poissonian=False,
dtype=numpy.float32
):
"""Common preprocessing step for all integration engines
:param data: raw value, as a numpy array, 1D or 2D
:param mask: array non null where data should be ignored
:param dummy: value of invalid data
:param delta_dummy: precision for invalid data
:param dark: array containing the value of the dark noise, to be subtracted
:param flat: Array containing the flatfield image. It is also checked for dummies if relevant.
:param solidangle: the value of the solid_angle. This processing may be performed during the rebinning instead. left for compatibility
:param polarization: Correction for polarization of the incident beam
:param absorption: Correction for absorption in the sensor volume
:param normalization_factor: final value is divided by this
:param empty: value to be given for empty bins
:param split_result: set to true to separate signal from normalization and
        return an array of float2, float3 (with variance) or float4 (including counts)
    :param variance: provide an estimation of the variance, enforce
        split_result=True and return a float3 array with variance in second position.
    :param dark_variance: provide an estimation of the variance of the dark_current,
        enforce split_result=True and return a float3 array with variance in second position.
:param poissonian: set to "True" for assuming the detector is poissonian and variance = max(1, raw + dark)
:param dtype: dtype for all processing
    All calculations are performed in single precision floating point (32 bits).
    NaN values are always considered invalid
if neither empty nor dummy is provided, empty pixels are 0.
Empty pixels are always zero in "split_result" mode.
When set to False, i.e the default, the pixel-wise operation is:
.. math::
I = \\frac{raw - dark}{flat \\cdot solidangle \\cdot polarization \\cdot absorption}
Invalid pixels are set to the dummy or empty value.
When split_result is set to True, each result is a float2
or a float3 (with an additional value for the variance) as such:
I = [:math:`raw - dark`, :math:`variance`, :math:`flat \\cdot solidangle \\cdot polarization \\cdot absorption`]
If split_result is 4, then the count of pixel is appended to the list, i.e. 1 or 0 for masked pixels
Empty pixels will have all their 2 or 3 or 4 values to 0 (and not to dummy or empty value)
If poissonian is set to True, the variance is evaluated as raw + dark, with a minimum of 1.
"""
if isinstance(dtype, str):
dtype = numpy.dtype(dtype).type
shape = raw.shape
out_shape = list(shape)
if split_result or (variance is not None) or poissonian:
if split_result == 4:
out_shape += [4]
elif (variance is not None) or poissonian:
out_shape += [3]
else:
out_shape += [2]
split_result = True
size = raw.size
if (mask is None) or (mask is False):
mask = numpy.zeros(size, dtype=bool)
else:
assert mask.size == size, "Mask array size is correct"
mask = numpy.ascontiguousarray(mask.ravel(), dtype=bool)
if (dummy is not None) and (delta_dummy is not None):
check_dummy = True
cdummy = dtype(dummy)
ddummy = dtype(delta_dummy)
elif (dummy is not None):
check_dummy = True
cdummy = dtype(dummy)
ddummy = 0.0
else:
check_dummy = False
cdummy = dtype(empty or 0.0)
ddummy = 0.0
signal = numpy.ascontiguousarray(raw.ravel(), dtype=dtype)
normalization = numpy.zeros_like(signal) + normalization_factor
if variance is not None:
variance = numpy.ascontiguousarray(variance.ravel(), dtype=dtype)
elif poissonian:
variance = numpy.maximum(1.0, signal) # this makes a copy
# runtime warning here
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
if check_dummy:
# runtime warning here
if ddummy == 0:
mask |= (signal == cdummy)
else:
mask |= (abs(signal - cdummy) <= ddummy)
if dark is not None:
assert dark.size == size, "Dark array size is correct"
dark = numpy.ascontiguousarray(dark.ravel(), dtype=dtype)
if check_dummy:
# runtime warning here
if ddummy == 0:
mask |= (dark == cdummy)
else:
mask |= abs(dark - cdummy) < ddummy
signal -= dark
if poissonian:
variance += dark
elif dark_variance is not None:
variance += dark_variance
if flat is not None:
assert flat.size == size, "Flat array size is correct"
flat = numpy.ascontiguousarray(flat.ravel(), dtype=dtype)
if check_dummy:
# runtime warning here
if ddummy == 0:
mask |= (flat == cdummy)
else:
mask |= abs(flat - cdummy) <= ddummy
normalization *= flat
if polarization is not None:
assert polarization.size == size, "Polarization array size is correct"
normalization *= numpy.ascontiguousarray(polarization.ravel(), dtype=dtype)
if solidangle is not None:
assert solidangle.size == size, "Solid angle array size is correct"
normalization *= numpy.ascontiguousarray(solidangle.ravel(), dtype=dtype)
if absorption is not None:
assert absorption.size == size, "Absorption array size is correct"
normalization *= numpy.ascontiguousarray(absorption.ravel(), dtype=dtype)
mask |= numpy.logical_not(numpy.isfinite(signal))
mask |= numpy.logical_not(numpy.isfinite(normalization))
mask |= (normalization == 0)
if variance is not None:
mask |= numpy.logical_not(numpy.isfinite(variance))
if split_result:
result = numpy.zeros(out_shape, dtype=dtype)
signal[mask] = 0.0
normalization[mask] = 0.0
result[..., 0] = signal.reshape(shape)
if out_shape[-1] == 4:
if variance is not None:
variance[mask] = 0.0
result[..., 1] = variance.reshape(shape)
result[..., 2] = normalization.reshape(shape)
result[..., 3] = 1.0 - mask.reshape(shape)
elif variance is None:
            result[..., 1] = normalization.reshape(shape)
else:
variance[mask] = 0.0
result[..., 1] = variance.reshape(shape)
result[..., 2] = normalization.reshape(shape)
else:
result = signal / normalization
result[mask] = cdummy
result.shape = shape
return result | 9a21af39470b1f48c81d043a1d4a9ca045804093 | 9,091 |
import scipy
def lB_2_T(lB, T0=298, sigma=4E-10, ret_res=False):
"""Solves for temperature at given Bjerrum length under condition from Adhikari et al. 2019 that lB/l = 1.2 at 298 K."""
def cond(T, lB, sigma=sigma):
"""condition function whose root gives the temperature T given Bjerrum length lB."""
return lB_fn(T, sigma=sigma) - lB
T = scipy.optimize.fsolve(cond, T0, args=(lB,))[0]
if ret_res:
res = np.abs(lB_fn(T, sigma=sigma) - lB)
return T, res
return T | 73d349d95cd69076874e7147280322535b6b1651 | 9,092 |
from typing import Iterable
from typing import Union
import dataclasses
def make_datacls(
cls_name: str,
fields: Iterable[Union[tuple[str, type], tuple[str, type, dataclasses.Field]]],
init: bool = True,
**kwargs,
) -> type:
"""
Return a new dataclass. This function wraps the Python dataclasses.make_dataclass
function, with the following changes to the generated __init__ method:
• initialization method only processes keyword arguments
• initialization method ignores unexpected keyword arguments
• fields (with default values or not) can be declared in any order
• Optional[...] fields default to None if no default value is specified
Keyword arguments are passed on to the dataclasses.make_dataclass function.
"""
dataclass = dataclasses.make_dataclass(
cls_name=cls_name,
fields=fields,
init=False,
**kwargs,
)
if init:
dataclass.__init__ = _datacls_init(dataclass)
return dataclass | d3797443212504605310ed75fbcb5ce37570b868 | 9,093 |
def square_loss(X, y, theta, reg_beta=0.0):
"""Computes squared loss and gradient.
Based on mean square margin loss.
X: (k, n) data items.
y: (k, 1) result (+1 or -1) for each data item in X.
theta: (n, 1) parameters.
reg_beta: optional regularization strength, for L2 regularization.
Returns (loss, dtheta) where loss is the aggregate numeric loss for this
theta, and dtheta is (n, 1) gradients for theta based on that loss.
Note: the mean (division by k) helps; otherwise, the loss is very large and
a tiny learning rate is required to prevent divergence in the beginning of
the search.
"""
k, n = X.shape
margin = y * X.dot(theta)
diff = margin - 1
loss = np.dot(diff.T, diff) / k + np.dot(theta.T, theta) * reg_beta / 2
dtheta = np.zeros_like(theta)
for j in range(n):
dtheta[j, 0] = (2 * np.dot((diff * y).T, X[:, j]) / k +
reg_beta * theta[j, 0])
return loss.flat[0], dtheta | 3a1cc74eed3abd9c3a7921c9ea02e2169594f504 | 9,094 |
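A tiny numeric sketch of square_loss (which assumes numpy is imported as np) on a 3-sample, 2-feature problem; the numbers are purely illustrative.

import numpy as np

X = np.array([[1.0, 2.0], [2.0, 1.0], [0.5, -1.0]])   # (k=3, n=2) data items
y = np.array([[1.0], [-1.0], [1.0]])                   # labels in {+1, -1}
theta = np.zeros((2, 1))                               # start from all-zero parameters
loss, dtheta = square_loss(X, y, theta, reg_beta=0.1)
print(loss)          # with theta = 0 every margin is 0, so the mean squared (margin - 1) loss is 1.0
print(dtheta.shape)  # (2, 1) gradient, one entry per parameter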
def load_graph(model_file):
"""Loads a TensorFlow graph from file."""
graph = tf.Graph()
with graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(model_file, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return graph | 41e097afd34631ce8b2b94c9a67121886a568ede | 9,097 |
def find_children(node, tag, xml_ns, ns_key):
"""
Finds the collection of children nodes
Parameters
----------
node : ElementTree.Element
tag : str
xml_ns : None|dict
ns_key : None|str
"""
if xml_ns is None:
return node.findall(tag)
elif ns_key is None:
return node.findall('default:{}'.format(tag), xml_ns)
else:
return node.findall('{}:{}'.format(ns_key, tag), xml_ns) | b51d9f588661c3f609dc53adaa328f974e17d5fb | 9,098 |
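A short sketch of find_children using xml.etree.ElementTree; the namespace URI is an illustrative assumption.

import xml.etree.ElementTree as ET

xml_ns = {'default': 'http://example.com/ns'}  # assumed namespace map
root = ET.fromstring(
    '<Root xmlns="http://example.com/ns"><Item>a</Item><Item>b</Item></Root>'
)
items = find_children(root, 'Item', xml_ns, None)   # exercises the 'default:' prefix branch
print([el.text for el in items])                    # ['a', 'b']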
import re
def normalize_string(string, ignore_spaces, ignore_punctuation):
"""Normalizes strings to prepare them for crashing comparison."""
string = string.upper()
if ignore_punctuation:
string = re.sub(r"[^1-9a-z \n\r\t]", "", string, flags=re.I)
if ignore_spaces:
string = re.sub(r"\w+", "", string)
else:
string = string.strip()
string = re.sub(r"[ \n\r\t]+", " ", string)
return string | 31de2b9644eb0943470430c6c3f2ea8a94dfb3cf | 9,099 |
def word_to_forms(word):
"""Return all possible forms for a word.
Args:
word (unicode)
Returns:
forms (set[unicode])
"""
forms = set()
lemmas = lemmatize(word)
for lemma in lemmas:
forms.update(lemma_to_forms(lemma))
return forms | eee7fee2389d180664ff6e07718461043716ba7e | 9,100 |
def load_decamCorners():
"""
Returns the CCD corners of the DECam camera.
Returns:
decamCorners : *list* of *float*
A list of the angular degree offsets of the CCD corners.
"""
with open('%s/DECam_corners.dat' % data_dir) as f:
corners_dct = eval(''.join(f.readlines()))
decamCorners = [v for v in corners_dct.values()]
return decamCorners | 5dd58af44029c87db5623dfa38a6223570836665 | 9,101 |
def reduce_expr(expr):
"""
Reduces a boolean algebraic expression based on the identity X + XY = X
Args:
expr (str): representation of the boolean algebraic expression
Returns:
A string representing the reduced algebraic expression
"""
reduced = True
for term in expr:
matches = [t for t in expr if t!=term and len(set(term).intersection(set(t))) == len(term)]
if(matches):
reduced = False
if reduced:
return expr
    new_expr = []
    temp_expr = list(expr)  # work on a copy so expr is not mutated while iterating over it
    for term in expr:
        # find the terms that contain every literal of this term (i.e. terms this term absorbs)
        matches = [t for t in expr if t != term and len(set(term).intersection(set(t))) == len(term)]
        if matches:
            # reduce by the rule X + XY = X: keep the absorbing term and drop
            # it plus all absorbed terms from the remainder
            new_expr.append(term)
            if term in temp_expr:
                temp_expr.remove(term)
            for match in matches:
                if match in temp_expr:
                    temp_expr.remove(match)
        # if no such term is found, the term stays in temp_expr and is carried to the next expression
    expr = reduce_expr(new_expr + temp_expr)
return expr | 2c278e6ea6f133c51c5e98796f288f366fd10cb3 | 9,102 |
def arcColor(renderer, x, y, rad, start, end, color):
"""Draws an arc to the renderer with a given color.
The start and end of the arc are defined in units of degrees, with 0 being
the bottom of the arc circle and increasing counter-clockwise (e.g. 90 being
the rightmost point of the circle).
If the rendering color has any transparency, blending will be enabled.
Args:
renderer (:obj:`SDL_Renderer`): The renderer to draw on.
x (int): The X coordinate of the center of the circle.
y (int): The Y coordinate of the center of the circle.
rad (int): The radius (in pixels) of the circle.
start (int): The start of the arc (in degrees).
end (int): The end of the arc (in degrees).
color (int): The color to draw with as a 32-bit ``0xRRGGBBAA`` integer
(e.g. ``0xFF0000FF`` for solid red).
Returns:
int: 0 on success, or -1 on failure.
"""
return _funcs["arcColor"](renderer, x, y, rad, start, end, color) | 3297a32236afe2881979b105992df17742dd06c3 | 9,106 |
def dos_element_spd(
folder,
element_spd_dict,
output='dos_element_spd.png',
fill=True,
alpha=0.3,
linewidth=1.5,
sigma=0.05,
energyaxis='x',
color_list=None,
legend=True,
total=True,
figsize=(4, 3),
erange=[-6, 6],
spin='up',
combination_method='add',
fontsize=7,
save=True,
):
"""
This function plots the element projected density of states of the s, p, and d orbitals.
Parameters:
folder (str): This is the folder that contains the VASP files
element_spd_dict (dict[str:str]): A dictionary that contains the individual atoms and the corresponding
orbitals to project onto. For example, if the user wants to project onto the s, p, d orbitals
of In and the p orbitals of As for an InAs structure then the dictionary would be {'In':'spd', 'As':'p'}
output (str): File name of the resulting plot.
        fill (bool): Determines whether or not to fill underneath the plot
alpha (float): Alpha value for the fill
linewidth (float): Linewidth of lines
sigma (float): Standard deviation for gaussian filter
energyaxis (str): Determines the axis to plot the energy on ('x' or 'y')
color_list (list): List of colors that is the same length as the number of projections
in the plot.
legend (bool): Determines whether to draw the legend or not
        total (bool): Determines whether to draw the total density of states or not
spin (str): Which spin direction to parse ('up' or 'down')
figsize (list / tuple): Desired size of the image in inches (width, height)
erange (list): Energy range for the DOS plot ([lower bound, upper bound])
combination_method (str): If spin == 'both', this determines if the spin up and spin down
            densities are added or subtracted. ('add' or 'sub')
fontsize (float): Font size of the text in the figure.
save (bool): Determines whether to automatically save the figure or not. If not
the figure and axis are return for further manipulation.
Returns:
If save == True, this function will return nothing and directly save the image as
the output name. If save == False, the function will return the matplotlib figure
and axis for further editing.
"""
dos = Dos(folder=folder, spin=spin, combination_method=combination_method)
fig = plt.figure(figsize=figsize, dpi=400)
ax = fig.add_subplot(111)
_figure_setup_dos(ax=ax, fontsize=fontsize, energyaxis=energyaxis)
dos.plot_element_spd(
ax=ax,
element_spd_dict=element_spd_dict,
fill=fill,
alpha=alpha,
linewidth=linewidth,
sigma=sigma,
energyaxis=energyaxis,
color_list=color_list,
legend=legend,
total=total,
erange=erange,
)
plt.tight_layout(pad=0.2)
if save:
plt.savefig(output)
else:
return fig, ax | bf20cee25ace85e91a32f3b05016b0cd34f20600 | 9,107 |
import yaml
def load(m, schema=UPLOAD_MANIFEST_SCHEMA):
""" Load and validate a manifest.
"""
    manifest = yaml.safe_load(m)
validate(
manifest, schema=schema,
)
return manifest | 2bdbaf856a476b14b7ca0a353c4cdd56b3de2ae0 | 9,108 |
def transform_phenotype(inv_root, y, fam_indices, null_mean = None):
"""
Transform phenotype based on inverse square root of phenotypic covariance matrix.
If the null model included covariates, the fitted mean is removed rather than the overall mean
"""
# Mean normalise phenotype
if null_mean is None:
y = y - np.mean(y)
else:
y = y - null_mean
# Transform by family
for fam in fam_indices.keys():
famsize = fam_indices[fam].shape[0]
if famsize == 1:
y[fam_indices[fam]] = inv_root[1] * y[fam_indices[fam]]
else:
y[fam_indices[fam]] = inv_root[famsize].dot(y[fam_indices[fam]])
return y | d16699bb04a578050608910a9eea0ee6ea0b450d | 9,109 |
import json
import requests
def download_site_build(event_file: str, download_path: str = "build-site.tar.gz") -> int:
"""Will download the site bulid if this is a forked PR bulid.
Args:
event_file (str): event file from the workflow
Returns:
int: PR num of the build if relevant
"""
with open(event_file, 'r') as f:
github_event = json.load(f)
target_url = github_event['target_url']
print(f'target_url: {target_url}')
# target_url is of the form:
# https://circleci.com/gh/demisto/content-docs/142?utm_campaign=vcs-integration-link&utm_medium=referral&utm_source=github-build-li
target_url = target_url.split('?')[0]
build_num = target_url.split('/')[-1]
print(f'circleci build: {build_num}')
circle_url = f'https://circleci.com/api/v1.1/project/github/demisto/content-docs/{build_num}'
print(f'Checking circleci url: {circle_url}')
res = requests.get(circle_url, verify=VERIFY_SSL)
res.raise_for_status()
build_json = res.json()
# check that this is a pull request
if not build_json.get('pull_requests') or not build_json.get('pull_requests')[0].get('url'):
print('Not a pull request. Skipping')
return 0
branch = build_json.get('branch')
if not branch or not branch.startswith('pull/'):
print(f'Skipping branch as it is not an external pull: {branch}')
return 0
pr_num = branch.split('/')[1]
# get artifacts
res = requests.get(f'{circle_url}/artifacts', verify=VERIFY_SSL)
res.raise_for_status()
artifacts = res.json()
download_url = None
for art in artifacts:
if 'build-site.tar.gz' in art.get('path'):
download_url = art.get('url')
break
if not download_url:
raise ValueError(f"download url missing for artifacts: {artifacts}")
print(f'Downloading build artifact from: {download_url} (pr num: {pr_num}) to: {download_path} ...')
download_file(download_url, download_path)
return int(pr_num) | 62250aa4a420a11745b76e41cffd111c7d675a3a | 9,110 |
def createVskDataDict(labels,data):
"""Creates a dictionary of vsk file values from labels and data.
Parameters
----------
labels : array
List of label names for vsk file values.
data : array
List of subject measurement values corresponding to the label
names in `labels`.
Returns
-------
vsk : dict
Dictionary of vsk file values. Dictionary keys correspond to
names in `labels` and dictionary values correspond to values in
`data`.
Examples
--------
This example tests for dictionary equality through python instead of
doctest since python does not guarantee the order in which dictionary
elements are printed.
>>> labels = ['MeanLegLength', 'LeftKneeWidth', 'RightAnkleWidth']
>>> data = [940.0, 105.0, 70.0]
>>> res = createVskDataDict(labels, data)
>>> res == {'MeanLegLength':940.0, 'LeftKneeWidth':105.0, 'RightAnkleWidth':70.0}
True
"""
vsk={}
for key,data in zip(labels,data):
vsk[key]=data
return vsk | a4669e4a173aaeef534d13faceaeab869eb62cb3 | 9,111 |
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, L, C = x.shape
#print(x.shape)
#print(window_size[0])
x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], L // window_size[2], window_size[2], C)
windows = x.permute(0, 1, 3, 5, 2, 4, 6, 7).contiguous().view(-1, window_size[0], window_size[1], window_size[2], C)
return windows | cfc97230e044f9d3a8fdfa7dde4b17a43f351169 | 9,112 |
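A shape-check sketch for window_partition above, assuming PyTorch tensors (the view/permute calls match torch's API).

import torch

x = torch.randn(1, 4, 4, 4, 3)                # (B, H, W, L, C)
windows = window_partition(x, (2, 2, 2))      # 2x2x2 windows
print(windows.shape)                          # torch.Size([8, 2, 2, 2, 3]) -> 8 windows of 2x2x2x3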
def _get_unique(node_list, key, mode=None):
"""
Returns number or names of unique nodes in a list.
:param node_list: List of dictionaries returned by Neo4j transactions.
:param key: Key accessing specific node in dictionary.
:param mode: If 'num', the number of unique nodes is returned.
:return: Unique nodes (list of nodes) or node number
"""
unique_samples = list()
if node_list:
# it is possible that nodes do not yet exist
for item in node_list:
unique_samples.append(item[key].get('name'))
unique_samples = set(unique_samples)
if mode == 'num':
unique_samples = len(unique_samples)
return unique_samples | 8f639dfce0efe9f7d40f2bba5f2b8757706cd6d7 | 9,113 |
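A small sketch of _get_unique on a list shaped like Neo4j transaction rows; the 'n' key and the names are illustrative.

rows = [
    {'n': {'name': 'sample_a'}},
    {'n': {'name': 'sample_b'}},
    {'n': {'name': 'sample_a'}},
]
print(_get_unique(rows, 'n'))              # {'sample_a', 'sample_b'}
print(_get_unique(rows, 'n', mode='num'))  # 2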
def calculate_cpufreq_weighted_time_in_state(
final_time_in_cpufreq_state_by_cpu, time_in_cpufreq_state_by_cpu):
"""Calculate the weighted average in each CPU frequency state.
Args:
final_time_in_cpufreq_state_by_cpu: Final time in each CPU frequency
state. See the return value of parse_cpufreq_stats_time_in_state() for
the format.
time_in_cpufreq_state_by_cpu: Initial time in each CPU frequency
state. See the return value of parse_cpufreq_stats_time_in_state() for
the format.
Returns:
(weighted_time_in_cpufreq_state, weighted_average_cpufreq) tuple where
weighted_time_in_cpufreq_state is a dictionary that contains the
fractional time (0..1) in each CPU frequency state keyed by CPU number and
weighted_average_cpufreq is a dictionary containing the overall weighted
average CPU frequency keyed by CPU number.
"""
weighted_average_cpufreq = dict([(c, 0.0)
for c in time_in_cpufreq_state_by_cpu])
weighted_time_in_cpufreq_state_by_cpu = {}
    for cpu, time_in_cpufreq_state in time_in_cpufreq_state_by_cpu.items():
final_time_in_cpufreq_state = final_time_in_cpufreq_state_by_cpu[cpu]
weighted_time_in_cpufreq_state = {}
delta_time_in_cpufreq_state = {}
total_time = 0.0
for freq in time_in_cpufreq_state:
delta_time_in_cpufreq_state[freq] = (
final_time_in_cpufreq_state.get(freq, 0) -
time_in_cpufreq_state.get(freq, 0))
total_time += delta_time_in_cpufreq_state[freq]
        for freq, cpu_time in delta_time_in_cpufreq_state.items():
weight = float(cpu_time) / total_time
weighted_time_in_cpufreq_state[freq] = weight
weighted_average_cpufreq[cpu] += freq * weight
weighted_time_in_cpufreq_state_by_cpu[cpu] = weighted_time_in_cpufreq_state
return (weighted_time_in_cpufreq_state_by_cpu, weighted_average_cpufreq) | 8f8f3646bbfef89c1d2c3f65d6b3a56b76fb19e4 | 9,114 |
from PIL import Image
def array2imgdata_pil(A, format='PNG'):
"""get png data from array via converting to PIL Image"""
if A.shape[2] == 3:
mode = 'RGB'
elif A.shape[2] == 4:
mode = 'RGBA'
else:
mode = 'L'
    img = Image.frombytes(mode, A.shape[:2], A.tobytes())
return pil2imgdata(img, format) | f206dc6eaefec2065c10c02b0ecafa5662f9d21c | 9,115 |
def h_eval(data):
"""
    Recursively walk a dict/list/tuple/set and convert string values to their
    correct type (boolean/int/float/string); None and empty-dict values are dropped.
    """
if isinstance(data, dict):
for _k in list(data.keys()):
data[_k] = h_eval(data[_k])
if data[_k] is None or (isinstance(data[_k], dict) and not data[_k]):
data.pop(_k)
return data
if isinstance(data, list) or isinstance(data, tuple) or isinstance(data, set):
res = []
for _k in data:
res.append(h_eval(_k))
if isinstance(data, tuple):
return tuple(res)
if isinstance(data, set):
return set(res)
return res
try:
if isinstance(data, str):
if data.endswith("%"):
data = data[:-1]
if data.lower() == "false":
return False
if data.lower() == "true":
return True
if data.lower() == "n/e":
return None
try:
return int(data)
except Exception:
pass
try:
return float(data)
except Exception:
pass
return data
except Exception:
return data | 28a3529283719cab321c712a9e8723d5ff314ef8 | 9,116 |
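A quick sketch of h_eval on a mixed dictionary; note that 'n/e' values become None and are dropped, and a trailing '%' is stripped before conversion.

raw = {'enabled': 'true', 'count': '3', 'ratio': '2.5%', 'missing': 'n/e'}
print(h_eval(raw))  # {'enabled': True, 'count': 3, 'ratio': 2.5}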
def _recursive_simplify(expr):
""" Simplify the expression as much as possible based on
domain knowledge. """
input_expr = expr
# Simplify even further, based on domain knowledge:
# windowses = ('WIN32', 'WINRT')
apples = ("MACOS", "UIKIT", "IOS", "TVOS", "WATCHOS")
bsds = ("FREEBSD", "OPENBSD", "NETBSD")
androids = ("ANDROID", "ANDROID_EMBEDDED")
unixes = (
"APPLE",
*apples,
"BSD",
*bsds,
"LINUX",
*androids,
"HAIKU",
"INTEGRITY",
"VXWORKS",
"QNX",
"WASM",
)
unix_expr = simplify_logic("UNIX")
win_expr = simplify_logic("WIN32")
false_expr = simplify_logic("false")
true_expr = simplify_logic("true")
expr = expr.subs(Not(unix_expr), win_expr) # NOT UNIX -> WIN32
expr = expr.subs(Not(win_expr), unix_expr) # NOT WIN32 -> UNIX
# UNIX [OR foo ]OR WIN32 -> ON [OR foo]
expr = _simplify_expressions(expr, Or, (unix_expr, win_expr), true_expr)
# UNIX [AND foo ]AND WIN32 -> OFF [AND foo]
expr = _simplify_expressions(expr, And, (unix_expr, win_expr), false_expr)
expr = _simplify_flavors_in_condition("WIN32", ("WINRT",), expr)
expr = _simplify_flavors_in_condition("APPLE", apples, expr)
expr = _simplify_flavors_in_condition("BSD", bsds, expr)
expr = _simplify_flavors_in_condition("UNIX", unixes, expr)
expr = _simplify_flavors_in_condition("ANDROID", ("ANDROID_EMBEDDED",), expr)
# Simplify families of OSes against other families:
expr = _simplify_os_families(expr, ("WIN32", "WINRT"), unixes)
expr = _simplify_os_families(expr, androids, unixes)
expr = _simplify_os_families(expr, ("BSD", *bsds), unixes)
for family in ("HAIKU", "QNX", "INTEGRITY", "LINUX", "VXWORKS"):
expr = _simplify_os_families(expr, (family,), unixes)
# Now simplify further:
expr = simplify_logic(expr)
while expr != input_expr:
input_expr = expr
expr = _recursive_simplify(expr)
return expr | 40b9dcc6d0a5b176f36ed2e01fc35a005c3c851f | 9,117 |
import uuid
def dropzone(url, **kwargs):
"""Dropzone component
A basic dropzone component that supports drag and drop uploading
of files which are posted to the URL provided.
>>> zoom.system.site = zoom.sites.Site()
>>> zoom.system.site.packages = {}
>>> zoom.system.request = zoom.utils.Bunch(app=zoom.utils.Bunch(name='hello', packages={}))
>>> c = dropzone('/app/files')
>>> isinstance(c, zoom.Component)
True
"""
zoom.requires('dropzone')
id = 'dropzone_' + uuid.uuid4().hex
js = """
var %(id)s = new Dropzone("#%(id)s", {url: "%(url)s"});
""" % dict(id=id, url=url)
html = div(classed='dropzone', id=id, **kwargs)
return zoom.Component(html) | d8626b158b8adb738a4f74f6009da93043365cc4 | 9,118 |
import math
def fmt_bytes(size_bytes):
"""Return a nice 'total_size' string with Gb, Mb, Kb, and Byte ranges"""
units = ["Bytes", "KB", "MB", "GB"]
if size_bytes == 0:
return f"{0} Bytes"
for unit in units:
digits = int(math.log10(size_bytes)) + 1
if digits < 4:
return f"{round(size_bytes, 1)} {unit}"
size_bytes /= 1024
return f"{size_bytes} TB" | 40613403092bdc9d8dca8b0b487d5af6c887b075 | 9,119 |
from typing import Union
import warnings
def get_valid_split(records: dict, train_val_test: Union[list, np.ndarray]) -> dict:
""" Gets a train, val, test split with at least one instance of every class
Keep doing train_test_split until each split of the data has at least one single example of every behavior
in the dataset. it would be bad if your train data had class counts: [1000, 0, 0, 10] and your test data had
class counts: [500, 100, 300, 0]
Parameters
----------
records: dict of dicts
See train_val_test_split
train_val_test: list, np.ndarray
See train_val_test_split
Returns
-------
split_dict: dict
See train_val_test_split
"""
is_wrong = True
split_dict = None
while is_wrong:
split_dict = train_val_test_split(records, train_val_test)
should_continue = do_all_classes_have_labels(records, split_dict)
if not should_continue:
warnings.warn('Not all classes in the dataset have *any* labels!')
return split_dict
is_wrong = False
for split in ['train', 'val', 'test']:
labelfiles = [records[i]['label'] for i in split_dict[split]]
if len(labelfiles) > 0:
_, class_counts, _, _, _ = read_all_labels(labelfiles)
if not np.all(class_counts > 0):
is_wrong = True
return split_dict | ce575ba0937e32b527e5d70de9855b6fa9f4a686 | 9,121 |
def _random_op(sites, ldim, hermitian=False, normalized=False, randstate=None,
dtype=np.complex_):
"""Returns a random operator of shape (ldim,ldim) * sites with local
dimension `ldim` living on `sites` sites in global form.
:param sites: Number of local sites
    :param ldim: Local dimension
:param hermitian: Return only the hermitian part (default False)
:param normalized: Normalize to Frobenius norm=1 (default False)
:param randstate: numpy.random.RandomState instance or None
:returns: numpy.ndarray of shape (ldim,ldim) * sites
>>> A = _random_op(3, 2); A.shape
(2, 2, 2, 2, 2, 2)
"""
op = _randfuncs[dtype]((ldim**sites,) * 2, randstate=randstate)
if hermitian:
op += np.transpose(op).conj()
if normalized:
op /= np.linalg.norm(op)
return op.reshape((ldim,) * 2 * sites) | 202fe112e896b14abb38a8477afa9eab7689f7f6 | 9,122 |
def minimize(system, positions, platform=None, tolerance=1.0*unit.kilocalories_per_mole/unit.angstroms, maxIterations=50):
"""Minimize the energy of the given system.
Parameters
----------
platform : simtk.openmm.Platform or None, optional
If None, the global GLOBAL_ALCHEMY_PLATFORM will be used.
tolerance : simtk.unit.Quantity with units compatible with energy/distance, optional, default = 1*kilocalories_per_mole/angstroms
Minimization tolerance
maxIterations : int, optional, default=50
Maximum number of iterations for minimization
Returns
-------
minimized_positions : simtk.openmm.Quantity with shape [nparticle,3] with units compatible with distance
The energy-minimized positions.
"""
timestep = 1.0 * unit.femtoseconds
integrator = openmm.VerletIntegrator(timestep)
context = create_context(system, integrator, platform)
context.setPositions(positions)
openmm.LocalEnergyMinimizer.minimize(context, tolerance, maxIterations)
minimized_positions = context.getState(getPositions=True).getPositions(asNumpy=True)
del context, integrator
return minimized_positions | d8dc9d1eab96c512aa72da36909490adfa8bbd6f | 9,123 |
import aiohttp
async def fetch(url="", headers=DEFAULT_HEADERS, params={}, payload={}, method="GET", loop=None):
"""fetch content from the url"""
if not url:
return
async with aiohttp.ClientSession(loop=loop, headers=headers) as session:
_method = getattr(session, method.lower())
async with _method(url, params=params, data=payload) as resp:
return await resp.json() | 9be522a373532a3452a85ec1e8aae10e7659e999 | 9,124 |
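A minimal sketch of calling the fetch coroutine above, assuming an aiohttp version that still accepts ClientSession(loop=...) as the snippet does; the URL is a placeholder JSON endpoint and headers are passed explicitly since DEFAULT_HEADERS is defined elsewhere.

import asyncio

async def main():
    # placeholder endpoint; any API returning JSON works the same way
    data = await fetch("https://httpbin.org/get", headers={}, params={"q": "test"})
    print(data)

asyncio.run(main())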
import json
def retrieve_prefix_fixture():
"""Load test fixture data."""
    with open("./tests/fixtures/s3_prefix_list.json") as f:
        j = json.load(f)
    return j
from argparse import ArgumentParser, Namespace
def parse_args(version: str) -> Namespace:
"""
Parse arguments passed to the application.
A custom argument parser handles multiple commands and options to launch
the desired function.
Parameters
----------
version : string
A ``string`` of the Bobber version.
Returns
-------
Namespace
Returns a ``Namespace`` of all of the arguments that were parsed from
the application during runtime.
"""
parser = ArgumentParser(f'Bobber Version: {version}')
parser.add_argument('--version', action='version', version=__version__)
# Required positional command subparser which should be specified first
commands = parser.add_subparsers(dest='command', metavar='command')
commands_parent = ArgumentParser(add_help=False)
# More general options which apply to a majority of the running commands
# Note that all arguments prepended with '--' are optional
commands_parent.add_argument('log_path', metavar='log-path', help='Path '
'used to store log files on the head node')
commands_parent.add_argument('hosts', help='Comma-separated list of '
'hostnames or IP addresses',
type=unique_hosts)
commands_parent.add_argument('--config-path', help='Read a JSON config '
'file with expected parameters and use those '
'values for testing. Ignores all other '
'optional flags')
commands_parent.add_argument('--gpus', help='Number of GPUs contained '
'within a system or systems under test '
'(heterogeneous counts not supported)',
type=int)
commands_parent.add_argument('--compute-gid', help='The compute gid. '
'defaults to 0 - check with "show_gids" '
'command. A non-default gid is needed for '
'Ethernet (frequently gid 3)', type=int,
default=0)
commands_parent.add_argument('--nccl-tc', help='NCCL setting required to '
'use prio3 traffic for Ethernet. Set to 106 '
'for Ethernet, and do not set for IB.',
type=int)
commands_parent.add_argument('--batch-size-sm', help='Batch size to use '
'with DALI data ingest tests for small '
'images', type=int)
commands_parent.add_argument('--batch-size-lg', help='Batch size to use '
'with DALI data ingest tests for large '
'images', type=int)
commands_parent.add_argument('--nccl-max', help='Specify the maximum data '
'size to test with NCCL, in Gigabytes '
'(default is 1 GB)', type=int)
commands_parent.add_argument('--nccl-ib-hcas', help='Specify the list of '
'interfaces to use for NCCL test multinode '
'communication', default='')
commands_parent.add_argument('--ssh-iface', help='Specify ssh interface '
'for the system(s) under test ', default='')
commands_parent.add_argument('--no-direct', help='Disable running with '
'direct IO for applications that support it',
action='store_true')
commands_parent.add_argument('--io-depth', help='Customize the IO depth '
'for direct IO testing', type=int, default=16)
commands_parent.add_argument('--bw-threads', help='Maximum number of '
'threads to use for bandwidth tests',
type=int)
commands_parent.add_argument('--125k-threads', dest='stg_125k_threads',
help='Maximum number of threads to use for '
'125K IO size tests', type=int)
commands_parent.add_argument('--iops-threads', help='Maximum number of '
'threads to use for iops tests', type=int)
commands_parent.add_argument('--read-pattern', help='Specify IO pattern '
'for fio read tests. Supported values: '
'read, randread. Defaults to read.',
default='read',
choices=READ_PATTERNS)
commands_parent.add_argument('--write-pattern', help='Specify IO pattern '
'for fio write tests. Supported values: '
'write, randwrite. Defaults to write.',
default='write',
choices=WRITE_PATTERNS)
commands_parent.add_argument('--iterations', help='Number of iterations to'
' execute per test - a seperate log file will'
' be generated for each iteration', type=int,
default=10)
commands_parent.add_argument('--sweep', help='If present, will run all '
'tests for all specified iterations from a '
'single system to the number of systems '
'specified in the --hosts flag, with a step '
'of a single system (so, 3 systems specified '
'would result in tests for 1, 2, and 3 '
'systems)', action='store_true')
commands_parent.add_argument('--system', help='If system is specified, '
'iops-threads, 125k-threads, bw-threads, '
'gpus, batch size, and network interface '
'names are given default values - override '
'by specifying the flags you\'d prefer to '
'override, ignore the flags you are ok with '
'using defaults for '
'supported systems: dgx-a100-single, '
'dgx-a100-dual, and dgx-2 for now. -single '
'is used for a system with a single storage '
'NIC, and -dual is used for a system with two'
' storage NICs', choices=SYSTEMS.keys())
commands_parent.add_argument('--stg-extra-flags', help='Experimental - '
'add extra flags to stg tests (currently '
'supported - stg-bw and stg-iops). If '
'providing more than one flag, wrap entire '
'set in quotes')
commands_parent.add_argument('--pause', help='Pause between tests for N '
'seconds to ensure any activity is finished '
'before the next test begins. Defaults to 0 '
'(no pause).', type=int, default=0)
# Create the test initiation commands with the general options above
commands.add_parser(RUN_ALL, help='Run all tests',
parents=[commands_parent])
commands.add_parser(RUN_DALI, help='Run DALI tests only',
parents=[commands_parent])
commands.add_parser(RUN_NCCL, help='Run NCCL tests only',
parents=[commands_parent])
commands.add_parser(RUN_STG_BW, help='Run storage bandwidth test only',
parents=[commands_parent])
commands.add_parser(RUN_STG_125K, help='Run storage 125 IO size test only',
parents=[commands_parent])
commands.add_parser(RUN_STG_IOPS, help='Run storage IOPS test only',
parents=[commands_parent])
commands.add_parser(RUN_STG_META, help='Run storage metadata test only',
parents=[commands_parent])
# Options specific to exporting the containers
export = commands.add_parser(EXPORT, help='Export the container for '
'multisystem tests')
# Options specific to parsing the results
parse = commands.add_parser(PARSE_RESULTS, help='Parse and display results'
'from the log files')
parse.add_argument('log_path', metavar='log-path', help='Path to saved '
'logfile location')
parse.add_argument('--json-filename', help='Specify the filename to use '
'for saving the JSON data. If not specified, the JSON '
'data will not be saved.', default=None, type=str)
parse.add_argument('--override-version-check', help='Optionally skip the '
'version check to ensure the same version of Bobber '
'was used for all tests.', action='store_true')
parse.add_argument('--compare-baseline', help='Compare the values produced'
' by a test run against a pre-defined baseline to '
'verify performance meets an acceptable threshold. '
'This command is ignored if the --custom-baseline flag '
'is used.',
choices=BASELINES)
parse.add_argument('--custom-baseline', help='Compare against a custom '
'baseline to verify performance meets an acceptable '
'threshold. This command overrides the '
'--compare-baseline flag.', type=str)
parse.add_argument('--baseline-tolerance', help='The percentage of '
'tolerance to include while comparing results against '
'a baseline. For example, if the desire is to allow '
'results to be within 5%% of the baseline and still '
'pass, enter "5" for the tolerance. This will only '
'measure tolerance below the result and will not punish'
' if numbers are higher than the baseline above the '
'tolerance level. This value is ignored if not running '
'the baseline comparison. Defaults to 0 tolerance.',
type=int, default=0)
parse.add_argument('--verbose', help='Display text-based information for '
'each system count in addition to the table.',
action='store_true')
# Options specific to building the containers
build = commands.add_parser(BUILD, help='Build the container')
# Options specific to casting the containers
cast = commands.add_parser(CAST, help='Start the container')
cast.add_argument('storage_path', metavar='storage-path', help='Path at '
'which the filesystem under test is mounted')
cast.add_argument('--ignore-gpu', help='Start the Bobber container '
'without GPUs', action='store_true')
# Options specific to loading a Docker image from a local binary
load = commands.add_parser(LOAD, help='Load a container from a local '
'binary')
load.add_argument('filename', help='Filename of local *.tar file of '
'the image to load')
return parser.parse_args() | 20cbacbb611d9495fcd6ad444b258a5588c1bddb | 9,126 |
def grid_reference_to_northing_easting(grid_reference):
"""
Needs to include reference
:param grid_reference:
:return:
"""
grid_reference = grid_reference.strip().replace(' ', '')
if len(grid_reference) == 0 or len(grid_reference) % 2 == 1 or len(grid_reference) > 12:
return None, None
grid_reference = grid_reference.upper()
if grid_reference[0] not in 'STNOH' or grid_reference[1] == 'I':
return None, None
e = n = 0
c = grid_reference[0]
if c == 'T':
e = 500000
elif c == 'N':
n = 500000
elif c == 'O':
e = 500000
n = 500000
elif c == 'H':
n = 1000000
c = ord(grid_reference[1]) - 66
if c < 8: # J
c += 1
e += (c % 5) * 100000
    n += (4 - c//5) * 100000  # integer division: row of the letter within the 5x5 grid
c = grid_reference[2:]
try:
s = c[:int(len(c)/2)]
while len(s) < 5:
s += '0'
e += int(s)
s = c[int(-len(c)/2):]
while len(s) < 5:
s += '0'
n += int(s)
except Exception as error:
print("Caught exception during conversion. Issue: {}".format(error))
return None, None
# Data is converted into integers
return int(e), int(n) | 3da96e36f9be1e369d0425f2b9c34e432eb5ca77 | 9,127 |
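A quick sketch (using the integer-division fix above): the TQ square has a false origin of 500000 m east, 100000 m north, so the reference below should resolve as in the comment.

print(grid_reference_to_northing_easting('TQ 30000 80000'))  # (easting, northing) -> (530000, 180000)
print(grid_reference_to_northing_easting('XX 123'))          # invalid reference -> (None, None)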
def sn_random_numbers(shape, antithetic=True, moment_matching=True,
fixed_seed=False):
"""Returns an ndarray object of shape with (pseudo)random numbers
that are standard normally distributed.
Parameters
----------
shape : tuple (o, n, m)
Generation of array with shape (o, n, m).
antithetic : bool, default=True
Generation of antithetic variates.
moment_matching : bool, default=True
Matching of first and second moments.
fixed_seed : bool, default=False
Flag to fix the seed.
Returns
-------
ran: numpy.ndarray
(o, n, m) array of (pseudo)random numbers.
"""
if fixed_seed:
np.random.seed(1000)
if antithetic:
ran = np.random.standard_normal(
(shape[0], shape[1], shape[2] // 2))
ran = np.concatenate((ran, -ran), axis=2)
else:
ran = np.random.standard_normal(shape)
if moment_matching:
ran = ran - np.mean(ran)
ran = ran / np.std(ran)
if shape[0] == 1:
return ran[0]
else:
return ran | ca173cf02f51a8bb6cd976fc84f42597497cd426 | 9,129 |
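A shape and moment sanity sketch for sn_random_numbers (which assumes numpy is imported as np).

import numpy as np

ran = sn_random_numbers((2, 3, 4), antithetic=True, moment_matching=True, fixed_seed=True)
print(ran.shape)                       # (2, 3, 4)
print(round(float(np.mean(ran)), 6))   # ~0.0 after moment matching
print(round(float(np.std(ran)), 6))    # ~1.0 after moment matching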
def blue(N: int) -> np.ndarray:
"""
Blue noise.
* N: Amount of samples.
Power increases with 6 dB per octave.
Power density increases with 3 dB per octave.
https://github.com/python-acoustics
"""
x = white(N)
X = rfft(x) / N
S = np.sqrt(np.arange(X.size)) # Filter
y = irfft(X*S).real[:N]
return normalise(y) | 80e0c449f6548b19546a4e58fa3b3e108f21d6df | 9,130 |
from typing import Dict
def _remove_attribute(note_dict: Dict, attribute: str) -> Dict:
""" Create a copy of the note where a single attribute is removed """
d = dict(note_dict)
d[attribute] = None
return d | d2659b887c1a2a7c67f6785889db2aa2039f9627 | 9,131 |
from pathlib import Path
import yaml
def get_config(path: str) -> config_schema:
"""Load the config from the path, validate and return the dcitionary
Args:
path (str): Path the config.yaml
Returns:
config_schema: The configuration dictionary
"""
config_path = Path(path)
config = yaml.full_load(open(config_path))
return Schema(config_schema).validate(config) | bf355508b52192eb78857eadedde923b623ecc56 | 9,132 |
def compare_chars(first, second):
"""
Returns the greater of the two characters
:param first:
:param second:
:return: char
"""
return chr(max(ord(first), ord(second))) | aee1e5767d6ab767bc8da27d382acc105f62d9f5 | 9,133 |
import re
def find_author():
"""This returns 'The NeuroKit's development team'"""
result = re.search(
r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format("__author__"),
open("../neurokit2/__init__.py").read(),
)
return str(result.group(1)) | bcf7fb52021cd23ef48b57aae9444676bc26a862 | 9,136 |
from ecpy.utils.util import is_enable_native, _native
from ecpy.fields.Zmod import ZmodElement
from ecpy.fields.ExtendedFiniteField import ExtendedFiniteFieldElement
def tate_pairing(E, P, Q, m, k=2):
"""
Calculate Tate Pairing
Args:
E: The Elliptic Curve
P: A point over E which has order m
Q: A point over E which has order m
m: The order of P, Q on E
k: [Optional] The Embedding Degree of m on E
"""
if is_enable_native:
P = _native.EC_elem(E.ec, tuple(P.x), tuple(P.y), tuple(P.z))
Q = _native.EC_elem(E.ec, tuple(Q.x), tuple(Q.y), tuple(Q.z))
if E.ec.type == 1:
t = _native.FF_elem(0)
elif E.ec.type == 2:
t = _native.EF_elem(0, 0)
_native.tate_pairing(t, E.ec, P, Q, m, k)
if E.ec.type == 1:
return ZmodElement(E.field, t.to_python())
elif E.ec.type == 2:
t = t.to_python()
return ExtendedFiniteFieldElement(E.field, t[0], t[1])
else:
f = miller(E, P, Q, m)
return f ** (((E.field.p ** k) - 1) // m) | 804f8d9730df010f690b363b658ba59361e87f47 | 9,137 |
import tensorflow as tf
def kl_loss(img, decoded_img, encoder_log_var, encoder_mu):
    """
    KL-divergence loss for VAEs (img and decoded_img are unused by this term)
    """
kl_loss = -0.5 * tf.reduce_sum( (1+encoder_log_var-tf.exp(encoder_log_var)-encoder_mu**2), axis=[1,2,3],name='klloss' )
return tf.reduce_mean(kl_loss,axis=0) | bd6f644bea86fbc2e5c4426e0e556a432256bb07 | 9,138 |
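A tiny sanity sketch for kl_loss: with zero mean and zero log-variance the KL term vanishes (img and decoded_img are not used by this term, so None is passed here).

import tensorflow as tf

mu = tf.zeros([2, 4, 4, 1])
log_var = tf.zeros([2, 4, 4, 1])
print(kl_loss(None, None, log_var, mu).numpy())  # 0.0 -> KL(N(0, I) || N(0, I)) is zero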
import warnings
def steady(L, maxiter=10, tol=1e-6, itertol=1e-5, method='solve',
use_umfpack=True, use_precond=False):
"""
Deprecated. See steadystate instead.
"""
message = "steady has been deprecated, use steadystate instead"
warnings.warn(message, DeprecationWarning)
return steadystate(L, [], maxiter=maxiter, tol=tol,
use_umfpack=use_umfpack, use_precond=use_precond) | 36cf008c0b2c773359df5f2251282a2eb12dc613 | 9,139 |
def and_intersection(map_list):
"""
    Bitwise and a list of HealSparseMaps as an intersection. Only pixels that
are valid in all the input maps will have valid values in the output.
Only works on integer maps.
Parameters
----------
map_list : `list` of `HealSparseMap`
Input list of maps to bitwise and
Returns
-------
result : `HealSparseMap`
Bitwise and of maps
"""
filler = map_list[0]._sparse_map.dtype.type(-1)
return _apply_operation(map_list, np.bitwise_and, filler, union=False, int_only=True) | 183df05210cdd29bf5dd0a5654ca08297cb9f72a | 9,140 |
import re
from urllib import parse as urlparse  # Python 3 equivalent of the legacy `urlparse` module used below
def parse(url):
"""Parses a cache URL."""
config = {}
url = urlparse.urlparse(url)
# Handle python 2.6 broken url parsing
path, query = url.path, url.query
if '?' in path and query == '':
path, query = path.split('?', 1)
cache_args = dict([(key.upper(), ';'.join(val)) for key, val in
urlparse.parse_qs(query).items()])
# Update with environment configuration.
backend = BACKENDS.get(url.scheme)
if not backend:
raise Exception('Unknown backend: "{0}"'.format(url.scheme))
config['BACKEND'] = BACKENDS[url.scheme]
redis_options = {}
if url.scheme == 'hiredis':
redis_options['PARSER_CLASS'] = 'redis.connection.HiredisParser'
# File based
if not url.netloc:
if url.scheme in ('memcached', 'pymemcached', 'djangopylibmc'):
config['LOCATION'] = 'unix:' + path
elif url.scheme in ('redis', 'hiredis'):
match = re.match(r'.+?(?P<db>\d+)', path)
if match:
db = match.group('db')
path = path[:path.rfind('/')]
else:
db = '0'
config['LOCATION'] = 'unix:%s?db=%s' % (path, db)
else:
config['LOCATION'] = path
# URL based
else:
# Handle multiple hosts
config['LOCATION'] = ';'.join(url.netloc.split(','))
if url.scheme in ('redis', 'hiredis'):
if url.password:
redis_options['PASSWORD'] = url.password
# Specifying the database is optional, use db 0 if not specified.
db = path[1:] or '0'
port = url.port if url.port else 6379
config['LOCATION'] = "redis://%s:%s/%s" % (url.hostname, port, db)
if redis_options:
config.setdefault('OPTIONS', {}).update(redis_options)
if url.scheme == 'uwsgicache':
config['LOCATION'] = config.get('LOCATION', 'default') or 'default'
# Pop special options from cache_args
# https://docs.djangoproject.com/en/1.10/topics/cache/#cache-arguments
options = {}
for key in ['MAX_ENTRIES', 'CULL_FREQUENCY']:
val = cache_args.pop(key, None)
if val is not None:
options[key] = int(val)
if options:
config.setdefault('OPTIONS', {}).update(options)
config.update(cache_args)
return config | 48328a0863272f17c8ad1e848af0bb29e0118545 | 9,141 |
def states_state(id=""):
""" displays a HTML page with a list of cities by states """
states = storage.all(State).values()
states = sorted(states, key=lambda k: k.name)
found = 0
state = ""
cities = []
for i in states:
if id == i.id:
state = i
found = 1
break
if found:
states = sorted(state.cities, key=lambda k: k.name)
state = state.name
if id and not found:
found = 2
return render_template('9-states.html',
state=state,
array=states,
found=found) | b00bd67a8242a2a21f32226b34df69e214db8856 | 9,142 |
def make_tree_item(parent, text, icon, first_col_text=None, second_col_text=None):
"""
    Construct a child item of the tree.
    :param parent: parent node element under which the child item is created
    :param text: text of the child node to construct
    :param icon: icon object displayed for this element
    :param first_col_text: hidden info stored in the first column
    :param second_col_text: hidden info stored in the second column
"""
item = MyTreeWidgetItem(parent)
item.setIcon(0, icon)
item.setText(0, text)
if first_col_text:
        # Store as a hidden attribute in the specified column
item.setText(1, first_col_text)
if second_col_text:
item.setText(2, second_col_text)
return item | a93e9d75ac9841c27fa472575054f00e6d1b1cb4 | 9,143 |
def get_boundary_condition(name):
"""
Return a boundary condition by name
"""
try:
return _BOUNDARY_CONDITIONS[name]
except KeyError:
ocellaris_error(
'Boundary condition "%s" not found' % name,
'Available boundary conditions:\n'
+ '\n'.join(
' %-20s - %s' % (n, s.description)
for n, s in sorted(_BOUNDARY_CONDITIONS.items())
),
)
raise | 29ba4d456452cb33cf48c44cace337683d1feab8 | 9,144 |
import datetime
import json
import psycopg2.extras
def run(ts):
""" Actually do the hard work of getting the USDM in geojson """
pgconn = get_dbconn('postgis')
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
# Look for polygons into the future as well as we now have Flood products
# with a start time in the future
cursor.execute("""
SELECT ST_asGeoJson(geom) as geojson, dm, valid
from usdm WHERE valid = %s ORDER by dm ASC
""", (ts, ))
if cursor.rowcount == 0:
# go back one week
cursor.execute("""
SELECT ST_asGeoJson(geom) as geojson, dm, valid
from usdm WHERE valid = %s ORDER by dm ASC
""", (ts - datetime.timedelta(days=7), ))
utcnow = datetime.datetime.utcnow()
res = {'type': 'FeatureCollection',
'features': [],
'generation_time': utcnow.strftime("%Y-%m-%dT%H:%M:%SZ"),
'count': cursor.rowcount}
for row in cursor:
res['features'].append(dict(type="Feature",
id=row['dm'],
properties=dict(
date=row['valid'].strftime("%Y-%m-%d"),
dm=row['dm']),
geometry=json.loads(row['geojson'])
))
return json.dumps(res) | b9e8f853f5d039fa8bb5fdc70a9f281ea94d7e37 | 9,145 |
def mtxslv_user_ratings(user_id, dataset):
"""
Receives user_id and dataset. Look for all
occurences of user_id in dataset and returns
such subset.
If no user_id is found, return an empty
numpy array.
"""
subset = [] # the same thing as I_i (set of item user_id has voted)
for it in range(0,np.shape(dataset)[0]):
if (dataset[it,user_column] == user_id):
subset.append(dataset[it,:].tolist())
return np.array(subset) | 7b494b95af316d93e6f9b0a20a351179eb2906b9 | 9,146 |
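# Illustrative usage sketch for mtxslv_user_ratings. The module-level names
# `np` (numpy) and `user_column` are assumed here; `user_column = 0` is just a
# guess for the column that stores the user id.
import numpy as np

user_column = 0
ratings = np.array([[1, 10, 4.0],
                    [2, 11, 3.5],
                    [1, 12, 5.0]])
subset = mtxslv_user_ratings(1, ratings)
# subset -> array([[ 1., 10.,  4.], [ 1., 12.,  5.]])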
def random_rotation(min, max, prng=DEFAULT_PRNG):
""" Construct a random rotation between -max and max.
Args
min: a scalar for the minimum absolute angle in radians
max: a scalar for the maximum absolute angle in radians
prng: the pseudo-random number generator to use.
Returns
a homogeneous 3 by 3 rotation matrix
"""
return rotation(prng.uniform(min, max)) | 58ae95edf57886164e0e123a89534000ef2f8182 | 9,147 |
def country_code_from_name(country_names,l3=False):
"""2 letter ['BE'] or 3 letter codes ['BEL'] from country names
Accepts string or list of strings e.g, 'Serbia' or ['Belgium','Slovakia']
Update 3/1/2022: also accepts non uppercase titles, e.g. ['united Kingdom', 'hungary']
Arguments:
*country_names* (string or list of strings) : country names
*l3* (Boolean) : return 3l-code; default = False -> returns 2l-code
Returns
*sel* (string or list of strings) : 2l or 3l codes
"""
    data = config['paths']['data']
    df = pd.read_csv((data / 'country_codes.csv'), delimiter=';')
    df_3l = df['country']
    if l3:
        code_col = 'code3'  # return 3l code
    else:
        code_col = 'code2'  # return 2l code
    unpack = False
    if not isinstance(country_names, list):
        country_names = [country_names]
        unpack = True
    capitalized_names = [name.title() for name in country_names]
    sel = list(df.loc[df.country.isin(capitalized_names)][code_col])
    if unpack:
        sel = sel[0]
    return sel | d45a48b4f11de383def2817609c6682157de4443 | 9,148
def steem_amount(value):
"""Returns a decimal amount, asserting units are STEEM"""
return parse_amount(value, 'STEEM') | 12ce78e0c9002dd8cd0b136563ad81ab05817ff7 | 9,149 |
def get_edge_angle(fx,fy):
"""エッジ強度と勾配を計算する関数
"""
# np.power : 行列のn乗を計算
# np.sqrt : 各要素の平方根を計算
edge = np.sqrt(np.power(fx.astype(np.float32),2)+np.power(fy.astype(np.float32),2))
edge = np.clip(edge, 0, 255)
fx = np.maximum(fx, 1e-5)
angle = np.arctan(fy/fx)
return edge,angle | 80c7acc05867b6443aa7b4643d75d4fb79e792a9 | 9,150 |
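# Minimal usage sketch for get_edge_angle; np.gradient stands in for whatever
# filter (e.g. Sobel) the surrounding module uses to produce fx and fy.
import numpy as np

img = np.tile(np.arange(8, dtype=np.float32), (8, 1))  # horizontal intensity ramp
fy, fx = np.gradient(img)                              # row-wise and column-wise gradients
edge, angle = get_edge_angle(fx, fy)
# edge is clipped to [0, 255]; angle = arctan(fy / fx) in radians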
def write_site_pair_score_data_to_file(sorted_data_list, output_file_path, algorithm_used, max_iterations=None, num_threads=None):
"""Since site indices are starting from zero within python we add one to
each of them when they are being written to output file.
"""
formater = '#' + '='*100
formater += '\n'
with open(output_file_path, 'w') as fh:
fh.write(formater)
fh.write('# This result is computed using {}\n'.format(algorithm_used))
if max_iterations is not None:
fh.write('# maximum number of gradient decent iterations: {}\n'.format(max_iterations))
if num_threads is not None:
fh.write('# Number of threads used: {}\n'.format(num_threads))
fh.write('# The first and second columns are site pairs. The third column represents interaction score\n')
fh.write(formater)
for site_pair, score in sorted_data_list:
i, j = site_pair[0] + 1, site_pair[1] + 1
fh.write('{}\t{}\t{}\n'.format(i, j, score))
return None | 5ee4ec97fbc1b6f86e36946ee6d925604858b063 | 9,151 |
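# Quick illustrative call; the output path and algorithm label are made up for
# this sketch.
example_scores = [((0, 1), 0.92), ((2, 5), 0.47)]
write_site_pair_score_data_to_file(example_scores, 'site_pair_scores.txt',
                                   'mean-field DCA', max_iterations=500,
                                   num_threads=4)
# Writes a commented header followed by tab-separated lines such as "1\t2\t0.92"
# (site indices are shifted to 1-based on output).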
def cs_2tuple_list(value):
"""
    Parses comma-separated 2-tuple strings into a Python list of tuples
>>> cs_2tuple_list('')
[]
>>> cs_2tuple_list('(foobar, "test")')
[('foobar', 'test')]
    >>> cs_2tuple_list('(foobar, "test"), (barfoo, "lalala") ')
[('foobar', 'test'), ('barfoo', 'lalala')]
>>> cs_2tuple_list('(foobar, "test"), ("(barfoo", "lalala)")')
[('foobar', 'test'), ('(barfoo', 'lalala)')]
"""
res = ['']
in_tuple = False
quote_char = None
for char in value:
if in_tuple:
if not quote_char and char in ["'", '"']:
quote_char = char
elif char == quote_char:
quote_char = None
elif not quote_char and char == ")":
res[-1] = tuple(cs_string_list(res[-1]))
in_tuple = False
else:
res[-1] += char
elif char == " ":
continue
elif char == ",":
res.append('')
elif char == "(":
in_tuple = True
else:
raise ValueError("Unexpected character '{}' after '{}'"
.format(char, res))
if in_tuple or quote_char:
raise ValueError("Unterminated tuple {}".format(res[-1]))
# remove empty string stored as state
if not isinstance(res[-1], tuple):
del res[-1]
if any(not isinstance(e, tuple) or len(e) != 2 for e in res):
raise ValueError("Unexpected value in {}".format(res))
return res | f6d5b5d440ef524c9c2f2c79f410c432229a8099 | 9,152 |
def expectatedCapacityFactorFromDistribution( powerCurve, windspeedValues, windspeedCounts):
"""Computes the expected capacity factor of a wind turbine based on an explicitly-provided wind speed distribution
"""
windspeedValues = np.array(windspeedValues)
windspeedCounts = np.array(windspeedCounts)
if not len(windspeedValues.shape) == 1: raise ResError("windspeedValues must be 1-dimensional")
# Handle 2 dimensional counts with 1 dimensional wind speeds
if len(windspeedCounts.shape) > 1:
if not windspeedCounts.shape[0] == windspeedValues.shape[0]:
raise ResError("Dimensional incompatability")
windspeedValues = np.reshape(windspeedValues, (windspeedCounts.shape[0],1))
# Estimate generation distribution
gen = np.interp(windspeedValues, powerCurve.ws, powerCurve.cf, left=0, right=0) * windspeedCounts
meanGen = gen.sum(0)/windspeedCounts.sum(0)
# Done
return meanGen | fbcea908265cfbfcd9eb49632ac1027538417bfe | 9,153 |
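# Sketch of a call with a toy power curve. Only the `.ws` and `.cf` attributes
# are used above, so a namedtuple stands in for the project's real power-curve
# class (an assumption for this example).
from collections import namedtuple
import numpy as np

PowerCurve = namedtuple('PowerCurve', ['ws', 'cf'])
curve = PowerCurve(ws=np.array([0.0, 3.0, 12.0, 25.0]),
                   cf=np.array([0.0, 0.0, 1.0, 1.0]))
ws_values = np.array([2.0, 6.0, 10.0, 14.0])   # wind-speed bin centres
ws_counts = np.array([10, 30, 40, 20])         # observations per bin
mean_cf = expectatedCapacityFactorFromDistribution(curve, ws_values, ws_counts)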
def setup_2d_em_pic():
"""
Returns a 2D electromagnetic PIC for testing
"""
params = {
"length": [2 * np.pi, 2 * np.pi],
"cells": [32, 32],
"dimensions": 2,
"nppc": 10,
"single_stream": { # defaults for single stream instability
"stream_v": 3,
"stream_frac": 0.8,
"stream_width": 1
},
"landau": { # defaults for Landau damping
"amplitude": 0.8,
"mode": 3
},
"two_stream": { # defaults for two stream instability
"vpos": 2,
"vneg": -2,
"stream_frac": 1,
"stream_width": 0.8
},
}
sim_params = plat.params.Parameters(2)
sim_params.set_from_dict(params)
pic = plat.pic_2d_em.PIC_2D_EM(sim_params)
return pic | 136e8e83b63329866dc4c547075dcfd603279de6 | 9,155 |
def young_laplace(Bo,nPoints,L):
"""
Bo = float - Bond number
nPoints = int - number of integration points desired
L = float - final arc length for range of integration
"""
#integration range and number of integration points
s1=L
N=nPoints
#set initial values
s0 = 0
y0 = [0.00001,0.00001,0.00001]
sVec = np.linspace(s0,s1,N)
bond=Bo
sol = odeint(ode_system,y0,sVec,args=(bond,))
r = sol[:,1]
z = sol[:,2]
fi = sol[:,0]
return r,z,fi | 80ceaf8d66e4c309a9f0c16e329c90a26a9b43a0 | 9,156 |
import torch
def get_uncertain_point_coords_on_grid(uncertainty_map, num_points):
"""
Find `num_points` most uncertain points from `uncertainty_map` grid.
Args:
uncertainty_map (Tensor): A tensor of shape (N, 1, H, W) that contains uncertainty
values for a set of points on a regular H x W grid.
num_points (int): The number of points P to select.
Returns:
point_indices (Tensor): A tensor of shape (N, P) that contains indices from
[0, H x W) of the most uncertain points.
point_coords (Tensor): A tensor of shape (N, P, 2) that contains [0, 1] x [0, 1] normalized
coordinates of the most uncertain points from the H x W grid.
"""
R, _, H, W = uncertainty_map.shape
h_step = 1.0 / float(H)
w_step = 1.0 / float(W)
num_points = min(H * W, num_points)
point_indices = torch.topk(uncertainty_map.view(R, H * W), k=num_points, dim=1)[1]
point_coords = torch.zeros(R, num_points, 2, dtype=torch.float, device=uncertainty_map.device)
point_coords[:, :, 0] = w_step / 2.0 + (point_indices % W).to(torch.float) * w_step
point_coords[:, :, 1] = h_step / 2.0 + (point_indices // W).to(torch.float) * h_step
return point_indices, point_coords | a9d622c232f22359ac030679a49ed6f36345623c | 9,157 |
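# Minimal usage sketch: pick the 3 most uncertain cells of a random 4x4
# uncertainty map for a batch of 2 images.
uncertainty = torch.rand(2, 1, 4, 4)
idx, coords = get_uncertain_point_coords_on_grid(uncertainty, num_points=3)
# idx: (2, 3) flat indices into the 4x4 grid; coords: (2, 3, 2) normalized (x, y) in [0, 1]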
def amex_credit_card(input_filename, month):
"""Format is just contents.
date, description, amount"""
test = _make_month_test(0, month)
def transform(xs):
return [xs[0], xs[1],
'-' + xs[2] if xs[2][0] != '-' else xs[2][1:]]
return _csv_transform(input_filename, test, transform,
None) | c60bedd7b0afeda40e1fe9a34e27b0fd796f25f2 | 9,158 |
def loadf(file_like, *args, attributes=None, **kwargs):
"""Read a data file and load it -- scaled -- in memory.
This function differs from `read` in several ways:
* The output data type should be a floating point type.
* If an affine scaling (slope, intercept) is defined in the
file, it is applied to the data.
* the default output data type is `torch.get_default_dtype()`.
Parameters
----------
file_like : str or file object
Path to file or file object (with methods `seek`, `read`)
dtype : dtype_like, optional
Output data type. By default, use `torch.get_default_dtype()`.
Should be a floating point type.
device : torch.device, default='cpu'
Output device.
rand : bool, default=False
If the on-disk dtype is not floating point, sample noise
in the uncertainty interval.
cutoff : float or (float, float), default=(0, 1)
Percentile cutoff. If only one value is provided, it is
assumed to relate to the upper percentile.
dim : int or list[int], optional
Dimensions along which to compute percentiles.
By default, they are computed on the flattened array.
numpy : bool, default=False
Return a numpy array rather than a torch tensor.
attributes : list[str]
List of attributes to return as well.
See `MappedArray` for the possible attributes.
Returns
-------
dat : array or tensor
The array loaded in memory
attributes : dict, if attributes is not None
Dictionary of attributes loaded as well
"""
file = map(file_like, permission='r', keep_open=False)
dat = file.fdata(*args, **kwargs)
if attributes:
        attributes = {key: getattr(file, key) for key in attributes}
return dat, attributes
else:
return dat | 528c11132a83b7d341bb9688a75a4483aa59c155 | 9,159 |
from typing import List
def _backsubstitution(A: MatrixData, B: List[float]) -> List[float]:
""" Solve equation A . x = B for an upper triangular matrix A by backsubstitution.
Args:
A: row major matrix
B: vector of floats
"""
num = len(A)
x = [0.0] * num
for i in range(num - 1, -1, -1):
x[i] = B[i] / A[i][i]
for row in range(i - 1, -1, -1):
B[row] -= A[row][i] * x[i]
return x | 789c94842da2b751008e47a1ef661abeaded2da7 | 9,161 |
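# Tiny worked example: solve the upper-triangular system
#   [2 1] [x0]   [5]
#   [0 3] [x1] = [6]
# Note that B is modified in place by the routine.
print(_backsubstitution([[2.0, 1.0], [0.0, 3.0]], [5.0, 6.0]))  # [1.5, 2.0]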
def deiterize(func):
"""The inverse of iterize.
Takes an "iterized" (a.k.a. "vectorized") function (i.e. a function that
    works on iterables) and returns a function that works on a single element.
That is, takes a func(X,...) function and returns a next(iter(func([X],
...))) function."""
return Pipe(wrap_first_arg_in_list(func), iter, next) | 7ef4786ec858f1cbd8fb79b8cfee0e4b03bcb33c | 9,162 |
def ecg_data(rdb, day, patient, time):
""" Returns DatFrame to plot ecg signal """
sql = """SELECT * FROM ECG where "Day"='{0}' and "Patient"='{1}' and "Date"::time='{2}' """.format(day, patient, time)
try:
df = pd.read_sql(sql, rdb)
    except Exception:
df = pd.DataFrame()
return df | 4a384f920c0fdf45e1818633d2ef125a29b4a562 | 9,163 |
def evaluate(data_set_file_or_name, data_format=None, data_directory=None,
map_features=None, feature_selection=None, example_filter=None,
noisy_preprocessing_methods=None, preprocessing_methods=None,
split_data_set=None, splitting_method=None,
splitting_fraction=None,
model_type=None, latent_size=None, hidden_sizes=None,
number_of_importance_samples=None,
number_of_monte_carlo_samples=None,
inference_architecture=None, latent_distribution=None,
number_of_classes=None, parameterise_latent_posterior=False,
prior_probabilities_method=None,
generative_architecture=None, reconstruction_distribution=None,
number_of_reconstruction_classes=None, count_sum=None,
proportion_of_free_nats_for_y_kl_divergence=None,
minibatch_normalisation=None, batch_correction=None,
dropout_keep_probabilities=None,
number_of_warm_up_epochs=None, kl_weight=None,
minibatch_size=None, run_id=None, models_directory=None,
included_analyses=None, analysis_level=None,
decomposition_methods=None, highlight_feature_indices=None,
export_options=None, analyses_directory=None,
evaluation_set_kind=None, sample_size=None,
prediction_method=None, prediction_training_set_kind=None,
model_versions=None, **keyword_arguments):
"""Evaluate model on data set."""
if split_data_set is None:
split_data_set = defaults["data"]["split_data_set"]
if splitting_method is None:
splitting_method = defaults["data"]["splitting_method"]
if splitting_fraction is None:
splitting_fraction = defaults["data"]["splitting_fraction"]
if models_directory is None:
models_directory = defaults["models"]["directory"]
if evaluation_set_kind is None:
evaluation_set_kind = defaults["evaluation"]["data_set_name"]
if sample_size is None:
sample_size = defaults["models"]["sample_size"]
if prediction_method is None:
prediction_method = defaults["evaluation"]["prediction_method"]
if prediction_training_set_kind is None:
prediction_training_set_kind = defaults["evaluation"][
"prediction_training_set_kind"]
if model_versions is None:
model_versions = defaults["evaluation"]["model_versions"]
if analyses_directory is None:
analyses_directory = defaults["analyses"]["directory"]
evaluation_set_kind = normalise_string(evaluation_set_kind)
prediction_training_set_kind = normalise_string(
prediction_training_set_kind)
model_versions = parse_model_versions(model_versions)
print(title("Data"))
binarise_values = False
if reconstruction_distribution == "bernoulli":
if noisy_preprocessing_methods:
if noisy_preprocessing_methods[-1] != "binarise":
noisy_preprocessing_methods.append("binarise")
else:
binarise_values = True
data_set = DataSet(
data_set_file_or_name,
data_format=data_format,
directory=data_directory,
map_features=map_features,
feature_selection=feature_selection,
example_filter=example_filter,
preprocessing_methods=preprocessing_methods,
binarise_values=binarise_values,
noisy_preprocessing_methods=noisy_preprocessing_methods
)
if not split_data_set or evaluation_set_kind == "full":
data_set.load()
if split_data_set:
training_set, validation_set, test_set = data_set.split(
method=splitting_method, fraction=splitting_fraction)
data_subsets = [data_set, training_set, validation_set, test_set]
for data_subset in data_subsets:
clear_data_subset = True
if data_subset.kind == evaluation_set_kind:
evaluation_set = data_subset
clear_data_subset = False
if data_subset.kind == prediction_training_set_kind:
prediction_training_set = data_subset
clear_data_subset = False
if clear_data_subset:
data_subset.clear()
else:
splitting_method = None
splitting_fraction = None
evaluation_set = data_set
prediction_training_set = data_set
evaluation_subset_indices = indices_for_evaluation_subset(
evaluation_set)
models_directory = build_directory_path(
models_directory,
data_set=evaluation_set,
splitting_method=splitting_method,
splitting_fraction=splitting_fraction
)
analyses_directory = build_directory_path(
analyses_directory,
data_set=evaluation_set,
splitting_method=splitting_method,
splitting_fraction=splitting_fraction
)
print(title("Model"))
if number_of_classes is None:
if evaluation_set.has_labels:
number_of_classes = (
evaluation_set.number_of_classes
- evaluation_set.number_of_excluded_classes)
model = _setup_model(
data_set=evaluation_set,
model_type=model_type,
latent_size=latent_size,
hidden_sizes=hidden_sizes,
number_of_importance_samples=number_of_importance_samples,
number_of_monte_carlo_samples=number_of_monte_carlo_samples,
inference_architecture=inference_architecture,
latent_distribution=latent_distribution,
number_of_classes=number_of_classes,
parameterise_latent_posterior=parameterise_latent_posterior,
prior_probabilities_method=prior_probabilities_method,
generative_architecture=generative_architecture,
reconstruction_distribution=reconstruction_distribution,
number_of_reconstruction_classes=number_of_reconstruction_classes,
count_sum=count_sum,
proportion_of_free_nats_for_y_kl_divergence=(
proportion_of_free_nats_for_y_kl_divergence),
minibatch_normalisation=minibatch_normalisation,
batch_correction=batch_correction,
dropout_keep_probabilities=dropout_keep_probabilities,
number_of_warm_up_epochs=number_of_warm_up_epochs,
kl_weight=kl_weight,
models_directory=models_directory
)
if not model.has_been_trained(run_id=run_id):
raise Exception(
"Model not found. Either it has not been trained or "
"scVAE is looking in the wrong directory. "
"The model directory resulting from the model specification is: "
"\"{}\"".format(model.log_directory())
)
if ("best_model" in model_versions
and not better_model_exists(model, run_id=run_id)):
model_versions.remove("best_model")
if ("early_stopping" in model_versions
and not model_stopped_early(model, run_id=run_id)):
model_versions.remove("early_stopping")
print(subtitle("Analysis"))
analyses.analyse_model(
model=model,
run_id=run_id,
included_analyses=included_analyses,
analysis_level=analysis_level,
export_options=export_options,
analyses_directory=analyses_directory
)
print(title("Results"))
print("Evaluation set: {} set.".format(evaluation_set.kind))
print("Model version{}: {}.".format(
"" if len(model_versions) == 1 else "s",
enumerate_strings(
[v.replace("_", " ") for v in model_versions], conjunction="and")))
if prediction_method:
prediction_specifications = PredictionSpecifications(
method=prediction_method,
number_of_clusters=number_of_classes,
training_set_kind=prediction_training_set.kind
)
print("Prediction method: {}.".format(
prediction_specifications.method))
print("Number of clusters: {}.".format(
prediction_specifications.number_of_clusters))
print("Prediction training set: {} set.".format(
prediction_specifications.training_set_kind))
print()
for model_version in model_versions:
use_best_model = False
use_early_stopping_model = False
if model_version == "best_model":
use_best_model = True
elif model_version == "early_stopping":
use_early_stopping_model = True
print(subtitle(model_version.replace("_", " ").capitalize()))
print(heading("{} evaluation".format(
model_version.replace("_", "-").capitalize())))
(
transformed_evaluation_set,
reconstructed_evaluation_set,
latent_evaluation_sets
) = model.evaluate(
evaluation_set=evaluation_set,
evaluation_subset_indices=evaluation_subset_indices,
minibatch_size=minibatch_size,
run_id=run_id,
use_best_model=use_best_model,
use_early_stopping_model=use_early_stopping_model,
output_versions="all"
)
print()
if sample_size:
print(heading("{} sampling".format(
model_version.replace("_", "-").capitalize())))
sample_reconstruction_set, __ = model.sample(
sample_size=sample_size,
minibatch_size=minibatch_size,
run_id=run_id,
use_best_model=use_best_model,
use_early_stopping_model=use_early_stopping_model
)
print()
else:
sample_reconstruction_set = None
if prediction_method:
print(heading("{} prediction".format(
model_version.replace("_", "-").capitalize())))
latent_prediction_training_sets = model.evaluate(
evaluation_set=prediction_training_set,
minibatch_size=minibatch_size,
run_id=run_id,
use_best_model=use_best_model,
use_early_stopping_model=use_early_stopping_model,
output_versions="latent",
log_results=False
)
print()
cluster_ids, predicted_labels, predicted_superset_labels = (
predict_labels(
training_set=latent_prediction_training_sets["z"],
evaluation_set=latent_evaluation_sets["z"],
specifications=prediction_specifications
)
)
evaluation_set_versions = [
transformed_evaluation_set, reconstructed_evaluation_set
] + list(latent_evaluation_sets.values())
for evaluation_set_version in evaluation_set_versions:
evaluation_set_version.update_predictions(
prediction_specifications=prediction_specifications,
predicted_cluster_ids=cluster_ids,
predicted_labels=predicted_labels,
predicted_superset_labels=predicted_superset_labels
)
print()
print(heading("{} analysis".format(
model_version.replace("_", "-").capitalize())))
analyses.analyse_results(
evaluation_set=transformed_evaluation_set,
reconstructed_evaluation_set=reconstructed_evaluation_set,
latent_evaluation_sets=latent_evaluation_sets,
model=model,
run_id=run_id,
sample_reconstruction_set=sample_reconstruction_set,
decomposition_methods=decomposition_methods,
evaluation_subset_indices=evaluation_subset_indices,
highlight_feature_indices=highlight_feature_indices,
best_model=use_best_model,
early_stopping=use_early_stopping_model,
included_analyses=included_analyses,
analysis_level=analysis_level,
export_options=export_options,
analyses_directory=analyses_directory
)
return 0 | 399411ca9560d9e48e73846c9fb70898f028f90a | 9,164 |
def deploy_new_company(company_id):
"""
Deploy new company contract
    :param company_id: Off-chain id of the company to deploy
    :return: True if successful, False otherwise
"""
try:
instance = Company.objects.get(pk=company_id)
except Company.DoesNotExist:
        logger.error('Company with id {} not found, contract will not be deployed.'.format(company_id))
return False
else:
oracle = OracleHandler()
w3 = utils.get_w3()
contract_file = 'dapp/contracts/Company.sol'
compile_sol = compile_files([contract_file, ],
output_values=("abi", "ast", "bin", "bin-runtime",))
create_abi(compile_sol[contract_file + ':Company']['abi'], 'Company')
obj = w3.eth.contract(
abi=compile_sol[contract_file + ':Company']['abi'],
bytecode=compile_sol[contract_file + ':Company']['bin'],
bytecode_runtime=compile_sol[contract_file + ':Company']['bin-runtime'],
)
args = [settings.VERA_COIN_CONTRACT_ADDRESS, settings.VERA_ORACLE_CONTRACT_ADDRESS, ]
logger.info('Try to unlock account: {}.'.format(oracle.unlockAccount()))
try:
txn_hash = obj.deploy(transaction={'from': oracle.account}, args=args)
except Exception as e:
logger.warning('Error while deploy new company contract. Company {}: {}'.format(company_id, e))
else:
logger.info('Lock account: {}'.format(oracle.lockAccount()))
save_txn.delay(txn_hash.hex(), 'NewCompany', instance.created_by.id, company_id)
save_txn_to_history.delay(instance.created_by.id, txn_hash.hex(),
'Creation of a new Company contract') | 3871f2ae9948001a1fe24a4c4d2791b7d12f79d7 | 9,165 |
import random
from copy import deepcopy
def MEMB(G,rb,cycle=0):
"""
It returns a dictionary with {box_id:subgraph_generated_by_the_nodes_in_this_box}
The box_id is the center of the box.
cycle: Ignore this parameter. Use the default cycle=0.
"""
adj = G.adj
number_of_nodes = G.number_of_nodes()
covered_nodes = set()
center_nodes = set()
non_center_nodes = G.nodes()
center_node_found = 0
boxes={} #this will be "box_id:[nodes in box]"
central_distance_of_node = {} #"node:central_distance"
node_box_id = {} #"node:box_id"
nodes_sorted_by_central_distance={} #Dict with {central_distance:[nodes]}
excluded_mass_of_non_centers_rb = {} #This contains [(node:excluded_mass)] for rb
excluded_mass_of_non_centers_rb2 = {} #This contains [(node:excluded_mass)] for rb+1
rb2 = rb + 1
for node in non_center_nodes:
#if node in [5000,10000,20000,30000]: print "node", node
level=0 # the current level
nextlevel={node:1} # list of nodes to check at next level
paths_rb=None
paths_rb2={node:[node]} # paths dictionary (paths to key from source)
while nextlevel:
paths_rb = deepcopy(paths_rb2)
thislevel=nextlevel
nextlevel={}
for v in thislevel:
for w in G.neighbors(v):
if not paths_rb2.has_key(w):
paths_rb2[w]=paths_rb2[v]+[w]
nextlevel[w]=1
level=level+1
if (rb2 <= level): break
excluded_mass_of_node = len(paths_rb2)
try:
excluded_mass_of_non_centers_rb2[excluded_mass_of_node].append(node)
except KeyError:
excluded_mass_of_non_centers_rb2[excluded_mass_of_node] = [node]
excluded_mass_of_node = len(paths_rb)
try:
excluded_mass_of_non_centers_rb[excluded_mass_of_node].append(node)
except KeyError:
excluded_mass_of_non_centers_rb[excluded_mass_of_node] = [node]
maximum_excluded_mass = 0
nodes_with_maximum_excluded_mass=[]
new_covered_nodes = {}
center_node_and_mass = []
cycle_index = 0
while len(covered_nodes) < number_of_nodes:
#print len(covered_nodes),number_of_nodes
cycle_index += 1
if cycle_index == cycle:
rb2 = rb+1
cycle_index = 0
else:
rb2 = rb
while 1:
if rb2 == rb+1:
#t1=time.time()
while 1:
maximum_key = max(excluded_mass_of_non_centers_rb2.keys())
node = random.choice(excluded_mass_of_non_centers_rb2[maximum_key])
if node in center_nodes:
excluded_mass_of_non_centers_rb2[maximum_key].remove(node)
if not excluded_mass_of_non_centers_rb2[maximum_key]: del excluded_mass_of_non_centers_rb2[maximum_key]
else:
break
nodes_visited = {}
bfs = single_source_shortest_path(G,node,cutoff=rb2)
for i in bfs:
nodes_visited[i] = len(bfs[i])-1
excluded_mass_of_node = len(set(nodes_visited.keys()).difference(covered_nodes))
if excluded_mass_of_node == maximum_key:
center_node_and_mass = (node,maximum_key)
excluded_mass_of_non_centers_rb2[maximum_key].remove(node)
if not excluded_mass_of_non_centers_rb2[maximum_key]: del excluded_mass_of_non_centers_rb2[maximum_key]
new_covered_nodes = nodes_visited
break
else:
excluded_mass_of_non_centers_rb2[maximum_key].remove(node)
if not excluded_mass_of_non_centers_rb2[maximum_key]: del excluded_mass_of_non_centers_rb2[maximum_key]
try:
excluded_mass_of_non_centers_rb2[excluded_mass_of_node].append(node)
except KeyError:
excluded_mass_of_non_centers_rb2[excluded_mass_of_node] = [node]
#print "time", time.time()-t1
else:
#t1=time.time()
while 1:
maximum_key = max(excluded_mass_of_non_centers_rb.keys())
node = random.choice(excluded_mass_of_non_centers_rb[maximum_key])
if node in center_nodes:
excluded_mass_of_non_centers_rb[maximum_key].remove(node)
if not excluded_mass_of_non_centers_rb[maximum_key]: del excluded_mass_of_non_centers_rb[maximum_key]
else:
break
nodes_visited = {}
bfs = single_source_shortest_path(G,node,cutoff=rb)
for i in bfs:
nodes_visited[i] = len(bfs[i])-1
excluded_mass_of_node = len(set(nodes_visited.keys()).difference(covered_nodes))
if excluded_mass_of_node == maximum_key:
center_node_and_mass = (node,maximum_key)
excluded_mass_of_non_centers_rb[maximum_key].remove(node)
if not excluded_mass_of_non_centers_rb[maximum_key]: del excluded_mass_of_non_centers_rb[maximum_key]
new_covered_nodes = nodes_visited
break
else:
excluded_mass_of_non_centers_rb[maximum_key].remove(node)
if not excluded_mass_of_non_centers_rb[maximum_key]: del excluded_mass_of_non_centers_rb[maximum_key]
try:
excluded_mass_of_non_centers_rb[excluded_mass_of_node].append(node)
except KeyError:
excluded_mass_of_non_centers_rb[excluded_mass_of_node] = [node]
#print "time", time.time()-t1
center_node_found = center_node_and_mass[0]
boxes[center_node_found] = [center_node_found]
node_box_id[center_node_found] = center_node_found
non_center_nodes.remove(center_node_found)
center_nodes.add(center_node_found)
covered_nodes = covered_nodes.union(set(new_covered_nodes.keys()))
#print len(covered_nodes)
for i in new_covered_nodes:
try:
if central_distance_of_node[i] > new_covered_nodes[i]:
nodes_sorted_by_central_distance[central_distance_of_node[i]].remove(i)
if not nodes_sorted_by_central_distance[central_distance_of_node[i]]:
del nodes_sorted_by_central_distance[central_distance_of_node[i]]
try:
nodes_sorted_by_central_distance[new_covered_nodes[i]].append(i)
except KeyError:
nodes_sorted_by_central_distance[new_covered_nodes[i]] = [i]
central_distance_of_node[i] = new_covered_nodes[i]
except KeyError:
central_distance_of_node[i] = new_covered_nodes[i]
try:
nodes_sorted_by_central_distance[new_covered_nodes[i]].append(i)
                except KeyError:
nodes_sorted_by_central_distance[new_covered_nodes[i]] = [i]
max_distance = max(nodes_sorted_by_central_distance.keys())
for i in range(1,max_distance+1):
for j in nodes_sorted_by_central_distance[i]:
targets = list(set(adj[j].iterkeys()).intersection(set(nodes_sorted_by_central_distance[i-1])))
node_box_id[j] = node_box_id[random.choice(targets)]
boxes[node_box_id[j]].append(j)
boxes_subgraphs={}
for i in boxes:
boxes_subgraphs[i] = subgraph(G,boxes[i])
return boxes_subgraphs | dfe522aa1e6140e98d32d2eee039aec366d76a8d | 9,166 |
def shownames(namespace, **args):
"""helper method to generate a template keyword for a namespace"""
ctx = args['ctx']
repo = ctx.repo()
ns = repo.names[namespace]
names = ns.names(repo, ctx.node())
return showlist(ns.templatename, names, plural=namespace, **args) | 29105570ad822975c69c7b1d30b94e368d554873 | 9,168 |
def only_half_radius(
subsampled_radius: float, full_diameter: float, radius_constraint: float
):
"""
Check if radius is smaller than fraction of full radius.
"""
assert 0.0 <= radius_constraint <= 1.0
return subsampled_radius <= ((full_diameter / 2) * radius_constraint) | 565c301932d5445e8bbb594085e65df63814663a | 9,169 |
def complete_from_man(context: CommandContext):
"""
Completes an option name, based on the contents of the associated man
page.
"""
if context.arg_index == 0 or not context.prefix.startswith("-"):
return
cmd = context.args[0].value
def completions():
for desc, opts in _parse_man_page_options(cmd).items():
yield RichCompletion(
value=opts[-1], display=", ".join(opts), description=desc
)
return completions(), False | 4312ff8323a72a405737a8476cb7ad541dbff718 | 9,170 |
def sanitise_description(original: str) -> str:
"""
Remove newlines from ticket descriptions.
:param original: the string to sanitise
:return: the same string, with newlines as spaces
"""
return original.replace("\n", " ") | 741aa7df758fb342a0d9a0fa182d24a643f5dbbc | 9,171 |
def dnsdomain_get_all(context):
"""Get a list of all dnsdomains in our database."""
return IMPL.dnsdomain_get_all(context) | fc7bde05cdb35f60c30943f1ebebdb4217af9467 | 9,172 |
def mockselect(r, w, x, timeout=0): # pylint: disable=W0613
"""Simple mock for select()
"""
readable = [s for s in r if s.ready_for_read]
return readable, w[:], [] | 920810aea2f7813885805011d646ddaabfd1901c | 9,173 |
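# Illustrative check with stand-in socket objects; `ready_for_read` is the only
# attribute mockselect inspects.
class _FakeSock:
    def __init__(self, ready):
        self.ready_for_read = ready

a, b = _FakeSock(True), _FakeSock(False)
readable, writable, errors = mockselect([a, b], [a], [])
# readable == [a]; writable is a copy of the write list; errors is always empty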
def solve_5c2c9af4(x):
"""
Required Transformation: The input contains 3 cells with non-zero value. The non-zero valued cells are diagonally
    positioned with some gap between the non-zero valued cells. The program should identify the colour and
    position of these cells in the grid and form a squared box around the centered non-zero valued cell. Consecutive
    squared boxes should be separated by an equal width.
    Implementation: The solution is to identify the coloured cells in the grid and form squared boxes around the
    centered cell (non-zero valued cell). The width should be the same between each squared box, where width is measured by the
difference between the number of rows or columns between 2 consecutive non-zero valued cells.
The non-zero valued cells can be arranged in 2 forms,
1. Up Slope
2. Down Slope
In the case of Up slope, once the first non-zero valued cell is identified, the pattern to fill the cells are as
follows,
RIGHT, DOWN, LEFT, UP
Whereas in the case of Down Slope, once the first non-zero valued cell is identified, the pattern to fill the cells
are as follows,
DOWN, LEFT, UP, RIGHT
    After one full rotation, the row & column are recalculated based on the width. This process is repeated until the
    row & column go out of the grid.
Training & Test Grid: The solution works on all Training & Test cases
"""
non_zero_indexes = np.nonzero(x)
non_zero_row_array = non_zero_indexes[0]
non_zero_col_array = non_zero_indexes[1]
# Difference between the columns of first & second non-zero valued cell
width = non_zero_col_array[0] - non_zero_col_array[1]
row, col = non_zero_row_array[0], non_zero_col_array[0]
# Centered non-zero Valued cell. This cell will become the reference point for all the squared boxes in the grid
midpoint_loc = (non_zero_row_array[1], non_zero_col_array[1])
value = x[non_zero_row_array[1], non_zero_col_array[1]]
# Assign the initial width to Original Width because the width values increases as the size of the square increase.
original_width = width
while True:
if width > 0:
# Up Slope: down, left, up, right
row, col = travel_down(x, row, col, midpoint_loc[0], abs(width), value)
row, col = travel_left(x, row, col, midpoint_loc[1], abs(width), value)
row, col = travel_up(x, row, col, midpoint_loc[0], abs(width), value)
row, col = travel_right(x, row, col, midpoint_loc[1], abs(width), value)
# Recalculate the rows & column based on the original width. Because each square should have same width
row, col = row - abs(original_width), col + abs(original_width)
else:
# Down Slope: right, down, left, up
row, col = travel_right(x, row, col, midpoint_loc[1], abs(width), value)
row, col = travel_down(x, row, col, midpoint_loc[0], abs(width), value)
row, col = travel_left(x, row, col, midpoint_loc[1], abs(width), value)
row, col = travel_up(x, row, col, midpoint_loc[0], abs(width), value)
# Recalculate the rows & column based on the original width. Because each square should have same width
row, col = row - abs(original_width), col - abs(original_width)
width = width + original_width
# If the rows or columns exceed beyond the grid size terminate the loop.
if (row < -1 and col < -1) or (row < -1 and col > x[0].shape[0]):
break
return x | fcb9701c4bccabe237481ed2acd052b7501abd3f | 9,175 |
def kernel_primitive_zhao_vec(x, s0=0.08333, theta=0.242):
"""
Calculates the primitive of the Zhao kernel for given values.
Optimized using nd-arrays and vectorization.
:param x: points to evaluate, should be a nd-array
:param s0: initial reaction time
:param theta: empirically determined constant
    The normalization constant c0 is computed internally from s0 and theta.
:return: primitives evaluated at given points
"""
c0 = 1.0 / s0 / (1 - 1.0 / -theta)
res = np.copy(x)
res[x < 0] = 0
res[(x <= s0) & (x >= 0)] = c0 * res[(x <= s0) & (x >= 0)]
res[x > s0] = c0 * (s0 + (s0 * (1 - (res[x > s0] / s0) ** -theta)) / theta)
return res | 26cd9230e23f4217ec5a7eee2e8522bef0a40c4e | 9,176 |
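# Quick sketch: evaluate the primitive at a few points with the default
# s0 and theta values.
import numpy as np

x = np.array([-1.0, 0.05, 0.5])
print(kernel_primitive_zhao_vec(x))
# -> 0 for negative times, the linear branch c0 * x on [0, s0],
#    and the power-law branch for x > s0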
def scaled_dot_product_attention(q, k, v, mask):
"""Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
mask: Float tensor with shape broadcastable to (..., seq_len_q, seq_len_k). Defaults to None.
Returns:
output, attention_weights
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# add the mask to the scaled tensor.
if mask is not None:
scaled_attention_logits += (mask * -1e9)
# softmax is normalized on the last axis (seq_len_k) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights | 94498a5fd499a09e24a964a6ac975420d702c536 | 9,178 |
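# Minimal sketch: single-head attention for a batch of 1, three positions,
# depth 4 (tensorflow is assumed to be imported as `tf` in this module).
import tensorflow as tf

q = tf.random.normal((1, 3, 4))
k = tf.random.normal((1, 3, 4))
v = tf.random.normal((1, 3, 4))
out, attn = scaled_dot_product_attention(q, k, v, mask=None)
# out: (1, 3, 4); attn: (1, 3, 3), each row sums to 1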
from typing import List
def merge(input_list: List, low: int, mid: int, high: int) -> List:
"""
    merging the already-sorted left half and right half
    back into the list in sorted order
"""
result = []
left, right = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0))
input_list[low : high + 1] = result + left + right
return input_list | 0d53b0670899b4853563c9dda0eb47a8c66bae00 | 9,181 |
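# Worked example: both halves of the list are already sorted before merging.
data = [1, 3, 5, 2, 4, 6]
print(merge(data, low=0, mid=3, high=5))  # [1, 2, 3, 4, 5, 6]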
def fc_caps(activation_in,
pose_in,
ncaps_out,
name='class_caps',
weights_regularizer=None):
"""Fully connected capsule layer.
"The last layer of convolutional capsules is connected to the final capsule
layer which has one capsule per output class." We call this layer 'fully
connected' because it fits these characteristics, although Hinton et al. do
    not use this terminology in the paper.
See Hinton et al. "Matrix Capsules with EM Routing" for detailed description.
Author:
Ashley Gritzman 27/11/2018
Args:
activation_in:
(batch_size, child_space, child_space, child_caps, 1)
(64, 7, 7, 8, 1)
pose_in:
(batch_size, child_space, child_space, child_caps, 16)
(64, 7, 7, 8, 16)
ncaps_out: number of class capsules
name:
weights_regularizer:
Returns:
activation_out:
score for each output class
(batch_size, ncaps_out)
(64, 5)
pose_out:
pose for each output class capsule
(batch_size, ncaps_out, 16)
(64, 5, 16)
"""
with tf.variable_scope(name) as scope:
# Get shapes
shape = pose_in.get_shape().as_list()
batch_size = shape[0]
child_space = shape[1]
child_caps = shape[3]
with tf.variable_scope('v') as scope:
# In the class_caps layer, we apply same multiplication to every spatial
# location, so we unroll along the batch and spatial dimensions
# (64, 5, 5, 32, 16) -> (64*5*5, 32, 16)
pose = tf.reshape(
pose_in,
shape=[batch_size * child_space * child_space, child_caps, 16])
activation = tf.reshape(
activation_in,
shape=[batch_size * child_space * child_space, child_caps, 1],
name="activation")
# (64*5*5, 32, 16) -> (65*5*5, 32, 5, 16)
votes = utl.compute_votes(pose, ncaps_out, weights_regularizer)
# (65*5*5, 32, 5, 16)
assert (
votes.get_shape() ==
[batch_size * child_space * child_space, child_caps, ncaps_out, 16])
logger.info('class_caps votes original shape: {}'
.format(votes.get_shape()))
with tf.variable_scope('coord_add') as scope:
# (64*5*5, 32, 5, 16)
votes = tf.reshape(
votes,
[batch_size, child_space, child_space, child_caps, ncaps_out,
votes.shape[-1]])
votes = coord_addition(votes)
with tf.variable_scope('routing') as scope:
# Flatten the votes:
      # Combine the 4 x 4 spatial dimensions to appear as one spatial dimension with many capsules.
# [64*5*5, 16, 5, 16] -> [64, 5*5*16, 5, 16]
votes_flat = tf.reshape(
votes,
shape=[batch_size, child_space * child_space * child_caps,
ncaps_out, votes.shape[-1]])
activation_flat = tf.reshape(
activation,
shape=[batch_size, child_space * child_space * child_caps, 1])
spatial_routing_matrix = utl.create_routing_map(child_space=1, k=1, s=1)
logger.info('class_caps votes in to routing shape: {}'
.format(votes_flat.get_shape()))
pose_out, activation_out = em.em_routing(votes_flat,
activation_flat,
batch_size,
spatial_routing_matrix)
activation_out = tf.squeeze(activation_out, name="activation_out")
pose_out = tf.squeeze(pose_out, name="pose_out")
logger.info('class_caps activation shape: {}'
.format(activation_out.get_shape()))
logger.info('class_caps pose shape: {}'.format(pose_out.get_shape()))
tf.summary.histogram("activation_out", activation_out)
return activation_out, pose_out | eb190d36c718d0c518b12c172e2a13b49d7ba012 | 9,182 |
def get_optional_relations():
"""Return a dictionary of optional relations.
@returns {relation: relation_name}
"""
optional_interfaces = {}
if relation_ids('ceph'):
optional_interfaces['storage-backend'] = ['ceph']
if relation_ids('neutron-plugin'):
optional_interfaces['neutron-plugin'] = ['neutron-plugin']
if relation_ids('shared-db') or relation_ids('pgsql-db'):
optional_interfaces['database'] = ['shared-db', 'pgsql-db']
return optional_interfaces | bdb1dcd04cfd31130e5463772d3d31f8aac7d894 | 9,183 |
def splitmod(n, k):
"""
Split n into k lists containing the elements of n in positions i (mod k).
Return the heads of the lists and the tails.
"""
heads = [None]*k
tails = [None]*k
i = 0
while n is not None:
if heads[i] is None:
heads[i] = n
if tails[i] is not None:
tails[i].next = n
tails[i] = n
n.next, n = None, n.next
i = (i+1)%k
return heads, tails | a4a1885ce0c9541c534145d0236996a511cbdd00 | 9,184 |
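# Sketch with a minimal singly-linked node type; the real node class is assumed
# to expose a `next` attribute, which is all splitmod touches.
class _Node:
    def __init__(self, value):
        self.value = value
        self.next = None

nodes = [_Node(v) for v in range(1, 6)]          # 1 -> 2 -> 3 -> 4 -> 5
for a, b in zip(nodes, nodes[1:]):
    a.next = b
heads, tails = splitmod(nodes[0], 2)
# heads[0] chains 1 -> 3 -> 5, heads[1] chains 2 -> 4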
def find_flats(flats, flat2_finder=find_flat2):
"""Find flat pairs."""
file1s = sorted([item.strip() for item in flats
if item.find('flat1') != -1])
return [(f1, flat2_finder(f1)) for f1 in file1s] | 7003bc35ef0ed6a4fe71b1d686b7c46b99e6d8ca | 9,186 |
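# Illustrative pairing of flat1/flat2 frames with a simple name-based finder
# (file names are invented for the example).
frames = ['E2V_flat2_001.fits', 'E2V_flat1_001.fits', 'E2V_flat1_002.fits']
pairs = find_flats(frames, flat2_finder=lambda f: f.replace('flat1', 'flat2'))
# -> [('E2V_flat1_001.fits', 'E2V_flat2_001.fits'),
#     ('E2V_flat1_002.fits', 'E2V_flat2_002.fits')]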
def binary_accuracy(a,b):
"""
    Calculate the binary accuracy.
"""
return ((a.argmax(dim=1) == b).sum().item()) / a.size(0) | 5f9b09199b2e88169a0cbe9ee7cb4bb351c09e4a | 9,187 |
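# Quick sketch with two-class logits; both predictions match the labels,
# so the accuracy is 1.0.
import torch

logits = torch.tensor([[0.2, 0.8], [0.9, 0.1]])
labels = torch.tensor([1, 0])
print(binary_accuracy(logits, labels))  # 1.0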
def add_ice_post_arrow_hq_lq_arguments2(parser):
"""Add quiver QV threshold to mark an isoform as high-quality or low-quality."""
# if isinstance(parser, PbParser):
# #parser = _wrap_parser(parser)
# arg_parser = parser.arg_parser.parser
# tcp = parser.tool_contract_parser
# tcp.add_float(BaseConstants.HQ_ARROW_MIN_ACCURACY_ID, "hq_arrow_min_accuracy",
# default=BaseConstants.HQ_ARROW_MIN_ACCURACY_DEFAULT,
# name="Minimum Quiver|Arrow Accuracy", description=BaseConstants.HQ_ARROW_MIN_ACCURACY_DESC)
# tcp.add_int(BaseConstants.QV_TRIM_FIVEPRIME_ID, "qv_trim_5",
# default=BaseConstants.QV_TRIM_FIVEPRIME_DEFAULT,
# name="Trim QVs 5'", description=BaseConstants.QV_TRIM_FIVEPRIME_DESC)
# tcp.add_int(BaseConstants.QV_TRIM_THREEPRIME_ID, "qv_trim_3",
# default=BaseConstants.QV_TRIM_THREEPRIME_DEFAULT,
# name="Trim QVs 3'", description=BaseConstants.QV_TRIM_THREEPRIME_DESC)
# else:
# assert isinstance(parser, argparse.ArgumentParser)
# arg_parser = parser
arg_parser = parser
icq_gp = arg_parser.add_argument_group("IceArrow High QV/Low QV arguments")
icq_gp.add_argument("--hq_arrow_min_accuracy",
type=float,
default=BaseConstants.HQ_ARROW_MIN_ACCURACY_DEFAULT,
dest="hq_arrow_min_accuracy",
help=BaseConstants.HQ_ARROW_MIN_ACCURACY_DESC)
icq_gp.add_argument("--qv_trim_5",
type=int,
default=BaseConstants.QV_TRIM_FIVEPRIME_DEFAULT,
dest="qv_trim_5",
help=BaseConstants.QV_TRIM_FIVEPRIME_DESC)
icq_gp.add_argument("--qv_trim_3",
type=int,
default=BaseConstants.QV_TRIM_THREEPRIME_DEFAULT,
dest="qv_trim_3",
help=BaseConstants.QV_TRIM_THREEPRIME_DESC)
icq_gp.add_argument("--hq_min_full_length_reads",
type=int,
default=2,
help="Minimum number of FL support to be an HQ isoform (default: 2)")
icq_gp = arg_parser.add_argument_group("IceArrow2 HQ/LQ IO arguments")
icq_gp.add_argument("--hq_isoforms_fa",
default=None,
type=str,
dest="hq_isoforms_fa",
help="Arrow polished, high quality isoforms " +
"in FASTA, default: root_dir/output/all_arrowed_hq.fasta")
icq_gp.add_argument("--hq_isoforms_fq",
default=None,
type=str,
dest="hq_isoforms_fq",
help="Arrow polished, high quality isoforms " +
"in FASTQ, default: root_dir/output/all_arrowed_hq.fastq")
icq_gp.add_argument("--lq_isoforms_fa",
default=None,
type=str,
dest="lq_isoforms_fa",
help="Arrow polished, low quality isoforms " +
"in FASTA, default: root_dir/output/all_arrowed_lq.fasta")
icq_gp.add_argument("--lq_isoforms_fq",
default=None,
type=str,
dest="lq_isoforms_fq",
help="Arrow polished, low quality isoforms " +
"in FASTQ, default: root_dir/output/all_arrowed_lq.fastq")
return parser | f2fabe409df095be26b4085ccbd635e4ab6ce88a | 9,188 |
def dev_step(tset, train_m, test_m, net, dataset, args, nd_possible_rating_values):
"""
Evaluates model on a dev set
"""
batch_size = 256
#print("tset:",tset)
user_te = np.array(list(tset.keys()))
#print("user_te:",user_te)
user_te2 = user_te[:, np.newaxis]
#user_te2 = user_te
ll = int(len(user_te) / batch_size) + 1
recall50 = []
recall100 = []
recall200 = []
ndcg50 = []
ndcg100 = []
ndcg200 = []
for batch_num in range(int(ll)):
print(batch_num/ll*100,"%")
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, len(user_te))
        # u_batch is the list of users in this batch
        u_batch = user_te2[start_index:end_index]
        # batch_users is the number of users in this batch
        batch_users = end_index - start_index
        num_user = train_m.shape[0]  # total number of users
        num_movie = train_m.shape[1]  # total number of items
user_list = user_te[start_index:end_index]
batch_rating_pairs = generate_pair(user_list, num_movie)
batch_dec_graph = generate_dec_graph(batch_rating_pairs, num_user, num_movie).to(args.device)
Two_Stage = False
pred_ratings, reg_loss, user_out, movie_out, W = net(dataset.train_enc_graph, batch_dec_graph, dataset.user_feature, dataset.movie_feature, Two_Stage)
#pred_ratings = th.softmax(pred_ratings, dim=1)
#print("pred_rating",pred_ratings.shape)
pred_ratings = pred_ratings.cpu().detach().numpy()
#pred_argmax = np.argmax(pred_ratings, axis=1)
pred_index = np.zeros_like(pred_ratings[:,0])
for j in range(len(pred_index)):
#pred_index[j][pred_argmax[j]] = 1
pred_index[j] = pred_ratings[j][1]
#print("pred_rating",pred_index[0:10])
#real_pred_ratings = (th.softmax(pred_ratings, dim=1) * nd_possible_rating_values.view(1, -1)).sum(dim=1)
#real_pred_ratings = (th.from_numpy(pred_index).to(args.device) * nd_possible_rating_values.view(1, -1)).sum(dim=1)
real_pred_ratings = th.from_numpy(pred_index).to(args.device)
print("real_pred_ratings", th.sum(real_pred_ratings>=1))
u_b = user_te[start_index:end_index]
real_pred_ratings = real_pred_ratings.cpu()
#print("pred_shape:", real_pred_ratings.shape)
pre = real_pred_ratings.reshape(batch_users, -1)
#print("pred_shape:", pre.shape)
#pre = np.reshape(real_pred_ratings, (batch_users, num_movie))
pre = pre.detach().numpy()
idx = np.zeros_like(pre, dtype=bool)
idx[train_m[u_b].nonzero()] = True
pre[idx] = -np.inf
recall = []
for kj in [50, 100, 200]:
idx_topk_part = np.argpartition(-pre, kj, 1)
# print pre[np.arange(batch_users)[:, np.newaxis], idx_topk_part[:, :kj]]
# print idx_topk_part
pre_bin = np.zeros_like(pre, dtype=bool)
pre_bin[np.arange(batch_users)[:, np.newaxis], idx_topk_part[:, :kj]] = True
# print pre_bin
true_bin = np.zeros_like(pre, dtype=bool)
true_bin[test_m[u_b].nonzero()] = True
tmp = (np.logical_and(true_bin, pre_bin).sum(axis=1)).astype(np.float32)
#print("tmp:",tmp)
recall.append(tmp / np.minimum(kj, true_bin.sum(axis=1)))
#print("recall:",tmp / np.minimum(kj, true_bin.sum(axis=1)))
# print tmp
#print("recall:",recall)
ndcg = []
for kj in [50, 100, 200]:
            # Get the (unsorted) indices of the top-kj elements
idx_topk_part = np.argpartition(-pre, kj, 1)
#print("pre:",pre.shape)
#
#print("idx_topk_part[:, :kj]:",idx_topk_part[:, :kj])
            # Gather the top-kj predicted scores for each user
topk_part = pre[np.arange(batch_users)[:, np.newaxis], idx_topk_part[:, :kj]]
#print("topk_part:",topk_part[0:2])
idx_part = np.argsort(-topk_part, axis=1)
            # Sort the predicted scores in descending order and keep the resulting indices
#print("idx_part:",idx_part[0:2])
idx_topk = idx_topk_part[np.arange(end_index - start_index)[:, np.newaxis], idx_part]
            # Map back to the corresponding indices in the original array
#print("idx_topk:",idx_topk[0:2])
tp = np.log(2) / np.log(np.arange(2, kj + 2))
test_batch = test_m[u_b]
#print("test_batch:",test_batch)
DCG = (test_batch[np.arange(batch_users)[:, np.newaxis], idx_topk].toarray() * tp).sum(axis=1)
            # Compute DCG only at the positions where the true items appear in the predictions
#print("tp:",tp)
#print("DCG:",DCG)
IDCG = np.array([(tp[:min(n, kj)]).sum()
for n in test_batch.getnnz(axis=1)])
#print("IDCG:",np.array([(tp[:min(n, kj)]).sum()
# for n in test_batch.getnnz(axis=1)]))
ndcg.append(DCG / IDCG)
#print("ndcg:",ndcg)
recall50.append(recall[0])
recall100.append(recall[1])
recall200.append(recall[2])
ndcg50.append(ndcg[0])
ndcg100.append(ndcg[1])
ndcg200.append(ndcg[2])
recall50 = np.hstack(recall50)
recall100 = np.hstack(recall100)
recall200 = np.hstack(recall200)
ndcg50 = np.hstack(ndcg50)
ndcg100 = np.hstack(ndcg100)
ndcg200 = np.hstack(ndcg200)
print("recall50:",recall50[0:10])
print("ndcg50:", ndcg50.shape)
print("recall50:", np.mean(recall50), "ndcg50:",np.mean(ndcg50))
print("recall100:",np.mean(recall100),"ndcg100:", np.mean(ndcg100))
print("recall200:",np.mean(recall200), "ndcg200:",np.mean(ndcg200))
#f1.write(str(np.mean(recall100)) + ' ' + str(np.mean(ndcg100)) + '\n')
#f1.flush()
return np.mean(recall50), np.mean(recall100), np.mean(recall200), np.mean(ndcg50), np.mean(ndcg100), np.mean(ndcg200) | 73c77d15d4221a0116454e06f3d7823ffc66a323 | 9,189 |
def email_change_view(request, extra_context={},
success_url='email_verification_sent',
template_name='email_change/email_change_form.html',
email_message_template_name='email_change_request',
form_class=EmailChangeForm):
"""Allow a user to change the email address associated with the
user account.
"""
if request.method == 'POST':
form = form_class(username=request.user.username,
data=request.POST,
files=request.FILES)
if form.is_valid():
email = form.cleaned_data.get('email')
# First clean all email change requests made by this user
# Except subscription email validation
qs = EmailChangeRequest.objects.filter(user=request.user) \
.exclude(email=request.user.email)
qs.delete()
# Create an email change request
email_request = EmailChangeRequest.objects.create(
user=request.user,
email=email
)
email_request.send(email_message_template_name)
return redirect(success_url)
else:
form = form_class(username=request.user.username)
context = RequestContext(request, extra_context)
context['form'] = form
return render_to_response(template_name, context_instance=context) | 1b54394a04d07bea0d121574b71ba6a1a840f401 | 9,190 |
def check_is_pair(record1, record2):
"""Check if the two sequence records belong to the same fragment.
In an matching pair the records are left and right pairs
of each other, respectively. Returns True or False as appropriate.
Handles both Casava formats: seq/1 and seq/2, and 'seq::... 1::...'
and 'seq::... 2::...'.
"""
if hasattr(record1, 'quality') or hasattr(record2, 'quality'):
if not (hasattr(record1, 'quality') and hasattr(record2, 'quality')):
raise ValueError("both records must be same type (FASTA or FASTQ)")
lhs1, rhs1 = _split_left_right(record1.name)
lhs2, rhs2 = _split_left_right(record2.name)
# handle 'name/1'
if lhs1.endswith('/1') and lhs2.endswith('/2'):
subpart1 = lhs1.split('/', 1)[0]
subpart2 = lhs2.split('/', 1)[0]
assert subpart1
if subpart1 == subpart2:
return True
# handle '@name 1:rst'
elif lhs1 == lhs2 and rhs1.startswith('1:') and rhs2.startswith('2:'):
return True
return False | 225cd65d8c33968556c04a67ba304ebbc65ad4f2 | 9,191 |
import bz2
def decompress_bzip2_from_hdu(hdu):
"""Decompress data in a PyFits HDU object using libz2.
"""
data = hdu.data.field(0)
source_type = np.dtype(hdu.header['PCSRCTP'])
return (np.fromstring(bz2.decompress(data.tostring()),
dtype=source_type),
numpy_type_to_fits_type(source_type)) | 0fb99decefdc70ae082728c2d5b1228f79d13264 | 9,192 |
def grid_values(grid):
"""
Convert grid into a dict of {square: char} with '123456789' for empties.
Args:
grid : string
A grid in string form.
Returns:
grid : dict
Keys are the boxes (e.g., 'A1').
Values are the values in each box (e.g., '8').
If the box has no value, then the value will be '123456789'.
"""
# Change assertion for different-sized grids
assert len(grid) == 81
digits = '123456789'
values = []
for c in grid:
if c == '.':
values.append(digits)
elif c in digits:
values.append(c)
# Sanity check that values is size it should be
assert len(values) == 81
return dict(zip(boxes, values)) | 560fb700d6fc91b3ab94c2a5573a82d92ac4eb9d | 9,193 |
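# Usage sketch: `boxes` is assumed to be the usual 81 row/column labels
# ('A1' ... 'I9') defined elsewhere in the module.
boxes = [r + c for r in 'ABCDEFGHI' for c in '123456789']
puzzle = ('..3.2.6..9..3.5..1..18.64....81.29..7.......8'
          '..67.82....26.95..8..2.3..9..5.1.3..')
values = grid_values(puzzle)
# values['A1'] == '123456789' (empty cell), values['A3'] == '3'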
from typing import List
import re
def get_ftp_files(build_type, tag_name, config) -> List[ReleaseFile] :
"""!
@brief Gets file metadata for nightlies hosted on FTP, as determined by config["ftp"] attributes
@param [in] `build_type` Unknown str
@param [in] `tag_name` Github tag name of the release
@param [in] `config` config metadata set in main.py
"""
tag_regex = re.compile("nightly_(.*)")
build_group_regex = re.compile("nightly_.*-builds-([^.]+).*")
files = []
try:
with FTP(config["ftp"]["host"], config["ftp"]["user"], config["ftp"]["pass"]) as ftp:
# extract version
version_str = tag_regex.match(tag_name).group(1)
# extract filepath w/ version
# then list all ftp hits with that path
path_template = config["ftp"]["path"]
path = path_template.format(type=build_type, version=version_str)
file_entries = list(ftp.mlsd(path, ["type"]))
# get all ftp hits of type file
for entry in file_entries:
if entry[1]["type"] == "file":
files.append(entry[0])
except error_perm:
print("Received permanent FTP error!")
return []
out_data = []
for file in files:
# from the file list, extract only nightly files
file_match = build_group_regex.match(file)
if file_match is None:
print("Ignoring non nightly file '{}'".format(file))
continue
group_match = file_match.group(1)
primary_url = None
mirrors = []
# x64 is the name Visual Studio uses but Win64 works better for us since that gets displayed in the nightly post
if "x64" in group_match:
group_match = group_match.replace("x64", "Win64")
# construct the download URL list for all mirrors. The first listed ftp location is taken as the Primary
for mirror in config["ftp"]["mirrors"]:
download_url = mirror.format(type=build_type, version=version_str, file=file)
if primary_url is None:
primary_url = download_url
else:
mirrors.append(download_url)
# Form the List[ReleaseFile] list with the download URL links
out_data.append(ReleaseFile(file, primary_url, group_match, None, mirrors))
return out_data | f56bc55df5a7880f215c3ebd0a1eeb6796840798 | 9,194 |
def variables(i, o):
""" WRITEME
:type i: list
:param i: input L{Variable}s
:type o: list
:param o: output L{Variable}s
:returns:
the set of Variables that are involved in the subgraph that lies between i and o. This
includes i, o, orphans(i, o) and all values of all intermediary steps from i to o.
"""
return variables_and_orphans(i, o)[0] | 18f07280dae8471cd9c4f447061342d733e7b3c7 | 9,195 |
def ldns_resolver_dnssec_cd(*args):
"""LDNS buffer."""
return _ldns.ldns_resolver_dnssec_cd(*args) | eeaa2f7e7d385d64c575926f93247ad44594d09e | 9,197 |