content (string, length 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
def get_product_type_name(stac_item):
""" Create a ProductType name from a STAC Items metadata
"""
properties = stac_item['properties']
assets = stac_item['assets']
parts = []
platform = properties.get('platform') or properties.get('eo:platform')
instruments = properties.get('instruments') or \
properties.get('eo:instruments')
constellation = properties.get('constellation') or \
properties.get('eo:constellation')
mission = properties.get('mission') or properties.get('eo:mission')
if platform:
parts.append(platform)
if instruments:
parts.extend(instruments)
if constellation:
parts.append(constellation)
if mission:
parts.append(mission)
bands = properties.get('eo:bands')
if not bands:
bands = []
for asset in assets.values():
            bands.extend(asset.get('eo:bands', []))
parts.extend([band['name'] for band in bands])
if not parts:
raise RegistrationError(
'Failed to generate Product type name from metadata'
)
return '_'.join(parts) | fc7351c513eae63233b32b86fe6e5098a1571c8a | 15,921 |
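A minimal usage sketch (editor's addition) with a hypothetical STAC Item; any item exposing `properties` and `assets` in this shape would work:
item = {
    'properties': {'platform': 'sentinel-2a', 'instruments': ['msi']},
    'assets': {'B01': {'eo:bands': [{'name': 'B01'}]}},
}
get_product_type_name(item)  # -> 'sentinel-2a_msi_B01'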
def get_show_default():
""" gets the defaults """
return SHOW_DEFAULT | 88f6b202ae16155b8ec87eb566535703e33033b7 | 15,922 |
import numpy as np
import torch
from torch import autograd
def sample_langevin_v2(x, model, stepsize, n_steps, noise_scale=None, intermediate_samples=False,
clip_x=None, clip_grad=None, reject_boundary=False, noise_anneal=None,
spherical=False, mh=False, temperature=None, norm=False, cut=True):
"""Langevin Monte Carlo
x: torch.Tensor, initial points
model: An energy-based model. returns energy
stepsize: float
n_steps: integer
noise_scale: Optional. float. If None, set to np.sqrt(stepsize * 2)
    clip_x : tuple (start, end) or None. Boundary of the square domain.
reject_boundary: Reject out-of-domain samples if True. otherwise clip.
"""
assert not ((stepsize is None) and (noise_scale is None)), 'stepsize and noise_scale cannot be None at the same time'
if noise_scale is None:
noise_scale = np.sqrt(stepsize * 2)
if stepsize is None:
stepsize = (noise_scale ** 2) / 2
noise_scale_ = noise_scale
stepsize_ = stepsize
if temperature is None:
temperature = 1.
# initial data
x.requires_grad = True
E_x = model(x)
grad_E_x = autograd.grad(E_x.sum(), x, only_inputs=True)[0]
if clip_grad is not None:
grad_E_x = clip_vector_norm(grad_E_x, max_norm=clip_grad)
E_y = E_x; grad_E_y = grad_E_x;
l_samples = [x.detach().to('cpu')]
l_dynamics = []; l_drift = []; l_diffusion = []; l_accept = []
for i_step in range(n_steps):
noise = torch.randn_like(x) * noise_scale_
dynamics = - stepsize_ * grad_E_x / temperature + noise
y = x + dynamics
reject = torch.zeros(len(y), dtype=torch.bool)
if clip_x is not None:
if reject_boundary:
accept = ((y >= clip_x[0]) & (y <= clip_x[1])).view(len(x), -1).all(dim=1)
reject = ~ accept
y[reject] = x[reject]
else:
y = torch.clamp(y, clip_x[0], clip_x[1])
if norm:
y = y/y.sum(dim=(2,3)).view(-1,1,1,1)
if spherical:
y = y / y.norm(dim=1, p=2, keepdim=True)
# y_accept = y[~reject]
# E_y[~reject] = model(y_accept)
# grad_E_y[~reject] = autograd.grad(E_y.sum(), y_accept, only_inputs=True)[0]
E_y = model(y)
grad_E_y = autograd.grad(E_y.sum(), y, only_inputs=True)[0]
if clip_grad is not None:
grad_E_y = clip_vector_norm(grad_E_y, max_norm=clip_grad)
if mh:
y_to_x = ((grad_E_x + grad_E_y) * stepsize_ - noise).view(len(x), -1).norm(p=2, dim=1, keepdim=True) ** 2
x_to_y = (noise).view(len(x), -1).norm(dim=1, keepdim=True, p=2) ** 2
transition = - (y_to_x - x_to_y) / 4 / stepsize_ # B x 1
prob = -E_y + E_x
accept_prob = torch.exp((transition + prob) / temperature)[:,0] # B
reject = (torch.rand_like(accept_prob) > accept_prob) # | reject
y[reject] = x[reject]
E_y[reject] = E_x[reject]
grad_E_y[reject] = grad_E_x[reject]
x = y; E_x = E_y; grad_E_x = grad_E_y
l_accept.append(~reject)
x = y; E_x = E_y; grad_E_x = grad_E_y
if noise_anneal is not None:
noise_scale_ = noise_scale / (1 + i_step)
l_dynamics.append(dynamics.detach().cpu())
l_drift.append((- stepsize * grad_E_x).detach().cpu())
l_diffusion.append(noise.detach().cpu())
l_samples.append(x.detach().cpu())
if cut:
x = x[x.var(dim=(2,3))>1e-6].view(-1,1,40,40)
return {'sample': x.detach(), 'l_samples': l_samples, 'l_dynamics': l_dynamics,
'l_drift': l_drift, 'l_diffusion': l_diffusion, 'l_accept': l_accept} | a3dd79facb089afbeafc4e9845cf1324de75226b | 15,923 |
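A usage sketch (editor's addition) with a toy quadratic energy model; a real caller would pass its own energy-based model:
import torch
import torch.nn as nn

class ToyEnergy(nn.Module):
    def forward(self, x):
        # per-sample energy 0.5 * ||x||^2, shape (batch,)
        return 0.5 * (x ** 2).flatten(1).sum(dim=1)

x0 = torch.randn(8, 1, 40, 40)  # initial points
out = sample_langevin_v2(x0, ToyEnergy(), stepsize=0.01, n_steps=10,
                         clip_x=(-1.0, 1.0), cut=False)
samples = out['sample']  # final samples, shape (8, 1, 40, 40)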
import numpy as np
def fpoly(x, m):
"""Compute the first `m` simple polynomials.
Parameters
----------
x : array-like
Compute the simple polynomials at these abscissa values.
m : :class:`int`
The number of simple polynomials to compute. For example, if
:math:`m = 3`, :math:`x^0`, :math:`x^1` and
:math:`x^2` will be computed.
Returns
-------
:class:`numpy.ndarray`
"""
if isinstance(x, np.ndarray):
n = x.size
else:
n = 1
if m < 1:
raise ValueError('Order of polynomial must be at least 1.')
try:
dt = x.dtype
except AttributeError:
dt = np.float64
leg = np.ones((m, n), dtype=dt)
if m >= 2:
leg[1, :] = x
if m >= 3:
for k in range(2, m):
leg[k, :] = leg[k-1, :] * x
return leg | 335c73bf4008be1331d8f030266f5f89d072ed2c | 15,924 |
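A quick check (editor's addition): the first three simple polynomials (x**0, x**1, x**2) evaluated at three abscissa values:
import numpy as np
vals = fpoly(np.array([0.5, 1.0, 2.0]), 3)
# vals[0] -> [1., 1., 1.], vals[1] -> [0.5, 1., 2.], vals[2] -> [0.25, 1., 4.]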
def _function_fullname(f):
"""Return the full name of the callable `f`, including also its module name."""
function, _ = getfunc(f) # get the raw function also for OOP methods
if not function.__module__: # At least macros defined in the REPL have `__module__=None`.
return function.__qualname__
return f"{function.__module__}.{function.__qualname__}" | eb6fd829081a4606c7be4520a15d627960360b8f | 15,927 |
def dists2centroids_numpy(a):
"""
:param a: dist ndarray, shape = (*, h, w, 4=(t, r, b, l))
:return a: Box ndarray, shape is (*, h, w, 4=(cx, cy, w, h))
"""
return corners2centroids_numpy(dists2corners_numpy(a)) | a85122d871179a9d0fb7fa9b844caa448398184c | 15,928 |
import math
import matplotlib.pyplot as plt
import seaborn as sns
def heatmap(data_df, figsize=None, cmap="Blues", heatmap_kw=None, gridspec_kw=None):
""" Plot a residue matrix as a color-encoded matrix.
Parameters
----------
data_df : :class:`pandas.DataFrame`
A residue matrix produced with :func:`~luna.analysis.residues.generate_residue_matrix`.
figsize : tuple, optional
Size (width, height) of a figure in inches.
cmap : str, iterable of str
The mapping from data values to color space. The default value is 'Blues'.
heatmap_kw : dict, optional
Keyword arguments for :func:`seaborn.heatmap`.
gridspec_kw : dict, optional
Keyword arguments for :class:`matplotlib.gridspec.GridSpec`.
Used only if the residue matrix (``data_df``) contains interactions.
Returns
-------
: :class:`matplotlib.axes.Axes` or :class:`numpy.ndarray` of :class:`matplotlib.axes.Axes`
"""
data_df = data_df.reset_index()
heatmap_kw = heatmap_kw or {}
gridspec_kw = gridspec_kw or {}
interactions = None
if "interaction" in data_df.columns:
interactions = sorted(data_df["interaction"].unique())
max_value = data_df[data_df.columns[2:]].max().max()
else:
max_value = data_df[data_df.columns[1:]].max().max()
if not interactions:
data_df.set_index('entry', inplace=True)
fig = plt.figure(figsize=figsize)
ax = sns.heatmap(data_df, cmap=cmap, vmax=max_value, vmin=0, **heatmap_kw)
ax.set_xlabel("")
ax.set_ylabel("")
return ax
else:
ncols = 3
if "ncols" in gridspec_kw:
ncols = gridspec_kw["ncols"]
del gridspec_kw["ncols"]
nrows = math.ceil(len(interactions) / ncols)
        # squeeze=False keeps axs 2-D even when nrows == 1
        fig, axs = plt.subplots(nrows, ncols, figsize=figsize, gridspec_kw=gridspec_kw, squeeze=False)
row, col = 0, 0
for i, interaction in enumerate(interactions):
df = data_df[data_df["interaction"] == interaction].copy()
df.drop(columns="interaction", inplace=True)
df.set_index('entry', inplace=True)
g = sns.heatmap(df, cmap=cmap, vmax=max_value, vmin=0, ax=axs[row][col], **heatmap_kw)
g.set_title(interaction)
g.set_xlabel("")
g.set_ylabel("")
col += 1
if col == ncols:
row += 1
col = 0
if len(interactions) < nrows * ncols:
diff = (nrows * ncols) - len(interactions)
for i in range(1, diff + 1):
axs[-1][-1 * i].axis('off')
return axs | 99ba802f82f9425fa3946253be78730b6216d9c9 | 15,929 |
import torch
def combined_loss(x, reconstructed_x, mean, log_var, args):
"""
MSE loss for reconstruction, KLD loss as per VAE.
Also want to output dimension (element) wise RCL and KLD
"""
# First, binary data
loss1 = torch.nn.BCEWithLogitsLoss(size_average=False)
loss1_per_element = torch.nn.BCEWithLogitsLoss(
size_average=False,
reduce=False
)
binary_range = args.binary_real_one_hot_parameters['binary_range']
reconstructed_x1 = reconstructed_x[:, binary_range[0]: binary_range[1]]
x1 = x[:, binary_range[0]: binary_range[1]]
RCL1 = loss1(reconstructed_x1, x1)
RCL1_per_element = loss1_per_element(reconstructed_x1, x1)
# Next, real data
loss2 = torch.nn.MSELoss(size_average=False)
loss2_per_element = torch.nn.MSELoss(size_average=False, reduce=False)
real_range = args.binary_real_one_hot_parameters['real_range']
reconstructed_x2 = reconstructed_x[:, real_range[0]: real_range[1]]
x2 = x[:, real_range[0]: real_range[1]]
RCL2 = loss2(reconstructed_x2, x2)
RCL2_per_element = loss2_per_element(reconstructed_x2, x2)
# Next, one-hot data
loss3 = torch.nn.CrossEntropyLoss(size_average=True)
loss3_per_element = torch.nn.CrossEntropyLoss(
size_average=True,
reduce=False
)
one_hot_range = args.binary_real_one_hot_parameters['one_hot_range']
reconstructed_x3 = reconstructed_x[:, one_hot_range[0]: one_hot_range[1]]
x3 = x[:, one_hot_range[0]: one_hot_range[1]]
# This has 3 one-hot's. lets split it up
x3_1 = x3[:, :19]
x3_2 = x3[:, 19:19 + 19]
x3_3 = x3[:, 19+19:]
reconstructed_x3_1 = reconstructed_x3[:, :19]
reconstructed_x3_2 = reconstructed_x3[:, 19:19 + 19]
reconstructed_x3_3 = reconstructed_x3[:, 19+19:]
_, labels1 = x3_1.max(dim=1)
_, labels2 = x3_2.max(dim=1)
_, labels3 = x3_3.max(dim=1)
# print(labels.size(), reconstructed_x3.size(), x3.size())
RCL3_1 = loss3(reconstructed_x3_1, labels1.long())
RCL3_per_element_1 = loss3_per_element(reconstructed_x3_1, labels1.long())
RCL3_2 = loss3(reconstructed_x3_2, labels2.long())
RCL3_per_element_2 = loss3_per_element(reconstructed_x3_2, labels2.long())
RCL3_3 = loss3(reconstructed_x3_3, labels3.long())
RCL3_per_element_3 = loss3_per_element(reconstructed_x3_3, labels3.long())
KLD = -0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp())
KLD_per_element = -0.5 * (1 + log_var - mean.pow(2) - log_var.exp())
RCL = RCL1 + RCL2 + RCL3_1 + RCL3_2 + RCL3_3
RCL_per_element = torch.cat(
(
RCL1_per_element,
RCL2_per_element,
RCL3_per_element_1.view([-1, 1]),
RCL3_per_element_2.view([-1, 1]),
RCL3_per_element_3.view([-1, 1])
),
1
)
return RCL + args.beta_vae*KLD, RCL, KLD, RCL_per_element, KLD_per_element | 162b2706f9643f66ebb0c3b000ea025d411029e2 | 15,930 |
def isfloat(string: str) -> bool:
"""
This function receives a string and returns if it is a float or not.
:param str string: The string to check.
:return: A boolean representing if the string is a float.
:rtype: bool
"""
try:
float(string)
return True
except (ValueError, TypeError):
return False | ac6d8fcbbcf6b8cb442c50895576f417618a7429 | 15,931 |
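A few illustrative calls (editor's addition):
isfloat("3.14")  # -> True
isfloat("1e-5")  # -> True
isfloat("abc")   # -> False
isfloat(None)    # -> False (the TypeError is caught)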
import re
def parse_path_kvs(file_path):
"""
Find all key-value pairs in a file path;
the pattern is *_KEY=VALUE_*.
"""
parser = re.compile("(?<=[/_])[a-z0-9]+=[a-zA-Z0-9]+[.]?[0-9]*(?=[_/.])")
kvs = parser.findall(file_path)
kvs = [kv.split("=") for kv in kvs]
return {kv[0]: to_number(kv[1]) for kv in kvs} | 65d3711752808299272383f4b1328336ba9c463c | 15,932 |
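An illustrative call (editor's addition); `to_number` is assumed to be a module helper that converts numeric strings to numbers:
# parse_path_kvs("runs/lr=0.01_seed=3/model.pt")
# -> {"lr": 0.01, "seed": 3}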
def user_count_by_type(utype: str) -> int:
"""Returns the total number of users that match a given type"""
return get_count('users', 'type', (utype.lower(),)) | 232c4cc40ba31b4fb60f40708f2a38ae73096aea | 15,933 |
from itertools import groupby
def grouperElements(liste, function=len):
    """
    Groups the items according to the given key function. For both the
    Kalaba data and the graphemes we will need to group by length.
    """
lexique=[]
data=sorted(liste, key=function)
for k,g in groupby(data, function):
lexique.append(list(g))
return lexique | e75e8e379378ac1207ae0ee9521f630c04cff2f7 | 15,935 |
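An illustrative call (editor's addition), grouping strings by length:
grouperElements(["a", "to", "of", "the"])  # -> [['a'], ['to', 'of'], ['the']]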
def SensorLocation_Cast(*args):
"""
Cast(BaseObject o) -> SensorLocation
SensorLocation_Cast(Seiscomp::Core::BaseObjectPtr o) -> SensorLocation
"""
return _DataModel.SensorLocation_Cast(*args) | 85a5a6f711c0c5d77f0b93b2e6f819bdfd466ce1 | 15,936 |
def fatorial(num=1, show=False):
"""
-> Calcula o fatorial de um número.
:param num: Fatorial a ser calculado
:param show: (opicional) Mostra a conta
:return: Fatorial de num.
"""
print('-=' * 20)
fat = 1
for i in range(num, 0, -1):
fat *= i
if show:
resp = f'{str(num)}! = '
while num > 1:
resp += f'{str(num)} x '
num -= 1
resp += f'{str(num)} = {str(fat)}'
return resp
else:
return fat | 80ca60d2ba64a7089f3747a13c109de0bc7c159c | 15,937 |
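Illustrative calls (editor's addition); note that show=True returns the formatted string rather than the number:
fatorial(5)             # -> 120
fatorial(5, show=True)  # -> '5! = 5 x 4 x 3 x 2 x 1 = 120'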
import matplotlib.dates as mdates
import numpy as np
import pandas as pd
def linear_trend(series=None, coeffs=None, index=None, x=None, median=False):
    """Get a series of points representing a linear trend through `series`
    First computes the linear regression, then evaluates it at each
    date of `series.index`
Args:
series (pandas.Series): data with DatetimeIndex as the index.
coeffs (array or List): [slope, intercept], result from np.polyfit
index (DatetimeIndex, list[date]): Optional. If not passing series, can pass
the DatetimeIndex or list of dates to evaluate coeffs at.
Converts to numbers using `matplotlib.dates.date2num`
x (ndarray-like): directly pass the points to evaluate the poly1d
Returns:
Series: a line, equal length to arr, with same index as `series`
"""
if coeffs is None:
coeffs = fit_line(series, median=median)
if index is None and x is None:
index = series.dropna().index
if x is None:
x = mdates.date2num(index)
poly = np.poly1d(coeffs)
linear_points = poly(x)
return pd.Series(linear_points, index=index) | 6bd09089ffd828fd3d408c0c2b03c3facfcfbd6b | 15,939 |
def snapshot_metadata_get(context, snapshot_id):
"""Get all metadata for a snapshot."""
return IMPL.snapshot_metadata_get(context, snapshot_id) | 8dda987916cb772d6498cd295056ef2b5465c00d | 15,940 |
import networkx as nx
def graph_from_tensors(g, is_real=True):
    """
    Remove self-loops from `g` and reduce it to a single connected component:
    the largest one for real graphs, otherwise one chosen by
    `pick_connected_component_new`.
    """
loop_edges = list(nx.selfloop_edges(g))
if len(loop_edges) > 0:
g.remove_edges_from(loop_edges)
if is_real:
subgraph = (g.subgraph(c) for c in nx.connected_components(g))
g = max(subgraph, key=len)
g = nx.convert_node_labels_to_integers(g)
else:
g = pick_connected_component_new(g)
return g | 7f43531f7cbf9221a6b00a56a24325b58f60ea84 | 15,941 |
def hook(t):
"""Calculate the progress from download callbacks (For progress bar)"""
def inner(bytes_amount):
t.update(bytes_amount) # Update progress bar
return inner | d8228b9dec203aaa32d268dea8feef52e8db6137 | 15,942 |
def delete(event, context):
"""
Delete a cfn stack using an assumed role
"""
stack_id = event["PhysicalResourceId"]
if '[$LATEST]' in stack_id:
# No stack was created, so exiting
return stack_id, {}
cfn_client = get_client("cloudformation", event, context)
cfn_client.delete_stack(StackName=stack_id)
return stack_id | 555682546aa6f1bbbc133538003b51f02e744d70 | 15,943 |
import six
def _rec_compare(lhs,
rhs,
ignore,
only,
key,
report_mode,
value_cmp_func,
_regex_adapter=RegexAdapter):
"""
Recursive deep comparison implementation
"""
# pylint: disable=unidiomatic-typecheck
lhs_cat = _categorise(lhs)
rhs_cat = _categorise(rhs)
## NO VALS
if ((lhs_cat == Category.ABSENT) or (rhs_cat == Category.ABSENT)) and \
(lhs_cat != Category.CALLABLE) and (rhs_cat != Category.CALLABLE):
return _build_res(
key=key,
match=Match.PASS if lhs_cat == rhs_cat else Match.FAIL,
lhs=fmt(lhs),
rhs=fmt(rhs))
## CALLABLES
if lhs_cat == rhs_cat == Category.CALLABLE:
match = Match.from_bool(lhs == rhs)
return _build_res(
key=key,
match=match,
lhs=(0, 'func', callable_name(lhs)),
rhs=(0, 'func', callable_name(rhs)))
if lhs_cat == Category.CALLABLE:
result, error = compare_with_callable(callable_obj=lhs, value=rhs)
return _build_res(
key=key,
match=Match.from_bool(result),
lhs=(0, 'func', callable_name(lhs)),
rhs='Value: {}, Error: {}'.format(
rhs, error) if error else fmt(rhs))
if rhs_cat == Category.CALLABLE:
result, error = compare_with_callable(callable_obj=rhs, value=lhs)
return _build_res(
key=key,
match=Match.from_bool(result),
lhs='Value: {}, Error: {}'.format(
lhs, error) if error else fmt(lhs),
rhs=(0, 'func', callable_name(rhs)))
## REGEXES
if lhs_cat == rhs_cat == Category.REGEX:
match = _regex_adapter.compare(lhs, rhs)
return _build_res(
key=key,
match=match,
lhs=_regex_adapter.serialize(lhs),
rhs=_regex_adapter.serialize(rhs))
if lhs_cat == Category.REGEX:
match = _regex_adapter.match(regex=lhs, value=rhs)
return _build_res(
key=key,
match=match,
lhs=_regex_adapter.serialize(lhs),
rhs=fmt(rhs))
if rhs_cat == Category.REGEX:
match = _regex_adapter.match(regex=rhs, value=lhs)
return _build_res(
key=key,
match=match,
lhs=fmt(lhs),
rhs=_regex_adapter.serialize(rhs))
## VALUES
if lhs_cat == rhs_cat == Category.VALUE:
response = value_cmp_func(lhs, rhs)
match = Match.from_bool(response)
return _build_res(
key=key,
match=match,
lhs=fmt(lhs),
rhs=fmt(rhs))
## ITERABLE
if lhs_cat == rhs_cat == Category.ITERABLE:
results = []
match = Match.IGNORED
for lhs_item, rhs_item in six.moves.zip_longest(lhs, rhs):
# iterate all elems in both iterable non-mapping objects
result = _rec_compare(
lhs_item,
rhs_item,
ignore,
only,
key=None,
report_mode=report_mode,
value_cmp_func=value_cmp_func)
match = Match.combine(match, result[1])
results.append(result)
# two lists of formatted objects from a
# list of objects with lhs/rhs attributes
lhs_vals, rhs_vals = _partition(results)
return _build_res(
key=key,
match=match,
lhs=(1, lhs_vals),
rhs=(1, rhs_vals))
## DICTS
if lhs_cat == rhs_cat == Category.DICT:
match, results = _cmp_dicts(
lhs, rhs, ignore, only, report_mode, value_cmp_func)
lhs_vals, rhs_vals = _partition(results)
return _build_res(
key=key,
match=match,
lhs=(2, lhs_vals),
rhs=(2, rhs_vals))
## DIFF TYPES -- catch-all for unhandled
# combinations, e.g. VALUE vs ITERABLE
return _build_res(
key=key,
match=Match.FAIL,
lhs=fmt(lhs),
rhs=fmt(rhs)) | b7d26ed038152ee98a7b50821f3485cdc66a29d4 | 15,944 |
from redis import Redis
from rq_scheduler import Scheduler
def exists_job_onqueue(queuename, when, hour):
"""
Check if a job is present on queue
"""
scheduler = Scheduler(connection=Redis())
jobs = scheduler.get_jobs()
for job in jobs:
if 'reset_stats_queue' in job.func_name:
args = job.args
if queuename == args[0] and when == args[1] and hour == args[2]:
return True
return False | 165bb3da4746267d789d39ee30ebd9b098ea7c1e | 15,945 |
def q_inv_batch_of_sequences(seq):
"""
:param seq: (n_batch x n_frames x 32 x 4)
:return:
"""
n_batch = seq.size(0)
n_frames = seq.size(1)
n_joints = seq.size(2)
seq = seq.reshape((n_batch * n_frames * n_joints, 4))
seq = qinv(seq)
seq = seq.reshape((n_batch, n_frames, n_joints, 4))
return seq | 9c2035a1864e47e99ac074815199217867da0c96 | 15,946 |
def msa_job_space_demand(job_space_demand):
"""
Job space demand aggregated to the MSA.
"""
df = job_space_demand.local
return df.fillna(0).sum(axis=1).to_frame('msa') | 044fe6e814c2773629b8f648b789ba99bbdf0108 | 15,947 |
import numpy as np
def get_pdf_cdf_3(corr, bins_pdf, bins_cdf, add_point=True, cdf_bool=True,
                  checknan=False):
    """
    corr is a 3d array; the first dimension indexes the iterations, the
    second dimension is usually the cells.
    The function returns the pdf and the cdf.
    The add_point option duplicates the last point.
    checknan checks whether there are any NaNs in the set and, if so, gives
    NaN as the result for the pdf and cdf instead of the 0 that would
    otherwise be calculated.
    """
N1, N2, N3 = corr.shape
pdfs = np.zeros((N1, N2, len(bins_pdf) - 1))
cdfs = np.zeros((N1, N2, len(bins_cdf) - 1))
for i in range(N1):
pdfs[i], cdfs[i] = get_pdf_cdf_2(corr[i], bins_pdf, bins_cdf,
add_point=False, cdf_bool=False,
checknan=checknan)
if cdf_bool:
cdfs = np.cumsum(cdfs, axis=2)/corr.shape[2]
if add_point:
pdfs = add_point3(pdfs)
cdfs = add_point3(cdfs)
return pdfs, cdfs | 0c6983bf6c3f77aebb7a9c667c54a560ed4a3cf0 | 15,948 |
def incidence_matrix(
H, order=None, sparse=True, index=False, weight=lambda node, edge, H: 1
):
"""
A function to generate a weighted incidence matrix from a Hypergraph object,
where the rows correspond to nodes and the columns correspond to edges.
Parameters
----------
H: Hypergraph object
The hypergraph of interest
order: int, optional
Order of interactions to use. If None (default), all orders are used. If int,
must be >= 1.
sparse: bool, default: True
Specifies whether the output matrix is a scipy sparse matrix or a numpy matrix
index: bool, default: False
Specifies whether to output dictionaries mapping the node and edge IDs to indices
weight: lambda function, default=lambda function outputting 1
A function specifying the weight, given a node and edge
Returns
-------
I: numpy.ndarray or scipy csr_matrix
The incidence matrix, has dimension (n_nodes, n_edges)
rowdict: dict
The dictionary mapping indices to node IDs, if index is True
coldict: dict
The dictionary mapping indices to edge IDs, if index is True
"""
edge_ids = H.edges
if order is not None:
edge_ids = [id_ for id_, edge in H._edge.items() if len(edge) == order + 1]
if not edge_ids:
return (np.array([]), {}, {}) if index else np.array([])
node_ids = H.nodes
num_edges = len(edge_ids)
num_nodes = len(node_ids)
node_dict = dict(zip(node_ids, range(num_nodes)))
edge_dict = dict(zip(edge_ids, range(num_edges)))
if node_dict and edge_dict:
if index:
rowdict = {v: k for k, v in node_dict.items()}
coldict = {v: k for k, v in edge_dict.items()}
if sparse:
# Create csr sparse matrix
rows = []
cols = []
data = []
for node in node_ids:
memberships = H.nodes.memberships(node)
# keep only those with right order
memberships = [i for i in memberships if i in edge_ids]
if len(memberships) > 0:
for edge in memberships:
data.append(weight(node, edge, H))
rows.append(node_dict[node])
cols.append(edge_dict[edge])
else: # include disconnected nodes
for edge in edge_ids:
data.append(0)
rows.append(node_dict[node])
cols.append(edge_dict[edge])
I = csr_matrix((data, (rows, cols)))
else:
# Create an np.matrix
I = np.zeros((num_nodes, num_edges), dtype=int)
for edge in edge_ids:
members = H.edges.members(edge)
for node in members:
I[node_dict[node], edge_dict[edge]] = weight(node, edge, H)
if index:
return I, rowdict, coldict
else:
return I
else:
if index:
return np.array([]), {}, {}
else:
return np.array([]) | efbac24664f30a1cd424843042d7e203a0e96c37 | 15,951 |
def initial_landing_distance(interest_area, fixation_sequence):
"""
Given an interest area and fixation sequence, return the initial landing
distance on that interest area. The initial landing distance is the pixel
distance between the first fixation to land in an interest area and the
left edge of that interest area (or, in the case of right-to-left text,
the right edge). Technically, the distance is measured from the text onset
without including any padding. Returns `None` if no fixation landed on the
interest area.
"""
for fixation in fixation_sequence.iter_without_discards():
if fixation in interest_area:
for char in interest_area:
if fixation in char: # be sure not to find a fixation in the padding
return abs(interest_area.onset - fixation.x)
return None | b3512ea7cb149667e09c56541340122ec1dddcb1 | 15,952 |
import gzip
import pickle
def load_object(filename):
"""
Load saved object from file
:param filename: The file to load
:return: the loaded object
"""
with gzip.GzipFile(filename, 'rb') as f:
return pickle.load(f) | f7e15216c371e1ab05169d40ca4df15611fa7978 | 15,953 |
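A usage sketch (editor's addition), assuming the object was saved with the matching gzip + pickle convention and a hypothetical file name:
import gzip
import pickle

with gzip.GzipFile('model.pkl.gz', 'wb') as f:
    pickle.dump({'weights': [1, 2, 3]}, f)
obj = load_object('model.pkl.gz')  # -> {'weights': [1, 2, 3]}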
from typing import Dict
from typing import Tuple
def list_events_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
"""Lists all events and return outputs in Demisto's format
Args:
client: Client object with request
args: Usually demisto.args()
Returns:
Outputs
"""
max_results = args.get('max_results')
event_created_date_before = args.get('event_created_date_before')
event_created_date_after = args.get('event_created_date_after')
raw_response = client.list_events(
event_created_date_before=event_created_date_before,
event_created_date_after=event_created_date_after,
max_results=max_results)
events = raw_response.get('event')
if events:
title = f'{INTEGRATION_NAME} - List events:'
context_entry = raw_response_to_context(events)
context = {
f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID && val.ID === obj.ID)': context_entry
}
# Creating human readable for War room
human_readable = tableToMarkdown(title, context_entry)
# Return data to Demisto
return human_readable, context, raw_response
else:
return f'{INTEGRATION_NAME} - Could not find any events.', {}, {} | b4e3916ee8d65a47e2128453fd042d998184ea7b | 15,954 |
def response_map(fetch_map):
"""Create an expected FETCH response map from the given request map.
Most of the keys returned in a FETCH response are unmodified from the
request. The exceptions are BODY.PEEK and BODY partial range. A BODY.PEEK
request is answered without the .PEEK suffix. A partial range (e.g.
BODY[]<0.1000>) has the octet count (1000) removed, since that information
is provided in the literal size (and may be different if the data was
truncated).
"""
if not isinstance(fetch_map, dict):
fetch_map = dict((v, v) for v in fetch_map)
rmap = {}
for k, v in fetch_map.items():
for name in ('BODY', 'BINARY'):
if k.startswith(name):
k = k.replace(name + '.PEEK', name, 1)
if k.endswith('>'):
k = k.rsplit('.', 1)[0] + '>'
rmap[k] = v
return rmap | 42d992662e5bba62046c2fc1a50f0f8275798ef8 | 15,956 |
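An illustrative call (editor's addition) showing the BODY.PEEK and partial-range rewriting:
response_map(['UID', 'BODY.PEEK[]<0.1000>'])
# -> {'UID': 'UID', 'BODY[]<0>': 'BODY.PEEK[]<0.1000>'}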
def RigidTendonMuscle_getClassName():
"""RigidTendonMuscle_getClassName() -> std::string const &"""
return _actuators.RigidTendonMuscle_getClassName() | 8c6bd6604350e6e2a30ee48c018307bc68dea76f | 15,957 |
import json
import time
import uuid
from flask import jsonify, request
def submit():
"""Receives the new paste and stores it in the database."""
if request.method == 'POST':
form = request.get_json(force=True)
pasteText = json.dumps(form['pasteText'])
nonce = json.dumps(form['nonce'])
burnAfterRead = json.dumps(form['burnAfterRead'])
pasteKeyHash = json.dumps(form['hash'])
if burnAfterRead == "true":
burnAfterRead = True
else:
burnAfterRead = False
# Creates Expire time
expireTime = json.dumps(form['expire_time'])
expireTime = int(time.time()) + int(expireTime)*60
# set paste type
pasteType = json.dumps(form['pasteType'])[1:-1] # cuts "'" out
# print(type(form['nonce']))
db = get_db()
# Creates random 64 bit int
idAsInt = uuid.uuid4().int >> 65
db.execute('''insert into pastes (id, paste_text, nonce,
expire_time, burn_after_read, paste_hash, paste_format) values (?, ?, ?, ?, ?, ?, ?)''',
[idAsInt, pasteText, nonce, expireTime, burnAfterRead, pasteKeyHash, pasteType])
db.commit() # add text to sqlite3 db
return jsonify(id=hex(idAsInt)[2:]) | 3f88b665b226c81785b0ecafe3389bb15dcbeaa4 | 15,958 |
import numpy as np
def money_recall_at_k(recommended_list, bought_list, prices_recommended, prices_bought, k=5):
    """ Share of revenue coming from relevant recommended items
    :param recommended_list - list of recommended item ids
    :param bought_list - list of purchased item ids
    :param prices_recommended - list of prices of the recommended items
    :param prices_bought - list of prices of the purchased items
    """
flags = np.isin(recommended_list[:k], bought_list) # get recommend to bought matches
prices = np.array(prices_recommended[:k]) # get prices of recommended items
return flags @ prices / np.sum(prices_bought) | edeb6c56c5ce6a2af0321aee350c5f129737cab0 | 15,959 |
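A worked example (editor's addition) with made-up ids and prices:
recommended = [1, 2, 3, 4, 5]
bought = [2, 5, 9]
prices_recommended = [100, 50, 20, 30, 10]
prices_bought = [50, 10, 200]
# items 2 and 5 are both recommended and bought: (50 + 10) / 260
money_recall_at_k(recommended, bought, prices_recommended, prices_bought, k=5)  # -> ~0.231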
import networkx
def get_clustering_fips( collection_of_fips, adj = None ):
"""
Finds the *separate* clusters of counties or territorial units that are clustered together. This is used to identify possibly *different* clusters of counties that may be separate from each other. If one does not supply an adjacency :py:class:`dict`, it uses the adjacency dictionary that :py:meth:`fips_adj_2018 <covid19_stats.COVID19Database.fips_adj_2018>` returns. Look at :download:`fips_2019_adj.pkl.gz </_static/gis/fips_2019_adj.pkl.gz>` to see what this dictionary looks like.
:param list collection_of_fips: the :py:class:`list` of counties or territorial units, each identified by its `FIPS code`_.
:param dict adj: optionally specified adjacency dictionary. Otherwise it uses the :py:meth:`fips_adj_2018 <covid19_stats.COVID19Database.fips_adj_2018>` returned dictionary. Look at :download:`fips_2019_adj.pkl.gz </_static/gis/fips_2019_adj.pkl.gz>` to see what this dictionary looks like.
:returns: a :py:class:`list` of counties clustered together. Each cluster is a :py:class:`set` of `FIPS code`_\ s of counties grouped together.
:rtype: list
"""
if adj is None: adj = COVID19Database.fips_adj_2018( )
fips_rem = set( collection_of_fips )
#
## our adjacency matrix from this
subset = set(filter(lambda tup: all(map(lambda tok: tok in fips_rem, tup)), adj )) | \
set(map(lambda fips: ( fips, fips ), fips_rem ))
G = networkx.Graph( sorted( subset ) )
#
## now greedy clustering algo
fips_clusters = [ ]
while len( fips_rem ) > 0:
first_fips = min( fips_rem )
fips_excl = fips_rem - set([ first_fips, ])
fips_clust = [ first_fips ]
for fips in fips_excl:
try:
dist = networkx.shortest_path_length( G, first_fips, fips )
fips_clust.append( fips )
except: pass
fips_clusters.append( set( fips_clust ) )
fips_rem = fips_rem - set( fips_clust )
return fips_clusters | acdd6daa9b0b5d200d98271a4c989e5a5912a684 | 15,960 |
from functools import wraps
def stop_after(space_number):
""" Decorator that determines when to stop tab-completion
Decorator that tells command specific complete function
(ex. "complete_use") when to stop tab-completion.
Decorator counts number of spaces (' ') in line in order
to determine when to stop.
ex. "use exploits/dlink/specific_module " -> stop complete after 2 spaces
"set rhost " -> stop completing after 2 spaces
"run " -> stop after 1 space
:param space_number: number of spaces (' ') after which tab-completion should stop
:return:
"""
def _outer_wrapper(wrapped_function):
@wraps(wrapped_function)
def _wrapper(self, *args, **kwargs):
try:
if args[1].count(" ") == space_number:
return []
except Exception as err:
logger.error(err)
return wrapped_function(self, *args, **kwargs)
return _wrapper
return _outer_wrapper | f0ca0bb0f33c938f6a1de619f70b204e92b20974 | 15,961 |
import numpy as np
def find_cut_line(img_closed_original):  # handles the case where front and back sides are stuck together: use the row with the fewest white pixels as the dividing line
    """
    Forcibly splits stuck-together regions according to a simple rule.
    :param img_closed_original: binarized image
    :return: processed binarized image
    """
    img_closed = img_closed_original.copy()
    img_closed = img_closed // 250
    #print(img_closed.shape)
    width_sum = img_closed.sum(axis=1)  # sum along the width: number of white pixels per row
    start_region_flag = 0
    start_region_index = 0  # row where the ID-card region starts
    end_region_index = 0  # row where the ID-card region ends
    for i in range(img_closed_original.shape[0]):  # iterate over the image height (originally 1000 px; shape[0] is used instead of hard-coding it)
        if start_region_flag == 0 and width_sum[i] > 330:
            start_region_flag = 1
            start_region_index = i  # the first row with more than 330 white pixels marks the start of the ID-card region
        if width_sum[i] > 330:
            end_region_index = i  # any row with more than 330 white pixels still counts as ID-card region, so keep updating the end
    # The row with the fewest white pixels inside the card region is taken to be the boundary between front and back.
    # argsort only sees width_sum between the detected start and end, so the start row index must be added back.
    min_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[0]
    img_closed_original[min_line_position][:] = 0
    for i in range(1, 11):  # tunable: consider the next 10 candidate rows
        temp_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[i]
        if abs(temp_line_position - min_line_position) < 30:  # only rows within [-30, 30] of the minimum row
            img_closed_original[temp_line_position][:] = 0  # force the row to 0
    return img_closed_original | 28e5e64e15cb349df186752c669ae16d01e21549 | 15,962 |
def _search(progtext, qs=None):
""" Perform memoized url fetch, display progtext. """
loadmsg = "Searching for '%s'" % (progtext)
wdata = pafy.call_gdata('search', qs)
def iter_songs():
wdata2 = wdata
while True:
for song in get_tracks_from_json(wdata2):
yield song
if not wdata2.get('nextPageToken'):
break
qs['pageToken'] = wdata2['nextPageToken']
wdata2 = pafy.call_gdata('search', qs)
# # The youtube search api returns a maximum of 500 results
length = min(wdata['pageInfo']['totalResults'], 500)
slicer = IterSlicer(iter_songs(), length)
# paginatesongs(slicer, length=length, msg=msg, failmsg=failmsg, loadmsg=loadmsg)
func = slicer
s = 0
e = 3
if callable(func):
songs = (s, e)
else:
songs = func[s:e]
return songs | 55310c4ad05b597b48e32dde810eff9db51d66c0 | 15,963 |
import numpy
def img_to_vector(img_fn, label=0):
"""Read the first 32 characters of the first 32 rows of an image file.
@return <ndarray>: a 1x(1024+1) numpy array with data and label, while the
label is defaults to 0.
"""
img = ""
for line in open(img_fn).readlines()[:32]:
img += line[:32]
# labels are always attached at the last position
itera = [_ for _ in img + str(label)]
return numpy.fromiter(itera, "f4") | f1d7161a0bc4d6ffebc6ee1b32eafb28c4d75f7f | 15,964 |
import appdirs
def get_config():
"""Return a user configuration object."""
config_filename = appdirs.user_config_dir(_SCRIPT_NAME, _COMPANY) + ".ini"
config = _MyConfigParser()
config.optionxform = str
config.read(config_filename)
config.set_filename(config_filename)
return config | 192ea496f80d77f241ec6deb6a4aa4b1ef7d17cf | 15,965 |
import asyncio
import websockets
from threading import Thread
def launch_matchcomms_server() -> MatchcommsServerThread:
"""
Launches a background process that handles match communications.
"""
host = 'localhost'
port = find_free_port() # deliberately not using a fixed port to prevent hardcoding fragility.
event_loop = asyncio.new_event_loop()
matchcomms_server = MatchcommsServer()
start_server = websockets.serve(matchcomms_server.handle_connection, host, port, loop=event_loop)
server = event_loop.run_until_complete(start_server)
thread = Thread(target=event_loop.run_forever, daemon=True)
thread.start()
return MatchcommsServerThread(
root_url=URL(scheme='ws', netloc=f'{host}:{port}', path='', params='', query='', fragment=''),
_server=server,
_event_loop=event_loop,
_thread=thread,
) | 4c23c599a61f029972ae3e54ceb3066a4ce9f207 | 15,966 |
def acq_randmaxvar():
"""Initialise a RandMaxVar fixture.
Returns
-------
RandMaxVar
Acquisition method.
"""
gp, prior = _get_dependencies_acq_fn()
# Initialising the acquisition method.
method_acq = RandMaxVar(model=gp, prior=prior)
return method_acq | 5f306d104032abc993ab7726e08453d5c18f2526 | 15,967 |
import json
def from_config(func):
"""Run a function from a JSON configuration file."""
def decorator(filename):
with open(filename, 'r') as file_in:
config = json.load(file_in)
return func(**config)
return decorator | 4342a5f6fab8f8274b9dfb762be3255672f4f332 | 15,968 |
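A usage sketch (editor's addition) with a hypothetical config file:
# config.json contains {"x": 1, "y": 2}
@from_config
def add(x, y):
    return x + y

add("config.json")  # -> 3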
def update_user(user, domain, password=None):
""" create/update user record. if password is None, the user is
removed. Password should already be SHA512-CRYPT'd """
passwdf = PASSWDFILE % {"domain": domain}
passwdb = KeyValueFile.open_file(passwdf, separator=":", lineformat=USERLINE+"\n")
passwdb[user] = password
return True | 6e65be52fe0fb737c5189da295694bf482be9f5d | 15,969 |
from collections import defaultdict
from itertools import takewhile
def puzzle_pieces(n):
    """Return a dictionary holding all 1, 3, and 7 k-primes."""
kprimes = defaultdict(list)
kprimes = {key : [] for key in [7, 3, 1]}
upper = 0
for k in sorted(kprimes.keys(), reverse=True):
if k == 7:
kprimes[k].extend(count_Kprimes(k, 2, n))
if not kprimes[k]:
return []
upper = n - kprimes[k][0]
if k == 3:
kprimes[k].extend(count_Kprimes(k, 2, upper))
upper -= kprimes[k][0]
if k == 1:
primes = get_primes(upper)
for p in takewhile(lambda x: x <= upper, primes):
kprimes[k].append(p)
return kprimes | 4ad36f316a2dfa39aca9c2b574781f9199fb13ef | 15,970 |
import warnings
import numpy as np
from scipy.stats import sigmaclip
def periodogram_snr(periodogram,periods,index_to_evaluate,duration,per_type,
freq_window_epsilon=3.,rms_window_bin_size=100):
"""
Calculate the periodogram SNR of the best period
Assumes fixed frequency spacing for periods
periodogram - the periodogram values
periods - periods associated with the above values
index_to_evaluate - index of period to examine
duration - total duration of the observations
per_type - which period search algorithm was used
(optional)
freq_window_epsilon - sets the size of the exclusion area
in the periodogram for the calculation
rms_window_bin_size - number of points to include in
calculating the RMS for the SNR
"""
# Some value checking
if len(periodogram) != len(periods):
raise ValueError("The lengths of the periodogram and the periods are not the same")
if hasattr(index_to_evaluate,'__len__'):
raise AttributeError("The index_to_evaluate has len attribute")
if np.isnan(periodogram[index_to_evaluate]):
raise ValueError("Selected periodogram value is nan")
if np.isinf(periodogram[index_to_evaluate]):
raise ValueError("Selected periodogram value is not finite")
if per_type.upper() not in ['LS','PDM','BLS']:
raise ValueError("Periodogram type " + per_type + " not recognized")
# Setting up some parameters
freq_window_size = freq_window_epsilon/duration
delta_frequency = abs(1./periods[1] - 1./periods[0])
freq_window_index_size = int(round(freq_window_size/delta_frequency))
# More value checking
if freq_window_index_size > len(periodogram):
raise ValueError("freq_window_index_size is greater than total periodogram length")
elif freq_window_index_size > .9*len(periodogram):
raise ValueError("freq_window_index_size is greater than 90% total length of periodogram")
elif freq_window_index_size > .8*len(periodogram):
print("here 80%")
warnings.warn("freq_window_index_size is greater than 80% total length of periodogram")
perdgm_window = [] # For storing values for RMS calculation
# Which values to include in perdgm_window
if index_to_evaluate > freq_window_index_size:
perdgm_window.extend(periodogram[max(0,index_to_evaluate-freq_window_index_size-rms_window_bin_size+1):index_to_evaluate-freq_window_index_size+1].tolist())
if index_to_evaluate + freq_window_index_size < len(periodogram):
perdgm_window.extend(periodogram[index_to_evaluate+freq_window_index_size:index_to_evaluate+freq_window_index_size+rms_window_bin_size].tolist())
perdgm_window = np.array(perdgm_window)
# Include only finite values
wherefinite = np.isfinite(perdgm_window)
# Sigma clip
vals, low, upp = sigmaclip(perdgm_window[wherefinite],low=3,high=3)
# Calculate standard deviation
stddev = np.std(vals)
# Return
if per_type.upper() == 'PDM': # If PDM, use correct amplitude
return (1.-periodogram[index_to_evaluate])/stddev
else:
return periodogram[index_to_evaluate]/stddev | 6b1f84d03796dc839cdb87b94bce69a8eef4f60e | 15,971 |
def derivative_overview(storage_service_id, storage_location_id=None):
"""Return a summary of derivatives across AIPs with a mapping
created between the original format and the preservation copy.
"""
report = {}
aips = AIP.query.filter_by(storage_service_id=storage_service_id)
if storage_location_id:
aips = aips.filter_by(storage_location_id=storage_location_id)
aips = aips.all()
all_aips = []
for aip in aips:
if not aip.preservation_file_count > 0:
continue
aip_report = {}
aip_report[fields.FIELD_TRANSFER_NAME] = aip.transfer_name
aip_report[fields.FIELD_UUID] = aip.uuid
aip_report[fields.FIELD_FILE_COUNT] = aip.original_file_count
aip_report[fields.FIELD_DERIVATIVE_COUNT] = aip.preservation_file_count
aip_report[fields.FIELD_RELATED_PAIRING] = []
original_files = File.query.filter_by(
aip_id=aip.id, file_type=FileType.original
)
for original_file in original_files:
preservation_derivative = File.query.filter_by(
file_type=FileType.preservation, original_file_id=original_file.id
).first()
if preservation_derivative is None:
continue
file_derivative_pair = {}
file_derivative_pair[
fields.FIELD_DERIVATIVE_UUID
] = preservation_derivative.uuid
file_derivative_pair[fields.FIELD_ORIGINAL_UUID] = original_file.uuid
original_format_version = original_file.format_version
if original_format_version is None:
original_format_version = ""
file_derivative_pair[fields.FIELD_ORIGINAL_FORMAT] = "{} {} ({})".format(
original_file.file_format, original_format_version, original_file.puid
)
file_derivative_pair[fields.FIELD_DERIVATIVE_FORMAT] = "{}".format(
preservation_derivative.file_format
)
aip_report[fields.FIELD_RELATED_PAIRING].append(file_derivative_pair)
all_aips.append(aip_report)
report[fields.FIELD_ALL_AIPS] = all_aips
report[fields.FIELD_STORAGE_NAME] = get_storage_service_name(storage_service_id)
report[fields.FIELD_STORAGE_LOCATION] = get_storage_location_description(
storage_location_id
)
return report | ab688e89c9bc9cec408e022a487d824a229a80a9 | 15,972 |
import tarfile
from urllib.request import urlretrieve
def fetch_packages(vendor_dir, packages):
"""
Fetches all packages from github.
"""
for package in packages:
tar_filename = format_tar_path(vendor_dir, package)
vendor_owner_dir = ensure_vendor_owner_dir(vendor_dir, package['owner'])
url = format_tarball_url(package)
print("Downloading {owner}/{project} {version}".format(**package))
urlretrieve(url, tar_filename)
with tarfile.open(tar_filename) as tar:
tar.extractall(vendor_owner_dir, members=tar.getmembers())
return packages | 4589ce242ab8221a34ea87ce020f53a7874e73cb | 15,973 |
def execute_search(search_term, sort_by, **kwargs):
"""
Simple search API to query Elasticsearch
"""
# Get the Elasticsearch client
client = get_client()
# Perform the search
ons_index = get_index()
# Init SearchEngine
s = SearchEngine(using=client, index=ons_index)
# Define type counts (aggregations) query
s = s.type_counts_query(search_term)
# Execute
type_counts_response = s.execute()
# Format the output
aggregations, total_hits = aggs_to_json(
type_counts_response.aggregations, "docCounts")
# Setup initial paginator
page_number = int(get_form_param("page", False, 1))
page_size = int(get_form_param("size", False, 10))
paginator = None
if total_hits > 0:
paginator = Paginator(
total_hits,
MAX_VISIBLE_PAGINATOR_LINK,
page_number,
page_size)
# Perform the content query to populate the SERP
# Init SearchEngine
s = SearchEngine(using=client, index=ons_index)
# Define the query with sort and paginator
s = s.content_query(
search_term, sort_by=sort_by, paginator=paginator, **kwargs)
# Execute the query
content_response = s.execute()
# Update the paginator
paginator = Paginator(
content_response.hits.total,
MAX_VISIBLE_PAGINATOR_LINK,
page_number,
page_size)
# Check for featured results
featured_result_response = None
# Only do this if we have results and are on the first page
if total_hits > 0 and paginator.current_page <= 1:
# Init the SearchEngine
s = SearchEngine(using=client, index=ons_index)
# Define the query
s = s.featured_result_query(search_term)
# Execute the query
featured_result_response = s.execute()
# Return the hits as JSON
return hits_to_json(
content_response,
aggregations,
paginator,
sort_by.name,
featured_result_response=featured_result_response) | 48ec250c6deceaca850230e4be2e0e282f5838e4 | 15,974 |
def last_char_to_aou(word):
"""Intended for abbreviations, returns "a" or "ä" based on vowel harmony
for the last char."""
assert isinstance(word, str)
ch = last_char_to_vowel(word)
if ch in "aou":
return "a"
return "ä" | 3a37e97e19e1ca90ccf26d81756db57445f68a26 | 15,975 |
import jax.numpy as jnp
def times_vector(mat, vec):
"""Returns the symmetric block-concatenated matrix multiplied by a vector.
Specifically, each value in the vector is multiplied by a row of the full
matrix. That is, the vector is broadcast and multiplied element-wise. Note
this would be the transpose of full_mat * vec if full_mat represented the full
symmetric matrix.
Args:
mat: The symmetric matrix represented as the concatenated blocks.
vec: The vector, having the same dimension as the materialized matrix.
"""
rows, cols = mat.shape
num_blocks = num_blocks_from_total_blocks(cols // rows)
multiplied = []
for i in range(num_blocks):
mat_block = mat[Ellipsis,
rows * ((i + 1) * i) // 2:rows * ((i + 1) * (i + 2)) // 2]
vec_block = vec[Ellipsis, rows * i:rows * (i + 1)]
multiplied.append(jnp.einsum("...ij,...i->ij", mat_block, vec_block))
return jnp.concatenate(multiplied, axis=-1) | 5b90ebd293535810c7ad8e1ad681033997e8c1c8 | 15,976 |
import pathlib
from typing import Union
def ensure_path(path: Union[str, pathlib.Path]) -> pathlib.Path:
"""
Check if the input path is a string or Path object, and return a path object.
:param path: String or Path object with a path to a resource.
:return: Path object instance
"""
return path if isinstance(path, pathlib.Path) else pathlib.Path(path) | 40cd2e1271f7f74adbf0928f769ca1a3d89acd50 | 15,977 |
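Illustrative calls (editor's addition):
ensure_path("data/output.txt")     # -> pathlib.Path('data/output.txt')
ensure_path(pathlib.Path("data"))  # returned unchanged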
def examine_mode(mode):
"""
Returns a numerical index corresponding to a mode
:param str mode: the subset user wishes to examine
:return: the numerical index
"""
if mode == 'test':
idx_set = 2
elif mode == 'valid':
idx_set = 1
elif mode == 'train':
idx_set = 0
else:
raise NotImplementedError
return idx_set | 4fee6f018cacff4c760cb92ef250cad21b497697 | 15,978 |
def create_pinata(profile_name: str) -> Pinata:
"""
Get or create a Pinata SDK instance with the given profile name.
If the profile does not exist, you will be prompted to create one,
which means you will be prompted for your API key and secret. After
that, they will be stored securely using ``keyring`` and accessed
as needed without prompt.
Args:
profile_name (str): The name of the profile to get or create.
Returns:
:class:`~pinata.sdk.Pinata`
"""
try:
pinata = Pinata.from_profile_name(profile_name)
except PinataMissingAPIKeyError:
set_keys_from_prompt(profile_name)
pinata = Pinata.from_profile_name(profile_name)
if not pinata:
set_keys_from_prompt(profile_name)
return Pinata.from_profile_name(profile_name) | a1b88b8bb5b85a73a8bce01860398a9cbf2d1491 | 15,980 |
def create_tfid_weighted_vec(tokens, w2v, n_dim, tfidf):
"""
Create train, test vecs using the tf-idf weighting method
Parameters
----------
tokens : np.array
data (tokenized) where each line corresponds to a document
w2v : gensim.Word2Vec
word2vec model
n_dim : int
dimensionality of our word vectors
Returns
-------
vecs_w2v : np.array
data ready for the model, shape (n_samples, n_dim)
"""
vecs_w2v = np.concatenate(
[build_doc_vector(doc, n_dim, w2v, tfidf)
for doc in tokens])
return vecs_w2v | 8503932c2b268ff81752fb22e8640ce9413ad2e5 | 15,981 |
def miniimagenet(folder, shots, ways, shuffle=True, test_shots=None,
seed=None, **kwargs):
"""Helper function to create a meta-dataset for the Mini-Imagenet dataset.
Parameters
----------
folder : string
Root directory where the dataset folder `miniimagenet` exists.
shots : int
Number of (training) examples per class in each task. This corresponds
to `k` in `k-shot` classification.
ways : int
Number of classes per task. This corresponds to `N` in `N-way`
classification.
shuffle : bool (default: `True`)
Shuffle the examples when creating the tasks.
test_shots : int, optional
Number of test examples per class in each task. If `None`, then the
number of test examples is equal to the number of training examples per
class.
seed : int, optional
Random seed to be used in the meta-dataset.
kwargs
Additional arguments passed to the `MiniImagenet` class.
See also
--------
`datasets.MiniImagenet` : Meta-dataset for the Mini-Imagenet dataset.
"""
defaults = {
'transform': Compose([Resize(84), ToTensor()])
}
return helper_with_default(MiniImagenet, folder, shots, ways,
shuffle=shuffle, test_shots=test_shots,
seed=seed, defaults=defaults, **kwargs) | a9be1fff33b8e5163d6a5af4bd48dc71dcb88864 | 15,982 |
def process_account_request(request, order_id, receipt_code):
"""
Process payment via online account like PayPal, Amazon ...etc
"""
order = get_object_or_404(Order, id=order_id, receipt_code=receipt_code)
if request.method == "POST":
gateway_name = request.POST["gateway_name"]
gateway = get_object_or_404(Gateway, name=gateway_name)
try:
if gateway.name == Gateway.PAYPAL:
processor = PayPal(gateway)
return HttpResponseRedirect(processor.create_account_payment(order, request.user))
else:
raise ImproperlyConfigured('Doorstep doesn\'t yet support payment with %s account.'
% gateway.get_name_display())
except DoorstepError as e:
request.session['processing_error'] = e.message
return HttpResponseRedirect(reverse('payments_processing_message'))
raise Http404 | be5bdb027034e2f2791968755e41bbac762d1dda | 15,983 |
def add_classification_categories(json_object, classes_file):
"""
Reads the name of classes from the file *classes_file* and adds them to
the JSON object *json_object*. The function assumes that the first line
corresponds to output no. 0, i.e. we use 0-based indexing.
Modifies json_object in-place.
Args:
json_object: an object created from a json in the format of the detection API output
classes_file: the list of classes that correspond to the output elements of the classifier
Return:
The modified json_object with classification_categories added. If the field 'classification_categories'
already exists, then this function is a no-op.
"""
if ('classification_categories' not in json_object.keys()) or (len(json_object['classification_categories']) == 0):
# Read the name of all classes
with open(classes_file, 'rt') as fi:
class_names = fi.read().splitlines()
# remove empty lines
class_names = [cn for cn in class_names if cn.strip()]
# Create field with name *classification_categories*
json_object['classification_categories'] = dict()
# Add classes using 0-based indexing
for idx, name in enumerate(class_names):
json_object['classification_categories']['%i'%idx] = name
else:
print('WARNING: The input json already contains the list of classification categories.')
return json_object | ef92902210f275238271c21e20f8f0eec90253b0 | 15,984 |
import copy
def create_compound_states(reference_thermodynamic_state,
top,
protocol,
region=None,
restraint=False):
"""
Return alchemically modified thermodynamic states.
Parameters
----------
reference_thermodynamic_state : ThermodynamicState object
top : Topography or Topology object
protocol : dict
The dictionary ``{parameter_name: list_of_parameter_values}`` defining
the protocol. All the parameter values list must have the same
number of elements.
region : str or list
Atomic indices defining the alchemical region.
restraint : bool
If ligand exists, restraint ligand and receptor movements.
"""
create_compound_states.metadata = {}
compound_state = _reference_compound_state(reference_thermodynamic_state,
top,
region=region,
restraint=restraint)
create_compound_states.metadata.update(_reference_compound_state.metadata)
# init the array of compound states
compound_states = []
protocol_keys, protocol_values = zip(*protocol.items())
for state_id, state_values in enumerate(zip(*protocol_values)):
compound_states.append(copy.deepcopy(compound_state))
for lambda_key, lambda_value in zip(protocol_keys, state_values):
if hasattr(compound_state, lambda_key):
setattr(compound_states[state_id], lambda_key, lambda_value)
else:
raise AttributeError(
'CompoundThermodynamicState object does not '
'have protocol attribute {}'.format(lambda_key))
return compound_states | 9ef5c14628237f3754e8522d11aa6bcbe399e1b3 | 15,985 |
def tts_init():
"""
    Initialize the chosen TTS.
Returns: tts (TextToSpeech)
"""
if (TTS_NAME == "IBM"):
return IBM_initialization()
elif (TTS_NAME == "pytts"):
return pytts_initialization()
else:
print("ERROR - WRONG TTS") | 4de36b27298d015b808cbc4973daf02354780787 | 15,987 |
def string_dumper(dumper, value, _tag=u'tag:yaml.org,2002:str'):
"""
Ensure that all scalars are dumped as UTF-8 unicode, folded and quoted in
the sanest and most readable way.
"""
if not isinstance(value, basestring):
value = repr(value)
if isinstance(value, str):
value = value.decode('utf-8')
style = None
multilines = '\n' in value
if multilines:
literal_style = '|'
style = literal_style
return dumper.represent_scalar(_tag, value, style=style) | 081e0adaa45072f2b75c9eb1374ce2009bf4fd1d | 15,988 |
import math
from decimal import Decimal
def to_hours_from_seconds(value):
"""From seconds to rounded hours"""
return Decimal(math.ceil((value / Decimal(60)) / Decimal(60))) | 2ceb1f74690d26f0d0d8f60ffdc012b801dd6be3 | 15,989 |
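Illustrative calls (editor's addition):
to_hours_from_seconds(Decimal(3600))  # -> Decimal('1')
to_hours_from_seconds(Decimal(5400))  # -> Decimal('2'), rounded up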
def extract_named_geoms(sde_floodplains = None, where_clause = None,
clipping_geom_obj = None):
"""
Clips SDE flood delineations to the boundary of FEMA floodplain changes, and
then saves the geometry and DRAINAGE name to a list of dictionaries.
:param sde_floodplains: {str} The file path to the UTIL.Floodplains layer
:param where_clause: {str} The where clause used to isolate polygons of interest
:param clipping_geom_obj: {arc geom obj} The geometry object representing
the boundaries of the LOMR/FIRM update
:return: {list} [{"SHAPE@": <Poly obj>, "DRAINAGE": "drain name"},...]
"""
sde_fields = ['SHAPE@', 'DRAINAGE']
with arcpy.da.SearchCursor(sde_floodplains, sde_fields, where_clause) as sCurs:
named_geoms = []
geom = None
for row in sCurs:
# if clipper.contains(row[0].centroid) or row[0].overlaps(clipper):
geom = row[0].clip(clipping_geom_obj.extent)
named_geoms.append({'SHAPE@': geom, 'DRAINAGE': str(row[1])})
return named_geoms | d56b5caf8a11358db4fc43f51b8a29840698fd3a | 15,990 |
from typing import Sequence
from typing import List
from typing import Any
def convert_examples_to_features(examples: Sequence[InputExampleTC],
labels: List[str],
tokenizer: Any,
max_length: int = 512,
ignore_lbl_id: int = -100
) -> List[InputFeaturesTC]:
"""Converts sequence of ``InputExampleTC to list of ``InputFeaturesTC``.
Args:
examples (:obj:`list` of :obj:`InputExampleTC`): Sequence of
``InputExampleTC`` containing the examples to be converted to
features.
tokenizer (:obj): Instance of a transformer tokenizer that will
tokenize the example tokens and convert them to model specific ids.
max_length (int): the maximum length of the post-tokenized tokens and
the respective associated fields in an InputFeaturesTC. Sequences
longer will be truncated, sequences shorter will be padded.
This length includes any special tokens that must be added such
as [CLS] and [SEP] in BERT.
ignore_lbl_id (int, optional): a value of a label id to be ignored,
used for subword tokens. This is typically negative.
Usually, -1 or `torch.nn.CrossEntropy().ignore_index`.
Returns:
If the input is a list of ``InputExamplesTC``, will return
a list of task-specific ``InputFeaturesTC`` which can be fed to the
model.
"""
logger.info(f'Using label list {labels}')
label2id = {label: i for i, label in enumerate(labels)}
all_features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Converting example %d" % (ex_index))
feats, tks = convert_example_to_features(example=example,
label2id=label2id,
tokenizer=tokenizer,
max_length=max_length,
ignore_lbl_id=ignore_lbl_id)
if ex_index < 5:
log_example_features(example, feats, tks)
all_features.append(feats)
return all_features | f051cb9fd68aaf08da15e99f978a6bdc24fea5d3 | 15,992 |
def update_setup_cfg(setupcfg: ConfigUpdater, opts: ScaffoldOpts):
"""Update `pyscaffold` in setupcfg and ensure some values are there as expected"""
if "options" not in setupcfg:
template = templates.setup_cfg(opts)
new_section = ConfigUpdater().read_string(template)["options"]
setupcfg["metadata"].add_after.section(new_section.detach())
# Add "PyScaffold" section if missing and update saved extensions
setupcfg = templates.add_pyscaffold(setupcfg, opts)
return setupcfg, opts | b08b0faa0645151b24d8eb40b2920e63caf764e9 | 15,994 |
def testable_renderable() -> CXRenderable:
"""
Provides a generic CXRenderable useful for testin the base class.
"""
chart: CanvasXpress = CanvasXpress(
render_to="canvasId",
data=CXDictData(
{
"y": {
"vars": ["Gene1"],
"smps": ["Smp1", "Smp2", "Smp3"],
"data": [[10, 35, 88]]
}
}
),
config=CXConfigs(
CXGraphType(CXGraphTypeOptions.Bar)
)
)
return SampleRenderable(chart) | 3e37096e51e081da8c3fa43f973248252c0276dd | 15,995 |
def secondSolution( fixed, c1, c2, c3 ):
"""
If given four tangent circles, calculate the other one that is tangent
to the last three.
@param fixed: The fixed circle touches the other three, but not
the one to be calculated.
@param c1, c2, c3: Three circles to which the other tangent circle
is to be calculated.
"""
curf = fixed.curvature()
cur1 = c1.curvature()
cur2 = c2.curvature()
cur3 = c3.curvature()
curn = 2 * (cur1 + cur2 + cur3) - curf
mn = (2 * (cur1*c1.m + cur2*c2.m + cur3*c3.m) - curf*fixed.m ) / curn
return Circle( mn.real, mn.imag, 1/curn ) | 1a6aca3e5d6a26f77b1fbc432ff26fba441e02f7 | 15,996 |
import numpy as np
from mpl_toolkits.mplot3d.art3d import Line3DCollection
def collect3d(v1a,ga,v2a,use_nonan=True):
    """
    Build a Line3DCollection with the desired line properties.
    """
v1a = np.real(v1a)
ga = np.real(ga)
v2a = np.real(v2a)
# remove nans for linewidth stuff later.
ga_nonan = ga[~np.isnan(ga)*(~np.isnan(v1a))*(~np.isnan(v2a))]
v1a_nonan = v1a[~np.isnan(ga)*(~np.isnan(v1a))*(~np.isnan(v2a))]
v2a_nonan = v2a[~np.isnan(ga)*(~np.isnan(v1a))*(~np.isnan(v2a))]
if use_nonan:
sol = np.zeros((len(ga_nonan),3))
sol[:,0] = v1a_nonan
sol[:,1] = ga_nonan
sol[:,2] = v2a_nonan
else:
sol = np.zeros((len(ga),3))
sol[:,0] = v1a
sol[:,1] = ga
sol[:,2] = v2a
sol = np.transpose(sol)
points = np.array([sol[0,:],sol[1,:],sol[2,:]]).T.reshape(-1,1,3)
segs = np.concatenate([points[:-1],points[1:]],axis = 1)
line3d = Line3DCollection(segs,linewidths=(1.+(v1a_nonan)/(.001+np.amax(v1a_nonan))*6.),colors='k')
return line3d | de53fcb859c8c95b1b95a4ad2ffea102a090e94e | 15,997 |
import urllib.parse
import requests
def get_job_priorities(rest_url):
"""This retrieves priorities of all active jobs"""
url = urllib.parse.urljoin(rest_url, "/jobs/priorities")
resp = requests.get(url)
return resp.json() | 020e825d531394798c041f32683bccfea19684c9 | 15,999 |
import tensorflow as tf
def create_vertices_intrinsics(disparity, intrinsics):
"""3D mesh vertices from a given disparity and intrinsics.
Args:
disparity: [B, H, W] inverse depth
intrinsics: [B, 4] reference intrinsics
Returns:
[B, L, H*W, 3] vertex coordinates.
"""
# Focal lengths
fx = intrinsics[:, 0]
fy = intrinsics[:, 1]
fx = fx[Ellipsis, tf.newaxis, tf.newaxis]
fy = fy[Ellipsis, tf.newaxis, tf.newaxis]
# Centers
cx = intrinsics[:, 2]
cy = intrinsics[:, 3]
cx = cx[Ellipsis, tf.newaxis]
cy = cy[Ellipsis, tf.newaxis]
batch_size, height, width = disparity.shape.as_list()
vertex_count = height * width
i, j = tf.meshgrid(tf.range(width), tf.range(height))
i = tf.cast(i, tf.float32)
j = tf.cast(j, tf.float32)
width = tf.cast(width, tf.float32)
height = tf.cast(height, tf.float32)
# 0.5 is added to get the position of the pixel centers.
i = (i + 0.5) / width
j = (j + 0.5) / height
i = i[tf.newaxis]
j = j[tf.newaxis]
depths = 1.0 / tf.clip_by_value(disparity, 0.01, 1.0)
mx = depths / fx
my = depths / fy
px = (i-cx) * mx
py = (j-cy) * my
vertices = tf.stack([px, py, depths], axis=-1)
vertices = tf.reshape(vertices, (batch_size, vertex_count, 3))
return vertices | d476767c71fb1a8cefe121a3aaf8cbf9a19e7943 | 16,000 |
def _find_smart_path(challbs, preferences, combinations):
"""Find challenge path with server hints.
Can be called if combinations is included. Function uses a simple
ranking system to choose the combo with the lowest cost.
"""
chall_cost = {}
max_cost = 1
for i, chall_cls in enumerate(preferences):
chall_cost[chall_cls] = i
max_cost += i
# max_cost is now equal to sum(indices) + 1
best_combo = []
# Set above completing all of the available challenges
best_combo_cost = max_cost
combo_total = 0
for combo in combinations:
for challenge_index in combo:
combo_total += chall_cost.get(challbs[
challenge_index].chall.__class__, max_cost)
if combo_total < best_combo_cost:
best_combo = combo
best_combo_cost = combo_total
combo_total = 0
if not best_combo:
_report_no_chall_path()
return best_combo | 96f55288bfa08de32badd9f1a96b3decd76573c8 | 16,001 |
def run_generator_and_test(test_case,
mlmd_connection,
generator_class,
pipeline,
task_queue,
use_task_queue,
service_job_manager,
num_initial_executions,
num_tasks_generated,
num_new_executions,
num_active_executions,
expected_exec_nodes=None,
ignore_node_ids=None):
"""Runs generator.generate() and tests the effects."""
if service_job_manager is None:
service_job_manager = service_jobs.DummyServiceJobManager()
with mlmd_connection as m:
executions = m.store.get_executions()
test_case.assertLen(
executions, num_initial_executions,
f'Expected {num_initial_executions} execution(s) in MLMD.')
tasks = run_generator(
mlmd_connection,
generator_class,
pipeline,
task_queue,
use_task_queue,
service_job_manager,
ignore_node_ids=ignore_node_ids)
with mlmd_connection as m:
test_case.assertLen(
tasks, num_tasks_generated,
f'Expected {num_tasks_generated} task(s) to be generated.')
executions = m.store.get_executions()
num_total_executions = num_initial_executions + num_new_executions
test_case.assertLen(
executions, num_total_executions,
f'Expected {num_total_executions} execution(s) in MLMD.')
active_executions = [
e for e in executions if execution_lib.is_execution_active(e)
]
test_case.assertLen(
active_executions, num_active_executions,
f'Expected {num_active_executions} active execution(s) in MLMD.')
if expected_exec_nodes:
for i, task in enumerate(tasks):
_verify_exec_node_task(test_case, pipeline, expected_exec_nodes[i],
active_executions[i].id, task)
return tasks | c7f03b5db9f100c8c5eec029e843ce4ab1cdb84e | 16,002 |
def sort_func(kd1, kd2):
"""
Compares 2 key descriptions
:param kd1: First key description
:param kd2: Second key description
    :return: -1, 0 or 1 depending on whether kd1 is less than, equal to or greater than kd2
"""
_c = type_order(kd1, kd2)
if _c is not None:
return _c
return kid_order(kd1, kd2) | 2ac9100f9c69c283266cc4ba4f6d6262551ce1b5 | 16,003 |
def sumdigits(a: int):
"""Sum of the digits of an integer"""
return sum(map(int, str(a))) | 018bcc429e6ea3842fd9e9e2580820aed29bc0aa | 16,004 |
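# Quick check of the digit-sum helper above.
assert sumdigits(1234) == 1 + 2 + 3 + 4 == 10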
import datetime
from datetime import datetime as dt

# Assumed helper: weekday abbreviations mapped to datetime.weekday() numbers (Monday == 0),
# which is what the docstring examples below imply.
wkdays = {'mon': 0, 'tue': 1, 'wed': 2, 'thu': 3, 'fri': 4, 'sat': 5, 'sun': 6}
def nth_weekday_of_month(y, m, n, w):
"""
y = 2020; m = 2
assert nth_weekday_of_month(y, m, -1, 'sat') == dt(2020, 2, 29)
assert nth_weekday_of_month(y, m, -2, 'sat') == dt(2020, 2, 22)
assert nth_weekday_of_month(y, m, 1, 'sat') == dt(2020, 2, 1)
assert nth_weekday_of_month(y, m, 1, 'sun') == dt(2020, 2, 2)
assert nth_weekday_of_month(y, m, 1, 'monday') == dt(2020, 2, 3)
assert nth_weekday_of_month(y, 'G', 3, 'sat') == dt(2020, 2, 15)
assert nth_weekday_of_month(y, 'G', 3, 'sun') == dt(2020, 2, 16)
assert nth_weekday_of_month(y, 'G', 3, 'monday') == dt(2020, 2, 17)
"""
if n < 0 :
return nth_weekday_of_month(y, m+1, 1, w) + datetime.timedelta(7 * n)
t = dt(y, m , 1)
bump = wkdays[w[:3].lower()] - t.weekday()
if bump < 0:
bump = bump + 7
bump = bump + (n-1) * 7
res = t + datetime.timedelta(bump)
return res | 2f422b3fac4d97db64f541b54158248c44afad14 | 16,005 |
def getBits(val, hiIdx: int, loIdx: int) -> int:
"""Returns a bit slice of a value.
Args:
val: Original value.
hiIdx: Upper (high) index of slice.
loIdx: Lower index of slice.
Returns:
The bit slice.
"""
return (~(MASK_32<<(hiIdx-loIdx+1)) & (val>>loIdx)) | acaf1a36fceb12ee99140aca0769dde084ee08d6 | 16,006 |
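# Hedged usage sketch: MASK_32 is not defined in this snippet; it is assumed to be the
# 32-bit mask 0xFFFFFFFF, which is what the shift arithmetic above implies.
MASK_32 = 0xFFFFFFFF
assert getBits(0b11011010, hiIdx=4, loIdx=1) == 0b1101  # bits 4..1 of 0xDA -> 13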
def get_hostname():
"""Returns the hostname, from /etc/hostname."""
hostname = ""
try:
with open('/etc/hostname') as f:
hostname = f.read().rstrip()
if len(hostname) == 0:
hostname = "Unknown"
    except Exception:
hostname = "Unknown"
return hostname | 4cd4ffc1c8c56bc2e440443fdbc315d27fb94033 | 16,007 |
def is_valid_body(val):
"""Body must be a dictionary."""
return isinstance(val, dict) | ef3a605e1e84ce9d74f77c07799d1abb58aaf61a | 16,008 |
def _vba_to_python_op(op, is_boolean):
"""
Convert a VBA boolean operator to a Python boolean operator.
"""
op_map = {
"Not" : "not",
"And" : "and",
"AndAlso" : "and",
"Or" : "or",
"OrElse" : "or",
"Eqv" : "|eq|",
"=" : "|eq|",
">" : ">",
"<" : "<",
">=" : ">=",
"=>" : ">=",
"<=" : "<=",
"=<" : "<=",
"<>" : "|neq|",
"is" : "|eq|"
}
if (not is_boolean):
op_map["Not"] = "~"
op_map["And"] = "&"
op_map["AndAlso"] = "&"
op_map["Or"] = "|"
op_map["OrElse"] = "|"
return op_map[op] | a6ed0c65c6c2d2635f14fb664540eaf283ee4065 | 16,009 |
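# Usage sketch: the same VBA operator maps differently depending on whether the
# expression is treated as boolean or bitwise.
assert _vba_to_python_op("And", is_boolean=True) == "and"
assert _vba_to_python_op("And", is_boolean=False) == "&"
assert _vba_to_python_op("<>", is_boolean=True) == "|neq|"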
def file_diff_format(filename1, filename2):
"""
Inputs:
filename1 - name of first file
filename2 - name of second file
Output:
Returns a four line string showing the location of the first
difference between the two files named by the inputs.
If the files are identical, the function instead returns the
string "No differences\n".
If either file does not exist or is not readable, then the
behavior of this function is undefined.
"""
# read files
lst1 = get_file_lines(filename1)
lst2 = get_file_lines(filename2)
# get tuple indicating line and index of first difference between two files
my_tup = multiline_diff(lst1, lst2)
# handle identical case
if my_tup[0] == -1:
return "No differences\n"
else:
# get 3 line formatted output of first difference between two lines
sdf_output = singleline_diff_format(lst1[my_tup[0]], lst2[my_tup[0]], my_tup[1])
# all other cases
return "Line " + str(my_tup[0]) + ":\n" + sdf_output | c2027767ac6694620d895ef1565a03e7b706c2e7 | 16,010 |
def get_label_number(window):
    """Map the transport-mode label of a window to its index in the mode list."""
    mode_list = ["bike", "car", "walk", "bus", "train"]
    current_label_number = 0
    for index, mode in enumerate(mode_list):
        if window[1] == mode:
            current_label_number = index
    return current_label_number
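# Usage sketch: a window is assumed to be a (data, label) pair, as the indexing above implies.
assert get_label_number(("window_0", "bus")) == 3  # "bus" is at index 3 of the mode list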
def midi_array_to_event(midi_as_array):
"""
Take converted MIDI array and convert to array of Event objects
"""
# Sort MIDI array
midi = sorted(midi_as_array, key=itemgetter(2))
# Init result
result = []
# Accumulators for computing start and end times
active_notes = []
curr_time = 0
# For comparing velocities
prev_vel_range = 0
# For all the entries in the midi array
for i in midi:
# Add the current note
active_notes.append(i)
# Get time shift values
shift_values, shift_sum = get_shift_value(i[2] - curr_time)
# Apply time shift to the next start note
if shift_values:
for s in shift_values:
if s > 0:
result.append(Event(EventType.TIME_SHIFT, s))
else:
result.append(Event(EventType.TIME_SHIFT, shift_sum))
# Update time
curr_time += shift_sum
# Check if there are notes that are playing that need to end
notes_to_end = [x for x in active_notes if curr_time >= x[3]]
active_notes[:] = (x for x in active_notes if curr_time < x[3])
# For the finished notes
for j in notes_to_end:
# End the note
result.append(Event(EventType.NOTE_OFF, j[1]))
# If the velocity has changed by a large enough amount, add a set velocity event
temp_velocity = i[0]
bin_size = (127/20)
for vel in range(20):
if temp_velocity < (vel + 1) * bin_size:
if prev_vel_range != vel:
result.append(Event(EventType.SET_VELOCITY, int((vel + 1) * bin_size)))
prev_vel_range = vel
break
# Start the note
result.append(Event(EventType.NOTE_ON, i[1]))
# If there are still notes in midi_acc
if active_notes:
for i in active_notes:
if i[3] > curr_time:
# Apply time shift
shift_values, shift_sum = get_shift_value(i[3] - curr_time)
if shift_values:
for s in shift_values:
if s > 0:
result.append(Event(EventType.TIME_SHIFT, s))
else:
result.append(Event(EventType.TIME_SHIFT, shift_sum))
# Update time
curr_time += shift_sum
# End note
result.append(Event(EventType.NOTE_OFF, i[1]))
# Return array
return result | 63391a1fa045f2185ce22c3ab5da186169d445e7 | 16,013 |
from typing import Dict
from typing import Type
def find_benchmarks(module) -> Dict[str, Type[Benchmark]]:
"""Enumerate benchmarks in `module`."""
found = {}
for name in module.__all__:
benchmark_type = getattr(module, name)
found[benchmark_type.name] = benchmark_type
return found | 4b456a44963629da0b6072dcb9e6e8946cbaef23 | 16,014 |
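# Hedged usage sketch: any module-like object with __all__ and benchmark classes exposing
# a `name` attribute will do; a stand-in namespace replaces a real benchmarks module here
# (the Benchmark base class is assumed to be imported elsewhere in the original module).
import types

class DummyBenchmark:
    name = "dummy"

fake_module = types.SimpleNamespace(__all__=["DummyBenchmark"], DummyBenchmark=DummyBenchmark)
assert find_benchmarks(fake_module) == {"dummy": DummyBenchmark}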
def out_folder_android_armv8_clang(ctx, section_name, option_name, value):
""" Configure output folder for Android ARMv8 Clang """
if not _is_user_input_allowed(ctx, option_name, value):
Logs.info('\nUser Input disabled.\nUsing default value "%s" for option: "%s"' % (value, option_name))
return value
# GUI
if not ctx.is_option_true('console_mode'):
return ctx.gui_get_attribute(section_name, option_name, value)
_output_folder_disclaimer(ctx)
return _get_string_value(ctx, 'Android ARMv8 Clang Output Folder', value) | b713642879cfcffe78fc415adbbea7c13c319925 | 16,015 |
def MCTS(root, verbose = False):
"""initialization of the chemical trees and grammar trees"""
run_time=time.time()+600*2
rootnode = Node(state = root)
state = root.Clone()
maxnum=0
iteration_num=0
start_time=time.time()
"""----------------------------------------------------------------------"""
"""global variables used for save valid compounds and simulated compounds"""
valid_compound=[]
all_simulated_compound=[]
desired_compound=[]
max_score=-100.0
desired_activity=[]
time_distribution=[]
num_searched=[]
current_score=[]
depth=[]
all_score=[]
"""----------------------------------------------------------------------"""
while maxnum<10100:
        print(maxnum)
node = rootnode
state = root.Clone()
"""selection step"""
node_pool=[]
        print("current found max_score:", max_score)
while node.childNodes!=[]:
node = node.Selectnode()
state.SelectPosition(node.position)
            print("state position:", state.position)
depth.append(len(state.position))
if len(state.position)>=81:
re=-1.0
while node != None:
node.Update(re)
node = node.parentNode
else:
"""------------------------------------------------------------------"""
"""expansion step"""
"""calculate how many nodes will be added under current leaf"""
expanded=expanded_node(model,state.position,val)
nodeadded=node_to_add(expanded,val)
all_posible=chem_kn_simulation(model,state.position,val,nodeadded)
generate_smile=predict_smile(all_posible,val)
new_compound=make_input_smile(generate_smile)
node_index,score,valid_smile,all_smile=check_node_type(new_compound,SA_mean,SA_std,logP_mean,logP_std,cycle_mean,cycle_std)
            print(node_index)
valid_compound.extend(valid_smile)
all_simulated_compound.extend(all_smile)
all_score.extend(score)
iteration_num=len(all_simulated_compound)
if len(node_index)==0:
re=-1.0
while node != None:
node.Update(re)
node = node.parentNode
else:
re=[]
for i in range(len(node_index)):
m=node_index[i]
maxnum=maxnum+1
node.Addnode(nodeadded[m],state)
node_pool.append(node.childNodes[i])
if score[i]>=max_score:
max_score=score[i]
current_score.append(max_score)
else:
current_score.append(max_score)
depth.append(len(state.position))
"""simulation"""
re.append((0.8*score[i])/(1.0+abs(0.8*score[i])))
if maxnum==100:
maxscore100=max_score
time100=time.time()-start_time
if maxnum==500:
maxscore500=max_score
time500=time.time()-start_time
if maxnum==1000:
maxscore1000=max_score
time1000=time.time()-start_time
if maxnum==5000:
maxscore5000=max_score
time5000=time.time()-start_time
if maxnum==10000:
time10000=time.time()-start_time
maxscore10000=max_score
#valid10000=10000*1.0/len(all_simulated_compound)
"""backpropation step"""
#print "node pool length:",len(node.childNodes)
for i in range(len(node_pool)):
node=node_pool[i]
while node != None:
node.Update(re[i])
node = node.parentNode
#finish_iteration_time=time.time()-iteration_time
#print "four step time:",finish_iteration_time
"""check if found the desired compound"""
#print "all valid compounds:",valid_compound
finished_run_time=time.time()-start_time
    print("logp max found:", current_score)
    #print("length of score:", len(current_score))
    #print("time:", time_distribution)
    print("valid_com=", valid_compound)
    print("num_valid:", len(valid_compound))
    print("all compounds:", len(all_simulated_compound))
    print("score=", all_score)
    print("depth=", depth)
    print(len(depth))
    print("runtime", finished_run_time)
    #print("num_searched=", num_searched)
    print("100 max:", maxscore100, time100)
    print("500 max:", maxscore500, time500)
    print("1000 max:", maxscore1000, time1000)
    print("5000 max:", maxscore5000, time5000)
    print("10000 max:", maxscore10000, time10000)
return valid_compound | 686a412c0f4cc4cd81d96872e9929d1ce51e7ed8 | 16,017 |
def update_internalnodes_MRTKStandard() -> bpy.types.NodeGroup:
    """Update the internal nodes of the node group currently being defined
    Returns:
        bpy.types.NodeGroup: Reference to the created node group
    """
    # Check whether an MRTKStandard node group is already defined in the blend data
    # (the get function returns None if the target does not exist)
    get_nodegroup = bpy.data.node_groups.get(def_nodegroup_name)
    # Check whether the node group could be retrieved
    if get_nodegroup == None:
        # If the node group is not defined, do nothing
        return None
    # Update only the nodes and links inside the node group, excluding the input/output nodes
    # Iterate over all current internal nodes
    for node in get_nodegroup.nodes:
        # Check whether this is the input node
        if node.name == def_inputnode_name:
            # Skip the input node
            continue
        # Check whether this is the output node
        if node.name == def_outputnode_name:
            # Skip the output node
            continue
        # Delete every node other than the input/output nodes
        get_nodegroup.nodes.remove(node)
    # Create the version-label frame node in the node group
    group_versionnode = add_nodegroup_MRTKStandard_framenode()
    # Create the BSDF node in the node group
    group_bsdfnode = add_nodegroup_MRTKStandard_bsdfnode()
    # Create the RGB mix node in the node group
    group_rgbmix = add_nodegroup_MRTKStandard_rgbmixnode()
    # Create the smoothness-inversion node in the node group
    group_smoothinversion = add_nodegroup_MRTKStandard_smoothinversionnode()
    # Set up the links that make up the node group
    link_result = link_MRTKStandardNodeGroup_default()
    # Check whether the links were connected successfully
    if link_result == False:
        # If linking failed, do not return the node group
        return None
    return get_nodegroup
def _ensure_consistent_schema(
frame: SparkDF,
schemas_df: pd.DataFrame,
) -> SparkDF:
"""Ensure the dataframe is consistent with the schema.
If there are column data type mismatches, (more than one data type
for a column name in the column schemas) then will try to convert
the data type if possible:
* if they are all number data types, then picks the largest number
type present
* if one of the types is string, then ensures it casts the column to
string type
Also fills any missing columns with Null values, ensuring correct
dtype.
Parameters
----------
frame : SparkDF
    schemas_df : pd.DataFrame
        The simple column schemas, in the form (name, dtype), of all
        dataframes set to be concatenated.
Returns
-------
SparkDF
Input dataframe with consistent schema.
"""
final_schema = _get_final_schema(schemas_df)
missing_fields = [f for f in final_schema if f not in frame.dtypes]
for column, dtype in missing_fields:
# If current frame missing the column in the schema, then
# set values to Null.
vals = (
F.lit(None) if column not in frame.columns
else F.col(column)
)
# Cast the values with the correct dtype.
frame = frame.withColumn(column, vals.cast(dtype))
return frame | 653f2740fa2ba090c1a1ace71b09523848d52be7 | 16,019 |
def shave_bd(img, bd):
"""
    Shave the border area of spatial views, a common operation in super-resolution (SR).
    :param img: input image array of shape (H, W, C)
    :param bd: border width in pixels to remove from each side
    :return: the cropped image of shape (H - 2*bd, W - 2*bd, C)
"""
return img[bd:-bd, bd:-bd, :] | 4b822c5e57787edb74955fd350ad361080b8640b | 16,020 |
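# Usage sketch: shaving a 4-pixel border from an H x W x C image leaves (H-8) x (W-8) x C.
import numpy as np
img = np.zeros((64, 64, 3))
assert shave_bd(img, 4).shape == (56, 56, 3)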
def plotly_single(ma, average_type, color, label, plot_type='line'):
"""A plotly version of plot_single. Returns a list of traces"""
summary = list(np.ma.__getattribute__(average_type)(ma, axis=0))
x = list(np.arange(len(summary)))
if isinstance(color, str):
color = list(matplotlib.colors.to_rgb(color))
traces = [go.Scatter(x=x, y=summary, name=label, line={'color': "rgba({},{},{},0.9)".format(color[0], color[1], color[2])}, showlegend=False)]
if plot_type == 'fill':
traces[0].update(fill='tozeroy', fillcolor=color)
if plot_type in ['se', 'std']:
if plot_type == 'se': # standard error
std = np.std(ma, axis=0) / np.sqrt(ma.shape[0])
else:
std = np.std(ma, axis=0)
x_rev = x[::-1]
lower = summary - std
trace = go.Scatter(x=x + x_rev,
y=np.concatenate([summary + std, lower[::-1]]),
fill='tozerox',
fillcolor="rgba({},{},{},0.2)".format(color[0], color[1], color[2]),
line=go.Line(color='transparent'),
showlegend=False,
name=label)
traces.append(trace)
return traces | b568d0e4496fc424aa5b07ff90bf45880a374d56 | 16,022 |
from pathlib import Path
def create_task(className, *args, projectDirectory='.', dryrun=None, force=None, source=False):
"""Generates task class from the parameters derived from :class:`.Task`
Fails if the target file already exists unless ``force=True`` or ``--force`` in the CLI is set.
Setting the ``--source`` will generate a different template that have stubs with the functions that need to be overwritten.
Parameters
----------
className : string (CamelCase)
Name of the class to be created
projectDirectory : string (default='.')
Location of the project directory, the code will be created in ``projectDirectory/data_models/class_name.py``.
dryrun : bool (default=None)
If set to ``True`` it returns the generated code as a string
force : bool (default=None)
If set to ``True`` it overwrites the target file
source : bool (default=False)
If set to ``True`` the class will generate stubs for functions to be overwritten
*args : List of strings (CamelCase)
Classes to be imported into the generated code from the datamodel, fails if class not found
Returns
-------
content : string
The generated code if ``dryrun`` is specified
"""
if source:
taskType = NameString('Source')
else:
taskType = NameString('Task')
project = HypergolProject(projectDirectory=projectDirectory, dryrun=dryrun, force=force)
className = NameString(className)
dependencies = [NameString(value) for value in args]
project.check_dependencies(dependencies)
content = project.render(
templateName=f'{taskType.asFileName}.j2',
templateData={'className': className, 'dependencies': dependencies},
filePath=Path(projectDirectory, 'tasks', className.asFileName)
)
return project.cli_final_message(creationType=taskType, name=className, content=(content, )) | 028e751a43930e167f000d5ff0d0c76d1340cec4 | 16,023 |
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))
row_template = ("|" + (" %%-%ds |" * len(columns))) % widths
header = row_template % tuple(columns)
bar = "+%s+" % "+".join("-" * (w + 2) for w in widths)
data = "\n".join(row_template % r for r in rows)
return "\n".join([bar, header, bar, data, bar]) | d65e0dfef94060db243de2a3a4f162aa01e12537 | 16,024 |
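# Usage sketch: each column is padded to the width of its widest cell or header.
print(asciitable(["name", "qty"], [("apple", 3), ("kiwi", 12)]))
# +-------+-----+
# | name  | qty |
# +-------+-----+
# | apple | 3   |
# | kiwi  | 12  |
# +-------+-----+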
def __resolve_key(key: Handle) -> PyHKEY:
"""
Returns the full path to the key
>>> # Setup
>>> fake_registry = fake_reg_tools.get_minimal_windows_testregistry()
>>> load_fake_registry(fake_registry)
>>> # Connect registry and get PyHkey Type
>>> reg_handle = ConnectRegistry(None, HKEY_CURRENT_USER)
>>> __resolve_key(key=reg_handle).handle.full_key
'HKEY_CURRENT_USER'
>>> __resolve_key(key=HKEY_CURRENT_USER).handle.full_key
'HKEY_CURRENT_USER'
>>> # Test PyHKey Type (the most common)
>>> discard = __resolve_key(reg_handle)
>>> # Test int Type
>>> discard = __resolve_key(HKEY_CURRENT_USER)
>>> # Test HKEYType
>>> hkey = HKEYType(handle=reg_handle.handle, access=reg_handle._access)
>>> discard = __resolve_key(hkey)
>>> # Test invalid handle
>>> discard = __resolve_key(42)
Traceback (most recent call last):
...
OSError: [WinError 6] The handle is invalid
>>> # Test invalid type
>>> discard = __resolve_key('spam') # noqa
Traceback (most recent call last):
...
RuntimeError: unknown Key Type
"""
if isinstance(key, PyHKEY):
key_handle = key
elif isinstance(key, int):
try:
key_handle = PyHKEY(__fake_registry.hive[key])
except KeyError:
error = OSError("[WinError 6] The handle is invalid")
setattr(error, "winerror", 6)
raise error
elif isinstance(key, HKEYType):
key_handle = PyHKEY(handle=key.handle, access=key._access)
else:
raise RuntimeError("unknown Key Type")
return key_handle | 70344ac3b068793a0c40e4151fb210c269dba743 | 16,025 |
def vec3f_unitZ():
"""vec3f_unitZ() -> vec3f"""
return _libvncxx.vec3f_unitZ() | dd0b6e28333a8d72918113b0f5caf788fb51bd43 | 16,026 |
def get_public_key(public_key_path=None, private_key_path=None):
"""get_public_key.
Loads public key. If no path is specified, loads signing_key.pem.pub from the
current directory. If a private key path is provided, the public key path is
ignored and the public key is loaded from the private key.
:param public_key_path: a string of the public key file name, with relative or full path
:param private_key_path: a string of the private key file name, with relative or full path
:return:
"""
if private_key_path is not None:
private_key = get_private_key(private_key_path)
public_key = private_key.publickey().exportKey("PEM")
return public_key
elif public_key_path is None:
public_key_path = "signing_key.pem.pub"
with open(public_key_path, "rb") as f:
public_key = RSA.importKey(f.read())
return public_key | f37bb64e9a0971c77986b6c82b8519153f3f8eaa | 16,028 |
def InitializeState(binaryString):
"""
State initializer
"""
state = np.zeros(shape=(4, 4), dtype=np.uint8)
plaintextBytes = SplitByN(binaryString, 8)
for col in range(4):
for row in range(4):
binary = plaintextBytes[col * 4 + row]
state[row, col] = int(binary, 2)
return np.matrix(state) | b8de68bba8837865f9e74c43be1b6144774d69ad | 16,029 |
from typing import Dict
from typing import Any
def _apply_modifier(s: str, modifier: str, d: Dict[Any, str]) -> str:
"""
This will search for the ^ signs and replace the next
digit or (digits when {} is used) with its/their uppercase representation.
:param s: Latex string code
:param modifier: Modifier command
:param d: Dict to look upon
:return: New text with replaced text.
"""
s = s.replace(modifier, "^")
newtext = ""
mode_normal, mode_modified, mode_long = range(3)
mode = mode_normal
for ch in s:
if mode == mode_normal and ch == '^':
mode = mode_modified
continue
elif mode == mode_modified and ch == '{':
mode = mode_long
continue
elif mode == mode_modified:
newtext += d.get(ch, ch)
mode = mode_normal
continue
elif mode == mode_long and ch == '}':
mode = mode_normal
continue
if mode == mode_normal:
newtext += ch
else:
newtext += d.get(ch, ch)
return newtext | c54a2c66ff6ee768e588b14472fa5707edf9bc56 | 16,030 |
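# Usage sketch with a hypothetical "\super" modifier token and a small superscript map.
sup = {'0': '⁰', '1': '¹', '2': '²'}
assert _apply_modifier(r"E=mc\super2", r"\super", sup) == "E=mc²"
assert _apply_modifier(r"x\super{12}", r"\super", sup) == "x¹²"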
import concurrent
def threaded_polling(data, max_workers):
"""
Multithreaded polling method to get the data from cryptocompare
:param data: dictionary containing the details to be fetched
:param max_workers: maximum number of threads to spawn
:return list: containing the high low metrics for each pair
"""
hl_parsed_data = list()
exchange = data["name"]
pairs = data["pair_whitelist"]
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        # Submit one parsing job per whitelisted pair
future_to_pairs = [
executor.submit(run_parser, _.split("/")[0], _.split("/")[1], exchange)
for _ in pairs
]
total = len(future_to_pairs)
count = 0
for future in concurrent.futures.as_completed(future_to_pairs):
try:
data = future.result()
hl_parsed_data.append(data)
except Exception as exc:
print(exc)
else:
count += 1
msg = f"Parsing {data['symbol']:10} | {count:2}/{total:2}"
print(msg, end="\r")
print(f"Pairs processed from {exchange} | {count:2}/{total:2}")
return hl_parsed_data | 587a07912b8ea6d0267638c72a98fdbeb6b0ebf0 | 16,031 |
from typing import Dict
from typing import Any
from typing import List
def sub(attrs: Dict[str, Any], in_xlayers: List[XLayer]) -> Dict[str, List[int]]:
"""Return numpy-style subtraction layer registration information (shape)
NOTE: https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html"""
assert len(in_xlayers) == 2, "Subtract layer expects two input layers"
lX, rX = in_xlayers
shape = TensorShape(get_numpy_broadcasted_shape(lX.shapes[:], rX.shapes[:]))
return {'shape': shape} | ebe4160dc92c259d2d6fd58a96eb78c697c0a5e4 | 16,032 |
import re
import numpy as np
def word_column_filter_df(dataframe, column_to_filter, column_freeze, word_list):
    # The .where() function returns a position, which must be converted into an index.
    # A reference column (e.g. the product code) is needed to recover the index, or a re-indexed column must be built.
    """Drop the rows of a dataframe whose `column_to_filter` contains any word from `word_list`, then return the dataframe"""
    position_to_drop_lst = np.where(dataframe[column_to_filter].str.contains('|'.join(map(re.escape, word_list)),
                                                                             na=np.NaN))[0]
    indices_to_drop_lst = []
    for position in position_to_drop_lst:
        indice = (dataframe[dataframe[column_freeze] == dataframe.iloc[position].loc[column_freeze]]).index[0]
        indices_to_drop_lst.append(indice)
    print("Number of rows dropped:")
    nbr = len(indices_to_drop_lst)
    print(nbr)
    print("\n")
    dataframe.drop(indices_to_drop_lst, axis=0, inplace=True)
    return dataframe
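# Hedged usage sketch with a toy dataframe; the column names are illustrative only.
import pandas as pd
df = pd.DataFrame({
    "code": ["A1", "A2", "A3"],
    "label": ["organic apple", "chocolate bar", "apple juice"],
})
filtered = word_column_filter_df(df, column_to_filter="label",
                                 column_freeze="code", word_list=["apple"])
# Rows A1 and A3 are dropped in place; only the "chocolate bar" row remains.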
def adjust_learning_rate(optimizer, iteration, epoch_size, hyp, epoch, epochs):
"""adjust learning rate, warmup and lr decay
:param optimizer: optimizer
:param iteration: iteration
:param epoch_size: epoch_size
:param hyp: hyperparameters
:param epoch: epoch
:param epochs: the number of epochs
:return: lr
"""
step_index = 0
if epoch < 6:
# The first 6 epochs carried out warm up
learning_rate = 1e-6 + (hyp['lr0'] - 1e-6) * iteration / (epoch_size * 2)
else:
if epoch > epochs * 0.5:
# At 50% of the epochs, the learning rate decays in Gamma
step_index = 1
if epoch > epochs * 0.7:
# At 70% of the epochs, the learning rate decays in Gamma^2
step_index = 2
learning_rate = hyp['lr0'] * (0.1 ** (step_index))
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
return learning_rate | c90c61fcecca99d31214c96cdf7d96b6ba682daa | 16,034 |
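# Hedged usage sketch: a stub object with `param_groups` stands in for a torch optimizer,
# and hyp['lr0'] = 0.01 is an assumed base learning rate.
class _StubOptimizer:
    param_groups = [{'lr': 0.0}]

lr = adjust_learning_rate(_StubOptimizer(), iteration=100, epoch_size=500,
                          hyp={'lr0': 0.01}, epoch=2, epochs=100)
# Warmup phase (epoch < 6): lr = 1e-6 + (0.01 - 1e-6) * 100 / (500 * 2) ≈ 1.0e-3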
import numpy as np
import h5py
from scipy.io import loadmat
from numpy import fromfile, empty, append
def reading_data(data_file):
"""
    Read in a data file (16 bit) and obtain the entire data set that is
    multiplexed between ECG and Pulse data. The data is then extracted and appended
    to separate arrays.
    :param data_file: The binary data file to be loaded into the function
    :return: a tuple (fs, pp, ecg) of the sampling frequency, pulse data and ECG data arrays
"""
try:
m = loadmat(data_file)
x = dict(m)
fs = x.get('f_s')
fs = np.array(fs)
fs = fs.flatten()
pp = x.get('pulse_P')
pp = np.array(pp)
pp = pp.flatten()
ecg = x.get('ECG')
ecg = np.array(ecg)
ecg = ecg.flatten()
print(fs)
return fs, pp, ecg
except ValueError:
try:
# for h5py
with h5py.File(data_file, 'r') as hf:
fs = hf.get('f_s')
fs = np.array(fs)
fs = fs.flatten()
pp = hf.get('pp')
pp = np.array(pp)
pp = pp.flatten()
ecg = hf.get('ECG')
ecg = np.array(ecg)
ecg = ecg.flatten()
print(fs)
return fs, pp, ecg
except IOError:
fs = fromfile(data_file, dtype='uint16', count=1, sep='')
hrData = fromfile(data_file, dtype='uint16', count=-1, sep='')
ecg = empty(shape=[0, len(hrData)], dtype=int) # Initialize Empty Arrays
pp = empty(shape=[0, len(hrData)], dtype=int) # Initialize Empty Arrays
for i in range(1, len(hrData), 2):
ecg = append(ecg, hrData[i])
for k in range(2, len(hrData), 2):
pp = append(pp, hrData[k])
print(ecg)
return fs, pp, ecg | 0ba4980db08a8877fabdfcc282d45c18d868a0a3 | 16,035 |
from typing import Callable
def two_body_mc_grad(env1: AtomicEnvironment, env2: AtomicEnvironment,
d1: int, d2: int, hyps: 'ndarray', cutoffs: 'ndarray',
cutoff_func: Callable = cf.quadratic_cutoff) \
-> (float, 'ndarray'):
"""2-body multi-element kernel between two force components and its
gradient with respect to the hyperparameters.
Args:
env1 (AtomicEnvironment): First local environment.
env2 (AtomicEnvironment): Second local environment.
d1 (int): Force component of the first environment.
d2 (int): Force component of the second environment.
hyps (np.ndarray): Hyperparameters of the kernel function (sig, ls).
cutoffs (np.ndarray): One-element array containing the 2-body
cutoff.
cutoff_func (Callable): Cutoff function of the kernel.
Return:
(float, np.ndarray):
Value of the 2-body kernel and its gradient with respect to the
hyperparameters.
"""
sig = hyps[0]
ls = hyps[1]
r_cut = cutoffs[0]
return two_body_mc_grad_jit(env1.bond_array_2, env1.ctype, env1.etypes,
env2.bond_array_2, env2.ctype, env2.etypes,
d1, d2, sig, ls, r_cut, cutoff_func) | 8cba89674c5e1dea999d026aad8f9b393a57f5cc | 16,036 |