content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
---|---|---|
def GetIdentifierStart(token):
"""Returns the first token in an identifier.
Given a token which is part of an identifier, returns the token at the start
of the identifier.
Args:
token: A token which is part of an identifier.
Returns:
The token at the start of the identifier or None if the identifier was not
of the form 'a.b.c' (e.g. "['a']['b'].c").
"""
start_token = token
previous_code_token = GetPreviousCodeToken(token)
while (previous_code_token and (
previous_code_token.IsType(JavaScriptTokenType.IDENTIFIER) or
IsDot(previous_code_token))):
start_token = previous_code_token
previous_code_token = GetPreviousCodeToken(previous_code_token)
if IsDot(start_token):
return None
return start_token | 6b3ad9fb9d43411fc7df147ace872f75c70b5d11 | 11,400 |
def load_spec(filename):
"""
loads the IDL spec from the given file object or filename, returning a
Service object
"""
service = Service.from_file(filename)
service.resolve()
return service | 6dfea85635d3b610ee998999397fc92fd516933c | 11,401 |
import os
import configparser
import logging
import pymongo
logger = logging.getLogger(__name__)
def connect(db_config_name):
"""
Check the current environment to determine which database
parameters to use, then connect to the target database on the
specified host.
:return: A database connection object.
"""
config_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'config'
)
property_file = os.environ.get('DB_PARAM_FILE')
if property_file is None:
logger.info("No environmental variable set; using 'default.ini'.")
property_file = 'default.ini'
else:
logger.info("property file set: '{}'".format(property_file))
config = configparser.ConfigParser()
property_path = os.path.join(config_path, property_file)
with open(property_path) as f:
config.read_file(f)
db_host = config.get(db_config_name, 'db_host')
db_name = config.get(db_config_name, 'db_name')
logger.info("Connecting to database '{}' on host '{}'."
.format(db_name, db_host))
client = pymongo.MongoClient(db_host, 27017)
try:
logger.info("Authenticating database '{}'.".format(db_name))
client[db_name].authenticate(config.get(db_config_name, 'user'),
config.get(db_config_name, 'password'))
except configparser.NoOptionError:
logger.info("No username/password provided; "
"attempting to connect anyway.")
return client[db_name] | bb4a74887c38213a5089de79e06f67b6b8de606f | 11,402 |
import torch
def load_model(file_path, *, epoch, model, likelihood, mll, optimizer, loss):
"""モデルの保存関数
Parameters
----------
file_path : str
モデルの保存先のパスとファイル名
epoch : int
現在のエポック数
model : :obj:`gpytorch.models`
学習済みのモデルのオブジェクト
likelihood : :obj:`gpytorch.likelihoods`
学習済みのlikelihoodsのオブジェクト
mll : :obj:`gpytorch.mlls`
学習済みのmllsのオブジェクト
optimizer : :obj:`torch.optim`
学習済みのoptimのオブジェクト
loss : list
現在のエポックまでの経過loss
Returns
-------
epoch : int
現在のエポック数
model : :obj:`gpytorch.models`
学習済みのモデルのオブジェクト
likelihood : :obj:`gpytorch.likelihoods`
学習済みのlikelihoodsのオブジェクト
mll : :obj:`gpytorch.mlls`
学習済みのmllsのオブジェクト
optimizer : :obj:`torch.optim`
学習済みのoptimのオブジェクト
loss : list
現在のエポックまでの経過loss
"""
temp = torch.load(file_path)
epoch = temp['epoch']
model.load_state_dict(temp['model'])
likelihood.load_state_dict(temp['likelihood'])
mll.load_state_dict(temp['mll'])
optimizer.load_state_dict(temp['optimizer'])
loss = temp['loss']
return epoch, model, likelihood, mll, optimizer, loss | ccc7f221164d89ed29326f720becd29e3442c52b | 11,403 |
import re
def valid_account_id(log, account_id):
"""Validate account Id is a 12 digit string"""
if not isinstance(account_id, str):
log.error("supplied account id {} is not a string".format(account_id))
return False
id_re = re.compile(r'^\d{12}$')
if not id_re.match(account_id):
log.error("supplied account id '{}' must be a 12 digit number".format(account_id))
return False
return True | 30f3aa9547f83c4bea53041a4c79ba1242ae4754 | 11,404 |
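# Usage sketch for valid_account_id above: a minimal check using the standard
# library logger; the logger name "account-check" is an arbitrary choice.
import logging
logging.basicConfig(level=logging.ERROR)
log = logging.getLogger("account-check")
print(valid_account_id(log, "123456789012"))  # True
print(valid_account_id(log, "1234"))          # False, logs "must be a 12 digit number"
print(valid_account_id(log, 123456789012))    # False, logs "is not a string"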
import numpy
def prod(a, axis=None, dtype=None, out=None):
"""
Product of array elements over a given axis.
Parameters
----------
a : array_like
Elements to multiply.
axis : None or int or tuple of ints, optional
Axis or axes along which a product is performed.
The default (`axis` = `None`) is to perform the product over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a multiply is performed on multiple
axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
The type of the returned array and of the accumulator in which the
elements are multiplied. The dtype of `a` is used by default unless `a`
has an integer dtype of less precision than the default platform
integer. In that case, if `a` is signed then the platform integer
is used while if `a` is unsigned then an unsigned integer of the
same precision as the platform integer is used.
out : ndarray, optional
Array into which the output is placed. By default, a new array is
created. If `out` is given, it must be of the appropriate shape
(the shape of `a` with `axis` removed, i.e.,
``numpy.delete(a.shape, axis)``). Its type is preserved. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
product_along_axis : ndarray
An array with the same shape as `a`, with the specified
axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
Examples
--------
>>> np.prod([0.5, 1.5])
0.75
>>> np.prod([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
0
>>> np.prod([[0, 1], [0, 5]])
0
>>> np.prod([[0, 1], [0, 5]], axis=0)
array([0, 5])
>>> np.prod([[0, 1], [0, 5]], axis=1)
array([0, 0])
If the accumulator is too small, overflow occurs:
>>> np.full(7, 2, dtype=np.int8).prod(dtype=np.int8)
-128
"""
if not bhary.check(a) and not bhary.check(out):
return numpy.prod(a, axis=axis, dtype=dtype, out=out)
else:
if dtype is not None:
a = array_create.array(a, dtype=dtype)
return ufuncs.multiply.reduce(a, axis=axis, out=out) | c33a506847b13924aa903b5daeece0312cc29c8f | 11,405 |
import random
def sample_pagerank(corpus, damping_factor, n):
"""
Return PageRank values for each page by sampling `n` pages
according to transition model, starting with a page at random.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between 0 and 1). All
PageRank values should sum to 1.
"""
# Initialize dict with all pages with count 0
pr_sample = dict([(page, 0) for page in corpus])
sample_page = None
# Iterate over n samples and increment page each time it is selected
for i in range(n):
if sample_page:
transition_dist = transition_model(corpus, sample_page, damping_factor)
sample_page = random.choices(list(transition_dist.keys()), weights=list(transition_dist.values()), k=1)[0]
else:
sample_page = random.choice(list(pr_sample.keys()))
# Record sample selection for each time it is chosen
pr_sample[sample_page] += 1
# Apply overall percentage by dividing each page count by n
for page in pr_sample:
pr_sample[page] /= n
return pr_sample | 32c89d7669718c714663e66a926bb27f9c219c38 | 11,406 |
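# Usage sketch for sample_pagerank above. The corpus is a made-up three-page link
# graph; transition_model is assumed to be defined elsewhere in the same project
# (it returns a probability distribution over next pages for a given page).
corpus = {
"1.html": {"2.html"},
"2.html": {"1.html", "3.html"},
"3.html": {"2.html"},
}
ranks = sample_pagerank(corpus, damping_factor=0.85, n=10000)
print(ranks)                # estimated PageRank per page
print(sum(ranks.values()))  # approximately 1.0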
def guess_layout_cols_lr(mr,
buf,
alg_prefix,
layout_alg_force=None,
verbose=False):
"""
Assume bits are contiguous in columns
wrapping around at the next line
Least significant bit at left
Can either start in the very upper left of the bit column and go right
Or can start in the upper right of the bit column and go left
Related permutations are handled by flipx, rotate, etc
"""
# Must be able to divide input
txtw, _txth = mr.txtwh()
if txtw % mr.word_bits() != 0:
verbose and "guess_layout_cols_lr: bad width"
return
bit_cols = txtw // mr.word_bits()
# upper left start moving right
def ul_oi2cr(offset, maski):
bitcol = offset % bit_cols
col = maski * bit_cols + bitcol
row = offset // bit_cols
return (col, row)
name = "cols-right"
if layout_alg_force is None or layout_alg_force == name:
yield try_oi2cr(mr, ul_oi2cr, buf), alg_prefix + name
# upper right start moving left
def ur_oi2cr(offset, maski):
bitcol = bit_cols - 1 - offset % bit_cols
col = maski * bit_cols + bitcol
row = offset // bit_cols
return (col, row)
name = "cols-left"
if layout_alg_force is None or layout_alg_force == name:
yield try_oi2cr(mr, ur_oi2cr, buf), alg_prefix + name
# Used in TMS320C15
# even bits start from left side, odd bits from right
# Basically alternating cols-right and cols-left
# they move towards each other and then start again on the next line
if mr.word_bits() % 2 == 0:
def squeeze_lr_oi2cr(offset, maski):
left_bit = maski & 0xFFFE
if maski % 2 == 0:
# cols-right
bitcol = offset % bit_cols
else:
# cols-left (offset by left_bit)
bitcol = 2 * bit_cols - 1 - offset % bit_cols
col = left_bit * bit_cols + bitcol
row = offset // bit_cols
return (col, row)
name = "squeeze-lr"
if layout_alg_force is None or layout_alg_force == name:
yield try_oi2cr(mr, squeeze_lr_oi2cr, buf), alg_prefix + name | dbbbf68ee251fb50c413648e97c9957ed7c086ec | 11,407 |
def decrease(rse_id, account, files, bytes, session=None):
"""
Decreases the specified counter by the specified amount.
:param rse_id: The id of the RSE.
:param account: The account name.
:param files: The amount of files.
:param bytes: The amount of bytes.
:param session: The database session in use.
"""
return increase(rse_id=rse_id, account=account, files=-files, bytes=-bytes, session=session) | 2ad193e5f50c0bcb19f0d796c7f8b9da115a1f2d | 11,408 |
def get_import_error(import_error_id, session):
"""
Get an import error
"""
error = session.query(ImportError).filter(ImportError.id == import_error_id).one_or_none()
if error is None:
raise NotFound("Import error not found")
return import_error_schema.dump(error) | 37444be97de3c4fa97fba60d87f469c428011db1 | 11,409 |
import requests
import json
def get_violations(nsi_uuid=None):
"""Returns info on all SLA violations.
:param nsi_uuid: (Default value = None) uuid of a service instance.
:returns: A list. [0] is a bool with the result. [1] is a list of
SLA violations associated to a service instance.
"""
url = env.sl_violations_api
if nsi_uuid:
url = env.sl_violations_api + '/service/' + nsi_uuid
# get current list of violations
resp = requests.get(url, timeout=env.timeout)
if resp.status_code != 200:
LOG.debug("Request returned with " + (str(resp.status_code)))
error = resp.text
return False, error
violations = json.loads(resp.text)
return True, violations | 83c9688ee401bbb19800f89cc78a07d1af2b2f6f | 11,410 |
from random import randint
def roll_dice(times, sides):
"""Simulate rolling a `sides`-sided die `times` times and return the results."""
results = []
for num in range(times):
result = randint(1, sides)
results.append(result)
return results | 9a8442ff777c8c03146bcb9a0f8a2dc19e87a195 | 11,411 |
import numpy as np
def _read_calib_SemKITTI(calib_path):
"""
:param calib_path: Path to a calibration text file.
:return: dict with calibration matrices.
"""
calib_all = {}
with open(calib_path, 'r') as f:
for line in f.readlines():
if line == '\n':
break
key, value = line.split(':', 1)
calib_all[key] = np.array([float(x) for x in value.split()])
# reshape matrices
calib_out = {}
calib_out['P2'] = calib_all['P2'].reshape(3, 4) # 3x4 projection matrix for left camera
calib_out['Tr'] = np.identity(4) # 4x4 matrix
calib_out['Tr'][:3, :4] = calib_all['Tr'].reshape(3, 4)
return calib_out | 2d71146ce79ce39309930bb8a452c185c35c3061 | 11,412 |
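# Usage sketch for _read_calib_SemKITTI above: write a tiny calibration file in the
# expected "KEY: v1 v2 ..." layout and parse it. The matrix values are placeholders,
# not real KITTI calibration data.
from pathlib import Path
Path("calib.txt").write_text("P2: 1 0 0 0 0 1 0 0 0 0 1 0\nTr: 1 0 0 0 0 1 0 0 0 0 1 0\n")
calib = _read_calib_SemKITTI("calib.txt")
print(calib["P2"].shape)  # (3, 4)
print(calib["Tr"].shape)  # (4, 4), with the last row left as [0, 0, 0, 1]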
import torch
def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None):
"""Fast CUDA implementation of `bias_act()` using custom ops.
"""
# Parse arguments.
assert clamp is None or clamp >= 0
spec = activation_funcs[act]
alpha = float(alpha if alpha is not None else spec.def_alpha)
gain = float(gain if gain is not None else spec.def_gain)
clamp = float(clamp if clamp is not None else -1)
# Lookup from cache.
key = (dim, act, alpha, gain, clamp)
if key in _bias_act_cuda_cache:
return _bias_act_cuda_cache[key]
# Forward op.
class BiasActCuda(torch.autograd.Function):
@staticmethod
def forward(ctx, x, b): # pylint: disable=arguments-differ
ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride()[1] == 1 else torch.contiguous_format
x = x.contiguous(memory_format=ctx.memory_format)
b = b.contiguous() if b is not None else _null_tensor
y = x
if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor:
y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp)
ctx.save_for_backward(
x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
y if 'y' in spec.ref else _null_tensor)
return y
@staticmethod
def backward(ctx, dy): # pylint: disable=arguments-differ
dy = dy.contiguous(memory_format=ctx.memory_format)
x, b, y = ctx.saved_tensors
dx = None
db = None
if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
dx = dy
if act != 'linear' or gain != 1 or clamp >= 0:
dx = BiasActCudaGrad.apply(dy, x, b, y)
if ctx.needs_input_grad[1]:
db = dx.sum([i for i in range(dx.ndim) if i != dim])
return dx, db
# Backward op.
class BiasActCudaGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ
ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride()[1] == 1 else torch.contiguous_format
dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp)
ctx.save_for_backward(
dy if spec.has_2nd_grad else _null_tensor,
x, b, y)
return dx
@staticmethod
def backward(ctx, d_dx): # pylint: disable=arguments-differ
d_dx = d_dx.contiguous(memory_format=ctx.memory_format)
dy, x, b, y = ctx.saved_tensors
d_dy = None
d_x = None
d_b = None
d_y = None
if ctx.needs_input_grad[0]:
d_dy = BiasActCudaGrad.apply(d_dx, x, b, y)
if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]):
d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp)
if spec.has_2nd_grad and ctx.needs_input_grad[2]:
d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim])
return d_dy, d_x, d_b, d_y
# Add to cache.
_bias_act_cuda_cache[key] = BiasActCuda
return BiasActCuda | 44559520faf06fbf9b6f17ac1b29b829840e7f38 | 11,413 |
from itertools import chain
from typing import Mapping
def root_nodes(g: Mapping):
"""
>>> g = dict(a='c', b='ce', c='abde', d='c', e=['c', 'z'], f={})
>>> sorted(root_nodes(g))
['f']
Note that `f` is present: Isolated nodes are considered both as
root and leaf nodes both.
"""
nodes_having_parents = set(chain.from_iterable(g.values()))
return set(g) - set(nodes_having_parents) | 67c2043053f82a9a17f148c57bbf4d2501530f99 | 11,414 |
def _GetRemoteFileID(local_file_path):
"""Returns the checked-in hash which identifies the name of file in GCS."""
hash_path = local_file_path + '.sha1'
with open(hash_path, 'rb') as f:
return f.read(1024).rstrip() | 4a06dcdd30e379891fe3f9a5b3ecc2c4fd1a98ed | 11,415 |
def stress_stress(
bond_array_1, c1, etypes1, bond_array_2, c2, etypes2, sig, ls, r_cut, cutoff_func
):
"""2-body multi-element kernel between two partial stress components
accelerated with Numba.
Args:
bond_array_1 (np.ndarray): 2-body bond array of the first local
environment.
c1 (int): Species of the central atom of the first local environment.
etypes1 (np.ndarray): Species of atoms in the first local
environment.
bond_array_2 (np.ndarray): 2-body bond array of the second local
environment.
c2 (int): Species of the central atom of the second local environment.
etypes2 (np.ndarray): Species of atoms in the second local
environment.
sig (float): 2-body signal variance hyperparameter.
ls (float): 2-body length scale hyperparameter.
r_cut (float): 2-body cutoff radius.
cutoff_func (Callable): Cutoff function.
Return:
float: Value of the 2-body kernel.
"""
kernel_matrix = np.zeros((6, 6))
ls1 = 1 / (2 * ls * ls)
ls2 = 1 / (ls * ls)
ls3 = ls2 * ls2
sig2 = sig * sig
for m in range(bond_array_1.shape[0]):
ri = bond_array_1[m, 0]
e1 = etypes1[m]
for n in range(bond_array_2.shape[0]):
e2 = etypes2[n]
# check if bonds agree
if (c1 == c2 and e1 == e2) or (c1 == e2 and c2 == e1):
rj = bond_array_2[n, 0]
r11 = ri - rj
D = r11 * r11
s1 = 0
for d1 in range(3):
ci = bond_array_1[m, d1 + 1]
B = r11 * ci
fi, fdi = cutoff_func(r_cut, ri, ci)
for d2 in range(d1, 3):
coordinate_1 = bond_array_1[m, d2 + 1] * ri
s2 = 0
for d3 in range(3):
cj = bond_array_2[n, d3 + 1]
A = ci * cj
C = r11 * cj
fj, fdj = cutoff_func(r_cut, rj, cj)
for d4 in range(d3, 3):
coordinate_2 = bond_array_2[n, d4 + 1] * rj
force_kern = force_helper(
A, B, C, D, fi, fj, fdi, fdj, ls1, ls2, ls3, sig2
)
kernel_matrix[s1, s2] += (
force_kern * coordinate_1 * coordinate_2
)
s2 += 1
s1 += 1
return kernel_matrix / 4 | c832b6951774eff3b37dd3a674be74ad917409df | 11,416 |
def is_color_rgb(color):
"""Is a color in a valid RGB format.
Parameters
----------
color : obj
The color object.
Returns
-------
bool
True, if the color object is in RGB format.
False, otherwise.
Examples
--------
>>> color = (255, 0, 0)
>>> is_color_rgb(color)
True
>>> color = (1.0, 0.0, 0.0)
>>> is_color_rgb(color)
True
>>> color = (1.0, 0, 0)
>>> is_color_rgb(color)
False
>>> color = (255, 0.0, 0.0)
>>> is_color_rgb(color)
False
>>> color = (256, 0, 0)
>>> is_color_rgb(color)
False
"""
if isinstance(color, (tuple, list)):
if len(color) == 3:
if all(isinstance(c, float) for c in color):
if all(c >= 0.0 and c <= 1.0 for c in color):
return True
elif all(isinstance(c, int) for c in color):
if all(c >= 0 and c <= 255 for c in color):
return True
return False | 46b8241d26fa19e4372587ffebda3690972c3395 | 11,417 |
def edit_post_svc(current_user, id, content):
"""
Updates post content.
:param current_user:
:param id:
:param content:
:return:
"""
post = single_post_svc(id)
if post is None or post.user_id != current_user:
return None
post.content = content
db.session.commit()
return True | a17b632f402ef3f915bf06bde86ab0ff40956177 | 11,418 |
import math as M
def free_free_absorp_coefPQ(n_e,n_i,T,f):
"""Returns a physical quantity for the free-free absorption coefficient
given the electron density, ion density, kinetic temperature and frequency
as physical quantities. From Shklovsky (1960) as quoted by Kraus (1966)."""
value = 9.8e-13 * n_e.inBaseUnits().value * n_i.inBaseUnits().value \
* M.pow(T.inBaseUnits().value,-1.5) * M.pow(f.inBaseUnits().value,-2) \
* (19.8 + M.log(M.pow(T.inBaseUnits().value,1.5)/f.inBaseUnits().value))
return P.pq(value,'1/m') | 17a09bf20f4363be4f273694168df2cf0eee8b38 | 11,419 |
import numpy as np
def pixel_gain_mode_statistics(gmaps):
"""returns statistics of pixels in different gain modes in gain maps
gr0, gr1, gr2, gr3, gr4, gr5, gr6 = gmaps
"""
arr1 = np.ones_like(gmaps[0], dtype=np.int32)
return [np.sum(np.select((gr,), (arr1,), default=0)) for gr in gmaps] | b9c6b4c601724105d381e77f7c293e0bd00f3ba8 | 11,420 |
import multiprocessing as mp
def run_parallel(ds1, ds2):
""" Run the calculation using multiprocessing.
:param ds1: list with points
:param ds2: list with points
:return: list of distances
"""
pool = mp.Pool(processes=mp.cpu_count())
result = pool.starmap(euclidian_distance, [(p1, p2) for p1 in ds1 for p2 in ds2])
pool.close()
return result | e8a6b0124db1948ab72b9081863cdfe77a75e08d | 11,421 |
import re
def to_latin(name):
"""Convert all symbols to latin"""
symbols = (u"іїєабвгдеёжзийклмнопрстуфхцчшщъыьэюяІЇЄАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ",
u"iieabvgdeejzijklmnoprstufhzcss_y_euaIIEABVGDEEJZIJKLMNOPRSTUFHZCSS_Y_EUA")
tr = {ord(a): ord(b) for a, b in zip(*symbols)}
translated_name = name.translate(tr)
translated_name = re.sub("[^A-Za-z0-9]", "_", translated_name)
return translated_name | 06a0d535fa7a74feea33e58815da2792a6026def | 11,422 |
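# Usage sketch for to_latin above; the Cyrillic inputs are arbitrary example strings.
print(to_latin("Сергій"))         # "Sergij"
print(to_latin("дані 2021.csv"))  # "dani_2021_csv" (spaces and dots become underscores)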
def network_instance_create(network, host, attrs=None):
"""
Creates a network_instance of given kind and host, configuring it with the given attributes.
Parameter *kind*:
The parameter *kind* must be a string identifying one of the supported
network_instance kinds.
Parameter *host*:
The parameter *host* must be a string giving a host for the network_instance.
Parameter *attrs*:
The attributes of the network_instance can be given as the parameter *attrs*.
This parameter must be a dict of attributes if given. Attributes can
later be changed using :py:func:`network_instance_modify`.
Return value:
The return value of this method is the info dict of the new network_instance as
returned by :py:func:`resource_info`.
"""
if not attrs: attrs = {}
attrs = dict(attrs)
attrs.update(host=host, network=network)
res = NetworkInstance.create(attrs)
return res.info() | 23efa27090081bc59f917cdcf7497f75be0f93b4 | 11,423 |
def update_get():
"""Fetches the state of the latest update job.
Returns:
On success, a JSON data structure with the following properties:
status: str describing the status of the job. Can be one of
["NOT_RUNNING", "DONE", "IN_PROGRESS"].
Example:
{
"status": "NOT_RUNNING"
}
Returns error object on failure.
"""
status, error = update.status.get()
if error:
return json_response.error(error), 500
return json_response.success({'status': str(status)}) | 9e1a2438fc8b4d1dd1bd1354d478c4d4e3e58098 | 11,424 |
def create_github_url(metadata, is_file=False):
"""Constrói a URL da API
Constrói a URL base da API do github a partir
dos dados presentes no metadata.
Args:
metadata: JSON com informações acerca do dataset.
is_file: FLAG usada pra sinalizar se o dataset é apenas um elemento.
"""
url_params = metadata['url'].split('/')
server_idx = url_params.index('github.com')
username = url_params[server_idx + 1]
repo = url_params[server_idx + 2]
data_path = metadata['path']
return ("https://raw.githubusercontent.com/{}/{}/master{}" if is_file else "https://api.github.com/repos/{}/{}/contents{}").format(username, repo, data_path) | 6c138d92cd7b76f87c225a1fd98e7d397b0d6d28 | 11,425 |
def kge_2012(obs, sim, missing="drop", weighted=False, max_gap=30):
"""Compute the (weighted) Kling-Gupta Efficiency (KGE).
Parameters
----------
sim: pandas.Series
Series with the simulated values.
obs: pandas.Series
Series with the observed values.
missing: str, optional
string with the rule to deal with missing values. Only "drop" is
supported now.
weighted: bool, optional
Weight the values by the normalized time step to account for
irregular time series. Default is False.
max_gap: int, optional
maximum allowed gap period in days to use for the computation of the
weights. All time steps larger than max_gap are replace with the
max_gap value. Default value is 30 days.
Notes
-----
The (weighted) Kling-Gupta Efficiency [kling_2012]_ is computed as follows:
.. math:: \\text{KGE} = 1 - \\sqrt{(r-1)^2 + (\\beta-1)^2 + (\\gamma-1)^2}
where :math:`\\beta = \\bar{x} / \\bar{y}` and :math:`\\gamma =
\\frac{\\bar{\\sigma}_x / \\bar{x}}{\\bar{\\sigma}_y / \\bar{y}}`. If
weighted equals True, the weighted mean, variance and pearson
correlation are used.
References
----------
.. [kling_2012] Kling, H., Fuchs, M., and Paulin, M. (2012). Runoff
conditions in the upper Danube basin under an ensemble of climate
change scenarios. Journal of Hydrology, 424-425:264 - 277.
"""
if missing == "drop":
obs = obs.dropna()
sim = sim.reindex(obs.index).dropna()
# Return nan if the time indices of the sim and obs don't match
if sim.index.size == 0:
logger.warning("Time indices of the sim and obs don't match.")
return nan
r = pearsonr(obs=obs, sim=sim, weighted=weighted, max_gap=max_gap)
mu_sim = mean(sim, weighted=weighted, max_gap=max_gap)
mu_obs = mean(obs, weighted=weighted, max_gap=max_gap)
beta = mu_sim / mu_obs
gamma = (std(sim, weighted=weighted, max_gap=max_gap) / mu_sim) / \
(std(obs, weighted=weighted, max_gap=max_gap) / mu_obs)
kge = 1 - sqrt((r - 1) ** 2 + (beta - 1) ** 2 + (gamma - 1) ** 2)
return kge | 974735f9deb8ffedf88711af5c059ac0aae90218 | 11,426 |
def hms_to_dms(h, m, s):
"""
Convert an ``(hour, minute, second)`` tuple to a ``(degree, arcminute,
arcsecond)`` tuple.
"""
return degrees_to_dms(hms_to_degrees(h, m, s)) | b99b051699f8a5395fe24e2e909f1690c0e67e4c | 11,427 |
def color_calibration(
src_imgs,
src_color_space,
src_is_linear,
ref_imgs,
ref_color_space,
ref_is_linear,
verbose=False,
distance="de00",
):
"""Function that does color calibration for a given target image according to a given reference image
STEP1: load the colorcheckers from the src and ref images
STEP2: TODO: linearize the src and ref color checkers if necessary
STEP3: TODO: convert the src and ref color checkers into the same color space (usually the color space of the ref image)
STEP4: optimize the CCM to minimize the CIE2000 distance between the ref and calibrated target color checkers
STEP5: compute the calibrated image with the optimized CCM
Args:
src (String): path of target image file
src_color_space (enum color_space): color space of target image
src_is_linear (bool): indicates whether the target image is linearized (sRGB or RGB)
ref (String): path of reference image file
ref_color_space (enum color_space): color space of reference image
ref_is_linear (bool): indicates whether the reference image is linearized (sRGB or RGB)
"""
# Parameters of the standard color checker with aruco tags
col_n = 6
row_n = 4
aruco_type = "DICT_5X5_50"
# load the colorcheckers from the src and ref images
src_colorchecker = None
ref_colorchecker = None
for img in src_imgs:
try:
color_checker = detect_colorchecker(img, row_n, col_n, aruco_type, verbose=verbose)
except SystemExit:
continue
if src_colorchecker is None:
src_colorchecker = color_checker
src_colorchecker = (src_colorchecker + color_checker) / 2
for img in ref_imgs:
try:
color_checker = detect_colorchecker(img, row_n, col_n, aruco_type, verbose=verbose)
except SystemExit:
continue
if ref_colorchecker is None:
ref_colorchecker = color_checker
ref_colorchecker = (ref_colorchecker + color_checker) / 2
# TODO: if the src has a different color space than the ref image, unify their color spaces
# use CCM_3x3 to find the optimized CCM, which brings src closer to ref
ccm = CCM_3x3(src_colorchecker, ref_colorchecker, distance=distance)
ccm_3x3, error = ccm.value()
calibrated_images = []
for img in src_imgs:
img = ccm.infer_image(img)
calibrated_images.append(img)
if verbose:
cv2.imshow("image after calibration", img)
cv2.imwrite("imgs/output_infered.png", img)
if verbose:
cv2.waitKey(0)
cv2.destroyAllWindows()
return ccm_3x3, error, calibrated_images | 9a225096b760a3bda87adb8ae2d81690da43650d | 11,428 |
def getChiv6ch(mol):
"""
Chiv6h related to ring 6
"""
return getChivnch(mol, 6) | 31a688b1d0b98b75d81b9c4c5e93bb8d62ee732e | 11,429 |
def is_outside_of_range(type_key: CLTypeKey, value: int) -> bool:
"""Returns flag indicating whether a value is outside of numeric range associated with the CL type.
"""
constraints = NUMERIC_CONSTRAINTS[type_key]
return value < constraints.MIN or value > constraints.MAX | fb8ec41d7edc094242df6bad13b8f32285a86007 | 11,430 |
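# Sketch of the lookup table that is_outside_of_range above relies on: in the real
# library, CLTypeKey and NUMERIC_CONSTRAINTS are imported from the package; here the
# table is stubbed with a plain "U8" string key purely for illustration.
from collections import namedtuple
Constraints = namedtuple("Constraints", ["MIN", "MAX"])
NUMERIC_CONSTRAINTS = {"U8": Constraints(MIN=0, MAX=255)}
print(is_outside_of_range("U8", 300))  # True, above MAX
print(is_outside_of_range("U8", 42))   # False, inside the range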
def read_skel(dset, path):
"""
:param dset: name of dataset, either 'ntu-rgbd' or 'pku-mmd'
:param path: path to the skeleton file
:return:
"""
if dset == 'ntu-rgbd':
file = open(path, 'r')
lines = file.readlines()
num_lines = len(lines)
num_frames = int(lines[0])
# print(num_lines, num_frames)
line_id = 1
data = []
for i in range(num_frames):
num_skels = int(lines[line_id])
# print(num_skels)
joints = []
for _ in range(num_skels):
num_joints = int(lines[line_id+2])
# print(num_joints)
joint = []
for k in range(num_joints):
tmp = lines[line_id+3+k].rstrip().split(' ')
x_3d, y_3d, z_3d, x_depth, y_depth, x_rgb, y_rgb, orientation_w,\
orientation_x, orientation_y, orientation_z = list(
map(float, tmp[:-1]))
joint.append([x_3d, y_3d, z_3d])
joints.append(joint)
line_id += 2+num_joints
joints = np.array(joints)
data.append(joints)
line_id += 1
assert line_id == num_lines
elif dset == 'pku-mmd':
file = open(path, 'r')
lines = file.readlines()
# num_lines = len(lines)
data = []
for line in lines:
joints = list(map(float, line.rstrip().split(' ')))
joints = np.array(joints).reshape(2, -1, 3)
if not np.any(joints[1]):
joints = joints[0][np.newaxis, :, :]
data.append(joints)
elif dset == 'cad-60':
f = open(path, 'r')
lines = f.readlines()
data = []
# Last line is "END"
for line in lines[:-1]:
# fist item is frame number, last item is empty
row = line.split(',')[1:-1]
row = list(map(float, row))
joints = []
for i in range(15):
if i < 11:
# First 11 joints
index = 14 * i + 10
else:
# Joint 12 ~ 15
index = 11 * 14 + (i - 11) * 4
joint = row[index: index+3]
joints.append(joint)
joints = np.array(joints) / 1000.0 # millimeter to meter
joints = joints[np.newaxis, :, :] # To match ntu-rgb format
data.append(joints)
else:
raise NotImplementedError
return data | f461ecb30ec1e7b66ce5d162ccc21b3ba34e6be8 | 11,431 |
def humanize_date(date_string):
""" returns dates as in this form: 'August 24 2019' """
return convert_date(date_string).strftime("%B %d %Y") | d9656af2c4091219d6ee259b557caadbde2cc393 | 11,432 |
import os
def get_checkpoint(checkpoint_path, requested_step=None, basename='checkpoint'):
"""
Reload the model from the checkpoint at the given path.
"""
if requested_step is not None:
model_checkpoint_path = '%s/%s-%s' % (checkpoint_path, basename, requested_step)
if not os.path.exists(model_checkpoint_path):
print('No checkpoint file found at [%s]' % checkpoint_path)
exit(-1)
print(model_checkpoint_path)
return model_checkpoint_path, requested_step
ckpt = tf.train.get_checkpoint_state(checkpoint_path)
if ckpt and ckpt.model_checkpoint_path:
# Restore checkpoint as described in top of this program
print(ckpt.model_checkpoint_path)
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
return ckpt.model_checkpoint_path, global_step
else:
print('No checkpoint file found at [%s]' % checkpoint_path)
exit(-1) | 66a00e73ac032c2011fcf5d3f95ed457b06f1e51 | 11,433 |
import sys
import traceback
def handle_error(exception: Exception, request: Request=None):
"""
If an exception is thrown, deal with it and present an error page.
"""
if request is None:
request = {'_environ': {'PATH_INFO': ''}}
if not getattr(exception, 'hide_traceback', False):
(e_type, e_value, e_tb) = sys.exc_info()
message = f"{exception.__class__} occurred on {request._environ['PATH_INFO']!r}: {exception}" \
f"\n{''.join(traceback.format_exception(e_type, e_value, e_tb))}"
request._environ['wsgi.errors'].write(message)
if isinstance(exception, RequestError):
status = getattr(exception, 'status', 404)
else:
status = 500
if status in ERROR_HANDLERS:
return ERROR_HANDLERS[status](request, exception)
return not_found(request, exception) | cc0a8c4344ebbe8d6119e5d89f3f119709431374 | 11,434 |
def tree_sanity_check(tree: Node) -> bool:
"""
Sanity check for syntax trees: One and the same node must never appear
twice in the syntax tree. Frozen Nodes (EMPTY_NODE, PLACEHOLDER)
should only exist temporarily and must have been dropped or eliminated
before any kind of tree generation (i.e. parsing) or transformation
is finished.
:param tree: the root of the tree to be checked
:returns: `True`, if the tree is "sane", `False` otherwise.
"""
node_set = set() # type: Set[Node]
for node in tree.select_if(lambda nd: True, include_root=True):
if not isinstance(node, Node) or node in node_set or isinstance(node, FrozenNode):
return False
node_set.add(node)
return True | 03b61729c67859bbf9820489ef8fc9768ea59f9f | 11,435 |
from pathlib import Path
import os
import logging
def fileCompare( filename1, filename2, folder1=None, folder2=None, printFlag=True, exitCount:int=10 ):
"""
Compare the two utf-8 files.
"""
filepath1 = Path( folder1, filename1 ) if folder1 else filename1
filepath2 = Path( folder2, filename2 ) if folder2 else filename2
if verbosityLevel > 1:
if filename1==filename2:
vPrint( 'Quiet', debuggingThisModule, "Comparing {!r} files in folders {!r} and {!r}…".format( filename1, folder1, folder2 ) )
else: vPrint( 'Quiet', debuggingThisModule, "Comparing files {!r} and {!r}…".format( filename1, filename2 ) )
# Do a preliminary check on the readability of our files
if not os.access( filepath1, os.R_OK ):
logging.error( f"fileCompare: File1 {filepath1!r} is unreadable" )
return None
if not os.access( filepath2, os.R_OK ):
logging.error( f"fileCompare: File2 {filepath2!r} is unreadable" )
return None
# Read the files into lists
lineCount, lines1 = 0, []
with open( filepath1, 'rt', encoding='utf-8' ) as file1:
for line in file1:
lineCount += 1
if lineCount==1 and line[0]==chr(65279): #U+FEFF
if printFlag and verbosityLevel > 2:
vPrint( 'Quiet', debuggingThisModule, " fileCompare: Detected Unicode Byte Order Marker (BOM) in file1" )
line = line[1:] # Remove the Unicode Byte Order Marker (BOM)
if line and line[-1]=='\n': line=line[:-1] # Removing trailing newline character
if not line: continue # Just discard blank lines
lines1.append( line )
lineCount, lines2 = 0, []
with open( filepath2, 'rt', encoding='utf-8' ) as file2:
for line in file2:
lineCount += 1
if lineCount==1 and line[0]==chr(65279): #U+FEFF
if printFlag and verbosityLevel > 2:
vPrint( 'Quiet', debuggingThisModule, " fileCompare: Detected Unicode Byte Order Marker (BOM) in file2" )
line = line[1:] # Remove the Unicode Byte Order Marker (BOM)
if line and line[-1]=='\n': line=line[:-1] # Removing trailing newline character
if not line: continue # Just discard blank lines
lines2.append( line )
# Compare the length of the lists/files
len1, len2 = len(lines1), len(lines2 )
equalFlag = True
if len1 != len2:
if printFlag: vPrint( 'Quiet', debuggingThisModule, "Count of lines differ: file1={}, file2={}".format( len1, len2 ) )
equalFlag = False
# Now compare the actual lines
diffCount = 0
for k in range( min( len1, len2 ) ):
if lines1[k] != lines2[k]:
if printFlag:
vPrint( 'Quiet', debuggingThisModule, " {}a:{!r} ({} chars)\n {}b:{!r} ({} chars)" \
.format( k+1, lines1[k], len(lines1[k]), k+1, lines2[k], len(lines2[k]) ) )
equalFlag = False
diffCount += 1
if diffCount > exitCount:
if printFlag and verbosityLevel > 1:
vPrint( 'Quiet', debuggingThisModule, "fileCompare: stopped comparing after {} mismatches".format( exitCount ) )
break
return equalFlag | e081e9266a01098d6d99a16ad2bc81bf4dfd36fc | 11,436 |
import numpy as np
def compute_std_error(g,theta,W,Omega,Nobs,Nsim=1.0e+10,step=1.0e-5,args=()):
""" calculate standard errors from minimum-distance type estimation
g should return a vector with:
data moments - simulated moments as a function of theta
Args:
g (callable): moment function (return vector of length J)
theta (np.ndarray): parameter vector (length K)
W (np.ndarray): weighting matrix (dim J-by-J)
Omega (np.ndarray): covariance matrix of empirical moments (dim J-by-J)
Nobs (scalar): number of observations
Nsim (scalar,optional): number of simulations
step (scalar,optional): finite step in numerical gradients
args (tuple, optional): additional arguments passed to g
"""
# a. dimensions
K = len(theta)
J = len(W[0])
# b. numerical gradient.
grad = np.empty((J,K))
for p in range(K):
theta_now = theta.copy()
step_now = np.zeros(K)
step_now[p] = np.fmax(step,step*np.abs(theta_now[p]))
g_forward = g(theta_now + step_now,*args)
g_backward = g(theta_now - step_now,*args)
grad[:,p] = (g_forward - g_backward)/(2.0*step_now[p])
# c. asymptotic variance
GW = grad.T @ W
GWG = GW @ grad
Avar = np.linalg.inv(GWG) @ ( GW @ Omega @ GW.T ) @ np.linalg.inv(GWG)
# d. return asymptotic standard errors
fac = (1.0 + 1.0/Nsim)/Nobs
std_error = np.sqrt(fac*np.diag(Avar))
return std_error | 176710e3b6c18efc535c67e257bc1014b4862135 | 11,437 |
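# Minimal illustration of compute_std_error above: a made-up moment function
# g(theta) = data_moments - theta, an identity weighting matrix and a diagonal Omega.
# All numbers are purely illustrative.
import numpy as np
data_moments = np.array([1.0, 2.0])
g = lambda theta: data_moments - theta  # "data moments - simulated moments"
W = np.eye(2)
Omega = np.diag([0.5, 0.8])
theta_hat = np.array([1.0, 2.0])
print(compute_std_error(g, theta_hat, W, Omega, Nobs=1000))  # one standard error per parameter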
def lhs(paramList, trials, corrMat=None, columns=None, skip=None):
"""
Produce an ndarray or DataFrame of 'trials' rows of values for the given parameter
list, respecting the correlation matrix 'corrMat' if one is specified, using Latin
Hypercube (stratified) sampling.
The values in the i'th column are drawn from the ppf function of the i'th parameter
from paramList, and each columns i and j are rank correlated according to corrMat[i,j].
:param paramList: (list of rv-like objects representing parameters) Only requirement
on parameter objects is that they must implement the ppf function.
:param trials: (int) number of trials to generate for each parameter.
:param corrMat: a numpy matrix representing the correlation between the parameters.
corrMat[i,j] should give the correlation between the i'th and j'th
entries of paramlist.
:param columns: (None or list(str)) Column names to use to return a DataFrame.
:param skip: (list of params)) Parameters to process later because they are
dependent on other parameter values (e.g., they're "linked"). These
cannot be correlated.
:return: ndarray or DataFrame with `trials` rows of values for the `paramList`.
"""
ranks = genRankValues(len(paramList), trials, corrMat) if corrMat is not None else None
samples = np.zeros((trials, len(paramList))) # @UndefinedVariable
skip = skip or []
for i, param in enumerate(paramList):
if param in skip:
continue # process later
values = param.ppf(getPercentiles(trials)) # extract values from the RV for these percentiles
if corrMat is None:
# Sequence is a special case for which we don't shuffle (and we ignore stratified sampling)
if param.param.dataSrc.distroName != 'sequence':
np.random.shuffle(values) # randomize the stratified samples
else:
indices = ranks[:, i] - 1 # make them 0-relative
values = values[indices] # reorder to respect correlations
samples[:, i] = values
return DataFrame(samples, columns=columns) if columns else samples | 787042db9773f4da6a2dbb4552269ac2740fb02e | 11,438 |
def percentError(mean, sd, y_output, logits):
""" Calculates the percent error between the prediction and real value.
The percent error is calculated with the formula:
100*(|real - predicted|)/(real)
The real and predicted values are un-normalized to see how accurate the true
predictions are. This metric is created in the name scope "percentError".
Input:
* mean: The mean of the original output distribution
* sd: The standard deviation of the original output distribution
* y_output: The y_output symbolic output from the iterator
* logits: The symbolic prediction output from the neural network
Returns:
* percentErr: An operation which calculates the percent error when
* used in a training or validation run of the network
"""
with tf.name_scope("percentError", values=[y_output, logits]):
predictions= tf.exp(tf.reduce_sum(logits, axis=-1)*sd + mean)
actualValue = tf.exp(y_output*sd + mean)
percentErr = tf.reduce_mean(abs((actualValue-predictions)*100/(actualValue)))
tf.summary.scalar('Percent_Error', percentErr)
return(percentErr) | 017291a71388ccf2ecb8db6808965b164dfbfe3d | 11,439 |
import os
def get_file_names(directory, prefix='', suffix='', nesting=True):
"""
Returns list of all files in directory
Args:
directory (str): the directory of interest
prefix (str): if provided, files returned must start with this
suffix (str): if provided, files returned must end with this
nesting (bool): if True, looks in all subdirectories of dir. If false, only looks at top-level.
"""
l = []
for path, subdirs, files in os.walk(directory):
for name in files:
if name.startswith(prefix) and name.endswith(suffix) and (nesting or (path == directory)):
l.append(os.path.join(path, name))
return l | b35d6a17ac93674a073076c36c0b84ba1361210b | 11,440 |
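# Usage sketch for get_file_names above: list Python files under the current directory,
# first recursively and then restricted to the top level only.
all_py = get_file_names(".", suffix=".py")
top_py = get_file_names(".", suffix=".py", nesting=False)
print(len(all_py), len(top_py))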
def parse_multiple_files(*actions_files):
"""Parses multiple files. Broadly speaking, it parses sequentially all
files, and concatenates all answers.
"""
return parsing_utils.parse_multiple_files(parse_actions, *actions_files) | 186a984d91d04ae82f79e7d22f24bd834b8a0366 | 11,441 |
from pyspark.sql import DataFrame
from pyspark.sql.functions import count
def frequent_combinations(spark_df: DataFrame, n=10, export=True):
"""
takes a dataframe containing visitor logs and computes the n most frequent visitor-visitee pairs
:param spark_df: Spark Dataframe
:param n: number of top visitors
:return: pandas dataframe with visitor-visitee pairs
"""
# compute aggregate and convert to pandas for visualization
freq_pairs = spark_df.groupBy(['VISITOR_NAME', 'VISITEE_NAME']).agg( \
count('APPT_START_DATE').alias('Visits')
). \
orderBy('Visits', ascending=False). \
limit(n). \
toPandas()
print(freq_pairs)
# persist
if export:
freq_pairs.to_csv(catalog['business/frequent_pairs'], index=False)
return freq_pairs | 5a4ce5e8199acd1462414fa2de95d4af48923434 | 11,442 |
import random
def reservoir_sampling(items, k):
"""
Reservoir sampling algorithm for a large sample space or a list of unknown length
See <http://en.wikipedia.org/wiki/Reservoir_sampling> for details
Type: ([a] * Int) -> [a]
Pre-condition: k is positive and items has at least k items
Post-condition: the length of the returned array is k
"""
sample = items[0:k]
for i in range(k, len(items)):
j = random.randrange(0, i + 1)
if j < k:
sample[j] = items[i]
return sample | ab2d0dc2bb3cb399ae7e6889f028503d165fbbe4 | 11,443 |
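# Usage sketch for reservoir_sampling above: draw 5 items from a list of 100;
# seeding the module-level random generator makes the draw reproducible.
import random
random.seed(0)
print(reservoir_sampling(list(range(100)), 5))  # a list of 5 items from the input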
import json
def create_db_engine(app: Flask) -> Engine:
"""Create and return an engine instance based on the app's database configuration."""
url = URL(
drivername=app.config['DATABASE_DRIVER'],
username=app.config['DATABASE_USER'],
password=app.config['DATABASE_PASSWORD'],
host=app.config['DATABASE_HOST'],
port=app.config['DATABASE_PORT'],
database=app.config['DATABASE_DB']
)
return create_engine(
url,
json_serializer=lambda obj: json.dumps(obj, default=json_serialize_default)
) | 5a67f294b58e699345f517ae07c80851ae30eca9 | 11,444 |
import math
def wgs84_to_gcj02(lat, lng):
"""
Convert WGS84 coordinates to GCJ-02 (the Mars coordinate system used in China)
:param lng: longitude in the WGS84 coordinate system
:param lat: latitude in the WGS84 coordinate system
:return:
"""
dlat = _transformlat(lng - 105.0, lat - 35.0)
dlng = _transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * pi
magic = math.sin(radlat)
magic = 1 - ee * magic * magic
sqrtmagic = math.sqrt(magic)
dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
dlng = (dlng * 180.0) / (a / sqrtmagic * math.cos(radlat) * pi)
mglat = lat + dlat
mglng = lng + dlng
return [mglat, mglng] | 64f2d8a088a159c5751838ba1fc00824bcc3e91e | 11,445 |
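# wgs84_to_gcj02 above relies on module-level constants and the _transformlat /
# _transformlng helpers defined alongside it; the constants below are the values that
# commonly accompany this conversion (Krasovsky 1940 ellipsoid), shown for reference.
pi = 3.1415926535897932384626  # pi
a = 6378245.0                  # semi-major axis
ee = 0.00669342162296594323    # eccentricity squared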
def _normalize_kwargs(kwargs, kind='patch'):
"""Convert matplotlib keywords from short to long form."""
# Source:
# github.com/tritemio/FRETBursts/blob/fit_experim/fretbursts/burst_plot.py
if kind == 'line2d':
long_names = dict(c='color', ls='linestyle', lw='linewidth',
mec='markeredgecolor', mew='markeredgewidth',
mfc='markerfacecolor', ms='markersize',)
elif kind == 'patch':
long_names = dict(c='color', ls='linestyle', lw='linewidth',
ec='edgecolor', fc='facecolor',)
for short_name in long_names:
if short_name in kwargs:
kwargs[long_names[short_name]] = kwargs.pop(short_name)
return kwargs | 829f4dfd449064f4c1fc92aa8e481364eb997973 | 11,446 |
def fprime_to_jsonable(obj):
"""
Takes an F prime object and converts it to a jsonable type.
:param obj: object to convert
:return: object in jsonable format (can call json.dump(obj))
"""
# Otherwise try and scrape all "get_" getters in a smart way
anonymous = {}
getters = [attr for attr in dir(obj) if attr.startswith("get_")]
for getter in getters:
# Call the get_ functions, and call all non-static methods
try:
func = getattr(obj, getter)
item = func()
# If there is a property named "args" it needs to be handled specifically unless an incoming command
if (
getter == "get_args"
and not "fprime_gds.common.data_types.cmd_data.CmdData"
in str(type(obj))
):
args = []
for arg_spec in item:
arg_dict = {
"name": arg_spec[0],
"description": arg_spec[1],
"value": arg_spec[2].val,
"type": str(arg_spec[2]),
}
if arg_dict["type"] == "Enum":
arg_dict["possible"] = arg_spec[2].keys()
args.append(arg_dict)
# Fill in our special handling
item = args
anonymous[getter.replace("get_", "")] = item
except TypeError:
continue
return anonymous | 899674167b51cd752c7a8aaa9979856218759022 | 11,447 |
import numpy as np
import torch
from torch import Tensor
def subsequent_mask(size: int) -> Tensor:
"""
Mask out subsequent positions (to prevent attending to future positions)
Transformer helper function.
:param size: size of mask (2nd and 3rd dim)
:return: Tensor with 0s and 1s of shape (1, size, size)
"""
mask = np.triu(np.ones((1, size, size)), k=1).astype("uint8")
return torch.from_numpy(mask) == 0 | e065c32164d5250215c846aef39d510f6a93f0cd | 11,448 |
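# Usage sketch for subsequent_mask above: a 3x3 causal mask where True marks the
# positions a query position is allowed to attend to (itself and earlier positions).
print(subsequent_mask(3))
# tensor([[[ True, False, False],
#          [ True,  True, False],
#          [ True,  True,  True]]])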
def process_entries(components):
"""Process top-level entries."""
data = {}
for index, value in enumerate(STRUCTURE):
label = value[0]
mandatory = value[1]
# Raise error if mandatory elements are missing
if index >= len(components):
if mandatory is True:
raise ValueError('UNH header is missing mandatory entry for {label}'.format(label=label))
else:
break
# Process
if len(value) == LENGTH_OF_A_SIMPLE_ENTRY:
data[label] = components[index]
elif len(value) == LENGTH_OF_A_NESTED_ENTRY:
data[label] = process_subentries(components, index)
else:
raise ValueError('unexpected structure')
return data | 344b9aa601b71fd9352fdb412d9dfa7492312d1a | 11,449 |
from typing import Tuple
import numpy as np
from sklearn.preprocessing import Normalizer
def normalize_input_vector(trainX: np.ndarray, testX: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize the input vector
Args:
trainX (np.ndarray): train embedding array.
testX (np.ndarray): test embedding array.
Returns:
np.ndarray, np.ndarray: normalized train and test arrays.
"""
in_encoder = Normalizer(norm='l2')
trainX = in_encoder.transform(trainX)
testX = in_encoder.transform(testX)
return trainX, testX | 34324541e7db302bd46d41f70bd7fdedb6055ae6 | 11,450 |
def update_cache(cache_data, new_data, key):
"""
Add newly collected data to the pre-existing cache data
Args:
cache_data (dict): Pre-existing chip data
new_data (dict): Newly acquired chip data
key (str): The chip UL coordinates
Returns:
"""
if key in cache_data.keys():
cache_data[key].update(new_data[key])
else:
cache_data[key] = new_data[key]
return cache_data | f439f34d1e95ccd69dc10d5f8c06ca20fc869b1e | 11,451 |
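# Usage sketch for update_cache above with made-up chip keys and per-date payloads.
cache = {"(0, 0)": {"2020-01-01": "A"}}
new = {"(0, 0)": {"2020-02-01": "B"}, "(0, 5000)": {"2020-01-01": "C"}}
cache = update_cache(cache, new, "(0, 0)")     # merges into the existing entry
cache = update_cache(cache, new, "(0, 5000)")  # adds a new entry
print(cache)
# {'(0, 0)': {'2020-01-01': 'A', '2020-02-01': 'B'}, '(0, 5000)': {'2020-01-01': 'C'}}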
def status(command, **keys):
"""Run a subprogram capturing it's output and return the exit status."""
return _captured_output(command, **keys).status | f2bb97448a812548dfbdea770db9a43d8c46301a | 11,452 |
def normalize(flow: Tensor) -> Tensor:
"""Re-scales the optical flow vectors such that they correspond to motion on the normalized pixel coordinates
in the range [-1, 1] x [-1, 1].
Args:
flow: the optical flow tensor of shape (B, 2, H, W)
Returns:
The optical flow tensor with flow vectors rescaled to the normalized pixel coordinate system.
"""
# flow: (B, 2, H, W)
assert flow.size(1) == 2
h, w = flow.shape[-2:]
return scale(flow, (2.0 / max(w - 1, 1), 2.0 / max(h - 1, 1))) | 9164686650e0728ba1d99b65e5757b0e12d6c934 | 11,453 |
def to_graph6_bytes(G, nodes=None, header=True):
"""Convert a simple undirected graph to bytes in graph6 format.
Parameters
----------
G : Graph (undirected)
nodes: list or iterable
Nodes are labeled 0...n-1 in the order provided. If None the ordering
given by ``G.nodes()`` is used.
header: bool
If True add '>>graph6<<' bytes to head of data.
Raises
------
NetworkXNotImplemented
If the graph is directed or is a multigraph.
ValueError
If the graph has at least ``2 ** 36`` nodes; the graph6 format
is only defined for graphs of order less than ``2 ** 36``.
Examples
--------
>>> nx.to_graph6_bytes(nx.path_graph(2))
b'>>graph6<<A_\\n'
See Also
--------
from_graph6_bytes, read_graph6, write_graph6_bytes
Notes
-----
The returned bytes end with a newline character.
The format does not support edge or node labels, parallel edges or
self loops. If self loops are present they are silently ignored.
References
----------
.. [1] Graph6 specification
<http://users.cecs.anu.edu.au/~bdm/data/formats.html>
"""
if nodes is not None:
G = G.subgraph(nodes)
H = nx.convert_node_labels_to_integers(G)
nodes = sorted(H.nodes())
return b"".join(_generate_graph6_bytes(H, nodes, header)) | 05617e6ebe6d4a374bfa125e3b5afb1bca3304c1 | 11,454 |
def get_arrays_from_img_label(img, label, img_mode=None):
"""Transform a SimpleITK image and label map into numpy arrays, and
optionally select a channel.
Parameters:
img (SimpleITK.SimpleITK.Image): image
label (SimpleITK.SimpleITK.Image): label map
img_mode (int or None): optional mode channel, so output is 3D
Returns:
(numpy.ndarray, numpy.ndarray): image and label in numpy format
"""
img_np = sitk.GetArrayFromImage(img)
if img_mode is not None:
img_np = img_np[img_mode]
label_np = sitk.GetArrayFromImage(label)
return img_np, label_np.astype(int) | 902cd2cd5f31121e4a57a335a37b42b4caeafb4a | 11,455 |
def _get_error_code(exception):
"""Get the most specific error code for the exception via superclass"""
for exception in exception.mro():
try:
return error_codes[exception]
except KeyError:
continue | c4c9a2ec2f5cf510b6e9a7f6058287e4faf7b5b4 | 11,456 |
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered | 0f1c1207016695ee1440fea214f8f92f8c6398ac | 11,457 |
import json
def read_json_file(file_name: str, encoding: str = "utf-8") -> dict:
"""Reads a json file
:param file_name: path
:param encoding: encoding to use
:return: dict content
"""
with open(file_name, "r", encoding=encoding) as json_file:
return json.load(json_file) | 313aee72b06303dfffd8a2e9f3641d1346329a91 | 11,458 |
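# Usage sketch for read_json_file above: round-trip a small JSON document.
import json
from pathlib import Path
Path("settings.json").write_text(json.dumps({"debug": True, "retries": 3}), encoding="utf-8")
print(read_json_file("settings.json"))  # {'debug': True, 'retries': 3}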
def pretty_print_row(col_full_widths, row, max_field_size):
"""
pretty print a row such that each column is padded to have the widths in the col_full_widths vector
"""
start = "| "
if len(row) == len(col_full_widths):
end = " |"
else:
end = "|"
return start + "|".join(pretty_print_field(full_width, field, max_field_size) for full_width, field in zip(col_full_widths, row)) + end | c94807e4de18e4454e0263e25f4103cd914df2cd | 11,459 |
def _get_data_for_agg(new_svarcube, new_tacube):
"""Reshape data for use in iris aggregator based on two cubes."""
dims_to_collapse = set()
dims_to_collapse.update(new_svarcube.coord_dims('air_pressure'))
untouched_dims = set(range(new_svarcube.ndim)) -\
set(dims_to_collapse)
dims = list(untouched_dims) + list(dims_to_collapse)
unrolled_data = np.moveaxis(new_tacube.data, dims,
range(new_svarcube.ndim))
return unrolled_data | 51c2683e3477528809fcf229c94125020cdfee6d | 11,460 |
def refs_changed_by_other_cc(current_user):
"""
Return dictionary with id of reference and log object changed by other cooperative centers
"""
current_user_cc = current_user.profile.get_attribute('cc')
result_list = defaultdict(list)
# get last references of current user cooperative center
refs_from_cc = Reference.objects.filter(cooperative_center_code=current_user_cc).order_by('-id')[:100]
for reference in refs_from_cc:
# get correct class (source our analytic)
c_type = reference.get_content_type_id
# filter by logs of current reference, change type and made by other users
log_list = LogEntry.objects.filter(object_id=reference.id, content_type=c_type, action_flag=2) \
.exclude(user=current_user).order_by('-id')
if log_list:
# exclude from list all changes that was already reviewed (logreview is created)
log_list = log_list.exclude(logreview__isnull=False)
# create list of log users of same cc
exclude_user_list = []
for log in log_list:
log_user_cc = log.user.profile.get_attribute('cc')
if log_user_cc == current_user_cc:
exclude_user_list.append(log.user)
# exclude from log list users from same cc as current user
if exclude_user_list:
log_list = log_list.exclude(user__in=exclude_user_list)
if log_list:
# group result by id (one line for each reference)
for log in log_list:
result_list[log.object_id] = log
return result_list | aa2012f1efe6eeb796e3871af691b685e3388e67 | 11,461 |
from typing import Dict
def chain_head(head: int, child: int, heads: Dict[int, int]):
"""
>>> chain_head(0, 2, {1: 2, 2: 3, 3: 0})
True
>>> chain_head(2, 0, {1: 2, 2: 3, 3: 0})
False
"""
curr_child = child
while curr_child != -1:
if curr_child == head:
return True
curr_child = heads.get(curr_child, -1)
return False | d786d3dbbdc496a1a7515d9df04fa2a09968b87d | 11,462 |
def UpgradeFile(file_proto):
"""In-place upgrade a FileDescriptorProto from v2[alpha\d] to v3alpha.
Args:
file_proto: v2[alpha\d] FileDescriptorProto message.
"""
# Upgrade package.
file_proto.package = UpgradedType(file_proto.package)
# Upgrade imports.
for n, d in enumerate(file_proto.dependency):
file_proto.dependency[n] = UpgradedPath(d)
# Upgrade comments.
for location in file_proto.source_code_info.location:
location.leading_comments = UpgradedComment(location.leading_comments)
location.trailing_comments = UpgradedComment(location.trailing_comments)
for n, c in enumerate(location.leading_detached_comments):
location.leading_detached_comments[n] = UpgradedComment(c)
# Upgrade services.
for s in file_proto.service:
UpgradeService(s)
# Upgrade messages.
for m in file_proto.message_type:
UpgradeMessage(m)
for e in file_proto.enum_type:
UpgradeEnum(e)
return file_proto | 942646c67bb987757449fbb16a6164008957cf99 | 11,463 |
import ipaddress
import logging
def _get_ip_block(ip_block_str):
""" Convert string into ipaddress.ip_network. Support both IPv4 or IPv6
addresses.
Args:
ip_block_str(string): network address, e.g. "192.168.0.0/24".
Returns:
ip_block(ipaddress.ip_network)
"""
try:
ip_block = ipaddress.ip_network(ip_block_str)
except ValueError:
logging.error("Invalid IP block format: %s", ip_block_str)
return None
return ip_block | b887c615091926ed7ebbbef8870e247348e2aa27 | 11,464 |
def mul_ntt(f_ntt, g_ntt, q):
"""Multiplication of two polynomials (coefficient representation)."""
assert len(f_ntt) == len(g_ntt)
deg = len(f_ntt)
return [(f_ntt[i] * g_ntt[i]) % q for i in range(deg)] | 504838bb812792b6bb83b1d485e4fb3221dec36e | 11,465 |
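# Usage sketch for mul_ntt above: pointwise (NTT-domain) multiplication of two
# length-4 transforms modulo q; q = 12289 is a common NTT-friendly prime.
q = 12289
print(mul_ntt([1, 2, 3, 4], [5, 6, 7, 8], q))  # [5, 12, 21, 32]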
import os
import yaml
import toml
import json
def load_config(filepath: str) -> DictStrAny:
"""Read config file.
The config file can be in yaml, json or toml.
toml is recommended for readability.
"""
ext = os.path.splitext(filepath)[1]
if ext == ".yaml":
with open_read_text(filepath) as fp:
config_dict = yaml.load(fp.read(), Loader=yaml.Loader)
elif ext == ".toml":
config_dict = toml.load(filepath)
elif ext == ".json":
with open_read_text(filepath) as fp:
config_dict = json.load(fp)
else:
raise NotImplementedError(f"Config extension {ext} not supported")
assert isinstance(config_dict, dict)
return config_dict | 80fd4622e00dd5546fb2c3dcbbee6dcedd9601a7 | 11,466 |
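# Usage sketch for load_config above, exercising the TOML branch (which does not need
# the package's open_read_text helper): write a tiny config file and read it back.
from pathlib import Path
Path("config.toml").write_text('name = "demo"\nthreads = 4\n')
print(load_config("config.toml"))  # {'name': 'demo', 'threads': 4}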
def _expected_datatypes(product_type):
"""
Aux function. Contains the most current lists of keys we expect there to be in the different forms of metadata.
"""
if product_type == "SLC":
# Only the datetimes need to be parsed.
expected_dtypes = {
"acquisition_start_utc": "parse_datetime_single",
"acquisition_end_utc": "parse_datetime_single",
"dc_estimate_time_utc": "parse_datetime_single",
"first_pixel_time_utc": "parse_datetime_single",
"state_vector_time_utc": "parse_datetime_vect",
"zerodoppler_start_utc": "parse_datetime_single",
"zerodoppler_end_utc": "parse_datetime_single",
}
elif product_type == "GRD":
# All the fields need to be parsed, so all the datatypes are input.
expected_dtypes = {
"acquisition_end_utc": "parse_datetime_single", # single datetime
"acquisition_mode": str,
"acquisition_prf": float,
"acquisition_start_utc": str,
"ant_elev_corr_flag": bool,
"area_or_point": str,
"avg_scene_height": float,
"azimuth_spacing": float,
"azimuth_look_bandwidth": float,
"azimuth_look_overlap": float,
"azimuth_looks": int,
"azimuth_time_interval": float,
"calibration_factor": float,
"carrier_frequency": float,
"chirp_bandwidth": float,
"chirp_duration": float,
"coord_center": "parse_float_vect", # 1d vect of floats, needs to be parsed
"coord_first_far": "parse_float_vect",
"coord_first_near": "parse_float_vect",
"coord_last_far": "parse_float_vect",
"coord_last_near": "parse_float_vect",
"dc_estimate_coeffs": "parse_float_vect",
"dc_estimate_poly_order": int,
"dc_estimate_time_utc": "parse_datetime_vect", # datetime vector
"dc_reference_pixel_time": float,
"doppler_rate_coeffs": "parse_float_vect",
"doppler_rate_poly_order": int,
"doppler_rate_reference_pixel_time": float,
"gcp_terrain_model": str,
"geo_ref_system": str,
"grsr_coefficients": "parse_float_vect",
"grsr_ground_range_origin": float,
"grsr_poly_order": int,
"grsr_zero_doppler_time": "parse_datetime_single", # single datetime
"heading": float,
"incidence_angle_coefficients": "parse_float_vect",
"incidence_angle_ground_range_origin": float,
"incidence_angle_poly_order": int,
"incidence_angle_zero_doppler_time": "parse_datetime_single", # single datetime
"incidence_center": float,
"incidence_far": float,
"incidence_near": float,
"look_side": str,
"mean_earth_radius": float,
"mean_orbit_altitude": float,
"number_of_azimuth_samples": int,
"number_of_dc_estimations": int,
"number_of_range_samples": int,
"number_of_state_vectors": int,
"orbit_absolute_number": int,
"orbit_direction": str,
"orbit_processing_level": str,
"orbit_relative_number": int,
"orbit_repeat_cycle": int,
"polarization": str,
"posX": "parse_float_vect",
"posY": "parse_float_vect",
"posZ": "parse_float_vect",
"processing_prf": float,
"processing_time": "parse_datetime_single", # single datetime
"processor_version": str,
"product_file": str,
"product_level": str,
"product_name": str,
"product_type": str,
"range_looks": int,
"range_sampling_rate": float,
"range_spacing": float,
"range_spread_comp_flag": bool,
"sample_precision": str,
"satellite_look_angle": str,
"satellite_name": str,
"slant_range_to_first_pixel": float,
"state_vector_time_utc": "parse_datetime_vect", # 1d vect of datetimes, need to be parsed.
"total_processed_bandwidth_azimuth": float,
"velX": "parse_float_vect",
"velY": "parse_float_vect",
"velZ": "parse_float_vect",
"window_function_azimuth": str,
"window_function_range": str,
"zerodoppler_end_utc": "parse_datetime_single", # single datetime
"zerodoppler_start_utc": "parse_datetime_single", # single datetime
}
elif product_type == "xml":
raise NotImplementedError
elif not isinstance(product_type, str):
raise TypeError(
'Did not understand input "product_type", a str was expected but a %s datatype variable was input.'
% type(product_type)
)
else:
raise ValueError(
'Did not understand input "product_type", either "SLC", "GRD" or "xml" was expected but %s was input.'
% product_type
)
return expected_dtypes | ea5a2d78bc5693259955e60847de7a663dcdbf2c | 11,467 |
from typing import List
import copy
import numpy as np
def convert_vecs_to_var(
c_sys: CompositeSystem, vecs: List[np.ndarray], on_para_eq_constraint: bool = True
) -> np.ndarray:
"""converts hs of povm to vec of variables.
Parameters
----------
c_sys : CompositeSystem
CompositeSystem of this state.
vecs : List[np.ndarray]
list of vec of povm elements.
on_para_eq_constraint : bool, optional
uses equal constraints, by default True.
Returns
-------
np.ndarray
list of vec of variables.
"""
var = copy.copy(vecs)
if on_para_eq_constraint:
del var[-1]
var = np.hstack(var)
return var | cea5d80a7213e12113b1bec425b142667f9bb36e | 11,468 |
import glob
import os
import pandas as pd
def convert_CSV_into_df(file_loc):
"""
Generate Panda dataframe from CSV data (rotation and position).
"""
df = pd.DataFrame()
for directory in glob.glob(file_loc): # Selecting all the folders in dataset directory.
d = [] # Empty list.
f = directory.split(os.sep)
for file in glob.glob(directory+"*.csv"): # Reading all the CSV files in dataset directory one by one.
d.append(file)
d = sorted(d) # Ensures rotation and position are together
while len(d)!=0:
            rot = d.pop(0)  # Pop the paired rotation and position CSV paths off the list.
            pos = d.pop(0)
df1 = pd.read_csv(rot, nrows=200) # Read the first 200 rows from rotation and position CSV. value can be 200 or 150.
df2 = pd.read_csv(pos, nrows=200)
            df_merge = merge_rot_pos(df1,df2,f[1]) # Call the merge function to merge the fetched rotation and position data with the class label.
            df = df.append(df_merge,ignore_index=True) # Append the merged data to the pandas dataframe one by one.
return df | 9df958019b08d1b19b4591e5ef0a40f4af573914 | 11,469 |
from datetime import timedelta

def timetable_to_subrip(aligned_timetable):
"""
Converts the aligned timetable into the SubRip format.
Args:
aligned_timetable (list[dict]):
An aligned timetable that is output by the `Aligner` class.
Returns:
str:
Text representing a SubRip file.
"""
# Define a variable to contain the file's contents
file_contents = ""
# Process each block
for i, block in enumerate(aligned_timetable):
# Define a temporary variable to store this caption block
block_text = f"{i + 1}\n" # Every SubRip caption block starts with a number
# Get the start and end time of the block
start_time = timedelta_to_subrip_time(timedelta(seconds=block["start_time"]))
end_time = timedelta_to_subrip_time(timedelta(seconds=block["end_time"]))
# Add the timing line to the block of text
block_text += f"{start_time} --> {end_time}\n"
# Add the line of text from the `block` to the block of text
block_text += block["text"] + "\n\n"
# Add the `block_text` to the `file_contents`
file_contents += block_text
# Return the final file's contents
return file_contents | cddcf115ccb9441966d9c1a0a2b67ba25e00e6da | 11,470 |
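# Hypothetical helper sketch (added example): the snippet above relies on an external
# `timedelta_to_subrip_time`; a minimal implementation of the SubRip "HH:MM:SS,mmm"
# timestamp format, assuming non-negative timedeltas, could look like this.
def timedelta_to_subrip_time(td):
    total_ms = int(td.total_seconds() * 1000)
    hours, rem = divmod(total_ms, 3_600_000)
    minutes, rem = divmod(rem, 60_000)
    seconds, millis = divmod(rem, 1_000)
    return f"{hours:02d}:{minutes:02d}:{seconds:02d},{millis:03d}"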
import torch as T  # assumed tensor backend; the original "from re import T" would break the T.Tensor annotations
def add(a: T.Tensor, b: T.Tensor) -> T.Tensor:
"""
Add tensor a to tensor b using broadcasting.
Args:
a: A tensor
b: A tensor
Returns:
tensor: a + b
"""
return a + b | a555de4341b874163c551fff4b7674af1e60ace2 | 11,471 |
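# Illustrative usage (added example, assuming the torch-style backend above): broadcasting
# adds the row vector to every row of the 2x3 tensor, giving [[2., 3., 4.], [2., 3., 4.]].
print(add(T.ones(2, 3), T.tensor([1.0, 2.0, 3.0])))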
def integrate(que):
"""
check if block nears another block and integrate them
@param que: init blocks
@type que: deque
@return: integrated block
@rtype: list
"""
blocks = []
t1, y, x = que.popleft()
blocks.append([y, x])
if t1 == 2:
blocks.append([y, x + 1])
elif t1 == 3:
blocks.append([y + 1, x])
return blocks | a91235f34e1151b6dd9c6c266658cca86b375278 | 11,472 |
def test_binary_query(cbcsdk_mock):
"""Testing Binary Querying"""
called = False
def post_validate(url, body, **kwargs):
nonlocal called
if not called:
called = True
assert body['expiration_seconds'] == 3600
else:
assert body['expiration_seconds'] == 10
return BINARY_GET_FILE_RESP
sha256 = "00a16c806ff694b64e566886bba5122655eff89b45226cddc8651df7860e4524"
cbcsdk_mock.mock_request("GET", f"/ubs/v1/orgs/test/sha256/{sha256}", BINARY_GET_METADATA_RESP)
api = cbcsdk_mock.api
binary = api.select(Binary, sha256)
assert isinstance(binary, Binary)
cbcsdk_mock.mock_request("GET", f"/ubs/v1/orgs/test/sha256/{sha256}/summary/device", BINARY_GET_DEVICE_SUMMARY_RESP)
summary = binary.summary
cbcsdk_mock.mock_request("POST", "/ubs/v1/orgs/test/file/_download", post_validate)
url = binary.download_url()
assert summary is not None
assert url is not None
url = binary.download_url(expiration_seconds=10)
assert url is not None | 5cfd7c7d1ab714b342e13c33cf896032f8387cde | 11,473 |
import typing
def parse_struct_encoding(struct_encoding: bytes) -> typing.Tuple[typing.Optional[bytes], typing.Sequence[bytes]]:
"""Parse an array type encoding into its name and field type encodings."""
if not struct_encoding.startswith(b"{"):
raise ValueError(f"Missing opening brace in struct type encoding: {struct_encoding!r}")
if not struct_encoding.endswith(b"}"):
raise ValueError(f"Missing closing brace in struct type encoding: {struct_encoding!r}")
try:
# Stop searching for the equals if an opening brace
# (i. e. the start of another structure type encoding)
# is reached.
# This is necessary to correctly handle struct types with no name that contain a struct type with a name,
# such as b"{{foo=ii}}" (an unnamed struct containing a struct named "foo" containing two integers).
try:
end = struct_encoding.index(b"{", 1)
except ValueError:
end = -1
equals_pos = struct_encoding.index(b"=", 1, end)
except ValueError:
name = None
field_type_encoding_string = struct_encoding[1:-1]
else:
name = struct_encoding[1:equals_pos]
field_type_encoding_string = struct_encoding[equals_pos+1:-1]
field_type_encodings = list(split_encodings(field_type_encoding_string))
return name, field_type_encodings | 47455b192049b976dce392b928932d8291d1d008 | 11,474 |
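# Illustrative usage (added example): `split_encodings` is an external helper assumed to split
# a concatenated encoding string into individual type encodings
# (e.g. b"ii" -> [b"i", b"i"], b"{foo=ii}" -> [b"{foo=ii}"]). Under that assumption:
# parse_struct_encoding(b"{foo=ii}")   -> (b"foo", [b"i", b"i"])
# parse_struct_encoding(b"{{foo=ii}}") -> (None, [b"{foo=ii}"])   # unnamed struct wrapping a named one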
from numpy import exp

def _comp_point_coordinate(self):
"""Compute the point coordinates needed to plot the Slot.
Parameters
----------
self : Slot19
A Slot19 object
Returns
-------
point_list: list
A list of Points
"""
Rbo = self.get_Rbo()
    # alpha0 / alpha1 are the half opening / bottom angles used to place the slot points
alpha0 = self.comp_angle_opening() / 2
alpha1 = self.comp_angle_bottom() / 2
# comp point coordinate (in complex)
Z_ = Rbo * exp(1j * 0)
Z0 = Z_ * exp(1j * alpha0)
if self.is_outwards():
Z1 = (Rbo + self.H0) * exp(1j * alpha1)
else: # inward slot
Z1 = (Rbo - self.H0) * exp(1j * alpha1)
# symetry
Z2 = Z1.conjugate()
Z3 = Z0.conjugate()
return [Z3, Z2, Z1, Z0] | c74c28af57ea90f208ac61cd2433376e9c1a47ac | 11,475 |
import inspect
from functools import partial
def getargspec(func):
"""Like inspect.getargspec but supports functools.partial as well."""
if inspect.ismethod(func):
func = func.__func__
if type(func) is partial:
orig_func = func.func
argspec = getargspec(orig_func)
args = list(argspec[0])
defaults = list(argspec[3] or ())
kwoargs = list(argspec[4])
kwodefs = dict(argspec[5] or {})
if func.args:
args = args[len(func.args):]
for arg in func.keywords or ():
try:
i = args.index(arg) - len(args)
del args[i]
try:
del defaults[i]
except IndexError:
pass
except ValueError: # must be a kwonly arg
i = kwoargs.index(arg)
del kwoargs[i]
del kwodefs[arg]
return inspect.FullArgSpec(args, argspec[1], argspec[2],
tuple(defaults), kwoargs,
kwodefs, argspec[6])
while hasattr(func, '__wrapped__'):
func = func.__wrapped__
if not inspect.isfunction(func):
raise TypeError('%r is not a Python function' % func)
return inspect.getfullargspec(func) | df1745daaf7cad09d75937cce399d705ce10de2b | 11,476 |
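# Illustrative usage (added example): positional arguments already bound via functools.partial
# are dropped from the reported signature.
def _f(a, b, c=3):
    return a + b + c

_spec = getargspec(partial(_f, 1))
print(_spec.args, _spec.defaults)  # ['b', 'c'] (3,)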
from typing import Optional
from typing import Union
from typing import Tuple
from typing import Dict
def empirical_kernel_fn(f: ApplyFn,
trace_axes: Axes = (-1,),
diagonal_axes: Axes = ()
) -> EmpiricalKernelFn:
"""Returns a function that computes single draws from NNGP and NT kernels.
Args:
f:
the function whose NTK we are computing. `f` should have the signature
`f(params, inputs[, rng])` and should return an `np.ndarray` outputs.
trace_axes:
output axes to trace the output kernel over, i.e. compute only the trace
of the covariance along the respective pair of axes (one pair for each
axis in `trace_axes`). This allows to save space and compute if you are
only interested in the respective trace, but also improve approximation
accuracy if you know that covariance along these pairs of axes converges
to a `constant * identity matrix` in the limit of interest (e.g.
infinite width or infinite `n_samples`). A common use case is the channel
/ feature / logit axis, since activation slices along such axis are i.i.d.
and the respective covariance along the respective pair of axes indeed
converges to a constant-diagonal matrix in the infinite width or infinite
`n_samples` limit.
Also related to "contracting dimensions" in XLA terms.
(https://www.tensorflow.org/xla/operation_semantics#dotgeneral)
diagonal_axes:
output axes to diagonalize the output kernel over, i.e. compute only the
diagonal of the covariance along the respective pair of axes (one pair for
each axis in `diagonal_axes`). This allows to save space and compute, if
off-diagonal values along these axes are not needed, but also improve
approximation accuracy if their limiting value is known theoretically,
e.g. if they vanish in the limit of interest (e.g. infinite
width or infinite `n_samples`). If you further know that on-diagonal
values converge to the same constant in your limit of interest, you should
specify these axes in `trace_axes` instead, to save even more compute and
gain even more accuracy. A common use case is computing the variance
(instead of covariance) along certain axes.
Also related to "batch dimensions" in XLA terms.
(https://www.tensorflow.org/xla/operation_semantics#dotgeneral)
Returns:
A function to draw a single sample the NNGP and NTK empirical kernels of a
given network `f`.
"""
kernel_fns = {
'nngp': empirical_nngp_fn(f, trace_axes, diagonal_axes),
'ntk': empirical_ntk_fn(f, trace_axes, diagonal_axes)
}
@utils.get_namedtuple('EmpiricalKernel')
def kernel_fn(x1: np.ndarray,
x2: Optional[np.ndarray],
get: Union[None, str, Tuple[str, ...]],
params: PyTree,
**apply_fn_kwargs) -> Dict[str, np.ndarray]:
"""Computes a single sample of the empirical kernel of type `get`.
Args:
x1:
first batch of inputs.
x2:
second batch of inputs. `x2=None` means `x2=x1`. `f(x2)` must have a
matching shape with `f(x1)` on `trace_axes` and `diagonal_axes`.
get:
type of the empirical kernel. `get=None` means `get=("nngp", "ntk")`.
Can be a string (`"nngp"`) or a tuple of strings (`("ntk", "nngp")`).
params:
A `PyTree` of parameters about which we would like to compute the
neural tangent kernel.
**apply_fn_kwargs:
keyword arguments passed to `apply_fn`. `apply_fn_kwargs` will be split
into `apply_fn_kwargs1` and `apply_fn_kwargs2` by the `_split_kwargs`
function which will be passed to `apply_fn`. In particular, the rng key
in `apply_fn_kwargs`, will be split into two different (if `x1!=x2`) or
same (if `x1==x2`) rng keys. See the `_read_key` function for more
details.
Returns:
A single sample of the empirical kernel. The shape is "almost"
`zip(f(x1).shape, f(x2).shape)` except for:
1) `trace_axes` are absent as they are contracted over.
2) `diagonal_axes` are present only once.
All other axes are present twice.
If `get` is a string, returns the requested `np.ndarray`. If `get` is a
tuple, returns an `EmpiricalKernel` namedtuple containing the
requested information.
"""
if get is None:
get = ('nngp', 'ntk')
return {g: kernel_fns[g](x1, x2, params, **apply_fn_kwargs)
for g in get} # pytype: disable=wrong-arg-count
return kernel_fn | 890de1ebdd5f41f5aa257cedf1f03325f11e707c | 11,477 |
import numpy as np
from PIL import Image
# load_img / img_to_array are assumed to come from Keras' image utilities
from tensorflow.keras.preprocessing.image import load_img, img_to_array

def read_image_batch(image_paths, image_size=None, as_list=False):
"""
Reads image array of np.uint8 and shape (num_images, *image_shape)
* image_paths: list of image paths
* image_size: if not None, image is resized
* as_list: if True, return list of images,
else return np.ndarray (default)
:return: np.ndarray or list
"""
images = None
for i, image_path in enumerate(image_paths):
im = load_img(image_path)
if image_size is not None:
im = im.resize(image_size, Image.LANCZOS)
x = img_to_array(im).astype(np.uint8)
        if images is None:
            if not as_list:
                images = np.zeros((len(image_paths),) + x.shape,
                                  dtype=np.uint8)
            else:
                images = []
        if not as_list:
            images[i, ...] = x
        else:
            images.append(x)
return images | 7ee4e01682c5175a6b22db5d48acdb76471d03da | 11,478 |
def dc_vm_backup(request, dc, hostname):
"""
Switch current datacenter and redirect to VM backup page.
"""
dc_switch(request, dc)
return redirect('vm_backup', hostname=hostname) | 168576ac2b3384c1e35a1f972b7362a9ba379582 | 11,479 |
import numpy as np

def compute_total_distance(path):
"""compute total sum of distance travelled from path list"""
path_array = np.diff(np.array(path), axis=0)
segment_distance = np.sqrt((path_array ** 2).sum(axis=1))
return np.sum(segment_distance) | c0c4d0303bdeaafdfda84beb65fd4e60a4ff7436 | 11,480 |
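# Illustrative usage (added example): a single 3-4-5 segment gives a total distance of 5.0.
print(compute_total_distance([(0, 0), (3, 4)]))  # 5.0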
import tensorflow as tf

def get_relative_positions_matrix(length_x, length_y, max_relative_position):
"""Generates matrix of relative positions between inputs."""
range_vec_x = tf.range(length_x)
range_vec_y = tf.range(length_y)
# shape: [length_x, length_y]
distance_mat = tf.expand_dims(range_vec_x, -1) - tf.expand_dims(range_vec_y, 0)
distance_mat_clipped = tf.clip_by_value(distance_mat, -max_relative_position,
max_relative_position)
# Shift values to be >= 0. Each integer still uniquely identifies a relative
# position difference.
final_mat = distance_mat_clipped + max_relative_position
return final_mat | 661cabfbcb3e8566dd8d9ec4e56a71a4d62091fd | 11,481 |
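# Worked example (added): for length_x = length_y = 3 and max_relative_position = 2,
# the raw distances i - j are
#   [[ 0, -1, -2],
#    [ 1,  0, -1],
#    [ 2,  1,  0]]
# and after clipping to [-2, 2] and shifting by +2 the returned matrix is
#   [[2, 1, 0],
#    [3, 2, 1],
#    [4, 3, 2]]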
def func_split_item(k):
""" Computes the expected value and variance of the splitting item random variable S.
Computes the expression (26b) and (26c) in Theorem 8. Remember that r.v. S is the value of index s
such that $\sum_{i=1}^{s-1} w(i) \leq k$ and $\sum_{i=1}^s w(i) > k$.
Args:
k: Int. The capacity of the Knapsack Problem instance.
Returns:
s: float. The expected value of the splitting item random variable.
var_split: float. The variance of the splitting item random variable.
"""
b = 1 + 1 / k # Defining a cumbersome base
    s = (1 + 1 / k) ** k  # Computing the expected value of the split item (equals b ** k)
var_split = (3 + 1 / k) * b ** (k - 1) - b ** (2 * k) # Computing the variance of the split item
return s, var_split | 84ec7f4d76ced51ebdbd28efdc252b5ff3809e79 | 11,482 |
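# Worked check (added example): for k = 10, b = 1.1, so E[S] = 1.1**10 ≈ 2.5937 and
# Var[S] = 3.1 * 1.1**9 - 1.1**20 ≈ 0.5821.
s, var_split = func_split_item(10)
print(round(s, 4), round(var_split, 4))  # 2.5937 0.5821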
def eq(equation: str) -> int:
"""Evaluate the equation."""
code = compile(equation, "<string>", "eval")
return eval(code) | 5e88cad8009dc3dcaf36b216fa217fbadfaa50b3 | 11,483 |
def is_client_in_data(hass: HomeAssistant, unique_id: str) -> bool:
"""Check if ZoneMinder client is in the Home Assistant data."""
prime_config_data(hass, unique_id)
return const.API_CLIENT in hass.data[const.DOMAIN][const.CONFIG_DATA][unique_id] | 740e74b2d77bcf29aba7d2548930a98ec508fec0 | 11,484 |
from datetime import datetime
def parse_date(datestr):
""" Given a date in xport format, return Python date. """
return datetime.strptime(datestr, "%d%b%y:%H:%M:%S") | b802a528418a24300aeba3e33e9df8a268f0a27b | 11,485 |
import numpy as np

def generate_database(m, n, uni_range_low=None, uni_range_high=None, exact_number=False):
"""
- Generate Universe by picking n random integers from low (inclusive) to high (exclusive).
If exact_number, then Universe.size == n
- Generate a Database of m records, over the Universe
"""
# generate Universe
if exact_number:
objects = range(n)
else:
objects = list(np.random.randint(uni_range_low, uni_range_high, size=n))
uni = Universe(objects)
# generate Database
db = uni.random_db(m)
return db | a6fad1192d0c286f7fdb585933b5648f0ee9cb4c | 11,486 |
def interface_style():
    """Return current platform interface style (light or dark)."""
    try:  # currently only works on macOS
        from Foundation import NSUserDefaults as NSUD
    except ImportError:
        return None
    style = NSUD.standardUserDefaults().stringForKey_("AppleInterfaceStyle")
    if style == "Dark":
        return "dark"
    else:
        return "light" | 5c30da34a3003ec52c3f97fb86dbf2ba73101a88 | 11,487
def get_num_forces(cgmodel):
"""
Given a CGModel() class object, this function determines how many forces we are including when evaluating the energy.
:param cgmodel: CGModel() class object
:type cgmodel: class
:returns:
- total_forces (int) - Number of forces in the coarse grained model
:Example:
>>> from foldamers.cg_model.cgmodel import CGModel
>>> cgmodel = CGModel()
>>> total_number_forces = get_num_forces(cgmodel)
"""
total_forces = 0
if cgmodel.include_bond_forces:
total_forces = total_forces + 1
if cgmodel.include_nonbonded_forces:
total_forces = total_forces + 1
if cgmodel.include_bond_angle_forces:
total_forces = total_forces + 1
if cgmodel.include_torsion_forces:
total_forces = total_forces + 1
return total_forces | 5f5b897f1b0def0b858ca82319f9eebfcf75454a | 11,488 |
def cybrowser_dialog(id=None, text=None, title=None, url=None, base_url=DEFAULT_BASE_URL):
"""Launch Cytoscape's internal web browser in a separate window
Provide an id for the window if you want subsequent control of the window e.g., via cybrowser hide.
Args:
id (str): The identifier for the new browser window
text (str): HTML text to initially load into the browser
title (str): Text to be shown in the title bar of the browser window
url (str): The URL the browser should load
base_url (str): Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
and the latest version of the CyREST API supported by this version of py4cytoscape.
Returns:
dict: {'id': id} where ``id`` is the one provided as a parameter to this function
Raises:
requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error
Examples:
>>> cybrowser_dialog(id='Test Window', title='Hello Africa', text='<HTML><HEAD><TITLE>Hello</TITLE></HEAD><BODY>Hello, world!</BODY></HTML>')
{'id': 'Test Window'}
>>> cybrowser_dialog(id='CytoWindow', title='Cytoscape Home Page', url='http://www.cytoscape.org')
{'id': 'CytoWindow'}
See Also:
:meth:`cybrowser_show`, :meth:`cybrowser_hide`
"""
id_str = f' id="{id}"' if id else ''
text_str = f' text="{text}"' if text else ''
title_str = f' title="{title}"' if title else ''
url_str = f' url="{url}"' if url else ''
res = commands.commands_post(f'cybrowser dialog{id_str}{text_str}{title_str}{url_str}', base_url=base_url)
return res | d892fe1a4e48cba8f8561fbe208aec7e2cb4afd7 | 11,489 |
def initialize_stat_dict():
"""Initializes a dictionary which will hold statistics about compositions.
Returns:
A dictionary containing the appropriate fields initialized to 0 or an
empty list.
"""
stat_dict = dict()
for lag in [1, 2, 3]:
stat_dict['autocorrelation' + str(lag)] = []
stat_dict['notes_not_in_key'] = 0
stat_dict['notes_in_motif'] = 0
stat_dict['notes_in_repeated_motif'] = 0
stat_dict['num_starting_tonic'] = 0
stat_dict['num_repeated_notes'] = 0
stat_dict['num_octave_jumps'] = 0
stat_dict['num_fifths'] = 0
stat_dict['num_thirds'] = 0
stat_dict['num_sixths'] = 0
stat_dict['num_seconds'] = 0
stat_dict['num_fourths'] = 0
stat_dict['num_sevenths'] = 0
stat_dict['num_rest_intervals'] = 0
stat_dict['num_special_rest_intervals'] = 0
stat_dict['num_in_key_preferred_intervals'] = 0
stat_dict['num_resolved_leaps'] = 0
stat_dict['num_leap_twice'] = 0
stat_dict['num_high_unique'] = 0
stat_dict['num_low_unique'] = 0
return stat_dict | 42a10b93a960663a42260e1a77d0e8f5a4ff693a | 11,490 |
import nrrd
import nibabel as nib
import numpy as np

def nrrd_to_nii(file):
"""
A function that converts the .nrrd atlas to .nii file format
Parameters
----------
file: tuples
Tuple of coronal, sagittal, and horizontal slices you want to view
Returns
-------
F_im_nii: nibabel.nifti2.Nifti2Image
A nifti file format that is used by various medical imaging techniques.
Notes
-------
From: #from: https://nipy.org/nibabel/coordinate_systems.html
"""
_nrrd = nrrd.read(file)
data = _nrrd[0]
header = _nrrd[1] # noqa: F841
F_im_nii = nib.Nifti2Image(data, np.eye(4))
return F_im_nii | 240e94758ef3f52d4e9a4ebb6f0574ade13a1044 | 11,491 |
def reqeustVerifyAuthhandler(request):
    """
    Requests an identity-verification electronic signature.
    - In the identity verification service, the Token generated by the client organization is the original text the user will electronically sign. For security it must be generated for one-time use.
    - The user signs the one-time token generated by the client organization, and the organization authenticates the user by verifying that signature value.
    """
    try:
        # Kakaocert client organization code; available on the Kakaocert partner site
        clientCode = '020040000001'
        # Identity-verification request object
        requestObj = RequestVerifyAuth(
            # Customer center phone number, shown in the "Customer Center" field of the KakaoTalk verification message
            CallCenterNum = '1600-8536',
            # Verification request expiry time (seconds), maximum 1000; if not verified within this time the request is marked as expired
            Expires_in = 60,
            # Recipient date of birth, format: YYYYMMDD
            ReceiverBirthDay = '19900108',
            # Recipient mobile phone number
            ReceiverHP = '01043245117',
            # Recipient name
            ReceiverName = '정요한',
            # Alias code created by the client organization (available on the partner site)
            # Shown in the "Requesting Organization" field of the KakaoTalk verification message
            # If the alias code is omitted, the client organization's name is shown in the "Requesting Organization" field instead
            SubClientID = '',
            # Additional text for the verification request, shown at the top of the KakaoTalk verification message
            TMSMessage = 'TMSMessage0423',
            # Title of the verification request, shown in the "Request Type" field of the KakaoTalk verification message
            TMSTitle = 'TMSTitle 0423',
            # Whether to skip bank-account real-name verification
            # true : skip the bank-account real-name verification step
            # false : perform the bank-account real-name verification step
            # If the user receiving the KakaoTalk verification message is not a Kakaocert member, they can sign electronically only after registering as a member and completing bank-account real-name verification
            isAllowSimpleRegistYN = False,
            # Whether to verify the recipient's real name
            # true : compare the user's real name obtained by KakaoPay through identity verification with the ReceiverName value
            # false : do not compare the user's real name obtained by KakaoPay through identity verification with the ReceiverName value
            isVerifyNameYN = True,
            # Original token text to be electronically signed
            Token = 'Token Value 2345',
            # PayLoad: payload (memo) value created by the client organization
            PayLoad = 'Payload123',
        )
        result = kakaocertService.requestVerifyAuth(clientCode, requestObj)
        return render(request, 'response.html', {'receiptId': result.receiptId})
    except KakaocertException as KE:
        return render(request, 'exception.html', {'code': KE.code, 'message': KE.message}) | 75bce664f11804ed0abb649ce80ef261ebfd0a34 | 11,492
import os
def _file_name_to_valid_time(bulletin_file_name):
"""Parses valid time from file name.
:param bulletin_file_name: Path to input file (text file in WPC format).
:return: valid_time_unix_sec: Valid time.
"""
_, pathless_file_name = os.path.split(bulletin_file_name)
valid_time_string = pathless_file_name.replace(
PATHLESS_FILE_NAME_PREFIX + '_', '')
return time_conversion.string_to_unix_sec(
valid_time_string, TIME_FORMAT_IN_FILE_NAME) | 5e26fa07507fee51c286f7ff0d0148e957a5eca6 | 11,493 |
import ipaddress
import six
# Django imports assumed by the validator below (exact module paths may differ across Django versions)
from django.core import validators
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
def cidr_validator(value, return_ip_interface=False):
"""Validate IPv4 + optional subnet in CIDR notation"""
try:
if '/' in value:
ipaddr, netmask = value.split('/')
netmask = int(netmask)
else:
ipaddr, netmask = value, 32
if not validators.ipv4_re.match(ipaddr) or not 1 <= netmask <= 32:
raise ValueError
ipi = ipaddress.ip_interface(six.text_type(value))
if ipi.is_reserved:
raise ValueError
except ValueError:
raise ValidationError(_('Enter a valid IPv4 address or IPv4 network.'))
if return_ip_interface:
return ipi | 1c0afd08f3f4f079dc4004400449fe6e27cf0ef7 | 11,494 |
def rh2a(rh, T, e_sat_func=e_sat_gg_water):
"""
Calculate the absolute humidity from relative humidity, air temperature,
and pressure.
Parameters
----------
rh:
Relative humidity in Pa / Pa
T:
Temperature in K
e_sat_func: func, optional
Function to estimate the saturation pressure. E.g. e_sat_gg_water for
water and e_sat_gg_ice for ice.
Returns
-------
float :
absolute humidity [kg / kg]
"""
with np.errstate(divide='ignore', invalid='ignore'):
if np.any(rh > 5):
raise TypeError("rh must not be in %")
e = rh*e_sat_func(T)
a = e / (meteo_si.constants.Rvapor*T)
return a | cabbae69d28a68531cf79dfe645e0065ab34534e | 11,495 |
# Conv2D / Conv2DTranspose are assumed to come from Keras' layers module
from tensorflow.keras.layers import Conv2D, Conv2DTranspose

def encoder_decoder_generator(start_img):
    """Builds a small convolutional encoder-decoder generator on top of `start_img`."""
layer1 = Conv2D(64, kernel_size=4, strides=2, activation='elu', padding='same')(start_img)
layer2 = Conv2D(64, kernel_size=4, strides=2, activation='elu', padding='same')(layer1)
layer3 = Conv2D(64, kernel_size=4, strides=1, activation='elu', padding='same')(layer2)
layer4 = Conv2DTranspose(64, kernel_size=4, strides=2, activation='elu', padding="same")(layer3)
layer5 = Conv2DTranspose(64, kernel_size=4, strides=2, activation='elu', padding="same")(layer4)
layer6 = Conv2D(64, kernel_size=2, strides=1, activation='elu', padding='same')(layer5)
# Make sure that generator output is in the same range as `inputs`
# ie [-1, 1].
net = Conv2D(3, kernel_size=1, activation = 'tanh', padding='same')(layer6)
return net | 9f0ccb7ebae8f0742fdcd464ce9cd072a9099d3e | 11,496 |
def off():
"""
Turns the buzzer off (sets frequency to zero Hz)
Returns:
None
"""
return _rc.writeAttribute(OPTYPE.BUZZER_FREQ, [0]) | 66e2160fed93ba49bf6c39dff0003a05f2875a77 | 11,497 |
import os
import sys
def ircelsos_data_dir():
"""Get the data directory
Adapted from jupyter_core
"""
home = os.path.expanduser('~')
if sys.platform == 'darwin':
return os.path.join(home, 'Library', 'ircelsos')
elif os.name == 'nt':
appdata = os.environ.get('APPDATA', os.path.join(home, '.local', 'share'))
return os.path.join(appdata, 'ircelsos')
else:
# Linux, non-OS X Unix, AIX, etc.
xdg = os.environ.get("XDG_DATA_HOME", os.path.join(home, '.local', 'share'))
return os.path.join(xdg, 'ircelsos') | a8c79f3dde6d87aec8c79bd7d35f6fbab19fccd8 | 11,498 |
def get_shodan_dicts():
"""Build Shodan dictionaries that hold definitions and naming conventions."""
risky_ports = [
"ftp",
"telnet",
"http",
"smtp",
"pop3",
"imap",
"netbios",
"snmp",
"ldap",
"smb",
"sip",
"rdp",
"vnc",
"kerberos",
]
name_dict = {
"ftp": "File Transfer Protocol",
"telnet": "Telnet",
"http": "Hypertext Transfer Protocol",
"smtp": "Simple Mail Transfer Protocol",
"pop3": "Post Office Protocol 3",
"imap": "Internet Message Access Protocol",
"netbios": "Network Basic Input/Output System",
"snmp": "Simple Network Management Protocol",
"ldap": "Lightweight Directory Access Protocol",
"smb": "Server Message Block",
"sip": "Session Initiation Protocol",
"rdp": "Remote Desktop Protocol",
"kerberos": "Kerberos",
}
risk_dict = {
"ftp": "FTP",
"telnet": "Telnet",
"http": "HTTP",
"smtp": "SMTP",
"pop3": "POP3",
"imap": "IMAP",
"netbios": "NetBIOS",
"snmp": "SNMP",
"ldap": "LDAP",
"smb": "SMB",
"sip": "SIP",
"rdp": "RDP",
"vnc": "VNC",
"kerberos": "Kerberos",
}
# Create dictionaries for CVSSv2 vector definitions using https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator
av_dict = {
"NETWORK": "A vulnerability exploitable with network access means the vulnerable software is bound to the network stack and the attacker does not require local network access or local access. Such a vulnerability is often termed “remotely exploitable”. An example of a network attack is an RPC buffer overflow.",
"ADJACENT_NETWORK": "A vulnerability exploitable with adjacent network access requires the attacker to have access to either the broadcast or collision domain of the vulnerable software. Examples of local networks include local IP subnet, Bluetooth, IEEE 802.11, and local Ethernet segment.",
"LOCAL": "A vulnerability exploitable with only local access requires the attacker to have either physical access to the vulnerable system or a local (shell) account. Examples of locally exploitable vulnerabilities are peripheral attacks such as Firewire/USB DMA attacks, and local privilege escalations (e.g., sudo).",
}
ac_dict = {
"LOW": "Specialized access conditions or extenuating circumstances do not exist. The following are examples: The affected product typically requires access to a wide range of systems and users, possibly anonymous and untrusted (e.g., Internet-facing web or mail server). The affected configuration is default or ubiquitous. The attack can be performed manually and requires little skill or additional information gathering. The 'race condition' is a lazy one (i.e., it is technically a race but easily winnable).",
"MEDIUM": "The access conditions are somewhat specialized; the following are examples: The attacking party is limited to a group of systems or users at some level of authorization, possibly untrusted. Some information must be gathered before a successful attack can be launched. The affected configuration is non-default, and is not commonly configured (e.g., a vulnerability present when a server performs user account authentication via a specific scheme, but not present for another authentication scheme). The attack requires a small amount of social engineering that might occasionally fool cautious users (e.g., phishing attacks that modify a web browser’s status bar to show a false link, having to be on someone’s “buddy” list before sending an IM exploit).",
"HIGH": "Specialized access conditions exist. For example, in most configurations, the attacking party must already have elevated privileges or spoof additional systems in addition to the attacking system (e.g., DNS hijacking). The attack depends on social engineering methods that would be easily detected by knowledgeable people. For example, the victim must perform several suspicious or atypical actions. The vulnerable configuration is seen very rarely in practice. If a race condition exists, the window is very narrow.",
}
ci_dict = {
"NONE": "There is no impact to the confidentiality of the system",
"PARTIAL": "There is considerable informational disclosure. Access to some system files is possible, but the attacker does not have control over what is obtained, or the scope of the loss is constrained. An example is a vulnerability that divulges only certain tables in a database.",
"COMPLETE": "There is total information disclosure, resulting in all system files being revealed. The attacker is able to read all of the system's data (memory, files, etc.).",
}
return risky_ports, name_dict, risk_dict, av_dict, ac_dict, ci_dict | 2aace61b8339db848e95758fcb9f30856915d6fc | 11,499 |