content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
def arglast(arr, convert=True, check=True):
"""Return the index of the last true element of the given array.
"""
if convert:
arr = np.asarray(arr).astype(bool)
if np.ndim(arr) != 1:
raise ValueError("`arglast` not yet supported for ND != 1 arrays!")
sel = arr.size - 1
sel = sel - np.argmax(arr[::-1])
if check and (not arr[sel]):
return None
return sel
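A minimal usage sketch, assuming numpy is imported as np and the arglast function above is in scope: it returns the index of the last truthy element, or None when nothing is true.
import numpy as np

# Illustrative calls against arglast defined above.
mask = np.array([0, 1, 0, 1, 0])
print(arglast(mask))          # 3 -- index of the last truthy element
print(arglast(np.zeros(4)))   # None -- no element is true, so the check kicks in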
|
b4c6424523a5a33a926b7530e6a6510fd813a42a
| 31,563 |
def number_formatter(number, pos=None):
    """Convert a number into a human readable format (e.g. 1200 -> '1.2K')."""
    magnitude = 0
    # Scale down by powers of 1000 and pick the matching suffix.
    while abs(number) >= 1000 and magnitude < 5:
        magnitude += 1
        number /= 1000.0
    return '%.1f%s' % (number, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
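A quick sanity check of the formatter above (example values chosen purely for illustration):
# Illustrative calls against number_formatter defined above.
print(number_formatter(950))        # '950.0'
print(number_formatter(1234567))    # '1.2M'
print(number_formatter(-2500))      # '-2.5K'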
|
a9cfd3482b3a2187b8d18d6e21268e71b69ae2f2
| 31,564 |
from pathlib import Path
import shutil
def simcore_tree(cookies, tmpdir):
"""
bakes cookie, moves it into a osparc-simcore tree structure with
all the stub in place
"""
result = cookies.bake(
extra_context={"project_slug": PROJECT_SLUG, "github_username": "pcrespov"}
)
workdir = Path(result.project).resolve()
template_dir = workdir / "_osparc-simcore-stub"
simcore_dir = tmpdir / "osparc-simcore"
template_dir.rename(simcore_dir)
service_dir = simcore_dir / "services/{}".format(PROJECT_SLUG)
shutil.rmtree(service_dir)
workdir.rename(service_dir)
return (simcore_dir, service_dir)
|
f9889c1b530145eb94cc7ca3547d90759218b1dc
| 31,565 |
def calc_density(temp, pressure, gas_constant):
"""
Calculate density via gas equation.
Parameters
----------
temp : array_like
        temperature in K
pressure : array_like
(partial) pressure in Pa
gas_constant: array_like
        specific gas constant in m^2/(s^2*K)
Returns
-------
out : ndarray
density in kg/m^3
"""
return pressure / (temp * gas_constant)
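As a worked example (the numbers below are standard-atmosphere figures used here for illustration, not values from the source):
# Dry air near sea level: p = 101325 Pa, T = 288.15 K, R = 287.06 J/(kg*K)
rho = calc_density(temp=288.15, pressure=101325.0, gas_constant=287.06)
print(rho)   # ~1.225 kg/m^3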
|
1e492f9fb512b69585035ce2f784d8cf8fd1edb0
| 31,566 |
def address(addr, label=None):
"""Discover the proper class and return instance for a given Oscillate address.
:param addr: the address as a string-like object
:param label: a label for the address (defaults to `None`)
:rtype: :class:`Address`, :class:`SubAddress` or :class:`IntegratedAddress`
"""
addr = str(addr)
if _ADDR_REGEX.match(addr):
netbyte = bytearray(unhexlify(base58.decode(addr)))[0]
if netbyte in Address._valid_netbytes:
return Address(addr, label=label)
elif netbyte in SubAddress._valid_netbytes:
return SubAddress(addr, label=label)
raise ValueError("Invalid address netbyte {nb:x}. Allowed values are: {allowed}".format(
nb=netbyte,
allowed=", ".join(map(
lambda b: '%02x' % b,
sorted(Address._valid_netbytes + SubAddress._valid_netbytes)))))
elif _IADDR_REGEX.match(addr):
return IntegratedAddress(addr)
raise ValueError("Address must be either 95 or 106 characters long base58-encoded string, "
"is {addr} ({len} chars length)".format(addr=addr, len=len(addr)))
|
13b1e24abc7303395ff9bbe82787bc67a4d377d6
| 31,569 |
def retrieve_molecule_number(pdb, resname):
    """
    Identify the molecule number of a residue based on the TER records.
    """
    count = 0
    molecule_number = None
    with open(pdb, 'r') as x:
        lines = x.readlines()
        for i in lines:
            fields = i.split()
            # Skip blank lines and guard against short records such as bare "TER" or "END".
            if not fields:
                continue
            if fields[0] == 'TER':
                count += 1
            if len(fields) > 3 and fields[3] == resname:
                molecule_number = count + 1
                break
    return molecule_number
|
8342d1f5164707185eb1995cedd065a4f3824401
| 31,570 |
import ctypes
import ctypes.wintypes
import io
def _windows_write_string(s, out, skip_errors=True):
""" Returns True if the string was written using special methods,
False if it has yet to be written out."""
# Adapted from http://stackoverflow.com/a/3259271/35070
WIN_OUTPUT_IDS = {
1: -11,
2: -12,
}
try:
fileno = out.fileno()
except AttributeError:
# If the output stream doesn't have a fileno, it's virtual
return False
except io.UnsupportedOperation:
# Some strange Windows pseudo files?
return False
if fileno not in WIN_OUTPUT_IDS:
return False
GetStdHandle = ctypes.WINFUNCTYPE(
ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
('GetStdHandle', ctypes.windll.kernel32))
h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
WriteConsoleW = ctypes.WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
written = ctypes.wintypes.DWORD(0)
GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(
('GetFileType', ctypes.windll.kernel32))
FILE_TYPE_CHAR = 0x0002
FILE_TYPE_REMOTE = 0x8000
GetConsoleMode = ctypes.WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
ctypes.POINTER(ctypes.wintypes.DWORD))(
('GetConsoleMode', ctypes.windll.kernel32))
INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
def not_a_console(handle):
if handle == INVALID_HANDLE_VALUE or handle is None:
return True
return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
if not_a_console(h):
return False
def next_nonbmp_pos(s):
try:
return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
except StopIteration:
return len(s)
while s:
count = min(next_nonbmp_pos(s), 1024)
ret = WriteConsoleW(
h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            if skip_errors:
                # Nothing was written and nothing changed; bail out instead of looping forever.
                break
            else:
                raise OSError('Failed to write string')
if not count: # We just wrote a non-BMP character
assert written.value == 2
s = s[1:]
else:
assert written.value > 0
s = s[written.value:]
return True
|
471fd456769e5306525bdd44d41158d2a3b024de
| 31,571 |
def in_relative_frame(
pos_abs: np.ndarray,
rotation_matrix: np.ndarray,
translation: Point3D,
) -> np.ndarray:
"""
Inverse transform of `in_absolute_frame`.
"""
pos_relative = pos_abs + translation
pos_relative = pos_relative @ rotation_matrix
return pos_relative
|
5f7789d7b5ff27047d6bb2df61ba7c841dc05b95
| 31,572 |
def check_url_namespace(app_configs=None, **kwargs):
"""Check NENS_AUTH_URL_NAMESPACE ends with a semicolon"""
namespace = settings.NENS_AUTH_URL_NAMESPACE
if not isinstance(namespace, str):
return [Error("The setting NENS_AUTH_URL_NAMESPACE should be a string")]
if namespace != "" and not namespace.endswith(":"):
return [
Error("The setting NENS_AUTH_URL_NAMESPACE should end with a " "colon (:).")
]
return []
|
e97574a60083cb7a61dbf7a9f9d4c335d68577b5
| 31,573 |
def get_exif_data(fn):
"""Returns a dictionary from the exif data of an PIL Image item. Also converts the GPS Tags"""
exif_data = {}
i = Image.open(fn)
info = i._getexif()
if info:
for tag, value in info.items():
decoded = TAGS.get(tag, tag)
if decoded == "GPSInfo":
gps_data = {}
for t in value:
sub_decoded = GPSTAGS.get(t, t)
gps_data[sub_decoded] = value[t]
exif_data[decoded] = gps_data
else:
exif_data[decoded] = value
return exif_data
|
b6a97ed68753bb3e7ccb19a242c66465258ae602
| 31,574 |
def _setup_modules(module_cls, variable_reparameterizing_predicate,
module_reparameterizing_predicate, module_init_kwargs):
"""Return `module_cls` instances for reparameterization and for reference."""
# Module to be tested.
module_to_reparameterize = _init_module(module_cls, module_init_kwargs)
# Replacement parameters.
paths, variables, replacement_variables = get_params_and_replacements(
module_to_reparameterize,
variable_reparameterizing_predicate,
module_reparameterizing_predicate,
)
# Reference modules.
before_reference_module = _init_reference_module(module_cls,
module_init_kwargs, paths,
variables)
after_reference_module = _init_reference_module(module_cls,
module_init_kwargs, paths,
replacement_variables)
return (
module_to_reparameterize,
before_reference_module,
after_reference_module,
variables,
replacement_variables,
)
|
367ecae71835044055765ace56f6c0540e9a44ba
| 31,576 |
def external_compatible(request, id):
""" Increment view counter for a compatible view """
increment_hit_counter_task.delay(id, 'compatible_count')
return json_success_response()
|
c82536cdebb2cf620394008d3ff1df13a87a9715
| 31,577 |
def lowpass_xr(da,cutoff,**kw):
"""
    Like lowpass(), but da is a DataArray with a time coordinate,
and cutoff is a timedelta64.
"""
data=da.values
time_secs=(da.time.values-da.time.values[0])/np.timedelta64(1,'s')
cutoff_secs=cutoff/np.timedelta64(1,'s')
axis=da.get_axis_num('time')
data_lp=lowpass(data,time_secs,cutoff_secs,axis=axis,**kw)
da_lp=da.copy(deep=True)
da_lp.values[:]=data_lp
da_lp.attrs['comment']="lowpass at %g seconds"%(cutoff_secs)
return da_lp
|
0628d63a94c3614a396791c0b5abd52cb3590e04
| 31,578 |
def _calc_zonal_correlation(dat_tau, dat_pr, dat_tas, dat_lats, fig_config):
"""
Calculate zonal partial correlations for sliding windows.
Argument:
--------
dat_tau - data of global tau
dat_pr - precipitation
dat_tas - air temperature
dat_lats - latitude of the given model
fig_config - figure/diagnostic configurations
Return:
------
corr_dat zonal correlations
"""
# get the interval of latitude and create array for partial correlation
lat_int = abs(dat_lats[1] - dat_lats[0])
corr_dat = np.ones((np.shape(dat_tau)[0], 2)) * np.nan
# get the size of the sliding window based on the bandsize in degrees
window_size = round(fig_config['bandsize'] / (lat_int * 2.))
dat_tau, dat_pr, dat_tas = _apply_common_mask(dat_tau, dat_pr, dat_tas)
# minimum 1/8 of the given window has valid data points
min_points = np.shape(dat_tau)[1] * fig_config['min_points_frac']
for lat_index in range(len(corr_dat)):
        istart = int(max(0, lat_index - window_size))
        iend = int(min(np.size(dat_lats), lat_index + window_size + 1))
dat_tau_zone = dat_tau[istart:iend, :]
dat_pr_zone = dat_pr[istart:iend, :]
dat_tas_zone = dat_tas[istart:iend, :]
dat_x = np.ma.masked_invalid(dat_tau_zone).compressed().flatten()
dat_y = np.ma.masked_invalid(dat_pr_zone).compressed().flatten()
dat_z = np.ma.masked_invalid(dat_tas_zone).compressed().flatten()
num_valid_points = sum(~np.isnan(dat_x + dat_y + dat_z))
if num_valid_points > min_points:
corr_dat[lat_index, 1] = partial_corr(
np.vstack((dat_x, dat_y, dat_z)).T, fig_config)
corr_dat[lat_index, 0] = partial_corr(
np.vstack((dat_x, dat_z, dat_y)).T, fig_config)
return corr_dat
|
f596536bde5ded45da2ef44e388df19d60da2c75
| 31,579 |
def is_unary(string):
"""
Return true if the string is a defined unary mathematical
operator function.
"""
return string in mathwords.UNARY_FUNCTIONS
|
914785cb757f155bc13f6e1ddcb4f9b41f2dd1a2
| 31,580 |
def GetBucketAndRemotePath(revision, builder_type=PERF_BUILDER,
target_arch='ia32', target_platform='chromium',
deps_patch_sha=None):
"""Returns the location where a build archive is expected to be.
Args:
revision: Revision string, e.g. a git commit hash or SVN revision.
builder_type: Type of build archive.
target_arch: Architecture, e.g. "ia32".
target_platform: Platform name, e.g. "chromium" or "android".
deps_patch_sha: SHA1 hash which identifies a particular combination of
custom revisions for dependency repositories.
Returns:
A pair of strings (bucket, path), where the archive is expected to be.
"""
build_archive = BuildArchive.Create(
builder_type, target_arch=target_arch, target_platform=target_platform)
bucket = build_archive.BucketName()
remote_path = build_archive.FilePath(revision, deps_patch_sha=deps_patch_sha)
return bucket, remote_path
|
30ced6c37d42d2b531ae6ecafc4066c59fb8f6e4
| 31,581 |
def cutmix_padding(h, w):
"""Returns image mask for CutMix.
Taken from (https://github.com/google/edward2/blob/master/experimental
/marginalization_mixup/data_utils.py#L367)
Args:
h: image height.
w: image width.
"""
r_x = tf.random.uniform([], 0, w, tf.int32)
r_y = tf.random.uniform([], 0, h, tf.int32)
# Beta dist in paper, but they used Beta(1,1) which is just uniform.
image1_proportion = tf.random.uniform([])
patch_length_ratio = tf.math.sqrt(1 - image1_proportion)
r_w = tf.cast(patch_length_ratio * tf.cast(w, tf.float32), tf.int32)
r_h = tf.cast(patch_length_ratio * tf.cast(h, tf.float32), tf.int32)
bbx1 = tf.clip_by_value(tf.cast(r_x - r_w // 2, tf.int32), 0, w)
bby1 = tf.clip_by_value(tf.cast(r_y - r_h // 2, tf.int32), 0, h)
bbx2 = tf.clip_by_value(tf.cast(r_x + r_w // 2, tf.int32), 0, w)
bby2 = tf.clip_by_value(tf.cast(r_y + r_h // 2, tf.int32), 0, h)
# Create the binary mask.
pad_left = bbx1
pad_top = bby1
pad_right = tf.maximum(w - bbx2, 0)
pad_bottom = tf.maximum(h - bby2, 0)
r_h = bby2 - bby1
r_w = bbx2 - bbx1
mask = tf.pad(
tf.ones((r_h, r_w)),
paddings=[[pad_top, pad_bottom], [pad_left, pad_right]],
mode='CONSTANT',
constant_values=0)
mask.set_shape((h, w))
return mask[..., None] # Add channel dim.
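A hypothetical way to apply the returned mask to mix two images; the image tensors below are random placeholders used only for illustration:
import tensorflow as tf

img1 = tf.random.uniform((32, 32, 3))
img2 = tf.random.uniform((32, 32, 3))
mask = cutmix_padding(32, 32)             # shape (32, 32, 1), values in {0, 1}
mixed = mask * img1 + (1.0 - mask) * img2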
|
adf627452ebe25b929cd78242cca382f6a62116d
| 31,582 |
import math
def compute_star_verts(n_points, out_radius, in_radius):
"""Vertices for a star. `n_points` controls the number of points;
`out_radius` controls distance from points to centre; `in_radius` controls
radius from "depressions" (the things between points) to centre."""
assert n_points >= 3
vertices = []
out_vertex = pm.vec2d.Vec2d(0, out_radius)
in_vertex = pm.vec2d.Vec2d(0, in_radius)
for point_num in range(n_points):
out_angle = point_num * 2 * math.pi / n_points
vertices.append(out_vertex.rotated(out_angle))
in_angle = (2 * point_num + 1) * math.pi / n_points
vertices.append(in_vertex.rotated(in_angle))
vertices = [(v.x, v.y) for v in vertices]
return vertices
|
97919efbb501dd41d5e6ee10e27c942167142b24
| 31,583 |
def create_ordering_dict(iterable):
    """Example: converts ['None', 'ResFiles'] to {'None': 0, 'ResFiles': 1}"""
    return {value: index for index, value in enumerate(iterable)}
|
389a0875f1542327e4aa5d038988d45a74b61937
| 31,584 |
def sparse2tuple(mx):
"""Convert sparse matrix to tuple representation.
ref: https://github.com/tkipf/gcn/blob/master/gcn/utils.py
"""
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
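For example, assuming scipy.sparse is imported as sp and numpy as np (as the function itself assumes):
import numpy as np
import scipy.sparse as sp

mx = sp.csr_matrix(np.array([[0, 2], [3, 0]]))
coords, values, shape = sparse2tuple(mx)
# coords -> [[0, 1], [1, 0]], values -> [2, 3], shape -> (2, 2)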
|
a20b12c3e0c55c2d4739156f731e8db9e2d66feb
| 31,585 |
def correct_predicted(y_true, y_pred):
""" Compare the ground truth and predict labels,
Parameters
----------
y_true: an array like for the true labels
y_pred: an array like for the predicted labels
Returns
-------
    correct_predicted_idx: a list of indices of correctly predicted samples
    correct_score: the accuracy score
H. J. @ 2018-12-18
"""
    if len(y_true) != len(y_pred):
        raise ValueError("Dimension mismatch between y_true and y_pred")
correct_predicted_idx = []
for idx in range(len(y_true)):
if y_pred[idx] == y_true[idx]:
correct_predicted_idx.append(idx)
correct_score = accuracy_score(y_true, y_pred)
return correct_predicted_idx, correct_score
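A small illustration, assuming the snippet's module already imports accuracy_score from sklearn.metrics (the function relies on it):
from sklearn.metrics import accuracy_score  # dependency assumed by correct_predicted

y_true = [0, 1, 1, 0]
y_pred = [0, 1, 0, 0]
idx, score = correct_predicted(y_true, y_pred)
# idx -> [0, 1, 3], score -> 0.75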
|
3fae4287cb555b7258adde989ef4ef01cfb949ce
| 31,586 |
def coord_image_to_trimesh(coord_img, validity_mask=None, batch_shape=None, image_dims=None, dev_str=None):
"""Create trimesh, with vertices and triangle indices, from co-ordinate image.
Parameters
----------
coord_img
Image of co-ordinates *[batch_shape,h,w,3]*
validity_mask
Boolean mask of where the coord image contains valid values
*[batch_shape,h,w,1]* (Default value = None)
batch_shape
Shape of batch. Inferred from inputs if None. (Default value = None)
image_dims
        Image dimensions. Inferred from inputs if None. (Default value = None)
dev_str
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
Same as x if None. (Default value = None)
Returns
-------
ret
        Vertices *[batch_shape,(hxw),3]* and Trimesh indices *[batch_shape,n,3]*
"""
if dev_str is None:
dev_str = _ivy.dev_str(coord_img)
if batch_shape is None:
batch_shape = _ivy.shape(coord_img)[:-3]
if image_dims is None:
image_dims = _ivy.shape(coord_img)[-3:-1]
# shapes as lists
batch_shape = list(batch_shape)
image_dims = list(image_dims)
# BS x (HxW) x 3
vertices = _ivy.reshape(coord_img, batch_shape + [image_dims[0] * image_dims[1], 3])
if validity_mask is not None:
# BS x H-1 x W-1 x 1
t00_validity = validity_mask[..., 0:image_dims[0] - 1, 0:image_dims[1] - 1, :]
t01_validity = validity_mask[..., 0:image_dims[0] - 1, 1:image_dims[1], :]
t02_validity = validity_mask[..., 1:image_dims[0], 0:image_dims[1] - 1, :]
t10_validity = validity_mask[..., 1:image_dims[0], 1:image_dims[1], :]
t11_validity = t01_validity
t12_validity = t02_validity
# BS x H-1 x W-1 x 1
t0_validity = _ivy.logical_and(t00_validity, _ivy.logical_and(t01_validity, t02_validity))
t1_validity = _ivy.logical_and(t10_validity, _ivy.logical_and(t11_validity, t12_validity))
# BS x (H-1xW-1)
t0_validity_flat = _ivy.reshape(t0_validity, batch_shape + [-1])
t1_validity_flat = _ivy.reshape(t1_validity, batch_shape + [-1])
# BS x 2x(H-1xW-1)
trimesh_index_validity = _ivy.concatenate((t0_validity_flat, t1_validity_flat), -1)
# BS x N
trimesh_valid_indices = _ivy.indices_where(trimesh_index_validity)
# BS x 2x(H-1xW-1) x 3
all_trimesh_indices = create_trimesh_indices_for_image(batch_shape, image_dims, dev_str)
# BS x N x 3
trimesh_indices = _ivy.gather_nd(all_trimesh_indices, trimesh_valid_indices)
else:
# BS x N=2x(H-1xW-1) x 3
trimesh_indices = create_trimesh_indices_for_image(batch_shape, image_dims)
# BS x (HxW) x 3, BS x N x 3
return vertices, trimesh_indices
|
8719498ddf24e67ed2ea245d73ac796662b5d08e
| 31,587 |
def expand_db_html(html, for_editor=False):
"""
Expand database-representation HTML into proper HTML usable in either
templates or the rich text editor
"""
def replace_a_tag(m):
attrs = extract_attrs(m.group(1))
if 'linktype' not in attrs:
# return unchanged
return m.group(0)
handler = get_link_handler(attrs['linktype'])
return handler.expand_db_attributes(attrs, for_editor)
def replace_embed_tag(m):
attrs = extract_attrs(m.group(1))
handler = get_embed_handler(attrs['embedtype'])
return handler.expand_db_attributes(attrs, for_editor)
html = FIND_A_TAG.sub(replace_a_tag, html)
html = FIND_EMBED_TAG.sub(replace_embed_tag, html)
return html
|
2e01f4aff7bc939fac11c031cde760351322d564
| 31,588 |
def hungarian(matrx):
"""Runs the Hungarian Algorithm on a given matrix and returns the optimal matching with potentials. Produces intermediate images while executing."""
frames = []
# Step 1: Prep matrix, get size
matrx = np.array(matrx)
size = matrx.shape[0]
# Step 2: Generate trivial potentials
rpotentials = []
cpotentials = [0 for i in range(size)]
for i in range(len(matrx)):
row = matrx[i]
rpotentials.append(max(row))
# Step 3: Initialize alternating tree
matching = []
S = {0}
T = set()
tree_root = Node(0)
x_nodes = {0: tree_root}
frames.append([rpotentials.copy(), cpotentials.copy(), get_paths(x_nodes), matching.copy(), INITIAL_STAGE])
# Create helper functions
def neighbours(wset):
"""Finds all firms in equality graph with workers in wset."""
result = []
for x in wset:
# get row of firms for worker x
nbs = matrx[x, :]
for y in range(len(nbs)):
# check for equality
if nbs[y] == rpotentials[x] + cpotentials[y]:
result.append([x, y])
return result
def update_potentials():
"""Find the smallest difference between treed workers and untreed firms
and use it to update potentials."""
# when using functions in functions, if modifying variables, call nonlocal
nonlocal rpotentials, cpotentials
big = np.inf
args = None
# iterate over relevant pairs
for dx in S:
for dy in set(range(size)) - T:
# find the difference and check if its smaller than any we found before
weight = matrx[dx, dy]
alpha = rpotentials[dx] + cpotentials[dy] - weight
if alpha < big:
big = alpha
args = [dx, dy]
# apply difference to potentials as needed
for dx in S:
rpotentials[dx] -= big
for dy in T:
cpotentials[dy] += big
return big, S, T, args
# Step 4: Loop while our matching is too small
while len(matching) != size:
# Step A: Compute neighbours in equality graph
NS = neighbours(S)
frames.append([rpotentials.copy(), cpotentials.copy(), get_paths(x_nodes), matching.copy(), NEIGHBOUR_STAGE, NS])
if set([b[1] for b in NS]) == T:
# Step B: If all firms are in the tree, update potentials to get a new one
alpha, ds, dt, args = update_potentials()
NS = neighbours(S)
frames.append([rpotentials.copy(), cpotentials.copy(), get_paths(x_nodes), matching.copy(), UPDATE_STAGE, alpha, ds.copy(), dt.copy(), args])
frames.append([rpotentials.copy(), cpotentials.copy(), get_paths(x_nodes), matching.copy(), NEIGHBOUR_STAGE, NS])
# get the untreed firm
pair = next(n for n in NS if n[1] not in T)
if pair[1] not in [m[1] for m in matching]:
# Step D: Firm is not matched so add it to matching
thecopy = matching.copy()
frames.append([rpotentials.copy(), cpotentials.copy(), get_paths(x_nodes), thecopy, MATCHING_STAGE, pair, thecopy])
matching.append(pair)
# Step E: Swap the alternating path in our alternating tree attached to the worker we matched
source = x_nodes[pair[0]]
matched = 1
while source.parent != None:
above = source.parent
if matched:
# if previously matched, this should be removed from matching
matching.remove([source.val, above.val])
else:
# if previous was a remove, this is a match
matching.append([above.val, source.val])
matched = 1 - matched
source = above
frames.append([rpotentials.copy(), cpotentials.copy(), get_paths(x_nodes), matching.copy(), FLIPPING_STAGE, pair, thecopy])
# Step F: Destroy the tree, go to Step 4 to check completion, and possibly go to Step A
free = list(set(range(size)) - set([m[0] for m in matching]))
if len(free):
tree_root = Node(free[0])
x_nodes = {free[0]: tree_root}
S = {free[0]}
T = set()
frames.append([rpotentials.copy(), cpotentials.copy(),get_paths(x_nodes), matching.copy(), RESET_STAGE])
else:
x_nodes = {}
S = set()
T = set()
else:
# Step C: Firm is matched so add it to the tree and go back to Step A
matching_x = next(m[0] for m in matching if m[1] == pair[1])
S.add(matching_x)
T.add(pair[1])
source = x_nodes[pair[0]]
y_node = Node(pair[1], source)
x_node = Node(matching_x, y_node)
x_nodes[matching_x] = x_node
frames.append([rpotentials.copy(), cpotentials.copy(), get_paths(x_nodes), matching.copy(), EXPANSION_STAGE])
revenues = [matrx[m[0], m[1]] for m in matching]
class Result:
"""A simple response object."""
def __init__(self, match, revenues, row_weights, col_weights, revenue_sum, result, matrix):
self.match = match
self.revenues = revenues
self.row_weights = row_weights
self.col_weights = col_weights
self.revenue_sum = revenue_sum
self.frames = process_frames(result, matrix)
def __str__(self):
size = len(self.match)
maxlen = max(len(str(max(self.revenues))), len(str(min(self.revenues))))
baselist = [[" "*maxlen for i in range(size)] for j in range(size)]
for i in range(size):
entry = self.match[i]
baselist[entry[0]][entry[1]] = str(self.revenues[i]).rjust(maxlen)
formatted_list = '\n'.join([str(row) for row in baselist])
return f"Matching:\n{formatted_list}\n\nRow Potentials: {self.row_weights}\nColumn Potentials: {self.col_weights}"
frames.append([rpotentials.copy(), cpotentials.copy(), get_paths(x_nodes), matching.copy(), EXIT_STAGE])
return Result(matching, revenues, rpotentials, cpotentials, sum(revenues), frames, matrx)
|
dc4dffa819ed836a8e4aaffbe23b49b95101bffe
| 31,589 |
def open_spreadsheet_from_args(google_client: gspread.Client, args):
"""
Attempt to open the Google Sheets spreadsheet specified by the given
command line arguments.
"""
if args.spreadsheet_id:
logger.info("Opening spreadsheet by ID '{}'".format(args.spreadsheet_id))
return google_client.open_by_key(args.spreadsheet_id)
elif args.spreadsheet_url:
logger.info("Opening spreadsheet by URL '{}'".format(args.spreadsheet_url))
return google_client.open_by_url(args.spreadsheet_url)
elif args.spreadsheet_name:
logger.info("Opening spreadsheet by name '{}'".format(args.spreadsheet_name))
return google_client.open(args.spreadsheet_name)
else:
raise ValueError("Invalid command line arguments - no spreadsheet identifier was provided")
|
355545a00de77039250269c3c8ddf05b2f72ec48
| 31,590 |
def perturb_BB(image_shape, bb, max_pertub_pixel,
rng=None, max_aspect_ratio_diff=0.3,
max_try=100):
"""
Perturb a bounding box.
:param image_shape: [h, w]
:param bb: a `Rect` instance
    :param max_pertub_pixel: perturbation on each coordinate
:param max_aspect_ratio_diff: result can't have an aspect ratio too different from the original
:param max_try: if cannot find a valid bounding box, return the original
:returns: new bounding box
"""
orig_ratio = bb.h * 1.0 / bb.w
if rng is None:
rng = np.random.RandomState()
for _ in range(max_try):
p = rng.randint(-max_pertub_pixel, max_pertub_pixel, [4])
newbb = bb.copy()
newbb.x += p[0]
newbb.y += p[1]
newx1 = bb.x1 + p[2]
newy1 = bb.y1 + p[3]
newbb.w = newx1 - newbb.x
newbb.h = newy1 - newbb.y
if not newbb.validate(image_shape):
continue
new_ratio = newbb.h * 1.0 / newbb.w
diff = abs(new_ratio - orig_ratio)
if diff / orig_ratio > max_aspect_ratio_diff:
continue
return newbb
return bb
|
4044291bdcdf1639e9af86857cac158a67db5229
| 31,591 |
def neural_network(inputs, weights):
"""
Takes an input vector and runs it through a 1-layer neural network
with a given weight matrix and returns the output.
Arg:
inputs - 2 x 1 NumPy array
weights - 2 x 1 NumPy array
Returns (in this order):
out - a 1 x 1 NumPy array, representing the output of the neural network
"""
v = np.matmul(weights.transpose(),inputs)
return np.tanh(v)
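A quick check with hand-picked weights (purely illustrative):
import numpy as np

inputs = np.array([[1.0], [2.0]])     # 2 x 1
weights = np.array([[0.5], [-0.25]])  # 2 x 1
out = neural_network(inputs, weights)
# weights.T @ inputs = 0.5*1 - 0.25*2 = 0.0, so tanh(0.0) -> [[0.]]
print(out)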
|
dc2d5cccf0cf0591c030b5dba2cd905f4583821c
| 31,593 |
def complex_randn(shape):
"""
Returns a complex-valued numpy array of random values with shape `shape`
Args:
shape: (tuple) tuple of ints that will be the shape of the resultant complex numpy array
Returns: (:obj:`np.ndarray`): a complex-valued numpy array of random values with shape `shape`
"""
return np.random.randn(*shape) + 1j * np.random.randn(*shape)
|
6379fb2fb481392dce7fb4eab0e85ea85651b290
| 31,594 |
def sin(x: REAL) -> float:
"""Sine."""
x %= 2 * pi
res = 0
k = 0
while True:
mem_res = res
res += (-1) ** k * x ** (2 * k + 1) / fac(2 * k + 1)
if abs(mem_res - res) < _TAYLOR_DIFFERENCE:
return res
k += 1
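The names REAL, pi, fac, and _TAYLOR_DIFFERENCE are assumed to be defined elsewhere in the snippet's module (e.g. REAL as a numeric type alias, fac as the factorial function). A rough sanity check under those assumptions, with illustrative stand-ins:
import math

# Stand-ins for the module-level names the snippet relies on (assumptions).
pi = math.pi
fac = math.factorial
_TAYLOR_DIFFERENCE = 1e-16

print(sin(pi / 6))        # ~0.5
print(math.sin(pi / 6))   # reference value from the standard library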
|
0ae009139bc640944ad1a90386e6c66a6b874108
| 31,596 |
from dask.base import tokenize
from operator import getitem
def _getitem_row_chan(avg, idx, dtype):
""" Extract (row,chan,corr) arrays from dask array of tuples """
name = ("row-chan-average-getitem-%d-" % idx) + tokenize(avg, idx)
dim = ("row", "chan", "corr")
layers = db.blockwise(getitem, name, dim,
avg.name, dim,
idx, None,
numblocks={avg.name: avg.numblocks})
graph = HighLevelGraph.from_collections(name, layers, (avg,))
return da.Array(graph, name, avg.chunks,
                    meta=np.empty((0,)*len(dim), dtype=object),
dtype=dtype)
|
ff3da6b935cd4c3e909008fefea7a9c91d51d399
| 31,597 |
import gzip
def make_gzip(tar_file, destination):
    """
    Takes a tar_file and destination. Compresses the tar file and creates
    a .tar.gz at the destination.
    """
    with open(tar_file, 'rb') as tar_contents:
        with gzip.open(destination + '.tar.gz', 'wb') as gzipfile:
            gzipfile.writelines(tar_contents)
    return True
|
38d9e3de38cb204cc3912091099439b7e0825608
| 31,598 |
def symmetrize_confusion_matrix(CM, take='all'):
"""
Sums over population, symmetrizes, then return upper triangular portion
:param CM: numpy.ndarray confusion matrix in standard format
"""
if CM.ndim > 2:
CM = CM.sum(2)
assert len(CM.shape) == 2, 'This function is meant for single subject confusion matrices'
symmetrized = CM+CM.T
#print symmetrized
#print np.triu_indices(CM.shape[0])
if take == 'all':
rval = symmetrized[np.triu_indices(CM.shape[0])]
elif take == 'diagonal':
rval = symmetrized.diagonal()
elif take == 'off_diagonal':
rval = symmetrized[np.triu_indices(CM.shape[0], 1)]
else:
raise ValueError("Take %s not recognized. Allowed takes are all, diagonal and off_diagonal" % take)
return rval
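For instance, with a single 2x2 confusion matrix (the counts are invented for illustration):
import numpy as np

CM = np.array([[5, 1],
               [2, 7]])
print(symmetrize_confusion_matrix(CM))                        # [10  3 14]
print(symmetrize_confusion_matrix(CM, take='diagonal'))       # [10 14]
print(symmetrize_confusion_matrix(CM, take='off_diagonal'))   # [3]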
|
91964cc4fd08f869330413e7485f765696b92614
| 31,599 |
def get_entry_values():
"""Get entry values"""
entry = {}
for key, question in ENTRY_QUESTIONS.items():
input_type = int if key == "time" else str
while True:
print_title(MAIN_MENU[1].__doc__)
print(question)
user_input = validate(get_input(), input_type)
if user_input or key == "notes":
entry[key] = user_input
break
return entry
|
6736ac24bbbe83a0dcbd7a43cd12a1c1b1acbdab
| 31,600 |
def _create_snapshot(provider_id, machine_uuid, skip_store, wait_spawning):
"""Create a snapshot.
"""
_retrieve_machine(provider_id, machine_uuid, skip_store)
manager = _retrieve_manager(provider_id)
return manager.create_snapshot(machine_uuid, wait_spawning)
|
0d35309341dd27cc41e713c4fd950fee735c866d
| 31,601 |
def get_masked_lm_output(bert_config, input_tensor, positions,
label_ids, label_weights):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_weights = tf.get_variable(
"output_weights",
shape=[2, bert_config.hidden_size],
initializer=modeling.create_initializer(bert_config.initializer_range))
output_bias = tf.get_variable(
"output_bias",
shape=[2],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=2, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
|
7668ff4c4bd18cb14ff625dc0de593250cedb794
| 31,602 |
import torch
def binary_classification_loss(logits, targets, reduction='mean'):
"""
Loss.
:param logits: predicted classes
:type logits: torch.autograd.Variable
:param targets: target classes
:type targets: torch.autograd.Variable
:param reduction: reduction type
:type reduction: str
:return: error
:rtype: torch.autograd.Variable
"""
assert logits.size()[0] == targets.size()[0]
assert len(list(targets.size())) == 1# or (len(list(targets.size())) == 2 and targets.size(1) == 1)
assert len(list(logits.size())) == 2
targets = one_hot(targets, logits.size(1))
if logits.size()[1] > 1:
return torch.nn.functional.binary_cross_entropy_with_logits(logits, targets, reduction=reduction)
else:
raise NotImplementedError
|
507f3b076f6b59a8629bf02aa69ece05f5063f45
| 31,603 |
def transform_with(sample, transformers):
"""Transform a list of values using a list of functions.
:param sample: list of values
:param transformers: list of functions
"""
assert not isinstance(sample, dict)
assert isinstance(sample, (tuple, list))
if transformers is None or len(transformers) == 0:
return sample
result = list(sample)
ntransformers = len(transformers)
for i in range(len(sample)):
f = transformers[i%ntransformers]
if f is not None:
result[i] = f(sample[i])
return result
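A short illustration of the cycling behaviour (the transformer list is reused modulo its length):
sample = (1, "a", 2)
result = transform_with(sample, [lambda x: x * 10, str.upper])
# transformers cycle over the sample: [10, 'A', 20]
print(result)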
|
9a1d7741070b670e7bf8dbf88e8a23361521265f
| 31,605 |
def concat_eval(x, y):
"""
Helper function to calculate multiple evaluation metrics at once
"""
return {
"recall": recall_score(x, y, average="macro", zero_division=0),
"precision": precision_score(x, y, average="macro", zero_division=0),
"f1_score": f1_score(x, y, average="macro", zero_division=0),
"mcc": mcc(x, y),
}
|
5a0732ac5926173f12e3f0bd6d6e0ace653c7494
| 31,606 |
from typing import List
def split_into_regions(arr: np.ndarray, mode=0) -> List[np.ndarray]:
"""
Splits an array into its coherent regions.
:param mode: 0 for orthogonal connection, 1 for full connection
:param arr: Numpy array with shape [W, H]
:return: A list with length #NumberOfRegions of arrays with shape [W, H]
"""
res = []
if mode == 0:
rs, num_regions = label(arr)
elif mode == 1:
rs, num_regions = label(arr, structure=generate_binary_structure(2, 2))
else:
raise Exception("Please specify a valid Neighborhood mode for split_into_regions")
for i in range(1, num_regions + 1):
res.append(rs == i)
return res
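Assuming label and generate_binary_structure come from scipy.ndimage (as the call signatures suggest), usage might look like this:
import numpy as np
from scipy.ndimage import label, generate_binary_structure

arr = np.array([[1, 0, 0],
                [0, 0, 1],
                [0, 0, 1]])
regions = split_into_regions(arr, mode=0)
print(len(regions))        # 2 orthogonally-connected regions
print(regions[0].sum())    # size of the first region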
|
59e46f5877f3f4fd12a918e9aa26a67a92eb4d5b
| 31,607 |
def register_model(model_uri, name):
"""
Create a new model version in model registry for the model files specified by ``model_uri``.
Note that this method assumes the model registry backend URI is the same as that of the
tracking backend.
:param model_uri: URI referring to the MLmodel directory. Use a ``runs:/`` URI if you want to
record the run ID with the model in model registry. ``models:/`` URIs are
currently not supported.
:param name: Name of the registered model under which to create a new model version. If a
registered model with the given name does not exist, it will be created
automatically.
:return: Single :py:class:`mlflow.entities.model_registry.ModelVersion` object created by
backend.
"""
client = MlflowClient()
try:
create_model_response = client.create_registered_model(name)
eprint("Successfully registered model '%s'." % create_model_response.name)
except MlflowException as e:
if e.error_code == ErrorCode.Name(RESOURCE_ALREADY_EXISTS):
eprint("Registered model '%s' already exists. Creating a new version of this model..."
% name)
else:
raise e
if RunsArtifactRepository.is_runs_uri(model_uri):
source = RunsArtifactRepository.get_underlying_uri(model_uri)
(run_id, _) = RunsArtifactRepository.parse_runs_uri(model_uri)
create_version_response = client.create_model_version(name, source, run_id)
else:
create_version_response = client.create_model_version(name, source=model_uri, run_id=None)
eprint("Created version '{version}' of model '{model_name}'.".format(
version=create_version_response.version, model_name=create_version_response.get_name()))
return create_version_response
|
7dcdaa54717e6e0ea45390a5af48b1e350574d12
| 31,608 |
def noreplace(f):
"""Method decorator to indicate that a method definition shall
silently be ignored if it already exists in the full class."""
f.__noreplace = True
return f
|
88b6e8fdf7064ed04d9a0c310bcf1717e05e7fa8
| 31,609 |
def position_encoding(length, depth,
min_timescale=1,
max_timescale=1e4):
"""
Create Tensor of sinusoids of different frequencies.
Args:
length (int): Length of the Tensor to create, i.e. Number of steps.
depth (int): Dimensions of embedding.
min_timescale (float): Minimum time scale.
max_timescale (float): Maximum time scale.
Returns:
Tensor of shape (T, D)
"""
depth = depth // 2
positions = np.arange(length, dtype=np.float32)
log_timescale_increment = (np.log(max_timescale / min_timescale) / (depth - 1))
inv_timescales = min_timescale * np.exp(
np.arange(depth, dtype=np.float32) * -log_timescale_increment)
scaled_time = np.expand_dims(positions, 1) * np.expand_dims(inv_timescales, 0)
# instead of using SIN and COS interleaved
# it's the same to first use SIN then COS
# as they are applied to the same position
x = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
return x
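A quick shape check (illustrative):
import numpy as np

pe = position_encoding(length=50, depth=16)
print(pe.shape)   # (50, 16): the first 8 columns are sines, the last 8 are cosines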
|
9d8c9082d82fd41ea6b6655a50b3e802a12f6694
| 31,610 |
def perform_exchange(ctx):
"""
Attempt to exchange attached NEO for tokens
:param ctx:GetContext() used to access contract storage
:return:bool Whether the exchange was successful
"""
attachments = get_asset_attachments() # [receiver, sender, neo, gas]
address = attachments[1]
neo_amount = attachments[2]
# calculate the amount of tokens that can be exchanged
exchange_amount = calculate_exchange_amount(ctx, attachments, False)
if exchange_amount == 0:
# This should only happen in the case that there are a lot of TX on the final
# block before the total amount is reached. An amount of TX will get through
# the verification phase because the total amount cannot be updated during that phase
# because of this, there should be a process in place to manually refund tokens
if neo_amount > 0:
OnRefund(address, neo_amount)
return False
didMint = mint_tokens(ctx, address, exchange_amount)
# dispatch mintTokens event
if didMint:
OnMintTokens(attachments[0], address, exchange_amount)
return didMint
|
6c2f01a27b40a284e89da1e84de696baa1464e1d
| 31,611 |
def Pose_2_Staubli_v2(H):
"""Converts a pose to a Staubli target target"""
x = H[0,3]
y = H[1,3]
z = H[2,3]
a = H[0,0]
b = H[0,1]
c = H[0,2]
d = H[1,2]
e = H[2,2]
if c > (1.0 - 1e-10):
ry1 = pi/2
rx1 = 0
rz1 = atan2(H[1,0],H[1,1])
elif c < (-1.0 + 1e-10):
ry1 = -pi/2
rx1 = 0
rz1 = atan2(H[1,0],H[1,1])
else:
sy = c
cy1 = +sqrt(1-sy*sy)
sx1 = -d/cy1
cx1 = e/cy1
sz1 = -b/cy1
cz1 = a/cy1
rx1 = atan2(sx1,cx1)
ry1 = atan2(sy,cy1)
rz1 = atan2(sz1,cz1)
return [x, y, z, rx1*180.0/pi, ry1*180.0/pi, rz1*180.0/pi]
|
9fae83e10df544b7d2c096c7a59aca60567de538
| 31,612 |
def create_gru_model(fingerprint_input, model_settings, model_size_info,
is_training):
"""Builds a model with multi-layer GRUs
model_size_info: [number of GRU layers, number of GRU cells per layer]
Optionally, the bi-directional GRUs and/or GRU with layer-normalization
can be explored.
"""
if is_training:
dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
input_frequency_size = model_settings['dct_coefficient_count']
input_time_size = model_settings['spectrogram_length']
fingerprint_4d = tf.reshape(fingerprint_input,
[-1, input_time_size, input_frequency_size])
num_classes = model_settings['label_count']
layer_norm = False
bidirectional = False
num_layers = model_size_info[0]
gru_units = model_size_info[1]
gru_cell_fw = []
gru_cell_bw = []
if layer_norm:
for i in range(num_layers):
gru_cell_fw.append(LayerNormGRUCell(gru_units))
if bidirectional:
gru_cell_bw.append(LayerNormGRUCell(gru_units))
else:
for i in range(num_layers):
gru_cell_fw.append(tf.contrib.rnn.GRUCell(gru_units))
if bidirectional:
gru_cell_bw.append(tf.contrib.rnn.GRUCell(gru_units))
if bidirectional:
outputs, output_state_fw, output_state_bw = \
tf.contrib.rnn.stack_bidirectional_dynamic_rnn(gru_cell_fw, gru_cell_bw,
fingerprint_4d, dtype=tf.float32)
flow = outputs[:, -1, :]
else:
cells = tf.contrib.rnn.MultiRNNCell(gru_cell_fw)
_, last = tf.nn.dynamic_rnn(cell=cells, inputs=fingerprint_4d,
dtype=tf.float32)
flow = last[-1]
with tf.name_scope('Output-Layer'):
# linear layer
# # print(flow.get_shape()[-1])
W = tf.get_variable('W', shape=[flow.get_shape()[-1], 128],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable('b', shape=[128])
linear_output = tf.matmul(flow, W) + b
# first fc
first_fc_weights = tf.Variable(
tf.truncated_normal([128, 256], stddev=0.01), name="first_fc_w")
first_fc_bias = tf.Variable(tf.zeros([256]), name="first_fc_b")
first_fc = tf.matmul(linear_output, first_fc_weights) + first_fc_bias
first_fc = tf.nn.relu(first_fc)
W_o = tf.get_variable('W_o', shape=[first_fc.get_shape()[-1], num_classes],
initializer=tf.contrib.layers.xavier_initializer())
b_o = tf.get_variable('b_o', shape=[num_classes])
logits = tf.matmul(first_fc, W_o) + b_o
if is_training:
return logits, dropout_prob
else:
return logits
|
222581216edaf6225fabe850d977d14955c66c6e
| 31,613 |
def convergence_rates(N, solver_function, num_periods=8):
"""
Returns N-1 empirical estimates of the convergence rate
based on N simulations, where the time step is halved
for each simulation.
    solver_function(I, V, F, b, c, m, dt, T, damping) solves
each problem, where T is based on simulation for
num_periods periods.
"""
def F(t):
"""External driving force"""
return A*np.sin(2*np.pi*f*t)
b, c, m = 0, 1.6, 1.3 # just some chosen values
I = 0 # init. cond. u(0)
V = 0 # init. cond. u'(0)
A = 1.0 # amplitude of driving force
f = 1.0 # chosen frequency of driving force
damping = 'zero'
P = 1/f
dt = P/30 # 30 time step per period 2*pi/w
T = P*num_periods
dt_values = []
E_values = []
for i in range(N):
u, t = solver_function(I, V, F, b, c, m, dt, T, damping)
u_e = u_exact(t, I, V, A, f, c, m)
E = np.sqrt(dt*np.sum((u_e-u)**2))
dt_values.append(dt)
E_values.append(E)
dt = dt/2
#plt.plot(t, u, 'b--', t, u_e, 'r-'); plt.grid(); plt.show()
r = [np.log(E_values[i-1]/E_values[i])/
np.log(dt_values[i-1]/dt_values[i])
for i in range(1, N, 1)]
    print(r)
return r
|
e66b4395557e0a254636546555d87716e4b0cc50
| 31,614 |
import cProfile
import io
import pstats
def profile(fnc):
"""A decorator that uses cProfile to profile a function"""
def inner(*args, **kwargs):
pr = cProfile.Profile()
pr.enable()
retval = fnc(*args, **kwargs)
pr.disable()
s = io.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
return retval
return inner
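Typical usage is as a decorator on the function to be measured; for example:
@profile
def slow_sum(n):
    """Toy workload used only to demonstrate the decorator."""
    return sum(i * i for i in range(n))

slow_sum(100000)   # prints cProfile statistics sorted by cumulative time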
|
9b5d248e2bd13d792e7c3cce646aa4c0432af8db
| 31,615 |
def _decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
"""Decodes the output of a softmax.
Can use either greedy search (also known as best path)
or a constrained dictionary search.
# Arguments
y_pred: tensor `(samples, time_steps, num_categories)`
containing the prediction, or output of the softmax.
input_length: tensor `(samples, )` containing the sequence length for
each batch item in `y_pred`.
greedy: perform much faster best-path search if `true`.
This does not use a dictionary.
beam_width: if `greedy` is `false`: a beam search decoder will be used
with a beam of this width.
top_paths: if `greedy` is `false`,
how many of the most probable paths will be returned.
# Returns
Tuple:
List: if `greedy` is `true`, returns a list of one element that
contains the decoded sequence.
If `false`, returns the `top_paths` most probable
decoded sequences.
Important: blank labels are returned as `-1`.
Tensor `(top_paths, )` that contains
the log probability of each decoded sequence.
"""
decoded = K.ctc_decode(y_pred=y_pred, input_length=input_length,
greedy=greedy, beam_width=beam_width, top_paths=top_paths)
paths = [path.eval(session=K.get_session()) for path in decoded[0]]
logprobs = decoded[1].eval(session=K.get_session())
return (paths, logprobs)
|
7a73aa329245136ae560e92ebe67d997e57557f9
| 31,616 |
def rand_xyz_box(image_arrays, label, n, depth, img_size):
"""Returns n number of randomly chosen box.
Args:
image_arrays: 3D np array of images.
label: label of images. normally is A or V
n: number of random boxes generated from this function.
depth : number of slices in Z direction. default is 50 if not specified.
img_size: image size in X,Y directions. default is 50 if not specified.
Returns:
List object. ['Z','X','Y','im_array','labels'].
Each im_array is a randomly chosen box with volume of depth*img_size*img_size.
"""
z = np.random.randint(len(image_arrays)-depth+1, size=n)
x = np.random.randint(len(image_arrays[1])-img_size+1, size=n)
y = np.random.randint(len(image_arrays[2])-img_size+1, size=n)
n_box = []
for z, x, y in zip(z, x, y):
box = image_arrays[z:z+depth, x:x+img_size, y:y+img_size]
box = np.reshape(box, (depth, img_size, img_size, 1))
n_box.append([z, x, y, box, label])
return n_box
|
3127522a7d08b5694fc92ab058736db1d7471676
| 31,617 |
def pageviews_by_document(start_date, end_date, verbose=False):
"""Return the number of pageviews by document in a given date range.
* Only returns en-US documents for now since that's what we did with
webtrends.
Returns a dict with pageviews for each document:
{<document_id>: <pageviews>,
1: 42,
7: 1337,...}
"""
counts = {}
request = _build_request()
max_results = 10000
end_date_step = end_date
while True: # To reduce the size of result set request 3 months at a time
start_date_step = end_date_step - timedelta(90)
if start_date_step < start_date:
start_date_step = start_date
if verbose:
print("Fetching data for %s to %s:" % (start_date_step, end_date_step))
start_index = 1
while True: # To deal with pagination
@retry_503
def _make_request():
return request.get(
ids="ga:" + profile_id,
start_date=str(start_date_step),
end_date=str(end_date_step),
metrics="ga:pageviews",
dimensions="ga:pagePath",
filters=("ga:pagePathLevel2==/kb/;" "ga:pagePathLevel1==/en-US/"),
max_results=max_results,
start_index=start_index,
).execute()
results = _make_request()
if verbose:
d = (
max_results - 1
if start_index + max_results - 1 < results["totalResults"]
else results["totalResults"] - start_index
)
print("- Got %s of %s results." % (start_index + d, results["totalResults"]))
for result in results.get("rows", []):
path = result[0]
pageviews = int(result[1])
doc = Document.from_url(path, id_only=True, check_host=False)
if not doc:
continue
# The same document can appear multiple times due to url params
counts[doc.pk] = counts.get(doc.pk, 0) + pageviews
# Move to next page of results.
start_index += max_results
if start_index > results.get("totalResults", 0):
break
end_date_step = start_date_step - timedelta(1)
if start_date_step == start_date or end_date_step < start_date:
break
return counts
|
c1a2c4ba2711803ca4b5e0cb8959a99b36f928ec
| 31,618 |
from re import T
def format_time_string(seconds):
""" Return a formatted and translated time string """
def unit(single, n):
# Seconds and minutes are special due to historical reasons
if single == "minute" or (single == "second" and n == 1):
single = single[:3]
if n == 1:
return T(single)
return T(single + "s")
# Format the string, size by size
seconds = int_conv(seconds)
completestr = []
days = seconds // 86400
if days >= 1:
completestr.append("%s %s" % (days, unit("day", days)))
seconds -= days * 86400
hours = seconds // 3600
if hours >= 1:
completestr.append("%s %s" % (hours, unit("hour", hours)))
seconds -= hours * 3600
minutes = seconds // 60
if minutes >= 1:
completestr.append("%s %s" % (minutes, unit("minute", minutes)))
seconds -= minutes * 60
if seconds > 0:
completestr.append("%s %s" % (seconds, unit("second", seconds)))
# Zero or invalid integer
if not completestr:
completestr.append("0 %s" % unit("second", 0))
return " ".join(completestr)
|
27e0a084165605aa4b1a2b42c87840439686c255
| 31,619 |
import torch
def warp_grid(flow: Tensor) -> Tensor:
"""Creates a warping grid from a given optical flow map.
The warping grid determines the coordinates of the source pixels from which to take the color when inverse warping.
Args:
flow: optical flow tensor of shape (B, H, W, 2). The flow values are expected to already be in normalized range,
see :func:`normalize` for more information.
Returns:
The warping grid
"""
b, h, w, _ = flow.shape
range_x = torch.linspace(-1.0, 1.0, w, device=flow.device)
range_y = torch.linspace(-1.0, 1.0, h, device=flow.device)
grid_y, grid_x = torch.meshgrid(range_y, range_x)
# grid has shape (B, H, W, 2)
grid = torch.stack((grid_x, grid_y), dim=-1).unsqueeze(0).repeat(b, 1, 1, 1)
grid = grid + flow
return grid
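The returned grid has the layout expected by torch.nn.functional.grid_sample, so inverse warping an image could look like the sketch below (assuming the align_corners=True convention matches the linspace used above):
import torch
import torch.nn.functional as F

img = torch.rand(1, 3, 8, 8)       # (B, C, H, W)
flow = torch.zeros(1, 8, 8, 2)     # zero flow -> identity warp
grid = warp_grid(flow)             # (B, H, W, 2)
warped = F.grid_sample(img, grid, align_corners=True)
# with zero flow, warped matches img (up to interpolation)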
|
21f5765603f8fb42d5fe70668ab6d52b60c16bfe
| 31,620 |
def FORMULATEXT(*args) -> Function:
"""
Returns the formula as a string.
    Learn more: https://support.google.com/docs/answer/9365792.
"""
return Function("FORMULATEXT", args)
|
17cb21ee8b36439395b64fd410006ff03db7fedc
| 31,621 |
def num_prim_vertices(prim: hou.Prim) -> int:
"""Get the number of vertices belonging to the primitive.
:param prim: The primitive to get the vertex count of.
:return: The vertex count.
"""
return prim.intrinsicValue("vertexcount")
|
298a4a67133fc857c129b922f7f5a0f21d6d0b40
| 31,623 |
def read_geoparquet(path: str) -> GeoDataFrame:
"""
Given the path to a parquet file, construct a geopandas GeoDataFrame by:
- loading the file as a pyarrow table
- reading the geometry column name and CRS from the metadata
- deserialising WKB into shapely geometries
"""
# read parquet file into pyarrow Table
table = pq.read_table(path)
# deserialise metadata for first geometry field
# (geopandas only supports one geometry column)
geometry_metadata = _deserialise_metadata(table)["geometry_fields"][0]
# extract CRS
crs = geometry_metadata["crs"]
# convert pyarrow Table to pandas DataFrame
df = table.to_pandas()
# identify geometry column name
geom_col_name = geometry_metadata["field_name"]
# deserialise geometry column
df = df._deserialise_geometry(geom_col_name)
# convert to geopandas GeoDataFrame
df = GeoDataFrame(df, crs=crs, geometry=geom_col_name)
return df
|
0fddb5452010e5d4546b3b34e7afae93698cd953
| 31,624 |
def cmd_run_json_block_file(file):
"""`file` is a file containing a FullBlock in JSON format"""
return run_json_block_file(file)
|
594e10a7ef4e20b130a5b39c22a834208df846a6
| 31,625 |
def collide_mask(left, right):
"""collision detection between two sprites, using masks.
pygame.sprite.collide_mask(SpriteLeft, SpriteRight): bool
Tests for collision between two sprites by testing if their bitmasks
overlap. If the sprites have a "mask" attribute, that is used as the mask;
otherwise, a mask is created from the sprite image. Intended to be passed
as a collided callback function to the *collide functions. Sprites must
have a "rect" and an optional "mask" attribute.
New in pygame 1.8.0
"""
xoffset = right.rect[0] - left.rect[0]
yoffset = right.rect[1] - left.rect[1]
try:
leftmask = left.mask
except AttributeError:
leftmask = from_surface(left.image)
try:
rightmask = right.mask
except AttributeError:
rightmask = from_surface(right.image)
return leftmask.overlap(rightmask, (xoffset, yoffset))
|
fcb309e0c5ca7bc59e5b39b8fd67a45a5281d262
| 31,626 |
import requests
def fetch_production(zone_key='IN-GJ', session=None, target_datetime=None,
logger=getLogger('IN-GJ')) -> list:
"""Requests the last known production mix (in MW) of a given country."""
session = session or requests.session()
if target_datetime:
raise NotImplementedError(
'This parser is not yet able to parse past dates')
value_map = fetch_data(zone_key, session, logger=logger)
data = {
'zoneKey': zone_key,
'datetime': value_map['date'].datetime,
'production': {
'biomass': None,
'coal': value_map.get('coal', 0),
'gas': value_map.get('gas', 0),
'hydro': value_map.get('hydro', 0),
'nuclear': value_map.get('nuclear', 0),
'oil': None,
'solar': value_map.get('solar', 0),
'wind': value_map.get('wind', 0),
'geothermal': None,
'unknown': value_map.get('unknown', 0)
},
'storage': {
'hydro': None
},
'source': 'sldcguj.com',
}
valid_data = validate(data, logger, remove_negative=True, floor=7000)
return valid_data
|
e23e409d24349e998eb9c261805a050de12ed30c
| 31,627 |
def xyz_order(coordsys, name2xyz=None):
""" Vector of orders for sorting coordsys axes in xyz first order
Parameters
----------
coordsys : ``CoordinateSystem`` instance
name2xyz : None or mapping
Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or
raises a KeyError for a str ``ax_name``. None means use module default.
Returns
-------
xyz_order : list
Ordering of axes to get xyz first ordering. See the examples.
Raises
------
AxesError : if there are not all of x, y and z axes
Examples
--------
>>> from nipy.core.api import CoordinateSystem
>>> xyzt_cs = mni_cs(4) # coordsys with t (time) last
>>> xyzt_cs
CoordinateSystem(coord_names=('mni-x', 'mni-y', 'mni-z', 't'), name='mni', coord_dtype=float64)
>>> xyz_order(xyzt_cs)
[0, 1, 2, 3]
>>> tzyx_cs = CoordinateSystem(xyzt_cs.coord_names[::-1], 'reversed')
>>> tzyx_cs
CoordinateSystem(coord_names=('t', 'mni-z', 'mni-y', 'mni-x'), name='reversed', coord_dtype=float64)
>>> xyz_order(tzyx_cs)
[3, 2, 1, 0]
"""
if name2xyz is None:
name2xyz = known_names
names = coordsys.coord_names
N = len(names)
axvals = np.zeros(N, dtype=int)
for i, name in enumerate(names):
try:
xyz_char = name2xyz[name]
except KeyError:
axvals[i] = N+i
else:
axvals[i] = 'xyz'.index(xyz_char)
if not set(axvals).issuperset(range(3)):
raise AxesError("Not all of x, y, z recognized in coordinate map")
return list(np.argsort(axvals))
|
983c7adc5df8f54ecc92423eed0cd744971d4ec3
| 31,628 |
def parse_item(year, draft_type, row):
"""Parses the given row out into a DraftPick item."""
draft_round = parse_int(row, 'th[data-stat="draft_round"]::text', -1)
draft_pick = parse_int(row, 'td[data-stat="draft_pick"]::text', -1)
franchise = '/'.join(
row.css('td[data-stat="team"] a::attr(href)').get().split('/')[:-1])
player = row.css('td[data-stat="player"] a::attr(href)').get()
if not player:
player = row.css('td[data-stat="player"]::text').get()
position = row.css('td[data-stat="pos"]::text').get()
age = parse_int(row, 'td[data-stat="age"]::text', -1)
first_team_all_pros = parse_int(
row, 'td[data-stat="all_pros_first_team"]::text', 0)
pro_bowls = parse_int(row, 'td[data-stat="pro_bowls"]::text', 0)
career_approx_value = parse_int(row, 'td[data-stat="career_av"]::text', 0)
draft_approx_value = parse_int(row, 'td[data-stat="draft_av"]::text', 0)
college = row.css('td[data-stat="college_id"] a::attr(href)').get()
if not college:
college = row.css('td[data-stat="college_id"]::text').get()
return DraftPick(year=year,
draft_type=draft_type,
draft_round=draft_round,
draft_pick=draft_pick,
franchise=franchise,
player=player,
position=position,
age=age,
first_team_all_pros=first_team_all_pros,
pro_bowls=pro_bowls,
career_approx_value=career_approx_value,
draft_approx_value=draft_approx_value,
college=college)
|
822a596e0c3e381658a853899920347b95a7ff59
| 31,629 |
def buildJointChain(prefix, suffix, startPos, endPos, jointNum, orientJoint="xyz", saoType="yup"):
"""
Build a straight joint chain defined by start and end position.
:param prefix: `string` prefix string in joint name
:param suffix: `string` suffix string in joint name
:param startPos: `list` [x,y,z] start position in the world space
:param endPos: `list` [x,y,z] end position in the world space
:param jointNum: number of joints in the joint chain
:param orientJoint: `string` orient joint flag
:param saoType: `string` secondary axis orient flag
:return: `list` list of joint nodes in the joint chain. sorted by hierarchy.
"""
pm.select(d=1)
step = (om.MVector(*endPos)-om.MVector(*startPos))/(jointNum-1.0)
jnts = []
for i in range(jointNum):
crtPos = om.MVector(*startPos)+step*i
crtSuffix = suffix#suffix[1] if i==jointNum-1 else suffix[0]
jnts.append(pm.joint(p=(crtPos.x, crtPos.y, crtPos.z), n="{0}_{1:0>2d}_{2}".format(prefix, i, crtSuffix)))
pm.joint(jnts, e=True, oj=orientJoint, sao=saoType)
return jnts
|
fda63b96d2e5a1316fab9d2f9dc268ae0ff270d2
| 31,630 |
import time
import torch
def predict(model, img_load, resizeNum, is_silent, gpu=0):
"""
input:
model: model
img_load: A dict of image, which has two keys: 'img_ori' and 'img_data'
the value of the key 'img_ori' means the original numpy array
the value of the key 'img_data' is the list of five resize images
output:
the mean predictions of the resize image list: 'img_data'
"""
starttime = time.time()
segSize = (img_load['img_ori'].shape[0],
img_load['img_ori'].shape[1])
#print('segSize',segSize)
img_resized_list = img_load['img_data']
with torch.no_grad():
scores = torch.zeros(1, cfg.DATASET.num_class, segSize[0], segSize[1], device=torch.device("cuda", gpu))
for img in img_resized_list:
feed_dict = img_load.copy()
feed_dict['img_data']=img
del feed_dict['img_ori']
#feed_dict = {'img_data': img}
feed_dict=async_copy_to(feed_dict, gpu)
# forward pass
pred_tmp = model(feed_dict, segSize = segSize) #shape of pred_temp is (1, 150, height, width)
scores = scores + pred_tmp / resizeNum
endtime = time.time()
if not is_silent:
print('model inference time: {}s' .format(endtime-starttime))
return scores
|
04da68453aab79f732deb153cdcbed9ea267355c
| 31,631 |
def reverse_preorder(root):
"""
@ input: root of lcrs tree
@ output: integer list of id's reverse preorder
"""
node_list = []
temp_stack = [root]
while len(temp_stack) != 0:
curr = temp_stack.pop()
node_list.append(curr.value)
if curr.child is not None:
temp_stack.append(curr.child)
if curr.next is not None:
temp_stack.append(curr.next)
return node_list
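The Node type is not shown in the snippet; assuming a minimal left-child/right-sibling node with value, child, and next attributes, a small example:
class Node:
    """Hypothetical LCRS node matching the attributes used above."""
    def __init__(self, value, child=None, next=None):
        self.value = value
        self.child = child
        self.next = next

# A has child B, and B has sibling C.
root = Node("A", child=Node("B", next=Node("C")))
print(reverse_preorder(root))   # ['A', 'B', 'C']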
|
06a53756db0f5c990537d02de4fcaa57cc93169d
| 31,632 |
import scipy
def calc_binned_percentile(bin_edge,xaxis,data,per=75):
"""Calculate the percentile value of an array in some bins.
per is the percentile at which to extract it. """
percen = np.zeros(np.size(bin_edge)-1)
    for i in range(0, np.size(bin_edge)-1):
ind = np.where((xaxis > bin_edge[i])*(xaxis < bin_edge[i+1]))
if np.size(ind) > 5:
percen[i] = scipy.stats.scoreatpercentile(data[ind],per)
return percen
|
798cd1e4f1070b27766f2390442fa81dfad15aaa
| 31,633 |
def run_services(container_factory, config, make_cometd_server, waiter):
""" Returns services runner
"""
def _run(service_class, responses):
"""
Run testing cometd server and example service with tested entrypoints
Before run, the testing cometd server is preloaded with passed
responses.
"""
cometd_server = make_cometd_server(responses)
container = container_factory(service_class, config)
cometd_server.start()
container.start()
waiter.wait()
container.kill()
cometd_server.stop()
return _run
|
df7d1c3fdf7e99ebf054cfc6881c8073c2cf4dee
| 31,634 |
import requests
def cleaned_request(request_type, *args, **kwargs):
""" Perform a cleaned requests request """
s = requests.Session()
# this removes netrc checking
s.trust_env = False
return s.request(request_type, *args, **kwargs)
|
b6c99c85a64e5fd78cf10cc986c9a4b1542f47d3
| 31,635 |
from typing import List
from typing import Set
def construct_speech_to_text_phrases_context(event: EventIngestionModel) -> List[str]:
"""
Construct a list of phrases to use for Google Speech-to-Text speech adaption.
See: https://cloud.google.com/speech-to-text/docs/speech-adaptation
Parameters
----------
event: EventIngestionModel
The event details to pull context from.
Returns
-------
phrases: List[str]
Compiled list of strings to act as target weights for the model.
Notes
-----
Phrases are added in order of importance until GCP phrase limits are met.
The order of importance is defined as:
1. body name
2. event minutes item names
3. councilmember names
4. matter titles
5. councilmember role titles
"""
# Note: Google Speech-to-Text allows max 500 phrases
phrases: Set[str] = set()
PHRASE_LIMIT = 500
CUM_CHAR_LIMIT = 9900
# In line def for get character count
# Google Speech-to-Text allows cumulative max 9900 characters
def _get_total_char_count(phrases: Set[str]) -> int:
chars = 0
for phrase in phrases:
chars += len(phrase)
return chars
def _get_if_added_sum(phrases: Set[str], next_addition: str) -> int:
current_len = _get_total_char_count(phrases)
return current_len + len(next_addition)
def _within_limit(phrases: Set[str]) -> bool:
return (
_get_total_char_count(phrases) < CUM_CHAR_LIMIT
and len(phrases) < PHRASE_LIMIT
)
# Get body name
if _within_limit(phrases):
if _get_if_added_sum(phrases, event.body.name) < CUM_CHAR_LIMIT:
phrases.add(event.body.name)
# Extras from event minutes items
if event.event_minutes_items is not None:
# Get minutes item name
for event_minutes_item in event.event_minutes_items:
if _within_limit(phrases):
if (
_get_if_added_sum(phrases, event_minutes_item.minutes_item.name)
< CUM_CHAR_LIMIT
):
phrases.add(event_minutes_item.minutes_item.name)
# Get councilmember names from sponsors and votes
for event_minutes_item in event.event_minutes_items:
if event_minutes_item.matter is not None:
if event_minutes_item.matter.sponsors is not None:
for sponsor in event_minutes_item.matter.sponsors:
if _within_limit(phrases):
if (
_get_if_added_sum(phrases, sponsor.name)
< CUM_CHAR_LIMIT
):
phrases.add(sponsor.name)
if event_minutes_item.votes is not None:
for vote in event_minutes_item.votes:
if _within_limit(phrases):
if (
_get_if_added_sum(phrases, vote.person.name)
< CUM_CHAR_LIMIT
):
phrases.add(vote.person.name)
# Get matter titles
for event_minutes_item in event.event_minutes_items:
if event_minutes_item.matter is not None:
if _within_limit(phrases):
if (
_get_if_added_sum(phrases, event_minutes_item.matter.title)
< CUM_CHAR_LIMIT
):
phrases.add(event_minutes_item.matter.title)
# Get councilmember role titles from sponsors and votes
for event_minutes_item in event.event_minutes_items:
if event_minutes_item.matter is not None:
if event_minutes_item.matter.sponsors is not None:
for sponsor in event_minutes_item.matter.sponsors:
if sponsor.seat is not None:
if sponsor.seat.roles is not None:
for role in sponsor.seat.roles:
if (
_get_if_added_sum(phrases, role.title)
< CUM_CHAR_LIMIT
):
phrases.add(role.title)
if event_minutes_item.votes is not None:
for vote in event_minutes_item.votes:
if vote.person.roles is not None:
for role in vote.person.roles:
if _within_limit(phrases):
if (
_get_if_added_sum(phrases, role.title)
< CUM_CHAR_LIMIT
):
phrases.add(role.title)
return list(phrases)
|
e8834afd4e53d446f2dda1fd79383a0266010e5b
| 31,636 |
from networkx import Graph, connected_components

def data_science_community(articles, authors):
"""
Input: Articles and authors collections. You may use only one of them
Output: 3-tuple reporting on subgraph of authors of data science articles
and their co-authors: (number of connected components,size of largest
connected component, size of smallest connected component)
"""
graph = Graph()
match_1_stage = {"$match": {"fos.name": "Data science"}}
project_stage = {"$project": {"authors.id": 1}}
unwind_stage = {"$unwind": "$authors"}
res_set = list(articles.aggregate([match_1_stage, project_stage,
unwind_stage]))
data_science_authors = set()
for res in res_set:
data_science_authors.add(res["authors"]["id"])
graph.add_node(res["authors"]["id"])
for author in data_science_authors:
author_records = authors.find({"_id": author})
for author_record in author_records:
co_authors = author_record["coauthors"]
for co_author in co_authors:
graph.add_edge(author, co_author)
connected_items = connected_components(graph)
connected_graph = sorted(connected_items, key=len, reverse=True)
largest = len(list(graph.subgraph(connected_graph[0]).nodes()))
smallest = len(list(graph.subgraph(connected_graph[-1]).nodes()))
connected_items = connected_components(graph)
no_connected_components = len(list(connected_items))
return no_connected_components, largest, smallest
|
3a81fc7674a2d421ff4649759e61797a743b7aae
| 31,637 |
import numpy as np

# `asgraph` and `amg_core` are provided by pyamg (pyamg.graph / pyamg.amg_core).
def breadth_first_search(G, seed):
"""Breadth First search of a graph.
Parameters
----------
G : csr_matrix, csc_matrix
A sparse NxN matrix where each nonzero entry G[i,j] is the distance
between nodes i and j.
seed : int
Index of the seed location
Returns
-------
order : int array
Breadth first order
level : int array
Final levels
Examples
--------
0---2
| /
| /
1---4---7---8---9
| /| /
| / | /
3/ 6/
|
|
5
>>> import numpy as np
>>> import pyamg
>>> import scipy.sparse as sparse
>>> edges = np.array([[0,1],[0,2],[1,2],[1,3],[1,4],[3,4],[3,5],
... [4,6], [4,7], [6,7], [7,8], [8,9]])
>>> N = np.max(edges.ravel())+1
>>> data = np.ones((edges.shape[0],))
>>> A = sparse.coo_matrix((data, (edges[:,0], edges[:,1])), shape=(N,N))
>>> c, l = pyamg.graph.breadth_first_search(A, 0)
>>> print(l)
[0 1 1 2 2 3 3 3 4 5]
>>> print(c)
[0 1 2 3 4 5 6 7 8 9]
"""
G = asgraph(G)
N = G.shape[0]
order = np.empty(N, G.indptr.dtype)
level = np.empty(N, G.indptr.dtype)
level[:] = -1
BFS = amg_core.breadth_first_search
BFS(G.indptr, G.indices, int(seed), order, level)
return order, level
|
047596e378f0496189f2e164e2b7ede4a6212f19
| 31,638 |
def main_page():
"""
Pass table of latest sensor readings as context for main_page
"""
LOG.info("Main Page triggered")
context = dict(
sub_title="Latest readings:",
table=recent_readings_as_html()
)
return render_template('main_page.html', **context)
|
6c9ac7c3306eb10d03269ca4e0cbca9c68a19644
| 31,639 |
import yaml

def load_config_file(filename):
    """Load configuration from YAML file."""
    config_dict = dict()
    # Use a context manager so the file handle is closed once parsing is done.
    with open(filename, 'r') as config_file:
        for doc in yaml.load_all(config_file, Loader=yaml.SafeLoader):
            for k, v in doc.items():
                config_dict[k] = v
    return config_dict
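A small usage sketch that round-trips a throwaway YAML file (the file name is arbitrary):

import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as fh:
    fh.write("host: localhost\nport: 8080\n")
    path = fh.name
try:
    print(load_config_file(path))  # {'host': 'localhost', 'port': 8080}
finally:
    os.remove(path)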
|
d61bb86e605a1e744ce3f4cc03e866c61137835d
| 31,640 |
import tensorflow as tf
from tensorflow.contrib import layers  # assumption: TF 1.x contrib layers provide the `conv1d` used below

def CausalConv(x, dilation_rate, filters, kernel_size=2, scope=""):
"""Performs causal dilated 1D convolutions.
Args:
x : Tensor of shape (batch_size, steps, input_dim).
dilation_rate: Dilation rate of convolution.
filters: Number of convolution filters.
kernel_size: Width of convolution kernel. SNAIL paper uses 2 for all
experiments.
scope: Variable scope for this layer.
Returns:
y: Tensor of shape (batch_size, new_steps, D).
"""
with tf.variable_scope(scope):
causal_pad_size = (kernel_size - 1) * dilation_rate
# Pad sequence dimension.
x = tf.pad(x, [[0, 0], [causal_pad_size, 0], [0, 0]])
return layers.conv1d(
x,
filters,
kernel_size=kernel_size,
padding="VALID",
rate=dilation_rate)
|
08ffde5e4a9ae9ebdbb6ed83a22ee1987bf02b1e
| 31,641 |
import functools
def makeTable(grid):
"""Create a REST table."""
def makeSeparator(num_cols, col_width, header_flag):
if header_flag == 1:
return num_cols * ("+" + (col_width) * "=") + "+\n"
else:
return num_cols * ("+" + (col_width) * "-") + "+\n"
def normalizeCell(string, length):
return string + ((length - len(string)) * " ")
cell_width = 2 + max(
functools.reduce(
lambda x, y: x + y, [[len(item) for item in row] for row in grid], []
)
)
num_cols = len(grid[0])
rst = makeSeparator(num_cols, cell_width, 0)
header_flag = 1
for row in grid:
rst = (
rst
+ "| "
+ "| ".join([normalizeCell(x, cell_width - 1) for x in row])
+ "|\n"
)
rst = rst + makeSeparator(num_cols, cell_width, header_flag)
header_flag = 0
return rst
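A quick usage sketch with a small grid (the first row is treated as the header):

grid = [["Name", "Role"], ["alice", "admin"], ["bob", "user"]]
print(makeTable(grid))  # prints a reST grid table with '=' under the header row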
|
c889a4cf505b5f0b3ef75656acb38f621c7fff31
| 31,642 |
import numpy as np
import numpy.typing as npt

def coords_to_bin(
x: npt.NDArray,
y: npt.NDArray,
x_bin_width: float,
y_bin_width: float,
) -> tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]:
"""
x: list of positive east-west coordinates of some sort
y: list of positive north-south coordinates of some sort
x_bin_width: bin width for x
y_bin_width: bin width for y
"""
assert np.all(x > 0)
assert np.all(y > 0)
assert x_bin_width > 0
assert y_bin_width > 0
# Compute bins
x_bin_list = np.array(np.floor(x / x_bin_width), dtype=int)
y_bin_list = np.array(np.floor(y / y_bin_width), dtype=int)
return (x_bin_list, y_bin_list)
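Usage sketch with small positive coordinate arrays:

import numpy as np

x = np.array([120.0, 480.0, 999.0])
y = np.array([50.0, 260.0, 510.0])
print(coords_to_bin(x, y, x_bin_width=100.0, y_bin_width=250.0))
# (array([1, 4, 9]), array([0, 1, 2]))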
|
874950836d6d03e1dc0f39bdb53653789fe64605
| 31,644 |
from functools import wraps
from typing import Callable

from google.api_core.exceptions import NotFound  # assumed source of the GCS NotFound error

def _gcs_request(func: Callable):
"""
Wrapper function for gcs requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except NotFound:
raise FileNotFoundError("file {} not found".format(url))
return wrapper
|
a57867df668eb9b139ee8e07a405868676c9e0f2
| 31,645 |
import numpy as np

# `HessianOutput`, `Molecule` and `calc_hessian_nl` are provided by the
# surrounding force-field fitting package.
def nllsqfunc(params: np.ndarray, qm: HessianOutput, qm_hessian: np.ndarray, mol: Molecule,
loss: list[float]=None) -> np.ndarray:
"""Residual function for non-linear least-squares optimization based on the difference of MD
and QM hessians.
Keyword arguments
-----------------
params : np.ndarray[float](sum of n_params for each term to be fit,)
stores all parameters for the terms
qm : HessianOutput
output from QM hessian file read
qm_hessian : np.ndarray[float]((3*n_atoms)(3*n_atoms + 1)/2,)
the flattened 1D QM hessian
mol : Molecule
the Molecule object
loss : list[float] (default None)
the list to keep track of the loss function over the optimization process
Returns
-------
The np.ndarray[float]((3*n_atoms)(3*n_atoms + 1)/2,) of residuals
"""
hessian = []
non_fit = []
# print("Calculating the MD hessian matrix elements...")
full_md_hessian = calc_hessian_nl(qm.coords, mol, params)
# print("Fitting the MD hessian parameters to QM hessian values")
for i in range(mol.topo.n_atoms * 3):
for j in range(i + 1):
hes = (full_md_hessian[i, j] + full_md_hessian[j, i]) / 2
hessian.append(hes[:-1])
non_fit.append(hes[-1])
hessian = np.array(hessian)
agg_hessian = np.sum(hessian, axis=1) # Aggregate contribution of terms
difference = qm_hessian - np.array(non_fit)
# Compute residual vector
residual = agg_hessian - difference
# Append loss to history
if loss is not None:
loss.append(0.5 * np.sum(residual**2))
return residual
|
0debdca80de9e7ea136683de04bc838ceb2f42e2
| 31,646 |
import logging
import time

LOGGER = logging.getLogger(__name__)
ONE_HOUR_SECS = 60 * 60


def wait_for_mongod_shutdown(mongod_control, timeout=2 * ONE_HOUR_SECS):
"""Wait for for mongod to shutdown; return 0 if shutdown occurs within 'timeout', else 1."""
start = time.time()
status = mongod_control.status()
while status != "stopped":
if time.time() - start >= timeout:
LOGGER.error("The mongod process has not stopped, current status is %s", status)
return 1
LOGGER.info("Waiting for mongod process to stop, current status is %s ", status)
time.sleep(3)
status = mongod_control.status()
LOGGER.info("The mongod process has stopped")
# We wait a bit, since files could still be flushed to disk, which was causing
# rsync "file has vanished" errors.
time.sleep(60)
return 0
|
837271069f8aa672372aec944abedbd44664a3d3
| 31,647 |
from typing import List
import re

import windows_tools.installed_software
import windows_tools.wmi_queries

# `securitycenter_get_product_exec_state`, `securitycenter_get_product_update_state`,
# `securitycenter_get_product_type` and KNOWN_ANTIVIRUS_PRODUCTS_REGEX are defined
# elsewhere in the same module.
def get_installed_antivirus_software() -> List[dict]:
"""
Not happy with it either. But yet here we are... Thanks Microsoft for not having SecurityCenter2 on WinServers
So we need to detect used AV engines by checking what is installed and do "best guesses"
This test does not detect Windows defender since it's not an installed product
"""
av_engines = []
potential_seccenter_av_engines = []
potential_av_engines = []
result = windows_tools.wmi_queries.query_wmi(
"SELECT * FROM AntivirusProduct",
namespace="SecurityCenter",
name="windows_tools.antivirus.get_installed_antivirus_software",
)
try:
for product in result:
av_engine = {
"name": None,
"version": None,
"publisher": None,
"enabled": None,
"is_up_to_date": None,
"type": None,
}
try:
av_engine["name"] = product["displayName"]
except KeyError:
pass
try:
state = product["productState"]
av_engine["enabled"] = securitycenter_get_product_exec_state(state)
av_engine["is_up_to_date"] = securitycenter_get_product_update_state(
state
)
av_engine["type"] = securitycenter_get_product_type(state)
except KeyError:
pass
potential_seccenter_av_engines.append(av_engine)
# TypeError may happen when securityCenter namespace does not exist
except (KeyError, TypeError):
pass
for product in windows_tools.installed_software.get_installed_software():
product["enabled"] = None
product["is_up_to_date"] = None
product["type"] = None
try:
if re.search(
r"anti.*(virus|viral)|malware", product["name"], re.IGNORECASE
):
potential_av_engines.append(product)
continue
if re.search(
r"|".join(KNOWN_ANTIVIRUS_PRODUCTS_REGEX),
product["publisher"],
re.IGNORECASE,
):
potential_av_engines.append(product)
# Specific case where name is unknown
except KeyError:
pass
# SecurityCenter seems to be less precise than registry search
# Now make sure we don't have "double entries" from securiycenter, then add them
for seccenter_engine in potential_seccenter_av_engines:
for engine in potential_av_engines:
if seccenter_engine["name"] not in engine["name"]:
# Do not add already existing entries from securitycenter
av_engines.append(seccenter_engine)
av_engines = av_engines + potential_av_engines
return av_engines
|
b122960b48edfb0e193c354293b28bc1ead0a936
| 31,648 |
from math import log

import numpy.random as nr
import scipy.spatial as ss
from scipy.special import digamma

def mi(x, y, k=3, base=2):
    """ Mutual information of x and y.
    x, y should be flat lists of scalar samples, e.g. x = [1.3, 3.7, 5.1, 2.4]
    for four samples of a one-dimensional variable; each entry is wrapped into
    a 1-d vector internally. `zip2` and `avgdigamma` are helpers defined
    alongside this estimator.
    """
x = [[entry] for entry in x]
y = [[entry] for entry in y]
assert len(x)==len(y), "Lists should have same length"
assert k <= len(x) - 1, "Set k smaller than num. samples - 1"
intens = 1e-10 #small noise to break degeneracy, see doc.
x = [list(p + intens*nr.rand(len(x[0]))) for p in x]
y = [list(p + intens*nr.rand(len(y[0]))) for p in y]
points = zip2(x,y)
#Find nearest neighbors in joint space, p=inf means max-norm
tree = ss.cKDTree(points)
dvec = [tree.query(point,k+1,p=float('inf'))[0][k] for point in points]
a,b,c,d = avgdigamma(x,dvec), avgdigamma(y,dvec), digamma(k), digamma(len(x))
return (-a-b+c+d)/log(base)
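A usage sketch with two dependent scalar samples; it runs only alongside the module's `zip2` and `avgdigamma` helpers referenced above:

import numpy as np

rng = np.random.RandomState(0)
a = rng.randn(500)
b = a + 0.1 * rng.randn(500)                 # strongly dependent on a
print(mi(list(a), list(b), k=3))             # clearly positive (a few bits)
print(mi(list(a), list(rng.randn(500))))     # close to zero for independent samples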
|
960501be5134dcfe99ca29b50622dbfc0b403b78
| 31,649 |
def _xls_cc_ir_impl_wrapper(ctx):
"""The implementation of the 'xls_cc_ir' rule.
Wrapper for xls_cc_ir_impl. See: xls_cc_ir_impl.
Args:
ctx: The current rule's context object.
Returns:
ConvIRInfo provider
DefaultInfo provider
"""
ir_conv_info, built_files, runfiles = _xls_cc_ir_impl(ctx)
return [
ir_conv_info,
DefaultInfo(
files = depset(
direct = built_files,
transitive = _get_transitive_built_files_for_xls_cc_ir(ctx),
),
runfiles = runfiles,
),
]
|
c76bddc8b05322b2df4af67415f783aa1f2635bb
| 31,650 |
from collections import defaultdict
from statistics import geometric_mean  # assumption: the stdlib geometric mean is intended here
from typing import DefaultDict, List, Tuple

import numpy as np

def create_dataset(message_sizes: List[int], labels: List[int], window_size: int, num_samples: int, rand: np.random.RandomState) -> Tuple[np.ndarray, np.ndarray]:
"""
Creates the attack dataset by randomly sampling message sizes of the given window.
Args:
message_sizes: The size of each message (in bytes)
labels: The true label for each message
window_size: The size of the model's features (D)
num_samples: The number of samples to create
rand: The random state used to create samples in a reproducible manner
Returns:
A tuple of two elements.
(1) A [N, D] array of input features composed of message sizes
(2) A [N] array of labels for each input
"""
assert len(message_sizes) == len(labels), 'Must provide the same number of message sizes and labels'
num_messages = len(message_sizes)
# Group the message sizes by label
bytes_dist: DefaultDict[int, List[int]] = defaultdict(list)
for label, size in zip(labels, message_sizes):
bytes_dist[label].append(size)
inputs: List[np.ndarray] = []
output: List[int] = []
for label in bytes_dist.keys():
sizes = bytes_dist[label]
num_to_create = int(round(num_samples * (len(sizes) / num_messages)))
for _ in range(num_to_create):
raw_sizes = rand.choice(sizes, size=window_size) # [D]
iqr = np.percentile(raw_sizes, 75) - np.percentile(raw_sizes, 25)
features = [np.average(raw_sizes), np.std(raw_sizes), np.median(raw_sizes), np.max(raw_sizes), np.min(raw_sizes), iqr, geometric_mean(raw_sizes)]
inputs.append(np.expand_dims(features, axis=0))
output.append(label)
return np.vstack(inputs), np.vstack(output).reshape(-1)
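A small usage sketch with synthetic message sizes (values are arbitrary; assumes the imports noted above):

import numpy as np

sizes = [120, 340, 115, 360, 130, 350, 125, 345]
labels = [0, 1, 0, 1, 0, 1, 0, 1]
X, y = create_dataset(sizes, labels, window_size=4, num_samples=6,
                      rand=np.random.RandomState(42))
print(X.shape, y.shape)  # (6, 7) (6,): seven summary features per sampled window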
|
081e0c6ddc18988d8e24a08ec4a4e565f318d23a
| 31,651 |
def infer_Tmap_from_clonal_info_alone_private(
adata_orig, method="naive", clonal_time_points=None, selected_fates=None
):
"""
Compute transition map using only the lineage information.
Here, we compute the transition map between neighboring time points.
We simply average transitions across all clones (or selected clones when method='Weinreb'),
assuming that the intra-clone transition is uniform within the same clone.
Parameters
----------
adata_orig: :class:`~anndata.AnnData` object
method: `str`, optional (default: 'naive')
Method used to compute the transition map. Choice: {'naive',
'weinreb'}. For the naive method, we simply average transitions
across all clones, assuming that the intra-clone transitions are
uniform within the same clone. For the 'weinreb' method, we first
find uni-potent clones, then compute the transition map by simply
averaging across all clonal transitions as the naive method.
selected_fates: `list`, optional (default: all selected)
List of targeted fate clusters to define uni-potent clones for the
weinreb method, which are used to compute the transition map.
clonal_time_points: `list` of `str`, optional (default: all time points)
List of time points to be included for analysis.
We assume that each selected time point has clonal measurements.
Returns
-------
adata: :class:`~anndata.AnnData` object
The transition map is stored at adata.uns['clonal_transition_map']
"""
adata_1 = tmap_util.select_time_points(
adata_orig, time_point=clonal_time_points, extend_Tmap_space=True
)
if method not in ["naive", "weinreb"]:
logg.warn("method not in ['naive','weinreb']; set to be 'weinreb'")
method = "weinreb"
cell_id_t2_all = adata_1.uns["Tmap_cell_id_t2"]
cell_id_t1_all = adata_1.uns["Tmap_cell_id_t1"]
T_map = np.zeros((len(cell_id_t1_all), len(cell_id_t2_all)))
clone_annot = adata_1.obsm["X_clone"]
N_points = len(adata_1.uns["multiTime_cell_id_t1"])
for k in range(N_points):
cell_id_t1_temp = adata_1.uns["multiTime_cell_id_t1"][k]
cell_id_t2_temp = adata_1.uns["multiTime_cell_id_t2"][k]
if method == "naive":
logg.info("Use all clones (naive method)")
T_map_temp = clone_annot[cell_id_t1_temp] * clone_annot[cell_id_t2_temp].T
else:
logg.info("Use only uni-potent clones (weinreb et al., 2020)")
state_annote = np.array(adata_1.obs["state_info"])
if selected_fates == None:
selected_fates = list(set(state_annote))
potential_vector_clone, fate_entropy_clone = tl.compute_state_potential(
clone_annot[cell_id_t2_temp].T,
state_annote[cell_id_t2_temp],
selected_fates,
fate_count=True,
)
sel_unipotent_clone_id = np.array(
list(set(np.nonzero(fate_entropy_clone == 1)[0]))
)
clone_annot_unipotent = clone_annot[:, sel_unipotent_clone_id]
T_map_temp = (
clone_annot_unipotent[cell_id_t1_temp]
* clone_annot_unipotent[cell_id_t2_temp].T
)
logg.info(
f"Used uni-potent clone fraction {len(sel_unipotent_clone_id)/clone_annot.shape[1]}"
)
idx_t1 = np.nonzero(np.in1d(cell_id_t1_all, cell_id_t1_temp))[0]
idx_t2 = np.nonzero(np.in1d(cell_id_t2_all, cell_id_t2_temp))[0]
idx_t1_temp = np.nonzero(np.in1d(cell_id_t1_temp, cell_id_t1_all))[0]
idx_t2_temp = np.nonzero(np.in1d(cell_id_t2_temp, cell_id_t2_all))[0]
T_map[idx_t1[:, np.newaxis], idx_t2] = T_map_temp[idx_t1_temp][:, idx_t2_temp].A
T_map = T_map.astype(int)
adata_1.uns["clonal_transition_map"] = ssp.csr_matrix(T_map)
return adata_1
|
9926e2a6faf50bed2d1668de031a600e0f65c1af
| 31,652 |
import math
import typing as t
def percentile(seq: t.Iterable[float], percent: float) -> float:
"""
Find the percentile of a list of values.
prometheus-client 0.6.0 doesn't support percentiles, so we use this implementation
Stolen from https://github.com/heaviss/percentiles that was stolen
from http://code.activestate.com/recipes/511478-finding-the-percentile-of-the-values/
"""
if not seq:
raise ValueError('seq must be non-empty iterable')
if not (0 < percent < 100):
raise ValueError('percent parameter must be between 0 and 100')
seq = sorted(seq)
k = (len(seq) - 1) * percent / 100
prev_index = math.floor(k)
next_index = math.ceil(k)
if prev_index == next_index:
return seq[int(k)]
d0 = seq[prev_index] * (next_index - k)
d1 = seq[next_index] * (k - prev_index)
return d0 + d1
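Usage sketch:

data = [15, 20, 35, 40, 50]
print(percentile(data, 50))  # 35 (the median)
print(percentile(data, 40))  # ~29.0, interpolated between 20 and 35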
|
640f132366bad8bf0c58aa318b5be60136925ab9
| 31,653 |
from typing import Union
def select_view_by_cursors(**kwargs):
"""
Selects the Text View ( visible selection ) for the given cursors
Keyword Args:
sel (Tuple[XTextRange, XTextRange], XTextRange): selection as tuple of left and right range or as text range.
o_doc (GenericTextDocument, optional): current document (xModel). Defaults to current document.
o_text (XText, optional): xText object used only when sel is a xTextRangeObject.
require_selection (bool, optional): If ``True`` then a check is preformed to see if anything is selected;
Otherwise, No check is done. Default ``True``
Raises:
TypeError: if ``sel`` is ``None``
ValueError: if ``sel`` is passed in as ``tuple`` and length is not ``2``.
ValueError: if ``sel`` is missing.
Excpetion: If Error selecting view.
"""
o_doc: 'GenericTextDocument' = kwargs.get('o_doc', None)
if o_doc is None:
o_doc = get_xModel()
_sel_check = kwargs.get('require_selection', True)
    if _sel_check and not is_anything_selected(o_doc=o_doc):
return None
l_cursor: 'XTextCursor' = None
r_cursor: 'XTextCursor' = None
_sel: 'Union[tuple, XTextRange]' = kwargs.get('sel', None)
if _sel is None:
raise ValueError("select_view_by_cursors() 'sel' argument is required")
if isinstance(_sel, tuple):
if len(_sel) < 2:
raise ValueError(
"select_view_by_cursors() sel argument when passed as a tuple is expected to have two elements")
l_cursor = _sel[0]
r_cursor = _sel[1]
else:
x_text: 'Union[XText, None]' = kwargs.get("o_text", None)
if x_text is None:
x_text = get_selected_text(o_doc=o_doc)
        if x_text is None:
# there is an issue. Something should be selected.
# msg = "select_view_by_cursors() Something was expected to be selected but xText object does not exist"
return None
l_cursor = _get_left_cursor(o_sel=_sel, o_text=x_text)
r_cursor = _get_right_cursor(o_sel=_sel, o_text=x_text)
vc = get_view_cursor(o_doc=o_doc)
try:
vc.setVisible(False)
vc.gotoStart(False)
vc.collapseToStart()
vc.gotoRange(l_cursor, False)
vc.gotoRange(r_cursor, True)
except Exception as e:
raise e
finally:
if not vc.isVisible():
vc.setVisible(True)
|
42c42c4b60d802a66e942ac8fa8efe97a8253ea3
| 31,654 |
from typing import List
from typing import Dict
def load_types(
directories: List[str],
loads: LoadedFiles = DEFAULT_LOADS,
) -> Dict[str, dict]:
"""Load schema types and optionally register them."""
schema_data: Dict[str, dict] = {}
# load raw data
for directory in directories:
load_dir(directory, schema_data, None, loads)
return schema_data
|
8dc1f3625c03451eb9ac28804715ccf260400536
| 31,655 |
import cv2
import numpy as np

def fit_circle(img, show_rect_or_cut='show'):
"""
fit an ellipse to the contour in the image and find the overlaying square.
Either cut the center square or just plot the resulting square
Code partly taken from here:
https://stackoverflow.com/questions/55621959/opencv-fitting-a-single-circle-to-an-image-in-python
:param img: numpy array with width, height,3
:param show_rect_or_cut: string 'show' or 'cut'
:return: image, either cut center piece or with drawn square
flag, whether algorithm thinks this image is difficult (if the circle is too small or narrow
"""
# convert image to grayscale and use otsu threshold to binarize
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
thresh = cv2.bitwise_not(thresh)
# fill holes
element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(15, 15))
morph_img = thresh.copy()
cv2.morphologyEx(src=thresh, op=cv2.MORPH_CLOSE, kernel=element, dst=morph_img)
# find contours in image and use the biggest found contour
contours, _ = cv2.findContours(morph_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
areas = [cv2.contourArea(c) for c in contours]
sorted_areas = np.sort(areas)
cnt = contours[areas.index(sorted_areas[-1])] # the biggest contour
if len(cnt) < 10:
return img, 'Diff'
# fit ellipse and use found center as center for square
ellipse = cv2.fitEllipse(cnt)
if np.min((ellipse[1][0], ellipse[1][1])) < 900:
flag = 'Diff'
else:
flag = False
r_center_x = int(ellipse[0][0])
r_center_y = int(ellipse[0][1])
r_center_x = np.max((r_center_x, 1024))
r_center_x = np.min((r_center_x, img.shape[0] - 1024))
r_center_y = np.max((r_center_y, 1024))
r_center_y = np.min((r_center_y, img.shape[1] - 1024))
if show_rect_or_cut == 'show':
half_width = 1024
cv2.rectangle(img,
(r_center_x - half_width, r_center_y - half_width),
(r_center_x + half_width, r_center_y + half_width),
(0, 150, 0), 40)
elif show_rect_or_cut == 'cut':
img = img[r_center_y - 1024:r_center_y + 1024,
r_center_x - 1024:r_center_x + 1024, :]
return img, flag
|
fdeb8f9a24159236609eac271016624f95f62504
| 31,656 |
from typing import Mapping
from typing import Any
from typing import MutableMapping
def unflatten_dict(
d: Mapping[str, Any],
separator: str = '.',
unflatten_list: bool = False,
sort: bool = False
) -> MutableMapping[str, Any]:
"""
Example:
In []: unflatten_dict({'count.chans.HU_SN': 10})
Out[]: {'count': {'chans': {'HU_SN': 10}}}
In []: unflatten_dict({'a.0.b.f.0': 1, 'a.0.b.f.1': 2, 'a.0.b.f.2': 3, 'a.1.c': 2, 'd.e': 1}, unflatten_list=True)
Out[]: {'a': [{'b': {'f': [1, 2, 3]}}, {'c': 2}], 'd': {'e': 1}}
"""
out: dict[str, Any] = {}
for key in sorted(d.keys()) if sort else d:
parts = key.split(separator)
target: dict[str, Any] = out
for part in parts[:-1]:
if part not in target:
target[part] = {}
target = target[part]
target[parts[-1]] = d[key]
if unflatten_list:
return _unflatten_lists(out)
return out
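Usage sketch mirroring the first docstring example (the `unflatten_list=True` path additionally needs the module's `_unflatten_lists` helper):

print(unflatten_dict({'count.chans.HU_SN': 10}))
# {'count': {'chans': {'HU_SN': 10}}}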
|
40662a4884171c444ed40c654497f6a0e17a132d
| 31,657 |
from numpy import NaN
from scipy.optimize import fsolve
from scipy.optimize import newton as newton_meth  # assumed alias for the Newton solver used below

def _xinf_1D(xdot,x0,args=(),xddot=None,xtol=1.49012e-8):
"""Private function for wrapping the solving for x_infinity
for a variable x in 1 dimension"""
try:
if xddot is None:
xinf_val = float(fsolve(xdot,x0,args,xtol=xtol))
else:
xinf_val = float(newton_meth(xdot,x0,fprime=xddot,args=args))
except RuntimeError:
xinf_val = NaN
return xinf_val
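A usage sketch with a toy rate function, assuming the solver imports noted above:

# xdot = a - x has the equilibrium x_inf = a.
print(_xinf_1D(lambda x, a: a - x, x0=0.0, args=(2.5,)))       # ~2.5 via fsolve
print(_xinf_1D(lambda x, a: a - x, x0=0.0, args=(2.5,),
               xddot=lambda x, a: -1.0))                       # ~2.5 via Newton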
|
e69c08b914395d93a94544d9ba085a440951a03c
| 31,658 |
from typing import Dict
import operator

import cytoolz
from textacy import types  # textacy's own `types` module (provides DocLike), not the stdlib one

# `TokenGroupByType`, `WeightingType`, `basics` and `_reweight_bag` are assumed to
# be defined alongside this function in the source package.
def to_bag_of_words(
doclike: types.DocLike,
*,
by: TokenGroupByType = "lemma_",
weighting: WeightingType = "count",
**kwargs,
) -> Dict[int, int | float] | Dict[str, int | float]:
"""
Transform a ``Doc`` or ``Span`` into a bag-of-words: the set of unique words therein
mapped to their absolute, relative, or binary frequencies of occurrence.
Args:
doclike
by: Attribute by which spaCy ``Token`` s are grouped before counting,
as given by ``getattr(token, by)``.
If "lemma", tokens are grouped by their base form w/o inflectional suffixes;
if "lower", by the lowercase form of the token text;
if "norm", by the normalized form of the token text;
if "orth", by the token text exactly as it appears in ``doc``.
To output keys as strings, simply append an underscore to any of these;
for example, "lemma_" creates a bag whose keys are token lemmas as strings.
weighting: Type of weighting to assign to unique words given by ``by``.
If "count", weights are the absolute number of occurrences (i.e. counts);
if "freq", weights are counts normalized by the total token count,
giving their relative frequency of occurrence;
if "binary", weights are set equal to 1.
**kwargs: Passed directly on to :func:`textacy.extract.words()`
- filter_stops: If True, stop words are removed before counting.
- filter_punct: If True, punctuation tokens are removed before counting.
- filter_nums: If True, number-like tokens are removed before counting.
Returns:
Mapping of a unique word id or string (depending on the value of ``by``)
to its absolute, relative, or binary frequency of occurrence
(depending on the value of ``weighting``).
Note:
For "freq" weighting, the resulting set of frequencies won't (necessarily) sum
to 1.0, since all tokens are used when normalizing counts but some (punctuation,
stop words, etc.) may be filtered out of the bag afterwards.
See Also:
:func:`textacy.extract.words()`
"""
words = basics.words(doclike, **kwargs)
bow = cytoolz.recipes.countby(operator.attrgetter(by), words)
bow = _reweight_bag(weighting, bow, doclike)
return bow
|
0065eba8ff7f74b420efc8c65688ab293dee1dda
| 31,659 |
def get_trip_info(origin, destination, date):
"""
Provides basic template for response, you can change as many things as you like.
:param origin: from which airport your trip beings
:param destination: where are you flying to
:param date: when
:return:
"""
template = {
"kind": "qpxExpress#tripsSearch",
"trips": {
"kind": "qpxexpress#tripOptions",
"requestId": "SYzLMFMFPCrebUp5H0NaGL",
"data": {
"kind": "qpxexpress#data",
"airport": [
{
"kind": "qpxexpress#airportData",
"code": "AMS",
"city": "AMS",
"name": "Amsterdam Schiphol Airport"
},
{
"kind": "qpxexpress#airportData",
"code": "LGW",
"city": "LON",
"name": "London Gatwick"
}
],
"city": [
{
"kind": "qpxexpress#cityData",
"code": "AMS",
"name": "Amsterdam"
},
{
"kind": "qpxexpress#cityData",
"code": "LON",
"name": "London"
}
],
"aircraft": [
{
"kind": "qpxexpress#aircraftData",
"code": "319",
"name": "Airbus A319"
},
{
"kind": "qpxexpress#aircraftData",
"code": "320",
"name": "Airbus A320"
}
],
"tax": [
{
"kind": "qpxexpress#taxData",
"id": "GB_001",
"name": "United Kingdom Air Passengers Duty"
},
{
"kind": "qpxexpress#taxData",
"id": "UB",
"name": "United Kingdom Passenger Service Charge"
}
],
"carrier": [
{
"kind": "qpxexpress#carrierData",
"code": "BA",
"name": "British Airways p.l.c."
}
]
},
"tripOption": [
{
"kind": "qpxexpress#tripOption",
"saleTotal": "GBP47.27",
"id": "OAcAQw8rr9MNhwQoBntUKJ001",
"slice": [
{
"kind": "qpxexpress#sliceInfo",
"duration": 75,
"segment": [
{
"kind": "qpxexpress#segmentInfo",
"duration": 75,
"flight": {
"carrier": "BA",
"number": "2762"
},
"id": "GStLakphRYJX3LbK",
"cabin": "COACH",
"bookingCode": "O",
"bookingCodeCount": 1,
"marriedSegmentGroup": "0",
"leg": [
{
"kind": "qpxexpress#legInfo",
"id": "LgJHYCVgG0AiE1PH",
"aircraft": "320",
"arrivalTime": "%sT18:05+01:00" % date,
"departureTime": "%sT15:50+00:00" % date,
"origin": origin,
"destination": destination,
"originTerminal": "N",
"duration": 75,
"mileage": 226,
"meal": "Snack or Brunch"
}
]
}
]
}
],
"pricing": [
{
"kind": "qpxexpress#pricingInfo",
"fare": [
{
"kind": "qpxexpress#fareInfo",
"id": "A855zsItBCELBykaeqeBDQb5hPZQIOtkOZ8uDq0lD5VU",
"carrier": "BA",
"origin": origin,
"destination": destination,
"basisCode": "OV1KO"
}
],
"segmentPricing": [
{
"kind": "qpxexpress#segmentPricing",
"fareId": "A855zsItBCELBykaeqeBDQb5hPZQIOtkOZ8uDq0lD5VU",
"segmentId": "GStLakphRYJX3LbK"
}
],
"baseFareTotal": "GBP22.00",
"saleFareTotal": "GBP22.00",
"saleTaxTotal": "GBP25.27",
"saleTotal": "GBP47.27",
"passengers": {
"kind": "qpxexpress#passengerCounts",
"adultCount": 1
},
"tax": [
{
"kind": "qpxexpress#taxInfo",
"id": "UB",
"chargeType": "GOVERNMENT",
"code": "UB",
"country": "GB",
"salePrice": "GBP12.27"
},
{
"kind": "qpxexpress#taxInfo",
"id": "GB_001",
"chargeType": "GOVERNMENT",
"code": "GB",
"country": "GB",
"salePrice": "GBP13.00"
}
],
"fareCalculation": "LON BA AMS 33.71OV1KO NUC 33.71 END ROE 0.652504 FARE GBP 22.00 XT 13.00GB 12.27UB",
"latestTicketingTime": "2016-01-11T23:59-05:00",
"ptc": "ADT"
}
]
},
{
"kind": "qpxexpress#tripOption",
"saleTotal": "GBP62.27",
"id": "OAcAQw8rr9MNhwQoBntUKJ002",
"slice": [
{
"kind": "qpxexpress#sliceInfo",
"duration": 80,
"segment": [
{
"kind": "qpxexpress#segmentInfo",
"duration": 80,
"flight": {
"carrier": "BA",
"number": "2758"
},
"id": "GW8rUjsDA234DdHV",
"cabin": "COACH",
"bookingCode": "Q",
"bookingCodeCount": 9,
"marriedSegmentGroup": "0",
"leg": [
{
"kind": "qpxexpress#legInfo",
"id": "Lp08eKxnXnyWfJo4",
"aircraft": "319",
"arrivalTime": "%sT10:05+01:00" % date,
"departureTime": "%sT07:45+00:00" % date,
"origin": origin,
"destination": destination,
"originTerminal": "N",
"duration": 80,
"mileage": 226,
"meal": "Snack or Brunch"
}
]
}
]
}
],
"pricing": [
{
"kind": "qpxexpress#pricingInfo",
"fare": [
{
"kind": "qpxexpress#fareInfo",
"id": "AslXz8S1h3mMcnYUQ/v0Zt0p9Es2hj8U0We0xFAU1qDE",
"carrier": "BA",
"origin": origin,
"destination": destination,
"basisCode": "QV1KO"
}
],
"segmentPricing": [
{
"kind": "qpxexpress#segmentPricing",
"fareId": "AslXz8S1h3mMcnYUQ/v0Zt0p9Es2hj8U0We0xFAU1qDE",
"segmentId": "GW8rUjsDA234DdHV"
}
],
"baseFareTotal": "GBP37.00",
"saleFareTotal": "GBP37.00",
"saleTaxTotal": "GBP25.27",
"saleTotal": "GBP62.27",
"passengers": {
"kind": "qpxexpress#passengerCounts",
"adultCount": 1
},
"tax": [
{
"kind": "qpxexpress#taxInfo",
"id": "UB",
"chargeType": "GOVERNMENT",
"code": "UB",
"country": "GB",
"salePrice": "GBP12.27"
},
{
"kind": "qpxexpress#taxInfo",
"id": "GB_001",
"chargeType": "GOVERNMENT",
"code": "GB",
"country": "GB",
"salePrice": "GBP13.00"
}
],
"fareCalculation": "LON BA AMS 56.70QV1KO NUC 56.70 END ROE 0.652504 FARE GBP 37.00 XT 13.00GB 12.27UB",
"latestTicketingTime": "%sT23:59-05:00" % date,
"ptc": "ADT"
}
]
}
]
}
}
return template
|
d1dfd35f41538e800b5c6f5986faac7fcd30ebf3
| 31,660 |
def serialise(data, data_type=None):
"""
Serialises the specified data.
The result is a ``bytes`` object. The ``deserialise`` operation turns it
back into a copy of the original object.
:param data: The data that must be serialised.
:param data_type: The type of data that will be provided. If no data type is
provided, the data type is found automatically.
:return: A ``bytes`` object representing exactly the state of the data.
"""
if data_type is None:
data_type = type_of(data)
if data_type is None:
raise SerialisationException("The data type of object {instance} could not automatically be determined.".format(instance=str(data)))
try:
return luna.plugins.plugins_by_type["data"][data_type]["data"]["serialise"](data)
except KeyError as e: #Plug-in with specified data type is not available.
raise KeyError("There is no activated data plug-in with data type {data_type} to serialise with.".format(data_type=data_type)) from e
|
6c4e7b144e3e938d30cceee5503290f8cf31ca27
| 31,661 |
def topopebreptool_RegularizeShells(*args):
"""
* Returns <False> if the shell is valid (the solid is a set of faces connexed by edges with connexity 2). Else, splits faces of the shell; <OldFacesnewFaces> describes (face, splits of face).
:param aSolid:
:type aSolid: TopoDS_Solid &
:param OldSheNewShe:
:type OldSheNewShe: TopTools_DataMapOfShapeListOfShape &
:param FSplits:
:type FSplits: TopTools_DataMapOfShapeListOfShape &
:rtype: bool
"""
return _TopOpeBRepTool.topopebreptool_RegularizeShells(*args)
|
8aa44c5b79f98f06596a5e6d9db8a4cf18f7dad3
| 31,663 |
def empty_filter(item, *args, **kwargs):
"""
Placeholder function to pass along instead of filters
"""
return True
|
d72ac5a0f787557b78644bcedd75e71f92c38a0b
| 31,665 |
import numpy as np
import pandas as pd
import pyjq

def Get_User_Tags(df, json_response, i, github_user):
"""
Calculate the tags for a user.
"""
all_repos_tags = pd.DataFrame(0, columns=df.columns, index=pyjq.all(".[] | .name", json_response))
num_repos = len(pyjq.all(".[] | .name", json_response))
#
new_element = pd.DataFrame(0, np.zeros(1), columns =df.columns)
tags = {}
#
for i in range(num_repos):
repo_names = pyjq.all(".[%s] | .name" % i, json_response)
repo_languages = pyjq.all(".[%s] | .language" % i, json_response)
repo_description = pyjq.all(".[%s] | .description" % i, json_response)
repo_topics = pyjq.all(".[%s] | .topics" % i, json_response)
#
# print (repo_names,repo_languages,repo_languages,repo_topics)
#
# We have two structure:
#
# all_repos_tags = a dataframe with a row per repo with values [0,1]
# new_element = One row dataframa with the sum of frecuencies of all repos.
reponame_lower = repo_names[0].lower()
all_repos_tags.loc[reponame_lower] = 0
if repo_description[0] is None: repo_description = ['kk']
if repo_languages[0] is None: repo_languages = ['kk']
#
if repo_topics[0] is None: repo_topics = ['kk']
#
try: repo_names[0] = repo_names[0].lower()
except Exception: pass
try: repo_languages[0] = repo_languages[0].lower()
except Exception: pass
try: repo_description[0] = repo_description[0].lower()
except Exception: pass
try: repo_topics[0] = repo_topics[0].lower()
except Exception: pass
#
# Avoid this names because of are substring of another tag ()
COLUMNS_TO_SKIP=["java" , "c"]
if repo_languages[0] in df.columns :
new_element[repo_languages[0]] += (i+1)
tags[repo_languages[0]] = 0
all_repos_tags.loc[reponame_lower][repo_languages[0]] = 1
#print("Added tag 1 : ", (i+1)," " ,repo_names[0] ," " , repo_languages[0])
for column in df.columns:
if column in COLUMNS_TO_SKIP : continue
if column in repo_topics[0] :
new_element[column] += (i+1)
all_repos_tags.loc[reponame_lower][column] = 1
tags[column] = 0
#print("Added tag 2 : ", (i+1)," " ,repo_names[0] ," " , column)
else:
if len(column) > 4 :
if column in repo_names[0] or column.replace("-"," ") in repo_names[0]:
#print("Added tag 3 : ", (i+1)," " ,repo_names[0] ," " , column)
new_element[column] += (i+1)
all_repos_tags.loc[reponame_lower][column] = 1
tags[column] = 0
else :
if column in repo_description[0] or column.replace("-"," ") in repo_description[0]:
#print("Added tag 4 : ", (i+1)," " ,repo_names[0] ," " , column)
new_element[column] += (i+1)
all_repos_tags.loc[reponame_lower][column] = 1
tags[column] = 0
# end range repos
#print("new_element.shape: ", new_element.shape , " github_user:", github_user)
#
total=new_element.iloc[0].sum()
#print(tags)
if total != 0 :
for i in tags :
if new_element[i].iloc[0] != 0 :
new_element[i] = ( new_element[i].iloc[0]/total)
#print (i , new_element[i].iloc[0] )
#
try:
all_repos_tags['repos'] = all_repos_tags['Unnamed: 0']
del all_repos_tags['Unnamed: 0']
all_repos_tags = all_repos_tags.set_index('repos')
except Exception:
pass
new_element['names']=github_user
new_element = new_element.set_index(new_element.names)
del(new_element['names'])
#
df = pd.concat([df, new_element])
print("Added : ", github_user ,df.shape)
return df, all_repos_tags
|
80955e2794e9f9d4f65a3f048bc7dc0d450ebb3d
| 31,666 |
import ctypes
def ssize(newsize, cell):
"""
Set the size (maximum cardinality) of a CSPICE cell of any data type.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ssize_c.html
:param newsize: Size (maximum cardinality) of the cell.
:type newsize: int
:param cell: The cell.
:type cell: spiceypy.utils.support_types.SpiceCell
:return: The updated cell.
:rtype: spiceypy.utils.support_types.SpiceCell
"""
assert isinstance(cell, stypes.SpiceCell)
newsize = ctypes.c_int(newsize)
libspice.ssize_c(newsize, ctypes.byref(cell))
return cell
|
52eb884e7477ddb98dc905ab848c61b83ac16123
| 31,667 |
def extend_data(data, length, offset):
    """Extend data using a length and an offset (copy bytes starting `offset` back).
    `alignValue` is a helper from the same module that rounds `length` up to a
    multiple of `offset`."""
if length >= offset:
new_data = data[-offset:] * (alignValue(length, offset) // offset)
return data + new_data[:length]
else:
return data + data[-offset:-offset+length]
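Usage sketch for the short-copy branch (the `length >= offset` branch also needs the module's `alignValue` helper):

print(extend_data(b"abcabc", 2, 3))  # b'abcabcab': copies 2 bytes starting 3 bytes back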
|
923372c1fde14335331eb38b40e118b426cc9219
| 31,669 |
def RAND_egd(path): # real signature unknown; restored from __doc__
"""
RAND_egd(path) -> bytes
Queries the entropy gather daemon (EGD) on the socket named by 'path'.
Returns number of bytes read. Raises SSLError if connection to EGD
fails or if it does not provide enough data to seed PRNG.
"""
return ""
|
5ef4e3e065c44058996c1793541cd9f2a599b106
| 31,670 |
from typing import List
from typing import Dict
from typing import Any
def get_types_map(types_array: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
"""Get the type name of a metadata or a functionality."""
return {type_["name"]: type_ for type_ in types_array}
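Usage sketch:

types_array = [{"name": "temperature", "unit": "C"}, {"name": "humidity", "unit": "%"}]
print(get_types_map(types_array)["humidity"])  # {'name': 'humidity', 'unit': '%'}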
|
9354eff434b589a19360ee13d8bf7d9ab9e1002d
| 31,671 |
def update_flavor(request, **kwargs):
"""Update a flavor.
"""
data = request.DATA
flavor_id = data['flavor']['id']
conn = _get_sdk_connection(request)
flavor = conn.load_balancer.update_flavor(
flavor_id,
name=data['flavor'].get('name'),
description=data['flavor'].get('description'),
enabled=data['flavor'].get('enabled'),
)
return _get_sdk_object_dict(flavor)
|
9f165df73f3c557956d466e3fec6d720a1ee76cb
| 31,672 |
from typing import List
import re
async def get_all_product_features_from_cluster() -> List[str]:
"""
Returns a list of all product.feature in the cluster.
"""
show_lic_output = await scontrol_show_lic()
PRODUCT_FEATURE = r"LicenseName=(?P<product>[a-zA-Z0-9_]+)[_\-.](?P<feature>\w+)"
RX_PRODUCT_FEATURE = re.compile(PRODUCT_FEATURE)
parsed_features = []
output = show_lic_output.split("\n")
for line in output:
parsed_line = RX_PRODUCT_FEATURE.match(line)
if parsed_line:
parsed_data = parsed_line.groupdict()
product = parsed_data["product"]
feature = parsed_data["feature"]
parsed_features.append(f"{product}.{feature}")
return parsed_features
|
9822c952654b3e2516e0ec3b5cf397ced8b3eaaf
| 31,673 |