content | sha1 | id
---|---|---|
def create_fixxation_map(eye_x, eye_y, fixxation_classifier):
"""
:param eye_x: an indexable data structure with the x eye coordinates
:param eye_y: an indexable data structure with the y eye coordinates
:param fixxation_classifier: a list of values indicating whether the move from the previous point is a fixation.
:return: a list of circles, one bounding each fixation plus a single-point circle for each saccade.
The list is organized like this: [((circle1_x, circle1_y), circle1_radius, is_fixxation), ...]
"""
# process into fixxation and saccade movements
points_array = []
currently_fixxation = False
current_points = []
for idx, classifier in enumerate(fixxation_classifier):
if classifier == 1 and not currently_fixxation:
current_points = [(eye_x[idx], eye_y[idx])]
currently_fixxation = True
elif classifier == 1:
current_points.append((eye_x[idx], eye_y[idx]))
elif classifier == 0 and currently_fixxation:
points_array.append((current_points.copy(), True))
current_points = []
currently_fixxation = False
points_array.append(([(eye_x[idx], eye_y[idx])], False))
else:
points_array.append(([(eye_x[idx], eye_y[idx])], False))
# after the loop: flush a fixation that runs to the end of the data
if currently_fixxation and current_points:
points_array.append((current_points.copy(), True))
circles = [(make_circle(points), is_fixxation) for points, is_fixxation in points_array]
circles = [((x, y), radius, is_fixxation) for ((x, y, radius), is_fixxation) in circles]
return circles | bccf37777eb4d74fcb48a8316fc3d2695a209371 | 12,400 |
from typing import Any, TypeVar
T = TypeVar("T")
META_DATA_KEY = "__metadata__"  # assumed module-level constant; the real value is defined elsewhere
def with_metadata(obj: T, key: str, value: Any) -> T:
"""
Adds meta-data to an object.
:param obj: The object to add meta-data to.
:param key: The key to store the meta-data under.
:param value: The meta-data value to store.
:return: obj.
"""
# Create the meta-data map
if not hasattr(obj, META_DATA_KEY):
try:
setattr(obj, META_DATA_KEY, {})
except AttributeError as e:
raise ValueError(f"Cannot set meta-data against objects of type {obj.__class__.__name__}") from e
# Put this mapping in the map
getattr(obj, META_DATA_KEY)[key] = value
return obj | 566f9a2c1d083bbe44b86f0a8716e5bb44892b13 | 12,401 |
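A minimal usage sketch for with_metadata; it relies on the META_DATA_KEY value hedged above, and Record is a throwaway class invented for the example.

class Record:
    pass

r = with_metadata(Record(), "source", "sensor-7")
print(getattr(r, META_DATA_KEY))  # {'source': 'sensor-7'}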
import hashlib
def checksum(uploaded_file: 'SimpleUploadedFile', **options):
"""
Function to calculate checksum for file,
can be used to verify downloaded file integrity
"""
hash_type = options['type']
if hash_type == ChecksumType.MD5:
hasher = hashlib.md5()
elif hash_type == ChecksumType.SHA256:
hasher = hashlib.sha256()
else:
raise ValueError(f'Hash type "{hash_type}" in "checksum" function is not valid')
if uploaded_file.multiple_chunks():
for data in uploaded_file.chunks(HASH_CHUNK_SIZE):
hasher.update(data)
else:
hasher.update(uploaded_file.read())
return {
'checksum': hasher.hexdigest()
} | 766a288a09791242029669a63734143cf8e2c007 | 12,402 |
import logging
import requests
logger = logging.getLogger(__name__)
def get_page(url):
"""Get source of url
Keyword Arguments:
url : string
"""
logger.debug(f'Getting source for {url}')
res = requests.get(url)
logger.debug(f'Status code for {url} is {res.status_code}')
if res.status_code < 200 or res.status_code >= 300:
raise Exception(f'''
Not successful retrieving {url},
the status code is {res.status_code}
''')
logger.debug('Request successful, returning text')
return res.text | 3b273e895fe36f050423b514e046961045a30017 | 12,403 |
import types
from typing import Optional
from typing import Tuple
def preceding_words(document: Document, position: types.Position) -> Optional[Tuple[str, str]]:
"""
Get the two words immediately preceding the cursor position, if any.
"""
lines = document.lines
if position.line >= len(lines):
return None
row, col = position_from_utf16(lines, position)
line = lines[row]
try:
word = line[:col].strip().split()[-2:]
return word
except ValueError:
return None | 9d1078084045ac468639a903c74dd24e45ed1087 | 12,404 |
from torch.autograd import Variable
def check_gpu(gpu, *args):
"""Wrap the data in *args in Variables, moving it to the given GPU if one is specified.
gpu: options.gpu (None, or 0, 1, .. gpu index)
"""
if gpu is None:
if isinstance(args[0], dict):
d = args[0]
#print(d.keys())
var_dict = {}
for key in d:
var_dict[key] = Variable(d[key])
if len(args) > 1:
return [var_dict] + check_gpu(gpu, *args[1:])
else:
return [var_dict]
# it's a list of arguments
if len(args) > 1:
return [Variable(a) for a in args]
else: # single argument, don't make a list
return Variable(args[0])
else:
if isinstance(args[0], dict):
d = args[0]
#print(d.keys())
var_dict = {}
for key in d:
var_dict[key] = Variable(d[key].cuda(gpu))
if len(args) > 1:
return [var_dict] + check_gpu(gpu, *args[1:])
else:
return [var_dict]
# it's a list of arguments
if len(args) > 1:
return [Variable(a.cuda(gpu)) for a in args]
else: # single argument, don't make a list
return Variable(args[0].cuda(gpu)) | e4849a0a99dd6ca7baeacadc130e46006dd23c3a | 12,405 |
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up the SleepIQ config entry."""
conf = entry.data
email = conf[CONF_USERNAME]
password = conf[CONF_PASSWORD]
client_session = async_get_clientsession(hass)
gateway = AsyncSleepIQ(client_session=client_session)
try:
await gateway.login(email, password)
except SleepIQLoginException:
_LOGGER.error("Could not authenticate with SleepIQ server")
return False
except SleepIQTimeoutException as err:
raise ConfigEntryNotReady(
str(err) or "Timed out during authentication"
) from err
try:
await gateway.init_beds()
except SleepIQTimeoutException as err:
raise ConfigEntryNotReady(
str(err) or "Timed out during initialization"
) from err
except SleepIQAPIException as err:
raise ConfigEntryNotReady(str(err) or "Error reading from SleepIQ API") from err
coordinator = SleepIQDataUpdateCoordinator(hass, gateway, email)
# Call the SleepIQ API to refresh data
await coordinator.async_config_entry_first_refresh()
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = coordinator
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True | e4a4765113c7bc1e3c50290c72f3ca8196ba2bf2 | 12,406 |
def expansion(svsal,temp,pres,salt=None,dliq=None,dvap=None,
chkvals=False,chktol=_CHKTOL,salt0=None,dliq0=None,dvap0=None,
chkbnd=False,useext=False,mathargs=None):
"""Calculate seawater-vapour thermal expansion coefficient.
Calculate the thermal expansion coefficient of a seawater-vapour
parcel.
:arg float svsal: Total sea-vapour salinity in kg/kg.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg salt: Seawater salinity in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type salt: float or None
:arg dliq: Seawater liquid water density in kg/m3. If unknown,
pass None (default) and it will be calculated.
:type dliq: float or None
:arg dvap: Water vapour density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dvap: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the seawater salinity in kg/kg. If
None (default) then `_approx_tp` is used.
:type salt0: float or None
:arg dliq0: Initial guess for the seawater liquid water density in
kg/m3. If None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg dvap0: Initial guess for the water vapour density in kg/m3. If
None (default) then `flu3a._dvap_default` is used.
:type dvap0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Expansion coefficient in 1/K.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:raises RuntimeWarning: If the equilibrium seawater salinity is
lower than the total parcel salinity.
:Examples:
>>> expansion(0.035,274.,610.)
0.4588634213
"""
salt, dliq, dvap = eq_seavap(svsal,temp,pres,salt=salt,dliq=dliq,
dvap=dvap,chkvals=chkvals,chktol=chktol,salt0=salt0,dliq0=dliq0,
dvap0=dvap0,chkbnd=chkbnd,useext=useext,mathargs=mathargs)
g_p = seavap_g(0,0,1,svsal,temp,pres,salt=salt,dliq=dliq,dvap=dvap,
useext=useext)
g_tp = seavap_g(0,1,1,svsal,temp,pres,salt=salt,dliq=dliq,dvap=dvap,
useext=useext)
alpha = g_tp / g_p
return alpha | 78c47eabf1d8e96c655652c3c8847b391264b05b | 12,407 |
import yaml
def yaml_to_dict(yaml_str=None, str_or_buffer=None):
"""
Load YAML from a string, file, or buffer (an object with a .read method).
Parameters are mutually exclusive.
Parameters
----------
yaml_str : str, optional
A string of YAML.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
dict
Conversion from YAML.
"""
if not yaml_str and not str_or_buffer:
raise ValueError('One of yaml_str or str_or_buffer is required.')
if yaml_str:
d = yaml.safe_load(yaml_str)
elif isinstance(str_or_buffer, str):
with open(str_or_buffer) as f:
d = yaml.safe_load(f)
else:
d = yaml.safe_load(str_or_buffer)
return d | 37aefe8e5b1bcc734626cbf7177e3b3dffda2416 | 12,408 |
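A short usage sketch for yaml_to_dict covering its three input forms; config.yaml is a hypothetical file name.

d1 = yaml_to_dict(yaml_str="a: 1\nb: [2, 3]")   # from a string
d2 = yaml_to_dict(str_or_buffer="config.yaml")  # from a file name
with open("config.yaml") as f:
    d3 = yaml_to_dict(str_or_buffer=f)          # from an open buffer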
from typing import Dict
from typing import Any
from typing import Tuple
def verify_block_arguments(
net_part: str,
block: Dict[str, Any],
num_block: int,
) -> Tuple[int, int]:
"""Verify block arguments are valid.
Args:
net_part: Network part, either 'encoder' or 'decoder'.
block: Block parameters.
num_block: Block ID.
Return:
block_io: Input and output dimension of the block.
"""
block_type = block.get("type")
if block_type is None:
raise ValueError(
"Block %d in %s doesn't have a type assigned." % (num_block, net_part)
)
if block_type == "transformer":
arguments = {"d_hidden", "d_ff", "heads"}
elif block_type == "conformer":
arguments = {
"d_hidden",
"d_ff",
"heads",
"macaron_style",
"use_conv_mod",
}
if block.get("use_conv_mod", None) is True and "conv_mod_kernel" not in block:
raise ValueError(
"Block %d: 'use_conv_mod' is True but "
" 'conv_mod_kernel' is not specified" % num_block
)
elif block_type == "causal-conv1d":
arguments = {"idim", "odim", "kernel_size"}
if net_part == "encoder":
raise ValueError("Encoder does not support 'causal-conv1d.'")
elif block_type == "conv1d":
arguments = {"idim", "odim", "kernel_size"}
if net_part == "decoder":
raise ValueError("Decoder does not support 'conv1d.'")
else:
raise NotImplementedError(
"Wrong type. Currently supported: "
"causal-conv1d, conformer, conv1d or transformer."
)
if not arguments.issubset(block):
raise ValueError(
"%s in %s in position %d: Expected block arguments : %s."
" See tutorial page for more information."
% (block_type, net_part, num_block, arguments)
)
if block_type in ("transformer", "conformer"):
block_io = (block["d_hidden"], block["d_hidden"])
else:
block_io = (block["idim"], block["odim"])
return block_io | cead023afcd72d1104e02b2d67406b9c47102589 | 12,409 |
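Two hypothetical block definitions showing how verify_block_arguments resolves block I/O dimensions: transformer/conformer blocks are square (d_hidden in, d_hidden out), while conv blocks map idim to odim.

transformer_block = {"type": "transformer", "d_hidden": 256, "d_ff": 1024, "heads": 4}
print(verify_block_arguments("encoder", transformer_block, 1))  # (256, 256)
conv_block = {"type": "conv1d", "idim": 80, "odim": 256, "kernel_size": 3}
print(verify_block_arguments("encoder", conv_block, 2))  # (80, 256)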
from pathlib import Path
import numpy as np
def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()):
""" Compute the average precision, given the recall and precision curves.
Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
# Arguments
tp: True positives (nparray, nx1 or nx10).
conf: Objectness value from 0-1 (nparray).
pred_cls: Predicted object classes (nparray).
target_cls: True object classes (nparray).
plot: Plot precision-recall curve at mAP@0.5
save_dir: Plot save directory
# Returns
The average precision as computed in py-faster-rcnn.
"""
# Sort by objectness
i = np.argsort(-conf)
tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
# Find unique classes
unique_classes = np.unique(target_cls)
nc = unique_classes.shape[0] # number of classes, number of detections
# Create Precision-Recall curve and compute AP for each class
px, py = np.linspace(0, 1, 1000), [] # for plotting
ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
for ci, c in enumerate(unique_classes):
i = pred_cls == c
n_l = (target_cls == c).sum() # number of labels
n_p = i.sum() # number of predictions
if n_p == 0 or n_l == 0:
print("n_p: n_l:", n_p, n_l, flush=True)
continue
else:
# Accumulate FPs and TPs
fpc = (1 - tp[i]).cumsum(0)
tpc = tp[i].cumsum(0)
# Recall
recall = tpc / (n_l + 1e-16) # recall curve
#print("recall: ", recall, flush=True)
#print("recall.shape: ", recall.shape, flush=True)
r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases
# Precision
precision = tpc / (tpc + fpc) # precision curve
#print("precision: ", precision, flush=True)
#print("precision.shape: ", precision.shape, flush=True)
p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score
# AP from recall-precision curve
for j in range(tp.shape[1]):
ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
if plot and j == 0:
py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5
# Compute F1 (harmonic mean of precision and recall)
f1 = 2 * p * r / (p + r + 1e-16)
if plot:
plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)
plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')
plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')
plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')
i = f1.mean(0).argmax() # max F1 index
return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32') | 9a41478f8b85b7d43ceeaaaf6425ece67672fc64 | 12,410 |
from typing import Optional
import paddle
def frame_aligned_point_error(
pred_frames: r3.Rigids,
target_frames: r3.Rigids,
frames_mask: paddle.Tensor,
pred_positions: r3.Vecs,
target_positions: r3.Vecs,
positions_mask: paddle.Tensor,
length_scale: float,
l1_clamp_distance: Optional[float] = None,
epsilon=1e-4) -> paddle.Tensor:
"""Measure point error under different alignments.
Jumper et al. (2021) Suppl. Alg. 28 "computeFAPE"
Computes error between two structures with B points under A alignments derived
from the given pairs of frames.
Args:
pred_frames: num_frames reference frames for 'pred_positions'.
target_frames: num_frames reference frames for 'target_positions'.
frames_mask: Mask for frame pairs to use.
pred_positions: num_positions predicted positions of the structure.
target_positions: num_positions target positions of the structure.
positions_mask: Mask on which positions to score.
length_scale: length scale to divide loss by.
l1_clamp_distance: Distance cutoff on error beyond which gradients will
be zero.
epsilon: small value used to regularize denominator for masked average.
Returns:
Masked Frame Aligned Point Error.
"""
def unsqueeze_rigids(rigid, axis=-1):
"""add an axis in the axis of rot.xx and trans.x"""
if axis < 0:
axis_t = axis - 1
axis_r = axis - 2
else:
axis_t = axis
axis_r = axis
rotation = paddle.unsqueeze(rigid.rot.rotation, axis=axis_r)
translation = paddle.unsqueeze(rigid.trans.translation, axis=axis_t)
return r3.Rigids(rot=r3.Rots(rotation), trans=r3.Vecs(translation))
def unsqueeze_vecs(vecs, axis=-1):
"""add an axis in the axis of rot.xx and trans.x"""
if axis < 0:
axis_t = axis - 1
else:
axis_t = axis
translation = paddle.unsqueeze(vecs.translation, axis=axis_t)
return r3.Vecs(translation)
# Compute array of predicted positions in the predicted frames.
# r3.Vecs (num_frames, num_positions)
local_pred_pos = r3.rigids_mul_vecs(
unsqueeze_rigids(r3.invert_rigids(pred_frames)),
unsqueeze_vecs(pred_positions, axis=1))
# Compute array of target positions in the target frames.
# r3.Vecs (num_frames, num_positions)
local_target_pos = r3.rigids_mul_vecs(
unsqueeze_rigids(r3.invert_rigids(target_frames)),
unsqueeze_vecs(target_positions, axis=1))
# Compute errors between the structures.
# paddle.Tensor (num_frames, num_positions)
error_dist = paddle.sqrt(r3.vecs_squared_distance(local_pred_pos, local_target_pos) + epsilon)
if l1_clamp_distance:
error_dist = paddle.clip(error_dist, min=0, max=l1_clamp_distance)
normed_error = error_dist / length_scale
normed_error *= paddle.unsqueeze(frames_mask, axis=-1)
normed_error *= paddle.unsqueeze(positions_mask, axis=-2)
normalization_factor = (
paddle.sum(frames_mask, axis=-1) *
paddle.sum(positions_mask, axis=-1))
return (paddle.sum(normed_error, axis=[-2, -1]) /
(epsilon + normalization_factor)) | fe66fea6d3d6ca418b64a2d18bdc75a6e10d6707 | 12,411 |
def remove_app_restriction_request(machine_id, comment):
"""Enable execution of any application on the machine.
Args:
machine_id (str): Machine ID
comment (str): Comment to associate with the action
Notes:
Machine action is a collection of actions you can apply on the machine, for more info
https://docs.microsoft.com/en-us/windows/security/threat-protection/microsoft-defender-atp/machineaction
Returns:
dict. Machine action
"""
cmd_url = '/machines/{}/unrestrictCodeExecution'.format(machine_id)
json = {
'Comment': comment
}
response = http_request('POST', cmd_url, json=json)
return response | f4dd44cbef6194b9fcc301fb19bb5c3ba77ad269 | 12,412 |
from typing import Optional
import torch
ARBITRARILY_LARGE_VALUE = 1e6  # assumed module-level constant; the real value is defined elsewhere
def fix_bond_lengths(
dist_mat: torch.Tensor,
bond_lengths: torch.Tensor,
delim: Optional[int] = None,
delim_value: float = ARBITRARILY_LARGE_VALUE) -> torch.Tensor:
"""
Replace one-offset diagonal entries with ideal bond lengths
"""
mat_len = dist_mat.shape[1]
bond_lengths = torch.cat([bond_lengths] * (mat_len // 3))[:mat_len - 1]
dist_mat[1:, :-1][torch.eye(mat_len - 1) == 1] = bond_lengths
dist_mat[:-1, 1:][torch.eye(mat_len - 1) == 1] = bond_lengths
# Set chain break distance to arbitrarily large value for replacement by F-W algorithm
if delim is not None:
dist_mat[delim * 3 + 2, (delim + 1) * 3] = delim_value
dist_mat[(delim + 1) * 3, delim * 3 + 2] = delim_value
return dist_mat | 1112ad7019c1cb82360ad6e784f7f8262dc7b4a0 | 12,413 |
def CommandToString(command):
"""Returns quoted command that can be run in bash shell."""
return ' '.join(cmd_helper.SingleQuote(c) for c in command) | bcb6d3f108997b35336a68a559243931ca50a2c5 | 12,414 |
import tensorflow as tf  # TF 1.x-style API (tf.floormod, get_shape)
def sample_trilinear(t, coords, img_h=128, img_w=128):
"""
Samples noise octaves in one shot
:param t: noise cube [n_octaves, noise_res, noise_res, noise_res] (same noise foreach sample in batch)
:param coords: octave-transformed sampling positions [bs, n_octaves, 3, img_h*img_w]
:param img_h: height of image to synthesize
:param img_w: width of image to synthesize
:return: sampled noise octaves [bs, n_octaves, img_h, img_w]
"""
# in- and output dimensions
n_octaves, noise_res = t.get_shape().as_list()[:2]
bs = coords.get_shape().as_list()[0]
# all contributing source coordinates (interpolation endpoints)
x0 = tf.floor(coords[:, :, 0, :])
x1 = x0 + 1
y0 = tf.floor(coords[:, :, 1, :])
y1 = y0 + 1
z0 = tf.floor(coords[:, :, 2, :])
z1 = z0 + 1
# interpolation weights
w_x = coords[:, :, 0, :] - x0
w_y = coords[:, :, 1, :] - y0
w_z = coords[:, :, 2, :] - z0
# modulo for out-of-bound indices
x0 = tf.floormod(x0, tf.ones_like(x0) * noise_res)
x1 = tf.floormod(x1, tf.ones_like(x1) * noise_res)
y0 = tf.floormod(y0, tf.ones_like(y0) * noise_res)
y1 = tf.floormod(y1, tf.ones_like(y1) * noise_res)
z0 = tf.floormod(z0, tf.ones_like(z0) * noise_res)
z1 = tf.floormod(z1, tf.ones_like(z1) * noise_res)
# for mem efficiency we flatten voxels s.t. we need only one index per element instead of 3
t = tf.reshape(t, [n_octaves, noise_res**3])
# index arrays (in flattened voxel array)
idx_x0_y0_z0 = tf.cast(y0 * noise_res + x0 * noise_res**2 + z0, tf.int32)
idx_x0_y0_z1 = tf.cast(y0 * noise_res + x0 * noise_res**2 + z1, tf.int32)
idx_x0_y1_z0 = tf.cast(y1 * noise_res + x0 * noise_res**2 + z0, tf.int32)
idx_x0_y1_z1 = tf.cast(y1 * noise_res + x0 * noise_res**2 + z1, tf.int32)
idx_x1_y0_z0 = tf.cast(y0 * noise_res + x1 * noise_res**2 + z0, tf.int32)
idx_x1_y0_z1 = tf.cast(y0 * noise_res + x1 * noise_res**2 + z1, tf.int32)
idx_x1_y1_z0 = tf.cast(y1 * noise_res + x1 * noise_res**2 + z0, tf.int32)
idx_x1_y1_z1 = tf.cast(y1 * noise_res + x1 * noise_res**2 + z1, tf.int32)
def _gather(idx):
# TODO: not quite efficient. ;)
out = []
for i in range(n_octaves):
g = tf.gather(t[i], idx[:, i, :], axis=0, batch_dims=1)
out.append(tf.expand_dims(g, 1))
return tf.concat(out, 1)
# gather contributing samples --> now 2D!
val_x0_y0_z0 = _gather(idx_x0_y0_z0)
val_x0_y0_z1 = _gather(idx_x0_y0_z1)
val_x0_y1_z0 = _gather(idx_x0_y1_z0)
val_x0_y1_z1 = _gather(idx_x0_y1_z1)
val_x1_y0_z0 = _gather(idx_x1_y0_z0)
val_x1_y0_z1 = _gather(idx_x1_y0_z1)
val_x1_y1_z0 = _gather(idx_x1_y1_z0)
val_x1_y1_z1 = _gather(idx_x1_y1_z1)
# interpolate along z ...
c_00 = val_x0_y0_z0 * (1.0 - w_z) + val_x0_y0_z1 * w_z
c_01 = val_x0_y1_z0 * (1.0 - w_z) + val_x0_y1_z1 * w_z
c_10 = val_x1_y0_z0 * (1.0 - w_z) + val_x1_y0_z1 * w_z
c_11 = val_x1_y1_z0 * (1.0 - w_z) + val_x1_y1_z1 * w_z
# ... along y ...
c_0 = c_00 * (1.0 - w_y) + c_01 * w_y
c_1 = c_10 * (1.0 - w_y) + c_11 * w_y
# ... and along x
c = c_0 * (1.0 - w_x) + c_1 * w_x
# reshape
c = tf.reshape(c, [bs, n_octaves, img_h, img_w])
return c | 251a0eeaefe3eccd1d4d72ba16aa121a5f17c483 | 12,415 |
import re
def version(output):
"""
`git --version` > git version 1.8.1.1
"""
output = output.rstrip()
words = re.split(r'\s+', output, maxsplit=3)
if not words or words[0] != 'git' or words[1] != 'version':
raise WrongOutputError()
version = words[2]
parts = version.split('.')
try:
major = int(parts[0]) if len(parts) > 0 else None
except ValueError:
major = None
try:
minor = int(parts[1]) if len(parts) > 1 else None
except ValueError:
minor = None
return Version(version, parts, major, minor) | 21a16245cf7729b56588016f358667b210113eec | 12,416 |
def set_up_s3_encryption_configuration(kms_arn=None):
"""
Use the default SSE-S3 configuration for the journal export if a KMS key ARN was not given.
:type kms_arn: str
:param kms_arn: The Amazon Resource Name (ARN) of the KMS key to use for encryption.
:rtype: dict
:return: The encryption configuration for JournalS3Export.
"""
if kms_arn is None:
return {'ObjectEncryptionType': 'SSE_S3'}
return {'ObjectEncryptionType': {'S3ObjectEncryptionType': 'SSE_KMS', 'KmsKeyArn': kms_arn}} | dd8663c17e040423a08c772fd9ca64d25abd2850 | 12,417 |
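A usage sketch showing both return shapes; the KMS key ARN is a hypothetical placeholder.

print(set_up_s3_encryption_configuration())
# {'ObjectEncryptionType': 'SSE_S3'}
print(set_up_s3_encryption_configuration('arn:aws:kms:us-east-1:123456789012:key/example'))
# {'ObjectEncryptionType': {'S3ObjectEncryptionType': 'SSE_KMS', 'KmsKeyArn': 'arn:aws:kms:...'}}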
import click
import json
def search(dataset, node, aoi, start_date, end_date, lng, lat, dist, lower_left, upper_right, where, geojson, extended, api_key):
"""
Search for images.
"""
node = get_node(dataset, node)
if aoi == "-":
src = click.open_file('-')
if not src.isatty():
lines = src.readlines()
if len(lines) > 0:
aoi = json.loads(''.join([ line.strip() for line in lines ]))
bbox = [get_bbox(feature) for feature in aoi.get('features') or [aoi]][0]
lower_left = bbox[0:2]
upper_right = bbox[2:4]
if where:
# Query the dataset fields endpoint for queryable fields
resp = api.dataset_fields(dataset, node)
def format_fieldname(s):
return ''.join(c for c in s if c.isalnum()).lower()
field_lut = { format_fieldname(field['name']): field['fieldId'] for field in resp['data'] }
where = { field_lut[format_fieldname(k)]: v for k, v in where if format_fieldname(k) in field_lut }
if lower_left:
lower_left = dict(zip(['longitude', 'latitude'], lower_left))
upper_right = dict(zip(['longitude', 'latitude'], upper_right))
result = api.search(dataset, node, lat=lat, lng=lng, distance=dist, ll=lower_left, ur=upper_right, start_date=start_date, end_date=end_date, where=where, extended=extended, api_key=api_key)
if geojson:
result = to_geojson(result)
print(json.dumps(result)) | 309a98cf3cfc81f12631bbc15ee0325d16385338 | 12,418 |
from typing import Callable
import tensorflow as tf  # TF 1.x-style API (tf.nn.rnn_cell)
def _make_rnn_cell(spec: RNNSpec) -> Callable[[], tf.nn.rnn_cell.RNNCell]:
"""Return the graph template for creating RNN cells."""
return RNN_CELL_TYPES[spec.cell_type](spec.size) | 48cf85bcb8d39ab7b4dd150fc890eb281d9b83d9 | 12,419 |
import os.path as osp
# imports as used in OpenAI baselines-based benchmark scripts (assumed);
# `params` is a module-level hyperparameter dict defined elsewhere
from baselines import bench
from baselines import logger as baselines_logger
from baselines.common import set_global_seeds
from baselines.common.vec_env import DummyVecEnv
from baselines.ddpg import ddpg
from baselines.logger import configure
def run_baselines(env, seed, log_dir):
"""Create baselines model and training.
Replace the ppo and its training with the algorithm you want to run.
Args:
env (gym.Env): Environment of the task.
seed (int): Random seed for the trial.
log_dir (str): Log dir path.
Returns:
str: The log file path.
"""
seed = seed + 1000000
set_global_seeds(seed)
env.seed(seed)
# Set up logger for baselines
configure(dir=log_dir, format_strs=['stdout', 'log', 'csv', 'tensorboard'])
baselines_logger.info('seed={}, logdir={}'.format(
seed, baselines_logger.get_dir()))
env = DummyVecEnv([
lambda: bench.Monitor(
env, baselines_logger.get_dir(), allow_early_resets=True)
])
ddpg.learn(network='mlp',
env=env,
nb_epochs=params['n_epochs'],
nb_epoch_cycles=params['steps_per_epoch'],
normalize_observations=False,
critic_l2_reg=0,
actor_lr=params['policy_lr'],
critic_lr=params['qf_lr'],
gamma=params['discount'],
nb_train_steps=params['n_train_steps'],
nb_rollout_steps=params['n_rollout_steps'],
nb_eval_steps=100)
return osp.join(log_dir, 'progress.csv') | 2a020c5efe548d3722155569fbe69cd836efeebd | 12,420 |
import os
import numpy as np
from keras import backend as k
from keras.utils import data_utils
from config import STL10_PATH
# `preprocess` is a project-local helper, assumed to be defined elsewhere
def load_stl():
"""Loads the STL-10 dataset from config.STL10_PATH or downloads it if necessary.
:return: (x_train, y_train), (x_test, y_test), min, max
:rtype: tuple of numpy.ndarray), (tuple of numpy.ndarray), float, float
"""
min_, max_ = 0., 1.
# Download and extract data if needed
path = data_utils.get_file('stl10_binary', cache_subdir=STL10_PATH, untar=True,
origin='https://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz')
with open(os.path.join(path, 'train_X.bin'), 'rb') as f:
x_train = np.fromfile(f, dtype=np.uint8)
x_train = np.reshape(x_train, (-1, 3, 96, 96))
with open(os.path.join(path, 'test_X.bin'), 'rb') as f:
x_test = np.fromfile(f, dtype=np.uint8)
x_test = np.reshape(x_test, (-1, 3, 96, 96))
if k.image_data_format() == 'channels_last':
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
with open(os.path.join(path, 'train_y.bin'), 'rb') as f:
y_train = np.fromfile(f, dtype=np.uint8)
y_train -= 1
with open(os.path.join(path, 'test_y.bin'), 'rb') as f:
y_test = np.fromfile(f, dtype=np.uint8)
y_test -= 1
x_train, y_train = preprocess(x_train, y_train)
x_test, y_test = preprocess(x_test, y_test)
return (x_train, y_train), (x_test, y_test), min_, max_ | a521cb4d5ff5bcc095cccaab256297ac21b38cc2 | 12,421 |
def count_transitions(hypno):
"""
return the count for all possible transitions
"""
possible_transitions = [(0,1), (0,2), (0,4), # W -> S1, S2, REM
(1,2), (1,0), (1,3), # S1 -> W, S2, REM
(2,0), (2,1), (2,3), (2,4), # S2 -> W, S1, SWS, REM
(3,0), (3,2), # SWS -> W, S2
(4,0), (4,1), (4,2)] # REM -> W, S1, S2
counts = []
for trans in possible_transitions:
counts += [transition_index(hypno, trans)]
return counts | 4a0dc835c2e72bf46ad8d3ebe33256f32ce2ede9 | 12,422 |
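The helper transition_index is not shown above; a minimal sketch of what it presumably computes (the number of times one specific stage-to-stage transition occurs in the hypnogram), followed by a small example.

def transition_index(hypno, trans):
    # Count how often stage trans[0] is immediately followed by stage trans[1].
    return sum(1 for a, b in zip(hypno, hypno[1:]) if (a, b) == trans)

hypno = [0, 1, 2, 3, 2, 4, 0]  # W -> S1 -> S2 -> SWS -> S2 -> REM -> W
print(count_transitions(hypno))  # one count per entry in possible_transitions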
import numpy as np
from scipy import linalg as la
from sklearn.utils import check_random_state  # or an equivalent local utility
def mu_ref_normal_sampler_tridiag(loc=0.0, scale=1.0, beta=2, size=10,
random_state=None):
"""Implementation of the tridiagonal model to sample from
.. math::
\\Delta(x_{1}, \\dots, x_{N})^{\\beta}
\\prod_{n=1}^{N} \\exp(-\\frac{(x_n-\\mu)^2}{2\\sigma^2}) dx_n
.. seealso::
:cite:`DuEd02` II-C
"""
rng = check_random_state(random_state)
if not (beta > 0):
raise ValueError('`beta` must be positive. Given: {}'.format(beta))
# beta/2*[N-1, N-2, ..., 1]
b_2_Ni = 0.5 * beta * np.arange(size - 1, 0, step=-1)
alpha_coef = rng.normal(loc=loc, scale=scale, size=size)
beta_coef = rng.gamma(shape=b_2_Ni, scale=scale**2)
return la.eigvalsh_tridiagonal(alpha_coef, np.sqrt(beta_coef)) | 75e7d46ec4816bbfa46443537f66cd27043b212d | 12,423 |
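A usage sketch, assuming the imports hedged above; beta=2 corresponds to the Gaussian Unitary Ensemble weight.

eigvals = mu_ref_normal_sampler_tridiag(loc=0.0, scale=1.0, beta=2, size=5, random_state=0)
print(eigvals)  # 5 eigenvalues of the random tridiagonal matrix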
import os
import re
def get_ofi_info(hosts, supported=None, verbose=True):
"""Get the OFI provider information from the specified hosts.
Args:
hosts (NodeSet): hosts from which to gather the information
supported (list, optional): list of supported providers when if provided will limit the
inclusion to only those providers specified. Defaults to None.
verbose (bool, optional): display command details. Defaults to True.
Returns:
dict: a dictionary of interface keys with a dictionary value of a comma-separated string of
providers key with a NodeSet value where the providers where detected.
"""
task = run_task(hosts, "fi_info", verbose=verbose)
if verbose:
display_task(task)
# Populate a dictionary of interfaces with a list of provider lists and NodeSet of hosts on which
# the providers were detected.
providers = {}
results = dict(task.iter_retcodes())
if 0 in results:
for output, nodelist in task.iter_buffers(results[0]):
output_lines = [line.decode("utf-8").rstrip(os.linesep) for line in output]
nodeset = NodeSet.fromlist(nodelist)
# Find all the provider and domain pairings. The fi_info output reports these on
# separate lines when processing the re matches ensure each domain is preceded by a
# provider.
interface_providers = {}
data = re.findall(r"(provider|domain):\s+([A-Za-z0-9;_+]+)", "\n".join(output_lines))
while data:
provider = list(data.pop(0))
if provider[0] == "provider" and data[0][0] == "domain":
provider.pop(0)
domain = list(data.pop(0))
domain.pop(0)
# A provider and domain must be specified
if not provider or not domain:
continue
# Add 'ofi+' to the provider
provider = ["+".join(["ofi", item]) for item in provider]
# Only include supported providers if a supported list is provided
if supported and provider[0] not in supported:
continue
if domain[0] not in interface_providers:
interface_providers[domain[0]] = set()
interface_providers[domain[0]].update(provider)
for interface, provider_set in interface_providers.items():
if interface not in providers:
providers[interface] = {}
provider_key = ",".join(list(provider_set))
if provider_key not in providers[interface]:
providers[interface][provider_key] = NodeSet()
providers[interface][provider_key].update(nodeset)
return providers | dbb9669d642a68e70aaaee7bc0e4f78e15474b0b | 12,424 |
def get_pokemon(name:str) -> dict:
"""
Looks up the pokémon by name in the database and builds a dictionary with its basic information.
Parameters:
name(str): Name of the pokémon to look up
Returns:
Dictionary with the pokémon's basic information and its evolutions, or None if it does not exist.
"""
try:
p = Pokemon.objects.get(name=name)
pokemon = {
"name": p.name,
"id": p.id,
"weight": p.weight,
"height": p.height,
"stats": [],
"evolutions": []
}
stats = PokemonStat.objects.filter(pokemon_name=p)
for stat in stats:
pokemon["stats"].append({"stat": stat.stat_id, "base": stat.base})
evolutionChain = PokemonEvolution.objects.get(pokemon=p)
evolutionId = evolutionChain.evolution_chain
position = evolutionChain.position
chain = PokemonEvolution.objects.filter(evolution_chain=evolutionId)
for evolution in chain:
if evolution.position > position:
pokemon["evolutions"].append({"name": evolution.pokemon.name, "id": evolution.pokemon.id,
"evolution_type": "Evolution"})
elif evolution.position < position:
pokemon["evolutions"].append({"name": evolution.pokemon.name, "id": evolution.pokemon.id,
"evolution_type": "Preevolution"})
return pokemon
except ObjectDoesNotExist:
return None | fa19704b2dfb6d2223a73264df6b5dc9e866fb8e | 12,425 |
def create_cluster(module, switch, name, node1, node2):
"""
Method to create a cluster between two switches.
:param module: The Ansible module to fetch input parameters.
:param switch: Name of the local switch.
:param name: The name of the cluster to create.
:param node1: First node of the cluster.
:param node2: Second node of the cluster.
:return: String describing whether the cluster got created or if it already exists.
"""
global CHANGED_FLAG
cli = pn_cli(module)
clicopy = cli
cli += ' switch %s cluster-show format name no-show-headers ' % node1
cluster_list = run_cli(module, cli).split()
if name not in cluster_list:
cli = clicopy
cli += ' switch %s cluster-create name %s ' % (switch, name)
cli += ' cluster-node-1 %s cluster-node-2 %s ' % (node1, node2)
if 'Success' in run_cli(module, cli):
CHANGED_FLAG.append(True)
return ' %s: %s created successfully \n' % (switch, name)
else:
return ' %s: %s already exists \n' % (switch, name) | a7f0a415d019b7fa3622d18da396879df566b365 | 12,426 |
from typing import List
import random
def random_terminals_for_primitive(
primitive_set: dict, primitive: Primitive
) -> List[Terminal]:
""" Return a list with a random Terminal for each required input to Primitive. """
return [random.choice(primitive_set[term_type]) for term_type in primitive.input] | b3160800bb5da87c0215ed4857f2596934d28c05 | 12,427 |
def _specie_is_intermediate_old(
specie_id: str,
specie_dict: dict = None,
) -> bool:
"""Detect is a specie should be considered as an intermediate compound.
FIXME, this needs to be refined so that we don't rely on the specie ID.
:param specie_id: specie ID
:type: str
:param specie_dict: dictionary about the specie
:type specie_dict: dict
:return: true if it is, otherwise false
:rtype: bool
"""
if specie_id.startswith('CMPD_'):
return True
return False | b1a4f99abf0379ea3cacf6ab53f45fca636072c5 | 12,428 |
def where_from_pos(text, pos):
"""
Format a textual representation of the given position in the text.
"""
return "%d:%d" % (line_from_pos(text, pos), col_from_pos(text, pos)) | 587387f017fe32b297c06123fc3853c18a7aea46 | 12,429 |
def generateHuffmanCodes (huffsize):
""" Calculate the huffman code of each length. """
huffcode = []
k = 0
code = 0
# Canonical Huffman assignment: codes of equal length are consecutive
# integers; moving to the next code length appends a zero bit (left shift).
for i in range (len (huffsize)):
si = huffsize[i]
for k in range (si):
huffcode.append ((i + 1, code))
code += 1
code <<= 1
return huffcode | 60d5a2bd5524627dd5cc624dbb6b0ea09b8032d4 | 12,430 |
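A small worked example of the canonical assignment, with huffsize given as per-length counts (JPEG "BITS" style): no 1-bit codes, two 2-bit codes and one 3-bit code yield 00, 01 and 100.

codes = generateHuffmanCodes([0, 2, 1])
print([(size, format(code, '0%db' % size)) for size, code in codes])
# [(2, '00'), (2, '01'), (3, '100')]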
import pandas as pd
def one_hot_df(df, cat_col_list):
"""
Make one hot encoding on categoric columns.
Returns a dataframe for the categoric columns provided.
-------------------------
inputs
- df: original input DataFrame
- cat_col_list: list of categorical columns to encode.
outputs
- df_hot: one hot encoded subset of the original DataFrame.
"""
df_hot = pd.DataFrame()
for col in cat_col_list:
encoded_matrix = col_encoding(df, col)
df_ = pd.DataFrame(encoded_matrix,
columns = [col+ ' ' + str(int(i))\
for i in range(encoded_matrix.shape[1])])
df_hot = pd.concat([df_hot, df_], axis = 1)
return df_hot | d47978a551edbc11f93f9a2e87dbe1598e39161b | 12,431 |
from typing import Dict, List, Optional
from sqlalchemy import select
from sqlalchemy.engine import Result
async def load_users_by_id(user_ids: List[int]) -> List[Optional[User]]:
"""
Batch-loads users by their IDs.
"""
query = select(User).filter(User.id.in_(user_ids))
async with get_session() as session:
result: Result = await session.execute(query)
user_map: Dict[int, User] = {user.id: user for user in result.scalars()}
return [user_map.get(user_id) for user_id in user_ids] | ac9d0a16a40d478ed7fec590bf591aa0124270d9 | 12,432 |
def create_timetravel_model(for_model):
"""
Returns the newly created timetravel model class for the
model given.
"""
if for_model._meta.proxy:
_tt_model = for_model._meta.concrete_model._tt_model
for_model._tt_model = _tt_model
for_model._meta._tt_model = _tt_model
return
opt = for_model._meta
name = 'tt_%s' % opt.db_table
class Meta:
app_label = get_migration_app()
db_table = name
index_together = [[OK, VU]]
verbose_name = name[:39]
attrs = {'Meta': Meta,
'_tt_is_timetravel_model': True,
'__module__': for_model.__module__}
fields = copy_fields(for_model)
attrs.update(fields)
for_model._tt_has_history = True
ret = type(str(name), (Model,), attrs)
for_model._tt_model = ret
for_model._meta._tt_model = ret
return ret | 6a2557f3737ce014e14ba9dd36cd7a6d9c8c78b7 | 12,433 |
def public_route_server_has_read(server_id, user_id=None):
"""
check if current user has read access to the given server
"""
user = user_id and User.query.get_or_404(user_id) or current_user
server = DockerServer.query.get_or_404(server_id)
if server.has_group_read(user):
return Response("read access", 200)
abort(403) | b9f812feac7c7e951f8c37178fd1dc2913601631 | 12,434 |
import base58
def isValidPublicAddress(address: str) -> bool:
"""Check if address is a valid NEO address"""
valid = False
if len(address) == 34 and address[0] == 'A':
try:
base58.b58decode_check(address.encode())
valid = True
except ValueError:
# checksum mismatch
valid = False
return valid | a99f08c289f9d3136adf7e17697645131e785ecb | 12,435 |
import numpy as np
def cost_to_go_np(cost_seq, gamma_seq):
"""
Calculate (discounted) cost to go for given cost sequence
"""
# if np.any(gamma_seq == 0):
# return cost_seq
cost_seq = gamma_seq * cost_seq # discounted reward sequence
cost_seq = np.cumsum(cost_seq[:, ::-1], axis=-1)[:, ::-1] # cost to go (but scaled by [1 , gamma, gamma*2 and so on])
cost_seq /= gamma_seq # un-scale it to get true discounted cost to go
return cost_seq | bea4de4cb32c3a346ebe8ea532c2c94589893e65 | 12,436 |
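A worked example: with unit costs and discount 0.9, the cost-to-go at t=0 is 1 + 0.9 + 0.81 = 2.71.

cost_seq = np.array([[1.0, 1.0, 1.0]])
gamma_seq = np.array([[1.0, 0.9, 0.81]])  # gamma**t for t = 0, 1, 2
print(cost_to_go_np(cost_seq, gamma_seq))
# [[2.71 1.9  1.  ]]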
import re
from optparse import OptionParser
def parse_args():
"""
Parses the command line arguments.
"""
# Override epilog formatting
OptionParser.format_epilog = lambda self, formatter: self.epilog
parser = OptionParser(usage="usage: %prog -f secret.txt | --file secret.txt | --folder allmysecrets", epilog=EXAMPLES)
parser.add_option("-p", "--password", dest="password", help="set password file for AES decryption")
parser.add_option("-f", "--file", dest="file", help="encrypt/decrypt this file")
parser.add_option("-F", "--folder", dest="folder", help="encrypt/decrypt all files in this folder")
parser.add_option("--encrypt", action="store_true", dest="encrypt", help="encrypt file(s)")
parser.add_option("--decrypt", action="store_true", dest="decrypt", help="decrypt file(s)")
parser.add_option("--recursive", action="store_true", dest="recursive", help="encrypt/decrypt files in folder recursively")
parser.formatter.store_option_strings(parser)
parser.formatter.store_option_strings = lambda _: None
for option, value in parser.formatter.option_strings.items():
value = re.sub(r"\A(-\w+) (\w+), (--[\w-]+=(\2))\Z", r"\g<1>/\g<3>",
value)
value = value.replace(", ", '/')
if len(value) > MAX_HELP_OPTION_LENGTH:
value = ("%%.%ds.." % (MAX_HELP_OPTION_LENGTH -
parser.formatter.indent_increment)) % value
parser.formatter.option_strings[option] = value
args = parser.parse_args()[0]
if not any((args.file, args.folder)):
parser.error("Required argument is missing. Use '-h' for help.")
if not any((args.encrypt, args.decrypt)):
parser.error("Required argument is missing. Use '-h' for help.")
if args.decrypt and not args.password:
parser.error("Required password file is missing. Use '-h' for help.")
return args | 8f696bb8b419269766bceadb42b36f2a3e052e5b | 12,437 |
def x_to_ggsg(seq):
"""replace Xs with a Serine-Glycine linker (GGSG pattern)
seq and return value are strings
"""
if "X" not in seq:
return seq
replacement = []
ggsg = _ggsg_generator()
for aa in seq:
if aa != "X":
replacement.append(aa)
# restart linker iterator for next stretch of Xs
ggsg = _ggsg_generator()
else:
replacement.append(next(ggsg))
return "".join(replacement) | 53885ca76484f25a04ffc4220af0d7b0e56defd4 | 12,438 |
from collections import OrderedDict
import numpy as np
import pandas as pd
def gini_pairwise(idadf, target=None, features=None, ignore_indexer=True):
"""
Compute the conditional gini coefficients between a set of features and a
set of target in an IdaDataFrame.
Parameters
----------
idadf : IdaDataFrame
target : str or list of str, optional
A column or list of columns against to be used as target. Per default,
consider all columns
features : str or list of str, optional
A column or list of columns to be used as features. Per default,
consider all columns.
ignore_indexer : bool, default: True
Per default, ignore the column declared as indexer in idadf
Returns
-------
Pandas.DataFrame or Pandas.Series if only one target
Notes
-----
Input columns as target and features should be categorical, otherwise
this measure does not make much sense.
Examples
--------
>>> idadf = IdaDataFrame(idadb, "IRIS")
>>> gini_pairwise(idadf)
"""
# Check input
target, features = _check_input(idadf, target, features, ignore_indexer)
gini_dict = OrderedDict()
length = len(idadf)
for t in target:
gini_dict[t] = OrderedDict()
features_notarget = [x for x in features if (x != t)]
for feature in features_notarget:
if t not in gini_dict:
gini_dict[t] = OrderedDict()
query = ("SELECT SUM((POWER(c,2) - gini)/c)/%s FROM "+
"(SELECT SUM(POWER(count,2)) as gini, SUM(count) as c FROM "+
"(SELECT CAST(COUNT(*) AS FLOAT) AS count, \"%s\" FROM %s GROUP BY \"%s\",\"%s\") "+
"GROUP BY \"%s\")")
query0 = query%(length, feature, idadf.name, t, feature, feature)
gini_dict[t][feature] = idadf.ida_scalar_query(query0)
result = pd.DataFrame(gini_dict).fillna(np.nan)
if len(result.columns) > 1:
order = [x for x in result.columns if x in features] + [x for x in features if x not in result.columns]
result = result.reindex(order)
result = result.dropna(axis=1, how="all")
if len(result.columns) == 1:
if len(result) == 1:
result = result.iloc[0,0]
else:
result = result[result.columns[0]].copy()
result = result.sort_values(ascending=True)
else:
result = result.fillna(0)
return result | aa886c8d44e54597e86f0736ea383671bda2e13f | 12,439 |
def init_isolated_80():
"""
Real Name: b'init Isolated 80'
Original Eqn: b'0'
Units: b'person'
Limits: (None, None)
Type: constant
b''
"""
return 0 | 5511cac38bf9bd68446fcb1dc41ac96807ea57a2 | 12,440 |
def xcom_api_setup():
"""Instantiate api"""
return XComApi(API_CLIENT) | 1a47066f389ab2846f1aa31ce8338389def07e6d | 12,441 |
import numpy as np
def zeros_tensor(*args, **kwargs):
"""Construct a tensor of a given shape with every entry equal to zero."""
labels = kwargs.pop("labels", [])
dtype = kwargs.pop("dtype", np.float)
base_label = kwargs.pop("base_label", "i")
return Tensor(np.zeros(*args, dtype=dtype), labels=labels,
base_label=base_label) | 3baba23ba763afb51c715a85aa6f84c8c2d99c43 | 12,442 |
from typing import Tuple
def reg_split_from(
splitted_mappings: np.ndarray,
splitted_sizes: np.ndarray,
splitted_weights: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
When creating the regularization matrix of a source pixelization, this function assumes each source pixel has been
split into a cross of four points (the size of which is based on the area of the source pixel). This cross of
points represents points which together can evaluate the gradient of the pixelization's reconstructed values.
This function takes each cross of points and determines the regularization weights of every point on the cross,
to construct a regularization matrix based on the gradient of each pixel.
The size of each cross depends on the Voronoi pixel area, thus this regularization scheme and its weights depend
on the pixel area (there are larger weights for bigger pixels). This ensures that bigger pixels are regularized
more.
The number of pixel neighbors over which regularization is applied is 4 * the total number of source pixels. This contrasts
other regularization schemes, where the number of neighbors changes depending on, for example, the Voronoi mesh
geometry. By having a fixed number of neighbors this removes stochasticty in the regularization that is applied
to a solution.
There are cases where a grid has over 100 neighbors, corresponding to extreme coordinate transformations. In such
cases, we raise an `exc.PixelizationException`.
Parameters
----------
splitted_mappings
splitted_sizes
splitted_weights
Returns
-------
"""
max_j = np.shape(splitted_weights)[1] - 1
splitted_weights *= -1.0
for i in range(len(splitted_mappings)):
pixel_index = i // 4
flag = 0
for j in range(splitted_sizes[i]):
if splitted_mappings[i][j] == pixel_index:
splitted_weights[i][j] += 1.0
flag = 1
if j >= max_j:
raise exc.PixelizationException(
"the number of Voronoi natural neighbours exceeds 100."
)
if flag == 0:
splitted_mappings[i][j + 1] = pixel_index
splitted_sizes[i] += 1
splitted_weights[i][j + 1] = 1.0
return splitted_mappings, splitted_sizes, splitted_weights | 545f0bd7345a8ab908d2338eaa7cb4c3562f4234 | 12,443 |
def get_initiator_IP(json_isessions):
"""
pull the IP from the host session
"""
print("-" * 20 + " get_initiator started")
for session in json_isessions['sessions']:
session_array[session['initiatorIP']] = session['initiatorName']
return session_array | 4140b9f32727d1e5e1e98fd6714e8d91276b2272 | 12,444 |
def get_data_for_recent_jobs(recency_msec=DEFAULT_RECENCY_MSEC):
"""Get a list containing data about recent jobs.
This list is arranged in descending order based on the time the job
was enqueued. At most NUM_JOBS_IN_DASHBOARD_LIMIT job descriptions are
returned.
Args:
- recency_msec: the threshold for a recent job, in milliseconds.
"""
recent_job_models = job_models.JobModel.get_recent_jobs(
NUM_JOBS_IN_DASHBOARD_LIMIT, recency_msec)
return [_get_job_dict_from_job_model(model) for model in recent_job_models] | 032f27b55c70947a44cd6ed244291118e3660f77 | 12,445 |
def construct_outgoing_multicast_answers(answers: _AnswerWithAdditionalsType) -> DNSOutgoing:
"""Add answers and additionals to a DNSOutgoing."""
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, multicast=True)
_add_answers_additionals(out, answers)
return out | 65f0a2a42f9d3f1bd8fbc74e8303248adf01e65d | 12,446 |
import struct
import numpy as np
def load_analog_binary_v1(filename):
"""Load analog traces stored in the binary format by Logic 1.2.0+
The format is documented at
https://support.saleae.com/faq/technical-faq/data-export-format-analog-binary
Returns (data, period) where data is a numpy array of 32-bit floats
of shape (nchannels, nsamples) and period is the sampling period in seconds.
"""
with open(filename, 'rb') as f:
nsamples, nchannels, period = struct.unpack('<QId', f.read(20))
if nchannels > 16:
raise RuntimeError(f'Invalid nchannels={nchannels}. Are you sure this is binary analog data from v1.2.0+?')
if period < 1 / 50e6 or period > 1:
raise RuntimeError(f'Invalid period={period}. Are you sure this is binary analog data from v1.2.0+?')
data = np.fromfile(f, dtype=np.dtype('<f'), count=nsamples * nchannels).reshape(nchannels, nsamples).astype('=f')
return data, period | 5fcb97c4da367a8abeb12d7dc2852dbb7412956d | 12,447 |
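A usage sketch; analog_0.bin is a hypothetical capture file exported by Logic.

data, period = load_analog_binary_v1("analog_0.bin")
nchannels, nsamples = data.shape
t = np.arange(nsamples) * period  # time axis in seconds
print(f"{nchannels} channel(s), {nsamples} samples at {1 / period:.0f} Hz")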
import click
def setup_phantomjs():
"""Create and return a PhantomJS browser object."""
try:
# Setup capabilities for the PhantomJS browser
phantomjs_capabilities = DesiredCapabilities.PHANTOMJS
# Some basic creds to use against an HTTP Basic Auth prompt
phantomjs_capabilities['phantomjs.page.settings.userName'] = 'none'
phantomjs_capabilities['phantomjs.page.settings.password'] = 'none'
# Flags to ignore SSL problems and get screenshots
service_args = []
service_args.append('--ignore-ssl-errors=true')
service_args.append('--web-security=no')
service_args.append('--ssl-protocol=any')
# Create the PhantomJS browser and set the window size
browser = webdriver.PhantomJS(desired_capabilities=phantomjs_capabilities,service_args=service_args)
browser.set_window_size(1920,1080)
except Exception as error:
click.secho("[!] Bad news: PhantomJS failed to load (not installed?), so activities \
requiring a web browser will be skipped.",fg="red")
click.secho("L.. Details: {}".format(error),fg="red")
browser = None
return browser | 5a8e536850e2a3c39adaf3228fc1a1f7ad4694dd | 12,448 |
import numpy as np
def normal_pdf(x, mu, cov, log=True):
"""
Calculate the probability density of Gaussian (Normal) distribution.
Parameters
----------
x : float, 1-D array_like (K, ), or 2-D array_like (K, N)
The variable for calculating the probability density.
mu : float or 1-D array_like, (K, )
The mean of the Gaussian distribution.
cov : float or 2-D array_like, (K, K)
The variance or the covariance matrix of the Gaussian distribution.
log : bool
If true, the return value is at log scale.
Returns
-------
pdf : numpy float
The probability density of x.
if N==1, return a float
elif N>1, return an array
"""
if len(np.array(mu).shape) == 0:
x = np.array(x).reshape(-1,1)
elif len(np.array(x).shape) <= 1:
x = np.array(x).reshape(1, -1)
x = x - np.array(mu)
N, K = x.shape
if len(np.array(cov).shape) < 2:
cov = np.array(cov).reshape(-1,1)
cov_inv = np.linalg.inv(cov)
cov_det = np.linalg.det(cov)
if cov_det <= 0:
print("Warning: the det of covariance is not positive!")
return None
pdf_all = np.zeros(N)
pdf_part1 = -(K*np.log(2*np.pi) + np.log(cov_det)) / 2.0
for i in range(N):
pdf_all[i] = pdf_part1 - np.dot(np.dot(x[i,:], cov_inv), x[i,:]) / 2.0
if not log: pdf_all = np.exp(pdf_all)
if N == 1: pdf_all = pdf_all[0]
return pdf_all | 4cdb573e1283a5740cb8d5b518b69c02bc013fe6 | 12,449 |
import sqlite3
from datetime import datetime
def get_quiz(id, user):
"""Get Quiz"""
conn = sqlite3.connect(DBNAME)
cursor = conn.cursor()
if user == 'admin' or user == 'fabioja':
cursor.execute(
"SELECT id, release, expire, problem, tests, results, diagnosis, numb from QUIZ where id = {0}".format(id))
else:
cursor.execute("SELECT id, release, expire, problem, tests, results, diagnosis, numb from QUIZ where id = {0} and release < '{1}'".format(
id, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
info = [reg for reg in cursor.fetchall()]
conn.close()
return info | 7e517e2ca84ebd320883950d4c3d6e572f82c226 | 12,450 |
def filesystem_entry(filesystem):
"""
Filesystem tag {% filesystem_entry filesystem %} is used to display a single
filesystem.
Arguments
---------
filesystem: filesystem object
Returns
-------
A context which maps the filesystem object to filesystem.
"""
return {'filesystem': filesystem} | 3afbd0b8ee9e72ab8841ca5c5517396650d2a898 | 12,451 |
import numpy as np
def haversine(lat1, lon1, lat2, lon2, units='miles'):
"""
Calculates arc length distance between two lat_lon points (must be in radians)
lat2 & and lon2 can be numpy arrays
units can be 'miles' or 'km' (kilometers)
"""
earth_radius = {'miles': 3959., 'km': 6371.}
a = np.square(np.sin((lat2 - lat1)/2.)) + np.cos(lat1) * np.cos(lat2) * np.square(np.sin((lon2 - lon1)/2.))
return 2 * earth_radius[units] * np.arcsin(np.sqrt(a)) | cadfa496f39e0a02115140d827bebfa6ff96a2dd | 12,452 |
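A usage sketch; note that the inputs must already be in radians.

ny = np.radians([40.7128, -74.0060])    # New York
la_ = np.radians([34.0522, -118.2437])  # Los Angeles
print(haversine(ny[0], ny[1], la_[0], la_[1]))        # ~2445 miles
print(haversine(ny[0], ny[1], la_[0], la_[1], 'km'))  # ~3936 km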
from wtforms.fields import DateField
from wtforms.validators import Optional
def OptionalDateField(description='', validators=None):
""" A custom field that makes the DateField optional; avoids a mutable default argument """
validators = list(validators) if validators else []
validators.append(Optional())
field = DateField(description, validators)
return field | 66695ca94ff7d7283ff5508b4ef3f78efba9a988 | 12,453 |
def init_brats_metrics():
"""Initialize dict for BraTS Dice metrics"""
metrics = {}
metrics['ET'] = {'labels': [3]}
metrics['TC'] = {'labels': [1, 3]}
metrics['WT'] = {'labels': [1, 2, 3]}
for _, value in metrics.items():
value.update({'tp':0, 'tot':0})
return metrics | 755dc706f7090d78dac18a989745041b8617a9d6 | 12,454 |
def add_rse(rse, issuer, vo='def', deterministic=True, volatile=False, city=None, region_code=None,
country_name=None, continent=None, time_zone=None, ISP=None,
staging_area=False, rse_type=None, latitude=None, longitude=None, ASN=None,
availability=None):
"""
Creates a new Rucio Storage Element(RSE).
:param rse: The RSE name.
:param issuer: The issuer account.
:param vo: The VO to act on.
:param deterministic: Boolean to know if the pfn is generated deterministically.
:param volatile: Boolean for RSE cache.
:param city: City for the RSE.
:param region_code: The region code for the RSE.
:param country_name: The country.
:param continent: The continent.
:param time_zone: Timezone.
:param staging_area: staging area.
:param ISP: Internet service provider.
:param rse_type: RSE type.
:param latitude: Latitude coordinate of RSE.
:param longitude: Longitude coordinate of RSE.
:param ASN: Access service network.
:param availability: Availability.
"""
validate_schema(name='rse', obj=rse, vo=vo)
kwargs = {'rse': rse}
if not permission.has_permission(issuer=issuer, vo=vo, action='add_rse', kwargs=kwargs):
raise exception.AccessDenied('Account %s can not add RSE' % (issuer))
return rse_module.add_rse(rse, vo=vo, deterministic=deterministic, volatile=volatile, city=city,
region_code=region_code, country_name=country_name, staging_area=staging_area,
continent=continent, time_zone=time_zone, ISP=ISP, rse_type=rse_type, latitude=latitude,
longitude=longitude, ASN=ASN, availability=availability) | 3b41e227ea64c5f03d80ae8734c29b24f9c3bed9 | 12,455 |
from typing import Dict
from typing import Tuple
from typing import List
def multi_graph_partition(costs: Dict, probs: Dict, p_t: np.ndarray,
idx2nodes: Dict, ot_hyperpara: Dict,
weights: Dict = None,
predefine_barycenter: bool = False) -> \
Tuple[List[Dict], List[Dict], List[Dict], Dict, np.ndarray]:
"""
Achieve multi-graph partition via calculating Gromov-Wasserstein barycenter
between the target graphs and a proposed one
Args:
costs: a dictionary of graphs {key: graph idx,
value: (n_s, n_s) adjacency matrix of source graph}
probs: a dictionary of graphs {key: graph idx,
value: (n_s, 1) the distribution of source nodes}
p_t: (n_t, 1) the distribution of target nodes
idx2nodes: a dictionary of graphs {key: graph idx,
value: a dictionary {key: idx of row in cost,
value: name of node}}
ot_hyperpara: a dictionary of hyperparameters
weights: a dictionary of graph {key: graph idx,
value: the weight of the graph}
predefine_barycenter: False: learn barycenter, True: use predefined barycenter
Returns:
sub_costs_cluster: a list (one entry per cluster) of dictionaries {key: graph idx,
value: sub cost matrix}
sub_probs_cluster: a list (one entry per cluster) of dictionaries {key: graph idx,
value: sub node distribution}
sub_idx2nodes_cluster: a list (one entry per cluster) of dictionaries {key: graph idx,
value: a dictionary mapping indices to nodes' names}
trans: a dictionary {key: graph idx,
value: an optimal transport between the graph and the barycenter}
cost_t: the reference graph corresponding to the partition result
sub_costs_cluster = []
sub_idx2nodes_cluster = []
sub_probs_cluster = []
sub_costs_all = {}
sub_idx2nodes_all = {}
sub_probs_all = {}
if predefine_barycenter is True:
cost_t = csr_matrix(np.diag(p_t[:, 0]))
trans = {}
for n in costs.keys():
sub_costs_all[n], sub_probs_all[n], sub_idx2nodes_all[n], trans[n] = graph_partition(costs[n],
probs[n],
p_t,
idx2nodes[n],
ot_hyperpara)
else:
cost_t, trans, _ = Gwl.gromov_wasserstein_barycenter(costs, probs, p_t, ot_hyperpara, weights)
for n in costs.keys():
sub_costs, sub_probs, sub_idx2nodes = node_cluster_assignment(costs[n],
trans[n],
probs[n],
p_t,
idx2nodes[n])
sub_costs_all[n] = sub_costs
sub_idx2nodes_all[n] = sub_idx2nodes
sub_probs_all[n] = sub_probs
for i in range(p_t.shape[0]):
sub_costs = {}
sub_idx2nodes = {}
sub_probs = {}
for n in costs.keys():
if i in sub_costs_all[n].keys():
sub_costs[n] = sub_costs_all[n][i]
sub_idx2nodes[n] = sub_idx2nodes_all[n][i]
sub_probs[n] = sub_probs_all[n][i]
sub_costs_cluster.append(sub_costs)
sub_idx2nodes_cluster.append(sub_idx2nodes)
sub_probs_cluster.append(sub_probs)
return sub_costs_cluster, sub_probs_cluster, sub_idx2nodes_cluster, trans, cost_t | a3743cd9cc9e7f9a10eb84992fb74e7fe57f5792 | 12,456 |
def TDataStd_BooleanArray_Set(*args):
"""
* Finds or creates an attribute with the array.
:param label:
:type label: TDF_Label &
:param lower:
:type lower: int
:param upper:
:type upper: int
:rtype: Handle_TDataStd_BooleanArray
"""
return _TDataStd.TDataStd_BooleanArray_Set(*args) | c458a1182474432d2df049ae3126a6b6b2b49a8e | 12,457 |
def py_list_to_tcl_list(py_list):
""" Convert Python list to Tcl list using Tcl interpreter.
:param py_list: Python list.
:type py_list: list
:return: string representing the Tcl string equivalent to the Python list.
"""
py_list_str = [str(s) for s in py_list]
return tcl_str(tcl_interp_g.eval('split' + tcl_str('\t'.join(py_list_str)) + '\\t')) | 7f42044b8a0b28089abf453e7a1b65d5cb1fb399 | 12,458 |
def get_post_count(user):
"""
Get the number of posts published by the request user.
Parameters
------------
user: The request user
Returns
-------
count: int
The number of posts published by the request user.
"""
count = Post.objects.filter(publisher=user).count()
return count | 6000bcd43ef2b8edf3c1dd04df89dcef38f110d5 | 12,459 |
from config import employee_required_fields
def create_new_employee(employees):
"""
Create a new employee record with the employees dictionary
Use the employee_sections dictionary template to create a
new employee record.
"""
subsidiary = input('Employee Subsidiary (SK, CZ):')
employee_id = generate_employee_id(subsidiary, employees)
employee = {} # Storage for new employee
print('Please, enter records for new employee ID: ' + employee_id)
# Iterating over 'employee_sections'
for section in employee_sections['<employee_id>']:
# Inserting empty section
employee[section] = {}
for field in employee_sections['<employee_id>'][section]:
_input = ''
while not _input:
_input = input(section + '/' + field + ': ')
if not _input and field in employee_required_fields:
print('This field is required, please enter the value.')
else:
employee[section][field] = _input
break
print(employee)
employees[employee_id] = employee
print('Thank you, entry has been completed for ID: ' + employee_id)
input('Press ENTER to continue')
commit_changes(file_with_employees, str(employees))
return employees | aa5d0981c2b81ad65ed5ad0368fd1b3b79796a40 | 12,460 |
def gather_squares_triangles(p1,p2,depth):
""" Draw Square and Right Triangle given 2 points,
Recurse on new points
args:
p1,p2 (float,float) : absolute position on base vertices
depth (int) : decrementing counter that terminates recursion
return:
squares [(float,float,float,float)...] : absolute positions of
vertices of squares
triangles [(float,float,float)...] : absolute positions of
vertices of right triangles
"""
# Break Recursion if depth is met
if depth == 0:
return [],[]
# Generate Points
pd = (p2[0] - p1[0]),(p1[1] - p2[1])
p3 = (p2[0] - pd[1]),(p2[1] - pd[0])
p4 = (p1[0] - pd[1]),(p1[1] - pd[0])
p5 = (p4[0] + (pd[0] - pd[1])/2),(p4[1] - (pd[0] + pd[1])/2)
# Gather Points further down the tree
squares_left,triangles_left = gather_squares_triangles(p4,p5,depth-1)
squares_right,triangles_right = gather_squares_triangles(p5,p3,depth-1)
# Merge and Return
squares = [[p1,p2,p3,p4]]+squares_left+squares_right
triangles = [[p3,p4,p5]]+triangles_left+triangles_right
return squares,triangles | de4e720eb10cb378f00086a6e8e45886746055c0 | 12,461 |
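# Hypothetical usage sketch for gather_squares_triangles: build a depth-3
# Pythagoras tree from a unit base edge and count the generated shapes.
if __name__ == "__main__":
    squares, triangles = gather_squares_triangles((0.0, 1.0), (1.0, 1.0), 3)
    print(len(squares), "squares,", len(triangles), "triangles")  # 7 squares, 7 triangles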
def update_node(node_name, node_type, root=None):
"""
! Node is assumed to have only one input and one output port with a maximum
of one connection for each.
Returns:
NodegraphAPI.Node: newly created node
"""
new = NodegraphAPI.CreateNode(node_type, root or NodegraphAPI.GetRootNode())
if new.getType() == "Group":
new_in = new.addInputPort("in")
new_out = new.addOutputPort("out")
else:
new_in = new.getInputPortByIndex(0)
new_out = new.getOutputPortByIndex(0)
existingn = NodegraphAPI.GetNode(node_name)
if existingn:
# we assume there is only 1 input/output port with only one connection
in_port = existingn.getInputPorts()[0]
in_port = in_port.getConnectedPort(0)
out_port = existingn.getOutputPorts()[0]
out_port = out_port.getConnectedPort(0)
pos = NodegraphAPI.GetNodePosition(existingn) # type: tuple
existingn.delete()
NodegraphAPI.SetNodePosition(new, pos)
if in_port:
in_port.connect(new_in)
if out_port:
out_port.connect(new_out)
logger.info("[update_node] Found existing node, it has been updated.")
new.setName(node_name)
logger.info("[update_node] Finished for node <{}>".format(node_name))
return new | 916beec7de527ee56d5326061aa2c367af17434f | 12,462 |
def dan_acf(x, axis=0, fast=False):
"""
Estimate the autocorrelation function of a time series using the FFT.
Args:
x (array): The time series. If multidimensional, set the time axis
using the ``axis`` keyword argument and the function will be
computed for every other axis.
axis (Optional[int]): The time axis of ``x``. Assumed to be the first
axis if not specified.
fast (Optional[bool]): If ``True``, only use the largest ``2^n``
entries for efficiency. (default: False)
Returns:
acf (array): The acf array.
"""
x = np.atleast_1d(x)
m = [slice(None), ] * len(x.shape)
# For computational efficiency, crop the chain to the largest power of
# two if requested.
if fast:
n = int(2**np.floor(np.log2(x.shape[axis])))
m[axis] = slice(0, n)
        x = x[tuple(m)]
else:
n = x.shape[axis]
# Compute the FFT and then (from that) the auto-correlation function.
f = np.fft.fft(x-np.mean(x, axis=axis), n=2*n, axis=axis)
m[axis] = slice(0, n)
acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
m[axis] = 0
return acf / acf[m] | 85273d95564f0e8c0afb9ff00ac23dc04539f291 | 12,463 |
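# Hypothetical usage sketch for dan_acf (assumes numpy is imported as np at
# module level, as the function itself requires): the ACF of white noise is
# 1 at lag 0 by construction and near 0 elsewhere.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    acf = dan_acf(rng.normal(size=4096), fast=True)
    print(acf[0], abs(acf[1:100]).max() < 0.2)  # ~1.0 True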
from datetime import datetime
def schedule_decision():
"""最適化の実行と結果の表示を行う関数"""
    # Show the top page (when a GET request comes in)
if request.method == "GET":
return render_template("scheduler/schedule_decision.html", solution_html=None)
    # Runs when the "Run optimization" button is pressed (a POST request)
    # Check that data has been uploaded; if invalid, return to the previous page
if not check_request(request):
return redirect(request.url)
    # Preprocessing (load the data)
df_kagisime, df_gomisute = preprocess(request)
    # Run the optimization
prob = KandGProblem(df_kagisime, df_gomisute)
solution_df = prob.solve()
L_gomisute_members = list(prob.L_gomisute_members)
    # If the user is logged in, store the decided schedule in the DB.
if current_user.is_authenticated:
yyyy, mm, _ = solution_df.index[0].split("/")
user_id = session["_user_id"]
print(user_id)
print("currentuser:", current_user)
is_new_schedule = not ScheduleLists.query.filter_by(
user_id=user_id, yyyymm=yyyy + mm
).all()
if is_new_schedule:
schedule_list = ScheduleLists(user_id=user_id, yyyymm=yyyy + mm)
db.session.add(schedule_list)
db.session.commit()
schedulelist_id = (
ScheduleLists.query.filter_by(user_id=user_id, yyyymm=yyyy + mm)
.group_by("id")
.first()
)
print(schedulelist_id.id)
for row in solution_df.itertuples():
if not is_new_schedule:
print(datetime.strptime(row[0], "%Y/%m/%d"))
old_schedule = Schedules.query.filter_by(
schedulelist_id=schedulelist_id.id,
date=datetime.strptime(row[0], "%Y/%m/%d"),
).first()
print(old_schedule)
if old_schedule:
old_schedule.k_members = row[1]
old_schedule.g_members = row[2]
db.session.add(old_schedule)
db.session.commit()
else:
schedule = Schedules(
schedulelist_id=schedulelist_id.id,
date=datetime.strptime(row[0], "%Y/%m/%d"),
k_members=row[1],
g_members=row[2],
)
db.session.add(schedule)
db.session.commit()
    # Postprocessing (convert the optimization result into an HTML-renderable form)
solution_html = postprocess(solution_df)
return render_template(
"scheduler/schedule_decision.html",
solution_html=solution_html,
solution_df=solution_df,
L_gomisute_members=" ".join(L_gomisute_members),
) | 6f259961d027b6e4a3dc88289a5ba62b162705f6 | 12,464 |
def infection_rate_asymptomatic_30x40():
"""
Real Name: b'infection rate asymptomatic 30x40'
Original Eqn: b'contact infectivity asymptomatic 30x40*(social distancing policy SWITCH self 40*social distancing policy 40\\\\ +(1-social distancing policy SWITCH self 40))*Infected asymptomatic 30x40*Susceptible 40\\\\ /non controlled pop 30x40'
Units: b'person/Day'
Limits: (None, None)
Type: component
b''
"""
return contact_infectivity_asymptomatic_30x40() * (
social_distancing_policy_switch_self_40() * social_distancing_policy_40() +
(1 - social_distancing_policy_switch_self_40())
) * infected_asymptomatic_30x40() * susceptible_40() / non_controlled_pop_30x40() | 16aebdca2259933dcdab1a00ed8d37b10d5b8714 | 12,465 |
def slug(hans, style=Style.NORMAL, heteronym=False, separator='-',
errors='default', strict=True):
"""将汉字转换为拼音,然后生成 slug 字符串.
:param hans: 汉字字符串( ``'你好吗'`` )或列表( ``['你好', '吗']`` ).
可以使用自己喜爱的分词模块对字符串进行分词处理,
只需将经过分词处理的字符串列表传进来就可以了。
:type hans: unicode 字符串或字符串列表
:param style: 指定拼音风格,默认是 :py:attr:`~pypinyin.Style.NORMAL` 风格。
更多拼音风格详见 :class:`~pypinyin.Style`
:param heteronym: 是否启用多音字
:param separator: 两个拼音间的分隔符/连接符
:param errors: 指定如何处理没有拼音的字符,详情请参考
:py:func:`~pypinyin.pinyin`
:param strict: 只获取声母或只获取韵母相关拼音风格的返回结果
是否严格遵照《汉语拼音方案》来处理声母和韵母,
详见 :ref:`strict`
:return: slug 字符串.
:raise AssertionError: 当传入的字符串不是 unicode 字符时会抛出这个异常
::
>>> import pypinyin
>>> from pypinyin import Style
>>> pypinyin.slug('中国人')
'zhong-guo-ren'
>>> pypinyin.slug('中国人', separator=' ')
'zhong guo ren'
>>> pypinyin.slug('中国人', style=Style.FIRST_LETTER)
'z-g-r'
>>> pypinyin.slug('中国人', style=Style.CYRILLIC)
'чжун1-го2-жэнь2'
"""
return separator.join(
chain(
*_default_pinyin.pinyin(
hans, style=style, heteronym=heteronym,
errors=errors, strict=strict
)
)
) | 124431e3ea8747dfdc024f93e88f692746797013 | 12,466 |
def A_weight(signal, fs):
"""
Return the given signal after passing through an A-weighting filter
signal : array_like
Input signal
fs : float
Sampling frequency
"""
b, a = A_weighting(fs)
return lfilter(b, a, signal) | 1c6abdd90b85762db4383972de7508d00b561065 | 12,467 |
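# Hypothetical usage sketch for A_weight (assumes A_weighting and
# scipy.signal.lfilter are available at module level, as the function requires):
# fs = 44100
# noise = np.random.default_rng(0).normal(size=fs)  # one second of white noise
# weighted = A_weight(noise, fs)                    # A-weighted version of the signal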
import argparse
from pathlib import Path
def getCommandLine() -> argparse.Namespace:
    """Parse the command-line arguments.
    Returns:
        argparse.Namespace: the parsed arguments
    """
    parser = argparse.ArgumentParser(description='Convert tests from TFS to the Testrail/TestIT xml format')
    action = parser.add_subparsers(dest='action', required=True)
    subparser = action.add_parser('to-xml', help='Download tests from TFS and convert them to xml', add_help=False)
    subparser.add_argument('-i', '--id', type=int, required=True, help='Test plan ID')
    subparser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
                           help='Show the commands for converting to xml')
    subparser.add_argument('--project', '-pj', required=True, type=str, help='Project name within the collection')
    subparser.add_argument('--collection', '-ct', required=True, type=str, help='Collection name')
    subparser.add_argument('--cert', '-c', required=False, type=Path, help='Certificate for accessing TFS', default=None)
    return parser.parse_args() | c0b27d2488115c059a713776dd4fc209f1db7011 | 12,468
def listall(context, uri=None):
"""
*musicpd.org, music database section:*
``listall [URI]``
Lists all songs and directories in ``URI``.
"""
result = []
root_path = translator.normalize_path(uri)
# TODO: doesn't the dispatcher._call_handler have enough info to catch
# the error this can produce, set the command and then 'raise'?
try:
uri = context.directory_path_to_uri(root_path)
except MpdNoExistError as e:
e.command = 'listall'
e.message = 'Not found'
raise
browse_futures = [(root_path, context.core.library.browse(uri))]
while browse_futures:
base_path, future = browse_futures.pop()
for ref in future.get():
if ref.type == Ref.DIRECTORY:
path = '/'.join([base_path, ref.name.replace('/', '')])
result.append(('directory', path))
browse_futures.append(
(path, context.core.library.browse(ref.uri)))
elif ref.type == Ref.TRACK:
result.append(('file', ref.uri))
if not result:
raise MpdNoExistError('Not found')
return [('directory', root_path)] + result | f9d2be6b8155b3110aa085ac2664665ee8393c23 | 12,469 |
from typing import Tuple
from typing import Union
import traceback
def send_task_to_executor(task_tuple: TaskInstanceInCelery) \
-> Tuple[TaskInstanceKey, CommandType, Union[AsyncResult, ExceptionWithTraceback]]:
"""Sends task to executor."""
key, _, command, queue, task_to_run = task_tuple
try:
with timeout(seconds=OPERATION_TIMEOUT):
result = task_to_run.apply_async(args=[command], queue=queue)
except Exception as e: # pylint: disable=broad-except
exception_traceback = "Celery Task ID: {}\n{}".format(key, traceback.format_exc())
result = ExceptionWithTraceback(e, exception_traceback)
return key, command, result | cbc93ac3a3c146b748c0ec88eaa9cb2cd631ac85 | 12,470 |
import pathlib
import os
def createPyarchFilePath(filePath):
"""
This method translates from an ESRF "visitor" path to a "pyarch" path:
/data/visitor/mx415/id14eh1/20100209 -> /data/pyarch/2010/id14eh1/mx415/20100209
"""
pyarchFilePath = None
if isinstance(filePath, str):
filePath = pathlib.Path(filePath)
listOfDirectories = filePath.parts
if UtilsConfig.isEMBL():
if 'p13' in listOfDirectories[0:3] or 'P13' in listOfDirectories[0:3]:
pyarchFilePath = os.path.join('/data/ispyb/p13',
*listOfDirectories[4:])
else:
pyarchFilePath = os.path.join('/data/ispyb/p14',
*listOfDirectories[4:])
return pyarchFilePath
listBeamlines = ['bm30a', 'id14eh1', 'id14eh2', 'id14eh3', 'id14eh4',
'id23eh1', 'id23eh2', 'id29', 'id30a1',
'id30a2', 'id30a3', 'id30b']
# Check that we have at least four levels of directories:
if len(listOfDirectories) > 5:
dataDirectory = listOfDirectories[1]
secondDirectory = listOfDirectories[2]
thirdDirectory = listOfDirectories[3]
fourthDirectory = listOfDirectories[4]
fifthDirectory = listOfDirectories[5]
year = fifthDirectory[0:4]
proposal = None
beamline = None
if dataDirectory == 'data' and secondDirectory == 'gz':
if thirdDirectory == 'visitor':
proposal = fourthDirectory
beamline = fifthDirectory
elif fourthDirectory == 'inhouse':
proposal = fifthDirectory
beamline = thirdDirectory
else:
raise RuntimeError(
'Illegal path for UtilsPath.createPyarchFilePath: ' +
'{0}'.format(filePath))
listOfRemainingDirectories = listOfDirectories[6:]
elif dataDirectory == 'data' and secondDirectory == 'visitor':
proposal = listOfDirectories[3]
beamline = listOfDirectories[4]
listOfRemainingDirectories = listOfDirectories[5:]
elif dataDirectory == 'data' and secondDirectory in listBeamlines:
beamline = secondDirectory
proposal = listOfDirectories[4]
listOfRemainingDirectories = listOfDirectories[5:]
if proposal is not None and beamline is not None:
pyarchFilePath = pathlib.Path('/data/pyarch') / year / beamline
pyarchFilePath = pyarchFilePath / proposal
for directory in listOfRemainingDirectories:
pyarchFilePath = pyarchFilePath / directory
if pyarchFilePath is None:
logger.warning(
'UtilsPath.createPyarchFilePath: path not converted for' +
' pyarch: %s ' % filePath)
else:
pyarchFilePath = pyarchFilePath.as_posix()
return pyarchFilePath | daf7cf23126ae51cd30492981dbcae9fe0431afd | 12,471 |
def get_embed(input_data, vocab_size, embed_dim):
"""
Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input.
"""
# TODO: Implement Function
embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
embed = tf.nn.embedding_lookup(embedding, input_data)
return embed | b4c280e10df338633b69cf4b3c65967c1f80b9c5 | 12,472 |
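# Hypothetical usage sketch for get_embed (TF1-style graph mode, matching the
# API used above):
# input_data = tf.placeholder(tf.int32, shape=[None, None])      # batches of token ids
# embed = get_embed(input_data, vocab_size=10000, embed_dim=256)  # [batch, seq_len, 256]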
def geometries_from_bbox(north, south, east, west, tags):
"""
Create a GeoDataFrame of OSM entities within a N, S, E, W bounding box.
Parameters
----------
north : float
northern latitude of bounding box
south : float
southern latitude of bounding box
east : float
eastern longitude of bounding box
west : float
western longitude of bounding box
tags : dict
Dict of tags used for finding objects in the selected area. Results
returned are the union, not intersection of each individual tag.
Each result matches at least one given tag. The dict keys should be
OSM tags, (e.g., `building`, `landuse`, `highway`, etc) and the dict
values should be either `True` to retrieve all items with the given
tag, or a string to get a single tag-value combination, or a list of
strings to get multiple values for the given tag. For example,
`tags = {'building': True}` would return all building footprints in
the area. `tags = {'amenity':True, 'landuse':['retail','commercial'],
'highway':'bus_stop'}` would return all amenities, landuse=retail,
landuse=commercial, and highway=bus_stop.
Returns
-------
gdf : geopandas.GeoDataFrame
Notes
-----
You can configure the Overpass server timeout, memory allocation, and
other custom settings via ox.config().
"""
# convert bounding box to a polygon
polygon = utils_geo.bbox_to_poly(north, south, east, west)
# create GeoDataFrame of geometries within this polygon
gdf = geometries_from_polygon(polygon, tags)
return gdf | 32aeebe7f644df00b613ef6e0d4f30baef1a5743 | 12,473 |
def dBzdtAnalCircT(a, t, sigma):
"""
Hz component of analytic solution for half-space (Circular-loop source)
Src and Rx are on the surface and receiver is located at the center of the loop.
Src waveform here is step-off.
.. math::
\\frac{\partial h_z}{\partial t} = -\\frac{I}{\mu_0\sigma a^3} \
\left( 3erf(\\theta a) - \\frac{2}{\sqrt{\pi}}\\theta a (3+2\\theta^2 a^2) e^{-\\theta^2a^2}\\right)
.. math::
\\theta = \sqrt{\\frac{\sigma\mu}{4t}}
"""
theta = np.sqrt((sigma*mu_0)/(4*t))
const = -1/(mu_0*sigma*a**3)
ta = theta*a
eta = erf(ta)
t1 = 3*eta
t2 = -2/(np.pi**0.5)*ta*(3+2*ta**2)*np.exp(-ta**2)
dhzdt = const*(t1+t2)
return mu_0*dhzdt | 18b9428528ed11a121ad01578d2bfc35faceae21 | 12,474 |
def count_increasing(ratings, n):
"""
Only considering the increasing case
"""
arr = [1] * n
cnt = 1
for i in range(1, n):
cnt = cnt + 1 if ratings[i - 1] < ratings[i] else 1
arr[i] = cnt
return arr | 9fe274527fbba505467a195bf555c77d2f3e6aed | 12,475 |
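# Hypothetical usage sketch for count_increasing: each entry is the length of
# the increasing run that ends at that index.
if __name__ == "__main__":
    ratings = [1, 2, 2, 3, 4, 1]
    print(count_increasing(ratings, len(ratings)))  # [1, 2, 1, 2, 3, 1]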
import copy
def load_train_data_frame(train_small, target, keras_options, model_options, verbose=0):
"""
### CAUTION: TF2.4 Still cannot load a DataFrame with Nulls in string or categoricals!
############################################################################
#### TF 2.4 still cannot load tensor_slices into ds if an object or string column
#### that has nulls in it! So we need to find other ways to load tensor_slices by
#### first filling dataframe with pandas fillna() function!
#############################################################################
"""
train_small = copy.deepcopy(train_small)
DS_LEN = model_options['DS_LEN']
#### do this for dataframes ##################
try:
batch_size = keras_options["batchsize"]
if isinstance(keras_options["batchsize"], str):
batch_size = find_batch_size(DS_LEN)
except:
#### If it is not given find it here ####
batch_size = find_batch_size(DS_LEN)
######### Modify or Convert column names to fit tensorflow rules of no space in names!
sel_preds = ["_".join(x.split(" ")) for x in list(train_small) ]
#### This can also be a problem with other special characters ###
sel_preds = ["_".join(x.split("(")) for x in sel_preds ]
sel_preds = ["_".join(x.split(")")) for x in sel_preds ]
sel_preds = ["_".join(x.split("/")) for x in sel_preds ]
sel_preds = ["_".join(x.split("\\")) for x in sel_preds ]
sel_preds = ["_".join(x.split("?")) for x in sel_preds ]
sel_preds = [x.lower() for x in sel_preds ]
if isinstance(target, str):
target = "_".join(target.split(" "))
target = "_".join(target.split("("))
target = "_".join(target.split(")"))
target = "_".join(target.split("/"))
target = "_".join(target.split("\\"))
target = "_".join(target.split("?"))
target = target.lower()
model_label = 'Single_Label'
else:
target = ["_".join(x.split(" ")) for x in target ]
target = ["_".join(x.split("(")) for x in target ]
target = ["_".join(x.split(")")) for x in target ]
target = ["_".join(x.split("/")) for x in target ]
target = ["_".join(x.split("\\")) for x in target ]
target = ["_".join(x.split("?")) for x in target ]
target = [x.lower() for x in target ]
model_label = 'Multi_Label'
train_small.columns = sel_preds
print('Alert! Modified column names to satisfy rules for column names in Tensorflow...')
#### if target is changed you must send that modified target back to other processes ######
### usecols is basically target in a list format. Very handy to know when target is a list.
try:
modeltype = model_options["modeltype"]
if model_options["modeltype"] == '':
### usecols is basically target in a list format. Very handy to know when target is a list.
modeltype, model_label, usecols = find_problem_type(train_small, target, model_options, verbose)
else:
if isinstance(target, str):
usecols = [target]
else:
usecols = copy.deepcopy(target)
except:
### if modeltype is given, then do not find the model type using this function
modeltype, model_label, usecols = find_problem_type(train_small, target, model_options, verbose)
### Cat_Vocab_Dict contains all info about vocabulary in each variable and their size
print(' Classifying variables using data sample in pandas...')
train_small, var_df, cat_vocab_dict = classify_features_using_pandas(train_small, target, model_options, verbose=verbose)
########## Just transfer all the values from var_df to cat_vocab_dict ##################################
for each_key in var_df:
cat_vocab_dict[each_key] = var_df[each_key]
############################################################################################################
model_options['modeltype'] = modeltype
model_options['model_label'] = model_label
cat_vocab_dict['target_variables'] = usecols
cat_vocab_dict['modeltype'] = modeltype
model_options['batch_size'] = batch_size
########## Find small details about the data to help create the right model ###
target_transformed = False
if modeltype != 'Regression':
if isinstance(target, str):
#### This is for Single Label Problems ######
if train_small[target].dtype == 'object' or str(train_small[target].dtype).lower() == 'category':
target_transformed = True
target_vocab = train_small[target].unique()
num_classes = len(target_vocab)
else:
if 0 not in np.unique(train_small[target]):
target_transformed = True ### label encoding must be done since no zero class!
target_vocab = train_small[target].unique()
num_classes = len(train_small[target].value_counts())
elif isinstance(target, list):
#### This is for Multi-Label Problems #######
copy_target = copy.deepcopy(target)
num_classes = []
            for each_target in copy_target:
                if train_small[each_target].dtype == 'object' or str(train_small[each_target].dtype).lower() == 'category':
                    target_transformed = True
                    target_vocab = train_small[each_target].unique().tolist()
                    num_classes_each = len(target_vocab)
                else:
                    if 0 not in np.unique(train_small[each_target]):
                        target_transformed = True ### label encoding must be done since no zero class!
                        target_vocab = train_small[each_target].unique()
                    num_classes_each = train_small[each_target].nunique()
num_classes.append(int(num_classes_each))
else:
num_classes = 1
target_vocab = []
########### find the number of labels in data ####
if isinstance(target, str):
num_labels = 1
elif isinstance(target, list):
if len(target) == 1:
num_labels = 1
else:
num_labels = len(target)
#### This is where we set the model_options for num_classes and num_labels #########
model_options['num_labels'] = num_labels
model_options['num_classes'] = num_classes
cat_vocab_dict['num_labels'] = num_labels
cat_vocab_dict['num_classes'] = num_classes
cat_vocab_dict["target_transformed"] = target_transformed
#### fill missing values using this function ##############
train_small = fill_missing_values_for_TF2(train_small, cat_vocab_dict)
##### Do the deletion of cols after filling with missing values since otherwise fill errors!
drop_cols = var_df['cols_delete']
cat_vocab_dict['columns_deleted'] = drop_cols
if len(drop_cols) > 0: ### drop cols that have been identified for deletion ###
print(' Dropping %s columns marked for deletion...' %drop_cols)
train_small.drop(drop_cols,axis=1,inplace=True)
######### Now load the train Dataframe into a tf.data.dataset #############
if target_transformed:
####################### T R A N S F O R M I N G T A R G E T ########################
train_small[target], cat_vocab_dict = transform_train_target(train_small, target, modeltype,
model_label, cat_vocab_dict)
if isinstance(target, str):
#### For single label do this: labels can be without names since there is only one label
if target != '':
labels = train_small[target]
features = train_small.drop(target, axis=1)
ds = tf.data.Dataset.from_tensor_slices((dict(features), labels))
else:
print('target variable is blank - please fix input and try again')
return
elif isinstance(target, list):
#### For multi label do this: labels must be dict and hence with names since there are many targets
labels = train_small[target]
features = train_small.drop(target, axis=1)
ds = tf.data.Dataset.from_tensor_slices((dict(features), dict(labels)))
else:
ds = tf.data.Dataset.from_tensor_slices(dict(train_small))
###### Now save some defaults in cat_vocab_dict ##########################
try:
keras_options["batchsize"] = batch_size
cat_vocab_dict['batch_size'] = batch_size
except:
batch_size = find_batch_size(DS_LEN)
keras_options["batchsize"] = batch_size
cat_vocab_dict['batch_size'] = batch_size
##########################################################################
#### C H E C K F O R I N F I N I T E V A L U E S H E R E ##########
##########################################################################
cols_with_infinity = find_columns_with_infinity(train_small)
if cols_with_infinity:
train_small = drop_rows_with_infinity(train_small, cols_with_infinity, fill_value=True)
return train_small, ds, var_df, cat_vocab_dict, keras_options, model_options | 85c496b485bbc26afbadf181a2231e3f5bd93706 | 12,476 |
def stat_float_times(space, newval=-1):
"""stat_float_times([newval]) -> oldval
Determine whether os.[lf]stat represents time stamps as float objects.
If newval is True, future calls to stat() return floats, if it is False,
future calls return ints.
If newval is omitted, return the current setting.
"""
state = space.fromcache(StatState)
if newval == -1:
return space.newbool(state.stat_float_times)
else:
state.stat_float_times = (newval != 0) | e183f0cc2ce56bc7b4ac6ce95d8cb671a963422f | 12,477 |
def decorate(rvecs):
"""Output range vectors into some desired string format"""
return ', '.join(['{%s}' % ','.join([str(x) for x in rvec]) for rvec in rvecs]) | 31a3d4414b0b88ffd92a5ddd8eb09aaf90ef3742 | 12,478 |
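# Hypothetical usage sketch for decorate:
if __name__ == "__main__":
    print(decorate([(1, 5), (2, 8, 3)]))  # {1,5}, {2,8,3}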
def update_topic_collection_items(request_ctx, collection_item_id, topic_id, **request_kwargs):
"""
Accepts the same parameters as create
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param collection_item_id: (required) ID
:type collection_item_id: string
:param topic_id: (required) ID
:type topic_id: string
:return: Update a topic
:rtype: requests.Response (with void data)
"""
path = '/v1/collection_items/{collection_item_id}/discussion_topics/{topic_id}'
url = request_ctx.base_api_url + path.format(collection_item_id=collection_item_id, topic_id=topic_id)
response = client.put(request_ctx, url, **request_kwargs)
return response | 06b0709f5fa4acf189baef8f2665bee81b3c4993 | 12,479 |
def upsample(inputs, factor=(2, 2), interpolation='nearest'):
"""
Upsampling layer by factor
Parameters
----------
inputs: Input tensor
factor: The upsampling factors for (height, width). One integer or tuple of
two integers
interpolation: A string, one of [`nearest`, `bilinear`, 'bicubic', 'area'].
"""
# get new_size
_, height, width, _ = inputs.get_shape().as_list()
factor = _make_pair(factor)
new_height = height * factor[0]
new_width = width * factor[1]
new_size = (new_height, new_width)
# get interpolation type
interp_types = {
'nearest': tf.image.ResizeMethod.NEAREST_NEIGHBOR,
'bilinear': tf.image.ResizeMethod.BILINEAR,
'bicubic': tf.image.ResizeMethod.BICUBIC,
'area': tf.image.ResizeMethod.AREA,
}
if interpolation not in interp_types.keys():
raise ValueError("interpolation must be one of "
"['nearest', 'bilinear', 'bicubic', 'area']")
interp_type = interp_types.get(interpolation)
return tf.image.resize_images(inputs, size=new_size, method=interp_type) | dfbd42871e63cb685f9cfbf9185da38839a9ee4e | 12,480 |
def root_mean_squared_error(*args, **kwargs):
"""
Returns the square-root of ``scikit-learn``'s ``mean_squared_error`` metric.
All arguments are forwarded to that function.
"""
return np.sqrt(mean_squared_error(*args, **kwargs)) | 51084b2ec55d14657fa128f0df2bd3f438c2367b | 12,481 |
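# Hypothetical usage sketch for root_mean_squared_error (assumes numpy as np
# and scikit-learn's mean_squared_error are imported, as the function requires):
if __name__ == "__main__":
    print(root_mean_squared_error([3.0, -0.5, 2.0, 7.0], [2.5, 0.0, 2.0, 8.0]))  # ~0.6124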
def idwt2(Wimg, level=4):
""" inverse 2d wavelet transform
:param Wimg: 2d array
wavelet coefficients
:param level: int
level of wavelet transform - image shape has to be multiples of 2**level
:return: 2d array
image
"""
coeffs = _from_img_to_coeffs(Wimg, levels=level)
return pywt.waverec2(coeffs, wavelet='db4', mode='per') | 521ceca879b0961730b1efd6dac54772a2b41ca3 | 12,482 |
def get_color(card):
"""Returns the card's color
Args:
card (webelement): a visible card
Returns:
str: card's color
"""
color = card.find_element_by_xpath(".//div/*[name()='svg']/*[name()='use'][2]").get_attribute("stroke")
# both light and dark theme
if (color == "#ff0101" or color == "#ffb047"):
color = "red"
elif (color == "#800080" or color == "#ff47ff"):
color = "purple"
else:
color = "green"
return color | 452266b81d70973149fed4ab2e6cbc9c93591180 | 12,483 |
from typing import Dict
from typing import Any
def is_valid_path(parameters: Dict[str, Any]) -> bool:
"""Single "." chars and empty strings "" are excluded from path by urllib3.
    A path containing "/" or "%2F" will lead to ambiguous path resolution in
    many frameworks and libraries; such behaviour has been observed in both
    WSGI and ASGI applications.
    In such cases one variable in the path template will be empty, which leads to a 404 in most cases.
    Hence this case doesn't add much value and might produce false-positive results in Schemathesis runs.
"""
path_parameter_blacklist = (".", SLASH, "")
return not any(
(value in path_parameter_blacklist or is_illegal_surrogate(value) or isinstance(value, str) and SLASH in value)
for value in parameters.values()
) | 5f80ff76c535b3913efc7ba83e04c4c049a9e50b | 12,484 |
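# Hypothetical usage sketch for is_valid_path (SLASH and is_illegal_surrogate
# come from the surrounding Schemathesis module; shown only to illustrate intent):
# is_valid_path({"item_id": "abc"})  -> True
# is_valid_path({"item_id": "a/b"})  -> False  (embedded slash)
# is_valid_path({"item_id": "."})    -> False  (stripped by urllib3)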
import torch
def to_tensor(x):
"""
Arguments:
x: an instance of PIL image.
Returns:
        a float tensor with shape [1, 3, h, w],
it represents a RGB image with
pixel values in [0, 1] range.
"""
x = np.array(x)
x = torch.FloatTensor(x)
return x.permute(2, 0, 1).unsqueeze(0).div(255.0) | 6ff19bd7549a4fce455f03559420216020658c44 | 12,485 |
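# Hypothetical usage sketch for to_tensor (assumes Pillow is installed and
# numpy is imported as np, as the function requires); note the batch dimension
# added by unsqueeze(0).
from PIL import Image
if __name__ == "__main__":
    print(to_tensor(Image.new("RGB", (32, 32))).shape)  # torch.Size([1, 3, 32, 32])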
import tqdm
import os
def load_messier_catalog_images(path, img_size=None, disable_tqdm=False):
"""
Data loader for Messier catalog images. The images are available
in `messier-catalog-images` repository of MiraPy organisation.
:param path: String. Directory path.
:param img_size: Final dimensions of the image.
:param disable_tqdm: Boolean. Set True to disable progress bar.
:return: Array of images.
"""
images = []
for filename in tqdm(os.listdir(path), disable=disable_tqdm):
filepath = os.path.join(path, filename)
img = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
img = img/img.max()
img = img * 255.
if img_size:
img = cv2.resize(img, img_size)
images.append(np.array(img))
return np.array(images) | 2a0df9e99ff0c442df9bf78776af59a2659cdc46 | 12,486 |
import os
def load_stl10(data_dir, flatten=False, one_hot=True, normalize_range=False,
whiten_pixels=True, border_pad_size=0):
"""
Large part of this loader and the associated functions has been inspired from,
https://github.com/mttk/STL10/blob/master/stl10_input.py
"""
# path to the binary train file with image data
train_img_path = os.path.join(data_dir,'stl10_binary','train_X.bin')
# path to the binary train file with labels
train_label_path = os.path.join(data_dir,'stl10_binary','train_y.bin')
# path to the binary test file with image data
test_img_path = os.path.join(data_dir,'stl10_binary','test_X.bin')
# path to the binary test file with labels
test_label_path = os.path.join(data_dir,'stl10_binary','test_y.bin')
download_and_extract(data_dir)
# test to check if the whole dataset is read correctly
images_train = read_all_images(train_img_path)
print("Training images",images_train.shape)
labels_train = read_labels(train_label_path)
print("Training labels",labels_train.shape)
images_test = read_all_images(test_img_path)
print("Test images",images_test.shape)
labels_test = read_labels(test_label_path)
print("Test labels",labels_test.shape)
Xtrain = images_train.astype(np.float32) / 255.0
ytrain = labels_train
for i in range(len(ytrain)) :
ytrain[i] -= 1
split = int(np.floor(0.9 * Xtrain.shape[0]))
Xval = Xtrain[split:Xtrain.shape[0]]
yval = ytrain[split:Xtrain.shape[0]]
Xtrain = Xtrain[:split]
ytrain = ytrain[:split]
Xtest, ytest = images_test.astype(np.float32) / 255.0, labels_test
for i in range(len(ytest)) :
ytest[i] -= 1
if flatten :
print("Flatten Not Supported")
if normalize_range :
print("Normalize Range Not Supported")
if one_hot:
print("Train Shapes before one hot encoding ",Xtrain.shape, ytrain.shape)
ytest = idx_to_onehot(ytest, 10).astype(np.float32)
ytrain = idx_to_onehot(ytrain, 10).astype(np.float32)
yval = idx_to_onehot(yval, 10).astype(np.float32)
print("Train Shapes after one hot encoding",Xtrain.shape, ytrain.shape)
if whiten_pixels:
mean = Xtrain.mean(axis=0)[None, :]
std = Xtrain.std(axis=0)[None, :]
print("Other mean/std", mean.shape, std.shape)
Xtrain = (Xtrain - mean) / std
Xval = (Xval - mean) / std
Xtest = (Xtest - mean) / std
# NOTE: the zero padding is done after the potential whitening
if border_pad_size > 0:
Xtrain = zero_pad_border(Xtrain, border_pad_size)
Xval = zero_pad_border(Xval, border_pad_size)
Xtest = zero_pad_border(Xtest, border_pad_size)
return (Xtrain, ytrain, Xval, yval, Xtest, ytest) | d72f033175c8377442215fbab3f43b53705dfa03 | 12,487 |
def fetch_data(fold_path):
"""Fetch data saving in fold path.
Convert data into suitable format, using csv files in fold path.
:param fold_path: String. The fold in which data files are saved.
:return:
training_data: Dataframe. Combined dataframe to create training data.
testing_data: Dataframe. Combined dataframe to create testing data.
"""
# Read all the data from target fold path.
pokemon = pd.read_csv(fold_path+'/pokemon.csv')
combats = pd.read_csv(fold_path+'/combats.csv')
test_data = pd.read_csv(fold_path+'/tests.csv')
# Convert data into suitable format for training and testing.
training_data = convert_data(combats, pokemon, win_column='Winner')
testing_data = convert_data(test_data, pokemon)
return training_data, testing_data | 42ea9ea6d1d9d597acc4ed1a14099711642608f4 | 12,488 |
def add_chr_prefix(band):
"""
Return the band string with chr prefixed
"""
return ''.join(['chr', band]) | 08a99220023f10d79bdacdb062a27efcb51086ce | 12,489 |
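# Hypothetical usage sketch for add_chr_prefix:
if __name__ == "__main__":
    print(add_chr_prefix("17q21.31"))  # chr17q21.31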
def disable_text_recog_aug_test(cfg, set_types=None):
"""Remove aug_test from test pipeline of text recognition.
Args:
cfg (mmcv.Config): Input config.
set_types (list[str]): Type of dataset source. Should be
None or sublist of ['test', 'val']
Returns:
cfg (mmcv.Config): Output config removing
`MultiRotateAugOCR` in test pipeline.
"""
assert set_types is None or isinstance(set_types, list)
if set_types is None:
set_types = ['val', 'test']
for set_type in set_types:
if cfg.data[set_type].pipeline[1].type == 'MultiRotateAugOCR':
cfg.data[set_type].pipeline = [
cfg.data[set_type].pipeline[0],
*cfg.data[set_type].pipeline[1].transforms
]
return cfg | bda3a5420d32d55062b23a6af27cee3e203b878c | 12,490 |
def layer_svg(svg_bottom, svg_top, offset: list = [0.0, 0.0]):
"""
Adds one SVG over another. Modifies the bottom SVG in place.
    :param svg_bottom: The bottom SVG, in xml.etree.ElementTree form
    :param svg_top: The top SVG, in xml.etree.ElementTree form
:param offset: How far to offset the top SVG elements
"""
if svg_top is None:
return
# print(svg_top.tag)
for child in list(svg_top):
apply_offset(child, offset, offset_children=True)
svg_bottom.append(child)
return svg_bottom | 6c6a8151d17f4aff9f1491d1ed71772d9434ae4c | 12,491 |
import os
def get_data_home(data_home=None):
"""Return a path to the cache directory for example datasets.
This directory is then used by :func:`load_dataset`.
If the ``data_home`` argument is not specified, it tries to read from the
``CF_DATA`` environment variable and defaults to ``~/cf-data``.
"""
if data_home is None:
data_home = os.environ.get("CF_DATA", os.path.join("~", "cf-data"))
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
return data_home | 8e530b44db3387cb07da78dfbf3bc43f4855e9e0 | 12,492 |
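# Hypothetical usage sketch for get_data_home: resolve (and create) the cache
# directory, optionally overriding it via the CF_DATA environment variable.
if __name__ == "__main__":
    print(get_data_home())  # e.g. /home/user/cf-data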
def utxo_cmd(ctx, dry_run):
"""Get the node's current UTxO with the option of filtering by address(es)"""
try:
CardanoCli.execute(cmd=["cardano-cli", "query", "utxo"], dry_run=dry_run, include_network=True)
except CardanoPyError as cpe:
ctx.fail(cpe.message)
return cpe.return_code | 52807294a445fc2f641c1b921807bba898ad8c34 | 12,493 |
def delta_in_ms(delta):
"""
Convert a timedelta object to milliseconds.
"""
return delta.seconds*1000.0+delta.microseconds/1000.0 | 4ed048155daf4a4891488e28c674e905e1bbe947 | 12,494 |
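# Hypothetical usage sketch for delta_in_ms; note that timedelta.days is
# ignored by this helper.
from datetime import timedelta
if __name__ == "__main__":
    print(delta_in_ms(timedelta(seconds=1, microseconds=500)))  # 1000.5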
import slicer, collections, fnmatch
def getNodes(pattern="*", scene=None, useLists=False):
"""Return a dictionary of nodes where the name or id matches the ``pattern``.
By default, ``pattern`` is a wildcard and it returns all nodes associated
with ``slicer.mrmlScene``.
If multiple node share the same name, using ``useLists=False`` (default behavior)
returns only the last node with that name. If ``useLists=True``, it returns
a dictionary of lists of nodes.
"""
nodes = collections.OrderedDict()
if scene is None:
scene = slicer.mrmlScene
count = scene.GetNumberOfNodes()
for idx in range(count):
node = scene.GetNthNode(idx)
name = node.GetName()
id = node.GetID()
if (fnmatch.fnmatchcase(name, pattern) or
fnmatch.fnmatchcase(id, pattern)):
if useLists:
nodes.setdefault(node.GetName(), []).append(node)
else:
nodes[node.GetName()] = node
return nodes | 6d6c44987a800f361d45f4538167acb65e738418 | 12,495 |
from typing import Union
from typing import Type
from typing import Mapping
from typing import Optional
from typing import TypeVar
X = TypeVar("X")
def get_cls(
query: Union[None, str, Type[X]],
base: Type[X],
lookup_dict: Mapping[str, Type[X]],
lookup_dict_synonyms: Optional[Mapping[str, Type[X]]] = None,
default: Optional[Type[X]] = None,
suffix: Optional[str] = None,
) -> Type[X]:
"""Get a class by string, default, or implementation."""
if query is None:
if default is None:
raise ValueError(f'No default {base.__name__} set')
return default
elif not isinstance(query, (str, type)):
raise TypeError(f'Invalid {base.__name__} type: {type(query)} - {query}')
elif isinstance(query, str):
key = normalize_string(query, suffix=suffix)
if key in lookup_dict:
return lookup_dict[key]
if lookup_dict_synonyms is not None and key in lookup_dict_synonyms:
return lookup_dict_synonyms[key]
raise ValueError(f'Invalid {base.__name__} name: {query}')
elif issubclass(query, base):
return query
raise TypeError(f'Not subclass of {base.__name__}: {query}') | e5f805df5ef19de9939344beee21834e3f2556ab | 12,496 |
def selection_sort(data):
"""Sort a list of unique numbers in ascending order using selection sort. O(n^2).
The process includes repeatedly iterating through a list, finding the smallest element, and sorting that element.
Args:
data: data to sort (list of int)
Returns:
sorted list
"""
sorted_data = data[:]
for i, value in enumerate(sorted_data):
# find smallest value in unsorted subset
min_value = min(sorted_data[i:])
index_min = sorted_data.index(min_value)
# place smallest value at start of unsorted subset
sorted_data[i], sorted_data[index_min] = min_value, value
return sorted_data | 8b745be41c857669aedecb25b3006bbdc1ef04eb | 12,497 |
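# Hypothetical usage sketch for selection_sort:
if __name__ == "__main__":
    print(selection_sort([5, 1, 4, 2, 3]))  # [1, 2, 3, 4, 5]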
def _conv(args, filter_size, num_features, bias, reuse, w_init=None, b_init=0.0, scope='_conv'):
"""convolution:
Args:
args: a Tensor or a list of Tensors of dimension 3D, 4D or 5D
batch x n, Tensors.
filter_size: int tuple of filter height and width.
reuse: None/True, whether to reuse variables
w_init: weights initializer object
      b_init: an `int`, starting value to initialize the bias; 0.0 by default.
      num_features: int, number of features.
Returns:
A 3D, 4D, or 5D Tensor with shape [batch ... num_features]
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
# Calculate the total size of arguments on dimension 1.
total_arg_size_depth = 0
shapes = [a.get_shape().as_list() for a in args]
shape_length = len(shapes[0])
for shape in shapes:
if len(shape) not in [3, 4, 5]:
raise ValueError("Conv Linear expects 3D, 4D or 5D arguments: %s" % str(shapes))
if len(shape) != len(shapes[0]):
raise ValueError("Conv Linear expects all args to be of same Dimensiton: %s" % str(shapes))
else:
total_arg_size_depth += shape[-1]
dtype = [a.dtype for a in args][0]
# determine correct conv operation
if shape_length == 3:
conv_op = tf.nn.conv1d
strides = 1
elif shape_length == 4:
conv_op = tf.nn.conv2d
strides = shape_length * [1]
elif shape_length == 5:
conv_op = tf.nn.conv3d
strides = shape_length * [1]
# Now the computation.
with tf.variable_scope(scope, reuse=reuse):
kernel = tf.get_variable(
"W", filter_size + [total_arg_size_depth, num_features], dtype=dtype, initializer=w_init)
if len(args) == 1:
res = conv_op(args[0], kernel, strides, padding='SAME')
else:
res = conv_op(tf.concat(axis=shape_length - 1, values=args), kernel, strides, padding='SAME')
if not bias:
return res
bias_term = tf.get_variable(
"biases", [num_features],
dtype=dtype,
initializer=tf.constant_initializer(b_init, dtype=dtype))
return res + bias_term | 104d91623949e4506c4b72001c23b6ab7fb312ca | 12,498 |
from time import time
import os
def make_bright_star_mask_in_hp(nside, pixnum, verbose=True, gaiaepoch=2015.5,
maglim=12., matchrad=1., maskepoch=2023.0):
"""Make a bright star mask in a HEALPixel using Tycho, Gaia and URAT.
Parameters
----------
nside : :class:`int`
(NESTED) HEALPixel nside.
pixnum : :class:`int`
A single HEALPixel number.
verbose : :class:`bool`
If ``True`` then log informational messages.
Returns
-------
:class:`recarray`
The bright star mask in the form of `maskdatamodel.dtype`.
Notes
-----
- Runs in a a minute or so for a typical nside=4 pixel.
- See :func:`~desitarget.brightmask.make_bright_star_mask` for
descriptions of the output mask and the other input parameters.
"""
# ADM start the clock.
t0 = time()
# ADM read in the Tycho files.
tychofns = find_tycho_files_hp(nside, pixnum, neighbors=False)
tychoobjs = []
for fn in tychofns:
tychoobjs.append(fitsio.read(fn, ext='TYCHOHPX'))
tychoobjs = np.concatenate(tychoobjs)
# ADM create the Tycho reference magnitude, which is VT then HP
# ADM then BT in order of preference.
tychomag = tychoobjs["MAG_VT"].copy()
tychomag[tychomag == 0] = tychoobjs["MAG_HP"][tychomag == 0]
tychomag[tychomag == 0] = tychoobjs["MAG_BT"][tychomag == 0]
# ADM discard any Tycho objects below the input magnitude limit
# ADM and outside of the HEALPixels of interest.
theta, phi = np.radians(90-tychoobjs["DEC"]), np.radians(tychoobjs["RA"])
tychohpx = hp.ang2pix(nside, theta, phi, nest=True)
ii = (tychohpx == pixnum) & (tychomag < maglim)
tychomag, tychoobjs = tychomag[ii], tychoobjs[ii]
if verbose:
log.info('Read {} (mag < {}) Tycho objects (pix={})...t={:.1f} mins'.
format(np.sum(ii), maglim, pixnum, (time()-t0)/60))
# ADM read in the associated Gaia files. Also grab
# ADM neighboring pixels to prevent edge effects.
gaiafns = find_gaia_files(tychoobjs, neighbors=True)
gaiaobjs = []
cols = 'SOURCE_ID', 'RA', 'DEC', 'PHOT_G_MEAN_MAG', 'PMRA', 'PMDEC'
for fn in gaiafns:
if os.path.exists(fn):
gaiaobjs.append(fitsio.read(fn, ext='GAIAHPX', columns=cols))
gaiaobjs = np.concatenate(gaiaobjs)
gaiaobjs = rfn.rename_fields(gaiaobjs, {"SOURCE_ID": "REF_ID"})
# ADM limit Gaia objects to 3 magnitudes fainter than the passed
# ADM limit. This leaves some (!) leeway when matching to Tycho.
gaiaobjs = gaiaobjs[gaiaobjs['PHOT_G_MEAN_MAG'] < maglim + 3]
if verbose:
log.info('Read {} (G < {}) Gaia sources (pix={})...t={:.1f} mins'.format(
len(gaiaobjs), maglim+3, pixnum, (time()-t0)/60))
# ADM substitute URAT where Gaia proper motions don't exist.
ii = ((np.isnan(gaiaobjs["PMRA"]) | (gaiaobjs["PMRA"] == 0)) &
(np.isnan(gaiaobjs["PMDEC"]) | (gaiaobjs["PMDEC"] == 0)))
if verbose:
log.info('Add URAT for {} Gaia objs with no PMs (pix={})...t={:.1f} mins'
.format(np.sum(ii), pixnum, (time()-t0)/60))
urat = add_urat_pms(gaiaobjs[ii], numproc=1)
if verbose:
log.info('Found an additional {} URAT objects (pix={})...t={:.1f} mins'
.format(np.sum(urat["URAT_ID"] != -1), pixnum, (time()-t0)/60))
for col in "PMRA", "PMDEC":
gaiaobjs[col][ii] = urat[col]
# ADM need to track the URATID to track which objects have
# ADM substituted proper motions.
uratid = np.zeros_like(gaiaobjs["REF_ID"])-1
uratid[ii] = urat["URAT_ID"]
# ADM match to remove Tycho objects already in Gaia. Prefer the more
# ADM accurate Gaia proper motions. Note, however, that Tycho epochs
# ADM can differ from the mean (1991.5) by as as much as 0.86 years,
# ADM so a star with a proper motion as large as Barnard's Star
# ADM (10.3 arcsec) can be off by a significant margin (~10").
margin = 10.
ra, dec = rewind_coords(gaiaobjs["RA"], gaiaobjs["DEC"],
gaiaobjs["PMRA"], gaiaobjs["PMDEC"],
epochnow=gaiaepoch)
# ADM match Gaia to Tycho with a suitable margin.
if verbose:
log.info('Match Gaia to Tycho with margin={}" (pix={})...t={:.1f} mins'
.format(margin, pixnum, (time()-t0)/60))
igaia, itycho = radec_match_to([ra, dec],
[tychoobjs["RA"], tychoobjs["DEC"]],
sep=margin, radec=True)
if verbose:
log.info('{} matches. Refining at 1" (pix={})...t={:.1f} mins'.format(
len(itycho), pixnum, (time()-t0)/60))
# ADM match Gaia to Tycho at the more exact reference epoch.
epoch_ra = tychoobjs[itycho]["EPOCH_RA"]
epoch_dec = tychoobjs[itycho]["EPOCH_DEC"]
# ADM some of the Tycho epochs aren't populated.
epoch_ra[epoch_ra == 0], epoch_dec[epoch_dec == 0] = 1991.5, 1991.5
ra, dec = rewind_coords(gaiaobjs["RA"][igaia], gaiaobjs["DEC"][igaia],
gaiaobjs["PMRA"][igaia], gaiaobjs["PMDEC"][igaia],
epochnow=gaiaepoch,
epochpast=epoch_ra, epochpastdec=epoch_dec)
# ADM catch the corner case where there are no initial matches.
if ra.size > 0:
_, refined = radec_match_to([ra, dec], [tychoobjs["RA"][itycho],
tychoobjs["DEC"][itycho]], radec=True)
else:
refined = np.array([], dtype='int')
# ADM retain Tycho objects that DON'T match Gaia.
keep = np.ones(len(tychoobjs), dtype='bool')
keep[itycho[refined]] = False
tychokeep, tychomag = tychoobjs[keep], tychomag[keep]
if verbose:
log.info('Kept {} Tychos with no Gaia match (pix={})...t={:.1f} mins'
.format(len(tychokeep), pixnum, (time()-t0)/60))
# ADM now we're done matching to Gaia, limit Gaia to the passed
# ADM magnitude limit and to the HEALPixel boundary of interest.
theta, phi = np.radians(90-gaiaobjs["DEC"]), np.radians(gaiaobjs["RA"])
gaiahpx = hp.ang2pix(nside, theta, phi, nest=True)
ii = (gaiahpx == pixnum) & (gaiaobjs['PHOT_G_MEAN_MAG'] < maglim)
gaiakeep, uratid = gaiaobjs[ii], uratid[ii]
if verbose:
log.info('Mask also comprises {} Gaia sources (pix={})...t={:.1f} mins'
.format(len(gaiakeep), pixnum, (time()-t0)/60))
# ADM move the coordinates forwards to the input mask epoch.
epoch_ra, epoch_dec = tychokeep["EPOCH_RA"], tychokeep["EPOCH_DEC"]
# ADM some of the Tycho epochs aren't populated.
epoch_ra[epoch_ra == 0], epoch_dec[epoch_dec == 0] = 1991.5, 1991.5
ra, dec = rewind_coords(
tychokeep["RA"], tychokeep["DEC"], tychokeep["PM_RA"], tychokeep["PM_DEC"],
epochnow=epoch_ra, epochnowdec=epoch_dec, epochpast=maskepoch)
tychokeep["RA"], tychokeep["DEC"] = ra, dec
ra, dec = rewind_coords(
gaiakeep["RA"], gaiakeep["DEC"], gaiakeep["PMRA"], gaiakeep["PMDEC"],
epochnow=gaiaepoch, epochpast=maskepoch)
gaiakeep["RA"], gaiakeep["DEC"] = ra, dec
# ADM finally, format according to the mask data model...
gaiamask = np.zeros(len(gaiakeep), dtype=maskdatamodel.dtype)
tychomask = np.zeros(len(tychokeep), dtype=maskdatamodel.dtype)
for col in "RA", "DEC":
gaiamask[col] = gaiakeep[col]
gaiamask["PM"+col] = gaiakeep["PM"+col]
tychomask[col] = tychokeep[col]
tychomask["PM"+col] = tychokeep["PM_"+col]
gaiamask["REF_ID"] = gaiakeep["REF_ID"]
# ADM take care to rigorously convert to int64 for Tycho.
tychomask["REF_ID"] = tychokeep["TYC1"].astype('int64')*int(1e6) + \
tychokeep["TYC2"].astype('int64')*10 + tychokeep["TYC3"]
gaiamask["REF_CAT"], tychomask["REF_CAT"] = 'G2', 'T2'
gaiamask["REF_MAG"] = gaiakeep['PHOT_G_MEAN_MAG']
tychomask["REF_MAG"] = tychomag
gaiamask["URAT_ID"], tychomask["URAT_ID"] = uratid, -1
gaiamask["TYPE"], tychomask["TYPE"] = 'PSF', 'PSF'
mask = np.concatenate([gaiamask, tychomask])
# ADM ...and add the mask radii.
mask["IN_RADIUS"], mask["NEAR_RADIUS"] = radii(mask["REF_MAG"])
if verbose:
log.info("Done making mask...(pix={})...t={:.1f} mins".format(
pixnum, (time()-t0)/60.))
return mask | c82768cd652feca5f07f857f6225a96931669d07 | 12,499 |